aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Huang2018-08-21 05:01:29 -0500
committerDavid Huang2018-08-21 05:01:29 -0500
commitc4efa79993284d3b96ac49bccd84a9e9f113c755 (patch)
tree9346c8d62b4fe85810fa4876dc66c45b818c733f
parent5761aab0d699493c2eb9907e3b249d29407cba17 (diff)
parent09a53c9ff85affd1c6fbe66734e0d3e839dcb1c3 (diff)
downloadpsdkla-kernel-p-ti-lsk-linux-4.4.y.tar.gz
psdkla-kernel-p-ti-lsk-linux-4.4.y.tar.xz
psdkla-kernel-p-ti-lsk-linux-4.4.y.zip
Merge branch 'ti-lsk-linux-4.4.y' of git://git.ti.com/ti-linux-kernel/ti-linux-kernel into p-ti-lsk-linux-4.4.yp-ti-lsk-linux-4.4.y
Auto Merge of: TI-Feature: lcpd-linux-4.4.y_linux-4.4.y TI-Tree: git://git.ti.com/ti-linux-kernel/ti-linux-kernel.git TI-Branch: ti-lsk-linux-4.4.y * 'ti-lsk-linux-4.4.y' of git://git.ti.com/ti-linux-kernel/ti-linux-kernel: (2458 commits) Linux 4.4.150 x86/speculation/l1tf: Exempt zeroed PTEs from inversion Linux 4.4.149 x86/mm: Add TLB purge to free pmd/pte page interfaces ioremap: Update pgtable free interfaces with addr Bluetooth: hidp: buffer overflow in hidp_process_report ASoC: Intel: cht_bsw_max98090_ti: Fix jack initialization crypto: ablkcipher - fix crash flushing dcache in error path crypto: blkcipher - fix crash flushing dcache in error path crypto: vmac - separate tfm and request context crypto: vmac - require a block cipher with 128-bit block size kbuild: verify that $DEPMOD is installed i2c: ismt: fix wrong device address when unmap the data buffer kasan: don't emit builtin calls when sanitization is off tcp: Fix missing range_truesize enlargement in the backport x86/mm: Disable ioremap free page handling on x86-PAE Linux 4.4.148 x86/speculation/l1tf: Unbreak !__HAVE_ARCH_PFN_MODIFY_ALLOWED architectures x86/init: fix build with CONFIG_SWAP=n x86/speculation/l1tf: Fix up CPU feature flags ... Signed-off-by: David Huang <d-huang@ti.com>
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu1
-rw-r--r--Documentation/Changes17
-rw-r--r--Documentation/device-mapper/thin-provisioning.txt8
-rw-r--r--Documentation/devicetree/bindings/dma/snps-dma.txt2
-rw-r--r--Documentation/filesystems/ext4.txt2
-rw-r--r--Documentation/filesystems/proc.txt26
-rw-r--r--Documentation/kernel-parameters.txt52
-rw-r--r--Documentation/networking/netdev-FAQ.txt9
-rw-r--r--Documentation/printk-formats.txt3
-rw-r--r--Documentation/spec_ctrl.txt94
-rw-r--r--Documentation/speculation.txt90
-rw-r--r--Makefile21
-rw-r--r--arch/alpha/include/asm/futex.h26
-rw-r--r--arch/alpha/include/asm/xchg.h30
-rw-r--r--arch/alpha/kernel/console.c1
-rw-r--r--arch/alpha/kernel/pci_impl.h3
-rw-r--r--arch/alpha/kernel/process.c3
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/include/asm/futex.h40
-rw-r--r--arch/arc/include/asm/page.h2
-rw-r--r--arch/arc/include/asm/pgtable.h2
-rw-r--r--arch/arm/boot/dts/am4372.dtsi6
-rw-r--r--arch/arm/boot/dts/at91sam9g25.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos4412-trats2.dts2
-rw-r--r--arch/arm/boot/dts/imx53-qsrb.dts2
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi2
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-som.dtsi8
-rw-r--r--arch/arm/boot/dts/ls1021a-qds.dts2
-rw-r--r--arch/arm/boot/dts/ls1021a-twr.dts2
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi2
-rw-r--r--arch/arm/boot/dts/moxart-uc7112lx.dts2
-rw-r--r--arch/arm/boot/dts/moxart.dtsi17
-rw-r--r--arch/arm/boot/dts/omap4.dtsi2
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi7
-rw-r--r--arch/arm/boot/dts/r8a7791-koelsch.dts2
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi7
-rw-r--r--arch/arm/boot/dts/s5pv210.dtsi1
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi2
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi2
-rw-r--r--arch/arm/boot/dts/spear1310-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi4
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi6
-rw-r--r--arch/arm/boot/dts/spear600.dtsi1
-rw-r--r--arch/arm/boot/dts/stih407.dtsi3
-rw-r--r--arch/arm/boot/dts/stih410.dtsi3
-rw-r--r--arch/arm/include/asm/assembler.h10
-rw-r--r--arch/arm/include/asm/futex.h26
-rw-r--r--arch/arm/include/asm/kgdb.h2
-rw-r--r--arch/arm/include/asm/uaccess.h2
-rw-r--r--arch/arm/include/asm/vdso.h2
-rw-r--r--arch/arm/include/asm/xen/events.h2
-rw-r--r--arch/arm/kernel/ftrace.c11
-rw-r--r--arch/arm/kernel/traps.c5
-rw-r--r--arch/arm/kernel/vdso.c12
-rw-r--r--arch/arm/kvm/handle_exit.c13
-rw-r--r--arch/arm/lib/csumpartialcopyuser.S4
-rw-r--r--arch/arm/lib/getuser.S10
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c10
-rw-r--r--arch/arm/mach-imx/cpu.c3
-rw-r--r--arch/arm/mach-imx/mxc.h6
-rw-r--r--arch/arm/mach-mvebu/Kconfig4
-rw-r--r--arch/arm/mach-omap1/clock.c6
-rw-r--r--arch/arm/mach-omap2/omap-secure.c21
-rw-r--r--arch/arm/mach-omap2/omap-secure.h4
-rw-r--r--arch/arm/mach-omap2/pm.c4
-rw-r--r--arch/arm/mach-omap2/pm.h4
-rw-r--r--arch/arm/mach-omap2/pm34xx.c13
-rw-r--r--arch/arm/mach-omap2/sleep34xx.S26
-rw-r--r--arch/arm/mach-omap2/timer.c19
-rw-r--r--arch/arm/mach-pxa/tosa-bt.c4
-rw-r--r--arch/arm/mach-tegra/Kconfig2
-rw-r--r--arch/arm/plat-omap/dmtimer.c7
-rw-r--r--arch/arm/probes/kprobes/opt-arm.c4
-rw-r--r--arch/arm64/Kconfig17
-rw-r--r--arch/arm64/Kconfig.platforms2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi2
-rw-r--r--arch/arm64/include/asm/assembler.h40
-rw-r--r--arch/arm64/include/asm/atomic_lse.h14
-rw-r--r--arch/arm64/include/asm/bug.h33
-rw-r--r--arch/arm64/include/asm/cputype.h18
-rw-r--r--arch/arm64/include/asm/futex.h26
-rw-r--r--arch/arm64/include/asm/memory.h6
-rw-r--r--arch/arm64/include/asm/spinlock.h4
-rw-r--r--arch/arm64/kernel/traps.c2
-rw-r--r--arch/arm64/mm/mmu.c12
-rw-r--r--arch/arm64/mm/proc.S5
-rw-r--r--arch/frv/include/asm/futex.h3
-rw-r--r--arch/frv/include/asm/timex.h6
-rw-r--r--arch/frv/kernel/futex.c27
-rw-r--r--arch/hexagon/include/asm/futex.h38
-rw-r--r--arch/ia64/include/asm/futex.h25
-rw-r--r--arch/ia64/kernel/module.c4
-rw-r--r--arch/m68k/coldfire/device.c12
-rw-r--r--arch/m68k/mm/kmap.c3
-rw-r--r--arch/microblaze/boot/Makefile10
-rw-r--r--arch/microblaze/include/asm/futex.h38
-rw-r--r--arch/mips/ath25/board.c2
-rw-r--r--arch/mips/ath79/common.c2
-rw-r--r--arch/mips/bcm47xx/setup.c6
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c12
-rw-r--r--arch/mips/include/asm/futex.h25
-rw-r--r--arch/mips/include/asm/io.h2
-rw-r--r--arch/mips/include/asm/kprobes.h3
-rw-r--r--arch/mips/include/asm/mach-ath79/ar71xx_regs.h2
-rw-r--r--arch/mips/include/asm/mipsregs.h3
-rw-r--r--arch/mips/include/asm/pci.h2
-rw-r--r--arch/mips/include/asm/pgtable-32.h7
-rw-r--r--arch/mips/include/asm/uaccess.h11
-rw-r--r--arch/mips/kernel/mcount.S27
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c16
-rw-r--r--arch/mips/kernel/process.c35
-rw-r--r--arch/mips/kernel/ptrace.c24
-rw-r--r--arch/mips/kernel/ptrace32.c6
-rw-r--r--arch/mips/kernel/smp-bmips.c8
-rw-r--r--arch/mips/kernel/traps.c1
-rw-r--r--arch/mips/kvm/mips.c2
-rw-r--r--arch/mips/lib/Makefile3
-rw-r--r--arch/mips/lib/libgcc.h17
-rw-r--r--arch/mips/lib/memset.S11
-rw-r--r--arch/mips/lib/multi3.c54
-rw-r--r--arch/mips/mm/ioremap.c37
-rw-r--r--arch/mips/mm/pgtable-32.c6
-rw-r--r--arch/mips/net/bpf_jit.c16
-rw-r--r--arch/mips/net/bpf_jit_asm.S23
-rw-r--r--arch/mips/ralink/reset.c7
-rw-r--r--arch/mips/txx9/rbtx4939/setup.c4
-rw-r--r--arch/mn10300/mm/misalignment.c2
-rw-r--r--arch/openrisc/kernel/traps.c10
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/include/asm/barrier.h32
-rw-r--r--arch/parisc/include/asm/futex.h25
-rw-r--r--arch/parisc/kernel/drivers.c4
-rw-r--r--arch/parisc/kernel/entry.S2
-rw-r--r--arch/parisc/kernel/pacache.S1
-rw-r--r--arch/parisc/kernel/syscall.S4
-rw-r--r--arch/powerpc/Kconfig3
-rw-r--r--arch/powerpc/include/asm/barrier.h3
-rw-r--r--arch/powerpc/include/asm/exception-64e.h6
-rw-r--r--arch/powerpc/include/asm/exception-64s.h55
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h15
-rw-r--r--arch/powerpc/include/asm/firmware.h5
-rw-r--r--arch/powerpc/include/asm/futex.h26
-rw-r--r--arch/powerpc/include/asm/hvcall.h18
-rw-r--r--arch/powerpc/include/asm/irq_work.h1
-rw-r--r--arch/powerpc/include/asm/opal.h3
-rw-r--r--arch/powerpc/include/asm/paca.h10
-rw-r--r--arch/powerpc/include/asm/page.h12
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h14
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h12
-rw-r--r--arch/powerpc/include/asm/setup.h13
-rw-r--r--arch/powerpc/include/asm/synch.h4
-rw-r--r--arch/powerpc/kernel/asm-offsets.c4
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S4
-rw-r--r--arch/powerpc/kernel/eeh_pe.c3
-rw-r--r--arch/powerpc/kernel/entry_64.S45
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S126
-rw-r--r--arch/powerpc/kernel/fadump.c3
-rw-r--r--arch/powerpc/kernel/head_8xx.S2
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c4
-rw-r--r--arch/powerpc/kernel/misc_64.S32
-rw-r--r--arch/powerpc/kernel/module_64.c12
-rw-r--r--arch/powerpc/kernel/pci_32.c1
-rw-r--r--arch/powerpc/kernel/process.c5
-rw-r--r--arch/powerpc/kernel/ptrace.c1
-rw-r--r--arch/powerpc/kernel/setup-common.c11
-rw-r--r--arch/powerpc/kernel/setup_64.c139
-rw-r--r--arch/powerpc/kernel/time.c14
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S9
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c5
-rw-r--r--arch/powerpc/kvm/book3s_hv.c12
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S7
-rw-r--r--arch/powerpc/kvm/book3s_pr.c6
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c34
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S7
-rw-r--r--arch/powerpc/kvm/book3s_segment.S4
-rw-r--r--arch/powerpc/lib/feature-fixups.c44
-rw-r--r--arch/powerpc/mm/fault.c2
-rw-r--r--arch/powerpc/mm/numa.c78
-rw-r--r--arch/powerpc/mm/slb.c8
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c3
-rw-r--r--arch/powerpc/perf/core-book3s.c29
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c2
-rw-r--r--arch/powerpc/platforms/chrp/time.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.c5
-rw-r--r--arch/powerpc/platforms/powermac/bootx_init.c4
-rw-r--r--arch/powerpc/platforms/powermac/setup.c1
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c4
-rw-r--r--arch/powerpc/platforms/powernv/idle.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-nvram.c21
-rw-r--r--arch/powerpc/platforms/powernv/opal-xscom.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal.c36
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c2
-rw-r--r--arch/powerpc/platforms/powernv/setup.c62
-rw-r--r--arch/powerpc/platforms/powernv/smp.c74
-rw-r--r--arch/powerpc/platforms/pseries/setup.c37
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/s390/Kconfig48
-rw-r--r--arch/s390/Makefile10
-rw-r--r--arch/s390/hypfs/inode.c2
-rw-r--r--arch/s390/include/asm/alternative-asm.h108
-rw-r--r--arch/s390/include/asm/alternative.h149
-rw-r--r--arch/s390/include/asm/barrier.h24
-rw-r--r--arch/s390/include/asm/cpu_mf.h6
-rw-r--r--arch/s390/include/asm/facility.h18
-rw-r--r--arch/s390/include/asm/futex.h23
-rw-r--r--arch/s390/include/asm/kvm_host.h3
-rw-r--r--arch/s390/include/asm/lowcore.h7
-rw-r--r--arch/s390/include/asm/nospec-branch.h17
-rw-r--r--arch/s390/include/asm/nospec-insn.h195
-rw-r--r--arch/s390/include/asm/processor.h4
-rw-r--r--arch/s390/include/asm/thread_info.h4
-rw-r--r--arch/s390/include/uapi/asm/kvm.h3
-rw-r--r--arch/s390/kernel/Makefile6
-rw-r--r--arch/s390/kernel/alternative.c112
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/base.S24
-rw-r--r--arch/s390/kernel/compat_linux.c8
-rw-r--r--arch/s390/kernel/early.c5
-rw-r--r--arch/s390/kernel/entry.S193
-rw-r--r--arch/s390/kernel/ipl.c2
-rw-r--r--arch/s390/kernel/irq.c5
-rw-r--r--arch/s390/kernel/mcount.S14
-rw-r--r--arch/s390/kernel/module.c65
-rw-r--r--arch/s390/kernel/nospec-branch.c166
-rw-r--r--arch/s390/kernel/nospec-sysfs.c21
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c4
-rw-r--r--arch/s390/kernel/processor.c18
-rw-r--r--arch/s390/kernel/reipl.S5
-rw-r--r--arch/s390/kernel/setup.c14
-rw-r--r--arch/s390/kernel/smp.c7
-rw-r--r--arch/s390/kernel/swsusp.S10
-rw-r--r--arch/s390/kernel/uprobes.c9
-rw-r--r--arch/s390/kernel/vmlinux.lds.S45
-rw-r--r--arch/s390/kvm/kvm-s390.c16
-rw-r--r--arch/s390/lib/mem.S9
-rw-r--r--arch/s390/net/bpf_jit.S16
-rw-r--r--arch/s390/net/bpf_jit_comp.c63
-rw-r--r--arch/sh/boards/mach-se/770x/setup.c10
-rw-r--r--arch/sh/include/asm/futex.h26
-rw-r--r--arch/sh/kernel/entry-common.S2
-rw-r--r--arch/sh/kernel/sh_ksyms_32.c3
-rw-r--r--arch/sh/kernel/traps_32.c3
-rw-r--r--arch/sh/lib/ashlsi3.S35
-rw-r--r--arch/sh/lib/ashrsi3.S33
-rw-r--r--arch/sh/lib/lshrsi3.S34
-rw-r--r--arch/sparc/include/asm/atomic_64.h6
-rw-r--r--arch/sparc/include/asm/futex_64.h26
-rw-r--r--arch/sparc/kernel/ds.c2
-rw-r--r--arch/sparc/kernel/ldc.c7
-rw-r--r--arch/tile/include/asm/futex.h40
-rw-r--r--arch/um/Makefile2
-rw-r--r--arch/um/os-Linux/signal.c2
-rw-r--r--arch/x86/Kconfig20
-rw-r--r--arch/x86/Kconfig.debug11
-rw-r--r--arch/x86/Makefile9
-rw-r--r--arch/x86/boot/Makefile5
-rw-r--r--arch/x86/boot/compressed/eboot.c6
-rw-r--r--arch/x86/boot/compressed/misc.c4
-rw-r--r--arch/x86/boot/cpuflags.h2
-rw-r--r--arch/x86/boot/mkcpustr.c2
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c2
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c3
-rw-r--r--arch/x86/crypto/chacha20_glue.c2
-rw-r--r--arch/x86/crypto/crc32-pclmul_glue.c2
-rw-r--r--arch/x86/crypto/crc32c-intel_glue.c9
-rw-r--r--arch/x86/crypto/crct10dif-pclmul_glue.c2
-rw-r--r--arch/x86/crypto/poly1305_glue.c1
-rw-r--r--arch/x86/crypto/twofish-x86_64-asm_64-3way.S112
-rw-r--r--arch/x86/entry/common.c3
-rw-r--r--arch/x86/entry/entry_32.S2
-rw-r--r--arch/x86/entry/entry_64.S18
-rw-r--r--arch/x86/entry/entry_64_compat.S75
-rw-r--r--arch/x86/entry/vdso/vdso32-setup.c1
-rw-r--r--arch/x86/entry/vdso/vdso32/system_call.S2
-rw-r--r--arch/x86/entry/vdso/vma.c3
-rw-r--r--arch/x86/entry/vsyscall/vsyscall_64.c7
-rw-r--r--arch/x86/include/asm/alternative.h6
-rw-r--r--arch/x86/include/asm/apic.h1
-rw-r--r--arch/x86/include/asm/apm.h6
-rw-r--r--arch/x86/include/asm/arch_hweight.h26
-rw-r--r--arch/x86/include/asm/asm-prototypes.h1
-rw-r--r--arch/x86/include/asm/asm.h63
-rw-r--r--arch/x86/include/asm/atomic.h1
-rw-r--r--arch/x86/include/asm/atomic64_32.h1
-rw-r--r--arch/x86/include/asm/barrier.h28
-rw-r--r--arch/x86/include/asm/cmpxchg.h1
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h2
-rw-r--r--arch/x86/include/asm/cmpxchg_64.h2
-rw-r--r--arch/x86/include/asm/cpufeature.h583
-rw-r--r--arch/x86/include/asm/cpufeatures.h337
-rw-r--r--arch/x86/include/asm/disabled-features.h18
-rw-r--r--arch/x86/include/asm/efi.h7
-rw-r--r--arch/x86/include/asm/fpu/internal.h188
-rw-r--r--arch/x86/include/asm/fpu/xstate.h2
-rw-r--r--arch/x86/include/asm/futex.h40
-rw-r--r--arch/x86/include/asm/intel-family.h72
-rw-r--r--arch/x86/include/asm/irq_work.h2
-rw-r--r--arch/x86/include/asm/irqflags.h4
-rw-r--r--arch/x86/include/asm/kvm_emulate.h6
-rw-r--r--arch/x86/include/asm/kvm_host.h3
-rw-r--r--arch/x86/include/asm/microcode_amd.h1
-rw-r--r--arch/x86/include/asm/mmu.h15
-rw-r--r--arch/x86/include/asm/mmu_context.h25
-rw-r--r--arch/x86/include/asm/msr-index.h22
-rw-r--r--arch/x86/include/asm/msr.h3
-rw-r--r--arch/x86/include/asm/mwait.h2
-rw-r--r--arch/x86/include/asm/nospec-branch.h101
-rw-r--r--arch/x86/include/asm/page_32_types.h9
-rw-r--r--arch/x86/include/asm/pgtable-2level.h17
-rw-r--r--arch/x86/include/asm/pgtable-3level.h37
-rw-r--r--arch/x86/include/asm/pgtable-invert.h41
-rw-r--r--arch/x86/include/asm/pgtable.h84
-rw-r--r--arch/x86/include/asm/pgtable_64.h54
-rw-r--r--arch/x86/include/asm/pgtable_types.h10
-rw-r--r--arch/x86/include/asm/processor.h12
-rw-r--r--arch/x86/include/asm/required-features.h10
-rw-r--r--arch/x86/include/asm/smap.h2
-rw-r--r--arch/x86/include/asm/smp.h10
-rw-r--r--arch/x86/include/asm/spec-ctrl.h80
-rw-r--r--arch/x86/include/asm/switch_to.h38
-rw-r--r--arch/x86/include/asm/thread_info.h19
-rw-r--r--arch/x86/include/asm/tlbflush.h13
-rw-r--r--arch/x86/include/asm/uaccess_64.h2
-rw-r--r--arch/x86/include/asm/vmx.h6
-rw-r--r--arch/x86/include/asm/vsyscall.h1
-rw-r--r--arch/x86/include/asm/xor_32.h2
-rw-r--r--arch/x86/include/uapi/asm/msgbuf.h31
-rw-r--r--arch/x86/include/uapi/asm/shmbuf.h42
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/alternative.c14
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c4
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/apic/vector.c14
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/amd.c42
-rw-r--r--arch/x86/kernel/cpu/bugs.c585
-rw-r--r--arch/x86/kernel/cpu/centaur.c4
-rw-r--r--arch/x86/kernel/cpu/common.c221
-rw-r--r--arch/x86/kernel/cpu/cpu.h3
-rw-r--r--arch/x86/kernel/cpu/cyrix.c1
-rw-r--r--arch/x86/kernel/cpu/intel.c78
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c10
-rw-r--r--arch/x86/kernel/cpu/match.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-inject.c5
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c70
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c17
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c47
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c21
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.sh6
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_uncore.c11
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_bts.c44
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_cstate.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_msr.c9
-rw-r--r--arch/x86/kernel/cpu/proc.c4
-rw-r--r--arch/x86/kernel/cpu/scattered.c20
-rw-r--r--arch/x86/kernel/cpu/transmeta.c6
-rw-r--r--arch/x86/kernel/devicetree.c21
-rw-r--r--arch/x86/kernel/e820.c1
-rw-r--r--arch/x86/kernel/fpu/core.c24
-rw-r--r--arch/x86/kernel/fpu/init.c169
-rw-r--r--arch/x86/kernel/fpu/xstate.c3
-rw-r--r--arch/x86/kernel/head_32.S11
-rw-r--r--arch/x86/kernel/head_64.S4
-rw-r--r--arch/x86/kernel/hpet.c1
-rw-r--r--arch/x86/kernel/hw_breakpoint.c6
-rw-r--r--arch/x86/kernel/i386_ksyms_32.c2
-rw-r--r--arch/x86/kernel/i8259.c1
-rw-r--r--arch/x86/kernel/irqflags.S26
-rw-r--r--arch/x86/kernel/kprobes/core.c61
-rw-r--r--arch/x86/kernel/kprobes/opt.c3
-rw-r--r--arch/x86/kernel/ldt.c4
-rw-r--r--arch/x86/kernel/machine_kexec_32.c6
-rw-r--r--arch/x86/kernel/machine_kexec_64.c5
-rw-r--r--arch/x86/kernel/module.c14
-rw-r--r--arch/x86/kernel/msr.c2
-rw-r--r--arch/x86/kernel/paravirt.c14
-rw-r--r--arch/x86/kernel/process.c224
-rw-r--r--arch/x86/kernel/setup.c6
-rw-r--r--arch/x86/kernel/smpboot.c10
-rw-r--r--arch/x86/kernel/tboot.c10
-rw-r--r--arch/x86/kernel/traps.c25
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/kernel/uprobes.c2
-rw-r--r--arch/x86/kernel/verify_cpu.S2
-rw-r--r--arch/x86/kernel/vm86_32.c7
-rw-r--r--arch/x86/kernel/vmlinux.lds.S11
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c3
-rw-r--r--arch/x86/kvm/Kconfig3
-rw-r--r--arch/x86/kvm/emulate.c88
-rw-r--r--arch/x86/kvm/ioapic.c20
-rw-r--r--arch/x86/kvm/lapic.c10
-rw-r--r--arch/x86/kvm/mmu.c10
-rw-r--r--arch/x86/kvm/svm.c26
-rw-r--r--arch/x86/kvm/vmx.c160
-rw-r--r--arch/x86/kvm/x86.c100
-rw-r--r--arch/x86/kvm/x86.h4
-rw-r--r--arch/x86/lib/Makefile2
-rw-r--r--arch/x86/lib/clear_page_64.S2
-rw-r--r--arch/x86/lib/cmdline.c34
-rw-r--r--arch/x86/lib/copy_page_64.S2
-rw-r--r--arch/x86/lib/copy_user_64.S2
-rw-r--r--arch/x86/lib/csum-copy_64.S12
-rw-r--r--arch/x86/lib/delay.c7
-rw-r--r--arch/x86/lib/getuser.S10
-rw-r--r--arch/x86/lib/hweight.S79
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/lib/memmove_64.S2
-rw-r--r--arch/x86/lib/memset_64.S2
-rw-r--r--arch/x86/lib/retpoline.S3
-rw-r--r--arch/x86/math-emu/Makefile4
-rw-r--r--arch/x86/math-emu/reg_compare.c16
-rw-r--r--arch/x86/mm/fault.c6
-rw-r--r--arch/x86/mm/init.c25
-rw-r--r--arch/x86/mm/ioremap.c4
-rw-r--r--arch/x86/mm/kaiser.c4
-rw-r--r--arch/x86/mm/kmmio.c37
-rw-r--r--arch/x86/mm/mmap.c21
-rw-r--r--arch/x86/mm/pageattr.c8
-rw-r--r--arch/x86/mm/pgtable.c102
-rw-r--r--arch/x86/mm/setup_nx.c5
-rw-r--r--arch/x86/mm/tlb.c33
-rw-r--r--arch/x86/net/bpf_jit_comp.c21
-rw-r--r--arch/x86/oprofile/nmi_int.c2
-rw-r--r--arch/x86/oprofile/op_model_amd.c1
-rw-r--r--arch/x86/platform/efi/efi_64.c3
-rw-r--r--arch/x86/platform/olpc/olpc-xo15-sci.c2
-rw-r--r--arch/x86/power/hibernate_32.c2
-rw-r--r--arch/x86/power/hibernate_64.c2
-rw-r--r--arch/x86/tools/relocs.c3
-rw-r--r--arch/x86/um/asm/barrier.h2
-rw-r--r--arch/x86/um/stub_segv.c2
-rw-r--r--arch/x86/xen/enlighten.c16
-rw-r--r--arch/x86/xen/mmu.c4
-rw-r--r--arch/x86/xen/smp.c5
-rw-r--r--arch/x86/xen/suspend.c16
-rw-r--r--arch/xtensa/include/asm/futex.h50
-rw-r--r--arch/xtensa/kernel/traps.c2
-rw-r--r--block/bio-integrity.c3
-rw-r--r--block/blk-cgroup.c4
-rw-r--r--block/blk-core.c10
-rw-r--r--block/blk-mq.c7
-rw-r--r--block/blk-throttle.c11
-rw-r--r--block/partition-generic.c4
-rw-r--r--block/partitions/msdos.c4
-rw-r--r--certs/Makefile33
-rw-r--r--crypto/ablkcipher.c57
-rw-r--r--crypto/af_alg.c10
-rw-r--r--crypto/ahash.c18
-rw-r--r--crypto/async_tx/async_pq.c5
-rw-r--r--crypto/authenc.c1
-rw-r--r--crypto/authencesn.c1
-rw-r--r--crypto/blkcipher.c54
-rw-r--r--crypto/cryptd.c3
-rw-r--r--crypto/poly1305_generic.c17
-rw-r--r--crypto/tcrypt.c6
-rw-r--r--crypto/vmac.c412
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/acpi_lpss.c2
-rw-r--r--drivers/acpi/acpi_pad.c3
-rw-r--r--drivers/acpi/acpi_processor.c9
-rw-r--r--drivers/acpi/acpica/evevent.c9
-rw-r--r--drivers/acpi/acpica/evxfevnt.c18
-rw-r--r--drivers/acpi/acpica/nseval.c8
-rw-r--r--drivers/acpi/acpica/nsutils.c23
-rw-r--r--drivers/acpi/acpica/psobject.c14
-rw-r--r--drivers/acpi/device_sysfs.c4
-rw-r--r--drivers/acpi/glue.c12
-rw-r--r--drivers/acpi/numa.c10
-rw-r--r--drivers/acpi/pci_irq.c3
-rw-r--r--drivers/acpi/pci_root.c4
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c50
-rw-r--r--drivers/acpi/processor_driver.c10
-rw-r--r--drivers/acpi/processor_perflib.c2
-rw-r--r--drivers/acpi/processor_throttling.c71
-rw-r--r--drivers/acpi/sbshc.c4
-rw-r--r--drivers/acpi/video_detect.c9
-rw-r--r--drivers/amba/bus.c17
-rw-r--r--drivers/android/binder.c4
-rw-r--r--drivers/ata/ahci.c100
-rw-r--r--drivers/ata/libahci_platform.c5
-rw-r--r--drivers/ata/libata-core.c35
-rw-r--r--drivers/ata/libata-eh.c12
-rw-r--r--drivers/ata/libata-scsi.c4
-rw-r--r--drivers/ata/libata-zpodd.c4
-rw-r--r--drivers/atm/zatm.c9
-rw-r--r--drivers/base/cacheinfo.c15
-rw-r--r--drivers/base/core.c14
-rw-r--r--drivers/base/cpu.c16
-rw-r--r--drivers/base/dd.c8
-rw-r--r--drivers/base/power/trace.c4
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/loop.c106
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/paride/pcd.c2
-rw-r--r--drivers/block/pktcdvd.c4
-rw-r--r--drivers/bluetooth/btsdio.c9
-rw-r--r--drivers/bluetooth/btusb.c28
-rw-r--r--drivers/bluetooth/hci_qca.c11
-rw-r--r--drivers/bus/brcmstb_gisb.c42
-rw-r--r--drivers/cdrom/cdrom.c5
-rw-r--r--drivers/cdrom/gdrom.c3
-rw-r--r--drivers/char/agp/intel-gtt.c2
-rw-r--r--drivers/char/hw_random/exynos-rng.c10
-rw-r--r--drivers/char/hw_random/stm32-rng.c9
-rw-r--r--drivers/char/hw_random/via-rng.c5
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c3
-rw-r--r--drivers/char/ipmi/ipmi_powernv.c5
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c6
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c8
-rw-r--r--drivers/char/random.c22
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c4
-rw-r--r--drivers/char/tpm/tpm-chip.c13
-rw-r--r--drivers/char/tpm/tpm-dev.c43
-rw-r--r--drivers/char/tpm/tpm-interface.c12
-rw-r--r--drivers/char/tpm/tpm.h1
-rw-r--r--drivers/char/tpm/tpm2-cmd.c6
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c5
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c8
-rw-r--r--drivers/char/tpm/tpm_tis.c5
-rw-r--r--drivers/char/virtio_console.c49
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c12
-rw-r--r--drivers/clk/bcm/clk-ns2.c2
-rw-r--r--drivers/clk/clk-conf.c2
-rw-r--r--drivers/clk/clk-scpi.c6
-rw-r--r--drivers/clk/clk-si5351.c2
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/mvebu/armada-38x.c15
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c1
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c23
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c4
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c8
-rw-r--r--drivers/clk/samsung/clk-exynos5260.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c12
-rw-r--r--drivers/clk/samsung/clk-s3c2410.c16
-rw-r--r--drivers/clk/tegra/clk-tegra30.c11
-rw-r--r--drivers/clocksource/fsl_ftm_timer.c2
-rw-r--r--drivers/cpufreq/Kconfig2
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c23
-rw-r--r--drivers/cpufreq/cpufreq.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c34
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c2
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c8
-rw-r--r--drivers/cpufreq/sh-cpufreq.c45
-rw-r--r--drivers/cpuidle/coupled.c1
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c34
-rw-r--r--drivers/cpuidle/dt_idle_states.c4
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c23
-rw-r--r--drivers/crypto/caam/ctrl.c8
-rw-r--r--drivers/crypto/padlock-aes.c10
-rw-r--r--drivers/crypto/padlock-sha.c2
-rw-r--r--drivers/crypto/s5p-sss.c13
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-core.c1
-rw-r--r--drivers/crypto/vmx/aes.c2
-rw-r--r--drivers/crypto/vmx/aes_cbc.c2
-rw-r--r--drivers/crypto/vmx/aes_ctr.c2
-rw-r--r--drivers/crypto/vmx/ghash.c2
-rw-r--r--drivers/devfreq/devfreq.c2
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/at_xdmac.c4
-rw-r--r--drivers/dma/dma-jz4740.c4
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/imx-sdma.c40
-rw-r--r--drivers/dma/ioat/init.c2
-rw-r--r--drivers/dma/pl330.c6
-rw-r--r--drivers/dma/pxa_dma.c2
-rw-r--r--drivers/dma/sh/rcar-dmac.c2
-rw-r--r--drivers/dma/sh/usb-dmac.c4
-rw-r--r--drivers/dma/ti-dma-crossbar.c10
-rw-r--r--drivers/dma/zx296702_dma.c2
-rw-r--r--drivers/edac/mv64x60_edac.c2
-rw-r--r--drivers/edac/octeon_edac-lmc.c1
-rw-r--r--drivers/firewire/ohci.c8
-rw-r--r--drivers/firmware/dmi_scan.c22
-rw-r--r--drivers/gpio/gpio-ath79.c3
-rw-r--r--drivers/gpio/gpio-intel-mid.c2
-rw-r--r--drivers/gpio/gpio-iop.c4
-rw-r--r--drivers/gpio/gpio-rcar.c46
-rw-r--r--drivers/gpio/gpio-xgene.c13
-rw-r--r--drivers/gpio/gpiolib.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c10
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c25
-rw-r--r--drivers/gpu/drm/drm_atomic.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c12
-rw-r--r--drivers/gpu/drm/drm_fops.c1
-rw-r--r--drivers/gpu/drm/drm_irq.c14
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c20
-rw-r--r--drivers/gpu/drm/exynos/regs-fimc.h2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c10
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c12
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c9
-rw-r--r--drivers/gpu/drm/radeon/cik.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c115
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c56
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c9
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c17
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c5
-rw-r--r--drivers/hid/hid-core.c25
-rw-r--r--drivers/hid/hid-debug.c8
-rw-r--r--drivers/hid/hid-elo.c6
-rw-r--r--drivers/hid/hid-ids.h6
-rw-r--r--drivers/hid/hid-input.c23
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-plantronics.c6
-rw-r--r--drivers/hid/hid-rmi.c4
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c2
-rw-r--r--drivers/hid/hidraw.c5
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c21
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c11
-rw-r--r--drivers/hsi/clients/ssi_protocol.c5
-rw-r--r--drivers/hv/hv.c5
-rw-r--r--drivers/hwmon/ina2xx.c90
-rw-r--r--drivers/hwmon/nct6775.c10
-rw-r--r--drivers/hwmon/pmbus/adm1275.c8
-rw-r--r--drivers/hwmon/pmbus/max8688.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c21
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c13
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c2
-rw-r--r--drivers/hwtracing/stm/core.c7
-rw-r--r--drivers/i2c/busses/i2c-imx.c3
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c8
-rw-r--r--drivers/i2c/busses/i2c-rcar.c199
-rw-r--r--drivers/i2c/busses/i2c-scmi.c4
-rw-r--r--drivers/i2c/i2c-boardinfo.c4
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/idle/Kconfig1
-rw-r--r--drivers/iio/accel/st_accel_core.c9
-rw-r--r--drivers/iio/adc/axp288_adc.c2
-rw-r--r--drivers/iio/adc/hi8435.c27
-rw-r--r--drivers/iio/buffer/kfifo_buf.c11
-rw-r--r--drivers/iio/imu/adis_trigger.c7
-rw-r--r--drivers/iio/industrialio-buffer.c2
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c2
-rw-r--r--drivers/iio/pressure/st_pressure_core.c10
-rw-r--r--drivers/infiniband/Kconfig12
-rw-r--r--drivers/infiniband/core/Makefile4
-rw-r--r--drivers/infiniband/core/addr.c16
-rw-r--r--drivers/infiniband/core/cma.c6
-rw-r--r--drivers/infiniband/core/iwpm_util.c1
-rw-r--r--drivers/infiniband/core/mad.c11
-rw-r--r--drivers/infiniband/core/ucma.c82
-rw-r--r--drivers/infiniband/core/umem.c13
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c13
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c1
-rw-r--r--drivers/infiniband/hw/mlx4/main.c19
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c52
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c7
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c27
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c4
-rw-r--r--drivers/infiniband/hw/qib/qib.h3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c44
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c16
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c18
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c6
-rw-r--r--drivers/input/input-leds.c8
-rw-r--r--drivers/input/keyboard/matrix_keypad.c4
-rw-r--r--drivers/input/keyboard/qt1070.c9
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c23
-rw-r--r--drivers/input/misc/drv260x.c2
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c2
-rw-r--r--drivers/input/mouse/elan_i2c.h2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c13
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c9
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c32
-rw-r--r--drivers/input/mouse/elantech.c22
-rw-r--r--drivers/input/mouse/trackpoint.c3
-rw-r--r--drivers/input/mousedev.c62
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h31
-rw-r--r--drivers/input/touchscreen/ar1021_i2c.c2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c9
-rw-r--r--drivers/input/touchscreen/goodix.c1
-rw-r--r--drivers/input/touchscreen/tsc2007.c8
-rw-r--r--drivers/iommu/intel-svm.c10
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/iommu/iova.c2
-rw-r--r--drivers/iommu/omap-iommu.c21
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c9
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/isdn/hardware/eicon/diva.c22
-rw-r--r--drivers/isdn/hardware/eicon/diva.h5
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c18
-rw-r--r--drivers/isdn/hardware/eicon/message.c16
-rw-r--r--drivers/isdn/icn/icn.c2
-rw-r--r--drivers/isdn/mISDN/stack.c2
-rw-r--r--drivers/isdn/sc/init.c7
-rw-r--r--drivers/leds/led-triggers.c12
-rw-r--r--drivers/leds/leds-pca955x.c2
-rw-r--r--drivers/md/bcache/alloc.c23
-rw-r--r--drivers/md/bcache/bcache.h2
-rw-r--r--drivers/md/bcache/btree.c14
-rw-r--r--drivers/md/bcache/request.c2
-rw-r--r--drivers/md/bcache/super.c40
-rw-r--r--drivers/md/bcache/sysfs.c11
-rw-r--r--drivers/md/bcache/writeback.c27
-rw-r--r--drivers/md/dm-bufio.c31
-rw-r--r--drivers/md/dm-io.c1
-rw-r--r--drivers/md/dm-ioctl.c4
-rw-r--r--drivers/md/dm-thin.c11
-rw-r--r--drivers/md/dm.c3
-rw-r--r--drivers/md/md-cluster.c4
-rw-r--r--drivers/md/md.c17
-rw-r--r--drivers/md/raid1.c11
-rw-r--r--drivers/md/raid10.c13
-rw-r--r--drivers/md/raid5.c37
-rw-r--r--drivers/media/common/b2c2/flexcop-fe-tuner.c4
-rw-r--r--drivers/media/common/siano/smsendian.c14
-rw-r--r--drivers/media/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c23
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c23
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c7
-rw-r--r--drivers/media/dvb-frontends/si2168.c3
-rw-r--r--drivers/media/dvb-frontends/ts2020.c4
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c64
-rw-r--r--drivers/media/i2c/s5k6aa.c5
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c11
-rw-r--r--drivers/media/i2c/soc_camera/ov6650.c2
-rw-r--r--drivers/media/i2c/tc358743.c46
-rw-r--r--drivers/media/pci/bt8xx/bt878.c3
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c10
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c7
-rw-r--r--drivers/media/pci/saa7164/saa7164-fw.c3
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c11
-rw-r--r--drivers/media/platform/omap3isp/isp.c7
-rw-r--r--drivers/media/platform/rcar_jpu.c4
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c7
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.c4
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c6
-rw-r--r--drivers/media/rc/mceusb.c9
-rw-r--r--drivers/media/tuners/r820t.c13
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c39
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c1
-rw-r--r--drivers/media/usb/em28xx/Kconfig2
-rw-r--r--drivers/media/usb/em28xx/em28xx.h2
-rw-r--r--drivers/media/usb/go7007/Kconfig2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c3
-rw-r--r--drivers/media/v4l2-core/Kconfig1
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c1023
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c5
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c13
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c6
-rw-r--r--drivers/memory/tegra/mc.c22
-rw-r--r--drivers/memory/tegra/mc.h9
-rw-r--r--drivers/memory/tegra/tegra114.c2
-rw-r--r--drivers/memory/tegra/tegra124.c6
-rw-r--r--drivers/memory/tegra/tegra210.c3
-rw-r--r--drivers/memory/tegra/tegra30.c2
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/message/fusion/mptctl.c2
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/mfd/cros_ec.c6
-rw-r--r--drivers/mfd/intel-lpss.c4
-rw-r--r--drivers/mfd/palmas.c13
-rw-r--r--drivers/misc/enclosure.c7
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c27
-rw-r--r--drivers/misc/mei/main.c1
-rw-r--r--drivers/misc/vmw_balloon.c27
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c10
-rw-r--r--drivers/mmc/core/core.c8
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/jz4740_mmc.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c4
-rw-r--r--drivers/mmc/host/sdhci-iproc.c30
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c24
-rw-r--r--drivers/mtd/chips/Kconfig4
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c33
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c60
-rw-r--r--drivers/mtd/chips/jedec_probe.c2
-rw-r--r--drivers/mtd/maps/ck804xrom.c4
-rw-r--r--drivers/mtd/maps/esb2rom.c4
-rw-r--r--drivers/mtd/maps/ichxrom.c10
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c13
-rw-r--r--drivers/mtd/nand/denali_pci.c4
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c22
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c84
-rw-r--r--drivers/mtd/nand/mxc_nand.c5
-rw-r--r--drivers/mtd/nand/nand_base.c14
-rw-r--r--drivers/mtd/nand/sh_flctl.c5
-rw-r--r--drivers/mtd/nand/sunxi_nand.c8
-rw-r--r--drivers/mtd/ubi/attach.c139
-rw-r--r--drivers/mtd/ubi/block.c44
-rw-r--r--drivers/mtd/ubi/build.c14
-rw-r--r--drivers/mtd/ubi/eba.c96
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c7
-rw-r--r--drivers/mtd/ubi/fastmap.c51
-rw-r--r--drivers/mtd/ubi/ubi.h46
-rw-r--r--drivers/mtd/ubi/vmt.c15
-rw-r--r--drivers/mtd/ubi/wl.c118
-rw-r--r--drivers/net/Kconfig3
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c98
-rw-r--r--drivers/net/bonding/bond_options.c1
-rw-r--r--drivers/net/can/cc770/cc770.c100
-rw-r--r--drivers/net/can/cc770/cc770.h2
-rw-r--r--drivers/net/can/flexcan.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/can/xilinx_can.c323
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h1
-rw-r--r--drivers/net/ethernet/arc/emac_main.c53
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c6
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c34
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c33
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c31
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c32
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c23
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c18
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c9
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c13
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/hp/hp100.c20
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c26
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c27
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c27
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c12
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c11
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c18
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c10
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c20
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c5
-rw-r--r--drivers/net/ethernet/sun/sungem.c22
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c19
-rw-r--r--drivers/net/ethernet/ti/tlan.c2
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c5
-rw-r--r--drivers/net/irda/w83977af_ir.c4
-rw-r--r--drivers/net/phy/bcm-cygnus.c6
-rw-r--r--drivers/net/phy/bcm-phy-lib.h7
-rw-r--r--drivers/net/phy/bcm7xxx.c4
-rw-r--r--drivers/net/phy/dp83640.c18
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/phy/phy_device.c7
-rw-r--r--drivers/net/ppp/ppp_generic.c9
-rw-r--r--drivers/net/ppp/pppoe.c15
-rw-r--r--drivers/net/ppp/pptp.c1
-rw-r--r--drivers/net/slip/slhc.c5
-rw-r--r--drivers/net/team/team.c45
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/cdc_ether.c16
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c21
-rw-r--r--drivers/net/usb/lan78xx.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c19
-rw-r--r--drivers/net/usb/r8152.c7
-rw-r--r--drivers/net/usb/smsc75xx.c7
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c18
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c7
-rw-r--r--drivers/net/vrf.c8
-rw-r--r--drivers/net/vxlan.c7
-rw-r--r--drivers/net/wan/hdlc_ppp.c5
-rw-r--r--drivers/net/wan/pc300too.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c105
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c27
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c4
-rw-r--r--drivers/net/wireless/ath/regd.c19
-rw-r--r--drivers/net/wireless/ath/regd.h5
-rw-r--r--drivers/net/wireless/ath/regd_common.h13
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c20
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c24
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c9
-rw-r--r--drivers/net/wireless/cw1200/pm.h9
-rw-r--r--drivers/net/wireless/cw1200/wsm.c8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c21
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mcu.c10
-rw-r--r--drivers/net/wireless/mwifiex/usb.c3
-rw-r--r--drivers/net/wireless/mwifiex/util.c8
-rw-r--r--drivers/net/wireless/ray_cs.c7
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h1
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c5
-rw-r--r--drivers/net/xen-netfront.c88
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c2
-rw-r--r--drivers/nfc/nfcmrvl/spi.c5
-rw-r--r--drivers/ntb/ntb_transport.c3
-rw-r--r--drivers/nvdimm/bus.c14
-rw-r--r--drivers/nvme/host/pci.c22
-rw-r--r--drivers/of/device.c2
-rw-r--r--drivers/of/unittest.c8
-rw-r--r--drivers/parisc/lba_pci.c20
-rw-r--r--drivers/parport/parport_pc.c4
-rw-r--r--drivers/pci/controller/pci-keystone.c5
-rw-r--r--drivers/pci/controller/pci-layerscape.c10
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c23
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c13
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/pci/pci-driver.c17
-rw-r--r--drivers/pci/pci-sysfs.c15
-rw-r--r--drivers/pci/probe.c2
-rw-r--r--drivers/pci/quirks.c7
-rw-r--r--drivers/pci/setup-res.c2
-rw-r--r--drivers/perf/arm_pmu.c12
-rw-r--r--drivers/pinctrl/core.c24
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c6
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c8
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c2
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c9
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c2
-rw-r--r--drivers/platform/x86/tc1100-wmi.c2
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/bq27xxx_battery.c6
-rw-r--r--drivers/power/pda_power.c49
-rw-r--r--drivers/power/reset/zx-reboot.c4
-rw-r--r--drivers/powercap/powercap_sys.c1
-rw-r--r--drivers/ptp/ptp_chardev.c1
-rw-r--r--drivers/ptp/ptp_clock.c18
-rw-r--r--drivers/pwm/pwm-tegra.c7
-rw-r--r--drivers/regulator/anatop-regulator.c5
-rw-r--r--drivers/regulator/of_regulator.c1
-rw-r--r--drivers/regulator/pfuze100-regulator.c1
-rw-r--r--drivers/rtc/hctosys.c5
-rw-r--r--drivers/rtc/interface.c14
-rw-r--r--drivers/rtc/rtc-cmos.c17
-rw-r--r--drivers/rtc/rtc-ds1374.c10
-rw-r--r--drivers/rtc/rtc-opal.c22
-rw-r--r--drivers/rtc/rtc-snvs.c17
-rw-r--r--drivers/rtc/rtc-tx4939.c6
-rw-r--r--drivers/s390/block/dasd.c8
-rw-r--r--drivers/s390/block/dasd_3990_erp.c10
-rw-r--r--drivers/s390/block/dasd_eckd.c16
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/cio/chsc.c14
-rw-r--r--drivers/s390/cio/device_fsm.c7
-rw-r--r--drivers/s390/cio/io_sch.h1
-rw-r--r--drivers/s390/cio/qdio_main.c42
-rw-r--r--drivers/s390/cio/qdio_setup.c12
-rw-r--r--drivers/s390/net/qeth_core.h5
-rw-r--r--drivers/s390/net/qeth_core_main.c54
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c63
-rw-r--r--drivers/s390/scsi/zfcp_erp.c123
-rw-r--r--drivers/s390/scsi/zfcp_ext.h10
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c32
-rw-r--r--drivers/scsi/3w-9xxx.c5
-rw-r--r--drivers/scsi/3w-xxxx.c3
-rw-r--r--drivers/scsi/aacraid/commsup.c6
-rw-r--r--drivers/scsi/aacraid/linit.c5
-rw-r--r--drivers/scsi/advansys.c24
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c10
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c1
-rw-r--r--drivers/scsi/csiostor/csio_hw.c5
-rw-r--r--drivers/scsi/dpt_i2o.c3
-rw-r--r--drivers/scsi/fdomain.c2
-rw-r--r--drivers/scsi/g_NCR5380.c5
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h2
-rw-r--r--drivers/scsi/initio.c16
-rw-r--r--drivers/scsi/ipr.c16
-rw-r--r--drivers/scsi/libiscsi.c26
-rw-r--r--drivers/scsi/libsas/sas_expander.c4
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c5
-rw-r--r--drivers/scsi/mac_esp.c33
-rw-r--r--drivers/scsi/megaraid.c3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c30
-rw-r--r--drivers/scsi/mvumi.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c46
-rw-r--r--drivers/scsi/scsi_devinfo.c9
-rw-r--r--drivers/scsi/scsi_dh.c10
-rw-r--r--drivers/scsi/scsi_transport_srp.c22
-rw-r--r--drivers/scsi/sd.c5
-rw-r--r--drivers/scsi/ses.c1
-rw-r--r--drivers/scsi/sg.c85
-rw-r--r--drivers/scsi/sim710.c3
-rw-r--r--drivers/scsi/sr.c50
-rw-r--r--drivers/scsi/storvsc_drv.c5
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c11
-rw-r--r--drivers/scsi/virtio_scsi.c25
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi-dw-mmio.c2
-rw-r--r--drivers/spi/spi-imx.c15
-rw-r--r--drivers/spi/spi-omap2-mcspi.c9
-rw-r--r--drivers/spi/spi-pxa2xx.h2
-rw-r--r--drivers/spi/spi-sun4i.c2
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi.c10
-rw-r--r--drivers/ssb/main.c7
-rw-r--r--drivers/staging/android/ashmem.c32
-rw-r--r--drivers/staging/android/ion/ion_heap.c2
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c2
-rw-r--r--drivers/staging/comedi/drivers.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c2
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c2
-rw-r--r--drivers/staging/iio/adc/ad7192.c27
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c14
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c2
-rw-r--r--drivers/staging/speakup/kobjects.c8
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c7
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c8
-rw-r--r--drivers/staging/unisys/visorinput/Kconfig2
-rw-r--r--drivers/staging/wilc1000/host_interface.c2
-rw-r--r--drivers/staging/wilc1000/linux_mon.c2
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h1
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/target/target_core_file.c23
-rw-r--r--drivers/target/target_core_user.c2
-rw-r--r--drivers/thermal/Kconfig4
-rw-r--r--drivers/thermal/imx_thermal.c6
-rw-r--r--drivers/thermal/power_allocator.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c1
-rw-r--r--drivers/thermal/spear_thermal.c6
-rw-r--r--drivers/thunderbolt/nhi.c1
-rw-r--r--drivers/tty/Kconfig2
-rw-r--r--drivers/tty/hvc/hvc_opal.c1
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/n_gsm.c40
-rw-r--r--drivers/tty/n_tty.c61
-rw-r--r--drivers/tty/pty.c3
-rw-r--r--drivers/tty/serial/8250/8250_pci.c11
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/arc_uart.c5
-rw-r--r--drivers/tty/serial/atmel_serial.c1
-rw-r--r--drivers/tty/serial/fsl_lpuart.c4
-rw-r--r--drivers/tty/serial/imx.c20
-rw-r--r--drivers/tty/serial/mxs-auart.c4
-rw-r--r--drivers/tty/serial/samsung.c11
-rw-r--r--drivers/tty/serial/sccnxp.c15
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c5
-rw-r--r--drivers/tty/serial/sh-sci.c26
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/tty_io.c16
-rw-r--r--drivers/tty/tty_ldisc.c16
-rw-r--r--drivers/tty/vt/vt.c14
-rw-r--r--drivers/usb/chipidea/core.c29
-rw-r--r--drivers/usb/class/cdc-acm.c11
-rw-r--r--drivers/usb/core/config.c4
-rw-r--r--drivers/usb/core/generic.c9
-rw-r--r--drivers/usb/core/hcd.c1
-rw-r--r--drivers/usb/core/hub.c26
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/core/quirks.c13
-rw-r--r--drivers/usb/dwc2/core.h2
-rw-r--r--drivers/usb/dwc2/gadget.c12
-rw-r--r--drivers/usb/dwc2/hcd.c6
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c4
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c2
-rw-r--r--drivers/usb/gadget/composite.c47
-rw-r--r--drivers/usb/gadget/function/f_fs.c16
-rw-r--r--drivers/usb/gadget/function/f_hid.c24
-rw-r--r--drivers/usb/gadget/function/f_midi.c6
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c6
-rw-r--r--drivers/usb/gadget/function/f_uac2.c2
-rw-r--r--drivers/usb/gadget/function/g_zero.h1
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c16
-rw-r--r--drivers/usb/gadget/u_f.c6
-rw-r--r--drivers/usb/gadget/u_f.h26
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_pci.c1
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c20
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c4
-rw-r--r--drivers/usb/gadget/udc/goku_udc.h2
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ohci-hcd.c3
-rw-r--r--drivers/usb/host/ohci-q.c17
-rw-r--r--drivers/usb/host/xhci-mem.c4
-rw-r--r--drivers/usb/host/xhci-plat.c1
-rw-r--r--drivers/usb/host/xhci.c40
-rw-r--r--drivers/usb/host/xhci.h4
-rw-r--r--drivers/usb/misc/ldusb.c6
-rw-r--r--drivers/usb/misc/yurex.c23
-rw-r--r--drivers/usb/mon/mon_text.c124
-rw-r--r--drivers/usb/musb/musb_core.c5
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c14
-rw-r--r--drivers/usb/musb/musb_host.c9
-rw-r--r--drivers/usb/musb/musb_host.h7
-rw-r--r--drivers/usb/musb/musb_virthub.c25
-rw-r--r--drivers/usb/musb/ux500_dma.c3
-rw-r--r--drivers/usb/phy/Kconfig1
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c5
-rw-r--r--drivers/usb/serial/Kconfig2
-rw-r--r--drivers/usb/serial/ch341.c2
-rw-r--r--drivers/usb/serial/cp210x.c26
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h9
-rw-r--r--drivers/usb/serial/io_edgeport.c1
-rw-r--r--drivers/usb/serial/keyspan_pda.c4
-rw-r--r--drivers/usb/serial/mos7840.c3
-rw-r--r--drivers/usb/serial/option.c451
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/serial/usb-serial-simple.c14
-rw-r--r--drivers/usb/serial/visor.c69
-rw-r--r--drivers/usb/storage/ene_ub6250.c11
-rw-r--r--drivers/usb/storage/uas.c7
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/usbip/stub.h2
-rw-r--r--drivers/usb/usbip/stub_dev.c49
-rw-r--r--drivers/usb/usbip/stub_main.c100
-rw-r--r--drivers/usb/usbip/stub_rx.c46
-rw-r--r--drivers/usb/usbip/usbip_common.c15
-rw-r--r--drivers/usb/usbip/usbip_common.h3
-rw-r--r--drivers/usb/usbip/usbip_event.c5
-rw-r--r--drivers/usb/usbip/vhci_hcd.c94
-rw-r--r--drivers/usb/usbip/vhci_rx.c30
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c45
-rw-r--r--drivers/usb/usbip/vhci_tx.c14
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c107
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--drivers/vhost/vhost.c3
-rw-r--r--drivers/video/backlight/as3711_bl.c33
-rw-r--r--drivers/video/backlight/max8925_bl.c4
-rw-r--r--drivers/video/backlight/tps65217_bl.c4
-rw-r--r--drivers/video/console/dummycon.c1
-rw-r--r--drivers/video/console/vgacon.c34
-rw-r--r--drivers/video/fbdev/amba-clcd.c4
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c8
-rw-r--r--drivers/video/fbdev/exynos/s6e8ax0.c13
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/fbdev/mmp/core.c5
-rw-r--r--drivers/video/fbdev/sbuslib.c4
-rw-r--r--drivers/video/fbdev/sis/init301.c10
-rw-r--r--drivers/video/fbdev/sm501fb.c1
-rw-r--r--drivers/video/fbdev/udlfb.c14
-rw-r--r--drivers/video/fbdev/uvesafb.c3
-rw-r--r--drivers/video/fbdev/vfb.c17
-rw-r--r--drivers/video/fbdev/via/viafbdev.c8
-rw-r--r--drivers/video/hdmi.c51
-rw-r--r--drivers/virtio/virtio_balloon.c4
-rw-r--r--drivers/w1/masters/mxc_w1.c20
-rw-r--r--drivers/w1/w1.c2
-rw-r--r--drivers/watchdog/f71808e_wdt.c5
-rw-r--r--drivers/watchdog/hpwdt.c12
-rw-r--r--drivers/watchdog/imx2_wdt.c20
-rw-r--r--drivers/watchdog/sp5100_tco.h2
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/events/events_base.c6
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--drivers/xen/grant-table.c4
-rw-r--r--drivers/xen/swiotlb-xen.c2
-rw-r--r--drivers/xen/xen-acpi-processor.c6
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c5
-rw-r--r--drivers/zorro/zorro.c12
-rw-r--r--fs/affs/namei.c10
-rw-r--r--fs/aio.c51
-rw-r--r--fs/autofs4/root.c2
-rw-r--r--fs/binfmt_misc.c12
-rw-r--r--fs/btrfs/acl.c19
-rw-r--r--fs/btrfs/ctree.c18
-rw-r--r--fs/btrfs/disk-io.c4
-rw-r--r--fs/btrfs/extent-tree.c1
-rw-r--r--fs/btrfs/extent_io.c2
-rw-r--r--fs/btrfs/file.c9
-rw-r--r--fs/btrfs/free-space-cache.c3
-rw-r--r--fs/btrfs/inode.c64
-rw-r--r--fs/btrfs/ioctl.c14
-rw-r--r--fs/btrfs/qgroup.c19
-rw-r--r--fs/btrfs/raid56.c18
-rw-r--r--fs/btrfs/scrub.c2
-rw-r--r--fs/btrfs/send.c26
-rw-r--r--fs/btrfs/tests/qgroup-tests.c2
-rw-r--r--fs/btrfs/tree-log.c56
-rw-r--r--fs/btrfs/volumes.c30
-rw-r--r--fs/cifs/cifsencrypt.c3
-rw-r--r--fs/cifs/cifssmb.c14
-rw-r--r--fs/cifs/connect.c6
-rw-r--r--fs/cifs/dir.c9
-rw-r--r--fs/cifs/file.c28
-rw-r--r--fs/cifs/misc.c14
-rw-r--r--fs/cifs/netmisc.c6
-rw-r--r--fs/cifs/sess.c22
-rw-r--r--fs/cifs/smb2pdu.c46
-rw-r--r--fs/compat_binfmt_elf.c2
-rw-r--r--fs/compat_ioctl.c2
-rw-r--r--fs/dcache.c39
-rw-r--r--fs/ecryptfs/inode.c3
-rw-r--r--fs/ext2/acl.c36
-rw-r--r--fs/ext2/inode.c10
-rw-r--r--fs/ext2/namei.c6
-rw-r--r--fs/ext4/balloc.c44
-rw-r--r--fs/ext4/ext4.h5
-rw-r--r--fs/ext4/ext4_extents.h1
-rw-r--r--fs/ext4/extents.c22
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/ialloc.c76
-rw-r--r--fs/ext4/inline.c86
-rw-r--r--fs/ext4/inode.c108
-rw-r--r--fs/ext4/mballoc.c29
-rw-r--r--fs/ext4/namei.c7
-rw-r--r--fs/ext4/resize.c2
-rw-r--r--fs/ext4/super.c87
-rw-r--r--fs/ext4/symlink.c10
-rw-r--r--fs/ext4/xattr.c33
-rw-r--r--fs/ext4/xattr.h32
-rw-r--r--fs/f2fs/data.c2
-rw-r--r--fs/f2fs/extent_cache.c10
-rw-r--r--fs/f2fs/gc.c6
-rw-r--r--fs/f2fs/inode.c1
-rw-r--r--fs/f2fs/namei.c17
-rw-r--r--fs/f2fs/segment.c3
-rw-r--r--fs/f2fs/super.c6
-rw-r--r--fs/fat/inode.c20
-rw-r--r--fs/fcntl.c4
-rw-r--r--fs/fs-writeback.c9
-rw-r--r--fs/fscache/page.c13
-rw-r--r--fs/fuse/control.c13
-rw-r--r--fs/fuse/dir.c13
-rw-r--r--fs/fuse/inode.c1
-rw-r--r--fs/gfs2/file.c5
-rw-r--r--fs/gfs2/quota.h2
-rw-r--r--fs/hfsplus/super.c1
-rw-r--r--fs/inode.c12
-rw-r--r--fs/jbd2/journal.c7
-rw-r--r--fs/jbd2/transaction.c10
-rw-r--r--fs/jffs2/dir.c12
-rw-r--r--fs/jffs2/fs.c1
-rw-r--r--fs/jffs2/super.c2
-rw-r--r--fs/jfs/namei.c12
-rw-r--r--fs/jfs/xattr.c10
-rw-r--r--fs/kernfs/file.c2
-rw-r--r--fs/lockd/svc.c4
-rw-r--r--fs/namei.c11
-rw-r--r--fs/namespace.c31
-rw-r--r--fs/ncpfs/dir.c3
-rw-r--r--fs/ncpfs/ncplib_kernel.c4
-rw-r--r--fs/nfs/direct.c6
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c1
-rw-r--r--fs/nfs/nfs4idmap.c11
-rw-r--r--fs/nfs/nfs4proc.c25
-rw-r--r--fs/nfs/nfs4state.c15
-rw-r--r--fs/nfs/nfs4sysctl.c2
-rw-r--r--fs/nfs/pagelist.c6
-rw-r--r--fs/nfs/pnfs.c4
-rw-r--r--fs/nfs/super.c2
-rw-r--r--fs/nfs/write.c2
-rw-r--r--fs/nfs_common/grace.c10
-rw-r--r--fs/nfsd/auth.c5
-rw-r--r--fs/nfsd/nfs4proc.c6
-rw-r--r--fs/nfsd/nfs4state.c34
-rw-r--r--fs/nfsd/nfs4xdr.c7
-rw-r--r--fs/nfsd/vfs.c24
-rw-r--r--fs/nilfs2/namei.c6
-rw-r--r--fs/notify/fanotify/fanotify.c34
-rw-r--r--fs/nsfs.c1
-rw-r--r--fs/ocfs2/acl.c6
-rw-r--r--fs/ocfs2/cluster/nodemanager.c63
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c14
-rw-r--r--fs/ocfs2/dlm/dlmdomain.h25
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c9
-rw-r--r--fs/ocfs2/journal.c23
-rw-r--r--fs/ocfs2/super.c5
-rw-r--r--fs/ocfs2/xattr.c2
-rw-r--r--fs/overlayfs/inode.c12
-rw-r--r--fs/overlayfs/readdir.c6
-rw-r--r--fs/pipe.c3
-rw-r--r--fs/proc/array.c26
-rw-r--r--fs/proc/base.c84
-rw-r--r--fs/proc/meminfo.c5
-rw-r--r--fs/proc/proc_sysctl.c3
-rw-r--r--fs/proc/task_mmu.c29
-rw-r--r--fs/proc/task_nommu.c28
-rw-r--r--fs/quota/dquot.c3
-rw-r--r--fs/reiserfs/bitmap.c14
-rw-r--r--fs/reiserfs/journal.c4
-rw-r--r--fs/reiserfs/lbalance.c2
-rw-r--r--fs/reiserfs/namei.c12
-rw-r--r--fs/reiserfs/reiserfs.h2
-rw-r--r--fs/reiserfs/super.c21
-rw-r--r--fs/reiserfs/xattr_acl.c12
-rw-r--r--fs/select.c14
-rw-r--r--fs/squashfs/block.c2
-rw-r--r--fs/squashfs/cache.c3
-rw-r--r--fs/squashfs/file.c8
-rw-r--r--fs/squashfs/fragment.c17
-rw-r--r--fs/squashfs/squashfs_fs.h6
-rw-r--r--fs/squashfs/squashfs_fs_sb.h1
-rw-r--r--fs/squashfs/super.c5
-rw-r--r--fs/super.c6
-rw-r--r--fs/ubifs/journal.c2
-rw-r--r--fs/ubifs/super.c14
-rw-r--r--fs/udf/directory.c3
-rw-r--r--fs/udf/namei.c6
-rw-r--r--fs/udf/super.c5
-rw-r--r--fs/ufs/namei.c6
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c94
-rw-r--r--fs/xfs/libxfs/xfs_attr.c6
-rw-r--r--fs/xfs/xfs_aops.c6
-rw-r--r--fs/xfs/xfs_discard.c14
-rw-r--r--fs/xfs/xfs_file.c14
-rw-r--r--fs/xfs/xfs_log.c7
-rw-r--r--fs/xfs/xfs_mount.h1
-rw-r--r--fs/xfs/xfs_qm.c46
-rw-r--r--fs/xfs/xfs_trace.h9
-rw-r--r--include/asm-generic/futex.h50
-rw-r--r--include/asm-generic/pgtable.h37
-rw-r--r--include/crypto/internal/hash.h2
-rw-r--r--include/crypto/poly1305.h2
-rw-r--r--include/crypto/vmac.h63
-rw-r--r--include/drm/drm_crtc_helper.h1
-rw-r--r--include/drm/drm_dp_helper.h1
-rw-r--r--include/linux/audit.h24
-rw-r--r--include/linux/backing-dev-defs.h5
-rw-r--r--include/linux/backing-dev.h31
-rw-r--r--include/linux/blkdev.h4
-rw-r--r--include/linux/bpf.h16
-rw-r--r--include/linux/cacheinfo.h1
-rw-r--r--include/linux/compiler-clang.h5
-rw-r--r--include/linux/compiler-gcc.h35
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/cpu.h4
-rw-r--r--include/linux/cpumask.h10
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/device.h7
-rw-r--r--include/linux/dma-iommu.h1
-rw-r--r--include/linux/dmaengine.h20
-rw-r--r--include/linux/efi.h8
-rw-r--r--include/linux/fdtable.h5
-rw-r--r--include/linux/fs.h6
-rw-r--r--include/linux/hid.h6
-rw-r--r--include/linux/if_vlan.h7
-rw-r--r--include/linux/iio/buffer.h6
-rw-r--r--include/linux/init.h9
-rw-r--r--include/linux/jiffies.h13
-rw-r--r--include/linux/kaiser.h2
-rw-r--r--include/linux/ktime.h7
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/llist.h21
-rw-r--r--include/linux/mlx4/qp.h1
-rw-r--r--include/linux/mlx5/device.h10
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/module.h9
-rw-r--r--include/linux/msi.h11
-rw-r--r--include/linux/mtd/flashchip.h1
-rw-r--r--include/linux/mtd/map.h130
-rw-r--r--include/linux/mtd/sh_flctl.h1
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h10
-rw-r--r--include/linux/netfilter/x_tables.h44
-rw-r--r--include/linux/nospec.h68
-rw-r--r--include/linux/pagemap.h4
-rw-r--r--include/linux/platform_data/isl9305.h2
-rw-r--r--include/linux/posix-clock.h10
-rw-r--r--include/linux/ring_buffer.h1
-rw-r--r--include/linux/sched.h10
-rw-r--r--include/linux/seccomp.h3
-rw-r--r--include/linux/signal.h17
-rw-r--r--include/linux/skbuff.h20
-rw-r--r--include/linux/string.h1
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swapfile.h2
-rw-r--r--include/linux/tcp.h9
-rw-r--r--include/linux/thread_info.h6
-rw-r--r--include/linux/timekeeper_internal.h4
-rw-r--r--include/linux/tty.h7
-rw-r--r--include/linux/usb/composite.h3
-rw-r--r--include/linux/usb/gadget.h17
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/vermagic.h8
-rw-r--r--include/linux/virtio.h3
-rw-r--r--include/linux/workqueue.h1
-rw-r--r--include/net/arp.h3
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/cfg80211.h2
-rw-r--r--include/net/dst_cache.h97
-rw-r--r--include/net/inet_timewait_sock.h1
-rw-r--r--include/net/ip.h11
-rw-r--r--include/net/ip6_tunnel.h15
-rw-r--r--include/net/ip_fib.h1
-rw-r--r--include/net/ip_tunnels.h9
-rw-r--r--include/net/ipv6.h3
-rw-r--r--include/net/llc_conn.h3
-rw-r--r--include/net/mac80211.h16
-rw-r--r--include/net/net_namespace.h10
-rw-r--r--include/net/netfilter/nf_queue.h4
-rw-r--r--include/net/netlink.h73
-rw-r--r--include/net/netns/netfilter.h2
-rw-r--r--include/net/nexthop.h2
-rw-r--r--include/net/red.h13
-rw-r--r--include/net/regulatory.h2
-rw-r--r--include/net/route.h3
-rw-r--r--include/net/slhc_vj.h1
-rw-r--r--include/net/tcp.h10
-rw-r--r--include/net/udplite.h1
-rw-r--r--include/net/x25.h4
-rw-r--r--include/rdma/ib_addr.h2
-rw-r--r--include/rdma/ib_verbs.h14
-rw-r--r--include/soc/tegra/mc.h2
-rw-r--r--include/sound/control.h7
-rw-r--r--include/sound/pcm_oss.h1
-rw-r--r--include/trace/events/clk.h4
-rw-r--r--include/trace/events/timer.h20
-rw-r--r--include/trace/events/xen.h16
-rw-r--r--include/uapi/drm/virtgpu_drm.h1
-rw-r--r--include/uapi/linux/eventpoll.h13
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/nl80211.h2
-rw-r--r--include/uapi/linux/pci_regs.h2
-rw-r--r--include/uapi/linux/prctl.h12
-rw-r--r--include/uapi/linux/seccomp.h4
-rw-r--r--include/uapi/linux/usb/audio.h4
-rw-r--r--init/Kconfig20
-rw-r--r--ipc/msg.c5
-rw-r--r--ipc/shm.c42
-rw-r--r--kernel/async.c20
-rw-r--r--kernel/audit.c20
-rw-r--r--kernel/auditfilter.c2
-rw-r--r--kernel/auditsc.c17
-rw-r--r--kernel/bpf/arraymap.c2
-rw-r--r--kernel/bpf/core.c30
-rw-r--r--kernel/bpf/hashtab.c9
-rw-r--r--kernel/bpf/syscall.c22
-rw-r--r--kernel/bpf/verifier.c77
-rw-r--r--kernel/debug/kdb/kdb_main.c27
-rw-r--r--kernel/events/callchain.c10
-rw-r--r--kernel/events/core.c45
-rw-r--r--kernel/events/hw_breakpoint.c30
-rw-r--r--kernel/events/ring_buffer.c7
-rw-r--r--kernel/exit.c4
-rw-r--r--kernel/futex.c138
-rw-r--r--kernel/irq/manage.c11
-rw-r--r--kernel/kprobes.c2
-rw-r--r--kernel/locking/qspinlock.c8
-rw-r--r--kernel/module.c11
-rw-r--r--kernel/pid.c4
-rw-r--r--kernel/power/power.h3
-rw-r--r--kernel/power/user.c5
-rw-r--r--kernel/printk/braille.c15
-rw-r--r--kernel/printk/braille.h13
-rw-r--r--kernel/profile.c4
-rw-r--r--kernel/relay.c2
-rw-r--r--kernel/resource.c3
-rw-r--r--kernel/sched/core.c18
-rw-r--r--kernel/sched/deadline.c98
-rw-r--r--kernel/sched/fair.c3
-rw-r--r--kernel/sched/rt.c28
-rw-r--r--kernel/sched/sched.h2
-rw-r--r--kernel/seccomp.c21
-rw-r--r--kernel/signal.c11
-rw-r--r--kernel/sys.c24
-rw-r--r--kernel/time/hrtimer.c12
-rw-r--r--kernel/time/posix-clock.c34
-rw-r--r--kernel/time/posix-timers.c34
-rw-r--r--kernel/time/sched_clock.c5
-rw-r--r--kernel/time/tick-broadcast.c8
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/time/time.c6
-rw-r--r--kernel/time/timekeeping.c20
-rw-r--r--kernel/time/timer.c9
-rw-r--r--kernel/time/timer_list.c6
-rw-r--r--kernel/trace/blktrace.c32
-rw-r--r--kernel/trace/ftrace.c1
-rw-r--r--kernel/trace/ring_buffer.c16
-rw-r--r--kernel/trace/trace.c6
-rw-r--r--kernel/trace/trace_events_filter.c3
-rw-r--r--kernel/trace/trace_events_trigger.c23
-rw-r--r--kernel/trace/trace_functions_graph.c5
-rw-r--r--kernel/trace/trace_kprobe.c19
-rw-r--r--kernel/trace/trace_probe.c8
-rw-r--r--kernel/trace/trace_probe.h2
-rw-r--r--kernel/trace/trace_uprobe.c2
-rw-r--r--kernel/tracepoint.c4
-rw-r--r--kernel/workqueue.c18
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Makefile2
-rw-r--r--lib/atomic64_test.c4
-rw-r--r--lib/hweight.c4
-rw-r--r--lib/ioremap.c6
-rw-r--r--lib/kobject.c12
-rw-r--r--lib/mpi/longlong.h18
-rw-r--r--lib/oid_registry.c8
-rw-r--r--lib/rhashtable.c17
-rw-r--r--lib/test_bpf.c44
-rw-r--r--lib/vsprintf.c3
-rw-r--r--mm/Kconfig1
-rw-r--r--mm/backing-dev.c4
-rw-r--r--mm/cma.c15
-rw-r--r--mm/early_ioremap.c2
-rw-r--r--mm/filemap.c106
-rw-r--r--mm/hugetlb.c1
-rw-r--r--mm/kasan/kasan.c2
-rw-r--r--mm/kmemleak.c14
-rw-r--r--mm/ksm.c28
-rw-r--r--mm/memcontrol.c4
-rw-r--r--mm/memory-failure.c7
-rw-r--r--mm/memory.c81
-rw-r--r--mm/mempolicy.c36
-rw-r--r--mm/mmap.c38
-rw-r--r--mm/mprotect.c49
-rw-r--r--mm/page-writeback.c18
-rw-r--r--mm/page_alloc.c8
-rw-r--r--mm/percpu.c1
-rw-r--r--mm/slab.c3
-rw-r--r--mm/slub.c2
-rw-r--r--mm/swapfile.c50
-rw-r--r--mm/util.c40
-rw-r--r--mm/vmalloc.c3
-rw-r--r--mm/vmscan.c37
-rw-r--r--net/8021q/vlan_dev.c9
-rw-r--r--net/Kconfig7
-rw-r--r--net/atm/lec.c9
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c20
-rw-r--r--net/batman-adv/distributed-arp-table.c2
-rw-r--r--net/batman-adv/fragmentation.c3
-rw-r--r--net/batman-adv/gateway_client.c3
-rw-r--r--net/batman-adv/multicast.c4
-rw-r--r--net/batman-adv/soft-interface.c8
-rw-r--r--net/bluetooth/hci_conn.c27
-rw-r--r--net/bluetooth/hci_core.c17
-rw-r--r--net/bluetooth/hci_event.c15
-rw-r--r--net/bluetooth/hidp/core.c4
-rw-r--r--net/bluetooth/smp.c8
-rw-r--r--net/bridge/br_if.c4
-rw-r--r--net/bridge/br_sysfs_if.c3
-rw-r--r--net/bridge/netfilter/ebt_among.c55
-rw-r--r--net/bridge/netfilter/ebtables.c62
-rw-r--r--net/can/af_can.c22
-rw-r--r--net/ceph/messenger.c7
-rw-r--r--net/ceph/osdmap.c1
-rw-r--r--net/compat.c6
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/dev.c38
-rw-r--r--net/core/dev_addr_lists.c4
-rw-r--r--net/core/dst_cache.c168
-rw-r--r--net/core/filter.c8
-rw-r--r--net/core/flow_dissector.c3
-rw-r--r--net/core/neighbour.c58
-rw-r--r--net/core/net_namespace.c19
-rw-r--r--net/core/rtnetlink.c17
-rw-r--r--net/core/skbuff.c88
-rw-r--r--net/core/sock.c2
-rw-r--r--net/core/sysctl_net_core.c8
-rw-r--r--net/dccp/ccids/ccid2.c17
-rw-r--r--net/dccp/ccids/ccid3.c16
-rw-r--r--net/dccp/ipv4.c1
-rw-r--r--net/dccp/ipv6.c1
-rw-r--r--net/dccp/proto.c8
-rw-r--r--net/dccp/timer.c2
-rw-r--r--net/decnet/af_decnet.c62
-rw-r--r--net/dns_resolver/dns_key.c39
-rw-r--r--net/dsa/slave.c6
-rw-r--r--net/ieee802154/6lowpan/core.c12
-rw-r--r--net/ieee802154/socket.c8
-rw-r--r--net/ipv4/Kconfig2
-rw-r--r--net/ipv4/ah4.c8
-rw-r--r--net/ipv4/arp.c25
-rw-r--r--net/ipv4/esp4.c13
-rw-r--r--net/ipv4/fib_frontend.c5
-rw-r--r--net/ipv4/fib_semantics.c7
-rw-r--r--net/ipv4/igmp.c6
-rw-r--r--net/ipv4/inet_fragment.c13
-rw-r--r--net/ipv4/inet_timewait_sock.c1
-rw-r--r--net/ipv4/ip_output.c5
-rw-r--r--net/ipv4/ip_sockglue.c36
-rw-r--r--net/ipv4/ip_tunnel.c89
-rw-r--r--net/ipv4/ip_vti.c1
-rw-r--r--net/ipv4/ipconfig.c17
-rw-r--r--net/ipv4/netfilter.c7
-rw-r--r--net/ipv4/netfilter/arp_tables.c69
-rw-r--r--net/ipv4/netfilter/ip_tables.c64
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c16
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c57
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c2
-rw-r--r--net/ipv4/ping.c7
-rw-r--r--net/ipv4/route.c34
-rw-r--r--net/ipv4/sysctl_net_ipv4.c23
-rw-r--r--net/ipv4/tcp.c19
-rw-r--r--net/ipv4/tcp_dctcp.c50
-rw-r--r--net/ipv4/tcp_illinois.c2
-rw-r--r--net/ipv4/tcp_input.c137
-rw-r--r--net/ipv4/tcp_ipv4.c4
-rw-r--r--net/ipv4/tcp_output.c40
-rw-r--r--net/ipv4/tcp_timer.c15
-rw-r--r--net/ipv4/udp.c12
-rw-r--r--net/ipv4/xfrm4_policy.c1
-rw-r--r--net/ipv6/Kconfig2
-rw-r--r--net/ipv6/addrconf.c5
-rw-r--r--net/ipv6/ah6.c8
-rw-r--r--net/ipv6/datagram.c7
-rw-r--r--net/ipv6/esp6.c12
-rw-r--r--net/ipv6/ip6_checksum.c5
-rw-r--r--net/ipv6/ip6_gre.c20
-rw-r--r--net/ipv6/ip6_output.c33
-rw-r--r--net/ipv6/ip6_tunnel.c160
-rw-r--r--net/ipv6/ip6_vti.c9
-rw-r--r--net/ipv6/ip6mr.c4
-rw-r--r--net/ipv6/ipv6_sockglue.c29
-rw-r--r--net/ipv6/ndisc.c5
-rw-r--r--net/ipv6/netfilter/ip6_tables.c65
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c18
-rw-r--r--net/ipv6/netfilter/nf_dup_ipv6.c1
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c4
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c3
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/ipv6/sit.c34
-rw-r--r--net/ipv6/tcp_ipv6.c4
-rw-r--r--net/ipv6/xfrm6_policy.c2
-rw-r--r--net/iucv/af_iucv.c4
-rw-r--r--net/key/af_key.c47
-rw-r--r--net/l2tp/l2tp_core.c8
-rw-r--r--net/l2tp/l2tp_ppp.c7
-rw-r--r--net/llc/af_llc.c20
-rw-r--r--net/llc/llc_c_ac.c24
-rw-r--r--net/llc/llc_conn.c54
-rw-r--r--net/mac80211/cfg.c2
-rw-r--r--net/mac80211/iface.c2
-rw-r--r--net/mac80211/mesh_hwmp.c15
-rw-r--r--net/mac80211/mlme.c4
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/mac80211/status.c1
-rw-r--r--net/mac80211/util.c5
-rw-r--r--net/mac80211/wep.c3
-rw-r--r--net/mac80211/wpa.c45
-rw-r--r--net/mpls/af_mpls.c36
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c44
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c155
-rw-r--r--net/netfilter/nf_conntrack_core.c7
-rw-r--r--net/netfilter/nf_conntrack_expect.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c14
-rw-r--r--net/netfilter/nf_conntrack_sip.c5
-rw-r--r--net/netfilter/nf_log.c9
-rw-r--r--net/netfilter/nf_nat_proto_common.c7
-rw-r--r--net/netfilter/nf_queue.c17
-rw-r--r--net/netfilter/nf_tables_core.c3
-rw-r--r--net/netfilter/nfnetlink_cthelper.c10
-rw-r--r--net/netfilter/nfnetlink_queue.c29
-rw-r--r--net/netfilter/x_tables.c143
-rw-r--r--net/netfilter/xt_CT.c11
-rw-r--r--net/netfilter/xt_IDLETIMER.c9
-rw-r--r--net/netfilter/xt_LED.c12
-rw-r--r--net/netfilter/xt_RATEEST.c22
-rw-r--r--net/netfilter/xt_hashlimit.c5
-rw-r--r--net/netfilter/xt_osf.c7
-rw-r--r--net/netfilter/xt_recent.c6
-rw-r--r--net/netlabel/netlabel_unlabeled.c10
-rw-r--r--net/netlink/af_netlink.c12
-rw-r--r--net/netlink/genetlink.c12
-rw-r--r--net/nfc/llcp_commands.c13
-rw-r--r--net/nfc/netlink.c3
-rw-r--r--net/openvswitch/conntrack.c30
-rw-r--r--net/openvswitch/flow_netlink.c25
-rw-r--r--net/packet/af_packet.c94
-rw-r--r--net/packet/internal.h10
-rw-r--r--net/rds/bind.c1
-rw-r--r--net/rds/ib.c3
-rw-r--r--net/rds/loop.c1
-rw-r--r--net/rds/rds.h5
-rw-r--r--net/rds/recv.c5
-rw-r--r--net/rfkill/rfkill-gpio.c7
-rw-r--r--net/rxrpc/rxkad.c21
-rw-r--r--net/sched/act_api.c4
-rw-r--r--net/sched/act_bpf.c12
-rw-r--r--net/sched/act_csum.c12
-rw-r--r--net/sched/sch_blackhole.c2
-rw-r--r--net/sched/sch_choke.c3
-rw-r--r--net/sched/sch_fq.c37
-rw-r--r--net/sched/sch_gred.c3
-rw-r--r--net/sched/sch_red.c2
-rw-r--r--net/sched/sch_sfq.c3
-rw-r--r--net/sctp/associola.c30
-rw-r--r--net/sctp/inqueue.c2
-rw-r--r--net/sctp/ipv6.c75
-rw-r--r--net/sctp/protocol.c10
-rw-r--r--net/sctp/sm_make_chunk.c8
-rw-r--r--net/sctp/sm_statefuns.c89
-rw-r--r--net/sctp/socket.c51
-rw-r--r--net/socket.c11
-rw-r--r--net/sunrpc/rpc_pipe.c1
-rw-r--r--net/sunrpc/xprtsock.c8
-rw-r--r--net/tipc/net.c3
-rw-r--r--net/wireless/core.c11
-rw-r--r--net/wireless/nl80211.c9
-rw-r--r--net/x25/af_x25.c24
-rw-r--r--net/x25/sysctl_net_x25.c5
-rw-r--r--net/xfrm/xfrm_ipcomp.c2
-rw-r--r--net/xfrm/xfrm_policy.c15
-rw-r--r--net/xfrm/xfrm_state.c15
-rw-r--r--net/xfrm/xfrm_user.c47
-rw-r--r--scripts/Kbuild.include5
-rw-r--r--scripts/Makefile.kasan3
-rw-r--r--scripts/Makefile.lib10
-rwxr-xr-xscripts/depmod.sh8
-rw-r--r--scripts/genksyms/parse.tab.c_shipped1682
-rw-r--r--scripts/genksyms/parse.tab.h_shipped133
-rw-r--r--scripts/genksyms/parse.y2
-rw-r--r--scripts/kconfig/confdata.c2
-rw-r--r--scripts/kconfig/expr.c2
-rw-r--r--scripts/kconfig/menu.c1
-rw-r--r--scripts/kconfig/zconf.y33
-rwxr-xr-xscripts/kernel-doc2
-rw-r--r--scripts/mod/modpost.c12
-rwxr-xr-xscripts/tags.sh1
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/integrity/ima/Kconfig1
-rw-r--r--security/integrity/ima/ima_appraise.c11
-rw-r--r--security/integrity/ima/ima_crypto.c2
-rw-r--r--security/integrity/ima/ima_main.c13
-rw-r--r--security/keys/encrypted-keys/encrypted.c31
-rw-r--r--security/selinux/hooks.c36
-rw-r--r--security/selinux/ss/services.c25
-rw-r--r--sound/core/control_compat.c3
-rw-r--r--sound/core/oss/pcm_oss.c203
-rw-r--r--sound/core/pcm.c8
-rw-r--r--sound/core/pcm_compat.c2
-rw-r--r--sound/core/pcm_native.c3
-rw-r--r--sound/core/rawmidi.c20
-rw-r--r--sound/core/rawmidi_compat.c18
-rw-r--r--sound/core/seq/oss/seq_oss_event.c15
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c85
-rw-r--r--sound/core/seq/oss/seq_oss_synth.h3
-rw-r--r--sound/core/seq/seq_clientmgr.c36
-rw-r--r--sound/core/seq/seq_clientmgr.h1
-rw-r--r--sound/core/seq/seq_fifo.c2
-rw-r--r--sound/core/seq/seq_memory.c14
-rw-r--r--sound/core/seq/seq_memory.h3
-rw-r--r--sound/core/seq/seq_prioq.c28
-rw-r--r--sound/core/seq/seq_prioq.h6
-rw-r--r--sound/core/seq/seq_queue.c28
-rw-r--r--sound/core/seq/seq_virmidi.c4
-rw-r--r--sound/core/timer.c220
-rw-r--r--sound/core/vmaster.c5
-rw-r--r--sound/drivers/aloop.c46
-rw-r--r--sound/drivers/opl3/opl3_synth.c7
-rw-r--r--sound/firewire/digi00x/amdtp-dot.c55
-rw-r--r--sound/pci/asihpi/hpimsginit.c13
-rw-r--r--sound/pci/asihpi/hpioctl.c4
-rw-r--r--sound/pci/emu10k1/emupcm.c4
-rw-r--r--sound/pci/emu10k1/memory.c6
-rw-r--r--sound/pci/fm801.c16
-rw-r--r--sound/pci/hda/Kconfig1
-rw-r--r--sound/pci/hda/hda_controller.c4
-rw-r--r--sound/pci/hda/hda_hwdep.c12
-rw-r--r--sound/pci/hda/hda_intel.c44
-rw-r--r--sound/pci/hda/patch_ca0132.c11
-rw-r--r--sound/pci/hda/patch_conexant.c4
-rw-r--r--sound/pci/hda/patch_realtek.c70
-rw-r--r--sound/pci/rme9652/hdspm.c24
-rw-r--r--sound/pci/rme9652/rme9652.c6
-rw-r--r--sound/soc/au1x/ac97c.c6
-rw-r--r--sound/soc/cirrus/edb93xx.c2
-rw-r--r--sound/soc/cirrus/ep93xx-i2s.c26
-rw-r--r--sound/soc/cirrus/snappercl15.c2
-rw-r--r--sound/soc/codecs/pcm512x-spi.c4
-rw-r--r--sound/soc/codecs/ssm2602.c19
-rw-r--r--sound/soc/fsl/fsl_esai.c7
-rw-r--r--sound/soc/generic/simple-card.c8
-rw-r--r--sound/soc/intel/Kconfig7
-rw-r--r--sound/soc/intel/atom/sst/sst_stream.c2
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c45
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c7
-rw-r--r--sound/soc/intel/common/sst-firmware.c2
-rw-r--r--sound/soc/intel/skylake/skl.c2
-rw-r--r--sound/soc/mediatek/Kconfig4
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c4
-rw-r--r--sound/soc/pxa/brownstone.c1
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c1
-rw-r--r--sound/soc/pxa/mmp-pcm.c1
-rw-r--r--sound/soc/pxa/mmp-sspa.c1
-rw-r--r--sound/soc/pxa/palm27x.c1
-rw-r--r--sound/soc/pxa/pxa-ssp.c1
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c1
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c1
-rw-r--r--sound/soc/rockchip/rockchip_spdif.c22
-rw-r--r--sound/soc/samsung/i2s.c13
-rw-r--r--sound/soc/sh/rcar/rsnd.h2
-rw-r--r--sound/soc/sh/rcar/ssi.c25
-rw-r--r--sound/soc/soc-dapm.c2
-rw-r--r--sound/soc/soc-pcm.c6
-rw-r--r--sound/soc/soc-topology.c3
-rw-r--r--sound/soc/ux500/mop500.c4
-rw-r--r--sound/soc/ux500/ux500_pcm.c5
-rw-r--r--sound/usb/line6/midi.c2
-rw-r--r--sound/usb/mixer.c26
-rw-r--r--sound/usb/mixer_maps.c3
-rw-r--r--sound/usb/pcm.c11
-rw-r--r--sound/usb/quirks-table.h47
-rw-r--r--tools/arch/x86/include/asm/unistd_32.h9
-rw-r--r--tools/arch/x86/include/asm/unistd_64.h9
-rw-r--r--tools/build/Build.include9
-rw-r--r--tools/build/Makefile.build10
-rw-r--r--tools/lib/bpf/libbpf.c26
-rw-r--r--tools/lib/traceevent/event-parse.c17
-rw-r--r--tools/lib/traceevent/parse-filter.c10
-rw-r--r--tools/perf/bench/numa.c56
-rw-r--r--tools/perf/builtin-probe.c6
-rw-r--r--tools/perf/builtin-top.c15
-rw-r--r--tools/perf/builtin-trace.c4
-rw-r--r--tools/perf/config/Makefile1
-rw-r--r--tools/perf/perf-sys.h18
-rw-r--r--tools/perf/tests/kmod-path.c2
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c2
-rw-r--r--tools/perf/util/dso.c2
-rw-r--r--tools/perf/util/event.c4
-rw-r--r--tools/perf/util/evsel.c8
-rw-r--r--tools/perf/util/header.c12
-rw-r--r--tools/perf/util/hist.c4
-rw-r--r--tools/perf/util/hist.h1
-rw-r--r--tools/perf/util/include/asm/unistd_32.h1
-rw-r--r--tools/perf/util/include/asm/unistd_64.h1
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c87
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h11
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c2
-rw-r--r--tools/perf/util/intel-pt.c42
-rw-r--r--tools/perf/util/ordered-events.c3
-rw-r--r--tools/perf/util/probe-event.c8
-rw-r--r--tools/perf/util/session.c17
-rw-r--r--tools/perf/util/sort.c5
-rw-r--r--tools/perf/util/unwind-libdw.c8
-rw-r--r--tools/perf/util/util.c2
-rw-r--r--tools/scripts/Makefile.include14
-rw-r--r--tools/testing/selftests/Makefile1
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh7
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc46
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc97
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc43
-rw-r--r--tools/testing/selftests/memfd/config1
-rw-r--r--tools/testing/selftests/net/psock_fanout.c3
-rw-r--r--tools/testing/selftests/powerpc/mm/subpage_prot.c14
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-resched-dscr.c2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh2
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c98
-rw-r--r--tools/testing/selftests/x86/entry_from_vm86.c117
-rw-r--r--tools/thermal/tmon/sysfs.c12
-rw-r--r--tools/thermal/tmon/tmon.c1
-rw-r--r--tools/usb/usbip/libsrc/usbip_common.c9
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_driver.c27
-rw-r--r--tools/usb/usbip/libsrc/vhci_driver.c8
-rw-r--r--tools/usb/usbip/src/usbip.c2
-rw-r--r--tools/usb/usbip/src/usbip_bind.c9
-rw-r--r--tools/usb/usbip/src/usbip_detach.c9
-rw-r--r--tools/usb/usbip/src/usbip_list.c9
-rw-r--r--tools/usb/usbip/src/usbipd.c2
-rw-r--r--virt/kvm/eventfd.c6
-rw-r--r--virt/kvm/kvm_main.c3
1967 files changed, 24539 insertions, 11245 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index ea6a043f5beb..50f95689ab38 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -276,6 +276,7 @@ What: /sys/devices/system/cpu/vulnerabilities
276 /sys/devices/system/cpu/vulnerabilities/meltdown 276 /sys/devices/system/cpu/vulnerabilities/meltdown
277 /sys/devices/system/cpu/vulnerabilities/spectre_v1 277 /sys/devices/system/cpu/vulnerabilities/spectre_v1
278 /sys/devices/system/cpu/vulnerabilities/spectre_v2 278 /sys/devices/system/cpu/vulnerabilities/spectre_v2
279 /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
279Date: January 2018 280Date: January 2018
280Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> 281Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
281Description: Information about CPU vulnerabilities 282Description: Information about CPU vulnerabilities
diff --git a/Documentation/Changes b/Documentation/Changes
index ec97b77c8b00..f25649ffb892 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -25,7 +25,7 @@ o GNU C 3.2 # gcc --version
25o GNU make 3.80 # make --version 25o GNU make 3.80 # make --version
26o binutils 2.12 # ld -v 26o binutils 2.12 # ld -v
27o util-linux 2.10o # fdformat --version 27o util-linux 2.10o # fdformat --version
28o module-init-tools 0.9.10 # depmod -V 28o kmod 13 # depmod -V
29o e2fsprogs 1.41.4 # e2fsck -V 29o e2fsprogs 1.41.4 # e2fsck -V
30o jfsutils 1.1.3 # fsck.jfs -V 30o jfsutils 1.1.3 # fsck.jfs -V
31o reiserfsprogs 3.6.3 # reiserfsck -V 31o reiserfsprogs 3.6.3 # reiserfsck -V
@@ -132,12 +132,6 @@ is not build with CONFIG_KALLSYMS and you have no way to rebuild and
132reproduce the Oops with that option, then you can still decode that Oops 132reproduce the Oops with that option, then you can still decode that Oops
133with ksymoops. 133with ksymoops.
134 134
135Module-Init-Tools
136-----------------
137
138A new module loader is now in the kernel that requires module-init-tools
139to use. It is backward compatible with the 2.4.x series kernels.
140
141Mkinitrd 135Mkinitrd
142-------- 136--------
143 137
@@ -319,14 +313,15 @@ Util-linux
319---------- 313----------
320o <ftp://ftp.kernel.org/pub/linux/utils/util-linux/> 314o <ftp://ftp.kernel.org/pub/linux/utils/util-linux/>
321 315
316Kmod
317----
318o <https://www.kernel.org/pub/linux/utils/kernel/kmod/>
319o <https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git>
320
322Ksymoops 321Ksymoops
323-------- 322--------
324o <ftp://ftp.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/> 323o <ftp://ftp.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
325 324
326Module-Init-Tools
327-----------------
328o <ftp://ftp.kernel.org/pub/linux/kernel/people/rusty/modules/>
329
330Mkinitrd 325Mkinitrd
331-------- 326--------
332o <https://code.launchpad.net/initrd-tools/main> 327o <https://code.launchpad.net/initrd-tools/main>
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 1699a55b7b70..ef639960b272 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -112,9 +112,11 @@ $low_water_mark is expressed in blocks of size $data_block_size. If
112free space on the data device drops below this level then a dm event 112free space on the data device drops below this level then a dm event
113will be triggered which a userspace daemon should catch allowing it to 113will be triggered which a userspace daemon should catch allowing it to
114extend the pool device. Only one such event will be sent. 114extend the pool device. Only one such event will be sent.
115Resuming a device with a new table itself triggers an event so the 115
116userspace daemon can use this to detect a situation where a new table 116No special event is triggered if a just resumed device's free space is below
117already exceeds the threshold. 117the low water mark. However, resuming a device always triggers an
118event; a userspace daemon should verify that free space exceeds the low
119water mark when handling this event.
118 120
119A low water mark for the metadata device is maintained in the kernel and 121A low water mark for the metadata device is maintained in the kernel and
120will trigger a dm event if free space on the metadata device drops below 122will trigger a dm event if free space on the metadata device drops below
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index c261598164a7..17d43ca27f41 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -58,6 +58,6 @@ Example:
58 interrupts = <0 35 0x4>; 58 interrupts = <0 35 0x4>;
59 status = "disabled"; 59 status = "disabled";
60 dmas = <&dmahost 12 0 1>, 60 dmas = <&dmahost 12 0 1>,
61 <&dmahost 13 0 1 0>; 61 <&dmahost 13 1 0>;
62 dma-names = "rx", "rx"; 62 dma-names = "rx", "rx";
63 }; 63 };
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 6c0108eb0137..2139ea253142 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -233,7 +233,7 @@ data_err=ignore(*) Just print an error message if an error occurs
233data_err=abort Abort the journal if an error occurs in a file 233data_err=abort Abort the journal if an error occurs in a file
234 data buffer in ordered mode. 234 data buffer in ordered mode.
235 235
236grpid Give objects the same group ID as their creator. 236grpid New objects have the group ID of their parent.
237bsdgroups 237bsdgroups
238 238
239nogrpid (*) New objects have the group ID of their creator. 239nogrpid (*) New objects have the group ID of their creator.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 6716413c17ba..6d2689ebf824 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -383,32 +383,6 @@ is not associated with a file:
383 383
384 or if empty, the mapping is anonymous. 384 or if empty, the mapping is anonymous.
385 385
386The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
387of the individual tasks of a process. In this file you will see a mapping marked
388as [stack] if that task sees it as a stack. Hence, for the example above, the
389task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
390
39108048000-08049000 r-xp 00000000 03:00 8312 /opt/test
39208049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
3930804a000-0806b000 rw-p 00000000 00:00 0 [heap]
394a7cb1000-a7cb2000 ---p 00000000 00:00 0
395a7cb2000-a7eb2000 rw-p 00000000 00:00 0
396a7eb2000-a7eb3000 ---p 00000000 00:00 0
397a7eb3000-a7ed5000 rw-p 00000000 00:00 0 [stack]
398a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
399a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6
400a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6
401a800b000-a800e000 rw-p 00000000 00:00 0
402a800e000-a8022000 r-xp 00000000 03:00 14462 /lib/libpthread.so.0
403a8022000-a8023000 r--p 00013000 03:00 14462 /lib/libpthread.so.0
404a8023000-a8024000 rw-p 00014000 03:00 14462 /lib/libpthread.so.0
405a8024000-a8027000 rw-p 00000000 00:00 0
406a8027000-a8043000 r-xp 00000000 03:00 8317 /lib/ld-linux.so.2
407a8043000-a8044000 r--p 0001b000 03:00 8317 /lib/ld-linux.so.2
408a8044000-a8045000 rw-p 0001c000 03:00 8317 /lib/ld-linux.so.2
409aff35000-aff4a000 rw-p 00000000 00:00 0
410ffffe000-fffff000 r-xp 00000000 00:00 0 [vdso]
411
412The /proc/PID/smaps is an extension based on maps, showing the memory 386The /proc/PID/smaps is an extension based on maps, showing the memory
413consumption for each of the process's mappings. For each of mappings there 387consumption for each of the process's mappings. For each of mappings there
414is a series of lines such as the following: 388is a series of lines such as the following:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 41b24ef46d1e..44a2467ba15c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -652,7 +652,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
652 652
653 clearcpuid=BITNUM [X86] 653 clearcpuid=BITNUM [X86]
654 Disable CPUID feature X for the kernel. See 654 Disable CPUID feature X for the kernel. See
655 arch/x86/include/asm/cpufeature.h for the valid bit 655 arch/x86/include/asm/cpufeatures.h for the valid bit
656 numbers. Note the Linux specific bits are not necessarily 656 numbers. Note the Linux specific bits are not necessarily
657 stable over kernel options, but the vendor specific 657 stable over kernel options, but the vendor specific
658 ones should be. 658 ones should be.
@@ -2402,6 +2402,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2402 2402
2403 noalign [KNL,ARM] 2403 noalign [KNL,ARM]
2404 2404
2405 noaltinstr [S390] Disables alternative instructions patching
2406 (CPU alternatives feature).
2407
2405 noapic [SMP,APIC] Tells the kernel to not make use of any 2408 noapic [SMP,APIC] Tells the kernel to not make use of any
2406 IOAPICs that may be present in the system. 2409 IOAPICs that may be present in the system.
2407 2410
@@ -2457,6 +2460,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2457 allow data leaks with this option, which is equivalent 2460 allow data leaks with this option, which is equivalent
2458 to spectre_v2=off. 2461 to spectre_v2=off.
2459 2462
2463 nospec_store_bypass_disable
2464 [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
2465
2460 noxsave [BUGS=X86] Disables x86 extended register state save 2466 noxsave [BUGS=X86] Disables x86 extended register state save
2461 and restore using xsave. The kernel will fallback to 2467 and restore using xsave. The kernel will fallback to
2462 enabling legacy floating-point and sse state. 2468 enabling legacy floating-point and sse state.
@@ -2565,8 +2571,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2565 norandmaps Don't use address space randomization. Equivalent to 2571 norandmaps Don't use address space randomization. Equivalent to
2566 echo 0 > /proc/sys/kernel/randomize_va_space 2572 echo 0 > /proc/sys/kernel/randomize_va_space
2567 2573
2568 noreplace-paravirt [X86,IA-64,PV_OPS] Don't patch paravirt_ops
2569
2570 noreplace-smp [X86-32,SMP] Don't replace SMP instructions 2574 noreplace-smp [X86-32,SMP] Don't replace SMP instructions
2571 with UP alternatives 2575 with UP alternatives
2572 2576
@@ -3626,6 +3630,48 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
3626 Not specifying this option is equivalent to 3630 Not specifying this option is equivalent to
3627 spectre_v2=auto. 3631 spectre_v2=auto.
3628 3632
3633 spec_store_bypass_disable=
3634 [HW] Control Speculative Store Bypass (SSB) Disable mitigation
3635 (Speculative Store Bypass vulnerability)
3636
3637 Certain CPUs are vulnerable to an exploit against a
3638 a common industry wide performance optimization known
3639 as "Speculative Store Bypass" in which recent stores
3640 to the same memory location may not be observed by
3641 later loads during speculative execution. The idea
3642 is that such stores are unlikely and that they can
3643 be detected prior to instruction retirement at the
3644 end of a particular speculation execution window.
3645
3646 In vulnerable processors, the speculatively forwarded
3647 store can be used in a cache side channel attack, for
3648 example to read memory to which the attacker does not
3649 directly have access (e.g. inside sandboxed code).
3650
3651 This parameter controls whether the Speculative Store
3652 Bypass optimization is used.
3653
3654 on - Unconditionally disable Speculative Store Bypass
3655 off - Unconditionally enable Speculative Store Bypass
3656 auto - Kernel detects whether the CPU model contains an
3657 implementation of Speculative Store Bypass and
3658 picks the most appropriate mitigation. If the
3659 CPU is not vulnerable, "off" is selected. If the
3660 CPU is vulnerable the default mitigation is
3661 architecture and Kconfig dependent. See below.
3662 prctl - Control Speculative Store Bypass per thread
3663 via prctl. Speculative Store Bypass is enabled
3664 for a process by default. The state of the control
3665 is inherited on fork.
3666 seccomp - Same as "prctl" above, but all seccomp threads
3667 will disable SSB unless they explicitly opt out.
3668
3669 Not specifying this option is equivalent to
3670 spec_store_bypass_disable=auto.
3671
3672 Default mitigations:
3673 X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
3674
3629 spia_io_base= [HW,MTD] 3675 spia_io_base= [HW,MTD]
3630 spia_fio_base= 3676 spia_fio_base=
3631 spia_pedr= 3677 spia_pedr=
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
index 0fe1c6e0dbcd..bfc6b3e68cc4 100644
--- a/Documentation/networking/netdev-FAQ.txt
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -168,6 +168,15 @@ A: No. See above answer. In short, if you think it really belongs in
168 dash marker line as described in Documentation/SubmittingPatches to 168 dash marker line as described in Documentation/SubmittingPatches to
169 temporarily embed that information into the patch that you send. 169 temporarily embed that information into the patch that you send.
170 170
171Q: Are all networking bug fixes backported to all stable releases?
172
173A: Due to capacity, Dave could only take care of the backports for the last
174 2 stable releases. For earlier stable releases, each stable branch maintainer
175 is supposed to take care of them. If you find any patch is missing from an
176 earlier stable branch, please notify stable@vger.kernel.org with either a
177 commit ID or a formal patch backported, and CC Dave and other relevant
178 networking developers.
179
171Q: Someone said that the comment style and coding convention is different 180Q: Someone said that the comment style and coding convention is different
172 for the networking content. Is this true? 181 for the networking content. Is this true?
173 182
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index b784c270105f..ed6f6abaad57 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -273,11 +273,10 @@ struct clk:
273 273
274 %pC pll1 274 %pC pll1
275 %pCn pll1 275 %pCn pll1
276 %pCr 1560000000
277 276
278 For printing struct clk structures. '%pC' and '%pCn' print the name 277 For printing struct clk structures. '%pC' and '%pCn' print the name
279 (Common Clock Framework) or address (legacy clock framework) of the 278 (Common Clock Framework) or address (legacy clock framework) of the
280 structure; '%pCr' prints the current clock rate. 279 structure.
281 280
282 Passed by reference. 281 Passed by reference.
283 282
diff --git a/Documentation/spec_ctrl.txt b/Documentation/spec_ctrl.txt
new file mode 100644
index 000000000000..32f3d55c54b7
--- /dev/null
+++ b/Documentation/spec_ctrl.txt
@@ -0,0 +1,94 @@
1===================
2Speculation Control
3===================
4
5Quite some CPUs have speculation-related misfeatures which are in
6fact vulnerabilities causing data leaks in various forms even across
7privilege domains.
8
9The kernel provides mitigation for such vulnerabilities in various
10forms. Some of these mitigations are compile-time configurable and some
11can be supplied on the kernel command line.
12
13There is also a class of mitigations which are very expensive, but they can
14be restricted to a certain set of processes or tasks in controlled
15environments. The mechanism to control these mitigations is via
16:manpage:`prctl(2)`.
17
18There are two prctl options which are related to this:
19
20 * PR_GET_SPECULATION_CTRL
21
22 * PR_SET_SPECULATION_CTRL
23
24PR_GET_SPECULATION_CTRL
25-----------------------
26
27PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
28which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
29the following meaning:
30
31==== ===================== ===================================================
32Bit Define Description
33==== ===================== ===================================================
340 PR_SPEC_PRCTL Mitigation can be controlled per task by
35 PR_SET_SPECULATION_CTRL.
361 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
37 disabled.
382 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
39 enabled.
403 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
41 subsequent prctl(..., PR_SPEC_ENABLE) will fail.
42==== ===================== ===================================================
43
44If all bits are 0 the CPU is not affected by the speculation misfeature.
45
46If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
47available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
48misfeature will fail.
49
50PR_SET_SPECULATION_CTRL
51-----------------------
52
53PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
54is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
55in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
56PR_SPEC_FORCE_DISABLE.
57
58Common error codes
59------------------
60======= =================================================================
61Value Meaning
62======= =================================================================
63EINVAL The prctl is not implemented by the architecture or unused
64 prctl(2) arguments are not 0.
65
66ENODEV arg2 is selecting a not supported speculation misfeature.
67======= =================================================================
68
69PR_SET_SPECULATION_CTRL error codes
70-----------------------------------
71======= =================================================================
72Value Meaning
73======= =================================================================
740 Success
75
76ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
77 PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
78
79ENXIO Control of the selected speculation misfeature is not possible.
80 See PR_GET_SPECULATION_CTRL.
81
82EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
83 tried to enable it again.
84======= =================================================================
85
86Speculation misfeature controls
87-------------------------------
88- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
89
90 Invocations:
91 * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
92 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
93 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
94 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
diff --git a/Documentation/speculation.txt b/Documentation/speculation.txt
new file mode 100644
index 000000000000..e9e6cbae2841
--- /dev/null
+++ b/Documentation/speculation.txt
@@ -0,0 +1,90 @@
1This document explains potential effects of speculation, and how undesirable
2effects can be mitigated portably using common APIs.
3
4===========
5Speculation
6===========
7
8To improve performance and minimize average latencies, many contemporary CPUs
9employ speculative execution techniques such as branch prediction, performing
10work which may be discarded at a later stage.
11
12Typically speculative execution cannot be observed from architectural state,
13such as the contents of registers. However, in some cases it is possible to
14observe its impact on microarchitectural state, such as the presence or
15absence of data in caches. Such state may form side-channels which can be
16observed to extract secret information.
17
18For example, in the presence of branch prediction, it is possible for bounds
19checks to be ignored by code which is speculatively executed. Consider the
20following code:
21
22 int load_array(int *array, unsigned int index)
23 {
24 if (index >= MAX_ARRAY_ELEMS)
25 return 0;
26 else
27 return array[index];
28 }
29
30Which, on arm64, may be compiled to an assembly sequence such as:
31
32 CMP <index>, #MAX_ARRAY_ELEMS
33 B.LT less
34 MOV <returnval>, #0
35 RET
36 less:
37 LDR <returnval>, [<array>, <index>]
38 RET
39
40It is possible that a CPU mis-predicts the conditional branch, and
41speculatively loads array[index], even if index >= MAX_ARRAY_ELEMS. This
42value will subsequently be discarded, but the speculated load may affect
43microarchitectural state which can be subsequently measured.
44
45More complex sequences involving multiple dependent memory accesses may
46result in sensitive information being leaked. Consider the following
47code, building on the prior example:
48
49 int load_dependent_arrays(int *arr1, int *arr2, int index)
50 {
51 int val1, val2,
52
53 val1 = load_array(arr1, index);
54 val2 = load_array(arr2, val1);
55
56 return val2;
57 }
58
59Under speculation, the first call to load_array() may return the value
60of an out-of-bounds address, while the second call will influence
61microarchitectural state dependent on this value. This may provide an
62arbitrary read primitive.
63
64====================================
65Mitigating speculation side-channels
66====================================
67
68The kernel provides a generic API to ensure that bounds checks are
69respected even under speculation. Architectures which are affected by
70speculation-based side-channels are expected to implement these
71primitives.
72
73The array_index_nospec() helper in <linux/nospec.h> can be used to
74prevent information from being leaked via side-channels.
75
76A call to array_index_nospec(index, size) returns a sanitized index
77value that is bounded to [0, size) even under cpu speculation
78conditions.
79
80This can be used to protect the earlier load_array() example:
81
82 int load_array(int *array, unsigned int index)
83 {
84 if (index >= MAX_ARRAY_ELEMS)
85 return 0;
86 else {
87 index = array_index_nospec(index, MAX_ARRAY_ELEMS);
88 return array[index];
89 }
90 }
diff --git a/Makefile b/Makefile
index 39019c9d205c..7789195c6a59 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 4 2PATCHLEVEL = 4
3SUBLEVEL = 113 3SUBLEVEL = 150
4EXTRAVERSION = 4EXTRAVERSION =
5NAME = Blurry Fish Butt 5NAME = Blurry Fish Butt
6 6
@@ -87,10 +87,12 @@ endif
87ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4 87ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
88ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),) 88ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
89 quiet=silent_ 89 quiet=silent_
90 tools_silent=s
90endif 91endif
91else # make-3.8x 92else # make-3.8x
92ifneq ($(filter s% -s%,$(MAKEFLAGS)),) 93ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
93 quiet=silent_ 94 quiet=silent_
95 tools_silent=-s
94endif 96endif
95endif 97endif
96 98
@@ -416,7 +418,8 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
416export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS 418export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
417 419
418export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS 420export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
419export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KASAN 421export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV
422export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE
420export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE 423export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
421export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE 424export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
422export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL 425export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -622,6 +625,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
622KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) 625KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
623KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) 626KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
624KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) 627KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
628KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
625 629
626ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE 630ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
627KBUILD_CFLAGS += -Os 631KBUILD_CFLAGS += -Os
@@ -782,6 +786,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
782# disable invalid "can't wrap" optimizations for signed / pointers 786# disable invalid "can't wrap" optimizations for signed / pointers
783KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) 787KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
784 788
789# clang sets -fmerge-all-constants by default as optimization, but this
790# is non-conforming behavior for C and in fact breaks the kernel, so we
791# need to disable it here generally.
792KBUILD_CFLAGS += $(call cc-option,-fno-merge-all-constants)
793
794# for gcc -fno-merge-all-constants disables everything, but it is fine
795# to have actual conforming behavior enabled.
796KBUILD_CFLAGS += $(call cc-option,-fmerge-constants)
797
785# Make sure -fstack-check isn't enabled (like gentoo apparently did) 798# Make sure -fstack-check isn't enabled (like gentoo apparently did)
786KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,) 799KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
787 800
@@ -1523,11 +1536,11 @@ image_name:
1523# Clear a bunch of variables before executing the submake 1536# Clear a bunch of variables before executing the submake
1524tools/: FORCE 1537tools/: FORCE
1525 $(Q)mkdir -p $(objtree)/tools 1538 $(Q)mkdir -p $(objtree)/tools
1526 $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ 1539 $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/
1527 1540
1528tools/%: FORCE 1541tools/%: FORCE
1529 $(Q)mkdir -p $(objtree)/tools 1542 $(Q)mkdir -p $(objtree)/tools
1530 $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ $* 1543 $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ $*
1531 1544
1532# Single targets 1545# Single targets
1533# --------------------------------------------------------------------------- 1546# ---------------------------------------------------------------------------
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
index f939794363ac..56474690e685 100644
--- a/arch/alpha/include/asm/futex.h
+++ b/arch/alpha/include/asm/futex.h
@@ -29,18 +29,10 @@
29 : "r" (uaddr), "r"(oparg) \ 29 : "r" (uaddr), "r"(oparg) \
30 : "memory") 30 : "memory")
31 31
32static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) 32static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
33 u32 __user *uaddr)
33{ 34{
34 int op = (encoded_op >> 28) & 7;
35 int cmp = (encoded_op >> 24) & 15;
36 int oparg = (encoded_op << 8) >> 20;
37 int cmparg = (encoded_op << 20) >> 20;
38 int oldval = 0, ret; 35 int oldval = 0, ret;
39 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
40 oparg = 1 << oparg;
41
42 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
43 return -EFAULT;
44 36
45 pagefault_disable(); 37 pagefault_disable();
46 38
@@ -66,17 +58,9 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
66 58
67 pagefault_enable(); 59 pagefault_enable();
68 60
69 if (!ret) { 61 if (!ret)
70 switch (cmp) { 62 *oval = oldval;
71 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 63
72 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
73 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
74 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
75 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
76 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
77 default: ret = -ENOSYS;
78 }
79 }
80 return ret; 64 return ret;
81} 65}
82 66
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index 0ca9724597c1..7081e52291d0 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -11,6 +11,10 @@
11 * Atomic exchange. 11 * Atomic exchange.
12 * Since it can be used to implement critical sections 12 * Since it can be used to implement critical sections
13 * it must clobber "memory" (also for interrupts in UP). 13 * it must clobber "memory" (also for interrupts in UP).
14 *
15 * The leading and the trailing memory barriers guarantee that these
16 * operations are fully ordered.
17 *
14 */ 18 */
15 19
16static inline unsigned long 20static inline unsigned long
@@ -18,6 +22,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
18{ 22{
19 unsigned long ret, tmp, addr64; 23 unsigned long ret, tmp, addr64;
20 24
25 smp_mb();
21 __asm__ __volatile__( 26 __asm__ __volatile__(
22 " andnot %4,7,%3\n" 27 " andnot %4,7,%3\n"
23 " insbl %1,%4,%1\n" 28 " insbl %1,%4,%1\n"
@@ -42,6 +47,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
42{ 47{
43 unsigned long ret, tmp, addr64; 48 unsigned long ret, tmp, addr64;
44 49
50 smp_mb();
45 __asm__ __volatile__( 51 __asm__ __volatile__(
46 " andnot %4,7,%3\n" 52 " andnot %4,7,%3\n"
47 " inswl %1,%4,%1\n" 53 " inswl %1,%4,%1\n"
@@ -66,6 +72,7 @@ ____xchg(_u32, volatile int *m, unsigned long val)
66{ 72{
67 unsigned long dummy; 73 unsigned long dummy;
68 74
75 smp_mb();
69 __asm__ __volatile__( 76 __asm__ __volatile__(
70 "1: ldl_l %0,%4\n" 77 "1: ldl_l %0,%4\n"
71 " bis $31,%3,%1\n" 78 " bis $31,%3,%1\n"
@@ -86,6 +93,7 @@ ____xchg(_u64, volatile long *m, unsigned long val)
86{ 93{
87 unsigned long dummy; 94 unsigned long dummy;
88 95
96 smp_mb();
89 __asm__ __volatile__( 97 __asm__ __volatile__(
90 "1: ldq_l %0,%4\n" 98 "1: ldq_l %0,%4\n"
91 " bis $31,%3,%1\n" 99 " bis $31,%3,%1\n"
@@ -127,10 +135,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
127 * store NEW in MEM. Return the initial value in MEM. Success is 135 * store NEW in MEM. Return the initial value in MEM. Success is
128 * indicated by comparing RETURN with OLD. 136 * indicated by comparing RETURN with OLD.
129 * 137 *
130 * The memory barrier should be placed in SMP only when we actually 138 * The leading and the trailing memory barriers guarantee that these
131 * make the change. If we don't change anything (so if the returned 139 * operations are fully ordered.
132 * prev is equal to old) then we aren't acquiring anything new and 140 *
133 * we don't need any memory barrier as far I can tell. 141 * The trailing memory barrier is placed in SMP unconditionally, in
142 * order to guarantee that dependency ordering is preserved when a
143 * dependency is headed by an unsuccessful operation.
134 */ 144 */
135 145
136static inline unsigned long 146static inline unsigned long
@@ -138,6 +148,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
138{ 148{
139 unsigned long prev, tmp, cmp, addr64; 149 unsigned long prev, tmp, cmp, addr64;
140 150
151 smp_mb();
141 __asm__ __volatile__( 152 __asm__ __volatile__(
142 " andnot %5,7,%4\n" 153 " andnot %5,7,%4\n"
143 " insbl %1,%5,%1\n" 154 " insbl %1,%5,%1\n"
@@ -149,8 +160,8 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
149 " or %1,%2,%2\n" 160 " or %1,%2,%2\n"
150 " stq_c %2,0(%4)\n" 161 " stq_c %2,0(%4)\n"
151 " beq %2,3f\n" 162 " beq %2,3f\n"
152 __ASM__MB
153 "2:\n" 163 "2:\n"
164 __ASM__MB
154 ".subsection 2\n" 165 ".subsection 2\n"
155 "3: br 1b\n" 166 "3: br 1b\n"
156 ".previous" 167 ".previous"
@@ -165,6 +176,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
165{ 176{
166 unsigned long prev, tmp, cmp, addr64; 177 unsigned long prev, tmp, cmp, addr64;
167 178
179 smp_mb();
168 __asm__ __volatile__( 180 __asm__ __volatile__(
169 " andnot %5,7,%4\n" 181 " andnot %5,7,%4\n"
170 " inswl %1,%5,%1\n" 182 " inswl %1,%5,%1\n"
@@ -176,8 +188,8 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
176 " or %1,%2,%2\n" 188 " or %1,%2,%2\n"
177 " stq_c %2,0(%4)\n" 189 " stq_c %2,0(%4)\n"
178 " beq %2,3f\n" 190 " beq %2,3f\n"
179 __ASM__MB
180 "2:\n" 191 "2:\n"
192 __ASM__MB
181 ".subsection 2\n" 193 ".subsection 2\n"
182 "3: br 1b\n" 194 "3: br 1b\n"
183 ".previous" 195 ".previous"
@@ -192,6 +204,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
192{ 204{
193 unsigned long prev, cmp; 205 unsigned long prev, cmp;
194 206
207 smp_mb();
195 __asm__ __volatile__( 208 __asm__ __volatile__(
196 "1: ldl_l %0,%5\n" 209 "1: ldl_l %0,%5\n"
197 " cmpeq %0,%3,%1\n" 210 " cmpeq %0,%3,%1\n"
@@ -199,8 +212,8 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
199 " mov %4,%1\n" 212 " mov %4,%1\n"
200 " stl_c %1,%2\n" 213 " stl_c %1,%2\n"
201 " beq %1,3f\n" 214 " beq %1,3f\n"
202 __ASM__MB
203 "2:\n" 215 "2:\n"
216 __ASM__MB
204 ".subsection 2\n" 217 ".subsection 2\n"
205 "3: br 1b\n" 218 "3: br 1b\n"
206 ".previous" 219 ".previous"
@@ -215,6 +228,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
215{ 228{
216 unsigned long prev, cmp; 229 unsigned long prev, cmp;
217 230
231 smp_mb();
218 __asm__ __volatile__( 232 __asm__ __volatile__(
219 "1: ldq_l %0,%5\n" 233 "1: ldq_l %0,%5\n"
220 " cmpeq %0,%3,%1\n" 234 " cmpeq %0,%3,%1\n"
@@ -222,8 +236,8 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
222 " mov %4,%1\n" 236 " mov %4,%1\n"
223 " stq_c %1,%2\n" 237 " stq_c %1,%2\n"
224 " beq %1,3f\n" 238 " beq %1,3f\n"
225 __ASM__MB
226 "2:\n" 239 "2:\n"
240 __ASM__MB
227 ".subsection 2\n" 241 ".subsection 2\n"
228 "3: br 1b\n" 242 "3: br 1b\n"
229 ".previous" 243 ".previous"
diff --git a/arch/alpha/kernel/console.c b/arch/alpha/kernel/console.c
index 6a61deed4a85..ab228ed45945 100644
--- a/arch/alpha/kernel/console.c
+++ b/arch/alpha/kernel/console.c
@@ -20,6 +20,7 @@
20struct pci_controller *pci_vga_hose; 20struct pci_controller *pci_vga_hose;
21static struct resource alpha_vga = { 21static struct resource alpha_vga = {
22 .name = "alpha-vga+", 22 .name = "alpha-vga+",
23 .flags = IORESOURCE_IO,
23 .start = 0x3C0, 24 .start = 0x3C0,
24 .end = 0x3DF 25 .end = 0x3DF
25}; 26};
diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h
index 2b0ac429f5eb..412bb3c24f36 100644
--- a/arch/alpha/kernel/pci_impl.h
+++ b/arch/alpha/kernel/pci_impl.h
@@ -143,7 +143,8 @@ struct pci_iommu_arena
143}; 143};
144 144
145#if defined(CONFIG_ALPHA_SRM) && \ 145#if defined(CONFIG_ALPHA_SRM) && \
146 (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA)) 146 (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA) || \
147 defined(CONFIG_ALPHA_AVANTI))
147# define NEED_SRM_SAVE_RESTORE 148# define NEED_SRM_SAVE_RESTORE
148#else 149#else
149# undef NEED_SRM_SAVE_RESTORE 150# undef NEED_SRM_SAVE_RESTORE
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 84d13263ce46..8095fb2c5c94 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -273,12 +273,13 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
273 application calling fork. */ 273 application calling fork. */
274 if (clone_flags & CLONE_SETTLS) 274 if (clone_flags & CLONE_SETTLS)
275 childti->pcb.unique = regs->r20; 275 childti->pcb.unique = regs->r20;
276 else
277 regs->r20 = 0; /* OSF/1 has some strange fork() semantics. */
276 childti->pcb.usp = usp ?: rdusp(); 278 childti->pcb.usp = usp ?: rdusp();
277 *childregs = *regs; 279 *childregs = *regs;
278 childregs->r0 = 0; 280 childregs->r0 = 0;
279 childregs->r19 = 0; 281 childregs->r19 = 0;
280 childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */ 282 childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */
281 regs->r20 = 0;
282 stack = ((struct switch_stack *) regs) - 1; 283 stack = ((struct switch_stack *) regs) - 1;
283 *childstack = *stack; 284 *childstack = *stack;
284 childstack->r26 = (unsigned long) ret_from_fork; 285 childstack->r26 = (unsigned long) ret_from_fork;
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 2d785f5a3041..c4ee25e88a7b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -479,7 +479,6 @@ config ARC_CURR_IN_REG
479 479
480config ARC_EMUL_UNALIGNED 480config ARC_EMUL_UNALIGNED
481 bool "Emulate unaligned memory access (userspace only)" 481 bool "Emulate unaligned memory access (userspace only)"
482 default N
483 select SYSCTL_ARCH_UNALIGN_NO_WARN 482 select SYSCTL_ARCH_UNALIGN_NO_WARN
484 select SYSCTL_ARCH_UNALIGN_ALLOW 483 select SYSCTL_ARCH_UNALIGN_ALLOW
485 depends on ISA_ARCOMPACT 484 depends on ISA_ARCOMPACT
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 11e1b1f3acda..eb887dd13e74 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -73,20 +73,11 @@
73 73
74#endif 74#endif
75 75
76static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 76static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
77 u32 __user *uaddr)
77{ 78{
78 int op = (encoded_op >> 28) & 7;
79 int cmp = (encoded_op >> 24) & 15;
80 int oparg = (encoded_op << 8) >> 20;
81 int cmparg = (encoded_op << 20) >> 20;
82 int oldval = 0, ret; 79 int oldval = 0, ret;
83 80
84 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
85 oparg = 1 << oparg;
86
87 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
88 return -EFAULT;
89
90#ifndef CONFIG_ARC_HAS_LLSC 81#ifndef CONFIG_ARC_HAS_LLSC
91 preempt_disable(); /* to guarantee atomic r-m-w of futex op */ 82 preempt_disable(); /* to guarantee atomic r-m-w of futex op */
92#endif 83#endif
@@ -118,30 +109,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
118 preempt_enable(); 109 preempt_enable();
119#endif 110#endif
120 111
121 if (!ret) { 112 if (!ret)
122 switch (cmp) { 113 *oval = oldval;
123 case FUTEX_OP_CMP_EQ: 114
124 ret = (oldval == cmparg);
125 break;
126 case FUTEX_OP_CMP_NE:
127 ret = (oldval != cmparg);
128 break;
129 case FUTEX_OP_CMP_LT:
130 ret = (oldval < cmparg);
131 break;
132 case FUTEX_OP_CMP_GE:
133 ret = (oldval >= cmparg);
134 break;
135 case FUTEX_OP_CMP_LE:
136 ret = (oldval <= cmparg);
137 break;
138 case FUTEX_OP_CMP_GT:
139 ret = (oldval > cmparg);
140 break;
141 default:
142 ret = -ENOSYS;
143 }
144 }
145 return ret; 115 return ret;
146} 116}
147 117
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 429957f1c236..8f1145ed0046 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -102,7 +102,7 @@ typedef pte_t * pgtable_t;
102#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 102#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
103 103
104/* Default Permissions for stack/heaps pages (Non Executable) */ 104/* Default Permissions for stack/heaps pages (Non Executable) */
105#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE) 105#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
106 106
107#define WANT_PAGE_VIRTUAL 1 107#define WANT_PAGE_VIRTUAL 1
108 108
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index e5fec320f158..c07d7b0a4058 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -372,7 +372,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
372 372
373/* Decode a PTE containing swap "identifier "into constituents */ 373/* Decode a PTE containing swap "identifier "into constituents */
374#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f) 374#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
375#define __swp_offset(pte_lookalike) ((pte_lookalike).val << 13) 375#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13)
376 376
377/* NOPs, to keep generic kernel happy */ 377/* NOPs, to keep generic kernel happy */
378#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 378#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 0f296c1b5617..d70e96c64e6e 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -1084,7 +1084,8 @@
1084 reg = <0x48038000 0x2000>, 1084 reg = <0x48038000 0x2000>,
1085 <0x46000000 0x400000>; 1085 <0x46000000 0x400000>;
1086 reg-names = "mpu", "dat"; 1086 reg-names = "mpu", "dat";
1087 interrupts = <80>, <81>; 1087 interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
1088 <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
1088 interrupt-names = "tx", "rx"; 1089 interrupt-names = "tx", "rx";
1089 status = "disabled"; 1090 status = "disabled";
1090 dmas = <&edma 8 2>, 1091 dmas = <&edma 8 2>,
@@ -1098,7 +1099,8 @@
1098 reg = <0x4803C000 0x2000>, 1099 reg = <0x4803C000 0x2000>,
1099 <0x46400000 0x400000>; 1100 <0x46400000 0x400000>;
1100 reg-names = "mpu", "dat"; 1101 reg-names = "mpu", "dat";
1101 interrupts = <82>, <83>; 1102 interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
1103 <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
1102 interrupt-names = "tx", "rx"; 1104 interrupt-names = "tx", "rx";
1103 status = "disabled"; 1105 status = "disabled";
1104 dmas = <&edma 10 2>, 1106 dmas = <&edma 10 2>,
diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi
index a7da0dd0c98f..0898213f3bb2 100644
--- a/arch/arm/boot/dts/at91sam9g25.dtsi
+++ b/arch/arm/boot/dts/at91sam9g25.dtsi
@@ -21,7 +21,7 @@
21 atmel,mux-mask = < 21 atmel,mux-mask = <
22 /* A B C */ 22 /* A B C */
23 0xffffffff 0xffe0399f 0xc000001c /* pioA */ 23 0xffffffff 0xffe0399f 0xc000001c /* pioA */
24 0x0007ffff 0x8000fe3f 0x00000000 /* pioB */ 24 0x0007ffff 0x00047e3f 0x00000000 /* pioB */
25 0x80000000 0x07c0ffff 0xb83fffff /* pioC */ 25 0x80000000 0x07c0ffff 0xb83fffff /* pioC */
26 0x003fffff 0x003f8000 0x00000000 /* pioD */ 26 0x003fffff 0x003f8000 0x00000000 /* pioD */
27 >; 27 >;
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 40a474c4374b..4c52358734ef 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -359,7 +359,7 @@
359 reg = <0>; 359 reg = <0>;
360 vdd3-supply = <&lcd_vdd3_reg>; 360 vdd3-supply = <&lcd_vdd3_reg>;
361 vci-supply = <&ldo25_reg>; 361 vci-supply = <&ldo25_reg>;
362 reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>; 362 reset-gpios = <&gpf2 1 GPIO_ACTIVE_HIGH>;
363 power-on-delay= <50>; 363 power-on-delay= <50>;
364 reset-delay = <100>; 364 reset-delay = <100>;
365 init-delay = <100>; 365 init-delay = <100>;
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts
index 96d7eede412e..036c9bd9bf75 100644
--- a/arch/arm/boot/dts/imx53-qsrb.dts
+++ b/arch/arm/boot/dts/imx53-qsrb.dts
@@ -23,7 +23,7 @@
23 imx53-qsrb { 23 imx53-qsrb {
24 pinctrl_pmic: pmicgrp { 24 pinctrl_pmic: pmicgrp {
25 fsl,pins = < 25 fsl,pins = <
26 MX53_PAD_CSI0_DAT5__GPIO5_23 0x1e4 /* IRQ */ 26 MX53_PAD_CSI0_DAT5__GPIO5_23 0x1c4 /* IRQ */
27 >; 27 >;
28 }; 28 };
29 }; 29 };
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 399103b8e2c9..c81fb8fdc41f 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -95,7 +95,7 @@
95 clocks = <&clks IMX6Q_CLK_ECSPI5>, 95 clocks = <&clks IMX6Q_CLK_ECSPI5>,
96 <&clks IMX6Q_CLK_ECSPI5>; 96 <&clks IMX6Q_CLK_ECSPI5>;
97 clock-names = "ipg", "per"; 97 clock-names = "ipg", "per";
98 dmas = <&sdma 11 7 1>, <&sdma 12 7 2>; 98 dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
99 dma-names = "rx", "tx"; 99 dma-names = "rx", "tx";
100 status = "disabled"; 100 status = "disabled";
101 }; 101 };
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 167f77b3bd43..6963dff815dc 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -1250,7 +1250,7 @@
1250 /* non-prefetchable memory */ 1250 /* non-prefetchable memory */
1251 0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; 1251 0x82000000 0 0x08000000 0x08000000 0 0x00f00000>;
1252 num-lanes = <1>; 1252 num-lanes = <1>;
1253 interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; 1253 interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
1254 clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>, 1254 clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>,
1255 <&clks IMX6SX_CLK_PCIE_AXI>, 1255 <&clks IMX6SX_CLK_PCIE_AXI>,
1256 <&clks IMX6SX_CLK_LVDS1_OUT>, 1256 <&clks IMX6SX_CLK_LVDS1_OUT>,
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index dde59c477a2b..ad5e5a128a97 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -94,6 +94,8 @@
94}; 94};
95 95
96&i2c1 { 96&i2c1 {
97 pinctrl-names = "default";
98 pinctrl-0 = <&i2c1_pins>;
97 clock-frequency = <2600000>; 99 clock-frequency = <2600000>;
98 100
99 twl: twl@48 { 101 twl: twl@48 {
@@ -141,6 +143,12 @@
141 OMAP3_CORE1_IOPAD(0x218e, PIN_OUTPUT | MUX_MODE4) /* mcbsp1_fsr.gpio_157 */ 143 OMAP3_CORE1_IOPAD(0x218e, PIN_OUTPUT | MUX_MODE4) /* mcbsp1_fsr.gpio_157 */
142 >; 144 >;
143 }; 145 };
146 i2c1_pins: pinmux_i2c1_pins {
147 pinctrl-single,pins = <
148 OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
149 OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
150 >;
151 };
144}; 152};
145 153
146&omap3_pmx_core2 { 154&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts
index 0521e6864cb7..76fce89d4f69 100644
--- a/arch/arm/boot/dts/ls1021a-qds.dts
+++ b/arch/arm/boot/dts/ls1021a-qds.dts
@@ -215,7 +215,7 @@
215 reg = <0x2a>; 215 reg = <0x2a>;
216 VDDA-supply = <&reg_3p3v>; 216 VDDA-supply = <&reg_3p3v>;
217 VDDIO-supply = <&reg_3p3v>; 217 VDDIO-supply = <&reg_3p3v>;
218 clocks = <&sys_mclk 1>; 218 clocks = <&sys_mclk>;
219 }; 219 };
220 }; 220 };
221 }; 221 };
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index fbb89d13401e..674df87629bd 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -167,7 +167,7 @@
167 reg = <0x0a>; 167 reg = <0x0a>;
168 VDDA-supply = <&reg_3p3v>; 168 VDDA-supply = <&reg_3p3v>;
169 VDDIO-supply = <&reg_3p3v>; 169 VDDIO-supply = <&reg_3p3v>;
170 clocks = <&sys_mclk 1>; 170 clocks = <&sys_mclk>;
171 }; 171 };
172}; 172};
173 173
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 9430a9928199..00de37fe5f8a 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -132,7 +132,7 @@
132 }; 132 };
133 133
134 esdhc: esdhc@1560000 { 134 esdhc: esdhc@1560000 {
135 compatible = "fsl,esdhc"; 135 compatible = "fsl,ls1021a-esdhc", "fsl,esdhc";
136 reg = <0x0 0x1560000 0x0 0x10000>; 136 reg = <0x0 0x1560000 0x0 0x10000>;
137 interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>; 137 interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
138 clock-frequency = <0>; 138 clock-frequency = <0>;
diff --git a/arch/arm/boot/dts/moxart-uc7112lx.dts b/arch/arm/boot/dts/moxart-uc7112lx.dts
index 10d088df0c35..4a962a26482d 100644
--- a/arch/arm/boot/dts/moxart-uc7112lx.dts
+++ b/arch/arm/boot/dts/moxart-uc7112lx.dts
@@ -6,7 +6,7 @@
6 */ 6 */
7 7
8/dts-v1/; 8/dts-v1/;
9/include/ "moxart.dtsi" 9#include "moxart.dtsi"
10 10
11/ { 11/ {
12 model = "MOXA UC-7112-LX"; 12 model = "MOXA UC-7112-LX";
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
index 1fd27ed65a01..64f2f44235d0 100644
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8/include/ "skeleton.dtsi" 8/include/ "skeleton.dtsi"
9#include <dt-bindings/interrupt-controller/irq.h>
9 10
10/ { 11/ {
11 compatible = "moxa,moxart"; 12 compatible = "moxa,moxart";
@@ -36,8 +37,8 @@
36 ranges; 37 ranges;
37 38
38 intc: interrupt-controller@98800000 { 39 intc: interrupt-controller@98800000 {
39 compatible = "moxa,moxart-ic"; 40 compatible = "moxa,moxart-ic", "faraday,ftintc010";
40 reg = <0x98800000 0x38>; 41 reg = <0x98800000 0x100>;
41 interrupt-controller; 42 interrupt-controller;
42 #interrupt-cells = <2>; 43 #interrupt-cells = <2>;
43 interrupt-mask = <0x00080000>; 44 interrupt-mask = <0x00080000>;
@@ -59,7 +60,7 @@
59 timer: timer@98400000 { 60 timer: timer@98400000 {
60 compatible = "moxa,moxart-timer"; 61 compatible = "moxa,moxart-timer";
61 reg = <0x98400000 0x42>; 62 reg = <0x98400000 0x42>;
62 interrupts = <19 1>; 63 interrupts = <19 IRQ_TYPE_EDGE_FALLING>;
63 clocks = <&clk_apb>; 64 clocks = <&clk_apb>;
64 }; 65 };
65 66
@@ -80,7 +81,7 @@
80 dma: dma@90500000 { 81 dma: dma@90500000 {
81 compatible = "moxa,moxart-dma"; 82 compatible = "moxa,moxart-dma";
82 reg = <0x90500080 0x40>; 83 reg = <0x90500080 0x40>;
83 interrupts = <24 0>; 84 interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
84 #dma-cells = <1>; 85 #dma-cells = <1>;
85 }; 86 };
86 87
@@ -93,7 +94,7 @@
93 sdhci: sdhci@98e00000 { 94 sdhci: sdhci@98e00000 {
94 compatible = "moxa,moxart-sdhci"; 95 compatible = "moxa,moxart-sdhci";
95 reg = <0x98e00000 0x5C>; 96 reg = <0x98e00000 0x5C>;
96 interrupts = <5 0>; 97 interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
97 clocks = <&clk_apb>; 98 clocks = <&clk_apb>;
98 dmas = <&dma 5>, 99 dmas = <&dma 5>,
99 <&dma 5>; 100 <&dma 5>;
@@ -120,7 +121,7 @@
120 mac0: mac@90900000 { 121 mac0: mac@90900000 {
121 compatible = "moxa,moxart-mac"; 122 compatible = "moxa,moxart-mac";
122 reg = <0x90900000 0x90>; 123 reg = <0x90900000 0x90>;
123 interrupts = <25 0>; 124 interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
124 phy-handle = <&ethphy0>; 125 phy-handle = <&ethphy0>;
125 phy-mode = "mii"; 126 phy-mode = "mii";
126 status = "disabled"; 127 status = "disabled";
@@ -129,7 +130,7 @@
129 mac1: mac@92000000 { 130 mac1: mac@92000000 {
130 compatible = "moxa,moxart-mac"; 131 compatible = "moxa,moxart-mac";
131 reg = <0x92000000 0x90>; 132 reg = <0x92000000 0x90>;
132 interrupts = <27 0>; 133 interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
133 phy-handle = <&ethphy1>; 134 phy-handle = <&ethphy1>;
134 phy-mode = "mii"; 135 phy-mode = "mii";
135 status = "disabled"; 136 status = "disabled";
@@ -138,7 +139,7 @@
138 uart0: uart@98200000 { 139 uart0: uart@98200000 {
139 compatible = "ns16550a"; 140 compatible = "ns16550a";
140 reg = <0x98200000 0x20>; 141 reg = <0x98200000 0x20>;
141 interrupts = <31 8>; 142 interrupts = <31 IRQ_TYPE_LEVEL_HIGH>;
142 reg-shift = <2>; 143 reg-shift = <2>;
143 reg-io-width = <4>; 144 reg-io-width = <4>;
144 clock-frequency = <14745600>; 145 clock-frequency = <14745600>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 2d90867a1df7..8c77535095fc 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -866,14 +866,12 @@
866 usbhsohci: ohci@4a064800 { 866 usbhsohci: ohci@4a064800 {
867 compatible = "ti,ohci-omap3"; 867 compatible = "ti,ohci-omap3";
868 reg = <0x4a064800 0x400>; 868 reg = <0x4a064800 0x400>;
869 interrupt-parent = <&gic>;
870 interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>; 869 interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
871 }; 870 };
872 871
873 usbhsehci: ehci@4a064c00 { 872 usbhsehci: ehci@4a064c00 {
874 compatible = "ti,ehci-omap"; 873 compatible = "ti,ehci-omap";
875 reg = <0x4a064c00 0x400>; 874 reg = <0x4a064c00 0x400>;
876 interrupt-parent = <&gic>;
877 interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>; 875 interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
878 }; 876 };
879 }; 877 };
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 7b39d8fae61e..bd83a61f724f 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -1360,8 +1360,11 @@
1360 compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks"; 1360 compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
1361 reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>; 1361 reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
1362 clocks = <&p_clk>, 1362 clocks = <&p_clk>,
1363 <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, 1363 <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
1364 <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, 1364 <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
1365 <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
1366 <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
1367 <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
1365 <&p_clk>, 1368 <&p_clk>,
1366 <&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>, 1369 <&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
1367 <&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>, 1370 <&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index fc44ea361a4b..62eae315af1f 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -280,7 +280,7 @@
280 x2_clk: x2-clock { 280 x2_clk: x2-clock {
281 compatible = "fixed-clock"; 281 compatible = "fixed-clock";
282 #clock-cells = <0>; 282 #clock-cells = <0>;
283 clock-frequency = <148500000>; 283 clock-frequency = <74250000>;
284 }; 284 };
285 285
286 x13_clk: x13-clock { 286 x13_clk: x13-clock {
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 328f48bd15e7..d2585a4c6098 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -1374,8 +1374,11 @@
1374 compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks"; 1374 compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks";
1375 reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>; 1375 reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
1376 clocks = <&p_clk>, 1376 clocks = <&p_clk>,
1377 <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, 1377 <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
1378 <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, 1378 <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
1379 <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
1380 <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
1381 <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
1379 <&p_clk>, 1382 <&p_clk>,
1380 <&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>, 1383 <&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
1381 <&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>, 1384 <&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
index 8344a0ee2b86..b03fe747b98c 100644
--- a/arch/arm/boot/dts/s5pv210.dtsi
+++ b/arch/arm/boot/dts/s5pv210.dtsi
@@ -461,6 +461,7 @@
461 compatible = "samsung,exynos4210-ohci"; 461 compatible = "samsung,exynos4210-ohci";
462 reg = <0xec300000 0x100>; 462 reg = <0xec300000 0x100>;
463 interrupts = <23>; 463 interrupts = <23>;
464 interrupt-parent = <&vic1>;
464 clocks = <&clocks CLK_USB_HOST>; 465 clocks = <&clocks CLK_USB_HOST>;
465 clock-names = "usbhost"; 466 clock-names = "usbhost";
466 #address-cells = <1>; 467 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 3daf8d5d7878..fb0d1b252dc8 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1354,7 +1354,7 @@
1354 pinctrl@fc06a000 { 1354 pinctrl@fc06a000 {
1355 #address-cells = <1>; 1355 #address-cells = <1>;
1356 #size-cells = <1>; 1356 #size-cells = <1>;
1357 compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus"; 1357 compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus";
1358 ranges = <0xfc068000 0xfc068000 0x100 1358 ranges = <0xfc068000 0xfc068000 0x100
1359 0xfc06a000 0xfc06a000 0x4000>; 1359 0xfc06a000 0xfc06a000 0x4000>;
1360 /* WARNING: revisit as pin spec has changed */ 1360 /* WARNING: revisit as pin spec has changed */
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 39c470e291f9..69381deeb703 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -738,7 +738,7 @@
738 timer@fffec600 { 738 timer@fffec600 {
739 compatible = "arm,cortex-a9-twd-timer"; 739 compatible = "arm,cortex-a9-twd-timer";
740 reg = <0xfffec600 0x100>; 740 reg = <0xfffec600 0x100>;
741 interrupts = <1 13 0xf04>; 741 interrupts = <1 13 0xf01>;
742 clocks = <&mpu_periph_clk>; 742 clocks = <&mpu_periph_clk>;
743 }; 743 };
744 744
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
index e48857249ce7..3d83992efd90 100644
--- a/arch/arm/boot/dts/spear1310-evb.dts
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -349,7 +349,7 @@
349 spi0: spi@e0100000 { 349 spi0: spi@e0100000 {
350 status = "okay"; 350 status = "okay";
351 num-cs = <3>; 351 num-cs = <3>;
352 cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>; 352 cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>;
353 353
354 stmpe610@0 { 354 stmpe610@0 {
355 compatible = "st,stmpe610"; 355 compatible = "st,stmpe610";
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index df2232d767ed..6361cbfcbe5e 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -141,8 +141,8 @@
141 reg = <0xb4100000 0x1000>; 141 reg = <0xb4100000 0x1000>;
142 interrupts = <0 105 0x4>; 142 interrupts = <0 105 0x4>;
143 status = "disabled"; 143 status = "disabled";
144 dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */ 144 dmas = <&dwdma0 12 0 1>,
145 <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */ 145 <&dwdma0 13 1 0>;
146 dma-names = "tx", "rx"; 146 dma-names = "tx", "rx";
147 }; 147 };
148 148
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 14594ce8c18a..8fd8a3328acb 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -100,7 +100,7 @@
100 reg = <0xb2800000 0x1000>; 100 reg = <0xb2800000 0x1000>;
101 interrupts = <0 29 0x4>; 101 interrupts = <0 29 0x4>;
102 status = "disabled"; 102 status = "disabled";
103 dmas = <&dwdma0 0 0 0 0>; 103 dmas = <&dwdma0 0 0 0>;
104 dma-names = "data"; 104 dma-names = "data";
105 }; 105 };
106 106
@@ -288,8 +288,8 @@
288 #size-cells = <0>; 288 #size-cells = <0>;
289 interrupts = <0 31 0x4>; 289 interrupts = <0 31 0x4>;
290 status = "disabled"; 290 status = "disabled";
291 dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */ 291 dmas = <&dwdma0 4 0 0>,
292 <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */ 292 <&dwdma0 5 0 0>;
293 dma-names = "tx", "rx"; 293 dma-names = "tx", "rx";
294 }; 294 };
295 295
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index 9f60a7b6a42b..bd379034993c 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -194,6 +194,7 @@
194 rtc@fc900000 { 194 rtc@fc900000 {
195 compatible = "st,spear600-rtc"; 195 compatible = "st,spear600-rtc";
196 reg = <0xfc900000 0x1000>; 196 reg = <0xfc900000 0x1000>;
197 interrupt-parent = <&vic0>;
197 interrupts = <10>; 198 interrupts = <10>;
198 status = "disabled"; 199 status = "disabled";
199 }; 200 };
diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
index d60f0d8add26..e4b508ce38a2 100644
--- a/arch/arm/boot/dts/stih407.dtsi
+++ b/arch/arm/boot/dts/stih407.dtsi
@@ -8,6 +8,7 @@
8 */ 8 */
9#include "stih407-clock.dtsi" 9#include "stih407-clock.dtsi"
10#include "stih407-family.dtsi" 10#include "stih407-family.dtsi"
11#include <dt-bindings/gpio/gpio.h>
11/ { 12/ {
12 soc { 13 soc {
13 sti-display-subsystem { 14 sti-display-subsystem {
@@ -112,7 +113,7 @@
112 <&clk_s_d2_quadfs 0>, 113 <&clk_s_d2_quadfs 0>,
113 <&clk_s_d2_quadfs 1>; 114 <&clk_s_d2_quadfs 1>;
114 115
115 hdmi,hpd-gpio = <&pio5 3>; 116 hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
116 reset-names = "hdmi"; 117 reset-names = "hdmi";
117 resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; 118 resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
118 ddc = <&hdmiddc>; 119 ddc = <&hdmiddc>;
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index 40318869c733..3c32fb8cdcac 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -9,6 +9,7 @@
9#include "stih410-clock.dtsi" 9#include "stih410-clock.dtsi"
10#include "stih407-family.dtsi" 10#include "stih407-family.dtsi"
11#include "stih410-pinctrl.dtsi" 11#include "stih410-pinctrl.dtsi"
12#include <dt-bindings/gpio/gpio.h>
12/ { 13/ {
13 aliases { 14 aliases {
14 bdisp0 = &bdisp0; 15 bdisp0 = &bdisp0;
@@ -203,7 +204,7 @@
203 <&clk_s_d2_quadfs 0>, 204 <&clk_s_d2_quadfs 0>,
204 <&clk_s_d2_quadfs 1>; 205 <&clk_s_d2_quadfs 1>;
205 206
206 hdmi,hpd-gpio = <&pio5 3>; 207 hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
207 reset-names = "hdmi"; 208 reset-names = "hdmi";
208 resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; 209 resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
209 ddc = <&hdmiddc>; 210 ddc = <&hdmiddc>;
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 2c16d9e7c03c..4a275fba6059 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -530,4 +530,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
530#endif 530#endif
531 .endm 531 .endm
532 532
533#ifdef CONFIG_KPROBES
534#define _ASM_NOKPROBE(entry) \
535 .pushsection "_kprobe_blacklist", "aw" ; \
536 .balign 4 ; \
537 .long entry; \
538 .popsection
539#else
540#define _ASM_NOKPROBE(entry)
541#endif
542
533#endif /* __ASM_ASSEMBLER_H__ */ 543#endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 6795368ad023..cc414382dab4 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -128,20 +128,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
128#endif /* !SMP */ 128#endif /* !SMP */
129 129
130static inline int 130static inline int
131futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) 131arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
132{ 132{
133 int op = (encoded_op >> 28) & 7;
134 int cmp = (encoded_op >> 24) & 15;
135 int oparg = (encoded_op << 8) >> 20;
136 int cmparg = (encoded_op << 20) >> 20;
137 int oldval = 0, ret, tmp; 133 int oldval = 0, ret, tmp;
138 134
139 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
140 oparg = 1 << oparg;
141
142 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
143 return -EFAULT;
144
145#ifndef CONFIG_SMP 135#ifndef CONFIG_SMP
146 preempt_disable(); 136 preempt_disable();
147#endif 137#endif
@@ -172,17 +162,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
172 preempt_enable(); 162 preempt_enable();
173#endif 163#endif
174 164
175 if (!ret) { 165 if (!ret)
176 switch (cmp) { 166 *oval = oldval;
177 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 167
178 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
179 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
180 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
181 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
182 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
183 default: ret = -ENOSYS;
184 }
185 }
186 return ret; 168 return ret;
187} 169}
188 170
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 0a9d5dd93294..6949c7d4481c 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -76,7 +76,7 @@ extern int kgdb_fault_expected;
76 76
77#define KGDB_MAX_NO_CPUS 1 77#define KGDB_MAX_NO_CPUS 1
78#define BUFMAX 400 78#define BUFMAX 400
79#define NUMREGBYTES (DBG_MAX_REG_NUM << 2) 79#define NUMREGBYTES (GDB_MAX_REGS << 2)
80#define NUMCRITREGBYTES (32 << 2) 80#define NUMCRITREGBYTES (32 << 2)
81 81
82#define _R0 0 82#define _R0 0
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7fb59199c6bb..7665bd2f4871 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -251,7 +251,7 @@ extern int __put_user_8(void *, unsigned long long);
251 ({ \ 251 ({ \
252 unsigned long __limit = current_thread_info()->addr_limit - 1; \ 252 unsigned long __limit = current_thread_info()->addr_limit - 1; \
253 const typeof(*(p)) __user *__tmp_p = (p); \ 253 const typeof(*(p)) __user *__tmp_p = (p); \
254 register const typeof(*(p)) __r2 asm("r2") = (x); \ 254 register typeof(*(p)) __r2 asm("r2") = (x); \
255 register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ 255 register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
256 register unsigned long __l asm("r1") = __limit; \ 256 register unsigned long __l asm("r1") = __limit; \
257 register int __e asm("r0"); \ 257 register int __e asm("r0"); \
diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
index d0295f1dd1a3..ff65b6d96c7e 100644
--- a/arch/arm/include/asm/vdso.h
+++ b/arch/arm/include/asm/vdso.h
@@ -11,8 +11,6 @@ struct mm_struct;
11 11
12void arm_install_vdso(struct mm_struct *mm, unsigned long addr); 12void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
13 13
14extern char vdso_start, vdso_end;
15
16extern unsigned int vdso_total_pages; 14extern unsigned int vdso_total_pages;
17 15
18#else /* CONFIG_VDSO */ 16#else /* CONFIG_VDSO */
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 71e473d05fcc..620dc75362e5 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -16,7 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
16 return raw_irqs_disabled_flags(regs->ARM_cpsr); 16 return raw_irqs_disabled_flags(regs->ARM_cpsr);
17} 17}
18 18
19#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \ 19#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((long long*)(ptr),\
20 atomic64_t, \ 20 atomic64_t, \
21 counter), (val)) 21 counter), (val))
22 22
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 709ee1d6d4df..faa9a905826e 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -29,11 +29,6 @@
29#endif 29#endif
30 30
31#ifdef CONFIG_DYNAMIC_FTRACE 31#ifdef CONFIG_DYNAMIC_FTRACE
32#ifdef CONFIG_OLD_MCOUNT
33#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
34#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
35
36#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
37 32
38static int __ftrace_modify_code(void *data) 33static int __ftrace_modify_code(void *data)
39{ 34{
@@ -51,6 +46,12 @@ void arch_ftrace_update_code(int command)
51 stop_machine(__ftrace_modify_code, &command, NULL); 46 stop_machine(__ftrace_modify_code, &command, NULL);
52} 47}
53 48
49#ifdef CONFIG_OLD_MCOUNT
50#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
51#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
52
53#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
54
54static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) 55static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
55{ 56{
56 return rec->arch.old_mcount ? OLD_NOP : NOP; 57 return rec->arch.old_mcount ? OLD_NOP : NOP;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index c92b535150a0..306a2a581785 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -19,6 +19,7 @@
19#include <linux/uaccess.h> 19#include <linux/uaccess.h>
20#include <linux/hardirq.h> 20#include <linux/hardirq.h>
21#include <linux/kdebug.h> 21#include <linux/kdebug.h>
22#include <linux/kprobes.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/kexec.h> 24#include <linux/kexec.h>
24#include <linux/bug.h> 25#include <linux/bug.h>
@@ -395,7 +396,8 @@ void unregister_undef_hook(struct undef_hook *hook)
395 raw_spin_unlock_irqrestore(&undef_lock, flags); 396 raw_spin_unlock_irqrestore(&undef_lock, flags);
396} 397}
397 398
398static int call_undef_hook(struct pt_regs *regs, unsigned int instr) 399static nokprobe_inline
400int call_undef_hook(struct pt_regs *regs, unsigned int instr)
399{ 401{
400 struct undef_hook *hook; 402 struct undef_hook *hook;
401 unsigned long flags; 403 unsigned long flags;
@@ -468,6 +470,7 @@ die_sig:
468 470
469 arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6); 471 arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
470} 472}
473NOKPROBE_SYMBOL(do_undefinstr)
471 474
472/* 475/*
473 * Handle FIQ similarly to NMI on x86 systems. 476 * Handle FIQ similarly to NMI on x86 systems.
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 54a5aeab988d..2dee87273e51 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -38,6 +38,8 @@
38 38
39static struct page **vdso_text_pagelist; 39static struct page **vdso_text_pagelist;
40 40
41extern char vdso_start[], vdso_end[];
42
41/* Total number of pages needed for the data and text portions of the VDSO. */ 43/* Total number of pages needed for the data and text portions of the VDSO. */
42unsigned int vdso_total_pages __read_mostly; 44unsigned int vdso_total_pages __read_mostly;
43 45
@@ -178,13 +180,13 @@ static int __init vdso_init(void)
178 unsigned int text_pages; 180 unsigned int text_pages;
179 int i; 181 int i;
180 182
181 if (memcmp(&vdso_start, "\177ELF", 4)) { 183 if (memcmp(vdso_start, "\177ELF", 4)) {
182 pr_err("VDSO is not a valid ELF object!\n"); 184 pr_err("VDSO is not a valid ELF object!\n");
183 return -ENOEXEC; 185 return -ENOEXEC;
184 } 186 }
185 187
186 text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; 188 text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
187 pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start); 189 pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
188 190
189 /* Allocate the VDSO text pagelist */ 191 /* Allocate the VDSO text pagelist */
190 vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *), 192 vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
@@ -199,7 +201,7 @@ static int __init vdso_init(void)
199 for (i = 0; i < text_pages; i++) { 201 for (i = 0; i < text_pages; i++) {
200 struct page *page; 202 struct page *page;
201 203
202 page = virt_to_page(&vdso_start + i * PAGE_SIZE); 204 page = virt_to_page(vdso_start + i * PAGE_SIZE);
203 vdso_text_pagelist[i] = page; 205 vdso_text_pagelist[i] = page;
204 } 206 }
205 207
@@ -210,7 +212,7 @@ static int __init vdso_init(void)
210 212
211 cntvct_ok = cntvct_functional(); 213 cntvct_ok = cntvct_functional();
212 214
213 patch_vdso(&vdso_start); 215 patch_vdso(vdso_start);
214 216
215 return 0; 217 return 0;
216} 218}
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index f36b5b1acd1f..05b2f8294968 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -45,7 +45,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
45 45
46 ret = kvm_psci_call(vcpu); 46 ret = kvm_psci_call(vcpu);
47 if (ret < 0) { 47 if (ret < 0) {
48 kvm_inject_undefined(vcpu); 48 vcpu_set_reg(vcpu, 0, ~0UL);
49 return 1; 49 return 1;
50 } 50 }
51 51
@@ -54,7 +54,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
54 54
55static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) 55static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
56{ 56{
57 kvm_inject_undefined(vcpu); 57 /*
58 * "If an SMC instruction executed at Non-secure EL1 is
59 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
60 * Trap exception, not a Secure Monitor Call exception [...]"
61 *
62 * We need to advance the PC after the trap, as it would
63 * otherwise return to the same address...
64 */
65 vcpu_set_reg(vcpu, 0, ~0UL);
66 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
58 return 1; 67 return 1;
59} 68}
60 69
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1712f132b80d..b83fdc06286a 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -85,7 +85,11 @@
85 .pushsection .text.fixup,"ax" 85 .pushsection .text.fixup,"ax"
86 .align 4 86 .align 4
879001: mov r4, #-EFAULT 879001: mov r4, #-EFAULT
88#ifdef CONFIG_CPU_SW_DOMAIN_PAN
89 ldr r5, [sp, #9*4] @ *err_ptr
90#else
88 ldr r5, [sp, #8*4] @ *err_ptr 91 ldr r5, [sp, #8*4] @ *err_ptr
92#endif
89 str r4, [r5] 93 str r4, [r5]
90 ldmia sp, {r1, r2} @ retrieve dst, len 94 ldmia sp, {r1, r2} @ retrieve dst, len
91 add r2, r2, r1 95 add r2, r2, r1
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index df73914e81c8..746e7801dcdf 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -38,6 +38,7 @@ ENTRY(__get_user_1)
38 mov r0, #0 38 mov r0, #0
39 ret lr 39 ret lr
40ENDPROC(__get_user_1) 40ENDPROC(__get_user_1)
41_ASM_NOKPROBE(__get_user_1)
41 42
42ENTRY(__get_user_2) 43ENTRY(__get_user_2)
43 check_uaccess r0, 2, r1, r2, __get_user_bad 44 check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +59,7 @@ rb .req r0
58 mov r0, #0 59 mov r0, #0
59 ret lr 60 ret lr
60ENDPROC(__get_user_2) 61ENDPROC(__get_user_2)
62_ASM_NOKPROBE(__get_user_2)
61 63
62ENTRY(__get_user_4) 64ENTRY(__get_user_4)
63 check_uaccess r0, 4, r1, r2, __get_user_bad 65 check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +67,7 @@ ENTRY(__get_user_4)
65 mov r0, #0 67 mov r0, #0
66 ret lr 68 ret lr
67ENDPROC(__get_user_4) 69ENDPROC(__get_user_4)
70_ASM_NOKPROBE(__get_user_4)
68 71
69ENTRY(__get_user_8) 72ENTRY(__get_user_8)
70 check_uaccess r0, 8, r1, r2, __get_user_bad8 73 check_uaccess r0, 8, r1, r2, __get_user_bad8
@@ -78,6 +81,7 @@ ENTRY(__get_user_8)
78 mov r0, #0 81 mov r0, #0
79 ret lr 82 ret lr
80ENDPROC(__get_user_8) 83ENDPROC(__get_user_8)
84_ASM_NOKPROBE(__get_user_8)
81 85
82#ifdef __ARMEB__ 86#ifdef __ARMEB__
83ENTRY(__get_user_32t_8) 87ENTRY(__get_user_32t_8)
@@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8)
91 mov r0, #0 95 mov r0, #0
92 ret lr 96 ret lr
93ENDPROC(__get_user_32t_8) 97ENDPROC(__get_user_32t_8)
98_ASM_NOKPROBE(__get_user_32t_8)
94 99
95ENTRY(__get_user_64t_1) 100ENTRY(__get_user_64t_1)
96 check_uaccess r0, 1, r1, r2, __get_user_bad8 101 check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1)
98 mov r0, #0 103 mov r0, #0
99 ret lr 104 ret lr
100ENDPROC(__get_user_64t_1) 105ENDPROC(__get_user_64t_1)
106_ASM_NOKPROBE(__get_user_64t_1)
101 107
102ENTRY(__get_user_64t_2) 108ENTRY(__get_user_64t_2)
103 check_uaccess r0, 2, r1, r2, __get_user_bad8 109 check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -114,6 +120,7 @@ rb .req r0
114 mov r0, #0 120 mov r0, #0
115 ret lr 121 ret lr
116ENDPROC(__get_user_64t_2) 122ENDPROC(__get_user_64t_2)
123_ASM_NOKPROBE(__get_user_64t_2)
117 124
118ENTRY(__get_user_64t_4) 125ENTRY(__get_user_64t_4)
119 check_uaccess r0, 4, r1, r2, __get_user_bad8 126 check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4)
121 mov r0, #0 128 mov r0, #0
122 ret lr 129 ret lr
123ENDPROC(__get_user_64t_4) 130ENDPROC(__get_user_64t_4)
131_ASM_NOKPROBE(__get_user_64t_4)
124#endif 132#endif
125 133
126__get_user_bad8: 134__get_user_bad8:
@@ -131,6 +139,8 @@ __get_user_bad:
131 ret lr 139 ret lr
132ENDPROC(__get_user_bad) 140ENDPROC(__get_user_bad)
133ENDPROC(__get_user_bad8) 141ENDPROC(__get_user_bad8)
142_ASM_NOKPROBE(__get_user_bad)
143_ASM_NOKPROBE(__get_user_bad8)
134 144
135.pushsection __ex_table, "a" 145.pushsection __ex_table, "a"
136 .long 1b, __get_user_bad 146 .long 1b, __get_user_bad
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index cf432868e990..d6b6f96246fd 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -841,6 +841,8 @@ static struct platform_device da8xx_dsp = {
841 .resource = da8xx_rproc_resources, 841 .resource = da8xx_rproc_resources,
842}; 842};
843 843
844static bool rproc_mem_inited __initdata;
845
844#if IS_ENABLED(CONFIG_DA8XX_REMOTEPROC) 846#if IS_ENABLED(CONFIG_DA8XX_REMOTEPROC)
845 847
846static phys_addr_t rproc_base __initdata; 848static phys_addr_t rproc_base __initdata;
@@ -879,6 +881,8 @@ void __init da8xx_rproc_reserve_cma(void)
879 ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0); 881 ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0);
880 if (ret) 882 if (ret)
881 pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret); 883 pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret);
884 else
885 rproc_mem_inited = true;
882} 886}
883 887
884#else 888#else
@@ -893,6 +897,12 @@ int __init da8xx_register_rproc(void)
893{ 897{
894 int ret; 898 int ret;
895 899
900 if (!rproc_mem_inited) {
901 pr_warn("%s: memory not reserved for DSP, not registering DSP device\n",
902 __func__);
903 return -ENOMEM;
904 }
905
896 ret = platform_device_register(&da8xx_dsp); 906 ret = platform_device_register(&da8xx_dsp);
897 if (ret) 907 if (ret)
898 pr_err("%s: can't register DSP device: %d\n", __func__, ret); 908 pr_err("%s: can't register DSP device: %d\n", __func__, ret);
diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c
index 5b0f752d5507..24be631e487d 100644
--- a/arch/arm/mach-imx/cpu.c
+++ b/arch/arm/mach-imx/cpu.c
@@ -133,6 +133,9 @@ struct device * __init imx_soc_device_init(void)
133 case MXC_CPU_IMX6UL: 133 case MXC_CPU_IMX6UL:
134 soc_id = "i.MX6UL"; 134 soc_id = "i.MX6UL";
135 break; 135 break;
136 case MXC_CPU_IMX6ULL:
137 soc_id = "i.MX6ULL";
138 break;
136 case MXC_CPU_IMX7D: 139 case MXC_CPU_IMX7D:
137 soc_id = "i.MX7D"; 140 soc_id = "i.MX7D";
138 break; 141 break;
diff --git a/arch/arm/mach-imx/mxc.h b/arch/arm/mach-imx/mxc.h
index a5b1af6d7441..478cd91d0885 100644
--- a/arch/arm/mach-imx/mxc.h
+++ b/arch/arm/mach-imx/mxc.h
@@ -39,6 +39,7 @@
39#define MXC_CPU_IMX6SX 0x62 39#define MXC_CPU_IMX6SX 0x62
40#define MXC_CPU_IMX6Q 0x63 40#define MXC_CPU_IMX6Q 0x63
41#define MXC_CPU_IMX6UL 0x64 41#define MXC_CPU_IMX6UL 0x64
42#define MXC_CPU_IMX6ULL 0x65
42#define MXC_CPU_IMX7D 0x72 43#define MXC_CPU_IMX7D 0x72
43 44
44#define IMX_DDR_TYPE_LPDDR2 1 45#define IMX_DDR_TYPE_LPDDR2 1
@@ -171,6 +172,11 @@ static inline bool cpu_is_imx6ul(void)
171 return __mxc_cpu_type == MXC_CPU_IMX6UL; 172 return __mxc_cpu_type == MXC_CPU_IMX6UL;
172} 173}
173 174
175static inline bool cpu_is_imx6ull(void)
176{
177 return __mxc_cpu_type == MXC_CPU_IMX6ULL;
178}
179
174static inline bool cpu_is_imx6q(void) 180static inline bool cpu_is_imx6q(void)
175{ 181{
176 return __mxc_cpu_type == MXC_CPU_IMX6Q; 182 return __mxc_cpu_type == MXC_CPU_IMX6Q;
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index e20fc4178b15..1c8a6098a2ca 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -37,7 +37,7 @@ config MACH_ARMADA_370
37config MACH_ARMADA_375 37config MACH_ARMADA_375
38 bool "Marvell Armada 375 boards" if ARCH_MULTI_V7 38 bool "Marvell Armada 375 boards" if ARCH_MULTI_V7
39 select ARM_ERRATA_720789 39 select ARM_ERRATA_720789
40 select ARM_ERRATA_753970 40 select PL310_ERRATA_753970
41 select ARM_GIC 41 select ARM_GIC
42 select ARMADA_375_CLK 42 select ARMADA_375_CLK
43 select HAVE_ARM_SCU 43 select HAVE_ARM_SCU
@@ -52,7 +52,7 @@ config MACH_ARMADA_375
52config MACH_ARMADA_38X 52config MACH_ARMADA_38X
53 bool "Marvell Armada 380/385 boards" if ARCH_MULTI_V7 53 bool "Marvell Armada 380/385 boards" if ARCH_MULTI_V7
54 select ARM_ERRATA_720789 54 select ARM_ERRATA_720789
55 select ARM_ERRATA_753970 55 select PL310_ERRATA_753970
56 select ARM_GIC 56 select ARM_GIC
57 select ARMADA_38X_CLK 57 select ARMADA_38X_CLK
58 select HAVE_ARM_SCU 58 select HAVE_ARM_SCU
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index 4f5fd4a084c0..034b89499bd7 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -1031,17 +1031,17 @@ static int clk_debugfs_register_one(struct clk *c)
1031 return -ENOMEM; 1031 return -ENOMEM;
1032 c->dent = d; 1032 c->dent = d;
1033 1033
1034 d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount); 1034 d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
1035 if (!d) { 1035 if (!d) {
1036 err = -ENOMEM; 1036 err = -ENOMEM;
1037 goto err_out; 1037 goto err_out;
1038 } 1038 }
1039 d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate); 1039 d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
1040 if (!d) { 1040 if (!d) {
1041 err = -ENOMEM; 1041 err = -ENOMEM;
1042 goto err_out; 1042 goto err_out;
1043 } 1043 }
1044 d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags); 1044 d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
1045 if (!d) { 1045 if (!d) {
1046 err = -ENOMEM; 1046 err = -ENOMEM;
1047 goto err_out; 1047 goto err_out;
diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
index 5ac122e88f67..fa7f308c9027 100644
--- a/arch/arm/mach-omap2/omap-secure.c
+++ b/arch/arm/mach-omap2/omap-secure.c
@@ -73,6 +73,27 @@ phys_addr_t omap_secure_ram_mempool_base(void)
73 return omap_secure_memblock_base; 73 return omap_secure_memblock_base;
74} 74}
75 75
76#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
77u32 omap3_save_secure_ram(void __iomem *addr, int size)
78{
79 u32 ret;
80 u32 param[5];
81
82 if (size != OMAP3_SAVE_SECURE_RAM_SZ)
83 return OMAP3_SAVE_SECURE_RAM_SZ;
84
85 param[0] = 4; /* Number of arguments */
86 param[1] = __pa(addr); /* Physical address for saving */
87 param[2] = 0;
88 param[3] = 1;
89 param[4] = 1;
90
91 ret = save_secure_ram_context(__pa(param));
92
93 return ret;
94}
95#endif
96
76/** 97/**
77 * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls 98 * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
78 * @idx: The PPA API index 99 * @idx: The PPA API index
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index bae263fba640..c509cde71f93 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -31,6 +31,8 @@
31/* Maximum Secure memory storage size */ 31/* Maximum Secure memory storage size */
32#define OMAP_SECURE_RAM_STORAGE (88 * SZ_1K) 32#define OMAP_SECURE_RAM_STORAGE (88 * SZ_1K)
33 33
34#define OMAP3_SAVE_SECURE_RAM_SZ 0x803F
35
34/* Secure low power HAL API index */ 36/* Secure low power HAL API index */
35#define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a 37#define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a
36#define OMAP4_HAL_SAVEHW_INDEX 0x1b 38#define OMAP4_HAL_SAVEHW_INDEX 0x1b
@@ -65,6 +67,8 @@ extern u32 omap_smc2(u32 id, u32 falg, u32 pargs);
65extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs); 67extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs);
66extern phys_addr_t omap_secure_ram_mempool_base(void); 68extern phys_addr_t omap_secure_ram_mempool_base(void);
67extern int omap_secure_ram_reserve_memblock(void); 69extern int omap_secure_ram_reserve_memblock(void);
70extern u32 save_secure_ram_context(u32 args_pa);
71extern u32 omap3_save_secure_ram(void __iomem *save_regs, int size);
68 72
69extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs, 73extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
70 u32 arg1, u32 arg2, u32 arg3, u32 arg4); 74 u32 arg1, u32 arg2, u32 arg3, u32 arg4);
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 66e0003073fa..93c75b548415 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -226,7 +226,7 @@ static void omap_pm_end(void)
226 cpu_idle_poll_ctrl(false); 226 cpu_idle_poll_ctrl(false);
227} 227}
228 228
229static void omap_pm_finish(void) 229static void omap_pm_wake(void)
230{ 230{
231 if (cpu_is_omap34xx()) 231 if (cpu_is_omap34xx())
232 omap_prcm_irq_complete(); 232 omap_prcm_irq_complete();
@@ -236,7 +236,7 @@ static const struct platform_suspend_ops omap_pm_ops = {
236 .begin = omap_pm_begin, 236 .begin = omap_pm_begin,
237 .end = omap_pm_end, 237 .end = omap_pm_end,
238 .enter = omap_pm_enter, 238 .enter = omap_pm_enter,
239 .finish = omap_pm_finish, 239 .wake = omap_pm_wake,
240 .valid = suspend_valid_only_mem, 240 .valid = suspend_valid_only_mem,
241}; 241};
242 242
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 2f9649b89053..6f26371ba80f 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -86,10 +86,6 @@ struct am33xx_pm_platform_data *am33xx_pm_get_pdata(void);
86extern struct am33xx_pm_sram_addr am33xx_pm_sram; 86extern struct am33xx_pm_sram_addr am33xx_pm_sram;
87extern struct am33xx_pm_sram_addr am43xx_pm_sram; 87extern struct am33xx_pm_sram_addr am43xx_pm_sram;
88 88
89/* save_secure_ram_context function pointer and size, for copy to SRAM */
90extern int save_secure_ram_context(u32 *addr);
91extern unsigned int save_secure_ram_context_sz;
92
93extern void omap3_save_scratchpad_contents(void); 89extern void omap3_save_scratchpad_contents(void);
94 90
95#define PM_RTA_ERRATUM_i608 (1 << 0) 91#define PM_RTA_ERRATUM_i608 (1 << 0)
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 2dbd3785ee6f..181da202f981 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -48,6 +48,7 @@
48#include "prm3xxx.h" 48#include "prm3xxx.h"
49#include "pm.h" 49#include "pm.h"
50#include "sdrc.h" 50#include "sdrc.h"
51#include "omap-secure.h"
51#include "sram.h" 52#include "sram.h"
52#include "control.h" 53#include "control.h"
53#include "vc.h" 54#include "vc.h"
@@ -66,7 +67,6 @@ struct power_state {
66 67
67static LIST_HEAD(pwrst_list); 68static LIST_HEAD(pwrst_list);
68 69
69static int (*_omap_save_secure_sram)(u32 *addr);
70void (*omap3_do_wfi_sram)(void); 70void (*omap3_do_wfi_sram)(void);
71 71
72static struct powerdomain *mpu_pwrdm, *neon_pwrdm; 72static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
@@ -121,8 +121,8 @@ static void omap3_save_secure_ram_context(void)
121 * will hang the system. 121 * will hang the system.
122 */ 122 */
123 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); 123 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
124 ret = _omap_save_secure_sram((u32 *)(unsigned long) 124 ret = omap3_save_secure_ram(omap3_secure_ram_storage,
125 __pa(omap3_secure_ram_storage)); 125 OMAP3_SAVE_SECURE_RAM_SZ);
126 pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); 126 pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
127 /* Following is for error tracking, it should not happen */ 127 /* Following is for error tracking, it should not happen */
128 if (ret) { 128 if (ret) {
@@ -431,15 +431,10 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
431 * 431 *
432 * The minimum set of functions is pushed to SRAM for execution: 432 * The minimum set of functions is pushed to SRAM for execution:
433 * - omap3_do_wfi for erratum i581 WA, 433 * - omap3_do_wfi for erratum i581 WA,
434 * - save_secure_ram_context for security extensions.
435 */ 434 */
436void omap_push_sram_idle(void) 435void omap_push_sram_idle(void)
437{ 436{
438 omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz); 437 omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
439
440 if (omap_type() != OMAP2_DEVICE_TYPE_GP)
441 _omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
442 save_secure_ram_context_sz);
443} 438}
444 439
445static void __init pm_errata_configure(void) 440static void __init pm_errata_configure(void)
@@ -551,7 +546,7 @@ int __init omap3_pm_init(void)
551 clkdm_add_wkdep(neon_clkdm, mpu_clkdm); 546 clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
552 if (omap_type() != OMAP2_DEVICE_TYPE_GP) { 547 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
553 omap3_secure_ram_storage = 548 omap3_secure_ram_storage =
554 kmalloc(0x803F, GFP_KERNEL); 549 kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
555 if (!omap3_secure_ram_storage) 550 if (!omap3_secure_ram_storage)
556 pr_err("Memory allocation failed when allocating for secure sram context\n"); 551 pr_err("Memory allocation failed when allocating for secure sram context\n");
557 552
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index 1b9f0520dea9..3e0d802c59da 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -93,20 +93,13 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
93ENDPROC(enable_omap3630_toggle_l2_on_restore) 93ENDPROC(enable_omap3630_toggle_l2_on_restore)
94 94
95/* 95/*
96 * Function to call rom code to save secure ram context. This gets 96 * Function to call rom code to save secure ram context.
97 * relocated to SRAM, so it can be all in .data section. Otherwise 97 *
98 * we need to initialize api_params separately. 98 * r0 = physical address of the parameters
99 */ 99 */
100 .data
101 .align 3
102ENTRY(save_secure_ram_context) 100ENTRY(save_secure_ram_context)
103 stmfd sp!, {r4 - r11, lr} @ save registers on stack 101 stmfd sp!, {r4 - r11, lr} @ save registers on stack
104 adr r3, api_params @ r3 points to parameters 102 mov r3, r0 @ physical address of parameters
105 str r0, [r3,#0x4] @ r0 has sdram address
106 ldr r12, high_mask
107 and r3, r3, r12
108 ldr r12, sram_phy_addr_mask
109 orr r3, r3, r12
110 mov r0, #25 @ set service ID for PPA 103 mov r0, #25 @ set service ID for PPA
111 mov r12, r0 @ copy secure service ID in r12 104 mov r12, r0 @ copy secure service ID in r12
112 mov r1, #0 @ set task id for ROM code in r1 105 mov r1, #0 @ set task id for ROM code in r1
@@ -120,18 +113,7 @@ ENTRY(save_secure_ram_context)
120 nop 113 nop
121 nop 114 nop
122 ldmfd sp!, {r4 - r11, pc} 115 ldmfd sp!, {r4 - r11, pc}
123 .align
124sram_phy_addr_mask:
125 .word SRAM_BASE_P
126high_mask:
127 .word 0xffff
128api_params:
129 .word 0x4, 0x0, 0x0, 0x1, 0x1
130ENDPROC(save_secure_ram_context) 116ENDPROC(save_secure_ram_context)
131ENTRY(save_secure_ram_context_sz)
132 .word . - save_secure_ram_context
133
134 .text
135 117
136/* 118/*
137 * ====================== 119 * ======================
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index a67af98e532b..a15116080047 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -156,12 +156,6 @@ static struct clock_event_device clockevent_gpt = {
156 .tick_resume = omap2_gp_timer_shutdown, 156 .tick_resume = omap2_gp_timer_shutdown,
157}; 157};
158 158
159static struct property device_disabled = {
160 .name = "status",
161 .length = sizeof("disabled"),
162 .value = "disabled",
163};
164
165static const struct of_device_id omap_timer_match[] __initconst = { 159static const struct of_device_id omap_timer_match[] __initconst = {
166 { .compatible = "ti,omap2420-timer", }, 160 { .compatible = "ti,omap2420-timer", },
167 { .compatible = "ti,omap3430-timer", }, 161 { .compatible = "ti,omap3430-timer", },
@@ -203,8 +197,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id *
203 of_get_property(np, "ti,timer-secure", NULL))) 197 of_get_property(np, "ti,timer-secure", NULL)))
204 continue; 198 continue;
205 199
206 if (!of_device_is_compatible(np, "ti,omap-counter32k")) 200 if (!of_device_is_compatible(np, "ti,omap-counter32k")) {
207 of_add_property(np, &device_disabled); 201 struct property *prop;
202
203 prop = kzalloc(sizeof(*prop), GFP_KERNEL);
204 if (!prop)
205 return NULL;
206 prop->name = "status";
207 prop->value = "disabled";
208 prop->length = strlen(prop->value);
209 of_add_property(np, prop);
210 }
208 return np; 211 return np;
209 } 212 }
210 213
diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c
index e0a53208880a..b59a7a2df4e3 100644
--- a/arch/arm/mach-pxa/tosa-bt.c
+++ b/arch/arm/mach-pxa/tosa-bt.c
@@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = {
132 }, 132 },
133}; 133};
134module_platform_driver(tosa_bt_driver); 134module_platform_driver(tosa_bt_driver);
135
136MODULE_LICENSE("GPL");
137MODULE_AUTHOR("Dmitry Baryshkov");
138MODULE_DESCRIPTION("Bluetooth built-in chip control");
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 0fa4c5f8b1be..2d43357d4a0a 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -12,8 +12,6 @@ menuconfig ARCH_TEGRA
12 select ARCH_HAS_RESET_CONTROLLER 12 select ARCH_HAS_RESET_CONTROLLER
13 select RESET_CONTROLLER 13 select RESET_CONTROLLER
14 select SOC_BUS 14 select SOC_BUS
15 select USB_ULPI if USB_PHY
16 select USB_ULPI_VIEWPORT if USB_PHY
17 help 15 help
18 This enables support for NVIDIA Tegra based systems. 16 This enables support for NVIDIA Tegra based systems.
19 17
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 4c48b52c4a7c..6c786b25ed74 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -861,11 +861,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
861 timer->irq = irq->start; 861 timer->irq = irq->start;
862 timer->pdev = pdev; 862 timer->pdev = pdev;
863 863
864 /* Skip pm_runtime_enable for OMAP1 */ 864 pm_runtime_enable(dev);
865 if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) { 865 pm_runtime_irq_safe(dev);
866 pm_runtime_enable(dev);
867 pm_runtime_irq_safe(dev);
868 }
869 866
870 if (!timer->reserved) { 867 if (!timer->reserved) {
871 ret = pm_runtime_get_sync(dev); 868 ret = pm_runtime_get_sync(dev);
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index bcdecc25461b..b2aa9b32bff2 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
165{ 165{
166 unsigned long flags; 166 unsigned long flags;
167 struct kprobe *p = &op->kp; 167 struct kprobe *p = &op->kp;
168 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 168 struct kprobe_ctlblk *kcb;
169 169
170 /* Save skipped registers */ 170 /* Save skipped registers */
171 regs->ARM_pc = (unsigned long)op->kp.addr; 171 regs->ARM_pc = (unsigned long)op->kp.addr;
172 regs->ARM_ORIG_r0 = ~0UL; 172 regs->ARM_ORIG_r0 = ~0UL;
173 173
174 local_irq_save(flags); 174 local_irq_save(flags);
175 kcb = get_kprobe_ctlblk();
175 176
176 if (kprobe_running()) { 177 if (kprobe_running()) {
177 kprobes_inc_nmissed_count(&op->kp); 178 kprobes_inc_nmissed_count(&op->kp);
@@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
191 192
192 local_irq_restore(flags); 193 local_irq_restore(flags);
193} 194}
195NOKPROBE_SYMBOL(optimized_callback)
194 196
195int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig) 197int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
196{ 198{
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 703b7f921806..2c4babb4c9f1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -57,6 +57,7 @@ config ARM64
57 select HAVE_ARCH_SECCOMP_FILTER 57 select HAVE_ARCH_SECCOMP_FILTER
58 select HAVE_ARCH_TRACEHOOK 58 select HAVE_ARCH_TRACEHOOK
59 select HAVE_BPF_JIT 59 select HAVE_BPF_JIT
60 select HAVE_EBPF_JIT
60 select HAVE_C_RECORDMCOUNT 61 select HAVE_C_RECORDMCOUNT
61 select HAVE_CC_STACKPROTECTOR 62 select HAVE_CC_STACKPROTECTOR
62 select HAVE_CMPXCHG_DOUBLE 63 select HAVE_CMPXCHG_DOUBLE
@@ -395,6 +396,20 @@ config ARM64_ERRATUM_843419
395 396
396 If unsure, say Y. 397 If unsure, say Y.
397 398
399config ARM64_ERRATUM_1024718
400 bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
401 default y
402 help
403 This option adds work around for Arm Cortex-A55 Erratum 1024718.
404
405 Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
406 update of the hardware dirty bit when the DBM/AP bits are updated
407 without a break-before-make. The work around is to disable the usage
408 of hardware DBM locally on the affected cores. CPUs not affected by
409 erratum will continue to use the feature.
410
411 If unsure, say Y.
412
398config CAVIUM_ERRATUM_22375 413config CAVIUM_ERRATUM_22375
399 bool "Cavium erratum 22375, 24313" 414 bool "Cavium erratum 22375, 24313"
400 default y 415 default y
@@ -910,7 +925,7 @@ source "fs/Kconfig.binfmt"
910config COMPAT 925config COMPAT
911 bool "Kernel support for 32-bit EL0" 926 bool "Kernel support for 32-bit EL0"
912 depends on ARM64_4K_PAGES || EXPERT 927 depends on ARM64_4K_PAGES || EXPERT
913 select COMPAT_BINFMT_ELF 928 select COMPAT_BINFMT_ELF if BINFMT_ELF
914 select HAVE_UID16 929 select HAVE_UID16
915 select OLD_SIGSUSPEND3 930 select OLD_SIGSUSPEND3
916 select COMPAT_OLD_SIGACTION 931 select COMPAT_OLD_SIGACTION
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 4043c35962cc..5edb50772c11 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -90,8 +90,6 @@ config ARCH_TEGRA_132_SOC
90 bool "NVIDIA Tegra132 SoC" 90 bool "NVIDIA Tegra132 SoC"
91 depends on ARCH_TEGRA 91 depends on ARCH_TEGRA
92 select PINCTRL_TEGRA124 92 select PINCTRL_TEGRA124
93 select USB_ULPI if USB_PHY
94 select USB_ULPI_VIEWPORT if USB_PHY
95 help 93 help
96 Enable support for NVIDIA Tegra132 SoC, based on the Denver 94 Enable support for NVIDIA Tegra132 SoC, based on the Denver
97 ARMv8 CPU. The Tegra132 SoC is similar to the Tegra124 SoC, 95 ARMv8 CPU. The Tegra132 SoC is similar to the Tegra124 SoC,
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 4dd5f93d0303..7f42b646d528 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -54,6 +54,7 @@
54 reg = <0x000>; 54 reg = <0x000>;
55 enable-method = "psci"; 55 enable-method = "psci";
56 cpu-idle-states = <&CPU_SLEEP_0>; 56 cpu-idle-states = <&CPU_SLEEP_0>;
57 #cooling-cells = <2>;
57 }; 58 };
58 59
59 cpu1: cpu@1 { 60 cpu1: cpu@1 {
@@ -70,6 +71,7 @@
70 reg = <0x100>; 71 reg = <0x100>;
71 enable-method = "psci"; 72 enable-method = "psci";
72 cpu-idle-states = <&CPU_SLEEP_0>; 73 cpu-idle-states = <&CPU_SLEEP_0>;
74 #cooling-cells = <2>;
73 }; 75 };
74 76
75 cpu3: cpu@101 { 77 cpu3: cpu@101 {
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 290e13428f4a..4fd2fe7a5525 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -26,6 +26,7 @@
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27#include <asm/page.h> 27#include <asm/page.h>
28#include <asm/pgtable-hwdef.h> 28#include <asm/pgtable-hwdef.h>
29#include <asm/cputype.h>
29#include <asm/ptrace.h> 30#include <asm/ptrace.h>
30#include <asm/thread_info.h> 31#include <asm/thread_info.h>
31 32
@@ -352,4 +353,43 @@ lr .req x30 // link register
352 movk \reg, :abs_g0_nc:\val 353 movk \reg, :abs_g0_nc:\val
353 .endm 354 .endm
354 355
356/*
357 * Check the MIDR_EL1 of the current CPU for a given model and a range of
358 * variant/revision. See asm/cputype.h for the macros used below.
359 *
360 * model: MIDR_CPU_PART of CPU
361 * rv_min: Minimum of MIDR_CPU_VAR_REV()
362 * rv_max: Maximum of MIDR_CPU_VAR_REV()
363 * res: Result register.
364 * tmp1, tmp2, tmp3: Temporary registers
365 *
366 * Corrupts: res, tmp1, tmp2, tmp3
367 * Returns: 0, if the CPU id doesn't match. Non-zero otherwise
368 */
369 .macro cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
370 mrs \res, midr_el1
371 mov_q \tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
372 mov_q \tmp2, MIDR_CPU_PART_MASK
373 and \tmp3, \res, \tmp2 // Extract model
374 and \tmp1, \res, \tmp1 // rev & variant
375 mov_q \tmp2, \model
376 cmp \tmp3, \tmp2
377 cset \res, eq
378 cbz \res, .Ldone\@ // Model matches ?
379
380 .if (\rv_min != 0) // Skip min check if rv_min == 0
381 mov_q \tmp3, \rv_min
382 cmp \tmp1, \tmp3
383 cset \res, ge
384 .endif // \rv_min != 0
385 /* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
386 .if ((\rv_min != \rv_max) || \rv_min == 0)
387 mov_q \tmp2, \rv_max
388 cmp \tmp1, \tmp2
389 cset \tmp2, le
390 and \res, \res, \tmp2
391 .endif
392.Ldone\@:
393 .endm
394
355#endif /* __ASM_ASSEMBLER_H */ 395#endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 39c1d340fec5..a000e47d5016 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -114,7 +114,7 @@ static inline void atomic_and(int i, atomic_t *v)
114 /* LSE atomics */ 114 /* LSE atomics */
115 " mvn %w[i], %w[i]\n" 115 " mvn %w[i], %w[i]\n"
116 " stclr %w[i], %[v]") 116 " stclr %w[i], %[v]")
117 : [i] "+r" (w0), [v] "+Q" (v->counter) 117 : [i] "+&r" (w0), [v] "+Q" (v->counter)
118 : "r" (x1) 118 : "r" (x1)
119 : __LL_SC_CLOBBERS); 119 : __LL_SC_CLOBBERS);
120} 120}
@@ -131,7 +131,7 @@ static inline void atomic_sub(int i, atomic_t *v)
131 /* LSE atomics */ 131 /* LSE atomics */
132 " neg %w[i], %w[i]\n" 132 " neg %w[i], %w[i]\n"
133 " stadd %w[i], %[v]") 133 " stadd %w[i], %[v]")
134 : [i] "+r" (w0), [v] "+Q" (v->counter) 134 : [i] "+&r" (w0), [v] "+Q" (v->counter)
135 : "r" (x1) 135 : "r" (x1)
136 : __LL_SC_CLOBBERS); 136 : __LL_SC_CLOBBERS);
137} 137}
@@ -151,7 +151,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
151 " neg %w[i], %w[i]\n" \ 151 " neg %w[i], %w[i]\n" \
152 " ldadd" #mb " %w[i], w30, %[v]\n" \ 152 " ldadd" #mb " %w[i], w30, %[v]\n" \
153 " add %w[i], %w[i], w30") \ 153 " add %w[i], %w[i], w30") \
154 : [i] "+r" (w0), [v] "+Q" (v->counter) \ 154 : [i] "+&r" (w0), [v] "+Q" (v->counter) \
155 : "r" (x1) \ 155 : "r" (x1) \
156 : __LL_SC_CLOBBERS , ##cl); \ 156 : __LL_SC_CLOBBERS , ##cl); \
157 \ 157 \
@@ -255,7 +255,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
255 /* LSE atomics */ 255 /* LSE atomics */
256 " mvn %[i], %[i]\n" 256 " mvn %[i], %[i]\n"
257 " stclr %[i], %[v]") 257 " stclr %[i], %[v]")
258 : [i] "+r" (x0), [v] "+Q" (v->counter) 258 : [i] "+&r" (x0), [v] "+Q" (v->counter)
259 : "r" (x1) 259 : "r" (x1)
260 : __LL_SC_CLOBBERS); 260 : __LL_SC_CLOBBERS);
261} 261}
@@ -272,7 +272,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
272 /* LSE atomics */ 272 /* LSE atomics */
273 " neg %[i], %[i]\n" 273 " neg %[i], %[i]\n"
274 " stadd %[i], %[v]") 274 " stadd %[i], %[v]")
275 : [i] "+r" (x0), [v] "+Q" (v->counter) 275 : [i] "+&r" (x0), [v] "+Q" (v->counter)
276 : "r" (x1) 276 : "r" (x1)
277 : __LL_SC_CLOBBERS); 277 : __LL_SC_CLOBBERS);
278} 278}
@@ -292,7 +292,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
292 " neg %[i], %[i]\n" \ 292 " neg %[i], %[i]\n" \
293 " ldadd" #mb " %[i], x30, %[v]\n" \ 293 " ldadd" #mb " %[i], x30, %[v]\n" \
294 " add %[i], %[i], x30") \ 294 " add %[i], %[i], x30") \
295 : [i] "+r" (x0), [v] "+Q" (v->counter) \ 295 : [i] "+&r" (x0), [v] "+Q" (v->counter) \
296 : "r" (x1) \ 296 : "r" (x1) \
297 : __LL_SC_CLOBBERS, ##cl); \ 297 : __LL_SC_CLOBBERS, ##cl); \
298 \ 298 \
@@ -412,7 +412,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
412 " eor %[old1], %[old1], %[oldval1]\n" \ 412 " eor %[old1], %[old1], %[oldval1]\n" \
413 " eor %[old2], %[old2], %[oldval2]\n" \ 413 " eor %[old2], %[old2], %[oldval2]\n" \
414 " orr %[old1], %[old1], %[old2]") \ 414 " orr %[old1], %[old1], %[old2]") \
415 : [old1] "+r" (x0), [old2] "+r" (x1), \ 415 : [old1] "+&r" (x0), [old2] "+&r" (x1), \
416 [v] "+Q" (*(unsigned long *)ptr) \ 416 [v] "+Q" (*(unsigned long *)ptr) \
417 : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ 417 : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
418 [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ 418 [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
index 561190d15881..0bfe1df12b19 100644
--- a/arch/arm64/include/asm/bug.h
+++ b/arch/arm64/include/asm/bug.h
@@ -20,9 +20,6 @@
20 20
21#include <asm/brk-imm.h> 21#include <asm/brk-imm.h>
22 22
23#ifdef CONFIG_GENERIC_BUG
24#define HAVE_ARCH_BUG
25
26#ifdef CONFIG_DEBUG_BUGVERBOSE 23#ifdef CONFIG_DEBUG_BUGVERBOSE
27#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line) 24#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
28#define __BUGVERBOSE_LOCATION(file, line) \ 25#define __BUGVERBOSE_LOCATION(file, line) \
@@ -36,28 +33,36 @@
36#define _BUGVERBOSE_LOCATION(file, line) 33#define _BUGVERBOSE_LOCATION(file, line)
37#endif 34#endif
38 35
39#define _BUG_FLAGS(flags) __BUG_FLAGS(flags) 36#ifdef CONFIG_GENERIC_BUG
40 37
41#define __BUG_FLAGS(flags) asm volatile ( \ 38#define __BUG_ENTRY(flags) \
42 ".pushsection __bug_table,\"a\"\n\t" \ 39 ".pushsection __bug_table,\"a\"\n\t" \
43 ".align 2\n\t" \ 40 ".align 2\n\t" \
44 "0: .long 1f - 0b\n\t" \ 41 "0: .long 1f - 0b\n\t" \
45_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \ 42_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
46 ".short " #flags "\n\t" \ 43 ".short " #flags "\n\t" \
47 ".popsection\n" \ 44 ".popsection\n" \
48 \ 45 "1: "
49 "1: brk %[imm]" \ 46#else
50 :: [imm] "i" (BUG_BRK_IMM) \ 47#define __BUG_ENTRY(flags) ""
51) 48#endif
49
50#define __BUG_FLAGS(flags) \
51 asm volatile ( \
52 __BUG_ENTRY(flags) \
53 "brk %[imm]" :: [imm] "i" (BUG_BRK_IMM) \
54 );
52 55
53#define BUG() do { \ 56
54 _BUG_FLAGS(0); \ 57#define BUG() do { \
55 unreachable(); \ 58 __BUG_FLAGS(0); \
59 unreachable(); \
56} while (0) 60} while (0)
57 61
58#define __WARN_TAINT(taint) _BUG_FLAGS(BUGFLAG_TAINT(taint)) 62#define __WARN_TAINT(taint) \
63 __BUG_FLAGS(BUGFLAG_TAINT(taint))
59 64
60#endif /* ! CONFIG_GENERIC_BUG */ 65#define HAVE_ARCH_BUG
61 66
62#include <asm-generic/bug.h> 67#include <asm-generic/bug.h>
63 68
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index b3a83da152a7..8042c98ec040 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -51,7 +51,15 @@
51#define MIDR_IMPLEMENTOR(midr) \ 51#define MIDR_IMPLEMENTOR(midr) \
52 (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) 52 (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
53 53
54#define MIDR_CPU_MODEL(imp, partnum) \ 54#define MIDR_CPU_VAR_REV(var, rev) \
55 (((var) << MIDR_VARIANT_SHIFT) | (rev))
56
57#define MIDR_CPU_PART_MASK \
58 (MIDR_IMPLEMENTOR_MASK | \
59 MIDR_ARCHITECTURE_MASK | \
60 MIDR_PARTNUM_MASK)
61
62#define MIDR_CPU_PART(imp, partnum) \
55 (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \ 63 (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
56 (0xf << MIDR_ARCHITECTURE_SHIFT) | \ 64 (0xf << MIDR_ARCHITECTURE_SHIFT) | \
57 ((partnum) << MIDR_PARTNUM_SHIFT)) 65 ((partnum) << MIDR_PARTNUM_SHIFT))
@@ -75,14 +83,16 @@
75#define ARM_CPU_PART_FOUNDATION 0xD00 83#define ARM_CPU_PART_FOUNDATION 0xD00
76#define ARM_CPU_PART_CORTEX_A57 0xD07 84#define ARM_CPU_PART_CORTEX_A57 0xD07
77#define ARM_CPU_PART_CORTEX_A53 0xD03 85#define ARM_CPU_PART_CORTEX_A53 0xD03
86#define ARM_CPU_PART_CORTEX_A55 0xD05
78 87
79#define APM_CPU_PART_POTENZA 0x000 88#define APM_CPU_PART_POTENZA 0x000
80 89
81#define CAVIUM_CPU_PART_THUNDERX 0x0A1 90#define CAVIUM_CPU_PART_THUNDERX 0x0A1
82 91
83#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) 92#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
84#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) 93#define MIDR_CORTEX_A55 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
85#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) 94#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
95#define MIDR_THUNDERX MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
86 96
87#ifndef __ASSEMBLY__ 97#ifndef __ASSEMBLY__
88 98
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index f2585cdd32c2..764dc2cced4f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -51,20 +51,10 @@
51 : "memory") 51 : "memory")
52 52
53static inline int 53static inline int
54futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) 54arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
55{ 55{
56 int op = (encoded_op >> 28) & 7;
57 int cmp = (encoded_op >> 24) & 15;
58 int oparg = (encoded_op << 8) >> 20;
59 int cmparg = (encoded_op << 20) >> 20;
60 int oldval = 0, ret, tmp; 56 int oldval = 0, ret, tmp;
61 57
62 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
63 oparg = 1 << oparg;
64
65 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
66 return -EFAULT;
67
68 pagefault_disable(); 58 pagefault_disable();
69 59
70 switch (op) { 60 switch (op) {
@@ -94,17 +84,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
94 84
95 pagefault_enable(); 85 pagefault_enable();
96 86
97 if (!ret) { 87 if (!ret)
98 switch (cmp) { 88 *oval = oldval;
99 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 89
100 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
101 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
102 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
103 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
104 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
105 default: ret = -ENOSYS;
106 }
107 }
108 return ret; 90 return ret;
109} 91}
110 92
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index d776037d199f..eac5a3d38b90 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -48,8 +48,10 @@
48 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. 48 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
49 */ 49 */
50#define VA_BITS (CONFIG_ARM64_VA_BITS) 50#define VA_BITS (CONFIG_ARM64_VA_BITS)
51#define VA_START (UL(0xffffffffffffffff) << VA_BITS) 51#define VA_START (UL(0xffffffffffffffff) - \
52#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1)) 52 (UL(1) << VA_BITS) + 1)
53#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
54 (UL(1) << (VA_BITS - 1)) + 1)
53#define KIMAGE_VADDR (MODULES_END) 55#define KIMAGE_VADDR (MODULES_END)
54#define MODULES_END (MODULES_VADDR + MODULES_VSIZE) 56#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
55#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE) 57#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 73f5d548bba1..6d95f5d62d53 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -123,8 +123,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
123 " cbnz %w1, 1f\n" 123 " cbnz %w1, 1f\n"
124 " add %w1, %w0, %3\n" 124 " add %w1, %w0, %3\n"
125 " casa %w0, %w1, %2\n" 125 " casa %w0, %w1, %2\n"
126 " and %w1, %w1, #0xffff\n" 126 " sub %w1, %w1, %3\n"
127 " eor %w1, %w1, %w0, lsr #16\n" 127 " eor %w1, %w1, %w0\n"
128 "1:") 128 "1:")
129 : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) 129 : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
130 : "I" (1 << TICKET_SHIFT) 130 : "I" (1 << TICKET_SHIFT)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index b62ec1e8a843..22e413293fa3 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -49,7 +49,7 @@ static const char *handler[]= {
49 "Error" 49 "Error"
50}; 50};
51 51
52int show_unhandled_signals = 1; 52int show_unhandled_signals = 0;
53 53
54/* 54/*
55 * Dump out the contents of some memory nicely... 55 * Dump out the contents of some memory nicely...
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index d23342804230..5feb47778aa5 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -838,3 +838,15 @@ int pmd_clear_huge(pmd_t *pmd)
838 pmd_clear(pmd); 838 pmd_clear(pmd);
839 return 1; 839 return 1;
840} 840}
841
842#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
843int pud_free_pmd_page(pud_t *pud, unsigned long addr)
844{
845 return pud_none(*pud);
846}
847
848int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
849{
850 return pmd_none(*pmd);
851}
852#endif
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 9d37e967fa19..178af2dd15c4 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -242,6 +242,11 @@ ENTRY(__cpu_setup)
242 cbz x9, 2f 242 cbz x9, 2f
243 cmp x9, #2 243 cmp x9, #2
244 b.lt 1f 244 b.lt 1f
245#ifdef CONFIG_ARM64_ERRATUM_1024718
246 /* Disable hardware DBM on Cortex-A55 r0p0, r0p1 & r1p0 */
247 cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4
248 cbnz x1, 1f
249#endif
245 orr x10, x10, #TCR_HD // hardware Dirty flag update 250 orr x10, x10, #TCR_HD // hardware Dirty flag update
2461: orr x10, x10, #TCR_HA // hardware Access flag update 2511: orr x10, x10, #TCR_HA // hardware Access flag update
2472: 2522:
diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h
index 4bea27f50a7a..2702bd802d44 100644
--- a/arch/frv/include/asm/futex.h
+++ b/arch/frv/include/asm/futex.h
@@ -7,7 +7,8 @@
7#include <asm/errno.h> 7#include <asm/errno.h>
8#include <asm/uaccess.h> 8#include <asm/uaccess.h>
9 9
10extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr); 10extern int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
11 u32 __user *uaddr);
11 12
12static inline int 13static inline int
13futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, 14futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
diff --git a/arch/frv/include/asm/timex.h b/arch/frv/include/asm/timex.h
index a89bddefdacf..139093fab326 100644
--- a/arch/frv/include/asm/timex.h
+++ b/arch/frv/include/asm/timex.h
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
16#define vxtime_lock() do {} while (0) 16#define vxtime_lock() do {} while (0)
17#define vxtime_unlock() do {} while (0) 17#define vxtime_unlock() do {} while (0)
18 18
19/* This attribute is used in include/linux/jiffies.h alongside with
20 * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
21 * for frv does not contain another section specification.
22 */
23#define __jiffy_arch_data __attribute__((__section__(".data")))
24
19#endif 25#endif
20 26
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
index d155ca9e5098..37f7b2bf7f73 100644
--- a/arch/frv/kernel/futex.c
+++ b/arch/frv/kernel/futex.c
@@ -186,20 +186,10 @@ static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_o
186/* 186/*
187 * do the futex operations 187 * do the futex operations
188 */ 188 */
189int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 189int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
190{ 190{
191 int op = (encoded_op >> 28) & 7;
192 int cmp = (encoded_op >> 24) & 15;
193 int oparg = (encoded_op << 8) >> 20;
194 int cmparg = (encoded_op << 20) >> 20;
195 int oldval = 0, ret; 191 int oldval = 0, ret;
196 192
197 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
198 oparg = 1 << oparg;
199
200 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
201 return -EFAULT;
202
203 pagefault_disable(); 193 pagefault_disable();
204 194
205 switch (op) { 195 switch (op) {
@@ -225,18 +215,9 @@ int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
225 215
226 pagefault_enable(); 216 pagefault_enable();
227 217
228 if (!ret) { 218 if (!ret)
229 switch (cmp) { 219 *oval = oldval;
230 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
231 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
232 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
233 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
234 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
235 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
236 default: ret = -ENOSYS; break;
237 }
238 }
239 220
240 return ret; 221 return ret;
241 222
242} /* end futex_atomic_op_inuser() */ 223} /* end arch_futex_atomic_op_inuser() */
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index 7e597f8434da..c607b77c8215 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -31,18 +31,9 @@
31 31
32 32
33static inline int 33static inline int
34futex_atomic_op_inuser(int encoded_op, int __user *uaddr) 34arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
35{ 35{
36 int op = (encoded_op >> 28) & 7;
37 int cmp = (encoded_op >> 24) & 15;
38 int oparg = (encoded_op << 8) >> 20;
39 int cmparg = (encoded_op << 20) >> 20;
40 int oldval = 0, ret; 36 int oldval = 0, ret;
41 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
42 oparg = 1 << oparg;
43
44 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
45 return -EFAULT;
46 37
47 pagefault_disable(); 38 pagefault_disable();
48 39
@@ -72,30 +63,9 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
72 63
73 pagefault_enable(); 64 pagefault_enable();
74 65
75 if (!ret) { 66 if (!ret)
76 switch (cmp) { 67 *oval = oldval;
77 case FUTEX_OP_CMP_EQ: 68
78 ret = (oldval == cmparg);
79 break;
80 case FUTEX_OP_CMP_NE:
81 ret = (oldval != cmparg);
82 break;
83 case FUTEX_OP_CMP_LT:
84 ret = (oldval < cmparg);
85 break;
86 case FUTEX_OP_CMP_GE:
87 ret = (oldval >= cmparg);
88 break;
89 case FUTEX_OP_CMP_LE:
90 ret = (oldval <= cmparg);
91 break;
92 case FUTEX_OP_CMP_GT:
93 ret = (oldval > cmparg);
94 break;
95 default:
96 ret = -ENOSYS;
97 }
98 }
99 return ret; 69 return ret;
100} 70}
101 71
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index 76acbcd5c060..6d67dc1eaf2b 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -45,18 +45,9 @@ do { \
45} while (0) 45} while (0)
46 46
47static inline int 47static inline int
48futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) 48arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
49{ 49{
50 int op = (encoded_op >> 28) & 7;
51 int cmp = (encoded_op >> 24) & 15;
52 int oparg = (encoded_op << 8) >> 20;
53 int cmparg = (encoded_op << 20) >> 20;
54 int oldval = 0, ret; 50 int oldval = 0, ret;
55 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
56 oparg = 1 << oparg;
57
58 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
59 return -EFAULT;
60 51
61 pagefault_disable(); 52 pagefault_disable();
62 53
@@ -84,17 +75,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
84 75
85 pagefault_enable(); 76 pagefault_enable();
86 77
87 if (!ret) { 78 if (!ret)
88 switch (cmp) { 79 *oval = oldval;
89 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 80
90 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
91 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
92 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
93 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
94 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
95 default: ret = -ENOSYS;
96 }
97 }
98 return ret; 81 return ret;
99} 82}
100 83
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index b15933c31b2f..36b2c94a8eb5 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -153,7 +153,7 @@ slot (const struct insn *insn)
153static int 153static int
154apply_imm64 (struct module *mod, struct insn *insn, uint64_t val) 154apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
155{ 155{
156 if (slot(insn) != 2) { 156 if (slot(insn) != 1 && slot(insn) != 2) {
157 printk(KERN_ERR "%s: invalid slot number %d for IMM64\n", 157 printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
158 mod->name, slot(insn)); 158 mod->name, slot(insn));
159 return 0; 159 return 0;
@@ -165,7 +165,7 @@ apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
165static int 165static int
166apply_imm60 (struct module *mod, struct insn *insn, uint64_t val) 166apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
167{ 167{
168 if (slot(insn) != 2) { 168 if (slot(insn) != 1 && slot(insn) != 2) {
169 printk(KERN_ERR "%s: invalid slot number %d for IMM60\n", 169 printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
170 mod->name, slot(insn)); 170 mod->name, slot(insn));
171 return 0; 171 return 0;
diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
index 71ea4c02795d..8a2dc0af4cad 100644
--- a/arch/m68k/coldfire/device.c
+++ b/arch/m68k/coldfire/device.c
@@ -135,7 +135,11 @@ static struct platform_device mcf_fec0 = {
135 .id = 0, 135 .id = 0,
136 .num_resources = ARRAY_SIZE(mcf_fec0_resources), 136 .num_resources = ARRAY_SIZE(mcf_fec0_resources),
137 .resource = mcf_fec0_resources, 137 .resource = mcf_fec0_resources,
138 .dev.platform_data = FEC_PDATA, 138 .dev = {
139 .dma_mask = &mcf_fec0.dev.coherent_dma_mask,
140 .coherent_dma_mask = DMA_BIT_MASK(32),
141 .platform_data = FEC_PDATA,
142 }
139}; 143};
140 144
141#ifdef MCFFEC_BASE1 145#ifdef MCFFEC_BASE1
@@ -167,7 +171,11 @@ static struct platform_device mcf_fec1 = {
167 .id = 1, 171 .id = 1,
168 .num_resources = ARRAY_SIZE(mcf_fec1_resources), 172 .num_resources = ARRAY_SIZE(mcf_fec1_resources),
169 .resource = mcf_fec1_resources, 173 .resource = mcf_fec1_resources,
170 .dev.platform_data = FEC_PDATA, 174 .dev = {
175 .dma_mask = &mcf_fec1.dev.coherent_dma_mask,
176 .coherent_dma_mask = DMA_BIT_MASK(32),
177 .platform_data = FEC_PDATA,
178 }
171}; 179};
172#endif /* MCFFEC_BASE1 */ 180#endif /* MCFFEC_BASE1 */
173#endif /* CONFIG_FEC */ 181#endif /* CONFIG_FEC */
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 6e4955bc542b..fcd52cefee29 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -88,7 +88,8 @@ static inline void free_io_area(void *addr)
88 for (p = &iolist ; (tmp = *p) ; p = &tmp->next) { 88 for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
89 if (tmp->addr == addr) { 89 if (tmp->addr == addr) {
90 *p = tmp->next; 90 *p = tmp->next;
91 __iounmap(tmp->addr, tmp->size); 91 /* remove gap added in get_io_area() */
92 __iounmap(tmp->addr, tmp->size - IO_SIZE);
92 kfree(tmp); 93 kfree(tmp);
93 return; 94 return;
94 } 95 }
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 91d2068da1b9..0f3fe6a151dc 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -21,17 +21,19 @@ $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
21quiet_cmd_cp = CP $< $@$2 21quiet_cmd_cp = CP $< $@$2
22 cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) 22 cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
23 23
24quiet_cmd_strip = STRIP $@ 24quiet_cmd_strip = STRIP $< $@$2
25 cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \ 25 cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \
26 -K _fdt_start vmlinux -o $@ 26 -K _fdt_start $< -o $@$2
27 27
28UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR) 28UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR)
29UIMAGE_IN = $@
30UIMAGE_OUT = $@.ub
29 31
30$(obj)/simpleImage.%: vmlinux FORCE 32$(obj)/simpleImage.%: vmlinux FORCE
31 $(call if_changed,cp,.unstrip) 33 $(call if_changed,cp,.unstrip)
32 $(call if_changed,objcopy) 34 $(call if_changed,objcopy)
33 $(call if_changed,uimage) 35 $(call if_changed,uimage)
34 $(call if_changed,strip) 36 $(call if_changed,strip,.strip)
35 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' 37 @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')'
36 38
37clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb 39clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index 01848f056f43..a9dad9e5e132 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -29,18 +29,9 @@
29}) 29})
30 30
31static inline int 31static inline int
32futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 32arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
33{ 33{
34 int op = (encoded_op >> 28) & 7;
35 int cmp = (encoded_op >> 24) & 15;
36 int oparg = (encoded_op << 8) >> 20;
37 int cmparg = (encoded_op << 20) >> 20;
38 int oldval = 0, ret; 34 int oldval = 0, ret;
39 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
40 oparg = 1 << oparg;
41
42 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
43 return -EFAULT;
44 35
45 pagefault_disable(); 36 pagefault_disable();
46 37
@@ -66,30 +57,9 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
66 57
67 pagefault_enable(); 58 pagefault_enable();
68 59
69 if (!ret) { 60 if (!ret)
70 switch (cmp) { 61 *oval = oldval;
71 case FUTEX_OP_CMP_EQ: 62
72 ret = (oldval == cmparg);
73 break;
74 case FUTEX_OP_CMP_NE:
75 ret = (oldval != cmparg);
76 break;
77 case FUTEX_OP_CMP_LT:
78 ret = (oldval < cmparg);
79 break;
80 case FUTEX_OP_CMP_GE:
81 ret = (oldval >= cmparg);
82 break;
83 case FUTEX_OP_CMP_LE:
84 ret = (oldval <= cmparg);
85 break;
86 case FUTEX_OP_CMP_GT:
87 ret = (oldval > cmparg);
88 break;
89 default:
90 ret = -ENOSYS;
91 }
92 }
93 return ret; 63 return ret;
94} 64}
95 65
diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
index 9ab48ff80c1c..6d11ae581ea7 100644
--- a/arch/mips/ath25/board.c
+++ b/arch/mips/ath25/board.c
@@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
135 } 135 }
136 136
137 board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL); 137 board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
138 if (!board_data)
139 goto error;
138 ath25_board.config = (struct ath25_boarddata *)board_data; 140 ath25_board.config = (struct ath25_boarddata *)board_data;
139 memcpy_fromio(board_data, bcfg, 0x100); 141 memcpy_fromio(board_data, bcfg, 0x100);
140 if (broken_boarddata) { 142 if (broken_boarddata) {
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 8ae4067a5eda..40ecb6e700cd 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
58 58
59void ath79_ddr_wb_flush(u32 reg) 59void ath79_ddr_wb_flush(u32 reg)
60{ 60{
61 void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg; 61 void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
62 62
63 /* Flush the DDR write buffer. */ 63 /* Flush the DDR write buffer. */
64 __raw_writel(0x1, flush_reg); 64 __raw_writel(0x1, flush_reg);
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 6d38948f0f1e..4ca33175ec05 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -249,6 +249,12 @@ static int __init bcm47xx_cpu_fixes(void)
249 */ 249 */
250 if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) 250 if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
251 cpu_wait = NULL; 251 cpu_wait = NULL;
252
253 /*
254 * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
255 * Enable ExternalSync for sync instruction to take effect
256 */
257 set_c0_config7(MIPS_CONF7_ES);
252 break; 258 break;
253#endif 259#endif
254 } 260 }
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 4f9eb0576884..63d35076722d 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -2240,17 +2240,19 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
2240 2240
2241 parent_irq = irq_of_parse_and_map(ciu_node, 0); 2241 parent_irq = irq_of_parse_and_map(ciu_node, 0);
2242 if (!parent_irq) { 2242 if (!parent_irq) {
2243 pr_err("ERROR: Couldn't acquire parent_irq for %s\n.", 2243 pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
2244 ciu_node->name); 2244 ciu_node->name);
2245 return -EINVAL; 2245 return -EINVAL;
2246 } 2246 }
2247 2247
2248 host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); 2248 host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
2249 if (!host_data)
2250 return -ENOMEM;
2249 raw_spin_lock_init(&host_data->lock); 2251 raw_spin_lock_init(&host_data->lock);
2250 2252
2251 addr = of_get_address(ciu_node, 0, NULL, NULL); 2253 addr = of_get_address(ciu_node, 0, NULL, NULL);
2252 if (!addr) { 2254 if (!addr) {
2253 pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name); 2255 pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
2254 return -EINVAL; 2256 return -EINVAL;
2255 } 2257 }
2256 host_data->raw_reg = (u64)phys_to_virt( 2258 host_data->raw_reg = (u64)phys_to_virt(
@@ -2258,7 +2260,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
2258 2260
2259 addr = of_get_address(ciu_node, 1, NULL, NULL); 2261 addr = of_get_address(ciu_node, 1, NULL, NULL);
2260 if (!addr) { 2262 if (!addr) {
2261 pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name); 2263 pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
2262 return -EINVAL; 2264 return -EINVAL;
2263 } 2265 }
2264 host_data->en_reg = (u64)phys_to_virt( 2266 host_data->en_reg = (u64)phys_to_virt(
@@ -2266,7 +2268,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
2266 2268
2267 r = of_property_read_u32(ciu_node, "cavium,max-bits", &val); 2269 r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
2268 if (r) { 2270 if (r) {
2269 pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.", 2271 pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
2270 ciu_node->name); 2272 ciu_node->name);
2271 return r; 2273 return r;
2272 } 2274 }
@@ -2276,7 +2278,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
2276 &octeon_irq_domain_cib_ops, 2278 &octeon_irq_domain_cib_ops,
2277 host_data); 2279 host_data);
2278 if (!cib_domain) { 2280 if (!cib_domain) {
2279 pr_err("ERROR: Couldn't irq_domain_add_linear()\n."); 2281 pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
2280 return -ENOMEM; 2282 return -ENOMEM;
2281 } 2283 }
2282 2284
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 1de190bdfb9c..a9e61ea54ca9 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -83,18 +83,9 @@
83} 83}
84 84
85static inline int 85static inline int
86futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 86arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
87{ 87{
88 int op = (encoded_op >> 28) & 7;
89 int cmp = (encoded_op >> 24) & 15;
90 int oparg = (encoded_op << 8) >> 20;
91 int cmparg = (encoded_op << 20) >> 20;
92 int oldval = 0, ret; 88 int oldval = 0, ret;
93 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
94 oparg = 1 << oparg;
95
96 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
97 return -EFAULT;
98 89
99 pagefault_disable(); 90 pagefault_disable();
100 91
@@ -125,17 +116,9 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
125 116
126 pagefault_enable(); 117 pagefault_enable();
127 118
128 if (!ret) { 119 if (!ret)
129 switch (cmp) { 120 *oval = oldval;
130 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 121
131 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
132 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
133 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
134 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
135 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
136 default: ret = -ENOSYS;
137 }
138 }
139 return ret; 122 return ret;
140} 123}
141 124
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index d10fd80dbb7e..75fa296836fc 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -411,6 +411,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
411 __val = *__addr; \ 411 __val = *__addr; \
412 slow; \ 412 slow; \
413 \ 413 \
414 /* prevent prefetching of coherent DMA data prematurely */ \
415 rmb(); \
414 return pfx##ioswab##bwlq(__addr, __val); \ 416 return pfx##ioswab##bwlq(__addr, __val); \
415} 417}
416 418
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index daba1f9a4f79..174aedce3167 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -40,7 +40,8 @@ typedef union mips_instruction kprobe_opcode_t;
40 40
41#define flush_insn_slot(p) \ 41#define flush_insn_slot(p) \
42do { \ 42do { \
43 flush_icache_range((unsigned long)p->addr, \ 43 if (p->addr) \
44 flush_icache_range((unsigned long)p->addr, \
44 (unsigned long)p->addr + \ 45 (unsigned long)p->addr + \
45 (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ 46 (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
46} while (0) 47} while (0)
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index aa3800c82332..d99ca862dae3 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -167,7 +167,7 @@
167#define AR71XX_AHB_DIV_MASK 0x7 167#define AR71XX_AHB_DIV_MASK 0x7
168 168
169#define AR724X_PLL_REG_CPU_CONFIG 0x00 169#define AR724X_PLL_REG_CPU_CONFIG 0x00
170#define AR724X_PLL_REG_PCIE_CONFIG 0x18 170#define AR724X_PLL_REG_PCIE_CONFIG 0x10
171 171
172#define AR724X_PLL_FB_SHIFT 0 172#define AR724X_PLL_FB_SHIFT 0
173#define AR724X_PLL_FB_MASK 0x3ff 173#define AR724X_PLL_FB_MASK 0x3ff
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index e43aca183c99..15c183ce9d4f 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -605,6 +605,8 @@
605#define MIPS_CONF7_WII (_ULCAST_(1) << 31) 605#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
606 606
607#define MIPS_CONF7_RPS (_ULCAST_(1) << 2) 607#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
608/* ExternalSync */
609#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
608 610
609#define MIPS_CONF7_IAR (_ULCAST_(1) << 10) 611#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
610#define MIPS_CONF7_AR (_ULCAST_(1) << 16) 612#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
@@ -2012,6 +2014,7 @@ __BUILD_SET_C0(status)
2012__BUILD_SET_C0(cause) 2014__BUILD_SET_C0(cause)
2013__BUILD_SET_C0(config) 2015__BUILD_SET_C0(config)
2014__BUILD_SET_C0(config5) 2016__BUILD_SET_C0(config5)
2017__BUILD_SET_C0(config7)
2015__BUILD_SET_C0(intcontrol) 2018__BUILD_SET_C0(intcontrol)
2016__BUILD_SET_C0(intctl) 2019__BUILD_SET_C0(intctl)
2017__BUILD_SET_C0(srsmap) 2020__BUILD_SET_C0(srsmap)
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 98c31e5d9579..a7bc901819c8 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -89,7 +89,7 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
89 phys_addr_t size = resource_size(rsrc); 89 phys_addr_t size = resource_size(rsrc);
90 90
91 *start = fixup_bigphys_addr(rsrc->start, size); 91 *start = fixup_bigphys_addr(rsrc->start, size);
92 *end = rsrc->start + size; 92 *end = rsrc->start + size - 1;
93} 93}
94 94
95/* 95/*
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 832e2167d00f..ef7c02af7522 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -18,6 +18,10 @@
18 18
19#include <asm-generic/pgtable-nopmd.h> 19#include <asm-generic/pgtable-nopmd.h>
20 20
21#ifdef CONFIG_HIGHMEM
22#include <asm/highmem.h>
23#endif
24
21extern int temp_tlb_entry; 25extern int temp_tlb_entry;
22 26
23/* 27/*
@@ -61,7 +65,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
61 65
62#define VMALLOC_START MAP_BASE 66#define VMALLOC_START MAP_BASE
63 67
64#define PKMAP_BASE (0xfe000000UL) 68#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
69#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP)
65 70
66#ifdef CONFIG_HIGHMEM 71#ifdef CONFIG_HIGHMEM
67# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) 72# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index c74c32ccc647..4f281768937f 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -1238,6 +1238,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
1238{ 1238{
1239 __kernel_size_t res; 1239 __kernel_size_t res;
1240 1240
1241#ifdef CONFIG_CPU_MICROMIPS
1242/* micromips memset / bzero also clobbers t7 & t8 */
1243#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
1244#else
1245#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
1246#endif /* CONFIG_CPU_MICROMIPS */
1247
1241 if (eva_kernel_access()) { 1248 if (eva_kernel_access()) {
1242 __asm__ __volatile__( 1249 __asm__ __volatile__(
1243 "move\t$4, %1\n\t" 1250 "move\t$4, %1\n\t"
@@ -1247,7 +1254,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
1247 "move\t%0, $6" 1254 "move\t%0, $6"
1248 : "=r" (res) 1255 : "=r" (res)
1249 : "r" (addr), "r" (size) 1256 : "r" (addr), "r" (size)
1250 : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); 1257 : bzero_clobbers);
1251 } else { 1258 } else {
1252 might_fault(); 1259 might_fault();
1253 __asm__ __volatile__( 1260 __asm__ __volatile__(
@@ -1258,7 +1265,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
1258 "move\t%0, $6" 1265 "move\t%0, $6"
1259 : "=r" (res) 1266 : "=r" (res)
1260 : "r" (addr), "r" (size) 1267 : "r" (addr), "r" (size)
1261 : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); 1268 : bzero_clobbers);
1262 } 1269 }
1263 1270
1264 return res; 1271 return res;
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 2f7c734771f4..0df911e772ae 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -116,10 +116,20 @@ ftrace_stub:
116NESTED(_mcount, PT_SIZE, ra) 116NESTED(_mcount, PT_SIZE, ra)
117 PTR_LA t1, ftrace_stub 117 PTR_LA t1, ftrace_stub
118 PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ 118 PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
119 bne t1, t2, static_trace 119 beq t1, t2, fgraph_trace
120 nop 120 nop
121 121
122 MCOUNT_SAVE_REGS
123
124 move a0, ra /* arg1: self return address */
125 jalr t2 /* (1) call *ftrace_trace_function */
126 move a1, AT /* arg2: parent's return address */
127
128 MCOUNT_RESTORE_REGS
129
130fgraph_trace:
122#ifdef CONFIG_FUNCTION_GRAPH_TRACER 131#ifdef CONFIG_FUNCTION_GRAPH_TRACER
132 PTR_LA t1, ftrace_stub
123 PTR_L t3, ftrace_graph_return 133 PTR_L t3, ftrace_graph_return
124 bne t1, t3, ftrace_graph_caller 134 bne t1, t3, ftrace_graph_caller
125 nop 135 nop
@@ -128,24 +138,11 @@ NESTED(_mcount, PT_SIZE, ra)
128 bne t1, t3, ftrace_graph_caller 138 bne t1, t3, ftrace_graph_caller
129 nop 139 nop
130#endif 140#endif
131 b ftrace_stub
132#ifdef CONFIG_32BIT
133 addiu sp, sp, 8
134#else
135 nop
136#endif
137 141
138static_trace:
139 MCOUNT_SAVE_REGS
140
141 move a0, ra /* arg1: self return address */
142 jalr t2 /* (1) call *ftrace_trace_function */
143 move a1, AT /* arg2: parent's return address */
144
145 MCOUNT_RESTORE_REGS
146#ifdef CONFIG_32BIT 142#ifdef CONFIG_32BIT
147 addiu sp, sp, 8 143 addiu sp, sp, 8
148#endif 144#endif
145
149 .globl ftrace_stub 146 .globl ftrace_stub
150ftrace_stub: 147ftrace_stub:
151 RETURN_BACK 148 RETURN_BACK
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index e3384065f5e7..cbe0f025856d 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1097,10 +1097,20 @@ repeat:
1097 } 1097 }
1098 break; 1098 break;
1099 1099
1100 case beql_op:
1101 case bnel_op:
1102 case blezl_op: 1100 case blezl_op:
1103 case bgtzl_op: 1101 case bgtzl_op:
1102 /*
1103 * For BLEZL and BGTZL, rt field must be set to 0. If this
1104 * is not the case, this may be an encoding of a MIPS R6
1105 * instruction, so return to CPU execution if this occurs
1106 */
1107 if (MIPSInst_RT(inst)) {
1108 err = SIGILL;
1109 break;
1110 }
1111 /* fall through */
1112 case beql_op:
1113 case bnel_op:
1104 if (delay_slot(regs)) { 1114 if (delay_slot(regs)) {
1105 err = SIGILL; 1115 err = SIGILL;
1106 break; 1116 break;
@@ -2330,6 +2340,8 @@ static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
2330 __this_cpu_write((mipsr2bremustats).bgezl, 0); 2340 __this_cpu_write((mipsr2bremustats).bgezl, 0);
2331 __this_cpu_write((mipsr2bremustats).bltzll, 0); 2341 __this_cpu_write((mipsr2bremustats).bltzll, 0);
2332 __this_cpu_write((mipsr2bremustats).bgezll, 0); 2342 __this_cpu_write((mipsr2bremustats).bgezll, 0);
2343 __this_cpu_write((mipsr2bremustats).bltzall, 0);
2344 __this_cpu_write((mipsr2bremustats).bgezall, 0);
2333 __this_cpu_write((mipsr2bremustats).bltzal, 0); 2345 __this_cpu_write((mipsr2bremustats).bltzal, 0);
2334 __this_cpu_write((mipsr2bremustats).bgezal, 0); 2346 __this_cpu_write((mipsr2bremustats).bgezal, 0);
2335 __this_cpu_write((mipsr2bremustats).beql, 0); 2347 __this_cpu_write((mipsr2bremustats).beql, 0);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index fcbc4e57d765..354b99f56c1e 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -629,21 +629,48 @@ unsigned long arch_align_stack(unsigned long sp)
629 return sp & ALMASK; 629 return sp & ALMASK;
630} 630}
631 631
632static DEFINE_PER_CPU(struct call_single_data, backtrace_csd);
633static struct cpumask backtrace_csd_busy;
634
632static void arch_dump_stack(void *info) 635static void arch_dump_stack(void *info)
633{ 636{
634 struct pt_regs *regs; 637 struct pt_regs *regs;
638 static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
635 639
640 arch_spin_lock(&lock);
636 regs = get_irq_regs(); 641 regs = get_irq_regs();
637 642
638 if (regs) 643 if (regs)
639 show_regs(regs); 644 show_regs(regs);
645 else
646 dump_stack();
647 arch_spin_unlock(&lock);
640 648
641 dump_stack(); 649 cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
642} 650}
643 651
644void arch_trigger_all_cpu_backtrace(bool include_self) 652void arch_trigger_all_cpu_backtrace(bool include_self)
645{ 653{
646 smp_call_function(arch_dump_stack, NULL, 1); 654 struct call_single_data *csd;
655 int cpu;
656
657 for_each_cpu(cpu, cpu_online_mask) {
658 /*
659 * If we previously sent an IPI to the target CPU & it hasn't
660 * cleared its bit in the busy cpumask then it didn't handle
661 * our previous IPI & it's not safe for us to reuse the
662 * call_single_data_t.
663 */
664 if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
665 pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
666 cpu);
667 continue;
668 }
669
670 csd = &per_cpu(backtrace_csd, cpu);
671 csd->func = arch_dump_stack;
672 smp_call_function_single_async(cpu, csd);
673 }
647} 674}
648 675
649int mips_get_process_fp_mode(struct task_struct *task) 676int mips_get_process_fp_mode(struct task_struct *task)
@@ -680,6 +707,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
680 if (value & ~known_bits) 707 if (value & ~known_bits)
681 return -EOPNOTSUPP; 708 return -EOPNOTSUPP;
682 709
710 /* Setting FRE without FR is not supported. */
711 if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
712 return -EOPNOTSUPP;
713
683 /* Avoid inadvertently triggering emulation */ 714 /* Avoid inadvertently triggering emulation */
684 if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu && 715 if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
685 !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64)) 716 !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index c3d2d2c05fdb..32fa3ae1a0a6 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -483,7 +483,7 @@ static int fpr_get_msa(struct task_struct *target,
483/* 483/*
484 * Copy the floating-point context to the supplied NT_PRFPREG buffer. 484 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
485 * Choose the appropriate helper for general registers, and then copy 485 * Choose the appropriate helper for general registers, and then copy
486 * the FCSR register separately. 486 * the FCSR and FIR registers separately.
487 */ 487 */
488static int fpr_get(struct task_struct *target, 488static int fpr_get(struct task_struct *target,
489 const struct user_regset *regset, 489 const struct user_regset *regset,
@@ -491,6 +491,7 @@ static int fpr_get(struct task_struct *target,
491 void *kbuf, void __user *ubuf) 491 void *kbuf, void __user *ubuf)
492{ 492{
493 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); 493 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
494 const int fir_pos = fcr31_pos + sizeof(u32);
494 int err; 495 int err;
495 496
496 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) 497 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
@@ -503,6 +504,12 @@ static int fpr_get(struct task_struct *target,
503 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 504 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
504 &target->thread.fpu.fcr31, 505 &target->thread.fpu.fcr31,
505 fcr31_pos, fcr31_pos + sizeof(u32)); 506 fcr31_pos, fcr31_pos + sizeof(u32));
507 if (err)
508 return err;
509
510 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
511 &boot_cpu_data.fpu_id,
512 fir_pos, fir_pos + sizeof(u32));
506 513
507 return err; 514 return err;
508} 515}
@@ -551,7 +558,8 @@ static int fpr_set_msa(struct task_struct *target,
551/* 558/*
552 * Copy the supplied NT_PRFPREG buffer to the floating-point context. 559 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
553 * Choose the appropriate helper for general registers, and then copy 560 * Choose the appropriate helper for general registers, and then copy
554 * the FCSR register separately. 561 * the FCSR register separately. Ignore the incoming FIR register
562 * contents though, as the register is read-only.
555 * 563 *
556 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', 564 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
557 * which is supposed to have been guaranteed by the kernel before 565 * which is supposed to have been guaranteed by the kernel before
@@ -565,6 +573,7 @@ static int fpr_set(struct task_struct *target,
565 const void *kbuf, const void __user *ubuf) 573 const void *kbuf, const void __user *ubuf)
566{ 574{
567 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); 575 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
576 const int fir_pos = fcr31_pos + sizeof(u32);
568 u32 fcr31; 577 u32 fcr31;
569 int err; 578 int err;
570 579
@@ -592,6 +601,11 @@ static int fpr_set(struct task_struct *target,
592 ptrace_setfcr31(target, fcr31); 601 ptrace_setfcr31(target, fcr31);
593 } 602 }
594 603
604 if (count > 0)
605 err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
606 fir_pos,
607 fir_pos + sizeof(u32));
608
595 return err; 609 return err;
596} 610}
597 611
@@ -816,7 +830,7 @@ long arch_ptrace(struct task_struct *child, long request,
816 fregs = get_fpu_regs(child); 830 fregs = get_fpu_regs(child);
817 831
818#ifdef CONFIG_32BIT 832#ifdef CONFIG_32BIT
819 if (test_thread_flag(TIF_32BIT_FPREGS)) { 833 if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
820 /* 834 /*
821 * The odd registers are actually the high 835 * The odd registers are actually the high
822 * order bits of the values stored in the even 836 * order bits of the values stored in the even
@@ -827,7 +841,7 @@ long arch_ptrace(struct task_struct *child, long request,
827 break; 841 break;
828 } 842 }
829#endif 843#endif
830 tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); 844 tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
831 break; 845 break;
832 case PC: 846 case PC:
833 tmp = regs->cp0_epc; 847 tmp = regs->cp0_epc;
@@ -905,7 +919,7 @@ long arch_ptrace(struct task_struct *child, long request,
905 919
906 init_fp_ctx(child); 920 init_fp_ctx(child);
907#ifdef CONFIG_32BIT 921#ifdef CONFIG_32BIT
908 if (test_thread_flag(TIF_32BIT_FPREGS)) { 922 if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
909 /* 923 /*
910 * The odd registers are actually the high 924 * The odd registers are actually the high
911 * order bits of the values stored in the even 925 * order bits of the values stored in the even
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 283b5a1967d1..d95117e71f69 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -97,7 +97,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
97 break; 97 break;
98 } 98 }
99 fregs = get_fpu_regs(child); 99 fregs = get_fpu_regs(child);
100 if (test_thread_flag(TIF_32BIT_FPREGS)) { 100 if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
101 /* 101 /*
102 * The odd registers are actually the high 102 * The odd registers are actually the high
103 * order bits of the values stored in the even 103 * order bits of the values stored in the even
@@ -107,7 +107,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
107 addr & 1); 107 addr & 1);
108 break; 108 break;
109 } 109 }
110 tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); 110 tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
111 break; 111 break;
112 case PC: 112 case PC:
113 tmp = regs->cp0_epc; 113 tmp = regs->cp0_epc;
@@ -203,7 +203,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
203 sizeof(child->thread.fpu)); 203 sizeof(child->thread.fpu));
204 child->thread.fpu.fcr31 = 0; 204 child->thread.fpu.fcr31 = 0;
205 } 205 }
206 if (test_thread_flag(TIF_32BIT_FPREGS)) { 206 if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
207 /* 207 /*
208 * The odd registers are actually the high 208 * The odd registers are actually the high
209 * order bits of the values stored in the even 209 * order bits of the values stored in the even
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 78cf8c2f1de0..4874712b475e 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -166,11 +166,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
166 return; 166 return;
167 } 167 }
168 168
169 if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, 169 if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
170 "smp_ipi0", NULL)) 170 IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
171 panic("Can't request IPI0 interrupt"); 171 panic("Can't request IPI0 interrupt");
172 if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, 172 if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
173 "smp_ipi1", NULL)) 173 IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
174 panic("Can't request IPI1 interrupt"); 174 panic("Can't request IPI1 interrupt");
175} 175}
176 176
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 31ca2edd7218..1b901218e3ae 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -344,6 +344,7 @@ static void __show_regs(const struct pt_regs *regs)
344void show_regs(struct pt_regs *regs) 344void show_regs(struct pt_regs *regs)
345{ 345{
346 __show_regs((struct pt_regs *)regs); 346 __show_regs((struct pt_regs *)regs);
347 dump_stack();
347} 348}
348 349
349void show_registers(struct pt_regs *regs) 350void show_registers(struct pt_regs *regs)
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index a017b23ee4aa..8a95c3d76a9a 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -40,7 +40,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
40 { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, 40 { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
41 { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, 41 { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
42 { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, 42 { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
43 { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, 43 { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
44 { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, 44 { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
45 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, 45 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
46 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, 46 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 0344e575f522..fba4ca56e46a 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -15,4 +15,5 @@ obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
15obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o 15obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o
16 16
17# libgcc-style stuff needed in the kernel 17# libgcc-style stuff needed in the kernel
18obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o ucmpdi2.o 18obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o multi3.o \
19 ucmpdi2.o
diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
index 05909d58e2fe..56ea0df60a44 100644
--- a/arch/mips/lib/libgcc.h
+++ b/arch/mips/lib/libgcc.h
@@ -9,10 +9,18 @@ typedef int word_type __attribute__ ((mode (__word__)));
9struct DWstruct { 9struct DWstruct {
10 int high, low; 10 int high, low;
11}; 11};
12
13struct TWstruct {
14 long long high, low;
15};
12#elif defined(__LITTLE_ENDIAN) 16#elif defined(__LITTLE_ENDIAN)
13struct DWstruct { 17struct DWstruct {
14 int low, high; 18 int low, high;
15}; 19};
20
21struct TWstruct {
22 long long low, high;
23};
16#else 24#else
17#error I feel sick. 25#error I feel sick.
18#endif 26#endif
@@ -22,4 +30,13 @@ typedef union {
22 long long ll; 30 long long ll;
23} DWunion; 31} DWunion;
24 32
33#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6)
34typedef int ti_type __attribute__((mode(TI)));
35
36typedef union {
37 struct TWstruct s;
38 ti_type ti;
39} TWunion;
40#endif
41
25#endif /* __ASM_LIBGCC_H */ 42#endif /* __ASM_LIBGCC_H */
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 8f0019a2e5c8..2d33cf2185d9 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -218,7 +218,7 @@
2181: PTR_ADDIU a0, 1 /* fill bytewise */ 2181: PTR_ADDIU a0, 1 /* fill bytewise */
219 R10KCBARRIER(0(ra)) 219 R10KCBARRIER(0(ra))
220 bne t1, a0, 1b 220 bne t1, a0, 1b
221 sb a1, -1(a0) 221 EX(sb, a1, -1(a0), .Lsmall_fixup\@)
222 222
2232: jr ra /* done */ 2232: jr ra /* done */
224 move a2, zero 224 move a2, zero
@@ -249,13 +249,18 @@
249 PTR_L t0, TI_TASK($28) 249 PTR_L t0, TI_TASK($28)
250 andi a2, STORMASK 250 andi a2, STORMASK
251 LONG_L t0, THREAD_BUADDR(t0) 251 LONG_L t0, THREAD_BUADDR(t0)
252 LONG_ADDU a2, t1 252 LONG_ADDU a2, a0
253 jr ra 253 jr ra
254 LONG_SUBU a2, t0 254 LONG_SUBU a2, t0
255 255
256.Llast_fixup\@: 256.Llast_fixup\@:
257 jr ra 257 jr ra
258 andi v1, a2, STORMASK 258 nop
259
260.Lsmall_fixup\@:
261 PTR_SUBU a2, t1, a0
262 jr ra
263 PTR_ADDIU a2, 1
259 264
260 .endm 265 .endm
261 266
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
new file mode 100644
index 000000000000..111ad475aa0c
--- /dev/null
+++ b/arch/mips/lib/multi3.c
@@ -0,0 +1,54 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/export.h>
3
4#include "libgcc.h"
5
6/*
7 * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
8 * specific case only we'll implement it here.
9 *
10 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
11 */
12#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
13
14/* multiply 64-bit values, low 64-bits returned */
15static inline long long notrace dmulu(long long a, long long b)
16{
17 long long res;
18
19 asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
20 return res;
21}
22
23/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */
24static inline long long notrace dmuhu(long long a, long long b)
25{
26 long long res;
27
28 asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
29 return res;
30}
31
32/* multiply 128-bit values, low 128-bits returned */
33ti_type notrace __multi3(ti_type a, ti_type b)
34{
35 TWunion res, aa, bb;
36
37 aa.ti = a;
38 bb.ti = b;
39
40 /*
41 * a * b = (a.lo * b.lo)
42 * + 2^64 * (a.hi * b.lo + a.lo * b.hi)
43 * [+ 2^128 * (a.hi * b.hi)]
44 */
45 res.s.low = dmulu(aa.s.low, bb.s.low);
46 res.s.high = dmuhu(aa.s.low, bb.s.low);
47 res.s.high += dmulu(aa.s.high, bb.s.low);
48 res.s.high += dmulu(aa.s.low, bb.s.high);
49
50 return res.ti;
51}
52EXPORT_SYMBOL(__multi3);
53
54#endif /* 64BIT && CPU_MIPSR6 && GCC7 */
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 8d5008cbdc0f..a853a83f2944 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -9,6 +9,7 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <asm/addrspace.h> 10#include <asm/addrspace.h>
11#include <asm/byteorder.h> 11#include <asm/byteorder.h>
12#include <linux/ioport.h>
12#include <linux/sched.h> 13#include <linux/sched.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14#include <linux/vmalloc.h> 15#include <linux/vmalloc.h>
@@ -97,6 +98,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
97 return error; 98 return error;
98} 99}
99 100
101static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
102 void *arg)
103{
104 unsigned long i;
105
106 for (i = 0; i < nr_pages; i++) {
107 if (pfn_valid(start_pfn + i) &&
108 !PageReserved(pfn_to_page(start_pfn + i)))
109 return 1;
110 }
111
112 return 0;
113}
114
100/* 115/*
101 * Generic mapping function (not visible outside): 116 * Generic mapping function (not visible outside):
102 */ 117 */
@@ -115,8 +130,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
115 130
116void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags) 131void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
117{ 132{
133 unsigned long offset, pfn, last_pfn;
118 struct vm_struct * area; 134 struct vm_struct * area;
119 unsigned long offset;
120 phys_addr_t last_addr; 135 phys_addr_t last_addr;
121 void * addr; 136 void * addr;
122 137
@@ -136,18 +151,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
136 return (void __iomem *) CKSEG1ADDR(phys_addr); 151 return (void __iomem *) CKSEG1ADDR(phys_addr);
137 152
138 /* 153 /*
139 * Don't allow anybody to remap normal RAM that we're using.. 154 * Don't allow anybody to remap RAM that may be allocated by the page
155 * allocator, since that could lead to races & data clobbering.
140 */ 156 */
141 if (phys_addr < virt_to_phys(high_memory)) { 157 pfn = PFN_DOWN(phys_addr);
142 char *t_addr, *t_end; 158 last_pfn = PFN_DOWN(last_addr);
143 struct page *page; 159 if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
144 160 __ioremap_check_ram) == 1) {
145 t_addr = __va(phys_addr); 161 WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
146 t_end = t_addr + (size - 1); 162 &phys_addr, &last_addr);
147 163 return NULL;
148 for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
149 if(!PageReserved(page))
150 return NULL;
151 } 164 }
152 165
153 /* 166 /*
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index adc6911ba748..b19a3c506b1e 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -51,15 +51,15 @@ void __init pagetable_init(void)
51 /* 51 /*
52 * Fixed mappings: 52 * Fixed mappings:
53 */ 53 */
54 vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; 54 vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
55 fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); 55 fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
56 56
57#ifdef CONFIG_HIGHMEM 57#ifdef CONFIG_HIGHMEM
58 /* 58 /*
59 * Permanent kmaps: 59 * Permanent kmaps:
60 */ 60 */
61 vaddr = PKMAP_BASE; 61 vaddr = PKMAP_BASE;
62 fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); 62 fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
63 63
64 pgd = swapper_pg_dir + __pgd_offset(vaddr); 64 pgd = swapper_pg_dir + __pgd_offset(vaddr);
65 pud = pud_offset(pgd, vaddr); 65 pud = pud_offset(pgd, vaddr);
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 1a8c96035716..c0c1e9529dbd 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -527,7 +527,8 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
527 u32 sflags, tmp_flags; 527 u32 sflags, tmp_flags;
528 528
529 /* Adjust the stack pointer */ 529 /* Adjust the stack pointer */
530 emit_stack_offset(-align_sp(offset), ctx); 530 if (offset)
531 emit_stack_offset(-align_sp(offset), ctx);
531 532
532 tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; 533 tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
533 /* sflags is essentially a bitmap */ 534 /* sflags is essentially a bitmap */
@@ -579,7 +580,8 @@ static void restore_bpf_jit_regs(struct jit_ctx *ctx,
579 emit_load_stack_reg(r_ra, r_sp, real_off, ctx); 580 emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
580 581
581 /* Restore the sp and discard the scrach memory */ 582 /* Restore the sp and discard the scrach memory */
582 emit_stack_offset(align_sp(offset), ctx); 583 if (offset)
584 emit_stack_offset(align_sp(offset), ctx);
583} 585}
584 586
585static unsigned int get_stack_depth(struct jit_ctx *ctx) 587static unsigned int get_stack_depth(struct jit_ctx *ctx)
@@ -626,8 +628,14 @@ static void build_prologue(struct jit_ctx *ctx)
626 if (ctx->flags & SEEN_X) 628 if (ctx->flags & SEEN_X)
627 emit_jit_reg_move(r_X, r_zero, ctx); 629 emit_jit_reg_move(r_X, r_zero, ctx);
628 630
629 /* Do not leak kernel data to userspace */ 631 /*
630 if (bpf_needs_clear_a(&ctx->skf->insns[0])) 632 * Do not leak kernel data to userspace, we only need to clear
633 * r_A if it is ever used. In fact if it is never used, we
634 * will not save/restore it, so clearing it in this case would
635 * corrupt the state of the caller.
636 */
637 if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
638 (ctx->flags & SEEN_A))
631 emit_jit_reg_move(r_A, r_zero, ctx); 639 emit_jit_reg_move(r_A, r_zero, ctx);
632} 640}
633 641
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
index 5d2e0c8d29c0..88a2075305d1 100644
--- a/arch/mips/net/bpf_jit_asm.S
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -90,18 +90,14 @@ FEXPORT(sk_load_half_positive)
90 is_offset_in_header(2, half) 90 is_offset_in_header(2, half)
91 /* Offset within header boundaries */ 91 /* Offset within header boundaries */
92 PTR_ADDU t1, $r_skb_data, offset 92 PTR_ADDU t1, $r_skb_data, offset
93 .set reorder 93 lhu $r_A, 0(t1)
94 lh $r_A, 0(t1)
95 .set noreorder
96#ifdef CONFIG_CPU_LITTLE_ENDIAN 94#ifdef CONFIG_CPU_LITTLE_ENDIAN
97# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) 95# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
98 wsbh t0, $r_A 96 wsbh $r_A, $r_A
99 seh $r_A, t0
100# else 97# else
101 sll t0, $r_A, 24 98 sll t0, $r_A, 8
102 andi t1, $r_A, 0xff00 99 srl t1, $r_A, 8
103 sra t0, t0, 16 100 andi t0, t0, 0xff00
104 srl t1, t1, 8
105 or $r_A, t0, t1 101 or $r_A, t0, t1
106# endif 102# endif
107#endif 103#endif
@@ -115,7 +111,7 @@ FEXPORT(sk_load_byte_positive)
115 is_offset_in_header(1, byte) 111 is_offset_in_header(1, byte)
116 /* Offset within header boundaries */ 112 /* Offset within header boundaries */
117 PTR_ADDU t1, $r_skb_data, offset 113 PTR_ADDU t1, $r_skb_data, offset
118 lb $r_A, 0(t1) 114 lbu $r_A, 0(t1)
119 jr $r_ra 115 jr $r_ra
120 move $r_ret, zero 116 move $r_ret, zero
121 END(sk_load_byte) 117 END(sk_load_byte)
@@ -139,6 +135,11 @@ FEXPORT(sk_load_byte_positive)
139 * (void *to) is returned in r_s0 135 * (void *to) is returned in r_s0
140 * 136 *
141 */ 137 */
138#ifdef CONFIG_CPU_LITTLE_ENDIAN
139#define DS_OFFSET(SIZE) (4 * SZREG)
140#else
141#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
142#endif
142#define bpf_slow_path_common(SIZE) \ 143#define bpf_slow_path_common(SIZE) \
143 /* Quick check. Are we within reasonable boundaries? */ \ 144 /* Quick check. Are we within reasonable boundaries? */ \
144 LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \ 145 LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
@@ -150,7 +151,7 @@ FEXPORT(sk_load_byte_positive)
150 PTR_LA t0, skb_copy_bits; \ 151 PTR_LA t0, skb_copy_bits; \
151 PTR_S $r_ra, (5 * SZREG)($r_sp); \ 152 PTR_S $r_ra, (5 * SZREG)($r_sp); \
152 /* Assign low slot to a2 */ \ 153 /* Assign low slot to a2 */ \
153 move a2, $r_sp; \ 154 PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
154 jalr t0; \ 155 jalr t0; \
155 /* Reset our destination slot (DS but it's ok) */ \ 156 /* Reset our destination slot (DS but it's ok) */ \
156 INT_S zero, (4 * SZREG)($r_sp); \ 157 INT_S zero, (4 * SZREG)($r_sp); \
diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c
index ee117c4bc4a3..8037a4bd84fd 100644
--- a/arch/mips/ralink/reset.c
+++ b/arch/mips/ralink/reset.c
@@ -96,16 +96,9 @@ static void ralink_restart(char *command)
96 unreachable(); 96 unreachable();
97} 97}
98 98
99static void ralink_halt(void)
100{
101 local_irq_disable();
102 unreachable();
103}
104
105static int __init mips_reboot_setup(void) 99static int __init mips_reboot_setup(void)
106{ 100{
107 _machine_restart = ralink_restart; 101 _machine_restart = ralink_restart;
108 _machine_halt = ralink_halt;
109 102
110 return 0; 103 return 0;
111} 104}
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
index 37030409745c..586ca7ea3e7c 100644
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -186,7 +186,7 @@ static void __init rbtx4939_update_ioc_pen(void)
186 186
187#define RBTX4939_MAX_7SEGLEDS 8 187#define RBTX4939_MAX_7SEGLEDS 8
188 188
189#if IS_ENABLED(CONFIG_LEDS_CLASS) 189#if IS_BUILTIN(CONFIG_LEDS_CLASS)
190static u8 led_val[RBTX4939_MAX_7SEGLEDS]; 190static u8 led_val[RBTX4939_MAX_7SEGLEDS];
191struct rbtx4939_led_data { 191struct rbtx4939_led_data {
192 struct led_classdev cdev; 192 struct led_classdev cdev;
@@ -261,7 +261,7 @@ static inline void rbtx4939_led_setup(void)
261 261
262static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val) 262static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
263{ 263{
264#if IS_ENABLED(CONFIG_LEDS_CLASS) 264#if IS_BUILTIN(CONFIG_LEDS_CLASS)
265 unsigned long flags; 265 unsigned long flags;
266 local_irq_save(flags); 266 local_irq_save(flags);
267 /* bit7: reserved for LED class */ 267 /* bit7: reserved for LED class */
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
index b9920b1edd5a..70cef54dc40f 100644
--- a/arch/mn10300/mm/misalignment.c
+++ b/arch/mn10300/mm/misalignment.c
@@ -437,7 +437,7 @@ transfer_failed:
437 437
438 info.si_signo = SIGSEGV; 438 info.si_signo = SIGSEGV;
439 info.si_errno = 0; 439 info.si_errno = 0;
440 info.si_code = 0; 440 info.si_code = SEGV_MAPERR;
441 info.si_addr = (void *) regs->pc; 441 info.si_addr = (void *) regs->pc;
442 force_sig_info(SIGSEGV, &info, current); 442 force_sig_info(SIGSEGV, &info, current);
443 return; 443 return;
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 3d3f6062f49c..605a284922fb 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -302,12 +302,12 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
302 siginfo_t info; 302 siginfo_t info;
303 303
304 if (user_mode(regs)) { 304 if (user_mode(regs)) {
305 /* Send a SIGSEGV */ 305 /* Send a SIGBUS */
306 info.si_signo = SIGSEGV; 306 info.si_signo = SIGBUS;
307 info.si_errno = 0; 307 info.si_errno = 0;
308 /* info.si_code has been set above */ 308 info.si_code = BUS_ADRALN;
309 info.si_addr = (void *)address; 309 info.si_addr = (void __user *)address;
310 force_sig_info(SIGSEGV, &info, current); 310 force_sig_info(SIGBUS, &info, current);
311 } else { 311 } else {
312 printk("KERNEL: Unaligned Access 0x%.8lx\n", address); 312 printk("KERNEL: Unaligned Access 0x%.8lx\n", address);
313 show_registers(regs); 313 show_registers(regs);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index d2256fa97ea0..f7f89310a7a1 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -178,7 +178,7 @@ config PREFETCH
178 178
179config MLONGCALLS 179config MLONGCALLS
180 bool "Enable the -mlong-calls compiler option for big kernels" 180 bool "Enable the -mlong-calls compiler option for big kernels"
181 def_bool y if (!MODULES) 181 default y
182 depends on PA8X00 182 depends on PA8X00
183 help 183 help
184 If you configure the kernel to include many drivers built-in instead 184 If you configure the kernel to include many drivers built-in instead
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
new file mode 100644
index 000000000000..dbaaca84f27f
--- /dev/null
+++ b/arch/parisc/include/asm/barrier.h
@@ -0,0 +1,32 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_BARRIER_H
3#define __ASM_BARRIER_H
4
5#ifndef __ASSEMBLY__
6
7/* The synchronize caches instruction executes as a nop on systems in
8 which all memory references are performed in order. */
9#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
10
11#if defined(CONFIG_SMP)
12#define mb() do { synchronize_caches(); } while (0)
13#define rmb() mb()
14#define wmb() mb()
15#define dma_rmb() mb()
16#define dma_wmb() mb()
17#else
18#define mb() barrier()
19#define rmb() barrier()
20#define wmb() barrier()
21#define dma_rmb() barrier()
22#define dma_wmb() barrier()
23#endif
24
25#define __smp_mb() mb()
26#define __smp_rmb() mb()
27#define __smp_wmb() mb()
28
29#include <asm-generic/barrier.h>
30
31#endif /* !__ASSEMBLY__ */
32#endif /* __ASM_BARRIER_H */
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 49df14805a9b..ae5b64981d72 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -32,20 +32,11 @@ _futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
32} 32}
33 33
34static inline int 34static inline int
35futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) 35arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
36{ 36{
37 unsigned long int flags; 37 unsigned long int flags;
38 u32 val; 38 u32 val;
39 int op = (encoded_op >> 28) & 7;
40 int cmp = (encoded_op >> 24) & 15;
41 int oparg = (encoded_op << 8) >> 20;
42 int cmparg = (encoded_op << 20) >> 20;
43 int oldval = 0, ret; 39 int oldval = 0, ret;
44 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
45 oparg = 1 << oparg;
46
47 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
48 return -EFAULT;
49 40
50 pagefault_disable(); 41 pagefault_disable();
51 42
@@ -98,17 +89,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
98 89
99 pagefault_enable(); 90 pagefault_enable();
100 91
101 if (!ret) { 92 if (!ret)
102 switch (cmp) { 93 *oval = oldval;
103 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 94
104 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
105 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
106 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
107 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
108 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
109 default: ret = -ENOSYS;
110 }
111 }
112 return ret; 95 return ret;
113} 96}
114 97
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index dba508fe1683..4f7060ec6875 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -648,6 +648,10 @@ static int match_pci_device(struct device *dev, int index,
648 (modpath->mod == PCI_FUNC(devfn))); 648 (modpath->mod == PCI_FUNC(devfn)));
649 } 649 }
650 650
651 /* index might be out of bounds for bc[] */
652 if (index >= 6)
653 return 0;
654
651 id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5); 655 id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
652 return (modpath->bc[index] == id); 656 return (modpath->bc[index] == id);
653} 657}
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 5dc831955de5..13cb2461fef5 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -482,6 +482,8 @@
482 .macro tlb_unlock0 spc,tmp 482 .macro tlb_unlock0 spc,tmp
483#ifdef CONFIG_SMP 483#ifdef CONFIG_SMP
484 or,COND(=) %r0,\spc,%r0 484 or,COND(=) %r0,\spc,%r0
485 sync
486 or,COND(=) %r0,\spc,%r0
485 stw \spc,0(\tmp) 487 stw \spc,0(\tmp)
486#endif 488#endif
487 .endm 489 .endm
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 16073f472118..b3434a7fd3c9 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -354,6 +354,7 @@ ENDPROC(flush_data_cache_local)
354 .macro tlb_unlock la,flags,tmp 354 .macro tlb_unlock la,flags,tmp
355#ifdef CONFIG_SMP 355#ifdef CONFIG_SMP
356 ldi 1,\tmp 356 ldi 1,\tmp
357 sync
357 stw \tmp,0(\la) 358 stw \tmp,0(\la)
358 mtsm \flags 359 mtsm \flags
359#endif 360#endif
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 9f22195b90ed..f68eedc72484 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -631,6 +631,7 @@ cas_action:
631 sub,<> %r28, %r25, %r0 631 sub,<> %r28, %r25, %r0
6322: stw,ma %r24, 0(%r26) 6322: stw,ma %r24, 0(%r26)
633 /* Free lock */ 633 /* Free lock */
634 sync
634 stw,ma %r20, 0(%sr2,%r20) 635 stw,ma %r20, 0(%sr2,%r20)
635#if ENABLE_LWS_DEBUG 636#if ENABLE_LWS_DEBUG
636 /* Clear thread register indicator */ 637 /* Clear thread register indicator */
@@ -645,6 +646,7 @@ cas_action:
6453: 6463:
646 /* Error occurred on load or store */ 647 /* Error occurred on load or store */
647 /* Free lock */ 648 /* Free lock */
649 sync
648 stw %r20, 0(%sr2,%r20) 650 stw %r20, 0(%sr2,%r20)
649#if ENABLE_LWS_DEBUG 651#if ENABLE_LWS_DEBUG
650 stw %r0, 4(%sr2,%r20) 652 stw %r0, 4(%sr2,%r20)
@@ -846,6 +848,7 @@ cas2_action:
846 848
847cas2_end: 849cas2_end:
848 /* Free lock */ 850 /* Free lock */
851 sync
849 stw,ma %r20, 0(%sr2,%r20) 852 stw,ma %r20, 0(%sr2,%r20)
850 /* Enable interrupts */ 853 /* Enable interrupts */
851 ssm PSW_SM_I, %r0 854 ssm PSW_SM_I, %r0
@@ -856,6 +859,7 @@ cas2_end:
85622: 85922:
857 /* Error occurred on load or store */ 860 /* Error occurred on load or store */
858 /* Free lock */ 861 /* Free lock */
862 sync
859 stw %r20, 0(%sr2,%r20) 863 stw %r20, 0(%sr2,%r20)
860 ssm PSW_SM_I, %r0 864 ssm PSW_SM_I, %r0
861 ldo 1(%r0),%r28 865 ldo 1(%r0),%r28
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c628f47a9052..755eb1275dbb 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -129,13 +129,14 @@ config PPC
129 select IRQ_FORCED_THREADING 129 select IRQ_FORCED_THREADING
130 select HAVE_RCU_TABLE_FREE if SMP 130 select HAVE_RCU_TABLE_FREE if SMP
131 select HAVE_SYSCALL_TRACEPOINTS 131 select HAVE_SYSCALL_TRACEPOINTS
132 select HAVE_BPF_JIT 132 select HAVE_BPF_JIT if CPU_BIG_ENDIAN
133 select HAVE_ARCH_JUMP_LABEL 133 select HAVE_ARCH_JUMP_LABEL
134 select ARCH_HAVE_NMI_SAFE_CMPXCHG 134 select ARCH_HAVE_NMI_SAFE_CMPXCHG
135 select ARCH_HAS_GCOV_PROFILE_ALL 135 select ARCH_HAS_GCOV_PROFILE_ALL
136 select GENERIC_SMP_IDLE_THREAD 136 select GENERIC_SMP_IDLE_THREAD
137 select GENERIC_CMOS_UPDATE 137 select GENERIC_CMOS_UPDATE
138 select GENERIC_TIME_VSYSCALL_OLD 138 select GENERIC_TIME_VSYSCALL_OLD
139 select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64
139 select GENERIC_CLOCKEVENTS 140 select GENERIC_CLOCKEVENTS
140 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 141 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
141 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 142 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 0eca6efc0631..b9e16855a037 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -36,7 +36,8 @@
36 36
37#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0) 37#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
38 38
39#ifdef __SUBARCH_HAS_LWSYNC 39/* The sub-arch has lwsync */
40#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
40# define SMPWMB LWSYNC 41# define SMPWMB LWSYNC
41#else 42#else
42# define SMPWMB eieio 43# define SMPWMB eieio
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index a703452d67b6..555e22d5e07f 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -209,5 +209,11 @@ exc_##label##_book3e:
209 ori r3,r3,vector_offset@l; \ 209 ori r3,r3,vector_offset@l; \
210 mtspr SPRN_IVOR##vector_number,r3; 210 mtspr SPRN_IVOR##vector_number,r3;
211 211
212#define RFI_TO_KERNEL \
213 rfi
214
215#define RFI_TO_USER \
216 rfi
217
212#endif /* _ASM_POWERPC_EXCEPTION_64E_H */ 218#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
213 219
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 77f52b26dad6..9bddbec441b8 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -50,6 +50,59 @@
50#define EX_PPR 88 /* SMT thread status register (priority) */ 50#define EX_PPR 88 /* SMT thread status register (priority) */
51#define EX_CTR 96 51#define EX_CTR 96
52 52
53/*
54 * Macros for annotating the expected destination of (h)rfid
55 *
56 * The nop instructions allow us to insert one or more instructions to flush the
57 * L1-D cache when returning to userspace or a guest.
58 */
59#define RFI_FLUSH_SLOT \
60 RFI_FLUSH_FIXUP_SECTION; \
61 nop; \
62 nop; \
63 nop
64
65#define RFI_TO_KERNEL \
66 rfid
67
68#define RFI_TO_USER \
69 RFI_FLUSH_SLOT; \
70 rfid; \
71 b rfi_flush_fallback
72
73#define RFI_TO_USER_OR_KERNEL \
74 RFI_FLUSH_SLOT; \
75 rfid; \
76 b rfi_flush_fallback
77
78#define RFI_TO_GUEST \
79 RFI_FLUSH_SLOT; \
80 rfid; \
81 b rfi_flush_fallback
82
83#define HRFI_TO_KERNEL \
84 hrfid
85
86#define HRFI_TO_USER \
87 RFI_FLUSH_SLOT; \
88 hrfid; \
89 b hrfi_flush_fallback
90
91#define HRFI_TO_USER_OR_KERNEL \
92 RFI_FLUSH_SLOT; \
93 hrfid; \
94 b hrfi_flush_fallback
95
96#define HRFI_TO_GUEST \
97 RFI_FLUSH_SLOT; \
98 hrfid; \
99 b hrfi_flush_fallback
100
101#define HRFI_TO_UNKNOWN \
102 RFI_FLUSH_SLOT; \
103 hrfid; \
104 b hrfi_flush_fallback
105
53#ifdef CONFIG_RELOCATABLE 106#ifdef CONFIG_RELOCATABLE
54#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ 107#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
55 ld r12,PACAKBASE(r13); /* get high part of &label */ \ 108 ld r12,PACAKBASE(r13); /* get high part of &label */ \
@@ -191,7 +244,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
191 mtspr SPRN_##h##SRR0,r12; \ 244 mtspr SPRN_##h##SRR0,r12; \
192 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ 245 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
193 mtspr SPRN_##h##SRR1,r10; \ 246 mtspr SPRN_##h##SRR1,r10; \
194 h##rfid; \ 247 h##RFI_TO_KERNEL; \
195 b . /* prevent speculative execution */ 248 b . /* prevent speculative execution */
196#define EXCEPTION_PROLOG_PSERIES_1(label, h) \ 249#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
197 __EXCEPTION_PROLOG_PSERIES_1(label, h) 250 __EXCEPTION_PROLOG_PSERIES_1(label, h)
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 9a67a38bf7b9..7068bafbb2d6 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -184,4 +184,19 @@ label##3: \
184 FTR_ENTRY_OFFSET label##1b-label##3b; \ 184 FTR_ENTRY_OFFSET label##1b-label##3b; \
185 .popsection; 185 .popsection;
186 186
187#define RFI_FLUSH_FIXUP_SECTION \
188951: \
189 .pushsection __rfi_flush_fixup,"a"; \
190 .align 2; \
191952: \
192 FTR_ENTRY_OFFSET 951b-952b; \
193 .popsection;
194
195
196#ifndef __ASSEMBLY__
197
198extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
199
200#endif
201
187#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */ 202#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index e05808a328db..b0629249778b 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -47,12 +47,10 @@
47#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000) 47#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
48#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000) 48#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000)
49#define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000) 49#define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000)
50#define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000)
51#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000) 50#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
52#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) 51#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
53#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) 52#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
54#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) 53#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
55#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000)
56 54
57#ifndef __ASSEMBLY__ 55#ifndef __ASSEMBLY__
58 56
@@ -70,8 +68,7 @@ enum {
70 FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | 68 FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
71 FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, 69 FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
72 FW_FEATURE_PSERIES_ALWAYS = 0, 70 FW_FEATURE_PSERIES_ALWAYS = 0,
73 FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 | 71 FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL,
74 FW_FEATURE_OPALv3,
75 FW_FEATURE_POWERNV_ALWAYS = 0, 72 FW_FEATURE_POWERNV_ALWAYS = 0,
76 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, 73 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
77 FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, 74 FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 2a9cf845473b..f4c7467f7465 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -31,18 +31,10 @@
31 : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ 31 : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
32 : "cr0", "memory") 32 : "cr0", "memory")
33 33
34static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) 34static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
35 u32 __user *uaddr)
35{ 36{
36 int op = (encoded_op >> 28) & 7;
37 int cmp = (encoded_op >> 24) & 15;
38 int oparg = (encoded_op << 8) >> 20;
39 int cmparg = (encoded_op << 20) >> 20;
40 int oldval = 0, ret; 37 int oldval = 0, ret;
41 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
42 oparg = 1 << oparg;
43
44 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
45 return -EFAULT;
46 38
47 pagefault_disable(); 39 pagefault_disable();
48 40
@@ -68,17 +60,9 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
68 60
69 pagefault_enable(); 61 pagefault_enable();
70 62
71 if (!ret) { 63 if (!ret)
72 switch (cmp) { 64 *oval = oldval;
73 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 65
74 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
75 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
76 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
77 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
78 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
79 default: ret = -ENOSYS;
80 }
81 }
82 return ret; 66 return ret;
83} 67}
84 68
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 85bc8c0d257b..449bbb87c257 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -239,6 +239,7 @@
239#define H_GET_HCA_INFO 0x1B8 239#define H_GET_HCA_INFO 0x1B8
240#define H_GET_PERF_COUNT 0x1BC 240#define H_GET_PERF_COUNT 0x1BC
241#define H_MANAGE_TRACE 0x1C0 241#define H_MANAGE_TRACE 0x1C0
242#define H_GET_CPU_CHARACTERISTICS 0x1C8
242#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 243#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
243#define H_QUERY_INT_STATE 0x1E4 244#define H_QUERY_INT_STATE 0x1E4
244#define H_POLL_PENDING 0x1D8 245#define H_POLL_PENDING 0x1D8
@@ -285,7 +286,19 @@
285#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3 286#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
286#define H_SET_MODE_RESOURCE_LE 4 287#define H_SET_MODE_RESOURCE_LE 4
287 288
289/* H_GET_CPU_CHARACTERISTICS return values */
290#define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0
291#define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1
292#define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
293#define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
294#define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
295
296#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
297#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
298#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
299
288#ifndef __ASSEMBLY__ 300#ifndef __ASSEMBLY__
301#include <linux/types.h>
289 302
290/** 303/**
291 * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments 304 * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
@@ -423,6 +436,11 @@ extern long pseries_big_endian_exceptions(void);
423 436
424#endif /* CONFIG_PPC_PSERIES */ 437#endif /* CONFIG_PPC_PSERIES */
425 438
439struct h_cpu_char_result {
440 u64 character;
441 u64 behaviour;
442};
443
426#endif /* __ASSEMBLY__ */ 444#endif /* __ASSEMBLY__ */
427#endif /* __KERNEL__ */ 445#endif /* __KERNEL__ */
428#endif /* _ASM_POWERPC_HVCALL_H */ 446#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
index 744fd54de374..1bcc84903930 100644
--- a/arch/powerpc/include/asm/irq_work.h
+++ b/arch/powerpc/include/asm/irq_work.h
@@ -5,5 +5,6 @@ static inline bool arch_irq_work_has_interrupt(void)
5{ 5{
6 return true; 6 return true;
7} 7}
8extern void arch_irq_work_raise(void);
8 9
9#endif /* _ASM_POWERPC_IRQ_WORK_H */ 10#endif /* _ASM_POWERPC_IRQ_WORK_H */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 07a99e638449..bab3461115bb 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -21,6 +21,9 @@
21/* We calculate number of sg entries based on PAGE_SIZE */ 21/* We calculate number of sg entries based on PAGE_SIZE */
22#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) 22#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
23 23
24/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
25#define OPAL_BUSY_DELAY_MS 10
26
24/* /sys/firmware/opal */ 27/* /sys/firmware/opal */
25extern struct kobject *opal_kobj; 28extern struct kobject *opal_kobj;
26 29
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 70bd4381f8e6..45e2aefece16 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -192,6 +192,16 @@ struct paca_struct {
192#endif 192#endif
193 struct kvmppc_host_state kvm_hstate; 193 struct kvmppc_host_state kvm_hstate;
194#endif 194#endif
195#ifdef CONFIG_PPC_BOOK3S_64
196 /*
197 * rfi fallback flush must be in its own cacheline to prevent
198 * other paca data leaking into the L1d
199 */
200 u64 exrfi[13] __aligned(0x80);
201 void *rfi_flush_fallback_area;
202 u64 l1d_flush_congruence;
203 u64 l1d_flush_sets;
204#endif
195}; 205};
196 206
197extern struct paca_struct *paca; 207extern struct paca_struct *paca;
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 3140c19c448c..70b379ee6b7e 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -132,7 +132,19 @@ extern long long virt_phys_offset;
132#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) 132#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
133#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) 133#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
134#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 134#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
135
136#ifdef CONFIG_PPC_BOOK3S_64
137/*
138 * On hash the vmalloc and other regions alias to the kernel region when passed
139 * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
140 * return true for some vmalloc addresses, which is incorrect. So explicitly
141 * check that the address is in the kernel region.
142 */
143#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
144 pfn_valid(virt_to_pfn(kaddr)))
145#else
135#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) 146#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
147#endif
136 148
137/* 149/*
138 * On Book-E parts we need __va to parse the device tree and we can't 150 * On Book-E parts we need __va to parse the device tree and we can't
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 67859edbf8fd..6e05cb397a5c 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -323,4 +323,18 @@ static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawr
323 return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0); 323 return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0);
324} 324}
325 325
326static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
327{
328 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
329 long rc;
330
331 rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
332 if (rc == H_SUCCESS) {
333 p->character = retbuf[0];
334 p->behaviour = retbuf[1];
335 }
336
337 return rc;
338}
339
326#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ 340#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index dd0fc18d8103..160bb2311bbb 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -224,6 +224,16 @@ name: \
224 .globl name; \ 224 .globl name; \
225name: 225name:
226 226
227#define _KPROBE_TOC(name) \
228 .section ".kprobes.text","a"; \
229 .align 2 ; \
230 .type name,@function; \
231 .globl name; \
232name: \
2330: addis r2,r12,(.TOC.-0b)@ha; \
234 addi r2,r2,(.TOC.-0b)@l; \
235 .localentry name,.-name
236
227#define DOTSYM(a) a 237#define DOTSYM(a) a
228 238
229#else 239#else
@@ -261,6 +271,8 @@ name: \
261 .type GLUE(.,name),@function; \ 271 .type GLUE(.,name),@function; \
262GLUE(.,name): 272GLUE(.,name):
263 273
274#define _KPROBE_TOC(n) _KPROBE(n)
275
264#define DOTSYM(a) GLUE(.,a) 276#define DOTSYM(a) GLUE(.,a)
265 277
266#endif 278#endif
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index e9d384cbd021..7916b56f2e60 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -26,6 +26,19 @@ void initmem_init(void);
26void setup_panic(void); 26void setup_panic(void);
27#define ARCH_PANIC_TIMEOUT 180 27#define ARCH_PANIC_TIMEOUT 180
28 28
29void rfi_flush_enable(bool enable);
30
31/* These are bit flags */
32enum l1d_flush_type {
33 L1D_FLUSH_NONE = 0x1,
34 L1D_FLUSH_FALLBACK = 0x2,
35 L1D_FLUSH_ORI = 0x4,
36 L1D_FLUSH_MTTRIG = 0x8,
37};
38
39void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
40void do_rfi_flush_fixups(enum l1d_flush_type types);
41
29#endif /* !__ASSEMBLY__ */ 42#endif /* !__ASSEMBLY__ */
30 43
31#endif /* _ASM_POWERPC_SETUP_H */ 44#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index c50868681f9e..e8d6a842f4bb 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -5,10 +5,6 @@
5#include <linux/stringify.h> 5#include <linux/stringify.h>
6#include <asm/feature-fixups.h> 6#include <asm/feature-fixups.h>
7 7
8#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
9#define __SUBARCH_HAS_LWSYNC
10#endif
11
12#ifndef __ASSEMBLY__ 8#ifndef __ASSEMBLY__
13extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; 9extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
14extern void do_lwsync_fixups(unsigned long value, void *fixup_start, 10extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 40da69163d51..d92705e3a0c1 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -243,6 +243,10 @@ int main(void)
243#ifdef CONFIG_PPC_BOOK3S_64 243#ifdef CONFIG_PPC_BOOK3S_64
244 DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp)); 244 DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp));
245 DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce)); 245 DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce));
246 DEFINE(PACA_RFI_FLUSH_FALLBACK_AREA, offsetof(struct paca_struct, rfi_flush_fallback_area));
247 DEFINE(PACA_EXRFI, offsetof(struct paca_struct, exrfi));
248 DEFINE(PACA_L1D_FLUSH_CONGRUENCE, offsetof(struct paca_struct, l1d_flush_congruence));
249 DEFINE(PACA_L1D_FLUSH_SETS, offsetof(struct paca_struct, l1d_flush_sets));
246#endif 250#endif
247 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); 251 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
248 DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); 252 DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 9c9b7411b28b..55eb3b752ca0 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -27,6 +27,7 @@ _GLOBAL(__setup_cpu_power7)
27 beqlr 27 beqlr
28 li r0,0 28 li r0,0
29 mtspr SPRN_LPID,r0 29 mtspr SPRN_LPID,r0
30 mtspr SPRN_PCR,r0
30 mfspr r3,SPRN_LPCR 31 mfspr r3,SPRN_LPCR
31 bl __init_LPCR 32 bl __init_LPCR
32 bl __init_tlb_power7 33 bl __init_tlb_power7
@@ -40,6 +41,7 @@ _GLOBAL(__restore_cpu_power7)
40 beqlr 41 beqlr
41 li r0,0 42 li r0,0
42 mtspr SPRN_LPID,r0 43 mtspr SPRN_LPID,r0
44 mtspr SPRN_PCR,r0
43 mfspr r3,SPRN_LPCR 45 mfspr r3,SPRN_LPCR
44 bl __init_LPCR 46 bl __init_LPCR
45 bl __init_tlb_power7 47 bl __init_tlb_power7
@@ -55,6 +57,7 @@ _GLOBAL(__setup_cpu_power8)
55 beqlr 57 beqlr
56 li r0,0 58 li r0,0
57 mtspr SPRN_LPID,r0 59 mtspr SPRN_LPID,r0
60 mtspr SPRN_PCR,r0
58 mfspr r3,SPRN_LPCR 61 mfspr r3,SPRN_LPCR
59 ori r3, r3, LPCR_PECEDH 62 ori r3, r3, LPCR_PECEDH
60 bl __init_LPCR 63 bl __init_LPCR
@@ -74,6 +77,7 @@ _GLOBAL(__restore_cpu_power8)
74 beqlr 77 beqlr
75 li r0,0 78 li r0,0
76 mtspr SPRN_LPID,r0 79 mtspr SPRN_LPID,r0
80 mtspr SPRN_PCR,r0
77 mfspr r3,SPRN_LPCR 81 mfspr r3,SPRN_LPCR
78 ori r3, r3, LPCR_PECEDH 82 ori r3, r3, LPCR_PECEDH
79 bl __init_LPCR 83 bl __init_LPCR
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 98f81800e00c..304f07cfa262 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -788,7 +788,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
788 eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); 788 eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
789 789
790 /* PCI Command: 0x4 */ 790 /* PCI Command: 0x4 */
791 eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]); 791 eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
792 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
792 793
793 /* Check the PCIe link is ready */ 794 /* Check the PCIe link is ready */
794 eeh_bridge_check_link(edev); 795 eeh_bridge_check_link(edev);
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f6fd0332c3a2..59be96917369 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -36,6 +36,11 @@
36#include <asm/hw_irq.h> 36#include <asm/hw_irq.h>
37#include <asm/context_tracking.h> 37#include <asm/context_tracking.h>
38#include <asm/tm.h> 38#include <asm/tm.h>
39#ifdef CONFIG_PPC_BOOK3S
40#include <asm/exception-64s.h>
41#else
42#include <asm/exception-64e.h>
43#endif
39 44
40/* 45/*
41 * System calls. 46 * System calls.
@@ -225,13 +230,23 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
225 ACCOUNT_CPU_USER_EXIT(r11, r12) 230 ACCOUNT_CPU_USER_EXIT(r11, r12)
226 HMT_MEDIUM_LOW_HAS_PPR 231 HMT_MEDIUM_LOW_HAS_PPR
227 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ 232 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
233 ld r2,GPR2(r1)
234 ld r1,GPR1(r1)
235 mtlr r4
236 mtcr r5
237 mtspr SPRN_SRR0,r7
238 mtspr SPRN_SRR1,r8
239 RFI_TO_USER
240 b . /* prevent speculative execution */
241
242 /* exit to kernel */
2281: ld r2,GPR2(r1) 2431: ld r2,GPR2(r1)
229 ld r1,GPR1(r1) 244 ld r1,GPR1(r1)
230 mtlr r4 245 mtlr r4
231 mtcr r5 246 mtcr r5
232 mtspr SPRN_SRR0,r7 247 mtspr SPRN_SRR0,r7
233 mtspr SPRN_SRR1,r8 248 mtspr SPRN_SRR1,r8
234 RFI 249 RFI_TO_KERNEL
235 b . /* prevent speculative execution */ 250 b . /* prevent speculative execution */
236 251
237syscall_error: 252syscall_error:
@@ -353,8 +368,7 @@ tabort_syscall:
353 mtmsrd r10, 1 368 mtmsrd r10, 1
354 mtspr SPRN_SRR0, r11 369 mtspr SPRN_SRR0, r11
355 mtspr SPRN_SRR1, r12 370 mtspr SPRN_SRR1, r12
356 371 RFI_TO_USER
357 rfid
358 b . /* prevent speculative execution */ 372 b . /* prevent speculative execution */
359#endif 373#endif
360 374
@@ -560,6 +574,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
560 * actually hit this code path. 574 * actually hit this code path.
561 */ 575 */
562 576
577 isync
563 slbie r6 578 slbie r6
564 slbie r6 /* Workaround POWER5 < DD2.1 issue */ 579 slbie r6 /* Workaround POWER5 < DD2.1 issue */
565 slbmte r7,r0 580 slbmte r7,r0
@@ -887,7 +902,7 @@ BEGIN_FTR_SECTION
887END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 902END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
888 ACCOUNT_CPU_USER_EXIT(r2, r4) 903 ACCOUNT_CPU_USER_EXIT(r2, r4)
889 REST_GPR(13, r1) 904 REST_GPR(13, r1)
8901: 905
891 mtspr SPRN_SRR1,r3 906 mtspr SPRN_SRR1,r3
892 907
893 ld r2,_CCR(r1) 908 ld r2,_CCR(r1)
@@ -900,8 +915,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
900 ld r3,GPR3(r1) 915 ld r3,GPR3(r1)
901 ld r4,GPR4(r1) 916 ld r4,GPR4(r1)
902 ld r1,GPR1(r1) 917 ld r1,GPR1(r1)
918 RFI_TO_USER
919 b . /* prevent speculative execution */
903 920
904 rfid 9211: mtspr SPRN_SRR1,r3
922
923 ld r2,_CCR(r1)
924 mtcrf 0xFF,r2
925 ld r2,_NIP(r1)
926 mtspr SPRN_SRR0,r2
927
928 ld r0,GPR0(r1)
929 ld r2,GPR2(r1)
930 ld r3,GPR3(r1)
931 ld r4,GPR4(r1)
932 ld r1,GPR1(r1)
933 RFI_TO_KERNEL
905 b . /* prevent speculative execution */ 934 b . /* prevent speculative execution */
906 935
907#endif /* CONFIG_PPC_BOOK3E */ 936#endif /* CONFIG_PPC_BOOK3E */
@@ -1077,7 +1106,7 @@ _GLOBAL(enter_rtas)
1077 1106
1078 mtspr SPRN_SRR0,r5 1107 mtspr SPRN_SRR0,r5
1079 mtspr SPRN_SRR1,r6 1108 mtspr SPRN_SRR1,r6
1080 rfid 1109 RFI_TO_KERNEL
1081 b . /* prevent speculative execution */ 1110 b . /* prevent speculative execution */
1082 1111
1083rtas_return_loc: 1112rtas_return_loc:
@@ -1102,7 +1131,7 @@ rtas_return_loc:
1102 1131
1103 mtspr SPRN_SRR0,r3 1132 mtspr SPRN_SRR0,r3
1104 mtspr SPRN_SRR1,r4 1133 mtspr SPRN_SRR1,r4
1105 rfid 1134 RFI_TO_KERNEL
1106 b . /* prevent speculative execution */ 1135 b . /* prevent speculative execution */
1107 1136
1108 .align 3 1137 .align 3
@@ -1173,7 +1202,7 @@ _GLOBAL(enter_prom)
1173 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) 1202 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
1174 andc r11,r11,r12 1203 andc r11,r11,r12
1175 mtsrr1 r11 1204 mtsrr1 r11
1176 rfid 1205 RFI_TO_KERNEL
1177#endif /* CONFIG_PPC_BOOK3E */ 1206#endif /* CONFIG_PPC_BOOK3E */
1178 1207
11791: /* Return from OF */ 12081: /* Return from OF */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index b81ccc5fb32d..938a30fef031 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -46,7 +46,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
46 mtspr SPRN_SRR0,r10 ; \ 46 mtspr SPRN_SRR0,r10 ; \
47 ld r10,PACAKMSR(r13) ; \ 47 ld r10,PACAKMSR(r13) ; \
48 mtspr SPRN_SRR1,r10 ; \ 48 mtspr SPRN_SRR1,r10 ; \
49 rfid ; \ 49 RFI_TO_KERNEL ; \
50 b . ; /* prevent speculative execution */ 50 b . ; /* prevent speculative execution */
51 51
52#define SYSCALL_PSERIES_3 \ 52#define SYSCALL_PSERIES_3 \
@@ -54,7 +54,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
541: mfspr r12,SPRN_SRR1 ; \ 541: mfspr r12,SPRN_SRR1 ; \
55 xori r12,r12,MSR_LE ; \ 55 xori r12,r12,MSR_LE ; \
56 mtspr SPRN_SRR1,r12 ; \ 56 mtspr SPRN_SRR1,r12 ; \
57 rfid ; /* return to userspace */ \ 57 RFI_TO_USER ; /* return to userspace */ \
58 b . ; /* prevent speculative execution */ 58 b . ; /* prevent speculative execution */
59 59
60#if defined(CONFIG_RELOCATABLE) 60#if defined(CONFIG_RELOCATABLE)
@@ -507,7 +507,7 @@ BEGIN_FTR_SECTION
507 LOAD_HANDLER(r12, machine_check_handle_early) 507 LOAD_HANDLER(r12, machine_check_handle_early)
5081: mtspr SPRN_SRR0,r12 5081: mtspr SPRN_SRR0,r12
509 mtspr SPRN_SRR1,r11 509 mtspr SPRN_SRR1,r11
510 rfid 510 RFI_TO_KERNEL
511 b . /* prevent speculative execution */ 511 b . /* prevent speculative execution */
5122: 5122:
513 /* Stack overflow. Stay on emergency stack and panic. 513 /* Stack overflow. Stay on emergency stack and panic.
@@ -601,7 +601,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
601 ld r11,PACA_EXGEN+EX_R11(r13) 601 ld r11,PACA_EXGEN+EX_R11(r13)
602 ld r12,PACA_EXGEN+EX_R12(r13) 602 ld r12,PACA_EXGEN+EX_R12(r13)
603 ld r13,PACA_EXGEN+EX_R13(r13) 603 ld r13,PACA_EXGEN+EX_R13(r13)
604 HRFID 604 HRFI_TO_UNKNOWN
605 b . 605 b .
606#endif 606#endif
607 607
@@ -666,7 +666,7 @@ masked_##_H##interrupt: \
666 ld r10,PACA_EXGEN+EX_R10(r13); \ 666 ld r10,PACA_EXGEN+EX_R10(r13); \
667 ld r11,PACA_EXGEN+EX_R11(r13); \ 667 ld r11,PACA_EXGEN+EX_R11(r13); \
668 GET_SCRATCH0(r13); \ 668 GET_SCRATCH0(r13); \
669 ##_H##rfid; \ 669 ##_H##RFI_TO_KERNEL; \
670 b . 670 b .
671 671
672 MASKED_INTERRUPT() 672 MASKED_INTERRUPT()
@@ -756,7 +756,7 @@ kvmppc_skip_interrupt:
756 addi r13, r13, 4 756 addi r13, r13, 4
757 mtspr SPRN_SRR0, r13 757 mtspr SPRN_SRR0, r13
758 GET_SCRATCH0(r13) 758 GET_SCRATCH0(r13)
759 rfid 759 RFI_TO_KERNEL
760 b . 760 b .
761 761
762kvmppc_skip_Hinterrupt: 762kvmppc_skip_Hinterrupt:
@@ -768,7 +768,7 @@ kvmppc_skip_Hinterrupt:
768 addi r13, r13, 4 768 addi r13, r13, 4
769 mtspr SPRN_HSRR0, r13 769 mtspr SPRN_HSRR0, r13
770 GET_SCRATCH0(r13) 770 GET_SCRATCH0(r13)
771 hrfid 771 HRFI_TO_KERNEL
772 b . 772 b .
773#endif 773#endif
774 774
@@ -1439,7 +1439,7 @@ machine_check_handle_early:
1439 li r3,MSR_ME 1439 li r3,MSR_ME
1440 andc r10,r10,r3 /* Turn off MSR_ME */ 1440 andc r10,r10,r3 /* Turn off MSR_ME */
1441 mtspr SPRN_SRR1,r10 1441 mtspr SPRN_SRR1,r10
1442 rfid 1442 RFI_TO_KERNEL
1443 b . 1443 b .
14442: 14442:
1445 /* 1445 /*
@@ -1457,7 +1457,7 @@ machine_check_handle_early:
1457 */ 1457 */
1458 bl machine_check_queue_event 1458 bl machine_check_queue_event
1459 MACHINE_CHECK_HANDLER_WINDUP 1459 MACHINE_CHECK_HANDLER_WINDUP
1460 rfid 1460 RFI_TO_USER_OR_KERNEL
14619: 14619:
1462 /* Deliver the machine check to host kernel in V mode. */ 1462 /* Deliver the machine check to host kernel in V mode. */
1463 MACHINE_CHECK_HANDLER_WINDUP 1463 MACHINE_CHECK_HANDLER_WINDUP
@@ -1503,6 +1503,8 @@ slb_miss_realmode:
1503 1503
1504 andi. r10,r12,MSR_RI /* check for unrecoverable exception */ 1504 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1505 beq- 2f 1505 beq- 2f
1506 andi. r10,r12,MSR_PR /* check for user mode (PR != 0) */
1507 bne 1f
1506 1508
1507.machine push 1509.machine push
1508.machine "power4" 1510.machine "power4"
@@ -1516,7 +1518,23 @@ slb_miss_realmode:
1516 ld r11,PACA_EXSLB+EX_R11(r13) 1518 ld r11,PACA_EXSLB+EX_R11(r13)
1517 ld r12,PACA_EXSLB+EX_R12(r13) 1519 ld r12,PACA_EXSLB+EX_R12(r13)
1518 ld r13,PACA_EXSLB+EX_R13(r13) 1520 ld r13,PACA_EXSLB+EX_R13(r13)
1519 rfid 1521 RFI_TO_KERNEL
1522 b . /* prevent speculative execution */
1523
15241:
1525.machine push
1526.machine "power4"
1527 mtcrf 0x80,r9
1528 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1529.machine pop
1530
1531 RESTORE_PPR_PACA(PACA_EXSLB, r9)
1532 ld r9,PACA_EXSLB+EX_R9(r13)
1533 ld r10,PACA_EXSLB+EX_R10(r13)
1534 ld r11,PACA_EXSLB+EX_R11(r13)
1535 ld r12,PACA_EXSLB+EX_R12(r13)
1536 ld r13,PACA_EXSLB+EX_R13(r13)
1537 RFI_TO_USER
1520 b . /* prevent speculative execution */ 1538 b . /* prevent speculative execution */
1521 1539
15222: mfspr r11,SPRN_SRR0 15402: mfspr r11,SPRN_SRR0
@@ -1525,7 +1543,7 @@ slb_miss_realmode:
1525 mtspr SPRN_SRR0,r10 1543 mtspr SPRN_SRR0,r10
1526 ld r10,PACAKMSR(r13) 1544 ld r10,PACAKMSR(r13)
1527 mtspr SPRN_SRR1,r10 1545 mtspr SPRN_SRR1,r10
1528 rfid 1546 RFI_TO_KERNEL
1529 b . 1547 b .
1530 1548
1531unrecov_slb: 1549unrecov_slb:
@@ -1546,6 +1564,92 @@ power4_fixup_nap:
1546 blr 1564 blr
1547#endif 1565#endif
1548 1566
1567 .globl rfi_flush_fallback
1568rfi_flush_fallback:
1569 SET_SCRATCH0(r13);
1570 GET_PACA(r13);
1571 std r9,PACA_EXRFI+EX_R9(r13)
1572 std r10,PACA_EXRFI+EX_R10(r13)
1573 std r11,PACA_EXRFI+EX_R11(r13)
1574 std r12,PACA_EXRFI+EX_R12(r13)
1575 std r8,PACA_EXRFI+EX_R13(r13)
1576 mfctr r9
1577 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
1578 ld r11,PACA_L1D_FLUSH_SETS(r13)
1579 ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
1580 /*
1581 * The load adresses are at staggered offsets within cachelines,
1582 * which suits some pipelines better (on others it should not
1583 * hurt).
1584 */
1585 addi r12,r12,8
1586 mtctr r11
1587 DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
1588
1589 /* order ld/st prior to dcbt stop all streams with flushing */
1590 sync
15911: li r8,0
1592 .rept 8 /* 8-way set associative */
1593 ldx r11,r10,r8
1594 add r8,r8,r12
1595 xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
1596 add r8,r8,r11 // Add 0, this creates a dependency on the ldx
1597 .endr
1598 addi r10,r10,128 /* 128 byte cache line */
1599 bdnz 1b
1600
1601 mtctr r9
1602 ld r9,PACA_EXRFI+EX_R9(r13)
1603 ld r10,PACA_EXRFI+EX_R10(r13)
1604 ld r11,PACA_EXRFI+EX_R11(r13)
1605 ld r12,PACA_EXRFI+EX_R12(r13)
1606 ld r8,PACA_EXRFI+EX_R13(r13)
1607 GET_SCRATCH0(r13);
1608 rfid
1609
1610 .globl hrfi_flush_fallback
1611hrfi_flush_fallback:
1612 SET_SCRATCH0(r13);
1613 GET_PACA(r13);
1614 std r9,PACA_EXRFI+EX_R9(r13)
1615 std r10,PACA_EXRFI+EX_R10(r13)
1616 std r11,PACA_EXRFI+EX_R11(r13)
1617 std r12,PACA_EXRFI+EX_R12(r13)
1618 std r8,PACA_EXRFI+EX_R13(r13)
1619 mfctr r9
1620 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
1621 ld r11,PACA_L1D_FLUSH_SETS(r13)
1622 ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
1623 /*
1624 * The load adresses are at staggered offsets within cachelines,
1625 * which suits some pipelines better (on others it should not
1626 * hurt).
1627 */
1628 addi r12,r12,8
1629 mtctr r11
1630 DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
1631
1632 /* order ld/st prior to dcbt stop all streams with flushing */
1633 sync
16341: li r8,0
1635 .rept 8 /* 8-way set associative */
1636 ldx r11,r10,r8
1637 add r8,r8,r12
1638 xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
1639 add r8,r8,r11 // Add 0, this creates a dependency on the ldx
1640 .endr
1641 addi r10,r10,128 /* 128 byte cache line */
1642 bdnz 1b
1643
1644 mtctr r9
1645 ld r9,PACA_EXRFI+EX_R9(r13)
1646 ld r10,PACA_EXRFI+EX_R10(r13)
1647 ld r11,PACA_EXRFI+EX_R11(r13)
1648 ld r12,PACA_EXRFI+EX_R12(r13)
1649 ld r8,PACA_EXRFI+EX_R13(r13)
1650 GET_SCRATCH0(r13);
1651 hrfid
1652
1549/* 1653/*
1550 * Hash table stuff 1654 * Hash table stuff
1551 */ 1655 */
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 26d091a1a54c..791d4c3329c3 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -1025,6 +1025,9 @@ void fadump_cleanup(void)
1025 init_fadump_mem_struct(&fdm, 1025 init_fadump_mem_struct(&fdm,
1026 be64_to_cpu(fdm_active->cpu_state_data.destination_address)); 1026 be64_to_cpu(fdm_active->cpu_state_data.destination_address));
1027 fadump_invalidate_dump(&fdm); 1027 fadump_invalidate_dump(&fdm);
1028 } else if (fw_dump.dump_registered) {
1029 /* Un-register Firmware-assisted dump if it was registered. */
1030 fadump_unregister_dump(&fdm);
1028 } 1031 }
1029} 1032}
1030 1033
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 78c1eba4c04a..01e274e6907b 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -720,7 +720,7 @@ start_here:
720 tovirt(r6,r6) 720 tovirt(r6,r6)
721 lis r5, abatron_pteptrs@h 721 lis r5, abatron_pteptrs@h
722 ori r5, r5, abatron_pteptrs@l 722 ori r5, r5, abatron_pteptrs@l
723 stw r5, 0xf0(r0) /* Must match your Abatron config file */ 723 stw r5, 0xf0(0) /* Must match your Abatron config file */
724 tophys(r5,r5) 724 tophys(r5,r5)
725 stw r6, 0(r5) 725 stw r6, 0(r5)
726 726
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index fdf48785d3e9..56e4571e3a02 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -174,8 +174,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
174 if (cpu_has_feature(CPU_FTR_DAWR)) { 174 if (cpu_has_feature(CPU_FTR_DAWR)) {
175 length_max = 512 ; /* 64 doublewords */ 175 length_max = 512 ; /* 64 doublewords */
176 /* DAWR region can't cross 512 boundary */ 176 /* DAWR region can't cross 512 boundary */
177 if ((bp->attr.bp_addr >> 10) != 177 if ((bp->attr.bp_addr >> 9) !=
178 ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10)) 178 ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
179 return -EINVAL; 179 return -EINVAL;
180 } 180 }
181 if (info->len > 181 if (info->len >
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index db475d41b57a..107588295b39 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -66,7 +66,7 @@ PPC64_CACHES:
66 * flush all bytes from start through stop-1 inclusive 66 * flush all bytes from start through stop-1 inclusive
67 */ 67 */
68 68
69_KPROBE(flush_icache_range) 69_KPROBE_TOC(flush_icache_range)
70BEGIN_FTR_SECTION 70BEGIN_FTR_SECTION
71 PURGE_PREFETCHED_INS 71 PURGE_PREFETCHED_INS
72 blr 72 blr
@@ -117,7 +117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
117 * 117 *
118 * flush all bytes from start to stop-1 inclusive 118 * flush all bytes from start to stop-1 inclusive
119 */ 119 */
120_GLOBAL(flush_dcache_range) 120_GLOBAL_TOC(flush_dcache_range)
121 121
122/* 122/*
123 * Flush the data cache to memory 123 * Flush the data cache to memory
@@ -701,31 +701,3 @@ _GLOBAL(kexec_sequence)
701 li r5,0 701 li r5,0
702 blr /* image->start(physid, image->start, 0); */ 702 blr /* image->start(physid, image->start, 0); */
703#endif /* CONFIG_KEXEC */ 703#endif /* CONFIG_KEXEC */
704
705#ifdef CONFIG_MODULES
706#if defined(_CALL_ELF) && _CALL_ELF == 2
707
708#ifdef CONFIG_MODVERSIONS
709.weak __crc_TOC.
710.section "___kcrctab+TOC.","a"
711.globl __kcrctab_TOC.
712__kcrctab_TOC.:
713 .llong __crc_TOC.
714#endif
715
716/*
717 * Export a fake .TOC. since both modpost and depmod will complain otherwise.
718 * Both modpost and depmod strip the leading . so we do the same here.
719 */
720.section "__ksymtab_strings","a"
721__kstrtab_TOC.:
722 .asciz "TOC."
723
724.section "___ksymtab+TOC.","a"
725/* This symbol name is important: it's used by modpost to find exported syms */
726.globl __ksymtab_TOC.
727__ksymtab_TOC.:
728 .llong 0 /* .value */
729 .llong __kstrtab_TOC.
730#endif /* ELFv2 */
731#endif /* MODULES */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index e4f7d4eed20c..08b7a40de5f8 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -326,7 +326,10 @@ static void dedotify_versions(struct modversion_info *vers,
326 } 326 }
327} 327}
328 328
329/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */ 329/*
330 * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC.
331 * seem to be defined (value set later).
332 */
330static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) 333static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
331{ 334{
332 unsigned int i; 335 unsigned int i;
@@ -334,8 +337,11 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
334 for (i = 1; i < numsyms; i++) { 337 for (i = 1; i < numsyms; i++) {
335 if (syms[i].st_shndx == SHN_UNDEF) { 338 if (syms[i].st_shndx == SHN_UNDEF) {
336 char *name = strtab + syms[i].st_name; 339 char *name = strtab + syms[i].st_name;
337 if (name[0] == '.') 340 if (name[0] == '.') {
341 if (strcmp(name+1, "TOC.") == 0)
342 syms[i].st_shndx = SHN_ABS;
338 syms[i].st_name++; 343 syms[i].st_name++;
344 }
339 } 345 }
340 } 346 }
341} 347}
@@ -351,7 +357,7 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
351 numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym); 357 numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);
352 358
353 for (i = 1; i < numsyms; i++) { 359 for (i = 1; i < numsyms; i++) {
354 if (syms[i].st_shndx == SHN_UNDEF 360 if (syms[i].st_shndx == SHN_ABS
355 && strcmp(strtab + syms[i].st_name, "TOC.") == 0) 361 && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
356 return &syms[i]; 362 return &syms[i];
357 } 363 }
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 1f7930037cb7..d9e41b77dd13 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -11,6 +11,7 @@
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/bootmem.h> 13#include <linux/bootmem.h>
14#include <linux/syscalls.h>
14#include <linux/irq.h> 15#include <linux/irq.h>
15#include <linux/list.h> 16#include <linux/list.h>
16#include <linux/of.h> 17#include <linux/of.h>
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index cf788d7d7e56..a9b10812cbfd 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -209,7 +209,8 @@ void enable_kernel_vsx(void)
209 WARN_ON(preemptible()); 209 WARN_ON(preemptible());
210 210
211#ifdef CONFIG_SMP 211#ifdef CONFIG_SMP
212 if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) 212 if (current->thread.regs &&
213 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)))
213 giveup_vsx(current); 214 giveup_vsx(current);
214 else 215 else
215 giveup_vsx(NULL); /* just enable vsx for kernel - force */ 216 giveup_vsx(NULL); /* just enable vsx for kernel - force */
@@ -231,7 +232,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
231{ 232{
232 if (tsk->thread.regs) { 233 if (tsk->thread.regs) {
233 preempt_disable(); 234 preempt_disable();
234 if (tsk->thread.regs->msr & MSR_VSX) { 235 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
235#ifdef CONFIG_SMP 236#ifdef CONFIG_SMP
236 BUG_ON(tsk != current); 237 BUG_ON(tsk != current);
237#endif 238#endif
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index b38fd081b222..3b63655efa3c 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1004,6 +1004,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
1004 /* Create a new breakpoint request if one doesn't exist already */ 1004 /* Create a new breakpoint request if one doesn't exist already */
1005 hw_breakpoint_init(&attr); 1005 hw_breakpoint_init(&attr);
1006 attr.bp_addr = hw_brk.address; 1006 attr.bp_addr = hw_brk.address;
1007 attr.bp_len = 8;
1007 arch_bp_generic_fields(hw_brk.type, 1008 arch_bp_generic_fields(hw_brk.type,
1008 &attr.bp_type); 1009 &attr.bp_type);
1009 1010
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 44c8d03558ac..318224784114 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -217,14 +217,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
217 unsigned short maj; 217 unsigned short maj;
218 unsigned short min; 218 unsigned short min;
219 219
220 /* We only show online cpus: disable preempt (overzealous, I
221 * knew) to prevent cpu going down. */
222 preempt_disable();
223 if (!cpu_online(cpu_id)) {
224 preempt_enable();
225 return 0;
226 }
227
228#ifdef CONFIG_SMP 220#ifdef CONFIG_SMP
229 pvr = per_cpu(cpu_pvr, cpu_id); 221 pvr = per_cpu(cpu_pvr, cpu_id);
230#else 222#else
@@ -329,9 +321,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
329#ifdef CONFIG_SMP 321#ifdef CONFIG_SMP
330 seq_printf(m, "\n"); 322 seq_printf(m, "\n");
331#endif 323#endif
332
333 preempt_enable();
334
335 /* If this is the last cpu, print the summary */ 324 /* If this is the last cpu, print the summary */
336 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) 325 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
337 show_cpuinfo_summary(m); 326 show_cpuinfo_summary(m);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a20823210ac0..9eb469bed22b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -38,6 +38,7 @@
38#include <linux/hugetlb.h> 38#include <linux/hugetlb.h>
39#include <linux/memory.h> 39#include <linux/memory.h>
40#include <linux/nmi.h> 40#include <linux/nmi.h>
41#include <linux/debugfs.h>
41 42
42#include <asm/io.h> 43#include <asm/io.h>
43#include <asm/kdump.h> 44#include <asm/kdump.h>
@@ -835,3 +836,141 @@ static int __init disable_hardlockup_detector(void)
835} 836}
836early_initcall(disable_hardlockup_detector); 837early_initcall(disable_hardlockup_detector);
837#endif 838#endif
839
840#ifdef CONFIG_PPC_BOOK3S_64
841static enum l1d_flush_type enabled_flush_types;
842static void *l1d_flush_fallback_area;
843static bool no_rfi_flush;
844bool rfi_flush;
845
846static int __init handle_no_rfi_flush(char *p)
847{
848 pr_info("rfi-flush: disabled on command line.");
849 no_rfi_flush = true;
850 return 0;
851}
852early_param("no_rfi_flush", handle_no_rfi_flush);
853
854/*
855 * The RFI flush is not KPTI, but because users will see doco that says to use
856 * nopti we hijack that option here to also disable the RFI flush.
857 */
858static int __init handle_no_pti(char *p)
859{
860 pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
861 handle_no_rfi_flush(NULL);
862 return 0;
863}
864early_param("nopti", handle_no_pti);
865
866static void do_nothing(void *unused)
867{
868 /*
869 * We don't need to do the flush explicitly, just enter+exit kernel is
870 * sufficient, the RFI exit handlers will do the right thing.
871 */
872}
873
874void rfi_flush_enable(bool enable)
875{
876 if (rfi_flush == enable)
877 return;
878
879 if (enable) {
880 do_rfi_flush_fixups(enabled_flush_types);
881 on_each_cpu(do_nothing, NULL, 1);
882 } else
883 do_rfi_flush_fixups(L1D_FLUSH_NONE);
884
885 rfi_flush = enable;
886}
887
888static void init_fallback_flush(void)
889{
890 u64 l1d_size, limit;
891 int cpu;
892
893 l1d_size = ppc64_caches.dsize;
894 limit = min(safe_stack_limit(), ppc64_rma_size);
895
896 /*
897 * Align to L1d size, and size it at 2x L1d size, to catch possible
898 * hardware prefetch runoff. We don't have a recipe for load patterns to
899 * reliably avoid the prefetcher.
900 */
901 l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
902 memset(l1d_flush_fallback_area, 0, l1d_size * 2);
903
904 for_each_possible_cpu(cpu) {
905 /*
906 * The fallback flush is currently coded for 8-way
907 * associativity. Different associativity is possible, but it
908 * will be treated as 8-way and may not evict the lines as
909 * effectively.
910 *
911 * 128 byte lines are mandatory.
912 */
913 u64 c = l1d_size / 8;
914
915 paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
916 paca[cpu].l1d_flush_congruence = c;
917 paca[cpu].l1d_flush_sets = c / 128;
918 }
919}
920
921void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
922{
923 if (types & L1D_FLUSH_FALLBACK) {
924 pr_info("rfi-flush: Using fallback displacement flush\n");
925 init_fallback_flush();
926 }
927
928 if (types & L1D_FLUSH_ORI)
929 pr_info("rfi-flush: Using ori type flush\n");
930
931 if (types & L1D_FLUSH_MTTRIG)
932 pr_info("rfi-flush: Using mttrig type flush\n");
933
934 enabled_flush_types = types;
935
936 if (!no_rfi_flush)
937 rfi_flush_enable(enable);
938}
939
940#ifdef CONFIG_DEBUG_FS
941static int rfi_flush_set(void *data, u64 val)
942{
943 if (val == 1)
944 rfi_flush_enable(true);
945 else if (val == 0)
946 rfi_flush_enable(false);
947 else
948 return -EINVAL;
949
950 return 0;
951}
952
953static int rfi_flush_get(void *data, u64 *val)
954{
955 *val = rfi_flush ? 1 : 0;
956 return 0;
957}
958
959DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
960
961static __init int rfi_flush_debugfs_init(void)
962{
963 debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
964 return 0;
965}
966device_initcall(rfi_flush_debugfs_init);
967#endif
968
969ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
970{
971 if (rfi_flush)
972 return sprintf(buf, "Mitigation: RFI Flush\n");
973
974 return sprintf(buf, "Vulnerable\n");
975}
976#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1be1092c7204..9baba9576e99 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -686,12 +686,20 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
686static void start_cpu_decrementer(void) 686static void start_cpu_decrementer(void)
687{ 687{
688#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 688#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
689 unsigned int tcr;
690
689 /* Clear any pending timer interrupts */ 691 /* Clear any pending timer interrupts */
690 mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS); 692 mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
691 693
692 /* Enable decrementer interrupt */ 694 tcr = mfspr(SPRN_TCR);
693 mtspr(SPRN_TCR, TCR_DIE); 695 /*
694#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */ 696 * The watchdog may have already been enabled by u-boot. So leave
697 * TRC[WP] (Watchdog Period) alone.
698 */
699 tcr &= TCR_WP_MASK; /* Clear all bits except for TCR[WP] */
700 tcr |= TCR_DIE; /* Enable decrementer */
701 mtspr(SPRN_TCR, tcr);
702#endif
695} 703}
696 704
697void __init generic_calibrate_decr(void) 705void __init generic_calibrate_decr(void)
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index d41fd0af8980..072a23a17350 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -72,6 +72,15 @@ SECTIONS
72 /* Read-only data */ 72 /* Read-only data */
73 RODATA 73 RODATA
74 74
75#ifdef CONFIG_PPC64
76 . = ALIGN(8);
77 __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
78 __start___rfi_flush_fixup = .;
79 *(__rfi_flush_fixup)
80 __stop___rfi_flush_fixup = .;
81 }
82#endif
83
75 EXCEPTION_TABLE(0) 84 EXCEPTION_TABLE(0)
76 85
77 NOTES :kernel :notes 86 NOTES :kernel :notes
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 79ad35abd196..ddec22828673 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -177,12 +177,15 @@ map_again:
177 ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags, 177 ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
178 hpsize, hpsize, MMU_SEGSIZE_256M); 178 hpsize, hpsize, MMU_SEGSIZE_256M);
179 179
180 if (ret < 0) { 180 if (ret == -1) {
181 /* If we couldn't map a primary PTE, try a secondary */ 181 /* If we couldn't map a primary PTE, try a secondary */
182 hash = ~hash; 182 hash = ~hash;
183 vflags ^= HPTE_V_SECONDARY; 183 vflags ^= HPTE_V_SECONDARY;
184 attempt++; 184 attempt++;
185 goto map_again; 185 goto map_again;
186 } else if (ret < 0) {
187 r = -EIO;
188 goto out_unlock;
186 } else { 189 } else {
187 trace_kvm_book3s_64_mmu_map(rflags, hpteg, 190 trace_kvm_book3s_64_mmu_map(rflags, hpteg,
188 vpn, hpaddr, orig_pte); 191 vpn, hpaddr, orig_pte);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 428563b195c3..767ac1572c02 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3002,15 +3002,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
3002 goto up_out; 3002 goto up_out;
3003 3003
3004 psize = vma_kernel_pagesize(vma); 3004 psize = vma_kernel_pagesize(vma);
3005 porder = __ilog2(psize);
3006 3005
3007 up_read(&current->mm->mmap_sem); 3006 up_read(&current->mm->mmap_sem);
3008 3007
3009 /* We can handle 4k, 64k or 16M pages in the VRMA */ 3008 /* We can handle 4k, 64k or 16M pages in the VRMA */
3010 err = -EINVAL; 3009 if (psize >= 0x1000000)
3011 if (!(psize == 0x1000 || psize == 0x10000 || 3010 psize = 0x1000000;
3012 psize == 0x1000000)) 3011 else if (psize >= 0x10000)
3013 goto out_srcu; 3012 psize = 0x10000;
3013 else
3014 psize = 0x1000;
3015 porder = __ilog2(psize);
3014 3016
3015 /* Update VRMASD field in the LPCR */ 3017 /* Update VRMASD field in the LPCR */
3016 senc = slb_pgsize_encoding(psize); 3018 senc = slb_pgsize_encoding(psize);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index ffab9269bfe4..4463718ae614 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -64,7 +64,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
64 mtmsrd r0,1 /* clear RI in MSR */ 64 mtmsrd r0,1 /* clear RI in MSR */
65 mtsrr0 r5 65 mtsrr0 r5
66 mtsrr1 r6 66 mtsrr1 r6
67 RFI 67 RFI_TO_KERNEL
68 68
69kvmppc_call_hv_entry: 69kvmppc_call_hv_entry:
70 ld r4, HSTATE_KVM_VCPU(r13) 70 ld r4, HSTATE_KVM_VCPU(r13)
@@ -170,7 +170,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
170 mtsrr0 r8 170 mtsrr0 r8
171 mtsrr1 r7 171 mtsrr1 r7
172 beq cr1, 13f /* machine check */ 172 beq cr1, 13f /* machine check */
173 RFI 173 RFI_TO_KERNEL
174 174
175 /* On POWER7, we have external interrupts set to use HSRR0/1 */ 175 /* On POWER7, we have external interrupts set to use HSRR0/1 */
17611: mtspr SPRN_HSRR0, r8 17611: mtspr SPRN_HSRR0, r8
@@ -965,8 +965,7 @@ BEGIN_FTR_SECTION
965END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 965END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
966 ld r0, VCPU_GPR(R0)(r4) 966 ld r0, VCPU_GPR(R0)(r4)
967 ld r4, VCPU_GPR(R4)(r4) 967 ld r4, VCPU_GPR(R4)(r4)
968 968 HRFI_TO_GUEST
969 hrfid
970 b . 969 b .
971 970
972secondary_too_late: 971secondary_too_late:
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 64891b081ad5..81313844d81c 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -625,7 +625,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
625 kvmppc_mmu_unmap_page(vcpu, &pte); 625 kvmppc_mmu_unmap_page(vcpu, &pte);
626 } 626 }
627 /* The guest's PTE is not mapped yet. Map on the host */ 627 /* The guest's PTE is not mapped yet. Map on the host */
628 kvmppc_mmu_map_page(vcpu, &pte, iswrite); 628 if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
629 /* Exit KVM if mapping failed */
630 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
631 return RESUME_HOST;
632 }
629 if (data) 633 if (data)
630 vcpu->stat.sp_storage++; 634 vcpu->stat.sp_storage++;
631 else if (vcpu->arch.mmu.is_dcbz32(vcpu) && 635 else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index f2c75a1e0536..0d91baf63fed 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
50 pteg_addr = get_pteg_addr(vcpu, pte_index); 50 pteg_addr = get_pteg_addr(vcpu, pte_index);
51 51
52 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 52 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
53 copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); 53 ret = H_FUNCTION;
54 if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
55 goto done;
54 hpte = pteg; 56 hpte = pteg;
55 57
56 ret = H_PTEG_FULL; 58 ret = H_PTEG_FULL;
@@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
71 hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); 73 hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
72 hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); 74 hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
73 pteg_addr += i * HPTE_SIZE; 75 pteg_addr += i * HPTE_SIZE;
74 copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE); 76 ret = H_FUNCTION;
77 if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
78 goto done;
75 kvmppc_set_gpr(vcpu, 4, pte_index | i); 79 kvmppc_set_gpr(vcpu, 4, pte_index | i);
76 ret = H_SUCCESS; 80 ret = H_SUCCESS;
77 81
@@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
93 97
94 pteg = get_pteg_addr(vcpu, pte_index); 98 pteg = get_pteg_addr(vcpu, pte_index);
95 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 99 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
96 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 100 ret = H_FUNCTION;
101 if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
102 goto done;
97 pte[0] = be64_to_cpu((__force __be64)pte[0]); 103 pte[0] = be64_to_cpu((__force __be64)pte[0]);
98 pte[1] = be64_to_cpu((__force __be64)pte[1]); 104 pte[1] = be64_to_cpu((__force __be64)pte[1]);
99 105
@@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
103 ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) 109 ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
104 goto done; 110 goto done;
105 111
106 copy_to_user((void __user *)pteg, &v, sizeof(v)); 112 ret = H_FUNCTION;
113 if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
114 goto done;
107 115
108 rb = compute_tlbie_rb(pte[0], pte[1], pte_index); 116 rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
109 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); 117 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
@@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
171 } 179 }
172 180
173 pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); 181 pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
174 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 182 if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
183 ret = H_FUNCTION;
184 break;
185 }
175 pte[0] = be64_to_cpu((__force __be64)pte[0]); 186 pte[0] = be64_to_cpu((__force __be64)pte[0]);
176 pte[1] = be64_to_cpu((__force __be64)pte[1]); 187 pte[1] = be64_to_cpu((__force __be64)pte[1]);
177 188
@@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
184 tsh |= H_BULK_REMOVE_NOT_FOUND; 195 tsh |= H_BULK_REMOVE_NOT_FOUND;
185 } else { 196 } else {
186 /* Splat the pteg in (userland) hpt */ 197 /* Splat the pteg in (userland) hpt */
187 copy_to_user((void __user *)pteg, &v, sizeof(v)); 198 if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
199 ret = H_FUNCTION;
200 break;
201 }
188 202
189 rb = compute_tlbie_rb(pte[0], pte[1], 203 rb = compute_tlbie_rb(pte[0], pte[1],
190 tsh & H_BULK_REMOVE_PTEX); 204 tsh & H_BULK_REMOVE_PTEX);
@@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
211 225
212 pteg = get_pteg_addr(vcpu, pte_index); 226 pteg = get_pteg_addr(vcpu, pte_index);
213 mutex_lock(&vcpu->kvm->arch.hpt_mutex); 227 mutex_lock(&vcpu->kvm->arch.hpt_mutex);
214 copy_from_user(pte, (void __user *)pteg, sizeof(pte)); 228 ret = H_FUNCTION;
229 if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
230 goto done;
215 pte[0] = be64_to_cpu((__force __be64)pte[0]); 231 pte[0] = be64_to_cpu((__force __be64)pte[0]);
216 pte[1] = be64_to_cpu((__force __be64)pte[1]); 232 pte[1] = be64_to_cpu((__force __be64)pte[1]);
217 233
@@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
234 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); 250 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
235 pte[0] = (__force u64)cpu_to_be64(pte[0]); 251 pte[0] = (__force u64)cpu_to_be64(pte[0]);
236 pte[1] = (__force u64)cpu_to_be64(pte[1]); 252 pte[1] = (__force u64)cpu_to_be64(pte[1]);
237 copy_to_user((void __user *)pteg, pte, sizeof(pte)); 253 ret = H_FUNCTION;
254 if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
255 goto done;
238 ret = H_SUCCESS; 256 ret = H_SUCCESS;
239 257
240 done: 258 done:
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 16c4d88ba27d..a328f99a887c 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -46,6 +46,9 @@
46 46
47#define FUNC(name) name 47#define FUNC(name) name
48 48
49#define RFI_TO_KERNEL RFI
50#define RFI_TO_GUEST RFI
51
49.macro INTERRUPT_TRAMPOLINE intno 52.macro INTERRUPT_TRAMPOLINE intno
50 53
51.global kvmppc_trampoline_\intno 54.global kvmppc_trampoline_\intno
@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins:
141 GET_SCRATCH0(r13) 144 GET_SCRATCH0(r13)
142 145
143 /* And get back into the code */ 146 /* And get back into the code */
144 RFI 147 RFI_TO_KERNEL
145#endif 148#endif
146 149
147/* 150/*
@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline)
164 ori r5, r5, MSR_EE 167 ori r5, r5, MSR_EE
165 mtsrr0 r7 168 mtsrr0 r7
166 mtsrr1 r6 169 mtsrr1 r6
167 RFI 170 RFI_TO_KERNEL
168 171
169#include "book3s_segment.S" 172#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index ca8f174289bb..7c982956d709 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -156,7 +156,7 @@ no_dcbz32_on:
156 PPC_LL r9, SVCPU_R9(r3) 156 PPC_LL r9, SVCPU_R9(r3)
157 PPC_LL r3, (SVCPU_R3)(r3) 157 PPC_LL r3, (SVCPU_R3)(r3)
158 158
159 RFI 159 RFI_TO_GUEST
160kvmppc_handler_trampoline_enter_end: 160kvmppc_handler_trampoline_enter_end:
161 161
162 162
@@ -389,5 +389,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
389 cmpwi r12, BOOK3S_INTERRUPT_DOORBELL 389 cmpwi r12, BOOK3S_INTERRUPT_DOORBELL
390 beqa BOOK3S_INTERRUPT_DOORBELL 390 beqa BOOK3S_INTERRUPT_DOORBELL
391 391
392 RFI 392 RFI_TO_KERNEL
393kvmppc_handler_trampoline_exit_end: 393kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 7ce3870d7ddd..3af014684872 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -20,6 +20,7 @@
20#include <asm/code-patching.h> 20#include <asm/code-patching.h>
21#include <asm/page.h> 21#include <asm/page.h>
22#include <asm/sections.h> 22#include <asm/sections.h>
23#include <asm/setup.h>
23 24
24 25
25struct fixup_entry { 26struct fixup_entry {
@@ -52,7 +53,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
52 unsigned int *target = (unsigned int *)branch_target(src); 53 unsigned int *target = (unsigned int *)branch_target(src);
53 54
54 /* Branch within the section doesn't need translating */ 55 /* Branch within the section doesn't need translating */
55 if (target < alt_start || target >= alt_end) { 56 if (target < alt_start || target > alt_end) {
56 instr = translate_branch(dest, src); 57 instr = translate_branch(dest, src);
57 if (!instr) 58 if (!instr)
58 return 1; 59 return 1;
@@ -113,6 +114,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
113 } 114 }
114} 115}
115 116
117#ifdef CONFIG_PPC_BOOK3S_64
118void do_rfi_flush_fixups(enum l1d_flush_type types)
119{
120 unsigned int instrs[3], *dest;
121 long *start, *end;
122 int i;
123
124 start = PTRRELOC(&__start___rfi_flush_fixup),
125 end = PTRRELOC(&__stop___rfi_flush_fixup);
126
127 instrs[0] = 0x60000000; /* nop */
128 instrs[1] = 0x60000000; /* nop */
129 instrs[2] = 0x60000000; /* nop */
130
131 if (types & L1D_FLUSH_FALLBACK)
132 /* b .+16 to fallback flush */
133 instrs[0] = 0x48000010;
134
135 i = 0;
136 if (types & L1D_FLUSH_ORI) {
137 instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
138 instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
139 }
140
141 if (types & L1D_FLUSH_MTTRIG)
142 instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
143
144 for (i = 0; start < end; start++, i++) {
145 dest = (void *)start + *start;
146
147 pr_devel("patching dest %lx\n", (unsigned long)dest);
148
149 patch_instruction(dest, instrs[0]);
150 patch_instruction(dest + 1, instrs[1]);
151 patch_instruction(dest + 2, instrs[2]);
152 }
153
154 printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
155}
156#endif /* CONFIG_PPC_BOOK3S_64 */
157
116void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) 158void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
117{ 159{
118 long *start, *end; 160 long *start, *end;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a67c6d781c52..d154e333f76b 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -294,7 +294,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
294 * can result in fault, which will cause a deadlock when called with 294 * can result in fault, which will cause a deadlock when called with
295 * mmap_sem held 295 * mmap_sem held
296 */ 296 */
297 if (user_mode(regs)) 297 if (!is_exec && user_mode(regs))
298 store_update_sp = store_updates_sp(regs); 298 store_update_sp = store_updates_sp(regs);
299 299
300 if (user_mode(regs)) 300 if (user_mode(regs))
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 669a15e7fa76..3c4faa4c2742 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -551,7 +551,7 @@ static int numa_setup_cpu(unsigned long lcpu)
551 nid = of_node_to_nid_single(cpu); 551 nid = of_node_to_nid_single(cpu);
552 552
553out_present: 553out_present:
554 if (nid < 0 || !node_online(nid)) 554 if (nid < 0 || !node_possible(nid))
555 nid = first_online_node; 555 nid = first_online_node;
556 556
557 map_cpu_to_node(lcpu, nid); 557 map_cpu_to_node(lcpu, nid);
@@ -951,6 +951,32 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
951 NODE_DATA(nid)->node_spanned_pages = spanned_pages; 951 NODE_DATA(nid)->node_spanned_pages = spanned_pages;
952} 952}
953 953
954static void __init find_possible_nodes(void)
955{
956 struct device_node *rtas;
957 u32 numnodes, i;
958
959 if (min_common_depth <= 0)
960 return;
961
962 rtas = of_find_node_by_path("/rtas");
963 if (!rtas)
964 return;
965
966 if (of_property_read_u32_index(rtas,
967 "ibm,max-associativity-domains",
968 min_common_depth, &numnodes))
969 goto out;
970
971 for (i = 0; i < numnodes; i++) {
972 if (!node_possible(i))
973 node_set(i, node_possible_map);
974 }
975
976out:
977 of_node_put(rtas);
978}
979
954void __init initmem_init(void) 980void __init initmem_init(void)
955{ 981{
956 int nid, cpu; 982 int nid, cpu;
@@ -966,12 +992,15 @@ void __init initmem_init(void)
966 memblock_dump_all(); 992 memblock_dump_all();
967 993
968 /* 994 /*
969 * Reduce the possible NUMA nodes to the online NUMA nodes, 995 * Modify the set of possible NUMA nodes to reflect information
970 * since we do not support node hotplug. This ensures that we 996 * available about the set of online nodes, and the set of nodes
971 * lower the maximum NUMA node ID to what is actually present. 997 * that we expect to make use of for this platform's affinity
998 * calculations.
972 */ 999 */
973 nodes_and(node_possible_map, node_possible_map, node_online_map); 1000 nodes_and(node_possible_map, node_possible_map, node_online_map);
974 1001
1002 find_possible_nodes();
1003
975 for_each_online_node(nid) { 1004 for_each_online_node(nid) {
976 unsigned long start_pfn, end_pfn; 1005 unsigned long start_pfn, end_pfn;
977 1006
@@ -1304,6 +1333,40 @@ static long vphn_get_associativity(unsigned long cpu,
1304 return rc; 1333 return rc;
1305} 1334}
1306 1335
1336static inline int find_and_online_cpu_nid(int cpu)
1337{
1338 __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1339 int new_nid;
1340
1341 /* Use associativity from first thread for all siblings */
1342 vphn_get_associativity(cpu, associativity);
1343 new_nid = associativity_to_nid(associativity);
1344 if (new_nid < 0 || !node_possible(new_nid))
1345 new_nid = first_online_node;
1346
1347 if (NODE_DATA(new_nid) == NULL) {
1348#ifdef CONFIG_MEMORY_HOTPLUG
1349 /*
1350 * Need to ensure that NODE_DATA is initialized for a node from
1351 * available memory (see memblock_alloc_try_nid). If unable to
1352 * init the node, then default to nearest node that has memory
1353 * installed.
1354 */
1355 if (try_online_node(new_nid))
1356 new_nid = first_online_node;
1357#else
1358 /*
1359 * Default to using the nearest node that has memory installed.
1360 * Otherwise, it would be necessary to patch the kernel MM code
1361 * to deal with more memoryless-node error conditions.
1362 */
1363 new_nid = first_online_node;
1364#endif
1365 }
1366
1367 return new_nid;
1368}
1369
1307/* 1370/*
1308 * Update the CPU maps and sysfs entries for a single CPU when its NUMA 1371 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
1309 * characteristics change. This function doesn't perform any locking and is 1372 * characteristics change. This function doesn't perform any locking and is
@@ -1369,7 +1432,6 @@ int arch_update_cpu_topology(void)
1369{ 1432{
1370 unsigned int cpu, sibling, changed = 0; 1433 unsigned int cpu, sibling, changed = 0;
1371 struct topology_update_data *updates, *ud; 1434 struct topology_update_data *updates, *ud;
1372 __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1373 cpumask_t updated_cpus; 1435 cpumask_t updated_cpus;
1374 struct device *dev; 1436 struct device *dev;
1375 int weight, new_nid, i = 0; 1437 int weight, new_nid, i = 0;
@@ -1404,11 +1466,7 @@ int arch_update_cpu_topology(void)
1404 continue; 1466 continue;
1405 } 1467 }
1406 1468
1407 /* Use associativity from first thread for all siblings */ 1469 new_nid = find_and_online_cpu_nid(cpu);
1408 vphn_get_associativity(cpu, associativity);
1409 new_nid = associativity_to_nid(associativity);
1410 if (new_nid < 0 || !node_online(new_nid))
1411 new_nid = first_online_node;
1412 1470
1413 if (new_nid == numa_cpu_lookup_table[cpu]) { 1471 if (new_nid == numa_cpu_lookup_table[cpu]) {
1414 cpumask_andnot(&cpu_associativity_changes_mask, 1472 cpumask_andnot(&cpu_associativity_changes_mask,
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 515730e499fe..309027208f7c 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -69,14 +69,14 @@ static inline void slb_shadow_update(unsigned long ea, int ssize,
69 * updating it. No write barriers are needed here, provided 69 * updating it. No write barriers are needed here, provided
70 * we only update the current CPU's SLB shadow buffer. 70 * we only update the current CPU's SLB shadow buffer.
71 */ 71 */
72 p->save_area[index].esid = 0; 72 WRITE_ONCE(p->save_area[index].esid, 0);
73 p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags)); 73 WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
74 p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index)); 74 WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
75} 75}
76 76
77static inline void slb_shadow_clear(enum slb_index index) 77static inline void slb_shadow_clear(enum slb_index index)
78{ 78{
79 get_slb_shadow()->save_area[index].esid = 0; 79 WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0);
80} 80}
81 81
82static inline void create_shadowed_slbe(unsigned long ea, int ssize, 82static inline void create_shadowed_slbe(unsigned long ea, int ssize,
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 2d66a8446198..345e255c06a2 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -329,6 +329,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
329 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 329 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
330 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); 330 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
331 break; 331 break;
332 case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
333 PPC_LWZ_OFFS(r_A, r_skb, K);
334 break;
332 case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */ 335 case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
333 PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); 336 PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
334 break; 337 break;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index b2ab164a8094..30e2e8efbe6b 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -448,6 +448,16 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
448 /* invalid entry */ 448 /* invalid entry */
449 continue; 449 continue;
450 450
451 /*
452 * BHRB rolling buffer could very much contain the kernel
453 * addresses at this point. Check the privileges before
454 * exporting it to userspace (avoid exposure of regions
455 * where we could have speculative execution)
456 */
457 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
458 is_kernel_addr(addr))
459 continue;
460
451 /* Branches are read most recent first (ie. mfbhrb 0 is 461 /* Branches are read most recent first (ie. mfbhrb 0 is
452 * the most recent branch). 462 * the most recent branch).
453 * There are two types of valid entries: 463 * There are two types of valid entries:
@@ -1188,6 +1198,7 @@ static void power_pmu_disable(struct pmu *pmu)
1188 */ 1198 */
1189 write_mmcr0(cpuhw, val); 1199 write_mmcr0(cpuhw, val);
1190 mb(); 1200 mb();
1201 isync();
1191 1202
1192 /* 1203 /*
1193 * Disable instruction sampling if it was enabled 1204 * Disable instruction sampling if it was enabled
@@ -1196,12 +1207,26 @@ static void power_pmu_disable(struct pmu *pmu)
1196 mtspr(SPRN_MMCRA, 1207 mtspr(SPRN_MMCRA,
1197 cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); 1208 cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
1198 mb(); 1209 mb();
1210 isync();
1199 } 1211 }
1200 1212
1201 cpuhw->disabled = 1; 1213 cpuhw->disabled = 1;
1202 cpuhw->n_added = 0; 1214 cpuhw->n_added = 0;
1203 1215
1204 ebb_switch_out(mmcr0); 1216 ebb_switch_out(mmcr0);
1217
1218#ifdef CONFIG_PPC64
1219 /*
1220 * These are readable by userspace, may contain kernel
1221 * addresses and are not switched by context switch, so clear
1222 * them now to avoid leaking anything to userspace in general
1223 * including to another process.
1224 */
1225 if (ppmu->flags & PPMU_ARCH_207S) {
1226 mtspr(SPRN_SDAR, 0);
1227 mtspr(SPRN_SIAR, 0);
1228 }
1229#endif
1205 } 1230 }
1206 1231
1207 local_irq_restore(flags); 1232 local_irq_restore(flags);
@@ -1381,7 +1406,7 @@ static int collect_events(struct perf_event *group, int max_count,
1381 int n = 0; 1406 int n = 0;
1382 struct perf_event *event; 1407 struct perf_event *event;
1383 1408
1384 if (!is_software_event(group)) { 1409 if (group->pmu->task_ctx_nr == perf_hw_context) {
1385 if (n >= max_count) 1410 if (n >= max_count)
1386 return -1; 1411 return -1;
1387 ctrs[n] = group; 1412 ctrs[n] = group;
@@ -1389,7 +1414,7 @@ static int collect_events(struct perf_event *group, int max_count,
1389 events[n++] = group->hw.config; 1414 events[n++] = group->hw.config;
1390 } 1415 }
1391 list_for_each_entry(event, &group->sibling_list, group_entry) { 1416 list_for_each_entry(event, &group->sibling_list, group_entry) {
1392 if (!is_software_event(event) && 1417 if (event->pmu->task_ctx_nr == perf_hw_context &&
1393 event->state != PERF_EVENT_STATE_OFF) { 1418 event->state != PERF_EVENT_STATE_OFF) {
1394 if (n >= max_count) 1419 if (n >= max_count)
1395 return -1; 1420 return -1;
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index be6212ddbf06..7e42e3ec2142 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -174,6 +174,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
174 if (!dump_skip(cprm, 174 if (!dump_skip(cprm,
175 roundup(cprm->written - total + sz, 4) - cprm->written)) 175 roundup(cprm->written - total + sz, 4) - cprm->written))
176 goto Eio; 176 goto Eio;
177
178 rc = 0;
177out: 179out:
178 free_page((unsigned long)buf); 180 free_page((unsigned long)buf);
179 return rc; 181 return rc;
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
index f803f4b8ab6f..8608e358217f 100644
--- a/arch/powerpc/platforms/chrp/time.c
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -27,6 +27,8 @@
27#include <asm/sections.h> 27#include <asm/sections.h>
28#include <asm/time.h> 28#include <asm/time.h>
29 29
30#include <platforms/chrp/chrp.h>
31
30extern spinlock_t rtc_lock; 32extern spinlock_t rtc_lock;
31 33
32#define NVRAM_AS0 0x74 34#define NVRAM_AS0 0x74
@@ -62,7 +64,7 @@ long __init chrp_time_init(void)
62 return 0; 64 return 0;
63} 65}
64 66
65int chrp_cmos_clock_read(int addr) 67static int chrp_cmos_clock_read(int addr)
66{ 68{
67 if (nvram_as1 != 0) 69 if (nvram_as1 != 0)
68 outb(addr>>8, nvram_as1); 70 outb(addr>>8, nvram_as1);
@@ -70,7 +72,7 @@ int chrp_cmos_clock_read(int addr)
70 return (inb(nvram_data)); 72 return (inb(nvram_data));
71} 73}
72 74
73void chrp_cmos_clock_write(unsigned long val, int addr) 75static void chrp_cmos_clock_write(unsigned long val, int addr)
74{ 76{
75 if (nvram_as1 != 0) 77 if (nvram_as1 != 0)
76 outb(addr>>8, nvram_as1); 78 outb(addr>>8, nvram_as1);
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 9b7975706bfc..9485f1024d46 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -35,6 +35,8 @@
35 */ 35 */
36#define HW_BROADWAY_ICR 0x00 36#define HW_BROADWAY_ICR 0x00
37#define HW_BROADWAY_IMR 0x04 37#define HW_BROADWAY_IMR 0x04
38#define HW_STARLET_ICR 0x08
39#define HW_STARLET_IMR 0x0c
38 40
39 41
40/* 42/*
@@ -74,6 +76,9 @@ static void hlwd_pic_unmask(struct irq_data *d)
74 void __iomem *io_base = irq_data_get_irq_chip_data(d); 76 void __iomem *io_base = irq_data_get_irq_chip_data(d);
75 77
76 setbits32(io_base + HW_BROADWAY_IMR, 1 << irq); 78 setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
79
80 /* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. */
81 clrbits32(io_base + HW_STARLET_IMR, 1 << irq);
77} 82}
78 83
79 84
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index 76f5013c35e5..89237b84b096 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -467,7 +467,7 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
467 boot_infos_t *bi = (boot_infos_t *) r4; 467 boot_infos_t *bi = (boot_infos_t *) r4;
468 unsigned long hdr; 468 unsigned long hdr;
469 unsigned long space; 469 unsigned long space;
470 unsigned long ptr, x; 470 unsigned long ptr;
471 char *model; 471 char *model;
472 unsigned long offset = reloc_offset(); 472 unsigned long offset = reloc_offset();
473 473
@@ -561,6 +561,8 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
561 * MMU switched OFF, so this should not be useful anymore. 561 * MMU switched OFF, so this should not be useful anymore.
562 */ 562 */
563 if (bi->version < 4) { 563 if (bi->version < 4) {
564 unsigned long x __maybe_unused;
565
564 bootx_printf("Touching pages...\n"); 566 bootx_printf("Touching pages...\n");
565 567
566 /* 568 /*
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 8dd78f4e1af4..32fc56cf6261 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -359,6 +359,7 @@ static int pmac_late_init(void)
359} 359}
360machine_late_initcall(powermac, pmac_late_init); 360machine_late_initcall(powermac, pmac_late_init);
361 361
362void note_bootable_part(dev_t dev, int part, int goodness);
362/* 363/*
363 * This is __init_refok because we check for "initializing" before 364 * This is __init_refok because we check for "initializing" before
364 * touching any of the __init sensitive things and "initializing" 365 * touching any of the __init sensitive things and "initializing"
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 92736851c795..3f653f5201e7 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -48,8 +48,8 @@ static int pnv_eeh_init(void)
48 struct pci_controller *hose; 48 struct pci_controller *hose;
49 struct pnv_phb *phb; 49 struct pnv_phb *phb;
50 50
51 if (!firmware_has_feature(FW_FEATURE_OPALv3)) { 51 if (!firmware_has_feature(FW_FEATURE_OPAL)) {
52 pr_warn("%s: OPALv3 is required !\n", 52 pr_warn("%s: OPAL is required !\n",
53 __func__); 53 __func__);
54 return -EINVAL; 54 return -EINVAL;
55 } 55 }
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 59d735d2e5c0..15bfbcd5debc 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -242,7 +242,7 @@ static int __init pnv_init_idle_states(void)
242 if (cpuidle_disable != IDLE_NO_OVERRIDE) 242 if (cpuidle_disable != IDLE_NO_OVERRIDE)
243 goto out; 243 goto out;
244 244
245 if (!firmware_has_feature(FW_FEATURE_OPALv3)) 245 if (!firmware_has_feature(FW_FEATURE_OPAL))
246 goto out; 246 goto out;
247 247
248 power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); 248 power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index 9db4398ded5d..5584247f5029 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -11,6 +11,7 @@
11 11
12#define DEBUG 12#define DEBUG
13 13
14#include <linux/delay.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/init.h> 16#include <linux/init.h>
16#include <linux/of.h> 17#include <linux/of.h>
@@ -43,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index)
43 return count; 44 return count;
44} 45}
45 46
47/*
48 * This can be called in the panic path with interrupts off, so use
49 * mdelay in that case.
50 */
46static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) 51static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
47{ 52{
48 s64 rc = OPAL_BUSY; 53 s64 rc = OPAL_BUSY;
@@ -56,9 +61,23 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
56 61
57 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 62 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
58 rc = opal_write_nvram(__pa(buf), count, off); 63 rc = opal_write_nvram(__pa(buf), count, off);
59 if (rc == OPAL_BUSY_EVENT) 64 if (rc == OPAL_BUSY_EVENT) {
65 if (in_interrupt() || irqs_disabled())
66 mdelay(OPAL_BUSY_DELAY_MS);
67 else
68 msleep(OPAL_BUSY_DELAY_MS);
60 opal_poll_events(NULL); 69 opal_poll_events(NULL);
70 } else if (rc == OPAL_BUSY) {
71 if (in_interrupt() || irqs_disabled())
72 mdelay(OPAL_BUSY_DELAY_MS);
73 else
74 msleep(OPAL_BUSY_DELAY_MS);
75 }
61 } 76 }
77
78 if (rc)
79 return -EIO;
80
62 *index += count; 81 *index += count;
63 return count; 82 return count;
64} 83}
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index 7634d1c62299..d0ac535cf5d7 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -126,7 +126,7 @@ static const struct scom_controller opal_scom_controller = {
126 126
127static int opal_xscom_init(void) 127static int opal_xscom_init(void)
128{ 128{
129 if (firmware_has_feature(FW_FEATURE_OPALv3)) 129 if (firmware_has_feature(FW_FEATURE_OPAL))
130 scom_init(&opal_scom_controller); 130 scom_init(&opal_scom_controller);
131 return 0; 131 return 0;
132} 132}
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index ae29eaf85e9e..e48826aa314c 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -98,16 +98,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
98 pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n", 98 pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
99 opal.size, sizep, runtimesz); 99 opal.size, sizep, runtimesz);
100 100
101 powerpc_firmware_features |= FW_FEATURE_OPAL;
102 if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { 101 if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
103 powerpc_firmware_features |= FW_FEATURE_OPALv2; 102 powerpc_firmware_features |= FW_FEATURE_OPAL;
104 powerpc_firmware_features |= FW_FEATURE_OPALv3; 103 pr_info("OPAL detected !\n");
105 pr_info("OPAL V3 detected !\n");
106 } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
107 powerpc_firmware_features |= FW_FEATURE_OPALv2;
108 pr_info("OPAL V2 detected !\n");
109 } else { 104 } else {
110 pr_info("OPAL V1 detected !\n"); 105 panic("OPAL != V3 detected, no longer supported.\n");
111 } 106 }
112 107
113 /* Reinit all cores with the right endian */ 108 /* Reinit all cores with the right endian */
@@ -352,17 +347,15 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
352 * enough room and be done with it 347 * enough room and be done with it
353 */ 348 */
354 spin_lock_irqsave(&opal_write_lock, flags); 349 spin_lock_irqsave(&opal_write_lock, flags);
355 if (firmware_has_feature(FW_FEATURE_OPALv2)) { 350 rc = opal_console_write_buffer_space(vtermno, &olen);
356 rc = opal_console_write_buffer_space(vtermno, &olen); 351 len = be64_to_cpu(olen);
357 len = be64_to_cpu(olen); 352 if (rc || len < total_len) {
358 if (rc || len < total_len) { 353 spin_unlock_irqrestore(&opal_write_lock, flags);
359 spin_unlock_irqrestore(&opal_write_lock, flags); 354 /* Closed -> drop characters */
360 /* Closed -> drop characters */ 355 if (rc)
361 if (rc) 356 return total_len;
362 return total_len; 357 opal_poll_events(NULL);
363 opal_poll_events(NULL); 358 return -EAGAIN;
364 return -EAGAIN;
365 }
366 } 359 }
367 360
368 /* We still try to handle partial completions, though they 361 /* We still try to handle partial completions, though they
@@ -696,10 +689,7 @@ static int __init opal_init(void)
696 } 689 }
697 690
698 /* Register OPAL consoles if any ports */ 691 /* Register OPAL consoles if any ports */
699 if (firmware_has_feature(FW_FEATURE_OPALv2)) 692 consoles = of_find_node_by_path("/ibm,opal/consoles");
700 consoles = of_find_node_by_path("/ibm,opal/consoles");
701 else
702 consoles = of_node_get(opal_node);
703 if (consoles) { 693 if (consoles) {
704 for_each_child_of_node(consoles, np) { 694 for_each_child_of_node(consoles, np) {
705 if (strcmp(np->name, "serial")) 695 if (strcmp(np->name, "serial"))
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index ecb7f3220355..eac3b7cc78c6 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -344,7 +344,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
344 return; 344 return;
345 } 345 }
346 346
347 if (!firmware_has_feature(FW_FEATURE_OPALv3)) { 347 if (!firmware_has_feature(FW_FEATURE_OPAL)) {
348 pr_info(" Firmware too old to support M64 window\n"); 348 pr_info(" Firmware too old to support M64 window\n");
349 return; 349 return;
350 } 350 }
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index f48afc06ba14..c57afc619b20 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -35,13 +35,63 @@
35#include <asm/opal.h> 35#include <asm/opal.h>
36#include <asm/kexec.h> 36#include <asm/kexec.h>
37#include <asm/smp.h> 37#include <asm/smp.h>
38#include <asm/tm.h>
39#include <asm/setup.h>
38 40
39#include "powernv.h" 41#include "powernv.h"
40 42
43static void pnv_setup_rfi_flush(void)
44{
45 struct device_node *np, *fw_features;
46 enum l1d_flush_type type;
47 int enable;
48
49 /* Default to fallback in case fw-features are not available */
50 type = L1D_FLUSH_FALLBACK;
51 enable = 1;
52
53 np = of_find_node_by_name(NULL, "ibm,opal");
54 fw_features = of_get_child_by_name(np, "fw-features");
55 of_node_put(np);
56
57 if (fw_features) {
58 np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
59 if (np && of_property_read_bool(np, "enabled"))
60 type = L1D_FLUSH_MTTRIG;
61
62 of_node_put(np);
63
64 np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
65 if (np && of_property_read_bool(np, "enabled"))
66 type = L1D_FLUSH_ORI;
67
68 of_node_put(np);
69
70 /* Enable unless firmware says NOT to */
71 enable = 2;
72 np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
73 if (np && of_property_read_bool(np, "disabled"))
74 enable--;
75
76 of_node_put(np);
77
78 np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
79 if (np && of_property_read_bool(np, "disabled"))
80 enable--;
81
82 of_node_put(np);
83 of_node_put(fw_features);
84 }
85
86 setup_rfi_flush(type, enable > 0);
87}
88
41static void __init pnv_setup_arch(void) 89static void __init pnv_setup_arch(void)
42{ 90{
43 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 91 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
44 92
93 pnv_setup_rfi_flush();
94
45 /* Initialize SMP */ 95 /* Initialize SMP */
46 pnv_smp_init(); 96 pnv_smp_init();
47 97
@@ -90,12 +140,8 @@ static void pnv_show_cpuinfo(struct seq_file *m)
90 if (root) 140 if (root)
91 model = of_get_property(root, "model", NULL); 141 model = of_get_property(root, "model", NULL);
92 seq_printf(m, "machine\t\t: PowerNV %s\n", model); 142 seq_printf(m, "machine\t\t: PowerNV %s\n", model);
93 if (firmware_has_feature(FW_FEATURE_OPALv3)) 143 if (firmware_has_feature(FW_FEATURE_OPAL))
94 seq_printf(m, "firmware\t: OPAL v3\n"); 144 seq_printf(m, "firmware\t: OPAL\n");
95 else if (firmware_has_feature(FW_FEATURE_OPALv2))
96 seq_printf(m, "firmware\t: OPAL v2\n");
97 else if (firmware_has_feature(FW_FEATURE_OPAL))
98 seq_printf(m, "firmware\t: OPAL v1\n");
99 else 145 else
100 seq_printf(m, "firmware\t: BML\n"); 146 seq_printf(m, "firmware\t: BML\n");
101 of_node_put(root); 147 of_node_put(root);
@@ -224,9 +270,9 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
224{ 270{
225 xics_kexec_teardown_cpu(secondary); 271 xics_kexec_teardown_cpu(secondary);
226 272
227 /* On OPAL v3, we return all CPUs to firmware */ 273 /* On OPAL, we return all CPUs to firmware */
228 274
229 if (!firmware_has_feature(FW_FEATURE_OPALv3)) 275 if (!firmware_has_feature(FW_FEATURE_OPAL))
230 return; 276 return;
231 277
232 if (secondary) { 278 if (secondary) {
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index ca264833ee64..ad7b1a3dbed0 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -61,14 +61,15 @@ static int pnv_smp_kick_cpu(int nr)
61 unsigned long start_here = 61 unsigned long start_here =
62 __pa(ppc_function_entry(generic_secondary_smp_init)); 62 __pa(ppc_function_entry(generic_secondary_smp_init));
63 long rc; 63 long rc;
64 uint8_t status;
64 65
65 BUG_ON(nr < 0 || nr >= NR_CPUS); 66 BUG_ON(nr < 0 || nr >= NR_CPUS);
66 67
67 /* 68 /*
68 * If we already started or OPALv2 is not supported, we just 69 * If we already started or OPAL is not supported, we just
69 * kick the CPU via the PACA 70 * kick the CPU via the PACA
70 */ 71 */
71 if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2)) 72 if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPAL))
72 goto kick; 73 goto kick;
73 74
74 /* 75 /*
@@ -77,55 +78,42 @@ static int pnv_smp_kick_cpu(int nr)
77 * first time. OPAL v3 allows us to query OPAL to know if it 78 * first time. OPAL v3 allows us to query OPAL to know if it
78 * has the CPUs, so we do that 79 * has the CPUs, so we do that
79 */ 80 */
80 if (firmware_has_feature(FW_FEATURE_OPALv3)) { 81 rc = opal_query_cpu_status(pcpu, &status);
81 uint8_t status; 82 if (rc != OPAL_SUCCESS) {
82 83 pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr);
83 rc = opal_query_cpu_status(pcpu, &status); 84 return -ENODEV;
84 if (rc != OPAL_SUCCESS) { 85 }
85 pr_warn("OPAL Error %ld querying CPU %d state\n",
86 rc, nr);
87 return -ENODEV;
88 }
89 86
90 /* 87 /*
91 * Already started, just kick it, probably coming from 88 * Already started, just kick it, probably coming from
92 * kexec and spinning 89 * kexec and spinning
93 */ 90 */
94 if (status == OPAL_THREAD_STARTED) 91 if (status == OPAL_THREAD_STARTED)
95 goto kick; 92 goto kick;
96 93
97 /* 94 /*
98 * Available/inactive, let's kick it 95 * Available/inactive, let's kick it
99 */ 96 */
100 if (status == OPAL_THREAD_INACTIVE) { 97 if (status == OPAL_THREAD_INACTIVE) {
101 pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", 98 pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
102 nr, pcpu); 99 rc = opal_start_cpu(pcpu, start_here);
103 rc = opal_start_cpu(pcpu, start_here); 100 if (rc != OPAL_SUCCESS) {
104 if (rc != OPAL_SUCCESS) { 101 pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr);
105 pr_warn("OPAL Error %ld starting CPU %d\n",
106 rc, nr);
107 return -ENODEV;
108 }
109 } else {
110 /*
111 * An unavailable CPU (or any other unknown status)
112 * shouldn't be started. It should also
113 * not be in the possible map but currently it can
114 * happen
115 */
116 pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
117 " (status %d)...\n", nr, pcpu, status);
118 return -ENODEV; 102 return -ENODEV;
119 } 103 }
120 } else { 104 } else {
121 /* 105 /*
122 * On OPAL v2, we just kick it and hope for the best, 106 * An unavailable CPU (or any other unknown status)
123 * we must not test the error from opal_start_cpu() or 107 * shouldn't be started. It should also
124 * we would fail to get CPUs from kexec. 108 * not be in the possible map but currently it can
109 * happen
125 */ 110 */
126 opal_start_cpu(pcpu, start_here); 111 pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
112 " (status %d)...\n", nr, pcpu, status);
113 return -ENODEV;
127 } 114 }
128 kick: 115
116kick:
129 return smp_generic_kick_cpu(nr); 117 return smp_generic_kick_cpu(nr);
130} 118}
131 119
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 36df46eaba24..dd2545fc9947 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -499,6 +499,39 @@ static void __init find_and_init_phbs(void)
499 of_pci_check_probe_only(); 499 of_pci_check_probe_only();
500} 500}
501 501
502static void pseries_setup_rfi_flush(void)
503{
504 struct h_cpu_char_result result;
505 enum l1d_flush_type types;
506 bool enable;
507 long rc;
508
509 /* Enable by default */
510 enable = true;
511
512 rc = plpar_get_cpu_characteristics(&result);
513 if (rc == H_SUCCESS) {
514 types = L1D_FLUSH_NONE;
515
516 if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
517 types |= L1D_FLUSH_MTTRIG;
518 if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
519 types |= L1D_FLUSH_ORI;
520
521 /* Use fallback if nothing set in hcall */
522 if (types == L1D_FLUSH_NONE)
523 types = L1D_FLUSH_FALLBACK;
524
525 if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
526 enable = false;
527 } else {
528 /* Default to fallback if case hcall is not available */
529 types = L1D_FLUSH_FALLBACK;
530 }
531
532 setup_rfi_flush(types, enable);
533}
534
502static void __init pSeries_setup_arch(void) 535static void __init pSeries_setup_arch(void)
503{ 536{
504 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 537 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -515,7 +548,9 @@ static void __init pSeries_setup_arch(void)
515 548
516 fwnmi_init(); 549 fwnmi_init();
517 550
518 /* By default, only probe PCI (can be overriden by rtas_pci) */ 551 pseries_setup_rfi_flush();
552
553 /* By default, only probe PCI (can be overridden by rtas_pci) */
519 pci_add_flags(PCI_PROBE_ONLY); 554 pci_add_flags(PCI_PROBE_ONLY);
520 555
521 /* Find and initialize PCI host bridges */ 556 /* Find and initialize PCI host bridges */
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 2a0452e364ba..d11f931cac69 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -626,7 +626,7 @@ static inline u32 mpic_physmask(u32 cpumask)
626 int i; 626 int i;
627 u32 mask = 0; 627 u32 mask = 0;
628 628
629 for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1) 629 for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1)
630 mask |= (cpumask & 1) << get_hard_smp_processor_id(i); 630 mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
631 return mask; 631 return mask;
632} 632}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c41094ca3b73..8862d043977a 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -114,6 +114,7 @@ config S390
114 select GENERIC_CLOCKEVENTS 114 select GENERIC_CLOCKEVENTS
115 select GENERIC_CPU_AUTOPROBE 115 select GENERIC_CPU_AUTOPROBE
116 select GENERIC_CPU_DEVICES if !SMP 116 select GENERIC_CPU_DEVICES if !SMP
117 select GENERIC_CPU_VULNERABILITIES
117 select GENERIC_FIND_FIRST_BIT 118 select GENERIC_FIND_FIRST_BIT
118 select GENERIC_SMP_IDLE_THREAD 119 select GENERIC_SMP_IDLE_THREAD
119 select GENERIC_TIME_VSYSCALL 120 select GENERIC_TIME_VSYSCALL
@@ -127,6 +128,7 @@ config S390
127 select HAVE_ARCH_TRACEHOOK 128 select HAVE_ARCH_TRACEHOOK
128 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 129 select HAVE_ARCH_TRANSPARENT_HUGEPAGE
129 select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES 130 select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
131 select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
130 select HAVE_CMPXCHG_DOUBLE 132 select HAVE_CMPXCHG_DOUBLE
131 select HAVE_CMPXCHG_LOCAL 133 select HAVE_CMPXCHG_LOCAL
132 select HAVE_DEBUG_KMEMLEAK 134 select HAVE_DEBUG_KMEMLEAK
@@ -708,6 +710,51 @@ config SECCOMP
708 710
709 If unsure, say Y. 711 If unsure, say Y.
710 712
713config KERNEL_NOBP
714 def_bool n
715 prompt "Enable modified branch prediction for the kernel by default"
716 help
717 If this option is selected the kernel will switch to a modified
718 branch prediction mode if the firmware interface is available.
719 The modified branch prediction mode improves the behaviour in
720 regard to speculative execution.
721
722 With the option enabled the kernel parameter "nobp=0" or "nospec"
723 can be used to run the kernel in the normal branch prediction mode.
724
725 With the option disabled the modified branch prediction mode is
726 enabled with the "nobp=1" kernel parameter.
727
728 If unsure, say N.
729
730config EXPOLINE
731 def_bool n
732 prompt "Avoid speculative indirect branches in the kernel"
733 help
734 Compile the kernel with the expoline compiler options to guard
735 against kernel-to-user data leaks by avoiding speculative indirect
736 branches.
737 Requires a compiler with -mindirect-branch=thunk support for full
738 protection. The kernel may run slower.
739
740 If unsure, say N.
741
742choice
743 prompt "Expoline default"
744 depends on EXPOLINE
745 default EXPOLINE_FULL
746
747config EXPOLINE_OFF
748 bool "spectre_v2=off"
749
750config EXPOLINE_AUTO
751 bool "spectre_v2=auto"
752
753config EXPOLINE_FULL
754 bool "spectre_v2=on"
755
756endchoice
757
711endmenu 758endmenu
712 759
713menu "Power Management" 760menu "Power Management"
@@ -757,6 +804,7 @@ config PFAULT
757config SHARED_KERNEL 804config SHARED_KERNEL
758 bool "VM shared kernel support" 805 bool "VM shared kernel support"
759 depends on !JUMP_LABEL 806 depends on !JUMP_LABEL
807 depends on !ALTERNATIVES
760 help 808 help
761 Select this option, if you want to share the text segment of the 809 Select this option, if you want to share the text segment of the
762 Linux kernel between different VM guests. This reduces memory 810 Linux kernel between different VM guests. This reduces memory
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index e8d4423e4f85..d924f9b6dc73 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -77,6 +77,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
77cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack 77cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
78endif 78endif
79 79
80ifdef CONFIG_EXPOLINE
81 ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
82 CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
83 CC_FLAGS_EXPOLINE += -mfunction-return=thunk
84 CC_FLAGS_EXPOLINE += -mindirect-branch-table
85 export CC_FLAGS_EXPOLINE
86 cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
87 endif
88endif
89
80ifdef CONFIG_FUNCTION_TRACER 90ifdef CONFIG_FUNCTION_TRACER
81# make use of hotpatch feature if the compiler supports it 91# make use of hotpatch feature if the compiler supports it
82cc_hotpatch := -mhotpatch=0,3 92cc_hotpatch := -mhotpatch=0,3
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index b2e5902bd8f4..c670279b33f0 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -318,7 +318,7 @@ static void hypfs_kill_super(struct super_block *sb)
318 318
319 if (sb->s_root) 319 if (sb->s_root)
320 hypfs_delete_tree(sb->s_root); 320 hypfs_delete_tree(sb->s_root);
321 if (sb_info->update_file) 321 if (sb_info && sb_info->update_file)
322 hypfs_remove(sb_info->update_file); 322 hypfs_remove(sb_info->update_file);
323 kfree(sb->s_fs_info); 323 kfree(sb->s_fs_info);
324 sb->s_fs_info = NULL; 324 sb->s_fs_info = NULL;
diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
new file mode 100644
index 000000000000..955d620db23e
--- /dev/null
+++ b/arch/s390/include/asm/alternative-asm.h
@@ -0,0 +1,108 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_S390_ALTERNATIVE_ASM_H
3#define _ASM_S390_ALTERNATIVE_ASM_H
4
5#ifdef __ASSEMBLY__
6
7/*
8 * Check the length of an instruction sequence. The length may not be larger
9 * than 254 bytes and it has to be divisible by 2.
10 */
11.macro alt_len_check start,end
12 .if ( \end - \start ) > 254
13 .error "cpu alternatives does not support instructions blocks > 254 bytes\n"
14 .endif
15 .if ( \end - \start ) % 2
16 .error "cpu alternatives instructions length is odd\n"
17 .endif
18.endm
19
20/*
21 * Issue one struct alt_instr descriptor entry (need to put it into
22 * the section .altinstructions, see below). This entry contains
23 * enough information for the alternatives patching code to patch an
24 * instruction. See apply_alternatives().
25 */
26.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
27 .long \orig_start - .
28 .long \alt_start - .
29 .word \feature
30 .byte \orig_end - \orig_start
31 .byte \alt_end - \alt_start
32.endm
33
34/*
35 * Fill up @bytes with nops. The macro emits 6-byte nop instructions
36 * for the bulk of the area, possibly followed by a 4-byte and/or
37 * a 2-byte nop if the size of the area is not divisible by 6.
38 */
39.macro alt_pad_fill bytes
40 .fill ( \bytes ) / 6, 6, 0xc0040000
41 .fill ( \bytes ) % 6 / 4, 4, 0x47000000
42 .fill ( \bytes ) % 6 % 4 / 2, 2, 0x0700
43.endm
44
45/*
46 * Fill up @bytes with nops. If the number of bytes is larger
47 * than 6, emit a jg instruction to branch over all nops, then
48 * fill an area of size (@bytes - 6) with nop instructions.
49 */
50.macro alt_pad bytes
51 .if ( \bytes > 0 )
52 .if ( \bytes > 6 )
53 jg . + \bytes
54 alt_pad_fill \bytes - 6
55 .else
56 alt_pad_fill \bytes
57 .endif
58 .endif
59.endm
60
61/*
62 * Define an alternative between two instructions. If @feature is
63 * present, early code in apply_alternatives() replaces @oldinstr with
64 * @newinstr. ".skip" directive takes care of proper instruction padding
65 * in case @newinstr is longer than @oldinstr.
66 */
67.macro ALTERNATIVE oldinstr, newinstr, feature
68 .pushsection .altinstr_replacement,"ax"
69770: \newinstr
70771: .popsection
71772: \oldinstr
72773: alt_len_check 770b, 771b
73 alt_len_check 772b, 773b
74 alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) )
75774: .pushsection .altinstructions,"a"
76 alt_entry 772b, 774b, 770b, 771b, \feature
77 .popsection
78.endm
79
80/*
81 * Define an alternative between two instructions. If @feature is
82 * present, early code in apply_alternatives() replaces @oldinstr with
83 * @newinstr. ".skip" directive takes care of proper instruction padding
84 * in case @newinstr is longer than @oldinstr.
85 */
86.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
87 .pushsection .altinstr_replacement,"ax"
88770: \newinstr1
89771: \newinstr2
90772: .popsection
91773: \oldinstr
92774: alt_len_check 770b, 771b
93 alt_len_check 771b, 772b
94 alt_len_check 773b, 774b
95 .if ( 771b - 770b > 772b - 771b )
96 alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) )
97 .else
98 alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) )
99 .endif
100775: .pushsection .altinstructions,"a"
101 alt_entry 773b, 775b, 770b, 771b,\feature1
102 alt_entry 773b, 775b, 771b, 772b,\feature2
103 .popsection
104.endm
105
106#endif /* __ASSEMBLY__ */
107
108#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
new file mode 100644
index 000000000000..a72002056b54
--- /dev/null
+++ b/arch/s390/include/asm/alternative.h
@@ -0,0 +1,149 @@
1#ifndef _ASM_S390_ALTERNATIVE_H
2#define _ASM_S390_ALTERNATIVE_H
3
4#ifndef __ASSEMBLY__
5
6#include <linux/types.h>
7#include <linux/stddef.h>
8#include <linux/stringify.h>
9
10struct alt_instr {
11 s32 instr_offset; /* original instruction */
12 s32 repl_offset; /* offset to replacement instruction */
13 u16 facility; /* facility bit set for replacement */
14 u8 instrlen; /* length of original instruction */
15 u8 replacementlen; /* length of new instruction */
16} __packed;
17
18void apply_alternative_instructions(void);
19void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
20
21/*
22 * |661: |662: |6620 |663:
23 * +-----------+---------------------+
24 * | oldinstr | oldinstr_padding |
25 * | +----------+----------+
26 * | | | |
27 * | | >6 bytes |6/4/2 nops|
28 * | |6 bytes jg----------->
29 * +-----------+---------------------+
30 * ^^ static padding ^^
31 *
32 * .altinstr_replacement section
33 * +---------------------+-----------+
34 * |6641: |6651:
35 * | alternative instr 1 |
36 * +-----------+---------+- - - - - -+
37 * |6642: |6652: |
38 * | alternative instr 2 | padding
39 * +---------------------+- - - - - -+
40 * ^ runtime ^
41 *
42 * .altinstructions section
43 * +---------------------------------+
44 * | alt_instr entries for each |
45 * | alternative instr |
46 * +---------------------------------+
47 */
48
49#define b_altinstr(num) "664"#num
50#define e_altinstr(num) "665"#num
51
52#define e_oldinstr_pad_end "663"
53#define oldinstr_len "662b-661b"
54#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
55#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
56#define oldinstr_pad_len(num) \
57 "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
58 "((" altinstr_len(num) ")-(" oldinstr_len "))"
59
60#define INSTR_LEN_SANITY_CHECK(len) \
61 ".if " len " > 254\n" \
62 "\t.error \"cpu alternatives does not support instructions " \
63 "blocks > 254 bytes\"\n" \
64 ".endif\n" \
65 ".if (" len ") %% 2\n" \
66 "\t.error \"cpu alternatives instructions length is odd\"\n" \
67 ".endif\n"
68
69#define OLDINSTR_PADDING(oldinstr, num) \
70 ".if " oldinstr_pad_len(num) " > 6\n" \
71 "\tjg " e_oldinstr_pad_end "f\n" \
72 "6620:\n" \
73 "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
74 ".else\n" \
75 "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
76 "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
77 "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
78 ".endif\n"
79
80#define OLDINSTR(oldinstr, num) \
81 "661:\n\t" oldinstr "\n662:\n" \
82 OLDINSTR_PADDING(oldinstr, num) \
83 e_oldinstr_pad_end ":\n" \
84 INSTR_LEN_SANITY_CHECK(oldinstr_len)
85
86#define OLDINSTR_2(oldinstr, num1, num2) \
87 "661:\n\t" oldinstr "\n662:\n" \
88 ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
89 OLDINSTR_PADDING(oldinstr, num2) \
90 ".else\n" \
91 OLDINSTR_PADDING(oldinstr, num1) \
92 ".endif\n" \
93 e_oldinstr_pad_end ":\n" \
94 INSTR_LEN_SANITY_CHECK(oldinstr_len)
95
96#define ALTINSTR_ENTRY(facility, num) \
97 "\t.long 661b - .\n" /* old instruction */ \
98 "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
99 "\t.word " __stringify(facility) "\n" /* facility bit */ \
100 "\t.byte " oldinstr_total_len "\n" /* source len */ \
101 "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
102
103#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
104 b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
105 INSTR_LEN_SANITY_CHECK(altinstr_len(num))
106
107/* alternative assembly primitive: */
108#define ALTERNATIVE(oldinstr, altinstr, facility) \
109 ".pushsection .altinstr_replacement, \"ax\"\n" \
110 ALTINSTR_REPLACEMENT(altinstr, 1) \
111 ".popsection\n" \
112 OLDINSTR(oldinstr, 1) \
113 ".pushsection .altinstructions,\"a\"\n" \
114 ALTINSTR_ENTRY(facility, 1) \
115 ".popsection\n"
116
117#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
118 ".pushsection .altinstr_replacement, \"ax\"\n" \
119 ALTINSTR_REPLACEMENT(altinstr1, 1) \
120 ALTINSTR_REPLACEMENT(altinstr2, 2) \
121 ".popsection\n" \
122 OLDINSTR_2(oldinstr, 1, 2) \
123 ".pushsection .altinstructions,\"a\"\n" \
124 ALTINSTR_ENTRY(facility1, 1) \
125 ALTINSTR_ENTRY(facility2, 2) \
126 ".popsection\n"
127
128/*
129 * Alternative instructions for different CPU types or capabilities.
130 *
131 * This allows to use optimized instructions even on generic binary
132 * kernels.
133 *
134 * oldinstr is padded with jump and nops at compile time if altinstr is
135 * longer. altinstr is padded with jump and nops at run-time during patching.
136 *
137 * For non barrier like inlines please define new variants
138 * without volatile and memory clobber.
139 */
140#define alternative(oldinstr, altinstr, facility) \
141 asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
142
143#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
144 asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
145 altinstr2, facility2) ::: "memory")
146
147#endif /* __ASSEMBLY__ */
148
149#endif /* _ASM_S390_ALTERNATIVE_H */
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index d68e11e0df5e..e903b28e7358 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -53,4 +53,28 @@ do { \
53 ___p1; \ 53 ___p1; \
54}) 54})
55 55
56/**
57 * array_index_mask_nospec - generate a mask for array_idx() that is
58 * ~0UL when the bounds check succeeds and 0 otherwise
59 * @index: array element index
60 * @size: number of elements in array
61 */
62#define array_index_mask_nospec array_index_mask_nospec
63static inline unsigned long array_index_mask_nospec(unsigned long index,
64 unsigned long size)
65{
66 unsigned long mask;
67
68 if (__builtin_constant_p(size) && size > 0) {
69 asm(" clgr %2,%1\n"
70 " slbgr %0,%0\n"
71 :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
72 return mask;
73 }
74 asm(" clgr %1,%2\n"
75 " slbgr %0,%0\n"
76 :"=d" (mask) : "d" (size), "d" (index) :"cc");
77 return ~mask;
78}
79
56#endif /* __ASM_BARRIER_H */ 80#endif /* __ASM_BARRIER_H */
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 9dd04b9e9782..b2f8c52b3840 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -113,7 +113,7 @@ struct hws_basic_entry {
113 113
114struct hws_diag_entry { 114struct hws_diag_entry {
115 unsigned int def:16; /* 0-15 Data Entry Format */ 115 unsigned int def:16; /* 0-15 Data Entry Format */
116 unsigned int R:14; /* 16-19 and 20-30 reserved */ 116 unsigned int R:15; /* 16-19 and 20-30 reserved */
117 unsigned int I:1; /* 31 entry valid or invalid */ 117 unsigned int I:1; /* 31 entry valid or invalid */
118 u8 data[]; /* Machine-dependent sample data */ 118 u8 data[]; /* Machine-dependent sample data */
119} __packed; 119} __packed;
@@ -129,7 +129,9 @@ struct hws_trailer_entry {
129 unsigned int f:1; /* 0 - Block Full Indicator */ 129 unsigned int f:1; /* 0 - Block Full Indicator */
130 unsigned int a:1; /* 1 - Alert request control */ 130 unsigned int a:1; /* 1 - Alert request control */
131 unsigned int t:1; /* 2 - Timestamp format */ 131 unsigned int t:1; /* 2 - Timestamp format */
132 unsigned long long:61; /* 3 - 63: Reserved */ 132 unsigned int :29; /* 3 - 31: Reserved */
133 unsigned int bsdes:16; /* 32-47: size of basic SDE */
134 unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */
133 }; 135 };
134 unsigned long long flags; /* 0 - 63: All indicators */ 136 unsigned long long flags; /* 0 - 63: All indicators */
135 }; 137 };
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 0aa6a7ed95a3..155fcc7bcba6 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -13,6 +13,24 @@
13 13
14#define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */ 14#define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */
15 15
16static inline void __set_facility(unsigned long nr, void *facilities)
17{
18 unsigned char *ptr = (unsigned char *) facilities;
19
20 if (nr >= MAX_FACILITY_BIT)
21 return;
22 ptr[nr >> 3] |= 0x80 >> (nr & 7);
23}
24
25static inline void __clear_facility(unsigned long nr, void *facilities)
26{
27 unsigned char *ptr = (unsigned char *) facilities;
28
29 if (nr >= MAX_FACILITY_BIT)
30 return;
31 ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
32}
33
16static inline int __test_facility(unsigned long nr, void *facilities) 34static inline int __test_facility(unsigned long nr, void *facilities)
17{ 35{
18 unsigned char *ptr; 36 unsigned char *ptr;
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index a4811aa0304d..8f8eec9e1198 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -21,17 +21,12 @@
21 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ 21 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
22 "m" (*uaddr) : "cc"); 22 "m" (*uaddr) : "cc");
23 23
24static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 24static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
25 u32 __user *uaddr)
25{ 26{
26 int op = (encoded_op >> 28) & 7;
27 int cmp = (encoded_op >> 24) & 15;
28 int oparg = (encoded_op << 8) >> 20;
29 int cmparg = (encoded_op << 20) >> 20;
30 int oldval = 0, newval, ret; 27 int oldval = 0, newval, ret;
31 28
32 load_kernel_asce(); 29 load_kernel_asce();
33 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
34 oparg = 1 << oparg;
35 30
36 pagefault_disable(); 31 pagefault_disable();
37 switch (op) { 32 switch (op) {
@@ -60,17 +55,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
60 } 55 }
61 pagefault_enable(); 56 pagefault_enable();
62 57
63 if (!ret) { 58 if (!ret)
64 switch (cmp) { 59 *oval = oldval;
65 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 60
66 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
67 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
68 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
69 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
70 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
71 default: ret = -ENOSYS;
72 }
73 }
74 return ret; 61 return ret;
75} 62}
76 63
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e9a983f40a24..7d9c5917da2b 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -136,7 +136,8 @@ struct kvm_s390_sie_block {
136 __u16 ipa; /* 0x0056 */ 136 __u16 ipa; /* 0x0056 */
137 __u32 ipb; /* 0x0058 */ 137 __u32 ipb; /* 0x0058 */
138 __u32 scaoh; /* 0x005c */ 138 __u32 scaoh; /* 0x005c */
139 __u8 reserved60; /* 0x0060 */ 139#define FPF_BPBC 0x20
140 __u8 fpf; /* 0x0060 */
140 __u8 ecb; /* 0x0061 */ 141 __u8 ecb; /* 0x0061 */
141 __u8 ecb2; /* 0x0062 */ 142 __u8 ecb2; /* 0x0062 */
142#define ECB3_AES 0x04 143#define ECB3_AES 0x04
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index afe1cfebf1a4..8520c23e419b 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -155,7 +155,9 @@ struct _lowcore {
155 /* Per cpu primary space access list */ 155 /* Per cpu primary space access list */
156 __u32 paste[16]; /* 0x0400 */ 156 __u32 paste[16]; /* 0x0400 */
157 157
158 __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */ 158 /* br %r1 trampoline */
159 __u16 br_r1_trampoline; /* 0x0440 */
160 __u8 pad_0x0442[0x0e00-0x0442]; /* 0x0442 */
159 161
160 /* 162 /*
161 * 0xe00 contains the address of the IPL Parameter Information 163 * 0xe00 contains the address of the IPL Parameter Information
@@ -170,7 +172,8 @@ struct _lowcore {
170 __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */ 172 __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
171 173
172 /* Extended facility list */ 174 /* Extended facility list */
173 __u64 stfle_fac_list[32]; /* 0x0f00 */ 175 __u64 stfle_fac_list[16]; /* 0x0f00 */
176 __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
174 __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */ 177 __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
175 178
176 /* Pointer to vector register save area */ 179 /* Pointer to vector register save area */
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
new file mode 100644
index 000000000000..b4bd8c41e9d3
--- /dev/null
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -0,0 +1,17 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_S390_EXPOLINE_H
3#define _ASM_S390_EXPOLINE_H
4
5#ifndef __ASSEMBLY__
6
7#include <linux/types.h>
8
9extern int nospec_disable;
10
11void nospec_init_branches(void);
12void nospec_auto_detect(void);
13void nospec_revert(s32 *start, s32 *end);
14
15#endif /* __ASSEMBLY__ */
16
17#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
new file mode 100644
index 000000000000..9a56e738d645
--- /dev/null
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -0,0 +1,195 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_S390_NOSPEC_ASM_H
3#define _ASM_S390_NOSPEC_ASM_H
4
5#include <asm/alternative-asm.h>
6#include <asm/asm-offsets.h>
7
8#ifdef __ASSEMBLY__
9
10#ifdef CONFIG_EXPOLINE
11
12_LC_BR_R1 = __LC_BR_R1
13
14/*
15 * The expoline macros are used to create thunks in the same format
16 * as gcc generates them. The 'comdat' section flag makes sure that
17 * the various thunks are merged into a single copy.
18 */
19 .macro __THUNK_PROLOG_NAME name
20 .pushsection .text.\name,"axG",@progbits,\name,comdat
21 .globl \name
22 .hidden \name
23 .type \name,@function
24\name:
25 .cfi_startproc
26 .endm
27
28 .macro __THUNK_EPILOG
29 .cfi_endproc
30 .popsection
31 .endm
32
33 .macro __THUNK_PROLOG_BR r1,r2
34 __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
35 .endm
36
37 .macro __THUNK_PROLOG_BC d0,r1,r2
38 __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
39 .endm
40
41 .macro __THUNK_BR r1,r2
42 jg __s390x_indirect_jump_r\r2\()use_r\r1
43 .endm
44
45 .macro __THUNK_BC d0,r1,r2
46 jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1
47 .endm
48
49 .macro __THUNK_BRASL r1,r2,r3
50 brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
51 .endm
52
53 .macro __DECODE_RR expand,reg,ruse
54 .set __decode_fail,1
55 .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
56 .ifc \reg,%r\r1
57 .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
58 .ifc \ruse,%r\r2
59 \expand \r1,\r2
60 .set __decode_fail,0
61 .endif
62 .endr
63 .endif
64 .endr
65 .if __decode_fail == 1
66 .error "__DECODE_RR failed"
67 .endif
68 .endm
69
70 .macro __DECODE_RRR expand,rsave,rtarget,ruse
71 .set __decode_fail,1
72 .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
73 .ifc \rsave,%r\r1
74 .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
75 .ifc \rtarget,%r\r2
76 .irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
77 .ifc \ruse,%r\r3
78 \expand \r1,\r2,\r3
79 .set __decode_fail,0
80 .endif
81 .endr
82 .endif
83 .endr
84 .endif
85 .endr
86 .if __decode_fail == 1
87 .error "__DECODE_RRR failed"
88 .endif
89 .endm
90
91 .macro __DECODE_DRR expand,disp,reg,ruse
92 .set __decode_fail,1
93 .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
94 .ifc \reg,%r\r1
95 .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
96 .ifc \ruse,%r\r2
97 \expand \disp,\r1,\r2
98 .set __decode_fail,0
99 .endif
100 .endr
101 .endif
102 .endr
103 .if __decode_fail == 1
104 .error "__DECODE_DRR failed"
105 .endif
106 .endm
107
108 .macro __THUNK_EX_BR reg,ruse
109 # Be very careful when adding instructions to this macro!
110 # The ALTERNATIVE replacement code has a .+10 which targets
111 # the "br \reg" after the code has been patched.
112#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
113 exrl 0,555f
114 j .
115#else
116 .ifc \reg,%r1
117 ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
118 j .
119 .else
120 larl \ruse,555f
121 ex 0,0(\ruse)
122 j .
123 .endif
124#endif
125555: br \reg
126 .endm
127
128 .macro __THUNK_EX_BC disp,reg,ruse
129#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
130 exrl 0,556f
131 j .
132#else
133 larl \ruse,556f
134 ex 0,0(\ruse)
135 j .
136#endif
137556: b \disp(\reg)
138 .endm
139
140 .macro GEN_BR_THUNK reg,ruse=%r1
141 __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
142 __THUNK_EX_BR \reg,\ruse
143 __THUNK_EPILOG
144 .endm
145
146 .macro GEN_B_THUNK disp,reg,ruse=%r1
147 __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
148 __THUNK_EX_BC \disp,\reg,\ruse
149 __THUNK_EPILOG
150 .endm
151
152 .macro BR_EX reg,ruse=%r1
153557: __DECODE_RR __THUNK_BR,\reg,\ruse
154 .pushsection .s390_indirect_branches,"a",@progbits
155 .long 557b-.
156 .popsection
157 .endm
158
159 .macro B_EX disp,reg,ruse=%r1
160558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
161 .pushsection .s390_indirect_branches,"a",@progbits
162 .long 558b-.
163 .popsection
164 .endm
165
166 .macro BASR_EX rsave,rtarget,ruse=%r1
167559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
168 .pushsection .s390_indirect_branches,"a",@progbits
169 .long 559b-.
170 .popsection
171 .endm
172
173#else
174 .macro GEN_BR_THUNK reg,ruse=%r1
175 .endm
176
177 .macro GEN_B_THUNK disp,reg,ruse=%r1
178 .endm
179
180 .macro BR_EX reg,ruse=%r1
181 br \reg
182 .endm
183
184 .macro B_EX disp,reg,ruse=%r1
185 b \disp(\reg)
186 .endm
187
188 .macro BASR_EX rsave,rtarget,ruse=%r1
189 basr \rsave,\rtarget
190 .endm
191#endif
192
193#endif /* __ASSEMBLY__ */
194
195#endif /* _ASM_S390_NOSPEC_ASM_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c61ed7890cef..f915a0f1b0fc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -69,6 +69,7 @@ extern void s390_adjust_jiffies(void);
69extern const struct seq_operations cpuinfo_op; 69extern const struct seq_operations cpuinfo_op;
70extern int sysctl_ieee_emulation_warnings; 70extern int sysctl_ieee_emulation_warnings;
71extern void execve_tail(void); 71extern void execve_tail(void);
72extern void __bpon(void);
72 73
73/* 74/*
74 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. 75 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -315,6 +316,9 @@ extern void memcpy_absolute(void *, void *, size_t);
315 memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \ 316 memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
316} 317}
317 318
319extern int s390_isolate_bp(void);
320extern int s390_isolate_bp_guest(void);
321
318#endif /* __ASSEMBLY__ */ 322#endif /* __ASSEMBLY__ */
319 323
320#endif /* __ASM_S390_PROCESSOR_H */ 324#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 692b9247c019..b2504163c8fa 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -78,6 +78,8 @@ void arch_release_task_struct(struct task_struct *tsk);
78#define TIF_SECCOMP 5 /* secure computing */ 78#define TIF_SECCOMP 5 /* secure computing */
79#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ 79#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
80#define TIF_UPROBE 7 /* breakpointed or single-stepping */ 80#define TIF_UPROBE 7 /* breakpointed or single-stepping */
81#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
82#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
81#define TIF_31BIT 16 /* 32bit process */ 83#define TIF_31BIT 16 /* 32bit process */
82#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 84#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
83#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */ 85#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
@@ -93,6 +95,8 @@ void arch_release_task_struct(struct task_struct *tsk);
93#define _TIF_SECCOMP _BITUL(TIF_SECCOMP) 95#define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
94#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT) 96#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
95#define _TIF_UPROBE _BITUL(TIF_UPROBE) 97#define _TIF_UPROBE _BITUL(TIF_UPROBE)
98#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
99#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
96#define _TIF_31BIT _BITUL(TIF_31BIT) 100#define _TIF_31BIT _BITUL(TIF_31BIT)
97#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP) 101#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
98 102
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index ef1a5fcc6c66..beb508a9e72c 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -151,6 +151,7 @@ struct kvm_guest_debug_arch {
151#define KVM_SYNC_ARCH0 (1UL << 4) 151#define KVM_SYNC_ARCH0 (1UL << 4)
152#define KVM_SYNC_PFAULT (1UL << 5) 152#define KVM_SYNC_PFAULT (1UL << 5)
153#define KVM_SYNC_VRS (1UL << 6) 153#define KVM_SYNC_VRS (1UL << 6)
154#define KVM_SYNC_BPBC (1UL << 10)
154/* definition of registers in kvm_run */ 155/* definition of registers in kvm_run */
155struct kvm_sync_regs { 156struct kvm_sync_regs {
156 __u64 prefix; /* prefix register */ 157 __u64 prefix; /* prefix register */
@@ -168,6 +169,8 @@ struct kvm_sync_regs {
168 __u64 vrs[32][2]; /* vector registers */ 169 __u64 vrs[32][2]; /* vector registers */
169 __u8 reserved[512]; /* for future vector expansion */ 170 __u8 reserved[512]; /* for future vector expansion */
170 __u32 fpc; /* only valid with vector registers */ 171 __u32 fpc; /* only valid with vector registers */
172 __u8 bpbc : 1; /* bp mode */
173 __u8 reserved2 : 7;
171}; 174};
172 175
173#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) 176#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index dc167a23b920..c4d4d4ef5e58 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -44,10 +44,14 @@ obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
44obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o 44obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
45obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o 45obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
46obj-y += runtime_instr.o cache.o dumpstack.o 46obj-y += runtime_instr.o cache.o dumpstack.o
47obj-y += entry.o reipl.o relocate_kernel.o 47obj-y += entry.o reipl.o relocate_kernel.o alternative.o
48obj-y += nospec-branch.o
48 49
49extra-y += head.o head64.o vmlinux.lds 50extra-y += head.o head64.o vmlinux.lds
50 51
52obj-$(CONFIG_SYSFS) += nospec-sysfs.o
53CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
54
51obj-$(CONFIG_MODULES) += s390_ksyms.o module.o 55obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
52obj-$(CONFIG_SMP) += smp.o 56obj-$(CONFIG_SMP) += smp.o
53obj-$(CONFIG_SCHED_BOOK) += topology.o 57obj-$(CONFIG_SCHED_BOOK) += topology.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
new file mode 100644
index 000000000000..b57b293998dc
--- /dev/null
+++ b/arch/s390/kernel/alternative.c
@@ -0,0 +1,112 @@
1#include <linux/module.h>
2#include <asm/alternative.h>
3#include <asm/facility.h>
4#include <asm/nospec-branch.h>
5
6#define MAX_PATCH_LEN (255 - 1)
7
8static int __initdata_or_module alt_instr_disabled;
9
10static int __init disable_alternative_instructions(char *str)
11{
12 alt_instr_disabled = 1;
13 return 0;
14}
15
16early_param("noaltinstr", disable_alternative_instructions);
17
18struct brcl_insn {
19 u16 opc;
20 s32 disp;
21} __packed;
22
23static u16 __initdata_or_module nop16 = 0x0700;
24static u32 __initdata_or_module nop32 = 0x47000000;
25static struct brcl_insn __initdata_or_module nop48 = {
26 0xc004, 0
27};
28
29static const void *nops[] __initdata_or_module = {
30 &nop16,
31 &nop32,
32 &nop48
33};
34
35static void __init_or_module add_jump_padding(void *insns, unsigned int len)
36{
37 struct brcl_insn brcl = {
38 0xc0f4,
39 len / 2
40 };
41
42 memcpy(insns, &brcl, sizeof(brcl));
43 insns += sizeof(brcl);
44 len -= sizeof(brcl);
45
46 while (len > 0) {
47 memcpy(insns, &nop16, 2);
48 insns += 2;
49 len -= 2;
50 }
51}
52
53static void __init_or_module add_padding(void *insns, unsigned int len)
54{
55 if (len > 6)
56 add_jump_padding(insns, len);
57 else if (len >= 2)
58 memcpy(insns, nops[len / 2 - 1], len);
59}
60
61static void __init_or_module __apply_alternatives(struct alt_instr *start,
62 struct alt_instr *end)
63{
64 struct alt_instr *a;
65 u8 *instr, *replacement;
66 u8 insnbuf[MAX_PATCH_LEN];
67
68 /*
69 * The scan order should be from start to end. A later scanned
70 * alternative code can overwrite previously scanned alternative code.
71 */
72 for (a = start; a < end; a++) {
73 int insnbuf_sz = 0;
74
75 instr = (u8 *)&a->instr_offset + a->instr_offset;
76 replacement = (u8 *)&a->repl_offset + a->repl_offset;
77
78 if (!__test_facility(a->facility,
79 S390_lowcore.alt_stfle_fac_list))
80 continue;
81
82 if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
83 WARN_ONCE(1, "cpu alternatives instructions length is "
84 "odd, skipping patching\n");
85 continue;
86 }
87
88 memcpy(insnbuf, replacement, a->replacementlen);
89 insnbuf_sz = a->replacementlen;
90
91 if (a->instrlen > a->replacementlen) {
92 add_padding(insnbuf + a->replacementlen,
93 a->instrlen - a->replacementlen);
94 insnbuf_sz += a->instrlen - a->replacementlen;
95 }
96
97 s390_kernel_write(instr, insnbuf, insnbuf_sz);
98 }
99}
100
101void __init_or_module apply_alternatives(struct alt_instr *start,
102 struct alt_instr *end)
103{
104 if (!alt_instr_disabled)
105 __apply_alternatives(start, end);
106}
107
108extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
109void __init apply_alternative_instructions(void)
110{
111 apply_alternatives(__alt_instructions, __alt_instructions_end);
112}
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index dc6c9c604543..39572281e213 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -170,6 +170,7 @@ int main(void)
170 OFFSET(__LC_MACHINE_FLAGS, _lowcore, machine_flags); 170 OFFSET(__LC_MACHINE_FLAGS, _lowcore, machine_flags);
171 OFFSET(__LC_GMAP, _lowcore, gmap); 171 OFFSET(__LC_GMAP, _lowcore, gmap);
172 OFFSET(__LC_PASTE, _lowcore, paste); 172 OFFSET(__LC_PASTE, _lowcore, paste);
173 OFFSET(__LC_BR_R1, _lowcore, br_r1_trampoline);
173 /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ 174 /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
174 OFFSET(__LC_DUMP_REIPL, _lowcore, ipib); 175 OFFSET(__LC_DUMP_REIPL, _lowcore, ipib);
175 /* hardware defined lowcore locations 0x1000 - 0x18ff */ 176 /* hardware defined lowcore locations 0x1000 - 0x18ff */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 326f717df587..61fca549a93b 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -8,18 +8,22 @@
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/asm-offsets.h> 10#include <asm/asm-offsets.h>
11#include <asm/nospec-insn.h>
11#include <asm/ptrace.h> 12#include <asm/ptrace.h>
12#include <asm/sigp.h> 13#include <asm/sigp.h>
13 14
15 GEN_BR_THUNK %r9
16 GEN_BR_THUNK %r14
17
14ENTRY(s390_base_mcck_handler) 18ENTRY(s390_base_mcck_handler)
15 basr %r13,0 19 basr %r13,0
160: lg %r15,__LC_PANIC_STACK # load panic stack 200: lg %r15,__LC_PANIC_STACK # load panic stack
17 aghi %r15,-STACK_FRAME_OVERHEAD 21 aghi %r15,-STACK_FRAME_OVERHEAD
18 larl %r1,s390_base_mcck_handler_fn 22 larl %r1,s390_base_mcck_handler_fn
19 lg %r1,0(%r1) 23 lg %r9,0(%r1)
20 ltgr %r1,%r1 24 ltgr %r9,%r9
21 jz 1f 25 jz 1f
22 basr %r14,%r1 26 BASR_EX %r14,%r9
231: la %r1,4095 271: la %r1,4095
24 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) 28 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
25 lpswe __LC_MCK_OLD_PSW 29 lpswe __LC_MCK_OLD_PSW
@@ -36,10 +40,10 @@ ENTRY(s390_base_ext_handler)
36 basr %r13,0 40 basr %r13,0
370: aghi %r15,-STACK_FRAME_OVERHEAD 410: aghi %r15,-STACK_FRAME_OVERHEAD
38 larl %r1,s390_base_ext_handler_fn 42 larl %r1,s390_base_ext_handler_fn
39 lg %r1,0(%r1) 43 lg %r9,0(%r1)
40 ltgr %r1,%r1 44 ltgr %r9,%r9
41 jz 1f 45 jz 1f
42 basr %r14,%r1 46 BASR_EX %r14,%r9
431: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC 471: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
44 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit 48 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
45 lpswe __LC_EXT_OLD_PSW 49 lpswe __LC_EXT_OLD_PSW
@@ -56,10 +60,10 @@ ENTRY(s390_base_pgm_handler)
56 basr %r13,0 60 basr %r13,0
570: aghi %r15,-STACK_FRAME_OVERHEAD 610: aghi %r15,-STACK_FRAME_OVERHEAD
58 larl %r1,s390_base_pgm_handler_fn 62 larl %r1,s390_base_pgm_handler_fn
59 lg %r1,0(%r1) 63 lg %r9,0(%r1)
60 ltgr %r1,%r1 64 ltgr %r9,%r9
61 jz 1f 65 jz 1f
62 basr %r14,%r1 66 BASR_EX %r14,%r9
63 lmg %r0,%r15,__LC_SAVE_AREA_SYNC 67 lmg %r0,%r15,__LC_SAVE_AREA_SYNC
64 lpswe __LC_PGM_OLD_PSW 68 lpswe __LC_PGM_OLD_PSW
651: lpswe disabled_wait_psw-0b(%r13) 691: lpswe disabled_wait_psw-0b(%r13)
@@ -116,7 +120,7 @@ ENTRY(diag308_reset)
116 larl %r4,.Lcontinue_psw # Restore PSW flags 120 larl %r4,.Lcontinue_psw # Restore PSW flags
117 lpswe 0(%r4) 121 lpswe 0(%r4)
118.Lcontinue: 122.Lcontinue:
119 br %r14 123 BR_EX %r14
120.align 16 124.align 16
121.Lrestart_psw: 125.Lrestart_psw:
122 .long 0x00080000,0x80000000 + .Lrestart_part2 126 .long 0x00080000,0x80000000 + .Lrestart_part2
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 0176ebc97bfd..86f934255eb6 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)
110 110
111COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid) 111COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
112{ 112{
113 return sys_setgid((gid_t)gid); 113 return sys_setgid(low2highgid(gid));
114} 114}
115 115
116COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) 116COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
@@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
120 120
121COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid) 121COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
122{ 122{
123 return sys_setuid((uid_t)uid); 123 return sys_setuid(low2highuid(uid));
124} 124}
125 125
126COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid) 126COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
@@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,
173 173
174COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid) 174COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
175{ 175{
176 return sys_setfsuid((uid_t)uid); 176 return sys_setfsuid(low2highuid(uid));
177} 177}
178 178
179COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid) 179COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
180{ 180{
181 return sys_setfsgid((gid_t)gid); 181 return sys_setfsgid(low2highgid(gid));
182} 182}
183 183
184static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info) 184static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index ee7b8e7ca4f8..8eccead675d4 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -279,6 +279,11 @@ static noinline __init void setup_facility_list(void)
279{ 279{
280 stfle(S390_lowcore.stfle_fac_list, 280 stfle(S390_lowcore.stfle_fac_list,
281 ARRAY_SIZE(S390_lowcore.stfle_fac_list)); 281 ARRAY_SIZE(S390_lowcore.stfle_fac_list));
282 memcpy(S390_lowcore.alt_stfle_fac_list,
283 S390_lowcore.stfle_fac_list,
284 sizeof(S390_lowcore.alt_stfle_fac_list));
285 if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
286 __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
282} 287}
283 288
284static __init void detect_diag9c(void) 289static __init void detect_diag9c(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 4612ed7ec2e5..4cad1adff16b 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -23,6 +23,7 @@
23#include <asm/vx-insn.h> 23#include <asm/vx-insn.h>
24#include <asm/setup.h> 24#include <asm/setup.h>
25#include <asm/nmi.h> 25#include <asm/nmi.h>
26#include <asm/nospec-insn.h>
26 27
27__PT_R0 = __PT_GPRS 28__PT_R0 = __PT_GPRS
28__PT_R1 = __PT_GPRS + 8 29__PT_R1 = __PT_GPRS + 8
@@ -104,6 +105,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
104 j 3f 105 j 3f
1051: LAST_BREAK %r14 1061: LAST_BREAK %r14
106 UPDATE_VTIME %r14,%r15,\timer 107 UPDATE_VTIME %r14,%r15,\timer
108 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
1072: lg %r15,__LC_ASYNC_STACK # load async stack 1092: lg %r15,__LC_ASYNC_STACK # load async stack
1083: la %r11,STACK_FRAME_OVERHEAD(%r15) 1103: la %r11,STACK_FRAME_OVERHEAD(%r15)
109 .endm 111 .endm
@@ -162,8 +164,79 @@ _PIF_WORK = (_PIF_PER_TRAP)
162 tm off+\addr, \mask 164 tm off+\addr, \mask
163 .endm 165 .endm
164 166
167 .macro BPOFF
168 .pushsection .altinstr_replacement, "ax"
169660: .long 0xb2e8c000
170 .popsection
171661: .long 0x47000000
172 .pushsection .altinstructions, "a"
173 .long 661b - .
174 .long 660b - .
175 .word 82
176 .byte 4
177 .byte 4
178 .popsection
179 .endm
180
181 .macro BPON
182 .pushsection .altinstr_replacement, "ax"
183662: .long 0xb2e8d000
184 .popsection
185663: .long 0x47000000
186 .pushsection .altinstructions, "a"
187 .long 663b - .
188 .long 662b - .
189 .word 82
190 .byte 4
191 .byte 4
192 .popsection
193 .endm
194
195 .macro BPENTER tif_ptr,tif_mask
196 .pushsection .altinstr_replacement, "ax"
197662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop
198 .word 0xc004, 0x0000, 0x0000 # 6 byte nop
199 .popsection
200664: TSTMSK \tif_ptr,\tif_mask
201 jz . + 8
202 .long 0xb2e8d000
203 .pushsection .altinstructions, "a"
204 .long 664b - .
205 .long 662b - .
206 .word 82
207 .byte 12
208 .byte 12
209 .popsection
210 .endm
211
212 .macro BPEXIT tif_ptr,tif_mask
213 TSTMSK \tif_ptr,\tif_mask
214 .pushsection .altinstr_replacement, "ax"
215662: jnz . + 8
216 .long 0xb2e8d000
217 .popsection
218664: jz . + 8
219 .long 0xb2e8c000
220 .pushsection .altinstructions, "a"
221 .long 664b - .
222 .long 662b - .
223 .word 82
224 .byte 8
225 .byte 8
226 .popsection
227 .endm
228
229 GEN_BR_THUNK %r9
230 GEN_BR_THUNK %r14
231 GEN_BR_THUNK %r14,%r11
232
165 .section .kprobes.text, "ax" 233 .section .kprobes.text, "ax"
166 234
235ENTRY(__bpon)
236 .globl __bpon
237 BPON
238 BR_EX %r14
239
167/* 240/*
168 * Scheduler resume function, called by switch_to 241 * Scheduler resume function, called by switch_to
169 * gpr2 = (task_struct *) prev 242 * gpr2 = (task_struct *) prev
@@ -190,9 +263,9 @@ ENTRY(__switch_to)
190 mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next 263 mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
191 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 264 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
192 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP 265 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
193 bzr %r14 266 jz 0f
194 .insn s,0xb2800000,__LC_LPP # set program parameter 267 .insn s,0xb2800000,__LC_LPP # set program parameter
195 br %r14 2680: BR_EX %r14
196 269
197.L__critical_start: 270.L__critical_start:
198 271
@@ -204,9 +277,11 @@ ENTRY(__switch_to)
204 */ 277 */
205ENTRY(sie64a) 278ENTRY(sie64a)
206 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers 279 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
280 lg %r12,__LC_CURRENT
207 stg %r2,__SF_EMPTY(%r15) # save control block pointer 281 stg %r2,__SF_EMPTY(%r15) # save control block pointer
208 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area 282 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
209 xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0 283 xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
284 mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
210 TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ? 285 TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
211 jno .Lsie_load_guest_gprs 286 jno .Lsie_load_guest_gprs
212 brasl %r14,load_fpu_regs # load guest fp/vx regs 287 brasl %r14,load_fpu_regs # load guest fp/vx regs
@@ -223,7 +298,11 @@ ENTRY(sie64a)
223 jnz .Lsie_skip 298 jnz .Lsie_skip
224 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 299 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
225 jo .Lsie_skip # exit if fp/vx regs changed 300 jo .Lsie_skip # exit if fp/vx regs changed
301 BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
226 sie 0(%r14) 302 sie 0(%r14)
303.Lsie_exit:
304 BPOFF
305 BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
227.Lsie_skip: 306.Lsie_skip:
228 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 307 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
229 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 308 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
@@ -244,9 +323,15 @@ ENTRY(sie64a)
244sie_exit: 323sie_exit:
245 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area 324 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
246 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 325 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
326 xgr %r0,%r0 # clear guest registers to
327 xgr %r1,%r1 # prevent speculative use
328 xgr %r2,%r2
329 xgr %r3,%r3
330 xgr %r4,%r4
331 xgr %r5,%r5
247 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 332 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
248 lg %r2,__SF_EMPTY+16(%r15) # return exit reason code 333 lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
249 br %r14 334 BR_EX %r14
250.Lsie_fault: 335.Lsie_fault:
251 lghi %r14,-EFAULT 336 lghi %r14,-EFAULT
252 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code 337 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
@@ -267,6 +352,7 @@ ENTRY(system_call)
267 stpt __LC_SYNC_ENTER_TIMER 352 stpt __LC_SYNC_ENTER_TIMER
268.Lsysc_stmg: 353.Lsysc_stmg:
269 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 354 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
355 BPOFF
270 lg %r10,__LC_LAST_BREAK 356 lg %r10,__LC_LAST_BREAK
271 lg %r12,__LC_THREAD_INFO 357 lg %r12,__LC_THREAD_INFO
272 lghi %r14,_PIF_SYSCALL 358 lghi %r14,_PIF_SYSCALL
@@ -276,12 +362,15 @@ ENTRY(system_call)
276 LAST_BREAK %r13 362 LAST_BREAK %r13
277.Lsysc_vtime: 363.Lsysc_vtime:
278 UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER 364 UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
365 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
279 stmg %r0,%r7,__PT_R0(%r11) 366 stmg %r0,%r7,__PT_R0(%r11)
280 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 367 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
281 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW 368 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
282 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 369 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
283 stg %r14,__PT_FLAGS(%r11) 370 stg %r14,__PT_FLAGS(%r11)
284.Lsysc_do_svc: 371.Lsysc_do_svc:
372 # clear user controlled register to prevent speculative use
373 xgr %r0,%r0
285 lg %r10,__TI_sysc_table(%r12) # address of system call table 374 lg %r10,__TI_sysc_table(%r12) # address of system call table
286 llgh %r8,__PT_INT_CODE+2(%r11) 375 llgh %r8,__PT_INT_CODE+2(%r11)
287 slag %r8,%r8,2 # shift and test for svc 0 376 slag %r8,%r8,2 # shift and test for svc 0
@@ -299,7 +388,7 @@ ENTRY(system_call)
299 lgf %r9,0(%r8,%r10) # get system call add. 388 lgf %r9,0(%r8,%r10) # get system call add.
300 TSTMSK __TI_flags(%r12),_TIF_TRACE 389 TSTMSK __TI_flags(%r12),_TIF_TRACE
301 jnz .Lsysc_tracesys 390 jnz .Lsysc_tracesys
302 basr %r14,%r9 # call sys_xxxx 391 BASR_EX %r14,%r9 # call sys_xxxx
303 stg %r2,__PT_R2(%r11) # store return value 392 stg %r2,__PT_R2(%r11) # store return value
304 393
305.Lsysc_return: 394.Lsysc_return:
@@ -311,6 +400,7 @@ ENTRY(system_call)
311 jnz .Lsysc_work # check for work 400 jnz .Lsysc_work # check for work
312 TSTMSK __LC_CPU_FLAGS,_CIF_WORK 401 TSTMSK __LC_CPU_FLAGS,_CIF_WORK
313 jnz .Lsysc_work 402 jnz .Lsysc_work
403 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
314.Lsysc_restore: 404.Lsysc_restore:
315 lg %r14,__LC_VDSO_PER_CPU 405 lg %r14,__LC_VDSO_PER_CPU
316 lmg %r0,%r10,__PT_R0(%r11) 406 lmg %r0,%r10,__PT_R0(%r11)
@@ -438,7 +528,7 @@ ENTRY(system_call)
438 lmg %r3,%r7,__PT_R3(%r11) 528 lmg %r3,%r7,__PT_R3(%r11)
439 stg %r7,STACK_FRAME_OVERHEAD(%r15) 529 stg %r7,STACK_FRAME_OVERHEAD(%r15)
440 lg %r2,__PT_ORIG_GPR2(%r11) 530 lg %r2,__PT_ORIG_GPR2(%r11)
441 basr %r14,%r9 # call sys_xxx 531 BASR_EX %r14,%r9 # call sys_xxx
442 stg %r2,__PT_R2(%r11) # store return value 532 stg %r2,__PT_R2(%r11) # store return value
443.Lsysc_tracenogo: 533.Lsysc_tracenogo:
444 TSTMSK __TI_flags(%r12),_TIF_TRACE 534 TSTMSK __TI_flags(%r12),_TIF_TRACE
@@ -462,7 +552,7 @@ ENTRY(ret_from_fork)
462 lmg %r9,%r10,__PT_R9(%r11) # load gprs 552 lmg %r9,%r10,__PT_R9(%r11) # load gprs
463ENTRY(kernel_thread_starter) 553ENTRY(kernel_thread_starter)
464 la %r2,0(%r10) 554 la %r2,0(%r10)
465 basr %r14,%r9 555 BASR_EX %r14,%r9
466 j .Lsysc_tracenogo 556 j .Lsysc_tracenogo
467 557
468/* 558/*
@@ -471,6 +561,7 @@ ENTRY(kernel_thread_starter)
471 561
472ENTRY(pgm_check_handler) 562ENTRY(pgm_check_handler)
473 stpt __LC_SYNC_ENTER_TIMER 563 stpt __LC_SYNC_ENTER_TIMER
564 BPOFF
474 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 565 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
475 lg %r10,__LC_LAST_BREAK 566 lg %r10,__LC_LAST_BREAK
476 lg %r12,__LC_THREAD_INFO 567 lg %r12,__LC_THREAD_INFO
@@ -495,6 +586,7 @@ ENTRY(pgm_check_handler)
495 j 3f 586 j 3f
4962: LAST_BREAK %r14 5872: LAST_BREAK %r14
497 UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 588 UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
589 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
498 lg %r15,__LC_KERNEL_STACK 590 lg %r15,__LC_KERNEL_STACK
499 lg %r14,__TI_task(%r12) 591 lg %r14,__TI_task(%r12)
500 aghi %r14,__TASK_thread # pointer to thread_struct 592 aghi %r14,__TASK_thread # pointer to thread_struct
@@ -504,6 +596,15 @@ ENTRY(pgm_check_handler)
504 mvc __THREAD_trap_tdb(256,%r14),0(%r13) 596 mvc __THREAD_trap_tdb(256,%r14),0(%r13)
5053: la %r11,STACK_FRAME_OVERHEAD(%r15) 5973: la %r11,STACK_FRAME_OVERHEAD(%r15)
506 stmg %r0,%r7,__PT_R0(%r11) 598 stmg %r0,%r7,__PT_R0(%r11)
599 # clear user controlled registers to prevent speculative use
600 xgr %r0,%r0
601 xgr %r1,%r1
602 xgr %r2,%r2
603 xgr %r3,%r3
604 xgr %r4,%r4
605 xgr %r5,%r5
606 xgr %r6,%r6
607 xgr %r7,%r7
507 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 608 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
508 stmg %r8,%r9,__PT_PSW(%r11) 609 stmg %r8,%r9,__PT_PSW(%r11)
509 mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC 610 mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
@@ -525,9 +626,9 @@ ENTRY(pgm_check_handler)
525 nill %r10,0x007f 626 nill %r10,0x007f
526 sll %r10,2 627 sll %r10,2
527 je .Lpgm_return 628 je .Lpgm_return
528 lgf %r1,0(%r10,%r1) # load address of handler routine 629 lgf %r9,0(%r10,%r1) # load address of handler routine
529 lgr %r2,%r11 # pass pointer to pt_regs 630 lgr %r2,%r11 # pass pointer to pt_regs
530 basr %r14,%r1 # branch to interrupt-handler 631 BASR_EX %r14,%r9 # branch to interrupt-handler
531.Lpgm_return: 632.Lpgm_return:
532 LOCKDEP_SYS_EXIT 633 LOCKDEP_SYS_EXIT
533 tm __PT_PSW+1(%r11),0x01 # returning to user ? 634 tm __PT_PSW+1(%r11),0x01 # returning to user ?
@@ -560,6 +661,7 @@ ENTRY(pgm_check_handler)
560ENTRY(io_int_handler) 661ENTRY(io_int_handler)
561 STCK __LC_INT_CLOCK 662 STCK __LC_INT_CLOCK
562 stpt __LC_ASYNC_ENTER_TIMER 663 stpt __LC_ASYNC_ENTER_TIMER
664 BPOFF
563 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 665 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
564 lg %r10,__LC_LAST_BREAK 666 lg %r10,__LC_LAST_BREAK
565 lg %r12,__LC_THREAD_INFO 667 lg %r12,__LC_THREAD_INFO
@@ -567,6 +669,16 @@ ENTRY(io_int_handler)
567 lmg %r8,%r9,__LC_IO_OLD_PSW 669 lmg %r8,%r9,__LC_IO_OLD_PSW
568 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER 670 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
569 stmg %r0,%r7,__PT_R0(%r11) 671 stmg %r0,%r7,__PT_R0(%r11)
672 # clear user controlled registers to prevent speculative use
673 xgr %r0,%r0
674 xgr %r1,%r1
675 xgr %r2,%r2
676 xgr %r3,%r3
677 xgr %r4,%r4
678 xgr %r5,%r5
679 xgr %r6,%r6
680 xgr %r7,%r7
681 xgr %r10,%r10
570 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 682 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
571 stmg %r8,%r9,__PT_PSW(%r11) 683 stmg %r8,%r9,__PT_PSW(%r11)
572 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 684 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
@@ -601,9 +713,13 @@ ENTRY(io_int_handler)
601 lg %r14,__LC_VDSO_PER_CPU 713 lg %r14,__LC_VDSO_PER_CPU
602 lmg %r0,%r10,__PT_R0(%r11) 714 lmg %r0,%r10,__PT_R0(%r11)
603 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 715 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
716 tm __PT_PSW+1(%r11),0x01 # returning to user ?
717 jno .Lio_exit_kernel
718 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
604.Lio_exit_timer: 719.Lio_exit_timer:
605 stpt __LC_EXIT_TIMER 720 stpt __LC_EXIT_TIMER
606 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 721 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
722.Lio_exit_kernel:
607 lmg %r11,%r15,__PT_R11(%r11) 723 lmg %r11,%r15,__PT_R11(%r11)
608 lpswe __LC_RETURN_PSW 724 lpswe __LC_RETURN_PSW
609.Lio_done: 725.Lio_done:
@@ -735,6 +851,7 @@ ENTRY(io_int_handler)
735ENTRY(ext_int_handler) 851ENTRY(ext_int_handler)
736 STCK __LC_INT_CLOCK 852 STCK __LC_INT_CLOCK
737 stpt __LC_ASYNC_ENTER_TIMER 853 stpt __LC_ASYNC_ENTER_TIMER
854 BPOFF
738 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 855 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
739 lg %r10,__LC_LAST_BREAK 856 lg %r10,__LC_LAST_BREAK
740 lg %r12,__LC_THREAD_INFO 857 lg %r12,__LC_THREAD_INFO
@@ -742,6 +859,16 @@ ENTRY(ext_int_handler)
742 lmg %r8,%r9,__LC_EXT_OLD_PSW 859 lmg %r8,%r9,__LC_EXT_OLD_PSW
743 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER 860 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
744 stmg %r0,%r7,__PT_R0(%r11) 861 stmg %r0,%r7,__PT_R0(%r11)
862 # clear user controlled registers to prevent speculative use
863 xgr %r0,%r0
864 xgr %r1,%r1
865 xgr %r2,%r2
866 xgr %r3,%r3
867 xgr %r4,%r4
868 xgr %r5,%r5
869 xgr %r6,%r6
870 xgr %r7,%r7
871 xgr %r10,%r10
745 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 872 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
746 stmg %r8,%r9,__PT_PSW(%r11) 873 stmg %r8,%r9,__PT_PSW(%r11)
747 lghi %r1,__LC_EXT_PARAMS2 874 lghi %r1,__LC_EXT_PARAMS2
@@ -773,11 +900,12 @@ ENTRY(psw_idle)
773 .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) 900 .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
774.Lpsw_idle_stcctm: 901.Lpsw_idle_stcctm:
775#endif 902#endif
903 BPON
776 STCK __CLOCK_IDLE_ENTER(%r2) 904 STCK __CLOCK_IDLE_ENTER(%r2)
777 stpt __TIMER_IDLE_ENTER(%r2) 905 stpt __TIMER_IDLE_ENTER(%r2)
778.Lpsw_idle_lpsw: 906.Lpsw_idle_lpsw:
779 lpswe __SF_EMPTY(%r15) 907 lpswe __SF_EMPTY(%r15)
780 br %r14 908 BR_EX %r14
781.Lpsw_idle_end: 909.Lpsw_idle_end:
782 910
783/* 911/*
@@ -791,7 +919,7 @@ ENTRY(save_fpu_regs)
791 lg %r2,__LC_CURRENT 919 lg %r2,__LC_CURRENT
792 aghi %r2,__TASK_thread 920 aghi %r2,__TASK_thread
793 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 921 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
794 bor %r14 922 jo .Lsave_fpu_regs_exit
795 stfpc __THREAD_FPU_fpc(%r2) 923 stfpc __THREAD_FPU_fpc(%r2)
796.Lsave_fpu_regs_fpc_end: 924.Lsave_fpu_regs_fpc_end:
797 lg %r3,__THREAD_FPU_regs(%r2) 925 lg %r3,__THREAD_FPU_regs(%r2)
@@ -821,7 +949,8 @@ ENTRY(save_fpu_regs)
821 std 15,120(%r3) 949 std 15,120(%r3)
822.Lsave_fpu_regs_done: 950.Lsave_fpu_regs_done:
823 oi __LC_CPU_FLAGS+7,_CIF_FPU 951 oi __LC_CPU_FLAGS+7,_CIF_FPU
824 br %r14 952.Lsave_fpu_regs_exit:
953 BR_EX %r14
825.Lsave_fpu_regs_end: 954.Lsave_fpu_regs_end:
826 955
827/* 956/*
@@ -838,7 +967,7 @@ load_fpu_regs:
838 lg %r4,__LC_CURRENT 967 lg %r4,__LC_CURRENT
839 aghi %r4,__TASK_thread 968 aghi %r4,__TASK_thread
840 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 969 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
841 bnor %r14 970 jno .Lload_fpu_regs_exit
842 lfpc __THREAD_FPU_fpc(%r4) 971 lfpc __THREAD_FPU_fpc(%r4)
843 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 972 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
844 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area 973 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
@@ -867,7 +996,8 @@ load_fpu_regs:
867 ld 15,120(%r4) 996 ld 15,120(%r4)
868.Lload_fpu_regs_done: 997.Lload_fpu_regs_done:
869 ni __LC_CPU_FLAGS+7,255-_CIF_FPU 998 ni __LC_CPU_FLAGS+7,255-_CIF_FPU
870 br %r14 999.Lload_fpu_regs_exit:
1000 BR_EX %r14
871.Lload_fpu_regs_end: 1001.Lload_fpu_regs_end:
872 1002
873.L__critical_end: 1003.L__critical_end:
@@ -877,6 +1007,7 @@ load_fpu_regs:
877 */ 1007 */
878ENTRY(mcck_int_handler) 1008ENTRY(mcck_int_handler)
879 STCK __LC_MCCK_CLOCK 1009 STCK __LC_MCCK_CLOCK
1010 BPOFF
880 la %r1,4095 # revalidate r1 1011 la %r1,4095 # revalidate r1
881 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer 1012 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
882 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs 1013 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
@@ -908,6 +1039,16 @@ ENTRY(mcck_int_handler)
908.Lmcck_skip: 1039.Lmcck_skip:
909 lghi %r14,__LC_GPREGS_SAVE_AREA+64 1040 lghi %r14,__LC_GPREGS_SAVE_AREA+64
910 stmg %r0,%r7,__PT_R0(%r11) 1041 stmg %r0,%r7,__PT_R0(%r11)
1042 # clear user controlled registers to prevent speculative use
1043 xgr %r0,%r0
1044 xgr %r1,%r1
1045 xgr %r2,%r2
1046 xgr %r3,%r3
1047 xgr %r4,%r4
1048 xgr %r5,%r5
1049 xgr %r6,%r6
1050 xgr %r7,%r7
1051 xgr %r10,%r10
911 mvc __PT_R8(64,%r11),0(%r14) 1052 mvc __PT_R8(64,%r11),0(%r14)
912 stmg %r8,%r9,__PT_PSW(%r11) 1053 stmg %r8,%r9,__PT_PSW(%r11)
913 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 1054 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
@@ -933,6 +1074,7 @@ ENTRY(mcck_int_handler)
933 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW 1074 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
934 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 1075 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
935 jno 0f 1076 jno 0f
1077 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
936 stpt __LC_EXIT_TIMER 1078 stpt __LC_EXIT_TIMER
937 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 1079 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
9380: lmg %r11,%r15,__PT_R11(%r11) 10800: lmg %r11,%r15,__PT_R11(%r11)
@@ -1028,7 +1170,7 @@ cleanup_critical:
1028 jl 0f 1170 jl 0f
1029 clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end 1171 clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
1030 jl .Lcleanup_load_fpu_regs 1172 jl .Lcleanup_load_fpu_regs
10310: br %r14 11730: BR_EX %r14,%r11
1032 1174
1033 .align 8 1175 .align 8
1034.Lcleanup_table: 1176.Lcleanup_table:
@@ -1053,11 +1195,12 @@ cleanup_critical:
1053 .quad .Lsie_done 1195 .quad .Lsie_done
1054 1196
1055.Lcleanup_sie: 1197.Lcleanup_sie:
1198 BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
1056 lg %r9,__SF_EMPTY(%r15) # get control block pointer 1199 lg %r9,__SF_EMPTY(%r15) # get control block pointer
1057 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 1200 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
1058 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1201 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1059 larl %r9,sie_exit # skip forward to sie_exit 1202 larl %r9,sie_exit # skip forward to sie_exit
1060 br %r14 1203 BR_EX %r14,%r11
1061#endif 1204#endif
1062 1205
1063.Lcleanup_system_call: 1206.Lcleanup_system_call:
@@ -1099,7 +1242,8 @@ cleanup_critical:
1099 srag %r9,%r9,23 1242 srag %r9,%r9,23
1100 jz 0f 1243 jz 0f
1101 mvc __TI_last_break(8,%r12),16(%r11) 1244 mvc __TI_last_break(8,%r12),16(%r11)
11020: # set up saved register r11 12450: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
1246 # set up saved register r11
1103 lg %r15,__LC_KERNEL_STACK 1247 lg %r15,__LC_KERNEL_STACK
1104 la %r9,STACK_FRAME_OVERHEAD(%r15) 1248 la %r9,STACK_FRAME_OVERHEAD(%r15)
1105 stg %r9,24(%r11) # r11 pt_regs pointer 1249 stg %r9,24(%r11) # r11 pt_regs pointer
@@ -1114,7 +1258,7 @@ cleanup_critical:
1114 stg %r15,56(%r11) # r15 stack pointer 1258 stg %r15,56(%r11) # r15 stack pointer
1115 # set new psw address and exit 1259 # set new psw address and exit
1116 larl %r9,.Lsysc_do_svc 1260 larl %r9,.Lsysc_do_svc
1117 br %r14 1261 BR_EX %r14,%r11
1118.Lcleanup_system_call_insn: 1262.Lcleanup_system_call_insn:
1119 .quad system_call 1263 .quad system_call
1120 .quad .Lsysc_stmg 1264 .quad .Lsysc_stmg
@@ -1124,7 +1268,7 @@ cleanup_critical:
1124 1268
1125.Lcleanup_sysc_tif: 1269.Lcleanup_sysc_tif:
1126 larl %r9,.Lsysc_tif 1270 larl %r9,.Lsysc_tif
1127 br %r14 1271 BR_EX %r14,%r11
1128 1272
1129.Lcleanup_sysc_restore: 1273.Lcleanup_sysc_restore:
1130 # check if stpt has been executed 1274 # check if stpt has been executed
@@ -1141,14 +1285,14 @@ cleanup_critical:
1141 mvc 0(64,%r11),__PT_R8(%r9) 1285 mvc 0(64,%r11),__PT_R8(%r9)
1142 lmg %r0,%r7,__PT_R0(%r9) 1286 lmg %r0,%r7,__PT_R0(%r9)
11431: lmg %r8,%r9,__LC_RETURN_PSW 12871: lmg %r8,%r9,__LC_RETURN_PSW
1144 br %r14 1288 BR_EX %r14,%r11
1145.Lcleanup_sysc_restore_insn: 1289.Lcleanup_sysc_restore_insn:
1146 .quad .Lsysc_exit_timer 1290 .quad .Lsysc_exit_timer
1147 .quad .Lsysc_done - 4 1291 .quad .Lsysc_done - 4
1148 1292
1149.Lcleanup_io_tif: 1293.Lcleanup_io_tif:
1150 larl %r9,.Lio_tif 1294 larl %r9,.Lio_tif
1151 br %r14 1295 BR_EX %r14,%r11
1152 1296
1153.Lcleanup_io_restore: 1297.Lcleanup_io_restore:
1154 # check if stpt has been executed 1298 # check if stpt has been executed
@@ -1162,7 +1306,7 @@ cleanup_critical:
1162 mvc 0(64,%r11),__PT_R8(%r9) 1306 mvc 0(64,%r11),__PT_R8(%r9)
1163 lmg %r0,%r7,__PT_R0(%r9) 1307 lmg %r0,%r7,__PT_R0(%r9)
11641: lmg %r8,%r9,__LC_RETURN_PSW 13081: lmg %r8,%r9,__LC_RETURN_PSW
1165 br %r14 1309 BR_EX %r14,%r11
1166.Lcleanup_io_restore_insn: 1310.Lcleanup_io_restore_insn:
1167 .quad .Lio_exit_timer 1311 .quad .Lio_exit_timer
1168 .quad .Lio_done - 4 1312 .quad .Lio_done - 4
@@ -1214,17 +1358,17 @@ cleanup_critical:
1214 # prepare return psw 1358 # prepare return psw
1215 nihh %r8,0xfcfd # clear irq & wait state bits 1359 nihh %r8,0xfcfd # clear irq & wait state bits
1216 lg %r9,48(%r11) # return from psw_idle 1360 lg %r9,48(%r11) # return from psw_idle
1217 br %r14 1361 BR_EX %r14,%r11
1218.Lcleanup_idle_insn: 1362.Lcleanup_idle_insn:
1219 .quad .Lpsw_idle_lpsw 1363 .quad .Lpsw_idle_lpsw
1220 1364
1221.Lcleanup_save_fpu_regs: 1365.Lcleanup_save_fpu_regs:
1222 larl %r9,save_fpu_regs 1366 larl %r9,save_fpu_regs
1223 br %r14 1367 BR_EX %r14,%r11
1224 1368
1225.Lcleanup_load_fpu_regs: 1369.Lcleanup_load_fpu_regs:
1226 larl %r9,load_fpu_regs 1370 larl %r9,load_fpu_regs
1227 br %r14 1371 BR_EX %r14,%r11
1228 1372
1229/* 1373/*
1230 * Integer constants 1374 * Integer constants
@@ -1240,7 +1384,6 @@ cleanup_critical:
1240.Lsie_critical_length: 1384.Lsie_critical_length:
1241 .quad .Lsie_done - .Lsie_gmap 1385 .quad .Lsie_done - .Lsie_gmap
1242#endif 1386#endif
1243
1244 .section .rodata, "a" 1387 .section .rodata, "a"
1245#define SYSCALL(esame,emu) .long esame 1388#define SYSCALL(esame,emu) .long esame
1246 .globl sys_call_table 1389 .globl sys_call_table
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 42570d8fb265..837bb301023f 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -563,6 +563,7 @@ static struct kset *ipl_kset;
563 563
564static void __ipl_run(void *unused) 564static void __ipl_run(void *unused)
565{ 565{
566 __bpon();
566 diag308(DIAG308_IPL, NULL); 567 diag308(DIAG308_IPL, NULL);
567 if (MACHINE_IS_VM) 568 if (MACHINE_IS_VM)
568 __cpcmd("IPL", NULL, 0, NULL); 569 __cpcmd("IPL", NULL, 0, NULL);
@@ -798,6 +799,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
798 /* copy and convert to ebcdic */ 799 /* copy and convert to ebcdic */
799 memcpy(ipb->hdr.loadparm, buf, lp_len); 800 memcpy(ipb->hdr.loadparm, buf, lp_len);
800 ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN); 801 ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
802 ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
801 return len; 803 return len;
802} 804}
803 805
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index f41d5208aaf7..590e9394b4dd 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -173,10 +173,9 @@ void do_softirq_own_stack(void)
173 new -= STACK_FRAME_OVERHEAD; 173 new -= STACK_FRAME_OVERHEAD;
174 ((struct stack_frame *) new)->back_chain = old; 174 ((struct stack_frame *) new)->back_chain = old;
175 asm volatile(" la 15,0(%0)\n" 175 asm volatile(" la 15,0(%0)\n"
176 " basr 14,%2\n" 176 " brasl 14,__do_softirq\n"
177 " la 15,0(%1)\n" 177 " la 15,0(%1)\n"
178 : : "a" (new), "a" (old), 178 : : "a" (new), "a" (old)
179 "a" (__do_softirq)
180 : "0", "1", "2", "3", "4", "5", "14", 179 : "0", "1", "2", "3", "4", "5", "14",
181 "cc", "memory" ); 180 "cc", "memory" );
182 } else { 181 } else {
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index e499370fbccb..6c1c7d399bf9 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -8,12 +8,16 @@
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/asm-offsets.h> 9#include <asm/asm-offsets.h>
10#include <asm/ftrace.h> 10#include <asm/ftrace.h>
11#include <asm/nospec-insn.h>
11#include <asm/ptrace.h> 12#include <asm/ptrace.h>
12 13
14 GEN_BR_THUNK %r1
15 GEN_BR_THUNK %r14
16
13 .section .kprobes.text, "ax" 17 .section .kprobes.text, "ax"
14 18
15ENTRY(ftrace_stub) 19ENTRY(ftrace_stub)
16 br %r14 20 BR_EX %r14
17 21
18#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE) 22#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
19#define STACK_PTREGS (STACK_FRAME_OVERHEAD) 23#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
@@ -21,7 +25,7 @@ ENTRY(ftrace_stub)
21#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) 25#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
22 26
23ENTRY(_mcount) 27ENTRY(_mcount)
24 br %r14 28 BR_EX %r14
25 29
26ENTRY(ftrace_caller) 30ENTRY(ftrace_caller)
27 .globl ftrace_regs_caller 31 .globl ftrace_regs_caller
@@ -49,7 +53,7 @@ ENTRY(ftrace_caller)
49#endif 53#endif
50 lgr %r3,%r14 54 lgr %r3,%r14
51 la %r5,STACK_PTREGS(%r15) 55 la %r5,STACK_PTREGS(%r15)
52 basr %r14,%r1 56 BASR_EX %r14,%r1
53#ifdef CONFIG_FUNCTION_GRAPH_TRACER 57#ifdef CONFIG_FUNCTION_GRAPH_TRACER
54# The j instruction gets runtime patched to a nop instruction. 58# The j instruction gets runtime patched to a nop instruction.
55# See ftrace_enable_ftrace_graph_caller. 59# See ftrace_enable_ftrace_graph_caller.
@@ -64,7 +68,7 @@ ftrace_graph_caller_end:
64#endif 68#endif
65 lg %r1,(STACK_PTREGS_PSW+8)(%r15) 69 lg %r1,(STACK_PTREGS_PSW+8)(%r15)
66 lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15) 70 lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
67 br %r1 71 BR_EX %r1
68 72
69#ifdef CONFIG_FUNCTION_GRAPH_TRACER 73#ifdef CONFIG_FUNCTION_GRAPH_TRACER
70 74
@@ -77,6 +81,6 @@ ENTRY(return_to_handler)
77 aghi %r15,STACK_FRAME_OVERHEAD 81 aghi %r15,STACK_FRAME_OVERHEAD
78 lgr %r14,%r2 82 lgr %r14,%r2
79 lmg %r2,%r5,32(%r15) 83 lmg %r2,%r5,32(%r15)
80 br %r14 84 BR_EX %r14
81 85
82#endif 86#endif
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 0c1a679314dd..9bd1933848b8 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -31,6 +31,9 @@
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/moduleloader.h> 32#include <linux/moduleloader.h>
33#include <linux/bug.h> 33#include <linux/bug.h>
34#include <asm/alternative.h>
35#include <asm/nospec-branch.h>
36#include <asm/facility.h>
34 37
35#if 0 38#if 0
36#define DEBUGP printk 39#define DEBUGP printk
@@ -163,7 +166,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
163 me->arch.got_offset = me->core_size; 166 me->arch.got_offset = me->core_size;
164 me->core_size += me->arch.got_size; 167 me->core_size += me->arch.got_size;
165 me->arch.plt_offset = me->core_size; 168 me->arch.plt_offset = me->core_size;
166 me->core_size += me->arch.plt_size; 169 if (me->arch.plt_size) {
170 if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
171 me->arch.plt_size += PLT_ENTRY_SIZE;
172 me->core_size += me->arch.plt_size;
173 }
167 return 0; 174 return 0;
168} 175}
169 176
@@ -317,9 +324,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
317 unsigned int *ip; 324 unsigned int *ip;
318 ip = me->module_core + me->arch.plt_offset + 325 ip = me->module_core + me->arch.plt_offset +
319 info->plt_offset; 326 info->plt_offset;
320 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ 327 ip[0] = 0x0d10e310; /* basr 1,0 */
321 ip[1] = 0x100a0004; 328 ip[1] = 0x100a0004; /* lg 1,10(1) */
322 ip[2] = 0x07f10000; 329 if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
330 unsigned int *ij;
331 ij = me->module_core +
332 me->arch.plt_offset +
333 me->arch.plt_size - PLT_ENTRY_SIZE;
334 ip[2] = 0xa7f40000 + /* j __jump_r1 */
335 (unsigned int)(u16)
336 (((unsigned long) ij - 8 -
337 (unsigned long) ip) / 2);
338 } else {
339 ip[2] = 0x07f10000; /* br %r1 */
340 }
323 ip[3] = (unsigned int) (val >> 32); 341 ip[3] = (unsigned int) (val >> 32);
324 ip[4] = (unsigned int) val; 342 ip[4] = (unsigned int) val;
325 info->plt_initialized = 1; 343 info->plt_initialized = 1;
@@ -424,6 +442,45 @@ int module_finalize(const Elf_Ehdr *hdr,
424 const Elf_Shdr *sechdrs, 442 const Elf_Shdr *sechdrs,
425 struct module *me) 443 struct module *me)
426{ 444{
445 const Elf_Shdr *s;
446 char *secstrings, *secname;
447 void *aseg;
448
449 if (IS_ENABLED(CONFIG_EXPOLINE) &&
450 !nospec_disable && me->arch.plt_size) {
451 unsigned int *ij;
452
453 ij = me->module_core + me->arch.plt_offset +
454 me->arch.plt_size - PLT_ENTRY_SIZE;
455 if (test_facility(35)) {
456 ij[0] = 0xc6000000; /* exrl %r0,.+10 */
457 ij[1] = 0x0005a7f4; /* j . */
458 ij[2] = 0x000007f1; /* br %r1 */
459 } else {
460 ij[0] = 0x44000000 | (unsigned int)
461 offsetof(struct _lowcore, br_r1_trampoline);
462 ij[1] = 0xa7f40000; /* j . */
463 }
464 }
465
466 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
467 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
468 aseg = (void *) s->sh_addr;
469 secname = secstrings + s->sh_name;
470
471 if (!strcmp(".altinstructions", secname))
472 /* patch .altinstructions */
473 apply_alternatives(aseg, aseg + s->sh_size);
474
475 if (IS_ENABLED(CONFIG_EXPOLINE) &&
476 (!strncmp(".s390_indirect", secname, 14)))
477 nospec_revert(aseg, aseg + s->sh_size);
478
479 if (IS_ENABLED(CONFIG_EXPOLINE) &&
480 (!strncmp(".s390_return", secname, 12)))
481 nospec_revert(aseg, aseg + s->sh_size);
482 }
483
427 jump_label_apply_nops(me); 484 jump_label_apply_nops(me);
428 vfree(me->arch.syminfo); 485 vfree(me->arch.syminfo);
429 me->arch.syminfo = NULL; 486 me->arch.syminfo = NULL;
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
new file mode 100644
index 000000000000..d5eed651b5ab
--- /dev/null
+++ b/arch/s390/kernel/nospec-branch.c
@@ -0,0 +1,166 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/module.h>
3#include <linux/device.h>
4#include <asm/facility.h>
5#include <asm/nospec-branch.h>
6
7static int __init nobp_setup_early(char *str)
8{
9 bool enabled;
10 int rc;
11
12 rc = kstrtobool(str, &enabled);
13 if (rc)
14 return rc;
15 if (enabled && test_facility(82)) {
16 /*
17 * The user explicitely requested nobp=1, enable it and
18 * disable the expoline support.
19 */
20 __set_facility(82, S390_lowcore.alt_stfle_fac_list);
21 if (IS_ENABLED(CONFIG_EXPOLINE))
22 nospec_disable = 1;
23 } else {
24 __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
25 }
26 return 0;
27}
28early_param("nobp", nobp_setup_early);
29
30static int __init nospec_setup_early(char *str)
31{
32 __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
33 return 0;
34}
35early_param("nospec", nospec_setup_early);
36
37static int __init nospec_report(void)
38{
39 if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
40 pr_info("Spectre V2 mitigation: execute trampolines.\n");
41 if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
42 pr_info("Spectre V2 mitigation: limited branch prediction.\n");
43 return 0;
44}
45arch_initcall(nospec_report);
46
47#ifdef CONFIG_EXPOLINE
48
49int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
50
51static int __init nospectre_v2_setup_early(char *str)
52{
53 nospec_disable = 1;
54 return 0;
55}
56early_param("nospectre_v2", nospectre_v2_setup_early);
57
58void __init nospec_auto_detect(void)
59{
60 if (IS_ENABLED(CC_USING_EXPOLINE)) {
61 /*
62 * The kernel has been compiled with expolines.
63 * Keep expolines enabled and disable nobp.
64 */
65 nospec_disable = 0;
66 __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
67 }
68 /*
69 * If the kernel has not been compiled with expolines the
70 * nobp setting decides what is done, this depends on the
71 * CONFIG_KERNEL_NP option and the nobp/nospec parameters.
72 */
73}
74
75static int __init spectre_v2_setup_early(char *str)
76{
77 if (str && !strncmp(str, "on", 2)) {
78 nospec_disable = 0;
79 __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
80 }
81 if (str && !strncmp(str, "off", 3))
82 nospec_disable = 1;
83 if (str && !strncmp(str, "auto", 4))
84 nospec_auto_detect();
85 return 0;
86}
87early_param("spectre_v2", spectre_v2_setup_early);
88
89static void __init_or_module __nospec_revert(s32 *start, s32 *end)
90{
91 enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
92 u8 *instr, *thunk, *br;
93 u8 insnbuf[6];
94 s32 *epo;
95
96 /* Second part of the instruction replace is always a nop */
97 for (epo = start; epo < end; epo++) {
98 instr = (u8 *) epo + *epo;
99 if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
100 type = BRCL_EXPOLINE; /* brcl instruction */
101 else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
102 type = BRASL_EXPOLINE; /* brasl instruction */
103 else
104 continue;
105 thunk = instr + (*(int *)(instr + 2)) * 2;
106 if (thunk[0] == 0xc6 && thunk[1] == 0x00)
107 /* exrl %r0,<target-br> */
108 br = thunk + (*(int *)(thunk + 2)) * 2;
109 else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
110 thunk[6] == 0x44 && thunk[7] == 0x00 &&
111 (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
112 (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
113 /* larl %rx,<target br> + ex %r0,0(%rx) */
114 br = thunk + (*(int *)(thunk + 2)) * 2;
115 else
116 continue;
117 /* Check for unconditional branch 0x07f? or 0x47f???? */
118 if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
119 continue;
120
121 memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
122 switch (type) {
123 case BRCL_EXPOLINE:
124 insnbuf[0] = br[0];
125 insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
126 if (br[0] == 0x47) {
127 /* brcl to b, replace with bc + nopr */
128 insnbuf[2] = br[2];
129 insnbuf[3] = br[3];
130 } else {
131 /* brcl to br, replace with bcr + nop */
132 }
133 break;
134 case BRASL_EXPOLINE:
135 insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
136 if (br[0] == 0x47) {
137 /* brasl to b, replace with bas + nopr */
138 insnbuf[0] = 0x4d;
139 insnbuf[2] = br[2];
140 insnbuf[3] = br[3];
141 } else {
142 /* brasl to br, replace with basr + nop */
143 insnbuf[0] = 0x0d;
144 }
145 break;
146 }
147
148 s390_kernel_write(instr, insnbuf, 6);
149 }
150}
151
152void __init_or_module nospec_revert(s32 *start, s32 *end)
153{
154 if (nospec_disable)
155 __nospec_revert(start, end);
156}
157
158extern s32 __nospec_call_start[], __nospec_call_end[];
159extern s32 __nospec_return_start[], __nospec_return_end[];
160void __init nospec_init_branches(void)
161{
162 nospec_revert(__nospec_call_start, __nospec_call_end);
163 nospec_revert(__nospec_return_start, __nospec_return_end);
164}
165
166#endif /* CONFIG_EXPOLINE */
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
new file mode 100644
index 000000000000..8affad5f18cb
--- /dev/null
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -0,0 +1,21 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/device.h>
3#include <linux/cpu.h>
4#include <asm/facility.h>
5#include <asm/nospec-branch.h>
6
7ssize_t cpu_show_spectre_v1(struct device *dev,
8 struct device_attribute *attr, char *buf)
9{
10 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
11}
12
13ssize_t cpu_show_spectre_v2(struct device *dev,
14 struct device_attribute *attr, char *buf)
15{
16 if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
17 return sprintf(buf, "Mitigation: execute trampolines\n");
18 if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
19 return sprintf(buf, "Mitigation: limited branch prediction\n");
20 return sprintf(buf, "Vulnerable\n");
21}
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 3d8da1e742c2..b79d51459cf2 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -744,6 +744,10 @@ static int __hw_perf_event_init(struct perf_event *event)
744 */ 744 */
745 rate = 0; 745 rate = 0;
746 if (attr->freq) { 746 if (attr->freq) {
747 if (!attr->sample_freq) {
748 err = -EINVAL;
749 goto out;
750 }
747 rate = freq_to_sample_rate(&si, attr->sample_freq); 751 rate = freq_to_sample_rate(&si, attr->sample_freq);
748 rate = hw_limit_rate(&si, rate); 752 rate = hw_limit_rate(&si, rate);
749 attr->freq = 0; 753 attr->freq = 0;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 7ce00e7a709a..ab236bd970bb 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -13,6 +13,7 @@
13#include <linux/cpu.h> 13#include <linux/cpu.h>
14#include <asm/diag.h> 14#include <asm/diag.h>
15#include <asm/elf.h> 15#include <asm/elf.h>
16#include <asm/facility.h>
16#include <asm/lowcore.h> 17#include <asm/lowcore.h>
17#include <asm/param.h> 18#include <asm/param.h>
18#include <asm/smp.h> 19#include <asm/smp.h>
@@ -113,3 +114,20 @@ const struct seq_operations cpuinfo_op = {
113 .show = show_cpuinfo, 114 .show = show_cpuinfo,
114}; 115};
115 116
117int s390_isolate_bp(void)
118{
119 if (!test_facility(82))
120 return -EOPNOTSUPP;
121 set_thread_flag(TIF_ISOLATE_BP);
122 return 0;
123}
124EXPORT_SYMBOL(s390_isolate_bp);
125
126int s390_isolate_bp_guest(void)
127{
128 if (!test_facility(82))
129 return -EOPNOTSUPP;
130 set_thread_flag(TIF_ISOLATE_BP_GUEST);
131 return 0;
132}
133EXPORT_SYMBOL(s390_isolate_bp_guest);
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 52aab0bd84f8..6b1b91c17b40 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -6,8 +6,11 @@
6 6
7#include <linux/linkage.h> 7#include <linux/linkage.h>
8#include <asm/asm-offsets.h> 8#include <asm/asm-offsets.h>
9#include <asm/nospec-insn.h>
9#include <asm/sigp.h> 10#include <asm/sigp.h>
10 11
12 GEN_BR_THUNK %r14
13
11# 14#
12# store_status 15# store_status
13# 16#
@@ -62,7 +65,7 @@ ENTRY(store_status)
62 st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1) 65 st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
63 larl %r2,store_status 66 larl %r2,store_status
64 stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) 67 stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
65 br %r14 68 BR_EX %r14
66 69
67 .section .bss 70 .section .bss
68 .align 8 71 .align 8
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d097d71685df..e7a43a30e3ff 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -63,6 +63,8 @@
63#include <asm/sclp.h> 63#include <asm/sclp.h>
64#include <asm/sysinfo.h> 64#include <asm/sysinfo.h>
65#include <asm/numa.h> 65#include <asm/numa.h>
66#include <asm/alternative.h>
67#include <asm/nospec-branch.h>
66#include "entry.h" 68#include "entry.h"
67 69
68/* 70/*
@@ -333,7 +335,9 @@ static void __init setup_lowcore(void)
333 lc->machine_flags = S390_lowcore.machine_flags; 335 lc->machine_flags = S390_lowcore.machine_flags;
334 lc->stfl_fac_list = S390_lowcore.stfl_fac_list; 336 lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
335 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, 337 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
336 MAX_FACILITY_BIT/8); 338 sizeof(lc->stfle_fac_list));
339 memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
340 sizeof(lc->alt_stfle_fac_list));
337 if (MACHINE_HAS_VX) 341 if (MACHINE_HAS_VX)
338 lc->vector_save_area_addr = 342 lc->vector_save_area_addr =
339 (unsigned long) &lc->vector_save_area; 343 (unsigned long) &lc->vector_save_area;
@@ -370,6 +374,7 @@ static void __init setup_lowcore(void)
370#ifdef CONFIG_SMP 374#ifdef CONFIG_SMP
371 lc->spinlock_lockval = arch_spin_lockval(0); 375 lc->spinlock_lockval = arch_spin_lockval(0);
372#endif 376#endif
377 lc->br_r1_trampoline = 0x07f1; /* br %r1 */
373 378
374 set_prefix((u32)(unsigned long) lc); 379 set_prefix((u32)(unsigned long) lc);
375 lowcore_ptr[0] = lc; 380 lowcore_ptr[0] = lc;
@@ -841,6 +846,9 @@ void __init setup_arch(char **cmdline_p)
841 init_mm.end_data = (unsigned long) &_edata; 846 init_mm.end_data = (unsigned long) &_edata;
842 init_mm.brk = (unsigned long) &_end; 847 init_mm.brk = (unsigned long) &_end;
843 848
849 if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
850 nospec_auto_detect();
851
844 parse_early_param(); 852 parse_early_param();
845 os_info_init(); 853 os_info_init();
846 setup_ipl(); 854 setup_ipl();
@@ -893,6 +901,10 @@ void __init setup_arch(char **cmdline_p)
893 conmode_default(); 901 conmode_default();
894 set_preferred_console(); 902 set_preferred_console();
895 903
904 apply_alternative_instructions();
905 if (IS_ENABLED(CONFIG_EXPOLINE))
906 nospec_init_branches();
907
896 /* Setup zfcpdump support */ 908 /* Setup zfcpdump support */
897 setup_zfcpdump(); 909 setup_zfcpdump();
898 910
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 9062df575afe..77f4f334a465 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -200,6 +200,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
200 lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; 200 lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
201 lc->cpu_nr = cpu; 201 lc->cpu_nr = cpu;
202 lc->spinlock_lockval = arch_spin_lockval(cpu); 202 lc->spinlock_lockval = arch_spin_lockval(cpu);
203 lc->br_r1_trampoline = 0x07f1; /* br %r1 */
203 if (MACHINE_HAS_VX) 204 if (MACHINE_HAS_VX)
204 lc->vector_save_area_addr = 205 lc->vector_save_area_addr =
205 (unsigned long) &lc->vector_save_area; 206 (unsigned long) &lc->vector_save_area;
@@ -250,7 +251,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
250 __ctl_store(lc->cregs_save_area, 0, 15); 251 __ctl_store(lc->cregs_save_area, 0, 15);
251 save_access_regs((unsigned int *) lc->access_regs_save_area); 252 save_access_regs((unsigned int *) lc->access_regs_save_area);
252 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, 253 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
253 MAX_FACILITY_BIT/8); 254 sizeof(lc->stfle_fac_list));
255 memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
256 sizeof(lc->alt_stfle_fac_list));
254} 257}
255 258
256static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) 259static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
@@ -299,6 +302,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
299 mem_assign_absolute(lc->restart_fn, (unsigned long) func); 302 mem_assign_absolute(lc->restart_fn, (unsigned long) func);
300 mem_assign_absolute(lc->restart_data, (unsigned long) data); 303 mem_assign_absolute(lc->restart_data, (unsigned long) data);
301 mem_assign_absolute(lc->restart_source, source_cpu); 304 mem_assign_absolute(lc->restart_source, source_cpu);
305 __bpon();
302 asm volatile( 306 asm volatile(
303 "0: sigp 0,%0,%2 # sigp restart to target cpu\n" 307 "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
304 " brc 2,0b # busy, try again\n" 308 " brc 2,0b # busy, try again\n"
@@ -888,6 +892,7 @@ void __cpu_die(unsigned int cpu)
888void __noreturn cpu_die(void) 892void __noreturn cpu_die(void)
889{ 893{
890 idle_task_exit(); 894 idle_task_exit();
895 __bpon();
891 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); 896 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
892 for (;;) ; 897 for (;;) ;
893} 898}
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 2d6b6e81f812..60a829c77378 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -12,6 +12,7 @@
12#include <asm/ptrace.h> 12#include <asm/ptrace.h>
13#include <asm/thread_info.h> 13#include <asm/thread_info.h>
14#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
15#include <asm/nospec-insn.h>
15#include <asm/sigp.h> 16#include <asm/sigp.h>
16 17
17/* 18/*
@@ -23,6 +24,8 @@
23 * (see below) in the resume process. 24 * (see below) in the resume process.
24 * This function runs with disabled interrupts. 25 * This function runs with disabled interrupts.
25 */ 26 */
27 GEN_BR_THUNK %r14
28
26 .section .text 29 .section .text
27ENTRY(swsusp_arch_suspend) 30ENTRY(swsusp_arch_suspend)
28 stmg %r6,%r15,__SF_GPRS(%r15) 31 stmg %r6,%r15,__SF_GPRS(%r15)
@@ -102,7 +105,7 @@ ENTRY(swsusp_arch_suspend)
102 spx 0x318(%r1) 105 spx 0x318(%r1)
103 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) 106 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
104 lghi %r2,0 107 lghi %r2,0
105 br %r14 108 BR_EX %r14
106 109
107/* 110/*
108 * Restore saved memory image to correct place and restore register context. 111 * Restore saved memory image to correct place and restore register context.
@@ -196,11 +199,10 @@ pgm_check_entry:
196 larl %r15,init_thread_union 199 larl %r15,init_thread_union
197 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) 200 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER)
198 larl %r2,.Lpanic_string 201 larl %r2,.Lpanic_string
199 larl %r3,_sclp_print_early
200 lghi %r1,0 202 lghi %r1,0
201 sam31 203 sam31
202 sigp %r1,%r0,SIGP_SET_ARCHITECTURE 204 sigp %r1,%r0,SIGP_SET_ARCHITECTURE
203 basr %r14,%r3 205 brasl %r14,_sclp_print_early
204 larl %r3,.Ldisabled_wait_31 206 larl %r3,.Ldisabled_wait_31
205 lpsw 0(%r3) 207 lpsw 0(%r3)
2064: 2084:
@@ -266,7 +268,7 @@ restore_registers:
266 /* Return 0 */ 268 /* Return 0 */
267 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) 269 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
268 lghi %r2,0 270 lghi %r2,0
269 br %r14 271 BR_EX %r14
270 272
271 .section .data..nosave,"aw",@progbits 273 .section .data..nosave,"aw",@progbits
272 .align 8 274 .align 8
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index 66956c09d5bf..3d04dfdabc9f 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -147,6 +147,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
147 return orig; 147 return orig;
148} 148}
149 149
150bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
151 struct pt_regs *regs)
152{
153 if (ctx == RP_CHECK_CHAIN_CALL)
154 return user_stack_pointer(regs) <= ret->stack;
155 else
156 return user_stack_pointer(regs) < ret->stack;
157}
158
150/* Instruction Emulation */ 159/* Instruction Emulation */
151 160
152static void adjust_psw_addr(psw_t *psw, unsigned long len) 161static void adjust_psw_addr(psw_t *psw, unsigned long len)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 445657fe658c..a4ae08e416e6 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -21,8 +21,14 @@ SECTIONS
21{ 21{
22 . = 0x00000000; 22 . = 0x00000000;
23 .text : { 23 .text : {
24 _text = .; /* Text and read-only data */ 24 /* Text and read-only data */
25 HEAD_TEXT 25 HEAD_TEXT
26 /*
27 * E.g. perf doesn't like symbols starting at address zero,
28 * therefore skip the initial PSW and channel program located
29 * at address zero and let _text start at 0x200.
30 */
31 _text = 0x200;
26 TEXT_TEXT 32 TEXT_TEXT
27 SCHED_TEXT 33 SCHED_TEXT
28 LOCK_TEXT 34 LOCK_TEXT
@@ -72,6 +78,43 @@ SECTIONS
72 EXIT_DATA 78 EXIT_DATA
73 } 79 }
74 80
81 /*
82 * struct alt_inst entries. From the header (alternative.h):
83 * "Alternative instructions for different CPU types or capabilities"
84 * Think locking instructions on spinlocks.
85 * Note, that it is a part of __init region.
86 */
87 . = ALIGN(8);
88 .altinstructions : {
89 __alt_instructions = .;
90 *(.altinstructions)
91 __alt_instructions_end = .;
92 }
93
94 /*
95 * And here are the replacement instructions. The linker sticks
96 * them as binary blobs. The .altinstructions has enough data to
97 * get the address and the length of them to patch the kernel safely.
98 * Note, that it is a part of __init region.
99 */
100 .altinstr_replacement : {
101 *(.altinstr_replacement)
102 }
103
104 /*
105 * Table with the patch locations to undo expolines
106 */
107 .nospec_call_table : {
108 __nospec_call_start = . ;
109 *(.s390_indirect*)
110 __nospec_call_end = . ;
111 }
112 .nospec_return_table : {
113 __nospec_return_start = . ;
114 *(.s390_return*)
115 __nospec_return_end = . ;
116 }
117
75 /* early.c uses stsi, which requires page aligned data. */ 118 /* early.c uses stsi, which requires page aligned data. */
76 . = ALIGN(PAGE_SIZE); 119 . = ALIGN(PAGE_SIZE);
77 INIT_DATA_SECTION(0x100) 120 INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 23e3f5d77a24..5ddb1debba95 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -118,8 +118,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
118 118
119/* upper facilities limit for kvm */ 119/* upper facilities limit for kvm */
120unsigned long kvm_s390_fac_list_mask[] = { 120unsigned long kvm_s390_fac_list_mask[] = {
121 0xffe6fffbfcfdfc40UL, 121 0xffe6ffffffffffffUL,
122 0x005e800000000000UL, 122 0x005effffffffffffUL,
123}; 123};
124 124
125unsigned long kvm_s390_fac_list_mask_size(void) 125unsigned long kvm_s390_fac_list_mask_size(void)
@@ -257,6 +257,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
257 case KVM_CAP_S390_VECTOR_REGISTERS: 257 case KVM_CAP_S390_VECTOR_REGISTERS:
258 r = MACHINE_HAS_VX; 258 r = MACHINE_HAS_VX;
259 break; 259 break;
260 case KVM_CAP_S390_BPB:
261 r = test_facility(82);
262 break;
260 default: 263 default:
261 r = 0; 264 r = 0;
262 } 265 }
@@ -1264,6 +1267,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1264 KVM_SYNC_PFAULT; 1267 KVM_SYNC_PFAULT;
1265 if (test_kvm_facility(vcpu->kvm, 129)) 1268 if (test_kvm_facility(vcpu->kvm, 129))
1266 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; 1269 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
1270 if (test_kvm_facility(vcpu->kvm, 82))
1271 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
1267 1272
1268 if (kvm_is_ucontrol(vcpu->kvm)) 1273 if (kvm_is_ucontrol(vcpu->kvm))
1269 return __kvm_ucontrol_vcpu_init(vcpu); 1274 return __kvm_ucontrol_vcpu_init(vcpu);
@@ -1327,6 +1332,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1327 current->thread.fpu.fpc = 0; 1332 current->thread.fpu.fpc = 0;
1328 vcpu->arch.sie_block->gbea = 1; 1333 vcpu->arch.sie_block->gbea = 1;
1329 vcpu->arch.sie_block->pp = 0; 1334 vcpu->arch.sie_block->pp = 0;
1335 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
1330 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; 1336 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1331 kvm_clear_async_pf_completion_queue(vcpu); 1337 kvm_clear_async_pf_completion_queue(vcpu);
1332 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) 1338 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -2145,6 +2151,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2145 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 2151 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2146 kvm_clear_async_pf_completion_queue(vcpu); 2152 kvm_clear_async_pf_completion_queue(vcpu);
2147 } 2153 }
2154 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
2155 test_kvm_facility(vcpu->kvm, 82)) {
2156 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
2157 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
2158 }
2148 kvm_run->kvm_dirty_regs = 0; 2159 kvm_run->kvm_dirty_regs = 0;
2149} 2160}
2150 2161
@@ -2162,6 +2173,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2162 kvm_run->s.regs.pft = vcpu->arch.pfault_token; 2173 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2163 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; 2174 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2164 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; 2175 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2176 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
2165} 2177}
2166 2178
2167int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2179int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index c6d553e85ab1..16c5998b9792 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -5,6 +5,9 @@
5 */ 5 */
6 6
7#include <linux/linkage.h> 7#include <linux/linkage.h>
8#include <asm/nospec-insn.h>
9
10 GEN_BR_THUNK %r14
8 11
9/* 12/*
10 * memset implementation 13 * memset implementation
@@ -38,7 +41,7 @@ ENTRY(memset)
38.Lmemset_clear_rest: 41.Lmemset_clear_rest:
39 larl %r3,.Lmemset_xc 42 larl %r3,.Lmemset_xc
40 ex %r4,0(%r3) 43 ex %r4,0(%r3)
41 br %r14 44 BR_EX %r14
42.Lmemset_fill: 45.Lmemset_fill:
43 stc %r3,0(%r2) 46 stc %r3,0(%r2)
44 cghi %r4,1 47 cghi %r4,1
@@ -55,7 +58,7 @@ ENTRY(memset)
55.Lmemset_fill_rest: 58.Lmemset_fill_rest:
56 larl %r3,.Lmemset_mvc 59 larl %r3,.Lmemset_mvc
57 ex %r4,0(%r3) 60 ex %r4,0(%r3)
58 br %r14 61 BR_EX %r14
59.Lmemset_xc: 62.Lmemset_xc:
60 xc 0(1,%r1),0(%r1) 63 xc 0(1,%r1),0(%r1)
61.Lmemset_mvc: 64.Lmemset_mvc:
@@ -77,7 +80,7 @@ ENTRY(memcpy)
77.Lmemcpy_rest: 80.Lmemcpy_rest:
78 larl %r5,.Lmemcpy_mvc 81 larl %r5,.Lmemcpy_mvc
79 ex %r4,0(%r5) 82 ex %r4,0(%r5)
80 br %r14 83 BR_EX %r14
81.Lmemcpy_loop: 84.Lmemcpy_loop:
82 mvc 0(256,%r1),0(%r3) 85 mvc 0(256,%r1),0(%r3)
83 la %r1,256(%r1) 86 la %r1,256(%r1)
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
index a1c917d881ec..fa716f2a95a7 100644
--- a/arch/s390/net/bpf_jit.S
+++ b/arch/s390/net/bpf_jit.S
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/nospec-insn.h>
11#include "bpf_jit.h" 12#include "bpf_jit.h"
12 13
13/* 14/*
@@ -53,7 +54,7 @@ ENTRY(sk_load_##NAME##_pos); \
53 clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \ 54 clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \
54 jh sk_load_##NAME##_slow; \ 55 jh sk_load_##NAME##_slow; \
55 LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \ 56 LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \
56 b OFF_OK(%r6); /* Return */ \ 57 B_EX OFF_OK,%r6; /* Return */ \
57 \ 58 \
58sk_load_##NAME##_slow:; \ 59sk_load_##NAME##_slow:; \
59 lgr %r2,%r7; /* Arg1 = skb pointer */ \ 60 lgr %r2,%r7; /* Arg1 = skb pointer */ \
@@ -63,11 +64,14 @@ sk_load_##NAME##_slow:; \
63 brasl %r14,skb_copy_bits; /* Get data from skb */ \ 64 brasl %r14,skb_copy_bits; /* Get data from skb */ \
64 LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \ 65 LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \
65 ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \ 66 ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \
66 br %r6; /* Return */ 67 BR_EX %r6; /* Return */
67 68
68sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */ 69sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */
69sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */ 70sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */
70 71
72 GEN_BR_THUNK %r6
73 GEN_B_THUNK OFF_OK,%r6
74
71/* 75/*
72 * Load 1 byte from SKB (optimized version) 76 * Load 1 byte from SKB (optimized version)
73 */ 77 */
@@ -79,7 +83,7 @@ ENTRY(sk_load_byte_pos)
79 clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen? 83 clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen?
80 jnl sk_load_byte_slow 84 jnl sk_load_byte_slow
81 llgc %r14,0(%r3,%r12) # Get byte from skb 85 llgc %r14,0(%r3,%r12) # Get byte from skb
82 b OFF_OK(%r6) # Return OK 86 B_EX OFF_OK,%r6 # Return OK
83 87
84sk_load_byte_slow: 88sk_load_byte_slow:
85 lgr %r2,%r7 # Arg1 = skb pointer 89 lgr %r2,%r7 # Arg1 = skb pointer
@@ -89,7 +93,7 @@ sk_load_byte_slow:
89 brasl %r14,skb_copy_bits # Get data from skb 93 brasl %r14,skb_copy_bits # Get data from skb
90 llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer 94 llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer
91 ltgr %r2,%r2 # Set cc to (%r2 != 0) 95 ltgr %r2,%r2 # Set cc to (%r2 != 0)
92 br %r6 # Return cc 96 BR_EX %r6 # Return cc
93 97
94#define sk_negative_common(NAME, SIZE, LOAD) \ 98#define sk_negative_common(NAME, SIZE, LOAD) \
95sk_load_##NAME##_slow_neg:; \ 99sk_load_##NAME##_slow_neg:; \
@@ -103,7 +107,7 @@ sk_load_##NAME##_slow_neg:; \
103 jz bpf_error; \ 107 jz bpf_error; \
104 LOAD %r14,0(%r2); /* Get data from pointer */ \ 108 LOAD %r14,0(%r2); /* Get data from pointer */ \
105 xr %r3,%r3; /* Set cc to zero */ \ 109 xr %r3,%r3; /* Set cc to zero */ \
106 br %r6; /* Return cc */ 110 BR_EX %r6; /* Return cc */
107 111
108sk_negative_common(word, 4, llgf) 112sk_negative_common(word, 4, llgf)
109sk_negative_common(half, 2, llgh) 113sk_negative_common(half, 2, llgh)
@@ -112,4 +116,4 @@ sk_negative_common(byte, 1, llgc)
112bpf_error: 116bpf_error:
113# force a return 0 from jit handler 117# force a return 0 from jit handler
114 ltgr %r15,%r15 # Set condition code 118 ltgr %r15,%r15 # Set condition code
115 br %r6 119 BR_EX %r6
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 1395eeb6005f..a26528afceb2 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -24,6 +24,8 @@
24#include <linux/bpf.h> 24#include <linux/bpf.h>
25#include <asm/cacheflush.h> 25#include <asm/cacheflush.h>
26#include <asm/dis.h> 26#include <asm/dis.h>
27#include <asm/facility.h>
28#include <asm/nospec-branch.h>
27#include "bpf_jit.h" 29#include "bpf_jit.h"
28 30
29int bpf_jit_enable __read_mostly; 31int bpf_jit_enable __read_mostly;
@@ -41,6 +43,8 @@ struct bpf_jit {
41 int base_ip; /* Base address for literal pool */ 43 int base_ip; /* Base address for literal pool */
42 int ret0_ip; /* Address of return 0 */ 44 int ret0_ip; /* Address of return 0 */
43 int exit_ip; /* Address of exit */ 45 int exit_ip; /* Address of exit */
46 int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
47 int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
44 int tail_call_start; /* Tail call start offset */ 48 int tail_call_start; /* Tail call start offset */
45 int labels[1]; /* Labels for local jumps */ 49 int labels[1]; /* Labels for local jumps */
46}; 50};
@@ -248,6 +252,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
248 REG_SET_SEEN(b2); \ 252 REG_SET_SEEN(b2); \
249}) 253})
250 254
255#define EMIT6_PCREL_RILB(op, b, target) \
256({ \
257 int rel = (target - jit->prg) / 2; \
258 _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \
259 REG_SET_SEEN(b); \
260})
261
262#define EMIT6_PCREL_RIL(op, target) \
263({ \
264 int rel = (target - jit->prg) / 2; \
265 _EMIT6(op | rel >> 16, rel & 0xffff); \
266})
267
251#define _EMIT6_IMM(op, imm) \ 268#define _EMIT6_IMM(op, imm) \
252({ \ 269({ \
253 unsigned int __imm = (imm); \ 270 unsigned int __imm = (imm); \
@@ -475,8 +492,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
475 EMIT4(0xb9040000, REG_2, BPF_REG_0); 492 EMIT4(0xb9040000, REG_2, BPF_REG_0);
476 /* Restore registers */ 493 /* Restore registers */
477 save_restore_regs(jit, REGS_RESTORE); 494 save_restore_regs(jit, REGS_RESTORE);
495 if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
496 jit->r14_thunk_ip = jit->prg;
497 /* Generate __s390_indirect_jump_r14 thunk */
498 if (test_facility(35)) {
499 /* exrl %r0,.+10 */
500 EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
501 } else {
502 /* larl %r1,.+14 */
503 EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
504 /* ex 0,0(%r1) */
505 EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
506 }
507 /* j . */
508 EMIT4_PCREL(0xa7f40000, 0);
509 }
478 /* br %r14 */ 510 /* br %r14 */
479 _EMIT2(0x07fe); 511 _EMIT2(0x07fe);
512
513 if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
514 (jit->seen & SEEN_FUNC)) {
515 jit->r1_thunk_ip = jit->prg;
516 /* Generate __s390_indirect_jump_r1 thunk */
517 if (test_facility(35)) {
518 /* exrl %r0,.+10 */
519 EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
520 /* j . */
521 EMIT4_PCREL(0xa7f40000, 0);
522 /* br %r1 */
523 _EMIT2(0x07f1);
524 } else {
525 /* larl %r1,.+14 */
526 EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
527 /* ex 0,S390_lowcore.br_r1_tampoline */
528 EMIT4_DISP(0x44000000, REG_0, REG_0,
529 offsetof(struct _lowcore, br_r1_trampoline));
530 /* j . */
531 EMIT4_PCREL(0xa7f40000, 0);
532 }
533 }
480} 534}
481 535
482/* 536/*
@@ -980,8 +1034,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
980 /* lg %w1,<d(imm)>(%l) */ 1034 /* lg %w1,<d(imm)>(%l) */
981 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L, 1035 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
982 EMIT_CONST_U64(func)); 1036 EMIT_CONST_U64(func));
983 /* basr %r14,%w1 */ 1037 if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
984 EMIT2(0x0d00, REG_14, REG_W1); 1038 /* brasl %r14,__s390_indirect_jump_r1 */
1039 EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
1040 } else {
1041 /* basr %r14,%w1 */
1042 EMIT2(0x0d00, REG_14, REG_W1);
1043 }
985 /* lgr %b0,%r2: load return value into %b0 */ 1044 /* lgr %b0,%r2: load return value into %b0 */
986 EMIT4(0xb9040000, BPF_REG_0, REG_2); 1045 EMIT4(0xb9040000, BPF_REG_0, REG_2);
987 if (bpf_helper_changes_skb_data((void *)func)) { 1046 if (bpf_helper_changes_skb_data((void *)func)) {
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 658326f44df8..5e0267624d8d 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -8,6 +8,7 @@
8 */ 8 */
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/platform_device.h> 10#include <linux/platform_device.h>
11#include <linux/sh_eth.h>
11#include <mach-se/mach/se.h> 12#include <mach-se/mach/se.h>
12#include <mach-se/mach/mrshpc.h> 13#include <mach-se/mach/mrshpc.h>
13#include <asm/machvec.h> 14#include <asm/machvec.h>
@@ -114,6 +115,11 @@ static struct platform_device heartbeat_device = {
114#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\ 115#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
115 defined(CONFIG_CPU_SUBTYPE_SH7712) 116 defined(CONFIG_CPU_SUBTYPE_SH7712)
116/* SH771X Ethernet driver */ 117/* SH771X Ethernet driver */
118static struct sh_eth_plat_data sh_eth_plat = {
119 .phy = PHY_ID,
120 .phy_interface = PHY_INTERFACE_MODE_MII,
121};
122
117static struct resource sh_eth0_resources[] = { 123static struct resource sh_eth0_resources[] = {
118 [0] = { 124 [0] = {
119 .start = SH_ETH0_BASE, 125 .start = SH_ETH0_BASE,
@@ -131,7 +137,7 @@ static struct platform_device sh_eth0_device = {
131 .name = "sh771x-ether", 137 .name = "sh771x-ether",
132 .id = 0, 138 .id = 0,
133 .dev = { 139 .dev = {
134 .platform_data = PHY_ID, 140 .platform_data = &sh_eth_plat,
135 }, 141 },
136 .num_resources = ARRAY_SIZE(sh_eth0_resources), 142 .num_resources = ARRAY_SIZE(sh_eth0_resources),
137 .resource = sh_eth0_resources, 143 .resource = sh_eth0_resources,
@@ -154,7 +160,7 @@ static struct platform_device sh_eth1_device = {
154 .name = "sh771x-ether", 160 .name = "sh771x-ether",
155 .id = 1, 161 .id = 1,
156 .dev = { 162 .dev = {
157 .platform_data = PHY_ID, 163 .platform_data = &sh_eth_plat,
158 }, 164 },
159 .num_resources = ARRAY_SIZE(sh_eth1_resources), 165 .num_resources = ARRAY_SIZE(sh_eth1_resources),
160 .resource = sh_eth1_resources, 166 .resource = sh_eth1_resources,
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index 7be39a646fbd..e05187d26d76 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -10,20 +10,11 @@
10/* XXX: UP variants, fix for SH-4A and SMP.. */ 10/* XXX: UP variants, fix for SH-4A and SMP.. */
11#include <asm/futex-irq.h> 11#include <asm/futex-irq.h>
12 12
13static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 13static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
14 u32 __user *uaddr)
14{ 15{
15 int op = (encoded_op >> 28) & 7;
16 int cmp = (encoded_op >> 24) & 15;
17 int oparg = (encoded_op << 8) >> 20;
18 int cmparg = (encoded_op << 20) >> 20;
19 int oldval = 0, ret; 16 int oldval = 0, ret;
20 17
21 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
22 oparg = 1 << oparg;
23
24 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
25 return -EFAULT;
26
27 pagefault_disable(); 18 pagefault_disable();
28 19
29 switch (op) { 20 switch (op) {
@@ -49,17 +40,8 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
49 40
50 pagefault_enable(); 41 pagefault_enable();
51 42
52 if (!ret) { 43 if (!ret)
53 switch (cmp) { 44 *oval = oldval;
54 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
55 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
56 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
57 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
58 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
59 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
60 default: ret = -ENOSYS;
61 }
62 }
63 45
64 return ret; 46 return ret;
65} 47}
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 13047a4facd2..5a9017ba26ab 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -255,7 +255,7 @@ debug_trap:
255 mov.l @r8, r8 255 mov.l @r8, r8
256 jsr @r8 256 jsr @r8
257 nop 257 nop
258 bra __restore_all 258 bra ret_from_exception
259 nop 259 nop
260 CFI_ENDPROC 260 CFI_ENDPROC
261 261
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index d77f2f6c7ff0..0b30b9dfc87f 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -34,6 +34,9 @@ DECLARE_EXPORT(__sdivsi3);
34DECLARE_EXPORT(__lshrsi3); 34DECLARE_EXPORT(__lshrsi3);
35DECLARE_EXPORT(__ashrsi3); 35DECLARE_EXPORT(__ashrsi3);
36DECLARE_EXPORT(__ashlsi3); 36DECLARE_EXPORT(__ashlsi3);
37DECLARE_EXPORT(__lshrsi3_r0);
38DECLARE_EXPORT(__ashrsi3_r0);
39DECLARE_EXPORT(__ashlsi3_r0);
37DECLARE_EXPORT(__ashiftrt_r4_6); 40DECLARE_EXPORT(__ashiftrt_r4_6);
38DECLARE_EXPORT(__ashiftrt_r4_7); 41DECLARE_EXPORT(__ashiftrt_r4_7);
39DECLARE_EXPORT(__ashiftrt_r4_8); 42DECLARE_EXPORT(__ashiftrt_r4_8);
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index ff639342a8be..c5b997757988 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -607,7 +607,8 @@ asmlinkage void do_divide_error(unsigned long r4)
607 break; 607 break;
608 } 608 }
609 609
610 force_sig_info(SIGFPE, &info, current); 610 info.si_signo = SIGFPE;
611 force_sig_info(info.si_signo, &info, current);
611} 612}
612#endif 613#endif
613 614
diff --git a/arch/sh/lib/ashlsi3.S b/arch/sh/lib/ashlsi3.S
index bd47e9b403a5..70a6434945ab 100644
--- a/arch/sh/lib/ashlsi3.S
+++ b/arch/sh/lib/ashlsi3.S
@@ -54,21 +54,38 @@ Boston, MA 02110-1301, USA. */
54! 54!
55! (none) 55! (none)
56! 56!
57! __ashlsi3_r0
58!
59! Entry:
60!
61! r4: Value to shift
62! r0: Shifts
63!
64! Exit:
65!
66! r0: Result
67!
68! Destroys:
69!
70! (none)
71
72
57 .global __ashlsi3 73 .global __ashlsi3
74 .global __ashlsi3_r0
58 75
59 .align 2 76 .align 2
60__ashlsi3: 77__ashlsi3:
61 mov #31,r0 78 mov r5,r0
62 and r0,r5 79 .align 2
80__ashlsi3_r0:
81 and #31,r0
82 mov.l r4,@-r15
83 mov r0,r4
63 mova ashlsi3_table,r0 84 mova ashlsi3_table,r0
64 mov.b @(r0,r5),r5 85 mov.b @(r0,r4),r4
65#ifdef __sh1__ 86 add r4,r0
66 add r5,r0
67 jmp @r0 87 jmp @r0
68#else 88 mov.l @r15+,r0
69 braf r5
70#endif
71 mov r4,r0
72 89
73 .align 2 90 .align 2
74ashlsi3_table: 91ashlsi3_table:
diff --git a/arch/sh/lib/ashrsi3.S b/arch/sh/lib/ashrsi3.S
index 6f3cf46b77c2..602599d80209 100644
--- a/arch/sh/lib/ashrsi3.S
+++ b/arch/sh/lib/ashrsi3.S
@@ -54,22 +54,37 @@ Boston, MA 02110-1301, USA. */
54! 54!
55! (none) 55! (none)
56! 56!
57! __ashrsi3_r0
58!
59! Entry:
60!
61! r4: Value to shift
62! r0: Shifts
63!
64! Exit:
65!
66! r0: Result
67!
68! Destroys:
69!
70! (none)
57 71
58 .global __ashrsi3 72 .global __ashrsi3
73 .global __ashrsi3_r0
59 74
60 .align 2 75 .align 2
61__ashrsi3: 76__ashrsi3:
62 mov #31,r0 77 mov r5,r0
63 and r0,r5 78 .align 2
79__ashrsi3_r0:
80 and #31,r0
81 mov.l r4,@-r15
82 mov r0,r4
64 mova ashrsi3_table,r0 83 mova ashrsi3_table,r0
65 mov.b @(r0,r5),r5 84 mov.b @(r0,r4),r4
66#ifdef __sh1__ 85 add r4,r0
67 add r5,r0
68 jmp @r0 86 jmp @r0
69#else 87 mov.l @r15+,r0
70 braf r5
71#endif
72 mov r4,r0
73 88
74 .align 2 89 .align 2
75ashrsi3_table: 90ashrsi3_table:
diff --git a/arch/sh/lib/lshrsi3.S b/arch/sh/lib/lshrsi3.S
index 1e7aaa557130..f2a6959f526d 100644
--- a/arch/sh/lib/lshrsi3.S
+++ b/arch/sh/lib/lshrsi3.S
@@ -54,21 +54,37 @@ Boston, MA 02110-1301, USA. */
54! 54!
55! (none) 55! (none)
56! 56!
57! __lshrsi3_r0
58!
59! Entry:
60!
61! r0: Value to shift
62! r5: Shifts
63!
64! Exit:
65!
66! r0: Result
67!
68! Destroys:
69!
70! (none)
71!
57 .global __lshrsi3 72 .global __lshrsi3
73 .global __lshrsi3_r0
58 74
59 .align 2 75 .align 2
60__lshrsi3: 76__lshrsi3:
61 mov #31,r0 77 mov r5,r0
62 and r0,r5 78 .align 2
79__lshrsi3_r0:
80 and #31,r0
81 mov.l r4,@-r15
82 mov r0,r4
63 mova lshrsi3_table,r0 83 mova lshrsi3_table,r0
64 mov.b @(r0,r5),r5 84 mov.b @(r0,r4),r4
65#ifdef __sh1__ 85 add r4,r0
66 add r5,r0
67 jmp @r0 86 jmp @r0
68#else 87 mov.l @r15+,r0
69 braf r5
70#endif
71 mov r4,r0
72 88
73 .align 2 89 .align 2
74lshrsi3_table: 90lshrsi3_table:
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index f2fbf9e16faf..29070c9a70f9 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -74,7 +74,11 @@ ATOMIC_OP(xor)
74#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0) 74#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
75 75
76#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) 76#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
77#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 77
78static inline int atomic_xchg(atomic_t *v, int new)
79{
80 return xchg(&v->counter, new);
81}
78 82
79static inline int __atomic_add_unless(atomic_t *v, int a, int u) 83static inline int __atomic_add_unless(atomic_t *v, int a, int u)
80{ 84{
diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h
index 4e899b0dabf7..1cfd89d92208 100644
--- a/arch/sparc/include/asm/futex_64.h
+++ b/arch/sparc/include/asm/futex_64.h
@@ -29,22 +29,14 @@
29 : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ 29 : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
30 : "memory") 30 : "memory")
31 31
32static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 32static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
33 u32 __user *uaddr)
33{ 34{
34 int op = (encoded_op >> 28) & 7;
35 int cmp = (encoded_op >> 24) & 15;
36 int oparg = (encoded_op << 8) >> 20;
37 int cmparg = (encoded_op << 20) >> 20;
38 int oldval = 0, ret, tem; 35 int oldval = 0, ret, tem;
39 36
40 if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
41 return -EFAULT;
42 if (unlikely((((unsigned long) uaddr) & 0x3UL))) 37 if (unlikely((((unsigned long) uaddr) & 0x3UL)))
43 return -EINVAL; 38 return -EINVAL;
44 39
45 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
46 oparg = 1 << oparg;
47
48 pagefault_disable(); 40 pagefault_disable();
49 41
50 switch (op) { 42 switch (op) {
@@ -69,17 +61,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
69 61
70 pagefault_enable(); 62 pagefault_enable();
71 63
72 if (!ret) { 64 if (!ret)
73 switch (cmp) { 65 *oval = oldval;
74 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 66
75 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
76 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
77 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
78 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
79 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
80 default: ret = -ENOSYS;
81 }
82 }
83 return ret; 67 return ret;
84} 68}
85 69
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index f87a55d77094..9b3f2e212b37 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -908,7 +908,7 @@ static int register_services(struct ds_info *dp)
908 pbuf.req.handle = cp->handle; 908 pbuf.req.handle = cp->handle;
909 pbuf.req.major = 1; 909 pbuf.req.major = 1;
910 pbuf.req.minor = 0; 910 pbuf.req.minor = 0;
911 strcpy(pbuf.req.svc_id, cp->service_id); 911 strcpy(pbuf.id_buf, cp->service_id);
912 912
913 err = __ds_send(lp, &pbuf, msg_len); 913 err = __ds_send(lp, &pbuf, msg_len);
914 if (err > 0) 914 if (err > 0)
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 59d503866431..9cc600b2d68c 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1733,9 +1733,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
1733 1733
1734 lp->rcv_nxt = p->seqid; 1734 lp->rcv_nxt = p->seqid;
1735 1735
1736 /*
1737 * If this is a control-only packet, there is nothing
1738 * else to do but advance the rx queue since the packet
1739 * was already processed above.
1740 */
1736 if (!(p->type & LDC_DATA)) { 1741 if (!(p->type & LDC_DATA)) {
1737 new = rx_advance(lp, new); 1742 new = rx_advance(lp, new);
1738 goto no_data; 1743 break;
1739 } 1744 }
1740 if (p->stype & (LDC_ACK | LDC_NACK)) { 1745 if (p->stype & (LDC_ACK | LDC_NACK)) {
1741 err = data_ack_nack(lp, p); 1746 err = data_ack_nack(lp, p);
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index 1a6ef1b69cb1..d96d9dab5c0b 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -106,12 +106,9 @@
106 lock = __atomic_hashed_lock((int __force *)uaddr) 106 lock = __atomic_hashed_lock((int __force *)uaddr)
107#endif 107#endif
108 108
109static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 109static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
110 u32 __user *uaddr)
110{ 111{
111 int op = (encoded_op >> 28) & 7;
112 int cmp = (encoded_op >> 24) & 15;
113 int oparg = (encoded_op << 8) >> 20;
114 int cmparg = (encoded_op << 20) >> 20;
115 int uninitialized_var(val), ret; 112 int uninitialized_var(val), ret;
116 113
117 __futex_prolog(); 114 __futex_prolog();
@@ -119,12 +116,6 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
119 /* The 32-bit futex code makes this assumption, so validate it here. */ 116 /* The 32-bit futex code makes this assumption, so validate it here. */
120 BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int)); 117 BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
121 118
122 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
123 oparg = 1 << oparg;
124
125 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
126 return -EFAULT;
127
128 pagefault_disable(); 119 pagefault_disable();
129 switch (op) { 120 switch (op) {
130 case FUTEX_OP_SET: 121 case FUTEX_OP_SET:
@@ -148,30 +139,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
148 } 139 }
149 pagefault_enable(); 140 pagefault_enable();
150 141
151 if (!ret) { 142 if (!ret)
152 switch (cmp) { 143 *oval = val;
153 case FUTEX_OP_CMP_EQ: 144
154 ret = (val == cmparg);
155 break;
156 case FUTEX_OP_CMP_NE:
157 ret = (val != cmparg);
158 break;
159 case FUTEX_OP_CMP_LT:
160 ret = (val < cmparg);
161 break;
162 case FUTEX_OP_CMP_GE:
163 ret = (val >= cmparg);
164 break;
165 case FUTEX_OP_CMP_LE:
166 ret = (val <= cmparg);
167 break;
168 case FUTEX_OP_CMP_GT:
169 ret = (val > cmparg);
170 break;
171 default:
172 ret = -ENOSYS;
173 }
174 }
175 return ret; 145 return ret;
176} 146}
177 147
diff --git a/arch/um/Makefile b/arch/um/Makefile
index e3abe6f3156d..9ccf462131c4 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -117,7 +117,7 @@ archheaders:
117archprepare: include/generated/user_constants.h 117archprepare: include/generated/user_constants.h
118 118
119LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static 119LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
120LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib 120LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)
121 121
122CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \ 122CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
123 $(call cc-option, -fno-stack-protector,) \ 123 $(call cc-option, -fno-stack-protector,) \
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index c211153ca69a..56648f4f8b41 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -140,7 +140,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
140 140
141static void hard_handler(int sig, siginfo_t *si, void *p) 141static void hard_handler(int sig, siginfo_t *si, void *p)
142{ 142{
143 struct ucontext *uc = p; 143 ucontext_t *uc = p;
144 mcontext_t *mc = &uc->uc_mcontext; 144 mcontext_t *mc = &uc->uc_mcontext;
145 unsigned long pending = 1UL << sig; 145 unsigned long pending = 1UL << sig;
146 146
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 76f0921181d8..9ddd716116ca 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -89,6 +89,8 @@ config X86
89 select HAVE_ARCH_TRACEHOOK 89 select HAVE_ARCH_TRACEHOOK
90 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 90 select HAVE_ARCH_TRANSPARENT_HUGEPAGE
91 select HAVE_ARCH_WITHIN_STACK_FRAMES 91 select HAVE_ARCH_WITHIN_STACK_FRAMES
92 select HAVE_BPF_JIT if X86_64
93 select HAVE_EBPF_JIT if X86_64
92 select HAVE_CC_STACKPROTECTOR 94 select HAVE_CC_STACKPROTECTOR
93 select HAVE_CMPXCHG_DOUBLE 95 select HAVE_CMPXCHG_DOUBLE
94 select HAVE_CMPXCHG_LOCAL 96 select HAVE_CMPXCHG_LOCAL
@@ -280,11 +282,6 @@ config X86_32_LAZY_GS
280 def_bool y 282 def_bool y
281 depends on X86_32 && !CC_STACKPROTECTOR 283 depends on X86_32 && !CC_STACKPROTECTOR
282 284
283config ARCH_HWEIGHT_CFLAGS
284 string
285 default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
286 default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
287
288config ARCH_SUPPORTS_UPROBES 285config ARCH_SUPPORTS_UPROBES
289 def_bool y 286 def_bool y
290 287
@@ -354,6 +351,17 @@ config X86_FEATURE_NAMES
354 351
355 If in doubt, say Y. 352 If in doubt, say Y.
356 353
354config X86_FAST_FEATURE_TESTS
355 bool "Fast CPU feature tests" if EMBEDDED
356 default y
357 ---help---
358 Some fast-paths in the kernel depend on the capabilities of the CPU.
359 Say Y here for the kernel to patch in the appropriate code at runtime
360 based on the capabilities of the CPU. The infrastructure for patching
361 code at runtime takes up some additional space; space-constrained
362 embedded systems may wish to say N here to produce smaller, slightly
363 slower code.
364
357config X86_X2APIC 365config X86_X2APIC
358 bool "Support x2apic" 366 bool "Support x2apic"
359 depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST) 367 depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST)
@@ -1012,7 +1020,7 @@ config X86_MCE_THRESHOLD
1012 def_bool y 1020 def_bool y
1013 1021
1014config X86_MCE_INJECT 1022config X86_MCE_INJECT
1015 depends on X86_MCE 1023 depends on X86_MCE && X86_LOCAL_APIC
1016 tristate "Machine check injector support" 1024 tristate "Machine check injector support"
1017 ---help--- 1025 ---help---
1018 Provide support for injecting machine checks for testing purposes. 1026 Provide support for injecting machine checks for testing purposes.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 1f6c306a9a00..3cb8e179f2f2 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -355,16 +355,6 @@ config DEBUG_IMR_SELFTEST
355 355
356 If unsure say N here. 356 If unsure say N here.
357 357
358config X86_DEBUG_STATIC_CPU_HAS
359 bool "Debug alternatives"
360 depends on DEBUG_KERNEL
361 ---help---
362 This option causes additional code to be generated which
363 fails if static_cpu_has() is used before alternatives have
364 run.
365
366 If unsure, say N.
367
368config X86_DEBUG_FPU 358config X86_DEBUG_FPU
369 bool "Debug the x86 FPU code" 359 bool "Debug the x86 FPU code"
370 depends on DEBUG_KERNEL 360 depends on DEBUG_KERNEL
@@ -379,6 +369,7 @@ config X86_DEBUG_FPU
379 369
380config PUNIT_ATOM_DEBUG 370config PUNIT_ATOM_DEBUG
381 tristate "ATOM Punit debug driver" 371 tristate "ATOM Punit debug driver"
372 depends on PCI
382 select DEBUG_FS 373 select DEBUG_FS
383 select IOSF_MBI 374 select IOSF_MBI
384 ---help--- 375 ---help---
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 1f9caa041bf7..d2c663aeccba 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -179,6 +179,15 @@ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr)
179 179
180LDFLAGS := -m elf_$(UTS_MACHINE) 180LDFLAGS := -m elf_$(UTS_MACHINE)
181 181
182#
183# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to
184# the linker to force 2MB page size regardless of the default page size used
185# by the linker.
186#
187ifdef CONFIG_X86_64
188LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
189endif
190
182# Speed up the build 191# Speed up the build
183KBUILD_CFLAGS += -pipe 192KBUILD_CFLAGS += -pipe
184# Workaround for a gcc prelease that unfortunately was shipped in a suse release 193# Workaround for a gcc prelease that unfortunately was shipped in a suse release
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index c0cc2a6be0bf..6da2cd0897f3 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -64,12 +64,13 @@ GCOV_PROFILE := n
64$(obj)/bzImage: asflags-y := $(SVGA_MODE) 64$(obj)/bzImage: asflags-y := $(SVGA_MODE)
65 65
66quiet_cmd_image = BUILD $@ 66quiet_cmd_image = BUILD $@
67silent_redirect_image = >/dev/null
67cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \ 68cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
68 $(obj)/zoffset.h $@ 69 $(obj)/zoffset.h $@ $($(quiet)redirect_image)
69 70
70$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE 71$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
71 $(call if_changed,image) 72 $(call if_changed,image)
72 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' 73 @$(kecho) 'Kernel: $@ is ready' ' (#'`cat .version`')'
73 74
74OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S 75OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S
75$(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE 76$(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 583d539a4197..2bc6651791cc 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -364,7 +364,8 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom)
364 if (status != EFI_SUCCESS) 364 if (status != EFI_SUCCESS)
365 goto free_struct; 365 goto free_struct;
366 366
367 memcpy(rom->romdata, pci->romimage, pci->romsize); 367 memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
368 pci->romsize);
368 return status; 369 return status;
369 370
370free_struct: 371free_struct:
@@ -470,7 +471,8 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom)
470 if (status != EFI_SUCCESS) 471 if (status != EFI_SUCCESS)
471 goto free_struct; 472 goto free_struct;
472 473
473 memcpy(rom->romdata, pci->romimage, pci->romsize); 474 memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
475 pci->romsize);
474 return status; 476 return status;
475 477
476free_struct: 478free_struct:
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 79dac1758e7c..16df89c30c20 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -366,6 +366,10 @@ static void parse_elf(void *output)
366 366
367 switch (phdr->p_type) { 367 switch (phdr->p_type) {
368 case PT_LOAD: 368 case PT_LOAD:
369#ifdef CONFIG_X86_64
370 if ((phdr->p_align % 0x200000) != 0)
371 error("Alignment of LOAD segment isn't multiple of 2MB");
372#endif
369#ifdef CONFIG_RELOCATABLE 373#ifdef CONFIG_RELOCATABLE
370 dest = output; 374 dest = output;
371 dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); 375 dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h
index ea97697e51e4..4cb404fd45ce 100644
--- a/arch/x86/boot/cpuflags.h
+++ b/arch/x86/boot/cpuflags.h
@@ -1,7 +1,7 @@
1#ifndef BOOT_CPUFLAGS_H 1#ifndef BOOT_CPUFLAGS_H
2#define BOOT_CPUFLAGS_H 2#define BOOT_CPUFLAGS_H
3 3
4#include <asm/cpufeature.h> 4#include <asm/cpufeatures.h>
5#include <asm/processor-flags.h> 5#include <asm/processor-flags.h>
6 6
7struct cpu_features { 7struct cpu_features {
diff --git a/arch/x86/boot/mkcpustr.c b/arch/x86/boot/mkcpustr.c
index 637097e66a62..f72498dc90d2 100644
--- a/arch/x86/boot/mkcpustr.c
+++ b/arch/x86/boot/mkcpustr.c
@@ -17,7 +17,7 @@
17 17
18#include "../include/asm/required-features.h" 18#include "../include/asm/required-features.h"
19#include "../include/asm/disabled-features.h" 19#include "../include/asm/disabled-features.h"
20#include "../include/asm/cpufeature.h" 20#include "../include/asm/cpufeatures.h"
21#include "../kernel/cpu/capflags.c" 21#include "../kernel/cpu/capflags.c"
22 22
23int main(void) 23int main(void)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 3633ad6145c5..c18806b5db2a 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -965,7 +965,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
965 965
966 if (sg_is_last(req->src) && 966 if (sg_is_last(req->src) &&
967 req->src->offset + req->src->length <= PAGE_SIZE && 967 req->src->offset + req->src->length <= PAGE_SIZE &&
968 sg_is_last(req->dst) && 968 sg_is_last(req->dst) && req->dst->length &&
969 req->dst->offset + req->dst->length <= PAGE_SIZE) { 969 req->dst->offset + req->dst->length <= PAGE_SIZE) {
970 one_entry_in_sg = 1; 970 one_entry_in_sg = 1;
971 scatterwalk_start(&src_sg_walk, req->src); 971 scatterwalk_start(&src_sg_walk, req->src);
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 8648158f3916..f8fe11d24cde 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -66,8 +66,6 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
66 void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src); 66 void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
67 int err; 67 int err;
68 68
69 fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
70
71 err = blkcipher_walk_virt(desc, walk); 69 err = blkcipher_walk_virt(desc, walk);
72 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; 70 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
73 71
@@ -79,6 +77,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
79 77
80 /* Process multi-block batch */ 78 /* Process multi-block batch */
81 if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { 79 if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
80 fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
82 do { 81 do {
83 fn(ctx, wdst, wsrc); 82 fn(ctx, wdst, wsrc);
84 83
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 722bacea040e..8baaff5af0b5 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -125,7 +125,7 @@ static struct crypto_alg alg = {
125 125
126static int __init chacha20_simd_mod_init(void) 126static int __init chacha20_simd_mod_init(void)
127{ 127{
128 if (!cpu_has_ssse3) 128 if (!boot_cpu_has(X86_FEATURE_SSSE3))
129 return -ENODEV; 129 return -ENODEV;
130 130
131#ifdef CONFIG_AS_AVX2 131#ifdef CONFIG_AS_AVX2
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index 07d2c6c86a54..27226df3f7d8 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -33,7 +33,7 @@
33#include <linux/crc32.h> 33#include <linux/crc32.h>
34#include <crypto/internal/hash.h> 34#include <crypto/internal/hash.h>
35 35
36#include <asm/cpufeature.h> 36#include <asm/cpufeatures.h>
37#include <asm/cpu_device_id.h> 37#include <asm/cpu_device_id.h>
38#include <asm/fpu/api.h> 38#include <asm/fpu/api.h>
39 39
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 81a595d75cf5..715399b14ed7 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -30,7 +30,7 @@
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <crypto/internal/hash.h> 31#include <crypto/internal/hash.h>
32 32
33#include <asm/cpufeature.h> 33#include <asm/cpufeatures.h>
34#include <asm/cpu_device_id.h> 34#include <asm/cpu_device_id.h>
35#include <asm/fpu/internal.h> 35#include <asm/fpu/internal.h>
36 36
@@ -58,16 +58,11 @@
58asmlinkage unsigned int crc_pcl(const u8 *buffer, int len, 58asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
59 unsigned int crc_init); 59 unsigned int crc_init);
60static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU; 60static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
61#if defined(X86_FEATURE_EAGER_FPU)
62#define set_pcl_breakeven_point() \ 61#define set_pcl_breakeven_point() \
63do { \ 62do { \
64 if (!use_eager_fpu()) \ 63 if (!use_eager_fpu()) \
65 crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \ 64 crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
66} while (0) 65} while (0)
67#else
68#define set_pcl_breakeven_point() \
69 (crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU)
70#endif
71#endif /* CONFIG_X86_64 */ 66#endif /* CONFIG_X86_64 */
72 67
73static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length) 68static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
@@ -257,7 +252,7 @@ static int __init crc32c_intel_mod_init(void)
257 if (!x86_match_cpu(crc32c_cpu_id)) 252 if (!x86_match_cpu(crc32c_cpu_id))
258 return -ENODEV; 253 return -ENODEV;
259#ifdef CONFIG_X86_64 254#ifdef CONFIG_X86_64
260 if (cpu_has_pclmulqdq) { 255 if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
261 alg.update = crc32c_pcl_intel_update; 256 alg.update = crc32c_pcl_intel_update;
262 alg.finup = crc32c_pcl_intel_finup; 257 alg.finup = crc32c_pcl_intel_finup;
263 alg.digest = crc32c_pcl_intel_digest; 258 alg.digest = crc32c_pcl_intel_digest;
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index a3fcfc97a311..cd4df9322501 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -30,7 +30,7 @@
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <asm/fpu/api.h> 32#include <asm/fpu/api.h>
33#include <asm/cpufeature.h> 33#include <asm/cpufeatures.h>
34#include <asm/cpu_device_id.h> 34#include <asm/cpu_device_id.h>
35 35
36asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf, 36asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 4264a3d59589..7c064887b783 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -164,7 +164,6 @@ static struct shash_alg alg = {
164 .init = poly1305_simd_init, 164 .init = poly1305_simd_init,
165 .update = poly1305_simd_update, 165 .update = poly1305_simd_update,
166 .final = crypto_poly1305_final, 166 .final = crypto_poly1305_final,
167 .setkey = crypto_poly1305_setkey,
168 .descsize = sizeof(struct poly1305_simd_desc_ctx), 167 .descsize = sizeof(struct poly1305_simd_desc_ctx),
169 .base = { 168 .base = {
170 .cra_name = "poly1305", 169 .cra_name = "poly1305",
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
index 1c3b7ceb36d2..e7273a606a07 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
@@ -55,29 +55,31 @@
55#define RAB1bl %bl 55#define RAB1bl %bl
56#define RAB2bl %cl 56#define RAB2bl %cl
57 57
58#define CD0 0x0(%rsp)
59#define CD1 0x8(%rsp)
60#define CD2 0x10(%rsp)
61
62# used only before/after all rounds
58#define RCD0 %r8 63#define RCD0 %r8
59#define RCD1 %r9 64#define RCD1 %r9
60#define RCD2 %r10 65#define RCD2 %r10
61 66
62#define RCD0d %r8d 67# used only during rounds
63#define RCD1d %r9d 68#define RX0 %r8
64#define RCD2d %r10d 69#define RX1 %r9
65 70#define RX2 %r10
66#define RX0 %rbp
67#define RX1 %r11
68#define RX2 %r12
69 71
70#define RX0d %ebp 72#define RX0d %r8d
71#define RX1d %r11d 73#define RX1d %r9d
72#define RX2d %r12d 74#define RX2d %r10d
73 75
74#define RY0 %r13 76#define RY0 %r11
75#define RY1 %r14 77#define RY1 %r12
76#define RY2 %r15 78#define RY2 %r13
77 79
78#define RY0d %r13d 80#define RY0d %r11d
79#define RY1d %r14d 81#define RY1d %r12d
80#define RY2d %r15d 82#define RY2d %r13d
81 83
82#define RT0 %rdx 84#define RT0 %rdx
83#define RT1 %rsi 85#define RT1 %rsi
@@ -85,6 +87,8 @@
85#define RT0d %edx 87#define RT0d %edx
86#define RT1d %esi 88#define RT1d %esi
87 89
90#define RT1bl %sil
91
88#define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \ 92#define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \
89 movzbl ab ## bl, tmp2 ## d; \ 93 movzbl ab ## bl, tmp2 ## d; \
90 movzbl ab ## bh, tmp1 ## d; \ 94 movzbl ab ## bh, tmp1 ## d; \
@@ -92,6 +96,11 @@
92 op1##l T0(CTX, tmp2, 4), dst ## d; \ 96 op1##l T0(CTX, tmp2, 4), dst ## d; \
93 op2##l T1(CTX, tmp1, 4), dst ## d; 97 op2##l T1(CTX, tmp1, 4), dst ## d;
94 98
99#define swap_ab_with_cd(ab, cd, tmp) \
100 movq cd, tmp; \
101 movq ab, cd; \
102 movq tmp, ab;
103
95/* 104/*
96 * Combined G1 & G2 function. Reordered with help of rotates to have moves 105 * Combined G1 & G2 function. Reordered with help of rotates to have moves
97 * at begining. 106 * at begining.
@@ -110,15 +119,15 @@
110 /* G1,2 && G2,2 */ \ 119 /* G1,2 && G2,2 */ \
111 do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \ 120 do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \
112 do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \ 121 do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \
113 xchgq cd ## 0, ab ## 0; \ 122 swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \
114 \ 123 \
115 do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \ 124 do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \
116 do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \ 125 do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \
117 xchgq cd ## 1, ab ## 1; \ 126 swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \
118 \ 127 \
119 do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \ 128 do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \
120 do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \ 129 do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \
121 xchgq cd ## 2, ab ## 2; 130 swap_ab_with_cd(ab ## 2, cd ## 2, RT0);
122 131
123#define enc_round_end(ab, x, y, n) \ 132#define enc_round_end(ab, x, y, n) \
124 addl y ## d, x ## d; \ 133 addl y ## d, x ## d; \
@@ -168,6 +177,16 @@
168 decrypt_round3(ba, dc, (n*2)+1); \ 177 decrypt_round3(ba, dc, (n*2)+1); \
169 decrypt_round3(ba, dc, (n*2)); 178 decrypt_round3(ba, dc, (n*2));
170 179
180#define push_cd() \
181 pushq RCD2; \
182 pushq RCD1; \
183 pushq RCD0;
184
185#define pop_cd() \
186 popq RCD0; \
187 popq RCD1; \
188 popq RCD2;
189
171#define inpack3(in, n, xy, m) \ 190#define inpack3(in, n, xy, m) \
172 movq 4*(n)(in), xy ## 0; \ 191 movq 4*(n)(in), xy ## 0; \
173 xorq w+4*m(CTX), xy ## 0; \ 192 xorq w+4*m(CTX), xy ## 0; \
@@ -223,11 +242,8 @@ ENTRY(__twofish_enc_blk_3way)
223 * %rdx: src, RIO 242 * %rdx: src, RIO
224 * %rcx: bool, if true: xor output 243 * %rcx: bool, if true: xor output
225 */ 244 */
226 pushq %r15;
227 pushq %r14;
228 pushq %r13; 245 pushq %r13;
229 pushq %r12; 246 pushq %r12;
230 pushq %rbp;
231 pushq %rbx; 247 pushq %rbx;
232 248
233 pushq %rcx; /* bool xor */ 249 pushq %rcx; /* bool xor */
@@ -235,40 +251,36 @@ ENTRY(__twofish_enc_blk_3way)
235 251
236 inpack_enc3(); 252 inpack_enc3();
237 253
238 encrypt_cycle3(RAB, RCD, 0); 254 push_cd();
239 encrypt_cycle3(RAB, RCD, 1); 255 encrypt_cycle3(RAB, CD, 0);
240 encrypt_cycle3(RAB, RCD, 2); 256 encrypt_cycle3(RAB, CD, 1);
241 encrypt_cycle3(RAB, RCD, 3); 257 encrypt_cycle3(RAB, CD, 2);
242 encrypt_cycle3(RAB, RCD, 4); 258 encrypt_cycle3(RAB, CD, 3);
243 encrypt_cycle3(RAB, RCD, 5); 259 encrypt_cycle3(RAB, CD, 4);
244 encrypt_cycle3(RAB, RCD, 6); 260 encrypt_cycle3(RAB, CD, 5);
245 encrypt_cycle3(RAB, RCD, 7); 261 encrypt_cycle3(RAB, CD, 6);
262 encrypt_cycle3(RAB, CD, 7);
263 pop_cd();
246 264
247 popq RIO; /* dst */ 265 popq RIO; /* dst */
248 popq %rbp; /* bool xor */ 266 popq RT1; /* bool xor */
249 267
250 testb %bpl, %bpl; 268 testb RT1bl, RT1bl;
251 jnz .L__enc_xor3; 269 jnz .L__enc_xor3;
252 270
253 outunpack_enc3(mov); 271 outunpack_enc3(mov);
254 272
255 popq %rbx; 273 popq %rbx;
256 popq %rbp;
257 popq %r12; 274 popq %r12;
258 popq %r13; 275 popq %r13;
259 popq %r14;
260 popq %r15;
261 ret; 276 ret;
262 277
263.L__enc_xor3: 278.L__enc_xor3:
264 outunpack_enc3(xor); 279 outunpack_enc3(xor);
265 280
266 popq %rbx; 281 popq %rbx;
267 popq %rbp;
268 popq %r12; 282 popq %r12;
269 popq %r13; 283 popq %r13;
270 popq %r14;
271 popq %r15;
272 ret; 284 ret;
273ENDPROC(__twofish_enc_blk_3way) 285ENDPROC(__twofish_enc_blk_3way)
274 286
@@ -278,35 +290,31 @@ ENTRY(twofish_dec_blk_3way)
278 * %rsi: dst 290 * %rsi: dst
279 * %rdx: src, RIO 291 * %rdx: src, RIO
280 */ 292 */
281 pushq %r15;
282 pushq %r14;
283 pushq %r13; 293 pushq %r13;
284 pushq %r12; 294 pushq %r12;
285 pushq %rbp;
286 pushq %rbx; 295 pushq %rbx;
287 296
288 pushq %rsi; /* dst */ 297 pushq %rsi; /* dst */
289 298
290 inpack_dec3(); 299 inpack_dec3();
291 300
292 decrypt_cycle3(RAB, RCD, 7); 301 push_cd();
293 decrypt_cycle3(RAB, RCD, 6); 302 decrypt_cycle3(RAB, CD, 7);
294 decrypt_cycle3(RAB, RCD, 5); 303 decrypt_cycle3(RAB, CD, 6);
295 decrypt_cycle3(RAB, RCD, 4); 304 decrypt_cycle3(RAB, CD, 5);
296 decrypt_cycle3(RAB, RCD, 3); 305 decrypt_cycle3(RAB, CD, 4);
297 decrypt_cycle3(RAB, RCD, 2); 306 decrypt_cycle3(RAB, CD, 3);
298 decrypt_cycle3(RAB, RCD, 1); 307 decrypt_cycle3(RAB, CD, 2);
299 decrypt_cycle3(RAB, RCD, 0); 308 decrypt_cycle3(RAB, CD, 1);
309 decrypt_cycle3(RAB, CD, 0);
310 pop_cd();
300 311
301 popq RIO; /* dst */ 312 popq RIO; /* dst */
302 313
303 outunpack_dec3(); 314 outunpack_dec3();
304 315
305 popq %rbx; 316 popq %rbx;
306 popq %rbp;
307 popq %r12; 317 popq %r12;
308 popq %r13; 318 popq %r13;
309 popq %r14;
310 popq %r15;
311 ret; 319 ret;
312ENDPROC(twofish_dec_blk_3way) 320ENDPROC(twofish_dec_blk_3way)
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 1a4477cedc49..071582a3b5c0 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -20,12 +20,14 @@
20#include <linux/export.h> 20#include <linux/export.h>
21#include <linux/context_tracking.h> 21#include <linux/context_tracking.h>
22#include <linux/user-return-notifier.h> 22#include <linux/user-return-notifier.h>
23#include <linux/nospec.h>
23#include <linux/uprobes.h> 24#include <linux/uprobes.h>
24 25
25#include <asm/desc.h> 26#include <asm/desc.h>
26#include <asm/traps.h> 27#include <asm/traps.h>
27#include <asm/vdso.h> 28#include <asm/vdso.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <asm/cpufeature.h>
29 31
30#define CREATE_TRACE_POINTS 32#define CREATE_TRACE_POINTS
31#include <trace/events/syscalls.h> 33#include <trace/events/syscalls.h>
@@ -381,6 +383,7 @@ __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
381 } 383 }
382 384
383 if (likely(nr < IA32_NR_syscalls)) { 385 if (likely(nr < IA32_NR_syscalls)) {
386 nr = array_index_nospec(nr, IA32_NR_syscalls);
384 /* 387 /*
385 * It's possible that a 32-bit syscall implementation 388 * It's possible that a 32-bit syscall implementation
386 * takes a 64-bit parameter but nonetheless assumes that 389 * takes a 64-bit parameter but nonetheless assumes that
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index d437f3871e53..49a8c9f7a379 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -40,7 +40,7 @@
40#include <asm/processor-flags.h> 40#include <asm/processor-flags.h>
41#include <asm/ftrace.h> 41#include <asm/ftrace.h>
42#include <asm/irq_vectors.h> 42#include <asm/irq_vectors.h>
43#include <asm/cpufeature.h> 43#include <asm/cpufeatures.h>
44#include <asm/alternative-asm.h> 44#include <asm/alternative-asm.h>
45#include <asm/asm.h> 45#include <asm/asm.h>
46#include <asm/smap.h> 46#include <asm/smap.h>
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index a03b22c615d9..92b840c94f17 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -178,12 +178,14 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
178 jnz tracesys 178 jnz tracesys
179entry_SYSCALL_64_fastpath: 179entry_SYSCALL_64_fastpath:
180#if __SYSCALL_MASK == ~0 180#if __SYSCALL_MASK == ~0
181 cmpq $__NR_syscall_max, %rax 181 cmpq $NR_syscalls, %rax
182#else 182#else
183 andl $__SYSCALL_MASK, %eax 183 andl $__SYSCALL_MASK, %eax
184 cmpl $__NR_syscall_max, %eax 184 cmpl $NR_syscalls, %eax
185#endif 185#endif
186 ja 1f /* return -ENOSYS (already in pt_regs->ax) */ 186 jae 1f /* return -ENOSYS (already in pt_regs->ax) */
187 sbb %rcx, %rcx /* array_index_mask_nospec() */
188 and %rcx, %rax
187 movq %r10, %rcx 189 movq %r10, %rcx
188#ifdef CONFIG_RETPOLINE 190#ifdef CONFIG_RETPOLINE
189 movq sys_call_table(, %rax, 8), %rax 191 movq sys_call_table(, %rax, 8), %rax
@@ -276,12 +278,14 @@ tracesys_phase2:
276 RESTORE_C_REGS_EXCEPT_RAX 278 RESTORE_C_REGS_EXCEPT_RAX
277 RESTORE_EXTRA_REGS 279 RESTORE_EXTRA_REGS
278#if __SYSCALL_MASK == ~0 280#if __SYSCALL_MASK == ~0
279 cmpq $__NR_syscall_max, %rax 281 cmpq $NR_syscalls, %rax
280#else 282#else
281 andl $__SYSCALL_MASK, %eax 283 andl $__SYSCALL_MASK, %eax
282 cmpl $__NR_syscall_max, %eax 284 cmpl $NR_syscalls, %eax
283#endif 285#endif
284 ja 1f /* return -ENOSYS (already in pt_regs->ax) */ 286 jae 1f /* return -ENOSYS (already in pt_regs->ax) */
287 sbb %rcx, %rcx /* array_index_mask_nospec() */
288 and %rcx, %rax
285 movq %r10, %rcx /* fixup for C */ 289 movq %r10, %rcx /* fixup for C */
286#ifdef CONFIG_RETPOLINE 290#ifdef CONFIG_RETPOLINE
287 movq sys_call_table(, %rax, 8), %rax 291 movq sys_call_table(, %rax, 8), %rax
@@ -1014,7 +1018,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
1014#endif /* CONFIG_HYPERV */ 1018#endif /* CONFIG_HYPERV */
1015 1019
1016idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK 1020idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
1017idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK 1021idtentry int3 do_int3 has_error_code=0
1018idtentry stack_segment do_stack_segment has_error_code=1 1022idtentry stack_segment do_stack_segment has_error_code=1
1019 1023
1020#ifdef CONFIG_XEN 1024#ifdef CONFIG_XEN
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index d03bf0e28b8b..48c27c3fdfdb 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -79,24 +79,33 @@ ENTRY(entry_SYSENTER_compat)
79 ASM_CLAC /* Clear AC after saving FLAGS */ 79 ASM_CLAC /* Clear AC after saving FLAGS */
80 80
81 pushq $__USER32_CS /* pt_regs->cs */ 81 pushq $__USER32_CS /* pt_regs->cs */
82 xorq %r8,%r8 82 pushq $0 /* pt_regs->ip = 0 (placeholder) */
83 pushq %r8 /* pt_regs->ip = 0 (placeholder) */
84 pushq %rax /* pt_regs->orig_ax */ 83 pushq %rax /* pt_regs->orig_ax */
85 pushq %rdi /* pt_regs->di */ 84 pushq %rdi /* pt_regs->di */
86 pushq %rsi /* pt_regs->si */ 85 pushq %rsi /* pt_regs->si */
87 pushq %rdx /* pt_regs->dx */ 86 pushq %rdx /* pt_regs->dx */
88 pushq %rcx /* pt_regs->cx */ 87 pushq %rcx /* pt_regs->cx */
89 pushq $-ENOSYS /* pt_regs->ax */ 88 pushq $-ENOSYS /* pt_regs->ax */
90 pushq %r8 /* pt_regs->r8 = 0 */ 89 pushq $0 /* pt_regs->r8 = 0 */
91 pushq %r8 /* pt_regs->r9 = 0 */ 90 xorq %r8, %r8 /* nospec r8 */
92 pushq %r8 /* pt_regs->r10 = 0 */ 91 pushq $0 /* pt_regs->r9 = 0 */
93 pushq %r8 /* pt_regs->r11 = 0 */ 92 xorq %r9, %r9 /* nospec r9 */
93 pushq $0 /* pt_regs->r10 = 0 */
94 xorq %r10, %r10 /* nospec r10 */
95 pushq $0 /* pt_regs->r11 = 0 */
96 xorq %r11, %r11 /* nospec r11 */
94 pushq %rbx /* pt_regs->rbx */ 97 pushq %rbx /* pt_regs->rbx */
98 xorl %ebx, %ebx /* nospec rbx */
95 pushq %rbp /* pt_regs->rbp (will be overwritten) */ 99 pushq %rbp /* pt_regs->rbp (will be overwritten) */
96 pushq %r8 /* pt_regs->r12 = 0 */ 100 xorl %ebp, %ebp /* nospec rbp */
97 pushq %r8 /* pt_regs->r13 = 0 */ 101 pushq $0 /* pt_regs->r12 = 0 */
98 pushq %r8 /* pt_regs->r14 = 0 */ 102 xorq %r12, %r12 /* nospec r12 */
99 pushq %r8 /* pt_regs->r15 = 0 */ 103 pushq $0 /* pt_regs->r13 = 0 */
104 xorq %r13, %r13 /* nospec r13 */
105 pushq $0 /* pt_regs->r14 = 0 */
106 xorq %r14, %r14 /* nospec r14 */
107 pushq $0 /* pt_regs->r15 = 0 */
108 xorq %r15, %r15 /* nospec r15 */
100 cld 109 cld
101 110
102 /* 111 /*
@@ -185,17 +194,26 @@ ENTRY(entry_SYSCALL_compat)
185 pushq %rdx /* pt_regs->dx */ 194 pushq %rdx /* pt_regs->dx */
186 pushq %rbp /* pt_regs->cx (stashed in bp) */ 195 pushq %rbp /* pt_regs->cx (stashed in bp) */
187 pushq $-ENOSYS /* pt_regs->ax */ 196 pushq $-ENOSYS /* pt_regs->ax */
188 xorq %r8,%r8 197 pushq $0 /* pt_regs->r8 = 0 */
189 pushq %r8 /* pt_regs->r8 = 0 */ 198 xorq %r8, %r8 /* nospec r8 */
190 pushq %r8 /* pt_regs->r9 = 0 */ 199 pushq $0 /* pt_regs->r9 = 0 */
191 pushq %r8 /* pt_regs->r10 = 0 */ 200 xorq %r9, %r9 /* nospec r9 */
192 pushq %r8 /* pt_regs->r11 = 0 */ 201 pushq $0 /* pt_regs->r10 = 0 */
202 xorq %r10, %r10 /* nospec r10 */
203 pushq $0 /* pt_regs->r11 = 0 */
204 xorq %r11, %r11 /* nospec r11 */
193 pushq %rbx /* pt_regs->rbx */ 205 pushq %rbx /* pt_regs->rbx */
206 xorl %ebx, %ebx /* nospec rbx */
194 pushq %rbp /* pt_regs->rbp (will be overwritten) */ 207 pushq %rbp /* pt_regs->rbp (will be overwritten) */
195 pushq %r8 /* pt_regs->r12 = 0 */ 208 xorl %ebp, %ebp /* nospec rbp */
196 pushq %r8 /* pt_regs->r13 = 0 */ 209 pushq $0 /* pt_regs->r12 = 0 */
197 pushq %r8 /* pt_regs->r14 = 0 */ 210 xorq %r12, %r12 /* nospec r12 */
198 pushq %r8 /* pt_regs->r15 = 0 */ 211 pushq $0 /* pt_regs->r13 = 0 */
212 xorq %r13, %r13 /* nospec r13 */
213 pushq $0 /* pt_regs->r14 = 0 */
214 xorq %r14, %r14 /* nospec r14 */
215 pushq $0 /* pt_regs->r15 = 0 */
216 xorq %r15, %r15 /* nospec r15 */
199 217
200 /* 218 /*
201 * User mode is traced as though IRQs are on, and SYSENTER 219 * User mode is traced as though IRQs are on, and SYSENTER
@@ -292,17 +310,26 @@ ENTRY(entry_INT80_compat)
292 pushq %rdx /* pt_regs->dx */ 310 pushq %rdx /* pt_regs->dx */
293 pushq %rcx /* pt_regs->cx */ 311 pushq %rcx /* pt_regs->cx */
294 pushq $-ENOSYS /* pt_regs->ax */ 312 pushq $-ENOSYS /* pt_regs->ax */
295 xorq %r8,%r8 313 pushq $0 /* pt_regs->r8 = 0 */
296 pushq %r8 /* pt_regs->r8 = 0 */ 314 xorq %r8, %r8 /* nospec r8 */
297 pushq %r8 /* pt_regs->r9 = 0 */ 315 pushq $0 /* pt_regs->r9 = 0 */
298 pushq %r8 /* pt_regs->r10 = 0 */ 316 xorq %r9, %r9 /* nospec r9 */
299 pushq %r8 /* pt_regs->r11 = 0 */ 317 pushq $0 /* pt_regs->r10 = 0 */
318 xorq %r10, %r10 /* nospec r10 */
319 pushq $0 /* pt_regs->r11 = 0 */
320 xorq %r11, %r11 /* nospec r11 */
300 pushq %rbx /* pt_regs->rbx */ 321 pushq %rbx /* pt_regs->rbx */
322 xorl %ebx, %ebx /* nospec rbx */
301 pushq %rbp /* pt_regs->rbp */ 323 pushq %rbp /* pt_regs->rbp */
324 xorl %ebp, %ebp /* nospec rbp */
302 pushq %r12 /* pt_regs->r12 */ 325 pushq %r12 /* pt_regs->r12 */
326 xorq %r12, %r12 /* nospec r12 */
303 pushq %r13 /* pt_regs->r13 */ 327 pushq %r13 /* pt_regs->r13 */
328 xorq %r13, %r13 /* nospec r13 */
304 pushq %r14 /* pt_regs->r14 */ 329 pushq %r14 /* pt_regs->r14 */
330 xorq %r14, %r14 /* nospec r14 */
305 pushq %r15 /* pt_regs->r15 */ 331 pushq %r15 /* pt_regs->r15 */
332 xorq %r15, %r15 /* nospec r15 */
306 cld 333 cld
307 334
308 /* 335 /*
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index a7508d7e20b7..3f9d1a83891a 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -11,7 +11,6 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/mm_types.h> 12#include <linux/mm_types.h>
13 13
14#include <asm/cpufeature.h>
15#include <asm/processor.h> 14#include <asm/processor.h>
16#include <asm/vdso.h> 15#include <asm/vdso.h>
17 16
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
index 3a1d9297074b..0109ac6cb79c 100644
--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -3,7 +3,7 @@
3*/ 3*/
4 4
5#include <asm/dwarf2.h> 5#include <asm/dwarf2.h>
6#include <asm/cpufeature.h> 6#include <asm/cpufeatures.h>
7#include <asm/alternative-asm.h> 7#include <asm/alternative-asm.h>
8 8
9/* 9/*
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index b8f69e264ac4..6b46648588d8 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -20,6 +20,7 @@
20#include <asm/page.h> 20#include <asm/page.h>
21#include <asm/hpet.h> 21#include <asm/hpet.h>
22#include <asm/desc.h> 22#include <asm/desc.h>
23#include <asm/cpufeature.h>
23 24
24#if defined(CONFIG_X86_64) 25#if defined(CONFIG_X86_64)
25unsigned int __read_mostly vdso64_enabled = 1; 26unsigned int __read_mostly vdso64_enabled = 1;
@@ -254,7 +255,7 @@ static void vgetcpu_cpu_init(void *arg)
254#ifdef CONFIG_NUMA 255#ifdef CONFIG_NUMA
255 node = cpu_to_node(cpu); 256 node = cpu_to_node(cpu);
256#endif 257#endif
257 if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP)) 258 if (static_cpu_has(X86_FEATURE_RDTSCP))
258 write_rdtscp_aux((node << 12) | cpu); 259 write_rdtscp_aux((node << 12) | cpu);
259 260
260 /* 261 /*
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 112178b401a1..2d359991a273 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -46,6 +46,7 @@ static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
46#else 46#else
47 EMULATE; 47 EMULATE;
48#endif 48#endif
49unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL;
49 50
50static int __init vsyscall_setup(char *str) 51static int __init vsyscall_setup(char *str)
51{ 52{
@@ -336,11 +337,11 @@ void __init map_vsyscall(void)
336 extern char __vsyscall_page; 337 extern char __vsyscall_page;
337 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); 338 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
338 339
340 if (vsyscall_mode != NATIVE)
341 vsyscall_pgprot = __PAGE_KERNEL_VVAR;
339 if (vsyscall_mode != NONE) 342 if (vsyscall_mode != NONE)
340 __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, 343 __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
341 vsyscall_mode == NATIVE 344 __pgprot(vsyscall_pgprot));
342 ? PAGE_KERNEL_VSYSCALL
343 : PAGE_KERNEL_VVAR);
344 345
345 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) != 346 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
346 (unsigned long)VSYSCALL_ADDR); 347 (unsigned long)VSYSCALL_ADDR);
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 215ea9214215..002fcd901f07 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -154,12 +154,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
154 ".popsection\n" 154 ".popsection\n"
155 155
156/* 156/*
157 * This must be included *after* the definition of ALTERNATIVE due to
158 * <asm/arch_hweight.h>
159 */
160#include <asm/cpufeature.h>
161
162/*
163 * Alternative instructions for different CPU types or capabilities. 157 * Alternative instructions for different CPU types or capabilities.
164 * 158 *
165 * This allows to use optimized instructions even on generic binary 159 * This allows to use optimized instructions even on generic binary
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 163769d82475..fd810a57ab1b 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -6,7 +6,6 @@
6 6
7#include <asm/alternative.h> 7#include <asm/alternative.h>
8#include <asm/cpufeature.h> 8#include <asm/cpufeature.h>
9#include <asm/processor.h>
10#include <asm/apicdef.h> 9#include <asm/apicdef.h>
11#include <linux/atomic.h> 10#include <linux/atomic.h>
12#include <asm/fixmap.h> 11#include <asm/fixmap.h>
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
index 20370c6db74b..3d1ec41ae09a 100644
--- a/arch/x86/include/asm/apm.h
+++ b/arch/x86/include/asm/apm.h
@@ -6,6 +6,8 @@
6#ifndef _ASM_X86_MACH_DEFAULT_APM_H 6#ifndef _ASM_X86_MACH_DEFAULT_APM_H
7#define _ASM_X86_MACH_DEFAULT_APM_H 7#define _ASM_X86_MACH_DEFAULT_APM_H
8 8
9#include <asm/nospec-branch.h>
10
9#ifdef APM_ZERO_SEGS 11#ifdef APM_ZERO_SEGS
10# define APM_DO_ZERO_SEGS \ 12# define APM_DO_ZERO_SEGS \
11 "pushl %%ds\n\t" \ 13 "pushl %%ds\n\t" \
@@ -31,6 +33,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
31 * N.B. We do NOT need a cld after the BIOS call 33 * N.B. We do NOT need a cld after the BIOS call
32 * because we always save and restore the flags. 34 * because we always save and restore the flags.
33 */ 35 */
36 firmware_restrict_branch_speculation_start();
34 __asm__ __volatile__(APM_DO_ZERO_SEGS 37 __asm__ __volatile__(APM_DO_ZERO_SEGS
35 "pushl %%edi\n\t" 38 "pushl %%edi\n\t"
36 "pushl %%ebp\n\t" 39 "pushl %%ebp\n\t"
@@ -43,6 +46,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
43 "=S" (*esi) 46 "=S" (*esi)
44 : "a" (func), "b" (ebx_in), "c" (ecx_in) 47 : "a" (func), "b" (ebx_in), "c" (ecx_in)
45 : "memory", "cc"); 48 : "memory", "cc");
49 firmware_restrict_branch_speculation_end();
46} 50}
47 51
48static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, 52static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -55,6 +59,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
55 * N.B. We do NOT need a cld after the BIOS call 59 * N.B. We do NOT need a cld after the BIOS call
56 * because we always save and restore the flags. 60 * because we always save and restore the flags.
57 */ 61 */
62 firmware_restrict_branch_speculation_start();
58 __asm__ __volatile__(APM_DO_ZERO_SEGS 63 __asm__ __volatile__(APM_DO_ZERO_SEGS
59 "pushl %%edi\n\t" 64 "pushl %%edi\n\t"
60 "pushl %%ebp\n\t" 65 "pushl %%ebp\n\t"
@@ -67,6 +72,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
67 "=S" (si) 72 "=S" (si)
68 : "a" (func), "b" (ebx_in), "c" (ecx_in) 73 : "a" (func), "b" (ebx_in), "c" (ecx_in)
69 : "memory", "cc"); 74 : "memory", "cc");
75 firmware_restrict_branch_speculation_end();
70 return error; 76 return error;
71} 77}
72 78
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index 259a7c1ef709..e7cd63175de4 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -1,9 +1,11 @@
1#ifndef _ASM_X86_HWEIGHT_H 1#ifndef _ASM_X86_HWEIGHT_H
2#define _ASM_X86_HWEIGHT_H 2#define _ASM_X86_HWEIGHT_H
3 3
4#include <asm/cpufeatures.h>
5
4#ifdef CONFIG_64BIT 6#ifdef CONFIG_64BIT
5/* popcnt %edi, %eax -- redundant REX prefix for alignment */ 7/* popcnt %edi, %eax */
6#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7" 8#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7"
7/* popcnt %rdi, %rax */ 9/* popcnt %rdi, %rax */
8#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7" 10#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
9#define REG_IN "D" 11#define REG_IN "D"
@@ -15,19 +17,15 @@
15#define REG_OUT "a" 17#define REG_OUT "a"
16#endif 18#endif
17 19
18/* 20#define __HAVE_ARCH_SW_HWEIGHT
19 * __sw_hweightXX are called from within the alternatives below 21
20 * and callee-clobbered registers need to be taken care of. See
21 * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
22 * compiler switches.
23 */
24static __always_inline unsigned int __arch_hweight32(unsigned int w) 22static __always_inline unsigned int __arch_hweight32(unsigned int w)
25{ 23{
26 unsigned int res = 0; 24 unsigned int res;
27 25
28 asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT) 26 asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
29 : "="REG_OUT (res) 27 : "="REG_OUT (res)
30 : REG_IN (w)); 28 : REG_IN (w));
31 29
32 return res; 30 return res;
33} 31}
@@ -51,11 +49,11 @@ static inline unsigned long __arch_hweight64(__u64 w)
51#else 49#else
52static __always_inline unsigned long __arch_hweight64(__u64 w) 50static __always_inline unsigned long __arch_hweight64(__u64 w)
53{ 51{
54 unsigned long res = 0; 52 unsigned long res;
55 53
56 asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT) 54 asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
57 : "="REG_OUT (res) 55 : "="REG_OUT (res)
58 : REG_IN (w)); 56 : REG_IN (w));
59 57
60 return res; 58 return res;
61} 59}
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index b15aa4083dfd..5a25ada75aeb 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -37,5 +37,4 @@ INDIRECT_THUNK(dx)
37INDIRECT_THUNK(si) 37INDIRECT_THUNK(si)
38INDIRECT_THUNK(di) 38INDIRECT_THUNK(di)
39INDIRECT_THUNK(bp) 39INDIRECT_THUNK(bp)
40INDIRECT_THUNK(sp)
41#endif /* CONFIG_RETPOLINE */ 40#endif /* CONFIG_RETPOLINE */
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index b9c6c7a6f5a6..21e84a31d211 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -11,10 +11,12 @@
11# define __ASM_FORM_COMMA(x) " " #x "," 11# define __ASM_FORM_COMMA(x) " " #x ","
12#endif 12#endif
13 13
14#ifdef CONFIG_X86_32 14#ifndef __x86_64__
15/* 32 bit */
15# define __ASM_SEL(a,b) __ASM_FORM(a) 16# define __ASM_SEL(a,b) __ASM_FORM(a)
16# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a) 17# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
17#else 18#else
19/* 64 bit */
18# define __ASM_SEL(a,b) __ASM_FORM(b) 20# define __ASM_SEL(a,b) __ASM_FORM(b)
19# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b) 21# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
20#endif 22#endif
@@ -42,6 +44,65 @@
42#define _ASM_SI __ASM_REG(si) 44#define _ASM_SI __ASM_REG(si)
43#define _ASM_DI __ASM_REG(di) 45#define _ASM_DI __ASM_REG(di)
44 46
47#ifndef __x86_64__
48/* 32 bit */
49
50#define _ASM_ARG1 _ASM_AX
51#define _ASM_ARG2 _ASM_DX
52#define _ASM_ARG3 _ASM_CX
53
54#define _ASM_ARG1L eax
55#define _ASM_ARG2L edx
56#define _ASM_ARG3L ecx
57
58#define _ASM_ARG1W ax
59#define _ASM_ARG2W dx
60#define _ASM_ARG3W cx
61
62#define _ASM_ARG1B al
63#define _ASM_ARG2B dl
64#define _ASM_ARG3B cl
65
66#else
67/* 64 bit */
68
69#define _ASM_ARG1 _ASM_DI
70#define _ASM_ARG2 _ASM_SI
71#define _ASM_ARG3 _ASM_DX
72#define _ASM_ARG4 _ASM_CX
73#define _ASM_ARG5 r8
74#define _ASM_ARG6 r9
75
76#define _ASM_ARG1Q rdi
77#define _ASM_ARG2Q rsi
78#define _ASM_ARG3Q rdx
79#define _ASM_ARG4Q rcx
80#define _ASM_ARG5Q r8
81#define _ASM_ARG6Q r9
82
83#define _ASM_ARG1L edi
84#define _ASM_ARG2L esi
85#define _ASM_ARG3L edx
86#define _ASM_ARG4L ecx
87#define _ASM_ARG5L r8d
88#define _ASM_ARG6L r9d
89
90#define _ASM_ARG1W di
91#define _ASM_ARG2W si
92#define _ASM_ARG3W dx
93#define _ASM_ARG4W cx
94#define _ASM_ARG5W r8w
95#define _ASM_ARG6W r9w
96
97#define _ASM_ARG1B dil
98#define _ASM_ARG2B sil
99#define _ASM_ARG3B dl
100#define _ASM_ARG4B cl
101#define _ASM_ARG5B r8b
102#define _ASM_ARG6B r9b
103
104#endif
105
45/* Exception table entry */ 106/* Exception table entry */
46#ifdef __ASSEMBLY__ 107#ifdef __ASSEMBLY__
47# define _ASM_EXTABLE(from,to) \ 108# define _ASM_EXTABLE(from,to) \
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index ae5fb83e6d91..3e8674288198 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -3,7 +3,6 @@
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <linux/types.h> 5#include <linux/types.h>
6#include <asm/processor.h>
7#include <asm/alternative.h> 6#include <asm/alternative.h>
8#include <asm/cmpxchg.h> 7#include <asm/cmpxchg.h>
9#include <asm/rmwcc.h> 8#include <asm/rmwcc.h>
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index a11c30b77fb5..a984111135b1 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -3,7 +3,6 @@
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <linux/types.h> 5#include <linux/types.h>
6#include <asm/processor.h>
7//#include <asm/cmpxchg.h> 6//#include <asm/cmpxchg.h>
8 7
9/* An 64bit atomic type */ 8/* An 64bit atomic type */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 0681d2532527..7f5dcb64cedb 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -24,6 +24,34 @@
24#define wmb() asm volatile("sfence" ::: "memory") 24#define wmb() asm volatile("sfence" ::: "memory")
25#endif 25#endif
26 26
27/**
28 * array_index_mask_nospec() - generate a mask that is ~0UL when the
29 * bounds check succeeds and 0 otherwise
30 * @index: array element index
31 * @size: number of elements in array
32 *
33 * Returns:
34 * 0 - (index < size)
35 */
36static inline unsigned long array_index_mask_nospec(unsigned long index,
37 unsigned long size)
38{
39 unsigned long mask;
40
41 asm volatile ("cmp %1,%2; sbb %0,%0;"
42 :"=r" (mask)
43 :"g"(size),"r" (index)
44 :"cc");
45 return mask;
46}
47
48/* Override the default implementation from linux/nospec.h. */
49#define array_index_mask_nospec array_index_mask_nospec
50
51/* Prevent speculative execution past this barrier. */
52#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
53 "lfence", X86_FEATURE_LFENCE_RDTSC)
54
27#ifdef CONFIG_X86_PPRO_FENCE 55#ifdef CONFIG_X86_PPRO_FENCE
28#define dma_rmb() rmb() 56#define dma_rmb() rmb()
29#else 57#else
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index ad19841eddfe..9733361fed6f 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -2,6 +2,7 @@
2#define ASM_X86_CMPXCHG_H 2#define ASM_X86_CMPXCHG_H
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <asm/cpufeatures.h>
5#include <asm/alternative.h> /* Provides LOCK_PREFIX */ 6#include <asm/alternative.h> /* Provides LOCK_PREFIX */
6 7
7/* 8/*
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index f7e142926481..e4959d023af8 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -109,6 +109,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
109 109
110#endif 110#endif
111 111
112#define system_has_cmpxchg_double() cpu_has_cx8 112#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
113 113
114#endif /* _ASM_X86_CMPXCHG_32_H */ 114#endif /* _ASM_X86_CMPXCHG_32_H */
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 1af94697aae5..caa23a34c963 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -18,6 +18,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
18 cmpxchg_local((ptr), (o), (n)); \ 18 cmpxchg_local((ptr), (o), (n)); \
19}) 19})
20 20
21#define system_has_cmpxchg_double() cpu_has_cx16 21#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
22 22
23#endif /* _ASM_X86_CMPXCHG_64_H */ 23#endif /* _ASM_X86_CMPXCHG_64_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0fbc98568018..d72c1db64679 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -1,293 +1,36 @@
1/*
2 * Defines x86 CPU feature bits
3 */
4#ifndef _ASM_X86_CPUFEATURE_H 1#ifndef _ASM_X86_CPUFEATURE_H
5#define _ASM_X86_CPUFEATURE_H 2#define _ASM_X86_CPUFEATURE_H
6 3
7#ifndef _ASM_X86_REQUIRED_FEATURES_H 4#include <asm/processor.h>
8#include <asm/required-features.h>
9#endif
10
11#ifndef _ASM_X86_DISABLED_FEATURES_H
12#include <asm/disabled-features.h>
13#endif
14
15#define NCAPINTS 14 /* N 32-bit words worth of info */
16#define NBUGINTS 1 /* N 32-bit bug flags */
17
18/*
19 * Note: If the comment begins with a quoted string, that string is used
20 * in /proc/cpuinfo instead of the macro name. If the string is "",
21 * this feature bit is not displayed in /proc/cpuinfo at all.
22 */
23
24/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
25#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
26#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
27#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
28#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
29#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
30#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
31#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
32#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
33#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
34#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
35#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
36#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
37#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
38#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
39#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
40 /* (plus FCMOVcc, FCOMI with FPU) */
41#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
42#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
43#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
44#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
45#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
46#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
47#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
48#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
49#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
50#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
51#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
52#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
53#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
54#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
55#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
56
57/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
58/* Don't duplicate feature flags which are redundant with Intel! */
59#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
60#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
61#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
62#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
63#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
64#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
65#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
66#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
67#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
68#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
69
70/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
71#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
72#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
73#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
74
75/* Other features, Linux-defined mapping, word 3 */
76/* This range is used for feature bits which conflict or are synthesized */
77#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
78#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
79#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
80#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
81/* cpu types for specific tunings: */
82#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
83#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
84#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
85#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
86#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
87#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
88/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
89#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
90#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
91#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
92#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
93#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
94#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
95#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
96#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
97/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
98#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
99#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
100#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
101#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
102#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
103/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
104#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
105#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
106#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
107#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
108#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
109
110/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
111#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
112#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
113#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
114#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
115#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
116#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
117#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
118#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
119#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
120#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
121#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
122#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
123#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
124#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
125#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
126#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
127#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
128#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
129#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
130#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
131#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
132#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
133#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
134#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
135#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
136#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
137#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
138#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
139#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
140#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
141#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
142
143/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
144#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
145#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
146#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
147#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
148#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
149#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
150#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
151#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
152#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
153#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
154
155/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
156#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
157#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
158#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
159#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
160#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
161#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
162#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
163#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
164#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
165#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
166#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
167#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
168#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
169#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
170#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
171#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
172#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
173#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
174#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
175#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
176#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
177#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
178#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
179#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
180#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
181
182/*
183 * Auxiliary flags: Linux defined - For features scattered in various
184 * CPUID levels like 0x6, 0xA etc, word 7
185 */
186#define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */
187#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */
188#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
189#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
190#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
191#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */
192#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */
193#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */
194#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
195#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
196#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
197#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
198#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
199#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
200#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
201#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
202
203#define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
204#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
205/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
206#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
207
208/* Virtualization flags: Linux defined, word 8 */
209#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
210#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
211#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
212#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
213#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
214#define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */
215#define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */
216#define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
217#define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
218#define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
219#define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
220#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */
221#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
222#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
223#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
224#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
225#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
226
227
228/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
229#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
230#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
231#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
232#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
233#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
234#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
235#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
236#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
237#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
238#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
239#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
240#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
241#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
242#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
243#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
244#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
245#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
246#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
247#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
248#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
249#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
250#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
251#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
252
253/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
254#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
255#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
256#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
257#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
258
259/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
260#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
261
262/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
263#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
264
265/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
266#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
267
268/*
269 * BUG word(s)
270 */
271#define X86_BUG(x) (NCAPINTS*32 + (x))
272
273#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
274#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
275#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
276#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
277#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
278#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
279#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
280#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
281#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
282#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
283#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
284#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
285 5
286#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 6#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
287 7
288#include <asm/asm.h> 8#include <asm/asm.h>
289#include <linux/bitops.h> 9#include <linux/bitops.h>
290 10
11enum cpuid_leafs
12{
13 CPUID_1_EDX = 0,
14 CPUID_8000_0001_EDX,
15 CPUID_8086_0001_EDX,
16 CPUID_LNX_1,
17 CPUID_1_ECX,
18 CPUID_C000_0001_EDX,
19 CPUID_8000_0001_ECX,
20 CPUID_LNX_2,
21 CPUID_LNX_3,
22 CPUID_7_0_EBX,
23 CPUID_D_1_EAX,
24 CPUID_F_0_EDX,
25 CPUID_F_1_EDX,
26 CPUID_8000_0008_EBX,
27 CPUID_6_EAX,
28 CPUID_8000_000A_EDX,
29 CPUID_7_ECX,
30 CPUID_8000_0007_EBX,
31 CPUID_7_EDX,
32};
33
291#ifdef CONFIG_X86_FEATURE_NAMES 34#ifdef CONFIG_X86_FEATURE_NAMES
292extern const char * const x86_cap_flags[NCAPINTS*32]; 35extern const char * const x86_cap_flags[NCAPINTS*32];
293extern const char * const x86_power_flags[32]; 36extern const char * const x86_power_flags[32];
@@ -307,29 +50,61 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
307#define test_cpu_cap(c, bit) \ 50#define test_cpu_cap(c, bit) \
308 test_bit(bit, (unsigned long *)((c)->x86_capability)) 51 test_bit(bit, (unsigned long *)((c)->x86_capability))
309 52
310#define REQUIRED_MASK_BIT_SET(bit) \ 53/*
311 ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ 54 * There are 32 bits/features in each mask word. The high bits
312 (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ 55 * (selected with (bit>>5) give us the word number and the low 5
313 (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ 56 * bits give us the bit/feature number inside the word.
314 (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \ 57 * (1UL<<((bit)&31) gives us a mask for the feature_bit so we can
315 (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \ 58 * see if it is set in the mask word.
316 (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \ 59 */
317 (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ 60#define CHECK_BIT_IN_MASK_WORD(maskname, word, bit) \
318 (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \ 61 (((bit)>>5)==(word) && (1UL<<((bit)&31) & maskname##word ))
319 (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \ 62
320 (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) ) 63#define REQUIRED_MASK_BIT_SET(feature_bit) \
321 64 ( CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 0, feature_bit) || \
322#define DISABLED_MASK_BIT_SET(bit) \ 65 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 1, feature_bit) || \
323 ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) || \ 66 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 2, feature_bit) || \
324 (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) || \ 67 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 3, feature_bit) || \
325 (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) || \ 68 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 4, feature_bit) || \
326 (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) || \ 69 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 5, feature_bit) || \
327 (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) || \ 70 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 6, feature_bit) || \
328 (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) || \ 71 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 7, feature_bit) || \
329 (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) || \ 72 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 8, feature_bit) || \
330 (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) || \ 73 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 9, feature_bit) || \
331 (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) || \ 74 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 10, feature_bit) || \
332 (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) ) 75 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 11, feature_bit) || \
76 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 12, feature_bit) || \
77 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 13, feature_bit) || \
78 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 14, feature_bit) || \
79 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \
80 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
81 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
82 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
83 REQUIRED_MASK_CHECK || \
84 BUILD_BUG_ON_ZERO(NCAPINTS != 19))
85
86#define DISABLED_MASK_BIT_SET(feature_bit) \
87 ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
88 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 1, feature_bit) || \
89 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 2, feature_bit) || \
90 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 3, feature_bit) || \
91 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 4, feature_bit) || \
92 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 5, feature_bit) || \
93 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 6, feature_bit) || \
94 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 7, feature_bit) || \
95 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 8, feature_bit) || \
96 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 9, feature_bit) || \
97 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 10, feature_bit) || \
98 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 11, feature_bit) || \
99 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 12, feature_bit) || \
100 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 13, feature_bit) || \
101 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 14, feature_bit) || \
102 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \
103 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
104 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
105 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
106 DISABLED_MASK_CHECK || \
107 BUILD_BUG_ON_ZERO(NCAPINTS != 19))
333 108
334#define cpu_has(c, bit) \ 109#define cpu_has(c, bit) \
335 (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ 110 (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
@@ -348,8 +123,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
348 * is not relevant. 123 * is not relevant.
349 */ 124 */
350#define cpu_feature_enabled(bit) \ 125#define cpu_feature_enabled(bit) \
351 (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : \ 126 (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : static_cpu_has(bit))
352 cpu_has(&boot_cpu_data, bit))
353 127
354#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) 128#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
355 129
@@ -367,155 +141,39 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
367#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) 141#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
368 142
369#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) 143#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
370#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
371#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) 144#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
372#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) 145#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
373#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) 146#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
374#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) 147#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
375#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
376#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
377#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX)
378#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) 148#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
379#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) 149#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
380#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) 150#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
381#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
382#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
383#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) 151#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
384#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX) 152#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
385#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2) 153#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2)
386#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
387#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
388#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
389#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
390#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
391#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
392#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
393#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
394#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
395#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
396#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
397#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
398#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
399#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
400#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH) 154#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH)
401#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
402#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) 155#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
403#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) 156#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
404#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) 157#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
405#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
406#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
407#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) 158#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
408#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) 159#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
409#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)
410#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES) 160#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES)
411#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE) 161#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
412#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) 162#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
413#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) 163/*
414#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) 164 * Do not add any more of those clumsy macros - use static_cpu_has() for
415#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB) 165 * fast paths and boot_cpu_has() otherwise!
416#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2) 166 */
417#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
418#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
419#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
420#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
421#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
422
423#if __GNUC__ >= 4
424extern void warn_pre_alternatives(void);
425extern bool __static_cpu_has_safe(u16 bit);
426 167
168#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
427/* 169/*
428 * Static testing of CPU features. Used the same as boot_cpu_has(). 170 * Static testing of CPU features. Used the same as boot_cpu_has().
429 * These are only valid after alternatives have run, but will statically 171 * These will statically patch the target code for additional
430 * patch the target code for additional performance. 172 * performance.
431 */ 173 */
432static __always_inline __pure bool __static_cpu_has(u16 bit) 174static __always_inline __pure bool _static_cpu_has(u16 bit)
433{ 175{
434#ifdef CC_HAVE_ASM_GOTO 176 asm_volatile_goto("1: jmp 6f\n"
435
436#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
437
438 /*
439 * Catch too early usage of this before alternatives
440 * have run.
441 */
442 asm_volatile_goto("1: jmp %l[t_warn]\n"
443 "2:\n"
444 ".section .altinstructions,\"a\"\n"
445 " .long 1b - .\n"
446 " .long 0\n" /* no replacement */
447 " .word %P0\n" /* 1: do replace */
448 " .byte 2b - 1b\n" /* source len */
449 " .byte 0\n" /* replacement len */
450 " .byte 0\n" /* pad len */
451 ".previous\n"
452 /* skipping size check since replacement size = 0 */
453 : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
454
455#endif
456
457 asm_volatile_goto("1: jmp %l[t_no]\n"
458 "2:\n"
459 ".section .altinstructions,\"a\"\n"
460 " .long 1b - .\n"
461 " .long 0\n" /* no replacement */
462 " .word %P0\n" /* feature bit */
463 " .byte 2b - 1b\n" /* source len */
464 " .byte 0\n" /* replacement len */
465 " .byte 0\n" /* pad len */
466 ".previous\n"
467 /* skipping size check since replacement size = 0 */
468 : : "i" (bit) : : t_no);
469 return true;
470 t_no:
471 return false;
472
473#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
474 t_warn:
475 warn_pre_alternatives();
476 return false;
477#endif
478
479#else /* CC_HAVE_ASM_GOTO */
480
481 u8 flag;
482 /* Open-coded due to __stringify() in ALTERNATIVE() */
483 asm volatile("1: movb $0,%0\n"
484 "2:\n"
485 ".section .altinstructions,\"a\"\n"
486 " .long 1b - .\n"
487 " .long 3f - .\n"
488 " .word %P1\n" /* feature bit */
489 " .byte 2b - 1b\n" /* source len */
490 " .byte 4f - 3f\n" /* replacement len */
491 " .byte 0\n" /* pad len */
492 ".previous\n"
493 ".section .discard,\"aw\",@progbits\n"
494 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
495 ".previous\n"
496 ".section .altinstr_replacement,\"ax\"\n"
497 "3: movb $1,%0\n"
498 "4:\n"
499 ".previous\n"
500 : "=qm" (flag) : "i" (bit));
501 return flag;
502
503#endif /* CC_HAVE_ASM_GOTO */
504}
505
506#define static_cpu_has(bit) \
507( \
508 __builtin_constant_p(boot_cpu_has(bit)) ? \
509 boot_cpu_has(bit) : \
510 __builtin_constant_p(bit) ? \
511 __static_cpu_has(bit) : \
512 boot_cpu_has(bit) \
513)
514
515static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
516{
517#ifdef CC_HAVE_ASM_GOTO
518 asm_volatile_goto("1: jmp %l[t_dynamic]\n"
519 "2:\n" 177 "2:\n"
520 ".skip -(((5f-4f) - (2b-1b)) > 0) * " 178 ".skip -(((5f-4f) - (2b-1b)) > 0) * "
521 "((5f-4f) - (2b-1b)),0x90\n" 179 "((5f-4f) - (2b-1b)),0x90\n"
@@ -540,66 +198,34 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
540 " .byte 0\n" /* repl len */ 198 " .byte 0\n" /* repl len */
541 " .byte 0\n" /* pad len */ 199 " .byte 0\n" /* pad len */
542 ".previous\n" 200 ".previous\n"
543 : : "i" (bit), "i" (X86_FEATURE_ALWAYS) 201 ".section .altinstr_aux,\"ax\"\n"
544 : : t_dynamic, t_no); 202 "6:\n"
203 " testb %[bitnum],%[cap_byte]\n"
204 " jnz %l[t_yes]\n"
205 " jmp %l[t_no]\n"
206 ".previous\n"
207 : : "i" (bit), "i" (X86_FEATURE_ALWAYS),
208 [bitnum] "i" (1 << (bit & 7)),
209 [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
210 : : t_yes, t_no);
211 t_yes:
545 return true; 212 return true;
546 t_no: 213 t_no:
547 return false; 214 return false;
548 t_dynamic:
549 return __static_cpu_has_safe(bit);
550#else
551 u8 flag;
552 /* Open-coded due to __stringify() in ALTERNATIVE() */
553 asm volatile("1: movb $2,%0\n"
554 "2:\n"
555 ".section .altinstructions,\"a\"\n"
556 " .long 1b - .\n" /* src offset */
557 " .long 3f - .\n" /* repl offset */
558 " .word %P2\n" /* always replace */
559 " .byte 2b - 1b\n" /* source len */
560 " .byte 4f - 3f\n" /* replacement len */
561 " .byte 0\n" /* pad len */
562 ".previous\n"
563 ".section .discard,\"aw\",@progbits\n"
564 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
565 ".previous\n"
566 ".section .altinstr_replacement,\"ax\"\n"
567 "3: movb $0,%0\n"
568 "4:\n"
569 ".previous\n"
570 ".section .altinstructions,\"a\"\n"
571 " .long 1b - .\n" /* src offset */
572 " .long 5f - .\n" /* repl offset */
573 " .word %P1\n" /* feature bit */
574 " .byte 4b - 3b\n" /* src len */
575 " .byte 6f - 5f\n" /* repl len */
576 " .byte 0\n" /* pad len */
577 ".previous\n"
578 ".section .discard,\"aw\",@progbits\n"
579 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
580 ".previous\n"
581 ".section .altinstr_replacement,\"ax\"\n"
582 "5: movb $1,%0\n"
583 "6:\n"
584 ".previous\n"
585 : "=qm" (flag)
586 : "i" (bit), "i" (X86_FEATURE_ALWAYS));
587 return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
588#endif /* CC_HAVE_ASM_GOTO */
589} 215}
590 216
591#define static_cpu_has_safe(bit) \ 217#define static_cpu_has(bit) \
592( \ 218( \
593 __builtin_constant_p(boot_cpu_has(bit)) ? \ 219 __builtin_constant_p(boot_cpu_has(bit)) ? \
594 boot_cpu_has(bit) : \ 220 boot_cpu_has(bit) : \
595 _static_cpu_has_safe(bit) \ 221 _static_cpu_has(bit) \
596) 222)
597#else 223#else
598/* 224/*
599 * gcc 3.x is too stupid to do the static test; fall back to dynamic. 225 * Fall back to dynamic for gcc versions which don't support asm goto. Should be
226 * a minority now anyway.
600 */ 227 */
601#define static_cpu_has(bit) boot_cpu_has(bit) 228#define static_cpu_has(bit) boot_cpu_has(bit)
602#define static_cpu_has_safe(bit) boot_cpu_has(bit)
603#endif 229#endif
604 230
605#define cpu_has_bug(c, bit) cpu_has(c, (bit)) 231#define cpu_has_bug(c, bit) cpu_has(c, (bit))
@@ -607,7 +233,6 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
607#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit)) 233#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit))
608 234
609#define static_cpu_has_bug(bit) static_cpu_has((bit)) 235#define static_cpu_has_bug(bit) static_cpu_has((bit))
610#define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit))
611#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit)) 236#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
612 237
613#define MAX_CPU_FEATURES (NCAPINTS * 32) 238#define MAX_CPU_FEATURES (NCAPINTS * 32)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
new file mode 100644
index 000000000000..dd2269dcbc47
--- /dev/null
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -0,0 +1,337 @@
1#ifndef _ASM_X86_CPUFEATURES_H
2#define _ASM_X86_CPUFEATURES_H
3
4#ifndef _ASM_X86_REQUIRED_FEATURES_H
5#include <asm/required-features.h>
6#endif
7
8#ifndef _ASM_X86_DISABLED_FEATURES_H
9#include <asm/disabled-features.h>
10#endif
11
12/*
13 * Defines x86 CPU feature bits
14 */
15#define NCAPINTS 19 /* N 32-bit words worth of info */
16#define NBUGINTS 1 /* N 32-bit bug flags */
17
18/*
19 * Note: If the comment begins with a quoted string, that string is used
20 * in /proc/cpuinfo instead of the macro name. If the string is "",
21 * this feature bit is not displayed in /proc/cpuinfo at all.
22 */
23
24/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
25#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
26#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
27#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
28#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
29#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
30#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
31#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
32#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
33#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
34#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
35#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
36#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
37#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
38#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
39#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
40 /* (plus FCMOVcc, FCOMI with FPU) */
41#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
42#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
43#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
44#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
45#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
46#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
47#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
48#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
49#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
50#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
51#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
52#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
53#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
54#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
55#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
56
57/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
58/* Don't duplicate feature flags which are redundant with Intel! */
59#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
60#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
61#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
62#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
63#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
64#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
65#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
66#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
67#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
68#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
69
70/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
71#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
72#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
73#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
74
75/* Other features, Linux-defined mapping, word 3 */
76/* This range is used for feature bits which conflict or are synthesized */
77#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
78#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
79#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
80#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
81/* cpu types for specific tunings: */
82#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
83#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
84#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
85#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
86#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
87#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
88/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
89#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
90#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
91#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
92#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
93#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
94#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
95#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
96#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
97/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
98#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
99#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
100#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
101#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
102#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
103/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
104#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
105#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
106#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
107/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
108#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
109
110/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
111#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
112#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
113#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
114#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
115#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
116#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
117#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
118#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
119#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
120#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
121#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
122#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
123#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
124#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
125#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
126#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
127#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
128#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
129#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
130#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
131#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
132#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
133#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
134#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
135#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
136#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
137#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
138#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
139#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
140#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
141#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
142
143/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
144#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
145#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
146#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
147#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
148#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
149#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
150#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
151#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
152#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
153#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
154
155/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
156#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
157#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
158#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
159#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
160#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
161#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
162#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
163#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
164#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
165#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
166#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
167#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
168#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
169#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
170#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
171#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
172#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
173#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
174#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
175#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
176#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
177#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
178#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
179#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
180#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
181
182/*
183 * Auxiliary flags: Linux defined - For features scattered in various
184 * CPUID levels like 0x6, 0xA etc, word 7.
185 *
186 * Reuse free bits when adding new feature flags!
187 */
188
189#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
190#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
191#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
192
193#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
194#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
195
196#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
197#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
198
199#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
200#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
201
202#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
203#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
204
205/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
206#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
207
208#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled*/
209#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
210#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
211#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
212
213#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
214#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
215#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
216#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
217#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
218
219/* Virtualization flags: Linux defined, word 8 */
220#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
221#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
222#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
223#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
224#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
225
226#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
227#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
228
229
230/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
231#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
232#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
233#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
234#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
235#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
236#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
237#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
238#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
239#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
240#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
241#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
242#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
243#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
244#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
245#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
246#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
247#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
248#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
249#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
250#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
251#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
252#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
253#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
254
255/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
256#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
257#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
258#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
259#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
260
261/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
262#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
263
264/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
265#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
266
267/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
268#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
269#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
270#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
271#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
272#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
273
274/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
275#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
276#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
277#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
278#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
279#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
280#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
281#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
282#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
283#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
284#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
285
286/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
287#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
288#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
289#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
290#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
291#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
292#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
293#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
294#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
295#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
296#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
297
298/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
299#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
300#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
301
302/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
303#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
304#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
305#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */
306
307
308/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
309#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
310#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
311#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
312#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
313#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
314#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
315#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
316
317/*
318 * BUG word(s)
319 */
320#define X86_BUG(x) (NCAPINTS*32 + (x))
321
322#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
323#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
324#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
325#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
326#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
327#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
328#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
329#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
330#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
331#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
332#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
333#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
334#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
335#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
336
337#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 8b17c2ad1048..1f8cca459c6c 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -30,6 +30,14 @@
30# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31)) 30# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
31#endif /* CONFIG_X86_64 */ 31#endif /* CONFIG_X86_64 */
32 32
33#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
34# define DISABLE_PKU 0
35# define DISABLE_OSPKE 0
36#else
37# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31))
38# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
39#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
40
33/* 41/*
34 * Make sure to add features to the correct mask 42 * Make sure to add features to the correct mask
35 */ 43 */
@@ -43,5 +51,15 @@
43#define DISABLED_MASK7 0 51#define DISABLED_MASK7 0
44#define DISABLED_MASK8 0 52#define DISABLED_MASK8 0
45#define DISABLED_MASK9 (DISABLE_MPX) 53#define DISABLED_MASK9 (DISABLE_MPX)
54#define DISABLED_MASK10 0
55#define DISABLED_MASK11 0
56#define DISABLED_MASK12 0
57#define DISABLED_MASK13 0
58#define DISABLED_MASK14 0
59#define DISABLED_MASK15 0
60#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
61#define DISABLED_MASK17 0
62#define DISABLED_MASK18 0
63#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
46 64
47#endif /* _ASM_X86_DISABLED_FEATURES_H */ 65#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 08b1f2f6ea50..cfde088f8e95 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -3,6 +3,7 @@
3 3
4#include <asm/fpu/api.h> 4#include <asm/fpu/api.h>
5#include <asm/pgtable.h> 5#include <asm/pgtable.h>
6#include <asm/nospec-branch.h>
6 7
7/* 8/*
8 * We map the EFI regions needed for runtime services non-contiguously, 9 * We map the EFI regions needed for runtime services non-contiguously,
@@ -41,8 +42,10 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
41({ \ 42({ \
42 efi_status_t __s; \ 43 efi_status_t __s; \
43 kernel_fpu_begin(); \ 44 kernel_fpu_begin(); \
45 firmware_restrict_branch_speculation_start(); \
44 __s = ((efi_##f##_t __attribute__((regparm(0)))*) \ 46 __s = ((efi_##f##_t __attribute__((regparm(0)))*) \
45 efi.systab->runtime->f)(args); \ 47 efi.systab->runtime->f)(args); \
48 firmware_restrict_branch_speculation_end(); \
46 kernel_fpu_end(); \ 49 kernel_fpu_end(); \
47 __s; \ 50 __s; \
48}) 51})
@@ -51,8 +54,10 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
51#define __efi_call_virt(f, args...) \ 54#define __efi_call_virt(f, args...) \
52({ \ 55({ \
53 kernel_fpu_begin(); \ 56 kernel_fpu_begin(); \
57 firmware_restrict_branch_speculation_start(); \
54 ((efi_##f##_t __attribute__((regparm(0)))*) \ 58 ((efi_##f##_t __attribute__((regparm(0)))*) \
55 efi.systab->runtime->f)(args); \ 59 efi.systab->runtime->f)(args); \
60 firmware_restrict_branch_speculation_end(); \
56 kernel_fpu_end(); \ 61 kernel_fpu_end(); \
57}) 62})
58 63
@@ -73,7 +78,9 @@ extern u64 asmlinkage efi_call(void *fp, ...);
73 efi_sync_low_kernel_mappings(); \ 78 efi_sync_low_kernel_mappings(); \
74 preempt_disable(); \ 79 preempt_disable(); \
75 __kernel_fpu_begin(); \ 80 __kernel_fpu_begin(); \
81 firmware_restrict_branch_speculation_start(); \
76 __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \ 82 __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \
83 firmware_restrict_branch_speculation_end(); \
77 __kernel_fpu_end(); \ 84 __kernel_fpu_end(); \
78 preempt_enable(); \ 85 preempt_enable(); \
79 __s; \ 86 __s; \
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 3c3550c3a4a3..ec2aedb6f92a 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -17,6 +17,7 @@
17#include <asm/user.h> 17#include <asm/user.h>
18#include <asm/fpu/api.h> 18#include <asm/fpu/api.h>
19#include <asm/fpu/xstate.h> 19#include <asm/fpu/xstate.h>
20#include <asm/cpufeature.h>
20 21
21/* 22/*
22 * High level FPU state handling functions: 23 * High level FPU state handling functions:
@@ -42,6 +43,7 @@ extern void fpu__init_cpu_xstate(void);
42extern void fpu__init_system(struct cpuinfo_x86 *c); 43extern void fpu__init_system(struct cpuinfo_x86 *c);
43extern void fpu__init_check_bugs(void); 44extern void fpu__init_check_bugs(void);
44extern void fpu__resume_cpu(void); 45extern void fpu__resume_cpu(void);
46extern u64 fpu__get_supported_xfeatures_mask(void);
45 47
46/* 48/*
47 * Debugging facility: 49 * Debugging facility:
@@ -57,22 +59,22 @@ extern void fpu__resume_cpu(void);
57 */ 59 */
58static __always_inline __pure bool use_eager_fpu(void) 60static __always_inline __pure bool use_eager_fpu(void)
59{ 61{
60 return static_cpu_has_safe(X86_FEATURE_EAGER_FPU); 62 return true;
61} 63}
62 64
63static __always_inline __pure bool use_xsaveopt(void) 65static __always_inline __pure bool use_xsaveopt(void)
64{ 66{
65 return static_cpu_has_safe(X86_FEATURE_XSAVEOPT); 67 return static_cpu_has(X86_FEATURE_XSAVEOPT);
66} 68}
67 69
68static __always_inline __pure bool use_xsave(void) 70static __always_inline __pure bool use_xsave(void)
69{ 71{
70 return static_cpu_has_safe(X86_FEATURE_XSAVE); 72 return static_cpu_has(X86_FEATURE_XSAVE);
71} 73}
72 74
73static __always_inline __pure bool use_fxsr(void) 75static __always_inline __pure bool use_fxsr(void)
74{ 76{
75 return static_cpu_has_safe(X86_FEATURE_FXSR); 77 return static_cpu_has(X86_FEATURE_FXSR);
76} 78}
77 79
78/* 80/*
@@ -224,18 +226,67 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
224#define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f" 226#define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f"
225#define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f" 227#define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
226 228
227/* xstate instruction fault handler: */ 229#define XSTATE_OP(op, st, lmask, hmask, err) \
228#define xstate_fault(__err) \ 230 asm volatile("1:" op "\n\t" \
229 \ 231 "xor %[err], %[err]\n" \
230 ".section .fixup,\"ax\"\n" \ 232 "2:\n\t" \
231 \ 233 ".pushsection .fixup,\"ax\"\n\t" \
232 "3: movl $-2,%[_err]\n" \ 234 "3: movl $-2,%[err]\n\t" \
233 " jmp 2b\n" \ 235 "jmp 2b\n\t" \
234 \ 236 ".popsection\n\t" \
235 ".previous\n" \ 237 _ASM_EXTABLE(1b, 3b) \
236 \ 238 : [err] "=r" (err) \
237 _ASM_EXTABLE(1b, 3b) \ 239 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
238 : [_err] "=r" (__err) 240 : "memory")
241
242/*
243 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
244 * format and supervisor states in addition to modified optimization in
245 * XSAVEOPT.
246 *
247 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
248 * supports modified optimization which is not supported by XSAVE.
249 *
250 * We use XSAVE as a fallback.
251 *
252 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
253 * original instruction which gets replaced. We need to use it here as the
254 * address of the instruction where we might get an exception at.
255 */
256#define XSTATE_XSAVE(st, lmask, hmask, err) \
257 asm volatile(ALTERNATIVE_2(XSAVE, \
258 XSAVEOPT, X86_FEATURE_XSAVEOPT, \
259 XSAVES, X86_FEATURE_XSAVES) \
260 "\n" \
261 "xor %[err], %[err]\n" \
262 "3:\n" \
263 ".pushsection .fixup,\"ax\"\n" \
264 "4: movl $-2, %[err]\n" \
265 "jmp 3b\n" \
266 ".popsection\n" \
267 _ASM_EXTABLE(661b, 4b) \
268 : [err] "=r" (err) \
269 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
270 : "memory")
271
272/*
273 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
274 * XSAVE area format.
275 */
276#define XSTATE_XRESTORE(st, lmask, hmask, err) \
277 asm volatile(ALTERNATIVE(XRSTOR, \
278 XRSTORS, X86_FEATURE_XSAVES) \
279 "\n" \
280 "xor %[err], %[err]\n" \
281 "3:\n" \
282 ".pushsection .fixup,\"ax\"\n" \
283 "4: movl $-2, %[err]\n" \
284 "jmp 3b\n" \
285 ".popsection\n" \
286 _ASM_EXTABLE(661b, 4b) \
287 : [err] "=r" (err) \
288 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
289 : "memory")
239 290
240/* 291/*
241 * This function is called only during boot time when x86 caps are not set 292 * This function is called only during boot time when x86 caps are not set
@@ -246,22 +297,14 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
246 u64 mask = -1; 297 u64 mask = -1;
247 u32 lmask = mask; 298 u32 lmask = mask;
248 u32 hmask = mask >> 32; 299 u32 hmask = mask >> 32;
249 int err = 0; 300 int err;
250 301
251 WARN_ON(system_state != SYSTEM_BOOTING); 302 WARN_ON(system_state != SYSTEM_BOOTING);
252 303
253 if (boot_cpu_has(X86_FEATURE_XSAVES)) 304 if (static_cpu_has(X86_FEATURE_XSAVES))
254 asm volatile("1:"XSAVES"\n\t" 305 XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
255 "2:\n\t"
256 xstate_fault(err)
257 : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
258 : "memory");
259 else 306 else
260 asm volatile("1:"XSAVE"\n\t" 307 XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
261 "2:\n\t"
262 xstate_fault(err)
263 : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
264 : "memory");
265 308
266 /* We should never fault when copying to a kernel buffer: */ 309 /* We should never fault when copying to a kernel buffer: */
267 WARN_ON_FPU(err); 310 WARN_ON_FPU(err);
@@ -276,22 +319,14 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
276 u64 mask = -1; 319 u64 mask = -1;
277 u32 lmask = mask; 320 u32 lmask = mask;
278 u32 hmask = mask >> 32; 321 u32 hmask = mask >> 32;
279 int err = 0; 322 int err;
280 323
281 WARN_ON(system_state != SYSTEM_BOOTING); 324 WARN_ON(system_state != SYSTEM_BOOTING);
282 325
283 if (boot_cpu_has(X86_FEATURE_XSAVES)) 326 if (static_cpu_has(X86_FEATURE_XSAVES))
284 asm volatile("1:"XRSTORS"\n\t" 327 XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
285 "2:\n\t"
286 xstate_fault(err)
287 : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
288 : "memory");
289 else 328 else
290 asm volatile("1:"XRSTOR"\n\t" 329 XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
291 "2:\n\t"
292 xstate_fault(err)
293 : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
294 : "memory");
295 330
296 /* We should never fault when copying from a kernel buffer: */ 331 /* We should never fault when copying from a kernel buffer: */
297 WARN_ON_FPU(err); 332 WARN_ON_FPU(err);
@@ -305,33 +340,11 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
305 u64 mask = -1; 340 u64 mask = -1;
306 u32 lmask = mask; 341 u32 lmask = mask;
307 u32 hmask = mask >> 32; 342 u32 hmask = mask >> 32;
308 int err = 0; 343 int err;
309 344
310 WARN_ON(!alternatives_patched); 345 WARN_ON(!alternatives_patched);
311 346
312 /* 347 XSTATE_XSAVE(xstate, lmask, hmask, err);
313 * If xsaves is enabled, xsaves replaces xsaveopt because
314 * it supports compact format and supervisor states in addition to
315 * modified optimization in xsaveopt.
316 *
317 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
318 * because xsaveopt supports modified optimization which is not
319 * supported by xsave.
320 *
321 * If none of xsaves and xsaveopt is enabled, use xsave.
322 */
323 alternative_input_2(
324 "1:"XSAVE,
325 XSAVEOPT,
326 X86_FEATURE_XSAVEOPT,
327 XSAVES,
328 X86_FEATURE_XSAVES,
329 [xstate] "D" (xstate), "a" (lmask), "d" (hmask) :
330 "memory");
331 asm volatile("2:\n\t"
332 xstate_fault(err)
333 : "0" (err)
334 : "memory");
335 348
336 /* We should never fault when copying to a kernel buffer: */ 349 /* We should never fault when copying to a kernel buffer: */
337 WARN_ON_FPU(err); 350 WARN_ON_FPU(err);
@@ -344,23 +357,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
344{ 357{
345 u32 lmask = mask; 358 u32 lmask = mask;
346 u32 hmask = mask >> 32; 359 u32 hmask = mask >> 32;
347 int err = 0; 360 int err;
348 361
349 /* 362 XSTATE_XRESTORE(xstate, lmask, hmask, err);
350 * Use xrstors to restore context if it is enabled. xrstors supports
351 * compacted format of xsave area which is not supported by xrstor.
352 */
353 alternative_input(
354 "1: " XRSTOR,
355 XRSTORS,
356 X86_FEATURE_XSAVES,
357 "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask)
358 : "memory");
359
360 asm volatile("2:\n"
361 xstate_fault(err)
362 : "0" (err)
363 : "memory");
364 363
365 /* We should never fault when copying from a kernel buffer: */ 364 /* We should never fault when copying from a kernel buffer: */
366 WARN_ON_FPU(err); 365 WARN_ON_FPU(err);
@@ -388,12 +387,10 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
388 if (unlikely(err)) 387 if (unlikely(err))
389 return -EFAULT; 388 return -EFAULT;
390 389
391 __asm__ __volatile__(ASM_STAC "\n" 390 stac();
392 "1:"XSAVE"\n" 391 XSTATE_OP(XSAVE, buf, -1, -1, err);
393 "2: " ASM_CLAC "\n" 392 clac();
394 xstate_fault(err) 393
395 : "D" (buf), "a" (-1), "d" (-1), "0" (err)
396 : "memory");
397 return err; 394 return err;
398} 395}
399 396
@@ -405,14 +402,12 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
405 struct xregs_state *xstate = ((__force struct xregs_state *)buf); 402 struct xregs_state *xstate = ((__force struct xregs_state *)buf);
406 u32 lmask = mask; 403 u32 lmask = mask;
407 u32 hmask = mask >> 32; 404 u32 hmask = mask >> 32;
408 int err = 0; 405 int err;
409 406
410 __asm__ __volatile__(ASM_STAC "\n" 407 stac();
411 "1:"XRSTOR"\n" 408 XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
412 "2: " ASM_CLAC "\n" 409 clac();
413 xstate_fault(err) 410
414 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
415 : "memory"); /* memory required? */
416 return err; 411 return err;
417} 412}
418 413
@@ -466,7 +461,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
466 * pending. Clear the x87 state here by setting it to fixed values. 461 * pending. Clear the x87 state here by setting it to fixed values.
467 * "m" is a random variable that should be in L1. 462 * "m" is a random variable that should be in L1.
468 */ 463 */
469 if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) { 464 if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
470 asm volatile( 465 asm volatile(
471 "fnclex\n\t" 466 "fnclex\n\t"
472 "emms\n\t" 467 "emms\n\t"
@@ -595,7 +590,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
595 * If the task has used the math, pre-load the FPU on xsave processors 590 * If the task has used the math, pre-load the FPU on xsave processors
596 * or if the past 5 consecutive context-switches used math. 591 * or if the past 5 consecutive context-switches used math.
597 */ 592 */
598 fpu.preload = new_fpu->fpstate_active && 593 fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
594 new_fpu->fpstate_active &&
599 (use_eager_fpu() || new_fpu->counter > 5); 595 (use_eager_fpu() || new_fpu->counter > 5);
600 596
601 if (old_fpu->fpregs_active) { 597 if (old_fpu->fpregs_active) {
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 3a6c89b70307..f23cd8c80b1c 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -22,7 +22,7 @@
22#define XFEATURE_MASK_LAZY (XFEATURE_MASK_FP | \ 22#define XFEATURE_MASK_LAZY (XFEATURE_MASK_FP | \
23 XFEATURE_MASK_SSE | \ 23 XFEATURE_MASK_SSE | \
24 XFEATURE_MASK_YMM | \ 24 XFEATURE_MASK_YMM | \
25 XFEATURE_MASK_OPMASK | \ 25 XFEATURE_MASK_OPMASK | \
26 XFEATURE_MASK_ZMM_Hi256 | \ 26 XFEATURE_MASK_ZMM_Hi256 | \
27 XFEATURE_MASK_Hi16_ZMM) 27 XFEATURE_MASK_Hi16_ZMM)
28 28
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index b4c1f5453436..f4dc9b63bdda 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -41,20 +41,11 @@
41 "+m" (*uaddr), "=&r" (tem) \ 41 "+m" (*uaddr), "=&r" (tem) \
42 : "r" (oparg), "i" (-EFAULT), "1" (0)) 42 : "r" (oparg), "i" (-EFAULT), "1" (0))
43 43
44static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 44static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
45 u32 __user *uaddr)
45{ 46{
46 int op = (encoded_op >> 28) & 7;
47 int cmp = (encoded_op >> 24) & 15;
48 int oparg = (encoded_op << 8) >> 20;
49 int cmparg = (encoded_op << 20) >> 20;
50 int oldval = 0, ret, tem; 47 int oldval = 0, ret, tem;
51 48
52 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
53 oparg = 1 << oparg;
54
55 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
56 return -EFAULT;
57
58 pagefault_disable(); 49 pagefault_disable();
59 50
60 switch (op) { 51 switch (op) {
@@ -80,30 +71,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
80 71
81 pagefault_enable(); 72 pagefault_enable();
82 73
83 if (!ret) { 74 if (!ret)
84 switch (cmp) { 75 *oval = oldval;
85 case FUTEX_OP_CMP_EQ: 76
86 ret = (oldval == cmparg);
87 break;
88 case FUTEX_OP_CMP_NE:
89 ret = (oldval != cmparg);
90 break;
91 case FUTEX_OP_CMP_LT:
92 ret = (oldval < cmparg);
93 break;
94 case FUTEX_OP_CMP_GE:
95 ret = (oldval >= cmparg);
96 break;
97 case FUTEX_OP_CMP_LE:
98 ret = (oldval <= cmparg);
99 break;
100 case FUTEX_OP_CMP_GT:
101 ret = (oldval > cmparg);
102 break;
103 default:
104 ret = -ENOSYS;
105 }
106 }
107 return ret; 77 return ret;
108} 78}
109 79
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
new file mode 100644
index 000000000000..e13ff5a14633
--- /dev/null
+++ b/arch/x86/include/asm/intel-family.h
@@ -0,0 +1,72 @@
1#ifndef _ASM_X86_INTEL_FAMILY_H
2#define _ASM_X86_INTEL_FAMILY_H
3
4/*
5 * "Big Core" Processors (Branded as Core, Xeon, etc...)
6 *
7 * The "_X" parts are generally the EP and EX Xeons, or the
8 * "Extreme" ones, like Broadwell-E.
9 *
10 * Things ending in "2" are usually because we have no better
11 * name for them. There's no processor called "WESTMERE2".
12 */
13
14#define INTEL_FAM6_CORE_YONAH 0x0E
15
16#define INTEL_FAM6_CORE2_MEROM 0x0F
17#define INTEL_FAM6_CORE2_MEROM_L 0x16
18#define INTEL_FAM6_CORE2_PENRYN 0x17
19#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D
20
21#define INTEL_FAM6_NEHALEM 0x1E
22#define INTEL_FAM6_NEHALEM_EP 0x1A
23#define INTEL_FAM6_NEHALEM_EX 0x2E
24
25#define INTEL_FAM6_WESTMERE 0x25
26#define INTEL_FAM6_WESTMERE2 0x1F
27#define INTEL_FAM6_WESTMERE_EP 0x2C
28#define INTEL_FAM6_WESTMERE_EX 0x2F
29
30#define INTEL_FAM6_SANDYBRIDGE 0x2A
31#define INTEL_FAM6_SANDYBRIDGE_X 0x2D
32#define INTEL_FAM6_IVYBRIDGE 0x3A
33#define INTEL_FAM6_IVYBRIDGE_X 0x3E
34
35#define INTEL_FAM6_HASWELL_CORE 0x3C
36#define INTEL_FAM6_HASWELL_X 0x3F
37#define INTEL_FAM6_HASWELL_ULT 0x45
38#define INTEL_FAM6_HASWELL_GT3E 0x46
39
40#define INTEL_FAM6_BROADWELL_CORE 0x3D
41#define INTEL_FAM6_BROADWELL_GT3E 0x47
42#define INTEL_FAM6_BROADWELL_X 0x4F
43#define INTEL_FAM6_BROADWELL_XEON_D 0x56
44
45#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E
46#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E
47#define INTEL_FAM6_SKYLAKE_X 0x55
48#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E
49#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E
50
51/* "Small Core" Processors (Atom) */
52
53#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
54#define INTEL_FAM6_ATOM_LINCROFT 0x26
55#define INTEL_FAM6_ATOM_PENWELL 0x27
56#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
57#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
58#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
59#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
60#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
61#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
62#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Annidale */
63#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
64#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
65#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A
66
67/* Xeon Phi */
68
69#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
70#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
71
72#endif /* _ASM_X86_INTEL_FAMILY_H */
diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
index 78162f8e248b..d0afb05c84fc 100644
--- a/arch/x86/include/asm/irq_work.h
+++ b/arch/x86/include/asm/irq_work.h
@@ -1,7 +1,7 @@
1#ifndef _ASM_IRQ_WORK_H 1#ifndef _ASM_IRQ_WORK_H
2#define _ASM_IRQ_WORK_H 2#define _ASM_IRQ_WORK_H
3 3
4#include <asm/processor.h> 4#include <asm/cpufeature.h>
5 5
6static inline bool arch_irq_work_has_interrupt(void) 6static inline bool arch_irq_work_has_interrupt(void)
7{ 7{
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index b77f5edb03b0..cb7f04981c6b 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -8,7 +8,9 @@
8 * Interrupt control: 8 * Interrupt control:
9 */ 9 */
10 10
11static inline unsigned long native_save_fl(void) 11/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
12extern inline unsigned long native_save_fl(void);
13extern inline unsigned long native_save_fl(void)
12{ 14{
13 unsigned long flags; 15 unsigned long flags;
14 16
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index fc3c7e49c8e4..ae357d0afc91 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -105,11 +105,12 @@ struct x86_emulate_ops {
105 * @addr: [IN ] Linear address from which to read. 105 * @addr: [IN ] Linear address from which to read.
106 * @val: [OUT] Value read from memory, zero-extended to 'u_long'. 106 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
107 * @bytes: [IN ] Number of bytes to read from memory. 107 * @bytes: [IN ] Number of bytes to read from memory.
108 * @system:[IN ] Whether the access is forced to be at CPL0.
108 */ 109 */
109 int (*read_std)(struct x86_emulate_ctxt *ctxt, 110 int (*read_std)(struct x86_emulate_ctxt *ctxt,
110 unsigned long addr, void *val, 111 unsigned long addr, void *val,
111 unsigned int bytes, 112 unsigned int bytes,
112 struct x86_exception *fault); 113 struct x86_exception *fault, bool system);
113 114
114 /* 115 /*
115 * read_phys: Read bytes of standard (non-emulated/special) memory. 116 * read_phys: Read bytes of standard (non-emulated/special) memory.
@@ -127,10 +128,11 @@ struct x86_emulate_ops {
127 * @addr: [IN ] Linear address to which to write. 128 * @addr: [IN ] Linear address to which to write.
128 * @val: [OUT] Value write to memory, zero-extended to 'u_long'. 129 * @val: [OUT] Value write to memory, zero-extended to 'u_long'.
129 * @bytes: [IN ] Number of bytes to write to memory. 130 * @bytes: [IN ] Number of bytes to write to memory.
131 * @system:[IN ] Whether the access is forced to be at CPL0.
130 */ 132 */
131 int (*write_std)(struct x86_emulate_ctxt *ctxt, 133 int (*write_std)(struct x86_emulate_ctxt *ctxt,
132 unsigned long addr, void *val, unsigned int bytes, 134 unsigned long addr, void *val, unsigned int bytes,
133 struct x86_exception *fault); 135 struct x86_exception *fault, bool system);
134 /* 136 /*
135 * fetch: Read bytes of standard (non-emulated/special) memory. 137 * fetch: Read bytes of standard (non-emulated/special) memory.
136 * Used for instruction fetch. 138 * Used for instruction fetch.
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9d2abb2a41d2..74fda1a453bd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -998,7 +998,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
998static inline int emulate_instruction(struct kvm_vcpu *vcpu, 998static inline int emulate_instruction(struct kvm_vcpu *vcpu,
999 int emulation_type) 999 int emulation_type)
1000{ 1000{
1001 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); 1001 return x86_emulate_instruction(vcpu, 0,
1002 emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
1002} 1003}
1003 1004
1004void kvm_enable_efer_bits(u64); 1005void kvm_enable_efer_bits(u64);
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index adfc847a395e..fb163f02ebb1 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -59,7 +59,6 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
59 59
60extern int __apply_microcode_amd(struct microcode_amd *mc_amd); 60extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
61extern int apply_microcode_amd(int cpu); 61extern int apply_microcode_amd(int cpu);
62extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
63 62
64#define PATCH_MAX_SIZE PAGE_SIZE 63#define PATCH_MAX_SIZE PAGE_SIZE
65extern u8 amd_ucode_patch[PATCH_MAX_SIZE]; 64extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 7680b76adafc..3359dfedc7ee 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -3,12 +3,18 @@
3 3
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include <linux/mutex.h> 5#include <linux/mutex.h>
6#include <linux/atomic.h>
6 7
7/* 8/*
8 * The x86 doesn't have a mmu context, but 9 * x86 has arch-specific MMU state beyond what lives in mm_struct.
9 * we put the segment information here.
10 */ 10 */
11typedef struct { 11typedef struct {
12 /*
13 * ctx_id uniquely identifies this mm_struct. A ctx_id will never
14 * be reused, and zero is not a valid ctx_id.
15 */
16 u64 ctx_id;
17
12#ifdef CONFIG_MODIFY_LDT_SYSCALL 18#ifdef CONFIG_MODIFY_LDT_SYSCALL
13 struct ldt_struct *ldt; 19 struct ldt_struct *ldt;
14#endif 20#endif
@@ -24,6 +30,11 @@ typedef struct {
24 atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */ 30 atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
25} mm_context_t; 31} mm_context_t;
26 32
33#define INIT_MM_CONTEXT(mm) \
34 .context = { \
35 .ctx_id = 1, \
36 }
37
27void leave_mm(int cpu); 38void leave_mm(int cpu);
28 39
29#endif /* _ASM_X86_MMU_H */ 40#endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 9bfc5fd77015..effc12767cbf 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -11,6 +11,9 @@
11#include <asm/tlbflush.h> 11#include <asm/tlbflush.h>
12#include <asm/paravirt.h> 12#include <asm/paravirt.h>
13#include <asm/mpx.h> 13#include <asm/mpx.h>
14
15extern atomic64_t last_mm_ctx_id;
16
14#ifndef CONFIG_PARAVIRT 17#ifndef CONFIG_PARAVIRT
15static inline void paravirt_activate_mm(struct mm_struct *prev, 18static inline void paravirt_activate_mm(struct mm_struct *prev,
16 struct mm_struct *next) 19 struct mm_struct *next)
@@ -52,15 +55,15 @@ struct ldt_struct {
52/* 55/*
53 * Used for LDT copy/destruction. 56 * Used for LDT copy/destruction.
54 */ 57 */
55int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 58int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
56void destroy_context(struct mm_struct *mm); 59void destroy_context_ldt(struct mm_struct *mm);
57#else /* CONFIG_MODIFY_LDT_SYSCALL */ 60#else /* CONFIG_MODIFY_LDT_SYSCALL */
58static inline int init_new_context(struct task_struct *tsk, 61static inline int init_new_context_ldt(struct task_struct *tsk,
59 struct mm_struct *mm) 62 struct mm_struct *mm)
60{ 63{
61 return 0; 64 return 0;
62} 65}
63static inline void destroy_context(struct mm_struct *mm) {} 66static inline void destroy_context_ldt(struct mm_struct *mm) {}
64#endif 67#endif
65 68
66static inline void load_mm_ldt(struct mm_struct *mm) 69static inline void load_mm_ldt(struct mm_struct *mm)
@@ -102,6 +105,18 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
102 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); 105 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
103} 106}
104 107
108static inline int init_new_context(struct task_struct *tsk,
109 struct mm_struct *mm)
110{
111 mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
112 init_new_context_ldt(tsk, mm);
113 return 0;
114}
115static inline void destroy_context(struct mm_struct *mm)
116{
117 destroy_context_ldt(mm);
118}
119
105extern void switch_mm(struct mm_struct *prev, struct mm_struct *next, 120extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
106 struct task_struct *tsk); 121 struct task_struct *tsk);
107 122
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b8911aecf035..caa00191e565 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -32,6 +32,15 @@
32#define EFER_FFXSR (1<<_EFER_FFXSR) 32#define EFER_FFXSR (1<<_EFER_FFXSR)
33 33
34/* Intel MSRs. Some also available on other CPUs */ 34/* Intel MSRs. Some also available on other CPUs */
35#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
36#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
37#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
38#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
39#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
40
41#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
42#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
43
35#define MSR_IA32_PERFCTR0 0x000000c1 44#define MSR_IA32_PERFCTR0 0x000000c1
36#define MSR_IA32_PERFCTR1 0x000000c2 45#define MSR_IA32_PERFCTR1 0x000000c2
37#define MSR_FSB_FREQ 0x000000cd 46#define MSR_FSB_FREQ 0x000000cd
@@ -45,6 +54,16 @@
45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) 54#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
46 55
47#define MSR_MTRRcap 0x000000fe 56#define MSR_MTRRcap 0x000000fe
57
58#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
59#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
60#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
61#define ARCH_CAP_SSB_NO (1 << 4) /*
62 * Not susceptible to Speculative Store Bypass
63 * attack, so no Speculative Store Bypass
64 * control required.
65 */
66
48#define MSR_IA32_BBL_CR_CTL 0x00000119 67#define MSR_IA32_BBL_CR_CTL 0x00000119
49#define MSR_IA32_BBL_CR_CTL3 0x0000011e 68#define MSR_IA32_BBL_CR_CTL3 0x0000011e
50 69
@@ -132,6 +151,7 @@
132 151
133/* DEBUGCTLMSR bits (others vary by model): */ 152/* DEBUGCTLMSR bits (others vary by model): */
134#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ 153#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
154#define DEBUGCTLMSR_BTF_SHIFT 1
135#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */ 155#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
136#define DEBUGCTLMSR_TR (1UL << 6) 156#define DEBUGCTLMSR_TR (1UL << 6)
137#define DEBUGCTLMSR_BTS (1UL << 7) 157#define DEBUGCTLMSR_BTS (1UL << 7)
@@ -308,6 +328,8 @@
308#define MSR_AMD64_IBSOPDATA4 0xc001103d 328#define MSR_AMD64_IBSOPDATA4 0xc001103d
309#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ 329#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
310 330
331#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
332
311/* Fam 16h MSRs */ 333/* Fam 16h MSRs */
312#define MSR_F16H_L2I_PERF_CTL 0xc0010230 334#define MSR_F16H_L2I_PERF_CTL 0xc0010230
313#define MSR_F16H_L2I_PERF_CTR 0xc0010231 335#define MSR_F16H_L2I_PERF_CTR 0xc0010231
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 77d8b284e4a7..5a10ac8c131e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -147,8 +147,7 @@ static __always_inline unsigned long long rdtsc_ordered(void)
147 * that some other imaginary CPU is updating continuously with a 147 * that some other imaginary CPU is updating continuously with a
148 * time stamp. 148 * time stamp.
149 */ 149 */
150 alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, 150 barrier_nospec();
151 "lfence", X86_FEATURE_LFENCE_RDTSC);
152 return rdtsc(); 151 return rdtsc();
153} 152}
154 153
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index c70689b5e5aa..0deeb2d26df7 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -3,6 +3,8 @@
3 3
4#include <linux/sched.h> 4#include <linux/sched.h>
5 5
6#include <asm/cpufeature.h>
7
6#define MWAIT_SUBSTATE_MASK 0xf 8#define MWAIT_SUBSTATE_MASK 0xf
7#define MWAIT_CSTATE_MASK 0xf 9#define MWAIT_CSTATE_MASK 0xf
8#define MWAIT_SUBSTATE_SIZE 4 10#define MWAIT_SUBSTATE_SIZE 4
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 492370b9b35b..b4c74c24c890 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -1,11 +1,12 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2 2
3#ifndef __NOSPEC_BRANCH_H__ 3#ifndef _ASM_X86_NOSPEC_BRANCH_H_
4#define __NOSPEC_BRANCH_H__ 4#define _ASM_X86_NOSPEC_BRANCH_H_
5 5
6#include <asm/alternative.h> 6#include <asm/alternative.h>
7#include <asm/alternative-asm.h> 7#include <asm/alternative-asm.h>
8#include <asm/cpufeature.h> 8#include <asm/cpufeatures.h>
9#include <asm/msr-index.h>
9 10
10/* 11/*
11 * Fill the CPU return stack buffer. 12 * Fill the CPU return stack buffer.
@@ -171,6 +172,14 @@ enum spectre_v2_mitigation {
171 SPECTRE_V2_IBRS, 172 SPECTRE_V2_IBRS,
172}; 173};
173 174
175/* The Speculative Store Bypass disable variants */
176enum ssb_mitigation {
177 SPEC_STORE_BYPASS_NONE,
178 SPEC_STORE_BYPASS_DISABLE,
179 SPEC_STORE_BYPASS_PRCTL,
180 SPEC_STORE_BYPASS_SECCOMP,
181};
182
174extern char __indirect_thunk_start[]; 183extern char __indirect_thunk_start[];
175extern char __indirect_thunk_end[]; 184extern char __indirect_thunk_end[];
176 185
@@ -178,7 +187,7 @@ extern char __indirect_thunk_end[];
178 * On VMEXIT we must ensure that no RSB predictions learned in the guest 187 * On VMEXIT we must ensure that no RSB predictions learned in the guest
179 * can be followed in the host, by overwriting the RSB completely. Both 188 * can be followed in the host, by overwriting the RSB completely. Both
180 * retpoline and IBRS mitigations for Spectre v2 need this; only on future 189 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
181 * CPUs with IBRS_ATT *might* it be avoided. 190 * CPUs with IBRS_ALL *might* it be avoided.
182 */ 191 */
183static inline void vmexit_fill_RSB(void) 192static inline void vmexit_fill_RSB(void)
184{ 193{
@@ -194,5 +203,87 @@ static inline void vmexit_fill_RSB(void)
194#endif 203#endif
195} 204}
196 205
206static __always_inline
207void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
208{
209 asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
210 : : "c" (msr),
211 "a" ((u32)val),
212 "d" ((u32)(val >> 32)),
213 [feature] "i" (feature)
214 : "memory");
215}
216
217static inline void indirect_branch_prediction_barrier(void)
218{
219 u64 val = PRED_CMD_IBPB;
220
221 alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
222}
223
224/* The Intel SPEC CTRL MSR base value cache */
225extern u64 x86_spec_ctrl_base;
226
227/*
228 * With retpoline, we must use IBRS to restrict branch prediction
229 * before calling into firmware.
230 *
231 * (Implemented as CPP macros due to header hell.)
232 */
233#define firmware_restrict_branch_speculation_start() \
234do { \
235 u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \
236 \
237 preempt_disable(); \
238 alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
239 X86_FEATURE_USE_IBRS_FW); \
240} while (0)
241
242#define firmware_restrict_branch_speculation_end() \
243do { \
244 u64 val = x86_spec_ctrl_base; \
245 \
246 alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
247 X86_FEATURE_USE_IBRS_FW); \
248 preempt_enable(); \
249} while (0)
250
197#endif /* __ASSEMBLY__ */ 251#endif /* __ASSEMBLY__ */
198#endif /* __NOSPEC_BRANCH_H__ */ 252
253/*
254 * Below is used in the eBPF JIT compiler and emits the byte sequence
255 * for the following assembly:
256 *
257 * With retpolines configured:
258 *
259 * callq do_rop
260 * spec_trap:
261 * pause
262 * lfence
263 * jmp spec_trap
264 * do_rop:
265 * mov %rax,(%rsp)
266 * retq
267 *
268 * Without retpolines configured:
269 *
270 * jmp *%rax
271 */
272#ifdef CONFIG_RETPOLINE
273# define RETPOLINE_RAX_BPF_JIT_SIZE 17
274# define RETPOLINE_RAX_BPF_JIT() \
275 EMIT1_off32(0xE8, 7); /* callq do_rop */ \
276 /* spec_trap: */ \
277 EMIT2(0xF3, 0x90); /* pause */ \
278 EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \
279 EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \
280 /* do_rop: */ \
281 EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \
282 EMIT1(0xC3); /* retq */
283#else
284# define RETPOLINE_RAX_BPF_JIT_SIZE 2
285# define RETPOLINE_RAX_BPF_JIT() \
286 EMIT2(0xFF, 0xE0); /* jmp *%rax */
287#endif
288
289#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index 3a52ee0e726d..bfceb5cc6347 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -27,8 +27,13 @@
27#define N_EXCEPTION_STACKS 1 27#define N_EXCEPTION_STACKS 1
28 28
29#ifdef CONFIG_X86_PAE 29#ifdef CONFIG_X86_PAE
30/* 44=32+12, the limit we can fit into an unsigned long pfn */ 30/*
31#define __PHYSICAL_MASK_SHIFT 44 31 * This is beyond the 44 bit limit imposed by the 32bit long pfns,
32 * but we need the full mask to make sure inverted PROT_NONE
33 * entries have all the host bits set in a guest.
34 * The real limit is still 44 bits.
35 */
36#define __PHYSICAL_MASK_SHIFT 52
32#define __VIRTUAL_MASK_SHIFT 32 37#define __VIRTUAL_MASK_SHIFT 32
33 38
34#else /* !CONFIG_X86_PAE */ 39#else /* !CONFIG_X86_PAE */
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index fd74a11959de..89c50332a71e 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -77,4 +77,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi
77#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) 77#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
78#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) 78#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
79 79
80/* No inverted PFNs on 2 level page tables */
81
82static inline u64 protnone_mask(u64 val)
83{
84 return 0;
85}
86
87static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
88{
89 return val;
90}
91
92static inline bool __pte_needs_invert(u64 val)
93{
94 return false;
95}
96
80#endif /* _ASM_X86_PGTABLE_2LEVEL_H */ 97#endif /* _ASM_X86_PGTABLE_2LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index cdaa58c9b39e..5c686382d84b 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -177,11 +177,44 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
177#endif 177#endif
178 178
179/* Encode and de-code a swap entry */ 179/* Encode and de-code a swap entry */
180#define SWP_TYPE_BITS 5
181
182#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
183
184/* We always extract/encode the offset by shifting it all the way up, and then down again */
185#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
186
180#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5) 187#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
181#define __swp_type(x) (((x).val) & 0x1f) 188#define __swp_type(x) (((x).val) & 0x1f)
182#define __swp_offset(x) ((x).val >> 5) 189#define __swp_offset(x) ((x).val >> 5)
183#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) 190#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
184#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) 191
185#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) 192/*
193 * Normally, __swp_entry() converts from arch-independent swp_entry_t to
194 * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
195 * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
196 * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
197 * __swp_entry_to_pte() through the following helper macro based on 64bit
198 * __swp_entry().
199 */
200#define __swp_pteval_entry(type, offset) ((pteval_t) { \
201 (~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
202 | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })
203
204#define __swp_entry_to_pte(x) ((pte_t){ .pte = \
205 __swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
206/*
207 * Analogically, __pte_to_swp_entry() doesn't just extract the arch-dependent
208 * swp_entry_t, but also has to convert it from 64bit to the 32bit
209 * intermediate representation, using the following macros based on 64bit
210 * __swp_type() and __swp_offset().
211 */
212#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
213#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))
214
215#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
216 __pteval_swp_offset(pte)))
217
218#include <asm/pgtable-invert.h>
186 219
187#endif /* _ASM_X86_PGTABLE_3LEVEL_H */ 220#endif /* _ASM_X86_PGTABLE_3LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h
new file mode 100644
index 000000000000..a0c1525f1b6f
--- /dev/null
+++ b/arch/x86/include/asm/pgtable-invert.h
@@ -0,0 +1,41 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_PGTABLE_INVERT_H
3#define _ASM_PGTABLE_INVERT_H 1
4
5#ifndef __ASSEMBLY__
6
7/*
8 * A clear pte value is special, and doesn't get inverted.
9 *
10 * Note that even users that only pass a pgprot_t (rather
11 * than a full pte) won't trigger the special zero case,
12 * because even PAGE_NONE has _PAGE_PROTNONE | _PAGE_ACCESSED
13 * set. So the all zero case really is limited to just the
14 * cleared page table entry case.
15 */
16static inline bool __pte_needs_invert(u64 val)
17{
18 return val && !(val & _PAGE_PRESENT);
19}
20
21/* Get a mask to xor with the page table entry to get the correct pfn. */
22static inline u64 protnone_mask(u64 val)
23{
24 return __pte_needs_invert(val) ? ~0ull : 0;
25}
26
27static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
28{
29 /*
30 * When a PTE transitions from NONE to !NONE or vice-versa
31 * invert the PFN part to stop speculation.
32 * pte_pfn undoes this when needed.
33 */
34 if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
35 val = (val & ~mask) | (~val & mask);
36 return val;
37}
38
39#endif /* __ASSEMBLY__ */
40
41#endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 84c62d950023..4de6c282c02a 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -148,19 +148,29 @@ static inline int pte_special(pte_t pte)
148 return pte_flags(pte) & _PAGE_SPECIAL; 148 return pte_flags(pte) & _PAGE_SPECIAL;
149} 149}
150 150
151/* Entries that were set to PROT_NONE are inverted */
152
153static inline u64 protnone_mask(u64 val);
154
151static inline unsigned long pte_pfn(pte_t pte) 155static inline unsigned long pte_pfn(pte_t pte)
152{ 156{
153 return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT; 157 phys_addr_t pfn = pte_val(pte);
158 pfn ^= protnone_mask(pfn);
159 return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
154} 160}
155 161
156static inline unsigned long pmd_pfn(pmd_t pmd) 162static inline unsigned long pmd_pfn(pmd_t pmd)
157{ 163{
158 return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; 164 phys_addr_t pfn = pmd_val(pmd);
165 pfn ^= protnone_mask(pfn);
166 return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
159} 167}
160 168
161static inline unsigned long pud_pfn(pud_t pud) 169static inline unsigned long pud_pfn(pud_t pud)
162{ 170{
163 return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT; 171 phys_addr_t pfn = pud_val(pud);
172 pfn ^= protnone_mask(pfn);
173 return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
164} 174}
165 175
166#define pte_page(pte) pfn_to_page(pte_pfn(pte)) 176#define pte_page(pte) pfn_to_page(pte_pfn(pte))
@@ -305,11 +315,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
305 return pmd_set_flags(pmd, _PAGE_RW); 315 return pmd_set_flags(pmd, _PAGE_RW);
306} 316}
307 317
308static inline pmd_t pmd_mknotpresent(pmd_t pmd)
309{
310 return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
311}
312
313#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY 318#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
314static inline int pte_soft_dirty(pte_t pte) 319static inline int pte_soft_dirty(pte_t pte)
315{ 320{
@@ -359,19 +364,58 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
359 364
360static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) 365static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
361{ 366{
362 return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) | 367 phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
363 massage_pgprot(pgprot)); 368 pfn ^= protnone_mask(pgprot_val(pgprot));
369 pfn &= PTE_PFN_MASK;
370 return __pte(pfn | massage_pgprot(pgprot));
364} 371}
365 372
366static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) 373static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
367{ 374{
368 return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) | 375 phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
369 massage_pgprot(pgprot)); 376 pfn ^= protnone_mask(pgprot_val(pgprot));
377 pfn &= PHYSICAL_PMD_PAGE_MASK;
378 return __pmd(pfn | massage_pgprot(pgprot));
379}
380
381static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
382{
383 phys_addr_t pfn = page_nr << PAGE_SHIFT;
384 pfn ^= protnone_mask(pgprot_val(pgprot));
385 pfn &= PHYSICAL_PUD_PAGE_MASK;
386 return __pud(pfn | massage_pgprot(pgprot));
387}
388
389static inline pmd_t pmd_mknotpresent(pmd_t pmd)
390{
391 return pfn_pmd(pmd_pfn(pmd),
392 __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
370} 393}
371 394
395static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
396{
397 pudval_t v = native_pud_val(pud);
398
399 return __pud(v | set);
400}
401
402static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
403{
404 pudval_t v = native_pud_val(pud);
405
406 return __pud(v & ~clear);
407}
408
409static inline pud_t pud_mkhuge(pud_t pud)
410{
411 return pud_set_flags(pud, _PAGE_PSE);
412}
413
414static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
415
372static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 416static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
373{ 417{
374 pteval_t val = pte_val(pte); 418 pteval_t val = pte_val(pte), oldval = val;
375 419
376 /* 420 /*
377 * Chop off the NX bit (if present), and add the NX portion of 421 * Chop off the NX bit (if present), and add the NX portion of
@@ -379,17 +423,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
379 */ 423 */
380 val &= _PAGE_CHG_MASK; 424 val &= _PAGE_CHG_MASK;
381 val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK; 425 val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
382 426 val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
383 return __pte(val); 427 return __pte(val);
384} 428}
385 429
386static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 430static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
387{ 431{
388 pmdval_t val = pmd_val(pmd); 432 pmdval_t val = pmd_val(pmd), oldval = val;
389 433
390 val &= _HPAGE_CHG_MASK; 434 val &= _HPAGE_CHG_MASK;
391 val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK; 435 val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
392 436 val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
393 return __pmd(val); 437 return __pmd(val);
394} 438}
395 439
@@ -926,6 +970,14 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
926} 970}
927#endif 971#endif
928 972
973#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
974extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
975
976static inline bool arch_has_pfn_modify_check(void)
977{
978 return boot_cpu_has_bug(X86_BUG_L1TF);
979}
980
929#include <asm-generic/pgtable.h> 981#include <asm-generic/pgtable.h>
930#endif /* __ASSEMBLY__ */ 982#endif /* __ASSEMBLY__ */
931 983
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index c810226e741a..221a32ed1372 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -163,18 +163,52 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
163#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) 163#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
164#define pte_unmap(pte) ((void)(pte))/* NOP */ 164#define pte_unmap(pte) ((void)(pte))/* NOP */
165 165
166/* Encode and de-code a swap entry */ 166/*
167#define SWP_TYPE_BITS 5 167 * Encode and de-code a swap entry
168#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1) 168 *
169 * | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number
170 * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
171 * | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
172 *
173 * G (8) is aliased and used as a PROT_NONE indicator for
174 * !present ptes. We need to start storing swap entries above
175 * there. We also need to avoid using A and D because of an
176 * erratum where they can be incorrectly set by hardware on
177 * non-present PTEs.
178 *
179 * SD (1) in swp entry is used to store soft dirty bit, which helps us
180 * remember soft dirty over page migration
181 *
182 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
183 * but also L and G.
184 *
185 * The offset is inverted by a binary not operation to make the high
186 * physical bits set.
187 */
188#define SWP_TYPE_BITS 5
189
190#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
191
192/* We always extract/encode the offset by shifting it all the way up, and then down again */
193#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)
169 194
170#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) 195#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
171 196
172#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \ 197/* Extract the high bits for type */
173 & ((1U << SWP_TYPE_BITS) - 1)) 198#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
174#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT) 199
175#define __swp_entry(type, offset) ((swp_entry_t) { \ 200/* Shift up (to get rid of type), then down to get value */
176 ((type) << (_PAGE_BIT_PRESENT + 1)) \ 201#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
177 | ((offset) << SWP_OFFSET_SHIFT) }) 202
203/*
204 * Shift the offset up "too far" by TYPE bits, then down again
205 * The offset is inverted by a binary not operation to make the high
206 * physical bits set.
207 */
208#define __swp_entry(type, offset) ((swp_entry_t) { \
209 (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
210 | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
211
178#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) 212#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
179#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) 213#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
180 214
@@ -201,6 +235,8 @@ extern void cleanup_highmap(void);
201extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); 235extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
202extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); 236extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
203 237
238#include <asm/pgtable-invert.h>
239
204#endif /* !__ASSEMBLY__ */ 240#endif /* !__ASSEMBLY__ */
205 241
206#endif /* _ASM_X86_PGTABLE_64_H */ 242#endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 8dba273da25a..7572ce32055e 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -70,15 +70,15 @@
70/* 70/*
71 * Tracking soft dirty bit when a page goes to a swap is tricky. 71 * Tracking soft dirty bit when a page goes to a swap is tricky.
72 * We need a bit which can be stored in pte _and_ not conflict 72 * We need a bit which can be stored in pte _and_ not conflict
73 * with swap entry format. On x86 bits 6 and 7 are *not* involved 73 * with swap entry format. On x86 bits 1-4 are *not* involved
74 * into swap entry computation, but bit 6 is used for nonlinear 74 * into swap entry computation, but bit 7 is used for thp migration,
75 * file mapping, so we borrow bit 7 for soft dirty tracking. 75 * so we borrow bit 1 for soft dirty tracking.
76 * 76 *
77 * Please note that this bit must be treated as swap dirty page 77 * Please note that this bit must be treated as swap dirty page
78 * mark if and only if the PTE has present bit clear! 78 * mark if and only if the PTE/PMD has present bit clear!
79 */ 79 */
80#ifdef CONFIG_MEM_SOFT_DIRTY 80#ifdef CONFIG_MEM_SOFT_DIRTY
81#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE 81#define _PAGE_SWP_SOFT_DIRTY _PAGE_RW
82#else 82#else
83#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) 83#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
84#endif 84#endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c124d6ab4bf9..a3a53955f01c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -13,7 +13,7 @@ struct vm86;
13#include <asm/types.h> 13#include <asm/types.h>
14#include <uapi/asm/sigcontext.h> 14#include <uapi/asm/sigcontext.h>
15#include <asm/current.h> 15#include <asm/current.h>
16#include <asm/cpufeature.h> 16#include <asm/cpufeatures.h>
17#include <asm/page.h> 17#include <asm/page.h>
18#include <asm/pgtable_types.h> 18#include <asm/pgtable_types.h>
19#include <asm/percpu.h> 19#include <asm/percpu.h>
@@ -24,7 +24,6 @@ struct vm86;
24#include <asm/fpu/types.h> 24#include <asm/fpu/types.h>
25 25
26#include <linux/personality.h> 26#include <linux/personality.h>
27#include <linux/cpumask.h>
28#include <linux/cache.h> 27#include <linux/cache.h>
29#include <linux/threads.h> 28#include <linux/threads.h>
30#include <linux/math64.h> 29#include <linux/math64.h>
@@ -113,7 +112,7 @@ struct cpuinfo_x86 {
113 char x86_vendor_id[16]; 112 char x86_vendor_id[16];
114 char x86_model_id[64]; 113 char x86_model_id[64];
115 /* in KB - valid for CPUS which support this call: */ 114 /* in KB - valid for CPUS which support this call: */
116 int x86_cache_size; 115 unsigned int x86_cache_size;
117 int x86_cache_alignment; /* In bytes */ 116 int x86_cache_alignment; /* In bytes */
118 /* Cache QoS architectural values: */ 117 /* Cache QoS architectural values: */
119 int x86_cache_max_rmid; /* max index */ 118 int x86_cache_max_rmid; /* max index */
@@ -173,6 +172,11 @@ extern const struct seq_operations cpuinfo_op;
173 172
174extern void cpu_detect(struct cpuinfo_x86 *c); 173extern void cpu_detect(struct cpuinfo_x86 *c);
175 174
175static inline unsigned long l1tf_pfn_limit(void)
176{
177 return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
178}
179
176extern void early_cpu_init(void); 180extern void early_cpu_init(void);
177extern void identify_boot_cpu(void); 181extern void identify_boot_cpu(void);
178extern void identify_secondary_cpu(struct cpuinfo_x86 *); 182extern void identify_secondary_cpu(struct cpuinfo_x86 *);
@@ -574,7 +578,7 @@ static inline void sync_core(void)
574{ 578{
575 int tmp; 579 int tmp;
576 580
577#ifdef CONFIG_M486 581#ifdef CONFIG_X86_32
578 /* 582 /*
579 * Do a CPUID if available, otherwise do a jump. The jump 583 * Do a CPUID if available, otherwise do a jump. The jump
580 * can conveniently enough be the jump around CPUID. 584 * can conveniently enough be the jump around CPUID.
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 5c6e4fb370f5..6847d85400a8 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -92,5 +92,15 @@
92#define REQUIRED_MASK7 0 92#define REQUIRED_MASK7 0
93#define REQUIRED_MASK8 0 93#define REQUIRED_MASK8 0
94#define REQUIRED_MASK9 0 94#define REQUIRED_MASK9 0
95#define REQUIRED_MASK10 0
96#define REQUIRED_MASK11 0
97#define REQUIRED_MASK12 0
98#define REQUIRED_MASK13 0
99#define REQUIRED_MASK14 0
100#define REQUIRED_MASK15 0
101#define REQUIRED_MASK16 0
102#define REQUIRED_MASK17 0
103#define REQUIRED_MASK18 0
104#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
95 105
96#endif /* _ASM_X86_REQUIRED_FEATURES_H */ 106#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index ba665ebd17bb..db333300bd4b 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -15,7 +15,7 @@
15 15
16#include <linux/stringify.h> 16#include <linux/stringify.h>
17#include <asm/nops.h> 17#include <asm/nops.h>
18#include <asm/cpufeature.h> 18#include <asm/cpufeatures.h>
19 19
20/* "Raw" instruction opcodes */ 20/* "Raw" instruction opcodes */
21#define __ASM_CLAC .byte 0x0f,0x01,0xca 21#define __ASM_CLAC .byte 0x0f,0x01,0xca
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 222a6a3ca2b5..04d6eef5f8a5 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -16,20 +16,10 @@
16#endif 16#endif
17#include <asm/thread_info.h> 17#include <asm/thread_info.h>
18#include <asm/cpumask.h> 18#include <asm/cpumask.h>
19#include <asm/cpufeature.h>
20 19
21extern int smp_num_siblings; 20extern int smp_num_siblings;
22extern unsigned int num_processors; 21extern unsigned int num_processors;
23 22
24static inline bool cpu_has_ht_siblings(void)
25{
26 bool has_siblings = false;
27#ifdef CONFIG_SMP
28 has_siblings = cpu_has_ht && smp_num_siblings > 1;
29#endif
30 return has_siblings;
31}
32
33DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); 23DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
34DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); 24DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
35/* cpus sharing the last level cache: */ 25/* cpus sharing the last level cache: */
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
new file mode 100644
index 000000000000..ae7c2c5cd7f0
--- /dev/null
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -0,0 +1,80 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_X86_SPECCTRL_H_
3#define _ASM_X86_SPECCTRL_H_
4
5#include <linux/thread_info.h>
6#include <asm/nospec-branch.h>
7
8/*
9 * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
10 * the guest has, while on VMEXIT we restore the host view. This
11 * would be easier if SPEC_CTRL were architecturally maskable or
12 * shadowable for guests but this is not (currently) the case.
13 * Takes the guest view of SPEC_CTRL MSR as a parameter and also
14 * the guest's version of VIRT_SPEC_CTRL, if emulated.
15 */
16extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
17
18/**
19 * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
20 * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
21 * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
22 * (may get translated to MSR_AMD64_LS_CFG bits)
23 *
24 * Avoids writing to the MSR if the content/bits are the same
25 */
26static inline
27void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
28{
29 x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
30}
31
32/**
33 * x86_spec_ctrl_restore_host - Restore host speculation control registers
34 * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
35 * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
36 * (may get translated to MSR_AMD64_LS_CFG bits)
37 *
38 * Avoids writing to the MSR if the content/bits are the same
39 */
40static inline
41void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
42{
43 x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
44}
45
46/* AMD specific Speculative Store Bypass MSR data */
47extern u64 x86_amd_ls_cfg_base;
48extern u64 x86_amd_ls_cfg_ssbd_mask;
49
50static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
51{
52 BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
53 return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
54}
55
56static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
57{
58 BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
59 return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
60}
61
62static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
63{
64 return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
65}
66
67#ifdef CONFIG_SMP
68extern void speculative_store_bypass_ht_init(void);
69#else
70static inline void speculative_store_bypass_ht_init(void) { }
71#endif
72
73extern void speculative_store_bypass_update(unsigned long tif);
74
75static inline void speculative_store_bypass_update_current(void)
76{
77 speculative_store_bypass_update(current_thread_info()->flags);
78}
79
80#endif
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 751bf4b7bf11..025ecfaba9c9 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_X86_SWITCH_TO_H 1#ifndef _ASM_X86_SWITCH_TO_H
2#define _ASM_X86_SWITCH_TO_H 2#define _ASM_X86_SWITCH_TO_H
3 3
4#include <asm/nospec-branch.h>
5
4struct task_struct; /* one of the stranger aspects of C forward declarations */ 6struct task_struct; /* one of the stranger aspects of C forward declarations */
5__visible struct task_struct *__switch_to(struct task_struct *prev, 7__visible struct task_struct *__switch_to(struct task_struct *prev,
6 struct task_struct *next); 8 struct task_struct *next);
@@ -24,6 +26,23 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
24#define __switch_canary_iparam 26#define __switch_canary_iparam
25#endif /* CC_STACKPROTECTOR */ 27#endif /* CC_STACKPROTECTOR */
26 28
29#ifdef CONFIG_RETPOLINE
30 /*
31 * When switching from a shallower to a deeper call stack
32 * the RSB may either underflow or use entries populated
33 * with userspace addresses. On CPUs where those concerns
34 * exist, overwrite the RSB with entries which capture
35 * speculative execution to prevent attack.
36 */
37#define __retpoline_fill_return_buffer \
38 ALTERNATIVE("jmp 910f", \
39 __stringify(__FILL_RETURN_BUFFER(%%ebx, RSB_CLEAR_LOOPS, %%esp)),\
40 X86_FEATURE_RSB_CTXSW) \
41 "910:\n\t"
42#else
43#define __retpoline_fill_return_buffer
44#endif
45
27/* 46/*
28 * Saving eflags is important. It switches not only IOPL between tasks, 47 * Saving eflags is important. It switches not only IOPL between tasks,
29 * it also protects other tasks from NT leaking through sysenter etc. 48 * it also protects other tasks from NT leaking through sysenter etc.
@@ -46,6 +65,7 @@ do { \
46 "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ 65 "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
47 "pushl %[next_ip]\n\t" /* restore EIP */ \ 66 "pushl %[next_ip]\n\t" /* restore EIP */ \
48 __switch_canary \ 67 __switch_canary \
68 __retpoline_fill_return_buffer \
49 "jmp __switch_to\n" /* regparm call */ \ 69 "jmp __switch_to\n" /* regparm call */ \
50 "1:\t" \ 70 "1:\t" \
51 "popl %%ebp\n\t" /* restore EBP */ \ 71 "popl %%ebp\n\t" /* restore EBP */ \
@@ -100,6 +120,23 @@ do { \
100#define __switch_canary_iparam 120#define __switch_canary_iparam
101#endif /* CC_STACKPROTECTOR */ 121#endif /* CC_STACKPROTECTOR */
102 122
123#ifdef CONFIG_RETPOLINE
124 /*
125 * When switching from a shallower to a deeper call stack
126 * the RSB may either underflow or use entries populated
127 * with userspace addresses. On CPUs where those concerns
128 * exist, overwrite the RSB with entries which capture
129 * speculative execution to prevent attack.
130 */
131#define __retpoline_fill_return_buffer \
132 ALTERNATIVE("jmp 910f", \
133 __stringify(__FILL_RETURN_BUFFER(%%r12, RSB_CLEAR_LOOPS, %%rsp)),\
134 X86_FEATURE_RSB_CTXSW) \
135 "910:\n\t"
136#else
137#define __retpoline_fill_return_buffer
138#endif
139
103/* 140/*
104 * There is no need to save or restore flags, because flags are always 141 * There is no need to save or restore flags, because flags are always
105 * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL 142 * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
@@ -112,6 +149,7 @@ do { \
112 "call __switch_to\n\t" \ 149 "call __switch_to\n\t" \
113 "movq "__percpu_arg([current_task])",%%rsi\n\t" \ 150 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
114 __switch_canary \ 151 __switch_canary \
152 __retpoline_fill_return_buffer \
115 "movq %P[thread_info](%%rsi),%%r8\n\t" \ 153 "movq %P[thread_info](%%rsi),%%r8\n\t" \
116 "movq %%rax,%%rdi\n\t" \ 154 "movq %%rax,%%rdi\n\t" \
117 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ 155 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index c706b7796870..128a7105cbe2 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -49,7 +49,7 @@
49 */ 49 */
50#ifndef __ASSEMBLY__ 50#ifndef __ASSEMBLY__
51struct task_struct; 51struct task_struct;
52#include <asm/processor.h> 52#include <asm/cpufeature.h>
53#include <linux/atomic.h> 53#include <linux/atomic.h>
54 54
55struct thread_info { 55struct thread_info {
@@ -92,6 +92,7 @@ struct thread_info {
92#define TIF_SIGPENDING 2 /* signal pending */ 92#define TIF_SIGPENDING 2 /* signal pending */
93#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 93#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
94#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ 94#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
95#define TIF_SSBD 5 /* Reduced data speculation */
95#define TIF_SYSCALL_EMU 6 /* syscall emulation active */ 96#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
96#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 97#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
97#define TIF_SECCOMP 8 /* secure computing */ 98#define TIF_SECCOMP 8 /* secure computing */
@@ -114,8 +115,9 @@ struct thread_info {
114#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 115#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
115#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 116#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
116#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 117#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
117#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
118#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 118#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
119#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
120#define _TIF_SSBD (1 << TIF_SSBD)
119#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) 121#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
120#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 122#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
121#define _TIF_SECCOMP (1 << TIF_SECCOMP) 123#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -147,7 +149,7 @@ struct thread_info {
147 149
148/* flags to check in __switch_to() */ 150/* flags to check in __switch_to() */
149#define _TIF_WORK_CTXSW \ 151#define _TIF_WORK_CTXSW \
150 (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP) 152 (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
151 153
152#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) 154#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
153#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) 155#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
@@ -166,6 +168,17 @@ static inline struct thread_info *current_thread_info(void)
166 return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE); 168 return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
167} 169}
168 170
171static inline unsigned long current_stack_pointer(void)
172{
173 unsigned long sp;
174#ifdef CONFIG_X86_64
175 asm("mov %%rsp,%0" : "=g" (sp));
176#else
177 asm("mov %%esp,%0" : "=g" (sp));
178#endif
179 return sp;
180}
181
169/* 182/*
170 * Walks up the stack frames to make sure that the specified object is 183 * Walks up the stack frames to make sure that the specified object is
171 * entirely contained by a single stack frame. 184 * entirely contained by a single stack frame.
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index a691b66cc40a..72cfe3e53af1 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -5,6 +5,7 @@
5#include <linux/sched.h> 5#include <linux/sched.h>
6 6
7#include <asm/processor.h> 7#include <asm/processor.h>
8#include <asm/cpufeature.h>
8#include <asm/special_insns.h> 9#include <asm/special_insns.h>
9#include <asm/smp.h> 10#include <asm/smp.h>
10 11
@@ -67,6 +68,8 @@ static inline void invpcid_flush_all_nonglobals(void)
67struct tlb_state { 68struct tlb_state {
68 struct mm_struct *active_mm; 69 struct mm_struct *active_mm;
69 int state; 70 int state;
71 /* last user mm's ctx id */
72 u64 last_ctx_id;
70 73
71 /* 74 /*
72 * Access to this CR4 shadow and to H/W CR4 is protected by 75 * Access to this CR4 shadow and to H/W CR4 is protected by
@@ -108,6 +111,16 @@ static inline void cr4_clear_bits(unsigned long mask)
108 } 111 }
109} 112}
110 113
114static inline void cr4_toggle_bits(unsigned long mask)
115{
116 unsigned long cr4;
117
118 cr4 = this_cpu_read(cpu_tlbstate.cr4);
119 cr4 ^= mask;
120 this_cpu_write(cpu_tlbstate.cr4, cr4);
121 __write_cr4(cr4);
122}
123
111/* Read the CR4 shadow. */ 124/* Read the CR4 shadow. */
112static inline unsigned long cr4_read_shadow(void) 125static inline unsigned long cr4_read_shadow(void)
113{ 126{
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 2957c8237c28..ec9d2bcc8c24 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -8,7 +8,7 @@
8#include <linux/errno.h> 8#include <linux/errno.h>
9#include <linux/lockdep.h> 9#include <linux/lockdep.h>
10#include <asm/alternative.h> 10#include <asm/alternative.h>
11#include <asm/cpufeature.h> 11#include <asm/cpufeatures.h>
12#include <asm/page.h> 12#include <asm/page.h>
13 13
14/* 14/*
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 14c63c7e8337..dd11f5cb4149 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -310,6 +310,7 @@ enum vmcs_field {
310#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ 310#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
311#define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */ 311#define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */
312#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ 312#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
313#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */
313#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */ 314#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */
314 315
315/* GUEST_INTERRUPTIBILITY_INFO flags. */ 316/* GUEST_INTERRUPTIBILITY_INFO flags. */
@@ -400,10 +401,11 @@ enum vmcs_field {
400#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 2) 401#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 2)
401 402
402#define VMX_NR_VPIDS (1 << 16) 403#define VMX_NR_VPIDS (1 << 16)
404#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR 0
403#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1 405#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
404#define VMX_VPID_EXTENT_ALL_CONTEXT 2 406#define VMX_VPID_EXTENT_ALL_CONTEXT 2
407#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL 3
405 408
406#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0
407#define VMX_EPT_EXTENT_CONTEXT 1 409#define VMX_EPT_EXTENT_CONTEXT 1
408#define VMX_EPT_EXTENT_GLOBAL 2 410#define VMX_EPT_EXTENT_GLOBAL 2
409#define VMX_EPT_EXTENT_SHIFT 24 411#define VMX_EPT_EXTENT_SHIFT 24
@@ -420,8 +422,10 @@ enum vmcs_field {
420#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) 422#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
421 423
422#define VMX_VPID_INVVPID_BIT (1ull << 0) /* (32 - 32) */ 424#define VMX_VPID_INVVPID_BIT (1ull << 0) /* (32 - 32) */
425#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT (1ull << 8) /* (40 - 32) */
423#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT (1ull << 9) /* (41 - 32) */ 426#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT (1ull << 9) /* (41 - 32) */
424#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT (1ull << 10) /* (42 - 32) */ 427#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT (1ull << 10) /* (42 - 32) */
428#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT (1ull << 11) /* (43 - 32) */
425 429
426#define VMX_EPT_DEFAULT_GAW 3 430#define VMX_EPT_DEFAULT_GAW 3
427#define VMX_EPT_MAX_GAW 0x4 431#define VMX_EPT_MAX_GAW 0x4
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 4865e10dbb55..62210da19a92 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -21,5 +21,6 @@ static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
21} 21}
22static inline bool vsyscall_enabled(void) { return false; } 22static inline bool vsyscall_enabled(void) { return false; }
23#endif 23#endif
24extern unsigned long vsyscall_pgprot;
24 25
25#endif /* _ASM_X86_VSYSCALL_H */ 26#endif /* _ASM_X86_VSYSCALL_H */
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index 5a08bc8bff33..c54beb44c4c1 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -553,7 +553,7 @@ do { \
553 if (cpu_has_xmm) { \ 553 if (cpu_has_xmm) { \
554 xor_speed(&xor_block_pIII_sse); \ 554 xor_speed(&xor_block_pIII_sse); \
555 xor_speed(&xor_block_sse_pf64); \ 555 xor_speed(&xor_block_sse_pf64); \
556 } else if (cpu_has_mmx) { \ 556 } else if (boot_cpu_has(X86_FEATURE_MMX)) { \
557 xor_speed(&xor_block_pII_mmx); \ 557 xor_speed(&xor_block_pII_mmx); \
558 xor_speed(&xor_block_p5_mmx); \ 558 xor_speed(&xor_block_p5_mmx); \
559 } else { \ 559 } else { \
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 809134c644a6..90ab9a795b49 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -1 +1,32 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __ASM_X64_MSGBUF_H
3#define __ASM_X64_MSGBUF_H
4
5#if !defined(__x86_64__) || !defined(__ILP32__)
1#include <asm-generic/msgbuf.h> 6#include <asm-generic/msgbuf.h>
7#else
8/*
9 * The msqid64_ds structure for x86 architecture with x32 ABI.
10 *
11 * On x86-32 and x86-64 we can just use the generic definition, but
12 * x32 uses the same binary layout as x86_64, which is differnet
13 * from other 32-bit architectures.
14 */
15
16struct msqid64_ds {
17 struct ipc64_perm msg_perm;
18 __kernel_time_t msg_stime; /* last msgsnd time */
19 __kernel_time_t msg_rtime; /* last msgrcv time */
20 __kernel_time_t msg_ctime; /* last change time */
21 __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */
22 __kernel_ulong_t msg_qnum; /* number of messages in queue */
23 __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */
24 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
25 __kernel_pid_t msg_lrpid; /* last receive pid */
26 __kernel_ulong_t __unused4;
27 __kernel_ulong_t __unused5;
28};
29
30#endif
31
32#endif /* __ASM_GENERIC_MSGBUF_H */
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 83c05fc2de38..644421f3823b 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -1 +1,43 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __ASM_X86_SHMBUF_H
3#define __ASM_X86_SHMBUF_H
4
5#if !defined(__x86_64__) || !defined(__ILP32__)
1#include <asm-generic/shmbuf.h> 6#include <asm-generic/shmbuf.h>
7#else
8/*
9 * The shmid64_ds structure for x86 architecture with x32 ABI.
10 *
11 * On x86-32 and x86-64 we can just use the generic definition, but
12 * x32 uses the same binary layout as x86_64, which is differnet
13 * from other 32-bit architectures.
14 */
15
16struct shmid64_ds {
17 struct ipc64_perm shm_perm; /* operation perms */
18 size_t shm_segsz; /* size of segment (bytes) */
19 __kernel_time_t shm_atime; /* last attach time */
20 __kernel_time_t shm_dtime; /* last detach time */
21 __kernel_time_t shm_ctime; /* last change time */
22 __kernel_pid_t shm_cpid; /* pid of creator */
23 __kernel_pid_t shm_lpid; /* pid of last operator */
24 __kernel_ulong_t shm_nattch; /* no. of current attaches */
25 __kernel_ulong_t __unused4;
26 __kernel_ulong_t __unused5;
27};
28
29struct shminfo64 {
30 __kernel_ulong_t shmmax;
31 __kernel_ulong_t shmmin;
32 __kernel_ulong_t shmmni;
33 __kernel_ulong_t shmseg;
34 __kernel_ulong_t shmall;
35 __kernel_ulong_t __unused1;
36 __kernel_ulong_t __unused2;
37 __kernel_ulong_t __unused3;
38 __kernel_ulong_t __unused4;
39};
40
41#endif
42
43#endif /* __ASM_X86_SHMBUF_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index b1b78ffe01d0..7947cee61f61 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -41,6 +41,7 @@ obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
41obj-y += tsc.o tsc_msr.o io_delay.o rtc.o 41obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
42obj-y += pci-iommu_table.o 42obj-y += pci-iommu_table.o
43obj-y += resource.o 43obj-y += resource.o
44obj-y += irqflags.o
44 45
45obj-y += process.o 46obj-y += process.o
46obj-y += fpu/ 47obj-y += fpu/
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index d6f375f1b928..89829c3d5a74 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -45,17 +45,6 @@ static int __init setup_noreplace_smp(char *str)
45} 45}
46__setup("noreplace-smp", setup_noreplace_smp); 46__setup("noreplace-smp", setup_noreplace_smp);
47 47
48#ifdef CONFIG_PARAVIRT
49static int __initdata_or_module noreplace_paravirt = 0;
50
51static int __init setup_noreplace_paravirt(char *str)
52{
53 noreplace_paravirt = 1;
54 return 1;
55}
56__setup("noreplace-paravirt", setup_noreplace_paravirt);
57#endif
58
59#define DPRINTK(fmt, args...) \ 48#define DPRINTK(fmt, args...) \
60do { \ 49do { \
61 if (debug_alternative) \ 50 if (debug_alternative) \
@@ -587,9 +576,6 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
587 struct paravirt_patch_site *p; 576 struct paravirt_patch_site *p;
588 char insnbuf[MAX_PATCH_LEN]; 577 char insnbuf[MAX_PATCH_LEN];
589 578
590 if (noreplace_paravirt)
591 return;
592
593 for (p = start; p < end; p++) { 579 for (p = start; p < end; p++) {
594 unsigned int used; 580 unsigned int used;
595 581
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a3e1f8497f8c..deddc9b93299 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1368,7 +1368,7 @@ void setup_local_APIC(void)
1368 * TODO: set up through-local-APIC from through-I/O-APIC? --macro 1368 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
1369 */ 1369 */
1370 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; 1370 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1371 if (!cpu && (pic_mode || !value)) { 1371 if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
1372 value = APIC_DM_EXTINT; 1372 value = APIC_DM_EXTINT;
1373 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu); 1373 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
1374 } else { 1374 } else {
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 2bd2292a316d..bac0805ea1d9 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -30,7 +30,7 @@ static unsigned int numachip1_get_apic_id(unsigned long x)
30 unsigned long value; 30 unsigned long value;
31 unsigned int id = (x >> 24) & 0xff; 31 unsigned int id = (x >> 24) & 0xff;
32 32
33 if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) { 33 if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
34 rdmsrl(MSR_FAM10H_NODE_ID, value); 34 rdmsrl(MSR_FAM10H_NODE_ID, value);
35 id |= (value << 2) & 0xff00; 35 id |= (value << 2) & 0xff00;
36 } 36 }
@@ -178,7 +178,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
178 this_cpu_write(cpu_llc_id, node); 178 this_cpu_write(cpu_llc_id, node);
179 179
180 /* Account for nodes per socket in multi-core-module processors */ 180 /* Account for nodes per socket in multi-core-module processors */
181 if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) { 181 if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
182 rdmsrl(MSR_FAM10H_NODE_ID, val); 182 rdmsrl(MSR_FAM10H_NODE_ID, val);
183 nodes = ((val >> 3) & 7) + 1; 183 nodes = ((val >> 3) & 7) + 1;
184 } 184 }
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index fc91c98bee01..fd945099fc95 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2592,8 +2592,8 @@ static struct resource * __init ioapic_setup_resources(void)
2592 res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY; 2592 res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
2593 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); 2593 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
2594 mem += IOAPIC_RESOURCE_NAME_SIZE; 2594 mem += IOAPIC_RESOURCE_NAME_SIZE;
2595 ioapics[i].iomem_res = &res[num];
2595 num++; 2596 num++;
2596 ioapics[i].iomem_res = res;
2597 } 2597 }
2598 2598
2599 ioapic_resources = res; 2599 ioapic_resources = res;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index a41e523536a2..592e260ba05b 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -91,8 +91,12 @@ out_data:
91 return NULL; 91 return NULL;
92} 92}
93 93
94static void free_apic_chip_data(struct apic_chip_data *data) 94static void free_apic_chip_data(unsigned int virq, struct apic_chip_data *data)
95{ 95{
96#ifdef CONFIG_X86_IO_APIC
97 if (virq < nr_legacy_irqs())
98 legacy_irq_data[virq] = NULL;
99#endif
96 if (data) { 100 if (data) {
97 free_cpumask_var(data->domain); 101 free_cpumask_var(data->domain);
98 free_cpumask_var(data->old_domain); 102 free_cpumask_var(data->old_domain);
@@ -316,11 +320,7 @@ static void x86_vector_free_irqs(struct irq_domain *domain,
316 apic_data = irq_data->chip_data; 320 apic_data = irq_data->chip_data;
317 irq_domain_reset_irq_data(irq_data); 321 irq_domain_reset_irq_data(irq_data);
318 raw_spin_unlock_irqrestore(&vector_lock, flags); 322 raw_spin_unlock_irqrestore(&vector_lock, flags);
319 free_apic_chip_data(apic_data); 323 free_apic_chip_data(virq + i, apic_data);
320#ifdef CONFIG_X86_IO_APIC
321 if (virq + i < nr_legacy_irqs())
322 legacy_irq_data[virq + i] = NULL;
323#endif
324 } 324 }
325 } 325 }
326} 326}
@@ -361,7 +361,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
361 err = assign_irq_vector_policy(virq + i, node, data, info); 361 err = assign_irq_vector_policy(virq + i, node, data, info);
362 if (err) { 362 if (err) {
363 irq_data->chip_data = NULL; 363 irq_data->chip_data = NULL;
364 free_apic_chip_data(data); 364 free_apic_chip_data(virq + i, data);
365 goto error; 365 goto error;
366 } 366 }
367 } 367 }
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 8f184615053b..924b65794abd 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -62,7 +62,7 @@ ifdef CONFIG_X86_FEATURE_NAMES
62quiet_cmd_mkcapflags = MKCAP $@ 62quiet_cmd_mkcapflags = MKCAP $@
63 cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@ 63 cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@
64 64
65cpufeature = $(src)/../../include/asm/cpufeature.h 65cpufeature = $(src)/../../include/asm/cpufeatures.h
66 66
67targets += capflags.c 67targets += capflags.c
68$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE 68$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 4bf9e77f3e05..9f6151884249 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -9,6 +9,7 @@
9#include <asm/processor.h> 9#include <asm/processor.h>
10#include <asm/apic.h> 10#include <asm/apic.h>
11#include <asm/cpu.h> 11#include <asm/cpu.h>
12#include <asm/spec-ctrl.h>
12#include <asm/smp.h> 13#include <asm/smp.h>
13#include <asm/pci-direct.h> 14#include <asm/pci-direct.h>
14#include <asm/delay.h> 15#include <asm/delay.h>
@@ -304,7 +305,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
304 int cpu = smp_processor_id(); 305 int cpu = smp_processor_id();
305 306
306 /* get information required for multi-node processors */ 307 /* get information required for multi-node processors */
307 if (cpu_has_topoext) { 308 if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
308 u32 eax, ebx, ecx, edx; 309 u32 eax, ebx, ecx, edx;
309 310
310 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); 311 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
@@ -519,6 +520,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
519 520
520 if (cpu_has(c, X86_FEATURE_MWAITX)) 521 if (cpu_has(c, X86_FEATURE_MWAITX))
521 use_mwaitx_delay(); 522 use_mwaitx_delay();
523
524 if (c->x86 >= 0x15 && c->x86 <= 0x17) {
525 unsigned int bit;
526
527 switch (c->x86) {
528 case 0x15: bit = 54; break;
529 case 0x16: bit = 33; break;
530 case 0x17: bit = 10; break;
531 default: return;
532 }
533 /*
534 * Try to cache the base value so further operations can
535 * avoid RMW. If that faults, do not enable SSBD.
536 */
537 if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
538 setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
539 setup_force_cpu_cap(X86_FEATURE_SSBD);
540 x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
541 }
542 }
522} 543}
523 544
524static void early_init_amd(struct cpuinfo_x86 *c) 545static void early_init_amd(struct cpuinfo_x86 *c)
@@ -692,6 +713,17 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
692 } 713 }
693} 714}
694 715
716static void init_amd_zn(struct cpuinfo_x86 *c)
717{
718 set_cpu_cap(c, X86_FEATURE_ZEN);
719 /*
720 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
721 * all up to and including B1.
722 */
723 if (c->x86_model <= 1 && c->x86_mask <= 1)
724 set_cpu_cap(c, X86_FEATURE_CPB);
725}
726
695static void init_amd(struct cpuinfo_x86 *c) 727static void init_amd(struct cpuinfo_x86 *c)
696{ 728{
697 u32 dummy; 729 u32 dummy;
@@ -722,6 +754,7 @@ static void init_amd(struct cpuinfo_x86 *c)
722 case 0x10: init_amd_gh(c); break; 754 case 0x10: init_amd_gh(c); break;
723 case 0x12: init_amd_ln(c); break; 755 case 0x12: init_amd_ln(c); break;
724 case 0x15: init_amd_bd(c); break; 756 case 0x15: init_amd_bd(c); break;
757 case 0x17: init_amd_zn(c); break;
725 } 758 }
726 759
727 /* Enable workaround for FXSAVE leak */ 760 /* Enable workaround for FXSAVE leak */
@@ -791,8 +824,9 @@ static void init_amd(struct cpuinfo_x86 *c)
791 if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM)) 824 if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
792 set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH); 825 set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
793 826
794 /* AMD CPUs don't reset SS attributes on SYSRET */ 827 /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
795 set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); 828 if (!cpu_has(c, X86_FEATURE_XENPV))
829 set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
796} 830}
797 831
798#ifdef CONFIG_X86_32 832#ifdef CONFIG_X86_32
@@ -954,7 +988,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
954 988
955void set_dr_addr_mask(unsigned long mask, int dr) 989void set_dr_addr_mask(unsigned long mask, int dr)
956{ 990{
957 if (!cpu_has_bpext) 991 if (!boot_cpu_has(X86_FEATURE_BPEXT))
958 return; 992 return;
959 993
960 switch (dr) { 994 switch (dr) {
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 49d25ddf0e9f..34e4aaaf03d2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -10,8 +10,11 @@
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/utsname.h> 11#include <linux/utsname.h>
12#include <linux/cpu.h> 12#include <linux/cpu.h>
13#include <linux/module.h>
14#include <linux/nospec.h>
15#include <linux/prctl.h>
13 16
14#include <asm/nospec-branch.h> 17#include <asm/spec-ctrl.h>
15#include <asm/cmdline.h> 18#include <asm/cmdline.h>
16#include <asm/bugs.h> 19#include <asm/bugs.h>
17#include <asm/processor.h> 20#include <asm/processor.h>
@@ -22,8 +25,32 @@
22#include <asm/alternative.h> 25#include <asm/alternative.h>
23#include <asm/pgtable.h> 26#include <asm/pgtable.h>
24#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
28#include <asm/intel-family.h>
29#include <asm/e820.h>
25 30
26static void __init spectre_v2_select_mitigation(void); 31static void __init spectre_v2_select_mitigation(void);
32static void __init ssb_select_mitigation(void);
33static void __init l1tf_select_mitigation(void);
34
35/*
36 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
37 * writes to SPEC_CTRL contain whatever reserved bits have been set.
38 */
39u64 x86_spec_ctrl_base;
40EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
41
42/*
43 * The vendor and possibly platform specific bits which can be modified in
44 * x86_spec_ctrl_base.
45 */
46static u64 x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
47
48/*
49 * AMD specific MSR info for Speculative Store Bypass control.
50 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
51 */
52u64 x86_amd_ls_cfg_base;
53u64 x86_amd_ls_cfg_ssbd_mask;
27 54
28void __init check_bugs(void) 55void __init check_bugs(void)
29{ 56{
@@ -34,9 +61,29 @@ void __init check_bugs(void)
34 print_cpu_info(&boot_cpu_data); 61 print_cpu_info(&boot_cpu_data);
35 } 62 }
36 63
64 /*
65 * Read the SPEC_CTRL MSR to account for reserved bits which may
66 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
67 * init code as it is not enumerated and depends on the family.
68 */
69 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
70 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
71
72 /* Allow STIBP in MSR_SPEC_CTRL if supported */
73 if (boot_cpu_has(X86_FEATURE_STIBP))
74 x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
75
37 /* Select the proper spectre mitigation before patching alternatives */ 76 /* Select the proper spectre mitigation before patching alternatives */
38 spectre_v2_select_mitigation(); 77 spectre_v2_select_mitigation();
39 78
79 /*
80 * Select proper mitigation for any exposure to the Speculative Store
81 * Bypass vulnerability.
82 */
83 ssb_select_mitigation();
84
85 l1tf_select_mitigation();
86
40#ifdef CONFIG_X86_32 87#ifdef CONFIG_X86_32
41 /* 88 /*
42 * Check whether we are able to run this kernel safely on SMP. 89 * Check whether we are able to run this kernel safely on SMP.
@@ -88,20 +135,109 @@ static const char *spectre_v2_strings[] = {
88}; 135};
89 136
90#undef pr_fmt 137#undef pr_fmt
91#define pr_fmt(fmt) "Spectre V2 mitigation: " fmt 138#define pr_fmt(fmt) "Spectre V2 : " fmt
92 139
93static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; 140static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
94 141
142void
143x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
144{
145 u64 msrval, guestval, hostval = x86_spec_ctrl_base;
146 struct thread_info *ti = current_thread_info();
147
148 /* Is MSR_SPEC_CTRL implemented ? */
149 if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
150 /*
151 * Restrict guest_spec_ctrl to supported values. Clear the
152 * modifiable bits in the host base value and or the
153 * modifiable bits from the guest value.
154 */
155 guestval = hostval & ~x86_spec_ctrl_mask;
156 guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
157
158 /* SSBD controlled in MSR_SPEC_CTRL */
159 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
160 hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
161
162 if (hostval != guestval) {
163 msrval = setguest ? guestval : hostval;
164 wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
165 }
166 }
167
168 /*
169 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
170 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
171 */
172 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
173 !static_cpu_has(X86_FEATURE_VIRT_SSBD))
174 return;
175
176 /*
177 * If the host has SSBD mitigation enabled, force it in the host's
178 * virtual MSR value. If its not permanently enabled, evaluate
179 * current's TIF_SSBD thread flag.
180 */
181 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
182 hostval = SPEC_CTRL_SSBD;
183 else
184 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
185
186 /* Sanitize the guest value */
187 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
188
189 if (hostval != guestval) {
190 unsigned long tif;
191
192 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
193 ssbd_spec_ctrl_to_tif(hostval);
194
195 speculative_store_bypass_update(tif);
196 }
197}
198EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
199
200static void x86_amd_ssb_disable(void)
201{
202 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
203
204 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
205 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
206 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
207 wrmsrl(MSR_AMD64_LS_CFG, msrval);
208}
209
210#ifdef RETPOLINE
211static bool spectre_v2_bad_module;
212
213bool retpoline_module_ok(bool has_retpoline)
214{
215 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
216 return true;
217
218 pr_err("System may be vulnerable to spectre v2\n");
219 spectre_v2_bad_module = true;
220 return false;
221}
222
223static inline const char *spectre_v2_module_string(void)
224{
225 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
226}
227#else
228static inline const char *spectre_v2_module_string(void) { return ""; }
229#endif
230
95static void __init spec2_print_if_insecure(const char *reason) 231static void __init spec2_print_if_insecure(const char *reason)
96{ 232{
97 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 233 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
98 pr_info("%s\n", reason); 234 pr_info("%s selected on command line.\n", reason);
99} 235}
100 236
101static void __init spec2_print_if_secure(const char *reason) 237static void __init spec2_print_if_secure(const char *reason)
102{ 238{
103 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 239 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
104 pr_info("%s\n", reason); 240 pr_info("%s selected on command line.\n", reason);
105} 241}
106 242
107static inline bool retp_compiler(void) 243static inline bool retp_compiler(void)
@@ -116,42 +252,65 @@ static inline bool match_option(const char *arg, int arglen, const char *opt)
116 return len == arglen && !strncmp(arg, opt, len); 252 return len == arglen && !strncmp(arg, opt, len);
117} 253}
118 254
255static const struct {
256 const char *option;
257 enum spectre_v2_mitigation_cmd cmd;
258 bool secure;
259} mitigation_options[] = {
260 { "off", SPECTRE_V2_CMD_NONE, false },
261 { "on", SPECTRE_V2_CMD_FORCE, true },
262 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
263 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false },
264 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
265 { "auto", SPECTRE_V2_CMD_AUTO, false },
266};
267
119static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) 268static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
120{ 269{
121 char arg[20]; 270 char arg[20];
122 int ret; 271 int ret, i;
123 272 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
124 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, 273
125 sizeof(arg)); 274 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
126 if (ret > 0) { 275 return SPECTRE_V2_CMD_NONE;
127 if (match_option(arg, ret, "off")) { 276 else {
128 goto disable; 277 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
129 } else if (match_option(arg, ret, "on")) { 278 if (ret < 0)
130 spec2_print_if_secure("force enabled on command line."); 279 return SPECTRE_V2_CMD_AUTO;
131 return SPECTRE_V2_CMD_FORCE; 280
132 } else if (match_option(arg, ret, "retpoline")) { 281 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
133 spec2_print_if_insecure("retpoline selected on command line."); 282 if (!match_option(arg, ret, mitigation_options[i].option))
134 return SPECTRE_V2_CMD_RETPOLINE; 283 continue;
135 } else if (match_option(arg, ret, "retpoline,amd")) { 284 cmd = mitigation_options[i].cmd;
136 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { 285 break;
137 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); 286 }
138 return SPECTRE_V2_CMD_AUTO; 287
139 } 288 if (i >= ARRAY_SIZE(mitigation_options)) {
140 spec2_print_if_insecure("AMD retpoline selected on command line."); 289 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
141 return SPECTRE_V2_CMD_RETPOLINE_AMD;
142 } else if (match_option(arg, ret, "retpoline,generic")) {
143 spec2_print_if_insecure("generic retpoline selected on command line.");
144 return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
145 } else if (match_option(arg, ret, "auto")) {
146 return SPECTRE_V2_CMD_AUTO; 290 return SPECTRE_V2_CMD_AUTO;
147 } 291 }
148 } 292 }
149 293
150 if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2")) 294 if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
295 cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
296 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
297 !IS_ENABLED(CONFIG_RETPOLINE)) {
298 pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
299 return SPECTRE_V2_CMD_AUTO;
300 }
301
302 if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
303 boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
304 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
151 return SPECTRE_V2_CMD_AUTO; 305 return SPECTRE_V2_CMD_AUTO;
152disable: 306 }
153 spec2_print_if_insecure("disabled on command line."); 307
154 return SPECTRE_V2_CMD_NONE; 308 if (mitigation_options[i].secure)
309 spec2_print_if_secure(mitigation_options[i].option);
310 else
311 spec2_print_if_insecure(mitigation_options[i].option);
312
313 return cmd;
155} 314}
156 315
157static void __init spectre_v2_select_mitigation(void) 316static void __init spectre_v2_select_mitigation(void)
@@ -172,10 +331,10 @@ static void __init spectre_v2_select_mitigation(void)
172 return; 331 return;
173 332
174 case SPECTRE_V2_CMD_FORCE: 333 case SPECTRE_V2_CMD_FORCE:
175 /* FALLTRHU */
176 case SPECTRE_V2_CMD_AUTO: 334 case SPECTRE_V2_CMD_AUTO:
177 goto retpoline_auto; 335 if (IS_ENABLED(CONFIG_RETPOLINE))
178 336 goto retpoline_auto;
337 break;
179 case SPECTRE_V2_CMD_RETPOLINE_AMD: 338 case SPECTRE_V2_CMD_RETPOLINE_AMD:
180 if (IS_ENABLED(CONFIG_RETPOLINE)) 339 if (IS_ENABLED(CONFIG_RETPOLINE))
181 goto retpoline_amd; 340 goto retpoline_amd;
@@ -189,14 +348,14 @@ static void __init spectre_v2_select_mitigation(void)
189 goto retpoline_auto; 348 goto retpoline_auto;
190 break; 349 break;
191 } 350 }
192 pr_err("kernel not compiled with retpoline; no mitigation available!"); 351 pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
193 return; 352 return;
194 353
195retpoline_auto: 354retpoline_auto:
196 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { 355 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
197 retpoline_amd: 356 retpoline_amd:
198 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { 357 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
199 pr_err("LFENCE not serializing. Switching to generic retpoline\n"); 358 pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
200 goto retpoline_generic; 359 goto retpoline_generic;
201 } 360 }
202 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : 361 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
@@ -212,35 +371,357 @@ retpoline_auto:
212 371
213 spectre_v2_enabled = mode; 372 spectre_v2_enabled = mode;
214 pr_info("%s\n", spectre_v2_strings[mode]); 373 pr_info("%s\n", spectre_v2_strings[mode]);
374
375 /*
376 * If spectre v2 protection has been enabled, unconditionally fill
377 * RSB during a context switch; this protects against two independent
378 * issues:
379 *
380 * - RSB underflow (and switch to BTB) on Skylake+
381 * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
382 */
383 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
384 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
385
386 /* Initialize Indirect Branch Prediction Barrier if supported */
387 if (boot_cpu_has(X86_FEATURE_IBPB)) {
388 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
389 pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
390 }
391
392 /*
393 * Retpoline means the kernel is safe because it has no indirect
394 * branches. But firmware isn't, so use IBRS to protect that.
395 */
396 if (boot_cpu_has(X86_FEATURE_IBRS)) {
397 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
398 pr_info("Enabling Restricted Speculation for firmware calls\n");
399 }
400}
401
402#undef pr_fmt
403#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
404
405static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
406
407/* The kernel command line selection */
408enum ssb_mitigation_cmd {
409 SPEC_STORE_BYPASS_CMD_NONE,
410 SPEC_STORE_BYPASS_CMD_AUTO,
411 SPEC_STORE_BYPASS_CMD_ON,
412 SPEC_STORE_BYPASS_CMD_PRCTL,
413 SPEC_STORE_BYPASS_CMD_SECCOMP,
414};
415
416static const char *ssb_strings[] = {
417 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
418 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
419 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
420 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
421};
422
423static const struct {
424 const char *option;
425 enum ssb_mitigation_cmd cmd;
426} ssb_mitigation_options[] = {
427 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
428 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
429 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
430 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
431 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
432};
433
434static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
435{
436 enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
437 char arg[20];
438 int ret, i;
439
440 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
441 return SPEC_STORE_BYPASS_CMD_NONE;
442 } else {
443 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
444 arg, sizeof(arg));
445 if (ret < 0)
446 return SPEC_STORE_BYPASS_CMD_AUTO;
447
448 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
449 if (!match_option(arg, ret, ssb_mitigation_options[i].option))
450 continue;
451
452 cmd = ssb_mitigation_options[i].cmd;
453 break;
454 }
455
456 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
457 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
458 return SPEC_STORE_BYPASS_CMD_AUTO;
459 }
460 }
461
462 return cmd;
463}
464
465static enum ssb_mitigation __init __ssb_select_mitigation(void)
466{
467 enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
468 enum ssb_mitigation_cmd cmd;
469
470 if (!boot_cpu_has(X86_FEATURE_SSBD))
471 return mode;
472
473 cmd = ssb_parse_cmdline();
474 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
475 (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
476 cmd == SPEC_STORE_BYPASS_CMD_AUTO))
477 return mode;
478
479 switch (cmd) {
480 case SPEC_STORE_BYPASS_CMD_AUTO:
481 case SPEC_STORE_BYPASS_CMD_SECCOMP:
482 /*
483 * Choose prctl+seccomp as the default mode if seccomp is
484 * enabled.
485 */
486 if (IS_ENABLED(CONFIG_SECCOMP))
487 mode = SPEC_STORE_BYPASS_SECCOMP;
488 else
489 mode = SPEC_STORE_BYPASS_PRCTL;
490 break;
491 case SPEC_STORE_BYPASS_CMD_ON:
492 mode = SPEC_STORE_BYPASS_DISABLE;
493 break;
494 case SPEC_STORE_BYPASS_CMD_PRCTL:
495 mode = SPEC_STORE_BYPASS_PRCTL;
496 break;
497 case SPEC_STORE_BYPASS_CMD_NONE:
498 break;
499 }
500
501 /*
502 * We have three CPU feature flags that are in play here:
503 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
504 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
505 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
506 */
507 if (mode == SPEC_STORE_BYPASS_DISABLE) {
508 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
509 /*
510 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
511 * a completely different MSR and bit dependent on family.
512 */
513 switch (boot_cpu_data.x86_vendor) {
514 case X86_VENDOR_INTEL:
515 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
516 x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
517 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
518 break;
519 case X86_VENDOR_AMD:
520 x86_amd_ssb_disable();
521 break;
522 }
523 }
524
525 return mode;
526}
527
528static void ssb_select_mitigation(void)
529{
530 ssb_mode = __ssb_select_mitigation();
531
532 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
533 pr_info("%s\n", ssb_strings[ssb_mode]);
215} 534}
216 535
217#undef pr_fmt 536#undef pr_fmt
537#define pr_fmt(fmt) "Speculation prctl: " fmt
538
539static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
540{
541 bool update;
542
543 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
544 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
545 return -ENXIO;
546
547 switch (ctrl) {
548 case PR_SPEC_ENABLE:
549 /* If speculation is force disabled, enable is not allowed */
550 if (task_spec_ssb_force_disable(task))
551 return -EPERM;
552 task_clear_spec_ssb_disable(task);
553 update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
554 break;
555 case PR_SPEC_DISABLE:
556 task_set_spec_ssb_disable(task);
557 update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
558 break;
559 case PR_SPEC_FORCE_DISABLE:
560 task_set_spec_ssb_disable(task);
561 task_set_spec_ssb_force_disable(task);
562 update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
563 break;
564 default:
565 return -ERANGE;
566 }
567
568 /*
569 * If being set on non-current task, delay setting the CPU
570 * mitigation until it is next scheduled.
571 */
572 if (task == current && update)
573 speculative_store_bypass_update_current();
574
575 return 0;
576}
577
578int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
579 unsigned long ctrl)
580{
581 switch (which) {
582 case PR_SPEC_STORE_BYPASS:
583 return ssb_prctl_set(task, ctrl);
584 default:
585 return -ENODEV;
586 }
587}
588
#ifdef CONFIG_SECCOMP
/*
 * Seccomp hook: when the global SSB mode is "seccomp", entering a
 * seccomp filter force-disables speculative store bypass for the task.
 */
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return;

	ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
596
597static int ssb_prctl_get(struct task_struct *task)
598{
599 switch (ssb_mode) {
600 case SPEC_STORE_BYPASS_DISABLE:
601 return PR_SPEC_DISABLE;
602 case SPEC_STORE_BYPASS_SECCOMP:
603 case SPEC_STORE_BYPASS_PRCTL:
604 if (task_spec_ssb_force_disable(task))
605 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
606 if (task_spec_ssb_disable(task))
607 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
608 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
609 default:
610 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
611 return PR_SPEC_ENABLE;
612 return PR_SPEC_NOT_AFFECTED;
613 }
614}
615
616int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
617{
618 switch (which) {
619 case PR_SPEC_STORE_BYPASS:
620 return ssb_prctl_get(task);
621 default:
622 return -ENODEV;
623 }
624}
625
626void x86_spec_ctrl_setup_ap(void)
627{
628 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
629 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
630
631 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
632 x86_amd_ssb_disable();
633}
634
635#undef pr_fmt
636#define pr_fmt(fmt) "L1TF: " fmt
637static void __init l1tf_select_mitigation(void)
638{
639 u64 half_pa;
640
641 if (!boot_cpu_has_bug(X86_BUG_L1TF))
642 return;
643
644#if CONFIG_PGTABLE_LEVELS == 2
645 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
646 return;
647#endif
648
649 /*
650 * This is extremely unlikely to happen because almost all
651 * systems have far more MAX_PA/2 than RAM can be fit into
652 * DIMM slots.
653 */
654 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
655 if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
656 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
657 return;
658 }
659
660 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
661}
662#undef pr_fmt
218 663
#ifdef CONFIG_SYSFS

/*
 * Common backend for the /sys/devices/system/cpu/vulnerabilities/ files.
 * Maps a bug bit to its human-readable mitigation status string.
 */
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_KAISER))
			return sprintf(buf, "Mitigation: PTI\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return sprintf(buf, "Mitigation: Page Table Inversion\n");
		break;

	default:
		break;
	}

	/* Affected, but no mitigation engaged. */
	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
#endif
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index d8fba5c15fbd..6608c03c2126 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -1,7 +1,7 @@
1#include <linux/bitops.h> 1#include <linux/bitops.h>
2#include <linux/kernel.h> 2#include <linux/kernel.h>
3 3
4#include <asm/processor.h> 4#include <asm/cpufeature.h>
5#include <asm/e820.h> 5#include <asm/e820.h>
6#include <asm/mtrr.h> 6#include <asm/mtrr.h>
7#include <asm/msr.h> 7#include <asm/msr.h>
@@ -43,7 +43,7 @@ static void init_c3(struct cpuinfo_x86 *c)
43 /* store Centaur Extended Feature Flags as 43 /* store Centaur Extended Feature Flags as
44 * word 5 of the CPU capability bit array 44 * word 5 of the CPU capability bit array
45 */ 45 */
46 c->x86_capability[5] = cpuid_edx(0xC0000001); 46 c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
47 } 47 }
48#ifdef CONFIG_X86_32 48#ifdef CONFIG_X86_32
49 /* Cyrix III family needs CX8 & PGE explicitly enabled. */ 49 /* Cyrix III family needs CX8 & PGE explicitly enabled. */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f7f2ad3687ee..4d3fa79c0f09 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -43,6 +43,8 @@
43#include <asm/pat.h> 43#include <asm/pat.h>
44#include <asm/microcode.h> 44#include <asm/microcode.h>
45#include <asm/microcode_intel.h> 45#include <asm/microcode_intel.h>
46#include <asm/intel-family.h>
47#include <asm/cpu_device_id.h>
46 48
47#ifdef CONFIG_X86_LOCAL_APIC 49#ifdef CONFIG_X86_LOCAL_APIC
48#include <asm/uv/uv.h> 50#include <asm/uv/uv.h>
@@ -674,52 +676,86 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
674 } 676 }
675} 677}
676 678
679static void init_speculation_control(struct cpuinfo_x86 *c)
680{
681 /*
682 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
683 * and they also have a different bit for STIBP support. Also,
684 * a hypervisor might have set the individual AMD bits even on
685 * Intel CPUs, for finer-grained selection of what's available.
686 */
687 if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
688 set_cpu_cap(c, X86_FEATURE_IBRS);
689 set_cpu_cap(c, X86_FEATURE_IBPB);
690 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
691 }
692
693 if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
694 set_cpu_cap(c, X86_FEATURE_STIBP);
695
696 if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
697 set_cpu_cap(c, X86_FEATURE_SSBD);
698
699 if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
700 set_cpu_cap(c, X86_FEATURE_IBRS);
701 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
702 }
703
704 if (cpu_has(c, X86_FEATURE_AMD_IBPB))
705 set_cpu_cap(c, X86_FEATURE_IBPB);
706
707 if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
708 set_cpu_cap(c, X86_FEATURE_STIBP);
709 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
710 }
711}
712
677void get_cpu_cap(struct cpuinfo_x86 *c) 713void get_cpu_cap(struct cpuinfo_x86 *c)
678{ 714{
679 u32 tfms, xlvl; 715 u32 eax, ebx, ecx, edx;
680 u32 ebx;
681 716
682 /* Intel-defined flags: level 0x00000001 */ 717 /* Intel-defined flags: level 0x00000001 */
683 if (c->cpuid_level >= 0x00000001) { 718 if (c->cpuid_level >= 0x00000001) {
684 u32 capability, excap; 719 cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
685 720
686 cpuid(0x00000001, &tfms, &ebx, &excap, &capability); 721 c->x86_capability[CPUID_1_ECX] = ecx;
687 c->x86_capability[0] = capability; 722 c->x86_capability[CPUID_1_EDX] = edx;
688 c->x86_capability[4] = excap;
689 } 723 }
690 724
725 /* Thermal and Power Management Leaf: level 0x00000006 (eax) */
726 if (c->cpuid_level >= 0x00000006)
727 c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
728
691 /* Additional Intel-defined flags: level 0x00000007 */ 729 /* Additional Intel-defined flags: level 0x00000007 */
692 if (c->cpuid_level >= 0x00000007) { 730 if (c->cpuid_level >= 0x00000007) {
693 u32 eax, ebx, ecx, edx;
694
695 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); 731 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
696 732 c->x86_capability[CPUID_7_0_EBX] = ebx;
697 c->x86_capability[9] = ebx; 733 c->x86_capability[CPUID_7_ECX] = ecx;
734 c->x86_capability[CPUID_7_EDX] = edx;
698 } 735 }
699 736
700 /* Extended state features: level 0x0000000d */ 737 /* Extended state features: level 0x0000000d */
701 if (c->cpuid_level >= 0x0000000d) { 738 if (c->cpuid_level >= 0x0000000d) {
702 u32 eax, ebx, ecx, edx;
703
704 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); 739 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
705 740
706 c->x86_capability[10] = eax; 741 c->x86_capability[CPUID_D_1_EAX] = eax;
707 } 742 }
708 743
709 /* Additional Intel-defined flags: level 0x0000000F */ 744 /* Additional Intel-defined flags: level 0x0000000F */
710 if (c->cpuid_level >= 0x0000000F) { 745 if (c->cpuid_level >= 0x0000000F) {
711 u32 eax, ebx, ecx, edx;
712 746
713 /* QoS sub-leaf, EAX=0Fh, ECX=0 */ 747 /* QoS sub-leaf, EAX=0Fh, ECX=0 */
714 cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx); 748 cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
715 c->x86_capability[11] = edx; 749 c->x86_capability[CPUID_F_0_EDX] = edx;
750
716 if (cpu_has(c, X86_FEATURE_CQM_LLC)) { 751 if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
717 /* will be overridden if occupancy monitoring exists */ 752 /* will be overridden if occupancy monitoring exists */
718 c->x86_cache_max_rmid = ebx; 753 c->x86_cache_max_rmid = ebx;
719 754
720 /* QoS sub-leaf, EAX=0Fh, ECX=1 */ 755 /* QoS sub-leaf, EAX=0Fh, ECX=1 */
721 cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx); 756 cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
722 c->x86_capability[12] = edx; 757 c->x86_capability[CPUID_F_1_EDX] = edx;
758
723 if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) { 759 if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
724 c->x86_cache_max_rmid = ecx; 760 c->x86_cache_max_rmid = ecx;
725 c->x86_cache_occ_scale = ebx; 761 c->x86_cache_occ_scale = ebx;
@@ -731,32 +767,49 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
731 } 767 }
732 768
733 /* AMD-defined flags: level 0x80000001 */ 769 /* AMD-defined flags: level 0x80000001 */
734 xlvl = cpuid_eax(0x80000000); 770 eax = cpuid_eax(0x80000000);
735 c->extended_cpuid_level = xlvl; 771 c->extended_cpuid_level = eax;
736 772
737 if ((xlvl & 0xffff0000) == 0x80000000) { 773 if ((eax & 0xffff0000) == 0x80000000) {
738 if (xlvl >= 0x80000001) { 774 if (eax >= 0x80000001) {
739 c->x86_capability[1] = cpuid_edx(0x80000001); 775 cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
740 c->x86_capability[6] = cpuid_ecx(0x80000001); 776
777 c->x86_capability[CPUID_8000_0001_ECX] = ecx;
778 c->x86_capability[CPUID_8000_0001_EDX] = edx;
741 } 779 }
742 } 780 }
743 781
782 if (c->extended_cpuid_level >= 0x80000007) {
783 cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
784
785 c->x86_capability[CPUID_8000_0007_EBX] = ebx;
786 c->x86_power = edx;
787 }
788
744 if (c->extended_cpuid_level >= 0x80000008) { 789 if (c->extended_cpuid_level >= 0x80000008) {
745 u32 eax = cpuid_eax(0x80000008); 790 cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
746 791
747 c->x86_virt_bits = (eax >> 8) & 0xff; 792 c->x86_virt_bits = (eax >> 8) & 0xff;
748 c->x86_phys_bits = eax & 0xff; 793 c->x86_phys_bits = eax & 0xff;
749 c->x86_capability[13] = cpuid_ebx(0x80000008); 794 c->x86_capability[CPUID_8000_0008_EBX] = ebx;
750 } 795 }
751#ifdef CONFIG_X86_32 796#ifdef CONFIG_X86_32
752 else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) 797 else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
753 c->x86_phys_bits = 36; 798 c->x86_phys_bits = 36;
754#endif 799#endif
755 800
756 if (c->extended_cpuid_level >= 0x80000007) 801 if (c->extended_cpuid_level >= 0x8000000a)
757 c->x86_power = cpuid_edx(0x80000007); 802 c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
758 803
759 init_scattered_cpuid_features(c); 804 init_scattered_cpuid_features(c);
805 init_speculation_control(c);
806
807 /*
808 * Clear/Set all flags overridden by options, after probe.
809 * This needs to happen each time we re-probe, which may happen
810 * several times during CPU initialization.
811 */
812 apply_forced_caps(c);
760} 813}
761 814
762static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) 815static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
@@ -785,6 +838,95 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
785#endif 838#endif
786} 839}
787 840
841static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
842 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
843 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
844 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
845 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
846 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
847 { X86_VENDOR_CENTAUR, 5 },
848 { X86_VENDOR_INTEL, 5 },
849 { X86_VENDOR_NSC, 5 },
850 { X86_VENDOR_ANY, 4 },
851 {}
852};
853
854static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
855 { X86_VENDOR_AMD },
856 {}
857};
858
859static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
860 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
861 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
862 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
863 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
864 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
865 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
866 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
867 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
868 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
869 { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
870 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
871 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
872 { X86_VENDOR_CENTAUR, 5, },
873 { X86_VENDOR_INTEL, 5, },
874 { X86_VENDOR_NSC, 5, },
875 { X86_VENDOR_AMD, 0x12, },
876 { X86_VENDOR_AMD, 0x11, },
877 { X86_VENDOR_AMD, 0x10, },
878 { X86_VENDOR_AMD, 0xf, },
879 { X86_VENDOR_ANY, 4, },
880 {}
881};
882
883static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
884 /* in addition to cpu_no_speculation */
885 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
886 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
887 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
888 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
889 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
890 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
891 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
892 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
893 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
894 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
895 {}
896};
897
898static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
899{
900 u64 ia32_cap = 0;
901
902 if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
903 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
904
905 if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
906 !(ia32_cap & ARCH_CAP_SSB_NO))
907 setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
908
909 if (x86_match_cpu(cpu_no_speculation))
910 return;
911
912 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
913 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
914
915 if (x86_match_cpu(cpu_no_meltdown))
916 return;
917
918 /* Rogue Data Cache Load? No! */
919 if (ia32_cap & ARCH_CAP_RDCL_NO)
920 return;
921
922 setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
923
924 if (x86_match_cpu(cpu_no_l1tf))
925 return;
926
927 setup_force_cpu_bug(X86_BUG_L1TF);
928}
929
788/* 930/*
789 * Do minimum CPU detection early. 931 * Do minimum CPU detection early.
790 * Fields really needed: vendor, cpuid_level, family, model, mask, 932 * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -831,11 +973,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
831 973
832 setup_force_cpu_cap(X86_FEATURE_ALWAYS); 974 setup_force_cpu_cap(X86_FEATURE_ALWAYS);
833 975
834 if (c->x86_vendor != X86_VENDOR_AMD) 976 cpu_set_bug_bits(c);
835 setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
836
837 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
838 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
839 977
840 fpu__init_system(c); 978 fpu__init_system(c);
841 979
@@ -955,7 +1093,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
955 int i; 1093 int i;
956 1094
957 c->loops_per_jiffy = loops_per_jiffy; 1095 c->loops_per_jiffy = loops_per_jiffy;
958 c->x86_cache_size = -1; 1096 c->x86_cache_size = 0;
959 c->x86_vendor = X86_VENDOR_UNKNOWN; 1097 c->x86_vendor = X86_VENDOR_UNKNOWN;
960 c->x86_model = c->x86_mask = 0; /* So far unknown... */ 1098 c->x86_model = c->x86_mask = 0; /* So far unknown... */
961 c->x86_vendor_id[0] = '\0'; /* Unset */ 1099 c->x86_vendor_id[0] = '\0'; /* Unset */
@@ -1124,6 +1262,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
1124 enable_sep_cpu(); 1262 enable_sep_cpu();
1125#endif 1263#endif
1126 mtrr_ap_init(); 1264 mtrr_ap_init();
1265 x86_spec_ctrl_setup_ap();
1127} 1266}
1128 1267
1129struct msr_range { 1268struct msr_range {
@@ -1539,7 +1678,9 @@ void cpu_init(void)
1539 1678
1540 printk(KERN_INFO "Initializing CPU#%d\n", cpu); 1679 printk(KERN_INFO "Initializing CPU#%d\n", cpu);
1541 1680
1542 if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de) 1681 if (cpu_feature_enabled(X86_FEATURE_VME) ||
1682 cpu_has_tsc ||
1683 boot_cpu_has(X86_FEATURE_DE))
1543 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1684 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1544 1685
1545 load_current_idt(); 1686 load_current_idt();
@@ -1572,20 +1713,6 @@ void cpu_init(void)
1572} 1713}
1573#endif 1714#endif
1574 1715
1575#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
1576void warn_pre_alternatives(void)
1577{
1578 WARN(1, "You're using static_cpu_has before alternatives have run!\n");
1579}
1580EXPORT_SYMBOL_GPL(warn_pre_alternatives);
1581#endif
1582
1583inline bool __static_cpu_has_safe(u16 bit)
1584{
1585 return boot_cpu_has(bit);
1586}
1587EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
1588
1589static void bsp_resume(void) 1716static void bsp_resume(void)
1590{ 1717{
1591 if (this_cpu->c_bsp_resume) 1718 if (this_cpu->c_bsp_resume)
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 2584265d4745..3b19d82f7932 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -46,4 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
46 46
47extern void get_cpu_cap(struct cpuinfo_x86 *c); 47extern void get_cpu_cap(struct cpuinfo_x86 *c);
48extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); 48extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
49
50extern void x86_spec_ctrl_setup_ap(void);
51
49#endif /* ARCH_X86_CPU_H */ 52#endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index aaf152e79637..15e47c1cd412 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -8,6 +8,7 @@
8#include <linux/timer.h> 8#include <linux/timer.h>
9#include <asm/pci-direct.h> 9#include <asm/pci-direct.h>
10#include <asm/tsc.h> 10#include <asm/tsc.h>
11#include <asm/cpufeature.h>
11 12
12#include "cpu.h" 13#include "cpu.h"
13 14
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 209ac1e7d1f0..4dce22d3cb06 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -8,11 +8,12 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/uaccess.h> 9#include <linux/uaccess.h>
10 10
11#include <asm/processor.h> 11#include <asm/cpufeature.h>
12#include <asm/pgtable.h> 12#include <asm/pgtable.h>
13#include <asm/msr.h> 13#include <asm/msr.h>
14#include <asm/bugs.h> 14#include <asm/bugs.h>
15#include <asm/cpu.h> 15#include <asm/cpu.h>
16#include <asm/intel-family.h>
16 17
17#ifdef CONFIG_X86_64 18#ifdef CONFIG_X86_64
18#include <linux/topology.h> 19#include <linux/topology.h>
@@ -25,6 +26,62 @@
25#include <asm/apic.h> 26#include <asm/apic.h>
26#endif 27#endif
27 28
29/*
30 * Early microcode releases for the Spectre v2 mitigation were broken.
31 * Information taken from;
32 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
33 * - https://kb.vmware.com/s/article/52345
34 * - Microcode revisions observed in the wild
35 * - Release note from 20180108 microcode release
36 */
37struct sku_microcode {
38 u8 model;
39 u8 stepping;
40 u32 microcode;
41};
42static const struct sku_microcode spectre_bad_microcodes[] = {
43 { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
44 { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
45 { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
46 { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
47 { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
48 { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
49 { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
50 { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
51 { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
52 { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
53 { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 },
54 { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
55 { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 },
56 { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 },
57 { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
58 { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
59 { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
60 { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
61 /* Observed in the wild */
62 { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
63 { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
64};
65
66static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
67{
68 int i;
69
70 /*
 71 * We know that the hypervisor lies to us about the microcode version so
72 * we may as well hope that it is running the correct version.
73 */
74 if (cpu_has(c, X86_FEATURE_HYPERVISOR))
75 return false;
76
77 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
78 if (c->x86_model == spectre_bad_microcodes[i].model &&
79 c->x86_mask == spectre_bad_microcodes[i].stepping)
80 return (c->microcode <= spectre_bad_microcodes[i].microcode);
81 }
82 return false;
83}
84
28static void early_init_intel(struct cpuinfo_x86 *c) 85static void early_init_intel(struct cpuinfo_x86 *c)
29{ 86{
30 u64 misc_enable; 87 u64 misc_enable;
@@ -51,6 +108,22 @@ static void early_init_intel(struct cpuinfo_x86 *c)
51 rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode); 108 rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
52 } 109 }
53 110
111 /* Now if any of them are set, check the blacklist and clear the lot */
112 if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
113 cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
114 cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
115 cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
116 pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
117 setup_clear_cpu_cap(X86_FEATURE_IBRS);
118 setup_clear_cpu_cap(X86_FEATURE_IBPB);
119 setup_clear_cpu_cap(X86_FEATURE_STIBP);
120 setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
121 setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
122 setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
123 setup_clear_cpu_cap(X86_FEATURE_SSBD);
124 setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
125 }
126
54 /* 127 /*
55 * Atom erratum AAE44/AAF40/AAG38/AAH41: 128 * Atom erratum AAE44/AAF40/AAG38/AAH41:
56 * 129 *
@@ -445,7 +518,8 @@ static void init_intel(struct cpuinfo_x86 *c)
445 518
446 if (cpu_has_xmm2) 519 if (cpu_has_xmm2)
447 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); 520 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
448 if (cpu_has_ds) { 521
522 if (boot_cpu_has(X86_FEATURE_DS)) {
449 unsigned int l1; 523 unsigned int l1;
450 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); 524 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
451 if (!(l1 & (1<<11))) 525 if (!(l1 & (1<<11)))
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index e38d338a6447..3557b3ceab14 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -14,7 +14,7 @@
14#include <linux/sysfs.h> 14#include <linux/sysfs.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16 16
17#include <asm/processor.h> 17#include <asm/cpufeature.h>
18#include <asm/amd_nb.h> 18#include <asm/amd_nb.h>
19#include <asm/smp.h> 19#include <asm/smp.h>
20 20
@@ -591,7 +591,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
591 unsigned edx; 591 unsigned edx;
592 592
593 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { 593 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
594 if (cpu_has_topoext) 594 if (boot_cpu_has(X86_FEATURE_TOPOEXT))
595 cpuid_count(0x8000001d, index, &eax.full, 595 cpuid_count(0x8000001d, index, &eax.full,
596 &ebx.full, &ecx.full, &edx); 596 &ebx.full, &ecx.full, &edx);
597 else 597 else
@@ -637,7 +637,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
637void init_amd_cacheinfo(struct cpuinfo_x86 *c) 637void init_amd_cacheinfo(struct cpuinfo_x86 *c)
638{ 638{
639 639
640 if (cpu_has_topoext) { 640 if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
641 num_cache_leaves = find_num_cache_leaves(c); 641 num_cache_leaves = find_num_cache_leaves(c);
642 } else if (c->extended_cpuid_level >= 0x80000006) { 642 } else if (c->extended_cpuid_level >= 0x80000006) {
643 if (cpuid_edx(0x80000006) & 0xf000) 643 if (cpuid_edx(0x80000006) & 0xf000)
@@ -809,7 +809,7 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
809 struct cacheinfo *this_leaf; 809 struct cacheinfo *this_leaf;
810 int i, sibling; 810 int i, sibling;
811 811
812 if (cpu_has_topoext) { 812 if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
813 unsigned int apicid, nshared, first, last; 813 unsigned int apicid, nshared, first, last;
814 814
815 this_leaf = this_cpu_ci->info_list + index; 815 this_leaf = this_cpu_ci->info_list + index;
@@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
934 ci_leaf_init(this_leaf++, &id4_regs); 934 ci_leaf_init(this_leaf++, &id4_regs);
935 __cache_cpumap_setup(cpu, idx, &id4_regs); 935 __cache_cpumap_setup(cpu, idx, &id4_regs);
936 } 936 }
937 this_cpu_ci->cpu_map_populated = true;
938
937 return 0; 939 return 0;
938} 940}
939 941
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
index afa9f0d487ea..fbb5e90557a5 100644
--- a/arch/x86/kernel/cpu/match.c
+++ b/arch/x86/kernel/cpu/match.c
@@ -1,5 +1,5 @@
1#include <asm/cpu_device_id.h> 1#include <asm/cpu_device_id.h>
2#include <asm/processor.h> 2#include <asm/cpufeature.h>
3#include <linux/cpu.h> 3#include <linux/cpu.h>
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/slab.h> 5#include <linux/slab.h>
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 4cfba4371a71..101bfae369e1 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -152,7 +152,6 @@ static void raise_mce(struct mce *m)
152 if (context == MCJ_CTX_RANDOM) 152 if (context == MCJ_CTX_RANDOM)
153 return; 153 return;
154 154
155#ifdef CONFIG_X86_LOCAL_APIC
156 if (m->inject_flags & (MCJ_IRQ_BROADCAST | MCJ_NMI_BROADCAST)) { 155 if (m->inject_flags & (MCJ_IRQ_BROADCAST | MCJ_NMI_BROADCAST)) {
157 unsigned long start; 156 unsigned long start;
158 int cpu; 157 int cpu;
@@ -193,9 +192,7 @@ static void raise_mce(struct mce *m)
193 raise_local(); 192 raise_local();
194 put_cpu(); 193 put_cpu();
195 put_online_cpus(); 194 put_online_cpus();
196 } else 195 } else {
197#endif
198 {
199 preempt_disable(); 196 preempt_disable();
200 raise_local(); 197 raise_local();
201 preempt_enable(); 198 preempt_enable();
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 364fbad72e60..7b8c8c838191 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -60,6 +60,9 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
60 smp_load_acquire(&(p)); \ 60 smp_load_acquire(&(p)); \
61}) 61})
62 62
63/* sysfs synchronization */
64static DEFINE_MUTEX(mce_sysfs_mutex);
65
63#define CREATE_TRACE_POINTS 66#define CREATE_TRACE_POINTS
64#include <trace/events/mce.h> 67#include <trace/events/mce.h>
65 68
@@ -977,11 +980,12 @@ void do_machine_check(struct pt_regs *regs, long error_code)
977 int i; 980 int i;
978 int worst = 0; 981 int worst = 0;
979 int severity; 982 int severity;
983
980 /* 984 /*
981 * Establish sequential order between the CPUs entering the machine 985 * Establish sequential order between the CPUs entering the machine
982 * check handler. 986 * check handler.
983 */ 987 */
984 int order; 988 int order = -1;
985 /* 989 /*
986 * If no_way_out gets set, there is no safe way to recover from this 990 * If no_way_out gets set, there is no safe way to recover from this
987 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway. 991 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
@@ -997,7 +1001,12 @@ void do_machine_check(struct pt_regs *regs, long error_code)
997 char *msg = "Unknown"; 1001 char *msg = "Unknown";
998 u64 recover_paddr = ~0ull; 1002 u64 recover_paddr = ~0ull;
999 int flags = MF_ACTION_REQUIRED; 1003 int flags = MF_ACTION_REQUIRED;
1000 int lmce = 0; 1004
1005 /*
1006 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
1007 * on Intel.
1008 */
1009 int lmce = 1;
1001 1010
1002 /* If this CPU is offline, just bail out. */ 1011 /* If this CPU is offline, just bail out. */
1003 if (cpu_is_offline(smp_processor_id())) { 1012 if (cpu_is_offline(smp_processor_id())) {
@@ -1036,17 +1045,23 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1036 kill_it = 1; 1045 kill_it = 1;
1037 1046
1038 /* 1047 /*
1039 * Check if this MCE is signaled to only this logical processor 1048 * Check if this MCE is signaled to only this logical processor,
1049 * on Intel only.
1040 */ 1050 */
1041 if (m.mcgstatus & MCG_STATUS_LMCES) 1051 if (m.cpuvendor == X86_VENDOR_INTEL)
1042 lmce = 1; 1052 lmce = m.mcgstatus & MCG_STATUS_LMCES;
1043 else { 1053
1044 /* 1054 /*
1045 * Go through all the banks in exclusion of the other CPUs. 1055 * Local machine check may already know that we have to panic.
1046 * This way we don't report duplicated events on shared banks 1056 * Broadcast machine check begins rendezvous in mce_start()
1047 * because the first one to see it will clear it. 1057 * Go through all banks in exclusion of the other CPUs. This way we
1048 * If this is a Local MCE, then no need to perform rendezvous. 1058 * don't report duplicated events on shared banks because the first one
1049 */ 1059 * to see it will clear it.
1060 */
1061 if (lmce) {
1062 if (no_way_out)
1063 mce_panic("Fatal local machine check", &m, msg);
1064 } else {
1050 order = mce_start(&no_way_out); 1065 order = mce_start(&no_way_out);
1051 } 1066 }
1052 1067
@@ -1125,12 +1140,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1125 no_way_out = worst >= MCE_PANIC_SEVERITY; 1140 no_way_out = worst >= MCE_PANIC_SEVERITY;
1126 } else { 1141 } else {
1127 /* 1142 /*
1128 * Local MCE skipped calling mce_reign() 1143 * If there was a fatal machine check we should have
1129 * If we found a fatal error, we need to panic here. 1144 * already called mce_panic earlier in this function.
1145 * Since we re-read the banks, we might have found
1146 * something new. Check again to see if we found a
1147 * fatal error. We call "mce_severity()" again to
1148 * make sure we have the right "msg".
1130 */ 1149 */
1131 if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) 1150 if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
1132 mce_panic("Machine check from unknown source", 1151 mce_severity(&m, cfg->tolerant, &msg, true);
1133 NULL, NULL); 1152 mce_panic("Local fatal machine check!", &m, msg);
1153 }
1134 } 1154 }
1135 1155
1136 /* 1156 /*
@@ -2220,6 +2240,7 @@ static ssize_t set_ignore_ce(struct device *s,
2220 if (kstrtou64(buf, 0, &new) < 0) 2240 if (kstrtou64(buf, 0, &new) < 0)
2221 return -EINVAL; 2241 return -EINVAL;
2222 2242
2243 mutex_lock(&mce_sysfs_mutex);
2223 if (mca_cfg.ignore_ce ^ !!new) { 2244 if (mca_cfg.ignore_ce ^ !!new) {
2224 if (new) { 2245 if (new) {
2225 /* disable ce features */ 2246 /* disable ce features */
@@ -2232,6 +2253,8 @@ static ssize_t set_ignore_ce(struct device *s,
2232 on_each_cpu(mce_enable_ce, (void *)1, 1); 2253 on_each_cpu(mce_enable_ce, (void *)1, 1);
2233 } 2254 }
2234 } 2255 }
2256 mutex_unlock(&mce_sysfs_mutex);
2257
2235 return size; 2258 return size;
2236} 2259}
2237 2260
@@ -2244,6 +2267,7 @@ static ssize_t set_cmci_disabled(struct device *s,
2244 if (kstrtou64(buf, 0, &new) < 0) 2267 if (kstrtou64(buf, 0, &new) < 0)
2245 return -EINVAL; 2268 return -EINVAL;
2246 2269
2270 mutex_lock(&mce_sysfs_mutex);
2247 if (mca_cfg.cmci_disabled ^ !!new) { 2271 if (mca_cfg.cmci_disabled ^ !!new) {
2248 if (new) { 2272 if (new) {
2249 /* disable cmci */ 2273 /* disable cmci */
@@ -2255,6 +2279,8 @@ static ssize_t set_cmci_disabled(struct device *s,
2255 on_each_cpu(mce_enable_ce, NULL, 1); 2279 on_each_cpu(mce_enable_ce, NULL, 1);
2256 } 2280 }
2257 } 2281 }
2282 mutex_unlock(&mce_sysfs_mutex);
2283
2258 return size; 2284 return size;
2259} 2285}
2260 2286
@@ -2262,8 +2288,16 @@ static ssize_t store_int_with_restart(struct device *s,
2262 struct device_attribute *attr, 2288 struct device_attribute *attr,
2263 const char *buf, size_t size) 2289 const char *buf, size_t size)
2264{ 2290{
2265 ssize_t ret = device_store_int(s, attr, buf, size); 2291 unsigned long old_check_interval = check_interval;
2292 ssize_t ret = device_store_ulong(s, attr, buf, size);
2293
2294 if (check_interval == old_check_interval)
2295 return ret;
2296
2297 mutex_lock(&mce_sysfs_mutex);
2266 mce_restart(); 2298 mce_restart();
2299 mutex_unlock(&mce_sysfs_mutex);
2300
2267 return ret; 2301 return ret;
2268} 2302}
2269 2303
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 2a0f44d225fe..6da6f9cd6d2d 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -131,6 +131,9 @@ static size_t compute_container_size(u8 *data, u32 total_size)
131 return size; 131 return size;
132} 132}
133 133
134static enum ucode_state
135load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
136
134/* 137/*
135 * Early load occurs before we can vmalloc(). So we look for the microcode 138 * Early load occurs before we can vmalloc(). So we look for the microcode
136 * patch container file in initrd, traverse equivalent cpu table, look for a 139 * patch container file in initrd, traverse equivalent cpu table, look for a
@@ -438,7 +441,7 @@ int __init save_microcode_in_initrd_amd(void)
438 eax = cpuid_eax(0x00000001); 441 eax = cpuid_eax(0x00000001);
439 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); 442 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
440 443
441 ret = load_microcode_amd(smp_processor_id(), eax, container, container_size); 444 ret = load_microcode_amd(true, eax, container, container_size);
442 if (ret != UCODE_OK) 445 if (ret != UCODE_OK)
443 retval = -EINVAL; 446 retval = -EINVAL;
444 447
@@ -854,7 +857,8 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
854 return UCODE_OK; 857 return UCODE_OK;
855} 858}
856 859
857enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size) 860static enum ucode_state
861load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
858{ 862{
859 enum ucode_state ret; 863 enum ucode_state ret;
860 864
@@ -868,8 +872,8 @@ enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t s
868 872
869#ifdef CONFIG_X86_32 873#ifdef CONFIG_X86_32
870 /* save BSP's matching patch for early load */ 874 /* save BSP's matching patch for early load */
871 if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { 875 if (save) {
872 struct ucode_patch *p = find_patch(cpu); 876 struct ucode_patch *p = find_patch(0);
873 if (p) { 877 if (p) {
874 memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); 878 memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
875 memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), 879 memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
@@ -901,11 +905,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
901{ 905{
902 char fw_name[36] = "amd-ucode/microcode_amd.bin"; 906 char fw_name[36] = "amd-ucode/microcode_amd.bin";
903 struct cpuinfo_x86 *c = &cpu_data(cpu); 907 struct cpuinfo_x86 *c = &cpu_data(cpu);
908 bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
904 enum ucode_state ret = UCODE_NFOUND; 909 enum ucode_state ret = UCODE_NFOUND;
905 const struct firmware *fw; 910 const struct firmware *fw;
906 911
907 /* reload ucode container only on the boot cpu */ 912 /* reload ucode container only on the boot cpu */
908 if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index) 913 if (!refresh_fw || !bsp)
909 return UCODE_OK; 914 return UCODE_OK;
910 915
911 if (c->x86 >= 0x15) 916 if (c->x86 >= 0x15)
@@ -922,7 +927,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
922 goto fw_release; 927 goto fw_release;
923 } 928 }
924 929
925 ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size); 930 ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
926 931
927 fw_release: 932 fw_release:
928 release_firmware(fw); 933 release_firmware(fw);
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index b3e94ef461fd..ce5f8a2e7ae6 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -44,7 +44,7 @@
44 44
45static struct microcode_ops *microcode_ops; 45static struct microcode_ops *microcode_ops;
46 46
47static bool dis_ucode_ldr; 47static bool dis_ucode_ldr = true;
48 48
49static int __init disable_loader(char *str) 49static int __init disable_loader(char *str)
50{ 50{
@@ -81,6 +81,7 @@ struct cpu_info_ctx {
81 81
82static bool __init check_loader_disabled_bsp(void) 82static bool __init check_loader_disabled_bsp(void)
83{ 83{
84 u32 a, b, c, d;
84#ifdef CONFIG_X86_32 85#ifdef CONFIG_X86_32
85 const char *cmdline = (const char *)__pa_nodebug(boot_command_line); 86 const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
86 const char *opt = "dis_ucode_ldr"; 87 const char *opt = "dis_ucode_ldr";
@@ -93,8 +94,20 @@ static bool __init check_loader_disabled_bsp(void)
93 bool *res = &dis_ucode_ldr; 94 bool *res = &dis_ucode_ldr;
94#endif 95#endif
95 96
96 if (cmdline_find_option_bool(cmdline, option)) 97 a = 1;
97 *res = true; 98 c = 0;
99 native_cpuid(&a, &b, &c, &d);
100
101 /*
102 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
103 * completely accurate as xen pv guests don't see that CPUID bit set but
104 * that's good enough as they don't land on the BSP path anyway.
105 */
106 if (c & BIT(31))
107 return *res;
108
109 if (cmdline_find_option_bool(cmdline, option) <= 0)
110 *res = false;
98 111
99 return *res; 112 return *res;
100} 113}
@@ -122,9 +135,7 @@ void __init load_ucode_bsp(void)
122{ 135{
123 int vendor; 136 int vendor;
124 unsigned int family; 137 unsigned int family;
125 138 bool intel = true;
126 if (check_loader_disabled_bsp())
127 return;
128 139
129 if (!have_cpuid_p()) 140 if (!have_cpuid_p())
130 return; 141 return;
@@ -134,16 +145,27 @@ void __init load_ucode_bsp(void)
134 145
135 switch (vendor) { 146 switch (vendor) {
136 case X86_VENDOR_INTEL: 147 case X86_VENDOR_INTEL:
137 if (family >= 6) 148 if (family < 6)
138 load_ucode_intel_bsp(); 149 return;
139 break; 150 break;
151
140 case X86_VENDOR_AMD: 152 case X86_VENDOR_AMD:
141 if (family >= 0x10) 153 if (family < 0x10)
142 load_ucode_amd_bsp(family); 154 return;
155 intel = false;
143 break; 156 break;
157
144 default: 158 default:
145 break; 159 return;
146 } 160 }
161
162 if (check_loader_disabled_bsp())
163 return;
164
165 if (intel)
166 load_ucode_intel_bsp();
167 else
168 load_ucode_amd_bsp(family);
147} 169}
148 170
149static bool check_loader_disabled_ap(void) 171static bool check_loader_disabled_ap(void)
@@ -162,9 +184,6 @@ void load_ucode_ap(void)
162 if (check_loader_disabled_ap()) 184 if (check_loader_disabled_ap())
163 return; 185 return;
164 186
165 if (!have_cpuid_p())
166 return;
167
168 vendor = x86_vendor(); 187 vendor = x86_vendor();
169 family = x86_family(); 188 family = x86_family();
170 189
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index b428a8174be1..2f38a99cdb98 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -39,6 +39,9 @@
39#include <asm/setup.h> 39#include <asm/setup.h>
40#include <asm/msr.h> 40#include <asm/msr.h>
41 41
42/* last level cache size per core */
43static int llc_size_per_core;
44
42static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT]; 45static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
43static struct mc_saved_data { 46static struct mc_saved_data {
44 unsigned int mc_saved_count; 47 unsigned int mc_saved_count;
@@ -996,15 +999,18 @@ static bool is_blacklisted(unsigned int cpu)
996 999
997 /* 1000 /*
998 * Late loading on model 79 with microcode revision less than 0x0b000021 1001 * Late loading on model 79 with microcode revision less than 0x0b000021
999 * may result in a system hang. This behavior is documented in item 1002 * and LLC size per core bigger than 2.5MB may result in a system hang.
1000 * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family). 1003 * This behavior is documented in item BDF90, #334165 (Intel Xeon
1004 * Processor E7-8800/4800 v4 Product Family).
1001 */ 1005 */
1002 if (c->x86 == 6 && 1006 if (c->x86 == 6 &&
1003 c->x86_model == 79 && 1007 c->x86_model == 79 &&
1004 c->x86_mask == 0x01 && 1008 c->x86_mask == 0x01 &&
1009 llc_size_per_core > 2621440 &&
1005 c->microcode < 0x0b000021) { 1010 c->microcode < 0x0b000021) {
1006 pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); 1011 pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
1007 pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); 1012 pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
1013 return true;
1008 } 1014 }
1009 1015
1010 return false; 1016 return false;
@@ -1067,6 +1073,15 @@ static struct microcode_ops microcode_intel_ops = {
1067 .microcode_fini_cpu = microcode_fini_cpu, 1073 .microcode_fini_cpu = microcode_fini_cpu,
1068}; 1074};
1069 1075
1076static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
1077{
1078 u64 llc_size = c->x86_cache_size * 1024ULL;
1079
1080 do_div(llc_size, c->x86_max_cores);
1081
1082 return (int)llc_size;
1083}
1084
1070struct microcode_ops * __init init_intel_microcode(void) 1085struct microcode_ops * __init init_intel_microcode(void)
1071{ 1086{
1072 struct cpuinfo_x86 *c = &boot_cpu_data; 1087 struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -1077,6 +1092,8 @@ struct microcode_ops * __init init_intel_microcode(void)
1077 return NULL; 1092 return NULL;
1078 } 1093 }
1079 1094
1095 llc_size_per_core = calc_llc_size_per_core(c);
1096
1080 return &microcode_intel_ops; 1097 return &microcode_intel_ops;
1081} 1098}
1082 1099
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
index 3f20710a5b23..6988c74409a8 100644
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -1,6 +1,6 @@
1#!/bin/sh 1#!/bin/sh
2# 2#
3# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeature.h 3# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
4# 4#
5 5
6IN=$1 6IN=$1
@@ -49,8 +49,8 @@ dump_array()
49trap 'rm "$OUT"' EXIT 49trap 'rm "$OUT"' EXIT
50 50
51( 51(
52 echo "#ifndef _ASM_X86_CPUFEATURE_H" 52 echo "#ifndef _ASM_X86_CPUFEATURES_H"
53 echo "#include <asm/cpufeature.h>" 53 echo "#include <asm/cpufeatures.h>"
54 echo "#endif" 54 echo "#endif"
55 echo "" 55 echo ""
56 56
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b5624fafa44a..136ae86f4f5f 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -349,7 +349,7 @@ static void get_fixed_ranges(mtrr_type *frs)
349 349
350void mtrr_save_fixed_ranges(void *info) 350void mtrr_save_fixed_ranges(void *info)
351{ 351{
352 if (cpu_has_mtrr) 352 if (boot_cpu_has(X86_FEATURE_MTRR))
353 get_fixed_ranges(mtrr_state.fixed_ranges); 353 get_fixed_ranges(mtrr_state.fixed_ranges);
354} 354}
355 355
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index fa77ac8291f0..49bd700d9b7f 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -47,7 +47,7 @@
47#include <linux/smp.h> 47#include <linux/smp.h>
48#include <linux/syscore_ops.h> 48#include <linux/syscore_ops.h>
49 49
50#include <asm/processor.h> 50#include <asm/cpufeature.h>
51#include <asm/e820.h> 51#include <asm/e820.h>
52#include <asm/mtrr.h> 52#include <asm/mtrr.h>
53#include <asm/msr.h> 53#include <asm/msr.h>
@@ -682,7 +682,7 @@ void __init mtrr_bp_init(void)
682 682
683 phys_addr = 32; 683 phys_addr = 32;
684 684
685 if (cpu_has_mtrr) { 685 if (boot_cpu_has(X86_FEATURE_MTRR)) {
686 mtrr_if = &generic_mtrr_ops; 686 mtrr_if = &generic_mtrr_ops;
687 size_or_mask = SIZE_OR_MASK_BITS(36); 687 size_or_mask = SIZE_OR_MASK_BITS(36);
688 size_and_mask = 0x00f00000; 688 size_and_mask = 0x00f00000;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 5b2f2306fbcc..fbf2edc3eb35 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -25,6 +25,7 @@
25#include <linux/cpu.h> 25#include <linux/cpu.h>
26#include <linux/bitops.h> 26#include <linux/bitops.h>
27#include <linux/device.h> 27#include <linux/device.h>
28#include <linux/nospec.h>
28 29
29#include <asm/apic.h> 30#include <asm/apic.h>
30#include <asm/stacktrace.h> 31#include <asm/stacktrace.h>
@@ -188,8 +189,8 @@ static void release_pmc_hardware(void) {}
188 189
189static bool check_hw_exists(void) 190static bool check_hw_exists(void)
190{ 191{
191 u64 val, val_fail, val_new= ~0; 192 u64 val, val_fail = -1, val_new= ~0;
192 int i, reg, reg_fail, ret = 0; 193 int i, reg, reg_fail = -1, ret = 0;
193 int bios_fail = 0; 194 int bios_fail = 0;
194 int reg_safe = -1; 195 int reg_safe = -1;
195 196
@@ -297,17 +298,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
297 298
298 config = attr->config; 299 config = attr->config;
299 300
300 cache_type = (config >> 0) & 0xff; 301 cache_type = (config >> 0) & 0xff;
301 if (cache_type >= PERF_COUNT_HW_CACHE_MAX) 302 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
302 return -EINVAL; 303 return -EINVAL;
304 cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
303 305
304 cache_op = (config >> 8) & 0xff; 306 cache_op = (config >> 8) & 0xff;
305 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) 307 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
306 return -EINVAL; 308 return -EINVAL;
309 cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
307 310
308 cache_result = (config >> 16) & 0xff; 311 cache_result = (config >> 16) & 0xff;
309 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) 312 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
310 return -EINVAL; 313 return -EINVAL;
314 cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
311 315
312 val = hw_cache_event_ids[cache_type][cache_op][cache_result]; 316 val = hw_cache_event_ids[cache_type][cache_op][cache_result];
313 317
@@ -404,6 +408,8 @@ int x86_setup_perfctr(struct perf_event *event)
404 if (attr->config >= x86_pmu.max_events) 408 if (attr->config >= x86_pmu.max_events)
405 return -EINVAL; 409 return -EINVAL;
406 410
411 attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
412
407 /* 413 /*
408 * The generic map: 414 * The generic map:
409 */ 415 */
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 1cee5d2d7ece..3ea177cb7366 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -160,7 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
160 if (offset) 160 if (offset)
161 return offset; 161 return offset;
162 162
163 if (!cpu_has_perfctr_core) 163 if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
164 offset = index; 164 offset = index;
165 else 165 else
166 offset = index << 1; 166 offset = index << 1;
@@ -652,7 +652,7 @@ static __initconst const struct x86_pmu amd_pmu = {
652 652
653static int __init amd_core_pmu_init(void) 653static int __init amd_core_pmu_init(void)
654{ 654{
655 if (!cpu_has_perfctr_core) 655 if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
656 return 0; 656 return 0;
657 657
658 switch (boot_cpu_data.x86) { 658 switch (boot_cpu_data.x86) {
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index cc6cedb8f25d..49742746a6c9 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -523,10 +523,10 @@ static int __init amd_uncore_init(void)
523 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) 523 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
524 goto fail_nodev; 524 goto fail_nodev;
525 525
526 if (!cpu_has_topoext) 526 if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
527 goto fail_nodev; 527 goto fail_nodev;
528 528
529 if (cpu_has_perfctr_nb) { 529 if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
530 amd_uncore_nb = alloc_percpu(struct amd_uncore *); 530 amd_uncore_nb = alloc_percpu(struct amd_uncore *);
531 if (!amd_uncore_nb) { 531 if (!amd_uncore_nb) {
532 ret = -ENOMEM; 532 ret = -ENOMEM;
@@ -540,7 +540,7 @@ static int __init amd_uncore_init(void)
540 ret = 0; 540 ret = 0;
541 } 541 }
542 542
543 if (cpu_has_perfctr_l2) { 543 if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
544 amd_uncore_l2 = alloc_percpu(struct amd_uncore *); 544 amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
545 if (!amd_uncore_l2) { 545 if (!amd_uncore_l2) {
546 ret = -ENOMEM; 546 ret = -ENOMEM;
@@ -583,10 +583,11 @@ fail_online:
583 583
584 /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */ 584 /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
585 amd_uncore_nb = amd_uncore_l2 = NULL; 585 amd_uncore_nb = amd_uncore_l2 = NULL;
586 if (cpu_has_perfctr_l2) 586
587 if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
587 perf_pmu_unregister(&amd_l2_pmu); 588 perf_pmu_unregister(&amd_l2_pmu);
588fail_l2: 589fail_l2:
589 if (cpu_has_perfctr_nb) 590 if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
590 perf_pmu_unregister(&amd_nb_pmu); 591 perf_pmu_unregister(&amd_nb_pmu);
591 if (amd_uncore_l2) 592 if (amd_uncore_l2)
592 free_percpu(amd_uncore_l2); 593 free_percpu(amd_uncore_l2);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 5cc2242d77c6..7b79c80ce029 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2716,7 +2716,7 @@ static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
2716 X86_CONFIG(.event=0xc0, .umask=0x01)) { 2716 X86_CONFIG(.event=0xc0, .umask=0x01)) {
2717 if (left < 128) 2717 if (left < 128)
2718 left = 128; 2718 left = 128;
2719 left &= ~0x3fu; 2719 left &= ~0x3fULL;
2720 } 2720 }
2721 return left; 2721 return left;
2722} 2722}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
index 2cad71d1b14c..5af11c46d0b9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_bts.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
@@ -22,6 +22,7 @@
22#include <linux/debugfs.h> 22#include <linux/debugfs.h>
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/coredump.h> 24#include <linux/coredump.h>
25#include <linux/kaiser.h>
25 26
26#include <asm-generic/sizes.h> 27#include <asm-generic/sizes.h>
27#include <asm/perf_event.h> 28#include <asm/perf_event.h>
@@ -67,6 +68,23 @@ static size_t buf_size(struct page *page)
67 return 1 << (PAGE_SHIFT + page_private(page)); 68 return 1 << (PAGE_SHIFT + page_private(page));
68} 69}
69 70
71static void bts_buffer_free_aux(void *data)
72{
73#ifdef CONFIG_PAGE_TABLE_ISOLATION
74 struct bts_buffer *buf = data;
75 int nbuf;
76
77 for (nbuf = 0; nbuf < buf->nr_bufs; nbuf++) {
78 struct page *page = buf->buf[nbuf].page;
79 void *kaddr = page_address(page);
80 size_t page_size = buf_size(page);
81
82 kaiser_remove_mapping((unsigned long)kaddr, page_size);
83 }
84#endif
85 kfree(data);
86}
87
70static void * 88static void *
71bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite) 89bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
72{ 90{
@@ -103,29 +121,33 @@ bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
103 buf->real_size = size - size % BTS_RECORD_SIZE; 121 buf->real_size = size - size % BTS_RECORD_SIZE;
104 122
105 for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) { 123 for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
106 unsigned int __nr_pages; 124 void *kaddr = pages[pg];
125 size_t page_size;
126
127 page = virt_to_page(kaddr);
128 page_size = buf_size(page);
129
130 if (kaiser_add_mapping((unsigned long)kaddr,
131 page_size, __PAGE_KERNEL) < 0) {
132 buf->nr_bufs = nbuf;
133 bts_buffer_free_aux(buf);
134 return NULL;
135 }
107 136
108 page = virt_to_page(pages[pg]);
109 __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
110 buf->buf[nbuf].page = page; 137 buf->buf[nbuf].page = page;
111 buf->buf[nbuf].offset = offset; 138 buf->buf[nbuf].offset = offset;
112 buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0); 139 buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
113 buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement; 140 buf->buf[nbuf].size = page_size - buf->buf[nbuf].displacement;
114 pad = buf->buf[nbuf].size % BTS_RECORD_SIZE; 141 pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
115 buf->buf[nbuf].size -= pad; 142 buf->buf[nbuf].size -= pad;
116 143
117 pg += __nr_pages; 144 pg += page_size >> PAGE_SHIFT;
118 offset += __nr_pages << PAGE_SHIFT; 145 offset += page_size;
119 } 146 }
120 147
121 return buf; 148 return buf;
122} 149}
123 150
124static void bts_buffer_free_aux(void *data)
125{
126 kfree(data);
127}
128
129static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx) 151static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
130{ 152{
131 return buf->buf[idx].offset + buf->buf[idx].displacement; 153 return buf->buf[idx].offset + buf->buf[idx].displacement;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cstate.c b/arch/x86/kernel/cpu/perf_event_intel_cstate.c
index 75a38b5a2e26..5b8c90935270 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cstate.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cstate.c
@@ -88,6 +88,7 @@
88#include <linux/module.h> 88#include <linux/module.h>
89#include <linux/slab.h> 89#include <linux/slab.h>
90#include <linux/perf_event.h> 90#include <linux/perf_event.h>
91#include <linux/nospec.h>
91#include <asm/cpu_device_id.h> 92#include <asm/cpu_device_id.h>
92#include "perf_event.h" 93#include "perf_event.h"
93 94
@@ -409,6 +410,7 @@ static int cstate_pmu_event_init(struct perf_event *event)
409 } else if (event->pmu == &cstate_pkg_pmu) { 410 } else if (event->pmu == &cstate_pkg_pmu) {
410 if (cfg >= PERF_CSTATE_PKG_EVENT_MAX) 411 if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
411 return -EINVAL; 412 return -EINVAL;
413 cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
412 if (!pkg_msr[cfg].attr) 414 if (!pkg_msr[cfg].attr)
413 return -EINVAL; 415 return -EINVAL;
414 event->hw.event_base = pkg_msr[cfg].msr; 416 event->hw.event_base = pkg_msr[cfg].msr;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 61215a69b03d..b22e9c4dd111 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -229,7 +229,7 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e
229 u64 prev_count, new_count, delta; 229 u64 prev_count, new_count, delta;
230 int shift; 230 int shift;
231 231
232 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED) 232 if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
233 shift = 64 - uncore_fixed_ctr_bits(box); 233 shift = 64 - uncore_fixed_ctr_bits(box);
234 else 234 else
235 shift = 64 - uncore_perf_ctr_bits(box); 235 shift = 64 - uncore_perf_ctr_bits(box);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
index 2749965afed0..83cadc2605a7 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
@@ -240,7 +240,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
240{ 240{
241 struct hw_perf_event *hwc = &event->hw; 241 struct hw_perf_event *hwc = &event->hw;
242 242
243 if (hwc->idx >= UNCORE_PMC_IDX_FIXED) 243 if (hwc->idx == UNCORE_PMC_IDX_FIXED)
244 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); 244 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
245 else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) 245 else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
246 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); 246 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c
index ec863b9a9f78..067427384a63 100644
--- a/arch/x86/kernel/cpu/perf_event_msr.c
+++ b/arch/x86/kernel/cpu/perf_event_msr.c
@@ -1,4 +1,5 @@
1#include <linux/perf_event.h> 1#include <linux/perf_event.h>
2#include <linux/nospec.h>
2 3
3enum perf_msr_id { 4enum perf_msr_id {
4 PERF_MSR_TSC = 0, 5 PERF_MSR_TSC = 0,
@@ -115,9 +116,6 @@ static int msr_event_init(struct perf_event *event)
115 if (event->attr.type != event->pmu->type) 116 if (event->attr.type != event->pmu->type)
116 return -ENOENT; 117 return -ENOENT;
117 118
118 if (cfg >= PERF_MSR_EVENT_MAX)
119 return -EINVAL;
120
121 /* unsupported modes and filters */ 119 /* unsupported modes and filters */
122 if (event->attr.exclude_user || 120 if (event->attr.exclude_user ||
123 event->attr.exclude_kernel || 121 event->attr.exclude_kernel ||
@@ -128,6 +126,11 @@ static int msr_event_init(struct perf_event *event)
128 event->attr.sample_period) /* no sampling */ 126 event->attr.sample_period) /* no sampling */
129 return -EINVAL; 127 return -EINVAL;
130 128
129 if (cfg >= PERF_MSR_EVENT_MAX)
130 return -EINVAL;
131
132 cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
133
131 if (!msr[cfg].attr) 134 if (!msr[cfg].attr)
132 return -EINVAL; 135 return -EINVAL;
133 136
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 18ca99f2798b..935225c0375f 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -87,8 +87,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
87 } 87 }
88 88
89 /* Cache size */ 89 /* Cache size */
90 if (c->x86_cache_size >= 0) 90 if (c->x86_cache_size)
91 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); 91 seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size);
92 92
93 show_cpuinfo_core(m, c, cpu); 93 show_cpuinfo_core(m, c, cpu);
94 show_cpuinfo_misc(m, c); 94 show_cpuinfo_misc(m, c);
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 608fb26c7254..8cb57df9398d 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,32 +31,12 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
31 const struct cpuid_bit *cb; 31 const struct cpuid_bit *cb;
32 32
33 static const struct cpuid_bit cpuid_bits[] = { 33 static const struct cpuid_bit cpuid_bits[] = {
34 { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
35 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
37 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
38 { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 },
39 { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 },
40 { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 },
41 { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 },
42 { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 },
43 { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },
44 { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 }, 34 { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
45 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, 35 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
46 { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, 36 { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
47 { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, 37 { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
48 { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 }, 38 { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 },
49 { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 }, 39 { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 },
50 { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 },
51 { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 },
52 { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 },
53 { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 },
54 { X86_FEATURE_TSCRATEMSR, CR_EDX, 4, 0x8000000a, 0 },
55 { X86_FEATURE_VMCBCLEAN, CR_EDX, 5, 0x8000000a, 0 },
56 { X86_FEATURE_FLUSHBYASID, CR_EDX, 6, 0x8000000a, 0 },
57 { X86_FEATURE_DECODEASSISTS, CR_EDX, 7, 0x8000000a, 0 },
58 { X86_FEATURE_PAUSEFILTER, CR_EDX,10, 0x8000000a, 0 },
59 { X86_FEATURE_PFTHRESHOLD, CR_EDX,12, 0x8000000a, 0 },
60 { 0, 0, 0, 0, 0 } 40 { 0, 0, 0, 0, 0 }
61 }; 41 };
62 42
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 3fa0e5ad86b4..a19a663282b5 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -1,6 +1,6 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/mm.h> 2#include <linux/mm.h>
3#include <asm/processor.h> 3#include <asm/cpufeature.h>
4#include <asm/msr.h> 4#include <asm/msr.h>
5#include "cpu.h" 5#include "cpu.h"
6 6
@@ -12,7 +12,7 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
12 xlvl = cpuid_eax(0x80860000); 12 xlvl = cpuid_eax(0x80860000);
13 if ((xlvl & 0xffff0000) == 0x80860000) { 13 if ((xlvl & 0xffff0000) == 0x80860000) {
14 if (xlvl >= 0x80860001) 14 if (xlvl >= 0x80860001)
15 c->x86_capability[2] = cpuid_edx(0x80860001); 15 c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
16 } 16 }
17} 17}
18 18
@@ -82,7 +82,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
82 /* Unhide possibly hidden capability flags */ 82 /* Unhide possibly hidden capability flags */
83 rdmsr(0x80860004, cap_mask, uk); 83 rdmsr(0x80860004, cap_mask, uk);
84 wrmsr(0x80860004, ~0, uk); 84 wrmsr(0x80860004, ~0, uk);
85 c->x86_capability[0] = cpuid_edx(0x00000001); 85 c->x86_capability[CPUID_1_EDX] = cpuid_edx(0x00000001);
86 wrmsr(0x80860004, cap_mask, uk); 86 wrmsr(0x80860004, cap_mask, uk);
87 87
88 /* All Transmeta CPUs have a constant TSC */ 88 /* All Transmeta CPUs have a constant TSC */
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 1f4acd68b98b..74b8dcd1bbdc 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -11,6 +11,7 @@
11#include <linux/of_address.h> 11#include <linux/of_address.h>
12#include <linux/of_platform.h> 12#include <linux/of_platform.h>
13#include <linux/of_irq.h> 13#include <linux/of_irq.h>
14#include <linux/libfdt.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/pci.h> 16#include <linux/pci.h>
16#include <linux/of_pci.h> 17#include <linux/of_pci.h>
@@ -199,19 +200,22 @@ static struct of_ioapic_type of_ioapic_type[] =
199static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, 200static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
200 unsigned int nr_irqs, void *arg) 201 unsigned int nr_irqs, void *arg)
201{ 202{
202 struct of_phandle_args *irq_data = (void *)arg; 203 struct irq_fwspec *fwspec = (struct irq_fwspec *)arg;
203 struct of_ioapic_type *it; 204 struct of_ioapic_type *it;
204 struct irq_alloc_info tmp; 205 struct irq_alloc_info tmp;
206 int type_index;
205 207
206 if (WARN_ON(irq_data->args_count < 2)) 208 if (WARN_ON(fwspec->param_count < 2))
207 return -EINVAL; 209 return -EINVAL;
208 if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type)) 210
211 type_index = fwspec->param[1];
212 if (type_index >= ARRAY_SIZE(of_ioapic_type))
209 return -EINVAL; 213 return -EINVAL;
210 214
211 it = &of_ioapic_type[irq_data->args[1]]; 215 it = &of_ioapic_type[type_index];
212 ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity); 216 ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
213 tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain)); 217 tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
214 tmp.ioapic_pin = irq_data->args[0]; 218 tmp.ioapic_pin = fwspec->param[0];
215 219
216 return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp); 220 return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp);
217} 221}
@@ -276,14 +280,15 @@ static void __init x86_flattree_get_config(void)
276 280
277 map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128); 281 map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128);
278 282
279 initial_boot_params = dt = early_memremap(initial_dtb, map_len); 283 dt = early_memremap(initial_dtb, map_len);
280 size = of_get_flat_dt_size(); 284 size = fdt_totalsize(dt);
281 if (map_len < size) { 285 if (map_len < size) {
282 early_memunmap(dt, map_len); 286 early_memunmap(dt, map_len);
283 initial_boot_params = dt = early_memremap(initial_dtb, size); 287 dt = early_memremap(initial_dtb, size);
284 map_len = size; 288 map_len = size;
285 } 289 }
286 290
291 early_init_dt_verify(dt);
287 unflatten_and_copy_device_tree(); 292 unflatten_and_copy_device_tree();
288 early_memunmap(dt, map_len); 293 early_memunmap(dt, map_len);
289} 294}
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 52a2526c3fbe..19bc19d5e174 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -24,6 +24,7 @@
24#include <asm/e820.h> 24#include <asm/e820.h>
25#include <asm/proto.h> 25#include <asm/proto.h>
26#include <asm/setup.h> 26#include <asm/setup.h>
27#include <asm/cpufeature.h>
27 28
28/* 29/*
29 * The e820 map is the map that gets modified e.g. with command line parameters 30 * The e820 map is the map that gets modified e.g. with command line parameters
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index d25097c3fc1d..6aa0b519c851 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -114,6 +114,10 @@ void __kernel_fpu_begin(void)
114 kernel_fpu_disable(); 114 kernel_fpu_disable();
115 115
116 if (fpu->fpregs_active) { 116 if (fpu->fpregs_active) {
117 /*
118 * Ignore return value -- we don't care if reg state
119 * is clobbered.
120 */
117 copy_fpregs_to_fpstate(fpu); 121 copy_fpregs_to_fpstate(fpu);
118 } else { 122 } else {
119 this_cpu_write(fpu_fpregs_owner_ctx, NULL); 123 this_cpu_write(fpu_fpregs_owner_ctx, NULL);
@@ -189,8 +193,12 @@ void fpu__save(struct fpu *fpu)
189 193
190 preempt_disable(); 194 preempt_disable();
191 if (fpu->fpregs_active) { 195 if (fpu->fpregs_active) {
192 if (!copy_fpregs_to_fpstate(fpu)) 196 if (!copy_fpregs_to_fpstate(fpu)) {
193 fpregs_deactivate(fpu); 197 if (use_eager_fpu())
198 copy_kernel_to_fpregs(&fpu->state);
199 else
200 fpregs_deactivate(fpu);
201 }
194 } 202 }
195 preempt_enable(); 203 preempt_enable();
196} 204}
@@ -259,7 +267,11 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
259 preempt_disable(); 267 preempt_disable();
260 if (!copy_fpregs_to_fpstate(dst_fpu)) { 268 if (!copy_fpregs_to_fpstate(dst_fpu)) {
261 memcpy(&src_fpu->state, &dst_fpu->state, xstate_size); 269 memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
262 fpregs_deactivate(src_fpu); 270
271 if (use_eager_fpu())
272 copy_kernel_to_fpregs(&src_fpu->state);
273 else
274 fpregs_deactivate(src_fpu);
263 } 275 }
264 preempt_enable(); 276 preempt_enable();
265} 277}
@@ -409,8 +421,10 @@ static inline void copy_init_fpstate_to_fpregs(void)
409{ 421{
410 if (use_xsave()) 422 if (use_xsave())
411 copy_kernel_to_xregs(&init_fpstate.xsave, -1); 423 copy_kernel_to_xregs(&init_fpstate.xsave, -1);
412 else 424 else if (static_cpu_has(X86_FEATURE_FXSR))
413 copy_kernel_to_fxregs(&init_fpstate.fxsave); 425 copy_kernel_to_fxregs(&init_fpstate.fxsave);
426 else
427 copy_kernel_to_fregs(&init_fpstate.fsave);
414} 428}
415 429
416/* 430/*
@@ -423,7 +437,7 @@ void fpu__clear(struct fpu *fpu)
423{ 437{
424 WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */ 438 WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
425 439
426 if (!use_eager_fpu()) { 440 if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
427 /* FPU state will be reallocated lazily at the first use. */ 441 /* FPU state will be reallocated lazily at the first use. */
428 fpu__drop(fpu); 442 fpu__drop(fpu);
429 } else { 443 } else {
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 1011c05b1bd5..954517285fa2 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -3,8 +3,11 @@
3 */ 3 */
4#include <asm/fpu/internal.h> 4#include <asm/fpu/internal.h>
5#include <asm/tlbflush.h> 5#include <asm/tlbflush.h>
6#include <asm/setup.h>
7#include <asm/cmdline.h>
6 8
7#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/init.h>
8 11
9/* 12/*
10 * Initialize the TS bit in CR0 according to the style of context-switches 13 * Initialize the TS bit in CR0 according to the style of context-switches
@@ -12,10 +15,7 @@
12 */ 15 */
13static void fpu__init_cpu_ctx_switch(void) 16static void fpu__init_cpu_ctx_switch(void)
14{ 17{
15 if (!cpu_has_eager_fpu) 18 clts();
16 stts();
17 else
18 clts();
19} 19}
20 20
21/* 21/*
@@ -75,13 +75,15 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
75 cr0 &= ~(X86_CR0_TS | X86_CR0_EM); 75 cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
76 write_cr0(cr0); 76 write_cr0(cr0);
77 77
78 asm volatile("fninit ; fnstsw %0 ; fnstcw %1" 78 if (!test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
79 : "+m" (fsw), "+m" (fcw)); 79 asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
80 : "+m" (fsw), "+m" (fcw));
80 81
81 if (fsw == 0 && (fcw & 0x103f) == 0x003f) 82 if (fsw == 0 && (fcw & 0x103f) == 0x003f)
82 set_cpu_cap(c, X86_FEATURE_FPU); 83 set_cpu_cap(c, X86_FEATURE_FPU);
83 else 84 else
84 clear_cpu_cap(c, X86_FEATURE_FPU); 85 clear_cpu_cap(c, X86_FEATURE_FPU);
86 }
85 87
86#ifndef CONFIG_MATH_EMULATION 88#ifndef CONFIG_MATH_EMULATION
87 if (!cpu_has_fpu) { 89 if (!cpu_has_fpu) {
@@ -130,7 +132,7 @@ static void __init fpu__init_system_generic(void)
130 * Set up the legacy init FPU context. (xstate init might overwrite this 132 * Set up the legacy init FPU context. (xstate init might overwrite this
131 * with a more modern format, if the CPU supports it.) 133 * with a more modern format, if the CPU supports it.)
132 */ 134 */
133 fpstate_init_fxstate(&init_fpstate.fxsave); 135 fpstate_init(&init_fpstate);
134 136
135 fpu__init_system_mxcsr(); 137 fpu__init_system_mxcsr();
136} 138}
@@ -230,53 +232,16 @@ static void __init fpu__init_system_xstate_size_legacy(void)
230} 232}
231 233
232/* 234/*
233 * FPU context switching strategies: 235 * Find supported xfeatures based on cpu features and command-line input.
234 * 236 * This must be called after fpu__init_parse_early_param() is called and
235 * Against popular belief, we don't do lazy FPU saves, due to the 237 * xfeatures_mask is enumerated.
236 * task migration complications it brings on SMP - we only do
237 * lazy FPU restores.
238 *
239 * 'lazy' is the traditional strategy, which is based on setting
240 * CR0::TS to 1 during context-switch (instead of doing a full
241 * restore of the FPU state), which causes the first FPU instruction
242 * after the context switch (whenever it is executed) to fault - at
243 * which point we lazily restore the FPU state into FPU registers.
244 *
245 * Tasks are of course under no obligation to execute FPU instructions,
246 * so it can easily happen that another context-switch occurs without
247 * a single FPU instruction being executed. If we eventually switch
248 * back to the original task (that still owns the FPU) then we have
249 * not only saved the restores along the way, but we also have the
250 * FPU ready to be used for the original task.
251 *
252 * 'eager' switching is used on modern CPUs, there we switch the FPU
253 * state during every context switch, regardless of whether the task
254 * has used FPU instructions in that time slice or not. This is done
255 * because modern FPU context saving instructions are able to optimize
256 * state saving and restoration in hardware: they can detect both
257 * unused and untouched FPU state and optimize accordingly.
258 *
259 * [ Note that even in 'lazy' mode we might optimize context switches
260 * to use 'eager' restores, if we detect that a task is using the FPU
261 * frequently. See the fpu->counter logic in fpu/internal.h for that. ]
262 */ 238 */
263static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO; 239u64 __init fpu__get_supported_xfeatures_mask(void)
264
265static int __init eager_fpu_setup(char *s)
266{ 240{
267 if (!strcmp(s, "on")) 241 return XCNTXT_MASK;
268 eagerfpu = ENABLE;
269 else if (!strcmp(s, "off"))
270 eagerfpu = DISABLE;
271 else if (!strcmp(s, "auto"))
272 eagerfpu = AUTO;
273 return 1;
274} 242}
275__setup("eagerfpu=", eager_fpu_setup);
276 243
277/* 244/* Legacy code to initialize eager fpu mode. */
278 * Pick the FPU context switching strategy:
279 */
280static void __init fpu__init_system_ctx_switch(void) 245static void __init fpu__init_system_ctx_switch(void)
281{ 246{
282 static bool on_boot_cpu = 1; 247 static bool on_boot_cpu = 1;
@@ -286,25 +251,31 @@ static void __init fpu__init_system_ctx_switch(void)
286 251
287 WARN_ON_FPU(current->thread.fpu.fpstate_active); 252 WARN_ON_FPU(current->thread.fpu.fpstate_active);
288 current_thread_info()->status = 0; 253 current_thread_info()->status = 0;
254}
289 255
290 /* Auto enable eagerfpu for xsaveopt */ 256/*
291 if (cpu_has_xsaveopt && eagerfpu != DISABLE) 257 * We parse fpu parameters early because fpu__init_system() is executed
292 eagerfpu = ENABLE; 258 * before parse_early_param().
293 259 */
294 if (xfeatures_mask & XFEATURE_MASK_EAGER) { 260static void __init fpu__init_parse_early_param(void)
295 if (eagerfpu == DISABLE) { 261{
296 pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n", 262 if (cmdline_find_option_bool(boot_command_line, "no387"))
297 xfeatures_mask & XFEATURE_MASK_EAGER); 263 setup_clear_cpu_cap(X86_FEATURE_FPU);
298 xfeatures_mask &= ~XFEATURE_MASK_EAGER; 264
299 } else { 265 if (cmdline_find_option_bool(boot_command_line, "nofxsr")) {
300 eagerfpu = ENABLE; 266 setup_clear_cpu_cap(X86_FEATURE_FXSR);
301 } 267 setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
268 setup_clear_cpu_cap(X86_FEATURE_XMM);
302 } 269 }
303 270
304 if (eagerfpu == ENABLE) 271 if (cmdline_find_option_bool(boot_command_line, "noxsave"))
305 setup_force_cpu_cap(X86_FEATURE_EAGER_FPU); 272 fpu__xstate_clear_all_cpu_caps();
273
274 if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
275 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
306 276
307 printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy"); 277 if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
278 setup_clear_cpu_cap(X86_FEATURE_XSAVES);
308} 279}
309 280
310/* 281/*
@@ -313,6 +284,7 @@ static void __init fpu__init_system_ctx_switch(void)
313 */ 284 */
314void __init fpu__init_system(struct cpuinfo_x86 *c) 285void __init fpu__init_system(struct cpuinfo_x86 *c)
315{ 286{
287 fpu__init_parse_early_param();
316 fpu__init_system_early_generic(c); 288 fpu__init_system_early_generic(c);
317 289
318 /* 290 /*
@@ -336,62 +308,3 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
336 308
337 fpu__init_system_ctx_switch(); 309 fpu__init_system_ctx_switch();
338} 310}
339
340/*
341 * Boot parameter to turn off FPU support and fall back to math-emu:
342 */
343static int __init no_387(char *s)
344{
345 setup_clear_cpu_cap(X86_FEATURE_FPU);
346 return 1;
347}
348__setup("no387", no_387);
349
350/*
351 * Disable all xstate CPU features:
352 */
353static int __init x86_noxsave_setup(char *s)
354{
355 if (strlen(s))
356 return 0;
357
358 fpu__xstate_clear_all_cpu_caps();
359
360 return 1;
361}
362__setup("noxsave", x86_noxsave_setup);
363
364/*
365 * Disable the XSAVEOPT instruction specifically:
366 */
367static int __init x86_noxsaveopt_setup(char *s)
368{
369 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
370
371 return 1;
372}
373__setup("noxsaveopt", x86_noxsaveopt_setup);
374
375/*
376 * Disable the XSAVES instruction:
377 */
378static int __init x86_noxsaves_setup(char *s)
379{
380 setup_clear_cpu_cap(X86_FEATURE_XSAVES);
381
382 return 1;
383}
384__setup("noxsaves", x86_noxsaves_setup);
385
386/*
387 * Disable FX save/restore and SSE support:
388 */
389static int __init x86_nofxsr_setup(char *s)
390{
391 setup_clear_cpu_cap(X86_FEATURE_FXSR);
392 setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
393 setup_clear_cpu_cap(X86_FEATURE_XMM);
394
395 return 1;
396}
397__setup("nofxsr", x86_nofxsr_setup);
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 70fc312221fc..3fa200ecca62 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -632,8 +632,7 @@ void __init fpu__init_system_xstate(void)
632 BUG(); 632 BUG();
633 } 633 }
634 634
635 /* Support only the state known to the OS: */ 635 xfeatures_mask &= fpu__get_supported_xfeatures_mask();
636 xfeatures_mask = xfeatures_mask & XCNTXT_MASK;
637 636
638 /* Enable xstate instructions to be able to continue with initialization: */ 637 /* Enable xstate instructions to be able to continue with initialization: */
639 fpu__init_cpu_xstate(); 638 fpu__init_cpu_xstate();
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 8f1a3f443f7d..1c0b49fd6365 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -19,7 +19,7 @@
19#include <asm/setup.h> 19#include <asm/setup.h>
20#include <asm/processor-flags.h> 20#include <asm/processor-flags.h>
21#include <asm/msr-index.h> 21#include <asm/msr-index.h>
22#include <asm/cpufeature.h> 22#include <asm/cpufeatures.h>
23#include <asm/percpu.h> 23#include <asm/percpu.h>
24#include <asm/nops.h> 24#include <asm/nops.h>
25#include <asm/bootparam.h> 25#include <asm/bootparam.h>
@@ -669,14 +669,17 @@ __PAGE_ALIGNED_BSS
669initial_pg_pmd: 669initial_pg_pmd:
670 .fill 1024*KPMDS,4,0 670 .fill 1024*KPMDS,4,0
671#else 671#else
672ENTRY(initial_page_table) 672.globl initial_page_table
673initial_page_table:
673 .fill 1024,4,0 674 .fill 1024,4,0
674#endif 675#endif
675initial_pg_fixmap: 676initial_pg_fixmap:
676 .fill 1024,4,0 677 .fill 1024,4,0
677ENTRY(empty_zero_page) 678.globl empty_zero_page
679empty_zero_page:
678 .fill 4096,1,0 680 .fill 4096,1,0
679ENTRY(swapper_pg_dir) 681.globl swapper_pg_dir
682swapper_pg_dir:
680 .fill 1024,4,0 683 .fill 1024,4,0
681 684
682/* 685/*
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 4034e905741a..734ba1d0f686 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -76,9 +76,7 @@ startup_64:
76 subq $_text - __START_KERNEL_map, %rbp 76 subq $_text - __START_KERNEL_map, %rbp
77 77
78 /* Is the address not 2M aligned? */ 78 /* Is the address not 2M aligned? */
79 movq %rbp, %rax 79 testl $~PMD_PAGE_MASK, %ebp
80 andl $~PMD_PAGE_MASK, %eax
81 testl %eax, %eax
82 jnz bad_address 80 jnz bad_address
83 81
84 /* 82 /*
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index f48eb8eeefe2..3fdc1e53aaac 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -12,6 +12,7 @@
12#include <linux/pm.h> 12#include <linux/pm.h>
13#include <linux/io.h> 13#include <linux/io.h>
14 14
15#include <asm/cpufeature.h>
15#include <asm/irqdomain.h> 16#include <asm/irqdomain.h>
16#include <asm/fixmap.h> 17#include <asm/fixmap.h>
17#include <asm/hpet.h> 18#include <asm/hpet.h>
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 50a3fad5b89f..2bcfb5f2bc44 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -300,6 +300,10 @@ static int arch_build_bp_info(struct perf_event *bp)
300 return -EINVAL; 300 return -EINVAL;
301 if (bp->attr.bp_addr & (bp->attr.bp_len - 1)) 301 if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
302 return -EINVAL; 302 return -EINVAL;
303
304 if (!boot_cpu_has(X86_FEATURE_BPEXT))
305 return -EOPNOTSUPP;
306
303 /* 307 /*
304 * It's impossible to use a range breakpoint to fake out 308 * It's impossible to use a range breakpoint to fake out
305 * user vs kernel detection because bp_len - 1 can't 309 * user vs kernel detection because bp_len - 1 can't
@@ -307,8 +311,6 @@ static int arch_build_bp_info(struct perf_event *bp)
307 * breakpoints, then we'll have to check for kprobe-blacklisted 311 * breakpoints, then we'll have to check for kprobe-blacklisted
308 * addresses anywhere in the range. 312 * addresses anywhere in the range.
309 */ 313 */
310 if (!cpu_has_bpext)
311 return -EOPNOTSUPP;
312 info->mask = bp->attr.bp_len - 1; 314 info->mask = bp->attr.bp_len - 1;
313 info->len = X86_BREAKPOINT_LEN_1; 315 info->len = X86_BREAKPOINT_LEN_1;
314 } 316 }
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 64341aa485ae..d40ee8a38fed 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -42,3 +42,5 @@ EXPORT_SYMBOL(empty_zero_page);
42EXPORT_SYMBOL(___preempt_schedule); 42EXPORT_SYMBOL(___preempt_schedule);
43EXPORT_SYMBOL(___preempt_schedule_notrace); 43EXPORT_SYMBOL(___preempt_schedule_notrace);
44#endif 44#endif
45
46EXPORT_SYMBOL(__sw_hweight32);
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index be22f5a2192e..4e3b8a587c88 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -418,6 +418,7 @@ struct legacy_pic default_legacy_pic = {
418}; 418};
419 419
420struct legacy_pic *legacy_pic = &default_legacy_pic; 420struct legacy_pic *legacy_pic = &default_legacy_pic;
421EXPORT_SYMBOL(legacy_pic);
421 422
422static int __init i8259A_init_ops(void) 423static int __init i8259A_init_ops(void)
423{ 424{
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
new file mode 100644
index 000000000000..3817eb748eb4
--- /dev/null
+++ b/arch/x86/kernel/irqflags.S
@@ -0,0 +1,26 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#include <asm/asm.h>
4#include <asm-generic/export.h>
5#include <linux/linkage.h>
6
7/*
8 * unsigned long native_save_fl(void)
9 */
10ENTRY(native_save_fl)
11 pushf
12 pop %_ASM_AX
13 ret
14ENDPROC(native_save_fl)
15EXPORT_SYMBOL(native_save_fl)
16
17/*
18 * void native_restore_fl(unsigned long flags)
19 * %eax/%rdi: flags
20 */
21ENTRY(native_restore_fl)
22 push %_ASM_ARG1
23 popf
24 ret
25ENDPROC(native_restore_fl)
26EXPORT_SYMBOL(native_restore_fl)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 99d293ea2b49..c6f466d6cc57 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -49,6 +49,7 @@
49#include <linux/kdebug.h> 49#include <linux/kdebug.h>
50#include <linux/kallsyms.h> 50#include <linux/kallsyms.h>
51#include <linux/ftrace.h> 51#include <linux/ftrace.h>
52#include <linux/moduleloader.h>
52 53
53#include <asm/cacheflush.h> 54#include <asm/cacheflush.h>
54#include <asm/desc.h> 55#include <asm/desc.h>
@@ -196,6 +197,8 @@ retry:
196 return (opcode != 0x62 && opcode != 0x67); 197 return (opcode != 0x62 && opcode != 0x67);
197 case 0x70: 198 case 0x70:
198 return 0; /* can't boost conditional jump */ 199 return 0; /* can't boost conditional jump */
200 case 0x90:
201 return opcode != 0x9a; /* can't boost call far */
199 case 0xc0: 202 case 0xc0:
200 /* can't boost software-interruptions */ 203 /* can't boost software-interruptions */
201 return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf; 204 return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
@@ -390,7 +393,6 @@ int __copy_instruction(u8 *dest, u8 *src)
390 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest; 393 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
391 if ((s64) (s32) newdisp != newdisp) { 394 if ((s64) (s32) newdisp != newdisp) {
392 pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp); 395 pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
393 pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
394 return 0; 396 return 0;
395 } 397 }
396 disp = (u8 *) dest + insn_offset_displacement(&insn); 398 disp = (u8 *) dest + insn_offset_displacement(&insn);
@@ -400,23 +402,48 @@ int __copy_instruction(u8 *dest, u8 *src)
400 return length; 402 return length;
401} 403}
402 404
405/* Recover page to RW mode before releasing it */
406void free_insn_page(void *page)
407{
408 set_memory_nx((unsigned long)page & PAGE_MASK, 1);
409 set_memory_rw((unsigned long)page & PAGE_MASK, 1);
410 module_memfree(page);
411}
412
413/* Prepare reljump right after instruction to boost */
414static void prepare_boost(struct kprobe *p, int length)
415{
416 if (can_boost(p->ainsn.insn, p->addr) &&
417 MAX_INSN_SIZE - length >= RELATIVEJUMP_SIZE) {
418 /*
419 * These instructions can be executed directly if it
420 * jumps back to correct address.
421 */
422 synthesize_reljump(p->ainsn.insn + length, p->addr + length);
423 p->ainsn.boostable = 1;
424 } else {
425 p->ainsn.boostable = -1;
426 }
427}
428
403static int arch_copy_kprobe(struct kprobe *p) 429static int arch_copy_kprobe(struct kprobe *p)
404{ 430{
405 int ret; 431 int len;
432
433 set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
406 434
407 /* Copy an instruction with recovering if other optprobe modifies it.*/ 435 /* Copy an instruction with recovering if other optprobe modifies it.*/
408 ret = __copy_instruction(p->ainsn.insn, p->addr); 436 len = __copy_instruction(p->ainsn.insn, p->addr);
409 if (!ret) 437 if (!len)
410 return -EINVAL; 438 return -EINVAL;
411 439
412 /* 440 /*
413 * __copy_instruction can modify the displacement of the instruction, 441 * __copy_instruction can modify the displacement of the instruction,
414 * but it doesn't affect boostable check. 442 * but it doesn't affect boostable check.
415 */ 443 */
416 if (can_boost(p->ainsn.insn, p->addr)) 444 prepare_boost(p, len);
417 p->ainsn.boostable = 0; 445
418 else 446 set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
419 p->ainsn.boostable = -1;
420 447
421 /* Check whether the instruction modifies Interrupt Flag or not */ 448 /* Check whether the instruction modifies Interrupt Flag or not */
422 p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn); 449 p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
@@ -581,8 +608,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
581 * Raise a BUG or we'll continue in an endless reentering loop 608 * Raise a BUG or we'll continue in an endless reentering loop
582 * and eventually a stack overflow. 609 * and eventually a stack overflow.
583 */ 610 */
584 printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", 611 pr_err("Unrecoverable kprobe detected.\n");
585 p->addr);
586 dump_kprobe(p); 612 dump_kprobe(p);
587 BUG(); 613 BUG();
588 default: 614 default:
@@ -879,21 +905,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
879 break; 905 break;
880 } 906 }
881 907
882 if (p->ainsn.boostable == 0) {
883 if ((regs->ip > copy_ip) &&
884 (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
885 /*
886 * These instructions can be executed directly if it
887 * jumps back to correct address.
888 */
889 synthesize_reljump((void *)regs->ip,
890 (void *)orig_ip + (regs->ip - copy_ip));
891 p->ainsn.boostable = 1;
892 } else {
893 p->ainsn.boostable = -1;
894 }
895 }
896
897 regs->ip += orig_ip - copy_ip; 908 regs->ip += orig_ip - copy_ip;
898 909
899no_change: 910no_change:
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index ea8e2b846101..7aba9d6475a5 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -370,6 +370,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
370 } 370 }
371 371
372 buf = (u8 *)op->optinsn.insn; 372 buf = (u8 *)op->optinsn.insn;
373 set_memory_rw((unsigned long)buf & PAGE_MASK, 1);
373 374
374 /* Copy instructions into the out-of-line buffer */ 375 /* Copy instructions into the out-of-line buffer */
375 ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); 376 ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
@@ -392,6 +393,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
392 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, 393 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
393 (u8 *)op->kp.addr + op->optinsn.size); 394 (u8 *)op->kp.addr + op->optinsn.size);
394 395
396 set_memory_ro((unsigned long)buf & PAGE_MASK, 1);
397
395 flush_icache_range((unsigned long) buf, 398 flush_icache_range((unsigned long) buf,
396 (unsigned long) buf + TMPL_END_IDX + 399 (unsigned long) buf + TMPL_END_IDX +
397 op->optinsn.size + RELATIVEJUMP_SIZE); 400 op->optinsn.size + RELATIVEJUMP_SIZE);
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index bc429365b72a..8bc68cfc0d33 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -119,7 +119,7 @@ static void free_ldt_struct(struct ldt_struct *ldt)
119 * we do not have to muck with descriptors here, that is 119 * we do not have to muck with descriptors here, that is
120 * done in switch_mm() as needed. 120 * done in switch_mm() as needed.
121 */ 121 */
122int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 122int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
123{ 123{
124 struct ldt_struct *new_ldt; 124 struct ldt_struct *new_ldt;
125 struct mm_struct *old_mm; 125 struct mm_struct *old_mm;
@@ -160,7 +160,7 @@ out_unlock:
160 * 160 *
161 * 64bit: Don't touch the LDT register - we're already in the next thread. 161 * 64bit: Don't touch the LDT register - we're already in the next thread.
162 */ 162 */
163void destroy_context(struct mm_struct *mm) 163void destroy_context_ldt(struct mm_struct *mm)
164{ 164{
165 free_ldt_struct(mm->context.ldt); 165 free_ldt_struct(mm->context.ldt);
166 mm->context.ldt = NULL; 166 mm->context.ldt = NULL;
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 469b23d6acc2..fd7e9937ddd6 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -71,12 +71,17 @@ static void load_segments(void)
71static void machine_kexec_free_page_tables(struct kimage *image) 71static void machine_kexec_free_page_tables(struct kimage *image)
72{ 72{
73 free_page((unsigned long)image->arch.pgd); 73 free_page((unsigned long)image->arch.pgd);
74 image->arch.pgd = NULL;
74#ifdef CONFIG_X86_PAE 75#ifdef CONFIG_X86_PAE
75 free_page((unsigned long)image->arch.pmd0); 76 free_page((unsigned long)image->arch.pmd0);
77 image->arch.pmd0 = NULL;
76 free_page((unsigned long)image->arch.pmd1); 78 free_page((unsigned long)image->arch.pmd1);
79 image->arch.pmd1 = NULL;
77#endif 80#endif
78 free_page((unsigned long)image->arch.pte0); 81 free_page((unsigned long)image->arch.pte0);
82 image->arch.pte0 = NULL;
79 free_page((unsigned long)image->arch.pte1); 83 free_page((unsigned long)image->arch.pte1);
84 image->arch.pte1 = NULL;
80} 85}
81 86
82static int machine_kexec_alloc_page_tables(struct kimage *image) 87static int machine_kexec_alloc_page_tables(struct kimage *image)
@@ -93,7 +98,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image)
93 !image->arch.pmd0 || !image->arch.pmd1 || 98 !image->arch.pmd0 || !image->arch.pmd1 ||
94#endif 99#endif
95 !image->arch.pte0 || !image->arch.pte1) { 100 !image->arch.pte0 || !image->arch.pte1) {
96 machine_kexec_free_page_tables(image);
97 return -ENOMEM; 101 return -ENOMEM;
98 } 102 }
99 return 0; 103 return 0;
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 64979821bc2e..2ddb850bceab 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -37,8 +37,11 @@ static struct kexec_file_ops *kexec_file_loaders[] = {
37static void free_transition_pgtable(struct kimage *image) 37static void free_transition_pgtable(struct kimage *image)
38{ 38{
39 free_page((unsigned long)image->arch.pud); 39 free_page((unsigned long)image->arch.pud);
40 image->arch.pud = NULL;
40 free_page((unsigned long)image->arch.pmd); 41 free_page((unsigned long)image->arch.pmd);
42 image->arch.pmd = NULL;
41 free_page((unsigned long)image->arch.pte); 43 free_page((unsigned long)image->arch.pte);
44 image->arch.pte = NULL;
42} 45}
43 46
44static int init_transition_pgtable(struct kimage *image, pgd_t *pgd) 47static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
@@ -79,7 +82,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
79 set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC)); 82 set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
80 return 0; 83 return 0;
81err: 84err:
82 free_transition_pgtable(image);
83 return result; 85 return result;
84} 86}
85 87
@@ -519,6 +521,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
519 goto overflow; 521 goto overflow;
520 break; 522 break;
521 case R_X86_64_PC32: 523 case R_X86_64_PC32:
524 case R_X86_64_PLT32:
522 value -= (u64)address; 525 value -= (u64)address;
523 *(u32 *)location = value; 526 *(u32 *)location = value;
524 break; 527 break;
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 005c03e93fc5..94779f66bf49 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -170,19 +170,28 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
170 case R_X86_64_NONE: 170 case R_X86_64_NONE:
171 break; 171 break;
172 case R_X86_64_64: 172 case R_X86_64_64:
173 if (*(u64 *)loc != 0)
174 goto invalid_relocation;
173 *(u64 *)loc = val; 175 *(u64 *)loc = val;
174 break; 176 break;
175 case R_X86_64_32: 177 case R_X86_64_32:
178 if (*(u32 *)loc != 0)
179 goto invalid_relocation;
176 *(u32 *)loc = val; 180 *(u32 *)loc = val;
177 if (val != *(u32 *)loc) 181 if (val != *(u32 *)loc)
178 goto overflow; 182 goto overflow;
179 break; 183 break;
180 case R_X86_64_32S: 184 case R_X86_64_32S:
185 if (*(s32 *)loc != 0)
186 goto invalid_relocation;
181 *(s32 *)loc = val; 187 *(s32 *)loc = val;
182 if ((s64)val != *(s32 *)loc) 188 if ((s64)val != *(s32 *)loc)
183 goto overflow; 189 goto overflow;
184 break; 190 break;
185 case R_X86_64_PC32: 191 case R_X86_64_PC32:
192 case R_X86_64_PLT32:
193 if (*(u32 *)loc != 0)
194 goto invalid_relocation;
186 val -= (u64)loc; 195 val -= (u64)loc;
187 *(u32 *)loc = val; 196 *(u32 *)loc = val;
188#if 0 197#if 0
@@ -198,6 +207,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
198 } 207 }
199 return 0; 208 return 0;
200 209
210invalid_relocation:
211 pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
212 (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
213 return -ENOEXEC;
214
201overflow: 215overflow:
202 pr_err("overflow in relocation type %d val %Lx\n", 216 pr_err("overflow in relocation type %d val %Lx\n",
203 (int)ELF64_R_TYPE(rel[i].r_info), val); 217 (int)ELF64_R_TYPE(rel[i].r_info), val);
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 113e70784854..f95ac5d435aa 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -40,7 +40,7 @@
40#include <linux/uaccess.h> 40#include <linux/uaccess.h>
41#include <linux/gfp.h> 41#include <linux/gfp.h>
42 42
43#include <asm/processor.h> 43#include <asm/cpufeature.h>
44#include <asm/msr.h> 44#include <asm/msr.h>
45 45
46static struct class *msr_class; 46static struct class *msr_class;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index f534a0e3af53..632195b41688 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -97,10 +97,12 @@ unsigned paravirt_patch_call(void *insnbuf,
97 struct branch *b = insnbuf; 97 struct branch *b = insnbuf;
98 unsigned long delta = (unsigned long)target - (addr+5); 98 unsigned long delta = (unsigned long)target - (addr+5);
99 99
100 if (tgt_clobbers & ~site_clobbers) 100 if (len < 5) {
101 return len; /* target would clobber too much for this site */ 101#ifdef CONFIG_RETPOLINE
102 if (len < 5) 102 WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
103#endif
103 return len; /* call too long for patch site */ 104 return len; /* call too long for patch site */
105 }
104 106
105 b->opcode = 0xe8; /* call */ 107 b->opcode = 0xe8; /* call */
106 b->delta = delta; 108 b->delta = delta;
@@ -115,8 +117,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
115 struct branch *b = insnbuf; 117 struct branch *b = insnbuf;
116 unsigned long delta = (unsigned long)target - (addr+5); 118 unsigned long delta = (unsigned long)target - (addr+5);
117 119
118 if (len < 5) 120 if (len < 5) {
121#ifdef CONFIG_RETPOLINE
122 WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
123#endif
119 return len; /* call too long for patch site */ 124 return len; /* call too long for patch site */
125 }
120 126
121 b->opcode = 0xe9; /* jmp */ 127 b->opcode = 0xe9; /* jmp */
122 b->delta = delta; 128 b->delta = delta;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 7c5c5dc90ffa..e18c8798c3a2 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -31,6 +31,7 @@
31#include <asm/tlbflush.h> 31#include <asm/tlbflush.h>
32#include <asm/mce.h> 32#include <asm/mce.h>
33#include <asm/vm86.h> 33#include <asm/vm86.h>
34#include <asm/spec-ctrl.h>
34 35
35/* 36/*
36 * per-CPU TSS segments. Threads are completely 'soft' on Linux, 37 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -130,11 +131,6 @@ void flush_thread(void)
130 fpu__clear(&tsk->thread.fpu); 131 fpu__clear(&tsk->thread.fpu);
131} 132}
132 133
133static void hard_disable_TSC(void)
134{
135 cr4_set_bits(X86_CR4_TSD);
136}
137
138void disable_TSC(void) 134void disable_TSC(void)
139{ 135{
140 preempt_disable(); 136 preempt_disable();
@@ -143,15 +139,10 @@ void disable_TSC(void)
143 * Must flip the CPU state synchronously with 139 * Must flip the CPU state synchronously with
144 * TIF_NOTSC in the current running context. 140 * TIF_NOTSC in the current running context.
145 */ 141 */
146 hard_disable_TSC(); 142 cr4_set_bits(X86_CR4_TSD);
147 preempt_enable(); 143 preempt_enable();
148} 144}
149 145
150static void hard_enable_TSC(void)
151{
152 cr4_clear_bits(X86_CR4_TSD);
153}
154
155static void enable_TSC(void) 146static void enable_TSC(void)
156{ 147{
157 preempt_disable(); 148 preempt_disable();
@@ -160,7 +151,7 @@ static void enable_TSC(void)
160 * Must flip the CPU state synchronously with 151 * Must flip the CPU state synchronously with
161 * TIF_NOTSC in the current running context. 152 * TIF_NOTSC in the current running context.
162 */ 153 */
163 hard_enable_TSC(); 154 cr4_clear_bits(X86_CR4_TSD);
164 preempt_enable(); 155 preempt_enable();
165} 156}
166 157
@@ -188,48 +179,199 @@ int set_tsc_mode(unsigned int val)
188 return 0; 179 return 0;
189} 180}
190 181
191void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, 182static inline void switch_to_bitmap(struct tss_struct *tss,
192 struct tss_struct *tss) 183 struct thread_struct *prev,
184 struct thread_struct *next,
185 unsigned long tifp, unsigned long tifn)
193{ 186{
194 struct thread_struct *prev, *next; 187 if (tifn & _TIF_IO_BITMAP) {
195
196 prev = &prev_p->thread;
197 next = &next_p->thread;
198
199 if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
200 test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
201 unsigned long debugctl = get_debugctlmsr();
202
203 debugctl &= ~DEBUGCTLMSR_BTF;
204 if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
205 debugctl |= DEBUGCTLMSR_BTF;
206
207 update_debugctlmsr(debugctl);
208 }
209
210 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
211 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
212 /* prev and next are different */
213 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
214 hard_disable_TSC();
215 else
216 hard_enable_TSC();
217 }
218
219 if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
220 /* 188 /*
221 * Copy the relevant range of the IO bitmap. 189 * Copy the relevant range of the IO bitmap.
222 * Normally this is 128 bytes or less: 190 * Normally this is 128 bytes or less:
223 */ 191 */
224 memcpy(tss->io_bitmap, next->io_bitmap_ptr, 192 memcpy(tss->io_bitmap, next->io_bitmap_ptr,
225 max(prev->io_bitmap_max, next->io_bitmap_max)); 193 max(prev->io_bitmap_max, next->io_bitmap_max));
226 } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { 194 } else if (tifp & _TIF_IO_BITMAP) {
227 /* 195 /*
228 * Clear any possible leftover bits: 196 * Clear any possible leftover bits:
229 */ 197 */
230 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); 198 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
231 } 199 }
200}
201
202#ifdef CONFIG_SMP
203
204struct ssb_state {
205 struct ssb_state *shared_state;
206 raw_spinlock_t lock;
207 unsigned int disable_state;
208 unsigned long local_state;
209};
210
211#define LSTATE_SSB 0
212
213static DEFINE_PER_CPU(struct ssb_state, ssb_state);
214
215void speculative_store_bypass_ht_init(void)
216{
217 struct ssb_state *st = this_cpu_ptr(&ssb_state);
218 unsigned int this_cpu = smp_processor_id();
219 unsigned int cpu;
220
221 st->local_state = 0;
222
223 /*
224 * Shared state setup happens once on the first bringup
225 * of the CPU. It's not destroyed on CPU hotunplug.
226 */
227 if (st->shared_state)
228 return;
229
230 raw_spin_lock_init(&st->lock);
231
232 /*
233 * Go over HT siblings and check whether one of them has set up the
234 * shared state pointer already.
235 */
236 for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
237 if (cpu == this_cpu)
238 continue;
239
240 if (!per_cpu(ssb_state, cpu).shared_state)
241 continue;
242
243 /* Link it to the state of the sibling: */
244 st->shared_state = per_cpu(ssb_state, cpu).shared_state;
245 return;
246 }
247
248 /*
249 * First HT sibling to come up on the core. Link shared state of
250 * the first HT sibling to itself. The siblings on the same core
251 * which come up later will see the shared state pointer and link
252 * themself to the state of this CPU.
253 */
254 st->shared_state = st;
255}
256
257/*
258 * Logic is: First HT sibling enables SSBD for both siblings in the core
259 * and last sibling to disable it, disables it for the whole core. This how
260 * MSR_SPEC_CTRL works in "hardware":
261 *
262 * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
263 */
264static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
265{
266 struct ssb_state *st = this_cpu_ptr(&ssb_state);
267 u64 msr = x86_amd_ls_cfg_base;
268
269 if (!static_cpu_has(X86_FEATURE_ZEN)) {
270 msr |= ssbd_tif_to_amd_ls_cfg(tifn);
271 wrmsrl(MSR_AMD64_LS_CFG, msr);
272 return;
273 }
274
275 if (tifn & _TIF_SSBD) {
276 /*
277 * Since this can race with prctl(), block reentry on the
278 * same CPU.
279 */
280 if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
281 return;
282
283 msr |= x86_amd_ls_cfg_ssbd_mask;
284
285 raw_spin_lock(&st->shared_state->lock);
286 /* First sibling enables SSBD: */
287 if (!st->shared_state->disable_state)
288 wrmsrl(MSR_AMD64_LS_CFG, msr);
289 st->shared_state->disable_state++;
290 raw_spin_unlock(&st->shared_state->lock);
291 } else {
292 if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
293 return;
294
295 raw_spin_lock(&st->shared_state->lock);
296 st->shared_state->disable_state--;
297 if (!st->shared_state->disable_state)
298 wrmsrl(MSR_AMD64_LS_CFG, msr);
299 raw_spin_unlock(&st->shared_state->lock);
300 }
301}
302#else
303static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
304{
305 u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
306
307 wrmsrl(MSR_AMD64_LS_CFG, msr);
308}
309#endif
310
311static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
312{
313 /*
314 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
315 * so ssbd_tif_to_spec_ctrl() just works.
316 */
317 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
318}
319
320static __always_inline void intel_set_ssb_state(unsigned long tifn)
321{
322 u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
323
324 wrmsrl(MSR_IA32_SPEC_CTRL, msr);
325}
326
327static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
328{
329 if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
330 amd_set_ssb_virt_state(tifn);
331 else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
332 amd_set_core_ssb_state(tifn);
333 else
334 intel_set_ssb_state(tifn);
335}
336
337void speculative_store_bypass_update(unsigned long tif)
338{
339 preempt_disable();
340 __speculative_store_bypass_update(tif);
341 preempt_enable();
342}
343
344void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
345 struct tss_struct *tss)
346{
347 struct thread_struct *prev, *next;
348 unsigned long tifp, tifn;
349
350 prev = &prev_p->thread;
351 next = &next_p->thread;
352
353 tifn = READ_ONCE(task_thread_info(next_p)->flags);
354 tifp = READ_ONCE(task_thread_info(prev_p)->flags);
355 switch_to_bitmap(tss, prev, next, tifp, tifn);
356
232 propagate_user_return_notify(prev_p, next_p); 357 propagate_user_return_notify(prev_p, next_p);
358
359 if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
360 arch_has_block_step()) {
361 unsigned long debugctl, msk;
362
363 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
364 debugctl &= ~DEBUGCTLMSR_BTF;
365 msk = tifn & _TIF_BLOCKSTEP;
366 debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
367 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
368 }
369
370 if ((tifp ^ tifn) & _TIF_NOTSC)
371 cr4_toggle_bits(X86_CR4_TSD);
372
373 if ((tifp ^ tifn) & _TIF_SSBD)
374 __speculative_store_bypass_update(tifn);
233} 375}
234 376
235/* 377/*
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index bbaae4cf9e8e..31c4bc0d3372 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -851,6 +851,12 @@ void __init setup_arch(char **cmdline_p)
851 memblock_reserve(__pa_symbol(_text), 851 memblock_reserve(__pa_symbol(_text),
852 (unsigned long)__bss_stop - (unsigned long)_text); 852 (unsigned long)__bss_stop - (unsigned long)_text);
853 853
854 /*
855 * Make sure page 0 is always reserved because on systems with
856 * L1TF its contents can be leaked to user processes.
857 */
858 memblock_reserve(0, PAGE_SIZE);
859
854 early_reserve_initrd(); 860 early_reserve_initrd();
855 861
856 /* 862 /*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index fe89f938e0f0..c017f1c71560 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -75,6 +75,7 @@
75#include <asm/i8259.h> 75#include <asm/i8259.h>
76#include <asm/realmode.h> 76#include <asm/realmode.h>
77#include <asm/misc.h> 77#include <asm/misc.h>
78#include <asm/spec-ctrl.h>
78 79
79/* Number of siblings per CPU package */ 80/* Number of siblings per CPU package */
80int smp_num_siblings = 1; 81int smp_num_siblings = 1;
@@ -217,6 +218,8 @@ static void notrace start_secondary(void *unused)
217 */ 218 */
218 check_tsc_sync_target(); 219 check_tsc_sync_target();
219 220
221 speculative_store_bypass_ht_init();
222
220 /* 223 /*
221 * Lock vector_lock and initialize the vectors on this cpu 224 * Lock vector_lock and initialize the vectors on this cpu
222 * before setting the cpu online. We must set it online with 225 * before setting the cpu online. We must set it online with
@@ -295,7 +298,7 @@ do { \
295 298
296static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 299static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
297{ 300{
298 if (cpu_has_topoext) { 301 if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
299 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 302 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
300 303
301 if (c->phys_proc_id == o->phys_proc_id && 304 if (c->phys_proc_id == o->phys_proc_id &&
@@ -1209,6 +1212,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1209 set_mtrr_aps_delayed_init(); 1212 set_mtrr_aps_delayed_init();
1210 1213
1211 smp_quirk_init_udelay(); 1214 smp_quirk_init_udelay();
1215
1216 speculative_store_bypass_ht_init();
1212} 1217}
1213 1218
1214void arch_enable_nonboot_cpus_begin(void) 1219void arch_enable_nonboot_cpus_begin(void)
@@ -1344,6 +1349,7 @@ static void remove_siblinginfo(int cpu)
1344 cpumask_clear(topology_core_cpumask(cpu)); 1349 cpumask_clear(topology_core_cpumask(cpu));
1345 c->phys_proc_id = 0; 1350 c->phys_proc_id = 0;
1346 c->cpu_core_id = 0; 1351 c->cpu_core_id = 0;
1352 c->booted_cores = 0;
1347 cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); 1353 cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
1348} 1354}
1349 1355
@@ -1442,6 +1448,8 @@ static inline void mwait_play_dead(void)
1442 void *mwait_ptr; 1448 void *mwait_ptr;
1443 int i; 1449 int i;
1444 1450
1451 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1452 return;
1445 if (!this_cpu_has(X86_FEATURE_MWAIT)) 1453 if (!this_cpu_has(X86_FEATURE_MWAIT))
1446 return; 1454 return;
1447 if (!this_cpu_has(X86_FEATURE_CLFLUSH)) 1455 if (!this_cpu_has(X86_FEATURE_CLFLUSH))
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 91a4496db434..c77ab1f51fbe 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -140,6 +140,16 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
140 return -1; 140 return -1;
141 set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); 141 set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
142 pte_unmap(pte); 142 pte_unmap(pte);
143
144 /*
145 * PTI poisons low addresses in the kernel page tables in the
146 * name of making them unusable for userspace. To execute
147 * code at such a low address, the poison must be cleared.
148 *
149 * Note: 'pgd' actually gets set in pud_alloc().
150 */
151 pgd->pgd &= ~_PAGE_NX;
152
143 return 0; 153 return 0;
144} 154}
145 155
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 22b81f35c500..8c73bf1492b8 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -480,7 +480,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
480} 480}
481NOKPROBE_SYMBOL(do_general_protection); 481NOKPROBE_SYMBOL(do_general_protection);
482 482
483/* May run on IST stack. */
484dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) 483dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
485{ 484{
486#ifdef CONFIG_DYNAMIC_FTRACE 485#ifdef CONFIG_DYNAMIC_FTRACE
@@ -495,7 +494,15 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
495 if (poke_int3_handler(regs)) 494 if (poke_int3_handler(regs))
496 return; 495 return;
497 496
497 /*
498 * Use ist_enter despite the fact that we don't use an IST stack.
499 * We can be called from a kprobe in non-CONTEXT_KERNEL kernel
500 * mode or even during context tracking state changes.
501 *
502 * This means that we can't schedule. That's okay.
503 */
498 ist_enter(regs); 504 ist_enter(regs);
505
499 RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); 506 RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
500#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 507#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
501 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 508 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
@@ -512,15 +519,9 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
512 SIGTRAP) == NOTIFY_STOP) 519 SIGTRAP) == NOTIFY_STOP)
513 goto exit; 520 goto exit;
514 521
515 /*
516 * Let others (NMI) know that the debug stack is in use
517 * as we may switch to the interrupt stack.
518 */
519 debug_stack_usage_inc();
520 preempt_conditional_sti(regs); 522 preempt_conditional_sti(regs);
521 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); 523 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
522 preempt_conditional_cli(regs); 524 preempt_conditional_cli(regs);
523 debug_stack_usage_dec();
524exit: 525exit:
525 ist_exit(regs); 526 ist_exit(regs);
526} 527}
@@ -750,7 +751,6 @@ dotraplinkage void
750do_device_not_available(struct pt_regs *regs, long error_code) 751do_device_not_available(struct pt_regs *regs, long error_code)
751{ 752{
752 RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); 753 RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
753 BUG_ON(use_eager_fpu());
754 754
755#ifdef CONFIG_MATH_EMULATION 755#ifdef CONFIG_MATH_EMULATION
756 if (read_cr0() & X86_CR0_EM) { 756 if (read_cr0() & X86_CR0_EM) {
@@ -886,19 +886,16 @@ void __init trap_init(void)
886 cpu_init(); 886 cpu_init();
887 887
888 /* 888 /*
889 * X86_TRAP_DB and X86_TRAP_BP have been set 889 * X86_TRAP_DB was installed in early_trap_init(). However,
890 * in early_trap_init(). However, ITS works only after 890 * IST works only after cpu_init() loads TSS. See comments
891 * cpu_init() loads TSS. See comments in early_trap_init(). 891 * in early_trap_init().
892 */ 892 */
893 set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK); 893 set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
894 /* int3 can be called from all */
895 set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
896 894
897 x86_init.irqs.trap_init(); 895 x86_init.irqs.trap_init();
898 896
899#ifdef CONFIG_X86_64 897#ifdef CONFIG_X86_64
900 memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16); 898 memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
901 set_nmi_gate(X86_TRAP_DB, &debug); 899 set_nmi_gate(X86_TRAP_DB, &debug);
902 set_nmi_gate(X86_TRAP_BP, &int3);
903#endif 900#endif
904} 901}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c7c4d9c51e99..c42d4a3d9494 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -365,6 +365,8 @@ static int __init tsc_setup(char *str)
365 tsc_clocksource_reliable = 1; 365 tsc_clocksource_reliable = 1;
366 if (!strncmp(str, "noirqtime", 9)) 366 if (!strncmp(str, "noirqtime", 9))
367 no_sched_irq_time = 1; 367 no_sched_irq_time = 1;
368 if (!strcmp(str, "unstable"))
369 mark_tsc_unstable("boot parameter");
368 return 1; 370 return 1;
369} 371}
370 372
@@ -406,7 +408,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
406 hpet2 -= hpet1; 408 hpet2 -= hpet1;
407 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); 409 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
408 do_div(tmp, 1000000); 410 do_div(tmp, 1000000);
409 do_div(deltatsc, tmp); 411 deltatsc = div64_u64(deltatsc, tmp);
410 412
411 return (unsigned long) deltatsc; 413 return (unsigned long) deltatsc;
412} 414}
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index c6aace2bbe08..b8105289c60b 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -290,7 +290,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
290 insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64); 290 insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
291 /* has the side-effect of processing the entire instruction */ 291 /* has the side-effect of processing the entire instruction */
292 insn_get_length(insn); 292 insn_get_length(insn);
293 if (WARN_ON_ONCE(!insn_complete(insn))) 293 if (!insn_complete(insn))
294 return -ENOEXEC; 294 return -ENOEXEC;
295 295
296 if (is_prefix_bad(insn)) 296 if (is_prefix_bad(insn))
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
index 4cf401f581e7..b7c9db5deebe 100644
--- a/arch/x86/kernel/verify_cpu.S
+++ b/arch/x86/kernel/verify_cpu.S
@@ -30,7 +30,7 @@
30 * appropriately. Either display a message or halt. 30 * appropriately. Either display a message or halt.
31 */ 31 */
32 32
33#include <asm/cpufeature.h> 33#include <asm/cpufeatures.h>
34#include <asm/msr-index.h> 34#include <asm/msr-index.h>
35 35
36verify_cpu: 36verify_cpu:
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 510e80da7de4..7f4839ef3608 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -357,8 +357,10 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
357 tss = &per_cpu(cpu_tss, get_cpu()); 357 tss = &per_cpu(cpu_tss, get_cpu());
358 /* make room for real-mode segments */ 358 /* make room for real-mode segments */
359 tsk->thread.sp0 += 16; 359 tsk->thread.sp0 += 16;
360 if (cpu_has_sep) 360
361 if (static_cpu_has(X86_FEATURE_SEP))
361 tsk->thread.sysenter_cs = 0; 362 tsk->thread.sysenter_cs = 0;
363
362 load_sp0(tss, &tsk->thread); 364 load_sp0(tss, &tsk->thread);
363 put_cpu(); 365 put_cpu();
364 366
@@ -715,7 +717,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
715 return; 717 return;
716 718
717check_vip: 719check_vip:
718 if (VEFLAGS & X86_EFLAGS_VIP) { 720 if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
721 (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
719 save_v86_state(regs, VM86_STI); 722 save_v86_state(regs, VM86_STI);
720 return; 723 return;
721 } 724 }
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index cc468bd15430..59bce1e11f8f 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -199,6 +199,17 @@ SECTIONS
199 :init 199 :init
200#endif 200#endif
201 201
202 /*
203 * Section for code used exclusively before alternatives are run. All
204 * references to such code must be patched out by alternatives, normally
205 * by using X86_FEATURE_ALWAYS CPU feature bit.
206 *
207 * See static_cpu_has() for an example.
208 */
209 .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
210 *(.altinstr_aux)
211 }
212
202 INIT_DATA_SECTION(16) 213 INIT_DATA_SECTION(16)
203 214
204 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { 215 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index a0695be19864..c7efd394c42b 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -42,6 +42,9 @@ EXPORT_SYMBOL(clear_page);
42 42
43EXPORT_SYMBOL(csum_partial); 43EXPORT_SYMBOL(csum_partial);
44 44
45EXPORT_SYMBOL(__sw_hweight32);
46EXPORT_SYMBOL(__sw_hweight64);
47
45/* 48/*
46 * Export string functions. We normally rely on gcc builtin for most of these, 49 * Export string functions. We normally rely on gcc builtin for most of these,
47 * but gcc sometimes decides not to inline them. 50 * but gcc sometimes decides not to inline them.
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 639a6e34500c..53b7f53f6207 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -22,7 +22,8 @@ config KVM
22 depends on HAVE_KVM 22 depends on HAVE_KVM
23 depends on HIGH_RES_TIMERS 23 depends on HIGH_RES_TIMERS
24 # for TASKSTATS/TASK_DELAY_ACCT: 24 # for TASKSTATS/TASK_DELAY_ACCT:
25 depends on NET 25 depends on NET && MULTIUSER
26 depends on X86_LOCAL_APIC
26 select PREEMPT_NOTIFIERS 27 select PREEMPT_NOTIFIERS
27 select MMU_NOTIFIER 28 select MMU_NOTIFIER
28 select ANON_INODES 29 select ANON_INODES
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 00045499f6c2..f1507626ed36 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -26,6 +26,7 @@
26#include <asm/kvm_emulate.h> 26#include <asm/kvm_emulate.h>
27#include <linux/stringify.h> 27#include <linux/stringify.h>
28#include <asm/debugreg.h> 28#include <asm/debugreg.h>
29#include <asm/nospec-branch.h>
29 30
30#include "x86.h" 31#include "x86.h"
31#include "tss.h" 32#include "tss.h"
@@ -789,6 +790,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
789 return assign_eip_near(ctxt, ctxt->_eip + rel); 790 return assign_eip_near(ctxt, ctxt->_eip + rel);
790} 791}
791 792
793static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
794 void *data, unsigned size)
795{
796 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
797}
798
799static int linear_write_system(struct x86_emulate_ctxt *ctxt,
800 ulong linear, void *data,
801 unsigned int size)
802{
803 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
804}
805
792static int segmented_read_std(struct x86_emulate_ctxt *ctxt, 806static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
793 struct segmented_address addr, 807 struct segmented_address addr,
794 void *data, 808 void *data,
@@ -800,7 +814,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
800 rc = linearize(ctxt, addr, size, false, &linear); 814 rc = linearize(ctxt, addr, size, false, &linear);
801 if (rc != X86EMUL_CONTINUE) 815 if (rc != X86EMUL_CONTINUE)
802 return rc; 816 return rc;
803 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); 817 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
804} 818}
805 819
806static int segmented_write_std(struct x86_emulate_ctxt *ctxt, 820static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
@@ -814,7 +828,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
814 rc = linearize(ctxt, addr, size, true, &linear); 828 rc = linearize(ctxt, addr, size, true, &linear);
815 if (rc != X86EMUL_CONTINUE) 829 if (rc != X86EMUL_CONTINUE)
816 return rc; 830 return rc;
817 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception); 831 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
818} 832}
819 833
820/* 834/*
@@ -1000,8 +1014,8 @@ static u8 test_cc(unsigned int condition, unsigned long flags)
1000 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); 1014 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1001 1015
1002 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; 1016 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1003 asm("push %[flags]; popf; call *%[fastop]" 1017 asm("push %[flags]; popf; " CALL_NOSPEC
1004 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); 1018 : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1005 return rc; 1019 return rc;
1006} 1020}
1007 1021
@@ -1487,8 +1501,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1487 return emulate_gp(ctxt, index << 3 | 0x2); 1501 return emulate_gp(ctxt, index << 3 | 0x2);
1488 1502
1489 addr = dt.address + index * 8; 1503 addr = dt.address + index * 8;
1490 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, 1504 return linear_read_system(ctxt, addr, desc, sizeof *desc);
1491 &ctxt->exception);
1492} 1505}
1493 1506
1494static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, 1507static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
@@ -1551,8 +1564,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1551 if (rc != X86EMUL_CONTINUE) 1564 if (rc != X86EMUL_CONTINUE)
1552 return rc; 1565 return rc;
1553 1566
1554 return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc), 1567 return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1555 &ctxt->exception);
1556} 1568}
1557 1569
1558/* allowed just for 8 bytes segments */ 1570/* allowed just for 8 bytes segments */
@@ -1566,8 +1578,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1566 if (rc != X86EMUL_CONTINUE) 1578 if (rc != X86EMUL_CONTINUE)
1567 return rc; 1579 return rc;
1568 1580
1569 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc, 1581 return linear_write_system(ctxt, addr, desc, sizeof *desc);
1570 &ctxt->exception);
1571} 1582}
1572 1583
1573static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, 1584static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
@@ -1728,8 +1739,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1728 return ret; 1739 return ret;
1729 } 1740 }
1730 } else if (ctxt->mode == X86EMUL_MODE_PROT64) { 1741 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1731 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, 1742 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1732 sizeof(base3), &ctxt->exception);
1733 if (ret != X86EMUL_CONTINUE) 1743 if (ret != X86EMUL_CONTINUE)
1734 return ret; 1744 return ret;
1735 if (is_noncanonical_address(get_desc_base(&seg_desc) | 1745 if (is_noncanonical_address(get_desc_base(&seg_desc) |
@@ -2042,11 +2052,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2042 eip_addr = dt.address + (irq << 2); 2052 eip_addr = dt.address + (irq << 2);
2043 cs_addr = dt.address + (irq << 2) + 2; 2053 cs_addr = dt.address + (irq << 2) + 2;
2044 2054
2045 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); 2055 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2046 if (rc != X86EMUL_CONTINUE) 2056 if (rc != X86EMUL_CONTINUE)
2047 return rc; 2057 return rc;
2048 2058
2049 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); 2059 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2050 if (rc != X86EMUL_CONTINUE) 2060 if (rc != X86EMUL_CONTINUE)
2051 return rc; 2061 return rc;
2052 2062
@@ -2890,12 +2900,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2890#ifdef CONFIG_X86_64 2900#ifdef CONFIG_X86_64
2891 base |= ((u64)base3) << 32; 2901 base |= ((u64)base3) << 32;
2892#endif 2902#endif
2893 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); 2903 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2894 if (r != X86EMUL_CONTINUE) 2904 if (r != X86EMUL_CONTINUE)
2895 return false; 2905 return false;
2896 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) 2906 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2897 return false; 2907 return false;
2898 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); 2908 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2899 if (r != X86EMUL_CONTINUE) 2909 if (r != X86EMUL_CONTINUE)
2900 return false; 2910 return false;
2901 if ((perm >> bit_idx) & mask) 2911 if ((perm >> bit_idx) & mask)
@@ -3024,35 +3034,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3024 u16 tss_selector, u16 old_tss_sel, 3034 u16 tss_selector, u16 old_tss_sel,
3025 ulong old_tss_base, struct desc_struct *new_desc) 3035 ulong old_tss_base, struct desc_struct *new_desc)
3026{ 3036{
3027 const struct x86_emulate_ops *ops = ctxt->ops;
3028 struct tss_segment_16 tss_seg; 3037 struct tss_segment_16 tss_seg;
3029 int ret; 3038 int ret;
3030 u32 new_tss_base = get_desc_base(new_desc); 3039 u32 new_tss_base = get_desc_base(new_desc);
3031 3040
3032 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, 3041 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
3033 &ctxt->exception);
3034 if (ret != X86EMUL_CONTINUE) 3042 if (ret != X86EMUL_CONTINUE)
3035 return ret; 3043 return ret;
3036 3044
3037 save_state_to_tss16(ctxt, &tss_seg); 3045 save_state_to_tss16(ctxt, &tss_seg);
3038 3046
3039 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, 3047 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
3040 &ctxt->exception);
3041 if (ret != X86EMUL_CONTINUE) 3048 if (ret != X86EMUL_CONTINUE)
3042 return ret; 3049 return ret;
3043 3050
3044 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, 3051 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
3045 &ctxt->exception);
3046 if (ret != X86EMUL_CONTINUE) 3052 if (ret != X86EMUL_CONTINUE)
3047 return ret; 3053 return ret;
3048 3054
3049 if (old_tss_sel != 0xffff) { 3055 if (old_tss_sel != 0xffff) {
3050 tss_seg.prev_task_link = old_tss_sel; 3056 tss_seg.prev_task_link = old_tss_sel;
3051 3057
3052 ret = ops->write_std(ctxt, new_tss_base, 3058 ret = linear_write_system(ctxt, new_tss_base,
3053 &tss_seg.prev_task_link, 3059 &tss_seg.prev_task_link,
3054 sizeof tss_seg.prev_task_link, 3060 sizeof tss_seg.prev_task_link);
3055 &ctxt->exception);
3056 if (ret != X86EMUL_CONTINUE) 3061 if (ret != X86EMUL_CONTINUE)
3057 return ret; 3062 return ret;
3058 } 3063 }
@@ -3168,38 +3173,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3168 u16 tss_selector, u16 old_tss_sel, 3173 u16 tss_selector, u16 old_tss_sel,
3169 ulong old_tss_base, struct desc_struct *new_desc) 3174 ulong old_tss_base, struct desc_struct *new_desc)
3170{ 3175{
3171 const struct x86_emulate_ops *ops = ctxt->ops;
3172 struct tss_segment_32 tss_seg; 3176 struct tss_segment_32 tss_seg;
3173 int ret; 3177 int ret;
3174 u32 new_tss_base = get_desc_base(new_desc); 3178 u32 new_tss_base = get_desc_base(new_desc);
3175 u32 eip_offset = offsetof(struct tss_segment_32, eip); 3179 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3176 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); 3180 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3177 3181
3178 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, 3182 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
3179 &ctxt->exception);
3180 if (ret != X86EMUL_CONTINUE) 3183 if (ret != X86EMUL_CONTINUE)
3181 return ret; 3184 return ret;
3182 3185
3183 save_state_to_tss32(ctxt, &tss_seg); 3186 save_state_to_tss32(ctxt, &tss_seg);
3184 3187
3185 /* Only GP registers and segment selectors are saved */ 3188 /* Only GP registers and segment selectors are saved */
3186 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip, 3189 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3187 ldt_sel_offset - eip_offset, &ctxt->exception); 3190 ldt_sel_offset - eip_offset);
3188 if (ret != X86EMUL_CONTINUE) 3191 if (ret != X86EMUL_CONTINUE)
3189 return ret; 3192 return ret;
3190 3193
3191 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, 3194 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
3192 &ctxt->exception);
3193 if (ret != X86EMUL_CONTINUE) 3195 if (ret != X86EMUL_CONTINUE)
3194 return ret; 3196 return ret;
3195 3197
3196 if (old_tss_sel != 0xffff) { 3198 if (old_tss_sel != 0xffff) {
3197 tss_seg.prev_task_link = old_tss_sel; 3199 tss_seg.prev_task_link = old_tss_sel;
3198 3200
3199 ret = ops->write_std(ctxt, new_tss_base, 3201 ret = linear_write_system(ctxt, new_tss_base,
3200 &tss_seg.prev_task_link, 3202 &tss_seg.prev_task_link,
3201 sizeof tss_seg.prev_task_link, 3203 sizeof tss_seg.prev_task_link);
3202 &ctxt->exception);
3203 if (ret != X86EMUL_CONTINUE) 3204 if (ret != X86EMUL_CONTINUE)
3204 return ret; 3205 return ret;
3205 } 3206 }
@@ -4978,6 +4979,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4978 bool op_prefix = false; 4979 bool op_prefix = false;
4979 bool has_seg_override = false; 4980 bool has_seg_override = false;
4980 struct opcode opcode; 4981 struct opcode opcode;
4982 u16 dummy;
4983 struct desc_struct desc;
4981 4984
4982 ctxt->memop.type = OP_NONE; 4985 ctxt->memop.type = OP_NONE;
4983 ctxt->memopp = NULL; 4986 ctxt->memopp = NULL;
@@ -4996,6 +4999,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4996 switch (mode) { 4999 switch (mode) {
4997 case X86EMUL_MODE_REAL: 5000 case X86EMUL_MODE_REAL:
4998 case X86EMUL_MODE_VM86: 5001 case X86EMUL_MODE_VM86:
5002 def_op_bytes = def_ad_bytes = 2;
5003 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5004 if (desc.d)
5005 def_op_bytes = def_ad_bytes = 4;
5006 break;
4999 case X86EMUL_MODE_PROT16: 5007 case X86EMUL_MODE_PROT16:
5000 def_op_bytes = def_ad_bytes = 2; 5008 def_op_bytes = def_ad_bytes = 2;
5001 break; 5009 break;
@@ -5290,9 +5298,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5290 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; 5298 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5291 if (!(ctxt->d & ByteOp)) 5299 if (!(ctxt->d & ByteOp))
5292 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; 5300 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5293 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" 5301 asm("push %[flags]; popf; " CALL_NOSPEC "; pushf; pop %[flags]\n"
5294 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), 5302 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5295 [fastop]"+S"(fop) 5303 [thunk_target]"+S"(fop)
5296 : "c"(ctxt->src2.val)); 5304 : "c"(ctxt->src2.val));
5297 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); 5305 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5298 if (!fop) /* exception is returned in fop variable */ 5306 if (!fop) /* exception is returned in fop variable */
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 3aab53f8cad2..d380111351c0 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -247,8 +247,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
247 index == RTC_GSI) { 247 index == RTC_GSI) {
248 if (kvm_apic_match_dest(vcpu, NULL, 0, 248 if (kvm_apic_match_dest(vcpu, NULL, 0,
249 e->fields.dest_id, e->fields.dest_mode) || 249 e->fields.dest_id, e->fields.dest_mode) ||
250 (e->fields.trig_mode == IOAPIC_EDGE_TRIG && 250 kvm_apic_pending_eoi(vcpu, e->fields.vector))
251 kvm_apic_pending_eoi(vcpu, e->fields.vector)))
252 __set_bit(e->fields.vector, 251 __set_bit(e->fields.vector,
253 (unsigned long *)eoi_exit_bitmap); 252 (unsigned long *)eoi_exit_bitmap);
254 } 253 }
@@ -269,6 +268,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
269{ 268{
270 unsigned index; 269 unsigned index;
271 bool mask_before, mask_after; 270 bool mask_before, mask_after;
271 int old_remote_irr, old_delivery_status;
272 union kvm_ioapic_redirect_entry *e; 272 union kvm_ioapic_redirect_entry *e;
273 273
274 switch (ioapic->ioregsel) { 274 switch (ioapic->ioregsel) {
@@ -291,14 +291,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
291 return; 291 return;
292 e = &ioapic->redirtbl[index]; 292 e = &ioapic->redirtbl[index];
293 mask_before = e->fields.mask; 293 mask_before = e->fields.mask;
294 /* Preserve read-only fields */
295 old_remote_irr = e->fields.remote_irr;
296 old_delivery_status = e->fields.delivery_status;
294 if (ioapic->ioregsel & 1) { 297 if (ioapic->ioregsel & 1) {
295 e->bits &= 0xffffffff; 298 e->bits &= 0xffffffff;
296 e->bits |= (u64) val << 32; 299 e->bits |= (u64) val << 32;
297 } else { 300 } else {
298 e->bits &= ~0xffffffffULL; 301 e->bits &= ~0xffffffffULL;
299 e->bits |= (u32) val; 302 e->bits |= (u32) val;
300 e->fields.remote_irr = 0;
301 } 303 }
304 e->fields.remote_irr = old_remote_irr;
305 e->fields.delivery_status = old_delivery_status;
306
307 /*
308 * Some OSes (Linux, Xen) assume that Remote IRR bit will
309 * be cleared by IOAPIC hardware when the entry is configured
310 * as edge-triggered. This behavior is used to simulate an
311 * explicit EOI on IOAPICs that don't have the EOI register.
312 */
313 if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
314 e->fields.remote_irr = 0;
315
302 mask_after = e->fields.mask; 316 mask_after = e->fields.mask;
303 if (mask_before != mask_after) 317 if (mask_before != mask_after)
304 kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); 318 kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 1c96f09367ae..a1afd80a68aa 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -288,8 +288,16 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
288 if (!kvm_vcpu_has_lapic(vcpu)) 288 if (!kvm_vcpu_has_lapic(vcpu))
289 return; 289 return;
290 290
291 /*
292 * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation)
293 * which doesn't have EOI register; Some buggy OSes (e.g. Windows with
294 * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC
295 * version first and level-triggered interrupts never get EOIed in
296 * IOAPIC.
297 */
291 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0); 298 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
292 if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31)))) 299 if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
300 !ioapic_in_kernel(vcpu->kvm))
293 v |= APIC_LVR_DIRECTED_EOI; 301 v |= APIC_LVR_DIRECTED_EOI;
294 apic_set_reg(apic, APIC_LVR, v); 302 apic_set_reg(apic, APIC_LVR, v);
295} 303}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1049c3c9b877..2b71f2c03b9e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4503,7 +4503,7 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
4503typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmap); 4503typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmap);
4504 4504
4505/* The caller should hold mmu-lock before calling this function. */ 4505/* The caller should hold mmu-lock before calling this function. */
4506static bool 4506static __always_inline bool
4507slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, 4507slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
4508 slot_level_handler fn, int start_level, int end_level, 4508 slot_level_handler fn, int start_level, int end_level,
4509 gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) 4509 gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
@@ -4533,7 +4533,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
4533 return flush; 4533 return flush;
4534} 4534}
4535 4535
4536static bool 4536static __always_inline bool
4537slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, 4537slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
4538 slot_level_handler fn, int start_level, int end_level, 4538 slot_level_handler fn, int start_level, int end_level,
4539 bool lock_flush_tlb) 4539 bool lock_flush_tlb)
@@ -4544,7 +4544,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
4544 lock_flush_tlb); 4544 lock_flush_tlb);
4545} 4545}
4546 4546
4547static bool 4547static __always_inline bool
4548slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, 4548slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
4549 slot_level_handler fn, bool lock_flush_tlb) 4549 slot_level_handler fn, bool lock_flush_tlb)
4550{ 4550{
@@ -4552,7 +4552,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
4552 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); 4552 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
4553} 4553}
4554 4554
4555static bool 4555static __always_inline bool
4556slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, 4556slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
4557 slot_level_handler fn, bool lock_flush_tlb) 4557 slot_level_handler fn, bool lock_flush_tlb)
4558{ 4558{
@@ -4560,7 +4560,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
4560 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); 4560 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
4561} 4561}
4562 4562
4563static bool 4563static __always_inline bool
4564slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, 4564slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
4565 slot_level_handler fn, bool lock_flush_tlb) 4565 slot_level_handler fn, bool lock_flush_tlb)
4566{ 4566{
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2038e5bacce6..df7827a981dd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -37,7 +37,7 @@
37#include <asm/desc.h> 37#include <asm/desc.h>
38#include <asm/debugreg.h> 38#include <asm/debugreg.h>
39#include <asm/kvm_para.h> 39#include <asm/kvm_para.h>
40#include <asm/nospec-branch.h> 40#include <asm/spec-ctrl.h>
41 41
42#include <asm/virtext.h> 42#include <asm/virtext.h>
43#include "trace.h" 43#include "trace.h"
@@ -1386,6 +1386,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
1386 */ 1386 */
1387 if (var->unusable) 1387 if (var->unusable)
1388 var->db = 0; 1388 var->db = 0;
1389 /* This is symmetric with svm_set_segment() */
1389 var->dpl = to_svm(vcpu)->vmcb->save.cpl; 1390 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1390 break; 1391 break;
1391 } 1392 }
@@ -1531,18 +1532,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
1531 s->base = var->base; 1532 s->base = var->base;
1532 s->limit = var->limit; 1533 s->limit = var->limit;
1533 s->selector = var->selector; 1534 s->selector = var->selector;
1534 if (var->unusable) 1535 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1535 s->attrib = 0; 1536 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1536 else { 1537 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1537 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); 1538 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1538 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; 1539 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1539 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; 1540 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1540 s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT; 1541 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1541 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; 1542 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1542 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1543 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1544 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1545 }
1546 1543
1547 /* 1544 /*
1548 * This is always accurate, except if SYSRET returned to a segment 1545 * This is always accurate, except if SYSRET returned to a segment
@@ -1551,7 +1548,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
1551 * would entail passing the CPL to userspace and back. 1548 * would entail passing the CPL to userspace and back.
1552 */ 1549 */
1553 if (seg == VCPU_SREG_SS) 1550 if (seg == VCPU_SREG_SS)
1554 svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; 1551 /* This is symmetric with svm_get_segment() */
1552 svm->vmcb->save.cpl = (var->dpl & 3);
1555 1553
1556 mark_dirty(svm->vmcb, VMCB_SEG); 1554 mark_dirty(svm->vmcb, VMCB_SEG);
1557} 1555}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 75d60e40c389..c5a4b1978cbf 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/tboot.h> 33#include <linux/tboot.h>
34#include <linux/hrtimer.h> 34#include <linux/hrtimer.h>
35#include <linux/nospec.h>
35#include "kvm_cache_regs.h" 36#include "kvm_cache_regs.h"
36#include "x86.h" 37#include "x86.h"
37 38
@@ -47,7 +48,7 @@
47#include <asm/kexec.h> 48#include <asm/kexec.h>
48#include <asm/apic.h> 49#include <asm/apic.h>
49#include <asm/irq_remapping.h> 50#include <asm/irq_remapping.h>
50#include <asm/nospec-branch.h> 51#include <asm/spec-ctrl.h>
51 52
52#include "trace.h" 53#include "trace.h"
53#include "pmu.h" 54#include "pmu.h"
@@ -125,6 +126,12 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
125 126
126#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 127#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
127 128
129#define VMX_VPID_EXTENT_SUPPORTED_MASK \
130 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
131 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
132 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
133 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
134
128/* 135/*
129 * These 2 parameters are used to config the controls for Pause-Loop Exiting: 136 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
130 * ple_gap: upper bound on the amount of time between two successive 137 * ple_gap: upper bound on the amount of time between two successive
@@ -827,21 +834,18 @@ static const unsigned short vmcs_field_to_offset_table[] = {
827 834
828static inline short vmcs_field_to_offset(unsigned long field) 835static inline short vmcs_field_to_offset(unsigned long field)
829{ 836{
830 BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); 837 const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
838 unsigned short offset;
831 839
832 if (field >= ARRAY_SIZE(vmcs_field_to_offset_table)) 840 BUILD_BUG_ON(size > SHRT_MAX);
841 if (field >= size)
833 return -ENOENT; 842 return -ENOENT;
834 843
835 /* 844 field = array_index_nospec(field, size);
836 * FIXME: Mitigation for CVE-2017-5753. To be replaced with a 845 offset = vmcs_field_to_offset_table[field];
837 * generic mechanism. 846 if (offset == 0)
838 */
839 asm("lfence");
840
841 if (vmcs_field_to_offset_table[field] == 0)
842 return -ENOENT; 847 return -ENOENT;
843 848 return offset;
844 return vmcs_field_to_offset_table[field];
845} 849}
846 850
847static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) 851static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
@@ -1007,6 +1011,13 @@ static inline bool is_machine_check(u32 intr_info)
1007 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); 1011 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1008} 1012}
1009 1013
1014/* Undocumented: icebp/int1 */
1015static inline bool is_icebp(u32 intr_info)
1016{
1017 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1018 == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
1019}
1020
1010static inline bool cpu_has_vmx_msr_bitmap(void) 1021static inline bool cpu_has_vmx_msr_bitmap(void)
1011{ 1022{
1012 return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; 1023 return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
@@ -2308,6 +2319,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
2308 return; 2319 return;
2309 } 2320 }
2310 2321
2322 WARN_ON_ONCE(vmx->emulation_required);
2323
2311 if (kvm_exception_is_soft(nr)) { 2324 if (kvm_exception_is_soft(nr)) {
2312 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 2325 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2313 vmx->vcpu.arch.event_exit_inst_len); 2326 vmx->vcpu.arch.event_exit_inst_len);
@@ -2659,8 +2672,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2659 */ 2672 */
2660 if (enable_vpid) 2673 if (enable_vpid)
2661 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT | 2674 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
2662 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | 2675 VMX_VPID_EXTENT_SUPPORTED_MASK;
2663 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
2664 else 2676 else
2665 vmx->nested.nested_vmx_vpid_caps = 0; 2677 vmx->nested.nested_vmx_vpid_caps = 0;
2666 2678
@@ -4514,7 +4526,7 @@ static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
4514 return enable_apicv && lapic_in_kernel(vcpu); 4526 return enable_apicv && lapic_in_kernel(vcpu);
4515} 4527}
4516 4528
4517static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) 4529static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
4518{ 4530{
4519 struct vcpu_vmx *vmx = to_vmx(vcpu); 4531 struct vcpu_vmx *vmx = to_vmx(vcpu);
4520 int max_irr; 4532 int max_irr;
@@ -4525,19 +4537,15 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
4525 vmx->nested.pi_pending) { 4537 vmx->nested.pi_pending) {
4526 vmx->nested.pi_pending = false; 4538 vmx->nested.pi_pending = false;
4527 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) 4539 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
4528 return 0; 4540 return;
4529 4541
4530 max_irr = find_last_bit( 4542 max_irr = find_last_bit(
4531 (unsigned long *)vmx->nested.pi_desc->pir, 256); 4543 (unsigned long *)vmx->nested.pi_desc->pir, 256);
4532 4544
4533 if (max_irr == 256) 4545 if (max_irr == 256)
4534 return 0; 4546 return;
4535 4547
4536 vapic_page = kmap(vmx->nested.virtual_apic_page); 4548 vapic_page = kmap(vmx->nested.virtual_apic_page);
4537 if (!vapic_page) {
4538 WARN_ON(1);
4539 return -ENOMEM;
4540 }
4541 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page); 4549 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page);
4542 kunmap(vmx->nested.virtual_apic_page); 4550 kunmap(vmx->nested.virtual_apic_page);
4543 4551
@@ -4548,7 +4556,6 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
4548 vmcs_write16(GUEST_INTR_STATUS, status); 4556 vmcs_write16(GUEST_INTR_STATUS, status);
4549 } 4557 }
4550 } 4558 }
4551 return 0;
4552} 4559}
4553 4560
4554static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu) 4561static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
@@ -4595,14 +4602,15 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
4595 4602
4596 if (is_guest_mode(vcpu) && 4603 if (is_guest_mode(vcpu) &&
4597 vector == vmx->nested.posted_intr_nv) { 4604 vector == vmx->nested.posted_intr_nv) {
4598 /* the PIR and ON have been set by L1. */
4599 kvm_vcpu_trigger_posted_interrupt(vcpu);
4600 /* 4605 /*
4601 * If a posted intr is not recognized by hardware, 4606 * If a posted intr is not recognized by hardware,
4602 * we will accomplish it in the next vmentry. 4607 * we will accomplish it in the next vmentry.
4603 */ 4608 */
4604 vmx->nested.pi_pending = true; 4609 vmx->nested.pi_pending = true;
4605 kvm_make_request(KVM_REQ_EVENT, vcpu); 4610 kvm_make_request(KVM_REQ_EVENT, vcpu);
4611 /* the PIR and ON have been set by L1. */
4612 if (!kvm_vcpu_trigger_posted_interrupt(vcpu))
4613 kvm_vcpu_kick(vcpu);
4606 return 0; 4614 return 0;
4607 } 4615 }
4608 return -1; 4616 return -1;
@@ -4954,7 +4962,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
4954 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4962 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4955 } 4963 }
4956 4964
4957 vmcs_writel(GUEST_RFLAGS, 0x02); 4965 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
4958 kvm_rip_write(vcpu, 0xfff0); 4966 kvm_rip_write(vcpu, 0xfff0);
4959 4967
4960 vmcs_writel(GUEST_GDTR_BASE, 0); 4968 vmcs_writel(GUEST_GDTR_BASE, 0);
@@ -5334,7 +5342,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
5334 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { 5342 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
5335 vcpu->arch.dr6 &= ~15; 5343 vcpu->arch.dr6 &= ~15;
5336 vcpu->arch.dr6 |= dr6 | DR6_RTM; 5344 vcpu->arch.dr6 |= dr6 | DR6_RTM;
5337 if (!(dr6 & ~DR6_RESERVED)) /* icebp */ 5345 if (is_icebp(intr_info))
5338 skip_emulated_instruction(vcpu); 5346 skip_emulated_instruction(vcpu);
5339 5347
5340 kvm_queue_exception(vcpu, DB_VECTOR); 5348 kvm_queue_exception(vcpu, DB_VECTOR);
@@ -6023,7 +6031,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
6023 if (test_bit(KVM_REQ_EVENT, &vcpu->requests)) 6031 if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
6024 return 1; 6032 return 1;
6025 6033
6026 err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); 6034 err = emulate_instruction(vcpu, 0);
6027 6035
6028 if (err == EMULATE_USER_EXIT) { 6036 if (err == EMULATE_USER_EXIT) {
6029 ++vcpu->stat.mmio_exits; 6037 ++vcpu->stat.mmio_exits;
@@ -6031,12 +6039,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
6031 goto out; 6039 goto out;
6032 } 6040 }
6033 6041
6034 if (err != EMULATE_DONE) { 6042 if (err != EMULATE_DONE)
6035 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 6043 goto emulation_error;
6036 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 6044
6037 vcpu->run->internal.ndata = 0; 6045 if (vmx->emulation_required && !vmx->rmode.vm86_active &&
6038 return 0; 6046 vcpu->arch.exception.pending)
6039 } 6047 goto emulation_error;
6040 6048
6041 if (vcpu->arch.halt_request) { 6049 if (vcpu->arch.halt_request) {
6042 vcpu->arch.halt_request = 0; 6050 vcpu->arch.halt_request = 0;
@@ -6052,6 +6060,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
6052 6060
6053out: 6061out:
6054 return ret; 6062 return ret;
6063
6064emulation_error:
6065 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6066 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
6067 vcpu->run->internal.ndata = 0;
6068 return 0;
6055} 6069}
6056 6070
6057static int __grow_ple_window(int val) 6071static int __grow_ple_window(int val)
@@ -6678,8 +6692,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
6678 vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) 6692 vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
6679 return 1; 6693 return 1;
6680 6694
6681 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, 6695 if (kvm_read_guest_virt(vcpu, gva, &vmptr, sizeof(vmptr), &e)) {
6682 sizeof(vmptr), &e)) {
6683 kvm_inject_page_fault(vcpu, &e); 6696 kvm_inject_page_fault(vcpu, &e);
6684 return 1; 6697 return 1;
6685 } 6698 }
@@ -6830,6 +6843,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
6830 HRTIMER_MODE_REL); 6843 HRTIMER_MODE_REL);
6831 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; 6844 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
6832 6845
6846 vmx->nested.vpid02 = allocate_vpid();
6847
6833 vmx->nested.vmxon = true; 6848 vmx->nested.vmxon = true;
6834 6849
6835 skip_emulated_instruction(vcpu); 6850 skip_emulated_instruction(vcpu);
@@ -7197,8 +7212,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
7197 vmx_instruction_info, true, &gva)) 7212 vmx_instruction_info, true, &gva))
7198 return 1; 7213 return 1;
7199 /* _system ok, as nested_vmx_check_permission verified cpl=0 */ 7214 /* _system ok, as nested_vmx_check_permission verified cpl=0 */
7200 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, 7215 kvm_write_guest_virt_system(vcpu, gva, &field_value,
7201 &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL); 7216 (is_long_mode(vcpu) ? 8 : 4), NULL);
7202 } 7217 }
7203 7218
7204 nested_vmx_succeed(vcpu); 7219 nested_vmx_succeed(vcpu);
@@ -7233,8 +7248,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
7233 if (get_vmx_mem_address(vcpu, exit_qualification, 7248 if (get_vmx_mem_address(vcpu, exit_qualification,
7234 vmx_instruction_info, false, &gva)) 7249 vmx_instruction_info, false, &gva))
7235 return 1; 7250 return 1;
7236 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, 7251 if (kvm_read_guest_virt(vcpu, gva, &field_value,
7237 &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { 7252 (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
7238 kvm_inject_page_fault(vcpu, &e); 7253 kvm_inject_page_fault(vcpu, &e);
7239 return 1; 7254 return 1;
7240 } 7255 }
@@ -7324,9 +7339,9 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
7324 vmx_instruction_info, true, &vmcs_gva)) 7339 vmx_instruction_info, true, &vmcs_gva))
7325 return 1; 7340 return 1;
7326 /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */ 7341 /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
7327 if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, 7342 if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
7328 (void *)&to_vmx(vcpu)->nested.current_vmptr, 7343 (void *)&to_vmx(vcpu)->nested.current_vmptr,
7329 sizeof(u64), &e)) { 7344 sizeof(u64), &e)) {
7330 kvm_inject_page_fault(vcpu, &e); 7345 kvm_inject_page_fault(vcpu, &e);
7331 return 1; 7346 return 1;
7332 } 7347 }
@@ -7367,7 +7382,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
7367 7382
7368 types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 7383 types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
7369 7384
7370 if (!(types & (1UL << type))) { 7385 if (type >= 32 || !(types & (1 << type))) {
7371 nested_vmx_failValid(vcpu, 7386 nested_vmx_failValid(vcpu,
7372 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 7387 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
7373 skip_emulated_instruction(vcpu); 7388 skip_emulated_instruction(vcpu);
@@ -7380,8 +7395,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
7380 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 7395 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
7381 vmx_instruction_info, false, &gva)) 7396 vmx_instruction_info, false, &gva))
7382 return 1; 7397 return 1;
7383 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, 7398 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
7384 sizeof(operand), &e)) {
7385 kvm_inject_page_fault(vcpu, &e); 7399 kvm_inject_page_fault(vcpu, &e);
7386 return 1; 7400 return 1;
7387 } 7401 }
@@ -7424,9 +7438,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
7424 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 7438 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
7425 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 7439 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
7426 7440
7427 types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7; 7441 types = (vmx->nested.nested_vmx_vpid_caps &
7442 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
7428 7443
7429 if (!(types & (1UL << type))) { 7444 if (type >= 32 || !(types & (1 << type))) {
7430 nested_vmx_failValid(vcpu, 7445 nested_vmx_failValid(vcpu,
7431 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 7446 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
7432 skip_emulated_instruction(vcpu); 7447 skip_emulated_instruction(vcpu);
@@ -7439,28 +7454,33 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
7439 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 7454 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
7440 vmx_instruction_info, false, &gva)) 7455 vmx_instruction_info, false, &gva))
7441 return 1; 7456 return 1;
7442 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid, 7457 if (kvm_read_guest_virt(vcpu, gva, &vpid, sizeof(u32), &e)) {
7443 sizeof(u32), &e)) {
7444 kvm_inject_page_fault(vcpu, &e); 7458 kvm_inject_page_fault(vcpu, &e);
7445 return 1; 7459 return 1;
7446 } 7460 }
7447 7461
7448 switch (type) { 7462 switch (type) {
7463 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
7449 case VMX_VPID_EXTENT_SINGLE_CONTEXT: 7464 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
7450 /* 7465 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
7451 * Old versions of KVM use the single-context version so we 7466 if (!vpid) {
7452 * have to support it; just treat it the same as all-context. 7467 nested_vmx_failValid(vcpu,
7453 */ 7468 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
7469 skip_emulated_instruction(vcpu);
7470 return 1;
7471 }
7472 break;
7454 case VMX_VPID_EXTENT_ALL_CONTEXT: 7473 case VMX_VPID_EXTENT_ALL_CONTEXT:
7455 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
7456 nested_vmx_succeed(vcpu);
7457 break; 7474 break;
7458 default: 7475 default:
7459 /* Trap individual address invalidation invvpid calls */ 7476 WARN_ON_ONCE(1);
7460 BUG_ON(1); 7477 skip_emulated_instruction(vcpu);
7461 break; 7478 return 1;
7462 } 7479 }
7463 7480
7481 __vmx_flush_tlb(vcpu, vmx->nested.vpid02);
7482 nested_vmx_succeed(vcpu);
7483
7464 skip_emulated_instruction(vcpu); 7484 skip_emulated_instruction(vcpu);
7465 return 1; 7485 return 1;
7466} 7486}
@@ -7644,11 +7664,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
7644{ 7664{
7645 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 7665 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7646 int cr = exit_qualification & 15; 7666 int cr = exit_qualification & 15;
7647 int reg = (exit_qualification >> 8) & 15; 7667 int reg;
7648 unsigned long val = kvm_register_readl(vcpu, reg); 7668 unsigned long val;
7649 7669
7650 switch ((exit_qualification >> 4) & 3) { 7670 switch ((exit_qualification >> 4) & 3) {
7651 case 0: /* mov to cr */ 7671 case 0: /* mov to cr */
7672 reg = (exit_qualification >> 8) & 15;
7673 val = kvm_register_readl(vcpu, reg);
7652 switch (cr) { 7674 switch (cr) {
7653 case 0: 7675 case 0:
7654 if (vmcs12->cr0_guest_host_mask & 7676 if (vmcs12->cr0_guest_host_mask &
@@ -7703,6 +7725,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
7703 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 7725 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
7704 * cr0. Other attempted changes are ignored, with no exit. 7726 * cr0. Other attempted changes are ignored, with no exit.
7705 */ 7727 */
7728 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
7706 if (vmcs12->cr0_guest_host_mask & 0xe & 7729 if (vmcs12->cr0_guest_host_mask & 0xe &
7707 (val ^ vmcs12->cr0_read_shadow)) 7730 (val ^ vmcs12->cr0_read_shadow))
7708 return true; 7731 return true;
@@ -8376,13 +8399,13 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
8376 "pushf\n\t" 8399 "pushf\n\t"
8377 "orl $0x200, (%%" _ASM_SP ")\n\t" 8400 "orl $0x200, (%%" _ASM_SP ")\n\t"
8378 __ASM_SIZE(push) " $%c[cs]\n\t" 8401 __ASM_SIZE(push) " $%c[cs]\n\t"
8379 "call *%[entry]\n\t" 8402 CALL_NOSPEC
8380 : 8403 :
8381#ifdef CONFIG_X86_64 8404#ifdef CONFIG_X86_64
8382 [sp]"=&r"(tmp) 8405 [sp]"=&r"(tmp)
8383#endif 8406#endif
8384 : 8407 :
8385 [entry]"r"(entry), 8408 THUNK_TARGET(entry),
8386 [ss]"i"(__KERNEL_DS), 8409 [ss]"i"(__KERNEL_DS),
8387 [cs]"i"(__KERNEL_CS) 8410 [cs]"i"(__KERNEL_CS)
8388 ); 8411 );
@@ -8866,10 +8889,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
8866 goto free_vmcs; 8889 goto free_vmcs;
8867 } 8890 }
8868 8891
8869 if (nested) { 8892 if (nested)
8870 nested_vmx_setup_ctls_msrs(vmx); 8893 nested_vmx_setup_ctls_msrs(vmx);
8871 vmx->nested.vpid02 = allocate_vpid();
8872 }
8873 8894
8874 vmx->nested.posted_intr_nv = -1; 8895 vmx->nested.posted_intr_nv = -1;
8875 vmx->nested.current_vmptr = -1ull; 8896 vmx->nested.current_vmptr = -1ull;
@@ -8878,7 +8899,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
8878 return &vmx->vcpu; 8899 return &vmx->vcpu;
8879 8900
8880free_vmcs: 8901free_vmcs:
8881 free_vpid(vmx->nested.vpid02);
8882 free_loaded_vmcs(vmx->loaded_vmcs); 8902 free_loaded_vmcs(vmx->loaded_vmcs);
8883free_msrs: 8903free_msrs:
8884 kfree(vmx->guest_msrs); 8904 kfree(vmx->guest_msrs);
@@ -9239,11 +9259,6 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
9239 return false; 9259 return false;
9240 } 9260 }
9241 msr_bitmap = (unsigned long *)kmap(page); 9261 msr_bitmap = (unsigned long *)kmap(page);
9242 if (!msr_bitmap) {
9243 nested_release_page_clean(page);
9244 WARN_ON(1);
9245 return false;
9246 }
9247 9262
9248 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { 9263 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
9249 if (nested_cpu_has_apic_reg_virt(vmcs12)) 9264 if (nested_cpu_has_apic_reg_virt(vmcs12))
@@ -10165,7 +10180,8 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
10165 return 0; 10180 return 0;
10166 } 10181 }
10167 10182
10168 return vmx_complete_nested_posted_interrupt(vcpu); 10183 vmx_complete_nested_posted_interrupt(vcpu);
10184 return 0;
10169} 10185}
10170 10186
10171static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) 10187static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f973cfa8ff4f..53d43d22a84b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2755,6 +2755,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2755 kvm_x86_ops->vcpu_put(vcpu); 2755 kvm_x86_ops->vcpu_put(vcpu);
2756 kvm_put_guest_fpu(vcpu); 2756 kvm_put_guest_fpu(vcpu);
2757 vcpu->arch.last_host_tsc = rdtsc(); 2757 vcpu->arch.last_host_tsc = rdtsc();
2758 /*
2759 * If userspace has set any breakpoints or watchpoints, dr6 is restored
2760 * on every vmexit, but if not, we might have a stale dr6 from the
2761 * guest. do_debug expects dr6 to be cleared after it runs, do the same.
2762 */
2763 set_debugreg(0, 6);
2758} 2764}
2759 2765
2760static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, 2766static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
@@ -3967,13 +3973,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
3967 mutex_unlock(&kvm->lock); 3973 mutex_unlock(&kvm->lock);
3968 break; 3974 break;
3969 case KVM_XEN_HVM_CONFIG: { 3975 case KVM_XEN_HVM_CONFIG: {
3976 struct kvm_xen_hvm_config xhc;
3970 r = -EFAULT; 3977 r = -EFAULT;
3971 if (copy_from_user(&kvm->arch.xen_hvm_config, argp, 3978 if (copy_from_user(&xhc, argp, sizeof(xhc)))
3972 sizeof(struct kvm_xen_hvm_config)))
3973 goto out; 3979 goto out;
3974 r = -EINVAL; 3980 r = -EINVAL;
3975 if (kvm->arch.xen_hvm_config.flags) 3981 if (xhc.flags)
3976 goto out; 3982 goto out;
3983 memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
3977 r = 0; 3984 r = 0;
3978 break; 3985 break;
3979 } 3986 }
@@ -4238,11 +4245,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
4238 return X86EMUL_CONTINUE; 4245 return X86EMUL_CONTINUE;
4239} 4246}
4240 4247
4241int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, 4248int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
4242 gva_t addr, void *val, unsigned int bytes, 4249 gva_t addr, void *val, unsigned int bytes,
4243 struct x86_exception *exception) 4250 struct x86_exception *exception)
4244{ 4251{
4245 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4246 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 4252 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4247 4253
4248 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 4254 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
@@ -4250,12 +4256,17 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
4250} 4256}
4251EXPORT_SYMBOL_GPL(kvm_read_guest_virt); 4257EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
4252 4258
4253static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, 4259static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
4254 gva_t addr, void *val, unsigned int bytes, 4260 gva_t addr, void *val, unsigned int bytes,
4255 struct x86_exception *exception) 4261 struct x86_exception *exception, bool system)
4256{ 4262{
4257 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4263 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4258 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); 4264 u32 access = 0;
4265
4266 if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
4267 access |= PFERR_USER_MASK;
4268
4269 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
4259} 4270}
4260 4271
4261static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, 4272static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
@@ -4267,18 +4278,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
4267 return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE; 4278 return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
4268} 4279}
4269 4280
4270int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, 4281static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
4271 gva_t addr, void *val, 4282 struct kvm_vcpu *vcpu, u32 access,
4272 unsigned int bytes, 4283 struct x86_exception *exception)
4273 struct x86_exception *exception)
4274{ 4284{
4275 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4276 void *data = val; 4285 void *data = val;
4277 int r = X86EMUL_CONTINUE; 4286 int r = X86EMUL_CONTINUE;
4278 4287
4279 while (bytes) { 4288 while (bytes) {
4280 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, 4289 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
4281 PFERR_WRITE_MASK, 4290 access,
4282 exception); 4291 exception);
4283 unsigned offset = addr & (PAGE_SIZE-1); 4292 unsigned offset = addr & (PAGE_SIZE-1);
4284 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 4293 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
@@ -4299,6 +4308,27 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4299out: 4308out:
4300 return r; 4309 return r;
4301} 4310}
4311
4312static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
4313 unsigned int bytes, struct x86_exception *exception,
4314 bool system)
4315{
4316 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4317 u32 access = PFERR_WRITE_MASK;
4318
4319 if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
4320 access |= PFERR_USER_MASK;
4321
4322 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
4323 access, exception);
4324}
4325
4326int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
4327 unsigned int bytes, struct x86_exception *exception)
4328{
4329 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
4330 PFERR_WRITE_MASK, exception);
4331}
4302EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); 4332EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
4303 4333
4304static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 4334static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
@@ -5018,8 +5048,8 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
5018static const struct x86_emulate_ops emulate_ops = { 5048static const struct x86_emulate_ops emulate_ops = {
5019 .read_gpr = emulator_read_gpr, 5049 .read_gpr = emulator_read_gpr,
5020 .write_gpr = emulator_write_gpr, 5050 .write_gpr = emulator_write_gpr,
5021 .read_std = kvm_read_guest_virt_system, 5051 .read_std = emulator_read_std,
5022 .write_std = kvm_write_guest_virt_system, 5052 .write_std = emulator_write_std,
5023 .read_phys = kvm_read_guest_phys_system, 5053 .read_phys = kvm_read_guest_phys_system,
5024 .fetch = kvm_fetch_guest_virt, 5054 .fetch = kvm_fetch_guest_virt,
5025 .read_emulated = emulator_read_emulated, 5055 .read_emulated = emulator_read_emulated,
@@ -5153,7 +5183,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
5153 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 5183 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5154 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 5184 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
5155 vcpu->run->internal.ndata = 0; 5185 vcpu->run->internal.ndata = 0;
5156 r = EMULATE_FAIL; 5186 r = EMULATE_USER_EXIT;
5157 } 5187 }
5158 kvm_queue_exception(vcpu, UD_VECTOR); 5188 kvm_queue_exception(vcpu, UD_VECTOR);
5159 5189
@@ -8204,6 +8234,13 @@ static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
8204 sizeof(val)); 8234 sizeof(val));
8205} 8235}
8206 8236
8237static int apf_get_user(struct kvm_vcpu *vcpu, u32 *val)
8238{
8239
8240 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, val,
8241 sizeof(u32));
8242}
8243
8207void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 8244void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
8208 struct kvm_async_pf *work) 8245 struct kvm_async_pf *work)
8209{ 8246{
@@ -8230,6 +8267,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
8230 struct kvm_async_pf *work) 8267 struct kvm_async_pf *work)
8231{ 8268{
8232 struct x86_exception fault; 8269 struct x86_exception fault;
8270 u32 val;
8233 8271
8234 if (work->wakeup_all) 8272 if (work->wakeup_all)
8235 work->arch.token = ~0; /* broadcast wakeup */ 8273 work->arch.token = ~0; /* broadcast wakeup */
@@ -8237,14 +8275,24 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
8237 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); 8275 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
8238 trace_kvm_async_pf_ready(work->arch.token, work->gva); 8276 trace_kvm_async_pf_ready(work->arch.token, work->gva);
8239 8277
8240 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && 8278 if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
8241 !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { 8279 !apf_get_user(vcpu, &val)) {
8242 fault.vector = PF_VECTOR; 8280 if (val == KVM_PV_REASON_PAGE_NOT_PRESENT &&
8243 fault.error_code_valid = true; 8281 vcpu->arch.exception.pending &&
8244 fault.error_code = 0; 8282 vcpu->arch.exception.nr == PF_VECTOR &&
8245 fault.nested_page_fault = false; 8283 !apf_put_user(vcpu, 0)) {
8246 fault.address = work->arch.token; 8284 vcpu->arch.exception.pending = false;
8247 kvm_inject_page_fault(vcpu, &fault); 8285 vcpu->arch.exception.nr = 0;
8286 vcpu->arch.exception.has_error_code = false;
8287 vcpu->arch.exception.error_code = 0;
8288 } else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
8289 fault.vector = PF_VECTOR;
8290 fault.error_code_valid = true;
8291 fault.error_code = 0;
8292 fault.nested_page_fault = false;
8293 fault.address = work->arch.token;
8294 kvm_inject_page_fault(vcpu, &fault);
8295 }
8248 } 8296 }
8249 vcpu->arch.apf.halted = false; 8297 vcpu->arch.apf.halted = false;
8250 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 8298 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index f2afa5fe48a6..53a750a10598 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -164,11 +164,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
164 164
165void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); 165void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
166 166
167int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, 167int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
168 gva_t addr, void *val, unsigned int bytes, 168 gva_t addr, void *val, unsigned int bytes,
169 struct x86_exception *exception); 169 struct x86_exception *exception);
170 170
171int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, 171int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
172 gva_t addr, void *val, unsigned int bytes, 172 gva_t addr, void *val, unsigned int bytes,
173 struct x86_exception *exception); 173 struct x86_exception *exception);
174 174
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 12a34d15b648..c0c8b0a49bb8 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -23,7 +23,7 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
23lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o 23lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
24lib-$(CONFIG_RETPOLINE) += retpoline.o 24lib-$(CONFIG_RETPOLINE) += retpoline.o
25 25
26obj-y += msr.o msr-reg.o msr-reg-export.o 26obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
27 27
28ifeq ($(CONFIG_X86_32),y) 28ifeq ($(CONFIG_X86_32),y)
29 obj-y += atomic64_32.o 29 obj-y += atomic64_32.o
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index a2fe51b00cce..65be7cfaf947 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,5 +1,5 @@
1#include <linux/linkage.h> 1#include <linux/linkage.h>
2#include <asm/cpufeature.h> 2#include <asm/cpufeatures.h>
3#include <asm/alternative-asm.h> 3#include <asm/alternative-asm.h>
4 4
5/* 5/*
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index a744506856b1..88ce150186c6 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -21,12 +21,14 @@ static inline int myisspace(u8 c)
21 * @option: option string to look for 21 * @option: option string to look for
22 * 22 *
23 * Returns the position of that @option (starts counting with 1) 23 * Returns the position of that @option (starts counting with 1)
24 * or 0 on not found. 24 * or 0 on not found. @option will only be found if it is found
25 * as an entire word in @cmdline. For instance, if @option="car"
26 * then a cmdline which contains "cart" will not match.
25 */ 27 */
26int cmdline_find_option_bool(const char *cmdline, const char *option) 28int cmdline_find_option_bool(const char *cmdline, const char *option)
27{ 29{
28 char c; 30 char c;
29 int len, pos = 0, wstart = 0; 31 int pos = 0, wstart = 0;
30 const char *opptr = NULL; 32 const char *opptr = NULL;
31 enum { 33 enum {
32 st_wordstart = 0, /* Start of word/after whitespace */ 34 st_wordstart = 0, /* Start of word/after whitespace */
@@ -37,11 +39,14 @@ int cmdline_find_option_bool(const char *cmdline, const char *option)
37 if (!cmdline) 39 if (!cmdline)
38 return -1; /* No command line */ 40 return -1; /* No command line */
39 41
40 len = min_t(int, strlen(cmdline), COMMAND_LINE_SIZE); 42 if (!strlen(cmdline))
41 if (!len)
42 return 0; 43 return 0;
43 44
44 while (len--) { 45 /*
46 * This 'pos' check ensures we do not overrun
47 * a non-NULL-terminated 'cmdline'
48 */
49 while (pos < COMMAND_LINE_SIZE) {
45 c = *(char *)cmdline++; 50 c = *(char *)cmdline++;
46 pos++; 51 pos++;
47 52
@@ -58,17 +63,26 @@ int cmdline_find_option_bool(const char *cmdline, const char *option)
58 /* fall through */ 63 /* fall through */
59 64
60 case st_wordcmp: 65 case st_wordcmp:
61 if (!*opptr) 66 if (!*opptr) {
67 /*
68 * We matched all the way to the end of the
69 * option we were looking for. If the
70 * command-line has a space _or_ ends, then
71 * we matched!
72 */
62 if (!c || myisspace(c)) 73 if (!c || myisspace(c))
63 return wstart; 74 return wstart;
64 else 75 else
65 state = st_wordskip; 76 state = st_wordskip;
66 else if (!c) 77 } else if (!c) {
78 /*
79 * Hit the NULL terminator on the end of
80 * cmdline.
81 */
67 return 0; 82 return 0;
68 else if (c != *opptr++) 83 } else if (c != *opptr++) {
69 state = st_wordskip; 84 state = st_wordskip;
70 else if (!len) /* last word and is matching */ 85 }
71 return wstart;
72 break; 86 break;
73 87
74 case st_wordskip: 88 case st_wordskip:
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 009f98216b7e..24ef1c2104d4 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -1,7 +1,7 @@
1/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */ 1/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
2 2
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/cpufeature.h> 4#include <asm/cpufeatures.h>
5#include <asm/alternative-asm.h> 5#include <asm/alternative-asm.h>
6 6
7/* 7/*
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 423644c230e7..accf7f2f557f 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -10,7 +10,7 @@
10#include <asm/current.h> 10#include <asm/current.h>
11#include <asm/asm-offsets.h> 11#include <asm/asm-offsets.h>
12#include <asm/thread_info.h> 12#include <asm/thread_info.h>
13#include <asm/cpufeature.h> 13#include <asm/cpufeatures.h>
14#include <asm/alternative-asm.h> 14#include <asm/alternative-asm.h>
15#include <asm/asm.h> 15#include <asm/asm.h>
16#include <asm/smap.h> 16#include <asm/smap.h>
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 7e48807b2fa1..45a53dfe1859 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -55,7 +55,7 @@ ENTRY(csum_partial_copy_generic)
55 movq %r12, 3*8(%rsp) 55 movq %r12, 3*8(%rsp)
56 movq %r14, 4*8(%rsp) 56 movq %r14, 4*8(%rsp)
57 movq %r13, 5*8(%rsp) 57 movq %r13, 5*8(%rsp)
58 movq %rbp, 6*8(%rsp) 58 movq %r15, 6*8(%rsp)
59 59
60 movq %r8, (%rsp) 60 movq %r8, (%rsp)
61 movq %r9, 1*8(%rsp) 61 movq %r9, 1*8(%rsp)
@@ -74,7 +74,7 @@ ENTRY(csum_partial_copy_generic)
74 /* main loop. clear in 64 byte blocks */ 74 /* main loop. clear in 64 byte blocks */
75 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */ 75 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
76 /* r11: temp3, rdx: temp4, r12 loopcnt */ 76 /* r11: temp3, rdx: temp4, r12 loopcnt */
77 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */ 77 /* r10: temp5, r15: temp6, r14 temp7, r13 temp8 */
78 .p2align 4 78 .p2align 4
79.Lloop: 79.Lloop:
80 source 80 source
@@ -89,7 +89,7 @@ ENTRY(csum_partial_copy_generic)
89 source 89 source
90 movq 32(%rdi), %r10 90 movq 32(%rdi), %r10
91 source 91 source
92 movq 40(%rdi), %rbp 92 movq 40(%rdi), %r15
93 source 93 source
94 movq 48(%rdi), %r14 94 movq 48(%rdi), %r14
95 source 95 source
@@ -103,7 +103,7 @@ ENTRY(csum_partial_copy_generic)
103 adcq %r11, %rax 103 adcq %r11, %rax
104 adcq %rdx, %rax 104 adcq %rdx, %rax
105 adcq %r10, %rax 105 adcq %r10, %rax
106 adcq %rbp, %rax 106 adcq %r15, %rax
107 adcq %r14, %rax 107 adcq %r14, %rax
108 adcq %r13, %rax 108 adcq %r13, %rax
109 109
@@ -121,7 +121,7 @@ ENTRY(csum_partial_copy_generic)
121 dest 121 dest
122 movq %r10, 32(%rsi) 122 movq %r10, 32(%rsi)
123 dest 123 dest
124 movq %rbp, 40(%rsi) 124 movq %r15, 40(%rsi)
125 dest 125 dest
126 movq %r14, 48(%rsi) 126 movq %r14, 48(%rsi)
127 dest 127 dest
@@ -203,7 +203,7 @@ ENTRY(csum_partial_copy_generic)
203 movq 3*8(%rsp), %r12 203 movq 3*8(%rsp), %r12
204 movq 4*8(%rsp), %r14 204 movq 4*8(%rsp), %r14
205 movq 5*8(%rsp), %r13 205 movq 5*8(%rsp), %r13
206 movq 6*8(%rsp), %rbp 206 movq 6*8(%rsp), %r15
207 addq $7*8, %rsp 207 addq $7*8, %rsp
208 ret 208 ret
209 209
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index e912b2f6d36e..45772560aceb 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -93,6 +93,13 @@ static void delay_mwaitx(unsigned long __loops)
93{ 93{
94 u64 start, end, delay, loops = __loops; 94 u64 start, end, delay, loops = __loops;
95 95
96 /*
97 * Timer value of 0 causes MWAITX to wait indefinitely, unless there
98 * is a store on the memory monitored by MONITORX.
99 */
100 if (loops == 0)
101 return;
102
96 start = rdtsc_ordered(); 103 start = rdtsc_ordered();
97 104
98 for (;;) { 105 for (;;) {
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 46668cda4ffd..490b2ee4e4bb 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -38,6 +38,8 @@ ENTRY(__get_user_1)
38 GET_THREAD_INFO(%_ASM_DX) 38 GET_THREAD_INFO(%_ASM_DX)
39 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX 39 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
40 jae bad_get_user 40 jae bad_get_user
41 sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
42 and %_ASM_DX, %_ASM_AX
41 ASM_STAC 43 ASM_STAC
421: movzbl (%_ASM_AX),%edx 441: movzbl (%_ASM_AX),%edx
43 xor %eax,%eax 45 xor %eax,%eax
@@ -51,6 +53,8 @@ ENTRY(__get_user_2)
51 GET_THREAD_INFO(%_ASM_DX) 53 GET_THREAD_INFO(%_ASM_DX)
52 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX 54 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
53 jae bad_get_user 55 jae bad_get_user
56 sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
57 and %_ASM_DX, %_ASM_AX
54 ASM_STAC 58 ASM_STAC
552: movzwl -1(%_ASM_AX),%edx 592: movzwl -1(%_ASM_AX),%edx
56 xor %eax,%eax 60 xor %eax,%eax
@@ -64,6 +68,8 @@ ENTRY(__get_user_4)
64 GET_THREAD_INFO(%_ASM_DX) 68 GET_THREAD_INFO(%_ASM_DX)
65 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX 69 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
66 jae bad_get_user 70 jae bad_get_user
71 sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
72 and %_ASM_DX, %_ASM_AX
67 ASM_STAC 73 ASM_STAC
683: movl -3(%_ASM_AX),%edx 743: movl -3(%_ASM_AX),%edx
69 xor %eax,%eax 75 xor %eax,%eax
@@ -78,6 +84,8 @@ ENTRY(__get_user_8)
78 GET_THREAD_INFO(%_ASM_DX) 84 GET_THREAD_INFO(%_ASM_DX)
79 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX 85 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
80 jae bad_get_user 86 jae bad_get_user
87 sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
88 and %_ASM_DX, %_ASM_AX
81 ASM_STAC 89 ASM_STAC
824: movq -7(%_ASM_AX),%rdx 904: movq -7(%_ASM_AX),%rdx
83 xor %eax,%eax 91 xor %eax,%eax
@@ -89,6 +97,8 @@ ENTRY(__get_user_8)
89 GET_THREAD_INFO(%_ASM_DX) 97 GET_THREAD_INFO(%_ASM_DX)
90 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX 98 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
91 jae bad_get_user_8 99 jae bad_get_user_8
100 sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
101 and %_ASM_DX, %_ASM_AX
92 ASM_STAC 102 ASM_STAC
934: movl -7(%_ASM_AX),%edx 1034: movl -7(%_ASM_AX),%edx
945: movl -3(%_ASM_AX),%ecx 1045: movl -3(%_ASM_AX),%ecx
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
new file mode 100644
index 000000000000..8a602a1e404a
--- /dev/null
+++ b/arch/x86/lib/hweight.S
@@ -0,0 +1,79 @@
1#include <linux/linkage.h>
2
3#include <asm/asm.h>
4
5/*
6 * unsigned int __sw_hweight32(unsigned int w)
7 * %rdi: w
8 */
9ENTRY(__sw_hweight32)
10
11#ifdef CONFIG_X86_64
12 movl %edi, %eax # w
13#endif
14 __ASM_SIZE(push,) %__ASM_REG(dx)
15 movl %eax, %edx # w -> t
16 shrl %edx # t >>= 1
17 andl $0x55555555, %edx # t &= 0x55555555
18 subl %edx, %eax # w -= t
19
20 movl %eax, %edx # w -> t
21 shrl $2, %eax # w_tmp >>= 2
22 andl $0x33333333, %edx # t &= 0x33333333
23 andl $0x33333333, %eax # w_tmp &= 0x33333333
24 addl %edx, %eax # w = w_tmp + t
25
26 movl %eax, %edx # w -> t
27 shrl $4, %edx # t >>= 4
28 addl %edx, %eax # w_tmp += t
29 andl $0x0f0f0f0f, %eax # w_tmp &= 0x0f0f0f0f
30 imull $0x01010101, %eax, %eax # w_tmp *= 0x01010101
31 shrl $24, %eax # w = w_tmp >> 24
32 __ASM_SIZE(pop,) %__ASM_REG(dx)
33 ret
34ENDPROC(__sw_hweight32)
35
36ENTRY(__sw_hweight64)
37#ifdef CONFIG_X86_64
38 pushq %rdi
39 pushq %rdx
40
41 movq %rdi, %rdx # w -> t
42 movabsq $0x5555555555555555, %rax
43 shrq %rdx # t >>= 1
44 andq %rdx, %rax # t &= 0x5555555555555555
45 movabsq $0x3333333333333333, %rdx
46 subq %rax, %rdi # w -= t
47
48 movq %rdi, %rax # w -> t
49 shrq $2, %rdi # w_tmp >>= 2
50 andq %rdx, %rax # t &= 0x3333333333333333
51 andq %rdi, %rdx # w_tmp &= 0x3333333333333333
52 addq %rdx, %rax # w = w_tmp + t
53
54 movq %rax, %rdx # w -> t
55 shrq $4, %rdx # t >>= 4
56 addq %rdx, %rax # w_tmp += t
57 movabsq $0x0f0f0f0f0f0f0f0f, %rdx
58 andq %rdx, %rax # w_tmp &= 0x0f0f0f0f0f0f0f0f
59 movabsq $0x0101010101010101, %rdx
60 imulq %rdx, %rax # w_tmp *= 0x0101010101010101
61 shrq $56, %rax # w = w_tmp >> 56
62
63 popq %rdx
64 popq %rdi
65 ret
66#else /* CONFIG_X86_32 */
67 /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
68 pushl %ecx
69
70 call __sw_hweight32
71 movl %eax, %ecx # stash away result
72 movl %edx, %eax # second part of input
73 call __sw_hweight32
74 addl %ecx, %eax # result
75
76 popl %ecx
77 ret
78#endif
79ENDPROC(__sw_hweight64)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 16698bba87de..a0de849435ad 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -1,7 +1,7 @@
1/* Copyright 2002 Andi Kleen */ 1/* Copyright 2002 Andi Kleen */
2 2
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/cpufeature.h> 4#include <asm/cpufeatures.h>
5#include <asm/alternative-asm.h> 5#include <asm/alternative-asm.h>
6 6
7/* 7/*
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index ca2afdd6d98e..90ce01bee00c 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -6,7 +6,7 @@
6 * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com> 6 * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
7 */ 7 */
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/cpufeature.h> 9#include <asm/cpufeatures.h>
10#include <asm/alternative-asm.h> 10#include <asm/alternative-asm.h>
11 11
12#undef memmove 12#undef memmove
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 2661fad05827..c9c81227ea37 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -1,7 +1,7 @@
1/* Copyright 2002 Andi Kleen, SuSE Labs */ 1/* Copyright 2002 Andi Kleen, SuSE Labs */
2 2
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/cpufeature.h> 4#include <asm/cpufeatures.h>
5#include <asm/alternative-asm.h> 5#include <asm/alternative-asm.h>
6 6
7.weak memset 7.weak memset
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index e611a124c442..7bbb853e36bd 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -3,7 +3,7 @@
3#include <linux/stringify.h> 3#include <linux/stringify.h>
4#include <linux/linkage.h> 4#include <linux/linkage.h>
5#include <asm/dwarf2.h> 5#include <asm/dwarf2.h>
6#include <asm/cpufeature.h> 6#include <asm/cpufeatures.h>
7#include <asm/alternative-asm.h> 7#include <asm/alternative-asm.h>
8#include <asm-generic/export.h> 8#include <asm-generic/export.h>
9#include <asm/nospec-branch.h> 9#include <asm/nospec-branch.h>
@@ -36,7 +36,6 @@ GENERATE_THUNK(_ASM_DX)
36GENERATE_THUNK(_ASM_SI) 36GENERATE_THUNK(_ASM_SI)
37GENERATE_THUNK(_ASM_DI) 37GENERATE_THUNK(_ASM_DI)
38GENERATE_THUNK(_ASM_BP) 38GENERATE_THUNK(_ASM_BP)
39GENERATE_THUNK(_ASM_SP)
40#ifdef CONFIG_64BIT 39#ifdef CONFIG_64BIT
41GENERATE_THUNK(r8) 40GENERATE_THUNK(r8)
42GENERATE_THUNK(r9) 41GENERATE_THUNK(r9)
diff --git a/arch/x86/math-emu/Makefile b/arch/x86/math-emu/Makefile
index 9b0c63b60302..1b2dac174321 100644
--- a/arch/x86/math-emu/Makefile
+++ b/arch/x86/math-emu/Makefile
@@ -5,8 +5,8 @@
5#DEBUG = -DDEBUGGING 5#DEBUG = -DDEBUGGING
6DEBUG = 6DEBUG =
7PARANOID = -DPARANOID 7PARANOID = -DPARANOID
8EXTRA_CFLAGS := $(PARANOID) $(DEBUG) -fno-builtin $(MATH_EMULATION) 8ccflags-y += $(PARANOID) $(DEBUG) -fno-builtin $(MATH_EMULATION)
9EXTRA_AFLAGS := $(PARANOID) 9asflags-y += $(PARANOID)
10 10
11# From 'C' language sources: 11# From 'C' language sources:
12C_OBJS =fpu_entry.o errors.o \ 12C_OBJS =fpu_entry.o errors.o \
diff --git a/arch/x86/math-emu/reg_compare.c b/arch/x86/math-emu/reg_compare.c
index b77360fdbf4a..19b33b50adfa 100644
--- a/arch/x86/math-emu/reg_compare.c
+++ b/arch/x86/math-emu/reg_compare.c
@@ -168,7 +168,7 @@ static int compare(FPU_REG const *b, int tagb)
168/* This function requires that st(0) is not empty */ 168/* This function requires that st(0) is not empty */
169int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag) 169int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
170{ 170{
171 int f = 0, c; 171 int f, c;
172 172
173 c = compare(loaded_data, loaded_tag); 173 c = compare(loaded_data, loaded_tag);
174 174
@@ -189,12 +189,12 @@ int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
189 case COMP_No_Comp: 189 case COMP_No_Comp:
190 f = SW_C3 | SW_C2 | SW_C0; 190 f = SW_C3 | SW_C2 | SW_C0;
191 break; 191 break;
192#ifdef PARANOID
193 default: 192 default:
193#ifdef PARANOID
194 EXCEPTION(EX_INTERNAL | 0x121); 194 EXCEPTION(EX_INTERNAL | 0x121);
195#endif /* PARANOID */
195 f = SW_C3 | SW_C2 | SW_C0; 196 f = SW_C3 | SW_C2 | SW_C0;
196 break; 197 break;
197#endif /* PARANOID */
198 } 198 }
199 setcc(f); 199 setcc(f);
200 if (c & COMP_Denormal) { 200 if (c & COMP_Denormal) {
@@ -205,7 +205,7 @@ int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
205 205
206static int compare_st_st(int nr) 206static int compare_st_st(int nr)
207{ 207{
208 int f = 0, c; 208 int f, c;
209 FPU_REG *st_ptr; 209 FPU_REG *st_ptr;
210 210
211 if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) { 211 if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
@@ -235,12 +235,12 @@ static int compare_st_st(int nr)
235 case COMP_No_Comp: 235 case COMP_No_Comp:
236 f = SW_C3 | SW_C2 | SW_C0; 236 f = SW_C3 | SW_C2 | SW_C0;
237 break; 237 break;
238#ifdef PARANOID
239 default: 238 default:
239#ifdef PARANOID
240 EXCEPTION(EX_INTERNAL | 0x122); 240 EXCEPTION(EX_INTERNAL | 0x122);
241#endif /* PARANOID */
241 f = SW_C3 | SW_C2 | SW_C0; 242 f = SW_C3 | SW_C2 | SW_C0;
242 break; 243 break;
243#endif /* PARANOID */
244 } 244 }
245 setcc(f); 245 setcc(f);
246 if (c & COMP_Denormal) { 246 if (c & COMP_Denormal) {
@@ -283,12 +283,12 @@ static int compare_i_st_st(int nr)
283 case COMP_No_Comp: 283 case COMP_No_Comp:
284 f = X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF; 284 f = X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF;
285 break; 285 break;
286#ifdef PARANOID
287 default: 286 default:
287#ifdef PARANOID
288 EXCEPTION(EX_INTERNAL | 0x122); 288 EXCEPTION(EX_INTERNAL | 0x122);
289#endif /* PARANOID */
289 f = 0; 290 f = 0;
290 break; 291 break;
291#endif /* PARANOID */
292 } 292 }
293 FPU_EFLAGS = (FPU_EFLAGS & ~(X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF)) | f; 293 FPU_EFLAGS = (FPU_EFLAGS & ~(X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF)) | f;
294 if (c & COMP_Denormal) { 294 if (c & COMP_Denormal) {
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e830c71a1323..e0a34b0d381e 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -287,7 +287,7 @@ static noinline int vmalloc_fault(unsigned long address)
287 if (!pmd_k) 287 if (!pmd_k)
288 return -1; 288 return -1;
289 289
290 if (pmd_huge(*pmd_k)) 290 if (pmd_large(*pmd_k))
291 return 0; 291 return 0;
292 292
293 pte_k = pte_offset_kernel(pmd_k, address); 293 pte_k = pte_offset_kernel(pmd_k, address);
@@ -407,7 +407,7 @@ static noinline int vmalloc_fault(unsigned long address)
407 if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) 407 if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
408 BUG(); 408 BUG();
409 409
410 if (pud_huge(*pud)) 410 if (pud_large(*pud))
411 return 0; 411 return 0;
412 412
413 pmd = pmd_offset(pud, address); 413 pmd = pmd_offset(pud, address);
@@ -418,7 +418,7 @@ static noinline int vmalloc_fault(unsigned long address)
418 if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) 418 if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
419 BUG(); 419 BUG();
420 420
421 if (pmd_huge(*pmd)) 421 if (pmd_large(*pmd))
422 return 0; 422 return 0;
423 423
424 pte_ref = pte_offset_kernel(pmd_ref, address); 424 pte_ref = pte_offset_kernel(pmd_ref, address);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 151fd33e9043..4954a6cef50a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -4,6 +4,8 @@
4#include <linux/swap.h> 4#include <linux/swap.h>
5#include <linux/memblock.h> 5#include <linux/memblock.h>
6#include <linux/bootmem.h> /* for max_low_pfn */ 6#include <linux/bootmem.h> /* for max_low_pfn */
7#include <linux/swapfile.h>
8#include <linux/swapops.h>
7 9
8#include <asm/cacheflush.h> 10#include <asm/cacheflush.h>
9#include <asm/e820.h> 11#include <asm/e820.h>
@@ -767,3 +769,26 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
767 __cachemode2pte_tbl[cache] = __cm_idx2pte(entry); 769 __cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
768 __pte2cachemode_tbl[entry] = cache; 770 __pte2cachemode_tbl[entry] = cache;
769} 771}
772
773#ifdef CONFIG_SWAP
774unsigned long max_swapfile_size(void)
775{
776 unsigned long pages;
777
778 pages = generic_max_swapfile_size();
779
780 if (boot_cpu_has_bug(X86_BUG_L1TF)) {
781 /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
782 unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
783 /*
784 * We encode swap offsets also with 3 bits below those for pfn
785 * which makes the usable limit higher.
786 */
787#if CONFIG_PGTABLE_LEVELS > 2
788 l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
789#endif
790 pages = min_t(unsigned long, l1tf_limit, pages);
791 }
792 return pages;
793}
794#endif
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index b9c78f3bcd67..53ab3f367472 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -348,11 +348,11 @@ void iounmap(volatile void __iomem *addr)
348 (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) 348 (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
349 return; 349 return;
350 350
351 mmiotrace_iounmap(addr);
352
351 addr = (volatile void __iomem *) 353 addr = (volatile void __iomem *)
352 (PAGE_MASK & (unsigned long __force)addr); 354 (PAGE_MASK & (unsigned long __force)addr);
353 355
354 mmiotrace_iounmap(addr);
355
356 /* Use the vm area unlocked, assuming the caller 356 /* Use the vm area unlocked, assuming the caller
357 ensures there isn't another iounmap for the same address 357 ensures there isn't another iounmap for the same address
358 in parallel. Reuse of the virtual address is prevented by 358 in parallel. Reuse of the virtual address is prevented by
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 8af98513d36c..7a72e32e4806 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -345,7 +345,7 @@ void __init kaiser_init(void)
345 if (vsyscall_enabled()) 345 if (vsyscall_enabled())
346 kaiser_add_user_map_early((void *)VSYSCALL_ADDR, 346 kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
347 PAGE_SIZE, 347 PAGE_SIZE,
348 __PAGE_KERNEL_VSYSCALL); 348 vsyscall_pgprot);
349 349
350 for_each_possible_cpu(cpu) { 350 for_each_possible_cpu(cpu) {
351 void *percpu_vaddr = __per_cpu_user_mapped_start + 351 void *percpu_vaddr = __per_cpu_user_mapped_start +
@@ -363,7 +363,7 @@ void __init kaiser_init(void)
363 kaiser_add_user_map_ptrs_early(__entry_text_start, __entry_text_end, 363 kaiser_add_user_map_ptrs_early(__entry_text_start, __entry_text_end,
364 __PAGE_KERNEL_RX); 364 __PAGE_KERNEL_RX);
365 365
366#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) 366#ifdef CONFIG_FUNCTION_GRAPH_TRACER
367 kaiser_add_user_map_ptrs_early(__irqentry_text_start, 367 kaiser_add_user_map_ptrs_early(__irqentry_text_start,
368 __irqentry_text_end, 368 __irqentry_text_end,
369 __PAGE_KERNEL_RX); 369 __PAGE_KERNEL_RX);
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index ddb2244b06a1..7bf14e74fc8f 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -125,24 +125,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
125 125
126static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old) 126static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
127{ 127{
128 pmd_t new_pmd;
128 pmdval_t v = pmd_val(*pmd); 129 pmdval_t v = pmd_val(*pmd);
129 if (clear) { 130 if (clear) {
130 *old = v & _PAGE_PRESENT; 131 *old = v;
131 v &= ~_PAGE_PRESENT; 132 new_pmd = pmd_mknotpresent(*pmd);
132 } else /* presume this has been called with clear==true previously */ 133 } else {
133 v |= *old; 134 /* Presume this has been called with clear==true previously */
134 set_pmd(pmd, __pmd(v)); 135 new_pmd = __pmd(*old);
136 }
137 set_pmd(pmd, new_pmd);
135} 138}
136 139
137static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old) 140static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
138{ 141{
139 pteval_t v = pte_val(*pte); 142 pteval_t v = pte_val(*pte);
140 if (clear) { 143 if (clear) {
141 *old = v & _PAGE_PRESENT; 144 *old = v;
142 v &= ~_PAGE_PRESENT; 145 /* Nothing should care about address */
143 } else /* presume this has been called with clear==true previously */ 146 pte_clear(&init_mm, 0, pte);
144 v |= *old; 147 } else {
145 set_pte_atomic(pte, __pte(v)); 148 /* Presume this has been called with clear==true previously */
149 set_pte_atomic(pte, __pte(*old));
150 }
146} 151}
147 152
148static int clear_page_presence(struct kmmio_fault_page *f, bool clear) 153static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
@@ -434,17 +439,18 @@ int register_kmmio_probe(struct kmmio_probe *p)
434 unsigned long flags; 439 unsigned long flags;
435 int ret = 0; 440 int ret = 0;
436 unsigned long size = 0; 441 unsigned long size = 0;
442 unsigned long addr = p->addr & PAGE_MASK;
437 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); 443 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
438 unsigned int l; 444 unsigned int l;
439 pte_t *pte; 445 pte_t *pte;
440 446
441 spin_lock_irqsave(&kmmio_lock, flags); 447 spin_lock_irqsave(&kmmio_lock, flags);
442 if (get_kmmio_probe(p->addr)) { 448 if (get_kmmio_probe(addr)) {
443 ret = -EEXIST; 449 ret = -EEXIST;
444 goto out; 450 goto out;
445 } 451 }
446 452
447 pte = lookup_address(p->addr, &l); 453 pte = lookup_address(addr, &l);
448 if (!pte) { 454 if (!pte) {
449 ret = -EINVAL; 455 ret = -EINVAL;
450 goto out; 456 goto out;
@@ -453,7 +459,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
453 kmmio_count++; 459 kmmio_count++;
454 list_add_rcu(&p->list, &kmmio_probes); 460 list_add_rcu(&p->list, &kmmio_probes);
455 while (size < size_lim) { 461 while (size < size_lim) {
456 if (add_kmmio_fault_page(p->addr + size)) 462 if (add_kmmio_fault_page(addr + size))
457 pr_err("Unable to set page fault.\n"); 463 pr_err("Unable to set page fault.\n");
458 size += page_level_size(l); 464 size += page_level_size(l);
459 } 465 }
@@ -527,19 +533,20 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
527{ 533{
528 unsigned long flags; 534 unsigned long flags;
529 unsigned long size = 0; 535 unsigned long size = 0;
536 unsigned long addr = p->addr & PAGE_MASK;
530 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); 537 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
531 struct kmmio_fault_page *release_list = NULL; 538 struct kmmio_fault_page *release_list = NULL;
532 struct kmmio_delayed_release *drelease; 539 struct kmmio_delayed_release *drelease;
533 unsigned int l; 540 unsigned int l;
534 pte_t *pte; 541 pte_t *pte;
535 542
536 pte = lookup_address(p->addr, &l); 543 pte = lookup_address(addr, &l);
537 if (!pte) 544 if (!pte)
538 return; 545 return;
539 546
540 spin_lock_irqsave(&kmmio_lock, flags); 547 spin_lock_irqsave(&kmmio_lock, flags);
541 while (size < size_lim) { 548 while (size < size_lim) {
542 release_kmmio_fault_page(p->addr + size, &release_list); 549 release_kmmio_fault_page(addr + size, &release_list);
543 size += page_level_size(l); 550 size += page_level_size(l);
544 } 551 }
545 list_del_rcu(&p->list); 552 list_del_rcu(&p->list);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 307f60ecfc6d..9a055ea279eb 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -121,3 +121,24 @@ const char *arch_vma_name(struct vm_area_struct *vma)
121 return "[mpx]"; 121 return "[mpx]";
122 return NULL; 122 return NULL;
123} 123}
124
125/*
126 * Only allow root to set high MMIO mappings to PROT_NONE.
127 * This prevents an unpriv. user to set them to PROT_NONE and invert
128 * them, then pointing to valid memory for L1TF speculation.
129 *
130 * Note: for locked down kernels may want to disable the root override.
131 */
132bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
133{
134 if (!boot_cpu_has_bug(X86_BUG_L1TF))
135 return true;
136 if (!__pte_needs_invert(pgprot_val(prot)))
137 return true;
138 /* If it's real memory always allow */
139 if (pfn_valid(pfn))
140 return true;
141 if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
142 return false;
143 return true;
144}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index ac9c7797b632..53bd895576cc 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1006,8 +1006,8 @@ static int populate_pmd(struct cpa_data *cpa,
1006 1006
1007 pmd = pmd_offset(pud, start); 1007 pmd = pmd_offset(pud, start);
1008 1008
1009 set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | 1009 set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
1010 massage_pgprot(pmd_pgprot))); 1010 canon_pgprot(pmd_pgprot))));
1011 1011
1012 start += PMD_SIZE; 1012 start += PMD_SIZE;
1013 cpa->pfn += PMD_SIZE; 1013 cpa->pfn += PMD_SIZE;
@@ -1079,8 +1079,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
1079 * Map everything starting from the Gb boundary, possibly with 1G pages 1079 * Map everything starting from the Gb boundary, possibly with 1G pages
1080 */ 1080 */
1081 while (end - start >= PUD_SIZE) { 1081 while (end - start >= PUD_SIZE) {
1082 set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | 1082 set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
1083 massage_pgprot(pud_pgprot))); 1083 canon_pgprot(pud_pgprot))));
1084 1084
1085 start += PUD_SIZE; 1085 start += PUD_SIZE;
1086 cpa->pfn += PUD_SIZE; 1086 cpa->pfn += PUD_SIZE;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index dbc27a2b4ad5..55c7446311a7 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -1,5 +1,6 @@
1#include <linux/mm.h> 1#include <linux/mm.h>
2#include <linux/gfp.h> 2#include <linux/gfp.h>
3#include <linux/hugetlb.h>
3#include <asm/pgalloc.h> 4#include <asm/pgalloc.h>
4#include <asm/pgtable.h> 5#include <asm/pgtable.h>
5#include <asm/tlb.h> 6#include <asm/tlb.h>
@@ -600,6 +601,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
600 (mtrr != MTRR_TYPE_WRBACK)) 601 (mtrr != MTRR_TYPE_WRBACK))
601 return 0; 602 return 0;
602 603
604 /* Bail out if we are we on a populated non-leaf entry: */
605 if (pud_present(*pud) && !pud_huge(*pud))
606 return 0;
607
603 prot = pgprot_4k_2_large(prot); 608 prot = pgprot_4k_2_large(prot);
604 609
605 set_pte((pte_t *)pud, pfn_pte( 610 set_pte((pte_t *)pud, pfn_pte(
@@ -628,6 +633,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
628 return 0; 633 return 0;
629 } 634 }
630 635
636 /* Bail out if we are we on a populated non-leaf entry: */
637 if (pmd_present(*pmd) && !pmd_huge(*pmd))
638 return 0;
639
631 prot = pgprot_4k_2_large(prot); 640 prot = pgprot_4k_2_large(prot);
632 641
633 set_pte((pte_t *)pmd, pfn_pte( 642 set_pte((pte_t *)pmd, pfn_pte(
@@ -666,4 +675,97 @@ int pmd_clear_huge(pmd_t *pmd)
666 675
667 return 0; 676 return 0;
668} 677}
678
679#ifdef CONFIG_X86_64
680/**
681 * pud_free_pmd_page - Clear pud entry and free pmd page.
682 * @pud: Pointer to a PUD.
683 * @addr: Virtual address associated with pud.
684 *
685 * Context: The pud range has been unmapped and TLB purged.
686 * Return: 1 if clearing the entry succeeded. 0 otherwise.
687 *
688 * NOTE: Callers must allow a single page allocation.
689 */
690int pud_free_pmd_page(pud_t *pud, unsigned long addr)
691{
692 pmd_t *pmd, *pmd_sv;
693 pte_t *pte;
694 int i;
695
696 if (pud_none(*pud))
697 return 1;
698
699 pmd = (pmd_t *)pud_page_vaddr(*pud);
700 pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
701 if (!pmd_sv)
702 return 0;
703
704 for (i = 0; i < PTRS_PER_PMD; i++) {
705 pmd_sv[i] = pmd[i];
706 if (!pmd_none(pmd[i]))
707 pmd_clear(&pmd[i]);
708 }
709
710 pud_clear(pud);
711
712 /* INVLPG to clear all paging-structure caches */
713 flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
714
715 for (i = 0; i < PTRS_PER_PMD; i++) {
716 if (!pmd_none(pmd_sv[i])) {
717 pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
718 free_page((unsigned long)pte);
719 }
720 }
721
722 free_page((unsigned long)pmd_sv);
723 free_page((unsigned long)pmd);
724
725 return 1;
726}
727
728/**
729 * pmd_free_pte_page - Clear pmd entry and free pte page.
730 * @pmd: Pointer to a PMD.
731 * @addr: Virtual address associated with pmd.
732 *
733 * Context: The pmd range has been unmapped and TLB purged.
734 * Return: 1 if clearing the entry succeeded. 0 otherwise.
735 */
736int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
737{
738 pte_t *pte;
739
740 if (pmd_none(*pmd))
741 return 1;
742
743 pte = (pte_t *)pmd_page_vaddr(*pmd);
744 pmd_clear(pmd);
745
746 /* INVLPG to clear all paging-structure caches */
747 flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
748
749 free_page((unsigned long)pte);
750
751 return 1;
752}
753
754#else /* !CONFIG_X86_64 */
755
756int pud_free_pmd_page(pud_t *pud, unsigned long addr)
757{
758 return pud_none(*pud);
759}
760
761/*
762 * Disable free page handling on x86-PAE. This assures that ioremap()
763 * does not update sync'd pmd entries. See vmalloc_sync_one().
764 */
765int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
766{
767 return pmd_none(*pmd);
768}
769
770#endif /* CONFIG_X86_64 */
669#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ 771#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 90555bf60aa4..f65a33f505b6 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -4,6 +4,7 @@
4 4
5#include <asm/pgtable.h> 5#include <asm/pgtable.h>
6#include <asm/proto.h> 6#include <asm/proto.h>
7#include <asm/cpufeature.h>
7 8
8static int disable_nx; 9static int disable_nx;
9 10
@@ -31,7 +32,7 @@ early_param("noexec", noexec_setup);
31 32
32void x86_configure_nx(void) 33void x86_configure_nx(void)
33{ 34{
34 if (cpu_has_nx && !disable_nx) 35 if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
35 __supported_pte_mask |= _PAGE_NX; 36 __supported_pte_mask |= _PAGE_NX;
36 else 37 else
37 __supported_pte_mask &= ~_PAGE_NX; 38 __supported_pte_mask &= ~_PAGE_NX;
@@ -39,7 +40,7 @@ void x86_configure_nx(void)
39 40
40void __init x86_report_nx(void) 41void __init x86_report_nx(void)
41{ 42{
42 if (!cpu_has_nx) { 43 if (!boot_cpu_has(X86_FEATURE_NX)) {
43 printk(KERN_NOTICE "Notice: NX (Execute Disable) protection " 44 printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
44 "missing in CPU!\n"); 45 "missing in CPU!\n");
45 } else { 46 } else {
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 7cad01af6dcd..6d683bbb3502 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -10,6 +10,7 @@
10 10
11#include <asm/tlbflush.h> 11#include <asm/tlbflush.h>
12#include <asm/mmu_context.h> 12#include <asm/mmu_context.h>
13#include <asm/nospec-branch.h>
13#include <asm/cache.h> 14#include <asm/cache.h>
14#include <asm/apic.h> 15#include <asm/apic.h>
15#include <asm/uv/uv.h> 16#include <asm/uv/uv.h>
@@ -29,6 +30,8 @@
29 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi 30 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
30 */ 31 */
31 32
33atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
34
32struct flush_tlb_info { 35struct flush_tlb_info {
33 struct mm_struct *flush_mm; 36 struct mm_struct *flush_mm;
34 unsigned long flush_start; 37 unsigned long flush_start;
@@ -104,6 +107,36 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
104 unsigned cpu = smp_processor_id(); 107 unsigned cpu = smp_processor_id();
105 108
106 if (likely(prev != next)) { 109 if (likely(prev != next)) {
110 u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
111
112 /*
113 * Avoid user/user BTB poisoning by flushing the branch
114 * predictor when switching between processes. This stops
115 * one process from doing Spectre-v2 attacks on another.
116 *
117 * As an optimization, flush indirect branches only when
118 * switching into processes that disable dumping. This
119 * protects high value processes like gpg, without having
120 * too high performance overhead. IBPB is *expensive*!
121 *
122 * This will not flush branches when switching into kernel
123 * threads. It will also not flush if we switch to idle
124 * thread and back to the same process. It will flush if we
125 * switch to a different non-dumpable process.
126 */
127 if (tsk && tsk->mm &&
128 tsk->mm->context.ctx_id != last_ctx_id &&
129 get_dumpable(tsk->mm) != SUID_DUMP_USER)
130 indirect_branch_prediction_barrier();
131
132 /*
133 * Record last user mm's context id, so we can avoid
134 * flushing branch buffer with IBPB if we switch back
135 * to the same user.
136 */
137 if (next != &init_mm)
138 this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
139
107 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); 140 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
108 this_cpu_write(cpu_tlbstate.active_mm, next); 141 this_cpu_write(cpu_tlbstate.active_mm, next);
109 cpumask_set_cpu(cpu, mm_cpumask(next)); 142 cpumask_set_cpu(cpu, mm_cpumask(next));
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 75991979f667..dd9a861fd526 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -12,6 +12,7 @@
12#include <linux/filter.h> 12#include <linux/filter.h>
13#include <linux/if_vlan.h> 13#include <linux/if_vlan.h>
14#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
15#include <asm/nospec-branch.h>
15#include <linux/bpf.h> 16#include <linux/bpf.h>
16 17
17int bpf_jit_enable __read_mostly; 18int bpf_jit_enable __read_mostly;
@@ -266,10 +267,10 @@ static void emit_bpf_tail_call(u8 **pprog)
266 /* if (index >= array->map.max_entries) 267 /* if (index >= array->map.max_entries)
267 * goto out; 268 * goto out;
268 */ 269 */
269 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ 270 EMIT2(0x89, 0xD2); /* mov edx, edx */
271 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
270 offsetof(struct bpf_array, map.max_entries)); 272 offsetof(struct bpf_array, map.max_entries));
271 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ 273#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
272#define OFFSET1 47 /* number of bytes to jump */
273 EMIT2(X86_JBE, OFFSET1); /* jbe out */ 274 EMIT2(X86_JBE, OFFSET1); /* jbe out */
274 label1 = cnt; 275 label1 = cnt;
275 276
@@ -278,22 +279,21 @@ static void emit_bpf_tail_call(u8 **pprog)
278 */ 279 */
279 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */ 280 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
280 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 281 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
281#define OFFSET2 36 282#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
282 EMIT2(X86_JA, OFFSET2); /* ja out */ 283 EMIT2(X86_JA, OFFSET2); /* ja out */
283 label2 = cnt; 284 label2 = cnt;
284 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 285 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
285 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */ 286 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
286 287
287 /* prog = array->ptrs[index]; */ 288 /* prog = array->ptrs[index]; */
288 EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */ 289 EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
289 offsetof(struct bpf_array, ptrs)); 290 offsetof(struct bpf_array, ptrs));
290 EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
291 291
292 /* if (prog == NULL) 292 /* if (prog == NULL)
293 * goto out; 293 * goto out;
294 */ 294 */
295 EMIT4(0x48, 0x83, 0xF8, 0x00); /* cmp rax, 0 */ 295 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
296#define OFFSET3 10 296#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
297 EMIT2(X86_JE, OFFSET3); /* je out */ 297 EMIT2(X86_JE, OFFSET3); /* je out */
298 label3 = cnt; 298 label3 = cnt;
299 299
@@ -306,7 +306,7 @@ static void emit_bpf_tail_call(u8 **pprog)
306 * rdi == ctx (1st arg) 306 * rdi == ctx (1st arg)
307 * rax == prog->bpf_func + prologue_size 307 * rax == prog->bpf_func + prologue_size
308 */ 308 */
309 EMIT2(0xFF, 0xE0); /* jmp rax */ 309 RETPOLINE_RAX_BPF_JIT();
310 310
311 /* out: */ 311 /* out: */
312 BUILD_BUG_ON(cnt - label1 != OFFSET1); 312 BUILD_BUG_ON(cnt - label1 != OFFSET1);
@@ -1077,7 +1077,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
1077 * may converge on the last pass. In such case do one more 1077 * may converge on the last pass. In such case do one more
1078 * pass to emit the final image 1078 * pass to emit the final image
1079 */ 1079 */
1080 for (pass = 0; pass < 10 || image; pass++) { 1080 for (pass = 0; pass < 20 || image; pass++) {
1081 proglen = do_jit(prog, addrs, image, oldproglen, &ctx); 1081 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
1082 if (proglen <= 0) { 1082 if (proglen <= 0) {
1083 image = NULL; 1083 image = NULL;
@@ -1100,6 +1100,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
1100 goto out; 1100 goto out;
1101 } 1101 }
1102 oldproglen = proglen; 1102 oldproglen = proglen;
1103 cond_resched();
1103 } 1104 }
1104 1105
1105 if (bpf_jit_enable > 1) 1106 if (bpf_jit_enable > 1)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 1d2e6392f5fa..f24bd7249536 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -471,7 +471,7 @@ static int nmi_setup(void)
471 goto fail; 471 goto fail;
472 472
473 for_each_possible_cpu(cpu) { 473 for_each_possible_cpu(cpu) {
474 if (!cpu) 474 if (!IS_ENABLED(CONFIG_SMP) || !cpu)
475 continue; 475 continue;
476 476
477 memcpy(per_cpu(cpu_msrs, cpu).counters, 477 memcpy(per_cpu(cpu_msrs, cpu).counters,
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 50d86c0e9ba4..660a83c8287b 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -24,7 +24,6 @@
24#include <asm/nmi.h> 24#include <asm/nmi.h>
25#include <asm/apic.h> 25#include <asm/apic.h>
26#include <asm/processor.h> 26#include <asm/processor.h>
27#include <asm/cpufeature.h>
28 27
29#include "op_x86_model.h" 28#include "op_x86_model.h"
30#include "op_counter.h" 29#include "op_counter.h"
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index a0ac0f9c307f..f5a8cd96bae4 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -40,6 +40,7 @@
40#include <asm/fixmap.h> 40#include <asm/fixmap.h>
41#include <asm/realmode.h> 41#include <asm/realmode.h>
42#include <asm/time.h> 42#include <asm/time.h>
43#include <asm/nospec-branch.h>
43 44
44/* 45/*
45 * We allocate runtime services regions bottom-up, starting from -4G, i.e. 46 * We allocate runtime services regions bottom-up, starting from -4G, i.e.
@@ -347,6 +348,7 @@ extern efi_status_t efi64_thunk(u32, ...);
347 \ 348 \
348 efi_sync_low_kernel_mappings(); \ 349 efi_sync_low_kernel_mappings(); \
349 local_irq_save(flags); \ 350 local_irq_save(flags); \
351 firmware_restrict_branch_speculation_start(); \
350 \ 352 \
351 efi_scratch.prev_cr3 = read_cr3(); \ 353 efi_scratch.prev_cr3 = read_cr3(); \
352 write_cr3((unsigned long)efi_scratch.efi_pgt); \ 354 write_cr3((unsigned long)efi_scratch.efi_pgt); \
@@ -357,6 +359,7 @@ extern efi_status_t efi64_thunk(u32, ...);
357 \ 359 \
358 write_cr3(efi_scratch.prev_cr3); \ 360 write_cr3(efi_scratch.prev_cr3); \
359 __flush_tlb_all(); \ 361 __flush_tlb_all(); \
362 firmware_restrict_branch_speculation_end(); \
360 local_irq_restore(flags); \ 363 local_irq_restore(flags); \
361 \ 364 \
362 __s; \ 365 __s; \
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 55130846ac87..c0533fbc39e3 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -196,6 +196,7 @@ static int xo15_sci_remove(struct acpi_device *device)
196 return 0; 196 return 0;
197} 197}
198 198
199#ifdef CONFIG_PM_SLEEP
199static int xo15_sci_resume(struct device *dev) 200static int xo15_sci_resume(struct device *dev)
200{ 201{
201 /* Enable all EC events */ 202 /* Enable all EC events */
@@ -207,6 +208,7 @@ static int xo15_sci_resume(struct device *dev)
207 208
208 return 0; 209 return 0;
209} 210}
211#endif
210 212
211static SIMPLE_DEV_PM_OPS(xo15_sci_pm, NULL, xo15_sci_resume); 213static SIMPLE_DEV_PM_OPS(xo15_sci_pm, NULL, xo15_sci_resume);
212 214
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index 291226b952a9..77ac4e4deb16 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -142,7 +142,7 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
142#endif 142#endif
143} 143}
144 144
145int swsusp_arch_resume(void) 145asmlinkage int swsusp_arch_resume(void)
146{ 146{
147 int error; 147 int error;
148 148
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 009947d419a6..0e0c773edffc 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -78,7 +78,7 @@ static int set_up_temporary_mappings(void)
78 return 0; 78 return 0;
79} 79}
80 80
81int swsusp_arch_resume(void) 81asmlinkage int swsusp_arch_resume(void)
82{ 82{
83 int error; 83 int error;
84 84
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 73eb7fd4aec4..5b6c8486a0be 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -769,9 +769,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
769 break; 769 break;
770 770
771 case R_X86_64_PC32: 771 case R_X86_64_PC32:
772 case R_X86_64_PLT32:
772 /* 773 /*
773 * PC relative relocations don't need to be adjusted unless 774 * PC relative relocations don't need to be adjusted unless
774 * referencing a percpu symbol. 775 * referencing a percpu symbol.
776 *
777 * NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32.
775 */ 778 */
776 if (is_percpu_sym(sym, symname)) 779 if (is_percpu_sym(sym, symname))
777 add_reloc(&relocs32neg, offset); 780 add_reloc(&relocs32neg, offset);
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 755481f14d90..764ac2fc53fe 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -3,7 +3,7 @@
3 3
4#include <asm/asm.h> 4#include <asm/asm.h>
5#include <asm/segment.h> 5#include <asm/segment.h>
6#include <asm/cpufeature.h> 6#include <asm/cpufeatures.h>
7#include <asm/cmpxchg.h> 7#include <asm/cmpxchg.h>
8#include <asm/nops.h> 8#include <asm/nops.h>
9 9
diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
index 1518d2805ae8..fd6825537b97 100644
--- a/arch/x86/um/stub_segv.c
+++ b/arch/x86/um/stub_segv.c
@@ -10,7 +10,7 @@
10void __attribute__ ((__section__ (".__syscall_stub"))) 10void __attribute__ ((__section__ (".__syscall_stub")))
11stub_segv_handler(int sig, siginfo_t *info, void *p) 11stub_segv_handler(int sig, siginfo_t *info, void *p)
12{ 12{
13 struct ucontext *uc = p; 13 ucontext_t *uc = p;
14 14
15 GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA), 15 GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
16 &uc->uc_mcontext); 16 &uc->uc_mcontext);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index cbef64b508e1..82fd84d5e1aa 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -460,6 +460,12 @@ static void __init xen_init_cpuid_mask(void)
460 cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32)); 460 cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
461} 461}
462 462
463static void __init xen_init_capabilities(void)
464{
465 if (xen_pv_domain())
466 setup_force_cpu_cap(X86_FEATURE_XENPV);
467}
468
463static void xen_set_debugreg(int reg, unsigned long val) 469static void xen_set_debugreg(int reg, unsigned long val)
464{ 470{
465 HYPERVISOR_set_debugreg(reg, val); 471 HYPERVISOR_set_debugreg(reg, val);
@@ -1587,6 +1593,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
1587 1593
1588 xen_init_irq_ops(); 1594 xen_init_irq_ops();
1589 xen_init_cpuid_mask(); 1595 xen_init_cpuid_mask();
1596 xen_init_capabilities();
1590 1597
1591#ifdef CONFIG_X86_LOCAL_APIC 1598#ifdef CONFIG_X86_LOCAL_APIC
1592 /* 1599 /*
@@ -1883,14 +1890,6 @@ bool xen_hvm_need_lapic(void)
1883} 1890}
1884EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); 1891EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
1885 1892
1886static void xen_set_cpu_features(struct cpuinfo_x86 *c)
1887{
1888 if (xen_pv_domain()) {
1889 clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
1890 set_cpu_cap(c, X86_FEATURE_XENPV);
1891 }
1892}
1893
1894const struct hypervisor_x86 x86_hyper_xen = { 1893const struct hypervisor_x86 x86_hyper_xen = {
1895 .name = "Xen", 1894 .name = "Xen",
1896 .detect = xen_platform, 1895 .detect = xen_platform,
@@ -1898,7 +1897,6 @@ const struct hypervisor_x86 x86_hyper_xen = {
1898 .init_platform = xen_hvm_guest_init, 1897 .init_platform = xen_hvm_guest_init,
1899#endif 1898#endif
1900 .x2apic_available = xen_x2apic_para_available, 1899 .x2apic_available = xen_x2apic_para_available,
1901 .set_cpu_features = xen_set_cpu_features,
1902}; 1900};
1903EXPORT_SYMBOL(x86_hyper_xen); 1901EXPORT_SYMBOL(x86_hyper_xen);
1904 1902
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 63146c378f1e..2b05f681a1fd 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1316,8 +1316,6 @@ void xen_flush_tlb_all(void)
1316 struct mmuext_op *op; 1316 struct mmuext_op *op;
1317 struct multicall_space mcs; 1317 struct multicall_space mcs;
1318 1318
1319 trace_xen_mmu_flush_tlb_all(0);
1320
1321 preempt_disable(); 1319 preempt_disable();
1322 1320
1323 mcs = xen_mc_entry(sizeof(*op)); 1321 mcs = xen_mc_entry(sizeof(*op));
@@ -1335,8 +1333,6 @@ static void xen_flush_tlb(void)
1335 struct mmuext_op *op; 1333 struct mmuext_op *op;
1336 struct multicall_space mcs; 1334 struct multicall_space mcs;
1337 1335
1338 trace_xen_mmu_flush_tlb(0);
1339
1340 preempt_disable(); 1336 preempt_disable();
1341 1337
1342 mcs = xen_mc_entry(sizeof(*op)); 1338 mcs = xen_mc_entry(sizeof(*op));
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3f4ebf0261f2..29e50d1229bc 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -28,6 +28,7 @@
28#include <xen/interface/vcpu.h> 28#include <xen/interface/vcpu.h>
29#include <xen/interface/xenpmu.h> 29#include <xen/interface/xenpmu.h>
30 30
31#include <asm/spec-ctrl.h>
31#include <asm/xen/interface.h> 32#include <asm/xen/interface.h>
32#include <asm/xen/hypercall.h> 33#include <asm/xen/hypercall.h>
33 34
@@ -87,6 +88,8 @@ static void cpu_bringup(void)
87 cpu_data(cpu).x86_max_cores = 1; 88 cpu_data(cpu).x86_max_cores = 1;
88 set_cpu_sibling_map(cpu); 89 set_cpu_sibling_map(cpu);
89 90
91 speculative_store_bypass_ht_init();
92
90 xen_setup_cpu_clockevents(); 93 xen_setup_cpu_clockevents();
91 94
92 notify_cpu_starting(cpu); 95 notify_cpu_starting(cpu);
@@ -357,6 +360,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
357 } 360 }
358 set_cpu_sibling_map(0); 361 set_cpu_sibling_map(0);
359 362
363 speculative_store_bypass_ht_init();
364
360 xen_pmu_init(0); 365 xen_pmu_init(0);
361 366
362 if (xen_smp_intr_init(0)) 367 if (xen_smp_intr_init(0))
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 7f664c416faf..4ecd0de08557 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -1,11 +1,14 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/tick.h> 2#include <linux/tick.h>
3#include <linux/percpu-defs.h>
3 4
4#include <xen/xen.h> 5#include <xen/xen.h>
5#include <xen/interface/xen.h> 6#include <xen/interface/xen.h>
6#include <xen/grant_table.h> 7#include <xen/grant_table.h>
7#include <xen/events.h> 8#include <xen/events.h>
8 9
10#include <asm/cpufeatures.h>
11#include <asm/msr-index.h>
9#include <asm/xen/hypercall.h> 12#include <asm/xen/hypercall.h>
10#include <asm/xen/page.h> 13#include <asm/xen/page.h>
11#include <asm/fixmap.h> 14#include <asm/fixmap.h>
@@ -68,6 +71,8 @@ static void xen_pv_post_suspend(int suspend_cancelled)
68 xen_mm_unpin_all(); 71 xen_mm_unpin_all();
69} 72}
70 73
74static DEFINE_PER_CPU(u64, spec_ctrl);
75
71void xen_arch_pre_suspend(void) 76void xen_arch_pre_suspend(void)
72{ 77{
73 if (xen_pv_domain()) 78 if (xen_pv_domain())
@@ -84,6 +89,9 @@ void xen_arch_post_suspend(int cancelled)
84 89
85static void xen_vcpu_notify_restore(void *data) 90static void xen_vcpu_notify_restore(void *data)
86{ 91{
92 if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
93 wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
94
87 /* Boot processor notified via generic timekeeping_resume() */ 95 /* Boot processor notified via generic timekeeping_resume() */
88 if (smp_processor_id() == 0) 96 if (smp_processor_id() == 0)
89 return; 97 return;
@@ -93,7 +101,15 @@ static void xen_vcpu_notify_restore(void *data)
93 101
94static void xen_vcpu_notify_suspend(void *data) 102static void xen_vcpu_notify_suspend(void *data)
95{ 103{
104 u64 tmp;
105
96 tick_suspend_local(); 106 tick_suspend_local();
107
108 if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
109 rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
110 this_cpu_write(spec_ctrl, tmp);
111 wrmsrl(MSR_IA32_SPEC_CTRL, 0);
112 }
97} 113}
98 114
99void xen_arch_resume(void) 115void xen_arch_resume(void)
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index b39531babec0..5bfbc1c401d4 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -44,18 +44,10 @@
44 : "r" (uaddr), "I" (-EFAULT), "r" (oparg) \ 44 : "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
45 : "memory") 45 : "memory")
46 46
47static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 47static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
48 u32 __user *uaddr)
48{ 49{
49 int op = (encoded_op >> 28) & 7;
50 int cmp = (encoded_op >> 24) & 15;
51 int oparg = (encoded_op << 8) >> 20;
52 int cmparg = (encoded_op << 20) >> 20;
53 int oldval = 0, ret; 50 int oldval = 0, ret;
54 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
55 oparg = 1 << oparg;
56
57 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
58 return -EFAULT;
59 51
60#if !XCHAL_HAVE_S32C1I 52#if !XCHAL_HAVE_S32C1I
61 return -ENOSYS; 53 return -ENOSYS;
@@ -89,19 +81,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
89 81
90 pagefault_enable(); 82 pagefault_enable();
91 83
92 if (ret) 84 if (!ret)
93 return ret; 85 *oval = oldval;
94 86
95 switch (cmp) { 87 return ret;
96 case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
97 case FUTEX_OP_CMP_NE: return (oldval != cmparg);
98 case FUTEX_OP_CMP_LT: return (oldval < cmparg);
99 case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
100 case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
101 case FUTEX_OP_CMP_GT: return (oldval > cmparg);
102 }
103
104 return -ENOSYS;
105} 88}
106 89
107static inline int 90static inline int
@@ -109,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
109 u32 oldval, u32 newval) 92 u32 oldval, u32 newval)
110{ 93{
111 int ret = 0; 94 int ret = 0;
112 u32 prev;
113 95
114 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 96 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
115 return -EFAULT; 97 return -EFAULT;
@@ -120,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
120 102
121 __asm__ __volatile__ ( 103 __asm__ __volatile__ (
122 " # futex_atomic_cmpxchg_inatomic\n" 104 " # futex_atomic_cmpxchg_inatomic\n"
123 "1: l32i %1, %3, 0\n" 105 " wsr %5, scompare1\n"
124 " mov %0, %5\n" 106 "1: s32c1i %1, %4, 0\n"
125 " wsr %1, scompare1\n" 107 " s32i %1, %6, 0\n"
126 "2: s32c1i %0, %3, 0\n" 108 "2:\n"
127 "3:\n"
128 " .section .fixup,\"ax\"\n" 109 " .section .fixup,\"ax\"\n"
129 " .align 4\n" 110 " .align 4\n"
130 "4: .long 3b\n" 111 "3: .long 2b\n"
131 "5: l32r %1, 4b\n" 112 "4: l32r %1, 3b\n"
132 " movi %0, %6\n" 113 " movi %0, %7\n"
133 " jx %1\n" 114 " jx %1\n"
134 " .previous\n" 115 " .previous\n"
135 " .section __ex_table,\"a\"\n" 116 " .section __ex_table,\"a\"\n"
136 " .long 1b,5b,2b,5b\n" 117 " .long 1b,4b\n"
137 " .previous\n" 118 " .previous\n"
138 : "+r" (ret), "=&r" (prev), "+m" (*uaddr) 119 : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
139 : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) 120 : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
140 : "memory"); 121 : "memory");
141 122
142 *uval = prev;
143 return ret; 123 return ret;
144} 124}
145 125
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 42d441f7898b..1edce040f470 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -309,7 +309,7 @@ do_unaligned_user (struct pt_regs *regs)
309 info.si_errno = 0; 309 info.si_errno = 0;
310 info.si_code = BUS_ADRALN; 310 info.si_code = BUS_ADRALN;
311 info.si_addr = (void *) regs->excvaddr; 311 info.si_addr = (void *) regs->excvaddr;
312 force_sig_info(SIGSEGV, &info, current); 312 force_sig_info(SIGBUS, &info, current);
313 313
314} 314}
315#endif 315#endif
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index f6325d573c10..6e091ccadcd4 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio)
175 if (!bio_is_rw(bio)) 175 if (!bio_is_rw(bio))
176 return false; 176 return false;
177 177
178 if (!bio_sectors(bio))
179 return false;
180
178 /* Already protected? */ 181 /* Already protected? */
179 if (bio_integrity(bio)) 182 if (bio_integrity(bio))
180 return false; 183 return false;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 8161090a1970..46ba2402c8f9 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1078,10 +1078,8 @@ int blkcg_init_queue(struct request_queue *q)
1078 if (preloaded) 1078 if (preloaded)
1079 radix_tree_preload_end(); 1079 radix_tree_preload_end();
1080 1080
1081 if (IS_ERR(blkg)) { 1081 if (IS_ERR(blkg))
1082 blkg_free(new_blkg);
1083 return PTR_ERR(blkg); 1082 return PTR_ERR(blkg);
1084 }
1085 1083
1086 q->root_blkg = blkg; 1084 q->root_blkg = blkg;
1087 q->root_rl.blkg = blkg; 1085 q->root_rl.blkg = blkg;
diff --git a/block/blk-core.c b/block/blk-core.c
index f5f1a55703ae..50d77c90070d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -651,21 +651,17 @@ EXPORT_SYMBOL(blk_alloc_queue);
651int blk_queue_enter(struct request_queue *q, gfp_t gfp) 651int blk_queue_enter(struct request_queue *q, gfp_t gfp)
652{ 652{
653 while (true) { 653 while (true) {
654 int ret;
655
656 if (percpu_ref_tryget_live(&q->q_usage_counter)) 654 if (percpu_ref_tryget_live(&q->q_usage_counter))
657 return 0; 655 return 0;
658 656
659 if (!gfpflags_allow_blocking(gfp)) 657 if (!gfpflags_allow_blocking(gfp))
660 return -EBUSY; 658 return -EBUSY;
661 659
662 ret = wait_event_interruptible(q->mq_freeze_wq, 660 wait_event(q->mq_freeze_wq,
663 !atomic_read(&q->mq_freeze_depth) || 661 !atomic_read(&q->mq_freeze_depth) ||
664 blk_queue_dying(q)); 662 blk_queue_dying(q));
665 if (blk_queue_dying(q)) 663 if (blk_queue_dying(q))
666 return -ENODEV; 664 return -ENODEV;
667 if (ret)
668 return ret;
669 } 665 }
670} 666}
671 667
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0d1af3e44efb..8649dbf06ce4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1252,13 +1252,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1252 1252
1253 blk_queue_bounce(q, &bio); 1253 blk_queue_bounce(q, &bio);
1254 1254
1255 blk_queue_split(q, &bio, q->bio_split);
1256
1255 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1257 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1256 bio_io_error(bio); 1258 bio_io_error(bio);
1257 return BLK_QC_T_NONE; 1259 return BLK_QC_T_NONE;
1258 } 1260 }
1259 1261
1260 blk_queue_split(q, &bio, q->bio_split);
1261
1262 if (!is_flush_fua && !blk_queue_nomerges(q) && 1262 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1263 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) 1263 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1264 return BLK_QC_T_NONE; 1264 return BLK_QC_T_NONE;
@@ -1634,7 +1634,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
1634{ 1634{
1635 unsigned flush_start_tag = set->queue_depth; 1635 unsigned flush_start_tag = set->queue_depth;
1636 1636
1637 blk_mq_tag_idle(hctx); 1637 if (blk_mq_hw_queue_mapped(hctx))
1638 blk_mq_tag_idle(hctx);
1638 1639
1639 if (set->ops->exit_request) 1640 if (set->ops->exit_request)
1640 set->ops->exit_request(set->driver_data, 1641 set->ops->exit_request(set->driver_data,
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2149a1ddbacf..17bdd6b55beb 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -505,6 +505,17 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
505static void throtl_schedule_pending_timer(struct throtl_service_queue *sq, 505static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
506 unsigned long expires) 506 unsigned long expires)
507{ 507{
508 unsigned long max_expire = jiffies + 8 * throtl_slice;
509
510 /*
511 * Since we are adjusting the throttle limit dynamically, the sleep
512 * time calculated according to previous limit might be invalid. It's
513 * possible the cgroup sleep time is very long and no other cgroups
514 * have IO running so notify the limit changes. Make sure the cgroup
515 * doesn't sleep too long to avoid the missed notification.
516 */
517 if (time_after(expires, max_expire))
518 expires = max_expire;
508 mod_timer(&sq->pending_timer, expires); 519 mod_timer(&sq->pending_timer, expires);
509 throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu", 520 throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
510 expires - jiffies, jiffies); 521 expires - jiffies, jiffies);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 3c062699f28b..29521753fb23 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -309,8 +309,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
309 309
310 if (info) { 310 if (info) {
311 struct partition_meta_info *pinfo = alloc_part_info(disk); 311 struct partition_meta_info *pinfo = alloc_part_info(disk);
312 if (!pinfo) 312 if (!pinfo) {
313 err = -ENOMEM;
313 goto out_free_stats; 314 goto out_free_stats;
315 }
314 memcpy(pinfo, info, sizeof(*info)); 316 memcpy(pinfo, info, sizeof(*info));
315 p->info = pinfo; 317 p->info = pinfo;
316 } 318 }
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 5610cd537da7..7d8d50c11ce7 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -300,7 +300,9 @@ static void parse_bsd(struct parsed_partitions *state,
300 continue; 300 continue;
301 bsd_start = le32_to_cpu(p->p_offset); 301 bsd_start = le32_to_cpu(p->p_offset);
302 bsd_size = le32_to_cpu(p->p_size); 302 bsd_size = le32_to_cpu(p->p_size);
303 if (memcmp(flavour, "bsd\0", 4) == 0) 303 /* FreeBSD has relative offset if C partition offset is zero */
304 if (memcmp(flavour, "bsd\0", 4) == 0 &&
305 le32_to_cpu(l->d_partitions[2].p_offset) == 0)
304 bsd_start += offset; 306 bsd_start += offset;
305 if (offset == bsd_start && size == bsd_size) 307 if (offset == bsd_start && size == bsd_size)
306 /* full parent partition, we have it already */ 308 /* full parent partition, we have it already */
diff --git a/certs/Makefile b/certs/Makefile
index 28ac694dd11a..2773c4afa24c 100644
--- a/certs/Makefile
+++ b/certs/Makefile
@@ -36,29 +36,34 @@ ifndef CONFIG_MODULE_SIG_HASH
36$(error Could not determine digest type to use from kernel config) 36$(error Could not determine digest type to use from kernel config)
37endif 37endif
38 38
39redirect_openssl = 2>&1
40quiet_redirect_openssl = 2>&1
41silent_redirect_openssl = 2>/dev/null
42
39# We do it this way rather than having a boolean option for enabling an 43# We do it this way rather than having a boolean option for enabling an
40# external private key, because 'make randconfig' might enable such a 44# external private key, because 'make randconfig' might enable such a
41# boolean option and we unfortunately can't make it depend on !RANDCONFIG. 45# boolean option and we unfortunately can't make it depend on !RANDCONFIG.
42ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem") 46ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
43$(obj)/signing_key.pem: $(obj)/x509.genkey 47$(obj)/signing_key.pem: $(obj)/x509.genkey
44 @echo "###" 48 @$(kecho) "###"
45 @echo "### Now generating an X.509 key pair to be used for signing modules." 49 @$(kecho) "### Now generating an X.509 key pair to be used for signing modules."
46 @echo "###" 50 @$(kecho) "###"
47 @echo "### If this takes a long time, you might wish to run rngd in the" 51 @$(kecho) "### If this takes a long time, you might wish to run rngd in the"
48 @echo "### background to keep the supply of entropy topped up. It" 52 @$(kecho) "### background to keep the supply of entropy topped up. It"
49 @echo "### needs to be run as root, and uses a hardware random" 53 @$(kecho) "### needs to be run as root, and uses a hardware random"
50 @echo "### number generator if one is available." 54 @$(kecho) "### number generator if one is available."
51 @echo "###" 55 @$(kecho) "###"
52 openssl req -new -nodes -utf8 -$(CONFIG_MODULE_SIG_HASH) -days 36500 \ 56 $(Q)openssl req -new -nodes -utf8 -$(CONFIG_MODULE_SIG_HASH) -days 36500 \
53 -batch -x509 -config $(obj)/x509.genkey \ 57 -batch -x509 -config $(obj)/x509.genkey \
54 -outform PEM -out $(obj)/signing_key.pem \ 58 -outform PEM -out $(obj)/signing_key.pem \
55 -keyout $(obj)/signing_key.pem 2>&1 59 -keyout $(obj)/signing_key.pem \
56 @echo "###" 60 $($(quiet)redirect_openssl)
57 @echo "### Key pair generated." 61 @$(kecho) "###"
58 @echo "###" 62 @$(kecho) "### Key pair generated."
63 @$(kecho) "###"
59 64
60$(obj)/x509.genkey: 65$(obj)/x509.genkey:
61 @echo Generating X.509 key generation config 66 @$(kecho) Generating X.509 key generation config
62 @echo >$@ "[ req ]" 67 @echo >$@ "[ req ]"
63 @echo >>$@ "default_bits = 4096" 68 @echo >>$@ "default_bits = 4096"
64 @echo >>$@ "distinguished_name = req_distinguished_name" 69 @echo >>$@ "distinguished_name = req_distinguished_name"
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index e5b5721809e2..149e7a7f04fe 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -73,11 +73,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
73 return max(start, end_page); 73 return max(start, end_page);
74} 74}
75 75
76static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, 76static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
77 unsigned int bsize) 77 unsigned int n)
78{ 78{
79 unsigned int n = bsize;
80
81 for (;;) { 79 for (;;) {
82 unsigned int len_this_page = scatterwalk_pagelen(&walk->out); 80 unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
83 81
@@ -89,17 +87,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
89 n -= len_this_page; 87 n -= len_this_page;
90 scatterwalk_start(&walk->out, sg_next(walk->out.sg)); 88 scatterwalk_start(&walk->out, sg_next(walk->out.sg));
91 } 89 }
92
93 return bsize;
94} 90}
95 91
96static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk, 92static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
97 unsigned int n) 93 unsigned int n)
98{ 94{
99 scatterwalk_advance(&walk->in, n); 95 scatterwalk_advance(&walk->in, n);
100 scatterwalk_advance(&walk->out, n); 96 scatterwalk_advance(&walk->out, n);
101
102 return n;
103} 97}
104 98
105static int ablkcipher_walk_next(struct ablkcipher_request *req, 99static int ablkcipher_walk_next(struct ablkcipher_request *req,
@@ -109,39 +103,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
109 struct ablkcipher_walk *walk, int err) 103 struct ablkcipher_walk *walk, int err)
110{ 104{
111 struct crypto_tfm *tfm = req->base.tfm; 105 struct crypto_tfm *tfm = req->base.tfm;
112 unsigned int nbytes = 0; 106 unsigned int n; /* bytes processed */
107 bool more;
113 108
114 if (likely(err >= 0)) { 109 if (unlikely(err < 0))
115 unsigned int n = walk->nbytes - err; 110 goto finish;
116 111
117 if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) 112 n = walk->nbytes - err;
118 n = ablkcipher_done_fast(walk, n); 113 walk->total -= n;
119 else if (WARN_ON(err)) { 114 more = (walk->total != 0);
120 err = -EINVAL;
121 goto err;
122 } else
123 n = ablkcipher_done_slow(walk, n);
124 115
125 nbytes = walk->total - n; 116 if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
126 err = 0; 117 ablkcipher_done_fast(walk, n);
118 } else {
119 if (WARN_ON(err)) {
120 /* unexpected case; didn't process all bytes */
121 err = -EINVAL;
122 goto finish;
123 }
124 ablkcipher_done_slow(walk, n);
127 } 125 }
128 126
129 scatterwalk_done(&walk->in, 0, nbytes); 127 scatterwalk_done(&walk->in, 0, more);
130 scatterwalk_done(&walk->out, 1, nbytes); 128 scatterwalk_done(&walk->out, 1, more);
131
132err:
133 walk->total = nbytes;
134 walk->nbytes = nbytes;
135 129
136 if (nbytes) { 130 if (more) {
137 crypto_yield(req->base.flags); 131 crypto_yield(req->base.flags);
138 return ablkcipher_walk_next(req, walk); 132 return ablkcipher_walk_next(req, walk);
139 } 133 }
140 134 err = 0;
135finish:
136 walk->nbytes = 0;
141 if (walk->iv != req->info) 137 if (walk->iv != req->info)
142 memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize); 138 memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
143 kfree(walk->iv_buffer); 139 kfree(walk->iv_buffer);
144
145 return err; 140 return err;
146} 141}
147EXPORT_SYMBOL_GPL(ablkcipher_walk_done); 142EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index f5e18c2a4852..b5953f1d1a18 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent);
149 149
150static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 150static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
151{ 151{
152 const u32 forbidden = CRYPTO_ALG_INTERNAL; 152 const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
153 struct sock *sk = sock->sk; 153 struct sock *sk = sock->sk;
154 struct alg_sock *ask = alg_sk(sk); 154 struct alg_sock *ask = alg_sk(sk);
155 struct sockaddr_alg *sa = (void *)uaddr; 155 struct sockaddr_alg *sa = (void *)uaddr;
@@ -163,6 +163,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
163 if (addr_len != sizeof(*sa)) 163 if (addr_len != sizeof(*sa))
164 return -EINVAL; 164 return -EINVAL;
165 165
166 /* If caller uses non-allowed flag, return error. */
167 if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
168 return -EINVAL;
169
166 sa->salg_type[sizeof(sa->salg_type) - 1] = 0; 170 sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
167 sa->salg_name[sizeof(sa->salg_name) - 1] = 0; 171 sa->salg_name[sizeof(sa->salg_name) - 1] = 0;
168 172
@@ -175,9 +179,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
175 if (IS_ERR(type)) 179 if (IS_ERR(type))
176 return PTR_ERR(type); 180 return PTR_ERR(type);
177 181
178 private = type->bind(sa->salg_name, 182 private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
179 sa->salg_feat & ~forbidden,
180 sa->salg_mask & ~forbidden);
181 if (IS_ERR(private)) { 183 if (IS_ERR(private)) {
182 module_put(type->owner); 184 module_put(type->owner);
183 return PTR_ERR(private); 185 return PTR_ERR(private);
diff --git a/crypto/ahash.c b/crypto/ahash.c
index f9caf0f74199..6978ad86e516 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -91,13 +91,14 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
91 91
92 if (nbytes && walk->offset & alignmask && !err) { 92 if (nbytes && walk->offset & alignmask && !err) {
93 walk->offset = ALIGN(walk->offset, alignmask + 1); 93 walk->offset = ALIGN(walk->offset, alignmask + 1);
94 walk->data += walk->offset;
95
96 nbytes = min(nbytes, 94 nbytes = min(nbytes,
97 ((unsigned int)(PAGE_SIZE)) - walk->offset); 95 ((unsigned int)(PAGE_SIZE)) - walk->offset);
98 walk->entrylen -= nbytes; 96 walk->entrylen -= nbytes;
99 97
100 return nbytes; 98 if (nbytes) {
99 walk->data += walk->offset;
100 return nbytes;
101 }
101 } 102 }
102 103
103 if (walk->flags & CRYPTO_ALG_ASYNC) 104 if (walk->flags & CRYPTO_ALG_ASYNC)
@@ -637,5 +638,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
637} 638}
638EXPORT_SYMBOL_GPL(ahash_attr_alg); 639EXPORT_SYMBOL_GPL(ahash_attr_alg);
639 640
641bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
642{
643 struct crypto_alg *alg = &halg->base;
644
645 if (alg->cra_type != &crypto_ahash_type)
646 return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
647
648 return __crypto_ahash_alg(alg)->setkey != NULL;
649}
650EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
651
640MODULE_LICENSE("GPL"); 652MODULE_LICENSE("GPL");
641MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); 653MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 84f8d4d8b6bc..09f706b7b06e 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
62 dma_addr_t dma_dest[2]; 62 dma_addr_t dma_dest[2];
63 int src_off = 0; 63 int src_off = 0;
64 64
65 if (submit->flags & ASYNC_TX_FENCE)
66 dma_flags |= DMA_PREP_FENCE;
67
68 while (src_cnt > 0) { 65 while (src_cnt > 0) {
69 submit->flags = flags_orig; 66 submit->flags = flags_orig;
70 pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags)); 67 pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
@@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan,
83 if (cb_fn_orig) 80 if (cb_fn_orig)
84 dma_flags |= DMA_PREP_INTERRUPT; 81 dma_flags |= DMA_PREP_INTERRUPT;
85 } 82 }
83 if (submit->flags & ASYNC_TX_FENCE)
84 dma_flags |= DMA_PREP_FENCE;
86 85
87 /* Drivers force forward progress in case they can not provide 86 /* Drivers force forward progress in case they can not provide
88 * a descriptor 87 * a descriptor
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 55a354d57251..b7290c5b1eaa 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -108,6 +108,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
108 CRYPTO_TFM_RES_MASK); 108 CRYPTO_TFM_RES_MASK);
109 109
110out: 110out:
111 memzero_explicit(&keys, sizeof(keys));
111 return err; 112 return err;
112 113
113badkey: 114badkey:
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 52154ef21b5e..fa0c4567f697 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -90,6 +90,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
90 CRYPTO_TFM_RES_MASK); 90 CRYPTO_TFM_RES_MASK);
91 91
92out: 92out:
93 memzero_explicit(&keys, sizeof(keys));
93 return err; 94 return err;
94 95
95badkey: 96badkey:
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index dca7bc87dad9..2d08e59b3212 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -71,19 +71,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
71 return max(start, end_page); 71 return max(start, end_page);
72} 72}
73 73
74static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk, 74static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
75 unsigned int bsize) 75 unsigned int bsize)
76{ 76{
77 u8 *addr; 77 u8 *addr;
78 78
79 addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); 79 addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
80 addr = blkcipher_get_spot(addr, bsize); 80 addr = blkcipher_get_spot(addr, bsize);
81 scatterwalk_copychunks(addr, &walk->out, bsize, 1); 81 scatterwalk_copychunks(addr, &walk->out, bsize, 1);
82 return bsize;
83} 82}
84 83
85static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, 84static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
86 unsigned int n) 85 unsigned int n)
87{ 86{
88 if (walk->flags & BLKCIPHER_WALK_COPY) { 87 if (walk->flags & BLKCIPHER_WALK_COPY) {
89 blkcipher_map_dst(walk); 88 blkcipher_map_dst(walk);
@@ -97,49 +96,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
97 96
98 scatterwalk_advance(&walk->in, n); 97 scatterwalk_advance(&walk->in, n);
99 scatterwalk_advance(&walk->out, n); 98 scatterwalk_advance(&walk->out, n);
100
101 return n;
102} 99}
103 100
104int blkcipher_walk_done(struct blkcipher_desc *desc, 101int blkcipher_walk_done(struct blkcipher_desc *desc,
105 struct blkcipher_walk *walk, int err) 102 struct blkcipher_walk *walk, int err)
106{ 103{
107 unsigned int nbytes = 0; 104 unsigned int n; /* bytes processed */
105 bool more;
108 106
109 if (likely(err >= 0)) { 107 if (unlikely(err < 0))
110 unsigned int n = walk->nbytes - err; 108 goto finish;
111 109
112 if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) 110 n = walk->nbytes - err;
113 n = blkcipher_done_fast(walk, n); 111 walk->total -= n;
114 else if (WARN_ON(err)) { 112 more = (walk->total != 0);
115 err = -EINVAL;
116 goto err;
117 } else
118 n = blkcipher_done_slow(walk, n);
119 113
120 nbytes = walk->total - n; 114 if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
121 err = 0; 115 blkcipher_done_fast(walk, n);
116 } else {
117 if (WARN_ON(err)) {
118 /* unexpected case; didn't process all bytes */
119 err = -EINVAL;
120 goto finish;
121 }
122 blkcipher_done_slow(walk, n);
122 } 123 }
123 124
124 scatterwalk_done(&walk->in, 0, nbytes); 125 scatterwalk_done(&walk->in, 0, more);
125 scatterwalk_done(&walk->out, 1, nbytes); 126 scatterwalk_done(&walk->out, 1, more);
126
127err:
128 walk->total = nbytes;
129 walk->nbytes = nbytes;
130 127
131 if (nbytes) { 128 if (more) {
132 crypto_yield(desc->flags); 129 crypto_yield(desc->flags);
133 return blkcipher_walk_next(desc, walk); 130 return blkcipher_walk_next(desc, walk);
134 } 131 }
135 132 err = 0;
133finish:
134 walk->nbytes = 0;
136 if (walk->iv != desc->info) 135 if (walk->iv != desc->info)
137 memcpy(desc->info, walk->iv, walk->ivsize); 136 memcpy(desc->info, walk->iv, walk->ivsize);
138 if (walk->buffer != walk->page) 137 if (walk->buffer != walk->page)
139 kfree(walk->buffer); 138 kfree(walk->buffer);
140 if (walk->page) 139 if (walk->page)
141 free_page((unsigned long)walk->page); 140 free_page((unsigned long)walk->page);
142
143 return err; 141 return err;
144} 142}
145EXPORT_SYMBOL_GPL(blkcipher_walk_done); 143EXPORT_SYMBOL_GPL(blkcipher_walk_done);
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 26a504db3f53..10a5a3eb675a 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -654,7 +654,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
654 inst->alg.finup = cryptd_hash_finup_enqueue; 654 inst->alg.finup = cryptd_hash_finup_enqueue;
655 inst->alg.export = cryptd_hash_export; 655 inst->alg.export = cryptd_hash_export;
656 inst->alg.import = cryptd_hash_import; 656 inst->alg.import = cryptd_hash_import;
657 inst->alg.setkey = cryptd_hash_setkey; 657 if (crypto_shash_alg_has_setkey(salg))
658 inst->alg.setkey = cryptd_hash_setkey;
658 inst->alg.digest = cryptd_hash_digest_enqueue; 659 inst->alg.digest = cryptd_hash_digest_enqueue;
659 660
660 err = ahash_register_instance(tmpl, inst); 661 err = ahash_register_instance(tmpl, inst);
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index 2df9835dfbc0..bca99238948f 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -51,17 +51,6 @@ int crypto_poly1305_init(struct shash_desc *desc)
51} 51}
52EXPORT_SYMBOL_GPL(crypto_poly1305_init); 52EXPORT_SYMBOL_GPL(crypto_poly1305_init);
53 53
54int crypto_poly1305_setkey(struct crypto_shash *tfm,
55 const u8 *key, unsigned int keylen)
56{
57 /* Poly1305 requires a unique key for each tag, which implies that
58 * we can't set it on the tfm that gets accessed by multiple users
59 * simultaneously. Instead we expect the key as the first 32 bytes in
60 * the update() call. */
61 return -ENOTSUPP;
62}
63EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
64
65static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) 54static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
66{ 55{
67 /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ 56 /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
@@ -80,6 +69,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
80 dctx->s[3] = le32_to_cpuvp(key + 12); 69 dctx->s[3] = le32_to_cpuvp(key + 12);
81} 70}
82 71
72/*
73 * Poly1305 requires a unique key for each tag, which implies that we can't set
74 * it on the tfm that gets accessed by multiple users simultaneously. Instead we
75 * expect the key as the first 32 bytes in the update() call.
76 */
83unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, 77unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
84 const u8 *src, unsigned int srclen) 78 const u8 *src, unsigned int srclen)
85{ 79{
@@ -285,7 +279,6 @@ static struct shash_alg poly1305_alg = {
285 .init = crypto_poly1305_init, 279 .init = crypto_poly1305_init,
286 .update = crypto_poly1305_update, 280 .update = crypto_poly1305_update,
287 .final = crypto_poly1305_final, 281 .final = crypto_poly1305_final,
288 .setkey = crypto_poly1305_setkey,
289 .descsize = sizeof(struct poly1305_desc_ctx), 282 .descsize = sizeof(struct poly1305_desc_ctx),
290 .base = { 283 .base = {
291 .cra_name = "poly1305", 284 .cra_name = "poly1305",
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index f522828d45c9..1d92b5d2d6bd 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -291,11 +291,13 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
291 } 291 }
292 292
293 sg_init_table(sg, np + 1); 293 sg_init_table(sg, np + 1);
294 np--; 294 if (rem)
295 np--;
295 for (k = 0; k < np; k++) 296 for (k = 0; k < np; k++)
296 sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); 297 sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
297 298
298 sg_set_buf(&sg[k + 1], xbuf[k], rem); 299 if (rem)
300 sg_set_buf(&sg[k + 1], xbuf[k], rem);
299} 301}
300 302
301static void test_aead_speed(const char *algo, int enc, unsigned int secs, 303static void test_aead_speed(const char *algo, int enc, unsigned int secs,
diff --git a/crypto/vmac.c b/crypto/vmac.c
index df76a816cfb2..bb2fc787d615 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -1,6 +1,10 @@
1/* 1/*
2 * Modified to interface to the Linux kernel 2 * VMAC: Message Authentication Code using Universal Hashing
3 *
4 * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
5 *
3 * Copyright (c) 2009, Intel Corporation. 6 * Copyright (c) 2009, Intel Corporation.
7 * Copyright (c) 2018, Google Inc.
4 * 8 *
5 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License, 10 * under the terms and conditions of the GNU General Public License,
@@ -16,14 +20,15 @@
16 * Place - Suite 330, Boston, MA 02111-1307 USA. 20 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 */ 21 */
18 22
19/* -------------------------------------------------------------------------- 23/*
20 * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. 24 * Derived from:
21 * This implementation is herby placed in the public domain. 25 * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
22 * The authors offers no warranty. Use at your own risk. 26 * This implementation is herby placed in the public domain.
23 * Please send bug reports to the authors. 27 * The authors offers no warranty. Use at your own risk.
24 * Last modified: 17 APR 08, 1700 PDT 28 * Last modified: 17 APR 08, 1700 PDT
25 * ----------------------------------------------------------------------- */ 29 */
26 30
31#include <asm/unaligned.h>
27#include <linux/init.h> 32#include <linux/init.h>
28#include <linux/types.h> 33#include <linux/types.h>
29#include <linux/crypto.h> 34#include <linux/crypto.h>
@@ -31,10 +36,36 @@
31#include <linux/scatterlist.h> 36#include <linux/scatterlist.h>
32#include <asm/byteorder.h> 37#include <asm/byteorder.h>
33#include <crypto/scatterwalk.h> 38#include <crypto/scatterwalk.h>
34#include <crypto/vmac.h>
35#include <crypto/internal/hash.h> 39#include <crypto/internal/hash.h>
36 40
37/* 41/*
42 * User definable settings.
43 */
44#define VMAC_TAG_LEN 64
45#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
46#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
47#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
48
49/* per-transform (per-key) context */
50struct vmac_tfm_ctx {
51 struct crypto_cipher *cipher;
52 u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
53 u64 polykey[2*VMAC_TAG_LEN/64];
54 u64 l3key[2*VMAC_TAG_LEN/64];
55};
56
57/* per-request context */
58struct vmac_desc_ctx {
59 union {
60 u8 partial[VMAC_NHBYTES]; /* partial block */
61 __le64 partial_words[VMAC_NHBYTES / 8];
62 };
63 unsigned int partial_size; /* size of the partial block */
64 bool first_block_processed;
65 u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
66};
67
68/*
38 * Constants and masks 69 * Constants and masks
39 */ 70 */
40#define UINT64_C(x) x##ULL 71#define UINT64_C(x) x##ULL
@@ -318,13 +349,6 @@ static void poly_step_func(u64 *ahi, u64 *alo,
318 } while (0) 349 } while (0)
319#endif 350#endif
320 351
321static void vhash_abort(struct vmac_ctx *ctx)
322{
323 ctx->polytmp[0] = ctx->polykey[0] ;
324 ctx->polytmp[1] = ctx->polykey[1] ;
325 ctx->first_block_processed = 0;
326}
327
328static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) 352static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
329{ 353{
330 u64 rh, rl, t, z = 0; 354 u64 rh, rl, t, z = 0;
@@ -364,280 +388,209 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
364 return rl; 388 return rl;
365} 389}
366 390
367static void vhash_update(const unsigned char *m, 391/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
368 unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */ 392static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
369 struct vmac_ctx *ctx) 393 struct vmac_desc_ctx *dctx,
394 const __le64 *mptr, unsigned int blocks)
370{ 395{
371 u64 rh, rl, *mptr; 396 const u64 *kptr = tctx->nhkey;
372 const u64 *kptr = (u64 *)ctx->nhkey; 397 const u64 pkh = tctx->polykey[0];
373 int i; 398 const u64 pkl = tctx->polykey[1];
374 u64 ch, cl; 399 u64 ch = dctx->polytmp[0];
375 u64 pkh = ctx->polykey[0]; 400 u64 cl = dctx->polytmp[1];
376 u64 pkl = ctx->polykey[1]; 401 u64 rh, rl;
377 402
378 if (!mbytes) 403 if (!dctx->first_block_processed) {
379 return; 404 dctx->first_block_processed = true;
380
381 BUG_ON(mbytes % VMAC_NHBYTES);
382
383 mptr = (u64 *)m;
384 i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
385
386 ch = ctx->polytmp[0];
387 cl = ctx->polytmp[1];
388
389 if (!ctx->first_block_processed) {
390 ctx->first_block_processed = 1;
391 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); 405 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
392 rh &= m62; 406 rh &= m62;
393 ADD128(ch, cl, rh, rl); 407 ADD128(ch, cl, rh, rl);
394 mptr += (VMAC_NHBYTES/sizeof(u64)); 408 mptr += (VMAC_NHBYTES/sizeof(u64));
395 i--; 409 blocks--;
396 } 410 }
397 411
398 while (i--) { 412 while (blocks--) {
399 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); 413 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
400 rh &= m62; 414 rh &= m62;
401 poly_step(ch, cl, pkh, pkl, rh, rl); 415 poly_step(ch, cl, pkh, pkl, rh, rl);
402 mptr += (VMAC_NHBYTES/sizeof(u64)); 416 mptr += (VMAC_NHBYTES/sizeof(u64));
403 } 417 }
404 418
405 ctx->polytmp[0] = ch; 419 dctx->polytmp[0] = ch;
406 ctx->polytmp[1] = cl; 420 dctx->polytmp[1] = cl;
407} 421}
408 422
409static u64 vhash(unsigned char m[], unsigned int mbytes, 423static int vmac_setkey(struct crypto_shash *tfm,
410 u64 *tagl, struct vmac_ctx *ctx) 424 const u8 *key, unsigned int keylen)
411{ 425{
412 u64 rh, rl, *mptr; 426 struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
413 const u64 *kptr = (u64 *)ctx->nhkey; 427 __be64 out[2];
414 int i, remaining; 428 u8 in[16] = { 0 };
415 u64 ch, cl; 429 unsigned int i;
416 u64 pkh = ctx->polykey[0]; 430 int err;
417 u64 pkl = ctx->polykey[1];
418
419 mptr = (u64 *)m;
420 i = mbytes / VMAC_NHBYTES;
421 remaining = mbytes % VMAC_NHBYTES;
422
423 if (ctx->first_block_processed) {
424 ch = ctx->polytmp[0];
425 cl = ctx->polytmp[1];
426 } else if (i) {
427 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
428 ch &= m62;
429 ADD128(ch, cl, pkh, pkl);
430 mptr += (VMAC_NHBYTES/sizeof(u64));
431 i--;
432 } else if (remaining) {
433 nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
434 ch &= m62;
435 ADD128(ch, cl, pkh, pkl);
436 mptr += (VMAC_NHBYTES/sizeof(u64));
437 goto do_l3;
438 } else {/* Empty String */
439 ch = pkh; cl = pkl;
440 goto do_l3;
441 }
442
443 while (i--) {
444 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
445 rh &= m62;
446 poly_step(ch, cl, pkh, pkl, rh, rl);
447 mptr += (VMAC_NHBYTES/sizeof(u64));
448 }
449 if (remaining) {
450 nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
451 rh &= m62;
452 poly_step(ch, cl, pkh, pkl, rh, rl);
453 }
454
455do_l3:
456 vhash_abort(ctx);
457 remaining *= 8;
458 return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
459}
460 431
461static u64 vmac(unsigned char m[], unsigned int mbytes, 432 if (keylen != VMAC_KEY_LEN) {
462 const unsigned char n[16], u64 *tagl, 433 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
463 struct vmac_ctx_t *ctx) 434 return -EINVAL;
464{
465 u64 *in_n, *out_p;
466 u64 p, h;
467 int i;
468
469 in_n = ctx->__vmac_ctx.cached_nonce;
470 out_p = ctx->__vmac_ctx.cached_aes;
471
472 i = n[15] & 1;
473 if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
474 in_n[0] = *(u64 *)(n);
475 in_n[1] = *(u64 *)(n+8);
476 ((unsigned char *)in_n)[15] &= 0xFE;
477 crypto_cipher_encrypt_one(ctx->child,
478 (unsigned char *)out_p, (unsigned char *)in_n);
479
480 ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
481 } 435 }
482 p = be64_to_cpup(out_p + i);
483 h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
484 return le64_to_cpu(p + h);
485}
486 436
487static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) 437 err = crypto_cipher_setkey(tctx->cipher, key, keylen);
488{
489 u64 in[2] = {0}, out[2];
490 unsigned i;
491 int err = 0;
492
493 err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
494 if (err) 438 if (err)
495 return err; 439 return err;
496 440
497 /* Fill nh key */ 441 /* Fill nh key */
498 ((unsigned char *)in)[0] = 0x80; 442 in[0] = 0x80;
499 for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) { 443 for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
500 crypto_cipher_encrypt_one(ctx->child, 444 crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
501 (unsigned char *)out, (unsigned char *)in); 445 tctx->nhkey[i] = be64_to_cpu(out[0]);
502 ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out); 446 tctx->nhkey[i+1] = be64_to_cpu(out[1]);
503 ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1); 447 in[15]++;
504 ((unsigned char *)in)[15] += 1;
505 } 448 }
506 449
507 /* Fill poly key */ 450 /* Fill poly key */
508 ((unsigned char *)in)[0] = 0xC0; 451 in[0] = 0xC0;
509 in[1] = 0; 452 in[15] = 0;
510 for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) { 453 for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
511 crypto_cipher_encrypt_one(ctx->child, 454 crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
512 (unsigned char *)out, (unsigned char *)in); 455 tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
513 ctx->__vmac_ctx.polytmp[i] = 456 tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
514 ctx->__vmac_ctx.polykey[i] = 457 in[15]++;
515 be64_to_cpup(out) & mpoly;
516 ctx->__vmac_ctx.polytmp[i+1] =
517 ctx->__vmac_ctx.polykey[i+1] =
518 be64_to_cpup(out+1) & mpoly;
519 ((unsigned char *)in)[15] += 1;
520 } 458 }
521 459
522 /* Fill ip key */ 460 /* Fill ip key */
523 ((unsigned char *)in)[0] = 0xE0; 461 in[0] = 0xE0;
524 in[1] = 0; 462 in[15] = 0;
525 for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) { 463 for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
526 do { 464 do {
527 crypto_cipher_encrypt_one(ctx->child, 465 crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
528 (unsigned char *)out, (unsigned char *)in); 466 tctx->l3key[i] = be64_to_cpu(out[0]);
529 ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out); 467 tctx->l3key[i+1] = be64_to_cpu(out[1]);
530 ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1); 468 in[15]++;
531 ((unsigned char *)in)[15] += 1; 469 } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
532 } while (ctx->__vmac_ctx.l3key[i] >= p64
533 || ctx->__vmac_ctx.l3key[i+1] >= p64);
534 } 470 }
535 471
536 /* Invalidate nonce/aes cache and reset other elements */ 472 return 0;
537 ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
538 ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
539 ctx->__vmac_ctx.first_block_processed = 0;
540
541 return err;
542} 473}
543 474
544static int vmac_setkey(struct crypto_shash *parent, 475static int vmac_init(struct shash_desc *desc)
545 const u8 *key, unsigned int keylen)
546{ 476{
547 struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); 477 const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
478 struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
548 479
549 if (keylen != VMAC_KEY_LEN) { 480 dctx->partial_size = 0;
550 crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN); 481 dctx->first_block_processed = false;
551 return -EINVAL; 482 memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
552 }
553
554 return vmac_set_key((u8 *)key, ctx);
555}
556
557static int vmac_init(struct shash_desc *pdesc)
558{
559 return 0; 483 return 0;
560} 484}
561 485
562static int vmac_update(struct shash_desc *pdesc, const u8 *p, 486static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
563 unsigned int len)
564{ 487{
565 struct crypto_shash *parent = pdesc->tfm; 488 const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
566 struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); 489 struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
567 int expand; 490 unsigned int n;
568 int min; 491
569 492 if (dctx->partial_size) {
570 expand = VMAC_NHBYTES - ctx->partial_size > 0 ? 493 n = min(len, VMAC_NHBYTES - dctx->partial_size);
571 VMAC_NHBYTES - ctx->partial_size : 0; 494 memcpy(&dctx->partial[dctx->partial_size], p, n);
572 495 dctx->partial_size += n;
573 min = len < expand ? len : expand; 496 p += n;
574 497 len -= n;
575 memcpy(ctx->partial + ctx->partial_size, p, min); 498 if (dctx->partial_size == VMAC_NHBYTES) {
576 ctx->partial_size += min; 499 vhash_blocks(tctx, dctx, dctx->partial_words, 1);
577 500 dctx->partial_size = 0;
578 if (len < expand) 501 }
579 return 0; 502 }
580
581 vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
582 ctx->partial_size = 0;
583
584 len -= expand;
585 p += expand;
586 503
587 if (len % VMAC_NHBYTES) { 504 if (len >= VMAC_NHBYTES) {
588 memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES), 505 n = round_down(len, VMAC_NHBYTES);
589 len % VMAC_NHBYTES); 506 /* TODO: 'p' may be misaligned here */
590 ctx->partial_size = len % VMAC_NHBYTES; 507 vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
508 p += n;
509 len -= n;
591 } 510 }
592 511
593 vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx); 512 if (len) {
513 memcpy(dctx->partial, p, len);
514 dctx->partial_size = len;
515 }
594 516
595 return 0; 517 return 0;
596} 518}
597 519
598static int vmac_final(struct shash_desc *pdesc, u8 *out) 520static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
521 struct vmac_desc_ctx *dctx)
599{ 522{
600 struct crypto_shash *parent = pdesc->tfm; 523 unsigned int partial = dctx->partial_size;
601 struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); 524 u64 ch = dctx->polytmp[0];
602 vmac_t mac; 525 u64 cl = dctx->polytmp[1];
603 u8 nonce[16] = {}; 526
604 527 /* L1 and L2-hash the final block if needed */
605 /* vmac() ends up accessing outside the array bounds that 528 if (partial) {
606 * we specify. In appears to access up to the next 2-word 529 /* Zero-pad to next 128-bit boundary */
607 * boundary. We'll just be uber cautious and zero the 530 unsigned int n = round_up(partial, 16);
608 * unwritten bytes in the buffer. 531 u64 rh, rl;
609 */ 532
610 if (ctx->partial_size) { 533 memset(&dctx->partial[partial], 0, n - partial);
611 memset(ctx->partial + ctx->partial_size, 0, 534 nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
612 VMAC_NHBYTES - ctx->partial_size); 535 rh &= m62;
536 if (dctx->first_block_processed)
537 poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
538 rh, rl);
539 else
540 ADD128(ch, cl, rh, rl);
613 } 541 }
614 mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx); 542
615 memcpy(out, &mac, sizeof(vmac_t)); 543 /* L3-hash the 128-bit output of L2-hash */
616 memzero_explicit(&mac, sizeof(vmac_t)); 544 return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
617 memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); 545}
618 ctx->partial_size = 0; 546
547static int vmac_final(struct shash_desc *desc, u8 *out)
548{
549 const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
550 struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
551 static const u8 nonce[16] = {}; /* TODO: this is insecure */
552 union {
553 u8 bytes[16];
554 __be64 pads[2];
555 } block;
556 int index;
557 u64 hash, pad;
558
559 /* Finish calculating the VHASH of the message */
560 hash = vhash_final(tctx, dctx);
561
562 /* Generate pseudorandom pad by encrypting the nonce */
563 memcpy(&block, nonce, 16);
564 index = block.bytes[15] & 1;
565 block.bytes[15] &= ~1;
566 crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes);
567 pad = be64_to_cpu(block.pads[index]);
568
569 /* The VMAC is the sum of VHASH and the pseudorandom pad */
570 put_unaligned_le64(hash + pad, out);
619 return 0; 571 return 0;
620} 572}
621 573
622static int vmac_init_tfm(struct crypto_tfm *tfm) 574static int vmac_init_tfm(struct crypto_tfm *tfm)
623{ 575{
624 struct crypto_cipher *cipher; 576 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
625 struct crypto_instance *inst = (void *)tfm->__crt_alg;
626 struct crypto_spawn *spawn = crypto_instance_ctx(inst); 577 struct crypto_spawn *spawn = crypto_instance_ctx(inst);
627 struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); 578 struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
579 struct crypto_cipher *cipher;
628 580
629 cipher = crypto_spawn_cipher(spawn); 581 cipher = crypto_spawn_cipher(spawn);
630 if (IS_ERR(cipher)) 582 if (IS_ERR(cipher))
631 return PTR_ERR(cipher); 583 return PTR_ERR(cipher);
632 584
633 ctx->child = cipher; 585 tctx->cipher = cipher;
634 return 0; 586 return 0;
635} 587}
636 588
637static void vmac_exit_tfm(struct crypto_tfm *tfm) 589static void vmac_exit_tfm(struct crypto_tfm *tfm)
638{ 590{
639 struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); 591 struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
640 crypto_free_cipher(ctx->child); 592
593 crypto_free_cipher(tctx->cipher);
641} 594}
642 595
643static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) 596static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -655,6 +608,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
655 if (IS_ERR(alg)) 608 if (IS_ERR(alg))
656 return PTR_ERR(alg); 609 return PTR_ERR(alg);
657 610
611 err = -EINVAL;
612 if (alg->cra_blocksize != 16)
613 goto out_put_alg;
614
658 inst = shash_alloc_instance("vmac", alg); 615 inst = shash_alloc_instance("vmac", alg);
659 err = PTR_ERR(inst); 616 err = PTR_ERR(inst);
660 if (IS_ERR(inst)) 617 if (IS_ERR(inst))
@@ -670,11 +627,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
670 inst->alg.base.cra_blocksize = alg->cra_blocksize; 627 inst->alg.base.cra_blocksize = alg->cra_blocksize;
671 inst->alg.base.cra_alignmask = alg->cra_alignmask; 628 inst->alg.base.cra_alignmask = alg->cra_alignmask;
672 629
673 inst->alg.digestsize = sizeof(vmac_t); 630 inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
674 inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
675 inst->alg.base.cra_init = vmac_init_tfm; 631 inst->alg.base.cra_init = vmac_init_tfm;
676 inst->alg.base.cra_exit = vmac_exit_tfm; 632 inst->alg.base.cra_exit = vmac_exit_tfm;
677 633
634 inst->alg.descsize = sizeof(struct vmac_desc_ctx);
635 inst->alg.digestsize = VMAC_TAG_LEN / 8;
678 inst->alg.init = vmac_init; 636 inst->alg.init = vmac_init;
679 inst->alg.update = vmac_update; 637 inst->alg.update = vmac_update;
680 inst->alg.final = vmac_final; 638 inst->alg.final = vmac_final;
diff --git a/drivers/Makefile b/drivers/Makefile
index 1d70931d6752..2d3edaa16532 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_TC) += tc/
100obj-$(CONFIG_UWB) += uwb/ 100obj-$(CONFIG_UWB) += uwb/
101obj-$(CONFIG_USB_PHY) += usb/ 101obj-$(CONFIG_USB_PHY) += usb/
102obj-$(CONFIG_USB) += usb/ 102obj-$(CONFIG_USB) += usb/
103obj-$(CONFIG_USB_SUPPORT) += usb/
103obj-$(CONFIG_PCI) += usb/ 104obj-$(CONFIG_PCI) += usb/
104obj-$(CONFIG_USB_GADGET) += usb/ 105obj-$(CONFIG_USB_GADGET) += usb/
105obj-$(CONFIG_OF) += usb/ 106obj-$(CONFIG_OF) += usb/
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index f9e0d09f7c66..8a0f77fb5181 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -154,10 +154,12 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = {
154 154
155static const struct lpss_device_desc byt_pwm_dev_desc = { 155static const struct lpss_device_desc byt_pwm_dev_desc = {
156 .flags = LPSS_SAVE_CTX, 156 .flags = LPSS_SAVE_CTX,
157 .prv_offset = 0x800,
157}; 158};
158 159
159static const struct lpss_device_desc bsw_pwm_dev_desc = { 160static const struct lpss_device_desc bsw_pwm_dev_desc = {
160 .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, 161 .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
162 .prv_offset = 0x800,
161}; 163};
162 164
163static const struct lpss_device_desc byt_uart_dev_desc = { 165static const struct lpss_device_desc byt_uart_dev_desc = {
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 8ea8211b2d58..f8bb0e4d035a 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -108,6 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
108 cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus); 108 cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
109 if (cpumask_empty(tmp)) { 109 if (cpumask_empty(tmp)) {
110 mutex_unlock(&round_robin_lock); 110 mutex_unlock(&round_robin_lock);
111 free_cpumask_var(tmp);
111 return; 112 return;
112 } 113 }
113 for_each_cpu(cpu, tmp) { 114 for_each_cpu(cpu, tmp) {
@@ -125,6 +126,8 @@ static void round_robin_cpu(unsigned int tsk_index)
125 mutex_unlock(&round_robin_lock); 126 mutex_unlock(&round_robin_lock);
126 127
127 set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu)); 128 set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
129
130 free_cpumask_var(tmp);
128} 131}
129 132
130static void exit_round_robin(unsigned int tsk_index) 133static void exit_round_robin(unsigned int tsk_index)
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 9f77943653fb..b63a173786d5 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
331 pr->throttling.duty_width = acpi_gbl_FADT.duty_width; 331 pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
332 332
333 pr->pblk = object.processor.pblk_address; 333 pr->pblk = object.processor.pblk_address;
334
335 /*
336 * We don't care about error returns - we just try to mark
337 * these reserved so that nobody else is confused into thinking
338 * that this region might be unused..
339 *
340 * (In particular, allocating the IO range for Cardbus)
341 */
342 request_region(pr->throttling.address, 6, "ACPI CPU throttle");
343 } 334 }
344 335
345 /* 336 /*
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index bf6873f95e72..0b5eedb60d04 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -204,6 +204,7 @@ u32 acpi_ev_fixed_event_detect(void)
204 u32 fixed_status; 204 u32 fixed_status;
205 u32 fixed_enable; 205 u32 fixed_enable;
206 u32 i; 206 u32 i;
207 acpi_status status;
207 208
208 ACPI_FUNCTION_NAME(ev_fixed_event_detect); 209 ACPI_FUNCTION_NAME(ev_fixed_event_detect);
209 210
@@ -211,8 +212,12 @@ u32 acpi_ev_fixed_event_detect(void)
211 * Read the fixed feature status and enable registers, as all the cases 212 * Read the fixed feature status and enable registers, as all the cases
212 * depend on their values. Ignore errors here. 213 * depend on their values. Ignore errors here.
213 */ 214 */
214 (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status); 215 status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
215 (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable); 216 status |=
217 acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
218 if (ACPI_FAILURE(status)) {
219 return (int_status);
220 }
216 221
217 ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, 222 ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
218 "Fixed Event Block: Enable %08X Status %08X\n", 223 "Fixed Event Block: Enable %08X Status %08X\n",
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 10ce48e16ebf..d830705f8a18 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -180,6 +180,12 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
180 180
181 ACPI_FUNCTION_TRACE(acpi_enable_event); 181 ACPI_FUNCTION_TRACE(acpi_enable_event);
182 182
183 /* If Hardware Reduced flag is set, there are no fixed events */
184
185 if (acpi_gbl_reduced_hardware) {
186 return_ACPI_STATUS(AE_OK);
187 }
188
183 /* Decode the Fixed Event */ 189 /* Decode the Fixed Event */
184 190
185 if (event > ACPI_EVENT_MAX) { 191 if (event > ACPI_EVENT_MAX) {
@@ -237,6 +243,12 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
237 243
238 ACPI_FUNCTION_TRACE(acpi_disable_event); 244 ACPI_FUNCTION_TRACE(acpi_disable_event);
239 245
246 /* If Hardware Reduced flag is set, there are no fixed events */
247
248 if (acpi_gbl_reduced_hardware) {
249 return_ACPI_STATUS(AE_OK);
250 }
251
240 /* Decode the Fixed Event */ 252 /* Decode the Fixed Event */
241 253
242 if (event > ACPI_EVENT_MAX) { 254 if (event > ACPI_EVENT_MAX) {
@@ -290,6 +302,12 @@ acpi_status acpi_clear_event(u32 event)
290 302
291 ACPI_FUNCTION_TRACE(acpi_clear_event); 303 ACPI_FUNCTION_TRACE(acpi_clear_event);
292 304
305 /* If Hardware Reduced flag is set, there are no fixed events */
306
307 if (acpi_gbl_reduced_hardware) {
308 return_ACPI_STATUS(AE_OK);
309 }
310
293 /* Decode the Fixed Event */ 311 /* Decode the Fixed Event */
294 312
295 if (event > ACPI_EVENT_MAX) { 313 if (event > ACPI_EVENT_MAX) {
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 7eba578d36f3..10262cae8a19 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -308,6 +308,14 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
308 /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */ 308 /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
309 309
310 status = AE_OK; 310 status = AE_OK;
311 } else if (ACPI_FAILURE(status)) {
312
313 /* If return_object exists, delete it */
314
315 if (info->return_object) {
316 acpi_ut_remove_reference(info->return_object);
317 info->return_object = NULL;
318 }
311 } 319 }
312 320
313 ACPI_DEBUG_PRINT((ACPI_DB_NAMES, 321 ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index de325ae04ce1..3b3c5b90bd20 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -593,25 +593,20 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
593void acpi_ns_terminate(void) 593void acpi_ns_terminate(void)
594{ 594{
595 acpi_status status; 595 acpi_status status;
596 union acpi_operand_object *prev;
597 union acpi_operand_object *next;
596 598
597 ACPI_FUNCTION_TRACE(ns_terminate); 599 ACPI_FUNCTION_TRACE(ns_terminate);
598 600
599#ifdef ACPI_EXEC_APP 601 /* Delete any module-level code blocks */
600 {
601 union acpi_operand_object *prev;
602 union acpi_operand_object *next;
603 602
604 /* Delete any module-level code blocks */ 603 next = acpi_gbl_module_code_list;
605 604 while (next) {
606 next = acpi_gbl_module_code_list; 605 prev = next;
607 while (next) { 606 next = next->method.mutex;
608 prev = next; 607 prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */
609 next = next->method.mutex; 608 acpi_ut_remove_reference(prev);
610 prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */
611 acpi_ut_remove_reference(prev);
612 }
613 } 609 }
614#endif
615 610
616 /* 611 /*
617 * Free the entire namespace -- all nodes and all objects 612 * Free the entire namespace -- all nodes and all objects
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index e54bc2aa7a88..a05b3b79b987 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -121,6 +121,9 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
121 (u32)(aml_offset + 121 (u32)(aml_offset +
122 sizeof(struct acpi_table_header))); 122 sizeof(struct acpi_table_header)));
123 123
124 ACPI_ERROR((AE_INFO,
125 "Aborting disassembly, AML byte code is corrupt"));
126
124 /* Dump the context surrounding the invalid opcode */ 127 /* Dump the context surrounding the invalid opcode */
125 128
126 acpi_ut_dump_buffer(((u8 *)walk_state->parser_state. 129 acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
@@ -129,6 +132,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
129 sizeof(struct acpi_table_header) - 132 sizeof(struct acpi_table_header) -
130 16)); 133 16));
131 acpi_os_printf(" */\n"); 134 acpi_os_printf(" */\n");
135
136 /*
137 * Just abort the disassembly, cannot continue because the
138 * parser is essentially lost. The disassembler can then
139 * randomly fail because an ill-constructed parse tree
140 * can result.
141 */
142 return_ACPI_STATUS(AE_AML_BAD_OPCODE);
132#endif 143#endif
133 } 144 }
134 145
@@ -293,6 +304,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
293 if (status == AE_CTRL_PARSE_CONTINUE) { 304 if (status == AE_CTRL_PARSE_CONTINUE) {
294 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE); 305 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
295 } 306 }
307 if (ACPI_FAILURE(status)) {
308 return_ACPI_STATUS(status);
309 }
296 310
297 /* Create Op structure and append to parent's argument list */ 311 /* Create Op structure and append to parent's argument list */
298 312
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index b9afb47db7ed..1521d9a41d25 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
146 int count; 146 int count;
147 struct acpi_hardware_id *id; 147 struct acpi_hardware_id *id;
148 148
149 /* Avoid unnecessarily loading modules for non present devices. */
150 if (!acpi_device_is_present(acpi_dev))
151 return 0;
152
149 /* 153 /*
150 * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should 154 * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
151 * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the 155 * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 73c9c7fa9001..f06317d6fc38 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
99 return -ENODEV; 99 return -ENODEV;
100 100
101 /* 101 /*
102 * If the device has a _HID (or _CID) returning a valid ACPI/PNP 102 * If the device has a _HID returning a valid ACPI/PNP device ID, it is
103 * device ID, it is better to make it look less attractive here, so that 103 * better to make it look less attractive here, so that the other device
104 * the other device with the same _ADR value (that may not have a valid 104 * with the same _ADR value (that may not have a valid device ID) can be
105 * device ID) can be matched going forward. [This means a second spec 105 * matched going forward. [This means a second spec violation in a row,
106 * violation in a row, so whatever we do here is best effort anyway.] 106 * so whatever we do here is best effort anyway.]
107 */ 107 */
108 return sta_present && list_empty(&adev->pnp.ids) ? 108 return sta_present && !adev->pnp.type.platform_id ?
109 FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; 109 FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
110} 110}
111 111
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index d176e0ece470..2946e2846573 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm)
103 */ 103 */
104int acpi_map_pxm_to_online_node(int pxm) 104int acpi_map_pxm_to_online_node(int pxm)
105{ 105{
106 int node, n, dist, min_dist; 106 int node, min_node;
107 107
108 node = acpi_map_pxm_to_node(pxm); 108 node = acpi_map_pxm_to_node(pxm);
109 109
110 if (node == NUMA_NO_NODE) 110 if (node == NUMA_NO_NODE)
111 node = 0; 111 node = 0;
112 112
113 min_node = node;
113 if (!node_online(node)) { 114 if (!node_online(node)) {
114 min_dist = INT_MAX; 115 int min_dist = INT_MAX, dist, n;
116
115 for_each_online_node(n) { 117 for_each_online_node(n) {
116 dist = node_distance(node, n); 118 dist = node_distance(node, n);
117 if (dist < min_dist) { 119 if (dist < min_dist) {
118 min_dist = dist; 120 min_dist = dist;
119 node = n; 121 min_node = n;
120 } 122 }
121 } 123 }
122 } 124 }
123 125
124 return node; 126 return min_node;
125} 127}
126EXPORT_SYMBOL(acpi_map_pxm_to_online_node); 128EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
127 129
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 8a10a7ae6a8a..c8e169e46673 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -131,9 +131,6 @@ static void do_prt_fixups(struct acpi_prt_entry *entry,
131 quirk = &prt_quirks[i]; 131 quirk = &prt_quirks[i];
132 132
133 /* All current quirks involve link devices, not GSIs */ 133 /* All current quirks involve link devices, not GSIs */
134 if (!prt->source)
135 continue;
136
137 if (dmi_check_system(quirk->system) && 134 if (dmi_check_system(quirk->system) &&
138 entry->id.segment == quirk->segment && 135 entry->id.segment == quirk->segment &&
139 entry->id.bus == quirk->bus && 136 entry->id.bus == quirk->bus &&
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ae3fe4e64203..3b0b4bd67b71 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -472,9 +472,11 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
472 } 472 }
473 473
474 control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL 474 control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
475 | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
476 | OSC_PCI_EXPRESS_PME_CONTROL; 475 | OSC_PCI_EXPRESS_PME_CONTROL;
477 476
477 if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
478 control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
479
478 if (pci_aer_available()) { 480 if (pci_aer_available()) {
479 if (aer_acpi_firmware_first()) 481 if (aer_acpi_firmware_first())
480 dev_info(&device->dev, 482 dev_info(&device->dev,
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 6a082d4de12c..24a793957bc0 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -28,97 +28,97 @@ static struct pmic_table power_table[] = {
28 .address = 0x00, 28 .address = 0x00,
29 .reg = 0x13, 29 .reg = 0x13,
30 .bit = 0x05, 30 .bit = 0x05,
31 }, 31 }, /* ALD1 */
32 { 32 {
33 .address = 0x04, 33 .address = 0x04,
34 .reg = 0x13, 34 .reg = 0x13,
35 .bit = 0x06, 35 .bit = 0x06,
36 }, 36 }, /* ALD2 */
37 { 37 {
38 .address = 0x08, 38 .address = 0x08,
39 .reg = 0x13, 39 .reg = 0x13,
40 .bit = 0x07, 40 .bit = 0x07,
41 }, 41 }, /* ALD3 */
42 { 42 {
43 .address = 0x0c, 43 .address = 0x0c,
44 .reg = 0x12, 44 .reg = 0x12,
45 .bit = 0x03, 45 .bit = 0x03,
46 }, 46 }, /* DLD1 */
47 { 47 {
48 .address = 0x10, 48 .address = 0x10,
49 .reg = 0x12, 49 .reg = 0x12,
50 .bit = 0x04, 50 .bit = 0x04,
51 }, 51 }, /* DLD2 */
52 { 52 {
53 .address = 0x14, 53 .address = 0x14,
54 .reg = 0x12, 54 .reg = 0x12,
55 .bit = 0x05, 55 .bit = 0x05,
56 }, 56 }, /* DLD3 */
57 { 57 {
58 .address = 0x18, 58 .address = 0x18,
59 .reg = 0x12, 59 .reg = 0x12,
60 .bit = 0x06, 60 .bit = 0x06,
61 }, 61 }, /* DLD4 */
62 { 62 {
63 .address = 0x1c, 63 .address = 0x1c,
64 .reg = 0x12, 64 .reg = 0x12,
65 .bit = 0x00, 65 .bit = 0x00,
66 }, 66 }, /* ELD1 */
67 { 67 {
68 .address = 0x20, 68 .address = 0x20,
69 .reg = 0x12, 69 .reg = 0x12,
70 .bit = 0x01, 70 .bit = 0x01,
71 }, 71 }, /* ELD2 */
72 { 72 {
73 .address = 0x24, 73 .address = 0x24,
74 .reg = 0x12, 74 .reg = 0x12,
75 .bit = 0x02, 75 .bit = 0x02,
76 }, 76 }, /* ELD3 */
77 { 77 {
78 .address = 0x28, 78 .address = 0x28,
79 .reg = 0x13, 79 .reg = 0x13,
80 .bit = 0x02, 80 .bit = 0x02,
81 }, 81 }, /* FLD1 */
82 { 82 {
83 .address = 0x2c, 83 .address = 0x2c,
84 .reg = 0x13, 84 .reg = 0x13,
85 .bit = 0x03, 85 .bit = 0x03,
86 }, 86 }, /* FLD2 */
87 { 87 {
88 .address = 0x30, 88 .address = 0x30,
89 .reg = 0x13, 89 .reg = 0x13,
90 .bit = 0x04, 90 .bit = 0x04,
91 }, 91 }, /* FLD3 */
92 { 92 {
93 .address = 0x38, 93 .address = 0x34,
94 .reg = 0x10, 94 .reg = 0x10,
95 .bit = 0x03, 95 .bit = 0x03,
96 }, 96 }, /* BUC1 */
97 { 97 {
98 .address = 0x3c, 98 .address = 0x38,
99 .reg = 0x10, 99 .reg = 0x10,
100 .bit = 0x06, 100 .bit = 0x06,
101 }, 101 }, /* BUC2 */
102 { 102 {
103 .address = 0x40, 103 .address = 0x3c,
104 .reg = 0x10, 104 .reg = 0x10,
105 .bit = 0x05, 105 .bit = 0x05,
106 }, 106 }, /* BUC3 */
107 { 107 {
108 .address = 0x44, 108 .address = 0x40,
109 .reg = 0x10, 109 .reg = 0x10,
110 .bit = 0x04, 110 .bit = 0x04,
111 }, 111 }, /* BUC4 */
112 { 112 {
113 .address = 0x48, 113 .address = 0x44,
114 .reg = 0x10, 114 .reg = 0x10,
115 .bit = 0x01, 115 .bit = 0x01,
116 }, 116 }, /* BUC5 */
117 { 117 {
118 .address = 0x4c, 118 .address = 0x48,
119 .reg = 0x10, 119 .reg = 0x10,
120 .bit = 0x00 120 .bit = 0x00
121 }, 121 }, /* BUC6 */
122}; 122};
123 123
124/* TMP0 - TMP5 are the same, all from GPADC */ 124/* TMP0 - TMP5 are the same, all from GPADC */
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 11154a330f07..c9bf74982688 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -259,6 +259,9 @@ static int __acpi_processor_start(struct acpi_device *device)
259 if (ACPI_SUCCESS(status)) 259 if (ACPI_SUCCESS(status))
260 return 0; 260 return 0;
261 261
262 result = -ENODEV;
263 acpi_pss_perf_exit(pr, device);
264
262err_power_exit: 265err_power_exit:
263 acpi_processor_power_exit(pr); 266 acpi_processor_power_exit(pr);
264 return result; 267 return result;
@@ -267,11 +270,16 @@ err_power_exit:
267static int acpi_processor_start(struct device *dev) 270static int acpi_processor_start(struct device *dev)
268{ 271{
269 struct acpi_device *device = ACPI_COMPANION(dev); 272 struct acpi_device *device = ACPI_COMPANION(dev);
273 int ret;
270 274
271 if (!device) 275 if (!device)
272 return -ENODEV; 276 return -ENODEV;
273 277
274 return __acpi_processor_start(device); 278 /* Protect against concurrent CPU hotplug operations */
279 get_online_cpus();
280 ret = __acpi_processor_start(device);
281 put_online_cpus();
282 return ret;
275} 283}
276 284
277static int acpi_processor_stop(struct device *dev) 285static int acpi_processor_stop(struct device *dev)
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index bb01dea39fdc..9825780a1cd2 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -161,7 +161,7 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
161{ 161{
162 int ret; 162 int ret;
163 163
164 if (ignore_ppc) { 164 if (ignore_ppc || !pr->performance) {
165 /* 165 /*
166 * Only when it is notification event, the _OST object 166 * Only when it is notification event, the _OST object
167 * will be evaluated. Otherwise it is skipped. 167 * will be evaluated. Otherwise it is skipped.
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index f170d746336d..93d72413d844 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -62,8 +62,8 @@ struct acpi_processor_throttling_arg {
62#define THROTTLING_POSTCHANGE (2) 62#define THROTTLING_POSTCHANGE (2)
63 63
64static int acpi_processor_get_throttling(struct acpi_processor *pr); 64static int acpi_processor_get_throttling(struct acpi_processor *pr);
65int acpi_processor_set_throttling(struct acpi_processor *pr, 65static int __acpi_processor_set_throttling(struct acpi_processor *pr,
66 int state, bool force); 66 int state, bool force, bool direct);
67 67
68static int acpi_processor_update_tsd_coord(void) 68static int acpi_processor_update_tsd_coord(void)
69{ 69{
@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
676 if (!pr->flags.throttling) 676 if (!pr->flags.throttling)
677 return -ENODEV; 677 return -ENODEV;
678 678
679 /*
680 * We don't care about error returns - we just try to mark
681 * these reserved so that nobody else is confused into thinking
682 * that this region might be unused..
683 *
684 * (In particular, allocating the IO range for Cardbus)
685 */
686 request_region(pr->throttling.address, 6, "ACPI CPU throttle");
687
679 pr->throttling.state = 0; 688 pr->throttling.state = 0;
680 689
681 duty_mask = pr->throttling.state_count - 1; 690 duty_mask = pr->throttling.state_count - 1;
@@ -882,7 +891,8 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
882 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 891 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
883 "Invalid throttling state, reset\n")); 892 "Invalid throttling state, reset\n"));
884 state = 0; 893 state = 0;
885 ret = acpi_processor_set_throttling(pr, state, true); 894 ret = __acpi_processor_set_throttling(pr, state, true,
895 true);
886 if (ret) 896 if (ret)
887 return ret; 897 return ret;
888 } 898 }
@@ -892,36 +902,31 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
892 return 0; 902 return 0;
893} 903}
894 904
895static int acpi_processor_get_throttling(struct acpi_processor *pr) 905static long __acpi_processor_get_throttling(void *data)
896{ 906{
897 cpumask_var_t saved_mask; 907 struct acpi_processor *pr = data;
898 int ret;
899 908
909 return pr->throttling.acpi_processor_get_throttling(pr);
910}
911
912static int acpi_processor_get_throttling(struct acpi_processor *pr)
913{
900 if (!pr) 914 if (!pr)
901 return -EINVAL; 915 return -EINVAL;
902 916
903 if (!pr->flags.throttling) 917 if (!pr->flags.throttling)
904 return -ENODEV; 918 return -ENODEV;
905 919
906 if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
907 return -ENOMEM;
908
909 /* 920 /*
910 * Migrate task to the cpu pointed by pr. 921 * This is either called from the CPU hotplug callback of
922 * processor_driver or via the ACPI probe function. In the latter
923 * case the CPU is not guaranteed to be online. Both call sites are
924 * protected against CPU hotplug.
911 */ 925 */
912 cpumask_copy(saved_mask, &current->cpus_allowed); 926 if (!cpu_online(pr->id))
913 /* FIXME: use work_on_cpu() */
914 if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
915 /* Can't migrate to the target pr->id CPU. Exit */
916 free_cpumask_var(saved_mask);
917 return -ENODEV; 927 return -ENODEV;
918 }
919 ret = pr->throttling.acpi_processor_get_throttling(pr);
920 /* restore the previous state */
921 set_cpus_allowed_ptr(current, saved_mask);
922 free_cpumask_var(saved_mask);
923 928
924 return ret; 929 return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
925} 930}
926 931
927static int acpi_processor_get_fadt_info(struct acpi_processor *pr) 932static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -1071,8 +1076,15 @@ static long acpi_processor_throttling_fn(void *data)
1071 arg->target_state, arg->force); 1076 arg->target_state, arg->force);
1072} 1077}
1073 1078
1074int acpi_processor_set_throttling(struct acpi_processor *pr, 1079static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
1075 int state, bool force) 1080{
1081 if (direct)
1082 return fn(arg);
1083 return work_on_cpu(cpu, fn, arg);
1084}
1085
1086static int __acpi_processor_set_throttling(struct acpi_processor *pr,
1087 int state, bool force, bool direct)
1076{ 1088{
1077 int ret = 0; 1089 int ret = 0;
1078 unsigned int i; 1090 unsigned int i;
@@ -1121,7 +1133,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
1121 arg.pr = pr; 1133 arg.pr = pr;
1122 arg.target_state = state; 1134 arg.target_state = state;
1123 arg.force = force; 1135 arg.force = force;
1124 ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg); 1136 ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
1137 direct);
1125 } else { 1138 } else {
1126 /* 1139 /*
1127 * When the T-state coordination is SW_ALL or HW_ALL, 1140 * When the T-state coordination is SW_ALL or HW_ALL,
@@ -1154,8 +1167,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
1154 arg.pr = match_pr; 1167 arg.pr = match_pr;
1155 arg.target_state = state; 1168 arg.target_state = state;
1156 arg.force = force; 1169 arg.force = force;
1157 ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, 1170 ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
1158 &arg); 1171 &arg, direct);
1159 } 1172 }
1160 } 1173 }
1161 /* 1174 /*
@@ -1173,6 +1186,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
1173 return ret; 1186 return ret;
1174} 1187}
1175 1188
1189int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
1190 bool force)
1191{
1192 return __acpi_processor_set_throttling(pr, state, force, false);
1193}
1194
1176int acpi_processor_get_throttling_info(struct acpi_processor *pr) 1195int acpi_processor_get_throttling_info(struct acpi_processor *pr)
1177{ 1196{
1178 int result = 0; 1197 int result = 0;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 2fa8304171e0..7a3431018e0a 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -275,8 +275,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
275 device->driver_data = hc; 275 device->driver_data = hc;
276 276
277 acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc); 277 acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc);
278 printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n", 278 dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n",
279 hc->ec, hc->offset, hc->query_bit); 279 hc->offset, hc->query_bit);
280 280
281 return 0; 281 return 0;
282} 282}
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index b48ecbfc4498..8c5503c0bad7 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -206,6 +206,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
206 }, 206 },
207 }, 207 },
208 { 208 {
209 /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
210 .callback = video_detect_force_video,
211 .ident = "SAMSUNG 670Z5E",
212 .matches = {
213 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
214 DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
215 },
216 },
217 {
209 /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */ 218 /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
210 .callback = video_detect_force_video, 219 .callback = video_detect_force_video,
211 .ident = "SAMSUNG 730U3E/740U3E", 220 .ident = "SAMSUNG 730U3E/740U3E",
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index f0099360039e..1accc01fb0ca 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -68,11 +68,12 @@ static ssize_t driver_override_show(struct device *_dev,
68 struct device_attribute *attr, char *buf) 68 struct device_attribute *attr, char *buf)
69{ 69{
70 struct amba_device *dev = to_amba_device(_dev); 70 struct amba_device *dev = to_amba_device(_dev);
71 ssize_t len;
71 72
72 if (!dev->driver_override) 73 device_lock(_dev);
73 return 0; 74 len = sprintf(buf, "%s\n", dev->driver_override);
74 75 device_unlock(_dev);
75 return sprintf(buf, "%s\n", dev->driver_override); 76 return len;
76} 77}
77 78
78static ssize_t driver_override_store(struct device *_dev, 79static ssize_t driver_override_store(struct device *_dev,
@@ -80,9 +81,10 @@ static ssize_t driver_override_store(struct device *_dev,
80 const char *buf, size_t count) 81 const char *buf, size_t count)
81{ 82{
82 struct amba_device *dev = to_amba_device(_dev); 83 struct amba_device *dev = to_amba_device(_dev);
83 char *driver_override, *old = dev->driver_override, *cp; 84 char *driver_override, *old, *cp;
84 85
85 if (count > PATH_MAX) 86 /* We need to keep extra room for a newline */
87 if (count >= (PAGE_SIZE - 1))
86 return -EINVAL; 88 return -EINVAL;
87 89
88 driver_override = kstrndup(buf, count, GFP_KERNEL); 90 driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -93,12 +95,15 @@ static ssize_t driver_override_store(struct device *_dev,
93 if (cp) 95 if (cp)
94 *cp = '\0'; 96 *cp = '\0';
95 97
98 device_lock(_dev);
99 old = dev->driver_override;
96 if (strlen(driver_override)) { 100 if (strlen(driver_override)) {
97 dev->driver_override = driver_override; 101 dev->driver_override = driver_override;
98 } else { 102 } else {
99 kfree(driver_override); 103 kfree(driver_override);
100 dev->driver_override = NULL; 104 dev->driver_override = NULL;
101 } 105 }
106 device_unlock(_dev);
102 107
103 kfree(old); 108 kfree(old);
104 109
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 5531f020e561..260ce0e60187 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2622,6 +2622,10 @@ static unsigned int binder_poll(struct file *filp,
2622 binder_lock(__func__); 2622 binder_lock(__func__);
2623 2623
2624 thread = binder_get_thread(proc); 2624 thread = binder_get_thread(proc);
2625 if (!thread) {
2626 binder_unlock(__func__);
2627 return POLLERR;
2628 }
2625 2629
2626 wait_for_proc_work = thread->transaction_stack == NULL && 2630 wait_for_proc_work = thread->transaction_stack == NULL &&
2627 list_empty(&thread->todo) && thread->return_error == BR_OK; 2631 list_empty(&thread->todo) && thread->return_error == BR_OK;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 60a15831c009..34fdaa6e99ba 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -260,9 +260,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
260 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ 260 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
261 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ 261 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
262 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ 262 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
263 { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */ 263 { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH M AHCI */
264 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ 264 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
265 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ 265 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH M RAID */
266 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ 266 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
267 { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ 267 { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
268 { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ 268 { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
@@ -285,9 +285,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
285 { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ 285 { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
286 { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ 286 { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
287 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ 287 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
288 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ 288 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT M AHCI */
289 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ 289 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
290 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ 290 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT M RAID */
291 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ 291 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
292 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ 292 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
293 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ 293 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
@@ -296,20 +296,20 @@ static const struct pci_device_id ahci_pci_tbl[] = {
296 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ 296 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
297 { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ 297 { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
298 { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */ 298 { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
299 { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */ 299 { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point M AHCI */
300 { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */ 300 { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
301 { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */ 301 { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
302 { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */ 302 { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
303 { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */ 303 { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point M RAID */
304 { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */ 304 { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
305 { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */ 305 { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
306 { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */ 306 { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point M AHCI */
307 { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */ 307 { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
308 { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */ 308 { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point M RAID */
309 { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */ 309 { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
310 { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */ 310 { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point M RAID */
311 { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ 311 { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
312 { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ 312 { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point M RAID */
313 { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */ 313 { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
314 { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */ 314 { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
315 { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */ 315 { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
@@ -350,21 +350,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
350 { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */ 350 { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
351 { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */ 351 { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
352 { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */ 352 { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
353 { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */ 353 { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series M AHCI */
354 { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */ 354 { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
355 { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */ 355 { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series M RAID */
356 { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */ 356 { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
357 { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ 357 { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series M RAID */
358 { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ 358 { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
359 { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ 359 { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series M RAID */
360 { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */ 360 { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
361 { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ 361 { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
362 { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ 362 { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
363 { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */ 363 { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
364 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ 364 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H M AHCI */
365 { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ 365 { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
366 { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */ 366 { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
367 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ 367 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H M RAID */
368 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ 368 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
369 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ 369 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
370 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/ 370 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
@@ -382,6 +382,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
382 { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ 382 { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
383 { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ 383 { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
384 { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/ 384 { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
385 { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
386 { PCI_VDEVICE(INTEL, 0x0f22), board_ahci }, /* Bay Trail AHCI */
387 { PCI_VDEVICE(INTEL, 0x0f23), board_ahci }, /* Bay Trail AHCI */
388 { PCI_VDEVICE(INTEL, 0x22a3), board_ahci }, /* Cherry Trail AHCI */
389 { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci }, /* Apollo Lake AHCI */
385 390
386 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 391 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
387 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 392 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -533,7 +538,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
533 .driver_data = board_ahci_yes_fbs }, 538 .driver_data = board_ahci_yes_fbs },
534 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), 539 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
535 .driver_data = board_ahci_yes_fbs }, 540 .driver_data = board_ahci_yes_fbs },
536 { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), 541 { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
542 .driver_data = board_ahci_yes_fbs },
543 { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
537 .driver_data = board_ahci_yes_fbs }, 544 .driver_data = board_ahci_yes_fbs },
538 545
539 /* Promise */ 546 /* Promise */
@@ -1222,6 +1229,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
1222 return strcmp(buf, dmi->driver_data) < 0; 1229 return strcmp(buf, dmi->driver_data) < 0;
1223} 1230}
1224 1231
1232static bool ahci_broken_lpm(struct pci_dev *pdev)
1233{
1234 static const struct dmi_system_id sysids[] = {
1235 /* Various Lenovo 50 series have LPM issues with older BIOSen */
1236 {
1237 .matches = {
1238 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1239 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
1240 },
1241 .driver_data = "20180406", /* 1.31 */
1242 },
1243 {
1244 .matches = {
1245 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1246 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
1247 },
1248 .driver_data = "20180420", /* 1.28 */
1249 },
1250 {
1251 .matches = {
1252 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1253 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
1254 },
1255 .driver_data = "20180315", /* 1.33 */
1256 },
1257 {
1258 .matches = {
1259 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1260 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
1261 },
1262 /*
1263 * Note date based on release notes, 2.35 has been
1264 * reported to be good, but I've been unable to get
1265 * a hold of the reporter to get the DMI BIOS date.
1266 * TODO: fix this.
1267 */
1268 .driver_data = "20180310", /* 2.35 */
1269 },
1270 { } /* terminate list */
1271 };
1272 const struct dmi_system_id *dmi = dmi_first_match(sysids);
1273 int year, month, date;
1274 char buf[9];
1275
1276 if (!dmi)
1277 return false;
1278
1279 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
1280 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
1281
1282 return strcmp(buf, dmi->driver_data) < 0;
1283}
1284
1225static bool ahci_broken_online(struct pci_dev *pdev) 1285static bool ahci_broken_online(struct pci_dev *pdev)
1226{ 1286{
1227#define ENCODE_BUSDEVFN(bus, slot, func) \ 1287#define ENCODE_BUSDEVFN(bus, slot, func) \
@@ -1581,6 +1641,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1581 "quirky BIOS, skipping spindown on poweroff\n"); 1641 "quirky BIOS, skipping spindown on poweroff\n");
1582 } 1642 }
1583 1643
1644 if (ahci_broken_lpm(pdev)) {
1645 pi.flags |= ATA_FLAG_NO_LPM;
1646 dev_warn(&pdev->dev,
1647 "BIOS update required for Link Power Management support\n");
1648 }
1649
1584 if (ahci_broken_suspend(pdev)) { 1650 if (ahci_broken_suspend(pdev)) {
1585 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; 1651 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
1586 dev_warn(&pdev->dev, 1652 dev_warn(&pdev->dev,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index aaa761b9081c..cd2eab6aa92e 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
514 514
515 irq = platform_get_irq(pdev, 0); 515 irq = platform_get_irq(pdev, 0);
516 if (irq <= 0) { 516 if (irq <= 0) {
517 dev_err(dev, "no irq\n"); 517 if (irq != -EPROBE_DEFER)
518 return -EINVAL; 518 dev_err(dev, "no irq\n");
519 return irq;
519 } 520 }
520 521
521 hpriv->irq = irq; 522 hpriv->irq = irq;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 69ec1c5d7152..ba514fa733de 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2209,6 +2209,9 @@ int ata_dev_configure(struct ata_device *dev)
2209 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) 2209 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2210 dev->horkage |= ATA_HORKAGE_NOLPM; 2210 dev->horkage |= ATA_HORKAGE_NOLPM;
2211 2211
2212 if (ap->flags & ATA_FLAG_NO_LPM)
2213 dev->horkage |= ATA_HORKAGE_NOLPM;
2214
2212 if (dev->horkage & ATA_HORKAGE_NOLPM) { 2215 if (dev->horkage & ATA_HORKAGE_NOLPM) {
2213 ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); 2216 ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2214 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; 2217 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
@@ -4187,6 +4190,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4187 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4190 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4188 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4191 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4189 4192
4193 /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
4194 SD7SN6S256G and SD8SN8U256G */
4195 { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
4196
4190 /* devices which puke on READ_NATIVE_MAX */ 4197 /* devices which puke on READ_NATIVE_MAX */
4191 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4198 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4192 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4199 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
@@ -4224,7 +4231,28 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4224 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4231 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4225 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4232 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4226 4233
4234 /* Crucial BX100 SSD 500GB has broken LPM support */
4235 { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
4236
4237 /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4238 { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4239 ATA_HORKAGE_ZERO_AFTER_TRIM |
4240 ATA_HORKAGE_NOLPM, },
4241 /* 512GB MX100 with newer firmware has only LPM issues */
4242 { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
4243 ATA_HORKAGE_NOLPM, },
4244
4245 /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4246 { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4247 ATA_HORKAGE_ZERO_AFTER_TRIM |
4248 ATA_HORKAGE_NOLPM, },
4249 { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4250 ATA_HORKAGE_ZERO_AFTER_TRIM |
4251 ATA_HORKAGE_NOLPM, },
4252
4227 /* devices that don't properly handle queued TRIM commands */ 4253 /* devices that don't properly handle queued TRIM commands */
4254 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4255 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4228 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4256 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4229 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4257 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4230 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4258 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
@@ -4235,7 +4263,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4235 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4263 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4236 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4264 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4237 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4265 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4238 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4266 { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4267 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4268 { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4239 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4269 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4240 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4270 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4241 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4271 ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -5077,8 +5107,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
5077 * We guarantee to LLDs that they will have at least one 5107 * We guarantee to LLDs that they will have at least one
5078 * non-zero sg if the command is a data command. 5108 * non-zero sg if the command is a data command.
5079 */ 5109 */
5080 if (WARN_ON_ONCE(ata_is_data(prot) && 5110 if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
5081 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5082 goto sys_err; 5111 goto sys_err;
5083 5112
5084 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5113 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 75cced210b2a..7db76b5c7ada 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2198,12 +2198,16 @@ static void ata_eh_link_autopsy(struct ata_link *link)
2198 if (qc->err_mask & ~AC_ERR_OTHER) 2198 if (qc->err_mask & ~AC_ERR_OTHER)
2199 qc->err_mask &= ~AC_ERR_OTHER; 2199 qc->err_mask &= ~AC_ERR_OTHER;
2200 2200
2201 /* SENSE_VALID trumps dev/unknown error and revalidation */ 2201 /*
2202 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
2203 * layers will determine whether the command is worth retrying
2204 * based on the sense data and device class/type. Otherwise,
2205 * determine directly if the command is worth retrying using its
2206 * error mask and flags.
2207 */
2202 if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2208 if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2203 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2209 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2204 2210 else if (ata_eh_worth_retry(qc))
2205 /* determine whether the command is worth retrying */
2206 if (ata_eh_worth_retry(qc))
2207 qc->flags |= ATA_QCFLAG_RETRY; 2211 qc->flags |= ATA_QCFLAG_RETRY;
2208 2212
2209 /* accumulate error info */ 2213 /* accumulate error info */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 5b2aee83d776..4a267347a6d9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3472,7 +3472,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
3472 if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { 3472 if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
3473 /* relay SCSI command to ATAPI device */ 3473 /* relay SCSI command to ATAPI device */
3474 int len = COMMAND_SIZE(scsi_op); 3474 int len = COMMAND_SIZE(scsi_op);
3475 if (unlikely(len > scmd->cmd_len || len > dev->cdb_len)) 3475 if (unlikely(len > scmd->cmd_len ||
3476 len > dev->cdb_len ||
3477 scmd->cmd_len > ATAPI_CDB_LEN))
3476 goto bad_cdb_len; 3478 goto bad_cdb_len;
3477 3479
3478 xlat_func = atapi_xlat; 3480 xlat_func = atapi_xlat;
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index f3a65a3140d3..0ad96c647541 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -34,7 +34,7 @@ struct zpodd {
34static int eject_tray(struct ata_device *dev) 34static int eject_tray(struct ata_device *dev)
35{ 35{
36 struct ata_taskfile tf; 36 struct ata_taskfile tf;
37 const char cdb[] = { GPCMD_START_STOP_UNIT, 37 static const char cdb[ATAPI_CDB_LEN] = { GPCMD_START_STOP_UNIT,
38 0, 0, 0, 38 0, 0, 0,
39 0x02, /* LoEj */ 39 0x02, /* LoEj */
40 0, 0, 0, 0, 0, 0, 0, 40 0, 0, 0, 0, 0, 0, 0,
@@ -55,7 +55,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
55 unsigned int ret; 55 unsigned int ret;
56 struct rm_feature_desc *desc = (void *)(buf + 8); 56 struct rm_feature_desc *desc = (void *)(buf + 8);
57 struct ata_taskfile tf; 57 struct ata_taskfile tf;
58 char cdb[] = { GPCMD_GET_CONFIGURATION, 58 static const char cdb[] = { GPCMD_GET_CONFIGURATION,
59 2, /* only 1 feature descriptor requested */ 59 2, /* only 1 feature descriptor requested */
60 0, 3, /* 3, removable medium feature */ 60 0, 3, /* 3, removable medium feature */
61 0, 0, 0,/* reserved */ 61 0, 0, 0,/* reserved */
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index cecfb943762f..94712e1c5cf9 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -23,6 +23,7 @@
23#include <linux/bitops.h> 23#include <linux/bitops.h>
24#include <linux/wait.h> 24#include <linux/wait.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/nospec.h>
26#include <asm/byteorder.h> 27#include <asm/byteorder.h>
27#include <asm/string.h> 28#include <asm/string.h>
28#include <asm/io.h> 29#include <asm/io.h>
@@ -1148,8 +1149,8 @@ static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
1148} 1149}
1149 1150
1150 1151
1151static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd, 1152static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
1152 int offset, int swap) 1153 int swap)
1153{ 1154{
1154 unsigned char buf[ZEPROM_SIZE]; 1155 unsigned char buf[ZEPROM_SIZE];
1155 struct zatm_dev *zatm_dev; 1156 struct zatm_dev *zatm_dev;
@@ -1456,6 +1457,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
1456 return -EFAULT; 1457 return -EFAULT;
1457 if (pool < 0 || pool > ZATM_LAST_POOL) 1458 if (pool < 0 || pool > ZATM_LAST_POOL)
1458 return -EINVAL; 1459 return -EINVAL;
1460 pool = array_index_nospec(pool,
1461 ZATM_LAST_POOL + 1);
1459 spin_lock_irqsave(&zatm_dev->lock, flags); 1462 spin_lock_irqsave(&zatm_dev->lock, flags);
1460 info = zatm_dev->pool_info[pool]; 1463 info = zatm_dev->pool_info[pool];
1461 if (cmd == ZATM_GETPOOLZ) { 1464 if (cmd == ZATM_GETPOOLZ) {
@@ -1478,6 +1481,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
1478 return -EFAULT; 1481 return -EFAULT;
1479 if (pool < 0 || pool > ZATM_LAST_POOL) 1482 if (pool < 0 || pool > ZATM_LAST_POOL)
1480 return -EINVAL; 1483 return -EINVAL;
1484 pool = array_index_nospec(pool,
1485 ZATM_LAST_POOL + 1);
1481 if (copy_from_user(&info, 1486 if (copy_from_user(&info,
1482 &((struct zatm_pool_req __user *) arg)->info, 1487 &((struct zatm_pool_req __user *) arg)->info,
1483 sizeof(info))) return -EFAULT; 1488 sizeof(info))) return -EFAULT;
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index e9fd32e91668..70e13cf06ed0 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -16,6 +16,7 @@
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19#include <linux/acpi.h>
19#include <linux/bitops.h> 20#include <linux/bitops.h>
20#include <linux/cacheinfo.h> 21#include <linux/cacheinfo.h>
21#include <linux/compiler.h> 22#include <linux/compiler.h>
@@ -104,9 +105,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
104 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); 105 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
105 struct cacheinfo *this_leaf, *sib_leaf; 106 struct cacheinfo *this_leaf, *sib_leaf;
106 unsigned int index; 107 unsigned int index;
107 int ret; 108 int ret = 0;
109
110 if (this_cpu_ci->cpu_map_populated)
111 return 0;
108 112
109 ret = cache_setup_of_node(cpu); 113 if (of_have_populated_dt())
114 ret = cache_setup_of_node(cpu);
115 else if (!acpi_disabled)
116 /* No cache property/hierarchy support yet in ACPI */
117 ret = -ENOTSUPP;
110 if (ret) 118 if (ret)
111 return ret; 119 return ret;
112 120
@@ -203,8 +211,7 @@ static int detect_cache_attributes(unsigned int cpu)
203 */ 211 */
204 ret = cache_shared_cpu_map_setup(cpu); 212 ret = cache_shared_cpu_map_setup(cpu);
205 if (ret) { 213 if (ret) {
206 pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n", 214 pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
207 cpu);
208 goto free_ci; 215 goto free_ci;
209 } 216 }
210 return 0; 217 return 0;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index afe045792796..049ccc070ce5 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -759,7 +759,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
759 759
760 dir = kzalloc(sizeof(*dir), GFP_KERNEL); 760 dir = kzalloc(sizeof(*dir), GFP_KERNEL);
761 if (!dir) 761 if (!dir)
762 return NULL; 762 return ERR_PTR(-ENOMEM);
763 763
764 dir->class = class; 764 dir->class = class;
765 kobject_init(&dir->kobj, &class_dir_ktype); 765 kobject_init(&dir->kobj, &class_dir_ktype);
@@ -769,7 +769,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
769 retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); 769 retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
770 if (retval < 0) { 770 if (retval < 0) {
771 kobject_put(&dir->kobj); 771 kobject_put(&dir->kobj);
772 return NULL; 772 return ERR_PTR(retval);
773 } 773 }
774 return &dir->kobj; 774 return &dir->kobj;
775} 775}
@@ -1076,6 +1076,10 @@ int device_add(struct device *dev)
1076 1076
1077 parent = get_device(dev->parent); 1077 parent = get_device(dev->parent);
1078 kobj = get_device_parent(dev, parent); 1078 kobj = get_device_parent(dev, parent);
1079 if (IS_ERR(kobj)) {
1080 error = PTR_ERR(kobj);
1081 goto parent_error;
1082 }
1079 if (kobj) 1083 if (kobj)
1080 dev->kobj.parent = kobj; 1084 dev->kobj.parent = kobj;
1081 1085
@@ -1174,6 +1178,7 @@ done:
1174 kobject_del(&dev->kobj); 1178 kobject_del(&dev->kobj);
1175 Error: 1179 Error:
1176 cleanup_glue_dir(dev, glue_dir); 1180 cleanup_glue_dir(dev, glue_dir);
1181parent_error:
1177 put_device(parent); 1182 put_device(parent);
1178name_error: 1183name_error:
1179 kfree(dev->p); 1184 kfree(dev->p);
@@ -1990,6 +1995,11 @@ int device_move(struct device *dev, struct device *new_parent,
1990 device_pm_lock(); 1995 device_pm_lock();
1991 new_parent = get_device(new_parent); 1996 new_parent = get_device(new_parent);
1992 new_parent_kobj = get_device_parent(dev, new_parent); 1997 new_parent_kobj = get_device_parent(dev, new_parent);
1998 if (IS_ERR(new_parent_kobj)) {
1999 error = PTR_ERR(new_parent_kobj);
2000 put_device(new_parent);
2001 goto out;
2002 }
1993 2003
1994 pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), 2004 pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
1995 __func__, new_parent ? dev_name(new_parent) : "<NULL>"); 2005 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 3db71afbba93..41090ef5facb 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -518,14 +518,30 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
518 return sprintf(buf, "Not affected\n"); 518 return sprintf(buf, "Not affected\n");
519} 519}
520 520
521ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
522 struct device_attribute *attr, char *buf)
523{
524 return sprintf(buf, "Not affected\n");
525}
526
527ssize_t __weak cpu_show_l1tf(struct device *dev,
528 struct device_attribute *attr, char *buf)
529{
530 return sprintf(buf, "Not affected\n");
531}
532
521static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); 533static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
522static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); 534static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
523static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); 535static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
536static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
537static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
524 538
525static struct attribute *cpu_root_vulnerabilities_attrs[] = { 539static struct attribute *cpu_root_vulnerabilities_attrs[] = {
526 &dev_attr_meltdown.attr, 540 &dev_attr_meltdown.attr,
527 &dev_attr_spectre_v1.attr, 541 &dev_attr_spectre_v1.attr,
528 &dev_attr_spectre_v2.attr, 542 &dev_attr_spectre_v2.attr,
543 &dev_attr_spec_store_bypass.attr,
544 &dev_attr_l1tf.attr,
529 NULL 545 NULL
530}; 546};
531 547
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a641cf3ccad6..1dffb018a7fe 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -304,14 +304,6 @@ static int really_probe(struct device *dev, struct device_driver *drv)
304 goto probe_failed; 304 goto probe_failed;
305 } 305 }
306 306
307 /*
308 * Ensure devices are listed in devices_kset in correct order
309 * It's important to move Dev to the end of devices_kset before
310 * calling .probe, because it could be recursive and parent Dev
311 * should always go first
312 */
313 devices_kset_move_last(dev);
314
315 if (dev->bus->probe) { 307 if (dev->bus->probe) {
316 ret = dev->bus->probe(dev); 308 ret = dev->bus->probe(dev);
317 if (ret) 309 if (ret)
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index a311cfa4c5bd..a6975795e7f3 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -166,14 +166,14 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
166} 166}
167EXPORT_SYMBOL(generate_pm_trace); 167EXPORT_SYMBOL(generate_pm_trace);
168 168
169extern char __tracedata_start, __tracedata_end; 169extern char __tracedata_start[], __tracedata_end[];
170static int show_file_hash(unsigned int value) 170static int show_file_hash(unsigned int value)
171{ 171{
172 int match; 172 int match;
173 char *tracedata; 173 char *tracedata;
174 174
175 match = 0; 175 match = 0;
176 for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ; 176 for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
177 tracedata += 2 + sizeof(unsigned long)) { 177 tracedata += 2 + sizeof(unsigned long)) {
178 unsigned short lineno = *(unsigned short *)tracedata; 178 unsigned short lineno = *(unsigned short *)tracedata;
179 const char *file = *(const char **)(tracedata + 2); 179 const char *file = *(const char **)(tracedata + 2);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 4ac63c0e50c7..fd377b956199 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1582,7 +1582,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
1582 return -EINVAL; 1582 return -EINVAL;
1583 if (val_len % map->format.val_bytes) 1583 if (val_len % map->format.val_bytes)
1584 return -EINVAL; 1584 return -EINVAL;
1585 if (map->max_raw_write && map->max_raw_write > val_len) 1585 if (map->max_raw_write && map->max_raw_write < val_len)
1586 return -E2BIG; 1586 return -E2BIG;
1587 1587
1588 map->lock(map->lock_arg); 1588 map->lock(map->lock_arg);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 5578c1477ba6..8bfd4fd7e9ec 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -256,8 +256,8 @@ void drbd_request_endio(struct bio *bio)
256 } else 256 } else
257 what = COMPLETED_OK; 257 what = COMPLETED_OK;
258 258
259 bio_put(req->private_bio);
260 req->private_bio = ERR_PTR(bio->bi_error); 259 req->private_bio = ERR_PTR(bio->bi_error);
260 bio_put(bio);
261 261
262 /* not req_mod(), we need irqsave here! */ 262 /* not req_mod(), we need irqsave here! */
263 spin_lock_irqsave(&device->resource->req_lock, flags); 263 spin_lock_irqsave(&device->resource->req_lock, flags);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cec36d5c24f5..da3902ac16c8 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -263,7 +263,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
263 struct iov_iter i; 263 struct iov_iter i;
264 ssize_t bw; 264 ssize_t bw;
265 265
266 iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); 266 iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
267 267
268 file_start_write(file); 268 file_start_write(file);
269 bw = vfs_iter_write(file, &i, ppos); 269 bw = vfs_iter_write(file, &i, ppos);
@@ -623,6 +623,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
623 */ 623 */
624static int loop_flush(struct loop_device *lo) 624static int loop_flush(struct loop_device *lo)
625{ 625{
626 /* loop not yet configured, no running thread, nothing to flush */
627 if (lo->lo_state != Lo_bound)
628 return 0;
626 return loop_switch(lo, NULL); 629 return loop_switch(lo, NULL);
627} 630}
628 631
@@ -648,6 +651,36 @@ static void loop_reread_partitions(struct loop_device *lo,
648 __func__, lo->lo_number, lo->lo_file_name, rc); 651 __func__, lo->lo_number, lo->lo_file_name, rc);
649} 652}
650 653
654static inline int is_loop_device(struct file *file)
655{
656 struct inode *i = file->f_mapping->host;
657
658 return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
659}
660
661static int loop_validate_file(struct file *file, struct block_device *bdev)
662{
663 struct inode *inode = file->f_mapping->host;
664 struct file *f = file;
665
666 /* Avoid recursion */
667 while (is_loop_device(f)) {
668 struct loop_device *l;
669
670 if (f->f_mapping->host->i_bdev == bdev)
671 return -EBADF;
672
673 l = f->f_mapping->host->i_bdev->bd_disk->private_data;
674 if (l->lo_state == Lo_unbound) {
675 return -EINVAL;
676 }
677 f = l->lo_backing_file;
678 }
679 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
680 return -EINVAL;
681 return 0;
682}
683
651/* 684/*
652 * loop_change_fd switched the backing store of a loopback device to 685 * loop_change_fd switched the backing store of a loopback device to
653 * a new file. This is useful for operating system installers to free up 686 * a new file. This is useful for operating system installers to free up
@@ -677,14 +710,15 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
677 if (!file) 710 if (!file)
678 goto out; 711 goto out;
679 712
713 error = loop_validate_file(file, bdev);
714 if (error)
715 goto out_putf;
716
680 inode = file->f_mapping->host; 717 inode = file->f_mapping->host;
681 old_file = lo->lo_backing_file; 718 old_file = lo->lo_backing_file;
682 719
683 error = -EINVAL; 720 error = -EINVAL;
684 721
685 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
686 goto out_putf;
687
688 /* size of the new backing store needs to be the same */ 722 /* size of the new backing store needs to be the same */
689 if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) 723 if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
690 goto out_putf; 724 goto out_putf;
@@ -705,13 +739,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
705 return error; 739 return error;
706} 740}
707 741
708static inline int is_loop_device(struct file *file)
709{
710 struct inode *i = file->f_mapping->host;
711
712 return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
713}
714
715/* loop sysfs attributes */ 742/* loop sysfs attributes */
716 743
717static ssize_t loop_attr_show(struct device *dev, char *page, 744static ssize_t loop_attr_show(struct device *dev, char *page,
@@ -808,16 +835,17 @@ static struct attribute_group loop_attribute_group = {
808 .attrs= loop_attrs, 835 .attrs= loop_attrs,
809}; 836};
810 837
811static int loop_sysfs_init(struct loop_device *lo) 838static void loop_sysfs_init(struct loop_device *lo)
812{ 839{
813 return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, 840 lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
814 &loop_attribute_group); 841 &loop_attribute_group);
815} 842}
816 843
817static void loop_sysfs_exit(struct loop_device *lo) 844static void loop_sysfs_exit(struct loop_device *lo)
818{ 845{
819 sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, 846 if (lo->sysfs_inited)
820 &loop_attribute_group); 847 sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
848 &loop_attribute_group);
821} 849}
822 850
823static void loop_config_discard(struct loop_device *lo) 851static void loop_config_discard(struct loop_device *lo)
@@ -869,7 +897,7 @@ static int loop_prepare_queue(struct loop_device *lo)
869static int loop_set_fd(struct loop_device *lo, fmode_t mode, 897static int loop_set_fd(struct loop_device *lo, fmode_t mode,
870 struct block_device *bdev, unsigned int arg) 898 struct block_device *bdev, unsigned int arg)
871{ 899{
872 struct file *file, *f; 900 struct file *file;
873 struct inode *inode; 901 struct inode *inode;
874 struct address_space *mapping; 902 struct address_space *mapping;
875 unsigned lo_blocksize; 903 unsigned lo_blocksize;
@@ -889,29 +917,13 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
889 if (lo->lo_state != Lo_unbound) 917 if (lo->lo_state != Lo_unbound)
890 goto out_putf; 918 goto out_putf;
891 919
892 /* Avoid recursion */ 920 error = loop_validate_file(file, bdev);
893 f = file; 921 if (error)
894 while (is_loop_device(f)) { 922 goto out_putf;
895 struct loop_device *l;
896
897 if (f->f_mapping->host->i_bdev == bdev)
898 goto out_putf;
899
900 l = f->f_mapping->host->i_bdev->bd_disk->private_data;
901 if (l->lo_state == Lo_unbound) {
902 error = -EINVAL;
903 goto out_putf;
904 }
905 f = l->lo_backing_file;
906 }
907 923
908 mapping = file->f_mapping; 924 mapping = file->f_mapping;
909 inode = mapping->host; 925 inode = mapping->host;
910 926
911 error = -EINVAL;
912 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
913 goto out_putf;
914
915 if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || 927 if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
916 !file->f_op->write_iter) 928 !file->f_op->write_iter)
917 lo_flags |= LO_FLAGS_READ_ONLY; 929 lo_flags |= LO_FLAGS_READ_ONLY;
@@ -1118,11 +1130,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1118 if (info->lo_encrypt_type) { 1130 if (info->lo_encrypt_type) {
1119 unsigned int type = info->lo_encrypt_type; 1131 unsigned int type = info->lo_encrypt_type;
1120 1132
1121 if (type >= MAX_LO_CRYPT) 1133 if (type >= MAX_LO_CRYPT) {
1122 return -EINVAL; 1134 err = -EINVAL;
1135 goto exit;
1136 }
1123 xfer = xfer_funcs[type]; 1137 xfer = xfer_funcs[type];
1124 if (xfer == NULL) 1138 if (xfer == NULL) {
1125 return -EINVAL; 1139 err = -EINVAL;
1140 goto exit;
1141 }
1126 } else 1142 } else
1127 xfer = NULL; 1143 xfer = NULL;
1128 1144
@@ -1569,9 +1585,8 @@ out:
1569 return err; 1585 return err;
1570} 1586}
1571 1587
1572static void lo_release(struct gendisk *disk, fmode_t mode) 1588static void __lo_release(struct loop_device *lo)
1573{ 1589{
1574 struct loop_device *lo = disk->private_data;
1575 int err; 1590 int err;
1576 1591
1577 if (atomic_dec_return(&lo->lo_refcnt)) 1592 if (atomic_dec_return(&lo->lo_refcnt))
@@ -1597,6 +1612,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
1597 mutex_unlock(&lo->lo_ctl_mutex); 1612 mutex_unlock(&lo->lo_ctl_mutex);
1598} 1613}
1599 1614
1615static void lo_release(struct gendisk *disk, fmode_t mode)
1616{
1617 mutex_lock(&loop_index_mutex);
1618 __lo_release(disk->private_data);
1619 mutex_unlock(&loop_index_mutex);
1620}
1621
1600static const struct block_device_operations lo_fops = { 1622static const struct block_device_operations lo_fops = {
1601 .owner = THIS_MODULE, 1623 .owner = THIS_MODULE,
1602 .open = lo_open, 1624 .open = lo_open,
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index fb2237c73e61..60f0fd2c0c65 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -59,6 +59,7 @@ struct loop_device {
59 struct kthread_worker worker; 59 struct kthread_worker worker;
60 struct task_struct *worker_task; 60 struct task_struct *worker_task;
61 bool use_dio; 61 bool use_dio;
62 bool sysfs_inited;
62 63
63 struct request_queue *lo_queue; 64 struct request_queue *lo_queue;
64 struct blk_mq_tag_set tag_set; 65 struct blk_mq_tag_set tag_set;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 93362362aa55..8474a1b0740f 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
230 struct pcd_unit *cd = bdev->bd_disk->private_data; 230 struct pcd_unit *cd = bdev->bd_disk->private_data;
231 int ret; 231 int ret;
232 232
233 check_disk_change(bdev);
234
233 mutex_lock(&pcd_mutex); 235 mutex_lock(&pcd_mutex);
234 ret = cdrom_open(&cd->info, bdev, mode); 236 ret = cdrom_open(&cd->info, bdev, mode);
235 mutex_unlock(&pcd_mutex); 237 mutex_unlock(&pcd_mutex);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d06c62eccdf0..156968a6655d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2779,7 +2779,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
2779 pd->pkt_dev = MKDEV(pktdev_major, idx); 2779 pd->pkt_dev = MKDEV(pktdev_major, idx);
2780 ret = pkt_new_dev(pd, dev); 2780 ret = pkt_new_dev(pd, dev);
2781 if (ret) 2781 if (ret)
2782 goto out_new_dev; 2782 goto out_mem2;
2783 2783
2784 /* inherit events of the host device */ 2784 /* inherit events of the host device */
2785 disk->events = pd->bdev->bd_disk->events; 2785 disk->events = pd->bdev->bd_disk->events;
@@ -2797,8 +2797,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
2797 mutex_unlock(&ctl_mutex); 2797 mutex_unlock(&ctl_mutex);
2798 return 0; 2798 return 0;
2799 2799
2800out_new_dev:
2801 blk_cleanup_queue(disk->queue);
2802out_mem2: 2800out_mem2:
2803 put_disk(disk); 2801 put_disk(disk);
2804out_mem: 2802out_mem:
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 7b624423a7e8..89ccb604045c 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -31,6 +31,7 @@
31#include <linux/errno.h> 31#include <linux/errno.h>
32#include <linux/skbuff.h> 32#include <linux/skbuff.h>
33 33
34#include <linux/mmc/host.h>
34#include <linux/mmc/sdio_ids.h> 35#include <linux/mmc/sdio_ids.h>
35#include <linux/mmc/sdio_func.h> 36#include <linux/mmc/sdio_func.h>
36 37
@@ -291,6 +292,14 @@ static int btsdio_probe(struct sdio_func *func,
291 tuple = tuple->next; 292 tuple = tuple->next;
292 } 293 }
293 294
295 /* BCM43341 devices soldered onto the PCB (non-removable) use an
296 * uart connection for bluetooth, ignore the BT SDIO interface.
297 */
298 if (func->vendor == SDIO_VENDOR_ID_BROADCOM &&
299 func->device == SDIO_DEVICE_ID_BROADCOM_43341 &&
300 !mmc_card_is_removable(func->card->host))
301 return -ENODEV;
302
294 data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL); 303 data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
295 if (!data) 304 if (!data)
296 return -ENOMEM; 305 return -ENOMEM;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 1ccad79ce77c..4a899b41145e 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/usb.h> 25#include <linux/usb.h>
26#include <linux/usb/quirks.h>
26#include <linux/firmware.h> 27#include <linux/firmware.h>
27#include <asm/unaligned.h> 28#include <asm/unaligned.h>
28 29
@@ -335,6 +336,12 @@ static const struct usb_device_id blacklist_table[] = {
335 { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK }, 336 { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
336 { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK }, 337 { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
337 338
339 /* Additional Realtek 8723BU Bluetooth devices */
340 { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
341
342 /* Additional Realtek 8723DE Bluetooth devices */
343 { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
344
338 /* Additional Realtek 8821AE Bluetooth devices */ 345 /* Additional Realtek 8821AE Bluetooth devices */
339 { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, 346 { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
340 { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK }, 347 { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
@@ -342,6 +349,9 @@ static const struct usb_device_id blacklist_table[] = {
342 { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK }, 349 { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
343 { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK }, 350 { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
344 351
352 /* Additional Realtek 8822BE Bluetooth devices */
353 { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
354
345 /* Silicon Wave based devices */ 355 /* Silicon Wave based devices */
346 { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, 356 { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
347 357
@@ -360,8 +370,8 @@ static const struct usb_device_id blacklist_table[] = {
360#define BTUSB_FIRMWARE_LOADED 7 370#define BTUSB_FIRMWARE_LOADED 7
361#define BTUSB_FIRMWARE_FAILED 8 371#define BTUSB_FIRMWARE_FAILED 8
362#define BTUSB_BOOTING 9 372#define BTUSB_BOOTING 9
363#define BTUSB_RESET_RESUME 10 373#define BTUSB_DIAG_RUNNING 10
364#define BTUSB_DIAG_RUNNING 11 374#define BTUSB_OOB_WAKE_ENABLED 11
365 375
366struct btusb_data { 376struct btusb_data {
367 struct hci_dev *hdev; 377 struct hci_dev *hdev;
@@ -2972,9 +2982,9 @@ static int btusb_probe(struct usb_interface *intf,
2972 2982
2973 /* QCA Rome devices lose their updated firmware over suspend, 2983 /* QCA Rome devices lose their updated firmware over suspend,
2974 * but the USB hub doesn't notice any status change. 2984 * but the USB hub doesn't notice any status change.
2975 * Explicitly request a device reset on resume. 2985 * explicitly request a device reset on resume.
2976 */ 2986 */
2977 set_bit(BTUSB_RESET_RESUME, &data->flags); 2987 interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
2978 } 2988 }
2979 2989
2980#ifdef CONFIG_BT_HCIBTUSB_RTL 2990#ifdef CONFIG_BT_HCIBTUSB_RTL
@@ -2985,7 +2995,7 @@ static int btusb_probe(struct usb_interface *intf,
2985 * but the USB hub doesn't notice any status change. 2995 * but the USB hub doesn't notice any status change.
2986 * Explicitly request a device reset on resume. 2996 * Explicitly request a device reset on resume.
2987 */ 2997 */
2988 set_bit(BTUSB_RESET_RESUME, &data->flags); 2998 interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
2989 } 2999 }
2990#endif 3000#endif
2991 3001
@@ -3142,14 +3152,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
3142 btusb_stop_traffic(data); 3152 btusb_stop_traffic(data);
3143 usb_kill_anchored_urbs(&data->tx_anchor); 3153 usb_kill_anchored_urbs(&data->tx_anchor);
3144 3154
3145 /* Optionally request a device reset on resume, but only when
3146 * wakeups are disabled. If wakeups are enabled we assume the
3147 * device will stay powered up throughout suspend.
3148 */
3149 if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
3150 !device_may_wakeup(&data->udev->dev))
3151 data->udev->reset_resume = 1;
3152
3153 return 0; 3155 return 0;
3154} 3156}
3155 3157
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 71325e443e46..ecfb9ed2cff6 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -884,7 +884,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
884 */ 884 */
885 set_current_state(TASK_UNINTERRUPTIBLE); 885 set_current_state(TASK_UNINTERRUPTIBLE);
886 schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS)); 886 schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
887 set_current_state(TASK_INTERRUPTIBLE); 887 set_current_state(TASK_RUNNING);
888 888
889 return 0; 889 return 0;
890} 890}
@@ -936,6 +936,15 @@ static int qca_setup(struct hci_uart *hu)
936 if (!ret) { 936 if (!ret) {
937 set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); 937 set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
938 qca_debugfs_init(hdev); 938 qca_debugfs_init(hdev);
939 } else if (ret == -ENOENT) {
940 /* No patch/nvm-config found, run with original fw/config */
941 ret = 0;
942 } else if (ret == -EAGAIN) {
943 /*
944 * Userspace firmware loader will return -EAGAIN in case no
945 * patch/nvm-config is found, so run with original fw/config.
946 */
947 ret = 0;
939 } 948 }
940 949
941 /* Setup bdaddr */ 950 /* Setup bdaddr */
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index f364fa4d24eb..f59183018280 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2014 Broadcom Corporation 2 * Copyright (C) 2014-2017 Broadcom
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
@@ -33,8 +33,6 @@
33#define ARB_ERR_CAP_CLEAR (1 << 0) 33#define ARB_ERR_CAP_CLEAR (1 << 0)
34#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12) 34#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
35#define ARB_ERR_CAP_STATUS_TEA (1 << 11) 35#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
36#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2)
37#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c
38#define ARB_ERR_CAP_STATUS_WRITE (1 << 1) 36#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
39#define ARB_ERR_CAP_STATUS_VALID (1 << 0) 37#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
40 38
@@ -43,7 +41,6 @@ enum {
43 ARB_ERR_CAP_CLR, 41 ARB_ERR_CAP_CLR,
44 ARB_ERR_CAP_HI_ADDR, 42 ARB_ERR_CAP_HI_ADDR,
45 ARB_ERR_CAP_ADDR, 43 ARB_ERR_CAP_ADDR,
46 ARB_ERR_CAP_DATA,
47 ARB_ERR_CAP_STATUS, 44 ARB_ERR_CAP_STATUS,
48 ARB_ERR_CAP_MASTER, 45 ARB_ERR_CAP_MASTER,
49}; 46};
@@ -53,7 +50,6 @@ static const int gisb_offsets_bcm7038[] = {
53 [ARB_ERR_CAP_CLR] = 0x0c4, 50 [ARB_ERR_CAP_CLR] = 0x0c4,
54 [ARB_ERR_CAP_HI_ADDR] = -1, 51 [ARB_ERR_CAP_HI_ADDR] = -1,
55 [ARB_ERR_CAP_ADDR] = 0x0c8, 52 [ARB_ERR_CAP_ADDR] = 0x0c8,
56 [ARB_ERR_CAP_DATA] = 0x0cc,
57 [ARB_ERR_CAP_STATUS] = 0x0d0, 53 [ARB_ERR_CAP_STATUS] = 0x0d0,
58 [ARB_ERR_CAP_MASTER] = -1, 54 [ARB_ERR_CAP_MASTER] = -1,
59}; 55};
@@ -63,7 +59,6 @@ static const int gisb_offsets_bcm7400[] = {
63 [ARB_ERR_CAP_CLR] = 0x0c8, 59 [ARB_ERR_CAP_CLR] = 0x0c8,
64 [ARB_ERR_CAP_HI_ADDR] = -1, 60 [ARB_ERR_CAP_HI_ADDR] = -1,
65 [ARB_ERR_CAP_ADDR] = 0x0cc, 61 [ARB_ERR_CAP_ADDR] = 0x0cc,
66 [ARB_ERR_CAP_DATA] = 0x0d0,
67 [ARB_ERR_CAP_STATUS] = 0x0d4, 62 [ARB_ERR_CAP_STATUS] = 0x0d4,
68 [ARB_ERR_CAP_MASTER] = 0x0d8, 63 [ARB_ERR_CAP_MASTER] = 0x0d8,
69}; 64};
@@ -73,7 +68,6 @@ static const int gisb_offsets_bcm7435[] = {
73 [ARB_ERR_CAP_CLR] = 0x168, 68 [ARB_ERR_CAP_CLR] = 0x168,
74 [ARB_ERR_CAP_HI_ADDR] = -1, 69 [ARB_ERR_CAP_HI_ADDR] = -1,
75 [ARB_ERR_CAP_ADDR] = 0x16c, 70 [ARB_ERR_CAP_ADDR] = 0x16c,
76 [ARB_ERR_CAP_DATA] = 0x170,
77 [ARB_ERR_CAP_STATUS] = 0x174, 71 [ARB_ERR_CAP_STATUS] = 0x174,
78 [ARB_ERR_CAP_MASTER] = 0x178, 72 [ARB_ERR_CAP_MASTER] = 0x178,
79}; 73};
@@ -83,7 +77,6 @@ static const int gisb_offsets_bcm7445[] = {
83 [ARB_ERR_CAP_CLR] = 0x7e4, 77 [ARB_ERR_CAP_CLR] = 0x7e4,
84 [ARB_ERR_CAP_HI_ADDR] = 0x7e8, 78 [ARB_ERR_CAP_HI_ADDR] = 0x7e8,
85 [ARB_ERR_CAP_ADDR] = 0x7ec, 79 [ARB_ERR_CAP_ADDR] = 0x7ec,
86 [ARB_ERR_CAP_DATA] = 0x7f0,
87 [ARB_ERR_CAP_STATUS] = 0x7f4, 80 [ARB_ERR_CAP_STATUS] = 0x7f4,
88 [ARB_ERR_CAP_MASTER] = 0x7f8, 81 [ARB_ERR_CAP_MASTER] = 0x7f8,
89}; 82};
@@ -105,9 +98,13 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
105{ 98{
106 int offset = gdev->gisb_offsets[reg]; 99 int offset = gdev->gisb_offsets[reg];
107 100
108 /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */ 101 if (offset < 0) {
109 if (offset == -1) 102 /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
110 return 1; 103 if (reg == ARB_ERR_CAP_MASTER)
104 return 1;
105 else
106 return 0;
107 }
111 108
112 if (gdev->big_endian) 109 if (gdev->big_endian)
113 return ioread32be(gdev->base + offset); 110 return ioread32be(gdev->base + offset);
@@ -115,6 +112,16 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
115 return ioread32(gdev->base + offset); 112 return ioread32(gdev->base + offset);
116} 113}
117 114
115static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
116{
117 u64 value;
118
119 value = gisb_read(gdev, ARB_ERR_CAP_ADDR);
120 value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
121
122 return value;
123}
124
118static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg) 125static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
119{ 126{
120 int offset = gdev->gisb_offsets[reg]; 127 int offset = gdev->gisb_offsets[reg];
@@ -123,9 +130,9 @@ static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
123 return; 130 return;
124 131
125 if (gdev->big_endian) 132 if (gdev->big_endian)
126 iowrite32be(val, gdev->base + reg); 133 iowrite32be(val, gdev->base + offset);
127 else 134 else
128 iowrite32(val, gdev->base + reg); 135 iowrite32(val, gdev->base + offset);
129} 136}
130 137
131static ssize_t gisb_arb_get_timeout(struct device *dev, 138static ssize_t gisb_arb_get_timeout(struct device *dev,
@@ -181,7 +188,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
181 const char *reason) 188 const char *reason)
182{ 189{
183 u32 cap_status; 190 u32 cap_status;
184 unsigned long arb_addr; 191 u64 arb_addr;
185 u32 master; 192 u32 master;
186 const char *m_name; 193 const char *m_name;
187 char m_fmt[11]; 194 char m_fmt[11];
@@ -193,10 +200,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
193 return 1; 200 return 1;
194 201
195 /* Read the address and master */ 202 /* Read the address and master */
196 arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff; 203 arb_addr = gisb_read_address(gdev);
197#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
198 arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
199#endif
200 master = gisb_read(gdev, ARB_ERR_CAP_MASTER); 204 master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
201 205
202 m_name = brcmstb_gisb_master_to_str(gdev, master); 206 m_name = brcmstb_gisb_master_to_str(gdev, master);
@@ -205,7 +209,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
205 m_name = m_fmt; 209 m_name = m_fmt;
206 } 210 }
207 211
208 pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n", 212 pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n",
209 __func__, reason, arb_addr, 213 __func__, reason, arb_addr,
210 cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R', 214 cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
211 cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "", 215 cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index c206ccda899b..0151039bff05 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1154,9 +1154,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
1154 1154
1155 cd_dbg(CD_OPEN, "entering cdrom_open\n"); 1155 cd_dbg(CD_OPEN, "entering cdrom_open\n");
1156 1156
1157 /* open is event synchronization point, check events first */
1158 check_disk_change(bdev);
1159
1160 /* if this was a O_NONBLOCK open and we should honor the flags, 1157 /* if this was a O_NONBLOCK open and we should honor the flags,
1161 * do a quick open without drive/disc integrity checks. */ 1158 * do a quick open without drive/disc integrity checks. */
1162 cdi->use_count++; 1159 cdi->use_count++;
@@ -2358,7 +2355,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
2358 if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) 2355 if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
2359 return media_changed(cdi, 1); 2356 return media_changed(cdi, 1);
2360 2357
2361 if ((unsigned int)arg >= cdi->capacity) 2358 if (arg >= cdi->capacity)
2362 return -EINVAL; 2359 return -EINVAL;
2363 2360
2364 info = kmalloc(sizeof(*info), GFP_KERNEL); 2361 info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 584bc3126403..e2808fefbb78 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -497,6 +497,9 @@ static struct cdrom_device_ops gdrom_ops = {
497static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode) 497static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
498{ 498{
499 int ret; 499 int ret;
500
501 check_disk_change(bdev);
502
500 mutex_lock(&gdrom_mutex); 503 mutex_lock(&gdrom_mutex);
501 ret = cdrom_open(gd.cd_info, bdev, mode); 504 ret = cdrom_open(gd.cd_info, bdev, mode);
502 mutex_unlock(&gdrom_mutex); 505 mutex_unlock(&gdrom_mutex);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 1341a94cc779..76afc841232c 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -859,6 +859,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
859 } 859 }
860 } 860 }
861 wmb(); 861 wmb();
862 if (intel_private.driver->chipset_flush)
863 intel_private.driver->chipset_flush();
862} 864}
863EXPORT_SYMBOL(intel_gtt_insert_sg_entries); 865EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
864 866
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 7845a38b6604..7ba0ae060d61 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -155,8 +155,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
155 return ret; 155 return ret;
156} 156}
157 157
158#ifdef CONFIG_PM 158static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev)
159static int exynos_rng_runtime_suspend(struct device *dev)
160{ 159{
161 struct platform_device *pdev = to_platform_device(dev); 160 struct platform_device *pdev = to_platform_device(dev);
162 struct exynos_rng *exynos_rng = platform_get_drvdata(pdev); 161 struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -166,7 +165,7 @@ static int exynos_rng_runtime_suspend(struct device *dev)
166 return 0; 165 return 0;
167} 166}
168 167
169static int exynos_rng_runtime_resume(struct device *dev) 168static int __maybe_unused exynos_rng_runtime_resume(struct device *dev)
170{ 169{
171 struct platform_device *pdev = to_platform_device(dev); 170 struct platform_device *pdev = to_platform_device(dev);
172 struct exynos_rng *exynos_rng = platform_get_drvdata(pdev); 171 struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -174,12 +173,12 @@ static int exynos_rng_runtime_resume(struct device *dev)
174 return clk_prepare_enable(exynos_rng->clk); 173 return clk_prepare_enable(exynos_rng->clk);
175} 174}
176 175
177static int exynos_rng_suspend(struct device *dev) 176static int __maybe_unused exynos_rng_suspend(struct device *dev)
178{ 177{
179 return pm_runtime_force_suspend(dev); 178 return pm_runtime_force_suspend(dev);
180} 179}
181 180
182static int exynos_rng_resume(struct device *dev) 181static int __maybe_unused exynos_rng_resume(struct device *dev)
183{ 182{
184 struct platform_device *pdev = to_platform_device(dev); 183 struct platform_device *pdev = to_platform_device(dev);
185 struct exynos_rng *exynos_rng = platform_get_drvdata(pdev); 184 struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -191,7 +190,6 @@ static int exynos_rng_resume(struct device *dev)
191 190
192 return exynos_rng_configure(exynos_rng); 191 return exynos_rng_configure(exynos_rng);
193} 192}
194#endif
195 193
196static const struct dev_pm_ops exynos_rng_pm_ops = { 194static const struct dev_pm_ops exynos_rng_pm_ops = {
197 SET_SYSTEM_SLEEP_PM_OPS(exynos_rng_suspend, exynos_rng_resume) 195 SET_SYSTEM_SLEEP_PM_OPS(exynos_rng_suspend, exynos_rng_resume)
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 92a810648bd0..530aacca3eb8 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -21,6 +21,7 @@
21#include <linux/of_address.h> 21#include <linux/of_address.h>
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/pm_runtime.h> 23#include <linux/pm_runtime.h>
24#include <linux/reset.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
25 26
26#define RNG_CR 0x00 27#define RNG_CR 0x00
@@ -46,6 +47,7 @@ struct stm32_rng_private {
46 struct hwrng rng; 47 struct hwrng rng;
47 void __iomem *base; 48 void __iomem *base;
48 struct clk *clk; 49 struct clk *clk;
50 struct reset_control *rst;
49}; 51};
50 52
51static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) 53static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
@@ -140,6 +142,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
140 if (IS_ERR(priv->clk)) 142 if (IS_ERR(priv->clk))
141 return PTR_ERR(priv->clk); 143 return PTR_ERR(priv->clk);
142 144
145 priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
146 if (!IS_ERR(priv->rst)) {
147 reset_control_assert(priv->rst);
148 udelay(2);
149 reset_control_deassert(priv->rst);
150 }
151
143 dev_set_drvdata(dev, priv); 152 dev_set_drvdata(dev, priv);
144 153
145 priv->rng.name = dev_driver_string(dev), 154 priv->rng.name = dev_driver_string(dev),
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 0c98a9d51a24..44ce80606944 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -140,7 +140,7 @@ static int via_rng_init(struct hwrng *rng)
140 * RNG configuration like it used to be the case in this 140 * RNG configuration like it used to be the case in this
141 * register */ 141 * register */
142 if ((c->x86 == 6) && (c->x86_model >= 0x0f)) { 142 if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
143 if (!cpu_has_xstore_enabled) { 143 if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
144 pr_err(PFX "can't enable hardware RNG " 144 pr_err(PFX "can't enable hardware RNG "
145 "if XSTORE is not enabled\n"); 145 "if XSTORE is not enabled\n");
146 return -ENODEV; 146 return -ENODEV;
@@ -200,8 +200,9 @@ static int __init mod_init(void)
200{ 200{
201 int err; 201 int err;
202 202
203 if (!cpu_has_xstore) 203 if (!boot_cpu_has(X86_FEATURE_XSTORE))
204 return -ENODEV; 204 return -ENODEV;
205
205 pr_info("VIA RNG detected\n"); 206 pr_info("VIA RNG detected\n");
206 err = hwrng_register(&via_rng); 207 err = hwrng_register(&via_rng);
207 if (err) { 208 if (err) {
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index feafdab734ae..4835b588b783 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -522,11 +522,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
522 if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ 522 if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
523 BT_CONTROL(BT_H_BUSY); 523 BT_CONTROL(BT_H_BUSY);
524 524
525 bt->timeout = bt->BT_CAP_req2rsp;
526
525 /* Read BT capabilities if it hasn't been done yet */ 527 /* Read BT capabilities if it hasn't been done yet */
526 if (!bt->BT_CAP_outreqs) 528 if (!bt->BT_CAP_outreqs)
527 BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN, 529 BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
528 SI_SM_CALL_WITHOUT_DELAY); 530 SI_SM_CALL_WITHOUT_DELAY);
529 bt->timeout = bt->BT_CAP_req2rsp;
530 BT_SI_SM_RETURN(SI_SM_IDLE); 531 BT_SI_SM_RETURN(SI_SM_IDLE);
531 532
532 case BT_STATE_XACTION_START: 533 case BT_STATE_XACTION_START:
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 6e658aa114f1..a70518a4fcec 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -251,8 +251,9 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
251 ipmi->irq = opal_event_request(prop); 251 ipmi->irq = opal_event_request(prop);
252 } 252 }
253 253
254 if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH, 254 rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
255 "opal-ipmi", ipmi)) { 255 "opal-ipmi", ipmi);
256 if (rc) {
256 dev_warn(dev, "Unable to request irq\n"); 257 dev_warn(dev, "Unable to request irq\n");
257 goto err_dispose; 258 goto err_dispose;
258 } 259 }
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index f53e8ba2c718..d6d166fe49a3 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -409,6 +409,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
409 msg = ipmi_alloc_smi_msg(); 409 msg = ipmi_alloc_smi_msg();
410 if (!msg) { 410 if (!msg) {
411 ssif_info->ssif_state = SSIF_NORMAL; 411 ssif_info->ssif_state = SSIF_NORMAL;
412 ipmi_ssif_unlock_cond(ssif_info, flags);
412 return; 413 return;
413 } 414 }
414 415
@@ -431,6 +432,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
431 msg = ipmi_alloc_smi_msg(); 432 msg = ipmi_alloc_smi_msg();
432 if (!msg) { 433 if (!msg) {
433 ssif_info->ssif_state = SSIF_NORMAL; 434 ssif_info->ssif_state = SSIF_NORMAL;
435 ipmi_ssif_unlock_cond(ssif_info, flags);
434 return; 436 return;
435 } 437 }
436 438
@@ -755,7 +757,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
755 ssif_info->ssif_state = SSIF_NORMAL; 757 ssif_info->ssif_state = SSIF_NORMAL;
756 ipmi_ssif_unlock_cond(ssif_info, flags); 758 ipmi_ssif_unlock_cond(ssif_info, flags);
757 pr_warn(PFX "Error getting flags: %d %d, %x\n", 759 pr_warn(PFX "Error getting flags: %d %d, %x\n",
758 result, len, data[2]); 760 result, len, (len >= 3) ? data[2] : 0);
759 } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 761 } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
760 || data[1] != IPMI_GET_MSG_FLAGS_CMD) { 762 || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
761 /* 763 /*
@@ -777,7 +779,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
777 if ((result < 0) || (len < 3) || (data[2] != 0)) { 779 if ((result < 0) || (len < 3) || (data[2] != 0)) {
778 /* Error clearing flags */ 780 /* Error clearing flags */
779 pr_warn(PFX "Error clearing flags: %d %d, %x\n", 781 pr_warn(PFX "Error clearing flags: %d %d, %x\n",
780 result, len, data[2]); 782 result, len, (len >= 3) ? data[2] : 0);
781 } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 783 } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
782 || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) { 784 || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
783 pr_warn(PFX "Invalid response clearing flags: %x %x\n", 785 pr_warn(PFX "Invalid response clearing flags: %x %x\n",
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 40d400fe5bb7..4ada103945f0 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -515,7 +515,7 @@ static void panic_halt_ipmi_heartbeat(void)
515 msg.cmd = IPMI_WDOG_RESET_TIMER; 515 msg.cmd = IPMI_WDOG_RESET_TIMER;
516 msg.data = NULL; 516 msg.data = NULL;
517 msg.data_len = 0; 517 msg.data_len = 0;
518 atomic_add(2, &panic_done_count); 518 atomic_add(1, &panic_done_count);
519 rv = ipmi_request_supply_msgs(watchdog_user, 519 rv = ipmi_request_supply_msgs(watchdog_user,
520 (struct ipmi_addr *) &addr, 520 (struct ipmi_addr *) &addr,
521 0, 521 0,
@@ -525,7 +525,7 @@ static void panic_halt_ipmi_heartbeat(void)
525 &panic_halt_heartbeat_recv_msg, 525 &panic_halt_heartbeat_recv_msg,
526 1); 526 1);
527 if (rv) 527 if (rv)
528 atomic_sub(2, &panic_done_count); 528 atomic_sub(1, &panic_done_count);
529} 529}
530 530
531static struct ipmi_smi_msg panic_halt_smi_msg = { 531static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -549,12 +549,12 @@ static void panic_halt_ipmi_set_timeout(void)
549 /* Wait for the messages to be free. */ 549 /* Wait for the messages to be free. */
550 while (atomic_read(&panic_done_count) != 0) 550 while (atomic_read(&panic_done_count) != 0)
551 ipmi_poll_interface(watchdog_user); 551 ipmi_poll_interface(watchdog_user);
552 atomic_add(2, &panic_done_count); 552 atomic_add(1, &panic_done_count);
553 rv = i_ipmi_set_timeout(&panic_halt_smi_msg, 553 rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
554 &panic_halt_recv_msg, 554 &panic_halt_recv_msg,
555 &send_heartbeat_now); 555 &send_heartbeat_now);
556 if (rv) { 556 if (rv) {
557 atomic_sub(2, &panic_done_count); 557 atomic_sub(1, &panic_done_count);
558 printk(KERN_WARNING PFX 558 printk(KERN_WARNING PFX
559 "Unable to extend the watchdog timeout."); 559 "Unable to extend the watchdog timeout.");
560 } else { 560 } else {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1822472dffab..2916d08ee30e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -724,7 +724,7 @@ retry:
724 724
725static int credit_entropy_bits_safe(struct entropy_store *r, int nbits) 725static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
726{ 726{
727 const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1)); 727 const int nbits_max = r->poolinfo->poolwords * 32;
728 728
729 if (nbits < 0) 729 if (nbits < 0)
730 return -EINVAL; 730 return -EINVAL;
@@ -886,12 +886,16 @@ static void add_interrupt_bench(cycles_t start)
886static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) 886static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
887{ 887{
888 __u32 *ptr = (__u32 *) regs; 888 __u32 *ptr = (__u32 *) regs;
889 unsigned int idx;
889 890
890 if (regs == NULL) 891 if (regs == NULL)
891 return 0; 892 return 0;
892 if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32)) 893 idx = READ_ONCE(f->reg_idx);
893 f->reg_idx = 0; 894 if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
894 return *(ptr + f->reg_idx++); 895 idx = 0;
896 ptr += idx++;
897 WRITE_ONCE(f->reg_idx, idx);
898 return *ptr;
895} 899}
896 900
897void add_interrupt_randomness(int irq, int irq_flags) 901void add_interrupt_randomness(int irq, int irq_flags)
@@ -1499,14 +1503,22 @@ static int
1499write_pool(struct entropy_store *r, const char __user *buffer, size_t count) 1503write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1500{ 1504{
1501 size_t bytes; 1505 size_t bytes;
1502 __u32 buf[16]; 1506 __u32 t, buf[16];
1503 const char __user *p = buffer; 1507 const char __user *p = buffer;
1504 1508
1505 while (count > 0) { 1509 while (count > 0) {
1510 int b, i = 0;
1511
1506 bytes = min(count, sizeof(buf)); 1512 bytes = min(count, sizeof(buf));
1507 if (copy_from_user(&buf, p, bytes)) 1513 if (copy_from_user(&buf, p, bytes))
1508 return -EFAULT; 1514 return -EFAULT;
1509 1515
1516 for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
1517 if (!arch_get_random_int(&t))
1518 break;
1519 buf[i] ^= t;
1520 }
1521
1510 count -= bytes; 1522 count -= bytes;
1511 p += bytes; 1523 p += bytes;
1512 1524
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 8d626784cd8d..49e4040eeb55 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -485,7 +485,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
485 size_t count) 485 size_t count)
486{ 486{
487 int size = 0; 487 int size = 0;
488 int expected; 488 u32 expected;
489 489
490 if (!chip) 490 if (!chip)
491 return -EBUSY; 491 return -EBUSY;
@@ -502,7 +502,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
502 } 502 }
503 503
504 expected = be32_to_cpu(*(__be32 *)(buf + 2)); 504 expected = be32_to_cpu(*(__be32 *)(buf + 2));
505 if (expected > count) { 505 if (expected > count || expected < TPM_HEADER_SIZE) {
506 size = -EIO; 506 size = -EIO;
507 goto out; 507 goto out;
508 } 508 }
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index a0d9ac6b6cc9..e759100e41a7 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -26,6 +26,7 @@
26#include <linux/spinlock.h> 26#include <linux/spinlock.h>
27#include <linux/freezer.h> 27#include <linux/freezer.h>
28#include <linux/major.h> 28#include <linux/major.h>
29#include <linux/of.h>
29#include "tpm.h" 30#include "tpm.h"
30#include "tpm_eventlog.h" 31#include "tpm_eventlog.h"
31 32
@@ -324,8 +325,20 @@ static void tpm1_chip_unregister(struct tpm_chip *chip)
324 */ 325 */
325int tpm_chip_register(struct tpm_chip *chip) 326int tpm_chip_register(struct tpm_chip *chip)
326{ 327{
328#ifdef CONFIG_OF
329 struct device_node *np;
330#endif
327 int rc; 331 int rc;
328 332
333#ifdef CONFIG_OF
334 np = of_find_node_by_name(NULL, "vtpm");
335 if (np) {
336 if (of_property_read_bool(np, "powered-while-suspended"))
337 chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
338 }
339 of_node_put(np);
340#endif
341
329 rc = tpm1_chip_register(chip); 342 rc = tpm1_chip_register(chip);
330 if (rc) 343 if (rc)
331 return rc; 344 return rc;
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index 912ad30be585..4719aa781bf2 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -25,7 +25,7 @@ struct file_priv {
25 struct tpm_chip *chip; 25 struct tpm_chip *chip;
26 26
27 /* Data passed to and from the tpm via the read/write calls */ 27 /* Data passed to and from the tpm via the read/write calls */
28 atomic_t data_pending; 28 size_t data_pending;
29 struct mutex buffer_mutex; 29 struct mutex buffer_mutex;
30 30
31 struct timer_list user_read_timer; /* user needs to claim result */ 31 struct timer_list user_read_timer; /* user needs to claim result */
@@ -46,7 +46,7 @@ static void timeout_work(struct work_struct *work)
46 struct file_priv *priv = container_of(work, struct file_priv, work); 46 struct file_priv *priv = container_of(work, struct file_priv, work);
47 47
48 mutex_lock(&priv->buffer_mutex); 48 mutex_lock(&priv->buffer_mutex);
49 atomic_set(&priv->data_pending, 0); 49 priv->data_pending = 0;
50 memset(priv->data_buffer, 0, sizeof(priv->data_buffer)); 50 memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
51 mutex_unlock(&priv->buffer_mutex); 51 mutex_unlock(&priv->buffer_mutex);
52} 52}
@@ -72,7 +72,6 @@ static int tpm_open(struct inode *inode, struct file *file)
72 } 72 }
73 73
74 priv->chip = chip; 74 priv->chip = chip;
75 atomic_set(&priv->data_pending, 0);
76 mutex_init(&priv->buffer_mutex); 75 mutex_init(&priv->buffer_mutex);
77 setup_timer(&priv->user_read_timer, user_reader_timeout, 76 setup_timer(&priv->user_read_timer, user_reader_timeout,
78 (unsigned long)priv); 77 (unsigned long)priv);
@@ -86,28 +85,24 @@ static ssize_t tpm_read(struct file *file, char __user *buf,
86 size_t size, loff_t *off) 85 size_t size, loff_t *off)
87{ 86{
88 struct file_priv *priv = file->private_data; 87 struct file_priv *priv = file->private_data;
89 ssize_t ret_size; 88 ssize_t ret_size = 0;
90 int rc; 89 int rc;
91 90
92 del_singleshot_timer_sync(&priv->user_read_timer); 91 del_singleshot_timer_sync(&priv->user_read_timer);
93 flush_work(&priv->work); 92 flush_work(&priv->work);
94 ret_size = atomic_read(&priv->data_pending); 93 mutex_lock(&priv->buffer_mutex);
95 if (ret_size > 0) { /* relay data */
96 ssize_t orig_ret_size = ret_size;
97 if (size < ret_size)
98 ret_size = size;
99 94
100 mutex_lock(&priv->buffer_mutex); 95 if (priv->data_pending) {
96 ret_size = min_t(ssize_t, size, priv->data_pending);
101 rc = copy_to_user(buf, priv->data_buffer, ret_size); 97 rc = copy_to_user(buf, priv->data_buffer, ret_size);
102 memset(priv->data_buffer, 0, orig_ret_size); 98 memset(priv->data_buffer, 0, priv->data_pending);
103 if (rc) 99 if (rc)
104 ret_size = -EFAULT; 100 ret_size = -EFAULT;
105 101
106 mutex_unlock(&priv->buffer_mutex); 102 priv->data_pending = 0;
107 } 103 }
108 104
109 atomic_set(&priv->data_pending, 0); 105 mutex_unlock(&priv->buffer_mutex);
110
111 return ret_size; 106 return ret_size;
112} 107}
113 108
@@ -118,18 +113,20 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
118 size_t in_size = size; 113 size_t in_size = size;
119 ssize_t out_size; 114 ssize_t out_size;
120 115
121 /* cannot perform a write until the read has cleared
122 either via tpm_read or a user_read_timer timeout.
123 This also prevents splitted buffered writes from blocking here.
124 */
125 if (atomic_read(&priv->data_pending) != 0)
126 return -EBUSY;
127
128 if (in_size > TPM_BUFSIZE) 116 if (in_size > TPM_BUFSIZE)
129 return -E2BIG; 117 return -E2BIG;
130 118
131 mutex_lock(&priv->buffer_mutex); 119 mutex_lock(&priv->buffer_mutex);
132 120
121 /* Cannot perform a write until the read has cleared either via
122 * tpm_read or a user_read_timer timeout. This also prevents split
123 * buffered writes from blocking here.
124 */
125 if (priv->data_pending != 0) {
126 mutex_unlock(&priv->buffer_mutex);
127 return -EBUSY;
128 }
129
133 if (copy_from_user 130 if (copy_from_user
134 (priv->data_buffer, (void __user *) buf, in_size)) { 131 (priv->data_buffer, (void __user *) buf, in_size)) {
135 mutex_unlock(&priv->buffer_mutex); 132 mutex_unlock(&priv->buffer_mutex);
@@ -153,7 +150,7 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
153 return out_size; 150 return out_size;
154 } 151 }
155 152
156 atomic_set(&priv->data_pending, out_size); 153 priv->data_pending = out_size;
157 mutex_unlock(&priv->buffer_mutex); 154 mutex_unlock(&priv->buffer_mutex);
158 155
159 /* Set a timeout by which the reader must come claim the result */ 156 /* Set a timeout by which the reader must come claim the result */
@@ -172,7 +169,7 @@ static int tpm_release(struct inode *inode, struct file *file)
172 del_singleshot_timer_sync(&priv->user_read_timer); 169 del_singleshot_timer_sync(&priv->user_read_timer);
173 flush_work(&priv->work); 170 flush_work(&priv->work);
174 file->private_data = NULL; 171 file->private_data = NULL;
175 atomic_set(&priv->data_pending, 0); 172 priv->data_pending = 0;
176 clear_bit(0, &priv->chip->is_open); 173 clear_bit(0, &priv->chip->is_open);
177 kfree(priv); 174 kfree(priv);
178 return 0; 175 return 0;
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index aaa5fa95dede..95a40ec854ad 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -787,6 +787,10 @@ int tpm_do_selftest(struct tpm_chip *chip)
787 loops = jiffies_to_msecs(duration) / delay_msec; 787 loops = jiffies_to_msecs(duration) / delay_msec;
788 788
789 rc = tpm_continue_selftest(chip); 789 rc = tpm_continue_selftest(chip);
790 if (rc == TPM_ERR_INVALID_POSTINIT) {
791 chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
792 dev_info(&chip->dev, "TPM not ready (%d)\n", rc);
793 }
790 /* This may fail if there was no TPM driver during a suspend/resume 794 /* This may fail if there was no TPM driver during a suspend/resume
791 * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST) 795 * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
792 */ 796 */
@@ -931,6 +935,9 @@ int tpm_pm_suspend(struct device *dev)
931 if (chip == NULL) 935 if (chip == NULL)
932 return -ENODEV; 936 return -ENODEV;
933 937
938 if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
939 return 0;
940
934 if (chip->flags & TPM_CHIP_FLAG_TPM2) { 941 if (chip->flags & TPM_CHIP_FLAG_TPM2) {
935 tpm2_shutdown(chip, TPM2_SU_STATE); 942 tpm2_shutdown(chip, TPM2_SU_STATE);
936 return 0; 943 return 0;
@@ -1040,6 +1047,11 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
1040 break; 1047 break;
1041 1048
1042 recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); 1049 recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
1050 if (recd > num_bytes) {
1051 total = -EFAULT;
1052 break;
1053 }
1054
1043 memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd); 1055 memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd);
1044 1056
1045 dest += recd; 1057 dest += recd;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 772d99b3a8e4..36e1abda00f9 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -168,6 +168,7 @@ struct tpm_vendor_specific {
168enum tpm_chip_flags { 168enum tpm_chip_flags {
169 TPM_CHIP_FLAG_REGISTERED = BIT(0), 169 TPM_CHIP_FLAG_REGISTERED = BIT(0),
170 TPM_CHIP_FLAG_TPM2 = BIT(1), 170 TPM_CHIP_FLAG_TPM2 = BIT(1),
171 TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
171}; 172};
172 173
173struct tpm_chip { 174struct tpm_chip {
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 286bd090a488..389a009b83f2 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -622,6 +622,11 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
622 if (!rc) { 622 if (!rc) {
623 data_len = be16_to_cpup( 623 data_len = be16_to_cpup(
624 (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); 624 (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
625 if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
626 rc = -EFAULT;
627 goto out;
628 }
629
625 data = &buf.data[TPM_HEADER_SIZE + 6]; 630 data = &buf.data[TPM_HEADER_SIZE + 6];
626 631
627 memcpy(payload->key, data, data_len - 1); 632 memcpy(payload->key, data, data_len - 1);
@@ -629,6 +634,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
629 payload->migratable = data[data_len - 1]; 634 payload->migratable = data[data_len - 1];
630 } 635 }
631 636
637out:
632 tpm_buf_destroy(&buf); 638 tpm_buf_destroy(&buf);
633 return rc; 639 return rc;
634} 640}
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index f2aa99e34b4b..9f12ad74a09b 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -436,7 +436,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
436static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) 436static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
437{ 437{
438 int size = 0; 438 int size = 0;
439 int expected, status; 439 int status;
440 u32 expected;
440 441
441 if (count < TPM_HEADER_SIZE) { 442 if (count < TPM_HEADER_SIZE) {
442 size = -EIO; 443 size = -EIO;
@@ -451,7 +452,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
451 } 452 }
452 453
453 expected = be32_to_cpu(*(__be32 *)(buf + 2)); 454 expected = be32_to_cpu(*(__be32 *)(buf + 2));
454 if ((size_t) expected > count) { 455 if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
455 size = -EIO; 456 size = -EIO;
456 goto out; 457 goto out;
457 } 458 }
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index a1e1474dda30..aedf726cbab6 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -267,7 +267,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
267 struct device *dev = chip->dev.parent; 267 struct device *dev = chip->dev.parent;
268 struct i2c_client *client = to_i2c_client(dev); 268 struct i2c_client *client = to_i2c_client(dev);
269 s32 rc; 269 s32 rc;
270 int expected, status, burst_count, retries, size = 0; 270 int status;
271 int burst_count;
272 int retries;
273 int size = 0;
274 u32 expected;
271 275
272 if (count < TPM_HEADER_SIZE) { 276 if (count < TPM_HEADER_SIZE) {
273 i2c_nuvoton_ready(chip); /* return to idle */ 277 i2c_nuvoton_ready(chip); /* return to idle */
@@ -309,7 +313,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
309 * to machine native 313 * to machine native
310 */ 314 */
311 expected = be32_to_cpu(*(__be32 *) (buf + 2)); 315 expected = be32_to_cpu(*(__be32 *) (buf + 2));
312 if (expected > count) { 316 if (expected > count || expected < size) {
313 dev_err(dev, "%s() expected > count\n", __func__); 317 dev_err(dev, "%s() expected > count\n", __func__);
314 size = -EIO; 318 size = -EIO;
315 continue; 319 continue;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 7f13221aeb30..9dd93a209ef2 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -283,7 +283,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
283static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) 283static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
284{ 284{
285 int size = 0; 285 int size = 0;
286 int expected, status; 286 int status;
287 u32 expected;
287 288
288 if (count < TPM_HEADER_SIZE) { 289 if (count < TPM_HEADER_SIZE) {
289 size = -EIO; 290 size = -EIO;
@@ -298,7 +299,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
298 } 299 }
299 300
300 expected = be32_to_cpu(*(__be32 *) (buf + 2)); 301 expected = be32_to_cpu(*(__be32 *) (buf + 2));
301 if (expected > count) { 302 if (expected > count || expected < TPM_HEADER_SIZE) {
302 size = -EIO; 303 size = -EIO;
303 goto out; 304 goto out;
304 } 305 }
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index be0b09a0fb44..2aca689061e1 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1399,7 +1399,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1399{ 1399{
1400 char debugfs_name[16]; 1400 char debugfs_name[16];
1401 struct port *port; 1401 struct port *port;
1402 struct port_buffer *buf;
1403 dev_t devt; 1402 dev_t devt;
1404 unsigned int nr_added_bufs; 1403 unsigned int nr_added_bufs;
1405 int err; 1404 int err;
@@ -1510,8 +1509,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1510 return 0; 1509 return 0;
1511 1510
1512free_inbufs: 1511free_inbufs:
1513 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1514 free_buf(buf, true);
1515free_device: 1512free_device:
1516 device_destroy(pdrvdata.class, port->dev->devt); 1513 device_destroy(pdrvdata.class, port->dev->devt);
1517free_cdev: 1514free_cdev:
@@ -1536,34 +1533,14 @@ static void remove_port(struct kref *kref)
1536 1533
1537static void remove_port_data(struct port *port) 1534static void remove_port_data(struct port *port)
1538{ 1535{
1539 struct port_buffer *buf;
1540
1541 spin_lock_irq(&port->inbuf_lock); 1536 spin_lock_irq(&port->inbuf_lock);
1542 /* Remove unused data this port might have received. */ 1537 /* Remove unused data this port might have received. */
1543 discard_port_data(port); 1538 discard_port_data(port);
1544 spin_unlock_irq(&port->inbuf_lock); 1539 spin_unlock_irq(&port->inbuf_lock);
1545 1540
1546 /* Remove buffers we queued up for the Host to send us data in. */
1547 do {
1548 spin_lock_irq(&port->inbuf_lock);
1549 buf = virtqueue_detach_unused_buf(port->in_vq);
1550 spin_unlock_irq(&port->inbuf_lock);
1551 if (buf)
1552 free_buf(buf, true);
1553 } while (buf);
1554
1555 spin_lock_irq(&port->outvq_lock); 1541 spin_lock_irq(&port->outvq_lock);
1556 reclaim_consumed_buffers(port); 1542 reclaim_consumed_buffers(port);
1557 spin_unlock_irq(&port->outvq_lock); 1543 spin_unlock_irq(&port->outvq_lock);
1558
1559 /* Free pending buffers from the out-queue. */
1560 do {
1561 spin_lock_irq(&port->outvq_lock);
1562 buf = virtqueue_detach_unused_buf(port->out_vq);
1563 spin_unlock_irq(&port->outvq_lock);
1564 if (buf)
1565 free_buf(buf, true);
1566 } while (buf);
1567} 1544}
1568 1545
1569/* 1546/*
@@ -1788,13 +1765,24 @@ static void control_work_handler(struct work_struct *work)
1788 spin_unlock(&portdev->c_ivq_lock); 1765 spin_unlock(&portdev->c_ivq_lock);
1789} 1766}
1790 1767
1768static void flush_bufs(struct virtqueue *vq, bool can_sleep)
1769{
1770 struct port_buffer *buf;
1771 unsigned int len;
1772
1773 while ((buf = virtqueue_get_buf(vq, &len)))
1774 free_buf(buf, can_sleep);
1775}
1776
1791static void out_intr(struct virtqueue *vq) 1777static void out_intr(struct virtqueue *vq)
1792{ 1778{
1793 struct port *port; 1779 struct port *port;
1794 1780
1795 port = find_port_by_vq(vq->vdev->priv, vq); 1781 port = find_port_by_vq(vq->vdev->priv, vq);
1796 if (!port) 1782 if (!port) {
1783 flush_bufs(vq, false);
1797 return; 1784 return;
1785 }
1798 1786
1799 wake_up_interruptible(&port->waitqueue); 1787 wake_up_interruptible(&port->waitqueue);
1800} 1788}
@@ -1805,8 +1793,10 @@ static void in_intr(struct virtqueue *vq)
1805 unsigned long flags; 1793 unsigned long flags;
1806 1794
1807 port = find_port_by_vq(vq->vdev->priv, vq); 1795 port = find_port_by_vq(vq->vdev->priv, vq);
1808 if (!port) 1796 if (!port) {
1797 flush_bufs(vq, false);
1809 return; 1798 return;
1799 }
1810 1800
1811 spin_lock_irqsave(&port->inbuf_lock, flags); 1801 spin_lock_irqsave(&port->inbuf_lock, flags);
1812 port->inbuf = get_inbuf(port); 1802 port->inbuf = get_inbuf(port);
@@ -1981,6 +1971,15 @@ static const struct file_operations portdev_fops = {
1981 1971
1982static void remove_vqs(struct ports_device *portdev) 1972static void remove_vqs(struct ports_device *portdev)
1983{ 1973{
1974 struct virtqueue *vq;
1975
1976 virtio_device_for_each_vq(portdev->vdev, vq) {
1977 struct port_buffer *buf;
1978
1979 flush_bufs(vq, true);
1980 while ((buf = virtqueue_detach_unused_buf(vq)))
1981 free_buf(buf, true);
1982 }
1984 portdev->vdev->config->del_vqs(portdev->vdev); 1983 portdev->vdev->config->del_vqs(portdev->vdev);
1985 kfree(portdev->in_vqs); 1984 kfree(portdev->in_vqs);
1986 kfree(portdev->out_vqs); 1985 kfree(portdev->out_vqs);
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 35ab89fe9d7b..d56ba46e6b78 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -891,9 +891,7 @@ static void bcm2835_pll_off(struct clk_hw *hw)
891 const struct bcm2835_pll_data *data = pll->data; 891 const struct bcm2835_pll_data *data = pll->data;
892 892
893 spin_lock(&cprman->regs_lock); 893 spin_lock(&cprman->regs_lock);
894 cprman_write(cprman, data->cm_ctrl_reg, 894 cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
895 cprman_read(cprman, data->cm_ctrl_reg) |
896 CM_PLL_ANARST);
897 cprman_write(cprman, data->a2w_ctrl_reg, 895 cprman_write(cprman, data->a2w_ctrl_reg,
898 cprman_read(cprman, data->a2w_ctrl_reg) | 896 cprman_read(cprman, data->a2w_ctrl_reg) |
899 A2W_PLL_CTRL_PWRDN); 897 A2W_PLL_CTRL_PWRDN);
@@ -912,8 +910,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
912 ~A2W_PLL_CTRL_PWRDN); 910 ~A2W_PLL_CTRL_PWRDN);
913 911
914 /* Take the PLL out of reset. */ 912 /* Take the PLL out of reset. */
913 spin_lock(&cprman->regs_lock);
915 cprman_write(cprman, data->cm_ctrl_reg, 914 cprman_write(cprman, data->cm_ctrl_reg,
916 cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST); 915 cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST);
916 spin_unlock(&cprman->regs_lock);
917 917
918 /* Wait for the PLL to lock. */ 918 /* Wait for the PLL to lock. */
919 timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS); 919 timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
@@ -927,6 +927,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
927 cpu_relax(); 927 cpu_relax();
928 } 928 }
929 929
930 cprman_write(cprman, data->a2w_ctrl_reg,
931 cprman_read(cprman, data->a2w_ctrl_reg) |
932 A2W_PLL_CTRL_PRST_DISABLE);
933
930 return 0; 934 return 0;
931} 935}
932 936
@@ -997,9 +1001,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
997 } 1001 }
998 1002
999 /* Unmask the reference clock from the oscillator. */ 1003 /* Unmask the reference clock from the oscillator. */
1004 spin_lock(&cprman->regs_lock);
1000 cprman_write(cprman, A2W_XOSC_CTRL, 1005 cprman_write(cprman, A2W_XOSC_CTRL,
1001 cprman_read(cprman, A2W_XOSC_CTRL) | 1006 cprman_read(cprman, A2W_XOSC_CTRL) |
1002 data->reference_enable_mask); 1007 data->reference_enable_mask);
1008 spin_unlock(&cprman->regs_lock);
1003 1009
1004 if (do_ana_setup_first) 1010 if (do_ana_setup_first)
1005 bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana); 1011 bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana);
diff --git a/drivers/clk/bcm/clk-ns2.c b/drivers/clk/bcm/clk-ns2.c
index a564e9248814..adc14145861a 100644
--- a/drivers/clk/bcm/clk-ns2.c
+++ b/drivers/clk/bcm/clk-ns2.c
@@ -103,7 +103,7 @@ CLK_OF_DECLARE(ns2_genpll_src_clk, "brcm,ns2-genpll-scr",
103 103
104static const struct iproc_pll_ctrl genpll_sw = { 104static const struct iproc_pll_ctrl genpll_sw = {
105 .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL, 105 .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL,
106 .aon = AON_VAL(0x0, 2, 9, 8), 106 .aon = AON_VAL(0x0, 1, 11, 10),
107 .reset = RESET_VAL(0x4, 2, 1), 107 .reset = RESET_VAL(0x4, 2, 1),
108 .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 2, 3), 108 .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 2, 3),
109 .ndiv_int = REG_VAL(0x8, 4, 10), 109 .ndiv_int = REG_VAL(0x8, 4, 10),
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 43a218f35b19..4ad32ce428cf 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -106,7 +106,7 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
106 106
107 rc = clk_set_rate(clk, rate); 107 rc = clk_set_rate(clk, rate);
108 if (rc < 0) 108 if (rc < 0)
109 pr_err("clk: couldn't set %s clk rate to %d (%d), current rate: %ld\n", 109 pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n",
110 __clk_get_name(clk), rate, rc, 110 __clk_get_name(clk), rate, rc,
111 clk_get_rate(clk)); 111 clk_get_rate(clk));
112 clk_put(clk); 112 clk_put(clk);
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index cd0f2726f5e0..c40445488d3a 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -71,15 +71,15 @@ static const struct clk_ops scpi_clk_ops = {
71}; 71};
72 72
73/* find closest match to given frequency in OPP table */ 73/* find closest match to given frequency in OPP table */
74static int __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate) 74static long __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
75{ 75{
76 int idx; 76 int idx;
77 u32 fmin = 0, fmax = ~0, ftmp; 77 unsigned long fmin = 0, fmax = ~0, ftmp;
78 const struct scpi_opp *opp = clk->info->opps; 78 const struct scpi_opp *opp = clk->info->opps;
79 79
80 for (idx = 0; idx < clk->info->count; idx++, opp++) { 80 for (idx = 0; idx < clk->info->count; idx++, opp++) {
81 ftmp = opp->freq; 81 ftmp = opp->freq;
82 if (ftmp >= (u32)rate) { 82 if (ftmp >= rate) {
83 if (ftmp <= fmax) 83 if (ftmp <= fmax)
84 fmax = ftmp; 84 fmax = ftmp;
85 break; 85 break;
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index e346b223199d..a01ee9a3ed6d 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -72,7 +72,7 @@ static const char * const si5351_input_names[] = {
72 "xtal", "clkin" 72 "xtal", "clkin"
73}; 73};
74static const char * const si5351_pll_names[] = { 74static const char * const si5351_pll_names[] = {
75 "plla", "pllb", "vxco" 75 "si5351_plla", "si5351_pllb", "si5351_vxco"
76}; 76};
77static const char * const si5351_msynth_names[] = { 77static const char * const si5351_msynth_names[] = {
78 "ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7" 78 "ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7"
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 28ae80d71328..3f28b8682258 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1975,6 +1975,9 @@ static int clk_core_get_phase(struct clk_core *core)
1975 int ret; 1975 int ret;
1976 1976
1977 clk_prepare_lock(); 1977 clk_prepare_lock();
1978 /* Always try to update cached phase if possible */
1979 if (core->ops->get_phase)
1980 core->phase = core->ops->get_phase(core->hw);
1978 ret = core->phase; 1981 ret = core->phase;
1979 clk_prepare_unlock(); 1982 clk_prepare_unlock();
1980 1983
diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
index 8bccf4ecdab6..9ff4ea63932d 100644
--- a/drivers/clk/mvebu/armada-38x.c
+++ b/drivers/clk/mvebu/armada-38x.c
@@ -46,10 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
46} 46}
47 47
48static const u32 armada_38x_cpu_frequencies[] __initconst = { 48static const u32 armada_38x_cpu_frequencies[] __initconst = {
49 0, 0, 0, 0, 49 666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0,
50 1066 * 1000 * 1000, 0, 0, 0, 50 1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
51 1332 * 1000 * 1000, 0, 0, 0, 51 1332 * 1000 * 1000, 0, 0, 0,
52 1600 * 1000 * 1000, 52 1600 * 1000 * 1000, 0, 0, 0,
53 1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
53}; 54};
54 55
55static u32 __init armada_38x_get_cpu_freq(void __iomem *sar) 56static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
@@ -75,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
75}; 76};
76 77
77static const int armada_38x_cpu_l2_ratios[32][2] __initconst = { 78static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
78 {0, 1}, {0, 1}, {0, 1}, {0, 1}, 79 {1, 2}, {0, 1}, {1, 2}, {0, 1},
79 {1, 2}, {0, 1}, {0, 1}, {0, 1}, 80 {1, 2}, {0, 1}, {1, 2}, {0, 1},
80 {1, 2}, {0, 1}, {0, 1}, {0, 1}, 81 {1, 2}, {0, 1}, {0, 1}, {0, 1},
81 {1, 2}, {0, 1}, {0, 1}, {0, 1}, 82 {1, 2}, {0, 1}, {0, 1}, {0, 1},
82 {0, 1}, {0, 1}, {0, 1}, {0, 1}, 83 {1, 2}, {0, 1}, {0, 1}, {1, 2},
83 {0, 1}, {0, 1}, {0, 1}, {0, 1}, 84 {0, 1}, {0, 1}, {0, 1}, {0, 1},
84 {0, 1}, {0, 1}, {0, 1}, {0, 1}, 85 {0, 1}, {0, 1}, {0, 1}, {0, 1},
85 {0, 1}, {0, 1}, {0, 1}, {0, 1}, 86 {0, 1}, {0, 1}, {0, 1}, {0, 1},
@@ -90,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
90 {1, 2}, {0, 1}, {0, 1}, {0, 1}, 91 {1, 2}, {0, 1}, {0, 1}, {0, 1},
91 {1, 2}, {0, 1}, {0, 1}, {0, 1}, 92 {1, 2}, {0, 1}, {0, 1}, {0, 1},
92 {1, 2}, {0, 1}, {0, 1}, {0, 1}, 93 {1, 2}, {0, 1}, {0, 1}, {0, 1},
93 {0, 1}, {0, 1}, {0, 1}, {0, 1}, 94 {1, 2}, {0, 1}, {0, 1}, {7, 15},
94 {0, 1}, {0, 1}, {0, 1}, {0, 1}, 95 {0, 1}, {0, 1}, {0, 1}, {0, 1},
95 {0, 1}, {0, 1}, {0, 1}, {0, 1}, 96 {0, 1}, {0, 1}, {0, 1}, {0, 1},
96 {0, 1}, {0, 1}, {0, 1}, {0, 1}, 97 {0, 1}, {0, 1}, {0, 1}, {0, 1},
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 2e7f03d50f4e..95a4dd290f35 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -1437,6 +1437,7 @@ static const struct freq_tbl ftbl_codec_clk[] = {
1437 1437
1438static struct clk_rcg2 codec_digcodec_clk_src = { 1438static struct clk_rcg2 codec_digcodec_clk_src = {
1439 .cmd_rcgr = 0x1c09c, 1439 .cmd_rcgr = 0x1c09c,
1440 .mnd_width = 8,
1440 .hid_width = 5, 1441 .hid_width = 5,
1441 .parent_map = gcc_xo_gpll1_emclk_sleep_map, 1442 .parent_map = gcc_xo_gpll1_emclk_sleep_map,
1442 .freq_tbl = ftbl_codec_clk, 1443 .freq_tbl = ftbl_codec_clk,
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 33c20c6b45af..b840e4ace623 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -60,6 +60,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
60 u16 degrees; 60 u16 degrees;
61 u32 delay_num = 0; 61 u32 delay_num = 0;
62 62
63 /* See the comment for rockchip_mmc_set_phase below */
64 if (!rate) {
65 pr_err("%s: invalid clk rate\n", __func__);
66 return -EINVAL;
67 }
68
63 raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift); 69 raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
64 70
65 degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90; 71 degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
@@ -86,6 +92,23 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
86 u32 raw_value; 92 u32 raw_value;
87 u32 delay; 93 u32 delay;
88 94
95 /*
96 * The below calculation is based on the output clock from
97 * MMC host to the card, which expects the phase clock inherits
98 * the clock rate from its parent, namely the output clock
99 * provider of MMC host. However, things may go wrong if
100 * (1) It is orphan.
101 * (2) It is assigned to the wrong parent.
102 *
103 * This check help debug the case (1), which seems to be the
104 * most likely problem we often face and which makes it difficult
105 * for people to debug unstable mmc tuning results.
106 */
107 if (!rate) {
108 pr_err("%s: invalid clk rate\n", __func__);
109 return -EINVAL;
110 }
111
89 nineties = degrees / 90; 112 nineties = degrees / 90;
90 remainder = (degrees % 90); 113 remainder = (degrees % 90);
91 114
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index fdd41b17a24f..294efaef5b82 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -683,7 +683,7 @@ static struct samsung_pll_rate_table exynos3250_epll_rates[] = {
683 PLL_36XX_RATE(144000000, 96, 2, 3, 0), 683 PLL_36XX_RATE(144000000, 96, 2, 3, 0),
684 PLL_36XX_RATE( 96000000, 128, 2, 4, 0), 684 PLL_36XX_RATE( 96000000, 128, 2, 4, 0),
685 PLL_36XX_RATE( 84000000, 112, 2, 4, 0), 685 PLL_36XX_RATE( 84000000, 112, 2, 4, 0),
686 PLL_36XX_RATE( 80000004, 106, 2, 4, 43691), 686 PLL_36XX_RATE( 80000003, 106, 2, 4, 43691),
687 PLL_36XX_RATE( 73728000, 98, 2, 4, 19923), 687 PLL_36XX_RATE( 73728000, 98, 2, 4, 19923),
688 PLL_36XX_RATE( 67737598, 270, 3, 5, 62285), 688 PLL_36XX_RATE( 67737598, 270, 3, 5, 62285),
689 PLL_36XX_RATE( 65535999, 174, 2, 5, 49982), 689 PLL_36XX_RATE( 65535999, 174, 2, 5, 49982),
@@ -719,7 +719,7 @@ static struct samsung_pll_rate_table exynos3250_vpll_rates[] = {
719 PLL_36XX_RATE(148352005, 98, 2, 3, 59070), 719 PLL_36XX_RATE(148352005, 98, 2, 3, 59070),
720 PLL_36XX_RATE(108000000, 144, 2, 4, 0), 720 PLL_36XX_RATE(108000000, 144, 2, 4, 0),
721 PLL_36XX_RATE( 74250000, 99, 2, 4, 0), 721 PLL_36XX_RATE( 74250000, 99, 2, 4, 0),
722 PLL_36XX_RATE( 74176002, 98, 3, 4, 59070), 722 PLL_36XX_RATE( 74176002, 98, 2, 4, 59070),
723 PLL_36XX_RATE( 54054000, 216, 3, 5, 14156), 723 PLL_36XX_RATE( 54054000, 216, 3, 5, 14156),
724 PLL_36XX_RATE( 54000000, 144, 2, 5, 0), 724 PLL_36XX_RATE( 54000000, 144, 2, 5, 0),
725 { /* sentinel */ } 725 { /* sentinel */ }
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 5bebf8cb0d70..f0b564c7c9c1 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -711,13 +711,13 @@ static struct samsung_pll_rate_table epll_24mhz_tbl[] __initdata = {
711 /* sorted in descending order */ 711 /* sorted in descending order */
712 /* PLL_36XX_RATE(rate, m, p, s, k) */ 712 /* PLL_36XX_RATE(rate, m, p, s, k) */
713 PLL_36XX_RATE(192000000, 64, 2, 2, 0), 713 PLL_36XX_RATE(192000000, 64, 2, 2, 0),
714 PLL_36XX_RATE(180633600, 90, 3, 2, 20762), 714 PLL_36XX_RATE(180633605, 90, 3, 2, 20762),
715 PLL_36XX_RATE(180000000, 90, 3, 2, 0), 715 PLL_36XX_RATE(180000000, 90, 3, 2, 0),
716 PLL_36XX_RATE(73728000, 98, 2, 4, 19923), 716 PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
717 PLL_36XX_RATE(67737600, 90, 2, 4, 20762), 717 PLL_36XX_RATE(67737602, 90, 2, 4, 20762),
718 PLL_36XX_RATE(49152000, 98, 3, 4, 19923), 718 PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
719 PLL_36XX_RATE(45158400, 90, 3, 4, 20762), 719 PLL_36XX_RATE(45158401, 90, 3, 4, 20762),
720 PLL_36XX_RATE(32768000, 131, 3, 5, 4719), 720 PLL_36XX_RATE(32768001, 131, 3, 5, 4719),
721 { }, 721 { },
722}; 722};
723 723
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index d1a29f6c1084..7027e77bf859 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -65,7 +65,7 @@ static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = {
65 PLL_36XX_RATE(480000000, 160, 2, 2, 0), 65 PLL_36XX_RATE(480000000, 160, 2, 2, 0),
66 PLL_36XX_RATE(432000000, 144, 2, 2, 0), 66 PLL_36XX_RATE(432000000, 144, 2, 2, 0),
67 PLL_36XX_RATE(400000000, 200, 3, 2, 0), 67 PLL_36XX_RATE(400000000, 200, 3, 2, 0),
68 PLL_36XX_RATE(394073130, 459, 7, 2, 49282), 68 PLL_36XX_RATE(394073128, 459, 7, 2, 49282),
69 PLL_36XX_RATE(333000000, 111, 2, 2, 0), 69 PLL_36XX_RATE(333000000, 111, 2, 2, 0),
70 PLL_36XX_RATE(300000000, 100, 2, 2, 0), 70 PLL_36XX_RATE(300000000, 100, 2, 2, 0),
71 PLL_36XX_RATE(266000000, 266, 3, 3, 0), 71 PLL_36XX_RATE(266000000, 266, 3, 3, 0),
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index cee062c588de..91c89ac193b9 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -747,7 +747,7 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
747 PLL_35XX_RATE(800000000U, 400, 6, 1), 747 PLL_35XX_RATE(800000000U, 400, 6, 1),
748 PLL_35XX_RATE(733000000U, 733, 12, 1), 748 PLL_35XX_RATE(733000000U, 733, 12, 1),
749 PLL_35XX_RATE(700000000U, 175, 3, 1), 749 PLL_35XX_RATE(700000000U, 175, 3, 1),
750 PLL_35XX_RATE(667000000U, 222, 4, 1), 750 PLL_35XX_RATE(666000000U, 222, 4, 1),
751 PLL_35XX_RATE(633000000U, 211, 4, 1), 751 PLL_35XX_RATE(633000000U, 211, 4, 1),
752 PLL_35XX_RATE(600000000U, 500, 5, 2), 752 PLL_35XX_RATE(600000000U, 500, 5, 2),
753 PLL_35XX_RATE(552000000U, 460, 5, 2), 753 PLL_35XX_RATE(552000000U, 460, 5, 2),
@@ -773,12 +773,12 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
773/* AUD_PLL */ 773/* AUD_PLL */
774static struct samsung_pll_rate_table exynos5443_aud_pll_rates[] = { 774static struct samsung_pll_rate_table exynos5443_aud_pll_rates[] = {
775 PLL_36XX_RATE(400000000U, 200, 3, 2, 0), 775 PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
776 PLL_36XX_RATE(393216000U, 197, 3, 2, -25690), 776 PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
777 PLL_36XX_RATE(384000000U, 128, 2, 2, 0), 777 PLL_36XX_RATE(384000000U, 128, 2, 2, 0),
778 PLL_36XX_RATE(368640000U, 246, 4, 2, -15729), 778 PLL_36XX_RATE(368639991U, 246, 4, 2, -15729),
779 PLL_36XX_RATE(361507200U, 181, 3, 2, -16148), 779 PLL_36XX_RATE(361507202U, 181, 3, 2, -16148),
780 PLL_36XX_RATE(338688000U, 113, 2, 2, -6816), 780 PLL_36XX_RATE(338687988U, 113, 2, 2, -6816),
781 PLL_36XX_RATE(294912000U, 98, 1, 3, 19923), 781 PLL_36XX_RATE(294912002U, 98, 1, 3, 19923),
782 PLL_36XX_RATE(288000000U, 96, 1, 3, 0), 782 PLL_36XX_RATE(288000000U, 96, 1, 3, 0),
783 PLL_36XX_RATE(252000000U, 84, 1, 3, 0), 783 PLL_36XX_RATE(252000000U, 84, 1, 3, 0),
784 { /* sentinel */ } 784 { /* sentinel */ }
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index 0945a8852299..69e3e848716a 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -168,7 +168,7 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
168 PLL_35XX_RATE(226000000, 105, 1, 1), 168 PLL_35XX_RATE(226000000, 105, 1, 1),
169 PLL_35XX_RATE(210000000, 132, 2, 1), 169 PLL_35XX_RATE(210000000, 132, 2, 1),
170 /* 2410 common */ 170 /* 2410 common */
171 PLL_35XX_RATE(203000000, 161, 3, 1), 171 PLL_35XX_RATE(202800000, 161, 3, 1),
172 PLL_35XX_RATE(192000000, 88, 1, 1), 172 PLL_35XX_RATE(192000000, 88, 1, 1),
173 PLL_35XX_RATE(186000000, 85, 1, 1), 173 PLL_35XX_RATE(186000000, 85, 1, 1),
174 PLL_35XX_RATE(180000000, 82, 1, 1), 174 PLL_35XX_RATE(180000000, 82, 1, 1),
@@ -178,18 +178,18 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
178 PLL_35XX_RATE(147000000, 90, 2, 1), 178 PLL_35XX_RATE(147000000, 90, 2, 1),
179 PLL_35XX_RATE(135000000, 82, 2, 1), 179 PLL_35XX_RATE(135000000, 82, 2, 1),
180 PLL_35XX_RATE(124000000, 116, 1, 2), 180 PLL_35XX_RATE(124000000, 116, 1, 2),
181 PLL_35XX_RATE(118000000, 150, 2, 2), 181 PLL_35XX_RATE(118500000, 150, 2, 2),
182 PLL_35XX_RATE(113000000, 105, 1, 2), 182 PLL_35XX_RATE(113000000, 105, 1, 2),
183 PLL_35XX_RATE(101000000, 127, 2, 2), 183 PLL_35XX_RATE(101250000, 127, 2, 2),
184 PLL_35XX_RATE(90000000, 112, 2, 2), 184 PLL_35XX_RATE(90000000, 112, 2, 2),
185 PLL_35XX_RATE(85000000, 105, 2, 2), 185 PLL_35XX_RATE(84750000, 105, 2, 2),
186 PLL_35XX_RATE(79000000, 71, 1, 2), 186 PLL_35XX_RATE(79000000, 71, 1, 2),
187 PLL_35XX_RATE(68000000, 82, 2, 2), 187 PLL_35XX_RATE(67500000, 82, 2, 2),
188 PLL_35XX_RATE(56000000, 142, 2, 3), 188 PLL_35XX_RATE(56250000, 142, 2, 3),
189 PLL_35XX_RATE(48000000, 120, 2, 3), 189 PLL_35XX_RATE(48000000, 120, 2, 3),
190 PLL_35XX_RATE(51000000, 161, 3, 3), 190 PLL_35XX_RATE(50700000, 161, 3, 3),
191 PLL_35XX_RATE(45000000, 82, 1, 3), 191 PLL_35XX_RATE(45000000, 82, 1, 3),
192 PLL_35XX_RATE(34000000, 82, 2, 3), 192 PLL_35XX_RATE(33750000, 82, 2, 3),
193 { /* sentinel */ }, 193 { /* sentinel */ },
194}; 194};
195 195
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 8c41c6fcb9ee..acf83569f86f 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -333,11 +333,11 @@ static struct pdiv_map pllu_p[] = {
333}; 333};
334 334
335static struct tegra_clk_pll_freq_table pll_u_freq_table[] = { 335static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
336 { 12000000, 480000000, 960, 12, 0, 12}, 336 { 12000000, 480000000, 960, 12, 2, 12 },
337 { 13000000, 480000000, 960, 13, 0, 12}, 337 { 13000000, 480000000, 960, 13, 2, 12 },
338 { 16800000, 480000000, 400, 7, 0, 5}, 338 { 16800000, 480000000, 400, 7, 2, 5 },
339 { 19200000, 480000000, 200, 4, 0, 3}, 339 { 19200000, 480000000, 200, 4, 2, 3 },
340 { 26000000, 480000000, 960, 26, 0, 12}, 340 { 26000000, 480000000, 960, 26, 2, 12 },
341 { 0, 0, 0, 0, 0, 0 }, 341 { 0, 0, 0, 0, 0, 0 },
342}; 342};
343 343
@@ -1372,6 +1372,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
1372 {TEGRA30_CLK_GR2D, TEGRA30_CLK_PLL_C, 300000000, 0}, 1372 {TEGRA30_CLK_GR2D, TEGRA30_CLK_PLL_C, 300000000, 0},
1373 {TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0}, 1373 {TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0},
1374 {TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0}, 1374 {TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0},
1375 { TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
1375 {TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0}, /* This MUST be the last entry. */ 1376 {TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0}, /* This MUST be the last entry. */
1376}; 1377};
1377 1378
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 517e1c7624d4..a00209702f39 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name,
281 281
282static unsigned long __init ftm_clk_init(struct device_node *np) 282static unsigned long __init ftm_clk_init(struct device_node *np)
283{ 283{
284 unsigned long freq; 284 long freq;
285 285
286 freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); 286 freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
287 if (freq <= 0) 287 if (freq <= 0)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 659879a56dba..949610360b14 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -236,6 +236,7 @@ endif
236if MIPS 236if MIPS
237config LOONGSON2_CPUFREQ 237config LOONGSON2_CPUFREQ
238 tristate "Loongson2 CPUFreq Driver" 238 tristate "Loongson2 CPUFreq Driver"
239 depends on LEMOTE_MACH2F
239 help 240 help
240 This option adds a CPUFreq driver for loongson processors which 241 This option adds a CPUFreq driver for loongson processors which
241 support software configurable cpu frequency. 242 support software configurable cpu frequency.
@@ -248,6 +249,7 @@ config LOONGSON2_CPUFREQ
248 249
249config LOONGSON1_CPUFREQ 250config LOONGSON1_CPUFREQ
250 tristate "Loongson1 CPUFreq Driver" 251 tristate "Loongson1 CPUFreq Driver"
252 depends on LOONGSON1_LS1B
251 help 253 help
252 This option adds a CPUFreq driver for loongson1 processors which 254 This option adds a CPUFreq driver for loongson1 processors which
253 support software configurable cpu frequency. 255 support software configurable cpu frequency.
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 7c0bdfb1a2ca..0dcbf951ad1b 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -100,9 +100,19 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
100 policy->cpuinfo.max_freq = policy->max; 100 policy->cpuinfo.max_freq = policy->max;
101 policy->shared_type = cpu->shared_type; 101 policy->shared_type = cpu->shared_type;
102 102
103 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) 103 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
104 int i;
105
104 cpumask_copy(policy->cpus, cpu->shared_cpu_map); 106 cpumask_copy(policy->cpus, cpu->shared_cpu_map);
105 else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { 107
108 for_each_cpu(i, policy->cpus) {
109 if (unlikely(i == policy->cpu))
110 continue;
111
112 memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
113 sizeof(cpu->perf_caps));
114 }
115 } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
106 /* Support only SW_ANY for now. */ 116 /* Support only SW_ANY for now. */
107 pr_debug("Unsupported CPU co-ord type\n"); 117 pr_debug("Unsupported CPU co-ord type\n");
108 return -EFAULT; 118 return -EFAULT;
@@ -166,8 +176,13 @@ static int __init cppc_cpufreq_init(void)
166 return ret; 176 return ret;
167 177
168out: 178out:
169 for_each_possible_cpu(i) 179 for_each_possible_cpu(i) {
170 kfree(all_cpu_data[i]); 180 cpu = all_cpu_data[i];
181 if (!cpu)
182 break;
183 free_cpumask_var(cpu->shared_cpu_map);
184 kfree(cpu);
185 }
171 186
172 kfree(all_cpu_data); 187 kfree(all_cpu_data);
173 return -ENODEV; 188 return -ENODEV;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ebed319657e7..68b604ad8413 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -603,6 +603,8 @@ static ssize_t store_##file_name \
603 struct cpufreq_policy new_policy; \ 603 struct cpufreq_policy new_policy; \
604 \ 604 \
605 memcpy(&new_policy, policy, sizeof(*policy)); \ 605 memcpy(&new_policy, policy, sizeof(*policy)); \
606 new_policy.min = policy->user_policy.min; \
607 new_policy.max = policy->user_policy.max; \
606 \ 608 \
607 ret = sscanf(buf, "%u", &new_policy.object); \ 609 ret = sscanf(buf, "%u", &new_policy.object); \
608 if (ret != 1) \ 610 if (ret != 1) \
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7ff8b15a3422..88728d997088 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1361,6 +1361,11 @@ static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
1361static inline bool intel_pstate_has_acpi_ppc(void) { return false; } 1361static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
1362#endif /* CONFIG_ACPI */ 1362#endif /* CONFIG_ACPI */
1363 1363
1364static const struct x86_cpu_id hwp_support_ids[] __initconst = {
1365 { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
1366 {}
1367};
1368
1364static int __init intel_pstate_init(void) 1369static int __init intel_pstate_init(void)
1365{ 1370{
1366 int cpu, rc = 0; 1371 int cpu, rc = 0;
@@ -1370,17 +1375,16 @@ static int __init intel_pstate_init(void)
1370 if (no_load) 1375 if (no_load)
1371 return -ENODEV; 1376 return -ENODEV;
1372 1377
1378 if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
1379 copy_cpu_funcs(&core_params.funcs);
1380 hwp_active++;
1381 goto hwp_cpu_matched;
1382 }
1383
1373 id = x86_match_cpu(intel_pstate_cpu_ids); 1384 id = x86_match_cpu(intel_pstate_cpu_ids);
1374 if (!id) 1385 if (!id)
1375 return -ENODEV; 1386 return -ENODEV;
1376 1387
1377 /*
1378 * The Intel pstate driver will be ignored if the platform
1379 * firmware has its own power management modes.
1380 */
1381 if (intel_pstate_platform_pwr_mgmt_exists())
1382 return -ENODEV;
1383
1384 cpu_def = (struct cpu_defaults *)id->driver_data; 1388 cpu_def = (struct cpu_defaults *)id->driver_data;
1385 1389
1386 copy_pid_params(&cpu_def->pid_policy); 1390 copy_pid_params(&cpu_def->pid_policy);
@@ -1389,17 +1393,20 @@ static int __init intel_pstate_init(void)
1389 if (intel_pstate_msrs_not_valid()) 1393 if (intel_pstate_msrs_not_valid())
1390 return -ENODEV; 1394 return -ENODEV;
1391 1395
1396hwp_cpu_matched:
1397 /*
1398 * The Intel pstate driver will be ignored if the platform
1399 * firmware has its own power management modes.
1400 */
1401 if (intel_pstate_platform_pwr_mgmt_exists())
1402 return -ENODEV;
1403
1392 pr_info("Intel P-state driver initializing.\n"); 1404 pr_info("Intel P-state driver initializing.\n");
1393 1405
1394 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); 1406 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
1395 if (!all_cpu_data) 1407 if (!all_cpu_data)
1396 return -ENOMEM; 1408 return -ENOMEM;
1397 1409
1398 if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
1399 pr_info("intel_pstate: HWP enabled\n");
1400 hwp_active++;
1401 }
1402
1403 if (!hwp_active && hwp_only) 1410 if (!hwp_active && hwp_only)
1404 goto out; 1411 goto out;
1405 1412
@@ -1410,6 +1417,9 @@ static int __init intel_pstate_init(void)
1410 intel_pstate_debug_expose_params(); 1417 intel_pstate_debug_expose_params();
1411 intel_pstate_sysfs_expose_params(); 1418 intel_pstate_sysfs_expose_params();
1412 1419
1420 if (hwp_active)
1421 pr_info("intel_pstate: HWP enabled\n");
1422
1413 return rc; 1423 return rc;
1414out: 1424out:
1415 get_online_cpus(); 1425 get_online_cpus();
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index c4b0ef65988c..57e6c45724e7 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -592,7 +592,7 @@ static int __init powernv_cpufreq_init(void)
592 int rc = 0; 592 int rc = 0;
593 593
594 /* Don't probe on pseries (guest) platforms */ 594 /* Don't probe on pseries (guest) platforms */
595 if (!firmware_has_feature(FW_FEATURE_OPALv3)) 595 if (!firmware_has_feature(FW_FEATURE_OPAL))
596 return -ENODEV; 596 return -ENODEV;
597 597
598 /* Discover pstates from device tree and init */ 598 /* Discover pstates from device tree and init */
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 68ef8fd9482f..f5c4e009113c 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -364,7 +364,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
364static int s3c_cpufreq_init(struct cpufreq_policy *policy) 364static int s3c_cpufreq_init(struct cpufreq_policy *policy)
365{ 365{
366 policy->clk = clk_arm; 366 policy->clk = clk_arm;
367 return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); 367
368 policy->cpuinfo.transition_latency = cpu_cur.info->latency;
369
370 if (ftab)
371 return cpufreq_table_validate_and_show(policy, ftab);
372
373 return 0;
368} 374}
369 375
370static int __init s3c_cpufreq_initclks(void) 376static int __init s3c_cpufreq_initclks(void)
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 86628e22b2a3..719c3d9f07fb 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -30,54 +30,63 @@
30 30
31static DEFINE_PER_CPU(struct clk, sh_cpuclk); 31static DEFINE_PER_CPU(struct clk, sh_cpuclk);
32 32
33struct cpufreq_target {
34 struct cpufreq_policy *policy;
35 unsigned int freq;
36};
37
33static unsigned int sh_cpufreq_get(unsigned int cpu) 38static unsigned int sh_cpufreq_get(unsigned int cpu)
34{ 39{
35 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000; 40 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
36} 41}
37 42
38/* 43static long __sh_cpufreq_target(void *arg)
39 * Here we notify other drivers of the proposed change and the final change.
40 */
41static int sh_cpufreq_target(struct cpufreq_policy *policy,
42 unsigned int target_freq,
43 unsigned int relation)
44{ 44{
45 unsigned int cpu = policy->cpu; 45 struct cpufreq_target *target = arg;
46 struct cpufreq_policy *policy = target->policy;
47 int cpu = policy->cpu;
46 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); 48 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
47 cpumask_t cpus_allowed;
48 struct cpufreq_freqs freqs; 49 struct cpufreq_freqs freqs;
49 struct device *dev; 50 struct device *dev;
50 long freq; 51 long freq;
51 52
52 cpus_allowed = current->cpus_allowed; 53 if (smp_processor_id() != cpu)
53 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 54 return -ENODEV;
54
55 BUG_ON(smp_processor_id() != cpu);
56 55
57 dev = get_cpu_device(cpu); 56 dev = get_cpu_device(cpu);
58 57
59 /* Convert target_freq from kHz to Hz */ 58 /* Convert target_freq from kHz to Hz */
60 freq = clk_round_rate(cpuclk, target_freq * 1000); 59 freq = clk_round_rate(cpuclk, target->freq * 1000);
61 60
62 if (freq < (policy->min * 1000) || freq > (policy->max * 1000)) 61 if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
63 return -EINVAL; 62 return -EINVAL;
64 63
65 dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000); 64 dev_dbg(dev, "requested frequency %u Hz\n", target->freq * 1000);
66 65
67 freqs.old = sh_cpufreq_get(cpu); 66 freqs.old = sh_cpufreq_get(cpu);
68 freqs.new = (freq + 500) / 1000; 67 freqs.new = (freq + 500) / 1000;
69 freqs.flags = 0; 68 freqs.flags = 0;
70 69
71 cpufreq_freq_transition_begin(policy, &freqs); 70 cpufreq_freq_transition_begin(target->policy, &freqs);
72 set_cpus_allowed_ptr(current, &cpus_allowed);
73 clk_set_rate(cpuclk, freq); 71 clk_set_rate(cpuclk, freq);
74 cpufreq_freq_transition_end(policy, &freqs, 0); 72 cpufreq_freq_transition_end(target->policy, &freqs, 0);
75 73
76 dev_dbg(dev, "set frequency %lu Hz\n", freq); 74 dev_dbg(dev, "set frequency %lu Hz\n", freq);
77
78 return 0; 75 return 0;
79} 76}
80 77
78/*
79 * Here we notify other drivers of the proposed change and the final change.
80 */
81static int sh_cpufreq_target(struct cpufreq_policy *policy,
82 unsigned int target_freq,
83 unsigned int relation)
84{
85 struct cpufreq_target data = { .policy = policy, .freq = target_freq };
86
87 return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
88}
89
81static int sh_cpufreq_verify(struct cpufreq_policy *policy) 90static int sh_cpufreq_verify(struct cpufreq_policy *policy)
82{ 91{
83 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); 92 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 344058f8501a..d5657d50ac40 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -119,7 +119,6 @@ struct cpuidle_coupled {
119 119
120#define CPUIDLE_COUPLED_NOT_IDLE (-1) 120#define CPUIDLE_COUPLED_NOT_IDLE (-1)
121 121
122static DEFINE_MUTEX(cpuidle_coupled_lock);
123static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb); 122static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
124 123
125/* 124/*
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index d5c5a476360f..44ebda8bbc84 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -29,9 +29,31 @@ struct cpuidle_driver powernv_idle_driver = {
29 29
30static int max_idle_state; 30static int max_idle_state;
31static struct cpuidle_state *cpuidle_state_table; 31static struct cpuidle_state *cpuidle_state_table;
32static u64 snooze_timeout; 32static u64 default_snooze_timeout;
33static bool snooze_timeout_en; 33static bool snooze_timeout_en;
34 34
35static u64 get_snooze_timeout(struct cpuidle_device *dev,
36 struct cpuidle_driver *drv,
37 int index)
38{
39 int i;
40
41 if (unlikely(!snooze_timeout_en))
42 return default_snooze_timeout;
43
44 for (i = index + 1; i < drv->state_count; i++) {
45 struct cpuidle_state *s = &drv->states[i];
46 struct cpuidle_state_usage *su = &dev->states_usage[i];
47
48 if (s->disabled || su->disable)
49 continue;
50
51 return s->target_residency * tb_ticks_per_usec;
52 }
53
54 return default_snooze_timeout;
55}
56
35static int snooze_loop(struct cpuidle_device *dev, 57static int snooze_loop(struct cpuidle_device *dev,
36 struct cpuidle_driver *drv, 58 struct cpuidle_driver *drv,
37 int index) 59 int index)
@@ -41,7 +63,7 @@ static int snooze_loop(struct cpuidle_device *dev,
41 local_irq_enable(); 63 local_irq_enable();
42 set_thread_flag(TIF_POLLING_NRFLAG); 64 set_thread_flag(TIF_POLLING_NRFLAG);
43 65
44 snooze_exit_time = get_tb() + snooze_timeout; 66 snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
45 ppc64_runlatch_off(); 67 ppc64_runlatch_off();
46 while (!need_resched()) { 68 while (!need_resched()) {
47 HMT_low(); 69 HMT_low();
@@ -282,15 +304,13 @@ static int powernv_idle_probe(void)
282 if (cpuidle_disable != IDLE_NO_OVERRIDE) 304 if (cpuidle_disable != IDLE_NO_OVERRIDE)
283 return -ENODEV; 305 return -ENODEV;
284 306
285 if (firmware_has_feature(FW_FEATURE_OPALv3)) { 307 if (firmware_has_feature(FW_FEATURE_OPAL)) {
286 cpuidle_state_table = powernv_states; 308 cpuidle_state_table = powernv_states;
287 /* Device tree can indicate more idle states */ 309 /* Device tree can indicate more idle states */
288 max_idle_state = powernv_add_idle_states(); 310 max_idle_state = powernv_add_idle_states();
289 if (max_idle_state > 1) { 311 default_snooze_timeout = TICK_USEC * tb_ticks_per_usec;
312 if (max_idle_state > 1)
290 snooze_timeout_en = true; 313 snooze_timeout_en = true;
291 snooze_timeout = powernv_states[1].target_residency *
292 tb_ticks_per_usec;
293 }
294 } else 314 } else
295 return -ENODEV; 315 return -ENODEV;
296 316
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index a5c111b67f37..ea11a33e7fff 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -174,8 +174,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
174 if (!state_node) 174 if (!state_node)
175 break; 175 break;
176 176
177 if (!of_device_is_available(state_node)) 177 if (!of_device_is_available(state_node)) {
178 of_node_put(state_node);
178 continue; 179 continue;
180 }
179 181
180 if (!idle_state_valid(state_node, i, cpumask)) { 182 if (!idle_state_valid(state_node, i, cpumask)) {
181 pr_warn("%s idle state not valid, bailing out\n", 183 pr_warn("%s idle state not valid, bailing out\n",
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 58a630e55d5d..78d0722feacb 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -207,7 +207,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
207 dev->pdr_pa); 207 dev->pdr_pa);
208 return -ENOMEM; 208 return -ENOMEM;
209 } 209 }
210 memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); 210 memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
211 dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, 211 dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
212 256 * PPC4XX_NUM_PD, 212 256 * PPC4XX_NUM_PD,
213 &dev->shadow_sa_pool_pa, 213 &dev->shadow_sa_pool_pa,
@@ -240,13 +240,15 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
240 240
241static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev) 241static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
242{ 242{
243 if (dev->pdr != NULL) 243 if (dev->pdr)
244 dma_free_coherent(dev->core_dev->device, 244 dma_free_coherent(dev->core_dev->device,
245 sizeof(struct ce_pd) * PPC4XX_NUM_PD, 245 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
246 dev->pdr, dev->pdr_pa); 246 dev->pdr, dev->pdr_pa);
247
247 if (dev->shadow_sa_pool) 248 if (dev->shadow_sa_pool)
248 dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD, 249 dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
249 dev->shadow_sa_pool, dev->shadow_sa_pool_pa); 250 dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
251
250 if (dev->shadow_sr_pool) 252 if (dev->shadow_sr_pool)
251 dma_free_coherent(dev->core_dev->device, 253 dma_free_coherent(dev->core_dev->device,
252 sizeof(struct sa_state_record) * PPC4XX_NUM_PD, 254 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
@@ -416,12 +418,12 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
416 418
417static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev) 419static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
418{ 420{
419 if (dev->sdr != NULL) 421 if (dev->sdr)
420 dma_free_coherent(dev->core_dev->device, 422 dma_free_coherent(dev->core_dev->device,
421 sizeof(struct ce_sd) * PPC4XX_NUM_SD, 423 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
422 dev->sdr, dev->sdr_pa); 424 dev->sdr, dev->sdr_pa);
423 425
424 if (dev->scatter_buffer_va != NULL) 426 if (dev->scatter_buffer_va)
425 dma_free_coherent(dev->core_dev->device, 427 dma_free_coherent(dev->core_dev->device,
426 dev->scatter_buffer_size * PPC4XX_NUM_SD, 428 dev->scatter_buffer_size * PPC4XX_NUM_SD,
427 dev->scatter_buffer_va, 429 dev->scatter_buffer_va,
@@ -1029,12 +1031,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1029 break; 1031 break;
1030 } 1032 }
1031 1033
1032 if (rc) { 1034 if (rc)
1033 list_del(&alg->entry);
1034 kfree(alg); 1035 kfree(alg);
1035 } else { 1036 else
1036 list_add_tail(&alg->entry, &sec_dev->alg_list); 1037 list_add_tail(&alg->entry, &sec_dev->alg_list);
1037 }
1038 } 1038 }
1039 1039
1040 return 0; 1040 return 0;
@@ -1188,7 +1188,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
1188 1188
1189 rc = crypto4xx_build_gdr(core_dev->dev); 1189 rc = crypto4xx_build_gdr(core_dev->dev);
1190 if (rc) 1190 if (rc)
1191 goto err_build_gdr; 1191 goto err_build_pdr;
1192 1192
1193 rc = crypto4xx_build_sdr(core_dev->dev); 1193 rc = crypto4xx_build_sdr(core_dev->dev);
1194 if (rc) 1194 if (rc)
@@ -1230,12 +1230,11 @@ err_iomap:
1230err_request_irq: 1230err_request_irq:
1231 irq_dispose_mapping(core_dev->irq); 1231 irq_dispose_mapping(core_dev->irq);
1232 tasklet_kill(&core_dev->tasklet); 1232 tasklet_kill(&core_dev->tasklet);
1233 crypto4xx_destroy_sdr(core_dev->dev);
1234err_build_sdr: 1233err_build_sdr:
1234 crypto4xx_destroy_sdr(core_dev->dev);
1235 crypto4xx_destroy_gdr(core_dev->dev); 1235 crypto4xx_destroy_gdr(core_dev->dev);
1236err_build_gdr:
1237 crypto4xx_destroy_pdr(core_dev->dev);
1238err_build_pdr: 1236err_build_pdr:
1237 crypto4xx_destroy_pdr(core_dev->dev);
1239 kfree(core_dev->dev); 1238 kfree(core_dev->dev);
1240err_alloc_dev: 1239err_alloc_dev:
1241 kfree(core_dev); 1240 kfree(core_dev);
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 53e61459c69f..ee87eb77095c 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -224,12 +224,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
224 * without any error (HW optimizations for later 224 * without any error (HW optimizations for later
225 * CAAM eras), then try again. 225 * CAAM eras), then try again.
226 */ 226 */
227 if (ret)
228 break;
229
227 rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; 230 rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
228 if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || 231 if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
229 !(rdsta_val & (1 << sh_idx))) 232 !(rdsta_val & (1 << sh_idx))) {
230 ret = -EAGAIN; 233 ret = -EAGAIN;
231 if (ret)
232 break; 234 break;
235 }
236
233 dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); 237 dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
234 /* Clear the contents before recreating the descriptor */ 238 /* Clear the contents before recreating the descriptor */
235 memset(desc, 0x00, CAAM_CMD_SZ * 7); 239 memset(desc, 0x00, CAAM_CMD_SZ * 7);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index da2d6777bd09..047ef69b7e65 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
266 return; 266 return;
267 } 267 }
268 268
269 count -= initial;
270
269 if (initial) 271 if (initial)
270 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ 272 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
271 : "+S"(input), "+D"(output) 273 : "+S"(input), "+D"(output)
@@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
273 275
274 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ 276 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
275 : "+S"(input), "+D"(output) 277 : "+S"(input), "+D"(output)
276 : "d"(control_word), "b"(key), "c"(count - initial)); 278 : "d"(control_word), "b"(key), "c"(count));
277} 279}
278 280
279static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, 281static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
284 if (count < cbc_fetch_blocks) 286 if (count < cbc_fetch_blocks)
285 return cbc_crypt(input, output, key, iv, control_word, count); 287 return cbc_crypt(input, output, key, iv, control_word, count);
286 288
289 count -= initial;
290
287 if (initial) 291 if (initial)
288 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ 292 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
289 : "+S" (input), "+D" (output), "+a" (iv) 293 : "+S" (input), "+D" (output), "+a" (iv)
@@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
291 295
292 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ 296 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
293 : "+S" (input), "+D" (output), "+a" (iv) 297 : "+S" (input), "+D" (output), "+a" (iv)
294 : "d" (control_word), "b" (key), "c" (count-initial)); 298 : "d" (control_word), "b" (key), "c" (count));
295 return iv; 299 return iv;
296} 300}
297 301
@@ -515,7 +519,7 @@ static int __init padlock_init(void)
515 if (!x86_match_cpu(padlock_cpu_id)) 519 if (!x86_match_cpu(padlock_cpu_id))
516 return -ENODEV; 520 return -ENODEV;
517 521
518 if (!cpu_has_xcrypt_enabled) { 522 if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
519 printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); 523 printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
520 return -ENODEV; 524 return -ENODEV;
521 } 525 }
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 4e154c9b9206..8c5f90647b7a 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -540,7 +540,7 @@ static int __init padlock_init(void)
540 struct shash_alg *sha1; 540 struct shash_alg *sha1;
541 struct shash_alg *sha256; 541 struct shash_alg *sha256;
542 542
543 if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled) 543 if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
544 return -ENODEV; 544 return -ENODEV;
545 545
546 /* Register the newly added algorithm module if on * 546 /* Register the newly added algorithm module if on *
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index fd39893079d5..45ea8957a73a 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -401,16 +401,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
401 uint32_t aes_control; 401 uint32_t aes_control;
402 int err; 402 int err;
403 unsigned long flags; 403 unsigned long flags;
404 u8 *iv;
404 405
405 aes_control = SSS_AES_KEY_CHANGE_MODE; 406 aes_control = SSS_AES_KEY_CHANGE_MODE;
406 if (mode & FLAGS_AES_DECRYPT) 407 if (mode & FLAGS_AES_DECRYPT)
407 aes_control |= SSS_AES_MODE_DECRYPT; 408 aes_control |= SSS_AES_MODE_DECRYPT;
408 409
409 if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) 410 if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
410 aes_control |= SSS_AES_CHAIN_MODE_CBC; 411 aes_control |= SSS_AES_CHAIN_MODE_CBC;
411 else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) 412 iv = req->info;
413 } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
412 aes_control |= SSS_AES_CHAIN_MODE_CTR; 414 aes_control |= SSS_AES_CHAIN_MODE_CTR;
413 415 iv = req->info;
416 } else {
417 iv = NULL; /* AES_ECB */
418 }
414 if (dev->ctx->keylen == AES_KEYSIZE_192) 419 if (dev->ctx->keylen == AES_KEYSIZE_192)
415 aes_control |= SSS_AES_KEY_SIZE_192; 420 aes_control |= SSS_AES_KEY_SIZE_192;
416 else if (dev->ctx->keylen == AES_KEYSIZE_256) 421 else if (dev->ctx->keylen == AES_KEYSIZE_256)
@@ -440,7 +445,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
440 goto outdata_error; 445 goto outdata_error;
441 446
442 SSS_AES_WRITE(dev, AES_CONTROL, aes_control); 447 SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
443 s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen); 448 s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
444 449
445 s5p_set_dma_indata(dev, req->src); 450 s5p_set_dma_indata(dev, req->src);
446 s5p_set_dma_outdata(dev, req->dst); 451 s5p_set_dma_outdata(dev, req->dst);
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 107cd2a41cae..24651d3217cd 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -422,6 +422,7 @@ static struct platform_driver sun4i_ss_driver = {
422 422
423module_platform_driver(sun4i_ss_driver); 423module_platform_driver(sun4i_ss_driver);
424 424
425MODULE_ALIAS("platform:sun4i-ss");
425MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator"); 426MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
426MODULE_LICENSE("GPL"); 427MODULE_LICENSE("GPL");
427MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>"); 428MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index 263af709e536..b907e4b1bbe2 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -53,8 +53,6 @@ static int p8_aes_init(struct crypto_tfm *tfm)
53 alg, PTR_ERR(fallback)); 53 alg, PTR_ERR(fallback));
54 return PTR_ERR(fallback); 54 return PTR_ERR(fallback);
55 } 55 }
56 printk(KERN_INFO "Using '%s' as fallback implementation.\n",
57 crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
58 56
59 crypto_cipher_set_flags(fallback, 57 crypto_cipher_set_flags(fallback,
60 crypto_cipher_get_flags((struct 58 crypto_cipher_get_flags((struct
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 3f8bb9a40df1..9506e8693c81 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -55,8 +55,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
55 alg, PTR_ERR(fallback)); 55 alg, PTR_ERR(fallback));
56 return PTR_ERR(fallback); 56 return PTR_ERR(fallback);
57 } 57 }
58 printk(KERN_INFO "Using '%s' as fallback implementation.\n",
59 crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
60 58
61 crypto_blkcipher_set_flags( 59 crypto_blkcipher_set_flags(
62 fallback, 60 fallback,
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index d83ab4bac8b1..7d070201b3d3 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -53,8 +53,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
53 alg, PTR_ERR(fallback)); 53 alg, PTR_ERR(fallback));
54 return PTR_ERR(fallback); 54 return PTR_ERR(fallback);
55 } 55 }
56 printk(KERN_INFO "Using '%s' as fallback implementation.\n",
57 crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
58 56
59 crypto_blkcipher_set_flags( 57 crypto_blkcipher_set_flags(
60 fallback, 58 fallback,
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 9cb3a0b715e2..84b9389bf1ed 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
64 alg, PTR_ERR(fallback)); 64 alg, PTR_ERR(fallback));
65 return PTR_ERR(fallback); 65 return PTR_ERR(fallback);
66 } 66 }
67 printk(KERN_INFO "Using '%s' as fallback implementation.\n",
68 crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
69 67
70 crypto_shash_set_flags(fallback, 68 crypto_shash_set_flags(fallback,
71 crypto_shash_get_flags((struct crypto_shash 69 crypto_shash_get_flags((struct crypto_shash
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index ca848cc6a8fd..4f6fc1cfd7da 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -583,7 +583,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
583 devfreq = devfreq_add_device(dev, profile, governor_name, data); 583 devfreq = devfreq_add_device(dev, profile, governor_name, data);
584 if (IS_ERR(devfreq)) { 584 if (IS_ERR(devfreq)) {
585 devres_free(ptr); 585 devres_free(ptr);
586 return ERR_PTR(-ENOMEM); 586 return devfreq;
587 } 587 }
588 588
589 *ptr = devfreq; 589 *ptr = devfreq;
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 53d22eb73b56..be26f625bb3e 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -716,7 +716,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
716 unsigned long flags) 716 unsigned long flags)
717{ 717{
718 struct at_dma_chan *atchan = to_at_dma_chan(chan); 718 struct at_dma_chan *atchan = to_at_dma_chan(chan);
719 struct data_chunk *first = xt->sgl; 719 struct data_chunk *first;
720 struct at_desc *desc = NULL; 720 struct at_desc *desc = NULL;
721 size_t xfer_count; 721 size_t xfer_count;
722 unsigned int dwidth; 722 unsigned int dwidth;
@@ -728,6 +728,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
728 if (unlikely(!xt || xt->numf != 1 || !xt->frame_size)) 728 if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
729 return NULL; 729 return NULL;
730 730
731 first = xt->sgl;
732
731 dev_info(chan2dev(chan), 733 dev_info(chan2dev(chan),
732 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", 734 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
733 __func__, &xt->src_start, &xt->dst_start, xt->numf, 735 __func__, &xt->src_start, &xt->dst_start, xt->numf,
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 66c073fc8afc..82a7c89caae2 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1473,10 +1473,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1473 for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { 1473 for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1474 check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; 1474 check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1475 rmb(); 1475 rmb();
1476 initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1477 rmb();
1478 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); 1476 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1479 rmb(); 1477 rmb();
1478 initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1479 rmb();
1480 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; 1480 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1481 rmb(); 1481 rmb();
1482 1482
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 7638b24ce8d0..35fc58f4bf4b 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -557,7 +557,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
557 557
558 ret = dma_async_device_register(dd); 558 ret = dma_async_device_register(dd);
559 if (ret) 559 if (ret)
560 return ret; 560 goto err_clk;
561 561
562 irq = platform_get_irq(pdev, 0); 562 irq = platform_get_irq(pdev, 0);
563 ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev); 563 ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
@@ -570,6 +570,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
570 570
571err_unregister: 571err_unregister:
572 dma_async_device_unregister(dd); 572 dma_async_device_unregister(dd);
573err_clk:
574 clk_disable_unprepare(dmadev->clk);
573 return ret; 575 return ret;
574} 576}
575 577
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 7254c20007f8..6796eb1a8a4c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -329,7 +329,7 @@ static void dmatest_callback(void *arg)
329{ 329{
330 struct dmatest_done *done = arg; 330 struct dmatest_done *done = arg;
331 struct dmatest_thread *thread = 331 struct dmatest_thread *thread =
332 container_of(arg, struct dmatest_thread, done_wait); 332 container_of(done, struct dmatest_thread, test_done);
333 if (!thread->done) { 333 if (!thread->done) {
334 done->done = true; 334 done->done = true;
335 wake_up_all(done->wait); 335 wake_up_all(done->wait);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 0f6fd42f55ca..48d4dddf4941 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -911,6 +911,21 @@ static int sdma_disable_channel(struct dma_chan *chan)
911 return 0; 911 return 0;
912} 912}
913 913
914static int sdma_disable_channel_with_delay(struct dma_chan *chan)
915{
916 sdma_disable_channel(chan);
917
918 /*
919 * According to NXP R&D team a delay of one BD SDMA cost time
920 * (maximum is 1ms) should be added after disable of the channel
921 * bit, to ensure SDMA core has really been stopped after SDMA
922 * clients call .device_terminate_all.
923 */
924 mdelay(1);
925
926 return 0;
927}
928
914static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) 929static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
915{ 930{
916 struct sdma_engine *sdma = sdmac->sdma; 931 struct sdma_engine *sdma = sdmac->sdma;
@@ -1707,17 +1722,24 @@ static int sdma_probe(struct platform_device *pdev)
1707 if (IS_ERR(sdma->clk_ahb)) 1722 if (IS_ERR(sdma->clk_ahb))
1708 return PTR_ERR(sdma->clk_ahb); 1723 return PTR_ERR(sdma->clk_ahb);
1709 1724
1710 clk_prepare(sdma->clk_ipg); 1725 ret = clk_prepare(sdma->clk_ipg);
1711 clk_prepare(sdma->clk_ahb); 1726 if (ret)
1727 return ret;
1728
1729 ret = clk_prepare(sdma->clk_ahb);
1730 if (ret)
1731 goto err_clk;
1712 1732
1713 ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma", 1733 ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
1714 sdma); 1734 sdma);
1715 if (ret) 1735 if (ret)
1716 return ret; 1736 goto err_irq;
1717 1737
1718 sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); 1738 sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
1719 if (!sdma->script_addrs) 1739 if (!sdma->script_addrs) {
1720 return -ENOMEM; 1740 ret = -ENOMEM;
1741 goto err_irq;
1742 }
1721 1743
1722 /* initially no scripts available */ 1744 /* initially no scripts available */
1723 saddr_arr = (s32 *)sdma->script_addrs; 1745 saddr_arr = (s32 *)sdma->script_addrs;
@@ -1793,7 +1815,7 @@ static int sdma_probe(struct platform_device *pdev)
1793 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; 1815 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
1794 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; 1816 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1795 sdma->dma_device.device_config = sdma_config; 1817 sdma->dma_device.device_config = sdma_config;
1796 sdma->dma_device.device_terminate_all = sdma_disable_channel; 1818 sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
1797 sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 1819 sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1798 sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 1820 sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1799 sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 1821 sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
@@ -1832,6 +1854,10 @@ err_register:
1832 dma_async_device_unregister(&sdma->dma_device); 1854 dma_async_device_unregister(&sdma->dma_device);
1833err_init: 1855err_init:
1834 kfree(sdma->script_addrs); 1856 kfree(sdma->script_addrs);
1857err_irq:
1858 clk_unprepare(sdma->clk_ahb);
1859err_clk:
1860 clk_unprepare(sdma->clk_ipg);
1835 return ret; 1861 return ret;
1836} 1862}
1837 1863
@@ -1842,6 +1868,8 @@ static int sdma_remove(struct platform_device *pdev)
1842 1868
1843 dma_async_device_unregister(&sdma->dma_device); 1869 dma_async_device_unregister(&sdma->dma_device);
1844 kfree(sdma->script_addrs); 1870 kfree(sdma->script_addrs);
1871 clk_unprepare(sdma->clk_ahb);
1872 clk_unprepare(sdma->clk_ipg);
1845 /* Kill the tasklet */ 1873 /* Kill the tasklet */
1846 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 1874 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1847 struct sdma_channel *sdmac = &sdma->channel[i]; 1875 struct sdma_channel *sdmac = &sdma->channel[i];
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index abb75ebd65ea..ac8c28968422 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -395,7 +395,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
395 if (memcmp(src, dest, IOAT_TEST_SIZE)) { 395 if (memcmp(src, dest, IOAT_TEST_SIZE)) {
396 dev_err(dev, "Self-test copy failed compare, disabling\n"); 396 dev_err(dev, "Self-test copy failed compare, disabling\n");
397 err = -ENODEV; 397 err = -ENODEV;
398 goto free_resources; 398 goto unmap_dma;
399 } 399 }
400 400
401unmap_dma: 401unmap_dma:
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 66d84bcf9bbf..8db791ef2027 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1533,7 +1533,7 @@ static void pl330_dotask(unsigned long data)
1533/* Returns 1 if state was updated, 0 otherwise */ 1533/* Returns 1 if state was updated, 0 otherwise */
1534static int pl330_update(struct pl330_dmac *pl330) 1534static int pl330_update(struct pl330_dmac *pl330)
1535{ 1535{
1536 struct dma_pl330_desc *descdone, *tmp; 1536 struct dma_pl330_desc *descdone;
1537 unsigned long flags; 1537 unsigned long flags;
1538 void __iomem *regs; 1538 void __iomem *regs;
1539 u32 val; 1539 u32 val;
@@ -1611,7 +1611,9 @@ static int pl330_update(struct pl330_dmac *pl330)
1611 } 1611 }
1612 1612
1613 /* Now that we are in no hurry, do the callbacks */ 1613 /* Now that we are in no hurry, do the callbacks */
1614 list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) { 1614 while (!list_empty(&pl330->req_done)) {
1615 descdone = list_first_entry(&pl330->req_done,
1616 struct dma_pl330_desc, rqd);
1615 list_del(&descdone->rqd); 1617 list_del(&descdone->rqd);
1616 spin_unlock_irqrestore(&pl330->lock, flags); 1618 spin_unlock_irqrestore(&pl330->lock, flags);
1617 dma_pl330_rqcb(descdone, PL330_ERR_NONE); 1619 dma_pl330_rqcb(descdone, PL330_ERR_NONE);
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 55f5d33f6dc7..4251e9ac0373 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1321,7 +1321,7 @@ static int pxad_init_phys(struct platform_device *op,
1321 return 0; 1321 return 0;
1322} 1322}
1323 1323
1324static const struct of_device_id const pxad_dt_ids[] = { 1324static const struct of_device_id pxad_dt_ids[] = {
1325 { .compatible = "marvell,pdma-1.0", }, 1325 { .compatible = "marvell,pdma-1.0", },
1326 {} 1326 {}
1327}; 1327};
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 7820d07e7bee..2b36d1c63aa5 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -851,7 +851,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
851 851
852 rcar_dmac_chan_configure_desc(chan, desc); 852 rcar_dmac_chan_configure_desc(chan, desc);
853 853
854 max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift; 854 max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
855 855
856 /* 856 /*
857 * Allocate and fill the transfer chunk descriptors. We own the only 857 * Allocate and fill the transfer chunk descriptors. We own the only
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 56410ea75ac5..6682b3eec2b6 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -448,7 +448,7 @@ usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
448static int usb_dmac_chan_terminate_all(struct dma_chan *chan) 448static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
449{ 449{
450 struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); 450 struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
451 struct usb_dmac_desc *desc; 451 struct usb_dmac_desc *desc, *_desc;
452 unsigned long flags; 452 unsigned long flags;
453 LIST_HEAD(head); 453 LIST_HEAD(head);
454 LIST_HEAD(list); 454 LIST_HEAD(list);
@@ -459,7 +459,7 @@ static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
459 if (uchan->desc) 459 if (uchan->desc)
460 uchan->desc = NULL; 460 uchan->desc = NULL;
461 list_splice_init(&uchan->desc_got, &list); 461 list_splice_init(&uchan->desc_got, &list);
462 list_for_each_entry(desc, &list, node) 462 list_for_each_entry_safe(desc, _desc, &list, node)
463 list_move_tail(&desc->node, &uchan->desc_freed); 463 list_move_tail(&desc->node, &uchan->desc_freed);
464 spin_unlock_irqrestore(&uchan->vc.lock, flags); 464 spin_unlock_irqrestore(&uchan->vc.lock, flags);
465 vchan_dma_desc_free_list(&uchan->vc, &head); 465 vchan_dma_desc_free_list(&uchan->vc, &head);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index c0d1e5e423db..9a6b07f68951 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -50,7 +50,15 @@ struct ti_am335x_xbar_map {
50 50
51static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val) 51static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
52{ 52{
53 writeb_relaxed(val, iomem + event); 53 /*
54 * TPCC_EVT_MUX_60_63 register layout is different than the
55 * rest, in the sense, that event 63 is mapped to lowest byte
56 * and event 60 is mapped to highest, handle it separately.
57 */
58 if (event >= 60 && event <= 63)
59 writeb_relaxed(val, iomem + (63 - event % 4));
60 else
61 writeb_relaxed(val, iomem + event);
54} 62}
55 63
56static void ti_am335x_xbar_free(struct device *dev, void *route_data) 64static void ti_am335x_xbar_free(struct device *dev, void *route_data)
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 6059d81e701a..8e55403847b2 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -26,7 +26,7 @@
26 26
27#define DRIVER_NAME "zx-dma" 27#define DRIVER_NAME "zx-dma"
28#define DMA_ALIGN 4 28#define DMA_ALIGN 4
29#define DMA_MAX_SIZE (0x10000 - PAGE_SIZE) 29#define DMA_MAX_SIZE (0x10000 - 512)
30#define LLI_BLOCK_SIZE (4 * PAGE_SIZE) 30#define LLI_BLOCK_SIZE (4 * PAGE_SIZE)
31 31
32#define REG_ZX_SRC_ADDR 0x00 32#define REG_ZX_SRC_ADDR 0x00
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 0574e1bbe45c..3ce5609b4611 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -763,7 +763,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
763 /* Non-ECC RAM? */ 763 /* Non-ECC RAM? */
764 printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__); 764 printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
765 res = -ENODEV; 765 res = -ENODEV;
766 goto err2; 766 goto err;
767 } 767 }
768 768
769 edac_dbg(3, "init mci\n"); 769 edac_dbg(3, "init mci\n");
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index cda6dab5067a..6b65a102b49d 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -79,6 +79,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
79 if (!pvt->inject) 79 if (!pvt->inject)
80 int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx)); 80 int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
81 else { 81 else {
82 int_reg.u64 = 0;
82 if (pvt->error_type == 1) 83 if (pvt->error_type == 1)
83 int_reg.s.sec_err = 1; 84 int_reg.s.sec_err = 1;
84 if (pvt->error_type == 2) 85 if (pvt->error_type == 2)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index c2f5117fd8cb..5545a7f3a98f 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1130,7 +1130,13 @@ static int context_add_buffer(struct context *ctx)
1130 return -ENOMEM; 1130 return -ENOMEM;
1131 1131
1132 offset = (void *)&desc->buffer - (void *)desc; 1132 offset = (void *)&desc->buffer - (void *)desc;
1133 desc->buffer_size = PAGE_SIZE - offset; 1133 /*
1134 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
1135 * for descriptors, even 0x10-byte ones. This can cause page faults when
1136 * an IOMMU is in use and the oversized read crosses a page boundary.
1137 * Work around this by always leaving at least 0x10 bytes of padding.
1138 */
1139 desc->buffer_size = PAGE_SIZE - offset - 0x10;
1134 desc->buffer_bus = bus_addr + offset; 1140 desc->buffer_bus = bus_addr + offset;
1135 desc->used = 0; 1141 desc->used = 0;
1136 1142
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 0e08e665f715..053a23a7be94 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -18,7 +18,7 @@ EXPORT_SYMBOL_GPL(dmi_kobj);
18 * of and an antecedent to, SMBIOS, which stands for System 18 * of and an antecedent to, SMBIOS, which stands for System
19 * Management BIOS. See further: http://www.dmtf.org/standards 19 * Management BIOS. See further: http://www.dmtf.org/standards
20 */ 20 */
21static const char dmi_empty_string[] = " "; 21static const char dmi_empty_string[] = "";
22 22
23static u32 dmi_ver __initdata; 23static u32 dmi_ver __initdata;
24static u32 dmi_len; 24static u32 dmi_len;
@@ -44,25 +44,21 @@ static int dmi_memdev_nr;
44static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s) 44static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
45{ 45{
46 const u8 *bp = ((u8 *) dm) + dm->length; 46 const u8 *bp = ((u8 *) dm) + dm->length;
47 const u8 *nsp;
47 48
48 if (s) { 49 if (s) {
49 s--; 50 while (--s > 0 && *bp)
50 while (s > 0 && *bp) {
51 bp += strlen(bp) + 1; 51 bp += strlen(bp) + 1;
52 s--;
53 }
54
55 if (*bp != 0) {
56 size_t len = strlen(bp)+1;
57 size_t cmp_len = len > 8 ? 8 : len;
58 52
59 if (!memcmp(bp, dmi_empty_string, cmp_len)) 53 /* Strings containing only spaces are considered empty */
60 return dmi_empty_string; 54 nsp = bp;
55 while (*nsp == ' ')
56 nsp++;
57 if (*nsp != '\0')
61 return bp; 58 return bp;
62 }
63 } 59 }
64 60
65 return ""; 61 return dmi_empty_string;
66} 62}
67 63
68static const char * __init dmi_string(const struct dmi_header *dm, u8 s) 64static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 5eaea8b812cf..089a78983b39 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -203,3 +203,6 @@ static struct platform_driver ath79_gpio_driver = {
203}; 203};
204 204
205module_platform_driver(ath79_gpio_driver); 205module_platform_driver(ath79_gpio_driver);
206
207MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support");
208MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index c50e930d97d3..297121acc57d 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -326,7 +326,7 @@ static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
326 } 326 }
327} 327}
328 328
329static int intel_gpio_runtime_idle(struct device *dev) 329static int __maybe_unused intel_gpio_runtime_idle(struct device *dev)
330{ 330{
331 int err = pm_schedule_suspend(dev, 500); 331 int err = pm_schedule_suspend(dev, 500);
332 return err ?: -EBUSY; 332 return err ?: -EBUSY;
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index 2ed0237a8baf..304e68633d29 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -129,3 +129,7 @@ static int __init iop3xx_gpio_init(void)
129 return platform_driver_register(&iop3xx_gpio_driver); 129 return platform_driver_register(&iop3xx_gpio_driver);
130} 130}
131arch_initcall(iop3xx_gpio_init); 131arch_initcall(iop3xx_gpio_init);
132
133MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
134MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
135MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 2a8122444614..9ba4aaa9f755 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -200,6 +200,48 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
200 return 0; 200 return 0;
201} 201}
202 202
203static void gpio_rcar_irq_bus_lock(struct irq_data *d)
204{
205 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
206 struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
207 gpio_chip);
208
209 pm_runtime_get_sync(&p->pdev->dev);
210}
211
212static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
213{
214 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
215 struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
216 gpio_chip);
217
218 pm_runtime_put(&p->pdev->dev);
219}
220
221
222static int gpio_rcar_irq_request_resources(struct irq_data *d)
223{
224 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
225 struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
226 gpio_chip);
227 int error;
228
229 error = pm_runtime_get_sync(&p->pdev->dev);
230 if (error < 0)
231 return error;
232
233 return 0;
234}
235
236static void gpio_rcar_irq_release_resources(struct irq_data *d)
237{
238 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
239 struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
240 gpio_chip);
241
242 pm_runtime_put(&p->pdev->dev);
243}
244
203static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) 245static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
204{ 246{
205 struct gpio_rcar_priv *p = dev_id; 247 struct gpio_rcar_priv *p = dev_id;
@@ -460,6 +502,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
460 irq_chip->irq_unmask = gpio_rcar_irq_enable; 502 irq_chip->irq_unmask = gpio_rcar_irq_enable;
461 irq_chip->irq_set_type = gpio_rcar_irq_set_type; 503 irq_chip->irq_set_type = gpio_rcar_irq_set_type;
462 irq_chip->irq_set_wake = gpio_rcar_irq_set_wake; 504 irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
505 irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
506 irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
507 irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
508 irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
463 irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND; 509 irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
464 510
465 ret = gpiochip_add(gpio_chip); 511 ret = gpiochip_add(gpio_chip);
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 18a8182d4fec..7f1f32324504 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -42,9 +42,7 @@ struct xgene_gpio {
42 struct gpio_chip chip; 42 struct gpio_chip chip;
43 void __iomem *base; 43 void __iomem *base;
44 spinlock_t lock; 44 spinlock_t lock;
45#ifdef CONFIG_PM
46 u32 set_dr_val[XGENE_MAX_GPIO_BANKS]; 45 u32 set_dr_val[XGENE_MAX_GPIO_BANKS];
47#endif
48}; 46};
49 47
50static inline struct xgene_gpio *to_xgene_gpio(struct gpio_chip *chip) 48static inline struct xgene_gpio *to_xgene_gpio(struct gpio_chip *chip)
@@ -132,8 +130,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
132 return 0; 130 return 0;
133} 131}
134 132
135#ifdef CONFIG_PM 133static __maybe_unused int xgene_gpio_suspend(struct device *dev)
136static int xgene_gpio_suspend(struct device *dev)
137{ 134{
138 struct xgene_gpio *gpio = dev_get_drvdata(dev); 135 struct xgene_gpio *gpio = dev_get_drvdata(dev);
139 unsigned long bank_offset; 136 unsigned long bank_offset;
@@ -146,7 +143,7 @@ static int xgene_gpio_suspend(struct device *dev)
146 return 0; 143 return 0;
147} 144}
148 145
149static int xgene_gpio_resume(struct device *dev) 146static __maybe_unused int xgene_gpio_resume(struct device *dev)
150{ 147{
151 struct xgene_gpio *gpio = dev_get_drvdata(dev); 148 struct xgene_gpio *gpio = dev_get_drvdata(dev);
152 unsigned long bank_offset; 149 unsigned long bank_offset;
@@ -160,10 +157,6 @@ static int xgene_gpio_resume(struct device *dev)
160} 157}
161 158
162static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume); 159static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
163#define XGENE_GPIO_PM_OPS (&xgene_gpio_pm)
164#else
165#define XGENE_GPIO_PM_OPS NULL
166#endif
167 160
168static int xgene_gpio_probe(struct platform_device *pdev) 161static int xgene_gpio_probe(struct platform_device *pdev)
169{ 162{
@@ -230,7 +223,7 @@ static struct platform_driver xgene_gpio_driver = {
230 .driver = { 223 .driver = {
231 .name = "xgene-gpio", 224 .name = "xgene-gpio",
232 .of_match_table = xgene_gpio_of_match, 225 .of_match_table = xgene_gpio_of_match,
233 .pm = XGENE_GPIO_PM_OPS, 226 .pm = &xgene_gpio_pm,
234 }, 227 },
235 .probe = xgene_gpio_probe, 228 .probe = xgene_gpio_probe,
236 .remove = xgene_gpio_remove, 229 .remove = xgene_gpio_remove,
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 06d345b087f8..fe89fd56eabf 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2117,6 +2117,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
2117 struct gpio_desc *desc = NULL; 2117 struct gpio_desc *desc = NULL;
2118 int status; 2118 int status;
2119 enum gpio_lookup_flags lookupflags = 0; 2119 enum gpio_lookup_flags lookupflags = 0;
2120 /* Maybe we have a device name, maybe not */
2121 const char *devname = dev ? dev_name(dev) : "?";
2120 2122
2121 dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id); 2123 dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
2122 2124
@@ -2145,7 +2147,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
2145 return desc; 2147 return desc;
2146 } 2148 }
2147 2149
2148 status = gpiod_request(desc, con_id); 2150 /*
2151 * If a connection label was passed use that, else attempt to use
2152 * the device name as label
2153 */
2154 status = gpiod_request(desc, con_id ? con_id : devname);
2149 if (status < 0) 2155 if (status < 0)
2150 return ERR_PTR(status); 2156 return ERR_PTR(status);
2151 2157
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index a142d5ae148d..5c40d6d710af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -585,6 +585,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
585 size_t size; 585 size_t size;
586 u32 retry = 3; 586 u32 retry = 3;
587 587
588 if (amdgpu_acpi_pcie_notify_device_ready(adev))
589 return -EINVAL;
590
588 /* Get the device handle */ 591 /* Get the device handle */
589 handle = ACPI_HANDLE(&adev->pdev->dev); 592 handle = ACPI_HANDLE(&adev->pdev->dev);
590 if (!handle) 593 if (!handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 0e1376317683..b233cf8436b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -367,29 +367,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
367{ 367{
368 struct amdgpu_device *adev = get_amdgpu_device(kgd); 368 struct amdgpu_device *adev = get_amdgpu_device(kgd);
369 struct cik_sdma_rlc_registers *m; 369 struct cik_sdma_rlc_registers *m;
370 unsigned long end_jiffies;
370 uint32_t sdma_base_addr; 371 uint32_t sdma_base_addr;
372 uint32_t data;
371 373
372 m = get_sdma_mqd(mqd); 374 m = get_sdma_mqd(mqd);
373 sdma_base_addr = get_sdma_base_addr(m); 375 sdma_base_addr = get_sdma_base_addr(m);
374 376
375 WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, 377 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
376 m->sdma_rlc_virtual_addr); 378 m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
377 379
378 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 380 end_jiffies = msecs_to_jiffies(2000) + jiffies;
379 m->sdma_rlc_rb_base); 381 while (true) {
382 data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
383 if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
384 break;
385 if (time_after(jiffies, end_jiffies))
386 return -ETIME;
387 usleep_range(500, 1000);
388 }
389 if (m->sdma_engine_id) {
390 data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
391 data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
392 RESUME_CTX, 0);
393 WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
394 } else {
395 data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
396 data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
397 RESUME_CTX, 0);
398 WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
399 }
380 400
401 WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
402 m->sdma_rlc_doorbell);
403 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
404 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
405 WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
406 m->sdma_rlc_virtual_addr);
407 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
381 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, 408 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
382 m->sdma_rlc_rb_base_hi); 409 m->sdma_rlc_rb_base_hi);
383
384 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, 410 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
385 m->sdma_rlc_rb_rptr_addr_lo); 411 m->sdma_rlc_rb_rptr_addr_lo);
386
387 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, 412 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
388 m->sdma_rlc_rb_rptr_addr_hi); 413 m->sdma_rlc_rb_rptr_addr_hi);
389
390 WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
391 m->sdma_rlc_doorbell);
392
393 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, 414 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
394 m->sdma_rlc_rb_cntl); 415 m->sdma_rlc_rb_cntl);
395 416
@@ -492,9 +513,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
492 } 513 }
493 514
494 WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); 515 WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
495 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); 516 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
496 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); 517 RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
497 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0); 518 SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
498 519
499 return 0; 520 return 0;
500} 521}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 930083336968..1f0e6ede120c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
69 /* don't do anything if sink is not display port, i.e., 69 /* don't do anything if sink is not display port, i.e.,
70 * passive dp->(dvi|hdmi) adaptor 70 * passive dp->(dvi|hdmi) adaptor
71 */ 71 */
72 if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 72 if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
73 int saved_dpms = connector->dpms; 73 amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
74 /* Only turn off the display if it's physically disconnected */ 74 amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
75 if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { 75 /* Don't start link training before we have the DPCD */
76 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 76 if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
77 } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { 77 return;
78 /* Don't try to start link training before we 78
79 * have the dpcd */ 79 /* Turn the connector off and back on immediately, which
80 if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) 80 * will trigger link training
81 return; 81 */
82 82 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
83 /* set it to OFF so that drm_helper_connector_dpms() 83 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
84 * won't return immediately since the current state
85 * is ON at this point.
86 */
87 connector->dpms = DRM_MODE_DPMS_OFF;
88 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
89 }
90 connector->dpms = saved_dpms;
91 } 84 }
92 } 85 }
93} 86}
@@ -739,9 +732,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
739 enum drm_connector_status ret = connector_status_disconnected; 732 enum drm_connector_status ret = connector_status_disconnected;
740 int r; 733 int r;
741 734
742 r = pm_runtime_get_sync(connector->dev->dev); 735 if (!drm_kms_helper_is_poll_worker()) {
743 if (r < 0) 736 r = pm_runtime_get_sync(connector->dev->dev);
744 return connector_status_disconnected; 737 if (r < 0)
738 return connector_status_disconnected;
739 }
745 740
746 if (encoder) { 741 if (encoder) {
747 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 742 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -760,8 +755,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
760 /* check acpi lid status ??? */ 755 /* check acpi lid status ??? */
761 756
762 amdgpu_connector_update_scratch_regs(connector, ret); 757 amdgpu_connector_update_scratch_regs(connector, ret);
763 pm_runtime_mark_last_busy(connector->dev->dev); 758
764 pm_runtime_put_autosuspend(connector->dev->dev); 759 if (!drm_kms_helper_is_poll_worker()) {
760 pm_runtime_mark_last_busy(connector->dev->dev);
761 pm_runtime_put_autosuspend(connector->dev->dev);
762 }
763
765 return ret; 764 return ret;
766} 765}
767 766
@@ -862,9 +861,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
862 enum drm_connector_status ret = connector_status_disconnected; 861 enum drm_connector_status ret = connector_status_disconnected;
863 int r; 862 int r;
864 863
865 r = pm_runtime_get_sync(connector->dev->dev); 864 if (!drm_kms_helper_is_poll_worker()) {
866 if (r < 0) 865 r = pm_runtime_get_sync(connector->dev->dev);
867 return connector_status_disconnected; 866 if (r < 0)
867 return connector_status_disconnected;
868 }
868 869
869 encoder = amdgpu_connector_best_single_encoder(connector); 870 encoder = amdgpu_connector_best_single_encoder(connector);
870 if (!encoder) 871 if (!encoder)
@@ -918,8 +919,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
918 amdgpu_connector_update_scratch_regs(connector, ret); 919 amdgpu_connector_update_scratch_regs(connector, ret);
919 920
920out: 921out:
921 pm_runtime_mark_last_busy(connector->dev->dev); 922 if (!drm_kms_helper_is_poll_worker()) {
922 pm_runtime_put_autosuspend(connector->dev->dev); 923 pm_runtime_mark_last_busy(connector->dev->dev);
924 pm_runtime_put_autosuspend(connector->dev->dev);
925 }
923 926
924 return ret; 927 return ret;
925} 928}
@@ -981,9 +984,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
981 enum drm_connector_status ret = connector_status_disconnected; 984 enum drm_connector_status ret = connector_status_disconnected;
982 bool dret = false, broken_edid = false; 985 bool dret = false, broken_edid = false;
983 986
984 r = pm_runtime_get_sync(connector->dev->dev); 987 if (!drm_kms_helper_is_poll_worker()) {
985 if (r < 0) 988 r = pm_runtime_get_sync(connector->dev->dev);
986 return connector_status_disconnected; 989 if (r < 0)
990 return connector_status_disconnected;
991 }
987 992
988 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { 993 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
989 ret = connector->status; 994 ret = connector->status;
@@ -1108,8 +1113,10 @@ out:
1108 amdgpu_connector_update_scratch_regs(connector, ret); 1113 amdgpu_connector_update_scratch_regs(connector, ret);
1109 1114
1110exit: 1115exit:
1111 pm_runtime_mark_last_busy(connector->dev->dev); 1116 if (!drm_kms_helper_is_poll_worker()) {
1112 pm_runtime_put_autosuspend(connector->dev->dev); 1117 pm_runtime_mark_last_busy(connector->dev->dev);
1118 pm_runtime_put_autosuspend(connector->dev->dev);
1119 }
1113 1120
1114 return ret; 1121 return ret;
1115} 1122}
@@ -1351,9 +1358,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
1351 struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); 1358 struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
1352 int r; 1359 int r;
1353 1360
1354 r = pm_runtime_get_sync(connector->dev->dev); 1361 if (!drm_kms_helper_is_poll_worker()) {
1355 if (r < 0) 1362 r = pm_runtime_get_sync(connector->dev->dev);
1356 return connector_status_disconnected; 1363 if (r < 0)
1364 return connector_status_disconnected;
1365 }
1357 1366
1358 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { 1367 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
1359 ret = connector->status; 1368 ret = connector->status;
@@ -1421,8 +1430,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
1421 1430
1422 amdgpu_connector_update_scratch_regs(connector, ret); 1431 amdgpu_connector_update_scratch_regs(connector, ret);
1423out: 1432out:
1424 pm_runtime_mark_last_busy(connector->dev->dev); 1433 if (!drm_kms_helper_is_poll_worker()) {
1425 pm_runtime_put_autosuspend(connector->dev->dev); 1434 pm_runtime_mark_last_busy(connector->dev->dev);
1435 pm_runtime_put_autosuspend(connector->dev->dev);
1436 }
1426 1437
1427 return ret; 1438 return ret;
1428} 1439}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index fc9f14747f70..a36230d1331c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1467,8 +1467,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1467 * ignore it */ 1467 * ignore it */
1468 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode); 1468 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1469 1469
1470 if (amdgpu_runtime_pm == 1)
1471 runtime = true;
1472 if (amdgpu_device_is_px(ddev)) 1470 if (amdgpu_device_is_px(ddev))
1473 runtime = true; 1471 runtime = true;
1474 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime); 1472 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 82903ca78529..c555781685ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -560,6 +560,12 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
560 return ERR_PTR(-ENOENT); 560 return ERR_PTR(-ENOENT);
561 } 561 }
562 562
563 /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
564 if (obj->import_attach) {
565 DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
566 return ERR_PTR(-EINVAL);
567 }
568
563 amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); 569 amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
564 if (amdgpu_fb == NULL) { 570 if (amdgpu_fb == NULL) {
565 drm_gem_object_unreference_unlocked(obj); 571 drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index b57fffc2d4af..0a91261b6f5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2104,34 +2104,8 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
2104 case CHIP_KAVERI: 2104 case CHIP_KAVERI:
2105 adev->gfx.config.max_shader_engines = 1; 2105 adev->gfx.config.max_shader_engines = 1;
2106 adev->gfx.config.max_tile_pipes = 4; 2106 adev->gfx.config.max_tile_pipes = 4;
2107 if ((adev->pdev->device == 0x1304) || 2107 adev->gfx.config.max_cu_per_sh = 8;
2108 (adev->pdev->device == 0x1305) || 2108 adev->gfx.config.max_backends_per_se = 2;
2109 (adev->pdev->device == 0x130C) ||
2110 (adev->pdev->device == 0x130F) ||
2111 (adev->pdev->device == 0x1310) ||
2112 (adev->pdev->device == 0x1311) ||
2113 (adev->pdev->device == 0x131C)) {
2114 adev->gfx.config.max_cu_per_sh = 8;
2115 adev->gfx.config.max_backends_per_se = 2;
2116 } else if ((adev->pdev->device == 0x1309) ||
2117 (adev->pdev->device == 0x130A) ||
2118 (adev->pdev->device == 0x130D) ||
2119 (adev->pdev->device == 0x1313) ||
2120 (adev->pdev->device == 0x131D)) {
2121 adev->gfx.config.max_cu_per_sh = 6;
2122 adev->gfx.config.max_backends_per_se = 2;
2123 } else if ((adev->pdev->device == 0x1306) ||
2124 (adev->pdev->device == 0x1307) ||
2125 (adev->pdev->device == 0x130B) ||
2126 (adev->pdev->device == 0x130E) ||
2127 (adev->pdev->device == 0x1315) ||
2128 (adev->pdev->device == 0x131B)) {
2129 adev->gfx.config.max_cu_per_sh = 4;
2130 adev->gfx.config.max_backends_per_se = 1;
2131 } else {
2132 adev->gfx.config.max_cu_per_sh = 3;
2133 adev->gfx.config.max_backends_per_se = 1;
2134 }
2135 adev->gfx.config.max_sh_per_se = 1; 2109 adev->gfx.config.max_sh_per_se = 1;
2136 adev->gfx.config.max_texture_channel_caches = 4; 2110 adev->gfx.config.max_texture_channel_caches = 4;
2137 adev->gfx.config.max_gprs = 256; 2111 adev->gfx.config.max_gprs = 256;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index d83de985e88c..8577a563600f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -215,8 +215,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
215 BUG_ON(!mm || !mqd || !q); 215 BUG_ON(!mm || !mqd || !q);
216 216
217 m = get_sdma_mqd(mqd); 217 m = get_sdma_mqd(mqd);
218 m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) << 218 m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
219 SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | 219 << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
220 q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | 220 q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
221 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | 221 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
222 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT; 222 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7b69070f7ecc..aa41b840048f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -205,6 +205,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
205 205
206 switch (type) { 206 switch (type) {
207 case KFD_QUEUE_TYPE_SDMA: 207 case KFD_QUEUE_TYPE_SDMA:
208 if (dev->dqm->queue_count >=
209 CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
210 pr_err("Over-subscription is not allowed for SDMA.\n");
211 retval = -EPERM;
212 goto err_create_queue;
213 }
214
215 retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
216 if (retval != 0)
217 goto err_create_queue;
218 pqn->q = q;
219 pqn->kq = NULL;
220 retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
221 &q->properties.vmid);
222 pr_debug("DQM returned %d for create_queue\n", retval);
223 print_queue(q);
224 break;
225
208 case KFD_QUEUE_TYPE_COMPUTE: 226 case KFD_QUEUE_TYPE_COMPUTE:
209 /* check if there is over subscription */ 227 /* check if there is over subscription */
210 if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && 228 if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 74909e72a009..2acbd43f9a53 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -519,11 +519,17 @@ static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
519 return ret; 519 return ret;
520} 520}
521 521
522static void kfd_topology_kobj_release(struct kobject *kobj)
523{
524 kfree(kobj);
525}
526
522static const struct sysfs_ops sysprops_ops = { 527static const struct sysfs_ops sysprops_ops = {
523 .show = sysprops_show, 528 .show = sysprops_show,
524}; 529};
525 530
526static struct kobj_type sysprops_type = { 531static struct kobj_type sysprops_type = {
532 .release = kfd_topology_kobj_release,
527 .sysfs_ops = &sysprops_ops, 533 .sysfs_ops = &sysprops_ops,
528}; 534};
529 535
@@ -559,6 +565,7 @@ static const struct sysfs_ops iolink_ops = {
559}; 565};
560 566
561static struct kobj_type iolink_type = { 567static struct kobj_type iolink_type = {
568 .release = kfd_topology_kobj_release,
562 .sysfs_ops = &iolink_ops, 569 .sysfs_ops = &iolink_ops,
563}; 570};
564 571
@@ -586,6 +593,7 @@ static const struct sysfs_ops mem_ops = {
586}; 593};
587 594
588static struct kobj_type mem_type = { 595static struct kobj_type mem_type = {
596 .release = kfd_topology_kobj_release,
589 .sysfs_ops = &mem_ops, 597 .sysfs_ops = &mem_ops,
590}; 598};
591 599
@@ -625,6 +633,7 @@ static const struct sysfs_ops cache_ops = {
625}; 633};
626 634
627static struct kobj_type cache_type = { 635static struct kobj_type cache_type = {
636 .release = kfd_topology_kobj_release,
628 .sysfs_ops = &cache_ops, 637 .sysfs_ops = &cache_ops,
629}; 638};
630 639
@@ -747,6 +756,7 @@ static const struct sysfs_ops node_ops = {
747}; 756};
748 757
749static struct kobj_type node_type = { 758static struct kobj_type node_type = {
759 .release = kfd_topology_kobj_release,
750 .sysfs_ops = &node_ops, 760 .sysfs_ops = &node_ops,
751}; 761};
752 762
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index cebcab560626..5d68189176cc 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -1182,17 +1182,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1182 1182
1183 ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", 1183 ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
1184 dcrtc); 1184 dcrtc);
1185 if (ret < 0) { 1185 if (ret < 0)
1186 kfree(dcrtc); 1186 goto err_crtc;
1187 return ret;
1188 }
1189 1187
1190 if (dcrtc->variant->init) { 1188 if (dcrtc->variant->init) {
1191 ret = dcrtc->variant->init(dcrtc, dev); 1189 ret = dcrtc->variant->init(dcrtc, dev);
1192 if (ret) { 1190 if (ret)
1193 kfree(dcrtc); 1191 goto err_crtc;
1194 return ret;
1195 }
1196 } 1192 }
1197 1193
1198 /* Ensure AXI pipeline is enabled */ 1194 /* Ensure AXI pipeline is enabled */
@@ -1203,13 +1199,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1203 dcrtc->crtc.port = port; 1199 dcrtc->crtc.port = port;
1204 1200
1205 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 1201 primary = kzalloc(sizeof(*primary), GFP_KERNEL);
1206 if (!primary) 1202 if (!primary) {
1207 return -ENOMEM; 1203 ret = -ENOMEM;
1204 goto err_crtc;
1205 }
1208 1206
1209 ret = armada_drm_plane_init(primary); 1207 ret = armada_drm_plane_init(primary);
1210 if (ret) { 1208 if (ret) {
1211 kfree(primary); 1209 kfree(primary);
1212 return ret; 1210 goto err_crtc;
1213 } 1211 }
1214 1212
1215 ret = drm_universal_plane_init(drm, &primary->base, 0, 1213 ret = drm_universal_plane_init(drm, &primary->base, 0,
@@ -1219,7 +1217,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1219 DRM_PLANE_TYPE_PRIMARY); 1217 DRM_PLANE_TYPE_PRIMARY);
1220 if (ret) { 1218 if (ret) {
1221 kfree(primary); 1219 kfree(primary);
1222 return ret; 1220 goto err_crtc;
1223 } 1221 }
1224 1222
1225 ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL, 1223 ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
@@ -1238,6 +1236,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1238 1236
1239err_crtc_init: 1237err_crtc_init:
1240 primary->base.funcs->destroy(&primary->base); 1238 primary->base.funcs->destroy(&primary->base);
1239err_crtc:
1240 kfree(dcrtc);
1241
1241 return ret; 1242 return ret;
1242} 1243}
1243 1244
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 4ce52c69dfb3..76eace9ff6ab 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1044,7 +1044,9 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
1044{ 1044{
1045 struct drm_plane *plane = plane_state->plane; 1045 struct drm_plane *plane = plane_state->plane;
1046 struct drm_crtc_state *crtc_state; 1046 struct drm_crtc_state *crtc_state;
1047 1047 /* Nothing to do for same crtc*/
1048 if (plane_state->crtc == crtc)
1049 return 0;
1048 if (plane_state->crtc) { 1050 if (plane_state->crtc) {
1049 crtc_state = drm_atomic_get_crtc_state(plane_state->state, 1051 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
1050 plane_state->crtc); 1052 plane_state->crtc);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c0106fd9fae9..724f7cf52253 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -106,6 +106,9 @@ static struct edid_quirk {
106 /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ 106 /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
107 { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, 107 { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
108 108
109 /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
110 { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
111
109 /* Belinea 10 15 55 */ 112 /* Belinea 10 15 55 */
110 { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, 113 { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
111 { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, 114 { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -3216,8 +3219,7 @@ monitor_name(struct detailed_timing *t, void *data)
3216 * @edid: EDID to parse 3219 * @edid: EDID to parse
3217 * 3220 *
3218 * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The 3221 * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
3219 * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to 3222 * HDCP and Port_ID ELD fields are left for the graphics driver to fill in.
3220 * fill in.
3221 */ 3223 */
3222void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) 3224void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
3223{ 3225{
@@ -3290,6 +3292,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
3290 } 3292 }
3291 eld[5] |= sad_count << 4; 3293 eld[5] |= sad_count << 4;
3292 3294
3295 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
3296 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3297 eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP;
3298 else
3299 eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI;
3300
3293 eld[DRM_ELD_BASELINE_ELD_LEN] = 3301 eld[DRM_ELD_BASELINE_ELD_LEN] =
3294 DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4); 3302 DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
3295 3303
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 6b5625e66119..88ceac091454 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -209,6 +209,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
209 return -ENOMEM; 209 return -ENOMEM;
210 210
211 filp->private_data = priv; 211 filp->private_data = priv;
212 filp->f_mode |= FMODE_UNSIGNED_OFFSET;
212 priv->filp = filp; 213 priv->filp = filp;
213 priv->uid = current_euid(); 214 priv->uid = current_euid();
214 priv->pid = get_pid(task_pid(current)); 215 priv->pid = get_pid(task_pid(current));
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8090989185b2..4ddbc49125cd 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1271,9 +1271,9 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
1271 if (atomic_dec_and_test(&vblank->refcount)) { 1271 if (atomic_dec_and_test(&vblank->refcount)) {
1272 if (drm_vblank_offdelay == 0) 1272 if (drm_vblank_offdelay == 0)
1273 return; 1273 return;
1274 else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0) 1274 else if (drm_vblank_offdelay < 0)
1275 vblank_disable_fn((unsigned long)vblank); 1275 vblank_disable_fn((unsigned long)vblank);
1276 else 1276 else if (!dev->vblank_disable_immediate)
1277 mod_timer(&vblank->disable_timer, 1277 mod_timer(&vblank->disable_timer,
1278 jiffies + ((drm_vblank_offdelay * HZ)/1000)); 1278 jiffies + ((drm_vblank_offdelay * HZ)/1000));
1279 } 1279 }
@@ -1902,6 +1902,16 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
1902 wake_up(&vblank->queue); 1902 wake_up(&vblank->queue);
1903 drm_handle_vblank_events(dev, pipe); 1903 drm_handle_vblank_events(dev, pipe);
1904 1904
1905 /* With instant-off, we defer disabling the interrupt until after
1906 * we finish processing the following vblank. The disable has to
1907 * be last (after drm_handle_vblank_events) so that the timestamp
1908 * is always accurate.
1909 */
1910 if (dev->vblank_disable_immediate &&
1911 drm_vblank_offdelay > 0 &&
1912 !atomic_read(&vblank->refcount))
1913 vblank_disable_fn((unsigned long)vblank);
1914
1905 spin_unlock_irqrestore(&dev->event_lock, irqflags); 1915 spin_unlock_irqrestore(&dev->event_lock, irqflags);
1906 1916
1907 return true; 1917 return true;
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index c2f5971146ba..220eee1c1ef7 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -76,7 +76,7 @@ void drm_modeset_lock_all(struct drm_device *dev)
76 struct drm_modeset_acquire_ctx *ctx; 76 struct drm_modeset_acquire_ctx *ctx;
77 int ret; 77 int ret;
78 78
79 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 79 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
80 if (WARN_ON(!ctx)) 80 if (WARN_ON(!ctx))
81 return; 81 return;
82 82
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 5c1633419cf7..57af40b44423 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -418,6 +418,26 @@ out:
418} 418}
419 419
420/** 420/**
421 * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
422 *
423 * Determine if %current task is an output poll worker. This can be used
424 * to select distinct code paths for output polling versus other contexts.
425 *
426 * One use case is to avoid a deadlock between the output poll worker and
427 * the autosuspend worker wherein the latter waits for polling to finish
428 * upon calling drm_kms_helper_poll_disable(), while the former waits for
429 * runtime suspend to finish upon calling pm_runtime_get_sync() in a
430 * connector ->detect hook.
431 */
432bool drm_kms_helper_is_poll_worker(void)
433{
434 struct work_struct *work = current_work();
435
436 return work && work->func == output_poll_execute;
437}
438EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
439
440/**
421 * drm_kms_helper_poll_disable - disable output polling 441 * drm_kms_helper_poll_disable - disable output polling
422 * @dev: drm_device 442 * @dev: drm_device
423 * 443 *
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
index 30496134a3d0..d7cbe53c4c01 100644
--- a/drivers/gpu/drm/exynos/regs-fimc.h
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -569,7 +569,7 @@
569#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26) 569#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
570#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26) 570#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
571#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26) 571#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
572#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0)) 572#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
573 573
574/* Real input DMA size register */ 574/* Real input DMA size register */
575#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31) 575#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d4813e03f5ee..00275c3856ce 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -821,14 +821,18 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
821 struct drm_device *dev = dsi_config->dev; 821 struct drm_device *dev = dsi_config->dev;
822 struct drm_psb_private *dev_priv = dev->dev_private; 822 struct drm_psb_private *dev_priv = dev->dev_private;
823 int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder); 823 int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
824
825 u32 pipeconf_reg = PIPEACONF; 824 u32 pipeconf_reg = PIPEACONF;
826 u32 dspcntr_reg = DSPACNTR; 825 u32 dspcntr_reg = DSPACNTR;
826 u32 pipeconf, dspcntr;
827 827
828 u32 pipeconf = dev_priv->pipeconf[pipe];
829 u32 dspcntr = dev_priv->dspcntr[pipe];
830 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; 828 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
831 829
830 if (WARN_ON(pipe < 0))
831 return;
832
833 pipeconf = dev_priv->pipeconf[pipe];
834 dspcntr = dev_priv->dspcntr[pipe];
835
832 if (pipe) { 836 if (pipe) {
833 pipeconf_reg = PIPECCONF; 837 pipeconf_reg = PIPECCONF;
834 dspcntr_reg = DSPCCNTR; 838 dspcntr_reg = DSPCCNTR;
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 89f705c3a5eb..910a2f253990 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -382,16 +382,6 @@ static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
382 return MODE_OK; 382 return MODE_OK;
383} 383}
384 384
385static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
386{
387 if (mode == connector->dpms)
388 return;
389
390 /*first, execute dpms*/
391
392 drm_helper_connector_dpms(connector, mode);
393}
394
395static struct drm_encoder *mdfld_dsi_connector_best_encoder( 385static struct drm_encoder *mdfld_dsi_connector_best_encoder(
396 struct drm_connector *connector) 386 struct drm_connector *connector)
397{ 387{
@@ -404,7 +394,7 @@ static struct drm_encoder *mdfld_dsi_connector_best_encoder(
404 394
405/*DSI connector funcs*/ 395/*DSI connector funcs*/
406static const struct drm_connector_funcs mdfld_dsi_connector_funcs = { 396static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
407 .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms, 397 .dpms = drm_helper_connector_dpms,
408 .save = mdfld_dsi_connector_save, 398 .save = mdfld_dsi_connector_save,
409 .restore = mdfld_dsi_connector_restore, 399 .restore = mdfld_dsi_connector_restore,
410 .detect = mdfld_dsi_connector_detect, 400 .detect = mdfld_dsi_connector_detect,
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 860dd2177ca1..283570080d47 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -252,7 +252,7 @@ extern int intelfb_remove(struct drm_device *dev,
252extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, 252extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
253 const struct drm_display_mode *mode, 253 const struct drm_display_mode *mode,
254 struct drm_display_mode *adjusted_mode); 254 struct drm_display_mode *adjusted_mode);
255extern int psb_intel_lvds_mode_valid(struct drm_connector *connector, 255extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
256 struct drm_display_mode *mode); 256 struct drm_display_mode *mode);
257extern int psb_intel_lvds_set_property(struct drm_connector *connector, 257extern int psb_intel_lvds_set_property(struct drm_connector *connector,
258 struct drm_property *property, 258 struct drm_property *property,
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 61e3a097a478..ccd1b8bf0fd5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -343,7 +343,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
343 } 343 }
344} 344}
345 345
346int psb_intel_lvds_mode_valid(struct drm_connector *connector, 346enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
347 struct drm_display_mode *mode) 347 struct drm_display_mode *mode)
348{ 348{
349 struct drm_psb_private *dev_priv = connector->dev->dev_private; 349 struct drm_psb_private *dev_priv = connector->dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 7f39b8ad88ae..de6710fe3ff4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -768,6 +768,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
768 DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"), 768 DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
769 }, 769 },
770 }, 770 },
771 {
772 .callback = intel_no_lvds_dmi_callback,
773 .ident = "Radiant P845",
774 .matches = {
775 DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
776 DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
777 },
778 },
771 779
772 { } /* terminating entry */ 780 { } /* terminating entry */
773}; 781};
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index c76cc853b08a..644faf3ae93a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -89,14 +89,17 @@ static struct page **get_pages(struct drm_gem_object *obj)
89 return p; 89 return p;
90 } 90 }
91 91
92 msm_obj->pages = p;
93
92 msm_obj->sgt = drm_prime_pages_to_sg(p, npages); 94 msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
93 if (IS_ERR(msm_obj->sgt)) { 95 if (IS_ERR(msm_obj->sgt)) {
96 void *ptr = ERR_CAST(msm_obj->sgt);
97
94 dev_err(dev->dev, "failed to allocate sgt\n"); 98 dev_err(dev->dev, "failed to allocate sgt\n");
95 return ERR_CAST(msm_obj->sgt); 99 msm_obj->sgt = NULL;
100 return ptr;
96 } 101 }
97 102
98 msm_obj->pages = p;
99
100 /* For non-cached buffers, ensure the new pages are clean 103 /* For non-cached buffers, ensure the new pages are clean
101 * because display controller, GPU, etc. are not coherent: 104 * because display controller, GPU, etc. are not coherent:
102 */ 105 */
@@ -119,7 +122,10 @@ static void put_pages(struct drm_gem_object *obj)
119 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 122 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
120 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, 123 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
121 msm_obj->sgt->nents, DMA_BIDIRECTIONAL); 124 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
122 sg_free_table(msm_obj->sgt); 125
126 if (msm_obj->sgt)
127 sg_free_table(msm_obj->sgt);
128
123 kfree(msm_obj->sgt); 129 kfree(msm_obj->sgt);
124 130
125 if (use_pages(obj)) 131 if (use_pages(obj))
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 2a5ed7460354..ababdaabe870 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -253,9 +253,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
253 nv_connector->edid = NULL; 253 nv_connector->edid = NULL;
254 } 254 }
255 255
256 ret = pm_runtime_get_sync(connector->dev->dev); 256 /* Outputs are only polled while runtime active, so acquiring a
257 if (ret < 0 && ret != -EACCES) 257 * runtime PM ref here is unnecessary (and would deadlock upon
258 return conn_status; 258 * runtime suspend because it waits for polling to finish).
259 */
260 if (!drm_kms_helper_is_poll_worker()) {
261 ret = pm_runtime_get_sync(connector->dev->dev);
262 if (ret < 0 && ret != -EACCES)
263 return conn_status;
264 }
259 265
260 nv_encoder = nouveau_connector_ddc_detect(connector); 266 nv_encoder = nouveau_connector_ddc_detect(connector);
261 if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { 267 if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
@@ -323,8 +329,10 @@ detect_analog:
323 329
324 out: 330 out:
325 331
326 pm_runtime_mark_last_busy(connector->dev->dev); 332 if (!drm_kms_helper_is_poll_worker()) {
327 pm_runtime_put_autosuspend(connector->dev->dev); 333 pm_runtime_mark_last_busy(connector->dev->dev);
334 pm_runtime_put_autosuspend(connector->dev->dev);
335 }
328 336
329 return conn_status; 337 return conn_status;
330} 338}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 00de1bf81519..9dfc2471ea09 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -104,7 +104,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
104 }; 104 };
105 struct nouveau_display *disp = nouveau_display(crtc->dev); 105 struct nouveau_display *disp = nouveau_display(crtc->dev);
106 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)]; 106 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
107 int ret, retry = 1; 107 int ret, retry = 20;
108 108
109 do { 109 do {
110 ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args)); 110 ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args));
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index a0865c49ec83..495c279da200 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -370,7 +370,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
370 struct nouveau_cli *cli = nouveau_cli(file_priv); 370 struct nouveau_cli *cli = nouveau_cli(file_priv);
371 struct drm_device *dev = chan->drm->dev; 371 struct drm_device *dev = chan->drm->dev;
372 int trycnt = 0; 372 int trycnt = 0;
373 int ret, i; 373 int ret = -EINVAL, i;
374 struct nouveau_bo *res_bo = NULL; 374 struct nouveau_bo *res_bo = NULL;
375 LIST_HEAD(gart_list); 375 LIST_HEAD(gart_list);
376 LIST_HEAD(vram_list); 376 LIST_HEAD(vram_list);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index 4896474da320..3021fcd0a3df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -127,6 +127,13 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
127 return ret; 127 return ret;
128 128
129 pci->irq = pdev->irq; 129 pci->irq = pdev->irq;
130
131 /* Ensure MSI interrupts are armed, for the case where there are
132 * already interrupts pending (for whatever reason) at load time.
133 */
134 if (pci->msi)
135 pci->func->msi_rearm(pci);
136
130 return ret; 137 return ret;
131} 138}
132 139
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 652c5651d327..26d6ff06cd99 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -794,7 +794,8 @@ static int omap_dmm_probe(struct platform_device *dev)
794 match = of_match_node(dmm_of_match, dev->dev.of_node); 794 match = of_match_node(dmm_of_match, dev->dev.of_node);
795 if (!match) { 795 if (!match) {
796 dev_err(&dev->dev, "failed to find matching device node\n"); 796 dev_err(&dev->dev, "failed to find matching device node\n");
797 return -ENODEV; 797 ret = -ENODEV;
798 goto fail;
798 } 799 }
799 800
800 omap_dmm->plat_data = match->data; 801 omap_dmm->plat_data = match->data;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index c4a552637c93..3ff7689835dc 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -494,9 +494,11 @@ static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
494 494
495int qxl_fbdev_init(struct qxl_device *qdev) 495int qxl_fbdev_init(struct qxl_device *qdev)
496{ 496{
497 int ret = 0;
498
499#ifdef CONFIG_DRM_FBDEV_EMULATION
497 struct qxl_fbdev *qfbdev; 500 struct qxl_fbdev *qfbdev;
498 int bpp_sel = 32; /* TODO: parameter from somewhere? */ 501 int bpp_sel = 32; /* TODO: parameter from somewhere? */
499 int ret;
500 502
501 qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL); 503 qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
502 if (!qfbdev) 504 if (!qfbdev)
@@ -531,6 +533,8 @@ fini:
531 drm_fb_helper_fini(&qfbdev->helper); 533 drm_fb_helper_fini(&qfbdev->helper);
532free: 534free:
533 kfree(qfbdev); 535 kfree(qfbdev);
536#endif
537
534 return ret; 538 return ret;
535} 539}
536 540
@@ -546,6 +550,9 @@ void qxl_fbdev_fini(struct qxl_device *qdev)
546 550
547void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state) 551void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
548{ 552{
553 if (!qdev->mode_info.qfbdev)
554 return;
555
549 drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state); 556 drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
550} 557}
551 558
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 134874cab4c7..80b6d6e4721a 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3599,35 +3599,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
3599 case CHIP_KAVERI: 3599 case CHIP_KAVERI:
3600 rdev->config.cik.max_shader_engines = 1; 3600 rdev->config.cik.max_shader_engines = 1;
3601 rdev->config.cik.max_tile_pipes = 4; 3601 rdev->config.cik.max_tile_pipes = 4;
3602 if ((rdev->pdev->device == 0x1304) || 3602 rdev->config.cik.max_cu_per_sh = 8;
3603 (rdev->pdev->device == 0x1305) || 3603 rdev->config.cik.max_backends_per_se = 2;
3604 (rdev->pdev->device == 0x130C) ||
3605 (rdev->pdev->device == 0x130F) ||
3606 (rdev->pdev->device == 0x1310) ||
3607 (rdev->pdev->device == 0x1311) ||
3608 (rdev->pdev->device == 0x131C)) {
3609 rdev->config.cik.max_cu_per_sh = 8;
3610 rdev->config.cik.max_backends_per_se = 2;
3611 } else if ((rdev->pdev->device == 0x1309) ||
3612 (rdev->pdev->device == 0x130A) ||
3613 (rdev->pdev->device == 0x130D) ||
3614 (rdev->pdev->device == 0x1313) ||
3615 (rdev->pdev->device == 0x131D)) {
3616 rdev->config.cik.max_cu_per_sh = 6;
3617 rdev->config.cik.max_backends_per_se = 2;
3618 } else if ((rdev->pdev->device == 0x1306) ||
3619 (rdev->pdev->device == 0x1307) ||
3620 (rdev->pdev->device == 0x130B) ||
3621 (rdev->pdev->device == 0x130E) ||
3622 (rdev->pdev->device == 0x1315) ||
3623 (rdev->pdev->device == 0x1318) ||
3624 (rdev->pdev->device == 0x131B)) {
3625 rdev->config.cik.max_cu_per_sh = 4;
3626 rdev->config.cik.max_backends_per_se = 1;
3627 } else {
3628 rdev->config.cik.max_cu_per_sh = 3;
3629 rdev->config.cik.max_backends_per_se = 1;
3630 }
3631 rdev->config.cik.max_sh_per_se = 1; 3604 rdev->config.cik.max_sh_per_se = 1;
3632 rdev->config.cik.max_texture_channel_caches = 4; 3605 rdev->config.cik.max_texture_channel_caches = 4;
3633 rdev->config.cik.max_gprs = 256; 3606 rdev->config.cik.max_gprs = 256;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 30f00748ed37..c6bf378534f8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -89,25 +89,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
89 /* don't do anything if sink is not display port, i.e., 89 /* don't do anything if sink is not display port, i.e.,
90 * passive dp->(dvi|hdmi) adaptor 90 * passive dp->(dvi|hdmi) adaptor
91 */ 91 */
92 if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 92 if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
93 int saved_dpms = connector->dpms; 93 radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
94 /* Only turn off the display if it's physically disconnected */ 94 radeon_dp_needs_link_train(radeon_connector)) {
95 if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { 95 /* Don't start link training before we have the DPCD */
96 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 96 if (!radeon_dp_getdpcd(radeon_connector))
97 } else if (radeon_dp_needs_link_train(radeon_connector)) { 97 return;
98 /* Don't try to start link training before we 98
99 * have the dpcd */ 99 /* Turn the connector off and back on immediately, which
100 if (!radeon_dp_getdpcd(radeon_connector)) 100 * will trigger link training
101 return; 101 */
102 102 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
103 /* set it to OFF so that drm_helper_connector_dpms() 103 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
104 * won't return immediately since the current state
105 * is ON at this point.
106 */
107 connector->dpms = DRM_MODE_DPMS_OFF;
108 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
109 }
110 connector->dpms = saved_dpms;
111 } 104 }
112 } 105 }
113} 106}
@@ -851,7 +844,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
851 return ret; 844 return ret;
852} 845}
853 846
854static int radeon_lvds_mode_valid(struct drm_connector *connector, 847static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector,
855 struct drm_display_mode *mode) 848 struct drm_display_mode *mode)
856{ 849{
857 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 850 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -891,9 +884,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
891 enum drm_connector_status ret = connector_status_disconnected; 884 enum drm_connector_status ret = connector_status_disconnected;
892 int r; 885 int r;
893 886
894 r = pm_runtime_get_sync(connector->dev->dev); 887 if (!drm_kms_helper_is_poll_worker()) {
895 if (r < 0) 888 r = pm_runtime_get_sync(connector->dev->dev);
896 return connector_status_disconnected; 889 if (r < 0)
890 return connector_status_disconnected;
891 }
897 892
898 if (encoder) { 893 if (encoder) {
899 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 894 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -916,8 +911,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
916 /* check acpi lid status ??? */ 911 /* check acpi lid status ??? */
917 912
918 radeon_connector_update_scratch_regs(connector, ret); 913 radeon_connector_update_scratch_regs(connector, ret);
919 pm_runtime_mark_last_busy(connector->dev->dev); 914
920 pm_runtime_put_autosuspend(connector->dev->dev); 915 if (!drm_kms_helper_is_poll_worker()) {
916 pm_runtime_mark_last_busy(connector->dev->dev);
917 pm_runtime_put_autosuspend(connector->dev->dev);
918 }
919
921 return ret; 920 return ret;
922} 921}
923 922
@@ -994,7 +993,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
994 return ret; 993 return ret;
995} 994}
996 995
997static int radeon_vga_mode_valid(struct drm_connector *connector, 996static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector,
998 struct drm_display_mode *mode) 997 struct drm_display_mode *mode)
999{ 998{
1000 struct drm_device *dev = connector->dev; 999 struct drm_device *dev = connector->dev;
@@ -1020,9 +1019,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
1020 enum drm_connector_status ret = connector_status_disconnected; 1019 enum drm_connector_status ret = connector_status_disconnected;
1021 int r; 1020 int r;
1022 1021
1023 r = pm_runtime_get_sync(connector->dev->dev); 1022 if (!drm_kms_helper_is_poll_worker()) {
1024 if (r < 0) 1023 r = pm_runtime_get_sync(connector->dev->dev);
1025 return connector_status_disconnected; 1024 if (r < 0)
1025 return connector_status_disconnected;
1026 }
1026 1027
1027 encoder = radeon_best_single_encoder(connector); 1028 encoder = radeon_best_single_encoder(connector);
1028 if (!encoder) 1029 if (!encoder)
@@ -1089,8 +1090,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
1089 radeon_connector_update_scratch_regs(connector, ret); 1090 radeon_connector_update_scratch_regs(connector, ret);
1090 1091
1091out: 1092out:
1092 pm_runtime_mark_last_busy(connector->dev->dev); 1093 if (!drm_kms_helper_is_poll_worker()) {
1093 pm_runtime_put_autosuspend(connector->dev->dev); 1094 pm_runtime_mark_last_busy(connector->dev->dev);
1095 pm_runtime_put_autosuspend(connector->dev->dev);
1096 }
1094 1097
1095 return ret; 1098 return ret;
1096} 1099}
@@ -1133,7 +1136,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
1133 return 1; 1136 return 1;
1134} 1137}
1135 1138
1136static int radeon_tv_mode_valid(struct drm_connector *connector, 1139static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector,
1137 struct drm_display_mode *mode) 1140 struct drm_display_mode *mode)
1138{ 1141{
1139 if ((mode->hdisplay > 1024) || (mode->vdisplay > 768)) 1142 if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
@@ -1153,9 +1156,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
1153 if (!radeon_connector->dac_load_detect) 1156 if (!radeon_connector->dac_load_detect)
1154 return ret; 1157 return ret;
1155 1158
1156 r = pm_runtime_get_sync(connector->dev->dev); 1159 if (!drm_kms_helper_is_poll_worker()) {
1157 if (r < 0) 1160 r = pm_runtime_get_sync(connector->dev->dev);
1158 return connector_status_disconnected; 1161 if (r < 0)
1162 return connector_status_disconnected;
1163 }
1159 1164
1160 encoder = radeon_best_single_encoder(connector); 1165 encoder = radeon_best_single_encoder(connector);
1161 if (!encoder) 1166 if (!encoder)
@@ -1167,8 +1172,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
1167 if (ret == connector_status_connected) 1172 if (ret == connector_status_connected)
1168 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); 1173 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
1169 radeon_connector_update_scratch_regs(connector, ret); 1174 radeon_connector_update_scratch_regs(connector, ret);
1170 pm_runtime_mark_last_busy(connector->dev->dev); 1175
1171 pm_runtime_put_autosuspend(connector->dev->dev); 1176 if (!drm_kms_helper_is_poll_worker()) {
1177 pm_runtime_mark_last_busy(connector->dev->dev);
1178 pm_runtime_put_autosuspend(connector->dev->dev);
1179 }
1180
1172 return ret; 1181 return ret;
1173} 1182}
1174 1183
@@ -1230,9 +1239,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1230 enum drm_connector_status ret = connector_status_disconnected; 1239 enum drm_connector_status ret = connector_status_disconnected;
1231 bool dret = false, broken_edid = false; 1240 bool dret = false, broken_edid = false;
1232 1241
1233 r = pm_runtime_get_sync(connector->dev->dev); 1242 if (!drm_kms_helper_is_poll_worker()) {
1234 if (r < 0) 1243 r = pm_runtime_get_sync(connector->dev->dev);
1235 return connector_status_disconnected; 1244 if (r < 0)
1245 return connector_status_disconnected;
1246 }
1236 1247
1237 if (radeon_connector->detected_hpd_without_ddc) { 1248 if (radeon_connector->detected_hpd_without_ddc) {
1238 force = true; 1249 force = true;
@@ -1415,8 +1426,10 @@ out:
1415 } 1426 }
1416 1427
1417exit: 1428exit:
1418 pm_runtime_mark_last_busy(connector->dev->dev); 1429 if (!drm_kms_helper_is_poll_worker()) {
1419 pm_runtime_put_autosuspend(connector->dev->dev); 1430 pm_runtime_mark_last_busy(connector->dev->dev);
1431 pm_runtime_put_autosuspend(connector->dev->dev);
1432 }
1420 1433
1421 return ret; 1434 return ret;
1422} 1435}
@@ -1464,7 +1477,7 @@ static void radeon_dvi_force(struct drm_connector *connector)
1464 radeon_connector->use_digital = true; 1477 radeon_connector->use_digital = true;
1465} 1478}
1466 1479
1467static int radeon_dvi_mode_valid(struct drm_connector *connector, 1480static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector,
1468 struct drm_display_mode *mode) 1481 struct drm_display_mode *mode)
1469{ 1482{
1470 struct drm_device *dev = connector->dev; 1483 struct drm_device *dev = connector->dev;
@@ -1666,9 +1679,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1666 if (radeon_dig_connector->is_mst) 1679 if (radeon_dig_connector->is_mst)
1667 return connector_status_disconnected; 1680 return connector_status_disconnected;
1668 1681
1669 r = pm_runtime_get_sync(connector->dev->dev); 1682 if (!drm_kms_helper_is_poll_worker()) {
1670 if (r < 0) 1683 r = pm_runtime_get_sync(connector->dev->dev);
1671 return connector_status_disconnected; 1684 if (r < 0)
1685 return connector_status_disconnected;
1686 }
1672 1687
1673 if (!force && radeon_check_hpd_status_unchanged(connector)) { 1688 if (!force && radeon_check_hpd_status_unchanged(connector)) {
1674 ret = connector->status; 1689 ret = connector->status;
@@ -1755,13 +1770,15 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1755 } 1770 }
1756 1771
1757out: 1772out:
1758 pm_runtime_mark_last_busy(connector->dev->dev); 1773 if (!drm_kms_helper_is_poll_worker()) {
1759 pm_runtime_put_autosuspend(connector->dev->dev); 1774 pm_runtime_mark_last_busy(connector->dev->dev);
1775 pm_runtime_put_autosuspend(connector->dev->dev);
1776 }
1760 1777
1761 return ret; 1778 return ret;
1762} 1779}
1763 1780
1764static int radeon_dp_mode_valid(struct drm_connector *connector, 1781static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector,
1765 struct drm_display_mode *mode) 1782 struct drm_display_mode *mode)
1766{ 1783{
1767 struct drm_device *dev = connector->dev; 1784 struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3645b223aa37..446d99062306 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1374,6 +1374,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
1374 return ERR_PTR(-ENOENT); 1374 return ERR_PTR(-ENOENT);
1375 } 1375 }
1376 1376
1377 /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
1378 if (obj->import_attach) {
1379 DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
1380 return ERR_PTR(-EINVAL);
1381 }
1382
1377 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); 1383 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
1378 if (radeon_fb == NULL) { 1384 if (radeon_fb == NULL) {
1379 drm_gem_object_unreference_unlocked(obj); 1385 drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index fb6ad143873f..83aee9e814ba 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -238,9 +238,10 @@ int radeon_bo_create(struct radeon_device *rdev,
238 * may be slow 238 * may be slow
239 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 239 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
240 */ 240 */
241 241#ifndef CONFIG_COMPILE_TEST
242#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ 242#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
243 thanks to write-combining 243 thanks to write-combining
244#endif
244 245
245 if (bo->flags & RADEON_GEM_GTT_WC) 246 if (bo->flags & RADEON_GEM_GTT_WC)
246 DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " 247 DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6edcb5485092..b35ebabd6a9f 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -946,7 +946,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
946 /* calc dclk divider with current vco freq */ 946 /* calc dclk divider with current vco freq */
947 dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk, 947 dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
948 pd_min, pd_even); 948 pd_min, pd_even);
949 if (vclk_div > pd_max) 949 if (dclk_div > pd_max)
950 break; /* vco is too big, it has to stop */ 950 break; /* vco is too big, it has to stop */
951 951
952 /* calc score with current vco freq */ 952 /* calc score with current vco freq */
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index d9007cc37be1..892d0a71d766 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5964,9 +5964,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
5964{ 5964{
5965 u32 lane_width; 5965 u32 lane_width;
5966 u32 new_lane_width = 5966 u32 new_lane_width =
5967 (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; 5967 ((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
5968 u32 current_lane_width = 5968 u32 current_lane_width =
5969 (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; 5969 ((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
5970 5970
5971 if (new_lane_width != current_lane_width) { 5971 if (new_lane_width != current_lane_width) {
5972 radeon_set_pcie_lanes(rdev, new_lane_width); 5972 radeon_set_pcie_lanes(rdev, new_lane_width);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 9befd624a5f0..6fab07935d16 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -371,6 +371,31 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
371 rcrtc->started = true; 371 rcrtc->started = true;
372} 372}
373 373
374static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
375{
376 struct rcar_du_device *rcdu = rcrtc->group->dev;
377 struct drm_crtc *crtc = &rcrtc->crtc;
378 u32 status;
379 /* Make sure vblank interrupts are enabled. */
380 drm_crtc_vblank_get(crtc);
381 /*
382 * Disable planes and calculate how many vertical blanking interrupts we
383 * have to wait for. If a vertical blanking interrupt has been triggered
384 * but not processed yet, we don't know whether it occurred before or
385 * after the planes got disabled. We thus have to wait for two vblank
386 * interrupts in that case.
387 */
388 spin_lock_irq(&rcrtc->vblank_lock);
389 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
390 status = rcar_du_crtc_read(rcrtc, DSSR);
391 rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
392 spin_unlock_irq(&rcrtc->vblank_lock);
393 if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
394 msecs_to_jiffies(100)))
395 dev_warn(rcdu->dev, "vertical blanking timeout\n");
396 drm_crtc_vblank_put(crtc);
397}
398
374static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) 399static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
375{ 400{
376 struct drm_crtc *crtc = &rcrtc->crtc; 401 struct drm_crtc *crtc = &rcrtc->crtc;
@@ -379,17 +404,16 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
379 return; 404 return;
380 405
381 /* Disable all planes and wait for the change to take effect. This is 406 /* Disable all planes and wait for the change to take effect. This is
382 * required as the DSnPR registers are updated on vblank, and no vblank 407 * required as the plane enable registers are updated on vblank, and no
383 * will occur once the CRTC is stopped. Disabling planes when starting 408 * vblank will occur once the CRTC is stopped. Disabling planes when
384 * the CRTC thus wouldn't be enough as it would start scanning out 409 * starting the CRTC thus wouldn't be enough as it would start scanning
385 * immediately from old frame buffers until the next vblank. 410 * out immediately from old frame buffers until the next vblank.
386 * 411 *
387 * This increases the CRTC stop delay, especially when multiple CRTCs 412 * This increases the CRTC stop delay, especially when multiple CRTCs
388 * are stopped in one operation as we now wait for one vblank per CRTC. 413 * are stopped in one operation as we now wait for one vblank per CRTC.
389 * Whether this can be improved needs to be researched. 414 * Whether this can be improved needs to be researched.
390 */ 415 */
391 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0); 416 rcar_du_crtc_disable_planes(rcrtc);
392 drm_crtc_wait_one_vblank(crtc);
393 417
394 /* Disable vertical blanking interrupt reporting. We first need to wait 418 /* Disable vertical blanking interrupt reporting. We first need to wait
395 * for page flip completion before stopping the CRTC as userspace 419 * for page flip completion before stopping the CRTC as userspace
@@ -528,10 +552,26 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
528 irqreturn_t ret = IRQ_NONE; 552 irqreturn_t ret = IRQ_NONE;
529 u32 status; 553 u32 status;
530 554
555 spin_lock(&rcrtc->vblank_lock);
556
531 status = rcar_du_crtc_read(rcrtc, DSSR); 557 status = rcar_du_crtc_read(rcrtc, DSSR);
532 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK); 558 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
533 559
534 if (status & DSSR_FRM) { 560 if (status & DSSR_VBK) {
561 /*
562 * Wake up the vblank wait if the counter reaches 0. This must
563 * be protected by the vblank_lock to avoid races in
564 * rcar_du_crtc_disable_planes().
565 */
566 if (rcrtc->vblank_count) {
567 if (--rcrtc->vblank_count == 0)
568 wake_up(&rcrtc->vblank_wait);
569 }
570 }
571
572 spin_unlock(&rcrtc->vblank_lock);
573
574 if (status & DSSR_VBK) {
535 drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index); 575 drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
536 rcar_du_crtc_finish_page_flip(rcrtc); 576 rcar_du_crtc_finish_page_flip(rcrtc);
537 ret = IRQ_HANDLED; 577 ret = IRQ_HANDLED;
@@ -585,6 +625,8 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
585 } 625 }
586 626
587 init_waitqueue_head(&rcrtc->flip_wait); 627 init_waitqueue_head(&rcrtc->flip_wait);
628 init_waitqueue_head(&rcrtc->vblank_wait);
629 spin_lock_init(&rcrtc->vblank_lock);
588 630
589 rcrtc->group = rgrp; 631 rcrtc->group = rgrp;
590 rcrtc->mmio_offset = mmio_offsets[index]; 632 rcrtc->mmio_offset = mmio_offsets[index];
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 2bbe3f5aab65..be22ce33b70a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,6 +15,7 @@
15#define __RCAR_DU_CRTC_H__ 15#define __RCAR_DU_CRTC_H__
16 16
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/spinlock.h>
18#include <linux/wait.h> 19#include <linux/wait.h>
19 20
20#include <drm/drmP.h> 21#include <drm/drmP.h>
@@ -32,6 +33,9 @@ struct rcar_du_group;
32 * @started: whether the CRTC has been started and is running 33 * @started: whether the CRTC has been started and is running
33 * @event: event to post when the pending page flip completes 34 * @event: event to post when the pending page flip completes
34 * @flip_wait: wait queue used to signal page flip completion 35 * @flip_wait: wait queue used to signal page flip completion
36 * @vblank_lock: protects vblank_wait and vblank_count
37 * @vblank_wait: wait queue used to signal vertical blanking
38 * @vblank_count: number of vertical blanking interrupts to wait for
35 * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC 39 * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC
36 * @enabled: whether the CRTC is enabled, used to control system resume 40 * @enabled: whether the CRTC is enabled, used to control system resume
37 * @group: CRTC group this CRTC belongs to 41 * @group: CRTC group this CRTC belongs to
@@ -48,6 +52,10 @@ struct rcar_du_crtc {
48 struct drm_pending_vblank_event *event; 52 struct drm_pending_vblank_event *event;
49 wait_queue_head_t flip_wait; 53 wait_queue_head_t flip_wait;
50 54
55 spinlock_t vblank_lock;
56 wait_queue_head_t vblank_wait;
57 unsigned int vblank_count;
58
51 unsigned int outputs; 59 unsigned int outputs;
52 bool enabled; 60 bool enabled;
53 61
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index d908321b94ce..e6d07680eb05 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -67,7 +67,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
67 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 67 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
68 */ 68 */
69 vma->vm_flags &= ~VM_PFNMAP; 69 vma->vm_flags &= ~VM_PFNMAP;
70 vma->vm_pgoff = 0;
71 70
72 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, 71 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
73 obj->size, &rk_obj->dma_attrs); 72 obj->size, &rk_obj->dma_attrs);
@@ -99,6 +98,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
99 if (ret) 98 if (ret)
100 return ret; 99 return ret;
101 100
101 /*
102 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
103 * whole buffer from the start.
104 */
105 vma->vm_pgoff = 0;
106
102 obj = vma->vm_private_data; 107 obj = vma->vm_private_data;
103 108
104 return rockchip_drm_gem_object_mmap(obj, vma); 109 return rockchip_drm_gem_object_mmap(obj, vma);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 5d8dfe027b30..75d51ec98e06 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -818,6 +818,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
818 pr_info("Initializing pool allocator\n"); 818 pr_info("Initializing pool allocator\n");
819 819
820 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); 820 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
821 if (!_manager)
822 return -ENOMEM;
821 823
822 ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc"); 824 ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
823 825
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 391e7eeefbf5..f2a51922ac10 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -256,10 +256,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
256{ 256{
257 unsigned long start = vma->vm_start; 257 unsigned long start = vma->vm_start;
258 unsigned long size = vma->vm_end - vma->vm_start; 258 unsigned long size = vma->vm_end - vma->vm_start;
259 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; 259 unsigned long offset;
260 unsigned long page, pos; 260 unsigned long page, pos;
261 261
262 if (offset + size > info->fix.smem_len) 262 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
263 return -EINVAL;
264
265 offset = vma->vm_pgoff << PAGE_SHIFT;
266
267 if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
263 return -EINVAL; 268 return -EINVAL;
264 269
265 pos = (unsigned long)info->fix.smem_start + offset; 270 pos = (unsigned long)info->fix.smem_start + offset;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index b4de18e65db8..6296e9f270ca 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -208,6 +208,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
208 case VIRTGPU_PARAM_3D_FEATURES: 208 case VIRTGPU_PARAM_3D_FEATURES:
209 value = vgdev->has_virgl_3d == true ? 1 : 0; 209 value = vgdev->has_virgl_3d == true ? 1 : 0;
210 break; 210 break;
211 case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
212 value = 1;
213 break;
211 default: 214 default:
212 return -EINVAL; 215 return -EINVAL;
213 } 216 }
@@ -483,7 +486,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
483{ 486{
484 struct virtio_gpu_device *vgdev = dev->dev_private; 487 struct virtio_gpu_device *vgdev = dev->dev_private;
485 struct drm_virtgpu_get_caps *args = data; 488 struct drm_virtgpu_get_caps *args = data;
486 int size; 489 unsigned size, host_caps_size;
487 int i; 490 int i;
488 int found_valid = -1; 491 int found_valid = -1;
489 int ret; 492 int ret;
@@ -492,6 +495,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
492 if (vgdev->num_capsets == 0) 495 if (vgdev->num_capsets == 0)
493 return -ENOSYS; 496 return -ENOSYS;
494 497
498 /* don't allow userspace to pass 0 */
499 if (args->size == 0)
500 return -EINVAL;
501
495 spin_lock(&vgdev->display_info_lock); 502 spin_lock(&vgdev->display_info_lock);
496 for (i = 0; i < vgdev->num_capsets; i++) { 503 for (i = 0; i < vgdev->num_capsets; i++) {
497 if (vgdev->capsets[i].id == args->cap_set_id) { 504 if (vgdev->capsets[i].id == args->cap_set_id) {
@@ -507,11 +514,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
507 return -EINVAL; 514 return -EINVAL;
508 } 515 }
509 516
510 size = vgdev->capsets[found_valid].max_size; 517 host_caps_size = vgdev->capsets[found_valid].max_size;
511 if (args->size > size) { 518 /* only copy to user the minimum of the host caps size or the guest caps size */
512 spin_unlock(&vgdev->display_info_lock); 519 size = min(args->size, host_caps_size);
513 return -EINVAL;
514 }
515 520
516 list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { 521 list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
517 if (cache_ent->id == args->cap_set_id && 522 if (cache_ent->id == args->cap_set_id &&
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5a0f8a745b9d..52436b3c01bb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -324,7 +324,7 @@ retry:
324 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); 324 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
325 if (ret == -ENOSPC) { 325 if (ret == -ENOSPC) {
326 spin_unlock(&vgdev->ctrlq.qlock); 326 spin_unlock(&vgdev->ctrlq.qlock);
327 wait_event(vgdev->ctrlq.ack_queue, vq->num_free); 327 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
328 spin_lock(&vgdev->ctrlq.qlock); 328 spin_lock(&vgdev->ctrlq.qlock);
329 goto retry; 329 goto retry;
330 } else { 330 } else {
@@ -399,7 +399,7 @@ retry:
399 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC); 399 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
400 if (ret == -ENOSPC) { 400 if (ret == -ENOSPC) {
401 spin_unlock(&vgdev->cursorq.qlock); 401 spin_unlock(&vgdev->cursorq.qlock);
402 wait_event(vgdev->cursorq.ack_queue, vq->num_free); 402 wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
403 spin_lock(&vgdev->cursorq.qlock); 403 spin_lock(&vgdev->cursorq.qlock);
404 goto retry; 404 goto retry;
405 } else { 405 } else {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 67cebb23c940..aa04fb0159a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -293,13 +293,10 @@ static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
293 struct vmw_cmdbuf_man *man = header->man; 293 struct vmw_cmdbuf_man *man = header->man;
294 u32 val; 294 u32 val;
295 295
296 if (sizeof(header->handle) > 4) 296 val = upper_32_bits(header->handle);
297 val = (header->handle >> 32);
298 else
299 val = 0;
300 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val); 297 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
301 298
302 val = (header->handle & 0xFFFFFFFFULL); 299 val = lower_32_bits(header->handle);
303 val |= header->cb_context & SVGA_CB_CONTEXT_MASK; 300 val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
304 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val); 301 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
305 302
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index d2d93959b119..aec6e9eef489 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -433,7 +433,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
433 set.y = 0; 433 set.y = 0;
434 set.mode = NULL; 434 set.mode = NULL;
435 set.fb = NULL; 435 set.fb = NULL;
436 set.num_connectors = 1; 436 set.num_connectors = 0;
437 set.connectors = &par->con; 437 set.connectors = &par->con;
438 ret = drm_mode_set_config_internal(&set); 438 ret = drm_mode_set_config_internal(&set);
439 if (ret) { 439 if (ret) {
@@ -821,7 +821,9 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
821 flush_delayed_work(&par->local_work); 821 flush_delayed_work(&par->local_work);
822 822
823 mutex_lock(&par->bo_mutex); 823 mutex_lock(&par->bo_mutex);
824 drm_modeset_lock_all(vmw_priv->dev);
824 (void) vmw_fb_kms_detach(par, true, false); 825 (void) vmw_fb_kms_detach(par, true, false);
826 drm_modeset_unlock_all(vmw_priv->dev);
825 mutex_unlock(&par->bo_mutex); 827 mutex_unlock(&par->bo_mutex);
826 828
827 return 0; 829 return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 060e5c6f4446..9b97f70fbb3d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -27,7 +27,6 @@
27 27
28#include "vmwgfx_kms.h" 28#include "vmwgfx_kms.h"
29 29
30
31/* Might need a hrtimer here? */ 30/* Might need a hrtimer here? */
32#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) 31#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
33 32
@@ -1910,9 +1909,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
1910 * Helper to be used if an error forces the caller to undo the actions of 1909 * Helper to be used if an error forces the caller to undo the actions of
1911 * vmw_kms_helper_resource_prepare. 1910 * vmw_kms_helper_resource_prepare.
1912 */ 1911 */
1913void vmw_kms_helper_resource_revert(struct vmw_resource *res) 1912void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
1914{ 1913{
1915 vmw_kms_helper_buffer_revert(res->backup); 1914 struct vmw_resource *res = ctx->res;
1915
1916 vmw_kms_helper_buffer_revert(ctx->buf);
1917 vmw_dmabuf_unreference(&ctx->buf);
1916 vmw_resource_unreserve(res, false, NULL, 0); 1918 vmw_resource_unreserve(res, false, NULL, 0);
1917 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 1919 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1918} 1920}
@@ -1929,10 +1931,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res)
1929 * interrupted by a signal. 1931 * interrupted by a signal.
1930 */ 1932 */
1931int vmw_kms_helper_resource_prepare(struct vmw_resource *res, 1933int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
1932 bool interruptible) 1934 bool interruptible,
1935 struct vmw_validation_ctx *ctx)
1933{ 1936{
1934 int ret = 0; 1937 int ret = 0;
1935 1938
1939 ctx->buf = NULL;
1940 ctx->res = res;
1941
1936 if (interruptible) 1942 if (interruptible)
1937 ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex); 1943 ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
1938 else 1944 else
@@ -1951,6 +1957,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
1951 res->dev_priv->has_mob); 1957 res->dev_priv->has_mob);
1952 if (ret) 1958 if (ret)
1953 goto out_unreserve; 1959 goto out_unreserve;
1960
1961 ctx->buf = vmw_dmabuf_reference(res->backup);
1954 } 1962 }
1955 ret = vmw_resource_validate(res); 1963 ret = vmw_resource_validate(res);
1956 if (ret) 1964 if (ret)
@@ -1958,7 +1966,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
1958 return 0; 1966 return 0;
1959 1967
1960out_revert: 1968out_revert:
1961 vmw_kms_helper_buffer_revert(res->backup); 1969 vmw_kms_helper_buffer_revert(ctx->buf);
1962out_unreserve: 1970out_unreserve:
1963 vmw_resource_unreserve(res, false, NULL, 0); 1971 vmw_resource_unreserve(res, false, NULL, 0);
1964out_unlock: 1972out_unlock:
@@ -1974,13 +1982,16 @@ out_unlock:
1974 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a 1982 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
1975 * ref-counted fence pointer is returned here. 1983 * ref-counted fence pointer is returned here.
1976 */ 1984 */
1977void vmw_kms_helper_resource_finish(struct vmw_resource *res, 1985void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
1978 struct vmw_fence_obj **out_fence) 1986 struct vmw_fence_obj **out_fence)
1979{ 1987{
1980 if (res->backup || out_fence) 1988 struct vmw_resource *res = ctx->res;
1981 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, 1989
1990 if (ctx->buf || out_fence)
1991 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
1982 out_fence, NULL); 1992 out_fence, NULL);
1983 1993
1994 vmw_dmabuf_unreference(&ctx->buf);
1984 vmw_resource_unreserve(res, false, NULL, 0); 1995 vmw_resource_unreserve(res, false, NULL, 0);
1985 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 1996 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1986} 1997}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index edd81503516d..63b05d5ee50a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -180,6 +180,11 @@ struct vmw_display_unit {
180 bool is_implicit; 180 bool is_implicit;
181}; 181};
182 182
183struct vmw_validation_ctx {
184 struct vmw_resource *res;
185 struct vmw_dma_buffer *buf;
186};
187
183#define vmw_crtc_to_du(x) \ 188#define vmw_crtc_to_du(x) \
184 container_of(x, struct vmw_display_unit, crtc) 189 container_of(x, struct vmw_display_unit, crtc)
185#define vmw_connector_to_du(x) \ 190#define vmw_connector_to_du(x) \
@@ -230,9 +235,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
230 struct drm_vmw_fence_rep __user * 235 struct drm_vmw_fence_rep __user *
231 user_fence_rep); 236 user_fence_rep);
232int vmw_kms_helper_resource_prepare(struct vmw_resource *res, 237int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
233 bool interruptible); 238 bool interruptible,
234void vmw_kms_helper_resource_revert(struct vmw_resource *res); 239 struct vmw_validation_ctx *ctx);
235void vmw_kms_helper_resource_finish(struct vmw_resource *res, 240void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
241void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
236 struct vmw_fence_obj **out_fence); 242 struct vmw_fence_obj **out_fence);
237int vmw_kms_readback(struct vmw_private *dev_priv, 243int vmw_kms_readback(struct vmw_private *dev_priv,
238 struct drm_file *file_priv, 244 struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 13926ff192e3..f50fcd213413 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -841,12 +841,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
841 struct vmw_framebuffer_surface *vfbs = 841 struct vmw_framebuffer_surface *vfbs =
842 container_of(framebuffer, typeof(*vfbs), base); 842 container_of(framebuffer, typeof(*vfbs), base);
843 struct vmw_kms_sou_surface_dirty sdirty; 843 struct vmw_kms_sou_surface_dirty sdirty;
844 struct vmw_validation_ctx ctx;
844 int ret; 845 int ret;
845 846
846 if (!srf) 847 if (!srf)
847 srf = &vfbs->surface->res; 848 srf = &vfbs->surface->res;
848 849
849 ret = vmw_kms_helper_resource_prepare(srf, true); 850 ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
850 if (ret) 851 if (ret)
851 return ret; 852 return ret;
852 853
@@ -865,7 +866,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
865 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, 866 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
866 dest_x, dest_y, num_clips, inc, 867 dest_x, dest_y, num_clips, inc,
867 &sdirty.base); 868 &sdirty.base);
868 vmw_kms_helper_resource_finish(srf, out_fence); 869 vmw_kms_helper_resource_finish(&ctx, out_fence);
869 870
870 return ret; 871 return ret;
871} 872}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index f823fc3efed7..3184a9ae22c1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1003,12 +1003,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
1003 struct vmw_framebuffer_surface *vfbs = 1003 struct vmw_framebuffer_surface *vfbs =
1004 container_of(framebuffer, typeof(*vfbs), base); 1004 container_of(framebuffer, typeof(*vfbs), base);
1005 struct vmw_stdu_dirty sdirty; 1005 struct vmw_stdu_dirty sdirty;
1006 struct vmw_validation_ctx ctx;
1006 int ret; 1007 int ret;
1007 1008
1008 if (!srf) 1009 if (!srf)
1009 srf = &vfbs->surface->res; 1010 srf = &vfbs->surface->res;
1010 1011
1011 ret = vmw_kms_helper_resource_prepare(srf, true); 1012 ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
1012 if (ret) 1013 if (ret)
1013 return ret; 1014 return ret;
1014 1015
@@ -1031,7 +1032,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
1031 dest_x, dest_y, num_clips, inc, 1032 dest_x, dest_y, num_clips, inc,
1032 &sdirty.base); 1033 &sdirty.base);
1033out_finish: 1034out_finish:
1034 vmw_kms_helper_resource_finish(srf, out_fence); 1035 vmw_kms_helper_resource_finish(&ctx, out_fence);
1035 1036
1036 return ret; 1037 return ret;
1037} 1038}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3ba486d0ec6f..e4541c6bf3d3 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1331,7 +1331,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1331 * of implement() working on 8 byte chunks 1331 * of implement() working on 8 byte chunks
1332 */ 1332 */
1333 1333
1334 int len = hid_report_len(report) + 7; 1334 u32 len = hid_report_len(report) + 7;
1335 1335
1336 return kmalloc(len, flags); 1336 return kmalloc(len, flags);
1337} 1337}
@@ -1396,7 +1396,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
1396{ 1396{
1397 char *buf; 1397 char *buf;
1398 int ret; 1398 int ret;
1399 int len; 1399 u32 len;
1400 1400
1401 buf = hid_alloc_report_buf(report, GFP_KERNEL); 1401 buf = hid_alloc_report_buf(report, GFP_KERNEL);
1402 if (!buf) 1402 if (!buf)
@@ -1422,14 +1422,14 @@ out:
1422} 1422}
1423EXPORT_SYMBOL_GPL(__hid_request); 1423EXPORT_SYMBOL_GPL(__hid_request);
1424 1424
1425int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, 1425int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
1426 int interrupt) 1426 int interrupt)
1427{ 1427{
1428 struct hid_report_enum *report_enum = hid->report_enum + type; 1428 struct hid_report_enum *report_enum = hid->report_enum + type;
1429 struct hid_report *report; 1429 struct hid_report *report;
1430 struct hid_driver *hdrv; 1430 struct hid_driver *hdrv;
1431 unsigned int a; 1431 unsigned int a;
1432 int rsize, csize = size; 1432 u32 rsize, csize = size;
1433 u8 *cdata = data; 1433 u8 *cdata = data;
1434 int ret = 0; 1434 int ret = 0;
1435 1435
@@ -1487,7 +1487,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
1487 * 1487 *
1488 * This is data entry for lower layers. 1488 * This is data entry for lower layers.
1489 */ 1489 */
1490int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt) 1490int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
1491{ 1491{
1492 struct hid_report_enum *report_enum; 1492 struct hid_report_enum *report_enum;
1493 struct hid_driver *hdrv; 1493 struct hid_driver *hdrv;
@@ -2308,7 +2308,6 @@ static const struct hid_device_id hid_ignore_list[] = {
2308 { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) }, 2308 { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
2309 { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) }, 2309 { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
2310 { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, 2310 { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
2311 { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
2312 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, 2311 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
2313 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, 2312 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
2314 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, 2313 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -2387,6 +2386,9 @@ static const struct hid_device_id hid_ignore_list[] = {
2387 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, 2386 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
2388 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, 2387 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
2389 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, 2388 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
2389 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
2390 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
2391 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
2390 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, 2392 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
2391 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, 2393 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
2392 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, 2394 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
@@ -2578,6 +2580,17 @@ bool hid_ignore(struct hid_device *hdev)
2578 strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) 2580 strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
2579 return true; 2581 return true;
2580 break; 2582 break;
2583 case USB_VENDOR_ID_ELAN:
2584 /*
2585 * Many Elan devices have a product id of 0x0401 and are handled
2586 * by the elan_i2c input driver. But the ACPI HID ELAN0800 dev
2587 * is not (and cannot be) handled by that driver ->
2588 * Ignore all 0x0401 devs except for the ELAN0800 dev.
2589 */
2590 if (hdev->product == 0x0401 &&
2591 strncmp(hdev->name, "ELAN0800", 8) != 0)
2592 return true;
2593 break;
2581 } 2594 }
2582 2595
2583 if (hdev->type == HID_TYPE_USBMOUSE && 2596 if (hdev->type == HID_TYPE_USBMOUSE &&
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 2886b645ced7..6c60f4b63d21 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1152,6 +1152,8 @@ copy_rest:
1152 goto out; 1152 goto out;
1153 if (list->tail > list->head) { 1153 if (list->tail > list->head) {
1154 len = list->tail - list->head; 1154 len = list->tail - list->head;
1155 if (len > count)
1156 len = count;
1155 1157
1156 if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { 1158 if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
1157 ret = -EFAULT; 1159 ret = -EFAULT;
@@ -1161,6 +1163,8 @@ copy_rest:
1161 list->head += len; 1163 list->head += len;
1162 } else { 1164 } else {
1163 len = HID_DEBUG_BUFSIZE - list->head; 1165 len = HID_DEBUG_BUFSIZE - list->head;
1166 if (len > count)
1167 len = count;
1164 1168
1165 if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { 1169 if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
1166 ret = -EFAULT; 1170 ret = -EFAULT;
@@ -1168,7 +1172,9 @@ copy_rest:
1168 } 1172 }
1169 list->head = 0; 1173 list->head = 0;
1170 ret += len; 1174 ret += len;
1171 goto copy_rest; 1175 count -= len;
1176 if (count > 0)
1177 goto copy_rest;
1172 } 1178 }
1173 1179
1174 } 1180 }
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 0cd4f7216239..5eea6fe0d7bd 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev,
42{ 42{
43 struct input_dev *input = hidinput->input; 43 struct input_dev *input = hidinput->input;
44 44
45 /*
46 * ELO devices have one Button usage in GenDesk field, which makes
47 * hid-input map it to BTN_LEFT; that confuses userspace, which then
48 * considers the device to be a mouse/touchpad instead of touchscreen.
49 */
50 clear_bit(BTN_LEFT, input->keybit);
45 set_bit(BTN_TOUCH, input->keybit); 51 set_bit(BTN_TOUCH, input->keybit);
46 set_bit(ABS_PRESSURE, input->absbit); 52 set_bit(ABS_PRESSURE, input->absbit);
47 input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0); 53 input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b554d17c9156..60e2c9faa95f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -512,6 +512,9 @@
512#define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615 512#define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615
513#define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070 513#define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070
514 514
515#define USB_VENDOR_ID_INNOMEDIA 0x1292
516#define USB_DEVICE_ID_INNEX_GENESIS_ATARI 0x4745
517
515#define USB_VENDOR_ID_ITE 0x048d 518#define USB_VENDOR_ID_ITE 0x048d
516#define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386 519#define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
517#define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350 520#define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
@@ -570,6 +573,9 @@
570#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 573#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033
571#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 574#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035
572#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 575#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038
576#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040
577#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042
578#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043
573#define USB_DEVICE_ID_LD_JWM 0x1080 579#define USB_DEVICE_ID_LD_JWM 0x1080
574#define USB_DEVICE_ID_LD_DMMP 0x1081 580#define USB_DEVICE_ID_LD_DMMP 0x1081
575#define USB_DEVICE_ID_LD_UMIP 0x1090 581#define USB_DEVICE_ID_LD_UMIP 0x1090
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 2ba6bf69b7d0..8d74e691ac90 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1128,18 +1128,26 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
1128 1128
1129 /* 1129 /*
1130 * Ignore out-of-range values as per HID specification, 1130 * Ignore out-of-range values as per HID specification,
1131 * section 5.10 and 6.2.25. 1131 * section 5.10 and 6.2.25, when NULL state bit is present.
1132 * When it's not, clamp the value to match Microsoft's input
1133 * driver as mentioned in "Required HID usages for digitizers":
1134 * https://msdn.microsoft.com/en-us/library/windows/hardware/dn672278(v=vs.85).asp
1132 * 1135 *
1133 * The logical_minimum < logical_maximum check is done so that we 1136 * The logical_minimum < logical_maximum check is done so that we
1134 * don't unintentionally discard values sent by devices which 1137 * don't unintentionally discard values sent by devices which
1135 * don't specify logical min and max. 1138 * don't specify logical min and max.
1136 */ 1139 */
1137 if ((field->flags & HID_MAIN_ITEM_VARIABLE) && 1140 if ((field->flags & HID_MAIN_ITEM_VARIABLE) &&
1138 (field->logical_minimum < field->logical_maximum) && 1141 (field->logical_minimum < field->logical_maximum)) {
1139 (value < field->logical_minimum || 1142 if (field->flags & HID_MAIN_ITEM_NULL_STATE &&
1140 value > field->logical_maximum)) { 1143 (value < field->logical_minimum ||
1141 dbg_hid("Ignoring out-of-range value %x\n", value); 1144 value > field->logical_maximum)) {
1142 return; 1145 dbg_hid("Ignoring out-of-range value %x\n", value);
1146 return;
1147 }
1148 value = clamp(value,
1149 field->logical_minimum,
1150 field->logical_maximum);
1143 } 1151 }
1144 1152
1145 /* 1153 /*
@@ -1250,7 +1258,8 @@ static void hidinput_led_worker(struct work_struct *work)
1250 led_work); 1258 led_work);
1251 struct hid_field *field; 1259 struct hid_field *field;
1252 struct hid_report *report; 1260 struct hid_report *report;
1253 int len, ret; 1261 int ret;
1262 u32 len;
1254 __u8 *buf; 1263 __u8 *buf;
1255 1264
1256 field = hidinput_get_led_field(hid); 1265 field = hidinput_get_led_field(hid);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f62a9d6601cc..9de379c1b3fd 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -314,7 +314,8 @@ static struct attribute_group mt_attribute_group = {
314static void mt_get_feature(struct hid_device *hdev, struct hid_report *report) 314static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
315{ 315{
316 struct mt_device *td = hid_get_drvdata(hdev); 316 struct mt_device *td = hid_get_drvdata(hdev);
317 int ret, size = hid_report_len(report); 317 int ret;
318 u32 size = hid_report_len(report);
318 u8 *buf; 319 u8 *buf;
319 320
320 /* 321 /*
@@ -919,7 +920,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
919 struct hid_report_enum *re; 920 struct hid_report_enum *re;
920 struct mt_class *cls = &td->mtclass; 921 struct mt_class *cls = &td->mtclass;
921 char *buf; 922 char *buf;
922 int report_len; 923 u32 report_len;
923 924
924 if (td->inputmode < 0) 925 if (td->inputmode < 0)
925 return; 926 return;
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
index febb21ee190e..584b10d3fc3d 100644
--- a/drivers/hid/hid-plantronics.c
+++ b/drivers/hid/hid-plantronics.c
@@ -2,7 +2,7 @@
2 * Plantronics USB HID Driver 2 * Plantronics USB HID Driver
3 * 3 *
4 * Copyright (c) 2014 JD Cole <jd.cole@plantronics.com> 4 * Copyright (c) 2014 JD Cole <jd.cole@plantronics.com>
5 * Copyright (c) 2015 Terry Junge <terry.junge@plantronics.com> 5 * Copyright (c) 2015-2018 Terry Junge <terry.junge@plantronics.com>
6 */ 6 */
7 7
8/* 8/*
@@ -48,6 +48,10 @@ static int plantronics_input_mapping(struct hid_device *hdev,
48 unsigned short mapped_key; 48 unsigned short mapped_key;
49 unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev); 49 unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
50 50
51 /* special case for PTT products */
52 if (field->application == HID_GD_JOYSTICK)
53 goto defaulted;
54
51 /* handle volume up/down mapping */ 55 /* handle volume up/down mapping */
52 /* non-standard types or multi-HID interfaces - plt_type is PID */ 56 /* non-standard types or multi-HID interfaces - plt_type is PID */
53 if (!(plt_type & HID_USAGE_PAGE)) { 57 if (!(plt_type & HID_USAGE_PAGE)) {
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 67cd059a8f46..41a4a2af9db1 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -110,8 +110,8 @@ struct rmi_data {
110 u8 *writeReport; 110 u8 *writeReport;
111 u8 *readReport; 111 u8 *readReport;
112 112
113 int input_report_size; 113 u32 input_report_size;
114 int output_report_size; 114 u32 output_report_size;
115 115
116 unsigned long flags; 116 unsigned long flags;
117 117
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 966047711fbf..1073c0d1fae5 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -37,6 +37,8 @@ static uint kovaplus_convert_event_cpi(uint value)
37static void kovaplus_profile_activated(struct kovaplus_device *kovaplus, 37static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
38 uint new_profile_index) 38 uint new_profile_index)
39{ 39{
40 if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings))
41 return;
40 kovaplus->actual_profile = new_profile_index; 42 kovaplus->actual_profile = new_profile_index;
41 kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level; 43 kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
42 kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x; 44 kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 9c2d7c23f296..c0c4df198725 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -197,6 +197,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
197 int ret = 0, len; 197 int ret = 0, len;
198 unsigned char report_number; 198 unsigned char report_number;
199 199
200 if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
201 ret = -ENODEV;
202 goto out;
203 }
204
200 dev = hidraw_table[minor]->hid; 205 dev = hidraw_table[minor]->hid;
201 206
202 if (!dev->ll_driver->raw_request) { 207 if (!dev->ll_driver->raw_request) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 312aa1e33fb2..4248d253c32a 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -137,10 +137,10 @@ struct i2c_hid {
137 * register of the HID 137 * register of the HID
138 * descriptor. */ 138 * descriptor. */
139 unsigned int bufsize; /* i2c buffer size */ 139 unsigned int bufsize; /* i2c buffer size */
140 char *inbuf; /* Input buffer */ 140 u8 *inbuf; /* Input buffer */
141 char *rawbuf; /* Raw Input buffer */ 141 u8 *rawbuf; /* Raw Input buffer */
142 char *cmdbuf; /* Command buffer */ 142 u8 *cmdbuf; /* Command buffer */
143 char *argsbuf; /* Command arguments buffer */ 143 u8 *argsbuf; /* Command arguments buffer */
144 144
145 unsigned long flags; /* device flags */ 145 unsigned long flags; /* device flags */
146 146
@@ -387,7 +387,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
387 387
388static void i2c_hid_get_input(struct i2c_hid *ihid) 388static void i2c_hid_get_input(struct i2c_hid *ihid)
389{ 389{
390 int ret, ret_size; 390 int ret;
391 u32 ret_size;
391 int size = le16_to_cpu(ihid->hdesc.wMaxInputLength); 392 int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
392 393
393 if (size > ihid->bufsize) 394 if (size > ihid->bufsize)
@@ -412,7 +413,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
412 return; 413 return;
413 } 414 }
414 415
415 if (ret_size > size) { 416 if ((ret_size > size) || (ret_size < 2)) {
416 dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", 417 dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
417 __func__, size, ret_size); 418 __func__, size, ret_size);
418 return; 419 return;
@@ -1016,6 +1017,14 @@ static int i2c_hid_probe(struct i2c_client *client,
1016 pm_runtime_set_active(&client->dev); 1017 pm_runtime_set_active(&client->dev);
1017 pm_runtime_enable(&client->dev); 1018 pm_runtime_enable(&client->dev);
1018 1019
1020 /* Make sure there is something at this address */
1021 ret = i2c_smbus_read_byte(client);
1022 if (ret < 0) {
1023 dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
1024 ret = -ENXIO;
1025 goto err_pm;
1026 }
1027
1019 ret = i2c_hid_fetch_hid_descriptor(ihid); 1028 ret = i2c_hid_fetch_hid_descriptor(ihid);
1020 if (ret < 0) 1029 if (ret < 0)
1021 goto err_pm; 1030 goto err_pm;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index ce1543d69acb..c9a11315493b 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -152,6 +152,7 @@ static const struct hid_blacklist {
152 { USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT }, 152 { USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT },
153 { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT }, 153 { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
154 { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT }, 154 { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
155 { USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT },
155 156
156 { 0, 0 } 157 { 0, 0 }
157}; 158};
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 700145b15088..b59b15d4caa9 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -35,6 +35,7 @@
35#include <linux/hiddev.h> 35#include <linux/hiddev.h>
36#include <linux/compat.h> 36#include <linux/compat.h>
37#include <linux/vmalloc.h> 37#include <linux/vmalloc.h>
38#include <linux/nospec.h>
38#include "usbhid.h" 39#include "usbhid.h"
39 40
40#ifdef CONFIG_USB_DYNAMIC_MINORS 41#ifdef CONFIG_USB_DYNAMIC_MINORS
@@ -478,10 +479,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
478 479
479 if (uref->field_index >= report->maxfield) 480 if (uref->field_index >= report->maxfield)
480 goto inval; 481 goto inval;
482 uref->field_index = array_index_nospec(uref->field_index,
483 report->maxfield);
481 484
482 field = report->field[uref->field_index]; 485 field = report->field[uref->field_index];
483 if (uref->usage_index >= field->maxusage) 486 if (uref->usage_index >= field->maxusage)
484 goto inval; 487 goto inval;
488 uref->usage_index = array_index_nospec(uref->usage_index,
489 field->maxusage);
485 490
486 uref->usage_code = field->usage[uref->usage_index].hid; 491 uref->usage_code = field->usage[uref->usage_index].hid;
487 492
@@ -508,6 +513,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
508 513
509 if (uref->field_index >= report->maxfield) 514 if (uref->field_index >= report->maxfield)
510 goto inval; 515 goto inval;
516 uref->field_index = array_index_nospec(uref->field_index,
517 report->maxfield);
511 518
512 field = report->field[uref->field_index]; 519 field = report->field[uref->field_index];
513 520
@@ -761,6 +768,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
761 768
762 if (finfo.field_index >= report->maxfield) 769 if (finfo.field_index >= report->maxfield)
763 break; 770 break;
771 finfo.field_index = array_index_nospec(finfo.field_index,
772 report->maxfield);
764 773
765 field = report->field[finfo.field_index]; 774 field = report->field[finfo.field_index];
766 memset(&finfo, 0, sizeof(finfo)); 775 memset(&finfo, 0, sizeof(finfo));
@@ -801,6 +810,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
801 810
802 if (cinfo.index >= hid->maxcollection) 811 if (cinfo.index >= hid->maxcollection)
803 break; 812 break;
813 cinfo.index = array_index_nospec(cinfo.index,
814 hid->maxcollection);
804 815
805 cinfo.type = hid->collection[cinfo.index].type; 816 cinfo.type = hid->collection[cinfo.index].type;
806 cinfo.usage = hid->collection[cinfo.index].usage; 817 cinfo.usage = hid->collection[cinfo.index].usage;
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index a38af68cf326..0a0628d11c0b 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -976,7 +976,7 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
976 goto drop; 976 goto drop;
977 /* Pad to 32-bits - FIXME: Revisit*/ 977 /* Pad to 32-bits - FIXME: Revisit*/
978 if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3))) 978 if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
979 goto drop; 979 goto inc_dropped;
980 980
981 /* 981 /*
982 * Modem sends Phonet messages over SSI with its own endianess... 982 * Modem sends Phonet messages over SSI with its own endianess...
@@ -1028,8 +1028,9 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
1028drop2: 1028drop2:
1029 hsi_free_msg(msg); 1029 hsi_free_msg(msg);
1030drop: 1030drop:
1031 dev->stats.tx_dropped++;
1032 dev_kfree_skb(skb); 1031 dev_kfree_skb(skb);
1032inc_dropped:
1033 dev->stats.tx_dropped++;
1033 1034
1034 return 0; 1035 return 0;
1035} 1036}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index d415a804fd26..9a8976a79b29 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -195,9 +195,7 @@ int hv_init(void)
195{ 195{
196 int max_leaf; 196 int max_leaf;
197 union hv_x64_msr_hypercall_contents hypercall_msr; 197 union hv_x64_msr_hypercall_contents hypercall_msr;
198 union hv_x64_msr_hypercall_contents tsc_msr;
199 void *virtaddr = NULL; 198 void *virtaddr = NULL;
200 void *va_tsc = NULL;
201 199
202 memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS); 200 memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
203 memset(hv_context.synic_message_page, 0, 201 memset(hv_context.synic_message_page, 0,
@@ -243,6 +241,9 @@ int hv_init(void)
243 241
244#ifdef CONFIG_X86_64 242#ifdef CONFIG_X86_64
245 if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) { 243 if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
244 union hv_x64_msr_hypercall_contents tsc_msr;
245 void *va_tsc;
246
246 va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL); 247 va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
247 if (!va_tsc) 248 if (!va_tsc)
248 goto cleanup; 249 goto cleanup;
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index b24f1d3045f0..ac63e562071f 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -94,18 +94,20 @@ enum ina2xx_ids { ina219, ina226 };
94 94
95struct ina2xx_config { 95struct ina2xx_config {
96 u16 config_default; 96 u16 config_default;
97 int calibration_factor; 97 int calibration_value;
98 int registers; 98 int registers;
99 int shunt_div; 99 int shunt_div;
100 int bus_voltage_shift; 100 int bus_voltage_shift;
101 int bus_voltage_lsb; /* uV */ 101 int bus_voltage_lsb; /* uV */
102 int power_lsb; /* uW */ 102 int power_lsb_factor;
103}; 103};
104 104
105struct ina2xx_data { 105struct ina2xx_data {
106 const struct ina2xx_config *config; 106 const struct ina2xx_config *config;
107 107
108 long rshunt; 108 long rshunt;
109 long current_lsb_uA;
110 long power_lsb_uW;
109 struct mutex config_lock; 111 struct mutex config_lock;
110 struct regmap *regmap; 112 struct regmap *regmap;
111 113
@@ -115,21 +117,21 @@ struct ina2xx_data {
115static const struct ina2xx_config ina2xx_config[] = { 117static const struct ina2xx_config ina2xx_config[] = {
116 [ina219] = { 118 [ina219] = {
117 .config_default = INA219_CONFIG_DEFAULT, 119 .config_default = INA219_CONFIG_DEFAULT,
118 .calibration_factor = 40960000, 120 .calibration_value = 4096,
119 .registers = INA219_REGISTERS, 121 .registers = INA219_REGISTERS,
120 .shunt_div = 100, 122 .shunt_div = 100,
121 .bus_voltage_shift = 3, 123 .bus_voltage_shift = 3,
122 .bus_voltage_lsb = 4000, 124 .bus_voltage_lsb = 4000,
123 .power_lsb = 20000, 125 .power_lsb_factor = 20,
124 }, 126 },
125 [ina226] = { 127 [ina226] = {
126 .config_default = INA226_CONFIG_DEFAULT, 128 .config_default = INA226_CONFIG_DEFAULT,
127 .calibration_factor = 5120000, 129 .calibration_value = 2048,
128 .registers = INA226_REGISTERS, 130 .registers = INA226_REGISTERS,
129 .shunt_div = 400, 131 .shunt_div = 400,
130 .bus_voltage_shift = 0, 132 .bus_voltage_shift = 0,
131 .bus_voltage_lsb = 1250, 133 .bus_voltage_lsb = 1250,
132 .power_lsb = 25000, 134 .power_lsb_factor = 25,
133 }, 135 },
134}; 136};
135 137
@@ -168,12 +170,16 @@ static u16 ina226_interval_to_reg(int interval)
168 return INA226_SHIFT_AVG(avg_bits); 170 return INA226_SHIFT_AVG(avg_bits);
169} 171}
170 172
173/*
174 * Calibration register is set to the best value, which eliminates
175 * truncation errors on calculating current register in hardware.
176 * According to datasheet (eq. 3) the best values are 2048 for
177 * ina226 and 4096 for ina219. They are hardcoded as calibration_value.
178 */
171static int ina2xx_calibrate(struct ina2xx_data *data) 179static int ina2xx_calibrate(struct ina2xx_data *data)
172{ 180{
173 u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor, 181 return regmap_write(data->regmap, INA2XX_CALIBRATION,
174 data->rshunt); 182 data->config->calibration_value);
175
176 return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
177} 183}
178 184
179/* 185/*
@@ -186,10 +192,6 @@ static int ina2xx_init(struct ina2xx_data *data)
186 if (ret < 0) 192 if (ret < 0)
187 return ret; 193 return ret;
188 194
189 /*
190 * Set current LSB to 1mA, shunt is in uOhms
191 * (equation 13 in datasheet).
192 */
193 return ina2xx_calibrate(data); 195 return ina2xx_calibrate(data);
194} 196}
195 197
@@ -267,15 +269,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
267 val = DIV_ROUND_CLOSEST(val, 1000); 269 val = DIV_ROUND_CLOSEST(val, 1000);
268 break; 270 break;
269 case INA2XX_POWER: 271 case INA2XX_POWER:
270 val = regval * data->config->power_lsb; 272 val = regval * data->power_lsb_uW;
271 break; 273 break;
272 case INA2XX_CURRENT: 274 case INA2XX_CURRENT:
273 /* signed register, LSB=1mA (selected), in mA */ 275 /* signed register, result in mA */
274 val = (s16)regval; 276 val = regval * data->current_lsb_uA;
277 val = DIV_ROUND_CLOSEST(val, 1000);
275 break; 278 break;
276 case INA2XX_CALIBRATION: 279 case INA2XX_CALIBRATION:
277 val = DIV_ROUND_CLOSEST(data->config->calibration_factor, 280 val = regval;
278 regval);
279 break; 281 break;
280 default: 282 default:
281 /* programmer goofed */ 283 /* programmer goofed */
@@ -303,9 +305,32 @@ static ssize_t ina2xx_show_value(struct device *dev,
303 ina2xx_get_value(data, attr->index, regval)); 305 ina2xx_get_value(data, attr->index, regval));
304} 306}
305 307
306static ssize_t ina2xx_set_shunt(struct device *dev, 308/*
307 struct device_attribute *da, 309 * In order to keep calibration register value fixed, the product
308 const char *buf, size_t count) 310 * of current_lsb and shunt_resistor should also be fixed and equal
311 * to shunt_voltage_lsb = 1 / shunt_div multiplied by 10^9 in order
312 * to keep the scale.
313 */
314static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
315{
316 unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
317 data->config->shunt_div);
318 if (val <= 0 || val > dividend)
319 return -EINVAL;
320
321 mutex_lock(&data->config_lock);
322 data->rshunt = val;
323 data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
324 data->power_lsb_uW = data->config->power_lsb_factor *
325 data->current_lsb_uA;
326 mutex_unlock(&data->config_lock);
327
328 return 0;
329}
330
331static ssize_t ina2xx_store_shunt(struct device *dev,
332 struct device_attribute *da,
333 const char *buf, size_t count)
309{ 334{
310 unsigned long val; 335 unsigned long val;
311 int status; 336 int status;
@@ -315,18 +340,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
315 if (status < 0) 340 if (status < 0)
316 return status; 341 return status;
317 342
318 if (val == 0 || 343 status = ina2xx_set_shunt(data, val);
319 /* Values greater than the calibration factor make no sense. */
320 val > data->config->calibration_factor)
321 return -EINVAL;
322
323 mutex_lock(&data->config_lock);
324 data->rshunt = val;
325 status = ina2xx_calibrate(data);
326 mutex_unlock(&data->config_lock);
327 if (status < 0) 344 if (status < 0)
328 return status; 345 return status;
329
330 return count; 346 return count;
331} 347}
332 348
@@ -386,7 +402,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
386 402
387/* shunt resistance */ 403/* shunt resistance */
388static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, 404static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
389 ina2xx_show_value, ina2xx_set_shunt, 405 ina2xx_show_value, ina2xx_store_shunt,
390 INA2XX_CALIBRATION); 406 INA2XX_CALIBRATION);
391 407
392/* update interval (ina226 only) */ 408/* update interval (ina226 only) */
@@ -431,6 +447,7 @@ static int ina2xx_probe(struct i2c_client *client,
431 447
432 /* set the device type */ 448 /* set the device type */
433 data->config = &ina2xx_config[id->driver_data]; 449 data->config = &ina2xx_config[id->driver_data];
450 mutex_init(&data->config_lock);
434 451
435 if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) { 452 if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
436 struct ina2xx_platform_data *pdata = dev_get_platdata(dev); 453 struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
@@ -441,10 +458,7 @@ static int ina2xx_probe(struct i2c_client *client,
441 val = INA2XX_RSHUNT_DEFAULT; 458 val = INA2XX_RSHUNT_DEFAULT;
442 } 459 }
443 460
444 if (val <= 0 || val > data->config->calibration_factor) 461 ina2xx_set_shunt(data, val);
445 return -ENODEV;
446
447 data->rshunt = val;
448 462
449 ina2xx_regmap_config.max_register = data->config->registers; 463 ina2xx_regmap_config.max_register = data->config->registers;
450 464
@@ -460,8 +474,6 @@ static int ina2xx_probe(struct i2c_client *client,
460 return -ENODEV; 474 return -ENODEV;
461 } 475 }
462 476
463 mutex_init(&data->config_lock);
464
465 data->groups[group++] = &ina2xx_group; 477 data->groups[group++] = &ina2xx_group;
466 if (id->driver_data == ina226) 478 if (id->driver_data == ina226)
467 data->groups[group++] = &ina226_group; 479 data->groups[group++] = &ina226_group;
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index d7ebdf8651f5..d3c6115f16b9 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -1390,7 +1390,7 @@ static void nct6775_update_pwm(struct device *dev)
1390 duty_is_dc = data->REG_PWM_MODE[i] && 1390 duty_is_dc = data->REG_PWM_MODE[i] &&
1391 (nct6775_read_value(data, data->REG_PWM_MODE[i]) 1391 (nct6775_read_value(data, data->REG_PWM_MODE[i])
1392 & data->PWM_MODE_MASK[i]); 1392 & data->PWM_MODE_MASK[i]);
1393 data->pwm_mode[i] = duty_is_dc; 1393 data->pwm_mode[i] = !duty_is_dc;
1394 1394
1395 fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]); 1395 fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]);
1396 for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) { 1396 for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) {
@@ -2267,7 +2267,7 @@ show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
2267 struct nct6775_data *data = nct6775_update_device(dev); 2267 struct nct6775_data *data = nct6775_update_device(dev);
2268 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); 2268 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
2269 2269
2270 return sprintf(buf, "%d\n", !data->pwm_mode[sattr->index]); 2270 return sprintf(buf, "%d\n", data->pwm_mode[sattr->index]);
2271} 2271}
2272 2272
2273static ssize_t 2273static ssize_t
@@ -2288,9 +2288,9 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
2288 if (val > 1) 2288 if (val > 1)
2289 return -EINVAL; 2289 return -EINVAL;
2290 2290
2291 /* Setting DC mode is not supported for all chips/channels */ 2291 /* Setting DC mode (0) is not supported for all chips/channels */
2292 if (data->REG_PWM_MODE[nr] == 0) { 2292 if (data->REG_PWM_MODE[nr] == 0) {
2293 if (val) 2293 if (!val)
2294 return -EINVAL; 2294 return -EINVAL;
2295 return count; 2295 return count;
2296 } 2296 }
@@ -2299,7 +2299,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
2299 data->pwm_mode[nr] = val; 2299 data->pwm_mode[nr] = val;
2300 reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]); 2300 reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]);
2301 reg &= ~data->PWM_MODE_MASK[nr]; 2301 reg &= ~data->PWM_MODE_MASK[nr];
2302 if (val) 2302 if (!val)
2303 reg |= data->PWM_MODE_MASK[nr]; 2303 reg |= data->PWM_MODE_MASK[nr];
2304 nct6775_write_value(data, data->REG_PWM_MODE[nr], reg); 2304 nct6775_write_value(data, data->REG_PWM_MODE[nr], reg);
2305 mutex_unlock(&data->update_lock); 2305 mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 188af4c89f40..c3f4c9ef6705 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -95,8 +95,8 @@ static const struct coefficients adm1075_coefficients[] = {
95 [0] = { 27169, 0, -1 }, /* voltage */ 95 [0] = { 27169, 0, -1 }, /* voltage */
96 [1] = { 806, 20475, -1 }, /* current, irange25 */ 96 [1] = { 806, 20475, -1 }, /* current, irange25 */
97 [2] = { 404, 20475, -1 }, /* current, irange50 */ 97 [2] = { 404, 20475, -1 }, /* current, irange50 */
98 [3] = { 0, -1, 8549 }, /* power, irange25 */ 98 [3] = { 8549, 0, -1 }, /* power, irange25 */
99 [4] = { 0, -1, 4279 }, /* power, irange50 */ 99 [4] = { 4279, 0, -1 }, /* power, irange50 */
100}; 100};
101 101
102static const struct coefficients adm1275_coefficients[] = { 102static const struct coefficients adm1275_coefficients[] = {
@@ -141,7 +141,7 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
141 const struct adm1275_data *data = to_adm1275_data(info); 141 const struct adm1275_data *data = to_adm1275_data(info);
142 int ret = 0; 142 int ret = 0;
143 143
144 if (page) 144 if (page > 0)
145 return -ENXIO; 145 return -ENXIO;
146 146
147 switch (reg) { 147 switch (reg) {
@@ -218,7 +218,7 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
218 const struct adm1275_data *data = to_adm1275_data(info); 218 const struct adm1275_data *data = to_adm1275_data(info);
219 int ret; 219 int ret;
220 220
221 if (page) 221 if (page > 0)
222 return -ENXIO; 222 return -ENXIO;
223 223
224 switch (reg) { 224 switch (reg) {
diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
index dd4883a19045..e951f9b87abb 100644
--- a/drivers/hwmon/pmbus/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -45,7 +45,7 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
45{ 45{
46 int ret; 46 int ret;
47 47
48 if (page) 48 if (page > 0)
49 return -ENXIO; 49 return -ENXIO;
50 50
51 switch (reg) { 51 switch (reg) {
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index ba59eaef2e07..d013acf3f83a 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/math64.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/init.h> 25#include <linux/init.h>
25#include <linux/err.h> 26#include <linux/err.h>
@@ -476,8 +477,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
476static long pmbus_reg2data_direct(struct pmbus_data *data, 477static long pmbus_reg2data_direct(struct pmbus_data *data,
477 struct pmbus_sensor *sensor) 478 struct pmbus_sensor *sensor)
478{ 479{
479 long val = (s16) sensor->data; 480 s64 b, val = (s16)sensor->data;
480 long m, b, R; 481 s32 m, R;
481 482
482 m = data->info->m[sensor->class]; 483 m = data->info->m[sensor->class];
483 b = data->info->b[sensor->class]; 484 b = data->info->b[sensor->class];
@@ -505,11 +506,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
505 R--; 506 R--;
506 } 507 }
507 while (R < 0) { 508 while (R < 0) {
508 val = DIV_ROUND_CLOSEST(val, 10); 509 val = div_s64(val + 5LL, 10L); /* round closest */
509 R++; 510 R++;
510 } 511 }
511 512
512 return (val - b) / m; 513 val = div_s64(val - b, m);
514 return clamp_val(val, LONG_MIN, LONG_MAX);
513} 515}
514 516
515/* 517/*
@@ -629,7 +631,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
629static u16 pmbus_data2reg_direct(struct pmbus_data *data, 631static u16 pmbus_data2reg_direct(struct pmbus_data *data,
630 struct pmbus_sensor *sensor, long val) 632 struct pmbus_sensor *sensor, long val)
631{ 633{
632 long m, b, R; 634 s64 b, val64 = val;
635 s32 m, R;
633 636
634 m = data->info->m[sensor->class]; 637 m = data->info->m[sensor->class];
635 b = data->info->b[sensor->class]; 638 b = data->info->b[sensor->class];
@@ -646,18 +649,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
646 R -= 3; /* Adjust R and b for data in milli-units */ 649 R -= 3; /* Adjust R and b for data in milli-units */
647 b *= 1000; 650 b *= 1000;
648 } 651 }
649 val = val * m + b; 652 val64 = val64 * m + b;
650 653
651 while (R > 0) { 654 while (R > 0) {
652 val *= 10; 655 val64 *= 10;
653 R--; 656 R--;
654 } 657 }
655 while (R < 0) { 658 while (R < 0) {
656 val = DIV_ROUND_CLOSEST(val, 10); 659 val64 = div_s64(val64 + 5LL, 10L); /* round closest */
657 R++; 660 R++;
658 } 661 }
659 662
660 return val; 663 return (u16)clamp_val(val64, S16_MIN, S16_MAX);
661} 664}
662 665
663static u16 pmbus_data2reg_vid(struct pmbus_data *data, 666static u16 pmbus_data2reg_vid(struct pmbus_data *data,
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 4e471e2e9d89..b71793ba2483 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -46,8 +46,11 @@
46#define TPIU_ITATBCTR0 0xef8 46#define TPIU_ITATBCTR0 0xef8
47 47
48/** register definition **/ 48/** register definition **/
49/* FFSR - 0x300 */
50#define FFSR_FT_STOPPED BIT(1)
49/* FFCR - 0x304 */ 51/* FFCR - 0x304 */
50#define FFCR_FON_MAN BIT(6) 52#define FFCR_FON_MAN BIT(6)
53#define FFCR_STOP_FI BIT(12)
51 54
52/** 55/**
53 * @base: memory mapped base address for this component. 56 * @base: memory mapped base address for this component.
@@ -85,10 +88,14 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
85{ 88{
86 CS_UNLOCK(drvdata->base); 89 CS_UNLOCK(drvdata->base);
87 90
88 /* Clear formatter controle reg. */ 91 /* Clear formatter and stop on flush */
89 writel_relaxed(0x0, drvdata->base + TPIU_FFCR); 92 writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR);
90 /* Generate manual flush */ 93 /* Generate manual flush */
91 writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR); 94 writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
95 /* Wait for flush to complete */
96 coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0);
97 /* Wait for formatter to stop */
98 coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1);
92 99
93 CS_LOCK(drvdata->base); 100 CS_LOCK(drvdata->base);
94} 101}
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index b68da1888fd5..d2c0158d4b78 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -149,7 +149,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
149 continue; 149 continue;
150 150
151 /* The local out port number */ 151 /* The local out port number */
152 pdata->outports[i] = endpoint.id; 152 pdata->outports[i] = endpoint.port;
153 153
154 /* 154 /*
155 * Get a handle on the remote port and parent 155 * Get a handle on the remote port and parent
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index cb07713aceda..129fcf1c06d9 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -26,6 +26,7 @@
26#include <linux/stm.h> 26#include <linux/stm.h>
27#include <linux/fs.h> 27#include <linux/fs.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/vmalloc.h>
29#include "stm.h" 30#include "stm.h"
30 31
31#include <uapi/linux/stm.h> 32#include <uapi/linux/stm.h>
@@ -650,7 +651,7 @@ static void stm_device_release(struct device *dev)
650{ 651{
651 struct stm_device *stm = to_stm_device(dev); 652 struct stm_device *stm = to_stm_device(dev);
652 653
653 kfree(stm); 654 vfree(stm);
654} 655}
655 656
656int stm_register_device(struct device *parent, struct stm_data *stm_data, 657int stm_register_device(struct device *parent, struct stm_data *stm_data,
@@ -667,7 +668,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
667 return -EINVAL; 668 return -EINVAL;
668 669
669 nmasters = stm_data->sw_end - stm_data->sw_start; 670 nmasters = stm_data->sw_end - stm_data->sw_start;
670 stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL); 671 stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
671 if (!stm) 672 if (!stm)
672 return -ENOMEM; 673 return -ENOMEM;
673 674
@@ -709,7 +710,7 @@ err_device:
709 /* matches device_initialize() above */ 710 /* matches device_initialize() above */
710 put_device(&stm->dev); 711 put_device(&stm->dev);
711err_free: 712err_free:
712 kfree(stm); 713 vfree(stm);
713 714
714 return err; 715 return err;
715} 716}
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index d4d853680ae4..a4abf7dc9576 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -382,6 +382,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
382 goto err_desc; 382 goto err_desc;
383 } 383 }
384 384
385 reinit_completion(&dma->cmd_complete);
385 txdesc->callback = i2c_imx_dma_callback; 386 txdesc->callback = i2c_imx_dma_callback;
386 txdesc->callback_param = i2c_imx; 387 txdesc->callback_param = i2c_imx;
387 if (dma_submit_error(dmaengine_submit(txdesc))) { 388 if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -631,7 +632,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
631 * The first byte must be transmitted by the CPU. 632 * The first byte must be transmitted by the CPU.
632 */ 633 */
633 imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR); 634 imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
634 reinit_completion(&i2c_imx->dma->cmd_complete);
635 time_left = wait_for_completion_timeout( 635 time_left = wait_for_completion_timeout(
636 &i2c_imx->dma->cmd_complete, 636 &i2c_imx->dma->cmd_complete,
637 msecs_to_jiffies(DMA_TIMEOUT)); 637 msecs_to_jiffies(DMA_TIMEOUT));
@@ -690,7 +690,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
690 if (result) 690 if (result)
691 return result; 691 return result;
692 692
693 reinit_completion(&i2c_imx->dma->cmd_complete);
694 time_left = wait_for_completion_timeout( 693 time_left = wait_for_completion_timeout(
695 &i2c_imx->dma->cmd_complete, 694 &i2c_imx->dma->cmd_complete,
696 msecs_to_jiffies(DMA_TIMEOUT)); 695 msecs_to_jiffies(DMA_TIMEOUT));
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 1111cb966a44..fa2b58142cde 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -587,7 +587,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
587 587
588 /* unmap the data buffer */ 588 /* unmap the data buffer */
589 if (dma_size != 0) 589 if (dma_size != 0)
590 dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction); 590 dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
591 591
592 if (unlikely(!time_left)) { 592 if (unlikely(!time_left)) {
593 dev_err(dev, "completion wait timed out\n"); 593 dev_err(dev, "completion wait timed out\n");
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 43207f52e5a3..332d32c53c41 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -856,12 +856,16 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
856 */ 856 */
857 if (of_device_is_compatible(np, "marvell,mv78230-i2c")) { 857 if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
858 drv_data->offload_enabled = true; 858 drv_data->offload_enabled = true;
859 drv_data->errata_delay = true; 859 /* The delay is only needed in standard mode (100kHz) */
860 if (bus_freq <= 100000)
861 drv_data->errata_delay = true;
860 } 862 }
861 863
862 if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) { 864 if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
863 drv_data->offload_enabled = false; 865 drv_data->offload_enabled = false;
864 drv_data->errata_delay = true; 866 /* The delay is only needed in standard mode (100kHz) */
867 if (bus_freq <= 100000)
868 drv_data->errata_delay = true;
865 } 869 }
866 870
867 if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c")) 871 if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 599c0d7bd906..dfe1a53ce4ad 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -33,7 +33,6 @@
33#include <linux/platform_device.h> 33#include <linux/platform_device.h>
34#include <linux/pm_runtime.h> 34#include <linux/pm_runtime.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/spinlock.h>
37 36
38/* register offsets */ 37/* register offsets */
39#define ICSCR 0x00 /* slave ctrl */ 38#define ICSCR 0x00 /* slave ctrl */
@@ -84,6 +83,7 @@
84 83
85#define RCAR_BUS_PHASE_START (MDBS | MIE | ESG) 84#define RCAR_BUS_PHASE_START (MDBS | MIE | ESG)
86#define RCAR_BUS_PHASE_DATA (MDBS | MIE) 85#define RCAR_BUS_PHASE_DATA (MDBS | MIE)
86#define RCAR_BUS_MASK_DATA (~(ESG | FSB) & 0xFF)
87#define RCAR_BUS_PHASE_STOP (MDBS | MIE | FSB) 87#define RCAR_BUS_PHASE_STOP (MDBS | MIE | FSB)
88 88
89#define RCAR_IRQ_SEND (MNR | MAL | MST | MAT | MDE) 89#define RCAR_IRQ_SEND (MNR | MAL | MST | MAT | MDE)
@@ -94,7 +94,6 @@
94#define RCAR_IRQ_ACK_RECV (~(MAT | MDR) & 0xFF) 94#define RCAR_IRQ_ACK_RECV (~(MAT | MDR) & 0xFF)
95 95
96#define ID_LAST_MSG (1 << 0) 96#define ID_LAST_MSG (1 << 0)
97#define ID_IOERROR (1 << 1)
98#define ID_DONE (1 << 2) 97#define ID_DONE (1 << 2)
99#define ID_ARBLOST (1 << 3) 98#define ID_ARBLOST (1 << 3)
100#define ID_NACK (1 << 4) 99#define ID_NACK (1 << 4)
@@ -108,10 +107,10 @@ enum rcar_i2c_type {
108struct rcar_i2c_priv { 107struct rcar_i2c_priv {
109 void __iomem *io; 108 void __iomem *io;
110 struct i2c_adapter adap; 109 struct i2c_adapter adap;
111 struct i2c_msg *msg; 110 struct i2c_msg *msg;
111 int msgs_left;
112 struct clk *clk; 112 struct clk *clk;
113 113
114 spinlock_t lock;
115 wait_queue_head_t wait; 114 wait_queue_head_t wait;
116 115
117 int pos; 116 int pos;
@@ -144,9 +143,10 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
144{ 143{
145 /* reset master mode */ 144 /* reset master mode */
146 rcar_i2c_write(priv, ICMIER, 0); 145 rcar_i2c_write(priv, ICMIER, 0);
147 rcar_i2c_write(priv, ICMCR, 0); 146 rcar_i2c_write(priv, ICMCR, MDBS);
148 rcar_i2c_write(priv, ICMSR, 0); 147 rcar_i2c_write(priv, ICMSR, 0);
149 rcar_i2c_write(priv, ICMAR, 0); 148 /* start clock */
149 rcar_i2c_write(priv, ICCCR, priv->icccr);
150} 150}
151 151
152static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv) 152static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
@@ -257,16 +257,28 @@ static void rcar_i2c_prepare_msg(struct rcar_i2c_priv *priv)
257{ 257{
258 int read = !!rcar_i2c_is_recv(priv); 258 int read = !!rcar_i2c_is_recv(priv);
259 259
260 priv->pos = 0;
261 priv->flags = 0;
262 if (priv->msgs_left == 1)
263 rcar_i2c_flags_set(priv, ID_LAST_MSG);
264
260 rcar_i2c_write(priv, ICMAR, (priv->msg->addr << 1) | read); 265 rcar_i2c_write(priv, ICMAR, (priv->msg->addr << 1) | read);
261 rcar_i2c_write(priv, ICMSR, 0); 266 rcar_i2c_write(priv, ICMSR, 0);
262 rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START); 267 rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
263 rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND); 268 rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND);
264} 269}
265 270
271static void rcar_i2c_next_msg(struct rcar_i2c_priv *priv)
272{
273 priv->msg++;
274 priv->msgs_left--;
275 rcar_i2c_prepare_msg(priv);
276}
277
266/* 278/*
267 * interrupt functions 279 * interrupt functions
268 */ 280 */
269static int rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr) 281static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
270{ 282{
271 struct i2c_msg *msg = priv->msg; 283 struct i2c_msg *msg = priv->msg;
272 284
@@ -276,14 +288,7 @@ static int rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
276 * Do nothing 288 * Do nothing
277 */ 289 */
278 if (!(msr & MDE)) 290 if (!(msr & MDE))
279 return 0; 291 return;
280
281 /*
282 * If address transfer phase finished,
283 * goto data phase.
284 */
285 if (msr & MAT)
286 rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
287 292
288 if (priv->pos < msg->len) { 293 if (priv->pos < msg->len) {
289 /* 294 /*
@@ -305,29 +310,23 @@ static int rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
305 * [ICRXTX] -> [SHIFT] -> [I2C bus] 310 * [ICRXTX] -> [SHIFT] -> [I2C bus]
306 */ 311 */
307 312
308 if (priv->flags & ID_LAST_MSG) 313 if (priv->flags & ID_LAST_MSG) {
309 /* 314 /*
310 * If current msg is the _LAST_ msg, 315 * If current msg is the _LAST_ msg,
311 * prepare stop condition here. 316 * prepare stop condition here.
312 * ID_DONE will be set on STOP irq. 317 * ID_DONE will be set on STOP irq.
313 */ 318 */
314 rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP); 319 rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
315 else 320 } else {
316 /* 321 rcar_i2c_next_msg(priv);
317 * If current msg is _NOT_ last msg, 322 return;
318 * it doesn't call stop phase. 323 }
319 * thus, there is no STOP irq.
320 * return ID_DONE here.
321 */
322 return ID_DONE;
323 } 324 }
324 325
325 rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_SEND); 326 rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_SEND);
326
327 return 0;
328} 327}
329 328
330static int rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr) 329static void rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
331{ 330{
332 struct i2c_msg *msg = priv->msg; 331 struct i2c_msg *msg = priv->msg;
333 332
@@ -337,14 +336,10 @@ static int rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
337 * Do nothing 336 * Do nothing
338 */ 337 */
339 if (!(msr & MDR)) 338 if (!(msr & MDR))
340 return 0; 339 return;
341 340
342 if (msr & MAT) { 341 if (msr & MAT) {
343 /* 342 /* Address transfer phase finished, but no data at this point. */
344 * Address transfer phase finished,
345 * but, there is no data at this point.
346 * Do nothing.
347 */
348 } else if (priv->pos < msg->len) { 343 } else if (priv->pos < msg->len) {
349 /* 344 /*
350 * get received data 345 * get received data
@@ -360,12 +355,11 @@ static int rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
360 */ 355 */
361 if (priv->pos + 1 >= msg->len) 356 if (priv->pos + 1 >= msg->len)
362 rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP); 357 rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
363 else
364 rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
365
366 rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_RECV);
367 358
368 return 0; 359 if (priv->pos == msg->len && !(priv->flags & ID_LAST_MSG))
360 rcar_i2c_next_msg(priv);
361 else
362 rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_RECV);
369} 363}
370 364
371static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) 365static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
@@ -426,22 +420,21 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
426static irqreturn_t rcar_i2c_irq(int irq, void *ptr) 420static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
427{ 421{
428 struct rcar_i2c_priv *priv = ptr; 422 struct rcar_i2c_priv *priv = ptr;
429 irqreturn_t result = IRQ_HANDLED; 423 u32 msr, val;
430 u32 msr;
431 424
432 /*-------------- spin lock -----------------*/ 425 /* Clear START or STOP as soon as we can */
433 spin_lock(&priv->lock); 426 val = rcar_i2c_read(priv, ICMCR);
434 427 rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA);
435 if (rcar_i2c_slave_irq(priv))
436 goto exit;
437 428
438 msr = rcar_i2c_read(priv, ICMSR); 429 msr = rcar_i2c_read(priv, ICMSR);
439 430
440 /* Only handle interrupts that are currently enabled */ 431 /* Only handle interrupts that are currently enabled */
441 msr &= rcar_i2c_read(priv, ICMIER); 432 msr &= rcar_i2c_read(priv, ICMIER);
442 if (!msr) { 433 if (!msr) {
443 result = IRQ_NONE; 434 if (rcar_i2c_slave_irq(priv))
444 goto exit; 435 return IRQ_HANDLED;
436
437 return IRQ_NONE;
445 } 438 }
446 439
447 /* Arbitration lost */ 440 /* Arbitration lost */
@@ -452,8 +445,7 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
452 445
453 /* Nack */ 446 /* Nack */
454 if (msr & MNR) { 447 if (msr & MNR) {
455 /* go to stop phase */ 448 /* HW automatically sends STOP after received NACK */
456 rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
457 rcar_i2c_write(priv, ICMIER, RCAR_IRQ_STOP); 449 rcar_i2c_write(priv, ICMIER, RCAR_IRQ_STOP);
458 rcar_i2c_flags_set(priv, ID_NACK); 450 rcar_i2c_flags_set(priv, ID_NACK);
459 goto out; 451 goto out;
@@ -461,14 +453,15 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
461 453
462 /* Stop */ 454 /* Stop */
463 if (msr & MST) { 455 if (msr & MST) {
456 priv->msgs_left--; /* The last message also made it */
464 rcar_i2c_flags_set(priv, ID_DONE); 457 rcar_i2c_flags_set(priv, ID_DONE);
465 goto out; 458 goto out;
466 } 459 }
467 460
468 if (rcar_i2c_is_recv(priv)) 461 if (rcar_i2c_is_recv(priv))
469 rcar_i2c_flags_set(priv, rcar_i2c_irq_recv(priv, msr)); 462 rcar_i2c_irq_recv(priv, msr);
470 else 463 else
471 rcar_i2c_flags_set(priv, rcar_i2c_irq_send(priv, msr)); 464 rcar_i2c_irq_send(priv, msr);
472 465
473out: 466out:
474 if (rcar_i2c_flags_has(priv, ID_DONE)) { 467 if (rcar_i2c_flags_has(priv, ID_DONE)) {
@@ -477,11 +470,7 @@ out:
477 wake_up(&priv->wait); 470 wake_up(&priv->wait);
478 } 471 }
479 472
480exit: 473 return IRQ_HANDLED;
481 spin_unlock(&priv->lock);
482 /*-------------- spin unlock -----------------*/
483
484 return result;
485} 474}
486 475
487static int rcar_i2c_master_xfer(struct i2c_adapter *adap, 476static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
@@ -490,21 +479,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
490{ 479{
491 struct rcar_i2c_priv *priv = i2c_get_adapdata(adap); 480 struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
492 struct device *dev = rcar_i2c_priv_to_dev(priv); 481 struct device *dev = rcar_i2c_priv_to_dev(priv);
493 unsigned long flags;
494 int i, ret; 482 int i, ret;
495 long timeout; 483 long time_left;
496 484
497 pm_runtime_get_sync(dev); 485 pm_runtime_get_sync(dev);
498 486
499 /*-------------- spin lock -----------------*/
500 spin_lock_irqsave(&priv->lock, flags);
501
502 rcar_i2c_init(priv); 487 rcar_i2c_init(priv);
503 /* start clock */
504 rcar_i2c_write(priv, ICCCR, priv->icccr);
505
506 spin_unlock_irqrestore(&priv->lock, flags);
507 /*-------------- spin unlock -----------------*/
508 488
509 ret = rcar_i2c_bus_barrier(priv); 489 ret = rcar_i2c_bus_barrier(priv);
510 if (ret < 0) 490 if (ret < 0)
@@ -514,48 +494,28 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
514 /* This HW can't send STOP after address phase */ 494 /* This HW can't send STOP after address phase */
515 if (msgs[i].len == 0) { 495 if (msgs[i].len == 0) {
516 ret = -EOPNOTSUPP; 496 ret = -EOPNOTSUPP;
517 break; 497 goto out;
518 }
519
520 /*-------------- spin lock -----------------*/
521 spin_lock_irqsave(&priv->lock, flags);
522
523 /* init each data */
524 priv->msg = &msgs[i];
525 priv->pos = 0;
526 priv->flags = 0;
527 if (i == num - 1)
528 rcar_i2c_flags_set(priv, ID_LAST_MSG);
529
530 rcar_i2c_prepare_msg(priv);
531
532 spin_unlock_irqrestore(&priv->lock, flags);
533 /*-------------- spin unlock -----------------*/
534
535 timeout = wait_event_timeout(priv->wait,
536 rcar_i2c_flags_has(priv, ID_DONE),
537 adap->timeout);
538 if (!timeout) {
539 ret = -ETIMEDOUT;
540 break;
541 }
542
543 if (rcar_i2c_flags_has(priv, ID_NACK)) {
544 ret = -ENXIO;
545 break;
546 }
547
548 if (rcar_i2c_flags_has(priv, ID_ARBLOST)) {
549 ret = -EAGAIN;
550 break;
551 }
552
553 if (rcar_i2c_flags_has(priv, ID_IOERROR)) {
554 ret = -EIO;
555 break;
556 } 498 }
499 }
557 500
558 ret = i + 1; /* The number of transfer */ 501 /* init data */
502 priv->msg = msgs;
503 priv->msgs_left = num;
504
505 rcar_i2c_prepare_msg(priv);
506
507 time_left = wait_event_timeout(priv->wait,
508 rcar_i2c_flags_has(priv, ID_DONE),
509 num * adap->timeout);
510 if (!time_left) {
511 rcar_i2c_init(priv);
512 ret = -ETIMEDOUT;
513 } else if (rcar_i2c_flags_has(priv, ID_NACK)) {
514 ret = -ENXIO;
515 } else if (rcar_i2c_flags_has(priv, ID_ARBLOST)) {
516 ret = -EAGAIN;
517 } else {
518 ret = num - priv->msgs_left; /* The number of transfer */
559 } 519 }
560out: 520out:
561 pm_runtime_put(dev); 521 pm_runtime_put(dev);
@@ -650,23 +610,26 @@ static int rcar_i2c_probe(struct platform_device *pdev)
650 return PTR_ERR(priv->clk); 610 return PTR_ERR(priv->clk);
651 } 611 }
652 612
613 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
614 priv->io = devm_ioremap_resource(dev, res);
615 if (IS_ERR(priv->io))
616 return PTR_ERR(priv->io);
617
653 bus_speed = 100000; /* default 100 kHz */ 618 bus_speed = 100000; /* default 100 kHz */
654 of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed); 619 of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed);
655 620
656 priv->devtype = (enum rcar_i2c_type)of_match_device(rcar_i2c_dt_ids, dev)->data; 621 priv->devtype = (enum rcar_i2c_type)of_match_device(rcar_i2c_dt_ids, dev)->data;
657 622
623 pm_runtime_enable(dev);
624 pm_runtime_get_sync(dev);
658 ret = rcar_i2c_clock_calculate(priv, bus_speed, dev); 625 ret = rcar_i2c_clock_calculate(priv, bus_speed, dev);
659 if (ret < 0) 626 if (ret < 0)
660 return ret; 627 goto out_pm_put;
661 628
662 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 629 pm_runtime_put(dev);
663 priv->io = devm_ioremap_resource(dev, res);
664 if (IS_ERR(priv->io))
665 return PTR_ERR(priv->io);
666 630
667 irq = platform_get_irq(pdev, 0); 631 irq = platform_get_irq(pdev, 0);
668 init_waitqueue_head(&priv->wait); 632 init_waitqueue_head(&priv->wait);
669 spin_lock_init(&priv->lock);
670 633
671 adap = &priv->adap; 634 adap = &priv->adap;
672 adap->nr = pdev->id; 635 adap->nr = pdev->id;
@@ -682,22 +645,26 @@ static int rcar_i2c_probe(struct platform_device *pdev)
682 dev_name(dev), priv); 645 dev_name(dev), priv);
683 if (ret < 0) { 646 if (ret < 0) {
684 dev_err(dev, "cannot get irq %d\n", irq); 647 dev_err(dev, "cannot get irq %d\n", irq);
685 return ret; 648 goto out_pm_disable;
686 } 649 }
687 650
688 pm_runtime_enable(dev);
689 platform_set_drvdata(pdev, priv); 651 platform_set_drvdata(pdev, priv);
690 652
691 ret = i2c_add_numbered_adapter(adap); 653 ret = i2c_add_numbered_adapter(adap);
692 if (ret < 0) { 654 if (ret < 0) {
693 dev_err(dev, "reg adap failed: %d\n", ret); 655 dev_err(dev, "reg adap failed: %d\n", ret);
694 pm_runtime_disable(dev); 656 goto out_pm_disable;
695 return ret;
696 } 657 }
697 658
698 dev_info(dev, "probed\n"); 659 dev_info(dev, "probed\n");
699 660
700 return 0; 661 return 0;
662
663 out_pm_put:
664 pm_runtime_put(dev);
665 out_pm_disable:
666 pm_runtime_disable(dev);
667 return ret;
701} 668}
702 669
703static int rcar_i2c_remove(struct platform_device *pdev) 670static int rcar_i2c_remove(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index dfc98df7b1b6..7aa7b9cb6203 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -18,6 +18,9 @@
18#define ACPI_SMBUS_HC_CLASS "smbus" 18#define ACPI_SMBUS_HC_CLASS "smbus"
19#define ACPI_SMBUS_HC_DEVICE_NAME "cmi" 19#define ACPI_SMBUS_HC_DEVICE_NAME "cmi"
20 20
21/* SMBUS HID definition as supported by Microsoft Windows */
22#define ACPI_SMBUS_MS_HID "SMB0001"
23
21ACPI_MODULE_NAME("smbus_cmi"); 24ACPI_MODULE_NAME("smbus_cmi");
22 25
23struct smbus_methods_t { 26struct smbus_methods_t {
@@ -51,6 +54,7 @@ static const struct smbus_methods_t ibm_smbus_methods = {
51static const struct acpi_device_id acpi_smbus_cmi_ids[] = { 54static const struct acpi_device_id acpi_smbus_cmi_ids[] = {
52 {"SMBUS01", (kernel_ulong_t)&smbus_methods}, 55 {"SMBUS01", (kernel_ulong_t)&smbus_methods},
53 {ACPI_SMBUS_IBM_HID, (kernel_ulong_t)&ibm_smbus_methods}, 56 {ACPI_SMBUS_IBM_HID, (kernel_ulong_t)&ibm_smbus_methods},
57 {ACPI_SMBUS_MS_HID, (kernel_ulong_t)&smbus_methods},
54 {"", 0} 58 {"", 0}
55}; 59};
56MODULE_DEVICE_TABLE(acpi, acpi_smbus_cmi_ids); 60MODULE_DEVICE_TABLE(acpi, acpi_smbus_cmi_ids);
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index 90e322959303..42c25aed671d 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -56,9 +56,7 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num);
56 * The board info passed can safely be __initdata, but be careful of embedded 56 * The board info passed can safely be __initdata, but be careful of embedded
57 * pointers (for platform_data, functions, etc) since that won't be copied. 57 * pointers (for platform_data, functions, etc) since that won't be copied.
58 */ 58 */
59int __init 59int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
60i2c_register_board_info(int busnum,
61 struct i2c_board_info const *info, unsigned len)
62{ 60{
63 int status; 61 int status;
64 62
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index ef907fd5ba98..08a21d635d0d 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1593,6 +1593,8 @@ static int idecd_open(struct block_device *bdev, fmode_t mode)
1593 struct cdrom_info *info; 1593 struct cdrom_info *info;
1594 int rc = -ENXIO; 1594 int rc = -ENXIO;
1595 1595
1596 check_disk_change(bdev);
1597
1596 mutex_lock(&ide_cd_mutex); 1598 mutex_lock(&ide_cd_mutex);
1597 info = ide_cd_get(bdev->bd_disk); 1599 info = ide_cd_get(bdev->bd_disk);
1598 if (!info) 1600 if (!info)
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
index 4732dfc15447..331adc509f3a 100644
--- a/drivers/idle/Kconfig
+++ b/drivers/idle/Kconfig
@@ -17,6 +17,7 @@ config I7300_IDLE_IOAT_CHANNEL
17 17
18config I7300_IDLE 18config I7300_IDLE
19 tristate "Intel chipset idle memory power saving driver" 19 tristate "Intel chipset idle memory power saving driver"
20 depends on PCI
20 select I7300_IDLE_IOAT_CHANNEL 21 select I7300_IDLE_IOAT_CHANNEL
21 help 22 help
22 Enable memory power savings when idle with certain Intel server 23 Enable memory power savings when idle with certain Intel server
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 197a08b4e2f3..b4136d3bf6b7 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -628,6 +628,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
628int st_accel_common_probe(struct iio_dev *indio_dev) 628int st_accel_common_probe(struct iio_dev *indio_dev)
629{ 629{
630 struct st_sensor_data *adata = iio_priv(indio_dev); 630 struct st_sensor_data *adata = iio_priv(indio_dev);
631 struct st_sensors_platform_data *pdata =
632 (struct st_sensors_platform_data *)adata->dev->platform_data;
631 int irq = adata->get_irq_data_ready(indio_dev); 633 int irq = adata->get_irq_data_ready(indio_dev);
632 int err; 634 int err;
633 635
@@ -652,11 +654,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
652 &adata->sensor_settings->fs.fs_avl[0]; 654 &adata->sensor_settings->fs.fs_avl[0];
653 adata->odr = adata->sensor_settings->odr.odr_avl[0].hz; 655 adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
654 656
655 if (!adata->dev->platform_data) 657 if (!pdata)
656 adata->dev->platform_data = 658 pdata = (struct st_sensors_platform_data *)&default_accel_pdata;
657 (struct st_sensors_platform_data *)&default_accel_pdata;
658 659
659 err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data); 660 err = st_sensors_init_sensor(indio_dev, pdata);
660 if (err < 0) 661 if (err < 0)
661 return err; 662 return err;
662 663
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index f684fe31f832..64799ad7ebad 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -44,7 +44,7 @@ struct axp288_adc_info {
44 struct regmap *regmap; 44 struct regmap *regmap;
45}; 45};
46 46
47static const struct iio_chan_spec const axp288_adc_channels[] = { 47static const struct iio_chan_spec axp288_adc_channels[] = {
48 { 48 {
49 .indexed = 1, 49 .indexed = 1,
50 .type = IIO_TEMP, 50 .type = IIO_TEMP,
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index c73c6c62a6ac..7401f102dff4 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -121,10 +121,21 @@ static int hi8435_write_event_config(struct iio_dev *idev,
121 enum iio_event_direction dir, int state) 121 enum iio_event_direction dir, int state)
122{ 122{
123 struct hi8435_priv *priv = iio_priv(idev); 123 struct hi8435_priv *priv = iio_priv(idev);
124 int ret;
125 u32 tmp;
126
127 if (state) {
128 ret = hi8435_readl(priv, HI8435_SO31_0_REG, &tmp);
129 if (ret < 0)
130 return ret;
131 if (tmp & BIT(chan->channel))
132 priv->event_prev_val |= BIT(chan->channel);
133 else
134 priv->event_prev_val &= ~BIT(chan->channel);
124 135
125 priv->event_scan_mask &= ~BIT(chan->channel);
126 if (state)
127 priv->event_scan_mask |= BIT(chan->channel); 136 priv->event_scan_mask |= BIT(chan->channel);
137 } else
138 priv->event_scan_mask &= ~BIT(chan->channel);
128 139
129 return 0; 140 return 0;
130} 141}
@@ -442,13 +453,15 @@ static int hi8435_probe(struct spi_device *spi)
442 priv->spi = spi; 453 priv->spi = spi;
443 454
444 reset_gpio = devm_gpiod_get(&spi->dev, NULL, GPIOD_OUT_LOW); 455 reset_gpio = devm_gpiod_get(&spi->dev, NULL, GPIOD_OUT_LOW);
445 if (IS_ERR(reset_gpio)) { 456 if (!IS_ERR(reset_gpio)) {
446 /* chip s/w reset if h/w reset failed */ 457 /* need >=100ns low pulse to reset chip */
458 gpiod_set_raw_value_cansleep(reset_gpio, 0);
459 udelay(1);
460 gpiod_set_raw_value_cansleep(reset_gpio, 1);
461 } else {
462 /* s/w reset chip if h/w reset is not available */
447 hi8435_writeb(priv, HI8435_CTRL_REG, HI8435_CTRL_SRST); 463 hi8435_writeb(priv, HI8435_CTRL_REG, HI8435_CTRL_SRST);
448 hi8435_writeb(priv, HI8435_CTRL_REG, 0); 464 hi8435_writeb(priv, HI8435_CTRL_REG, 0);
449 } else {
450 udelay(5);
451 gpiod_set_value(reset_gpio, 1);
452 } 465 }
453 466
454 spi_set_drvdata(spi, idev); 467 spi_set_drvdata(spi, idev);
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index c5b999f0c519..e44181f9eb36 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -19,11 +19,18 @@ struct iio_kfifo {
19#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer) 19#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
20 20
21static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, 21static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
22 int bytes_per_datum, int length) 22 size_t bytes_per_datum, unsigned int length)
23{ 23{
24 if ((length == 0) || (bytes_per_datum == 0)) 24 if ((length == 0) || (bytes_per_datum == 0))
25 return -EINVAL; 25 return -EINVAL;
26 26
27 /*
28 * Make sure we don't overflow an unsigned int after kfifo rounds up to
29 * the next power of 2.
30 */
31 if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
32 return -EINVAL;
33
27 return __kfifo_alloc((struct __kfifo *)&buf->kf, length, 34 return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
28 bytes_per_datum, GFP_KERNEL); 35 bytes_per_datum, GFP_KERNEL);
29} 36}
@@ -64,7 +71,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
64 return 0; 71 return 0;
65} 72}
66 73
67static int iio_set_length_kfifo(struct iio_buffer *r, int length) 74static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
68{ 75{
69 /* Avoid an invalid state */ 76 /* Avoid an invalid state */
70 if (length < 2) 77 if (length < 2)
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index f53e9a803a0e..93b99bd93738 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -47,6 +47,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
47 if (adis->trig == NULL) 47 if (adis->trig == NULL)
48 return -ENOMEM; 48 return -ENOMEM;
49 49
50 adis->trig->dev.parent = &adis->spi->dev;
51 adis->trig->ops = &adis_trigger_ops;
52 iio_trigger_set_drvdata(adis->trig, adis);
53
50 ret = request_irq(adis->spi->irq, 54 ret = request_irq(adis->spi->irq,
51 &iio_trigger_generic_data_rdy_poll, 55 &iio_trigger_generic_data_rdy_poll,
52 IRQF_TRIGGER_RISING, 56 IRQF_TRIGGER_RISING,
@@ -55,9 +59,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
55 if (ret) 59 if (ret)
56 goto error_free_trig; 60 goto error_free_trig;
57 61
58 adis->trig->dev.parent = &adis->spi->dev;
59 adis->trig->ops = &adis_trigger_ops;
60 iio_trigger_set_drvdata(adis->trig, adis);
61 ret = iio_trigger_register(adis->trig); 62 ret = iio_trigger_register(adis->trig);
62 63
63 indio_dev->trig = iio_trigger_get(adis->trig); 64 indio_dev->trig = iio_trigger_get(adis->trig);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 32bb036069eb..961afb5588be 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -174,7 +174,7 @@ unsigned int iio_buffer_poll(struct file *filp,
174 struct iio_dev *indio_dev = filp->private_data; 174 struct iio_dev *indio_dev = filp->private_data;
175 struct iio_buffer *rb = indio_dev->buffer; 175 struct iio_buffer *rb = indio_dev->buffer;
176 176
177 if (!indio_dev->info) 177 if (!indio_dev->info || rb == NULL)
178 return 0; 178 return 0;
179 179
180 poll_wait(filp, &rb->pollq, wait); 180 poll_wait(filp, &rb->pollq, wait);
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 6325e7dc8e03..f3cb4dc05391 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -48,8 +48,6 @@ static int st_magn_spi_remove(struct spi_device *spi)
48} 48}
49 49
50static const struct spi_device_id st_magn_id_table[] = { 50static const struct spi_device_id st_magn_id_table[] = {
51 { LSM303DLHC_MAGN_DEV_NAME },
52 { LSM303DLM_MAGN_DEV_NAME },
53 { LIS3MDL_MAGN_DEV_NAME }, 51 { LIS3MDL_MAGN_DEV_NAME },
54 { LSM303AGR_MAGN_DEV_NAME }, 52 { LSM303AGR_MAGN_DEV_NAME },
55 {}, 53 {},
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 5056bd68573f..ba282ff3892d 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -436,6 +436,8 @@ static const struct iio_trigger_ops st_press_trigger_ops = {
436int st_press_common_probe(struct iio_dev *indio_dev) 436int st_press_common_probe(struct iio_dev *indio_dev)
437{ 437{
438 struct st_sensor_data *press_data = iio_priv(indio_dev); 438 struct st_sensor_data *press_data = iio_priv(indio_dev);
439 struct st_sensors_platform_data *pdata =
440 (struct st_sensors_platform_data *)press_data->dev->platform_data;
439 int irq = press_data->get_irq_data_ready(indio_dev); 441 int irq = press_data->get_irq_data_ready(indio_dev);
440 int err; 442 int err;
441 443
@@ -464,12 +466,10 @@ int st_press_common_probe(struct iio_dev *indio_dev)
464 press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz; 466 press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
465 467
466 /* Some devices don't support a data ready pin. */ 468 /* Some devices don't support a data ready pin. */
467 if (!press_data->dev->platform_data && 469 if (!pdata && press_data->sensor_settings->drdy_irq.addr)
468 press_data->sensor_settings->drdy_irq.addr) 470 pdata = (struct st_sensors_platform_data *)&default_press_pdata;
469 press_data->dev->platform_data =
470 (struct st_sensors_platform_data *)&default_press_pdata;
471 471
472 err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data); 472 err = st_sensors_init_sensor(indio_dev, pdata);
473 if (err < 0) 473 if (err < 0)
474 return err; 474 return err;
475 475
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index aa26f3c3416b..c151bb625179 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -33,6 +33,18 @@ config INFINIBAND_USER_ACCESS
33 libibverbs, libibcm and a hardware driver library from 33 libibverbs, libibcm and a hardware driver library from
34 <http://www.openfabrics.org/git/>. 34 <http://www.openfabrics.org/git/>.
35 35
36config INFINIBAND_USER_ACCESS_UCM
37 bool "Userspace CM (UCM, DEPRECATED)"
38 depends on BROKEN
39 depends on INFINIBAND_USER_ACCESS
40 help
41 The UCM module has known security flaws, which no one is
42 interested to fix. The user-space part of this code was
43 dropped from the upstream a long time ago.
44
45 This option is DEPRECATED and planned to be removed.
46
47
36config INFINIBAND_USER_MEM 48config INFINIBAND_USER_MEM
37 bool 49 bool
38 depends on INFINIBAND_USER_ACCESS != n 50 depends on INFINIBAND_USER_ACCESS != n
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index d43a8994ac5c..737612a442be 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -5,8 +5,8 @@ obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
5 ib_cm.o iw_cm.o ib_addr.o \ 5 ib_cm.o iw_cm.o ib_addr.o \
6 $(infiniband-y) 6 $(infiniband-y)
7obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o 7obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
8obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ 8obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
9 $(user_access-y) 9obj-$(CONFIG_INFINIBAND_USER_ACCESS_UCM) += ib_ucm.o $(user_access-y)
10 10
11ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ 11ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
12 device.o fmr_pool.o cache.o netlink.o \ 12 device.o fmr_pool.o cache.o netlink.o \
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 6a8024d9d742..864a7c8d82d3 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -86,6 +86,22 @@ int rdma_addr_size(struct sockaddr *addr)
86} 86}
87EXPORT_SYMBOL(rdma_addr_size); 87EXPORT_SYMBOL(rdma_addr_size);
88 88
89int rdma_addr_size_in6(struct sockaddr_in6 *addr)
90{
91 int ret = rdma_addr_size((struct sockaddr *) addr);
92
93 return ret <= sizeof(*addr) ? ret : 0;
94}
95EXPORT_SYMBOL(rdma_addr_size_in6);
96
97int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
98{
99 int ret = rdma_addr_size((struct sockaddr *) addr);
100
101 return ret <= sizeof(*addr) ? ret : 0;
102}
103EXPORT_SYMBOL(rdma_addr_size_kss);
104
89static struct rdma_addr_client self; 105static struct rdma_addr_client self;
90 106
91void rdma_addr_register_client(struct rdma_addr_client *client) 107void rdma_addr_register_client(struct rdma_addr_client *client)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e354358db77b..d57a78ec7425 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -626,6 +626,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net,
626 INIT_LIST_HEAD(&id_priv->mc_list); 626 INIT_LIST_HEAD(&id_priv->mc_list);
627 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); 627 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
628 id_priv->id.route.addr.dev_addr.net = get_net(net); 628 id_priv->id.route.addr.dev_addr.net = get_net(net);
629 id_priv->seq_num &= 0x00ffffff;
629 630
630 return &id_priv->id; 631 return &id_priv->id;
631} 632}
@@ -3742,6 +3743,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
3742 struct cma_multicast *mc; 3743 struct cma_multicast *mc;
3743 int ret; 3744 int ret;
3744 3745
3746 if (!id->device)
3747 return -EINVAL;
3748
3745 id_priv = container_of(id, struct rdma_id_private, id); 3749 id_priv = container_of(id, struct rdma_id_private, id);
3746 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && 3750 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
3747 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) 3751 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
@@ -4006,7 +4010,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
4006 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) 4010 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
4007 goto out; 4011 goto out;
4008 if (ibnl_put_attr(skb, nlh, 4012 if (ibnl_put_attr(skb, nlh,
4009 rdma_addr_size(cma_src_addr(id_priv)), 4013 rdma_addr_size(cma_dst_addr(id_priv)),
4010 cma_dst_addr(id_priv), 4014 cma_dst_addr(id_priv),
4011 RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) 4015 RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
4012 goto out; 4016 goto out;
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index fb43a242847b..8d7d110d0721 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -663,6 +663,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
663 } 663 }
664 skb_num++; 664 skb_num++;
665 spin_lock_irqsave(&iwpm_mapinfo_lock, flags); 665 spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
666 ret = -EINVAL;
666 for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { 667 for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
667 hlist_for_each_entry(map_info, &iwpm_hash_bucket[i], 668 hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
668 hlist_node) { 669 hlist_node) {
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 8d84c563ba75..616173b7a5e8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1548,7 +1548,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1548 mad_reg_req->oui, 3)) { 1548 mad_reg_req->oui, 3)) {
1549 method = &(*vendor_table)->vendor_class[ 1549 method = &(*vendor_table)->vendor_class[
1550 vclass]->method_table[i]; 1550 vclass]->method_table[i];
1551 BUG_ON(!*method); 1551 if (!*method)
1552 goto error3;
1552 goto check_in_use; 1553 goto check_in_use;
1553 } 1554 }
1554 } 1555 }
@@ -1558,10 +1559,12 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1558 vclass]->oui[i])) { 1559 vclass]->oui[i])) {
1559 method = &(*vendor_table)->vendor_class[ 1560 method = &(*vendor_table)->vendor_class[
1560 vclass]->method_table[i]; 1561 vclass]->method_table[i];
1561 BUG_ON(*method);
1562 /* Allocate method table for this OUI */ 1562 /* Allocate method table for this OUI */
1563 if ((ret = allocate_method_table(method))) 1563 if (!*method) {
1564 goto error3; 1564 ret = allocate_method_table(method);
1565 if (ret)
1566 goto error3;
1567 }
1565 memcpy((*vendor_table)->vendor_class[vclass]->oui[i], 1568 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1566 mad_reg_req->oui, 3); 1569 mad_reg_req->oui, 3);
1567 goto check_in_use; 1570 goto check_in_use;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 886f61ea6cc7..55aa8d3d752f 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -131,7 +131,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
131 ctx = idr_find(&ctx_idr, id); 131 ctx = idr_find(&ctx_idr, id);
132 if (!ctx) 132 if (!ctx)
133 ctx = ERR_PTR(-ENOENT); 133 ctx = ERR_PTR(-ENOENT);
134 else if (ctx->file != file) 134 else if (ctx->file != file || !ctx->cm_id)
135 ctx = ERR_PTR(-EINVAL); 135 ctx = ERR_PTR(-EINVAL);
136 return ctx; 136 return ctx;
137} 137}
@@ -217,7 +217,7 @@ static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
217 return NULL; 217 return NULL;
218 218
219 mutex_lock(&mut); 219 mutex_lock(&mut);
220 mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL); 220 mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
221 mutex_unlock(&mut); 221 mutex_unlock(&mut);
222 if (mc->id < 0) 222 if (mc->id < 0)
223 goto error; 223 goto error;
@@ -453,6 +453,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
453 struct rdma_ucm_create_id cmd; 453 struct rdma_ucm_create_id cmd;
454 struct rdma_ucm_create_id_resp resp; 454 struct rdma_ucm_create_id_resp resp;
455 struct ucma_context *ctx; 455 struct ucma_context *ctx;
456 struct rdma_cm_id *cm_id;
456 enum ib_qp_type qp_type; 457 enum ib_qp_type qp_type;
457 int ret; 458 int ret;
458 459
@@ -473,10 +474,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
473 return -ENOMEM; 474 return -ENOMEM;
474 475
475 ctx->uid = cmd.uid; 476 ctx->uid = cmd.uid;
476 ctx->cm_id = rdma_create_id(current->nsproxy->net_ns, 477 cm_id = rdma_create_id(current->nsproxy->net_ns,
477 ucma_event_handler, ctx, cmd.ps, qp_type); 478 ucma_event_handler, ctx, cmd.ps, qp_type);
478 if (IS_ERR(ctx->cm_id)) { 479 if (IS_ERR(cm_id)) {
479 ret = PTR_ERR(ctx->cm_id); 480 ret = PTR_ERR(cm_id);
480 goto err1; 481 goto err1;
481 } 482 }
482 483
@@ -486,14 +487,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
486 ret = -EFAULT; 487 ret = -EFAULT;
487 goto err2; 488 goto err2;
488 } 489 }
490
491 ctx->cm_id = cm_id;
489 return 0; 492 return 0;
490 493
491err2: 494err2:
492 rdma_destroy_id(ctx->cm_id); 495 rdma_destroy_id(cm_id);
493err1: 496err1:
494 mutex_lock(&mut); 497 mutex_lock(&mut);
495 idr_remove(&ctx_idr, ctx->id); 498 idr_remove(&ctx_idr, ctx->id);
496 mutex_unlock(&mut); 499 mutex_unlock(&mut);
500 mutex_lock(&file->mut);
501 list_del(&ctx->list);
502 mutex_unlock(&file->mut);
497 kfree(ctx); 503 kfree(ctx);
498 return ret; 504 return ret;
499} 505}
@@ -623,6 +629,9 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
623 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 629 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
624 return -EFAULT; 630 return -EFAULT;
625 631
632 if (!rdma_addr_size_in6(&cmd.addr))
633 return -EINVAL;
634
626 ctx = ucma_get_ctx(file, cmd.id); 635 ctx = ucma_get_ctx(file, cmd.id);
627 if (IS_ERR(ctx)) 636 if (IS_ERR(ctx))
628 return PTR_ERR(ctx); 637 return PTR_ERR(ctx);
@@ -636,22 +645,21 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
636 int in_len, int out_len) 645 int in_len, int out_len)
637{ 646{
638 struct rdma_ucm_bind cmd; 647 struct rdma_ucm_bind cmd;
639 struct sockaddr *addr;
640 struct ucma_context *ctx; 648 struct ucma_context *ctx;
641 int ret; 649 int ret;
642 650
643 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 651 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
644 return -EFAULT; 652 return -EFAULT;
645 653
646 addr = (struct sockaddr *) &cmd.addr; 654 if (cmd.reserved || !cmd.addr_size ||
647 if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr))) 655 cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
648 return -EINVAL; 656 return -EINVAL;
649 657
650 ctx = ucma_get_ctx(file, cmd.id); 658 ctx = ucma_get_ctx(file, cmd.id);
651 if (IS_ERR(ctx)) 659 if (IS_ERR(ctx))
652 return PTR_ERR(ctx); 660 return PTR_ERR(ctx);
653 661
654 ret = rdma_bind_addr(ctx->cm_id, addr); 662 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
655 ucma_put_ctx(ctx); 663 ucma_put_ctx(ctx);
656 return ret; 664 return ret;
657} 665}
@@ -667,13 +675,16 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
667 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 675 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
668 return -EFAULT; 676 return -EFAULT;
669 677
678 if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
679 !rdma_addr_size_in6(&cmd.dst_addr))
680 return -EINVAL;
681
670 ctx = ucma_get_ctx(file, cmd.id); 682 ctx = ucma_get_ctx(file, cmd.id);
671 if (IS_ERR(ctx)) 683 if (IS_ERR(ctx))
672 return PTR_ERR(ctx); 684 return PTR_ERR(ctx);
673 685
674 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, 686 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
675 (struct sockaddr *) &cmd.dst_addr, 687 (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
676 cmd.timeout_ms);
677 ucma_put_ctx(ctx); 688 ucma_put_ctx(ctx);
678 return ret; 689 return ret;
679} 690}
@@ -683,24 +694,23 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
683 int in_len, int out_len) 694 int in_len, int out_len)
684{ 695{
685 struct rdma_ucm_resolve_addr cmd; 696 struct rdma_ucm_resolve_addr cmd;
686 struct sockaddr *src, *dst;
687 struct ucma_context *ctx; 697 struct ucma_context *ctx;
688 int ret; 698 int ret;
689 699
690 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 700 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
691 return -EFAULT; 701 return -EFAULT;
692 702
693 src = (struct sockaddr *) &cmd.src_addr; 703 if (cmd.reserved ||
694 dst = (struct sockaddr *) &cmd.dst_addr; 704 (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
695 if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) || 705 !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
696 !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
697 return -EINVAL; 706 return -EINVAL;
698 707
699 ctx = ucma_get_ctx(file, cmd.id); 708 ctx = ucma_get_ctx(file, cmd.id);
700 if (IS_ERR(ctx)) 709 if (IS_ERR(ctx))
701 return PTR_ERR(ctx); 710 return PTR_ERR(ctx);
702 711
703 ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); 712 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
713 (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
704 ucma_put_ctx(ctx); 714 ucma_put_ctx(ctx);
705 return ret; 715 return ret;
706} 716}
@@ -1138,10 +1148,18 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
1138 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1148 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1139 return -EFAULT; 1149 return -EFAULT;
1140 1150
1151 if (cmd.qp_state > IB_QPS_ERR)
1152 return -EINVAL;
1153
1141 ctx = ucma_get_ctx(file, cmd.id); 1154 ctx = ucma_get_ctx(file, cmd.id);
1142 if (IS_ERR(ctx)) 1155 if (IS_ERR(ctx))
1143 return PTR_ERR(ctx); 1156 return PTR_ERR(ctx);
1144 1157
1158 if (!ctx->cm_id->device) {
1159 ret = -EINVAL;
1160 goto out;
1161 }
1162
1145 resp.qp_attr_mask = 0; 1163 resp.qp_attr_mask = 0;
1146 memset(&qp_attr, 0, sizeof qp_attr); 1164 memset(&qp_attr, 0, sizeof qp_attr);
1147 qp_attr.qp_state = cmd.qp_state; 1165 qp_attr.qp_state = cmd.qp_state;
@@ -1212,6 +1230,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
1212 if (!optlen) 1230 if (!optlen)
1213 return -EINVAL; 1231 return -EINVAL;
1214 1232
1233 if (!ctx->cm_id->device)
1234 return -EINVAL;
1235
1215 memset(&sa_path, 0, sizeof(sa_path)); 1236 memset(&sa_path, 0, sizeof(sa_path));
1216 1237
1217 ib_sa_unpack_path(path_data->path_rec, &sa_path); 1238 ib_sa_unpack_path(path_data->path_rec, &sa_path);
@@ -1274,6 +1295,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
1274 if (IS_ERR(ctx)) 1295 if (IS_ERR(ctx))
1275 return PTR_ERR(ctx); 1296 return PTR_ERR(ctx);
1276 1297
1298 if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
1299 return -EINVAL;
1300
1277 optval = memdup_user((void __user *) (unsigned long) cmd.optval, 1301 optval = memdup_user((void __user *) (unsigned long) cmd.optval,
1278 cmd.optlen); 1302 cmd.optlen);
1279 if (IS_ERR(optval)) { 1303 if (IS_ERR(optval)) {
@@ -1295,7 +1319,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
1295{ 1319{
1296 struct rdma_ucm_notify cmd; 1320 struct rdma_ucm_notify cmd;
1297 struct ucma_context *ctx; 1321 struct ucma_context *ctx;
1298 int ret; 1322 int ret = -EINVAL;
1299 1323
1300 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1324 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1301 return -EFAULT; 1325 return -EFAULT;
@@ -1304,7 +1328,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
1304 if (IS_ERR(ctx)) 1328 if (IS_ERR(ctx))
1305 return PTR_ERR(ctx); 1329 return PTR_ERR(ctx);
1306 1330
1307 ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); 1331 if (ctx->cm_id->device)
1332 ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
1333
1308 ucma_put_ctx(ctx); 1334 ucma_put_ctx(ctx);
1309 return ret; 1335 return ret;
1310} 1336}
@@ -1322,7 +1348,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
1322 return -ENOSPC; 1348 return -ENOSPC;
1323 1349
1324 addr = (struct sockaddr *) &cmd->addr; 1350 addr = (struct sockaddr *) &cmd->addr;
1325 if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) 1351 if (cmd->reserved || (cmd->addr_size != rdma_addr_size(addr)))
1326 return -EINVAL; 1352 return -EINVAL;
1327 1353
1328 ctx = ucma_get_ctx(file, cmd->id); 1354 ctx = ucma_get_ctx(file, cmd->id);
@@ -1349,6 +1375,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
1349 goto err3; 1375 goto err3;
1350 } 1376 }
1351 1377
1378 mutex_lock(&mut);
1379 idr_replace(&multicast_idr, mc, mc->id);
1380 mutex_unlock(&mut);
1381
1352 mutex_unlock(&file->mut); 1382 mutex_unlock(&file->mut);
1353 ucma_put_ctx(ctx); 1383 ucma_put_ctx(ctx);
1354 return 0; 1384 return 0;
@@ -1381,7 +1411,10 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
1381 join_cmd.response = cmd.response; 1411 join_cmd.response = cmd.response;
1382 join_cmd.uid = cmd.uid; 1412 join_cmd.uid = cmd.uid;
1383 join_cmd.id = cmd.id; 1413 join_cmd.id = cmd.id;
1384 join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); 1414 join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
1415 if (!join_cmd.addr_size)
1416 return -EINVAL;
1417
1385 join_cmd.reserved = 0; 1418 join_cmd.reserved = 0;
1386 memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); 1419 memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
1387 1420
@@ -1397,6 +1430,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
1397 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1430 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1398 return -EFAULT; 1431 return -EFAULT;
1399 1432
1433 if (!rdma_addr_size_kss(&cmd.addr))
1434 return -EINVAL;
1435
1400 return ucma_process_join(file, &cmd, out_len); 1436 return ucma_process_join(file, &cmd, out_len);
1401} 1437}
1402 1438
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 0ae337bec4f2..98fd9a594841 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -122,16 +122,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
122 umem->address = addr; 122 umem->address = addr;
123 umem->page_size = PAGE_SIZE; 123 umem->page_size = PAGE_SIZE;
124 umem->pid = get_task_pid(current, PIDTYPE_PID); 124 umem->pid = get_task_pid(current, PIDTYPE_PID);
125 /* 125 umem->writable = ib_access_writable(access);
126 * We ask for writable memory if any of the following
127 * access flags are set. "Local write" and "remote write"
128 * obviously require write access. "Remote atomic" can do
129 * things like fetch and add, which will modify memory, and
130 * "MW bind" can change permissions by binding a window.
131 */
132 umem->writable = !!(access &
133 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
134 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
135 126
136 if (access & IB_ACCESS_ON_DEMAND) { 127 if (access & IB_ACCESS_ON_DEMAND) {
137 put_pid(umem->pid); 128 put_pid(umem->pid);
@@ -354,7 +345,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
354 return -EINVAL; 345 return -EINVAL;
355 } 346 }
356 347
357 ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length, 348 ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length,
358 offset + ib_umem_offset(umem)); 349 offset + ib_umem_offset(umem));
359 350
360 if (ret < 0) 351 if (ret < 0)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index b7a73f1a8beb..3eb967521917 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2436,9 +2436,13 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
2436 2436
2437static void *alloc_wr(size_t wr_size, __u32 num_sge) 2437static void *alloc_wr(size_t wr_size, __u32 num_sge)
2438{ 2438{
2439 if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
2440 sizeof (struct ib_sge))
2441 return NULL;
2442
2439 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) + 2443 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
2440 num_sge * sizeof (struct ib_sge), GFP_KERNEL); 2444 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
2441}; 2445}
2442 2446
2443ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, 2447ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2444 struct ib_device *ib_dev, 2448 struct ib_device *ib_dev,
@@ -2665,6 +2669,13 @@ static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2665 goto err; 2669 goto err;
2666 } 2670 }
2667 2671
2672 if (user_wr->num_sge >=
2673 (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
2674 sizeof (struct ib_sge)) {
2675 ret = -EINVAL;
2676 goto err;
2677 }
2678
2668 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + 2679 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2669 user_wr->num_sge * sizeof (struct ib_sge), 2680 user_wr->num_sge * sizeof (struct ib_sge),
2670 GFP_KERNEL); 2681 GFP_KERNEL);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 58fce1742b8d..337b1a5eb41c 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -809,10 +809,9 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
809 rdev->lldi.vr->qp.size, 809 rdev->lldi.vr->qp.size,
810 rdev->lldi.vr->cq.start, 810 rdev->lldi.vr->cq.start,
811 rdev->lldi.vr->cq.size); 811 rdev->lldi.vr->cq.size);
812 PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p " 812 PDBG("udb %pR db_reg %p gts_reg %p "
813 "qpmask 0x%x cqmask 0x%x\n", 813 "qpmask 0x%x cqmask 0x%x\n",
814 (unsigned)pci_resource_len(rdev->lldi.pdev, 2), 814 &rdev->lldi.pdev->resource[2],
815 (void *)pci_resource_start(rdev->lldi.pdev, 2),
816 rdev->lldi.db_reg, rdev->lldi.gts_reg, 815 rdev->lldi.db_reg, rdev->lldi.gts_reg,
817 rdev->qpmask, rdev->cqmask); 816 rdev->qpmask, rdev->cqmask);
818 817
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index e1629ab58db7..8218d714fa01 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -926,7 +926,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
926{ 926{
927 struct c4iw_mr *mhp = to_c4iw_mr(ibmr); 927 struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
928 928
929 if (unlikely(mhp->mpl_len == mhp->max_mpl_len)) 929 if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
930 return -ENOMEM; 930 return -ENOMEM;
931 931
932 mhp->mpl[mhp->mpl_len++] = addr; 932 mhp->mpl[mhp->mpl_len++] = addr;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index d862b9b7910e..199a9cdd0d12 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1780,7 +1780,6 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1780 "buf:%lld\n", wc.wr_id); 1780 "buf:%lld\n", wc.wr_id);
1781 break; 1781 break;
1782 default: 1782 default:
1783 BUG_ON(1);
1784 break; 1783 break;
1785 } 1784 }
1786 } else { 1785 } else {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8763fb832b01..67c4c73343d4 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1041,7 +1041,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1041 /* need to protect from a race on closing the vma as part of 1041 /* need to protect from a race on closing the vma as part of
1042 * mlx4_ib_vma_close(). 1042 * mlx4_ib_vma_close().
1043 */ 1043 */
1044 down_read(&owning_mm->mmap_sem); 1044 down_write(&owning_mm->mmap_sem);
1045 for (i = 0; i < HW_BAR_COUNT; i++) { 1045 for (i = 0; i < HW_BAR_COUNT; i++) {
1046 vma = context->hw_bar_info[i].vma; 1046 vma = context->hw_bar_info[i].vma;
1047 if (!vma) 1047 if (!vma)
@@ -1055,11 +1055,13 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1055 BUG_ON(1); 1055 BUG_ON(1);
1056 } 1056 }
1057 1057
1058 context->hw_bar_info[i].vma->vm_flags &=
1059 ~(VM_SHARED | VM_MAYSHARE);
1058 /* context going to be destroyed, should not access ops any more */ 1060 /* context going to be destroyed, should not access ops any more */
1059 context->hw_bar_info[i].vma->vm_ops = NULL; 1061 context->hw_bar_info[i].vma->vm_ops = NULL;
1060 } 1062 }
1061 1063
1062 up_read(&owning_mm->mmap_sem); 1064 up_write(&owning_mm->mmap_sem);
1063 mmput(owning_mm); 1065 mmput(owning_mm);
1064 put_task_struct(owning_process); 1066 put_task_struct(owning_process);
1065} 1067}
@@ -2483,9 +2485,8 @@ err_steer_free_bitmap:
2483 kfree(ibdev->ib_uc_qpns_bitmap); 2485 kfree(ibdev->ib_uc_qpns_bitmap);
2484 2486
2485err_steer_qp_release: 2487err_steer_qp_release:
2486 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) 2488 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2487 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, 2489 ibdev->steer_qpn_count);
2488 ibdev->steer_qpn_count);
2489err_counter: 2490err_counter:
2490 for (i = 0; i < ibdev->num_ports; ++i) 2491 for (i = 0; i < ibdev->num_ports; ++i)
2491 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); 2492 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
@@ -2586,11 +2587,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2586 ibdev->iboe.nb.notifier_call = NULL; 2587 ibdev->iboe.nb.notifier_call = NULL;
2587 } 2588 }
2588 2589
2589 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { 2590 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2590 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, 2591 ibdev->steer_qpn_count);
2591 ibdev->steer_qpn_count); 2592 kfree(ibdev->ib_uc_qpns_bitmap);
2592 kfree(ibdev->ib_uc_qpns_bitmap);
2593 }
2594 2593
2595 iounmap(ibdev->uar_map); 2594 iounmap(ibdev->uar_map);
2596 for (p = 0; p < ibdev->num_ports; ++p) 2595 for (p = 0; p < ibdev->num_ports; ++p)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 4d1e1c632603..bf52e35dd506 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -130,6 +130,40 @@ out:
130 return err; 130 return err;
131} 131}
132 132
133static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
134 u64 length, u64 virt_addr,
135 int access_flags)
136{
137 /*
138 * Force registering the memory as writable if the underlying pages
139 * are writable. This is so rereg can change the access permissions
140 * from readable to writable without having to run through ib_umem_get
141 * again
142 */
143 if (!ib_access_writable(access_flags)) {
144 struct vm_area_struct *vma;
145
146 down_read(&current->mm->mmap_sem);
147 /*
148 * FIXME: Ideally this would iterate over all the vmas that
149 * cover the memory, but for now it requires a single vma to
150 * entirely cover the MR to support RO mappings.
151 */
152 vma = find_vma(current->mm, start);
153 if (vma && vma->vm_end >= start + length &&
154 vma->vm_start <= start) {
155 if (vma->vm_flags & VM_WRITE)
156 access_flags |= IB_ACCESS_LOCAL_WRITE;
157 } else {
158 access_flags |= IB_ACCESS_LOCAL_WRITE;
159 }
160
161 up_read(&current->mm->mmap_sem);
162 }
163
164 return ib_umem_get(context, start, length, access_flags, 0);
165}
166
133struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 167struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
134 u64 virt_addr, int access_flags, 168 u64 virt_addr, int access_flags,
135 struct ib_udata *udata) 169 struct ib_udata *udata)
@@ -144,10 +178,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
144 if (!mr) 178 if (!mr)
145 return ERR_PTR(-ENOMEM); 179 return ERR_PTR(-ENOMEM);
146 180
147 /* Force registering the memory as writable. */ 181 mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
148 /* Used for memory re-registeration. HCA protects the access */ 182 virt_addr, access_flags);
149 mr->umem = ib_umem_get(pd->uobject->context, start, length,
150 access_flags | IB_ACCESS_LOCAL_WRITE, 0);
151 if (IS_ERR(mr->umem)) { 183 if (IS_ERR(mr->umem)) {
152 err = PTR_ERR(mr->umem); 184 err = PTR_ERR(mr->umem);
153 goto err_free; 185 goto err_free;
@@ -214,6 +246,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
214 } 246 }
215 247
216 if (flags & IB_MR_REREG_ACCESS) { 248 if (flags & IB_MR_REREG_ACCESS) {
249 if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
250 return -EPERM;
251
217 err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry, 252 err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
218 convert_access(mr_access_flags)); 253 convert_access(mr_access_flags));
219 254
@@ -227,10 +262,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
227 262
228 mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr); 263 mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
229 ib_umem_release(mmr->umem); 264 ib_umem_release(mmr->umem);
230 mmr->umem = ib_umem_get(mr->uobject->context, start, length, 265 mmr->umem =
231 mr_access_flags | 266 mlx4_get_umem_mr(mr->uobject->context, start, length,
232 IB_ACCESS_LOCAL_WRITE, 267 virt_addr, mr_access_flags);
233 0);
234 if (IS_ERR(mmr->umem)) { 268 if (IS_ERR(mmr->umem)) {
235 err = PTR_ERR(mmr->umem); 269 err = PTR_ERR(mmr->umem);
236 /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */ 270 /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
@@ -424,7 +458,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
424 goto err_free_mr; 458 goto err_free_mr;
425 459
426 mr->max_pages = max_num_sg; 460 mr->max_pages = max_num_sg;
427
428 err = mlx4_mr_enable(dev->dev, &mr->mmr); 461 err = mlx4_mr_enable(dev->dev, &mr->mmr);
429 if (err) 462 if (err)
430 goto err_free_pl; 463 goto err_free_pl;
@@ -435,6 +468,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
435 return &mr->ibmr; 468 return &mr->ibmr;
436 469
437err_free_pl: 470err_free_pl:
471 mr->ibmr.device = pd->device;
438 mlx4_free_priv_pages(mr); 472 mlx4_free_priv_pages(mr);
439err_free_mr: 473err_free_mr:
440 (void) mlx4_mr_free(dev->dev, &mr->mmr); 474 (void) mlx4_mr_free(dev->dev, &mr->mmr);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 02c8deab1fff..4a4ab433062f 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -972,7 +972,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
972 if (ucmd.reserved0 || ucmd.reserved1) 972 if (ucmd.reserved0 || ucmd.reserved1)
973 return -EINVAL; 973 return -EINVAL;
974 974
975 umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, 975 /* check multiplication overflow */
976 if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
977 return -EINVAL;
978
979 umem = ib_umem_get(context, ucmd.buf_addr,
980 (size_t)ucmd.cqe_size * entries,
976 IB_ACCESS_LOCAL_WRITE, 1); 981 IB_ACCESS_LOCAL_WRITE, 1);
977 if (IS_ERR(umem)) { 982 if (IS_ERR(umem)) {
978 err = PTR_ERR(umem); 983 err = PTR_ERR(umem);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index cfcfbb6b84d7..43d277a931c2 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -231,7 +231,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
231 } else { 231 } else {
232 if (ucmd) { 232 if (ucmd) {
233 qp->rq.wqe_cnt = ucmd->rq_wqe_count; 233 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
234 if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
235 return -EINVAL;
234 qp->rq.wqe_shift = ucmd->rq_wqe_shift; 236 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
237 if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
238 return -EINVAL;
235 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; 239 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
236 qp->rq.max_post = qp->rq.wqe_cnt; 240 qp->rq.max_post = qp->rq.wqe_cnt;
237 } else { 241 } else {
@@ -1348,18 +1352,18 @@ enum {
1348 1352
1349static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) 1353static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1350{ 1354{
1351 if (rate == IB_RATE_PORT_CURRENT) { 1355 if (rate == IB_RATE_PORT_CURRENT)
1352 return 0; 1356 return 0;
1353 } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) { 1357
1358 if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
1354 return -EINVAL; 1359 return -EINVAL;
1355 } else {
1356 while (rate != IB_RATE_2_5_GBPS &&
1357 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
1358 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
1359 --rate;
1360 }
1361 1360
1362 return rate + MLX5_STAT_RATE_OFFSET; 1361 while (rate != IB_RATE_PORT_CURRENT &&
1362 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
1363 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
1364 --rate;
1365
1366 return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
1363} 1367}
1364 1368
1365static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah, 1369static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
@@ -3157,12 +3161,9 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
3157 int err; 3161 int err;
3158 3162
3159 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn); 3163 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
3160 if (err) { 3164 if (err)
3161 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn); 3165 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
3162 return err;
3163 }
3164 3166
3165 kfree(xrcd); 3167 kfree(xrcd);
3166
3167 return 0; 3168 return 0;
3168} 3169}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 86c303a620c1..40242ead096f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -643,7 +643,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
643 struct ocrdma_stats *pstats = filp->private_data; 643 struct ocrdma_stats *pstats = filp->private_data;
644 struct ocrdma_dev *dev = pstats->dev; 644 struct ocrdma_dev *dev = pstats->dev;
645 645
646 if (count > 32) 646 if (*ppos != 0 || count == 0 || count > sizeof(tmp_str))
647 goto err; 647 goto err;
648 648
649 if (copy_from_user(tmp_str, buffer, count)) 649 if (copy_from_user(tmp_str, buffer, count))
@@ -834,7 +834,7 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
834 834
835 dev->reset_stats.type = OCRDMA_RESET_STATS; 835 dev->reset_stats.type = OCRDMA_RESET_STATS;
836 dev->reset_stats.dev = dev; 836 dev->reset_stats.dev = dev;
837 if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir, 837 if (!debugfs_create_file("reset_stats", 0200, dev->dir,
838 &dev->reset_stats, &ocrdma_dbg_ops)) 838 &dev->reset_stats, &ocrdma_dbg_ops))
839 goto err; 839 goto err;
840 840
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 7df16f74bb45..c6c75b99cf2c 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1451,8 +1451,7 @@ u64 qib_sps_ints(void);
1451/* 1451/*
1452 * dma_addr wrappers - all 0's invalid for hw 1452 * dma_addr wrappers - all 0's invalid for hw
1453 */ 1453 */
1454dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long, 1454int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
1455 size_t, int);
1456const char *qib_get_unit_name(int unit); 1455const char *qib_get_unit_name(int unit);
1457 1456
1458/* 1457/*
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 24f4a782e0f4..5908fd3af00d 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -364,6 +364,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
364 goto done; 364 goto done;
365 } 365 }
366 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { 366 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
367 dma_addr_t daddr;
368
367 for (; ntids--; tid++) { 369 for (; ntids--; tid++) {
368 if (tid == tidcnt) 370 if (tid == tidcnt)
369 tid = 0; 371 tid = 0;
@@ -380,12 +382,14 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
380 ret = -ENOMEM; 382 ret = -ENOMEM;
381 break; 383 break;
382 } 384 }
385 ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
386 if (ret)
387 break;
388
383 tidlist[i] = tid + tidoff; 389 tidlist[i] = tid + tidoff;
384 /* we "know" system pages and TID pages are same size */ 390 /* we "know" system pages and TID pages are same size */
385 dd->pageshadow[ctxttid + tid] = pagep[i]; 391 dd->pageshadow[ctxttid + tid] = pagep[i];
386 dd->physshadow[ctxttid + tid] = 392 dd->physshadow[ctxttid + tid] = daddr;
387 qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
388 PCI_DMA_FROMDEVICE);
389 /* 393 /*
390 * don't need atomic or it's overhead 394 * don't need atomic or it's overhead
391 */ 395 */
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 74f90b2619f6..ab1588ae1c85 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -98,23 +98,27 @@ bail:
98 * 98 *
99 * I'm sure we won't be so lucky with other iommu's, so FIXME. 99 * I'm sure we won't be so lucky with other iommu's, so FIXME.
100 */ 100 */
101dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page, 101int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
102 unsigned long offset, size_t size, int direction)
103{ 102{
104 dma_addr_t phys; 103 dma_addr_t phys;
105 104
106 phys = pci_map_page(hwdev, page, offset, size, direction); 105 phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
106 if (pci_dma_mapping_error(hwdev, phys))
107 return -ENOMEM;
107 108
108 if (phys == 0) { 109 if (!phys) {
109 pci_unmap_page(hwdev, phys, size, direction); 110 pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
110 phys = pci_map_page(hwdev, page, offset, size, direction); 111 phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
112 PCI_DMA_FROMDEVICE);
113 if (pci_dma_mapping_error(hwdev, phys))
114 return -ENOMEM;
111 /* 115 /*
112 * FIXME: If we get 0 again, we should keep this page, 116 * FIXME: If we get 0 again, we should keep this page,
113 * map another, then free the 0 page. 117 * map another, then free the 0 page.
114 */ 118 */
115 } 119 }
116 120 *daddr = phys;
117 return phys; 121 return 0;
118} 122}
119 123
120/** 124/**
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index d3f0a384faad..f6b06729f4ea 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -945,6 +945,19 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
945 */ 945 */
946 priv->dev->broadcast[8] = priv->pkey >> 8; 946 priv->dev->broadcast[8] = priv->pkey >> 8;
947 priv->dev->broadcast[9] = priv->pkey & 0xff; 947 priv->dev->broadcast[9] = priv->pkey & 0xff;
948
949 /*
950 * Update the broadcast address in the priv->broadcast object,
951 * in case it already exists, otherwise no one will do that.
952 */
953 if (priv->broadcast) {
954 spin_lock_irq(&priv->lock);
955 memcpy(priv->broadcast->mcmember.mgid.raw,
956 priv->dev->broadcast + 4,
957 sizeof(union ib_gid));
958 spin_unlock_irq(&priv->lock);
959 }
960
948 return 0; 961 return 0;
949 } 962 }
950 963
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bad76eed06b3..fcb18b11db75 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -724,6 +724,22 @@ static void path_rec_completion(int status,
724 spin_lock_irqsave(&priv->lock, flags); 724 spin_lock_irqsave(&priv->lock, flags);
725 725
726 if (!IS_ERR_OR_NULL(ah)) { 726 if (!IS_ERR_OR_NULL(ah)) {
727 /*
728 * pathrec.dgid is used as the database key from the LLADDR,
729 * it must remain unchanged even if the SA returns a different
730 * GID to use in the AH.
731 */
732 if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
733 sizeof(union ib_gid))) {
734 ipoib_dbg(
735 priv,
736 "%s got PathRec for gid %pI6 while asked for %pI6\n",
737 dev->name, pathrec->dgid.raw,
738 path->pathrec.dgid.raw);
739 memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
740 sizeof(union ib_gid));
741 }
742
727 path->pathrec = *pathrec; 743 path->pathrec = *pathrec;
728 744
729 old_ah = path->ah; 745 old_ah = path->ah;
@@ -844,8 +860,8 @@ static int path_rec_start(struct net_device *dev,
844 return 0; 860 return 0;
845} 861}
846 862
847static void neigh_add_path(struct sk_buff *skb, u8 *daddr, 863static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
848 struct net_device *dev) 864 struct net_device *dev)
849{ 865{
850 struct ipoib_dev_priv *priv = netdev_priv(dev); 866 struct ipoib_dev_priv *priv = netdev_priv(dev);
851 struct ipoib_path *path; 867 struct ipoib_path *path;
@@ -858,7 +874,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
858 spin_unlock_irqrestore(&priv->lock, flags); 874 spin_unlock_irqrestore(&priv->lock, flags);
859 ++dev->stats.tx_dropped; 875 ++dev->stats.tx_dropped;
860 dev_kfree_skb_any(skb); 876 dev_kfree_skb_any(skb);
861 return; 877 return NULL;
878 }
879
880 /* To avoid race condition, make sure that the
881 * neigh will be added only once.
882 */
883 if (unlikely(!list_empty(&neigh->list))) {
884 spin_unlock_irqrestore(&priv->lock, flags);
885 return neigh;
862 } 886 }
863 887
864 path = __path_find(dev, daddr + 4); 888 path = __path_find(dev, daddr + 4);
@@ -896,7 +920,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
896 spin_unlock_irqrestore(&priv->lock, flags); 920 spin_unlock_irqrestore(&priv->lock, flags);
897 ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr)); 921 ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
898 ipoib_neigh_put(neigh); 922 ipoib_neigh_put(neigh);
899 return; 923 return NULL;
900 } 924 }
901 } else { 925 } else {
902 neigh->ah = NULL; 926 neigh->ah = NULL;
@@ -913,7 +937,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
913 937
914 spin_unlock_irqrestore(&priv->lock, flags); 938 spin_unlock_irqrestore(&priv->lock, flags);
915 ipoib_neigh_put(neigh); 939 ipoib_neigh_put(neigh);
916 return; 940 return NULL;
917 941
918err_path: 942err_path:
919 ipoib_neigh_free(neigh); 943 ipoib_neigh_free(neigh);
@@ -923,6 +947,8 @@ err_drop:
923 947
924 spin_unlock_irqrestore(&priv->lock, flags); 948 spin_unlock_irqrestore(&priv->lock, flags);
925 ipoib_neigh_put(neigh); 949 ipoib_neigh_put(neigh);
950
951 return NULL;
926} 952}
927 953
928static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, 954static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1028,8 +1054,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1028 case htons(ETH_P_TIPC): 1054 case htons(ETH_P_TIPC):
1029 neigh = ipoib_neigh_get(dev, phdr->hwaddr); 1055 neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1030 if (unlikely(!neigh)) { 1056 if (unlikely(!neigh)) {
1031 neigh_add_path(skb, phdr->hwaddr, dev); 1057 neigh = neigh_add_path(skb, phdr->hwaddr, dev);
1032 return NETDEV_TX_OK; 1058 if (likely(!neigh))
1059 return NETDEV_TX_OK;
1033 } 1060 }
1034 break; 1061 break;
1035 case htons(ETH_P_ARP): 1062 case htons(ETH_P_ARP):
@@ -1926,6 +1953,9 @@ static struct net_device *ipoib_add_port(const char *format,
1926 goto event_failed; 1953 goto event_failed;
1927 } 1954 }
1928 1955
1956 /* call event handler to ensure pkey in sync */
1957 queue_work(ipoib_workqueue, &priv->flush_heavy);
1958
1929 result = register_netdev(priv->dev); 1959 result = register_netdev(priv->dev);
1930 if (result) { 1960 if (result) {
1931 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n", 1961 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 5580ab0b5781..21e688d55da6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -473,6 +473,9 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
473 !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) 473 !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
474 return -EINVAL; 474 return -EINVAL;
475 475
476 init_completion(&mcast->done);
477 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
478
476 ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); 479 ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
477 480
478 rec.mgid = mcast->mcmember.mgid; 481 rec.mgid = mcast->mcmember.mgid;
@@ -631,8 +634,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
631 if (mcast->backoff == 1 || 634 if (mcast->backoff == 1 ||
632 time_after_eq(jiffies, mcast->delay_until)) { 635 time_after_eq(jiffies, mcast->delay_until)) {
633 /* Found the next unjoined group */ 636 /* Found the next unjoined group */
634 init_completion(&mcast->done);
635 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
636 if (ipoib_mcast_join(dev, mcast)) { 637 if (ipoib_mcast_join(dev, mcast)) {
637 spin_unlock_irq(&priv->lock); 638 spin_unlock_irq(&priv->lock);
638 return; 639 return;
@@ -652,11 +653,9 @@ out:
652 queue_delayed_work(priv->wq, &priv->mcast_task, 653 queue_delayed_work(priv->wq, &priv->mcast_task,
653 delay_until - jiffies); 654 delay_until - jiffies);
654 } 655 }
655 if (mcast) { 656 if (mcast)
656 init_completion(&mcast->done);
657 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
658 ipoib_mcast_join(dev, mcast); 657 ipoib_mcast_join(dev, mcast);
659 } 658
660 spin_unlock_irq(&priv->lock); 659 spin_unlock_irq(&priv->lock);
661} 660}
662 661
@@ -775,7 +774,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
775 spin_lock_irqsave(&priv->lock, flags); 774 spin_lock_irqsave(&priv->lock, flags);
776 if (!neigh) { 775 if (!neigh) {
777 neigh = ipoib_neigh_alloc(daddr, dev); 776 neigh = ipoib_neigh_alloc(daddr, dev);
778 if (neigh) { 777 /* Make sure that the neigh will be added only
778 * once to mcast list.
779 */
780 if (neigh && list_empty(&neigh->list)) {
779 kref_get(&mcast->ah->ref); 781 kref_get(&mcast->ah->ref);
780 neigh->ah = mcast->ah; 782 neigh->ah = mcast->ah;
781 list_add_tail(&neigh->list, &mcast->neigh_list); 783 list_add_tail(&neigh->list, &mcast->neigh_list);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9a99cee2665a..4fd2892613dd 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2581,9 +2581,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
2581 ret = FAST_IO_FAIL; 2581 ret = FAST_IO_FAIL;
2582 else 2582 else
2583 ret = FAILED; 2583 ret = FAILED;
2584 srp_free_req(ch, req, scmnd, 0); 2584 if (ret == SUCCESS) {
2585 scmnd->result = DID_ABORT << 16; 2585 srp_free_req(ch, req, scmnd, 0);
2586 scmnd->scsi_done(scmnd); 2586 scmnd->result = DID_ABORT << 16;
2587 scmnd->scsi_done(scmnd);
2588 }
2587 2589
2588 return ret; 2590 return ret;
2589} 2591}
@@ -3309,12 +3311,10 @@ static ssize_t srp_create_target(struct device *dev,
3309 num_online_nodes()); 3311 num_online_nodes());
3310 const int ch_end = ((node_idx + 1) * target->ch_count / 3312 const int ch_end = ((node_idx + 1) * target->ch_count /
3311 num_online_nodes()); 3313 num_online_nodes());
3312 const int cv_start = (node_idx * ibdev->num_comp_vectors / 3314 const int cv_start = node_idx * ibdev->num_comp_vectors /
3313 num_online_nodes() + target->comp_vector) 3315 num_online_nodes();
3314 % ibdev->num_comp_vectors; 3316 const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
3315 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors / 3317 num_online_nodes();
3316 num_online_nodes() + target->comp_vector)
3317 % ibdev->num_comp_vectors;
3318 int cpu_idx = 0; 3318 int cpu_idx = 0;
3319 3319
3320 for_each_online_cpu(cpu) { 3320 for_each_online_cpu(cpu) {
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index a73874508c3a..cb3a8623ff54 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2974,12 +2974,8 @@ static void srpt_queue_response(struct se_cmd *cmd)
2974 } 2974 }
2975 spin_unlock_irqrestore(&ioctx->spinlock, flags); 2975 spin_unlock_irqrestore(&ioctx->spinlock, flags);
2976 2976
2977 if (unlikely(transport_check_aborted_status(&ioctx->cmd, false) 2977 if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
2978 || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
2979 atomic_inc(&ch->req_lim_delta);
2980 srpt_abort_cmd(ioctx);
2981 return; 2978 return;
2982 }
2983 2979
2984 dir = ioctx->cmd.data_direction; 2980 dir = ioctx->cmd.data_direction;
2985 2981
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 766bf2660116..5f04b2d94635 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -88,6 +88,7 @@ static int input_leds_connect(struct input_handler *handler,
88 const struct input_device_id *id) 88 const struct input_device_id *id)
89{ 89{
90 struct input_leds *leds; 90 struct input_leds *leds;
91 struct input_led *led;
91 unsigned int num_leds; 92 unsigned int num_leds;
92 unsigned int led_code; 93 unsigned int led_code;
93 int led_no; 94 int led_no;
@@ -119,14 +120,13 @@ static int input_leds_connect(struct input_handler *handler,
119 120
120 led_no = 0; 121 led_no = 0;
121 for_each_set_bit(led_code, dev->ledbit, LED_CNT) { 122 for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
122 struct input_led *led = &leds->leds[led_no]; 123 if (!input_led_info[led_code].name)
124 continue;
123 125
126 led = &leds->leds[led_no];
124 led->handle = &leds->handle; 127 led->handle = &leds->handle;
125 led->code = led_code; 128 led->code = led_code;
126 129
127 if (!input_led_info[led_code].name)
128 continue;
129
130 led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", 130 led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
131 dev_name(&dev->dev), 131 dev_name(&dev->dev),
132 input_led_info[led_code].name); 132 input_led_info[led_code].name);
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index fe02f9fc8ecd..d9af20dfb392 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -217,8 +217,10 @@ static void matrix_keypad_stop(struct input_dev *dev)
217{ 217{
218 struct matrix_keypad *keypad = input_get_drvdata(dev); 218 struct matrix_keypad *keypad = input_get_drvdata(dev);
219 219
220 spin_lock_irq(&keypad->lock);
220 keypad->stopped = true; 221 keypad->stopped = true;
221 mb(); 222 spin_unlock_irq(&keypad->lock);
223
222 flush_work(&keypad->work.work); 224 flush_work(&keypad->work.work);
223 /* 225 /*
224 * matrix_keypad_scan() will leave IRQs enabled; 226 * matrix_keypad_scan() will leave IRQs enabled;
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 5a5778729e37..76bb51309a78 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -274,9 +274,18 @@ static const struct i2c_device_id qt1070_id[] = {
274}; 274};
275MODULE_DEVICE_TABLE(i2c, qt1070_id); 275MODULE_DEVICE_TABLE(i2c, qt1070_id);
276 276
277#ifdef CONFIG_OF
278static const struct of_device_id qt1070_of_match[] = {
279 { .compatible = "qt1070", },
280 { },
281};
282MODULE_DEVICE_TABLE(of, qt1070_of_match);
283#endif
284
277static struct i2c_driver qt1070_driver = { 285static struct i2c_driver qt1070_driver = {
278 .driver = { 286 .driver = {
279 .name = "qt1070", 287 .name = "qt1070",
288 .of_match_table = of_match_ptr(qt1070_of_match),
280 .pm = &qt1070_pm_ops, 289 .pm = &qt1070_pm_ops,
281 }, 290 },
282 .id_table = qt1070_id, 291 .id_table = qt1070_id,
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 9002298698fc..a5e8998047fe 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -164,11 +164,18 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
164 int error, col, row; 164 int error, col, row;
165 u8 reg, state, code; 165 u8 reg, state, code;
166 166
167 /* Initial read of the key event FIFO */ 167 do {
168 error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg); 168 error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
169 if (error < 0) {
170 dev_err(&keypad_data->client->dev,
171 "unable to read REG_KEY_EVENT_A\n");
172 break;
173 }
174
175 /* Assume that key code 0 signifies empty FIFO */
176 if (reg <= 0)
177 break;
169 178
170 /* Assume that key code 0 signifies empty FIFO */
171 while (error >= 0 && reg > 0) {
172 state = reg & KEY_EVENT_VALUE; 179 state = reg & KEY_EVENT_VALUE;
173 code = reg & KEY_EVENT_CODE; 180 code = reg & KEY_EVENT_CODE;
174 181
@@ -182,13 +189,7 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
182 input_event(input, EV_MSC, MSC_SCAN, code); 189 input_event(input, EV_MSC, MSC_SCAN, code);
183 input_report_key(input, keymap[code], state); 190 input_report_key(input, keymap[code], state);
184 191
185 /* Read for next loop */ 192 } while (1);
186 error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
187 }
188
189 if (error < 0)
190 dev_err(&keypad_data->client->dev,
191 "unable to read REG_KEY_EVENT_A\n");
192 193
193 input_sync(input); 194 input_sync(input);
194} 195}
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 930424e55439..251d64ca41ce 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -521,7 +521,7 @@ static int drv260x_probe(struct i2c_client *client,
521 if (!haptics) 521 if (!haptics)
522 return -ENOMEM; 522 return -ENOMEM;
523 523
524 haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT; 524 haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
525 haptics->rated_voltage = DRV260X_DEF_RATED_VOLT; 525 haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
526 526
527 if (pdata) { 527 if (pdata) {
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index 603fc2fadf05..12b20840fb74 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -70,7 +70,7 @@ static int twl4030_pwrbutton_probe(struct platform_device *pdev)
70 pwr->phys = "twl4030_pwrbutton/input0"; 70 pwr->phys = "twl4030_pwrbutton/input0";
71 pwr->dev.parent = &pdev->dev; 71 pwr->dev.parent = &pdev->dev;
72 72
73 err = devm_request_threaded_irq(&pwr->dev, irq, NULL, powerbutton_irq, 73 err = devm_request_threaded_irq(&pdev->dev, irq, NULL, powerbutton_irq,
74 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | 74 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
75 IRQF_ONESHOT, 75 IRQF_ONESHOT,
76 "twl4030_pwrbutton", pwr); 76 "twl4030_pwrbutton", pwr);
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index c0ec26118732..83dd0ce3ad2a 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -27,6 +27,8 @@
27#define ETP_DISABLE_POWER 0x0001 27#define ETP_DISABLE_POWER 0x0001
28#define ETP_PRESSURE_OFFSET 25 28#define ETP_PRESSURE_OFFSET 25
29 29
30#define ETP_CALIBRATE_MAX_LEN 3
31
30/* IAP Firmware handling */ 32/* IAP Firmware handling */
31#define ETP_PRODUCT_ID_FORMAT_STRING "%d.0" 33#define ETP_PRODUCT_ID_FORMAT_STRING "%d.0"
32#define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin" 34#define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index c9d491bc85e0..a716482774db 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -595,7 +595,7 @@ static ssize_t calibrate_store(struct device *dev,
595 int tries = 20; 595 int tries = 20;
596 int retval; 596 int retval;
597 int error; 597 int error;
598 u8 val[3]; 598 u8 val[ETP_CALIBRATE_MAX_LEN];
599 599
600 retval = mutex_lock_interruptible(&data->sysfs_mutex); 600 retval = mutex_lock_interruptible(&data->sysfs_mutex);
601 if (retval) 601 if (retval)
@@ -1082,6 +1082,13 @@ static int elan_probe(struct i2c_client *client,
1082 return error; 1082 return error;
1083 } 1083 }
1084 1084
1085 /* Make sure there is something at this address */
1086 error = i2c_smbus_read_byte(client);
1087 if (error < 0) {
1088 dev_dbg(&client->dev, "nothing at this address: %d\n", error);
1089 return -ENXIO;
1090 }
1091
1085 /* Initialize the touchpad. */ 1092 /* Initialize the touchpad. */
1086 error = elan_initialize(data); 1093 error = elan_initialize(data);
1087 if (error) 1094 if (error)
@@ -1242,6 +1249,10 @@ static const struct acpi_device_id elan_acpi_id[] = {
1242 { "ELAN060B", 0 }, 1249 { "ELAN060B", 0 },
1243 { "ELAN060C", 0 }, 1250 { "ELAN060C", 0 },
1244 { "ELAN0611", 0 }, 1251 { "ELAN0611", 0 },
1252 { "ELAN0612", 0 },
1253 { "ELAN0618", 0 },
1254 { "ELAN061D", 0 },
1255 { "ELAN0622", 0 },
1245 { "ELAN1000", 0 }, 1256 { "ELAN1000", 0 },
1246 { } 1257 { }
1247}; 1258};
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index a679e56c44cd..765879dcaf85 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -557,7 +557,14 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client,
557 long ret; 557 long ret;
558 int error; 558 int error;
559 int len; 559 int len;
560 u8 buffer[ETP_I2C_INF_LENGTH]; 560 u8 buffer[ETP_I2C_REPORT_LEN];
561
562 len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
563 if (len != ETP_I2C_REPORT_LEN) {
564 error = len < 0 ? len : -EIO;
565 dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
566 error, len);
567 }
561 568
562 reinit_completion(completion); 569 reinit_completion(completion);
563 enable_irq(client->irq); 570 enable_irq(client->irq);
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index cb6aecbc1dc2..2ac85f5cbf31 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -56,7 +56,7 @@
56static int elan_smbus_initialize(struct i2c_client *client) 56static int elan_smbus_initialize(struct i2c_client *client)
57{ 57{
58 u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 }; 58 u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
59 u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 }; 59 u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
60 int len, error; 60 int len, error;
61 61
62 /* Get hello packet */ 62 /* Get hello packet */
@@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
117static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val) 117static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
118{ 118{
119 int error; 119 int error;
120 u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
121
122 BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
120 123
121 error = i2c_smbus_read_block_data(client, 124 error = i2c_smbus_read_block_data(client,
122 ETP_SMBUS_CALIBRATE_QUERY, val); 125 ETP_SMBUS_CALIBRATE_QUERY, buf);
123 if (error < 0) 126 if (error < 0)
124 return error; 127 return error;
125 128
129 memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
126 return 0; 130 return 0;
127} 131}
128 132
@@ -130,7 +134,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
130 bool max_baseline, u8 *value) 134 bool max_baseline, u8 *value)
131{ 135{
132 int error; 136 int error;
133 u8 val[3]; 137 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
134 138
135 error = i2c_smbus_read_block_data(client, 139 error = i2c_smbus_read_block_data(client,
136 max_baseline ? 140 max_baseline ?
@@ -149,7 +153,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
149 bool iap, u8 *version) 153 bool iap, u8 *version)
150{ 154{
151 int error; 155 int error;
152 u8 val[3]; 156 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
153 157
154 error = i2c_smbus_read_block_data(client, 158 error = i2c_smbus_read_block_data(client,
155 iap ? ETP_SMBUS_IAP_VERSION_CMD : 159 iap ? ETP_SMBUS_IAP_VERSION_CMD :
@@ -169,7 +173,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
169 u8 *ic_type, u8 *version) 173 u8 *ic_type, u8 *version)
170{ 174{
171 int error; 175 int error;
172 u8 val[3]; 176 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
173 177
174 error = i2c_smbus_read_block_data(client, 178 error = i2c_smbus_read_block_data(client,
175 ETP_SMBUS_SM_VERSION_CMD, val); 179 ETP_SMBUS_SM_VERSION_CMD, val);
@@ -186,7 +190,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
186static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) 190static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
187{ 191{
188 int error; 192 int error;
189 u8 val[3]; 193 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
190 194
191 error = i2c_smbus_read_block_data(client, 195 error = i2c_smbus_read_block_data(client,
192 ETP_SMBUS_UNIQUEID_CMD, val); 196 ETP_SMBUS_UNIQUEID_CMD, val);
@@ -203,7 +207,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
203 bool iap, u16 *csum) 207 bool iap, u16 *csum)
204{ 208{
205 int error; 209 int error;
206 u8 val[3]; 210 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
207 211
208 error = i2c_smbus_read_block_data(client, 212 error = i2c_smbus_read_block_data(client,
209 iap ? ETP_SMBUS_FW_CHECKSUM_CMD : 213 iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
@@ -223,7 +227,7 @@ static int elan_smbus_get_max(struct i2c_client *client,
223 unsigned int *max_x, unsigned int *max_y) 227 unsigned int *max_x, unsigned int *max_y)
224{ 228{
225 int error; 229 int error;
226 u8 val[3]; 230 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
227 231
228 error = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val); 232 error = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
229 if (error) { 233 if (error) {
@@ -241,7 +245,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client,
241 u8 *hw_res_x, u8 *hw_res_y) 245 u8 *hw_res_x, u8 *hw_res_y)
242{ 246{
243 int error; 247 int error;
244 u8 val[3]; 248 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
245 249
246 error = i2c_smbus_read_block_data(client, 250 error = i2c_smbus_read_block_data(client,
247 ETP_SMBUS_RESOLUTION_CMD, val); 251 ETP_SMBUS_RESOLUTION_CMD, val);
@@ -261,7 +265,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
261 unsigned int *y_traces) 265 unsigned int *y_traces)
262{ 266{
263 int error; 267 int error;
264 u8 val[3]; 268 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
265 269
266 error = i2c_smbus_read_block_data(client, 270 error = i2c_smbus_read_block_data(client,
267 ETP_SMBUS_XY_TRACENUM_CMD, val); 271 ETP_SMBUS_XY_TRACENUM_CMD, val);
@@ -288,7 +292,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client,
288{ 292{
289 int error; 293 int error;
290 u16 constant; 294 u16 constant;
291 u8 val[3]; 295 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
292 296
293 error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val); 297 error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
294 if (error < 0) { 298 if (error < 0) {
@@ -339,7 +343,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
339 int len; 343 int len;
340 int error; 344 int error;
341 enum tp_mode mode; 345 enum tp_mode mode;
342 u8 val[3]; 346 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
343 u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06}; 347 u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
344 u16 password; 348 u16 password;
345 349
@@ -413,7 +417,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
413 struct device *dev = &client->dev; 417 struct device *dev = &client->dev;
414 int error; 418 int error;
415 u16 result; 419 u16 result;
416 u8 val[3]; 420 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
417 421
418 /* 422 /*
419 * Due to the limitation of smbus protocol limiting 423 * Due to the limitation of smbus protocol limiting
@@ -466,6 +470,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
466{ 470{
467 int len; 471 int len;
468 472
473 BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
474
469 len = i2c_smbus_read_block_data(client, 475 len = i2c_smbus_read_block_data(client,
470 ETP_SMBUS_PACKET_QUERY, 476 ETP_SMBUS_PACKET_QUERY,
471 &report[ETP_SMBUS_REPORT_OFFSET]); 477 &report[ETP_SMBUS_REPORT_OFFSET]);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 51b96e9bf793..174bb52c578b 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -804,7 +804,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
804 else if (ic_version == 7 && etd->samples[1] == 0x2A) 804 else if (ic_version == 7 && etd->samples[1] == 0x2A)
805 sanity_check = ((packet[3] & 0x1c) == 0x10); 805 sanity_check = ((packet[3] & 0x1c) == 0x10);
806 else 806 else
807 sanity_check = ((packet[0] & 0x0c) == 0x04 && 807 sanity_check = ((packet[0] & 0x08) == 0x00 &&
808 (packet[3] & 0x1c) == 0x10); 808 (packet[3] & 0x1c) == 0x10);
809 809
810 if (!sanity_check) 810 if (!sanity_check)
@@ -1177,6 +1177,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
1177 { } 1177 { }
1178}; 1178};
1179 1179
1180static const char * const middle_button_pnp_ids[] = {
1181 "LEN2131", /* ThinkPad P52 w/ NFC */
1182 "LEN2132", /* ThinkPad P52 */
1183 NULL
1184};
1185
1180/* 1186/*
1181 * Set the appropriate event bits for the input subsystem 1187 * Set the appropriate event bits for the input subsystem
1182 */ 1188 */
@@ -1196,7 +1202,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1196 __clear_bit(EV_REL, dev->evbit); 1202 __clear_bit(EV_REL, dev->evbit);
1197 1203
1198 __set_bit(BTN_LEFT, dev->keybit); 1204 __set_bit(BTN_LEFT, dev->keybit);
1199 if (dmi_check_system(elantech_dmi_has_middle_button)) 1205 if (dmi_check_system(elantech_dmi_has_middle_button) ||
1206 psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
1200 __set_bit(BTN_MIDDLE, dev->keybit); 1207 __set_bit(BTN_MIDDLE, dev->keybit);
1201 __set_bit(BTN_RIGHT, dev->keybit); 1208 __set_bit(BTN_RIGHT, dev->keybit);
1202 1209
@@ -1715,6 +1722,17 @@ int elantech_init(struct psmouse *psmouse)
1715 etd->samples[0], etd->samples[1], etd->samples[2]); 1722 etd->samples[0], etd->samples[1], etd->samples[2]);
1716 } 1723 }
1717 1724
1725 if (etd->samples[1] == 0x74 && etd->hw_version == 0x03) {
1726 /*
1727 * This module has a bug which makes absolute mode
1728 * unusable, so let's abort so we'll be using standard
1729 * PS/2 protocol.
1730 */
1731 psmouse_info(psmouse,
1732 "absolute mode broken, forcing standard PS/2 protocol\n");
1733 goto init_fail;
1734 }
1735
1718 if (elantech_set_absolute_mode(psmouse)) { 1736 if (elantech_set_absolute_mode(psmouse)) {
1719 psmouse_err(psmouse, 1737 psmouse_err(psmouse,
1720 "failed to put touchpad into absolute mode.\n"); 1738 "failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 7e2dc5e56632..0b49f29bf0da 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -383,6 +383,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
383 if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) { 383 if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
384 psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n"); 384 psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
385 button_info = 0x33; 385 button_info = 0x33;
386 } else if (!button_info) {
387 psmouse_warn(psmouse, "got 0 in extended button data, assuming 3 buttons\n");
388 button_info = 0x33;
386 } 389 }
387 390
388 psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL); 391 psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index b604564dec5c..30328e57fdda 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -15,6 +15,7 @@
15#define MOUSEDEV_MINORS 31 15#define MOUSEDEV_MINORS 31
16#define MOUSEDEV_MIX 63 16#define MOUSEDEV_MIX 63
17 17
18#include <linux/bitops.h>
18#include <linux/sched.h> 19#include <linux/sched.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/poll.h> 21#include <linux/poll.h>
@@ -103,7 +104,7 @@ struct mousedev_client {
103 spinlock_t packet_lock; 104 spinlock_t packet_lock;
104 int pos_x, pos_y; 105 int pos_x, pos_y;
105 106
106 signed char ps2[6]; 107 u8 ps2[6];
107 unsigned char ready, buffer, bufsiz; 108 unsigned char ready, buffer, bufsiz;
108 unsigned char imexseq, impsseq; 109 unsigned char imexseq, impsseq;
109 enum mousedev_emul mode; 110 enum mousedev_emul mode;
@@ -291,11 +292,10 @@ static void mousedev_notify_readers(struct mousedev *mousedev,
291 } 292 }
292 293
293 client->pos_x += packet->dx; 294 client->pos_x += packet->dx;
294 client->pos_x = client->pos_x < 0 ? 295 client->pos_x = clamp_val(client->pos_x, 0, xres);
295 0 : (client->pos_x >= xres ? xres : client->pos_x); 296
296 client->pos_y += packet->dy; 297 client->pos_y += packet->dy;
297 client->pos_y = client->pos_y < 0 ? 298 client->pos_y = clamp_val(client->pos_y, 0, yres);
298 0 : (client->pos_y >= yres ? yres : client->pos_y);
299 299
300 p->dx += packet->dx; 300 p->dx += packet->dx;
301 p->dy += packet->dy; 301 p->dy += packet->dy;
@@ -571,44 +571,50 @@ static int mousedev_open(struct inode *inode, struct file *file)
571 return error; 571 return error;
572} 572}
573 573
574static inline int mousedev_limit_delta(int delta, int limit) 574static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data)
575{
576 return delta > limit ? limit : (delta < -limit ? -limit : delta);
577}
578
579static void mousedev_packet(struct mousedev_client *client,
580 signed char *ps2_data)
581{ 575{
582 struct mousedev_motion *p = &client->packets[client->tail]; 576 struct mousedev_motion *p = &client->packets[client->tail];
577 s8 dx, dy, dz;
578
579 dx = clamp_val(p->dx, -127, 127);
580 p->dx -= dx;
581
582 dy = clamp_val(p->dy, -127, 127);
583 p->dy -= dy;
583 584
584 ps2_data[0] = 0x08 | 585 ps2_data[0] = BIT(3);
585 ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07); 586 ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2);
586 ps2_data[1] = mousedev_limit_delta(p->dx, 127); 587 ps2_data[0] |= p->buttons & 0x07;
587 ps2_data[2] = mousedev_limit_delta(p->dy, 127); 588 ps2_data[1] = dx;
588 p->dx -= ps2_data[1]; 589 ps2_data[2] = dy;
589 p->dy -= ps2_data[2];
590 590
591 switch (client->mode) { 591 switch (client->mode) {
592 case MOUSEDEV_EMUL_EXPS: 592 case MOUSEDEV_EMUL_EXPS:
593 ps2_data[3] = mousedev_limit_delta(p->dz, 7); 593 dz = clamp_val(p->dz, -7, 7);
594 p->dz -= ps2_data[3]; 594 p->dz -= dz;
595 ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1); 595
596 ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1);
596 client->bufsiz = 4; 597 client->bufsiz = 4;
597 break; 598 break;
598 599
599 case MOUSEDEV_EMUL_IMPS: 600 case MOUSEDEV_EMUL_IMPS:
600 ps2_data[0] |= 601 dz = clamp_val(p->dz, -127, 127);
601 ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); 602 p->dz -= dz;
602 ps2_data[3] = mousedev_limit_delta(p->dz, 127); 603
603 p->dz -= ps2_data[3]; 604 ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
605 ((p->buttons & 0x08) >> 1);
606 ps2_data[3] = dz;
607
604 client->bufsiz = 4; 608 client->bufsiz = 4;
605 break; 609 break;
606 610
607 case MOUSEDEV_EMUL_PS2: 611 case MOUSEDEV_EMUL_PS2:
608 default: 612 default:
609 ps2_data[0] |=
610 ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
611 p->dz = 0; 613 p->dz = 0;
614
615 ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
616 ((p->buttons & 0x08) >> 1);
617
612 client->bufsiz = 3; 618 client->bufsiz = 3;
613 break; 619 break;
614 } 620 }
@@ -714,7 +720,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
714{ 720{
715 struct mousedev_client *client = file->private_data; 721 struct mousedev_client *client = file->private_data;
716 struct mousedev *mousedev = client->mousedev; 722 struct mousedev *mousedev = client->mousedev;
717 signed char data[sizeof(client->ps2)]; 723 u8 data[sizeof(client->ps2)];
718 int retval = 0; 724 int retval = 0;
719 725
720 if (!client->ready && !client->buffer && mousedev->exist && 726 if (!client->ready && !client->buffer && mousedev->exist &&
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index d1051e3ce819..34be09651ee8 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -527,6 +527,27 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
527 DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), 527 DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
528 }, 528 },
529 }, 529 },
530 {
531 /* Lenovo LaVie Z */
532 .matches = {
533 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
534 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
535 },
536 },
537 { }
538};
539
540static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = {
541 {
542 /*
543 * Sony Vaio VGN-CS series require MUX or the touch sensor
544 * buttons will disturb touchpad operation
545 */
546 .matches = {
547 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
548 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
549 },
550 },
530 { } 551 { }
531}; 552};
532 553
@@ -693,6 +714,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
693 }, 714 },
694 }, 715 },
695 { 716 {
717 /* Lenovo ThinkPad L460 */
718 .matches = {
719 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
720 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
721 },
722 },
723 {
696 /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ 724 /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
697 .matches = { 725 .matches = {
698 DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), 726 DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
@@ -1223,6 +1251,9 @@ static int __init i8042_platform_init(void)
1223 if (dmi_check_system(i8042_dmi_nomux_table)) 1251 if (dmi_check_system(i8042_dmi_nomux_table))
1224 i8042_nomux = true; 1252 i8042_nomux = true;
1225 1253
1254 if (dmi_check_system(i8042_dmi_forcemux_table))
1255 i8042_nomux = false;
1256
1226 if (dmi_check_system(i8042_dmi_notimeout_table)) 1257 if (dmi_check_system(i8042_dmi_notimeout_table))
1227 i8042_notimeout = true; 1258 i8042_notimeout = true;
1228 1259
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index 71b5a634cf6d..e7bb155911d0 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -152,7 +152,7 @@ static int __maybe_unused ar1021_i2c_resume(struct device *dev)
152static SIMPLE_DEV_PM_OPS(ar1021_i2c_pm, ar1021_i2c_suspend, ar1021_i2c_resume); 152static SIMPLE_DEV_PM_OPS(ar1021_i2c_pm, ar1021_i2c_suspend, ar1021_i2c_resume);
153 153
154static const struct i2c_device_id ar1021_i2c_id[] = { 154static const struct i2c_device_id ar1021_i2c_id[] = {
155 { "MICROCHIP_AR1021_I2C", 0 }, 155 { "ar1021", 0 },
156 { }, 156 { },
157}; 157};
158MODULE_DEVICE_TABLE(i2c, ar1021_i2c_id); 158MODULE_DEVICE_TABLE(i2c, ar1021_i2c_id);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 2d5794ec338b..88dfe3008cf4 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2523,6 +2523,15 @@ static const struct dmi_system_id mxt_dmi_table[] = {
2523 .driver_data = samus_platform_data, 2523 .driver_data = samus_platform_data,
2524 }, 2524 },
2525 { 2525 {
2526 /* Samsung Chromebook Pro */
2527 .ident = "Samsung Chromebook Pro",
2528 .matches = {
2529 DMI_MATCH(DMI_SYS_VENDOR, "Google"),
2530 DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
2531 },
2532 .driver_data = samus_platform_data,
2533 },
2534 {
2526 /* Other Google Chromebooks */ 2535 /* Other Google Chromebooks */
2527 .ident = "Chromebook", 2536 .ident = "Chromebook",
2528 .matches = { 2537 .matches = {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index a27f0d7107af..eef7caa9e625 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -846,6 +846,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
846#ifdef CONFIG_ACPI 846#ifdef CONFIG_ACPI
847static const struct acpi_device_id goodix_acpi_match[] = { 847static const struct acpi_device_id goodix_acpi_match[] = {
848 { "GDIX1001", 0 }, 848 { "GDIX1001", 0 },
849 { "GDIX1002", 0 },
849 { } 850 { }
850}; 851};
851MODULE_DEVICE_TABLE(acpi, goodix_acpi_match); 852MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 5d0cd51c6f41..a4b7b4c3d27b 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -455,6 +455,14 @@ static int tsc2007_probe(struct i2c_client *client,
455 455
456 tsc2007_stop(ts); 456 tsc2007_stop(ts);
457 457
458 /* power down the chip (TSC2007_SETUP does not ACK on I2C) */
459 err = tsc2007_xfer(ts, PWRDOWN);
460 if (err < 0) {
461 dev_err(&client->dev,
462 "Failed to setup chip: %d\n", err);
463 return err; /* usually, chip does not respond */
464 }
465
458 err = input_register_device(input_dev); 466 err = input_register_device(input_dev);
459 if (err) { 467 if (err) {
460 dev_err(&client->dev, 468 dev_err(&client->dev,
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index f929879ecae6..10068a481e22 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -127,6 +127,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
127 pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n", 127 pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
128 iommu->name); 128 iommu->name);
129 dmar_free_hwirq(irq); 129 dmar_free_hwirq(irq);
130 iommu->pr_irq = 0;
130 goto err; 131 goto err;
131 } 132 }
132 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); 133 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
@@ -142,9 +143,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
142 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); 143 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
143 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); 144 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);
144 145
145 free_irq(iommu->pr_irq, iommu); 146 if (iommu->pr_irq) {
146 dmar_free_hwirq(iommu->pr_irq); 147 free_irq(iommu->pr_irq, iommu);
147 iommu->pr_irq = 0; 148 dmar_free_hwirq(iommu->pr_irq);
149 iommu->pr_irq = 0;
150 }
148 151
149 free_pages((unsigned long)iommu->prq, PRQ_ORDER); 152 free_pages((unsigned long)iommu->prq, PRQ_ORDER);
150 iommu->prq = NULL; 153 iommu->prq = NULL;
@@ -386,6 +389,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
386 pasid_max - 1, GFP_KERNEL); 389 pasid_max - 1, GFP_KERNEL);
387 if (ret < 0) { 390 if (ret < 0) {
388 kfree(svm); 391 kfree(svm);
392 kfree(sdev);
389 goto out; 393 goto out;
390 } 394 }
391 svm->pasid = ret; 395 svm->pasid = ret;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index e9b241b1c9dd..ac596928f6b4 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -753,7 +753,7 @@ static inline void set_irq_posting_cap(void)
753 * should have X86_FEATURE_CX16 support, this has been confirmed 753 * should have X86_FEATURE_CX16 support, this has been confirmed
754 * with Intel hardware guys. 754 * with Intel hardware guys.
755 */ 755 */
756 if ( cpu_has_cx16 ) 756 if (boot_cpu_has(X86_FEATURE_CX16))
757 intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP; 757 intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
758 758
759 for_each_iommu(iommu, drhd) 759 for_each_iommu(iommu, drhd)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index fa0adef32bd6..62739766b60b 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -126,7 +126,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
126 break; /* found a free slot */ 126 break; /* found a free slot */
127 } 127 }
128adjust_limit_pfn: 128adjust_limit_pfn:
129 limit_pfn = curr_iova->pfn_lo - 1; 129 limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0;
130move_left: 130move_left:
131 prev = curr; 131 prev = curr;
132 curr = rb_prev(curr); 132 curr = rb_prev(curr);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 4cb603e5ba70..0120366efcf1 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1689,6 +1689,7 @@ static int __init omap_iommu_init(void)
1689 const unsigned long flags = SLAB_HWCACHE_ALIGN; 1689 const unsigned long flags = SLAB_HWCACHE_ALIGN;
1690 size_t align = 1 << 10; /* L2 pagetable alignement */ 1690 size_t align = 1 << 10; /* L2 pagetable alignement */
1691 struct device_node *np; 1691 struct device_node *np;
1692 int ret;
1692 1693
1693 np = of_find_matching_node(NULL, omap_iommu_of_match); 1694 np = of_find_matching_node(NULL, omap_iommu_of_match);
1694 if (!np) 1695 if (!np)
@@ -1702,11 +1703,25 @@ static int __init omap_iommu_init(void)
1702 return -ENOMEM; 1703 return -ENOMEM;
1703 iopte_cachep = p; 1704 iopte_cachep = p;
1704 1705
1705 bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
1706
1707 omap_iommu_debugfs_init(); 1706 omap_iommu_debugfs_init();
1708 1707
1709 return platform_driver_register(&omap_iommu_driver); 1708 ret = platform_driver_register(&omap_iommu_driver);
1709 if (ret) {
1710 pr_err("%s: failed to register driver\n", __func__);
1711 goto fail_driver;
1712 }
1713
1714 ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
1715 if (ret)
1716 goto fail_bus;
1717
1718 return 0;
1719
1720fail_bus:
1721 platform_driver_unregister(&omap_iommu_driver);
1722fail_driver:
1723 kmem_cache_destroy(iopte_cachep);
1724 return ret;
1710} 1725}
1711subsys_initcall(omap_iommu_init); 1726subsys_initcall(omap_iommu_init);
1712/* must be ready before omap3isp is probed */ 1727/* must be ready before omap3isp is probed */
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c5f1757ac61d..82e00e3ad0e0 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -663,7 +663,7 @@ static struct irq_chip its_irq_chip = {
663 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. 663 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
664 */ 664 */
665#define IRQS_PER_CHUNK_SHIFT 5 665#define IRQS_PER_CHUNK_SHIFT 5
666#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) 666#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
667 667
668static unsigned long *lpi_bitmap; 668static unsigned long *lpi_bitmap;
669static u32 lpi_chunks; 669static u32 lpi_chunks;
@@ -1168,11 +1168,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1168 1168
1169 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1169 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1170 /* 1170 /*
1171 * At least one bit of EventID is being used, hence a minimum 1171 * We allocate at least one chunk worth of LPIs bet device,
1172 * of two entries. No, the architecture doesn't let you 1172 * and thus that many ITEs. The device may require less though.
1173 * express an ITT with a single entry.
1174 */ 1173 */
1175 nr_ites = max(2UL, roundup_pow_of_two(nvecs)); 1174 nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
1176 sz = nr_ites * its->ite_size; 1175 sz = nr_ites * its->ite_size;
1177 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 1176 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
1178 itt = kzalloc(sz, GFP_KERNEL); 1177 itt = kzalloc(sz, GFP_KERNEL);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 5a1490b046ac..9ab424b9b281 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -589,7 +589,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
589 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | 589 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
590 tlist << ICC_SGI1R_TARGET_LIST_SHIFT); 590 tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
591 591
592 pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); 592 pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
593 gic_write_sgi1r(val); 593 gic_write_sgi1r(val);
594} 594}
595 595
@@ -604,7 +604,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
604 * Ensure that stores to Normal memory are visible to the 604 * Ensure that stores to Normal memory are visible to the
605 * other CPUs before issuing the IPI. 605 * other CPUs before issuing the IPI.
606 */ 606 */
607 smp_wmb(); 607 wmb();
608 608
609 for_each_cpu(cpu, mask) { 609 for_each_cpu(cpu, mask) {
610 unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL; 610 unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
index d91dd580e978..37aaea88a6ad 100644
--- a/drivers/isdn/hardware/eicon/diva.c
+++ b/drivers/isdn/hardware/eicon/diva.c
@@ -387,10 +387,10 @@ void divasa_xdi_driver_unload(void)
387** Receive and process command from user mode utility 387** Receive and process command from user mode utility
388*/ 388*/
389void *diva_xdi_open_adapter(void *os_handle, const void __user *src, 389void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
390 int length, 390 int length, void *mptr,
391 divas_xdi_copy_from_user_fn_t cp_fn) 391 divas_xdi_copy_from_user_fn_t cp_fn)
392{ 392{
393 diva_xdi_um_cfg_cmd_t msg; 393 diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
394 diva_os_xdi_adapter_t *a = NULL; 394 diva_os_xdi_adapter_t *a = NULL;
395 diva_os_spin_lock_magic_t old_irql; 395 diva_os_spin_lock_magic_t old_irql;
396 struct list_head *tmp; 396 struct list_head *tmp;
@@ -400,21 +400,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
400 length, sizeof(diva_xdi_um_cfg_cmd_t))) 400 length, sizeof(diva_xdi_um_cfg_cmd_t)))
401 return NULL; 401 return NULL;
402 } 402 }
403 if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) { 403 if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
404 DBG_ERR(("A: A(?) open, write error")) 404 DBG_ERR(("A: A(?) open, write error"))
405 return NULL; 405 return NULL;
406 } 406 }
407 diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter"); 407 diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
408 list_for_each(tmp, &adapter_queue) { 408 list_for_each(tmp, &adapter_queue) {
409 a = list_entry(tmp, diva_os_xdi_adapter_t, link); 409 a = list_entry(tmp, diva_os_xdi_adapter_t, link);
410 if (a->controller == (int)msg.adapter) 410 if (a->controller == (int)msg->adapter)
411 break; 411 break;
412 a = NULL; 412 a = NULL;
413 } 413 }
414 diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter"); 414 diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
415 415
416 if (!a) { 416 if (!a) {
417 DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter)) 417 DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
418 } 418 }
419 419
420 return (a); 420 return (a);
@@ -436,8 +436,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)
436 436
437int 437int
438diva_xdi_write(void *adapter, void *os_handle, const void __user *src, 438diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
439 int length, divas_xdi_copy_from_user_fn_t cp_fn) 439 int length, void *mptr,
440 divas_xdi_copy_from_user_fn_t cp_fn)
440{ 441{
442 diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
441 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter; 443 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
442 void *data; 444 void *data;
443 445
@@ -458,7 +460,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
458 return (-2); 460 return (-2);
459 } 461 }
460 462
461 length = (*cp_fn) (os_handle, data, src, length); 463 if (msg) {
464 *(diva_xdi_um_cfg_cmd_t *)data = *msg;
465 length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
466 src + sizeof(*msg), length - sizeof(*msg));
467 } else {
468 length = (*cp_fn) (os_handle, data, src, length);
469 }
462 if (length > 0) { 470 if (length > 0) {
463 if ((*(a->interface.cmd_proc)) 471 if ((*(a->interface.cmd_proc))
464 (a, (diva_xdi_um_cfg_cmd_t *) data, length)) { 472 (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
index e979085d1b89..a0a607c0c32e 100644
--- a/drivers/isdn/hardware/eicon/diva.h
+++ b/drivers/isdn/hardware/eicon/diva.h
@@ -19,10 +19,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
19 int max_length, divas_xdi_copy_to_user_fn_t cp_fn); 19 int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
20 20
21int diva_xdi_write(void *adapter, void *os_handle, const void __user *src, 21int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
22 int length, divas_xdi_copy_from_user_fn_t cp_fn); 22 int length, void *msg,
23 divas_xdi_copy_from_user_fn_t cp_fn);
23 24
24void *diva_xdi_open_adapter(void *os_handle, const void __user *src, 25void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
25 int length, 26 int length, void *msg,
26 divas_xdi_copy_from_user_fn_t cp_fn); 27 divas_xdi_copy_from_user_fn_t cp_fn);
27 28
28void diva_xdi_close_adapter(void *adapter, void *os_handle); 29void diva_xdi_close_adapter(void *adapter, void *os_handle);
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index a2e0ed6c9a4d..91bd2ba0bdd8 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
591static ssize_t divas_write(struct file *file, const char __user *buf, 591static ssize_t divas_write(struct file *file, const char __user *buf,
592 size_t count, loff_t *ppos) 592 size_t count, loff_t *ppos)
593{ 593{
594 diva_xdi_um_cfg_cmd_t msg;
594 int ret = -EINVAL; 595 int ret = -EINVAL;
595 596
596 if (!file->private_data) { 597 if (!file->private_data) {
597 file->private_data = diva_xdi_open_adapter(file, buf, 598 file->private_data = diva_xdi_open_adapter(file, buf,
598 count, 599 count, &msg,
599 xdi_copy_from_user); 600 xdi_copy_from_user);
600 } 601 if (!file->private_data)
601 if (!file->private_data) { 602 return (-ENODEV);
602 return (-ENODEV); 603 ret = diva_xdi_write(file->private_data, file,
604 buf, count, &msg, xdi_copy_from_user);
605 } else {
606 ret = diva_xdi_write(file->private_data, file,
607 buf, count, NULL, xdi_copy_from_user);
603 } 608 }
604 609
605 ret = diva_xdi_write(file->private_data, file,
606 buf, count, xdi_copy_from_user);
607 switch (ret) { 610 switch (ret) {
608 case -1: /* Message should be removed from rx mailbox first */ 611 case -1: /* Message should be removed from rx mailbox first */
609 ret = -EBUSY; 612 ret = -EBUSY;
@@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
622static ssize_t divas_read(struct file *file, char __user *buf, 625static ssize_t divas_read(struct file *file, char __user *buf,
623 size_t count, loff_t *ppos) 626 size_t count, loff_t *ppos)
624{ 627{
628 diva_xdi_um_cfg_cmd_t msg;
625 int ret = -EINVAL; 629 int ret = -EINVAL;
626 630
627 if (!file->private_data) { 631 if (!file->private_data) {
628 file->private_data = diva_xdi_open_adapter(file, buf, 632 file->private_data = diva_xdi_open_adapter(file, buf,
629 count, 633 count, &msg,
630 xdi_copy_from_user); 634 xdi_copy_from_user);
631 } 635 }
632 if (!file->private_data) { 636 if (!file->private_data) {
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 7b4ddf0a39ec..2d28530b7e82 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -147,7 +147,7 @@ static word plci_remove_check(PLCI *);
147static void listen_check(DIVA_CAPI_ADAPTER *); 147static void listen_check(DIVA_CAPI_ADAPTER *);
148static byte AddInfo(byte **, byte **, byte *, byte *); 148static byte AddInfo(byte **, byte **, byte *, byte *);
149static byte getChannel(API_PARSE *); 149static byte getChannel(API_PARSE *);
150static void IndParse(PLCI *, word *, byte **, byte); 150static void IndParse(PLCI *, const word *, byte **, byte);
151static byte ie_compare(byte *, byte *); 151static byte ie_compare(byte *, byte *);
152static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *); 152static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *);
153static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word); 153static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word);
@@ -4860,7 +4860,7 @@ static void sig_ind(PLCI *plci)
4860 /* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */ 4860 /* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */
4861 /* SMSG is situated at the end because its 0 (for compatibility reasons */ 4861 /* SMSG is situated at the end because its 0 (for compatibility reasons */
4862 /* (see Info_Mask Bit 4, first IE. then the message type) */ 4862 /* (see Info_Mask Bit 4, first IE. then the message type) */
4863 word parms_id[] = 4863 static const word parms_id[] =
4864 {MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA, 4864 {MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA,
4865 UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW, 4865 UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW,
4866 RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR, 4866 RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR,
@@ -4868,12 +4868,12 @@ static void sig_ind(PLCI *plci)
4868 /* 14 FTY repl by ESC_CHI */ 4868 /* 14 FTY repl by ESC_CHI */
4869 /* 18 PI repl by ESC_LAW */ 4869 /* 18 PI repl by ESC_LAW */
4870 /* removed OAD changed to 0xff for future use, OAD is multiIE now */ 4870 /* removed OAD changed to 0xff for future use, OAD is multiIE now */
4871 word multi_fac_id[] = {1, FTY}; 4871 static const word multi_fac_id[] = {1, FTY};
4872 word multi_pi_id[] = {1, PI}; 4872 static const word multi_pi_id[] = {1, PI};
4873 word multi_CiPN_id[] = {1, OAD}; 4873 static const word multi_CiPN_id[] = {1, OAD};
4874 word multi_ssext_id[] = {1, ESC_SSEXT}; 4874 static const word multi_ssext_id[] = {1, ESC_SSEXT};
4875 4875
4876 word multi_vswitch_id[] = {1, ESC_VSWITCH}; 4876 static const word multi_vswitch_id[] = {1, ESC_VSWITCH};
4877 4877
4878 byte *cau; 4878 byte *cau;
4879 word ncci; 4879 word ncci;
@@ -8926,7 +8926,7 @@ static void listen_check(DIVA_CAPI_ADAPTER *a)
8926/* functions for all parameters sent in INDs */ 8926/* functions for all parameters sent in INDs */
8927/*------------------------------------------------------------------*/ 8927/*------------------------------------------------------------------*/
8928 8928
8929static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize) 8929static void IndParse(PLCI *plci, const word *parms_id, byte **parms, byte multiIEsize)
8930{ 8930{
8931 word ploc; /* points to current location within packet */ 8931 word ploc; /* points to current location within packet */
8932 byte w; 8932 byte w;
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 358a574d9e8b..46d957c34be1 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -718,7 +718,7 @@ icn_sendbuf(int channel, int ack, struct sk_buff *skb, icn_card *card)
718 return 0; 718 return 0;
719 if (card->sndcount[channel] > ICN_MAX_SQUEUE) 719 if (card->sndcount[channel] > ICN_MAX_SQUEUE)
720 return 0; 720 return 0;
721#warning TODO test headroom or use skb->nb to flag ACK 721 /* TODO test headroom or use skb->nb to flag ACK */
722 nskb = skb_clone(skb, GFP_ATOMIC); 722 nskb = skb_clone(skb, GFP_ATOMIC);
723 if (nskb) { 723 if (nskb) {
724 /* Push ACK flag as one 724 /* Push ACK flag as one
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 9cb4b621fbc3..b92a19a594a1 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -72,7 +72,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
72 if (sk->sk_state != MISDN_BOUND) 72 if (sk->sk_state != MISDN_BOUND)
73 continue; 73 continue;
74 if (!cskb) 74 if (!cskb)
75 cskb = skb_copy(skb, GFP_KERNEL); 75 cskb = skb_copy(skb, GFP_ATOMIC);
76 if (!cskb) { 76 if (!cskb) {
77 printk(KERN_WARNING "%s no skb\n", __func__); 77 printk(KERN_WARNING "%s no skb\n", __func__);
78 break; 78 break;
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index 3597ef47b28a..09fc129ef2fa 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -441,6 +441,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
441 RspMessage rcvmsg; 441 RspMessage rcvmsg;
442 ReqMessage sndmsg; 442 ReqMessage sndmsg;
443 HWConfig_pl hwci; 443 HWConfig_pl hwci;
444 void __iomem *rambase_sig = (void __iomem *)rambase + SIG_OFFSET;
444 int x; 445 int x;
445 446
446 pr_debug("Attempting to identify adapter @ 0x%lx io 0x%x\n", 447 pr_debug("Attempting to identify adapter @ 0x%lx io 0x%x\n",
@@ -481,7 +482,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
481 */ 482 */
482 outb(PRI_BASEPG_VAL, pgport); 483 outb(PRI_BASEPG_VAL, pgport);
483 msleep_interruptible(1000); 484 msleep_interruptible(1000);
484 sig = readl(rambase + SIG_OFFSET); 485 sig = readl(rambase_sig);
485 pr_debug("Looking for a signature, got 0x%lx\n", sig); 486 pr_debug("Looking for a signature, got 0x%lx\n", sig);
486 if (sig == SIGNATURE) 487 if (sig == SIGNATURE)
487 return PRI_BOARD; 488 return PRI_BOARD;
@@ -491,7 +492,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
491 */ 492 */
492 outb(BRI_BASEPG_VAL, pgport); 493 outb(BRI_BASEPG_VAL, pgport);
493 msleep_interruptible(1000); 494 msleep_interruptible(1000);
494 sig = readl(rambase + SIG_OFFSET); 495 sig = readl(rambase_sig);
495 pr_debug("Looking for a signature, got 0x%lx\n", sig); 496 pr_debug("Looking for a signature, got 0x%lx\n", sig);
496 if (sig == SIGNATURE) 497 if (sig == SIGNATURE)
497 return BRI_BOARD; 498 return BRI_BOARD;
@@ -501,7 +502,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
501 /* 502 /*
502 * Try to spot a card 503 * Try to spot a card
503 */ 504 */
504 sig = readl(rambase + SIG_OFFSET); 505 sig = readl(rambase_sig);
505 pr_debug("Looking for a signature, got 0x%lx\n", sig); 506 pr_debug("Looking for a signature, got 0x%lx\n", sig);
506 if (sig != SIGNATURE) 507 if (sig != SIGNATURE)
507 return -1; 508 return -1;
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index e8b1120f486d..eef3e64ca0a8 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -88,21 +88,23 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
88 down_read(&led_cdev->trigger_lock); 88 down_read(&led_cdev->trigger_lock);
89 89
90 if (!led_cdev->trigger) 90 if (!led_cdev->trigger)
91 len += sprintf(buf+len, "[none] "); 91 len += scnprintf(buf+len, PAGE_SIZE - len, "[none] ");
92 else 92 else
93 len += sprintf(buf+len, "none "); 93 len += scnprintf(buf+len, PAGE_SIZE - len, "none ");
94 94
95 list_for_each_entry(trig, &trigger_list, next_trig) { 95 list_for_each_entry(trig, &trigger_list, next_trig) {
96 if (led_cdev->trigger && !strcmp(led_cdev->trigger->name, 96 if (led_cdev->trigger && !strcmp(led_cdev->trigger->name,
97 trig->name)) 97 trig->name))
98 len += sprintf(buf+len, "[%s] ", trig->name); 98 len += scnprintf(buf+len, PAGE_SIZE - len, "[%s] ",
99 trig->name);
99 else 100 else
100 len += sprintf(buf+len, "%s ", trig->name); 101 len += scnprintf(buf+len, PAGE_SIZE - len, "%s ",
102 trig->name);
101 } 103 }
102 up_read(&led_cdev->trigger_lock); 104 up_read(&led_cdev->trigger_lock);
103 up_read(&triggers_list_lock); 105 up_read(&triggers_list_lock);
104 106
105 len += sprintf(len+buf, "\n"); 107 len += scnprintf(len+buf, PAGE_SIZE - len, "\n");
106 return len; 108 return len;
107} 109}
108EXPORT_SYMBOL_GPL(led_trigger_show); 110EXPORT_SYMBOL_GPL(led_trigger_show);
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index b775e1efecd3..b9f71a87b7e1 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -281,7 +281,7 @@ static int pca955x_probe(struct i2c_client *client,
281 "slave address 0x%02x\n", 281 "slave address 0x%02x\n",
282 id->name, chip->bits, client->addr); 282 id->name, chip->bits, client->addr);
283 283
284 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) 284 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
285 return -EIO; 285 return -EIO;
286 286
287 if (pdata) { 287 if (pdata) {
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 4d46f2ce606f..16c3390e5d9f 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -285,8 +285,10 @@ do { \
285 break; \ 285 break; \
286 \ 286 \
287 mutex_unlock(&(ca)->set->bucket_lock); \ 287 mutex_unlock(&(ca)->set->bucket_lock); \
288 if (kthread_should_stop()) \ 288 if (kthread_should_stop()) { \
289 set_current_state(TASK_RUNNING); \
289 return 0; \ 290 return 0; \
291 } \
290 \ 292 \
291 try_to_freeze(); \ 293 try_to_freeze(); \
292 schedule(); \ 294 schedule(); \
@@ -514,15 +516,21 @@ struct open_bucket {
514 516
515/* 517/*
516 * We keep multiple buckets open for writes, and try to segregate different 518 * We keep multiple buckets open for writes, and try to segregate different
517 * write streams for better cache utilization: first we look for a bucket where 519 * write streams for better cache utilization: first we try to segregate flash
518 * the last write to it was sequential with the current write, and failing that 520 * only volume write streams from cached devices, secondly we look for a bucket
519 * we look for a bucket that was last used by the same task. 521 * where the last write to it was sequential with the current write, and
522 * failing that we look for a bucket that was last used by the same task.
520 * 523 *
521 * The ideas is if you've got multiple tasks pulling data into the cache at the 524 * The ideas is if you've got multiple tasks pulling data into the cache at the
522 * same time, you'll get better cache utilization if you try to segregate their 525 * same time, you'll get better cache utilization if you try to segregate their
523 * data and preserve locality. 526 * data and preserve locality.
524 * 527 *
525 * For example, say you've starting Firefox at the same time you're copying a 528 * For example, dirty sectors of flash only volume is not reclaimable, if their
529 * dirty sectors mixed with dirty sectors of cached device, such buckets will
530 * be marked as dirty and won't be reclaimed, though the dirty data of cached
531 * device have been written back to backend device.
532 *
533 * And say you've starting Firefox at the same time you're copying a
526 * bunch of files. Firefox will likely end up being fairly hot and stay in the 534 * bunch of files. Firefox will likely end up being fairly hot and stay in the
527 * cache awhile, but the data you copied might not be; if you wrote all that 535 * cache awhile, but the data you copied might not be; if you wrote all that
528 * data to the same buckets it'd get invalidated at the same time. 536 * data to the same buckets it'd get invalidated at the same time.
@@ -539,7 +547,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
539 struct open_bucket *ret, *ret_task = NULL; 547 struct open_bucket *ret, *ret_task = NULL;
540 548
541 list_for_each_entry_reverse(ret, &c->data_buckets, list) 549 list_for_each_entry_reverse(ret, &c->data_buckets, list)
542 if (!bkey_cmp(&ret->key, search)) 550 if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
551 UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
552 continue;
553 else if (!bkey_cmp(&ret->key, search))
543 goto found; 554 goto found;
544 else if (ret->last_write_point == write_point) 555 else if (ret->last_write_point == write_point)
545 ret_task = ret; 556 ret_task = ret;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 02619cabda8b..7fe7df56fa33 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -904,7 +904,7 @@ void bcache_write_super(struct cache_set *);
904 904
905int bch_flash_dev_create(struct cache_set *c, uint64_t size); 905int bch_flash_dev_create(struct cache_set *c, uint64_t size);
906 906
907int bch_cached_dev_attach(struct cached_dev *, struct cache_set *); 907int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
908void bch_cached_dev_detach(struct cached_dev *); 908void bch_cached_dev_detach(struct cached_dev *);
909void bch_cached_dev_run(struct cached_dev *); 909void bch_cached_dev_run(struct cached_dev *);
910void bcache_device_stop(struct bcache_device *); 910void bcache_device_stop(struct bcache_device *);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5b815e64c1c9..4ed621ad27e4 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -808,7 +808,10 @@ int bch_btree_cache_alloc(struct cache_set *c)
808 c->shrink.scan_objects = bch_mca_scan; 808 c->shrink.scan_objects = bch_mca_scan;
809 c->shrink.seeks = 4; 809 c->shrink.seeks = 4;
810 c->shrink.batch = c->btree_pages * 2; 810 c->shrink.batch = c->btree_pages * 2;
811 register_shrinker(&c->shrink); 811
812 if (register_shrinker(&c->shrink))
813 pr_warn("bcache: %s: could not register shrinker",
814 __func__);
812 815
813 return 0; 816 return 0;
814} 817}
@@ -1866,14 +1869,17 @@ void bch_initial_gc_finish(struct cache_set *c)
1866 */ 1869 */
1867 for_each_cache(ca, c, i) { 1870 for_each_cache(ca, c, i) {
1868 for_each_bucket(b, ca) { 1871 for_each_bucket(b, ca) {
1869 if (fifo_full(&ca->free[RESERVE_PRIO])) 1872 if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1873 fifo_full(&ca->free[RESERVE_BTREE]))
1870 break; 1874 break;
1871 1875
1872 if (bch_can_invalidate_bucket(ca, b) && 1876 if (bch_can_invalidate_bucket(ca, b) &&
1873 !GC_MARK(b)) { 1877 !GC_MARK(b)) {
1874 __bch_invalidate_one_bucket(ca, b); 1878 __bch_invalidate_one_bucket(ca, b);
1875 fifo_push(&ca->free[RESERVE_PRIO], 1879 if (!fifo_push(&ca->free[RESERVE_PRIO],
1876 b - ca->buckets); 1880 b - ca->buckets))
1881 fifo_push(&ca->free[RESERVE_BTREE],
1882 b - ca->buckets);
1877 } 1883 }
1878 } 1884 }
1879 } 1885 }
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index e73aeb0e892c..e497bde96db3 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -633,11 +633,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
633static void search_free(struct closure *cl) 633static void search_free(struct closure *cl)
634{ 634{
635 struct search *s = container_of(cl, struct search, cl); 635 struct search *s = container_of(cl, struct search, cl);
636 bio_complete(s);
637 636
638 if (s->iop.bio) 637 if (s->iop.bio)
639 bio_put(s->iop.bio); 638 bio_put(s->iop.bio);
640 639
640 bio_complete(s);
641 closure_debug_destroy(cl); 641 closure_debug_destroy(cl);
642 mempool_free(s, s->d->c->search); 642 mempool_free(s, s->d->c->search);
643} 643}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 8eaadd9869bc..ef28ddfff7c6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -890,6 +890,12 @@ static void cached_dev_detach_finish(struct work_struct *w)
890 890
891 mutex_lock(&bch_register_lock); 891 mutex_lock(&bch_register_lock);
892 892
893 cancel_delayed_work_sync(&dc->writeback_rate_update);
894 if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
895 kthread_stop(dc->writeback_thread);
896 dc->writeback_thread = NULL;
897 }
898
893 memset(&dc->sb.set_uuid, 0, 16); 899 memset(&dc->sb.set_uuid, 0, 16);
894 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); 900 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
895 901
@@ -930,15 +936,18 @@ void bch_cached_dev_detach(struct cached_dev *dc)
930 cached_dev_put(dc); 936 cached_dev_put(dc);
931} 937}
932 938
933int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) 939int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
940 uint8_t *set_uuid)
934{ 941{
935 uint32_t rtime = cpu_to_le32(get_seconds()); 942 uint32_t rtime = cpu_to_le32(get_seconds());
936 struct uuid_entry *u; 943 struct uuid_entry *u;
937 char buf[BDEVNAME_SIZE]; 944 char buf[BDEVNAME_SIZE];
945 struct cached_dev *exist_dc, *t;
938 946
939 bdevname(dc->bdev, buf); 947 bdevname(dc->bdev, buf);
940 948
941 if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)) 949 if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
950 (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
942 return -ENOENT; 951 return -ENOENT;
943 952
944 if (dc->disk.c) { 953 if (dc->disk.c) {
@@ -958,6 +967,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
958 return -EINVAL; 967 return -EINVAL;
959 } 968 }
960 969
970 /* Check whether already attached */
971 list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
972 if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
973 pr_err("Tried to attach %s but duplicate UUID already attached",
974 buf);
975
976 return -EINVAL;
977 }
978 }
979
961 u = uuid_find(c, dc->sb.uuid); 980 u = uuid_find(c, dc->sb.uuid);
962 981
963 if (u && 982 if (u &&
@@ -1172,7 +1191,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
1172 1191
1173 list_add(&dc->list, &uncached_devices); 1192 list_add(&dc->list, &uncached_devices);
1174 list_for_each_entry(c, &bch_cache_sets, list) 1193 list_for_each_entry(c, &bch_cache_sets, list)
1175 bch_cached_dev_attach(dc, c); 1194 bch_cached_dev_attach(dc, c, NULL);
1176 1195
1177 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || 1196 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
1178 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) 1197 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
@@ -1694,7 +1713,7 @@ static void run_cache_set(struct cache_set *c)
1694 bcache_write_super(c); 1713 bcache_write_super(c);
1695 1714
1696 list_for_each_entry_safe(dc, t, &uncached_devices, list) 1715 list_for_each_entry_safe(dc, t, &uncached_devices, list)
1697 bch_cached_dev_attach(dc, c); 1716 bch_cached_dev_attach(dc, c, NULL);
1698 1717
1699 flash_devs_run(c); 1718 flash_devs_run(c);
1700 1719
@@ -1811,6 +1830,7 @@ void bch_cache_release(struct kobject *kobj)
1811static int cache_alloc(struct cache_sb *sb, struct cache *ca) 1830static int cache_alloc(struct cache_sb *sb, struct cache *ca)
1812{ 1831{
1813 size_t free; 1832 size_t free;
1833 size_t btree_buckets;
1814 struct bucket *b; 1834 struct bucket *b;
1815 1835
1816 __module_get(THIS_MODULE); 1836 __module_get(THIS_MODULE);
@@ -1820,9 +1840,19 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
1820 ca->journal.bio.bi_max_vecs = 8; 1840 ca->journal.bio.bi_max_vecs = 8;
1821 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; 1841 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
1822 1842
1843 /*
1844 * when ca->sb.njournal_buckets is not zero, journal exists,
1845 * and in bch_journal_replay(), tree node may split,
1846 * so bucket of RESERVE_BTREE type is needed,
1847 * the worst situation is all journal buckets are valid journal,
1848 * and all the keys need to replay,
1849 * so the number of RESERVE_BTREE type buckets should be as much
1850 * as journal buckets
1851 */
1852 btree_buckets = ca->sb.njournal_buckets ?: 8;
1823 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; 1853 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
1824 1854
1825 if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || 1855 if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
1826 !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || 1856 !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
1827 !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || 1857 !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
1828 !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || 1858 !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4fbb5532f24c..5a5c1f1bd8a5 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -191,7 +191,7 @@ STORE(__cached_dev)
191{ 191{
192 struct cached_dev *dc = container_of(kobj, struct cached_dev, 192 struct cached_dev *dc = container_of(kobj, struct cached_dev,
193 disk.kobj); 193 disk.kobj);
194 ssize_t v = size; 194 ssize_t v;
195 struct cache_set *c; 195 struct cache_set *c;
196 struct kobj_uevent_env *env; 196 struct kobj_uevent_env *env;
197 197
@@ -263,17 +263,20 @@ STORE(__cached_dev)
263 } 263 }
264 264
265 if (attr == &sysfs_attach) { 265 if (attr == &sysfs_attach) {
266 if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16) 266 uint8_t set_uuid[16];
267
268 if (bch_parse_uuid(buf, set_uuid) < 16)
267 return -EINVAL; 269 return -EINVAL;
268 270
271 v = -ENOENT;
269 list_for_each_entry(c, &bch_cache_sets, list) { 272 list_for_each_entry(c, &bch_cache_sets, list) {
270 v = bch_cached_dev_attach(dc, c); 273 v = bch_cached_dev_attach(dc, c, set_uuid);
271 if (!v) 274 if (!v)
272 return size; 275 return size;
273 } 276 }
274 277
275 pr_err("Can't attach %s: cache set not found", buf); 278 pr_err("Can't attach %s: cache set not found", buf);
276 size = v; 279 return v;
277 } 280 }
278 281
279 if (attr == &sysfs_detach && dc->disk.c) 282 if (attr == &sysfs_detach && dc->disk.c)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index bbb1dc9e1639..f2c0000de613 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -425,19 +425,28 @@ static int bch_writeback_thread(void *arg)
425 425
426 while (!kthread_should_stop()) { 426 while (!kthread_should_stop()) {
427 down_write(&dc->writeback_lock); 427 down_write(&dc->writeback_lock);
428 if (!atomic_read(&dc->has_dirty) || 428 set_current_state(TASK_INTERRUPTIBLE);
429 (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && 429 /*
430 !dc->writeback_running)) { 430 * If the bache device is detaching, skip here and continue
431 * to perform writeback. Otherwise, if no dirty data on cache,
432 * or there is dirty data on cache but writeback is disabled,
433 * the writeback thread should sleep here and wait for others
434 * to wake up it.
435 */
436 if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
437 (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
431 up_write(&dc->writeback_lock); 438 up_write(&dc->writeback_lock);
432 set_current_state(TASK_INTERRUPTIBLE);
433 439
434 if (kthread_should_stop()) 440 if (kthread_should_stop()) {
441 set_current_state(TASK_RUNNING);
435 return 0; 442 return 0;
443 }
436 444
437 try_to_freeze(); 445 try_to_freeze();
438 schedule(); 446 schedule();
439 continue; 447 continue;
440 } 448 }
449 set_current_state(TASK_RUNNING);
441 450
442 searched_full_index = refill_dirty(dc); 451 searched_full_index = refill_dirty(dc);
443 452
@@ -447,6 +456,14 @@ static int bch_writeback_thread(void *arg)
447 cached_dev_put(dc); 456 cached_dev_put(dc);
448 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); 457 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
449 bch_write_bdev_super(dc, NULL); 458 bch_write_bdev_super(dc, NULL);
459 /*
460 * If bcache device is detaching via sysfs interface,
461 * writeback thread should stop after there is no dirty
462 * data on cache. BCACHE_DEV_DETACHING flag is set in
463 * bch_cached_dev_detach().
464 */
465 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
466 break;
450 } 467 }
451 468
452 up_write(&dc->writeback_lock); 469 up_write(&dc->writeback_lock);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 969c815c90b6..b1d5fa0bc8f7 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -813,12 +813,14 @@ enum new_flag {
813static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf) 813static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
814{ 814{
815 struct dm_buffer *b; 815 struct dm_buffer *b;
816 bool tried_noio_alloc = false;
816 817
817 /* 818 /*
818 * dm-bufio is resistant to allocation failures (it just keeps 819 * dm-bufio is resistant to allocation failures (it just keeps
819 * one buffer reserved in cases all the allocations fail). 820 * one buffer reserved in cases all the allocations fail).
820 * So set flags to not try too hard: 821 * So set flags to not try too hard:
821 * GFP_NOIO: don't recurse into the I/O layer 822 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
823 * mutex and wait ourselves.
822 * __GFP_NORETRY: don't retry and rather return failure 824 * __GFP_NORETRY: don't retry and rather return failure
823 * __GFP_NOMEMALLOC: don't use emergency reserves 825 * __GFP_NOMEMALLOC: don't use emergency reserves
824 * __GFP_NOWARN: don't print a warning in case of failure 826 * __GFP_NOWARN: don't print a warning in case of failure
@@ -828,7 +830,7 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
828 */ 830 */
829 while (1) { 831 while (1) {
830 if (dm_bufio_cache_size_latch != 1) { 832 if (dm_bufio_cache_size_latch != 1) {
831 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); 833 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
832 if (b) 834 if (b)
833 return b; 835 return b;
834 } 836 }
@@ -836,6 +838,15 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
836 if (nf == NF_PREFETCH) 838 if (nf == NF_PREFETCH)
837 return NULL; 839 return NULL;
838 840
841 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
842 dm_bufio_unlock(c);
843 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
844 dm_bufio_lock(c);
845 if (b)
846 return b;
847 tried_noio_alloc = true;
848 }
849
839 if (!list_empty(&c->reserved_buffers)) { 850 if (!list_empty(&c->reserved_buffers)) {
840 b = list_entry(c->reserved_buffers.next, 851 b = list_entry(c->reserved_buffers.next,
841 struct dm_buffer, lru_list); 852 struct dm_buffer, lru_list);
@@ -1563,19 +1574,11 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1563static unsigned long 1574static unsigned long
1564dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 1575dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1565{ 1576{
1566 struct dm_bufio_client *c; 1577 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1567 unsigned long count; 1578 unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1568 unsigned long retain_target; 1579 READ_ONCE(c->n_buffers[LIST_DIRTY]);
1569 1580 unsigned long retain_target = get_retain_buffers(c);
1570 c = container_of(shrink, struct dm_bufio_client, shrinker);
1571 if (sc->gfp_mask & __GFP_FS)
1572 dm_bufio_lock(c);
1573 else if (!dm_bufio_trylock(c))
1574 return 0;
1575 1581
1576 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1577 retain_target = get_retain_buffers(c);
1578 dm_bufio_unlock(c);
1579 return (count < retain_target) ? 0 : (count - retain_target); 1582 return (count < retain_target) ? 0 : (count - retain_target);
1580} 1583}
1581 1584
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 81c5e1a1f363..1b84d2890fbf 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -300,6 +300,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
300 else if (rw & REQ_WRITE_SAME) 300 else if (rw & REQ_WRITE_SAME)
301 special_cmd_max_sectors = q->limits.max_write_same_sectors; 301 special_cmd_max_sectors = q->limits.max_write_same_sectors;
302 if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { 302 if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
303 atomic_inc(&io->count);
303 dec_count(io, region, -EOPNOTSUPP); 304 dec_count(io, region, -EOPNOTSUPP);
304 return; 305 return;
305 } 306 }
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index e503279c34fc..6865b186f749 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1773,12 +1773,12 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
1773 cmd == DM_LIST_VERSIONS_CMD) 1773 cmd == DM_LIST_VERSIONS_CMD)
1774 return 0; 1774 return 0;
1775 1775
1776 if ((cmd == DM_DEV_CREATE_CMD)) { 1776 if (cmd == DM_DEV_CREATE_CMD) {
1777 if (!*param->name) { 1777 if (!*param->name) {
1778 DMWARN("name not supplied when creating device"); 1778 DMWARN("name not supplied when creating device");
1779 return -EINVAL; 1779 return -EINVAL;
1780 } 1780 }
1781 } else if ((*param->uuid && *param->name)) { 1781 } else if (*param->uuid && *param->name) {
1782 DMWARN("only supply one of name or uuid, cmd(%u)", cmd); 1782 DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
1783 return -EINVAL; 1783 return -EINVAL;
1784 } 1784 }
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index a1cc797fe88f..315767e8ae4d 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1299,6 +1299,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1299 1299
1300static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); 1300static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1301 1301
1302static void requeue_bios(struct pool *pool);
1303
1302static void check_for_space(struct pool *pool) 1304static void check_for_space(struct pool *pool)
1303{ 1305{
1304 int r; 1306 int r;
@@ -1311,8 +1313,10 @@ static void check_for_space(struct pool *pool)
1311 if (r) 1313 if (r)
1312 return; 1314 return;
1313 1315
1314 if (nr_free) 1316 if (nr_free) {
1315 set_pool_mode(pool, PM_WRITE); 1317 set_pool_mode(pool, PM_WRITE);
1318 requeue_bios(pool);
1319 }
1316} 1320}
1317 1321
1318/* 1322/*
@@ -1389,7 +1393,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1389 1393
1390 r = dm_pool_alloc_data_block(pool->pmd, result); 1394 r = dm_pool_alloc_data_block(pool->pmd, result);
1391 if (r) { 1395 if (r) {
1392 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); 1396 if (r == -ENOSPC)
1397 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
1398 else
1399 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
1393 return r; 1400 return r;
1394 } 1401 }
1395 1402
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9ec6948e3b8b..3d9a80759d95 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -974,7 +974,8 @@ static void dec_pending(struct dm_io *io, int error)
974 } else { 974 } else {
975 /* done with normal IO or empty flush */ 975 /* done with normal IO or empty flush */
976 trace_block_bio_complete(md->queue, bio, io_error); 976 trace_block_bio_complete(md->queue, bio, io_error);
977 bio->bi_error = io_error; 977 if (io_error)
978 bio->bi_error = io_error;
978 bio_endio(bio); 979 bio_endio(bio);
979 } 980 }
980 } 981 }
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 494d01d0e92a..a7a561af05c9 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -945,8 +945,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
945 cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); 945 cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
946 lock_comm(cinfo); 946 lock_comm(cinfo);
947 ret = __sendmsg(cinfo, &cmsg); 947 ret = __sendmsg(cinfo, &cmsg);
948 if (ret) 948 if (ret) {
949 unlock_comm(cinfo);
949 return ret; 950 return ret;
951 }
950 cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; 952 cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
951 ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); 953 ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
952 cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE; 954 cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0a856cb181e9..07f307402351 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1028,8 +1028,9 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
1028 * (not needed for Linear and RAID0 as metadata doesn't 1028 * (not needed for Linear and RAID0 as metadata doesn't
1029 * record this size) 1029 * record this size)
1030 */ 1030 */
1031 if (rdev->sectors >= (2ULL << 32) && sb->level >= 1) 1031 if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
1032 rdev->sectors = (2ULL << 32) - 2; 1032 sb->level >= 1)
1033 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1033 1034
1034 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) 1035 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1035 /* "this cannot possibly happen" ... */ 1036 /* "this cannot possibly happen" ... */
@@ -1322,8 +1323,9 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1322 /* Limit to 4TB as metadata cannot record more than that. 1323 /* Limit to 4TB as metadata cannot record more than that.
1323 * 4TB == 2^32 KB, or 2*2^32 sectors. 1324 * 4TB == 2^32 KB, or 2*2^32 sectors.
1324 */ 1325 */
1325 if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) 1326 if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
1326 num_sectors = (2ULL << 32) - 2; 1327 rdev->mddev->level >= 1)
1328 num_sectors = (sector_t)(2ULL << 32) - 2;
1327 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1329 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1328 rdev->sb_page); 1330 rdev->sb_page);
1329 md_super_wait(rdev->mddev); 1331 md_super_wait(rdev->mddev);
@@ -2688,7 +2690,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
2688 err = 0; 2690 err = 0;
2689 } 2691 }
2690 } else if (cmd_match(buf, "re-add")) { 2692 } else if (cmd_match(buf, "re-add")) {
2691 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) { 2693 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
2694 rdev->saved_raid_disk >= 0) {
2692 /* clear_bit is performed _after_ all the devices 2695 /* clear_bit is performed _after_ all the devices
2693 * have their local Faulty bit cleared. If any writes 2696 * have their local Faulty bit cleared. If any writes
2694 * happen in the meantime in the local node, they 2697 * happen in the meantime in the local node, they
@@ -6142,6 +6145,9 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6142 struct md_rdev *rdev; 6145 struct md_rdev *rdev;
6143 int ret = -1; 6146 int ret = -1;
6144 6147
6148 if (!mddev->pers)
6149 return -ENODEV;
6150
6145 rdev = find_rdev(mddev, dev); 6151 rdev = find_rdev(mddev, dev);
6146 if (!rdev) 6152 if (!rdev)
6147 return -ENXIO; 6153 return -ENXIO;
@@ -8151,6 +8157,7 @@ static int remove_and_add_spares(struct mddev *mddev,
8151 if (mddev->pers->hot_remove_disk( 8157 if (mddev->pers->hot_remove_disk(
8152 mddev, rdev) == 0) { 8158 mddev, rdev) == 0) {
8153 sysfs_unlink_rdev(mddev, rdev); 8159 sysfs_unlink_rdev(mddev, rdev);
8160 rdev->saved_raid_disk = rdev->raid_disk;
8154 rdev->raid_disk = -1; 8161 rdev->raid_disk = -1;
8155 removed++; 8162 removed++;
8156 } 8163 }
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f24a9e14021d..89dcbf2fa846 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1686,6 +1686,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1686 struct md_rdev *repl = 1686 struct md_rdev *repl =
1687 conf->mirrors[conf->raid_disks + number].rdev; 1687 conf->mirrors[conf->raid_disks + number].rdev;
1688 freeze_array(conf, 0); 1688 freeze_array(conf, 0);
1689 if (atomic_read(&repl->nr_pending)) {
1690 /* It means that some queued IO of retry_list
1691 * hold repl. Thus, we cannot set replacement
1692 * as NULL, avoiding rdev NULL pointer
1693 * dereference in sync_request_write and
1694 * handle_write_finished.
1695 */
1696 err = -EBUSY;
1697 unfreeze_array(conf);
1698 goto abort;
1699 }
1689 clear_bit(Replacement, &repl->flags); 1700 clear_bit(Replacement, &repl->flags);
1690 p->rdev = repl; 1701 p->rdev = repl;
1691 conf->mirrors[conf->raid_disks + number].rdev = NULL; 1702 conf->mirrors[conf->raid_disks + number].rdev = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a8a86d450d76..7b6acedc89c1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2630,7 +2630,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2630 for (m = 0; m < conf->copies; m++) { 2630 for (m = 0; m < conf->copies; m++) {
2631 int dev = r10_bio->devs[m].devnum; 2631 int dev = r10_bio->devs[m].devnum;
2632 rdev = conf->mirrors[dev].rdev; 2632 rdev = conf->mirrors[dev].rdev;
2633 if (r10_bio->devs[m].bio == NULL) 2633 if (r10_bio->devs[m].bio == NULL ||
2634 r10_bio->devs[m].bio->bi_end_io == NULL)
2634 continue; 2635 continue;
2635 if (!r10_bio->devs[m].bio->bi_error) { 2636 if (!r10_bio->devs[m].bio->bi_error) {
2636 rdev_clear_badblocks( 2637 rdev_clear_badblocks(
@@ -2645,7 +2646,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2645 md_error(conf->mddev, rdev); 2646 md_error(conf->mddev, rdev);
2646 } 2647 }
2647 rdev = conf->mirrors[dev].replacement; 2648 rdev = conf->mirrors[dev].replacement;
2648 if (r10_bio->devs[m].repl_bio == NULL) 2649 if (r10_bio->devs[m].repl_bio == NULL ||
2650 r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2649 continue; 2651 continue;
2650 2652
2651 if (!r10_bio->devs[m].repl_bio->bi_error) { 2653 if (!r10_bio->devs[m].repl_bio->bi_error) {
@@ -2698,6 +2700,11 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2698 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); 2700 list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2699 conf->nr_queued++; 2701 conf->nr_queued++;
2700 spin_unlock_irq(&conf->device_lock); 2702 spin_unlock_irq(&conf->device_lock);
2703 /*
2704 * In case freeze_array() is waiting for condition
2705 * nr_pending == nr_queued + extra to be true.
2706 */
2707 wake_up(&conf->wait_barrier);
2701 md_wakeup_thread(conf->mddev->thread); 2708 md_wakeup_thread(conf->mddev->thread);
2702 } else { 2709 } else {
2703 if (test_bit(R10BIO_WriteError, 2710 if (test_bit(R10BIO_WriteError,
@@ -3633,6 +3640,7 @@ static int run(struct mddev *mddev)
3633 3640
3634 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) 3641 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3635 discard_supported = true; 3642 discard_supported = true;
3643 first = 0;
3636 } 3644 }
3637 3645
3638 if (mddev->queue) { 3646 if (mddev->queue) {
@@ -4039,6 +4047,7 @@ static int raid10_start_reshape(struct mddev *mddev)
4039 diff = 0; 4047 diff = 0;
4040 if (first || diff < min_offset_diff) 4048 if (first || diff < min_offset_diff)
4041 min_offset_diff = diff; 4049 min_offset_diff = diff;
4050 first = 0;
4042 } 4051 }
4043 } 4052 }
4044 4053
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 86ab6d14d782..d59b861764a1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -110,8 +110,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
110static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) 110static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
111{ 111{
112 int i; 112 int i;
113 local_irq_disable(); 113 spin_lock_irq(conf->hash_locks);
114 spin_lock(conf->hash_locks);
115 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 114 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
116 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); 115 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
117 spin_lock(&conf->device_lock); 116 spin_lock(&conf->device_lock);
@@ -121,9 +120,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
121{ 120{
122 int i; 121 int i;
123 spin_unlock(&conf->device_lock); 122 spin_unlock(&conf->device_lock);
124 for (i = NR_STRIPE_HASH_LOCKS; i; i--) 123 for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
125 spin_unlock(conf->hash_locks + i - 1); 124 spin_unlock(conf->hash_locks + i);
126 local_irq_enable(); 125 spin_unlock_irq(conf->hash_locks);
127} 126}
128 127
129/* bio's attached to a stripe+device for I/O are linked together in bi_sector 128/* bio's attached to a stripe+device for I/O are linked together in bi_sector
@@ -726,12 +725,11 @@ static bool is_full_stripe_write(struct stripe_head *sh)
726 725
727static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 726static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
728{ 727{
729 local_irq_disable();
730 if (sh1 > sh2) { 728 if (sh1 > sh2) {
731 spin_lock(&sh2->stripe_lock); 729 spin_lock_irq(&sh2->stripe_lock);
732 spin_lock_nested(&sh1->stripe_lock, 1); 730 spin_lock_nested(&sh1->stripe_lock, 1);
733 } else { 731 } else {
734 spin_lock(&sh1->stripe_lock); 732 spin_lock_irq(&sh1->stripe_lock);
735 spin_lock_nested(&sh2->stripe_lock, 1); 733 spin_lock_nested(&sh2->stripe_lock, 1);
736 } 734 }
737} 735}
@@ -739,8 +737,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
739static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 737static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
740{ 738{
741 spin_unlock(&sh1->stripe_lock); 739 spin_unlock(&sh1->stripe_lock);
742 spin_unlock(&sh2->stripe_lock); 740 spin_unlock_irq(&sh2->stripe_lock);
743 local_irq_enable();
744} 741}
745 742
746/* Only freshly new full stripe normal write stripe can be added to a batch list */ 743/* Only freshly new full stripe normal write stripe can be added to a batch list */
@@ -2031,15 +2028,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
2031static int grow_stripes(struct r5conf *conf, int num) 2028static int grow_stripes(struct r5conf *conf, int num)
2032{ 2029{
2033 struct kmem_cache *sc; 2030 struct kmem_cache *sc;
2031 size_t namelen = sizeof(conf->cache_name[0]);
2034 int devs = max(conf->raid_disks, conf->previous_raid_disks); 2032 int devs = max(conf->raid_disks, conf->previous_raid_disks);
2035 2033
2036 if (conf->mddev->gendisk) 2034 if (conf->mddev->gendisk)
2037 sprintf(conf->cache_name[0], 2035 snprintf(conf->cache_name[0], namelen,
2038 "raid%d-%s", conf->level, mdname(conf->mddev)); 2036 "raid%d-%s", conf->level, mdname(conf->mddev));
2039 else 2037 else
2040 sprintf(conf->cache_name[0], 2038 snprintf(conf->cache_name[0], namelen,
2041 "raid%d-%p", conf->level, conf->mddev); 2039 "raid%d-%p", conf->level, conf->mddev);
2042 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); 2040 snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
2043 2041
2044 conf->active_name = 0; 2042 conf->active_name = 0;
2045 sc = kmem_cache_create(conf->cache_name[conf->active_name], 2043 sc = kmem_cache_create(conf->cache_name[conf->active_name],
@@ -3372,9 +3370,20 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
3372 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 3370 BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
3373 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 3371 BUG_ON(test_bit(R5_Wantread, &dev->flags));
3374 BUG_ON(sh->batch_head); 3372 BUG_ON(sh->batch_head);
3373
3374 /*
3375 * In the raid6 case if the only non-uptodate disk is P
3376 * then we already trusted P to compute the other failed
3377 * drives. It is safe to compute rather than re-read P.
3378 * In other cases we only compute blocks from failed
3379 * devices, otherwise check/repair might fail to detect
3380 * a real inconsistency.
3381 */
3382
3375 if ((s->uptodate == disks - 1) && 3383 if ((s->uptodate == disks - 1) &&
3384 ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) ||
3376 (s->failed && (disk_idx == s->failed_num[0] || 3385 (s->failed && (disk_idx == s->failed_num[0] ||
3377 disk_idx == s->failed_num[1]))) { 3386 disk_idx == s->failed_num[1])))) {
3378 /* have disk failed, and we're requested to fetch it; 3387 /* have disk failed, and we're requested to fetch it;
3379 * do compute it 3388 * do compute it
3380 */ 3389 */
diff --git a/drivers/media/common/b2c2/flexcop-fe-tuner.c b/drivers/media/common/b2c2/flexcop-fe-tuner.c
index 9c59f4306883..f5956402fc69 100644
--- a/drivers/media/common/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/common/b2c2/flexcop-fe-tuner.c
@@ -38,7 +38,7 @@ static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
38#endif 38#endif
39 39
40/* lnb control */ 40/* lnb control */
41#if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299) 41#if (FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)) && FE_SUPPORTED(PLL)
42static int flexcop_set_voltage(struct dvb_frontend *fe, 42static int flexcop_set_voltage(struct dvb_frontend *fe,
43 enum fe_sec_voltage voltage) 43 enum fe_sec_voltage voltage)
44{ 44{
@@ -68,7 +68,7 @@ static int flexcop_set_voltage(struct dvb_frontend *fe,
68#endif 68#endif
69 69
70#if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312) 70#if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312)
71static int flexcop_sleep(struct dvb_frontend* fe) 71static int __maybe_unused flexcop_sleep(struct dvb_frontend* fe)
72{ 72{
73 struct flexcop_device *fc = fe->dvb->priv; 73 struct flexcop_device *fc = fe->dvb->priv;
74 if (fc->fe_sleep) 74 if (fc->fe_sleep)
diff --git a/drivers/media/common/siano/smsendian.c b/drivers/media/common/siano/smsendian.c
index bfe831c10b1c..b95a631f23f9 100644
--- a/drivers/media/common/siano/smsendian.c
+++ b/drivers/media/common/siano/smsendian.c
@@ -35,7 +35,7 @@ void smsendian_handle_tx_message(void *buffer)
35 switch (msg->x_msg_header.msg_type) { 35 switch (msg->x_msg_header.msg_type) {
36 case MSG_SMS_DATA_DOWNLOAD_REQ: 36 case MSG_SMS_DATA_DOWNLOAD_REQ:
37 { 37 {
38 msg->msg_data[0] = le32_to_cpu(msg->msg_data[0]); 38 msg->msg_data[0] = le32_to_cpu((__force __le32)(msg->msg_data[0]));
39 break; 39 break;
40 } 40 }
41 41
@@ -44,7 +44,7 @@ void smsendian_handle_tx_message(void *buffer)
44 sizeof(struct sms_msg_hdr))/4; 44 sizeof(struct sms_msg_hdr))/4;
45 45
46 for (i = 0; i < msg_words; i++) 46 for (i = 0; i < msg_words; i++)
47 msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]); 47 msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]);
48 48
49 break; 49 break;
50 } 50 }
@@ -64,7 +64,7 @@ void smsendian_handle_rx_message(void *buffer)
64 { 64 {
65 struct sms_version_res *ver = 65 struct sms_version_res *ver =
66 (struct sms_version_res *) msg; 66 (struct sms_version_res *) msg;
67 ver->chip_model = le16_to_cpu(ver->chip_model); 67 ver->chip_model = le16_to_cpu((__force __le16)ver->chip_model);
68 break; 68 break;
69 } 69 }
70 70
@@ -81,7 +81,7 @@ void smsendian_handle_rx_message(void *buffer)
81 sizeof(struct sms_msg_hdr))/4; 81 sizeof(struct sms_msg_hdr))/4;
82 82
83 for (i = 0; i < msg_words; i++) 83 for (i = 0; i < msg_words; i++)
84 msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]); 84 msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]);
85 85
86 break; 86 break;
87 } 87 }
@@ -95,9 +95,9 @@ void smsendian_handle_message_header(void *msg)
95#ifdef __BIG_ENDIAN 95#ifdef __BIG_ENDIAN
96 struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)msg; 96 struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)msg;
97 97
98 phdr->msg_type = le16_to_cpu(phdr->msg_type); 98 phdr->msg_type = le16_to_cpu((__force __le16)phdr->msg_type);
99 phdr->msg_length = le16_to_cpu(phdr->msg_length); 99 phdr->msg_length = le16_to_cpu((__force __le16)phdr->msg_length);
100 phdr->msg_flags = le16_to_cpu(phdr->msg_flags); 100 phdr->msg_flags = le16_to_cpu((__force __le16)phdr->msg_flags);
101#endif /* __BIG_ENDIAN */ 101#endif /* __BIG_ENDIAN */
102} 102}
103EXPORT_SYMBOL_GPL(smsendian_handle_message_header); 103EXPORT_SYMBOL_GPL(smsendian_handle_message_header);
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index ea9abde902e9..209db65ab610 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -1071,7 +1071,7 @@ static int dvb_demux_do_ioctl(struct file *file,
1071 break; 1071 break;
1072 1072
1073 default: 1073 default:
1074 ret = -EINVAL; 1074 ret = -ENOTTY;
1075 break; 1075 break;
1076 } 1076 }
1077 mutex_unlock(&dmxdev->mutex); 1077 mutex_unlock(&dmxdev->mutex);
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index fb66184dc9b6..77cf211e842e 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -750,6 +750,29 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * b
750 goto exit; 750 goto exit;
751 } 751 }
752 752
753 /*
754 * It may need some time for the CAM to settle down, or there might
755 * be a race condition between the CAM, writing HC and our last
756 * check for DA. This happens, if the CAM asserts DA, just after
757 * checking DA before we are setting HC. In this case it might be
758 * a bug in the CAM to keep the FR bit, the lower layer/HW
759 * communication requires a longer timeout or the CAM needs more
760 * time internally. But this happens in reality!
761 * We need to read the status from the HW again and do the same
762 * we did for the previous check for DA
763 */
764 status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
765 if (status < 0)
766 goto exit;
767
768 if (status & (STATUSREG_DA | STATUSREG_RE)) {
769 if (status & STATUSREG_DA)
770 dvb_ca_en50221_thread_wakeup(ca);
771
772 status = -EAGAIN;
773 goto exit;
774 }
775
753 /* send the amount of data */ 776 /* send the amount of data */
754 if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, bytes_write >> 8)) != 0) 777 if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, bytes_write >> 8)) != 0)
755 goto exit; 778 goto exit;
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index e2a3833170e3..2c835e69c4df 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -230,8 +230,20 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe,
230 wake_up_interruptible (&events->wait_queue); 230 wake_up_interruptible (&events->wait_queue);
231} 231}
232 232
233static int dvb_frontend_test_event(struct dvb_frontend_private *fepriv,
234 struct dvb_fe_events *events)
235{
236 int ret;
237
238 up(&fepriv->sem);
239 ret = events->eventw != events->eventr;
240 down(&fepriv->sem);
241
242 return ret;
243}
244
233static int dvb_frontend_get_event(struct dvb_frontend *fe, 245static int dvb_frontend_get_event(struct dvb_frontend *fe,
234 struct dvb_frontend_event *event, int flags) 246 struct dvb_frontend_event *event, int flags)
235{ 247{
236 struct dvb_frontend_private *fepriv = fe->frontend_priv; 248 struct dvb_frontend_private *fepriv = fe->frontend_priv;
237 struct dvb_fe_events *events = &fepriv->events; 249 struct dvb_fe_events *events = &fepriv->events;
@@ -249,13 +261,8 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
249 if (flags & O_NONBLOCK) 261 if (flags & O_NONBLOCK)
250 return -EWOULDBLOCK; 262 return -EWOULDBLOCK;
251 263
252 up(&fepriv->sem); 264 ret = wait_event_interruptible(events->wait_queue,
253 265 dvb_frontend_test_event(fepriv, events));
254 ret = wait_event_interruptible (events->wait_queue,
255 events->eventw != events->eventr);
256
257 if (down_interruptible (&fepriv->sem))
258 return -ERESTARTSYS;
259 266
260 if (ret < 0) 267 if (ret < 0)
261 return ret; 268 return ret;
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index feeeb70d841e..d14d075ab1d6 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1281,11 +1281,12 @@ static int m88ds3103_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
1281 * New users must use I2C client binding directly! 1281 * New users must use I2C client binding directly!
1282 */ 1282 */
1283struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg, 1283struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
1284 struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter) 1284 struct i2c_adapter *i2c,
1285 struct i2c_adapter **tuner_i2c_adapter)
1285{ 1286{
1286 struct i2c_client *client; 1287 struct i2c_client *client;
1287 struct i2c_board_info board_info; 1288 struct i2c_board_info board_info;
1288 struct m88ds3103_platform_data pdata; 1289 struct m88ds3103_platform_data pdata = {};
1289 1290
1290 pdata.clk = cfg->clock; 1291 pdata.clk = cfg->clock;
1291 pdata.i2c_wr_max = cfg->i2c_wr_max; 1292 pdata.i2c_wr_max = cfg->i2c_wr_max;
@@ -1428,6 +1429,8 @@ static int m88ds3103_probe(struct i2c_client *client,
1428 case M88DS3103_CHIP_ID: 1429 case M88DS3103_CHIP_ID:
1429 break; 1430 break;
1430 default: 1431 default:
1432 ret = -ENODEV;
1433 dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
1431 goto err_kfree; 1434 goto err_kfree;
1432 } 1435 }
1433 1436
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 821a8f481507..9d6270591858 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -14,6 +14,8 @@
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 */ 15 */
16 16
17#include <linux/delay.h>
18
17#include "si2168_priv.h" 19#include "si2168_priv.h"
18 20
19static const struct dvb_frontend_ops si2168_ops; 21static const struct dvb_frontend_ops si2168_ops;
@@ -420,6 +422,7 @@ static int si2168_init(struct dvb_frontend *fe)
420 if (ret) 422 if (ret)
421 goto err; 423 goto err;
422 424
425 udelay(100);
423 memcpy(cmd.args, "\x85", 1); 426 memcpy(cmd.args, "\x85", 1);
424 cmd.wlen = 1; 427 cmd.wlen = 1;
425 cmd.rlen = 1; 428 cmd.rlen = 1;
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index 7979e5d6498b..7ca359391535 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -369,7 +369,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc,
369 gain2 = clamp_t(long, gain2, 0, 13); 369 gain2 = clamp_t(long, gain2, 0, 13);
370 v_agc = clamp_t(long, v_agc, 400, 1100); 370 v_agc = clamp_t(long, v_agc, 400, 1100);
371 371
372 *_gain = -(gain1 * 2330 + 372 *_gain = -((__s64)gain1 * 2330 +
373 gain2 * 3500 + 373 gain2 * 3500 +
374 v_agc * 24 / 10 * 10 + 374 v_agc * 24 / 10 * 10 +
375 10000); 375 10000);
@@ -387,7 +387,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc,
387 gain3 = clamp_t(long, gain3, 0, 6); 387 gain3 = clamp_t(long, gain3, 0, 6);
388 v_agc = clamp_t(long, v_agc, 600, 1600); 388 v_agc = clamp_t(long, v_agc, 600, 1600);
389 389
390 *_gain = -(gain1 * 2650 + 390 *_gain = -((__s64)gain1 * 2650 +
391 gain2 * 3380 + 391 gain2 * 3380 +
392 gain3 * 2850 + 392 gain3 * 2850 +
393 v_agc * 176 / 100 * 10 - 393 v_agc * 176 / 100 * 10 -
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index fe6eb78b6914..17d217c3585a 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -420,11 +420,13 @@ static void cx25840_initialize(struct i2c_client *client)
420 INIT_WORK(&state->fw_work, cx25840_work_handler); 420 INIT_WORK(&state->fw_work, cx25840_work_handler);
421 init_waitqueue_head(&state->fw_wait); 421 init_waitqueue_head(&state->fw_wait);
422 q = create_singlethread_workqueue("cx25840_fw"); 422 q = create_singlethread_workqueue("cx25840_fw");
423 prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); 423 if (q) {
424 queue_work(q, &state->fw_work); 424 prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
425 schedule(); 425 queue_work(q, &state->fw_work);
426 finish_wait(&state->fw_wait, &wait); 426 schedule();
427 destroy_workqueue(q); 427 finish_wait(&state->fw_wait, &wait);
428 destroy_workqueue(q);
429 }
428 430
429 /* 6. */ 431 /* 6. */
430 cx25840_write(client, 0x115, 0x8c); 432 cx25840_write(client, 0x115, 0x8c);
@@ -465,8 +467,13 @@ static void cx23885_initialize(struct i2c_client *client)
465{ 467{
466 DEFINE_WAIT(wait); 468 DEFINE_WAIT(wait);
467 struct cx25840_state *state = to_state(i2c_get_clientdata(client)); 469 struct cx25840_state *state = to_state(i2c_get_clientdata(client));
470 u32 clk_freq = 0;
468 struct workqueue_struct *q; 471 struct workqueue_struct *q;
469 472
473 /* cx23885 sets hostdata to clk_freq pointer */
474 if (v4l2_get_subdev_hostdata(&state->sd))
475 clk_freq = *((u32 *)v4l2_get_subdev_hostdata(&state->sd));
476
470 /* 477 /*
471 * Come out of digital power down 478 * Come out of digital power down
472 * The CX23888, at least, needs this, otherwise registers aside from 479 * The CX23888, at least, needs this, otherwise registers aside from
@@ -502,8 +509,13 @@ static void cx23885_initialize(struct i2c_client *client)
502 * 50.0 MHz * (0xb + 0xe8ba26/0x2000000)/4 = 5 * 28.636363 MHz 509 * 50.0 MHz * (0xb + 0xe8ba26/0x2000000)/4 = 5 * 28.636363 MHz
503 * 572.73 MHz before post divide 510 * 572.73 MHz before post divide
504 */ 511 */
505 /* HVR1850 or 50MHz xtal */ 512 if (clk_freq == 25000000) {
506 cx25840_write(client, 0x2, 0x71); 513 /* 888/ImpactVCBe or 25Mhz xtal */
514 ; /* nothing to do */
515 } else {
516 /* HVR1850 or 50MHz xtal */
517 cx25840_write(client, 0x2, 0x71);
518 }
507 cx25840_write4(client, 0x11c, 0x01d1744c); 519 cx25840_write4(client, 0x11c, 0x01d1744c);
508 cx25840_write4(client, 0x118, 0x00000416); 520 cx25840_write4(client, 0x118, 0x00000416);
509 cx25840_write4(client, 0x404, 0x0010253e); 521 cx25840_write4(client, 0x404, 0x0010253e);
@@ -546,9 +558,15 @@ static void cx23885_initialize(struct i2c_client *client)
546 /* HVR1850 */ 558 /* HVR1850 */
547 switch (state->id) { 559 switch (state->id) {
548 case CX23888_AV: 560 case CX23888_AV:
549 /* 888/HVR1250 specific */ 561 if (clk_freq == 25000000) {
550 cx25840_write4(client, 0x10c, 0x13333333); 562 /* 888/ImpactVCBe or 25MHz xtal */
551 cx25840_write4(client, 0x108, 0x00000515); 563 cx25840_write4(client, 0x10c, 0x01b6db7b);
564 cx25840_write4(client, 0x108, 0x00000512);
565 } else {
566 /* 888/HVR1250 or 50MHz xtal */
567 cx25840_write4(client, 0x10c, 0x13333333);
568 cx25840_write4(client, 0x108, 0x00000515);
569 }
552 break; 570 break;
553 default: 571 default:
554 cx25840_write4(client, 0x10c, 0x002be2c9); 572 cx25840_write4(client, 0x10c, 0x002be2c9);
@@ -575,7 +593,7 @@ static void cx23885_initialize(struct i2c_client *client)
575 * 368.64 MHz before post divide 593 * 368.64 MHz before post divide
576 * 122.88 MHz / 0xa = 12.288 MHz 594 * 122.88 MHz / 0xa = 12.288 MHz
577 */ 595 */
578 /* HVR1850 or 50MHz xtal */ 596 /* HVR1850 or 50MHz xtal or 25MHz xtal */
579 cx25840_write4(client, 0x114, 0x017dbf48); 597 cx25840_write4(client, 0x114, 0x017dbf48);
580 cx25840_write4(client, 0x110, 0x000a030e); 598 cx25840_write4(client, 0x110, 0x000a030e);
581 break; 599 break;
@@ -631,11 +649,13 @@ static void cx23885_initialize(struct i2c_client *client)
631 INIT_WORK(&state->fw_work, cx25840_work_handler); 649 INIT_WORK(&state->fw_work, cx25840_work_handler);
632 init_waitqueue_head(&state->fw_wait); 650 init_waitqueue_head(&state->fw_wait);
633 q = create_singlethread_workqueue("cx25840_fw"); 651 q = create_singlethread_workqueue("cx25840_fw");
634 prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); 652 if (q) {
635 queue_work(q, &state->fw_work); 653 prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
636 schedule(); 654 queue_work(q, &state->fw_work);
637 finish_wait(&state->fw_wait, &wait); 655 schedule();
638 destroy_workqueue(q); 656 finish_wait(&state->fw_wait, &wait);
657 destroy_workqueue(q);
658 }
639 659
640 /* Call the cx23888 specific std setup func, we no longer rely on 660 /* Call the cx23888 specific std setup func, we no longer rely on
641 * the generic cx24840 func. 661 * the generic cx24840 func.
@@ -746,11 +766,13 @@ static void cx231xx_initialize(struct i2c_client *client)
746 INIT_WORK(&state->fw_work, cx25840_work_handler); 766 INIT_WORK(&state->fw_work, cx25840_work_handler);
747 init_waitqueue_head(&state->fw_wait); 767 init_waitqueue_head(&state->fw_wait);
748 q = create_singlethread_workqueue("cx25840_fw"); 768 q = create_singlethread_workqueue("cx25840_fw");
749 prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); 769 if (q) {
750 queue_work(q, &state->fw_work); 770 prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
751 schedule(); 771 queue_work(q, &state->fw_work);
752 finish_wait(&state->fw_wait, &wait); 772 schedule();
753 destroy_workqueue(q); 773 finish_wait(&state->fw_wait, &wait);
774 destroy_workqueue(q);
775 }
754 776
755 cx25840_std_setup(client); 777 cx25840_std_setup(client);
756 778
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index d0ad6a25bdab..5ac2babe123b 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -421,6 +421,7 @@ static int s5k6aa_set_ahb_address(struct i2c_client *client)
421 421
422/** 422/**
423 * s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration 423 * s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration
424 * @s5k6aa: pointer to &struct s5k6aa describing the device
424 * 425 *
425 * Configure the internal ISP PLL for the required output frequency. 426 * Configure the internal ISP PLL for the required output frequency.
426 * Locking: called with s5k6aa.lock mutex held. 427 * Locking: called with s5k6aa.lock mutex held.
@@ -669,6 +670,7 @@ static int s5k6aa_set_input_params(struct s5k6aa *s5k6aa)
669 670
670/** 671/**
671 * s5k6aa_configure_video_bus - configure the video output interface 672 * s5k6aa_configure_video_bus - configure the video output interface
673 * @s5k6aa: pointer to &struct s5k6aa describing the device
672 * @bus_type: video bus type: parallel or MIPI-CSI 674 * @bus_type: video bus type: parallel or MIPI-CSI
673 * @nlanes: number of MIPI lanes to be used (MIPI-CSI only) 675 * @nlanes: number of MIPI lanes to be used (MIPI-CSI only)
674 * 676 *
@@ -724,6 +726,8 @@ static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout,
724 726
725/** 727/**
726 * s5k6aa_set_prev_config - write user preview register set 728 * s5k6aa_set_prev_config - write user preview register set
729 * @s5k6aa: pointer to &struct s5k6aa describing the device
730 * @preset: s5kaa preset to be applied
727 * 731 *
728 * Configure output resolution and color fromat, pixel clock 732 * Configure output resolution and color fromat, pixel clock
729 * frequency range, device frame rate type and frame period range. 733 * frequency range, device frame rate type and frame period range.
@@ -777,6 +781,7 @@ static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa,
777 781
778/** 782/**
779 * s5k6aa_initialize_isp - basic ISP MCU initialization 783 * s5k6aa_initialize_isp - basic ISP MCU initialization
784 * @sd: pointer to V4L2 sub-device descriptor
780 * 785 *
781 * Configure AHB addresses for registers read/write; configure PLLs for 786 * Configure AHB addresses for registers read/write; configure PLLs for
782 * required output pixel clock. The ISP power supply needs to be already 787 * required output pixel clock. The ISP power supply needs to be already
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index fb39dfd55e75..46a052c5be2e 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -981,7 +981,7 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor,
981 if (rval) 981 if (rval)
982 goto out; 982 goto out;
983 983
984 for (i = 0; i < 1000; i++) { 984 for (i = 1000; i > 0; i--) {
985 rval = smiapp_read( 985 rval = smiapp_read(
986 sensor, 986 sensor,
987 SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s); 987 SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s);
@@ -992,11 +992,10 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor,
992 if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY) 992 if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY)
993 break; 993 break;
994 994
995 if (--i == 0) { 995 }
996 rval = -ETIMEDOUT; 996 if (!i) {
997 goto out; 997 rval = -ETIMEDOUT;
998 } 998 goto out;
999
1000 } 999 }
1001 1000
1002 for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) { 1001 for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index 1f8af1ee8352..1e4783b51a35 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -1033,7 +1033,7 @@ static int ov6650_probe(struct i2c_client *client,
1033 priv->code = MEDIA_BUS_FMT_YUYV8_2X8; 1033 priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
1034 priv->colorspace = V4L2_COLORSPACE_JPEG; 1034 priv->colorspace = V4L2_COLORSPACE_JPEG;
1035 1035
1036 priv->clk = v4l2_clk_get(&client->dev, "mclk"); 1036 priv->clk = v4l2_clk_get(&client->dev, NULL);
1037 if (IS_ERR(priv->clk)) { 1037 if (IS_ERR(priv->clk)) {
1038 ret = PTR_ERR(priv->clk); 1038 ret = PTR_ERR(priv->clk);
1039 goto eclkget; 1039 goto eclkget;
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 9ef5baaf8646..bc630a719776 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -197,57 +197,61 @@ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
197 } 197 }
198} 198}
199 199
200static u8 i2c_rd8(struct v4l2_subdev *sd, u16 reg) 200static noinline u32 i2c_rdreg(struct v4l2_subdev *sd, u16 reg, u32 n)
201{ 201{
202 u8 val; 202 __le32 val = 0;
203 203
204 i2c_rd(sd, reg, &val, 1); 204 i2c_rd(sd, reg, (u8 __force *)&val, n);
205 205
206 return val; 206 return le32_to_cpu(val);
207}
208
209static noinline void i2c_wrreg(struct v4l2_subdev *sd, u16 reg, u32 val, u32 n)
210{
211 __le32 raw = cpu_to_le32(val);
212
213 i2c_wr(sd, reg, (u8 __force *)&raw, n);
214}
215
216static u8 i2c_rd8(struct v4l2_subdev *sd, u16 reg)
217{
218 return i2c_rdreg(sd, reg, 1);
207} 219}
208 220
209static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val) 221static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val)
210{ 222{
211 i2c_wr(sd, reg, &val, 1); 223 i2c_wrreg(sd, reg, val, 1);
212} 224}
213 225
214static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg, 226static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg,
215 u8 mask, u8 val) 227 u8 mask, u8 val)
216{ 228{
217 i2c_wr8(sd, reg, (i2c_rd8(sd, reg) & mask) | val); 229 i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1);
218} 230}
219 231
220static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg) 232static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
221{ 233{
222 u16 val; 234 return i2c_rdreg(sd, reg, 2);
223
224 i2c_rd(sd, reg, (u8 *)&val, 2);
225
226 return val;
227} 235}
228 236
229static void i2c_wr16(struct v4l2_subdev *sd, u16 reg, u16 val) 237static void i2c_wr16(struct v4l2_subdev *sd, u16 reg, u16 val)
230{ 238{
231 i2c_wr(sd, reg, (u8 *)&val, 2); 239 i2c_wrreg(sd, reg, val, 2);
232} 240}
233 241
234static void i2c_wr16_and_or(struct v4l2_subdev *sd, u16 reg, u16 mask, u16 val) 242static void i2c_wr16_and_or(struct v4l2_subdev *sd, u16 reg, u16 mask, u16 val)
235{ 243{
236 i2c_wr16(sd, reg, (i2c_rd16(sd, reg) & mask) | val); 244 i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2);
237} 245}
238 246
239static u32 i2c_rd32(struct v4l2_subdev *sd, u16 reg) 247static u32 i2c_rd32(struct v4l2_subdev *sd, u16 reg)
240{ 248{
241 u32 val; 249 return i2c_rdreg(sd, reg, 4);
242
243 i2c_rd(sd, reg, (u8 *)&val, 4);
244
245 return val;
246} 250}
247 251
248static void i2c_wr32(struct v4l2_subdev *sd, u16 reg, u32 val) 252static void i2c_wr32(struct v4l2_subdev *sd, u16 reg, u32 val)
249{ 253{
250 i2c_wr(sd, reg, (u8 *)&val, 4); 254 i2c_wrreg(sd, reg, val, 4);
251} 255}
252 256
253/* --------------- STATUS --------------- */ 257/* --------------- STATUS --------------- */
@@ -1240,7 +1244,7 @@ static int tc358743_g_register(struct v4l2_subdev *sd,
1240 1244
1241 reg->size = tc358743_get_reg_size(reg->reg); 1245 reg->size = tc358743_get_reg_size(reg->reg);
1242 1246
1243 i2c_rd(sd, reg->reg, (u8 *)&reg->val, reg->size); 1247 reg->val = i2c_rdreg(sd, reg->reg, reg->size);
1244 1248
1245 return 0; 1249 return 0;
1246} 1250}
@@ -1266,7 +1270,7 @@ static int tc358743_s_register(struct v4l2_subdev *sd,
1266 reg->reg == BCAPS) 1270 reg->reg == BCAPS)
1267 return 0; 1271 return 0;
1268 1272
1269 i2c_wr(sd, (u16)reg->reg, (u8 *)&reg->val, 1273 i2c_wrreg(sd, (u16)reg->reg, reg->val,
1270 tc358743_get_reg_size(reg->reg)); 1274 tc358743_get_reg_size(reg->reg));
1271 1275
1272 return 0; 1276 return 0;
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index 8aa726651630..90fcccc05b56 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -422,8 +422,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
422 bt878_num); 422 bt878_num);
423 if (bt878_num >= BT878_MAX) { 423 if (bt878_num >= BT878_MAX) {
424 printk(KERN_ERR "bt878: Too many devices inserted\n"); 424 printk(KERN_ERR "bt878: Too many devices inserted\n");
425 result = -ENOMEM; 425 return -ENOMEM;
426 goto fail0;
427 } 426 }
428 if (pci_enable_device(dev)) 427 if (pci_enable_device(dev))
429 return -EIO; 428 return -EIO;
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index f384f295676e..679d122af63c 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -2124,6 +2124,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
2124 &dev->i2c_bus[2].i2c_adap, 2124 &dev->i2c_bus[2].i2c_adap,
2125 "cx25840", 0x88 >> 1, NULL); 2125 "cx25840", 0x88 >> 1, NULL);
2126 if (dev->sd_cx25840) { 2126 if (dev->sd_cx25840) {
2127 /* set host data for clk_freq configuration */
2128 v4l2_set_subdev_hostdata(dev->sd_cx25840,
2129 &dev->clk_freq);
2130
2127 dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE; 2131 dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
2128 v4l2_subdev_call(dev->sd_cx25840, core, load_fw); 2132 v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
2129 } 2133 }
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index e8f847226a19..6eb3be13b430 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -872,6 +872,16 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
872 if (cx23885_boards[dev->board].clk_freq > 0) 872 if (cx23885_boards[dev->board].clk_freq > 0)
873 dev->clk_freq = cx23885_boards[dev->board].clk_freq; 873 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
874 874
875 if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
876 dev->pci->subsystem_device == 0x7137) {
877 /* Hauppauge ImpactVCBe device ID 0x7137 is populated
878 * with an 888, and a 25Mhz crystal, instead of the
879 * usual third overtone 50Mhz. The default clock rate must
880 * be overridden so the cx25840 is properly configured
881 */
882 dev->clk_freq = 25000000;
883 }
884
875 dev->pci_bus = dev->pci->bus->number; 885 dev->pci_bus = dev->pci->bus->number;
876 dev->pci_slot = PCI_SLOT(dev->pci->devfn); 886 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
877 cx23885_irq_add(dev, 0x001f00); 887 cx23885_irq_add(dev, 0x001f00);
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 0042803a9de7..54398d8a4696 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -871,6 +871,10 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
871 dev->nr = ++cx25821_devcount; 871 dev->nr = ++cx25821_devcount;
872 sprintf(dev->name, "cx25821[%d]", dev->nr); 872 sprintf(dev->name, "cx25821[%d]", dev->nr);
873 873
874 if (dev->nr >= ARRAY_SIZE(card)) {
875 CX25821_INFO("dev->nr >= %zd", ARRAY_SIZE(card));
876 return -ENODEV;
877 }
874 if (dev->pci->device != 0x8210) { 878 if (dev->pci->device != 0x8210) {
875 pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n", 879 pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
876 __func__, dev->pci->device); 880 __func__, dev->pci->device);
@@ -886,9 +890,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
886 dev->channels[i].sram_channels = &cx25821_sram_channels[i]; 890 dev->channels[i].sram_channels = &cx25821_sram_channels[i];
887 } 891 }
888 892
889 if (dev->nr > 1)
890 CX25821_INFO("dev->nr > 1!");
891
892 /* board config */ 893 /* board config */
893 dev->board = 1; /* card[dev->nr]; */ 894 dev->board = 1; /* card[dev->nr]; */
894 dev->_max_num_decoders = MAX_DECODERS; 895 dev->_max_num_decoders = MAX_DECODERS;
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index 269e0782c7b6..93d53195e8ca 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -430,7 +430,8 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
430 __func__, fw->size); 430 __func__, fw->size);
431 431
432 if (fw->size != fwlength) { 432 if (fw->size != fwlength) {
433 printk(KERN_ERR "xc5000: firmware incorrect size\n"); 433 printk(KERN_ERR "saa7164: firmware incorrect size %zu != %u\n",
434 fw->size, fwlength);
434 ret = -ENOMEM; 435 ret = -ENOMEM;
435 goto out; 436 goto out;
436 } 437 }
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index f7ce493b1fee..a0b61e88c838 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -342,6 +342,17 @@ static void solo_stop_streaming(struct vb2_queue *q)
342 struct solo_dev *solo_dev = vb2_get_drv_priv(q); 342 struct solo_dev *solo_dev = vb2_get_drv_priv(q);
343 343
344 solo_stop_thread(solo_dev); 344 solo_stop_thread(solo_dev);
345
346 spin_lock(&solo_dev->slock);
347 while (!list_empty(&solo_dev->vidq_active)) {
348 struct solo_vb2_buf *buf = list_entry(
349 solo_dev->vidq_active.next,
350 struct solo_vb2_buf, list);
351
352 list_del(&buf->list);
353 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
354 }
355 spin_unlock(&solo_dev->slock);
345 INIT_LIST_HEAD(&solo_dev->vidq_active); 356 INIT_LIST_HEAD(&solo_dev->vidq_active);
346} 357}
347 358
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 56e683b19a73..91e02c1ff392 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2077,6 +2077,7 @@ error_csiphy:
2077 2077
2078static void isp_detach_iommu(struct isp_device *isp) 2078static void isp_detach_iommu(struct isp_device *isp)
2079{ 2079{
2080 arm_iommu_detach_device(isp->dev);
2080 arm_iommu_release_mapping(isp->mapping); 2081 arm_iommu_release_mapping(isp->mapping);
2081 isp->mapping = NULL; 2082 isp->mapping = NULL;
2082 iommu_group_remove_device(isp->dev); 2083 iommu_group_remove_device(isp->dev);
@@ -2110,8 +2111,7 @@ static int isp_attach_iommu(struct isp_device *isp)
2110 mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G); 2111 mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
2111 if (IS_ERR(mapping)) { 2112 if (IS_ERR(mapping)) {
2112 dev_err(isp->dev, "failed to create ARM IOMMU mapping\n"); 2113 dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
2113 ret = PTR_ERR(mapping); 2114 return PTR_ERR(mapping);
2114 goto error;
2115 } 2115 }
2116 2116
2117 isp->mapping = mapping; 2117 isp->mapping = mapping;
@@ -2126,7 +2126,8 @@ static int isp_attach_iommu(struct isp_device *isp)
2126 return 0; 2126 return 0;
2127 2127
2128error: 2128error:
2129 isp_detach_iommu(isp); 2129 arm_iommu_release_mapping(isp->mapping);
2130 isp->mapping = NULL;
2130 return ret; 2131 return ret;
2131} 2132}
2132 2133
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index f8e3e83c52a2..20de5e9fc217 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -1278,7 +1278,7 @@ static int jpu_open(struct file *file)
1278 /* ...issue software reset */ 1278 /* ...issue software reset */
1279 ret = jpu_reset(jpu); 1279 ret = jpu_reset(jpu);
1280 if (ret) 1280 if (ret)
1281 goto device_prepare_rollback; 1281 goto jpu_reset_rollback;
1282 } 1282 }
1283 1283
1284 jpu->ref_count++; 1284 jpu->ref_count++;
@@ -1286,6 +1286,8 @@ static int jpu_open(struct file *file)
1286 mutex_unlock(&jpu->mutex); 1286 mutex_unlock(&jpu->mutex);
1287 return 0; 1287 return 0;
1288 1288
1289jpu_reset_rollback:
1290 clk_disable_unprepare(jpu->clk);
1289device_prepare_rollback: 1291device_prepare_rollback:
1290 mutex_unlock(&jpu->mutex); 1292 mutex_unlock(&jpu->mutex);
1291v4l_prepare_rollback: 1293v4l_prepare_rollback:
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 537b858cb94a..fa6af4a7dae1 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -1268,16 +1268,17 @@ static void __camif_subdev_try_format(struct camif_dev *camif,
1268{ 1268{
1269 const struct s3c_camif_variant *variant = camif->variant; 1269 const struct s3c_camif_variant *variant = camif->variant;
1270 const struct vp_pix_limits *pix_lim; 1270 const struct vp_pix_limits *pix_lim;
1271 int i = ARRAY_SIZE(camif_mbus_formats); 1271 unsigned int i;
1272 1272
1273 /* FIXME: constraints against codec or preview path ? */ 1273 /* FIXME: constraints against codec or preview path ? */
1274 pix_lim = &variant->vp_pix_limits[VP_CODEC]; 1274 pix_lim = &variant->vp_pix_limits[VP_CODEC];
1275 1275
1276 while (i-- >= 0) 1276 for (i = 0; i < ARRAY_SIZE(camif_mbus_formats); i++)
1277 if (camif_mbus_formats[i] == mf->code) 1277 if (camif_mbus_formats[i] == mf->code)
1278 break; 1278 break;
1279 1279
1280 mf->code = camif_mbus_formats[i]; 1280 if (i == ARRAY_SIZE(camif_mbus_formats))
1281 mf->code = camif_mbus_formats[0];
1281 1282
1282 if (pad == CAMIF_SD_PAD_SINK) { 1283 if (pad == CAMIF_SD_PAD_SINK) {
1283 v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH, 1284 v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
index bda29bc1b933..2f74a5ac0147 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
@@ -405,3 +405,7 @@ void soc_camera_calc_client_output(struct soc_camera_device *icd,
405 mf->height = soc_camera_shift_scale(rect->height, shift, scale_v); 405 mf->height = soc_camera_shift_scale(rect->height, shift, scale_v);
406} 406}
407EXPORT_SYMBOL(soc_camera_calc_client_output); 407EXPORT_SYMBOL(soc_camera_calc_client_output);
408
409MODULE_DESCRIPTION("soc-camera scaling-cropping functions");
410MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
411MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 8490a65ae1c6..a43404cad3e3 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -83,7 +83,7 @@ static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
83static void channel_swdemux_tsklet(unsigned long data) 83static void channel_swdemux_tsklet(unsigned long data)
84{ 84{
85 struct channel_info *channel = (struct channel_info *)data; 85 struct channel_info *channel = (struct channel_info *)data;
86 struct c8sectpfei *fei = channel->fei; 86 struct c8sectpfei *fei;
87 unsigned long wp, rp; 87 unsigned long wp, rp;
88 int pos, num_packets, n, size; 88 int pos, num_packets, n, size;
89 u8 *buf; 89 u8 *buf;
@@ -91,6 +91,8 @@ static void channel_swdemux_tsklet(unsigned long data)
91 if (unlikely(!channel || !channel->irec)) 91 if (unlikely(!channel || !channel->irec))
92 return; 92 return;
93 93
94 fei = channel->fei;
95
94 wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0)); 96 wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
95 rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0)); 97 rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
96 98
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 471d6a8ae8a4..9326439bc49c 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -96,7 +96,7 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
96 */ 96 */
97int si470x_get_register(struct si470x_device *radio, int regnr) 97int si470x_get_register(struct si470x_device *radio, int regnr)
98{ 98{
99 u16 buf[READ_REG_NUM]; 99 __be16 buf[READ_REG_NUM];
100 struct i2c_msg msgs[1] = { 100 struct i2c_msg msgs[1] = {
101 { 101 {
102 .addr = radio->client->addr, 102 .addr = radio->client->addr,
@@ -121,7 +121,7 @@ int si470x_get_register(struct si470x_device *radio, int regnr)
121int si470x_set_register(struct si470x_device *radio, int regnr) 121int si470x_set_register(struct si470x_device *radio, int regnr)
122{ 122{
123 int i; 123 int i;
124 u16 buf[WRITE_REG_NUM]; 124 __be16 buf[WRITE_REG_NUM];
125 struct i2c_msg msgs[1] = { 125 struct i2c_msg msgs[1] = {
126 { 126 {
127 .addr = radio->client->addr, 127 .addr = radio->client->addr,
@@ -151,7 +151,7 @@ int si470x_set_register(struct si470x_device *radio, int regnr)
151static int si470x_get_all_registers(struct si470x_device *radio) 151static int si470x_get_all_registers(struct si470x_device *radio)
152{ 152{
153 int i; 153 int i;
154 u16 buf[READ_REG_NUM]; 154 __be16 buf[READ_REG_NUM];
155 struct i2c_msg msgs[1] = { 155 struct i2c_msg msgs[1] = {
156 { 156 {
157 .addr = radio->client->addr, 157 .addr = radio->client->addr,
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index f838d9c7ed12..0fba4a2c1602 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1370,8 +1370,13 @@ static int mceusb_dev_probe(struct usb_interface *intf,
1370 goto rc_dev_fail; 1370 goto rc_dev_fail;
1371 1371
1372 /* wire up inbound data handler */ 1372 /* wire up inbound data handler */
1373 usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp, 1373 if (usb_endpoint_xfer_int(ep_in))
1374 mceusb_dev_recv, ir, ep_in->bInterval); 1374 usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
1375 mceusb_dev_recv, ir, ep_in->bInterval);
1376 else
1377 usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
1378 mceusb_dev_recv, ir);
1379
1375 ir->urb_in->transfer_dma = ir->dma_in; 1380 ir->urb_in->transfer_dma = ir->dma_in;
1376 ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 1381 ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1377 1382
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index a7a8452e99d2..c1ce8d3ce877 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -410,9 +410,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val,
410 return 0; 410 return 0;
411} 411}
412 412
413static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val) 413static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
414{ 414{
415 return r820t_write(priv, reg, &val, 1); 415 u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */
416
417 return r820t_write(priv, reg, &tmp, 1);
416} 418}
417 419
418static int r820t_read_cache_reg(struct r820t_priv *priv, int reg) 420static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
@@ -425,17 +427,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
425 return -EINVAL; 427 return -EINVAL;
426} 428}
427 429
428static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val, 430static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
429 u8 bit_mask) 431 u8 bit_mask)
430{ 432{
433 u8 tmp = val;
431 int rc = r820t_read_cache_reg(priv, reg); 434 int rc = r820t_read_cache_reg(priv, reg);
432 435
433 if (rc < 0) 436 if (rc < 0)
434 return rc; 437 return rc;
435 438
436 val = (rc & ~bit_mask) | (val & bit_mask); 439 tmp = (rc & ~bit_mask) | (tmp & bit_mask);
437 440
438 return r820t_write(priv, reg, &val, 1); 441 return r820t_write(priv, reg, &tmp, 1);
439} 442}
440 443
441static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len) 444static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len)
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 9caea8344547..d793c630f1dd 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -812,7 +812,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
812 struct camera_data *cam = video_drvdata(file); 812 struct camera_data *cam = video_drvdata(file);
813 813
814 if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || 814 if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
815 buf->index > cam->num_frames) 815 buf->index >= cam->num_frames)
816 return -EINVAL; 816 return -EINVAL;
817 817
818 buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; 818 buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
@@ -863,7 +863,7 @@ static int cpia2_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
863 863
864 if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || 864 if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
865 buf->memory != V4L2_MEMORY_MMAP || 865 buf->memory != V4L2_MEMORY_MMAP ||
866 buf->index > cam->num_frames) 866 buf->index >= cam->num_frames)
867 return -EINVAL; 867 return -EINVAL;
868 868
869 DBG("QBUF #%d\n", buf->index); 869 DBG("QBUF #%d\n", buf->index);
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 04ae21278440..77f54e4198d3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -864,6 +864,9 @@ struct usb_device_id cx231xx_id_table[] = {
864 .driver_info = CX231XX_BOARD_CNXT_RDE_250}, 864 .driver_info = CX231XX_BOARD_CNXT_RDE_250},
865 {USB_DEVICE(0x0572, 0x58A0), 865 {USB_DEVICE(0x0572, 0x58A0),
866 .driver_info = CX231XX_BOARD_CNXT_RDU_250}, 866 .driver_info = CX231XX_BOARD_CNXT_RDU_250},
867 /* AverMedia DVD EZMaker 7 */
868 {USB_DEVICE(0x07ca, 0xc039),
869 .driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER},
867 {USB_DEVICE(0x2040, 0xb110), 870 {USB_DEVICE(0x2040, 0xb110),
868 .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL}, 871 .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL},
869 {USB_DEVICE(0x2040, 0xb111), 872 {USB_DEVICE(0x2040, 0xb111),
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 3721ee63b8fb..09c97847bf95 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -503,18 +503,23 @@ static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
503 503
504static int lme2510_return_status(struct dvb_usb_device *d) 504static int lme2510_return_status(struct dvb_usb_device *d)
505{ 505{
506 int ret = 0; 506 int ret;
507 u8 *data; 507 u8 *data;
508 508
509 data = kzalloc(10, GFP_KERNEL); 509 data = kzalloc(6, GFP_KERNEL);
510 if (!data) 510 if (!data)
511 return -ENOMEM; 511 return -ENOMEM;
512 512
513 ret |= usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), 513 ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
514 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200); 514 0x06, 0x80, 0x0302, 0x00,
515 info("Firmware Status: %x (%x)", ret , data[2]); 515 data, 0x6, 200);
516 if (ret != 6)
517 ret = -EINVAL;
518 else
519 ret = data[2];
520
521 info("Firmware Status: %6ph", data);
516 522
517 ret = (ret < 0) ? -ENODEV : data[2];
518 kfree(data); 523 kfree(data);
519 return ret; 524 return ret;
520} 525}
@@ -1078,8 +1083,6 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
1078 1083
1079 if (adap->fe[0]) { 1084 if (adap->fe[0]) {
1080 info("FE Found M88RS2000"); 1085 info("FE Found M88RS2000");
1081 dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config,
1082 &d->i2c_adap);
1083 st->i2c_tuner_gate_w = 5; 1086 st->i2c_tuner_gate_w = 5;
1084 st->i2c_tuner_gate_r = 5; 1087 st->i2c_tuner_gate_r = 5;
1085 st->i2c_tuner_addr = 0x60; 1088 st->i2c_tuner_addr = 0x60;
@@ -1145,17 +1148,18 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
1145 ret = st->tuner_config; 1148 ret = st->tuner_config;
1146 break; 1149 break;
1147 case TUNER_RS2000: 1150 case TUNER_RS2000:
1148 ret = st->tuner_config; 1151 if (dvb_attach(ts2020_attach, adap->fe[0],
1152 &ts2020_config, &d->i2c_adap))
1153 ret = st->tuner_config;
1149 break; 1154 break;
1150 default: 1155 default:
1151 break; 1156 break;
1152 } 1157 }
1153 1158
1154 if (ret) 1159 if (ret) {
1155 info("TUN Found %s tuner", tun_msg[ret]); 1160 info("TUN Found %s tuner", tun_msg[ret]);
1156 else { 1161 } else {
1157 info("TUN No tuner found --- resetting device"); 1162 info("TUN No tuner found");
1158 lme_coldreset(d);
1159 return -ENODEV; 1163 return -ENODEV;
1160 } 1164 }
1161 1165
@@ -1199,6 +1203,7 @@ static int lme2510_get_adapter_count(struct dvb_usb_device *d)
1199static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) 1203static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
1200{ 1204{
1201 struct lme2510_state *st = d->priv; 1205 struct lme2510_state *st = d->priv;
1206 int status;
1202 1207
1203 usb_reset_configuration(d->udev); 1208 usb_reset_configuration(d->udev);
1204 1209
@@ -1207,12 +1212,16 @@ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
1207 1212
1208 st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware; 1213 st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware;
1209 1214
1210 if (lme2510_return_status(d) == 0x44) { 1215 status = lme2510_return_status(d);
1216 if (status == 0x44) {
1211 *name = lme_firmware_switch(d, 0); 1217 *name = lme_firmware_switch(d, 0);
1212 return COLD; 1218 return COLD;
1213 } 1219 }
1214 1220
1215 return 0; 1221 if (status != 0x47)
1222 return -EINVAL;
1223
1224 return WARM;
1216} 1225}
1217 1226
1218static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type, 1227static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index ab7151181728..d00b27ed73a6 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -818,6 +818,8 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component,
818 case XC2028_RESET_CLK: 818 case XC2028_RESET_CLK:
819 deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg); 819 deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg);
820 break; 820 break;
821 case XC2028_I2C_FLUSH:
822 break;
821 default: 823 default:
822 deb_info("%s: unknown command %d, arg %d\n", __func__, 824 deb_info("%s: unknown command %d, arg %d\n", __func__,
823 command, arg); 825 command, arg);
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 7df0707a0455..38c03283a441 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -431,6 +431,7 @@ static int stk7700ph_xc3028_callback(void *ptr, int component,
431 state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); 431 state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
432 break; 432 break;
433 case XC2028_RESET_CLK: 433 case XC2028_RESET_CLK:
434 case XC2028_I2C_FLUSH:
434 break; 435 break;
435 default: 436 default:
436 err("%s: unknown command %d, arg %d\n", __func__, 437 err("%s: unknown command %d, arg %d\n", __func__,
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index e382210c4ada..75323f5efd0f 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -11,7 +11,7 @@ config VIDEO_EM28XX_V4L2
11 select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT 11 select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
12 select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT 12 select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
13 select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT 13 select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
14 select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT 14 select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
15 15
16 ---help--- 16 ---help---
17 This is a video4linux driver for Empia 28xx based TV cards. 17 This is a video4linux driver for Empia 28xx based TV cards.
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 76bf8ba372b3..5b53e31ce262 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -187,7 +187,7 @@
187 USB 2.0 spec says bulk packet size is always 512 bytes 187 USB 2.0 spec says bulk packet size is always 512 bytes
188 */ 188 */
189#define EM28XX_BULK_PACKET_MULTIPLIER 384 189#define EM28XX_BULK_PACKET_MULTIPLIER 384
190#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 384 190#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 94
191 191
192#define EM28XX_INTERLACED_DEFAULT 1 192#define EM28XX_INTERLACED_DEFAULT 1
193 193
diff --git a/drivers/media/usb/go7007/Kconfig b/drivers/media/usb/go7007/Kconfig
index 95a3af644a92..af1d02430931 100644
--- a/drivers/media/usb/go7007/Kconfig
+++ b/drivers/media/usb/go7007/Kconfig
@@ -11,7 +11,7 @@ config VIDEO_GO7007
11 select VIDEO_TW2804 if MEDIA_SUBDRV_AUTOSELECT 11 select VIDEO_TW2804 if MEDIA_SUBDRV_AUTOSELECT
12 select VIDEO_TW9903 if MEDIA_SUBDRV_AUTOSELECT 12 select VIDEO_TW9903 if MEDIA_SUBDRV_AUTOSELECT
13 select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT 13 select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
14 select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT 14 select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
15 select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT 15 select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
16 ---help--- 16 ---help---
17 This is a video4linux driver for the WIS GO7007 MPEG 17 This is a video4linux driver for the WIS GO7007 MPEG
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 3fc64197b4e6..08f0ca7aa012 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -273,7 +273,9 @@ static int hdpvr_probe(struct usb_interface *interface,
273 struct hdpvr_device *dev; 273 struct hdpvr_device *dev;
274 struct usb_host_interface *iface_desc; 274 struct usb_host_interface *iface_desc;
275 struct usb_endpoint_descriptor *endpoint; 275 struct usb_endpoint_descriptor *endpoint;
276#if IS_ENABLED(CONFIG_I2C)
276 struct i2c_client *client; 277 struct i2c_client *client;
278#endif
277 size_t buffer_size; 279 size_t buffer_size;
278 int i; 280 int i;
279 int retval = -ENOMEM; 281 int retval = -ENOMEM;
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 58f23bcfe94e..299750e56916 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -1119,8 +1119,10 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1119 1119
1120 return 0; 1120 return 0;
1121 1121
1122#ifdef CONFIG_USB_PWC_INPUT_EVDEV
1122err_video_unreg: 1123err_video_unreg:
1123 video_unregister_device(&pdev->vdev); 1124 video_unregister_device(&pdev->vdev);
1125#endif
1124err_unregister_v4l2_dev: 1126err_unregister_v4l2_dev:
1125 v4l2_device_unregister(&pdev->v4l2_dev); 1127 v4l2_device_unregister(&pdev->v4l2_dev);
1126err_free_controls: 1128err_free_controls:
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index 29428bef272c..483457d4904f 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -95,6 +95,8 @@ static int usbtv_probe(struct usb_interface *intf,
95 return 0; 95 return 0;
96 96
97usbtv_audio_fail: 97usbtv_audio_fail:
98 /* we must not free at this point */
99 usb_get_dev(usbtv->udev);
98 usbtv_video_free(usbtv); 100 usbtv_video_free(usbtv);
99 101
100usbtv_video_fail: 102usbtv_video_fail:
@@ -127,6 +129,7 @@ static void usbtv_disconnect(struct usb_interface *intf)
127 129
128static struct usb_device_id usbtv_id_table[] = { 130static struct usb_device_id usbtv_id_table[] = {
129 { USB_DEVICE(0x1b71, 0x3002) }, 131 { USB_DEVICE(0x1b71, 0x3002) },
132 { USB_DEVICE(0x1f71, 0x3301) },
130 {} 133 {}
131}; 134};
132MODULE_DEVICE_TABLE(usb, usbtv_id_table); 135MODULE_DEVICE_TABLE(usb, usbtv_id_table);
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 9beece00869b..29b3436d0910 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -37,7 +37,6 @@ config VIDEO_PCI_SKELETON
37# Used by drivers that need tuner.ko 37# Used by drivers that need tuner.ko
38config VIDEO_TUNER 38config VIDEO_TUNER
39 tristate 39 tristate
40 depends on MEDIA_TUNER
41 40
42# Used by drivers that need v4l2-mem2mem.ko 41# Used by drivers that need v4l2-mem2mem.ko
43config V4L2_MEM2MEM_DEV 42config V4L2_MEM2MEM_DEV
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 4379b949bb93..9292e35aef06 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -18,8 +18,18 @@
18#include <linux/videodev2.h> 18#include <linux/videodev2.h>
19#include <linux/v4l2-subdev.h> 19#include <linux/v4l2-subdev.h>
20#include <media/v4l2-dev.h> 20#include <media/v4l2-dev.h>
21#include <media/v4l2-fh.h>
22#include <media/v4l2-ctrls.h>
21#include <media/v4l2-ioctl.h> 23#include <media/v4l2-ioctl.h>
22 24
25/* Use the same argument order as copy_in_user */
26#define assign_in_user(to, from) \
27({ \
28 typeof(*from) __assign_tmp; \
29 \
30 get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \
31})
32
23static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 33static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
24{ 34{
25 long ret = -ENOIOCTLCMD; 35 long ret = -ENOIOCTLCMD;
@@ -33,131 +43,90 @@ static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33 43
34struct v4l2_clip32 { 44struct v4l2_clip32 {
35 struct v4l2_rect c; 45 struct v4l2_rect c;
36 compat_caddr_t next; 46 compat_caddr_t next;
37}; 47};
38 48
39struct v4l2_window32 { 49struct v4l2_window32 {
40 struct v4l2_rect w; 50 struct v4l2_rect w;
41 __u32 field; /* enum v4l2_field */ 51 __u32 field; /* enum v4l2_field */
42 __u32 chromakey; 52 __u32 chromakey;
43 compat_caddr_t clips; /* actually struct v4l2_clip32 * */ 53 compat_caddr_t clips; /* actually struct v4l2_clip32 * */
44 __u32 clipcount; 54 __u32 clipcount;
45 compat_caddr_t bitmap; 55 compat_caddr_t bitmap;
56 __u8 global_alpha;
46}; 57};
47 58
48static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) 59static int get_v4l2_window32(struct v4l2_window __user *kp,
49{ 60 struct v4l2_window32 __user *up,
50 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) || 61 void __user *aux_buf, u32 aux_space)
51 copy_from_user(&kp->w, &up->w, sizeof(up->w)) ||
52 get_user(kp->field, &up->field) ||
53 get_user(kp->chromakey, &up->chromakey) ||
54 get_user(kp->clipcount, &up->clipcount))
55 return -EFAULT;
56 if (kp->clipcount > 2048)
57 return -EINVAL;
58 if (kp->clipcount) {
59 struct v4l2_clip32 __user *uclips;
60 struct v4l2_clip __user *kclips;
61 int n = kp->clipcount;
62 compat_caddr_t p;
63
64 if (get_user(p, &up->clips))
65 return -EFAULT;
66 uclips = compat_ptr(p);
67 kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip));
68 kp->clips = kclips;
69 while (--n >= 0) {
70 if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
71 return -EFAULT;
72 if (put_user(n ? kclips + 1 : NULL, &kclips->next))
73 return -EFAULT;
74 uclips += 1;
75 kclips += 1;
76 }
77 } else
78 kp->clips = NULL;
79 return 0;
80}
81
82static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
83{ 62{
84 if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) || 63 struct v4l2_clip32 __user *uclips;
85 put_user(kp->field, &up->field) || 64 struct v4l2_clip __user *kclips;
86 put_user(kp->chromakey, &up->chromakey) || 65 compat_caddr_t p;
87 put_user(kp->clipcount, &up->clipcount)) 66 u32 clipcount;
88 return -EFAULT; 67
89 return 0; 68 if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
90} 69 copy_in_user(&kp->w, &up->w, sizeof(up->w)) ||
91 70 assign_in_user(&kp->field, &up->field) ||
92static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up) 71 assign_in_user(&kp->chromakey, &up->chromakey) ||
93{ 72 assign_in_user(&kp->global_alpha, &up->global_alpha) ||
94 if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format))) 73 get_user(clipcount, &up->clipcount) ||
95 return -EFAULT; 74 put_user(clipcount, &kp->clipcount))
96 return 0;
97}
98
99static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
100 struct v4l2_pix_format_mplane __user *up)
101{
102 if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
103 return -EFAULT;
104 return 0;
105}
106
107static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
108{
109 if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
110 return -EFAULT; 75 return -EFAULT;
111 return 0; 76 if (clipcount > 2048)
112} 77 return -EINVAL;
78 if (!clipcount)
79 return put_user(NULL, &kp->clips);
113 80
114static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, 81 if (get_user(p, &up->clips))
115 struct v4l2_pix_format_mplane __user *up)
116{
117 if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
118 return -EFAULT; 82 return -EFAULT;
119 return 0; 83 uclips = compat_ptr(p);
120} 84 if (aux_space < clipcount * sizeof(*kclips))
121
122static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
123{
124 if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
125 return -EFAULT; 85 return -EFAULT;
126 return 0; 86 kclips = aux_buf;
127} 87 if (put_user(kclips, &kp->clips))
128
129static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
130{
131 if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format)))
132 return -EFAULT; 88 return -EFAULT;
133 return 0;
134}
135 89
136static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up) 90 while (clipcount--) {
137{ 91 if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
138 if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format))) 92 return -EFAULT;
139 return -EFAULT; 93 if (put_user(clipcount ? kclips + 1 : NULL, &kclips->next))
94 return -EFAULT;
95 uclips++;
96 kclips++;
97 }
140 return 0; 98 return 0;
141} 99}
142 100
143static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up) 101static int put_v4l2_window32(struct v4l2_window __user *kp,
102 struct v4l2_window32 __user *up)
144{ 103{
145 if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format))) 104 struct v4l2_clip __user *kclips;
105 struct v4l2_clip32 __user *uclips;
106 compat_caddr_t p;
107 u32 clipcount;
108
109 if (copy_in_user(&up->w, &kp->w, sizeof(kp->w)) ||
110 assign_in_user(&up->field, &kp->field) ||
111 assign_in_user(&up->chromakey, &kp->chromakey) ||
112 assign_in_user(&up->global_alpha, &kp->global_alpha) ||
113 get_user(clipcount, &kp->clipcount) ||
114 put_user(clipcount, &up->clipcount))
146 return -EFAULT; 115 return -EFAULT;
147 return 0; 116 if (!clipcount)
148} 117 return 0;
149 118
150static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up) 119 if (get_user(kclips, &kp->clips))
151{
152 if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format)))
153 return -EFAULT; 120 return -EFAULT;
154 return 0; 121 if (get_user(p, &up->clips))
155}
156
157static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
158{
159 if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format)))
160 return -EFAULT; 122 return -EFAULT;
123 uclips = compat_ptr(p);
124 while (clipcount--) {
125 if (copy_in_user(&uclips->c, &kclips->c, sizeof(uclips->c)))
126 return -EFAULT;
127 uclips++;
128 kclips++;
129 }
161 return 0; 130 return 0;
162} 131}
163 132
@@ -191,97 +160,158 @@ struct v4l2_create_buffers32 {
191 __u32 reserved[8]; 160 __u32 reserved[8];
192}; 161};
193 162
194static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 163static int __bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size)
164{
165 u32 type;
166
167 if (get_user(type, &up->type))
168 return -EFAULT;
169
170 switch (type) {
171 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
172 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: {
173 u32 clipcount;
174
175 if (get_user(clipcount, &up->fmt.win.clipcount))
176 return -EFAULT;
177 if (clipcount > 2048)
178 return -EINVAL;
179 *size = clipcount * sizeof(struct v4l2_clip);
180 return 0;
181 }
182 default:
183 *size = 0;
184 return 0;
185 }
186}
187
188static int bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size)
195{ 189{
196 if (get_user(kp->type, &up->type)) 190 if (!access_ok(VERIFY_READ, up, sizeof(*up)))
197 return -EFAULT; 191 return -EFAULT;
192 return __bufsize_v4l2_format(up, size);
193}
198 194
199 switch (kp->type) { 195static int __get_v4l2_format32(struct v4l2_format __user *kp,
196 struct v4l2_format32 __user *up,
197 void __user *aux_buf, u32 aux_space)
198{
199 u32 type;
200
201 if (get_user(type, &up->type) || put_user(type, &kp->type))
202 return -EFAULT;
203
204 switch (type) {
200 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 205 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
201 case V4L2_BUF_TYPE_VIDEO_OUTPUT: 206 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
202 return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); 207 return copy_in_user(&kp->fmt.pix, &up->fmt.pix,
208 sizeof(kp->fmt.pix)) ? -EFAULT : 0;
203 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: 209 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
204 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: 210 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
205 return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp, 211 return copy_in_user(&kp->fmt.pix_mp, &up->fmt.pix_mp,
206 &up->fmt.pix_mp); 212 sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0;
207 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 213 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
208 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: 214 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
209 return get_v4l2_window32(&kp->fmt.win, &up->fmt.win); 215 return get_v4l2_window32(&kp->fmt.win, &up->fmt.win,
216 aux_buf, aux_space);
210 case V4L2_BUF_TYPE_VBI_CAPTURE: 217 case V4L2_BUF_TYPE_VBI_CAPTURE:
211 case V4L2_BUF_TYPE_VBI_OUTPUT: 218 case V4L2_BUF_TYPE_VBI_OUTPUT:
212 return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); 219 return copy_in_user(&kp->fmt.vbi, &up->fmt.vbi,
220 sizeof(kp->fmt.vbi)) ? -EFAULT : 0;
213 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: 221 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
214 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: 222 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
215 return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced); 223 return copy_in_user(&kp->fmt.sliced, &up->fmt.sliced,
224 sizeof(kp->fmt.sliced)) ? -EFAULT : 0;
216 case V4L2_BUF_TYPE_SDR_CAPTURE: 225 case V4L2_BUF_TYPE_SDR_CAPTURE:
217 case V4L2_BUF_TYPE_SDR_OUTPUT: 226 case V4L2_BUF_TYPE_SDR_OUTPUT:
218 return get_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr); 227 return copy_in_user(&kp->fmt.sdr, &up->fmt.sdr,
228 sizeof(kp->fmt.sdr)) ? -EFAULT : 0;
219 default: 229 default:
220 pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
221 kp->type);
222 return -EINVAL; 230 return -EINVAL;
223 } 231 }
224} 232}
225 233
226static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 234static int get_v4l2_format32(struct v4l2_format __user *kp,
235 struct v4l2_format32 __user *up,
236 void __user *aux_buf, u32 aux_space)
227{ 237{
228 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32))) 238 if (!access_ok(VERIFY_READ, up, sizeof(*up)))
229 return -EFAULT; 239 return -EFAULT;
230 return __get_v4l2_format32(kp, up); 240 return __get_v4l2_format32(kp, up, aux_buf, aux_space);
231} 241}
232 242
233static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) 243static int bufsize_v4l2_create(struct v4l2_create_buffers32 __user *up,
244 u32 *size)
234{ 245{
235 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || 246 if (!access_ok(VERIFY_READ, up, sizeof(*up)))
236 copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
237 return -EFAULT; 247 return -EFAULT;
238 return __get_v4l2_format32(&kp->format, &up->format); 248 return __bufsize_v4l2_format(&up->format, size);
239} 249}
240 250
241static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 251static int get_v4l2_create32(struct v4l2_create_buffers __user *kp,
252 struct v4l2_create_buffers32 __user *up,
253 void __user *aux_buf, u32 aux_space)
242{ 254{
243 if (put_user(kp->type, &up->type)) 255 if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
256 copy_in_user(kp, up,
257 offsetof(struct v4l2_create_buffers32, format)))
244 return -EFAULT; 258 return -EFAULT;
259 return __get_v4l2_format32(&kp->format, &up->format,
260 aux_buf, aux_space);
261}
262
263static int __put_v4l2_format32(struct v4l2_format __user *kp,
264 struct v4l2_format32 __user *up)
265{
266 u32 type;
245 267
246 switch (kp->type) { 268 if (get_user(type, &kp->type))
269 return -EFAULT;
270
271 switch (type) {
247 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 272 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
248 case V4L2_BUF_TYPE_VIDEO_OUTPUT: 273 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
249 return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); 274 return copy_in_user(&up->fmt.pix, &kp->fmt.pix,
275 sizeof(kp->fmt.pix)) ? -EFAULT : 0;
250 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: 276 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
251 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: 277 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
252 return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp, 278 return copy_in_user(&up->fmt.pix_mp, &kp->fmt.pix_mp,
253 &up->fmt.pix_mp); 279 sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0;
254 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 280 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
255 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: 281 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
256 return put_v4l2_window32(&kp->fmt.win, &up->fmt.win); 282 return put_v4l2_window32(&kp->fmt.win, &up->fmt.win);
257 case V4L2_BUF_TYPE_VBI_CAPTURE: 283 case V4L2_BUF_TYPE_VBI_CAPTURE:
258 case V4L2_BUF_TYPE_VBI_OUTPUT: 284 case V4L2_BUF_TYPE_VBI_OUTPUT:
259 return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); 285 return copy_in_user(&up->fmt.vbi, &kp->fmt.vbi,
286 sizeof(kp->fmt.vbi)) ? -EFAULT : 0;
260 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: 287 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
261 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: 288 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
262 return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced); 289 return copy_in_user(&up->fmt.sliced, &kp->fmt.sliced,
290 sizeof(kp->fmt.sliced)) ? -EFAULT : 0;
263 case V4L2_BUF_TYPE_SDR_CAPTURE: 291 case V4L2_BUF_TYPE_SDR_CAPTURE:
264 case V4L2_BUF_TYPE_SDR_OUTPUT: 292 case V4L2_BUF_TYPE_SDR_OUTPUT:
265 return put_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr); 293 return copy_in_user(&up->fmt.sdr, &kp->fmt.sdr,
294 sizeof(kp->fmt.sdr)) ? -EFAULT : 0;
266 default: 295 default:
267 pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
268 kp->type);
269 return -EINVAL; 296 return -EINVAL;
270 } 297 }
271} 298}
272 299
273static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 300static int put_v4l2_format32(struct v4l2_format __user *kp,
301 struct v4l2_format32 __user *up)
274{ 302{
275 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32))) 303 if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
276 return -EFAULT; 304 return -EFAULT;
277 return __put_v4l2_format32(kp, up); 305 return __put_v4l2_format32(kp, up);
278} 306}
279 307
280static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) 308static int put_v4l2_create32(struct v4l2_create_buffers __user *kp,
309 struct v4l2_create_buffers32 __user *up)
281{ 310{
282 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) || 311 if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
283 copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) || 312 copy_in_user(up, kp,
284 copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) 313 offsetof(struct v4l2_create_buffers32, format)) ||
314 copy_in_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
285 return -EFAULT; 315 return -EFAULT;
286 return __put_v4l2_format32(&kp->format, &up->format); 316 return __put_v4l2_format32(&kp->format, &up->format);
287} 317}
@@ -295,25 +325,28 @@ struct v4l2_standard32 {
295 __u32 reserved[4]; 325 __u32 reserved[4];
296}; 326};
297 327
298static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) 328static int get_v4l2_standard32(struct v4l2_standard __user *kp,
329 struct v4l2_standard32 __user *up)
299{ 330{
300 /* other fields are not set by the user, nor used by the driver */ 331 /* other fields are not set by the user, nor used by the driver */
301 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) || 332 if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
302 get_user(kp->index, &up->index)) 333 assign_in_user(&kp->index, &up->index))
303 return -EFAULT; 334 return -EFAULT;
304 return 0; 335 return 0;
305} 336}
306 337
307static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) 338static int put_v4l2_standard32(struct v4l2_standard __user *kp,
339 struct v4l2_standard32 __user *up)
308{ 340{
309 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) || 341 if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
310 put_user(kp->index, &up->index) || 342 assign_in_user(&up->index, &kp->index) ||
311 put_user(kp->id, &up->id) || 343 assign_in_user(&up->id, &kp->id) ||
312 copy_to_user(up->name, kp->name, 24) || 344 copy_in_user(up->name, kp->name, sizeof(up->name)) ||
313 copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) || 345 copy_in_user(&up->frameperiod, &kp->frameperiod,
314 put_user(kp->framelines, &up->framelines) || 346 sizeof(up->frameperiod)) ||
315 copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32))) 347 assign_in_user(&up->framelines, &kp->framelines) ||
316 return -EFAULT; 348 copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
349 return -EFAULT;
317 return 0; 350 return 0;
318} 351}
319 352
@@ -352,134 +385,186 @@ struct v4l2_buffer32 {
352 __u32 reserved; 385 __u32 reserved;
353}; 386};
354 387
355static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, 388static int get_v4l2_plane32(struct v4l2_plane __user *up,
356 enum v4l2_memory memory) 389 struct v4l2_plane32 __user *up32,
390 enum v4l2_memory memory)
357{ 391{
358 void __user *up_pln; 392 compat_ulong_t p;
359 compat_long_t p;
360 393
361 if (copy_in_user(up, up32, 2 * sizeof(__u32)) || 394 if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
362 copy_in_user(&up->data_offset, &up32->data_offset, 395 copy_in_user(&up->data_offset, &up32->data_offset,
363 sizeof(__u32))) 396 sizeof(up->data_offset)))
364 return -EFAULT; 397 return -EFAULT;
365 398
366 if (memory == V4L2_MEMORY_USERPTR) { 399 switch (memory) {
367 if (get_user(p, &up32->m.userptr)) 400 case V4L2_MEMORY_MMAP:
368 return -EFAULT; 401 case V4L2_MEMORY_OVERLAY:
369 up_pln = compat_ptr(p); 402 if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
370 if (put_user((unsigned long)up_pln, &up->m.userptr)) 403 sizeof(up32->m.mem_offset)))
371 return -EFAULT; 404 return -EFAULT;
372 } else if (memory == V4L2_MEMORY_DMABUF) { 405 break;
373 if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int))) 406 case V4L2_MEMORY_USERPTR:
407 if (get_user(p, &up32->m.userptr) ||
408 put_user((unsigned long)compat_ptr(p), &up->m.userptr))
374 return -EFAULT; 409 return -EFAULT;
375 } else { 410 break;
376 if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, 411 case V4L2_MEMORY_DMABUF:
377 sizeof(__u32))) 412 if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(up32->m.fd)))
378 return -EFAULT; 413 return -EFAULT;
414 break;
379 } 415 }
380 416
381 return 0; 417 return 0;
382} 418}
383 419
384static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, 420static int put_v4l2_plane32(struct v4l2_plane __user *up,
385 enum v4l2_memory memory) 421 struct v4l2_plane32 __user *up32,
422 enum v4l2_memory memory)
386{ 423{
424 unsigned long p;
425
387 if (copy_in_user(up32, up, 2 * sizeof(__u32)) || 426 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
388 copy_in_user(&up32->data_offset, &up->data_offset, 427 copy_in_user(&up32->data_offset, &up->data_offset,
389 sizeof(__u32))) 428 sizeof(up->data_offset)))
390 return -EFAULT; 429 return -EFAULT;
391 430
392 /* For MMAP, driver might've set up the offset, so copy it back. 431 switch (memory) {
393 * USERPTR stays the same (was userspace-provided), so no copying. */ 432 case V4L2_MEMORY_MMAP:
394 if (memory == V4L2_MEMORY_MMAP) 433 case V4L2_MEMORY_OVERLAY:
395 if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset, 434 if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
396 sizeof(__u32))) 435 sizeof(up->m.mem_offset)))
397 return -EFAULT; 436 return -EFAULT;
398 /* For DMABUF, driver might've set up the fd, so copy it back. */ 437 break;
399 if (memory == V4L2_MEMORY_DMABUF) 438 case V4L2_MEMORY_USERPTR:
400 if (copy_in_user(&up32->m.fd, &up->m.fd, 439 if (get_user(p, &up->m.userptr) ||
401 sizeof(int))) 440 put_user((compat_ulong_t)ptr_to_compat((__force void *)p),
441 &up32->m.userptr))
442 return -EFAULT;
443 break;
444 case V4L2_MEMORY_DMABUF:
445 if (copy_in_user(&up32->m.fd, &up->m.fd, sizeof(up->m.fd)))
402 return -EFAULT; 446 return -EFAULT;
447 break;
448 }
449
450 return 0;
451}
452
453static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *up, u32 *size)
454{
455 u32 type;
456 u32 length;
457
458 if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
459 get_user(type, &up->type) ||
460 get_user(length, &up->length))
461 return -EFAULT;
462
463 if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
464 if (length > VIDEO_MAX_PLANES)
465 return -EINVAL;
403 466
467 /*
468 * We don't really care if userspace decides to kill itself
469 * by passing a very big length value
470 */
471 *size = length * sizeof(struct v4l2_plane);
472 } else {
473 *size = 0;
474 }
404 return 0; 475 return 0;
405} 476}
406 477
407static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up) 478static int get_v4l2_buffer32(struct v4l2_buffer __user *kp,
479 struct v4l2_buffer32 __user *up,
480 void __user *aux_buf, u32 aux_space)
408{ 481{
482 u32 type;
483 u32 length;
484 enum v4l2_memory memory;
409 struct v4l2_plane32 __user *uplane32; 485 struct v4l2_plane32 __user *uplane32;
410 struct v4l2_plane __user *uplane; 486 struct v4l2_plane __user *uplane;
411 compat_caddr_t p; 487 compat_caddr_t p;
412 int num_planes;
413 int ret; 488 int ret;
414 489
415 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) || 490 if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
416 get_user(kp->index, &up->index) || 491 assign_in_user(&kp->index, &up->index) ||
417 get_user(kp->type, &up->type) || 492 get_user(type, &up->type) ||
418 get_user(kp->flags, &up->flags) || 493 put_user(type, &kp->type) ||
419 get_user(kp->memory, &up->memory) || 494 assign_in_user(&kp->flags, &up->flags) ||
420 get_user(kp->length, &up->length)) 495 get_user(memory, &up->memory) ||
421 return -EFAULT; 496 put_user(memory, &kp->memory) ||
497 get_user(length, &up->length) ||
498 put_user(length, &kp->length))
499 return -EFAULT;
422 500
423 if (V4L2_TYPE_IS_OUTPUT(kp->type)) 501 if (V4L2_TYPE_IS_OUTPUT(type))
424 if (get_user(kp->bytesused, &up->bytesused) || 502 if (assign_in_user(&kp->bytesused, &up->bytesused) ||
425 get_user(kp->field, &up->field) || 503 assign_in_user(&kp->field, &up->field) ||
426 get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || 504 assign_in_user(&kp->timestamp.tv_sec,
427 get_user(kp->timestamp.tv_usec, 505 &up->timestamp.tv_sec) ||
428 &up->timestamp.tv_usec)) 506 assign_in_user(&kp->timestamp.tv_usec,
507 &up->timestamp.tv_usec))
429 return -EFAULT; 508 return -EFAULT;
430 509
431 if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { 510 if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
432 num_planes = kp->length; 511 u32 num_planes = length;
512
433 if (num_planes == 0) { 513 if (num_planes == 0) {
434 kp->m.planes = NULL; 514 /*
435 /* num_planes == 0 is legal, e.g. when userspace doesn't 515 * num_planes == 0 is legal, e.g. when userspace doesn't
436 * need planes array on DQBUF*/ 516 * need planes array on DQBUF
437 return 0; 517 */
518 return put_user(NULL, &kp->m.planes);
438 } 519 }
520 if (num_planes > VIDEO_MAX_PLANES)
521 return -EINVAL;
439 522
440 if (get_user(p, &up->m.planes)) 523 if (get_user(p, &up->m.planes))
441 return -EFAULT; 524 return -EFAULT;
442 525
443 uplane32 = compat_ptr(p); 526 uplane32 = compat_ptr(p);
444 if (!access_ok(VERIFY_READ, uplane32, 527 if (!access_ok(VERIFY_READ, uplane32,
445 num_planes * sizeof(struct v4l2_plane32))) 528 num_planes * sizeof(*uplane32)))
446 return -EFAULT; 529 return -EFAULT;
447 530
448 /* We don't really care if userspace decides to kill itself 531 /*
449 * by passing a very big num_planes value */ 532 * We don't really care if userspace decides to kill itself
450 uplane = compat_alloc_user_space(num_planes * 533 * by passing a very big num_planes value
451 sizeof(struct v4l2_plane)); 534 */
452 kp->m.planes = (__force struct v4l2_plane *)uplane; 535 if (aux_space < num_planes * sizeof(*uplane))
536 return -EFAULT;
537
538 uplane = aux_buf;
539 if (put_user((__force struct v4l2_plane *)uplane,
540 &kp->m.planes))
541 return -EFAULT;
453 542
454 while (--num_planes >= 0) { 543 while (num_planes--) {
455 ret = get_v4l2_plane32(uplane, uplane32, kp->memory); 544 ret = get_v4l2_plane32(uplane, uplane32, memory);
456 if (ret) 545 if (ret)
457 return ret; 546 return ret;
458 ++uplane; 547 uplane++;
459 ++uplane32; 548 uplane32++;
460 } 549 }
461 } else { 550 } else {
462 switch (kp->memory) { 551 switch (memory) {
463 case V4L2_MEMORY_MMAP: 552 case V4L2_MEMORY_MMAP:
464 if (get_user(kp->m.offset, &up->m.offset)) 553 case V4L2_MEMORY_OVERLAY:
554 if (assign_in_user(&kp->m.offset, &up->m.offset))
465 return -EFAULT; 555 return -EFAULT;
466 break; 556 break;
467 case V4L2_MEMORY_USERPTR: 557 case V4L2_MEMORY_USERPTR: {
468 { 558 compat_ulong_t userptr;
469 compat_long_t tmp;
470 559
471 if (get_user(tmp, &up->m.userptr)) 560 if (get_user(userptr, &up->m.userptr) ||
472 return -EFAULT; 561 put_user((unsigned long)compat_ptr(userptr),
473 562 &kp->m.userptr))
474 kp->m.userptr = (unsigned long)compat_ptr(tmp);
475 }
476 break;
477 case V4L2_MEMORY_OVERLAY:
478 if (get_user(kp->m.offset, &up->m.offset))
479 return -EFAULT; 563 return -EFAULT;
480 break; 564 break;
565 }
481 case V4L2_MEMORY_DMABUF: 566 case V4L2_MEMORY_DMABUF:
482 if (get_user(kp->m.fd, &up->m.fd)) 567 if (assign_in_user(&kp->m.fd, &up->m.fd))
483 return -EFAULT; 568 return -EFAULT;
484 break; 569 break;
485 } 570 }
@@ -488,65 +573,70 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
488 return 0; 573 return 0;
489} 574}
490 575
491static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up) 576static int put_v4l2_buffer32(struct v4l2_buffer __user *kp,
577 struct v4l2_buffer32 __user *up)
492{ 578{
579 u32 type;
580 u32 length;
581 enum v4l2_memory memory;
493 struct v4l2_plane32 __user *uplane32; 582 struct v4l2_plane32 __user *uplane32;
494 struct v4l2_plane __user *uplane; 583 struct v4l2_plane __user *uplane;
495 compat_caddr_t p; 584 compat_caddr_t p;
496 int num_planes;
497 int ret; 585 int ret;
498 586
499 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) || 587 if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
500 put_user(kp->index, &up->index) || 588 assign_in_user(&up->index, &kp->index) ||
501 put_user(kp->type, &up->type) || 589 get_user(type, &kp->type) ||
502 put_user(kp->flags, &up->flags) || 590 put_user(type, &up->type) ||
503 put_user(kp->memory, &up->memory)) 591 assign_in_user(&up->flags, &kp->flags) ||
504 return -EFAULT; 592 get_user(memory, &kp->memory) ||
593 put_user(memory, &up->memory))
594 return -EFAULT;
505 595
506 if (put_user(kp->bytesused, &up->bytesused) || 596 if (assign_in_user(&up->bytesused, &kp->bytesused) ||
507 put_user(kp->field, &up->field) || 597 assign_in_user(&up->field, &kp->field) ||
508 put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || 598 assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) ||
509 put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) || 599 assign_in_user(&up->timestamp.tv_usec, &kp->timestamp.tv_usec) ||
510 copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) || 600 copy_in_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) ||
511 put_user(kp->sequence, &up->sequence) || 601 assign_in_user(&up->sequence, &kp->sequence) ||
512 put_user(kp->reserved2, &up->reserved2) || 602 assign_in_user(&up->reserved2, &kp->reserved2) ||
513 put_user(kp->reserved, &up->reserved) || 603 assign_in_user(&up->reserved, &kp->reserved) ||
514 put_user(kp->length, &up->length)) 604 get_user(length, &kp->length) ||
515 return -EFAULT; 605 put_user(length, &up->length))
606 return -EFAULT;
607
608 if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
609 u32 num_planes = length;
516 610
517 if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
518 num_planes = kp->length;
519 if (num_planes == 0) 611 if (num_planes == 0)
520 return 0; 612 return 0;
521 613
522 uplane = (__force struct v4l2_plane __user *)kp->m.planes; 614 if (get_user(uplane, ((__force struct v4l2_plane __user **)&kp->m.planes)))
615 return -EFAULT;
523 if (get_user(p, &up->m.planes)) 616 if (get_user(p, &up->m.planes))
524 return -EFAULT; 617 return -EFAULT;
525 uplane32 = compat_ptr(p); 618 uplane32 = compat_ptr(p);
526 619
527 while (--num_planes >= 0) { 620 while (num_planes--) {
528 ret = put_v4l2_plane32(uplane, uplane32, kp->memory); 621 ret = put_v4l2_plane32(uplane, uplane32, memory);
529 if (ret) 622 if (ret)
530 return ret; 623 return ret;
531 ++uplane; 624 ++uplane;
532 ++uplane32; 625 ++uplane32;
533 } 626 }
534 } else { 627 } else {
535 switch (kp->memory) { 628 switch (memory) {
536 case V4L2_MEMORY_MMAP: 629 case V4L2_MEMORY_MMAP:
537 if (put_user(kp->m.offset, &up->m.offset)) 630 case V4L2_MEMORY_OVERLAY:
631 if (assign_in_user(&up->m.offset, &kp->m.offset))
538 return -EFAULT; 632 return -EFAULT;
539 break; 633 break;
540 case V4L2_MEMORY_USERPTR: 634 case V4L2_MEMORY_USERPTR:
541 if (put_user(kp->m.userptr, &up->m.userptr)) 635 if (assign_in_user(&up->m.userptr, &kp->m.userptr))
542 return -EFAULT;
543 break;
544 case V4L2_MEMORY_OVERLAY:
545 if (put_user(kp->m.offset, &up->m.offset))
546 return -EFAULT; 636 return -EFAULT;
547 break; 637 break;
548 case V4L2_MEMORY_DMABUF: 638 case V4L2_MEMORY_DMABUF:
549 if (put_user(kp->m.fd, &up->m.fd)) 639 if (assign_in_user(&up->m.fd, &kp->m.fd))
550 return -EFAULT; 640 return -EFAULT;
551 break; 641 break;
552 } 642 }
@@ -558,7 +648,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
558struct v4l2_framebuffer32 { 648struct v4l2_framebuffer32 {
559 __u32 capability; 649 __u32 capability;
560 __u32 flags; 650 __u32 flags;
561 compat_caddr_t base; 651 compat_caddr_t base;
562 struct { 652 struct {
563 __u32 width; 653 __u32 width;
564 __u32 height; 654 __u32 height;
@@ -571,30 +661,33 @@ struct v4l2_framebuffer32 {
571 } fmt; 661 } fmt;
572}; 662};
573 663
574static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up) 664static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
665 struct v4l2_framebuffer32 __user *up)
575{ 666{
576 u32 tmp; 667 compat_caddr_t tmp;
577 668
578 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) || 669 if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
579 get_user(tmp, &up->base) || 670 get_user(tmp, &up->base) ||
580 get_user(kp->capability, &up->capability) || 671 put_user((__force void *)compat_ptr(tmp), &kp->base) ||
581 get_user(kp->flags, &up->flags) || 672 assign_in_user(&kp->capability, &up->capability) ||
582 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt))) 673 assign_in_user(&kp->flags, &up->flags) ||
583 return -EFAULT; 674 copy_in_user(&kp->fmt, &up->fmt, sizeof(kp->fmt)))
584 kp->base = (__force void *)compat_ptr(tmp); 675 return -EFAULT;
585 return 0; 676 return 0;
586} 677}
587 678
588static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up) 679static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
680 struct v4l2_framebuffer32 __user *up)
589{ 681{
590 u32 tmp = (u32)((unsigned long)kp->base); 682 void *base;
591 683
592 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) || 684 if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
593 put_user(tmp, &up->base) || 685 get_user(base, &kp->base) ||
594 put_user(kp->capability, &up->capability) || 686 put_user(ptr_to_compat(base), &up->base) ||
595 put_user(kp->flags, &up->flags) || 687 assign_in_user(&up->capability, &kp->capability) ||
596 copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt))) 688 assign_in_user(&up->flags, &kp->flags) ||
597 return -EFAULT; 689 copy_in_user(&up->fmt, &kp->fmt, sizeof(kp->fmt)))
690 return -EFAULT;
598 return 0; 691 return 0;
599} 692}
600 693
@@ -606,21 +699,26 @@ struct v4l2_input32 {
606 __u32 tuner; /* Associated tuner */ 699 __u32 tuner; /* Associated tuner */
607 compat_u64 std; 700 compat_u64 std;
608 __u32 status; 701 __u32 status;
609 __u32 reserved[4]; 702 __u32 capabilities;
703 __u32 reserved[3];
610}; 704};
611 705
612/* The 64-bit v4l2_input struct has extra padding at the end of the struct. 706/*
613 Otherwise it is identical to the 32-bit version. */ 707 * The 64-bit v4l2_input struct has extra padding at the end of the struct.
614static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) 708 * Otherwise it is identical to the 32-bit version.
709 */
710static inline int get_v4l2_input32(struct v4l2_input __user *kp,
711 struct v4l2_input32 __user *up)
615{ 712{
616 if (copy_from_user(kp, up, sizeof(struct v4l2_input32))) 713 if (copy_in_user(kp, up, sizeof(*up)))
617 return -EFAULT; 714 return -EFAULT;
618 return 0; 715 return 0;
619} 716}
620 717
621static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) 718static inline int put_v4l2_input32(struct v4l2_input __user *kp,
719 struct v4l2_input32 __user *up)
622{ 720{
623 if (copy_to_user(up, kp, sizeof(struct v4l2_input32))) 721 if (copy_in_user(up, kp, sizeof(*up)))
624 return -EFAULT; 722 return -EFAULT;
625 return 0; 723 return 0;
626} 724}
@@ -644,58 +742,95 @@ struct v4l2_ext_control32 {
644 }; 742 };
645} __attribute__ ((packed)); 743} __attribute__ ((packed));
646 744
647/* The following function really belong in v4l2-common, but that causes 745/* Return true if this control is a pointer type. */
648 a circular dependency between modules. We need to think about this, but 746static inline bool ctrl_is_pointer(struct file *file, u32 id)
649 for now this will do. */
650
651/* Return non-zero if this control is a pointer type. Currently only
652 type STRING is a pointer type. */
653static inline int ctrl_is_pointer(u32 id)
654{ 747{
655 switch (id) { 748 struct video_device *vdev = video_devdata(file);
656 case V4L2_CID_RDS_TX_PS_NAME: 749 struct v4l2_fh *fh = NULL;
657 case V4L2_CID_RDS_TX_RADIO_TEXT: 750 struct v4l2_ctrl_handler *hdl = NULL;
658 return 1; 751 struct v4l2_query_ext_ctrl qec = { id };
659 default: 752 const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
660 return 0; 753
754 if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
755 fh = file->private_data;
756
757 if (fh && fh->ctrl_handler)
758 hdl = fh->ctrl_handler;
759 else if (vdev->ctrl_handler)
760 hdl = vdev->ctrl_handler;
761
762 if (hdl) {
763 struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id);
764
765 return ctrl && ctrl->is_ptr;
661 } 766 }
767
768 if (!ops || !ops->vidioc_query_ext_ctrl)
769 return false;
770
771 return !ops->vidioc_query_ext_ctrl(file, fh, &qec) &&
772 (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD);
773}
774
775static int bufsize_v4l2_ext_controls(struct v4l2_ext_controls32 __user *up,
776 u32 *size)
777{
778 u32 count;
779
780 if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
781 get_user(count, &up->count))
782 return -EFAULT;
783 if (count > V4L2_CID_MAX_CTRLS)
784 return -EINVAL;
785 *size = count * sizeof(struct v4l2_ext_control);
786 return 0;
662} 787}
663 788
664static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) 789static int get_v4l2_ext_controls32(struct file *file,
790 struct v4l2_ext_controls __user *kp,
791 struct v4l2_ext_controls32 __user *up,
792 void __user *aux_buf, u32 aux_space)
665{ 793{
666 struct v4l2_ext_control32 __user *ucontrols; 794 struct v4l2_ext_control32 __user *ucontrols;
667 struct v4l2_ext_control __user *kcontrols; 795 struct v4l2_ext_control __user *kcontrols;
668 int n; 796 u32 count;
797 u32 n;
669 compat_caddr_t p; 798 compat_caddr_t p;
670 799
671 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) || 800 if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
672 get_user(kp->ctrl_class, &up->ctrl_class) || 801 assign_in_user(&kp->ctrl_class, &up->ctrl_class) ||
673 get_user(kp->count, &up->count) || 802 get_user(count, &up->count) ||
674 get_user(kp->error_idx, &up->error_idx) || 803 put_user(count, &kp->count) ||
675 copy_from_user(kp->reserved, up->reserved, 804 assign_in_user(&kp->error_idx, &up->error_idx) ||
676 sizeof(kp->reserved))) 805 copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
677 return -EFAULT; 806 return -EFAULT;
678 n = kp->count; 807
679 if (n == 0) { 808 if (count == 0)
680 kp->controls = NULL; 809 return put_user(NULL, &kp->controls);
681 return 0; 810 if (count > V4L2_CID_MAX_CTRLS)
682 } 811 return -EINVAL;
683 if (get_user(p, &up->controls)) 812 if (get_user(p, &up->controls))
684 return -EFAULT; 813 return -EFAULT;
685 ucontrols = compat_ptr(p); 814 ucontrols = compat_ptr(p);
686 if (!access_ok(VERIFY_READ, ucontrols, 815 if (!access_ok(VERIFY_READ, ucontrols, count * sizeof(*ucontrols)))
687 n * sizeof(struct v4l2_ext_control32))) 816 return -EFAULT;
817 if (aux_space < count * sizeof(*kcontrols))
688 return -EFAULT; 818 return -EFAULT;
689 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control)); 819 kcontrols = aux_buf;
690 kp->controls = (__force struct v4l2_ext_control *)kcontrols; 820 if (put_user((__force struct v4l2_ext_control *)kcontrols,
691 while (--n >= 0) { 821 &kp->controls))
822 return -EFAULT;
823
824 for (n = 0; n < count; n++) {
692 u32 id; 825 u32 id;
693 826
694 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols))) 827 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
695 return -EFAULT; 828 return -EFAULT;
829
696 if (get_user(id, &kcontrols->id)) 830 if (get_user(id, &kcontrols->id))
697 return -EFAULT; 831 return -EFAULT;
698 if (ctrl_is_pointer(id)) { 832
833 if (ctrl_is_pointer(file, id)) {
699 void __user *s; 834 void __user *s;
700 835
701 if (get_user(p, &ucontrols->string)) 836 if (get_user(p, &ucontrols->string))
@@ -710,43 +845,55 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
710 return 0; 845 return 0;
711} 846}
712 847
713static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) 848static int put_v4l2_ext_controls32(struct file *file,
849 struct v4l2_ext_controls __user *kp,
850 struct v4l2_ext_controls32 __user *up)
714{ 851{
715 struct v4l2_ext_control32 __user *ucontrols; 852 struct v4l2_ext_control32 __user *ucontrols;
716 struct v4l2_ext_control __user *kcontrols = 853 struct v4l2_ext_control __user *kcontrols;
717 (__force struct v4l2_ext_control __user *)kp->controls; 854 u32 count;
718 int n = kp->count; 855 u32 n;
719 compat_caddr_t p; 856 compat_caddr_t p;
720 857
721 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) || 858 if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
722 put_user(kp->ctrl_class, &up->ctrl_class) || 859 assign_in_user(&up->ctrl_class, &kp->ctrl_class) ||
723 put_user(kp->count, &up->count) || 860 get_user(count, &kp->count) ||
724 put_user(kp->error_idx, &up->error_idx) || 861 put_user(count, &up->count) ||
725 copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) 862 assign_in_user(&up->error_idx, &kp->error_idx) ||
726 return -EFAULT; 863 copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)) ||
727 if (!kp->count) 864 get_user(kcontrols, &kp->controls))
728 return 0; 865 return -EFAULT;
729 866
867 if (!count || count > (U32_MAX/sizeof(*ucontrols)))
868 return 0;
730 if (get_user(p, &up->controls)) 869 if (get_user(p, &up->controls))
731 return -EFAULT; 870 return -EFAULT;
732 ucontrols = compat_ptr(p); 871 ucontrols = compat_ptr(p);
733 if (!access_ok(VERIFY_WRITE, ucontrols, 872 if (!access_ok(VERIFY_WRITE, ucontrols, count * sizeof(*ucontrols)))
734 n * sizeof(struct v4l2_ext_control32)))
735 return -EFAULT; 873 return -EFAULT;
736 874
737 while (--n >= 0) { 875 for (n = 0; n < count; n++) {
738 unsigned size = sizeof(*ucontrols); 876 unsigned int size = sizeof(*ucontrols);
739 u32 id; 877 u32 id;
740 878
741 if (get_user(id, &kcontrols->id)) 879 if (get_user(id, &kcontrols->id) ||
880 put_user(id, &ucontrols->id) ||
881 assign_in_user(&ucontrols->size, &kcontrols->size) ||
882 copy_in_user(&ucontrols->reserved2, &kcontrols->reserved2,
883 sizeof(ucontrols->reserved2)))
742 return -EFAULT; 884 return -EFAULT;
743 /* Do not modify the pointer when copying a pointer control. 885
744 The contents of the pointer was changed, not the pointer 886 /*
745 itself. */ 887 * Do not modify the pointer when copying a pointer control.
746 if (ctrl_is_pointer(id)) 888 * The contents of the pointer was changed, not the pointer
889 * itself.
890 */
891 if (ctrl_is_pointer(file, id))
747 size -= sizeof(ucontrols->value64); 892 size -= sizeof(ucontrols->value64);
893
748 if (copy_in_user(ucontrols, kcontrols, size)) 894 if (copy_in_user(ucontrols, kcontrols, size))
749 return -EFAULT; 895 return -EFAULT;
896
750 ucontrols++; 897 ucontrols++;
751 kcontrols++; 898 kcontrols++;
752 } 899 }
@@ -766,18 +913,19 @@ struct v4l2_event32 {
766 __u32 reserved[8]; 913 __u32 reserved[8];
767}; 914};
768 915
769static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up) 916static int put_v4l2_event32(struct v4l2_event __user *kp,
917 struct v4l2_event32 __user *up)
770{ 918{
771 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) || 919 if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
772 put_user(kp->type, &up->type) || 920 assign_in_user(&up->type, &kp->type) ||
773 copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || 921 copy_in_user(&up->u, &kp->u, sizeof(kp->u)) ||
774 put_user(kp->pending, &up->pending) || 922 assign_in_user(&up->pending, &kp->pending) ||
775 put_user(kp->sequence, &up->sequence) || 923 assign_in_user(&up->sequence, &kp->sequence) ||
776 put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || 924 assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) ||
777 put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) || 925 assign_in_user(&up->timestamp.tv_nsec, &kp->timestamp.tv_nsec) ||
778 put_user(kp->id, &up->id) || 926 assign_in_user(&up->id, &kp->id) ||
779 copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32))) 927 copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
780 return -EFAULT; 928 return -EFAULT;
781 return 0; 929 return 0;
782} 930}
783 931
@@ -789,32 +937,35 @@ struct v4l2_edid32 {
789 compat_caddr_t edid; 937 compat_caddr_t edid;
790}; 938};
791 939
792static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) 940static int get_v4l2_edid32(struct v4l2_edid __user *kp,
941 struct v4l2_edid32 __user *up)
793{ 942{
794 u32 tmp; 943 compat_uptr_t tmp;
795 944
796 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) || 945 if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
797 get_user(kp->pad, &up->pad) || 946 assign_in_user(&kp->pad, &up->pad) ||
798 get_user(kp->start_block, &up->start_block) || 947 assign_in_user(&kp->start_block, &up->start_block) ||
799 get_user(kp->blocks, &up->blocks) || 948 assign_in_user(&kp->blocks, &up->blocks) ||
800 get_user(tmp, &up->edid) || 949 get_user(tmp, &up->edid) ||
801 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved))) 950 put_user(compat_ptr(tmp), &kp->edid) ||
802 return -EFAULT; 951 copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
803 kp->edid = (__force u8 *)compat_ptr(tmp); 952 return -EFAULT;
804 return 0; 953 return 0;
805} 954}
806 955
807static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) 956static int put_v4l2_edid32(struct v4l2_edid __user *kp,
957 struct v4l2_edid32 __user *up)
808{ 958{
809 u32 tmp = (u32)((unsigned long)kp->edid); 959 void *edid;
810 960
811 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) || 961 if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
812 put_user(kp->pad, &up->pad) || 962 assign_in_user(&up->pad, &kp->pad) ||
813 put_user(kp->start_block, &up->start_block) || 963 assign_in_user(&up->start_block, &kp->start_block) ||
814 put_user(kp->blocks, &up->blocks) || 964 assign_in_user(&up->blocks, &kp->blocks) ||
815 put_user(tmp, &up->edid) || 965 get_user(edid, &kp->edid) ||
816 copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) 966 put_user(ptr_to_compat(edid), &up->edid) ||
817 return -EFAULT; 967 copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
968 return -EFAULT;
818 return 0; 969 return 0;
819} 970}
820 971
@@ -830,7 +981,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
830#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32) 981#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
831#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32) 982#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32)
832#define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32) 983#define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32)
833#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32) 984#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
834#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32) 985#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
835#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32) 986#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
836#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32) 987#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32)
@@ -846,22 +997,23 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
846#define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32) 997#define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32)
847#define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32) 998#define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32)
848 999
1000static int alloc_userspace(unsigned int size, u32 aux_space,
1001 void __user **up_native)
1002{
1003 *up_native = compat_alloc_user_space(size + aux_space);
1004 if (!*up_native)
1005 return -ENOMEM;
1006 if (clear_user(*up_native, size))
1007 return -EFAULT;
1008 return 0;
1009}
1010
849static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1011static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
850{ 1012{
851 union {
852 struct v4l2_format v2f;
853 struct v4l2_buffer v2b;
854 struct v4l2_framebuffer v2fb;
855 struct v4l2_input v2i;
856 struct v4l2_standard v2s;
857 struct v4l2_ext_controls v2ecs;
858 struct v4l2_event v2ev;
859 struct v4l2_create_buffers v2crt;
860 struct v4l2_edid v2edid;
861 unsigned long vx;
862 int vi;
863 } karg;
864 void __user *up = compat_ptr(arg); 1013 void __user *up = compat_ptr(arg);
1014 void __user *up_native = NULL;
1015 void __user *aux_buf;
1016 u32 aux_space;
865 int compatible_arg = 1; 1017 int compatible_arg = 1;
866 long err = 0; 1018 long err = 0;
867 1019
@@ -900,30 +1052,52 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
900 case VIDIOC_STREAMOFF: 1052 case VIDIOC_STREAMOFF:
901 case VIDIOC_S_INPUT: 1053 case VIDIOC_S_INPUT:
902 case VIDIOC_S_OUTPUT: 1054 case VIDIOC_S_OUTPUT:
903 err = get_user(karg.vi, (s32 __user *)up); 1055 err = alloc_userspace(sizeof(unsigned int), 0, &up_native);
1056 if (!err && assign_in_user((unsigned int __user *)up_native,
1057 (compat_uint_t __user *)up))
1058 err = -EFAULT;
904 compatible_arg = 0; 1059 compatible_arg = 0;
905 break; 1060 break;
906 1061
907 case VIDIOC_G_INPUT: 1062 case VIDIOC_G_INPUT:
908 case VIDIOC_G_OUTPUT: 1063 case VIDIOC_G_OUTPUT:
1064 err = alloc_userspace(sizeof(unsigned int), 0, &up_native);
909 compatible_arg = 0; 1065 compatible_arg = 0;
910 break; 1066 break;
911 1067
912 case VIDIOC_G_EDID: 1068 case VIDIOC_G_EDID:
913 case VIDIOC_S_EDID: 1069 case VIDIOC_S_EDID:
914 err = get_v4l2_edid32(&karg.v2edid, up); 1070 err = alloc_userspace(sizeof(struct v4l2_edid), 0, &up_native);
1071 if (!err)
1072 err = get_v4l2_edid32(up_native, up);
915 compatible_arg = 0; 1073 compatible_arg = 0;
916 break; 1074 break;
917 1075
918 case VIDIOC_G_FMT: 1076 case VIDIOC_G_FMT:
919 case VIDIOC_S_FMT: 1077 case VIDIOC_S_FMT:
920 case VIDIOC_TRY_FMT: 1078 case VIDIOC_TRY_FMT:
921 err = get_v4l2_format32(&karg.v2f, up); 1079 err = bufsize_v4l2_format(up, &aux_space);
1080 if (!err)
1081 err = alloc_userspace(sizeof(struct v4l2_format),
1082 aux_space, &up_native);
1083 if (!err) {
1084 aux_buf = up_native + sizeof(struct v4l2_format);
1085 err = get_v4l2_format32(up_native, up,
1086 aux_buf, aux_space);
1087 }
922 compatible_arg = 0; 1088 compatible_arg = 0;
923 break; 1089 break;
924 1090
925 case VIDIOC_CREATE_BUFS: 1091 case VIDIOC_CREATE_BUFS:
926 err = get_v4l2_create32(&karg.v2crt, up); 1092 err = bufsize_v4l2_create(up, &aux_space);
1093 if (!err)
1094 err = alloc_userspace(sizeof(struct v4l2_create_buffers),
1095 aux_space, &up_native);
1096 if (!err) {
1097 aux_buf = up_native + sizeof(struct v4l2_create_buffers);
1098 err = get_v4l2_create32(up_native, up,
1099 aux_buf, aux_space);
1100 }
927 compatible_arg = 0; 1101 compatible_arg = 0;
928 break; 1102 break;
929 1103
@@ -931,36 +1105,63 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
931 case VIDIOC_QUERYBUF: 1105 case VIDIOC_QUERYBUF:
932 case VIDIOC_QBUF: 1106 case VIDIOC_QBUF:
933 case VIDIOC_DQBUF: 1107 case VIDIOC_DQBUF:
934 err = get_v4l2_buffer32(&karg.v2b, up); 1108 err = bufsize_v4l2_buffer(up, &aux_space);
1109 if (!err)
1110 err = alloc_userspace(sizeof(struct v4l2_buffer),
1111 aux_space, &up_native);
1112 if (!err) {
1113 aux_buf = up_native + sizeof(struct v4l2_buffer);
1114 err = get_v4l2_buffer32(up_native, up,
1115 aux_buf, aux_space);
1116 }
935 compatible_arg = 0; 1117 compatible_arg = 0;
936 break; 1118 break;
937 1119
938 case VIDIOC_S_FBUF: 1120 case VIDIOC_S_FBUF:
939 err = get_v4l2_framebuffer32(&karg.v2fb, up); 1121 err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
1122 &up_native);
1123 if (!err)
1124 err = get_v4l2_framebuffer32(up_native, up);
940 compatible_arg = 0; 1125 compatible_arg = 0;
941 break; 1126 break;
942 1127
943 case VIDIOC_G_FBUF: 1128 case VIDIOC_G_FBUF:
1129 err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
1130 &up_native);
944 compatible_arg = 0; 1131 compatible_arg = 0;
945 break; 1132 break;
946 1133
947 case VIDIOC_ENUMSTD: 1134 case VIDIOC_ENUMSTD:
948 err = get_v4l2_standard32(&karg.v2s, up); 1135 err = alloc_userspace(sizeof(struct v4l2_standard), 0,
1136 &up_native);
1137 if (!err)
1138 err = get_v4l2_standard32(up_native, up);
949 compatible_arg = 0; 1139 compatible_arg = 0;
950 break; 1140 break;
951 1141
952 case VIDIOC_ENUMINPUT: 1142 case VIDIOC_ENUMINPUT:
953 err = get_v4l2_input32(&karg.v2i, up); 1143 err = alloc_userspace(sizeof(struct v4l2_input), 0, &up_native);
1144 if (!err)
1145 err = get_v4l2_input32(up_native, up);
954 compatible_arg = 0; 1146 compatible_arg = 0;
955 break; 1147 break;
956 1148
957 case VIDIOC_G_EXT_CTRLS: 1149 case VIDIOC_G_EXT_CTRLS:
958 case VIDIOC_S_EXT_CTRLS: 1150 case VIDIOC_S_EXT_CTRLS:
959 case VIDIOC_TRY_EXT_CTRLS: 1151 case VIDIOC_TRY_EXT_CTRLS:
960 err = get_v4l2_ext_controls32(&karg.v2ecs, up); 1152 err = bufsize_v4l2_ext_controls(up, &aux_space);
1153 if (!err)
1154 err = alloc_userspace(sizeof(struct v4l2_ext_controls),
1155 aux_space, &up_native);
1156 if (!err) {
1157 aux_buf = up_native + sizeof(struct v4l2_ext_controls);
1158 err = get_v4l2_ext_controls32(file, up_native, up,
1159 aux_buf, aux_space);
1160 }
961 compatible_arg = 0; 1161 compatible_arg = 0;
962 break; 1162 break;
963 case VIDIOC_DQEVENT: 1163 case VIDIOC_DQEVENT:
1164 err = alloc_userspace(sizeof(struct v4l2_event), 0, &up_native);
964 compatible_arg = 0; 1165 compatible_arg = 0;
965 break; 1166 break;
966 } 1167 }
@@ -969,22 +1170,26 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
969 1170
970 if (compatible_arg) 1171 if (compatible_arg)
971 err = native_ioctl(file, cmd, (unsigned long)up); 1172 err = native_ioctl(file, cmd, (unsigned long)up);
972 else { 1173 else
973 mm_segment_t old_fs = get_fs(); 1174 err = native_ioctl(file, cmd, (unsigned long)up_native);
974 1175
975 set_fs(KERNEL_DS); 1176 if (err == -ENOTTY)
976 err = native_ioctl(file, cmd, (unsigned long)&karg); 1177 return err;
977 set_fs(old_fs);
978 }
979 1178
980 /* Special case: even after an error we need to put the 1179 /*
981 results back for these ioctls since the error_idx will 1180 * Special case: even after an error we need to put the
982 contain information on which control failed. */ 1181 * results back for these ioctls since the error_idx will
1182 * contain information on which control failed.
1183 */
983 switch (cmd) { 1184 switch (cmd) {
984 case VIDIOC_G_EXT_CTRLS: 1185 case VIDIOC_G_EXT_CTRLS:
985 case VIDIOC_S_EXT_CTRLS: 1186 case VIDIOC_S_EXT_CTRLS:
986 case VIDIOC_TRY_EXT_CTRLS: 1187 case VIDIOC_TRY_EXT_CTRLS:
987 if (put_v4l2_ext_controls32(&karg.v2ecs, up)) 1188 if (put_v4l2_ext_controls32(file, up_native, up))
1189 err = -EFAULT;
1190 break;
1191 case VIDIOC_S_EDID:
1192 if (put_v4l2_edid32(up_native, up))
988 err = -EFAULT; 1193 err = -EFAULT;
989 break; 1194 break;
990 } 1195 }
@@ -996,44 +1201,46 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
996 case VIDIOC_S_OUTPUT: 1201 case VIDIOC_S_OUTPUT:
997 case VIDIOC_G_INPUT: 1202 case VIDIOC_G_INPUT:
998 case VIDIOC_G_OUTPUT: 1203 case VIDIOC_G_OUTPUT:
999 err = put_user(((s32)karg.vi), (s32 __user *)up); 1204 if (assign_in_user((compat_uint_t __user *)up,
1205 ((unsigned int __user *)up_native)))
1206 err = -EFAULT;
1000 break; 1207 break;
1001 1208
1002 case VIDIOC_G_FBUF: 1209 case VIDIOC_G_FBUF:
1003 err = put_v4l2_framebuffer32(&karg.v2fb, up); 1210 err = put_v4l2_framebuffer32(up_native, up);
1004 break; 1211 break;
1005 1212
1006 case VIDIOC_DQEVENT: 1213 case VIDIOC_DQEVENT:
1007 err = put_v4l2_event32(&karg.v2ev, up); 1214 err = put_v4l2_event32(up_native, up);
1008 break; 1215 break;
1009 1216
1010 case VIDIOC_G_EDID: 1217 case VIDIOC_G_EDID:
1011 case VIDIOC_S_EDID: 1218 err = put_v4l2_edid32(up_native, up);
1012 err = put_v4l2_edid32(&karg.v2edid, up);
1013 break; 1219 break;
1014 1220
1015 case VIDIOC_G_FMT: 1221 case VIDIOC_G_FMT:
1016 case VIDIOC_S_FMT: 1222 case VIDIOC_S_FMT:
1017 case VIDIOC_TRY_FMT: 1223 case VIDIOC_TRY_FMT:
1018 err = put_v4l2_format32(&karg.v2f, up); 1224 err = put_v4l2_format32(up_native, up);
1019 break; 1225 break;
1020 1226
1021 case VIDIOC_CREATE_BUFS: 1227 case VIDIOC_CREATE_BUFS:
1022 err = put_v4l2_create32(&karg.v2crt, up); 1228 err = put_v4l2_create32(up_native, up);
1023 break; 1229 break;
1024 1230
1231 case VIDIOC_PREPARE_BUF:
1025 case VIDIOC_QUERYBUF: 1232 case VIDIOC_QUERYBUF:
1026 case VIDIOC_QBUF: 1233 case VIDIOC_QBUF:
1027 case VIDIOC_DQBUF: 1234 case VIDIOC_DQBUF:
1028 err = put_v4l2_buffer32(&karg.v2b, up); 1235 err = put_v4l2_buffer32(up_native, up);
1029 break; 1236 break;
1030 1237
1031 case VIDIOC_ENUMSTD: 1238 case VIDIOC_ENUMSTD:
1032 err = put_v4l2_standard32(&karg.v2s, up); 1239 err = put_v4l2_standard32(up_native, up);
1033 break; 1240 break;
1034 1241
1035 case VIDIOC_ENUMINPUT: 1242 case VIDIOC_ENUMINPUT:
1036 err = put_v4l2_input32(&karg.v2i, up); 1243 err = put_v4l2_input32(up_native, up);
1037 break; 1244 break;
1038 } 1245 }
1039 return err; 1246 return err;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 7486af2c8ae4..5e2a7e59f578 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -2783,8 +2783,11 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
2783 2783
2784 /* Handles IOCTL */ 2784 /* Handles IOCTL */
2785 err = func(file, cmd, parg); 2785 err = func(file, cmd, parg);
2786 if (err == -ENOIOCTLCMD) 2786 if (err == -ENOTTY || err == -ENOIOCTLCMD) {
2787 err = -ENOTTY; 2787 err = -ENOTTY;
2788 goto out;
2789 }
2790
2788 if (err == 0) { 2791 if (err == 0) {
2789 if (cmd == VIDIOC_DQBUF) 2792 if (cmd == VIDIOC_DQBUF)
2790 trace_v4l2_dqbuf(video_devdata(file)->minor, parg); 2793 trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 3dc9ed2e0774..0c1a42bf27fd 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -205,6 +205,10 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
205 struct vb2_buffer *vb; 205 struct vb2_buffer *vb;
206 int ret; 206 int ret;
207 207
208 /* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */
209 num_buffers = min_t(unsigned int, num_buffers,
210 VB2_MAX_FRAME - q->num_buffers);
211
208 for (buffer = 0; buffer < num_buffers; ++buffer) { 212 for (buffer = 0; buffer < num_buffers; ++buffer) {
209 /* Allocate videobuf buffer structures */ 213 /* Allocate videobuf buffer structures */
210 vb = kzalloc(q->buf_struct_size, GFP_KERNEL); 214 vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
@@ -866,9 +870,12 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
866 dprintk(4, "done processing on buffer %d, state: %d\n", 870 dprintk(4, "done processing on buffer %d, state: %d\n",
867 vb->index, state); 871 vb->index, state);
868 872
869 /* sync buffers */ 873 if (state != VB2_BUF_STATE_QUEUED &&
870 for (plane = 0; plane < vb->num_planes; ++plane) 874 state != VB2_BUF_STATE_REQUEUEING) {
871 call_void_memop(vb, finish, vb->planes[plane].mem_priv); 875 /* sync buffers */
876 for (plane = 0; plane < vb->num_planes; ++plane)
877 call_void_memop(vb, finish, vb->planes[plane].mem_priv);
878 }
872 879
873 spin_lock_irqsave(&q->done_lock, flags); 880 spin_lock_irqsave(&q->done_lock, flags);
874 if (state == VB2_BUF_STATE_QUEUED || 881 if (state == VB2_BUF_STATE_QUEUED ||
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 23886e8fbfd8..df966509ebb2 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -594,6 +594,12 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b,
594 b->flags & V4L2_BUF_FLAG_LAST) 594 b->flags & V4L2_BUF_FLAG_LAST)
595 q->last_buffer_dequeued = true; 595 q->last_buffer_dequeued = true;
596 596
597 /*
598 * After calling the VIDIOC_DQBUF V4L2_BUF_FLAG_DONE must be
599 * cleared.
600 */
601 b->flags &= ~V4L2_BUF_FLAG_DONE;
602
597 return ret; 603 return ret;
598} 604}
599 605
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index a1ae0cc2b86d..6ab481ee8ece 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -20,14 +20,6 @@
20#include "mc.h" 20#include "mc.h"
21 21
22#define MC_INTSTATUS 0x000 22#define MC_INTSTATUS 0x000
23#define MC_INT_DECERR_MTS (1 << 16)
24#define MC_INT_SECERR_SEC (1 << 13)
25#define MC_INT_DECERR_VPR (1 << 12)
26#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11)
27#define MC_INT_INVALID_SMMU_PAGE (1 << 10)
28#define MC_INT_ARBITRATION_EMEM (1 << 9)
29#define MC_INT_SECURITY_VIOLATION (1 << 8)
30#define MC_INT_DECERR_EMEM (1 << 6)
31 23
32#define MC_INTMASK 0x004 24#define MC_INTMASK 0x004
33 25
@@ -248,12 +240,13 @@ static const char *const error_names[8] = {
248static irqreturn_t tegra_mc_irq(int irq, void *data) 240static irqreturn_t tegra_mc_irq(int irq, void *data)
249{ 241{
250 struct tegra_mc *mc = data; 242 struct tegra_mc *mc = data;
251 unsigned long status, mask; 243 unsigned long status;
252 unsigned int bit; 244 unsigned int bit;
253 245
254 /* mask all interrupts to avoid flooding */ 246 /* mask all interrupts to avoid flooding */
255 status = mc_readl(mc, MC_INTSTATUS); 247 status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
256 mask = mc_readl(mc, MC_INTMASK); 248 if (!status)
249 return IRQ_NONE;
257 250
258 for_each_set_bit(bit, &status, 32) { 251 for_each_set_bit(bit, &status, 32) {
259 const char *error = status_names[bit] ?: "unknown"; 252 const char *error = status_names[bit] ?: "unknown";
@@ -346,7 +339,6 @@ static int tegra_mc_probe(struct platform_device *pdev)
346 const struct of_device_id *match; 339 const struct of_device_id *match;
347 struct resource *res; 340 struct resource *res;
348 struct tegra_mc *mc; 341 struct tegra_mc *mc;
349 u32 value;
350 int err; 342 int err;
351 343
352 match = of_match_node(tegra_mc_of_match, pdev->dev.of_node); 344 match = of_match_node(tegra_mc_of_match, pdev->dev.of_node);
@@ -414,11 +406,7 @@ static int tegra_mc_probe(struct platform_device *pdev)
414 406
415 WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n"); 407 WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n");
416 408
417 value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | 409 mc_writel(mc, mc->soc->intmask, MC_INTMASK);
418 MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
419 MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM;
420
421 mc_writel(mc, value, MC_INTMASK);
422 410
423 return 0; 411 return 0;
424} 412}
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index ddb16676c3af..24e020b4609b 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -14,6 +14,15 @@
14 14
15#include <soc/tegra/mc.h> 15#include <soc/tegra/mc.h>
16 16
17#define MC_INT_DECERR_MTS (1 << 16)
18#define MC_INT_SECERR_SEC (1 << 13)
19#define MC_INT_DECERR_VPR (1 << 12)
20#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11)
21#define MC_INT_INVALID_SMMU_PAGE (1 << 10)
22#define MC_INT_ARBITRATION_EMEM (1 << 9)
23#define MC_INT_SECURITY_VIOLATION (1 << 8)
24#define MC_INT_DECERR_EMEM (1 << 6)
25
17static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset) 26static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
18{ 27{
19 return readl(mc->regs + offset); 28 return readl(mc->regs + offset);
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index ba8fff3d66a6..6d2a5a849d92 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -930,4 +930,6 @@ const struct tegra_mc_soc tegra114_mc_soc = {
930 .atom_size = 32, 930 .atom_size = 32,
931 .client_id_mask = 0x7f, 931 .client_id_mask = 0x7f,
932 .smmu = &tegra114_smmu_soc, 932 .smmu = &tegra114_smmu_soc,
933 .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
934 MC_INT_DECERR_EMEM,
933}; 935};
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 21e7255e3d96..234e74f97a4b 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -1019,6 +1019,9 @@ const struct tegra_mc_soc tegra124_mc_soc = {
1019 .smmu = &tegra124_smmu_soc, 1019 .smmu = &tegra124_smmu_soc,
1020 .emem_regs = tegra124_mc_emem_regs, 1020 .emem_regs = tegra124_mc_emem_regs,
1021 .num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs), 1021 .num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs),
1022 .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
1023 MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
1024 MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
1022}; 1025};
1023#endif /* CONFIG_ARCH_TEGRA_124_SOC */ 1026#endif /* CONFIG_ARCH_TEGRA_124_SOC */
1024 1027
@@ -1041,5 +1044,8 @@ const struct tegra_mc_soc tegra132_mc_soc = {
1041 .atom_size = 32, 1044 .atom_size = 32,
1042 .client_id_mask = 0x7f, 1045 .client_id_mask = 0x7f,
1043 .smmu = &tegra132_smmu_soc, 1046 .smmu = &tegra132_smmu_soc,
1047 .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
1048 MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
1049 MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
1044}; 1050};
1045#endif /* CONFIG_ARCH_TEGRA_132_SOC */ 1051#endif /* CONFIG_ARCH_TEGRA_132_SOC */
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index 5e144abe4c18..47c78a6d8f00 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -1077,4 +1077,7 @@ const struct tegra_mc_soc tegra210_mc_soc = {
1077 .atom_size = 64, 1077 .atom_size = 64,
1078 .client_id_mask = 0xff, 1078 .client_id_mask = 0xff,
1079 .smmu = &tegra210_smmu_soc, 1079 .smmu = &tegra210_smmu_soc,
1080 .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
1081 MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
1082 MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
1080}; 1083};
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index b44737840e70..d0689428ea1a 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -952,4 +952,6 @@ const struct tegra_mc_soc tegra30_mc_soc = {
952 .atom_size = 16, 952 .atom_size = 16,
953 .client_id_mask = 0x7f, 953 .client_id_mask = 0x7f,
954 .smmu = &tegra30_smmu_soc, 954 .smmu = &tegra30_smmu_soc,
955 .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
956 MC_INT_DECERR_EMEM,
955}; 957};
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5dcc0313c38a..207370d68c17 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -6848,6 +6848,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
6848 *size = y; 6848 *size = y;
6849} 6849}
6850 6850
6851#ifdef CONFIG_PROC_FS
6851static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan) 6852static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan)
6852{ 6853{
6853 char expVer[32]; 6854 char expVer[32];
@@ -6879,6 +6880,7 @@ static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int
6879 6880
6880 seq_putc(m, '\n'); 6881 seq_putc(m, '\n');
6881} 6882}
6883#endif
6882 6884
6883/** 6885/**
6884 * mpt_set_taskmgmt_in_progress_flag - set flags associated with task management 6886 * mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 02b5f69e1a42..14cf6dfc3b14 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2698,6 +2698,8 @@ mptctl_hp_targetinfo(unsigned long arg)
2698 __FILE__, __LINE__, iocnum); 2698 __FILE__, __LINE__, iocnum);
2699 return -ENODEV; 2699 return -ENODEV;
2700 } 2700 }
2701 if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
2702 return -EINVAL;
2701 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", 2703 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
2702 ioc->name)); 2704 ioc->name));
2703 2705
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ebccfa8072a..cb790b68920f 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
1994 .cmd_per_lun = 7, 1994 .cmd_per_lun = 7,
1995 .use_clustering = ENABLE_CLUSTERING, 1995 .use_clustering = ENABLE_CLUSTERING,
1996 .shost_attrs = mptscsih_host_attrs, 1996 .shost_attrs = mptscsih_host_attrs,
1997 .no_write_same = 1,
1997}; 1998};
1998 1999
1999static int mptsas_get_linkerrors(struct sas_phy *phy) 2000static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index 0eee63542038..115a6f67ab51 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -68,7 +68,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
68 68
69 mutex_init(&ec_dev->lock); 69 mutex_init(&ec_dev->lock);
70 70
71 cros_ec_query_all(ec_dev); 71 err = cros_ec_query_all(ec_dev);
72 if (err) {
73 dev_err(dev, "Cannot identify the EC: error %d\n", err);
74 return err;
75 }
72 76
73 err = mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, &ec_cell, 1, 77 err = mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, &ec_cell, 1,
74 NULL, ec_dev->irq, NULL); 78 NULL, ec_dev->irq, NULL);
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index fe89e5e337d5..ac867489b5a9 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -269,11 +269,11 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
269 269
270 intel_lpss_deassert_reset(lpss); 270 intel_lpss_deassert_reset(lpss);
271 271
272 intel_lpss_set_remap_addr(lpss);
273
272 if (!intel_lpss_has_idma(lpss)) 274 if (!intel_lpss_has_idma(lpss))
273 return; 275 return;
274 276
275 intel_lpss_set_remap_addr(lpss);
276
277 /* Make sure that SPI multiblock DMA transfers are re-enabled */ 277 /* Make sure that SPI multiblock DMA transfers are re-enabled */
278 if (lpss->type == LPSS_DEV_SPI) 278 if (lpss->type == LPSS_DEV_SPI)
279 writel(value, lpss->priv + LPSS_PRIV_SSP_REG); 279 writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 321e734b3d9b..9b6a9630de19 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -437,6 +437,19 @@ static void palmas_power_off(void)
437 return; 437 return;
438 438
439 node = palmas_dev->dev->of_node; 439 node = palmas_dev->dev->of_node;
440 if (of_property_read_bool(node, "ti,palmas-override-powerhold")) {
441 addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE,
442 PALMAS_PRIMARY_SECONDARY_PAD2);
443 slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
444
445 ret = regmap_update_bits(palmas_dev->regmap[slave], addr,
446 PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0);
447 if (ret)
448 dev_err(palmas_dev->dev,
449 "Unable to write PRIMARY_SECONDARY_PAD2 %d\n",
450 ret);
451 }
452
440 override_powerhold = 453 override_powerhold =
441 of_property_read_bool(node, "ti,palmas-override-powerhold"); 454 of_property_read_bool(node, "ti,palmas-override-powerhold");
442 455
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index cc91f7b3d90c..eb29113e0bac 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -148,7 +148,7 @@ enclosure_register(struct device *dev, const char *name, int components,
148 for (i = 0; i < components; i++) { 148 for (i = 0; i < components; i++) {
149 edev->component[i].number = -1; 149 edev->component[i].number = -1;
150 edev->component[i].slot = -1; 150 edev->component[i].slot = -1;
151 edev->component[i].power_status = 1; 151 edev->component[i].power_status = -1;
152 } 152 }
153 153
154 mutex_lock(&container_list_lock); 154 mutex_lock(&container_list_lock);
@@ -600,6 +600,11 @@ static ssize_t get_component_power_status(struct device *cdev,
600 600
601 if (edev->cb->get_power_status) 601 if (edev->cb->get_power_status)
602 edev->cb->get_power_status(edev, ecomp); 602 edev->cb->get_power_status(edev, ecomp);
603
604 /* If still uninitialized, the callback failed or does not exist. */
605 if (ecomp->power_status == -1)
606 return (edev->cb->get_power_status) ? -EIO : -ENOTTY;
607
603 return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off"); 608 return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off");
604} 609}
605 610
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e8b933111e0d..92109cadc3fc 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file)
507static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) 507static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
508{ 508{
509 void __iomem *address = (void __iomem *)file->private_data; 509 void __iomem *address = (void __iomem *)file->private_data;
510 unsigned char *page;
511 int retval;
512 int len = 0; 510 int len = 0;
513 unsigned int value; 511 unsigned int value;
514 512 char lbuf[20];
515 if (*offset < 0)
516 return -EINVAL;
517 if (count == 0 || count > 1024)
518 return 0;
519 if (*offset != 0)
520 return 0;
521
522 page = (unsigned char *)__get_free_page(GFP_KERNEL);
523 if (!page)
524 return -ENOMEM;
525 513
526 value = readl(address); 514 value = readl(address);
527 len = sprintf(page, "%d\n", value); 515 len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
528
529 if (copy_to_user(buf, page, len)) {
530 retval = -EFAULT;
531 goto exit;
532 }
533 *offset += len;
534 retval = len;
535 516
536exit: 517 return simple_read_from_buffer(buf, count, offset, lbuf, len);
537 free_page((unsigned long)page);
538 return retval;
539} 518}
540 519
541static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) 520static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 4ef189a7a2fb..8c04e342e30a 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -571,7 +571,6 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
571 break; 571 break;
572 572
573 default: 573 default:
574 dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
575 rets = -ENOIOCTLCMD; 574 rets = -ENOIOCTLCMD;
576 } 575 }
577 576
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 1e688bfec567..5e047bfc0cc4 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b,
467 unsigned int num_pages, bool is_2m_pages, unsigned int *target) 467 unsigned int num_pages, bool is_2m_pages, unsigned int *target)
468{ 468{
469 unsigned long status; 469 unsigned long status;
470 unsigned long pfn = page_to_pfn(b->page); 470 unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
471 471
472 STATS_INC(b->stats.lock[is_2m_pages]); 472 STATS_INC(b->stats.lock[is_2m_pages]);
473 473
@@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b,
515 unsigned int num_pages, bool is_2m_pages, unsigned int *target) 515 unsigned int num_pages, bool is_2m_pages, unsigned int *target)
516{ 516{
517 unsigned long status; 517 unsigned long status;
518 unsigned long pfn = page_to_pfn(b->page); 518 unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
519 519
520 STATS_INC(b->stats.unlock[is_2m_pages]); 520 STATS_INC(b->stats.unlock[is_2m_pages]);
521 521
@@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b)
576 } 576 }
577 } 577 }
578 578
579 if (b->batch_page) { 579 /* Clearing the batch_page unconditionally has no adverse effect */
580 vunmap(b->batch_page); 580 free_page((unsigned long)b->batch_page);
581 b->batch_page = NULL; 581 b->batch_page = NULL;
582 }
583
584 if (b->page) {
585 __free_page(b->page);
586 b->page = NULL;
587 }
588} 582}
589 583
590/* 584/*
@@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = {
991 985
992static bool vmballoon_init_batching(struct vmballoon *b) 986static bool vmballoon_init_batching(struct vmballoon *b)
993{ 987{
994 b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP); 988 struct page *page;
995 if (!b->page)
996 return false;
997 989
998 b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL); 990 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
999 if (!b->batch_page) { 991 if (!page)
1000 __free_page(b->page);
1001 return false; 992 return false;
1002 }
1003 993
994 b->batch_page = page_address(page);
1004 return true; 995 return true;
1005} 996}
1006 997
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f42d9c4e4561..cc277f7849b0 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -298,8 +298,11 @@ static void *qp_alloc_queue(u64 size, u32 flags)
298 size_t pas_size; 298 size_t pas_size;
299 size_t vas_size; 299 size_t vas_size;
300 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if); 300 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
301 const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 301 u64 num_pages;
302 302
303 if (size > SIZE_MAX - PAGE_SIZE)
304 return NULL;
305 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
303 if (num_pages > 306 if (num_pages >
304 (SIZE_MAX - queue_size) / 307 (SIZE_MAX - queue_size) /
305 (sizeof(*queue->kernel_if->u.g.pas) + 308 (sizeof(*queue->kernel_if->u.g.pas) +
@@ -624,9 +627,12 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
624{ 627{
625 struct vmci_queue *queue; 628 struct vmci_queue *queue;
626 size_t queue_page_size; 629 size_t queue_page_size;
627 const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 630 u64 num_pages;
628 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); 631 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
629 632
633 if (size > SIZE_MAX - PAGE_SIZE)
634 return NULL;
635 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
630 if (num_pages > (SIZE_MAX - queue_size) / 636 if (num_pages > (SIZE_MAX - queue_size) /
631 sizeof(*queue->kernel_if->u.h.page)) 637 sizeof(*queue->kernel_if->u.h.page))
632 return NULL; 638 return NULL;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5f7d10ba498a..299a83f1ad38 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2791,6 +2791,14 @@ int mmc_pm_notify(struct notifier_block *notify_block,
2791 if (!err) 2791 if (!err)
2792 break; 2792 break;
2793 2793
2794 if (!mmc_card_is_removable(host)) {
2795 dev_warn(mmc_dev(host),
2796 "pre_suspend failed for non-removable host: "
2797 "%d\n", err);
2798 /* Avoid removing non-removable hosts */
2799 break;
2800 }
2801
2794 /* Calling bus_ops->remove() with a claimed host can deadlock */ 2802 /* Calling bus_ops->remove() with a claimed host can deadlock */
2795 host->bus_ops->remove(host); 2803 host->bus_ops->remove(host);
2796 mmc_claim_host(host); 2804 mmc_claim_host(host);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index fb204ee6ff89..581f5d0271f4 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -619,6 +619,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
619 (sizeof(struct idmac_desc_64addr) * 619 (sizeof(struct idmac_desc_64addr) *
620 (i + 1))) >> 32; 620 (i + 1))) >> 32;
621 /* Initialize reserved and buffer size fields to "0" */ 621 /* Initialize reserved and buffer size fields to "0" */
622 p->des0 = 0;
622 p->des1 = 0; 623 p->des1 = 0;
623 p->des2 = 0; 624 p->des2 = 0;
624 p->des3 = 0; 625 p->des3 = 0;
@@ -640,6 +641,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
640 i++, p++) { 641 i++, p++) {
641 p->des3 = cpu_to_le32(host->sg_dma + 642 p->des3 = cpu_to_le32(host->sg_dma +
642 (sizeof(struct idmac_desc) * (i + 1))); 643 (sizeof(struct idmac_desc) * (i + 1)));
644 p->des0 = 0;
643 p->des1 = 0; 645 p->des1 = 0;
644 } 646 }
645 647
@@ -2807,8 +2809,8 @@ static bool dw_mci_reset(struct dw_mci *host)
2807 } 2809 }
2808 2810
2809 if (host->use_dma == TRANS_MODE_IDMAC) 2811 if (host->use_dma == TRANS_MODE_IDMAC)
2810 /* It is also recommended that we reset and reprogram idmac */ 2812 /* It is also required that we reinit idmac */
2811 dw_mci_idmac_reset(host); 2813 dw_mci_idmac_init(host);
2812 2814
2813 ret = true; 2815 ret = true;
2814 2816
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 76e8bce6f46e..ad572a0f2124 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -368,9 +368,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
368 host->irq_mask &= ~irq; 368 host->irq_mask &= ~irq;
369 else 369 else
370 host->irq_mask |= irq; 370 host->irq_mask |= irq;
371 spin_unlock_irqrestore(&host->lock, flags);
372 371
373 writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK); 372 writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
373 spin_unlock_irqrestore(&host->lock, flags);
374} 374}
375 375
376static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host, 376static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 7bc146efb389..fac21ee6fd6d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2102,8 +2102,8 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
2102 */ 2102 */
2103 if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) { 2103 if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
2104 struct pinctrl *p = devm_pinctrl_get(host->dev); 2104 struct pinctrl *p = devm_pinctrl_get(host->dev);
2105 if (!p) { 2105 if (IS_ERR(p)) {
2106 ret = -ENODEV; 2106 ret = PTR_ERR(p);
2107 goto err_free_irq; 2107 goto err_free_irq;
2108 } 2108 }
2109 if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) { 2109 if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index f280744578e4..ffd448149796 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -32,6 +32,8 @@ struct sdhci_iproc_host {
32 const struct sdhci_iproc_data *data; 32 const struct sdhci_iproc_data *data;
33 u32 shadow_cmd; 33 u32 shadow_cmd;
34 u32 shadow_blk; 34 u32 shadow_blk;
35 bool is_cmd_shadowed;
36 bool is_blk_shadowed;
35}; 37};
36 38
37#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18) 39#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
@@ -47,8 +49,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
47 49
48static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg) 50static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
49{ 51{
50 u32 val = sdhci_iproc_readl(host, (reg & ~3)); 52 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
51 u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; 53 struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
54 u32 val;
55 u16 word;
56
57 if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
58 /* Get the saved transfer mode */
59 val = iproc_host->shadow_cmd;
60 } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
61 iproc_host->is_blk_shadowed) {
62 /* Get the saved block info */
63 val = iproc_host->shadow_blk;
64 } else {
65 val = sdhci_iproc_readl(host, (reg & ~3));
66 }
67 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
52 return word; 68 return word;
53} 69}
54 70
@@ -104,13 +120,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
104 120
105 if (reg == SDHCI_COMMAND) { 121 if (reg == SDHCI_COMMAND) {
106 /* Write the block now as we are issuing a command */ 122 /* Write the block now as we are issuing a command */
107 if (iproc_host->shadow_blk != 0) { 123 if (iproc_host->is_blk_shadowed) {
108 sdhci_iproc_writel(host, iproc_host->shadow_blk, 124 sdhci_iproc_writel(host, iproc_host->shadow_blk,
109 SDHCI_BLOCK_SIZE); 125 SDHCI_BLOCK_SIZE);
110 iproc_host->shadow_blk = 0; 126 iproc_host->is_blk_shadowed = false;
111 } 127 }
112 oldval = iproc_host->shadow_cmd; 128 oldval = iproc_host->shadow_cmd;
113 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { 129 iproc_host->is_cmd_shadowed = false;
130 } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
131 iproc_host->is_blk_shadowed) {
114 /* Block size and count are stored in shadow reg */ 132 /* Block size and count are stored in shadow reg */
115 oldval = iproc_host->shadow_blk; 133 oldval = iproc_host->shadow_blk;
116 } else { 134 } else {
@@ -122,9 +140,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
122 if (reg == SDHCI_TRANSFER_MODE) { 140 if (reg == SDHCI_TRANSFER_MODE) {
123 /* Save the transfer mode until the command is issued */ 141 /* Save the transfer mode until the command is issued */
124 iproc_host->shadow_cmd = newval; 142 iproc_host->shadow_cmd = newval;
143 iproc_host->is_cmd_shadowed = true;
125 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { 144 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
126 /* Save the block info until the command is issued */ 145 /* Save the block info until the command is issued */
127 iproc_host->shadow_blk = newval; 146 iproc_host->shadow_blk = newval;
147 iproc_host->is_blk_shadowed = true;
128 } else { 148 } else {
129 /* Command or other regular 32-bit write */ 149 /* Command or other regular 32-bit write */
130 sdhci_iproc_writel(host, newval, reg & ~3); 150 sdhci_iproc_writel(host, newval, reg & ~3);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 90e94a028a49..ac66c61d9433 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -418,6 +418,20 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
418 if (esdhc->vendor_ver < VENDOR_V_23) 418 if (esdhc->vendor_ver < VENDOR_V_23)
419 pre_div = 2; 419 pre_div = 2;
420 420
421 /*
422 * Limit SD clock to 167MHz for ls1046a according to its datasheet
423 */
424 if (clock > 167000000 &&
425 of_find_compatible_node(NULL, NULL, "fsl,ls1046a-esdhc"))
426 clock = 167000000;
427
428 /*
429 * Limit SD clock to 125MHz for ls1012a according to its datasheet
430 */
431 if (clock > 125000000 &&
432 of_find_compatible_node(NULL, NULL, "fsl,ls1012a-esdhc"))
433 clock = 125000000;
434
421 /* Workaround to reduce the clock frequency for p1010 esdhc */ 435 /* Workaround to reduce the clock frequency for p1010 esdhc */
422 if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { 436 if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
423 if (clock > 20000000) 437 if (clock > 20000000)
@@ -584,6 +598,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
584{ 598{
585 struct sdhci_host *host; 599 struct sdhci_host *host;
586 struct device_node *np; 600 struct device_node *np;
601 struct sdhci_pltfm_host *pltfm_host;
602 struct sdhci_esdhc *esdhc;
587 int ret; 603 int ret;
588 604
589 np = pdev->dev.of_node; 605 np = pdev->dev.of_node;
@@ -600,6 +616,14 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
600 616
601 sdhci_get_of_property(pdev); 617 sdhci_get_of_property(pdev);
602 618
619 pltfm_host = sdhci_priv(host);
620 esdhc = pltfm_host->priv;
621 if (esdhc->vendor_ver == VENDOR_V_22)
622 host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
623
624 if (esdhc->vendor_ver > VENDOR_V_22)
625 host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
626
603 if (of_device_is_compatible(np, "fsl,p5040-esdhc") || 627 if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
604 of_device_is_compatible(np, "fsl,p5020-esdhc") || 628 of_device_is_compatible(np, "fsl,p5020-esdhc") ||
605 of_device_is_compatible(np, "fsl,p4080-esdhc") || 629 of_device_is_compatible(np, "fsl,p4080-esdhc") ||
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index 8a25adced79f..bbfa1f129266 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -67,6 +67,10 @@ endchoice
67config MTD_CFI_GEOMETRY 67config MTD_CFI_GEOMETRY
68 bool "Specific CFI Flash geometry selection" 68 bool "Specific CFI Flash geometry selection"
69 depends on MTD_CFI_ADV_OPTIONS 69 depends on MTD_CFI_ADV_OPTIONS
70 select MTD_MAP_BANK_WIDTH_1 if !(MTD_MAP_BANK_WIDTH_2 || \
71 MTD_MAP_BANK_WIDTH_4 || MTD_MAP_BANK_WIDTH_8 || \
72 MTD_MAP_BANK_WIDTH_16 || MTD_MAP_BANK_WIDTH_32)
73 select MTD_CFI_I1 if !(MTD_CFI_I2 || MTD_CFI_I4 || MTD_CFI_I8)
70 help 74 help
71 This option does not affect the code directly, but will enable 75 This option does not affect the code directly, but will enable
72 some other configuration options which would allow you to reduce 76 some other configuration options which would allow you to reduce
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 286b97a304cf..4509ee0b294a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
45#define I82802AB 0x00ad 45#define I82802AB 0x00ad
46#define I82802AC 0x00ac 46#define I82802AC 0x00ac
47#define PF38F4476 0x881c 47#define PF38F4476 0x881c
48#define M28F00AP30 0x8963
48/* STMicroelectronics chips */ 49/* STMicroelectronics chips */
49#define M50LPW080 0x002F 50#define M50LPW080 0x002F
50#define M50FLW080A 0x0080 51#define M50FLW080A 0x0080
@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
375 extp->MinorVersion = '1'; 376 extp->MinorVersion = '1';
376} 377}
377 378
379static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
380{
381 /*
382 * Micron(was Numonyx) 1Gbit bottom boot are buggy w.r.t
383 * Erase Supend for their small Erase Blocks(0x8000)
384 */
385 if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
386 return 1;
387 return 0;
388}
389
378static inline struct cfi_pri_intelext * 390static inline struct cfi_pri_intelext *
379read_pri_intelext(struct map_info *map, __u16 adr) 391read_pri_intelext(struct map_info *map, __u16 adr)
380{ 392{
@@ -825,21 +837,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
825 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1)))) 837 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
826 goto sleep; 838 goto sleep;
827 839
840 /* Do not allow suspend iff read/write to EB address */
841 if ((adr & chip->in_progress_block_mask) ==
842 chip->in_progress_block_addr)
843 goto sleep;
844
845 /* do not suspend small EBs, buggy Micron Chips */
846 if (cfi_is_micron_28F00AP30(cfi, chip) &&
847 (chip->in_progress_block_mask == ~(0x8000-1)))
848 goto sleep;
828 849
829 /* Erase suspend */ 850 /* Erase suspend */
830 map_write(map, CMD(0xB0), adr); 851 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
831 852
832 /* If the flash has finished erasing, then 'erase suspend' 853 /* If the flash has finished erasing, then 'erase suspend'
833 * appears to make some (28F320) flash devices switch to 854 * appears to make some (28F320) flash devices switch to
834 * 'read' mode. Make sure that we switch to 'read status' 855 * 'read' mode. Make sure that we switch to 'read status'
835 * mode so we get the right data. --rmk 856 * mode so we get the right data. --rmk
836 */ 857 */
837 map_write(map, CMD(0x70), adr); 858 map_write(map, CMD(0x70), chip->in_progress_block_addr);
838 chip->oldstate = FL_ERASING; 859 chip->oldstate = FL_ERASING;
839 chip->state = FL_ERASE_SUSPENDING; 860 chip->state = FL_ERASE_SUSPENDING;
840 chip->erase_suspended = 1; 861 chip->erase_suspended = 1;
841 for (;;) { 862 for (;;) {
842 status = map_read(map, adr); 863 status = map_read(map, chip->in_progress_block_addr);
843 if (map_word_andequal(map, status, status_OK, status_OK)) 864 if (map_word_andequal(map, status, status_OK, status_OK))
844 break; 865 break;
845 866
@@ -1035,8 +1056,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
1035 sending the 0x70 (Read Status) command to an erasing 1056 sending the 0x70 (Read Status) command to an erasing
1036 chip and expecting it to be ignored, that's what we 1057 chip and expecting it to be ignored, that's what we
1037 do. */ 1058 do. */
1038 map_write(map, CMD(0xd0), adr); 1059 map_write(map, CMD(0xd0), chip->in_progress_block_addr);
1039 map_write(map, CMD(0x70), adr); 1060 map_write(map, CMD(0x70), chip->in_progress_block_addr);
1040 chip->oldstate = FL_READY; 1061 chip->oldstate = FL_READY;
1041 chip->state = FL_ERASING; 1062 chip->state = FL_ERASING;
1042 break; 1063 break;
@@ -1927,6 +1948,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1927 map_write(map, CMD(0xD0), adr); 1948 map_write(map, CMD(0xD0), adr);
1928 chip->state = FL_ERASING; 1949 chip->state = FL_ERASING;
1929 chip->erase_suspended = 0; 1950 chip->erase_suspended = 0;
1951 chip->in_progress_block_addr = adr;
1952 chip->in_progress_block_mask = ~(len - 1);
1930 1953
1931 ret = INVAL_CACHE_AND_WAIT(map, chip, adr, 1954 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1932 adr, len, 1955 adr, len,
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index c3624eb571d1..fb5a3052f144 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -42,7 +42,7 @@
42#define AMD_BOOTLOC_BUG 42#define AMD_BOOTLOC_BUG
43#define FORCE_WORD_WRITE 0 43#define FORCE_WORD_WRITE 0
44 44
45#define MAX_WORD_RETRIES 3 45#define MAX_RETRIES 3
46 46
47#define SST49LF004B 0x0060 47#define SST49LF004B 0x0060
48#define SST49LF040B 0x0050 48#define SST49LF040B 0x0050
@@ -814,9 +814,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
814 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 814 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
815 goto sleep; 815 goto sleep;
816 816
817 /* We could check to see if we're trying to access the sector 817 /* Do not allow suspend iff read/write to EB address */
818 * that is currently being erased. However, no user will try 818 if ((adr & chip->in_progress_block_mask) ==
819 * anything like that so we just wait for the timeout. */ 819 chip->in_progress_block_addr)
820 goto sleep;
820 821
821 /* Erase suspend */ 822 /* Erase suspend */
822 /* It's harmless to issue the Erase-Suspend and Erase-Resume 823 /* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -1644,7 +1645,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1644 map_write( map, CMD(0xF0), chip->start ); 1645 map_write( map, CMD(0xF0), chip->start );
1645 /* FIXME - should have reset delay before continuing */ 1646 /* FIXME - should have reset delay before continuing */
1646 1647
1647 if (++retry_cnt <= MAX_WORD_RETRIES) 1648 if (++retry_cnt <= MAX_RETRIES)
1648 goto retry; 1649 goto retry;
1649 1650
1650 ret = -EIO; 1651 ret = -EIO;
@@ -1877,7 +1878,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1877 if (time_after(jiffies, timeo) && !chip_ready(map, adr)) 1878 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1878 break; 1879 break;
1879 1880
1880 if (chip_ready(map, adr)) { 1881 if (chip_good(map, adr, datum)) {
1881 xip_enable(map, chip, adr); 1882 xip_enable(map, chip, adr);
1882 goto op_done; 1883 goto op_done;
1883 } 1884 }
@@ -2103,7 +2104,7 @@ retry:
2103 map_write(map, CMD(0xF0), chip->start); 2104 map_write(map, CMD(0xF0), chip->start);
2104 /* FIXME - should have reset delay before continuing */ 2105 /* FIXME - should have reset delay before continuing */
2105 2106
2106 if (++retry_cnt <= MAX_WORD_RETRIES) 2107 if (++retry_cnt <= MAX_RETRIES)
2107 goto retry; 2108 goto retry;
2108 2109
2109 ret = -EIO; 2110 ret = -EIO;
@@ -2238,6 +2239,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2238 unsigned long int adr; 2239 unsigned long int adr;
2239 DECLARE_WAITQUEUE(wait, current); 2240 DECLARE_WAITQUEUE(wait, current);
2240 int ret = 0; 2241 int ret = 0;
2242 int retry_cnt = 0;
2241 2243
2242 adr = cfi->addr_unlock1; 2244 adr = cfi->addr_unlock1;
2243 2245
@@ -2255,6 +2257,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2255 ENABLE_VPP(map); 2257 ENABLE_VPP(map);
2256 xip_disable(map, chip, adr); 2258 xip_disable(map, chip, adr);
2257 2259
2260 retry:
2258 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2261 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2259 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2262 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2260 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2263 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -2265,6 +2268,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2265 chip->state = FL_ERASING; 2268 chip->state = FL_ERASING;
2266 chip->erase_suspended = 0; 2269 chip->erase_suspended = 0;
2267 chip->in_progress_block_addr = adr; 2270 chip->in_progress_block_addr = adr;
2271 chip->in_progress_block_mask = ~(map->size - 1);
2268 2272
2269 INVALIDATE_CACHE_UDELAY(map, chip, 2273 INVALIDATE_CACHE_UDELAY(map, chip,
2270 adr, map->size, 2274 adr, map->size,
@@ -2290,12 +2294,13 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2290 chip->erase_suspended = 0; 2294 chip->erase_suspended = 0;
2291 } 2295 }
2292 2296
2293 if (chip_ready(map, adr)) 2297 if (chip_good(map, adr, map_word_ff(map)))
2294 break; 2298 break;
2295 2299
2296 if (time_after(jiffies, timeo)) { 2300 if (time_after(jiffies, timeo)) {
2297 printk(KERN_WARNING "MTD %s(): software timeout\n", 2301 printk(KERN_WARNING "MTD %s(): software timeout\n",
2298 __func__ ); 2302 __func__ );
2303 ret = -EIO;
2299 break; 2304 break;
2300 } 2305 }
2301 2306
@@ -2303,12 +2308,15 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2303 UDELAY(map, chip, adr, 1000000/HZ); 2308 UDELAY(map, chip, adr, 1000000/HZ);
2304 } 2309 }
2305 /* Did we succeed? */ 2310 /* Did we succeed? */
2306 if (!chip_good(map, adr, map_word_ff(map))) { 2311 if (ret) {
2307 /* reset on all failures. */ 2312 /* reset on all failures. */
2308 map_write( map, CMD(0xF0), chip->start ); 2313 map_write( map, CMD(0xF0), chip->start );
2309 /* FIXME - should have reset delay before continuing */ 2314 /* FIXME - should have reset delay before continuing */
2310 2315
2311 ret = -EIO; 2316 if (++retry_cnt <= MAX_RETRIES) {
2317 ret = 0;
2318 goto retry;
2319 }
2312 } 2320 }
2313 2321
2314 chip->state = FL_READY; 2322 chip->state = FL_READY;
@@ -2327,6 +2335,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
2327 unsigned long timeo = jiffies + HZ; 2335 unsigned long timeo = jiffies + HZ;
2328 DECLARE_WAITQUEUE(wait, current); 2336 DECLARE_WAITQUEUE(wait, current);
2329 int ret = 0; 2337 int ret = 0;
2338 int retry_cnt = 0;
2330 2339
2331 adr += chip->start; 2340 adr += chip->start;
2332 2341
@@ -2344,6 +2353,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
2344 ENABLE_VPP(map); 2353 ENABLE_VPP(map);
2345 xip_disable(map, chip, adr); 2354 xip_disable(map, chip, adr);
2346 2355
2356 retry:
2347 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2357 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2348 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2358 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2349 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2359 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -2354,6 +2364,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
2354 chip->state = FL_ERASING; 2364 chip->state = FL_ERASING;
2355 chip->erase_suspended = 0; 2365 chip->erase_suspended = 0;
2356 chip->in_progress_block_addr = adr; 2366 chip->in_progress_block_addr = adr;
2367 chip->in_progress_block_mask = ~(len - 1);
2357 2368
2358 INVALIDATE_CACHE_UDELAY(map, chip, 2369 INVALIDATE_CACHE_UDELAY(map, chip,
2359 adr, len, 2370 adr, len,
@@ -2379,7 +2390,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
2379 chip->erase_suspended = 0; 2390 chip->erase_suspended = 0;
2380 } 2391 }
2381 2392
2382 if (chip_ready(map, adr)) { 2393 if (chip_good(map, adr, map_word_ff(map))) {
2383 xip_enable(map, chip, adr); 2394 xip_enable(map, chip, adr);
2384 break; 2395 break;
2385 } 2396 }
@@ -2388,6 +2399,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
2388 xip_enable(map, chip, adr); 2399 xip_enable(map, chip, adr);
2389 printk(KERN_WARNING "MTD %s(): software timeout\n", 2400 printk(KERN_WARNING "MTD %s(): software timeout\n",
2390 __func__ ); 2401 __func__ );
2402 ret = -EIO;
2391 break; 2403 break;
2392 } 2404 }
2393 2405
@@ -2395,12 +2407,15 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
2395 UDELAY(map, chip, adr, 1000000/HZ); 2407 UDELAY(map, chip, adr, 1000000/HZ);
2396 } 2408 }
2397 /* Did we succeed? */ 2409 /* Did we succeed? */
2398 if (!chip_good(map, adr, map_word_ff(map))) { 2410 if (ret) {
2399 /* reset on all failures. */ 2411 /* reset on all failures. */
2400 map_write( map, CMD(0xF0), chip->start ); 2412 map_write( map, CMD(0xF0), chip->start );
2401 /* FIXME - should have reset delay before continuing */ 2413 /* FIXME - should have reset delay before continuing */
2402 2414
2403 ret = -EIO; 2415 if (++retry_cnt <= MAX_RETRIES) {
2416 ret = 0;
2417 goto retry;
2418 }
2404 } 2419 }
2405 2420
2406 chip->state = FL_READY; 2421 chip->state = FL_READY;
@@ -2530,7 +2545,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2530 2545
2531struct ppb_lock { 2546struct ppb_lock {
2532 struct flchip *chip; 2547 struct flchip *chip;
2533 loff_t offset; 2548 unsigned long adr;
2534 int locked; 2549 int locked;
2535}; 2550};
2536 2551
@@ -2548,8 +2563,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2548 unsigned long timeo; 2563 unsigned long timeo;
2549 int ret; 2564 int ret;
2550 2565
2566 adr += chip->start;
2551 mutex_lock(&chip->mutex); 2567 mutex_lock(&chip->mutex);
2552 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2568 ret = get_chip(map, chip, adr, FL_LOCKING);
2553 if (ret) { 2569 if (ret) {
2554 mutex_unlock(&chip->mutex); 2570 mutex_unlock(&chip->mutex);
2555 return ret; 2571 return ret;
@@ -2567,8 +2583,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2567 2583
2568 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 2584 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2569 chip->state = FL_LOCKING; 2585 chip->state = FL_LOCKING;
2570 map_write(map, CMD(0xA0), chip->start + adr); 2586 map_write(map, CMD(0xA0), adr);
2571 map_write(map, CMD(0x00), chip->start + adr); 2587 map_write(map, CMD(0x00), adr);
2572 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { 2588 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2573 /* 2589 /*
2574 * Unlocking of one specific sector is not supported, so we 2590 * Unlocking of one specific sector is not supported, so we
@@ -2606,7 +2622,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2606 map_write(map, CMD(0x00), chip->start); 2622 map_write(map, CMD(0x00), chip->start);
2607 2623
2608 chip->state = FL_READY; 2624 chip->state = FL_READY;
2609 put_chip(map, chip, adr + chip->start); 2625 put_chip(map, chip, adr);
2610 mutex_unlock(&chip->mutex); 2626 mutex_unlock(&chip->mutex);
2611 2627
2612 return ret; 2628 return ret;
@@ -2663,9 +2679,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2663 * sectors shall be unlocked, so lets keep their locking 2679 * sectors shall be unlocked, so lets keep their locking
2664 * status at "unlocked" (locked=0) for the final re-locking. 2680 * status at "unlocked" (locked=0) for the final re-locking.
2665 */ 2681 */
2666 if ((adr < ofs) || (adr >= (ofs + len))) { 2682 if ((offset < ofs) || (offset >= (ofs + len))) {
2667 sect[sectors].chip = &cfi->chips[chipnum]; 2683 sect[sectors].chip = &cfi->chips[chipnum];
2668 sect[sectors].offset = offset; 2684 sect[sectors].adr = adr;
2669 sect[sectors].locked = do_ppb_xxlock( 2685 sect[sectors].locked = do_ppb_xxlock(
2670 map, &cfi->chips[chipnum], adr, 0, 2686 map, &cfi->chips[chipnum], adr, 0,
2671 DO_XXLOCK_ONEBLOCK_GETLOCK); 2687 DO_XXLOCK_ONEBLOCK_GETLOCK);
@@ -2679,6 +2695,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2679 i++; 2695 i++;
2680 2696
2681 if (adr >> cfi->chipshift) { 2697 if (adr >> cfi->chipshift) {
2698 if (offset >= (ofs + len))
2699 break;
2682 adr = 0; 2700 adr = 0;
2683 chipnum++; 2701 chipnum++;
2684 2702
@@ -2709,7 +2727,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2709 */ 2727 */
2710 for (i = 0; i < sectors; i++) { 2728 for (i = 0; i < sectors; i++) {
2711 if (sect[i].locked) 2729 if (sect[i].locked)
2712 do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, 2730 do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
2713 DO_XXLOCK_ONEBLOCK_LOCK); 2731 DO_XXLOCK_ONEBLOCK_LOCK);
2714 } 2732 }
2715 2733
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 7c0b27d132b1..b479bd81120b 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1889,6 +1889,8 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
1889 do { 1889 do {
1890 uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi); 1890 uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
1891 mask = (1 << (cfi->device_type * 8)) - 1; 1891 mask = (1 << (cfi->device_type * 8)) - 1;
1892 if (ofs >= map->size)
1893 return 0;
1892 result = map_read(map, base + ofs); 1894 result = map_read(map, base + ofs);
1893 bank++; 1895 bank++;
1894 } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION); 1896 } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 0455166f05fa..4f206a99164c 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -112,8 +112,8 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
112} 112}
113 113
114 114
115static int ck804xrom_init_one(struct pci_dev *pdev, 115static int __init ck804xrom_init_one(struct pci_dev *pdev,
116 const struct pci_device_id *ent) 116 const struct pci_device_id *ent)
117{ 117{
118 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 118 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
119 u8 byte; 119 u8 byte;
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 76ed651b515b..9646b0766ce0 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -144,8 +144,8 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
144 pci_dev_put(window->pdev); 144 pci_dev_put(window->pdev);
145} 145}
146 146
147static int esb2rom_init_one(struct pci_dev *pdev, 147static int __init esb2rom_init_one(struct pci_dev *pdev,
148 const struct pci_device_id *ent) 148 const struct pci_device_id *ent)
149{ 149{
150 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 150 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
151 struct esb2rom_window *window = &esb2rom_window; 151 struct esb2rom_window *window = &esb2rom_window;
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 8636bba42200..976d42f63aef 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -57,10 +57,12 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
57{ 57{
58 struct ichxrom_map_info *map, *scratch; 58 struct ichxrom_map_info *map, *scratch;
59 u16 word; 59 u16 word;
60 int ret;
60 61
61 /* Disable writes through the rom window */ 62 /* Disable writes through the rom window */
62 pci_read_config_word(window->pdev, BIOS_CNTL, &word); 63 ret = pci_read_config_word(window->pdev, BIOS_CNTL, &word);
63 pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1); 64 if (!ret)
65 pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
64 pci_dev_put(window->pdev); 66 pci_dev_put(window->pdev);
65 67
66 /* Free all of the mtd devices */ 68 /* Free all of the mtd devices */
@@ -84,8 +86,8 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
84} 86}
85 87
86 88
87static int ichxrom_init_one(struct pci_dev *pdev, 89static int __init ichxrom_init_one(struct pci_dev *pdev,
88 const struct pci_device_id *ent) 90 const struct pci_device_id *ent)
89{ 91{
90 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 92 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
91 struct ichxrom_window *window = &ichxrom_window; 93 struct ichxrom_window *window = &ichxrom_window;
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 4a07ba1195b5..d125d19a35e4 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1922,16 +1922,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
1922 tmp &= ~ACC_CONTROL_PARTIAL_PAGE; 1922 tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
1923 tmp &= ~ACC_CONTROL_RD_ERASED; 1923 tmp &= ~ACC_CONTROL_RD_ERASED;
1924 tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; 1924 tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
1925 if (ctrl->features & BRCMNAND_HAS_PREFETCH) { 1925 if (ctrl->features & BRCMNAND_HAS_PREFETCH)
1926 /* 1926 tmp &= ~ACC_CONTROL_PREFETCH;
1927 * FIXME: Flash DMA + prefetch may see spurious erased-page ECC 1927
1928 * errors
1929 */
1930 if (has_flash_dma(ctrl))
1931 tmp &= ~ACC_CONTROL_PREFETCH;
1932 else
1933 tmp |= ACC_CONTROL_PREFETCH;
1934 }
1935 nand_writereg(ctrl, offs, tmp); 1928 nand_writereg(ctrl, offs, tmp);
1936 1929
1937 return 0; 1930 return 0;
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index de31514df282..d38527e0a2f2 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -119,3 +119,7 @@ static struct pci_driver denali_pci_driver = {
119}; 119};
120 120
121module_pci_driver(denali_pci_driver); 121module_pci_driver(denali_pci_driver);
122
123MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
124MODULE_AUTHOR("Intel Corporation and its suppliers");
125MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 7f4ac8c19001..2c0bbaed3609 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -449,9 +449,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
449 449
450 case NAND_CMD_READID: 450 case NAND_CMD_READID:
451 case NAND_CMD_PARAM: { 451 case NAND_CMD_PARAM: {
452 /*
453 * For READID, read 8 bytes that are currently used.
454 * For PARAM, read all 3 copies of 256-bytes pages.
455 */
456 int len = 8;
452 int timing = IFC_FIR_OP_RB; 457 int timing = IFC_FIR_OP_RB;
453 if (command == NAND_CMD_PARAM) 458 if (command == NAND_CMD_PARAM) {
454 timing = IFC_FIR_OP_RBCD; 459 timing = IFC_FIR_OP_RBCD;
460 len = 256 * 3;
461 }
455 462
456 ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 463 ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
457 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 464 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
@@ -461,12 +468,8 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
461 &ifc->ifc_nand.nand_fcr0); 468 &ifc->ifc_nand.nand_fcr0);
462 ifc_out32(column, &ifc->ifc_nand.row3); 469 ifc_out32(column, &ifc->ifc_nand.row3);
463 470
464 /* 471 ifc_out32(len, &ifc->ifc_nand.nand_fbcr);
465 * although currently it's 8 bytes for READID, we always read 472 ifc_nand_ctrl->read_bytes = len;
466 * the maximum 256 bytes(for PARAM)
467 */
468 ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
469 ifc_nand_ctrl->read_bytes = 256;
470 473
471 set_addr(mtd, 0, 0, 0); 474 set_addr(mtd, 0, 0, 0);
472 fsl_ifc_run_command(mtd); 475 fsl_ifc_run_command(mtd);
@@ -726,6 +729,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
726 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 729 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
727 struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 730 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
728 u32 nand_fsr; 731 u32 nand_fsr;
732 int status;
729 733
730 /* Use READ_STATUS command, but wait for the device to be ready */ 734 /* Use READ_STATUS command, but wait for the device to be ready */
731 ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 735 ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
@@ -740,12 +744,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
740 fsl_ifc_run_command(mtd); 744 fsl_ifc_run_command(mtd);
741 745
742 nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); 746 nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
743 747 status = nand_fsr >> 24;
744 /* 748 /*
745 * The chip always seems to report that it is 749 * The chip always seems to report that it is
746 * write-protected, even when it is not. 750 * write-protected, even when it is not.
747 */ 751 */
748 return nand_fsr | NAND_STATUS_WP; 752 return status | NAND_STATUS_WP;
749} 753}
750 754
751static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip, 755static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 2064adac1d17..40a335c6b792 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1029,24 +1029,97 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1029 return ret; 1029 return ret;
1030 } 1030 }
1031 1031
1032 /* handle the block mark swapping */
1033 block_mark_swapping(this, payload_virt, auxiliary_virt);
1034
1035 /* Loop over status bytes, accumulating ECC status. */ 1032 /* Loop over status bytes, accumulating ECC status. */
1036 status = auxiliary_virt + nfc_geo->auxiliary_status_offset; 1033 status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
1037 1034
1035 read_page_swap_end(this, buf, nfc_geo->payload_size,
1036 this->payload_virt, this->payload_phys,
1037 nfc_geo->payload_size,
1038 payload_virt, payload_phys);
1039
1038 for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) { 1040 for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
1039 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED)) 1041 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
1040 continue; 1042 continue;
1041 1043
1042 if (*status == STATUS_UNCORRECTABLE) { 1044 if (*status == STATUS_UNCORRECTABLE) {
1045 int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1046 u8 *eccbuf = this->raw_buffer;
1047 int offset, bitoffset;
1048 int eccbytes;
1049 int flips;
1050
1051 /* Read ECC bytes into our internal raw_buffer */
1052 offset = nfc_geo->metadata_size * 8;
1053 offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
1054 offset -= eccbits;
1055 bitoffset = offset % 8;
1056 eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
1057 offset /= 8;
1058 eccbytes -= offset;
1059 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
1060 chip->read_buf(mtd, eccbuf, eccbytes);
1061
1062 /*
1063 * ECC data are not byte aligned and we may have
1064 * in-band data in the first and last byte of
1065 * eccbuf. Set non-eccbits to one so that
1066 * nand_check_erased_ecc_chunk() does not count them
1067 * as bitflips.
1068 */
1069 if (bitoffset)
1070 eccbuf[0] |= GENMASK(bitoffset - 1, 0);
1071
1072 bitoffset = (bitoffset + eccbits) % 8;
1073 if (bitoffset)
1074 eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
1075
1076 /*
1077 * The ECC hardware has an uncorrectable ECC status
1078 * code in case we have bitflips in an erased page. As
1079 * nothing was written into this subpage the ECC is
1080 * obviously wrong and we can not trust it. We assume
1081 * at this point that we are reading an erased page and
1082 * try to correct the bitflips in buffer up to
1083 * ecc_strength bitflips. If this is a page with random
1084 * data, we exceed this number of bitflips and have a
1085 * ECC failure. Otherwise we use the corrected buffer.
1086 */
1087 if (i == 0) {
1088 /* The first block includes metadata */
1089 flips = nand_check_erased_ecc_chunk(
1090 buf + i * nfc_geo->ecc_chunk_size,
1091 nfc_geo->ecc_chunk_size,
1092 eccbuf, eccbytes,
1093 auxiliary_virt,
1094 nfc_geo->metadata_size,
1095 nfc_geo->ecc_strength);
1096 } else {
1097 flips = nand_check_erased_ecc_chunk(
1098 buf + i * nfc_geo->ecc_chunk_size,
1099 nfc_geo->ecc_chunk_size,
1100 eccbuf, eccbytes,
1101 NULL, 0,
1102 nfc_geo->ecc_strength);
1103 }
1104
1105 if (flips > 0) {
1106 max_bitflips = max_t(unsigned int, max_bitflips,
1107 flips);
1108 mtd->ecc_stats.corrected += flips;
1109 continue;
1110 }
1111
1043 mtd->ecc_stats.failed++; 1112 mtd->ecc_stats.failed++;
1044 continue; 1113 continue;
1045 } 1114 }
1115
1046 mtd->ecc_stats.corrected += *status; 1116 mtd->ecc_stats.corrected += *status;
1047 max_bitflips = max_t(unsigned int, max_bitflips, *status); 1117 max_bitflips = max_t(unsigned int, max_bitflips, *status);
1048 } 1118 }
1049 1119
1120 /* handle the block mark swapping */
1121 block_mark_swapping(this, buf, auxiliary_virt);
1122
1050 if (oob_required) { 1123 if (oob_required) {
1051 /* 1124 /*
1052 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() 1125 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
@@ -1062,11 +1135,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1062 chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0]; 1135 chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
1063 } 1136 }
1064 1137
1065 read_page_swap_end(this, buf, nfc_geo->payload_size,
1066 this->payload_virt, this->payload_phys,
1067 nfc_geo->payload_size,
1068 payload_virt, payload_phys);
1069
1070 return max_bitflips; 1138 return max_bitflips;
1071} 1139}
1072 1140
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index e584d910f945..c37611127ca3 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -49,7 +49,7 @@
49#define NFC_V1_V2_CONFIG (host->regs + 0x0a) 49#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
50#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c) 50#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
51#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e) 51#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
52#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10) 52#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
53#define NFC_V1_V2_WRPROT (host->regs + 0x12) 53#define NFC_V1_V2_WRPROT (host->regs + 0x12)
54#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14) 54#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
55#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16) 55#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
@@ -1034,6 +1034,9 @@ static void preset_v2(struct mtd_info *mtd)
1034 writew(config1, NFC_V1_V2_CONFIG1); 1034 writew(config1, NFC_V1_V2_CONFIG1);
1035 /* preset operation */ 1035 /* preset operation */
1036 1036
1037 /* spare area size in 16-bit half-words */
1038 writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
1039
1037 /* Unlock the internal RAM Buffer */ 1040 /* Unlock the internal RAM Buffer */
1038 writew(0x2, NFC_V1_V2_CONFIG); 1041 writew(0x2, NFC_V1_V2_CONFIG);
1039 1042
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index e561fbcb93b3..d2639ea121f4 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -626,7 +626,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
626 chip->cmd_ctrl(mtd, readcmd, ctrl); 626 chip->cmd_ctrl(mtd, readcmd, ctrl);
627 ctrl &= ~NAND_CTRL_CHANGE; 627 ctrl &= ~NAND_CTRL_CHANGE;
628 } 628 }
629 chip->cmd_ctrl(mtd, command, ctrl); 629 if (command != NAND_CMD_NONE)
630 chip->cmd_ctrl(mtd, command, ctrl);
630 631
631 /* Address cycle, when necessary */ 632 /* Address cycle, when necessary */
632 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; 633 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
@@ -655,6 +656,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
655 */ 656 */
656 switch (command) { 657 switch (command) {
657 658
659 case NAND_CMD_NONE:
658 case NAND_CMD_PAGEPROG: 660 case NAND_CMD_PAGEPROG:
659 case NAND_CMD_ERASE1: 661 case NAND_CMD_ERASE1:
660 case NAND_CMD_ERASE2: 662 case NAND_CMD_ERASE2:
@@ -717,7 +719,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
717 } 719 }
718 720
719 /* Command latch cycle */ 721 /* Command latch cycle */
720 chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); 722 if (command != NAND_CMD_NONE)
723 chip->cmd_ctrl(mtd, command,
724 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
721 725
722 if (column != -1 || page_addr != -1) { 726 if (column != -1 || page_addr != -1) {
723 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE; 727 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -750,6 +754,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
750 */ 754 */
751 switch (command) { 755 switch (command) {
752 756
757 case NAND_CMD_NONE:
753 case NAND_CMD_CACHEDPROG: 758 case NAND_CMD_CACHEDPROG:
754 case NAND_CMD_PAGEPROG: 759 case NAND_CMD_PAGEPROG:
755 case NAND_CMD_ERASE1: 760 case NAND_CMD_ERASE1:
@@ -2062,6 +2067,7 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
2062static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, 2067static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2063 struct mtd_oob_ops *ops) 2068 struct mtd_oob_ops *ops)
2064{ 2069{
2070 unsigned int max_bitflips = 0;
2065 int page, realpage, chipnr; 2071 int page, realpage, chipnr;
2066 struct nand_chip *chip = mtd->priv; 2072 struct nand_chip *chip = mtd->priv;
2067 struct mtd_ecc_stats stats; 2073 struct mtd_ecc_stats stats;
@@ -2122,6 +2128,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2122 nand_wait_ready(mtd); 2128 nand_wait_ready(mtd);
2123 } 2129 }
2124 2130
2131 max_bitflips = max_t(unsigned int, max_bitflips, ret);
2132
2125 readlen -= len; 2133 readlen -= len;
2126 if (!readlen) 2134 if (!readlen)
2127 break; 2135 break;
@@ -2147,7 +2155,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2147 if (mtd->ecc_stats.failed - stats.failed) 2155 if (mtd->ecc_stats.failed - stats.failed)
2148 return -EBADMSG; 2156 return -EBADMSG;
2149 2157
2150 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; 2158 return max_bitflips;
2151} 2159}
2152 2160
2153/** 2161/**
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index bcba1a924c75..1f2785ee909f 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -160,7 +160,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
160 160
161 memset(&cfg, 0, sizeof(cfg)); 161 memset(&cfg, 0, sizeof(cfg));
162 cfg.direction = DMA_MEM_TO_DEV; 162 cfg.direction = DMA_MEM_TO_DEV;
163 cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl); 163 cfg.dst_addr = flctl->fifo;
164 cfg.src_addr = 0; 164 cfg.src_addr = 0;
165 ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg); 165 ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
166 if (ret < 0) 166 if (ret < 0)
@@ -176,7 +176,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
176 176
177 cfg.direction = DMA_DEV_TO_MEM; 177 cfg.direction = DMA_DEV_TO_MEM;
178 cfg.dst_addr = 0; 178 cfg.dst_addr = 0;
179 cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl); 179 cfg.src_addr = flctl->fifo;
180 ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg); 180 ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
181 if (ret < 0) 181 if (ret < 0)
182 goto err; 182 goto err;
@@ -1096,6 +1096,7 @@ static int flctl_probe(struct platform_device *pdev)
1096 flctl->reg = devm_ioremap_resource(&pdev->dev, res); 1096 flctl->reg = devm_ioremap_resource(&pdev->dev, res);
1097 if (IS_ERR(flctl->reg)) 1097 if (IS_ERR(flctl->reg))
1098 return PTR_ERR(flctl->reg); 1098 return PTR_ERR(flctl->reg);
1099 flctl->fifo = res->start + 0x24; /* FLDTFIFO */
1099 1100
1100 irq = platform_get_irq(pdev, 0); 1101 irq = platform_get_irq(pdev, 0);
1101 if (irq < 0) { 1102 if (irq < 0) {
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 824711845c44..3bb9b34d9e77 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -1046,8 +1046,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
1046 1046
1047 /* Add ECC info retrieval from DT */ 1047 /* Add ECC info retrieval from DT */
1048 for (i = 0; i < ARRAY_SIZE(strengths); i++) { 1048 for (i = 0; i < ARRAY_SIZE(strengths); i++) {
1049 if (ecc->strength <= strengths[i]) 1049 if (ecc->strength <= strengths[i]) {
1050 /*
1051 * Update ecc->strength value with the actual strength
1052 * that will be used by the ECC engine.
1053 */
1054 ecc->strength = strengths[i];
1050 break; 1055 break;
1056 }
1051 } 1057 }
1052 1058
1053 if (i >= ARRAY_SIZE(strengths)) { 1059 if (i >= ARRAY_SIZE(strengths)) {
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index c1aaf0336cf2..5cde3ad1665e 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -175,6 +175,40 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
175} 175}
176 176
177/** 177/**
178 * add_fastmap - add a Fastmap related physical eraseblock.
179 * @ai: attaching information
180 * @pnum: physical eraseblock number the VID header came from
181 * @vid_hdr: the volume identifier header
182 * @ec: erase counter of the physical eraseblock
183 *
184 * This function allocates a 'struct ubi_ainf_peb' object for a Fastamp
185 * physical eraseblock @pnum and adds it to the 'fastmap' list.
186 * Such blocks can be Fastmap super and data blocks from both the most
187 * recent Fastmap we're attaching from or from old Fastmaps which will
188 * be erased.
189 */
190static int add_fastmap(struct ubi_attach_info *ai, int pnum,
191 struct ubi_vid_hdr *vid_hdr, int ec)
192{
193 struct ubi_ainf_peb *aeb;
194
195 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
196 if (!aeb)
197 return -ENOMEM;
198
199 aeb->pnum = pnum;
200 aeb->vol_id = be32_to_cpu(vidh->vol_id);
201 aeb->sqnum = be64_to_cpu(vidh->sqnum);
202 aeb->ec = ec;
203 list_add(&aeb->u.list, &ai->fastmap);
204
205 dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum,
206 aeb->vol_id, aeb->sqnum);
207
208 return 0;
209}
210
211/**
178 * validate_vid_hdr - check volume identifier header. 212 * validate_vid_hdr - check volume identifier header.
179 * @ubi: UBI device description object 213 * @ubi: UBI device description object
180 * @vid_hdr: the volume identifier header to check 214 * @vid_hdr: the volume identifier header to check
@@ -803,13 +837,26 @@ out_unlock:
803 return err; 837 return err;
804} 838}
805 839
840static bool vol_ignored(int vol_id)
841{
842 switch (vol_id) {
843 case UBI_LAYOUT_VOLUME_ID:
844 return true;
845 }
846
847#ifdef CONFIG_MTD_UBI_FASTMAP
848 return ubi_is_fm_vol(vol_id);
849#else
850 return false;
851#endif
852}
853
806/** 854/**
807 * scan_peb - scan and process UBI headers of a PEB. 855 * scan_peb - scan and process UBI headers of a PEB.
808 * @ubi: UBI device description object 856 * @ubi: UBI device description object
809 * @ai: attaching information 857 * @ai: attaching information
810 * @pnum: the physical eraseblock number 858 * @pnum: the physical eraseblock number
811 * @vid: The volume ID of the found volume will be stored in this pointer 859 * @fast: true if we're scanning for a Fastmap
812 * @sqnum: The sqnum of the found volume will be stored in this pointer
813 * 860 *
814 * This function reads UBI headers of PEB @pnum, checks them, and adds 861 * This function reads UBI headers of PEB @pnum, checks them, and adds
815 * information about this PEB to the corresponding list or RB-tree in the 862 * information about this PEB to the corresponding list or RB-tree in the
@@ -817,9 +864,9 @@ out_unlock:
817 * successfully handled and a negative error code in case of failure. 864 * successfully handled and a negative error code in case of failure.
818 */ 865 */
819static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, 866static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
820 int pnum, int *vid, unsigned long long *sqnum) 867 int pnum, bool fast)
821{ 868{
822 long long uninitialized_var(ec); 869 long long ec;
823 int err, bitflips = 0, vol_id = -1, ec_err = 0; 870 int err, bitflips = 0, vol_id = -1, ec_err = 0;
824 871
825 dbg_bld("scan PEB %d", pnum); 872 dbg_bld("scan PEB %d", pnum);
@@ -935,6 +982,20 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
935 */ 982 */
936 ai->maybe_bad_peb_count += 1; 983 ai->maybe_bad_peb_count += 1;
937 case UBI_IO_BAD_HDR: 984 case UBI_IO_BAD_HDR:
985 /*
986 * If we're facing a bad VID header we have to drop *all*
987 * Fastmap data structures we find. The most recent Fastmap
988 * could be bad and therefore there is a chance that we attach
989 * from an old one. On a fine MTD stack a PEB must not render
990 * bad all of a sudden, but the reality is different.
991 * So, let's be paranoid and help finding the root cause by
992 * falling back to scanning mode instead of attaching with a
993 * bad EBA table and cause data corruption which is hard to
994 * analyze.
995 */
996 if (fast)
997 ai->force_full_scan = 1;
998
938 if (ec_err) 999 if (ec_err)
939 /* 1000 /*
940 * Both headers are corrupted. There is a possibility 1001 * Both headers are corrupted. There is a possibility
@@ -991,21 +1052,15 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
991 } 1052 }
992 1053
993 vol_id = be32_to_cpu(vidh->vol_id); 1054 vol_id = be32_to_cpu(vidh->vol_id);
994 if (vid) 1055 if (vol_id > UBI_MAX_VOLUMES && !vol_ignored(vol_id)) {
995 *vid = vol_id;
996 if (sqnum)
997 *sqnum = be64_to_cpu(vidh->sqnum);
998 if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
999 int lnum = be32_to_cpu(vidh->lnum); 1056 int lnum = be32_to_cpu(vidh->lnum);
1000 1057
1001 /* Unsupported internal volume */ 1058 /* Unsupported internal volume */
1002 switch (vidh->compat) { 1059 switch (vidh->compat) {
1003 case UBI_COMPAT_DELETE: 1060 case UBI_COMPAT_DELETE:
1004 if (vol_id != UBI_FM_SB_VOLUME_ID 1061 ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
1005 && vol_id != UBI_FM_DATA_VOLUME_ID) { 1062 vol_id, lnum);
1006 ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it", 1063
1007 vol_id, lnum);
1008 }
1009 err = add_to_list(ai, pnum, vol_id, lnum, 1064 err = add_to_list(ai, pnum, vol_id, lnum,
1010 ec, 1, &ai->erase); 1065 ec, 1, &ai->erase);
1011 if (err) 1066 if (err)
@@ -1037,7 +1092,12 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
1037 if (ec_err) 1092 if (ec_err)
1038 ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d", 1093 ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
1039 pnum); 1094 pnum);
1040 err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips); 1095
1096 if (ubi_is_fm_vol(vol_id))
1097 err = add_fastmap(ai, pnum, vidh, ec);
1098 else
1099 err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
1100
1041 if (err) 1101 if (err)
1042 return err; 1102 return err;
1043 1103
@@ -1186,6 +1246,10 @@ static void destroy_ai(struct ubi_attach_info *ai)
1186 list_del(&aeb->u.list); 1246 list_del(&aeb->u.list);
1187 kmem_cache_free(ai->aeb_slab_cache, aeb); 1247 kmem_cache_free(ai->aeb_slab_cache, aeb);
1188 } 1248 }
1249 list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) {
1250 list_del(&aeb->u.list);
1251 kmem_cache_free(ai->aeb_slab_cache, aeb);
1252 }
1189 1253
1190 /* Destroy the volume RB-tree */ 1254 /* Destroy the volume RB-tree */
1191 rb = ai->volumes.rb_node; 1255 rb = ai->volumes.rb_node;
@@ -1245,7 +1309,7 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
1245 cond_resched(); 1309 cond_resched();
1246 1310
1247 dbg_gen("process PEB %d", pnum); 1311 dbg_gen("process PEB %d", pnum);
1248 err = scan_peb(ubi, ai, pnum, NULL, NULL); 1312 err = scan_peb(ubi, ai, pnum, false);
1249 if (err < 0) 1313 if (err < 0)
1250 goto out_vidh; 1314 goto out_vidh;
1251 } 1315 }
@@ -1311,6 +1375,7 @@ static struct ubi_attach_info *alloc_ai(void)
1311 INIT_LIST_HEAD(&ai->free); 1375 INIT_LIST_HEAD(&ai->free);
1312 INIT_LIST_HEAD(&ai->erase); 1376 INIT_LIST_HEAD(&ai->erase);
1313 INIT_LIST_HEAD(&ai->alien); 1377 INIT_LIST_HEAD(&ai->alien);
1378 INIT_LIST_HEAD(&ai->fastmap);
1314 ai->volumes = RB_ROOT; 1379 ai->volumes = RB_ROOT;
1315 ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache", 1380 ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
1316 sizeof(struct ubi_ainf_peb), 1381 sizeof(struct ubi_ainf_peb),
@@ -1337,52 +1402,58 @@ static struct ubi_attach_info *alloc_ai(void)
1337 */ 1402 */
1338static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai) 1403static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
1339{ 1404{
1340 int err, pnum, fm_anchor = -1; 1405 int err, pnum;
1341 unsigned long long max_sqnum = 0; 1406 struct ubi_attach_info *scan_ai;
1342 1407
1343 err = -ENOMEM; 1408 err = -ENOMEM;
1344 1409
1410 scan_ai = alloc_ai();
1411 if (!scan_ai)
1412 goto out;
1413
1345 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); 1414 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1346 if (!ech) 1415 if (!ech)
1347 goto out; 1416 goto out_ai;
1348 1417
1349 vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 1418 vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
1350 if (!vidh) 1419 if (!vidh)
1351 goto out_ech; 1420 goto out_ech;
1352 1421
1353 for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) { 1422 for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
1354 int vol_id = -1;
1355 unsigned long long sqnum = -1;
1356 cond_resched(); 1423 cond_resched();
1357 1424
1358 dbg_gen("process PEB %d", pnum); 1425 dbg_gen("process PEB %d", pnum);
1359 err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum); 1426 err = scan_peb(ubi, scan_ai, pnum, true);
1360 if (err < 0) 1427 if (err < 0)
1361 goto out_vidh; 1428 goto out_vidh;
1362
1363 if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
1364 max_sqnum = sqnum;
1365 fm_anchor = pnum;
1366 }
1367 } 1429 }
1368 1430
1369 ubi_free_vid_hdr(ubi, vidh); 1431 ubi_free_vid_hdr(ubi, vidh);
1370 kfree(ech); 1432 kfree(ech);
1371 1433
1372 if (fm_anchor < 0) 1434 if (scan_ai->force_full_scan)
1373 return UBI_NO_FASTMAP; 1435 err = UBI_NO_FASTMAP;
1436 else
1437 err = ubi_scan_fastmap(ubi, *ai, scan_ai);
1374 1438
1375 destroy_ai(*ai); 1439 if (err) {
1376 *ai = alloc_ai(); 1440 /*
1377 if (!*ai) 1441 * Didn't attach via fastmap, do a full scan but reuse what
1378 return -ENOMEM; 1442 * we've aready scanned.
1443 */
1444 destroy_ai(*ai);
1445 *ai = scan_ai;
1446 } else
1447 destroy_ai(scan_ai);
1379 1448
1380 return ubi_scan_fastmap(ubi, *ai, fm_anchor); 1449 return err;
1381 1450
1382out_vidh: 1451out_vidh:
1383 ubi_free_vid_hdr(ubi, vidh); 1452 ubi_free_vid_hdr(ubi, vidh);
1384out_ech: 1453out_ech:
1385 kfree(ech); 1454 kfree(ech);
1455out_ai:
1456 destroy_ai(scan_ai);
1386out: 1457out:
1387 return err; 1458 return err;
1388} 1459}
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index ebf46ad2d513..07ad86759d92 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -99,6 +99,8 @@ struct ubiblock {
99 99
100/* Linked list of all ubiblock instances */ 100/* Linked list of all ubiblock instances */
101static LIST_HEAD(ubiblock_devices); 101static LIST_HEAD(ubiblock_devices);
102static DEFINE_IDR(ubiblock_minor_idr);
103/* Protects ubiblock_devices and ubiblock_minor_idr */
102static DEFINE_MUTEX(devices_mutex); 104static DEFINE_MUTEX(devices_mutex);
103static int ubiblock_major; 105static int ubiblock_major;
104 106
@@ -242,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
242 * in any case. 244 * in any case.
243 */ 245 */
244 if (mode & FMODE_WRITE) { 246 if (mode & FMODE_WRITE) {
245 ret = -EPERM; 247 ret = -EROFS;
246 goto out_unlock; 248 goto out_unlock;
247 } 249 }
248 250
@@ -354,8 +356,6 @@ static struct blk_mq_ops ubiblock_mq_ops = {
354 .map_queue = blk_mq_map_queue, 356 .map_queue = blk_mq_map_queue,
355}; 357};
356 358
357static DEFINE_IDR(ubiblock_minor_idr);
358
359int ubiblock_create(struct ubi_volume_info *vi) 359int ubiblock_create(struct ubi_volume_info *vi)
360{ 360{
361 struct ubiblock *dev; 361 struct ubiblock *dev;
@@ -368,14 +368,15 @@ int ubiblock_create(struct ubi_volume_info *vi)
368 /* Check that the volume isn't already handled */ 368 /* Check that the volume isn't already handled */
369 mutex_lock(&devices_mutex); 369 mutex_lock(&devices_mutex);
370 if (find_dev_nolock(vi->ubi_num, vi->vol_id)) { 370 if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
371 mutex_unlock(&devices_mutex); 371 ret = -EEXIST;
372 return -EEXIST; 372 goto out_unlock;
373 } 373 }
374 mutex_unlock(&devices_mutex);
375 374
376 dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL); 375 dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
377 if (!dev) 376 if (!dev) {
378 return -ENOMEM; 377 ret = -ENOMEM;
378 goto out_unlock;
379 }
379 380
380 mutex_init(&dev->dev_mutex); 381 mutex_init(&dev->dev_mutex);
381 382
@@ -440,14 +441,13 @@ int ubiblock_create(struct ubi_volume_info *vi)
440 goto out_free_queue; 441 goto out_free_queue;
441 } 442 }
442 443
443 mutex_lock(&devices_mutex);
444 list_add_tail(&dev->list, &ubiblock_devices); 444 list_add_tail(&dev->list, &ubiblock_devices);
445 mutex_unlock(&devices_mutex);
446 445
447 /* Must be the last step: anyone can call file ops from now on */ 446 /* Must be the last step: anyone can call file ops from now on */
448 add_disk(dev->gd); 447 add_disk(dev->gd);
449 dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)", 448 dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
450 dev->ubi_num, dev->vol_id, vi->name); 449 dev->ubi_num, dev->vol_id, vi->name);
450 mutex_unlock(&devices_mutex);
451 return 0; 451 return 0;
452 452
453out_free_queue: 453out_free_queue:
@@ -460,6 +460,8 @@ out_put_disk:
460 put_disk(dev->gd); 460 put_disk(dev->gd);
461out_free_dev: 461out_free_dev:
462 kfree(dev); 462 kfree(dev);
463out_unlock:
464 mutex_unlock(&devices_mutex);
463 465
464 return ret; 466 return ret;
465} 467}
@@ -481,30 +483,36 @@ static void ubiblock_cleanup(struct ubiblock *dev)
481int ubiblock_remove(struct ubi_volume_info *vi) 483int ubiblock_remove(struct ubi_volume_info *vi)
482{ 484{
483 struct ubiblock *dev; 485 struct ubiblock *dev;
486 int ret;
484 487
485 mutex_lock(&devices_mutex); 488 mutex_lock(&devices_mutex);
486 dev = find_dev_nolock(vi->ubi_num, vi->vol_id); 489 dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
487 if (!dev) { 490 if (!dev) {
488 mutex_unlock(&devices_mutex); 491 ret = -ENODEV;
489 return -ENODEV; 492 goto out_unlock;
490 } 493 }
491 494
492 /* Found a device, let's lock it so we can check if it's busy */ 495 /* Found a device, let's lock it so we can check if it's busy */
493 mutex_lock(&dev->dev_mutex); 496 mutex_lock(&dev->dev_mutex);
494 if (dev->refcnt > 0) { 497 if (dev->refcnt > 0) {
495 mutex_unlock(&dev->dev_mutex); 498 ret = -EBUSY;
496 mutex_unlock(&devices_mutex); 499 goto out_unlock_dev;
497 return -EBUSY;
498 } 500 }
499 501
500 /* Remove from device list */ 502 /* Remove from device list */
501 list_del(&dev->list); 503 list_del(&dev->list);
502 mutex_unlock(&devices_mutex);
503
504 ubiblock_cleanup(dev); 504 ubiblock_cleanup(dev);
505 mutex_unlock(&dev->dev_mutex); 505 mutex_unlock(&dev->dev_mutex);
506 mutex_unlock(&devices_mutex);
507
506 kfree(dev); 508 kfree(dev);
507 return 0; 509 return 0;
510
511out_unlock_dev:
512 mutex_unlock(&dev->dev_mutex);
513out_unlock:
514 mutex_unlock(&devices_mutex);
515 return ret;
508} 516}
509 517
510static int ubiblock_resize(struct ubi_volume_info *vi) 518static int ubiblock_resize(struct ubi_volume_info *vi)
@@ -633,6 +641,7 @@ static void ubiblock_remove_all(void)
633 struct ubiblock *next; 641 struct ubiblock *next;
634 struct ubiblock *dev; 642 struct ubiblock *dev;
635 643
644 mutex_lock(&devices_mutex);
636 list_for_each_entry_safe(dev, next, &ubiblock_devices, list) { 645 list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
637 /* The module is being forcefully removed */ 646 /* The module is being forcefully removed */
638 WARN_ON(dev->desc); 647 WARN_ON(dev->desc);
@@ -641,6 +650,7 @@ static void ubiblock_remove_all(void)
641 ubiblock_cleanup(dev); 650 ubiblock_cleanup(dev);
642 kfree(dev); 651 kfree(dev);
643 } 652 }
653 mutex_unlock(&devices_mutex);
644} 654}
645 655
646int __init ubiblock_init(void) 656int __init ubiblock_init(void)
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 27de0463226e..c9f5ae424af7 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -889,6 +889,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
889 return -EINVAL; 889 return -EINVAL;
890 } 890 }
891 891
892 /*
893 * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
894 * MLC NAND is different and needs special care, otherwise UBI or UBIFS
895 * will die soon and you will lose all your data.
896 */
897 if (mtd->type == MTD_MLCNANDFLASH) {
898 pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
899 mtd->index);
900 return -EINVAL;
901 }
902
892 if (ubi_num == UBI_DEV_NUM_AUTO) { 903 if (ubi_num == UBI_DEV_NUM_AUTO) {
893 /* Search for an empty slot in the @ubi_devices array */ 904 /* Search for an empty slot in the @ubi_devices array */
894 for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) 905 for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
@@ -1121,6 +1132,9 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
1121 */ 1132 */
1122 get_device(&ubi->dev); 1133 get_device(&ubi->dev);
1123 1134
1135#ifdef CONFIG_MTD_UBI_FASTMAP
1136 cancel_work_sync(&ubi->fm_work);
1137#endif
1124 ubi_debugfs_exit_dev(ubi); 1138 ubi_debugfs_exit_dev(ubi);
1125 uif_close(ubi); 1139 uif_close(ubi);
1126 1140
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 4dd0391d2942..03cf0553ec1b 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -350,6 +350,82 @@ out_unlock:
350 return err; 350 return err;
351} 351}
352 352
353#ifdef CONFIG_MTD_UBI_FASTMAP
354/**
355 * check_mapping - check and fixup a mapping
356 * @ubi: UBI device description object
357 * @vol: volume description object
358 * @lnum: logical eraseblock number
359 * @pnum: physical eraseblock number
360 *
361 * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
362 * operations, if such an operation is interrupted the mapping still looks
363 * good, but upon first read an ECC is reported to the upper layer.
364 * Normaly during the full-scan at attach time this is fixed, for Fastmap
365 * we have to deal with it while reading.
366 * If the PEB behind a LEB shows this symthom we change the mapping to
367 * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
368 *
369 * Returns 0 on success, negative error code in case of failure.
370 */
371static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
372 int *pnum)
373{
374 int err;
375 struct ubi_vid_hdr *vid_hdr;
376
377 if (!ubi->fast_attach)
378 return 0;
379
380 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
381 if (!vid_hdr)
382 return -ENOMEM;
383
384 err = ubi_io_read_vid_hdr(ubi, *pnum, vid_hdr, 0);
385 if (err > 0 && err != UBI_IO_BITFLIPS) {
386 int torture = 0;
387
388 switch (err) {
389 case UBI_IO_FF:
390 case UBI_IO_FF_BITFLIPS:
391 case UBI_IO_BAD_HDR:
392 case UBI_IO_BAD_HDR_EBADMSG:
393 break;
394 default:
395 ubi_assert(0);
396 }
397
398 if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
399 torture = 1;
400
401 down_read(&ubi->fm_eba_sem);
402 vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
403 up_read(&ubi->fm_eba_sem);
404 ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
405
406 *pnum = UBI_LEB_UNMAPPED;
407 } else if (err < 0) {
408 ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
409 *pnum, err);
410
411 goto out_free;
412 }
413
414 err = 0;
415
416out_free:
417 ubi_free_vid_hdr(ubi, vid_hdr);
418
419 return err;
420}
421#else
422static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
423 int *pnum)
424{
425 return 0;
426}
427#endif
428
353/** 429/**
354 * ubi_eba_read_leb - read data. 430 * ubi_eba_read_leb - read data.
355 * @ubi: UBI device description object 431 * @ubi: UBI device description object
@@ -381,7 +457,13 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
381 return err; 457 return err;
382 458
383 pnum = vol->eba_tbl[lnum]; 459 pnum = vol->eba_tbl[lnum];
384 if (pnum < 0) { 460 if (pnum >= 0) {
461 err = check_mapping(ubi, vol, lnum, &pnum);
462 if (err < 0)
463 goto out_unlock;
464 }
465
466 if (pnum == UBI_LEB_UNMAPPED) {
385 /* 467 /*
386 * The logical eraseblock is not mapped, fill the whole buffer 468 * The logical eraseblock is not mapped, fill the whole buffer
387 * with 0xFF bytes. The exception is static volumes for which 469 * with 0xFF bytes. The exception is static volumes for which
@@ -697,6 +779,14 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
697 779
698 pnum = vol->eba_tbl[lnum]; 780 pnum = vol->eba_tbl[lnum];
699 if (pnum >= 0) { 781 if (pnum >= 0) {
782 err = check_mapping(ubi, vol, lnum, &pnum);
783 if (err < 0) {
784 leb_write_unlock(ubi, vol_id, lnum);
785 return err;
786 }
787 }
788
789 if (pnum >= 0) {
700 dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d", 790 dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
701 len, offset, vol_id, lnum, pnum); 791 len, offset, vol_id, lnum, pnum);
702 792
@@ -1088,6 +1178,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1088 struct ubi_volume *vol; 1178 struct ubi_volume *vol;
1089 uint32_t crc; 1179 uint32_t crc;
1090 1180
1181 ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
1182
1091 vol_id = be32_to_cpu(vid_hdr->vol_id); 1183 vol_id = be32_to_cpu(vid_hdr->vol_id);
1092 lnum = be32_to_cpu(vid_hdr->lnum); 1184 lnum = be32_to_cpu(vid_hdr->lnum);
1093 1185
@@ -1256,9 +1348,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1256 } 1348 }
1257 1349
1258 ubi_assert(vol->eba_tbl[lnum] == from); 1350 ubi_assert(vol->eba_tbl[lnum] == from);
1259 down_read(&ubi->fm_eba_sem);
1260 vol->eba_tbl[lnum] = to; 1351 vol->eba_tbl[lnum] = to;
1261 up_read(&ubi->fm_eba_sem);
1262 1352
1263out_unlock_buf: 1353out_unlock_buf:
1264 mutex_unlock(&ubi->buf_mutex); 1354 mutex_unlock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 30d3999dddba..69dd21679a30 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -262,6 +262,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
262 struct ubi_fm_pool *pool = &ubi->fm_wl_pool; 262 struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
263 int pnum; 263 int pnum;
264 264
265 ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
266
265 if (pool->used == pool->size) { 267 if (pool->used == pool->size) {
266 /* We cannot update the fastmap here because this 268 /* We cannot update the fastmap here because this
267 * function is called in atomic context. 269 * function is called in atomic context.
@@ -303,7 +305,7 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
303 305
304 wrk->anchor = 1; 306 wrk->anchor = 1;
305 wrk->func = &wear_leveling_worker; 307 wrk->func = &wear_leveling_worker;
306 schedule_ubi_work(ubi, wrk); 308 __schedule_ubi_work(ubi, wrk);
307 return 0; 309 return 0;
308} 310}
309 311
@@ -344,7 +346,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
344 spin_unlock(&ubi->wl_lock); 346 spin_unlock(&ubi->wl_lock);
345 347
346 vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID; 348 vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
347 return schedule_erase(ubi, e, vol_id, lnum, torture); 349 return schedule_erase(ubi, e, vol_id, lnum, torture, true);
348} 350}
349 351
350/** 352/**
@@ -360,7 +362,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
360{ 362{
361 int i; 363 int i;
362 364
363 flush_work(&ubi->fm_work);
364 return_unused_pool_pebs(ubi, &ubi->fm_pool); 365 return_unused_pool_pebs(ubi, &ubi->fm_pool);
365 return_unused_pool_pebs(ubi, &ubi->fm_wl_pool); 366 return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
366 367
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index bba7dd1b5ebf..72e89b352034 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -326,6 +326,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
326 aeb->pnum = new_aeb->pnum; 326 aeb->pnum = new_aeb->pnum;
327 aeb->copy_flag = new_vh->copy_flag; 327 aeb->copy_flag = new_vh->copy_flag;
328 aeb->scrub = new_aeb->scrub; 328 aeb->scrub = new_aeb->scrub;
329 aeb->sqnum = new_aeb->sqnum;
329 kmem_cache_free(ai->aeb_slab_cache, new_aeb); 330 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
330 331
331 /* new_aeb is older */ 332 /* new_aeb is older */
@@ -851,27 +852,57 @@ fail:
851} 852}
852 853
853/** 854/**
855 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
856 * @ai: UBI attach info to be filled
857 */
858static int find_fm_anchor(struct ubi_attach_info *ai)
859{
860 int ret = -1;
861 struct ubi_ainf_peb *aeb;
862 unsigned long long max_sqnum = 0;
863
864 list_for_each_entry(aeb, &ai->fastmap, u.list) {
865 if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
866 max_sqnum = aeb->sqnum;
867 ret = aeb->pnum;
868 }
869 }
870
871 return ret;
872}
873
874/**
854 * ubi_scan_fastmap - scan the fastmap. 875 * ubi_scan_fastmap - scan the fastmap.
855 * @ubi: UBI device object 876 * @ubi: UBI device object
856 * @ai: UBI attach info to be filled 877 * @ai: UBI attach info to be filled
857 * @fm_anchor: The fastmap starts at this PEB 878 * @scan_ai: UBI attach info from the first 64 PEBs,
879 * used to find the most recent Fastmap data structure
858 * 880 *
859 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found, 881 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
860 * UBI_BAD_FASTMAP if one was found but is not usable. 882 * UBI_BAD_FASTMAP if one was found but is not usable.
861 * < 0 indicates an internal error. 883 * < 0 indicates an internal error.
862 */ 884 */
863int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, 885int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
864 int fm_anchor) 886 struct ubi_attach_info *scan_ai)
865{ 887{
866 struct ubi_fm_sb *fmsb, *fmsb2; 888 struct ubi_fm_sb *fmsb, *fmsb2;
867 struct ubi_vid_hdr *vh; 889 struct ubi_vid_hdr *vh;
868 struct ubi_ec_hdr *ech; 890 struct ubi_ec_hdr *ech;
869 struct ubi_fastmap_layout *fm; 891 struct ubi_fastmap_layout *fm;
870 int i, used_blocks, pnum, ret = 0; 892 struct ubi_ainf_peb *tmp_aeb, *aeb;
893 int i, used_blocks, pnum, fm_anchor, ret = 0;
871 size_t fm_size; 894 size_t fm_size;
872 __be32 crc, tmp_crc; 895 __be32 crc, tmp_crc;
873 unsigned long long sqnum = 0; 896 unsigned long long sqnum = 0;
874 897
898 fm_anchor = find_fm_anchor(scan_ai);
899 if (fm_anchor < 0)
900 return UBI_NO_FASTMAP;
901
902 /* Move all (possible) fastmap blocks into our new attach structure. */
903 list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list)
904 list_move_tail(&aeb->u.list, &ai->fastmap);
905
875 down_write(&ubi->fm_protect); 906 down_write(&ubi->fm_protect);
876 memset(ubi->fm_buf, 0, ubi->fm_size); 907 memset(ubi->fm_buf, 0, ubi->fm_size);
877 908
@@ -1484,22 +1515,30 @@ int ubi_update_fastmap(struct ubi_device *ubi)
1484 struct ubi_wl_entry *tmp_e; 1515 struct ubi_wl_entry *tmp_e;
1485 1516
1486 down_write(&ubi->fm_protect); 1517 down_write(&ubi->fm_protect);
1518 down_write(&ubi->work_sem);
1519 down_write(&ubi->fm_eba_sem);
1487 1520
1488 ubi_refill_pools(ubi); 1521 ubi_refill_pools(ubi);
1489 1522
1490 if (ubi->ro_mode || ubi->fm_disabled) { 1523 if (ubi->ro_mode || ubi->fm_disabled) {
1524 up_write(&ubi->fm_eba_sem);
1525 up_write(&ubi->work_sem);
1491 up_write(&ubi->fm_protect); 1526 up_write(&ubi->fm_protect);
1492 return 0; 1527 return 0;
1493 } 1528 }
1494 1529
1495 ret = ubi_ensure_anchor_pebs(ubi); 1530 ret = ubi_ensure_anchor_pebs(ubi);
1496 if (ret) { 1531 if (ret) {
1532 up_write(&ubi->fm_eba_sem);
1533 up_write(&ubi->work_sem);
1497 up_write(&ubi->fm_protect); 1534 up_write(&ubi->fm_protect);
1498 return ret; 1535 return ret;
1499 } 1536 }
1500 1537
1501 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); 1538 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1502 if (!new_fm) { 1539 if (!new_fm) {
1540 up_write(&ubi->fm_eba_sem);
1541 up_write(&ubi->work_sem);
1503 up_write(&ubi->fm_protect); 1542 up_write(&ubi->fm_protect);
1504 return -ENOMEM; 1543 return -ENOMEM;
1505 } 1544 }
@@ -1608,16 +1647,14 @@ int ubi_update_fastmap(struct ubi_device *ubi)
1608 new_fm->e[0] = tmp_e; 1647 new_fm->e[0] = tmp_e;
1609 } 1648 }
1610 1649
1611 down_write(&ubi->work_sem);
1612 down_write(&ubi->fm_eba_sem);
1613 ret = ubi_write_fastmap(ubi, new_fm); 1650 ret = ubi_write_fastmap(ubi, new_fm);
1614 up_write(&ubi->fm_eba_sem);
1615 up_write(&ubi->work_sem);
1616 1651
1617 if (ret) 1652 if (ret)
1618 goto err; 1653 goto err;
1619 1654
1620out_unlock: 1655out_unlock:
1656 up_write(&ubi->fm_eba_sem);
1657 up_write(&ubi->work_sem);
1621 up_write(&ubi->fm_protect); 1658 up_write(&ubi->fm_protect);
1622 kfree(old_fm); 1659 kfree(old_fm);
1623 return ret; 1660 return ret;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index de1ea2e4c37d..05d9ec66437c 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -699,6 +699,8 @@ struct ubi_ainf_volume {
699 * @erase: list of physical eraseblocks which have to be erased 699 * @erase: list of physical eraseblocks which have to be erased
700 * @alien: list of physical eraseblocks which should not be used by UBI (e.g., 700 * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
701 * those belonging to "preserve"-compatible internal volumes) 701 * those belonging to "preserve"-compatible internal volumes)
702 * @fastmap: list of physical eraseblocks which relate to fastmap (e.g.,
703 * eraseblocks of the current and not yet erased old fastmap blocks)
702 * @corr_peb_count: count of PEBs in the @corr list 704 * @corr_peb_count: count of PEBs in the @corr list
703 * @empty_peb_count: count of PEBs which are presumably empty (contain only 705 * @empty_peb_count: count of PEBs which are presumably empty (contain only
704 * 0xFF bytes) 706 * 0xFF bytes)
@@ -709,6 +711,8 @@ struct ubi_ainf_volume {
709 * @vols_found: number of volumes found 711 * @vols_found: number of volumes found
710 * @highest_vol_id: highest volume ID 712 * @highest_vol_id: highest volume ID
711 * @is_empty: flag indicating whether the MTD device is empty or not 713 * @is_empty: flag indicating whether the MTD device is empty or not
714 * @force_full_scan: flag indicating whether we need to do a full scan and drop
715 all existing Fastmap data structures
712 * @min_ec: lowest erase counter value 716 * @min_ec: lowest erase counter value
713 * @max_ec: highest erase counter value 717 * @max_ec: highest erase counter value
714 * @max_sqnum: highest sequence number value 718 * @max_sqnum: highest sequence number value
@@ -727,6 +731,7 @@ struct ubi_attach_info {
727 struct list_head free; 731 struct list_head free;
728 struct list_head erase; 732 struct list_head erase;
729 struct list_head alien; 733 struct list_head alien;
734 struct list_head fastmap;
730 int corr_peb_count; 735 int corr_peb_count;
731 int empty_peb_count; 736 int empty_peb_count;
732 int alien_peb_count; 737 int alien_peb_count;
@@ -735,6 +740,7 @@ struct ubi_attach_info {
735 int vols_found; 740 int vols_found;
736 int highest_vol_id; 741 int highest_vol_id;
737 int is_empty; 742 int is_empty;
743 int force_full_scan;
738 int min_ec; 744 int min_ec;
739 int max_ec; 745 int max_ec;
740 unsigned long long max_sqnum; 746 unsigned long long max_sqnum;
@@ -907,7 +913,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
907size_t ubi_calc_fm_size(struct ubi_device *ubi); 913size_t ubi_calc_fm_size(struct ubi_device *ubi);
908int ubi_update_fastmap(struct ubi_device *ubi); 914int ubi_update_fastmap(struct ubi_device *ubi);
909int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, 915int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
910 int fm_anchor); 916 struct ubi_attach_info *scan_ai);
911#else 917#else
912static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; } 918static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
913#endif 919#endif
@@ -1101,4 +1107,42 @@ static inline int idx2vol_id(const struct ubi_device *ubi, int idx)
1101 return idx; 1107 return idx;
1102} 1108}
1103 1109
1110/**
1111 * ubi_is_fm_vol - check whether a volume ID is a Fastmap volume.
1112 * @vol_id: volume ID
1113 */
1114static inline bool ubi_is_fm_vol(int vol_id)
1115{
1116 switch (vol_id) {
1117 case UBI_FM_SB_VOLUME_ID:
1118 case UBI_FM_DATA_VOLUME_ID:
1119 return true;
1120 }
1121
1122 return false;
1123}
1124
1125/**
1126 * ubi_find_fm_block - check whether a PEB is part of the current Fastmap.
1127 * @ubi: UBI device description object
1128 * @pnum: physical eraseblock to look for
1129 *
1130 * This function returns a wear leveling object if @pnum relates to the current
1131 * fastmap, @NULL otherwise.
1132 */
1133static inline struct ubi_wl_entry *ubi_find_fm_block(const struct ubi_device *ubi,
1134 int pnum)
1135{
1136 int i;
1137
1138 if (ubi->fm) {
1139 for (i = 0; i < ubi->fm->used_blocks; i++) {
1140 if (ubi->fm->e[i]->pnum == pnum)
1141 return ubi->fm->e[i];
1142 }
1143 }
1144
1145 return NULL;
1146}
1147
1104#endif /* !__UBI_UBI_H__ */ 1148#endif /* !__UBI_UBI_H__ */
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 3ea4c022cbb9..ccdb3dd74421 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -265,6 +265,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
265 vol->last_eb_bytes = vol->usable_leb_size; 265 vol->last_eb_bytes = vol->usable_leb_size;
266 } 266 }
267 267
268 /* Make volume "available" before it becomes accessible via sysfs */
269 spin_lock(&ubi->volumes_lock);
270 ubi->volumes[vol_id] = vol;
271 ubi->vol_count += 1;
272 spin_unlock(&ubi->volumes_lock);
273
268 /* Register character device for the volume */ 274 /* Register character device for the volume */
269 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 275 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
270 vol->cdev.owner = THIS_MODULE; 276 vol->cdev.owner = THIS_MODULE;
@@ -304,11 +310,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
304 if (err) 310 if (err)
305 goto out_sysfs; 311 goto out_sysfs;
306 312
307 spin_lock(&ubi->volumes_lock);
308 ubi->volumes[vol_id] = vol;
309 ubi->vol_count += 1;
310 spin_unlock(&ubi->volumes_lock);
311
312 ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED); 313 ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
313 self_check_volumes(ubi); 314 self_check_volumes(ubi);
314 return err; 315 return err;
@@ -328,6 +329,10 @@ out_sysfs:
328out_cdev: 329out_cdev:
329 cdev_del(&vol->cdev); 330 cdev_del(&vol->cdev);
330out_mapping: 331out_mapping:
332 spin_lock(&ubi->volumes_lock);
333 ubi->volumes[vol_id] = NULL;
334 ubi->vol_count -= 1;
335 spin_unlock(&ubi->volumes_lock);
331 if (do_free) 336 if (do_free)
332 kfree(vol->eba_tbl); 337 kfree(vol->eba_tbl);
333out_acc: 338out_acc:
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 75286588b823..b3c1b8106a68 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -580,7 +580,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
580 * failure. 580 * failure.
581 */ 581 */
582static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, 582static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
583 int vol_id, int lnum, int torture) 583 int vol_id, int lnum, int torture, bool nested)
584{ 584{
585 struct ubi_work *wl_wrk; 585 struct ubi_work *wl_wrk;
586 586
@@ -599,7 +599,10 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
599 wl_wrk->lnum = lnum; 599 wl_wrk->lnum = lnum;
600 wl_wrk->torture = torture; 600 wl_wrk->torture = torture;
601 601
602 schedule_ubi_work(ubi, wl_wrk); 602 if (nested)
603 __schedule_ubi_work(ubi, wl_wrk);
604 else
605 schedule_ubi_work(ubi, wl_wrk);
603 return 0; 606 return 0;
604} 607}
605 608
@@ -658,6 +661,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
658 if (!vid_hdr) 661 if (!vid_hdr)
659 return -ENOMEM; 662 return -ENOMEM;
660 663
664 down_read(&ubi->fm_eba_sem);
661 mutex_lock(&ubi->move_mutex); 665 mutex_lock(&ubi->move_mutex);
662 spin_lock(&ubi->wl_lock); 666 spin_lock(&ubi->wl_lock);
663 ubi_assert(!ubi->move_from && !ubi->move_to); 667 ubi_assert(!ubi->move_from && !ubi->move_to);
@@ -884,6 +888,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
884 888
885 dbg_wl("done"); 889 dbg_wl("done");
886 mutex_unlock(&ubi->move_mutex); 890 mutex_unlock(&ubi->move_mutex);
891 up_read(&ubi->fm_eba_sem);
887 return 0; 892 return 0;
888 893
889 /* 894 /*
@@ -925,6 +930,7 @@ out_not_moved:
925 } 930 }
926 931
927 mutex_unlock(&ubi->move_mutex); 932 mutex_unlock(&ubi->move_mutex);
933 up_read(&ubi->fm_eba_sem);
928 return 0; 934 return 0;
929 935
930out_error: 936out_error:
@@ -946,6 +952,7 @@ out_error:
946out_ro: 952out_ro:
947 ubi_ro_mode(ubi); 953 ubi_ro_mode(ubi);
948 mutex_unlock(&ubi->move_mutex); 954 mutex_unlock(&ubi->move_mutex);
955 up_read(&ubi->fm_eba_sem);
949 ubi_assert(err != 0); 956 ubi_assert(err != 0);
950 return err < 0 ? err : -EIO; 957 return err < 0 ? err : -EIO;
951 958
@@ -953,6 +960,7 @@ out_cancel:
953 ubi->wl_scheduled = 0; 960 ubi->wl_scheduled = 0;
954 spin_unlock(&ubi->wl_lock); 961 spin_unlock(&ubi->wl_lock);
955 mutex_unlock(&ubi->move_mutex); 962 mutex_unlock(&ubi->move_mutex);
963 up_read(&ubi->fm_eba_sem);
956 ubi_free_vid_hdr(ubi, vid_hdr); 964 ubi_free_vid_hdr(ubi, vid_hdr);
957 return 0; 965 return 0;
958} 966}
@@ -1075,7 +1083,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
1075 int err1; 1083 int err1;
1076 1084
1077 /* Re-schedule the LEB for erasure */ 1085 /* Re-schedule the LEB for erasure */
1078 err1 = schedule_erase(ubi, e, vol_id, lnum, 0); 1086 err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
1079 if (err1) { 1087 if (err1) {
1080 wl_entry_destroy(ubi, e); 1088 wl_entry_destroy(ubi, e);
1081 err = err1; 1089 err = err1;
@@ -1256,7 +1264,7 @@ retry:
1256 } 1264 }
1257 spin_unlock(&ubi->wl_lock); 1265 spin_unlock(&ubi->wl_lock);
1258 1266
1259 err = schedule_erase(ubi, e, vol_id, lnum, torture); 1267 err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
1260 if (err) { 1268 if (err) {
1261 spin_lock(&ubi->wl_lock); 1269 spin_lock(&ubi->wl_lock);
1262 wl_tree_add(e, &ubi->used); 1270 wl_tree_add(e, &ubi->used);
@@ -1479,6 +1487,7 @@ int ubi_thread(void *u)
1479 } 1487 }
1480 1488
1481 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); 1489 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1490 ubi->thread_enabled = 0;
1482 return 0; 1491 return 0;
1483} 1492}
1484 1493
@@ -1488,9 +1497,6 @@ int ubi_thread(void *u)
1488 */ 1497 */
1489static void shutdown_work(struct ubi_device *ubi) 1498static void shutdown_work(struct ubi_device *ubi)
1490{ 1499{
1491#ifdef CONFIG_MTD_UBI_FASTMAP
1492 flush_work(&ubi->fm_work);
1493#endif
1494 while (!list_empty(&ubi->works)) { 1500 while (!list_empty(&ubi->works)) {
1495 struct ubi_work *wrk; 1501 struct ubi_work *wrk;
1496 1502
@@ -1503,6 +1509,46 @@ static void shutdown_work(struct ubi_device *ubi)
1503} 1509}
1504 1510
1505/** 1511/**
1512 * erase_aeb - erase a PEB given in UBI attach info PEB
1513 * @ubi: UBI device description object
1514 * @aeb: UBI attach info PEB
1515 * @sync: If true, erase synchronously. Otherwise schedule for erasure
1516 */
1517static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
1518{
1519 struct ubi_wl_entry *e;
1520 int err;
1521
1522 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1523 if (!e)
1524 return -ENOMEM;
1525
1526 e->pnum = aeb->pnum;
1527 e->ec = aeb->ec;
1528 ubi->lookuptbl[e->pnum] = e;
1529
1530 if (sync) {
1531 err = sync_erase(ubi, e, false);
1532 if (err)
1533 goto out_free;
1534
1535 wl_tree_add(e, &ubi->free);
1536 ubi->free_count++;
1537 } else {
1538 err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
1539 if (err)
1540 goto out_free;
1541 }
1542
1543 return 0;
1544
1545out_free:
1546 wl_entry_destroy(ubi, e);
1547
1548 return err;
1549}
1550
1551/**
1506 * ubi_wl_init - initialize the WL sub-system using attaching information. 1552 * ubi_wl_init - initialize the WL sub-system using attaching information.
1507 * @ubi: UBI device description object 1553 * @ubi: UBI device description object
1508 * @ai: attaching information 1554 * @ai: attaching information
@@ -1539,17 +1585,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1539 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { 1585 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1540 cond_resched(); 1586 cond_resched();
1541 1587
1542 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1588 err = erase_aeb(ubi, aeb, false);
1543 if (!e) 1589 if (err)
1544 goto out_free;
1545
1546 e->pnum = aeb->pnum;
1547 e->ec = aeb->ec;
1548 ubi->lookuptbl[e->pnum] = e;
1549 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1550 wl_entry_destroy(ubi, e);
1551 goto out_free; 1590 goto out_free;
1552 }
1553 1591
1554 found_pebs++; 1592 found_pebs++;
1555 } 1593 }
@@ -1600,19 +1638,49 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1600 } 1638 }
1601 } 1639 }
1602 1640
1603 dbg_wl("found %i PEBs", found_pebs); 1641 list_for_each_entry(aeb, &ai->fastmap, u.list) {
1642 cond_resched();
1604 1643
1605 if (ubi->fm) { 1644 e = ubi_find_fm_block(ubi, aeb->pnum);
1606 ubi_assert(ubi->good_peb_count ==
1607 found_pebs + ubi->fm->used_blocks);
1608 1645
1609 for (i = 0; i < ubi->fm->used_blocks; i++) { 1646 if (e) {
1610 e = ubi->fm->e[i]; 1647 ubi_assert(!ubi->lookuptbl[e->pnum]);
1611 ubi->lookuptbl[e->pnum] = e; 1648 ubi->lookuptbl[e->pnum] = e;
1649 } else {
1650 bool sync = false;
1651
1652 /*
1653 * Usually old Fastmap PEBs are scheduled for erasure
1654 * and we don't have to care about them but if we face
1655 * an power cut before scheduling them we need to
1656 * take care of them here.
1657 */
1658 if (ubi->lookuptbl[aeb->pnum])
1659 continue;
1660
1661 /*
1662 * The fastmap update code might not find a free PEB for
1663 * writing the fastmap anchor to and then reuses the
1664 * current fastmap anchor PEB. When this PEB gets erased
1665 * and a power cut happens before it is written again we
1666 * must make sure that the fastmap attach code doesn't
1667 * find any outdated fastmap anchors, hence we erase the
1668 * outdated fastmap anchor PEBs synchronously here.
1669 */
1670 if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
1671 sync = true;
1672
1673 err = erase_aeb(ubi, aeb, sync);
1674 if (err)
1675 goto out_free;
1612 } 1676 }
1677
1678 found_pebs++;
1613 } 1679 }
1614 else 1680
1615 ubi_assert(ubi->good_peb_count == found_pebs); 1681 dbg_wl("found %i PEBs", found_pebs);
1682
1683 ubi_assert(ubi->good_peb_count == found_pebs);
1616 1684
1617 reserved_pebs = WL_RESERVED_PEBS; 1685 reserved_pebs = WL_RESERVED_PEBS;
1618 ubi_fastmap_init(ubi, &reserved_pebs); 1686 ubi_fastmap_init(ubi, &reserved_pebs);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f184fb5bd110..5116aec3c174 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -411,6 +411,9 @@ config XEN_NETDEV_BACKEND
411config VMXNET3 411config VMXNET3
412 tristate "VMware VMXNET3 ethernet driver" 412 tristate "VMware VMXNET3 ethernet driver"
413 depends on PCI && INET 413 depends on PCI && INET
414 depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \
415 IA64_PAGE_SIZE_64KB || MICROBLAZE_64K_PAGES || \
416 PARISC_PAGE_SIZE_64KB || PPC_64K_PAGES)
414 help 417 help
415 This driver supports VMware's vmxnet3 virtual ethernet NIC. 418 This driver supports VMware's vmxnet3 virtual ethernet NIC.
416 To compile this driver as a module, choose M here: the 419 To compile this driver as a module, choose M here: the
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index bb9e9fc45e1b..82d23bd3a742 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -453,7 +453,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
453{ 453{
454 int i; 454 int i;
455 455
456 if (!client_info->slave) 456 if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
457 return; 457 return;
458 458
459 for (i = 0; i < RLB_ARP_BURST_SIZE; i++) { 459 for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2cb34b0f3856..339118f3c718 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1490,39 +1490,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1490 goto err_close; 1490 goto err_close;
1491 } 1491 }
1492 1492
1493 /* If the mode uses primary, then the following is handled by
1494 * bond_change_active_slave().
1495 */
1496 if (!bond_uses_primary(bond)) {
1497 /* set promiscuity level to new slave */
1498 if (bond_dev->flags & IFF_PROMISC) {
1499 res = dev_set_promiscuity(slave_dev, 1);
1500 if (res)
1501 goto err_close;
1502 }
1503
1504 /* set allmulti level to new slave */
1505 if (bond_dev->flags & IFF_ALLMULTI) {
1506 res = dev_set_allmulti(slave_dev, 1);
1507 if (res)
1508 goto err_close;
1509 }
1510
1511 netif_addr_lock_bh(bond_dev);
1512
1513 dev_mc_sync_multiple(slave_dev, bond_dev);
1514 dev_uc_sync_multiple(slave_dev, bond_dev);
1515
1516 netif_addr_unlock_bh(bond_dev);
1517 }
1518
1519 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1520 /* add lacpdu mc addr to mc list */
1521 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1522
1523 dev_mc_add(slave_dev, lacpdu_multicast);
1524 }
1525
1526 res = vlan_vids_add_by_dev(slave_dev, bond_dev); 1493 res = vlan_vids_add_by_dev(slave_dev, bond_dev);
1527 if (res) { 1494 if (res) {
1528 netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n", 1495 netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
@@ -1647,8 +1614,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1647 } /* switch(bond_mode) */ 1614 } /* switch(bond_mode) */
1648 1615
1649#ifdef CONFIG_NET_POLL_CONTROLLER 1616#ifdef CONFIG_NET_POLL_CONTROLLER
1650 slave_dev->npinfo = bond->dev->npinfo; 1617 if (bond->dev->npinfo) {
1651 if (slave_dev->npinfo) {
1652 if (slave_enable_netpoll(new_slave)) { 1618 if (slave_enable_netpoll(new_slave)) {
1653 netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n"); 1619 netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
1654 res = -EBUSY; 1620 res = -EBUSY;
@@ -1679,6 +1645,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1679 goto err_upper_unlink; 1645 goto err_upper_unlink;
1680 } 1646 }
1681 1647
1648 /* If the mode uses primary, then the following is handled by
1649 * bond_change_active_slave().
1650 */
1651 if (!bond_uses_primary(bond)) {
1652 /* set promiscuity level to new slave */
1653 if (bond_dev->flags & IFF_PROMISC) {
1654 res = dev_set_promiscuity(slave_dev, 1);
1655 if (res)
1656 goto err_sysfs_del;
1657 }
1658
1659 /* set allmulti level to new slave */
1660 if (bond_dev->flags & IFF_ALLMULTI) {
1661 res = dev_set_allmulti(slave_dev, 1);
1662 if (res) {
1663 if (bond_dev->flags & IFF_PROMISC)
1664 dev_set_promiscuity(slave_dev, -1);
1665 goto err_sysfs_del;
1666 }
1667 }
1668
1669 netif_addr_lock_bh(bond_dev);
1670 dev_mc_sync_multiple(slave_dev, bond_dev);
1671 dev_uc_sync_multiple(slave_dev, bond_dev);
1672 netif_addr_unlock_bh(bond_dev);
1673
1674 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1675 /* add lacpdu mc addr to mc list */
1676 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1677
1678 dev_mc_add(slave_dev, lacpdu_multicast);
1679 }
1680 }
1681
1682 bond->slave_cnt++; 1682 bond->slave_cnt++;
1683 bond_compute_features(bond); 1683 bond_compute_features(bond);
1684 bond_set_carrier(bond); 1684 bond_set_carrier(bond);
@@ -1702,6 +1702,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1702 return 0; 1702 return 0;
1703 1703
1704/* Undo stages on error */ 1704/* Undo stages on error */
1705err_sysfs_del:
1706 bond_sysfs_slave_del(new_slave);
1707
1705err_upper_unlink: 1708err_upper_unlink:
1706 bond_upper_dev_unlink(bond_dev, slave_dev); 1709 bond_upper_dev_unlink(bond_dev, slave_dev);
1707 1710
@@ -1709,9 +1712,6 @@ err_unregister:
1709 netdev_rx_handler_unregister(slave_dev); 1712 netdev_rx_handler_unregister(slave_dev);
1710 1713
1711err_detach: 1714err_detach:
1712 if (!bond_uses_primary(bond))
1713 bond_hw_addr_flush(bond_dev, slave_dev);
1714
1715 vlan_vids_del_by_dev(slave_dev, bond_dev); 1715 vlan_vids_del_by_dev(slave_dev, bond_dev);
1716 if (rcu_access_pointer(bond->primary_slave) == new_slave) 1716 if (rcu_access_pointer(bond->primary_slave) == new_slave)
1717 RCU_INIT_POINTER(bond->primary_slave, NULL); 1717 RCU_INIT_POINTER(bond->primary_slave, NULL);
@@ -2555,11 +2555,13 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2555 bond_for_each_slave_rcu(bond, slave, iter) { 2555 bond_for_each_slave_rcu(bond, slave, iter) {
2556 unsigned long trans_start = dev_trans_start(slave->dev); 2556 unsigned long trans_start = dev_trans_start(slave->dev);
2557 2557
2558 slave->new_link = BOND_LINK_NOCHANGE;
2559
2558 if (slave->link != BOND_LINK_UP) { 2560 if (slave->link != BOND_LINK_UP) {
2559 if (bond_time_in_interval(bond, trans_start, 1) && 2561 if (bond_time_in_interval(bond, trans_start, 1) &&
2560 bond_time_in_interval(bond, slave->last_rx, 1)) { 2562 bond_time_in_interval(bond, slave->last_rx, 1)) {
2561 2563
2562 slave->link = BOND_LINK_UP; 2564 slave->new_link = BOND_LINK_UP;
2563 slave_state_changed = 1; 2565 slave_state_changed = 1;
2564 2566
2565 /* primary_slave has no meaning in round-robin 2567 /* primary_slave has no meaning in round-robin
@@ -2586,7 +2588,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2586 if (!bond_time_in_interval(bond, trans_start, 2) || 2588 if (!bond_time_in_interval(bond, trans_start, 2) ||
2587 !bond_time_in_interval(bond, slave->last_rx, 2)) { 2589 !bond_time_in_interval(bond, slave->last_rx, 2)) {
2588 2590
2589 slave->link = BOND_LINK_DOWN; 2591 slave->new_link = BOND_LINK_DOWN;
2590 slave_state_changed = 1; 2592 slave_state_changed = 1;
2591 2593
2592 if (slave->link_failure_count < UINT_MAX) 2594 if (slave->link_failure_count < UINT_MAX)
@@ -2617,6 +2619,11 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2617 if (!rtnl_trylock()) 2619 if (!rtnl_trylock())
2618 goto re_arm; 2620 goto re_arm;
2619 2621
2622 bond_for_each_slave(bond, slave, iter) {
2623 if (slave->new_link != BOND_LINK_NOCHANGE)
2624 slave->link = slave->new_link;
2625 }
2626
2620 if (slave_state_changed) { 2627 if (slave_state_changed) {
2621 bond_slave_state_change(bond); 2628 bond_slave_state_change(bond);
2622 if (BOND_MODE(bond) == BOND_MODE_XOR) 2629 if (BOND_MODE(bond) == BOND_MODE_XOR)
@@ -3276,12 +3283,17 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
3276 for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { 3283 for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
3277 u64 nv = new[i]; 3284 u64 nv = new[i];
3278 u64 ov = old[i]; 3285 u64 ov = old[i];
3286 s64 delta = nv - ov;
3279 3287
3280 /* detects if this particular field is 32bit only */ 3288 /* detects if this particular field is 32bit only */
3281 if (((nv | ov) >> 32) == 0) 3289 if (((nv | ov) >> 32) == 0)
3282 res[i] += (u32)nv - (u32)ov; 3290 delta = (s64)(s32)((u32)nv - (u32)ov);
3283 else 3291
3284 res[i] += nv - ov; 3292 /* filter anomalies, some drivers reset their stats
3293 * at down/up events.
3294 */
3295 if (delta > 0)
3296 res[i] += delta;
3285 } 3297 }
3286} 3298}
3287 3299
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 55e93b6b6d21..66560a8fcfa2 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1115,6 +1115,7 @@ static int bond_option_primary_set(struct bonding *bond,
1115 slave->dev->name); 1115 slave->dev->name);
1116 rcu_assign_pointer(bond->primary_slave, slave); 1116 rcu_assign_pointer(bond->primary_slave, slave);
1117 strcpy(bond->params.primary, slave->dev->name); 1117 strcpy(bond->params.primary, slave->dev->name);
1118 bond->force_primary = true;
1118 bond_select_active_slave(bond); 1119 bond_select_active_slave(bond);
1119 goto out; 1120 goto out;
1120 } 1121 }
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 1e37313054f3..6da69af103e6 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev,
390 return 0; 390 return 0;
391} 391}
392 392
393static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) 393static void cc770_tx(struct net_device *dev, int mo)
394{ 394{
395 struct cc770_priv *priv = netdev_priv(dev); 395 struct cc770_priv *priv = netdev_priv(dev);
396 struct net_device_stats *stats = &dev->stats; 396 struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;
397 struct can_frame *cf = (struct can_frame *)skb->data;
398 unsigned int mo = obj2msgobj(CC770_OBJ_TX);
399 u8 dlc, rtr; 397 u8 dlc, rtr;
400 u32 id; 398 u32 id;
401 int i; 399 int i;
402 400
403 if (can_dropped_invalid_skb(dev, skb))
404 return NETDEV_TX_OK;
405
406 if ((cc770_read_reg(priv,
407 msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
408 netdev_err(dev, "TX register is still occupied!\n");
409 return NETDEV_TX_BUSY;
410 }
411
412 netif_stop_queue(dev);
413
414 dlc = cf->can_dlc; 401 dlc = cf->can_dlc;
415 id = cf->can_id; 402 id = cf->can_id;
416 if (cf->can_id & CAN_RTR_FLAG) 403 rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
417 rtr = 0; 404
418 else 405 cc770_write_reg(priv, msgobj[mo].ctrl0,
419 rtr = MSGCFG_DIR; 406 MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
420 cc770_write_reg(priv, msgobj[mo].ctrl1, 407 cc770_write_reg(priv, msgobj[mo].ctrl1,
421 RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); 408 RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
422 cc770_write_reg(priv, msgobj[mo].ctrl0, 409
423 MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
424 if (id & CAN_EFF_FLAG) { 410 if (id & CAN_EFF_FLAG) {
425 id &= CAN_EFF_MASK; 411 id &= CAN_EFF_MASK;
426 cc770_write_reg(priv, msgobj[mo].config, 412 cc770_write_reg(priv, msgobj[mo].config,
@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
439 for (i = 0; i < dlc; i++) 425 for (i = 0; i < dlc; i++)
440 cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); 426 cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
441 427
442 /* Store echo skb before starting the transfer */
443 can_put_echo_skb(skb, dev, 0);
444
445 cc770_write_reg(priv, msgobj[mo].ctrl1, 428 cc770_write_reg(priv, msgobj[mo].ctrl1,
446 RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); 429 RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
430 cc770_write_reg(priv, msgobj[mo].ctrl0,
431 MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC);
432}
447 433
448 stats->tx_bytes += dlc; 434static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
435{
436 struct cc770_priv *priv = netdev_priv(dev);
437 unsigned int mo = obj2msgobj(CC770_OBJ_TX);
449 438
439 if (can_dropped_invalid_skb(dev, skb))
440 return NETDEV_TX_OK;
450 441
451 /* 442 netif_stop_queue(dev);
452 * HM: We had some cases of repeated IRQs so make sure the 443
453 * INT is acknowledged I know it's already further up, but 444 if ((cc770_read_reg(priv,
454 * doing again fixed the issue 445 msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
455 */ 446 netdev_err(dev, "TX register is still occupied!\n");
456 cc770_write_reg(priv, msgobj[mo].ctrl0, 447 return NETDEV_TX_BUSY;
457 MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); 448 }
449
450 priv->tx_skb = skb;
451 cc770_tx(dev, mo);
458 452
459 return NETDEV_TX_OK; 453 return NETDEV_TX_OK;
460} 454}
@@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
680 struct cc770_priv *priv = netdev_priv(dev); 674 struct cc770_priv *priv = netdev_priv(dev);
681 struct net_device_stats *stats = &dev->stats; 675 struct net_device_stats *stats = &dev->stats;
682 unsigned int mo = obj2msgobj(o); 676 unsigned int mo = obj2msgobj(o);
677 struct can_frame *cf;
678 u8 ctrl1;
679
680 ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
683 681
684 /* Nothing more to send, switch off interrupts */
685 cc770_write_reg(priv, msgobj[mo].ctrl0, 682 cc770_write_reg(priv, msgobj[mo].ctrl0,
686 MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); 683 MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
687 /* 684 cc770_write_reg(priv, msgobj[mo].ctrl1,
688 * We had some cases of repeated IRQ so make sure the 685 RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES);
689 * INT is acknowledged 686
687 if (unlikely(!priv->tx_skb)) {
688 netdev_err(dev, "missing tx skb in tx interrupt\n");
689 return;
690 }
691
692 if (unlikely(ctrl1 & MSGLST_SET)) {
693 stats->rx_over_errors++;
694 stats->rx_errors++;
695 }
696
697 /* When the CC770 is sending an RTR message and it receives a regular
698 * message that matches the id of the RTR message, it will overwrite the
699 * outgoing message in the TX register. When this happens we must
700 * process the received message and try to transmit the outgoing skb
701 * again.
690 */ 702 */
691 cc770_write_reg(priv, msgobj[mo].ctrl0, 703 if (unlikely(ctrl1 & NEWDAT_SET)) {
692 MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); 704 cc770_rx(dev, mo, ctrl1);
705 cc770_tx(dev, mo);
706 return;
707 }
693 708
709 cf = (struct can_frame *)priv->tx_skb->data;
710 stats->tx_bytes += cf->can_dlc;
694 stats->tx_packets++; 711 stats->tx_packets++;
712
713 can_put_echo_skb(priv->tx_skb, dev, 0);
695 can_get_echo_skb(dev, 0); 714 can_get_echo_skb(dev, 0);
715 priv->tx_skb = NULL;
716
696 netif_wake_queue(dev); 717 netif_wake_queue(dev);
697} 718}
698 719
@@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv)
804 priv->can.do_set_bittiming = cc770_set_bittiming; 825 priv->can.do_set_bittiming = cc770_set_bittiming;
805 priv->can.do_set_mode = cc770_set_mode; 826 priv->can.do_set_mode = cc770_set_mode;
806 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; 827 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
828 priv->tx_skb = NULL;
807 829
808 memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); 830 memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
809 831
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
index a1739db98d91..95752e1d1283 100644
--- a/drivers/net/can/cc770/cc770.h
+++ b/drivers/net/can/cc770/cc770.h
@@ -193,6 +193,8 @@ struct cc770_priv {
193 u8 cpu_interface; /* CPU interface register */ 193 u8 cpu_interface; /* CPU interface register */
194 u8 clkout; /* Clock out register */ 194 u8 clkout; /* Clock out register */
195 u8 bus_config; /* Bus conffiguration register */ 195 u8 bus_config; /* Bus conffiguration register */
196
197 struct sk_buff *tx_skb;
196}; 198};
197 199
198struct net_device *alloc_cc770dev(int sizeof_priv); 200struct net_device *alloc_cc770dev(int sizeof_priv);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 16f7cadda5c3..47f43bdecd51 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -493,7 +493,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
493 data = be32_to_cpup((__be32 *)&cf->data[0]); 493 data = be32_to_cpup((__be32 *)&cf->data[0]);
494 flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[0]); 494 flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[0]);
495 } 495 }
496 if (cf->can_dlc > 3) { 496 if (cf->can_dlc > 4) {
497 data = be32_to_cpup((__be32 *)&cf->data[4]); 497 data = be32_to_cpup((__be32 *)&cf->data[4]);
498 flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[1]); 498 flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[1]);
499 } 499 }
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 357c9e89fdf9..047348033e27 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1078,6 +1078,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
1078 usb_free_urb(dev->intr_urb); 1078 usb_free_urb(dev->intr_urb);
1079 1079
1080 kfree(dev->intr_in_buffer); 1080 kfree(dev->intr_in_buffer);
1081 kfree(dev->tx_msg_buffer);
1081 } 1082 }
1082} 1083}
1083 1084
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index db1855b0e08f..59f891bebcc6 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1175,7 +1175,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
1175 1175
1176 skb = alloc_can_skb(priv->netdev, &cf); 1176 skb = alloc_can_skb(priv->netdev, &cf);
1177 if (!skb) { 1177 if (!skb) {
1178 stats->tx_dropped++; 1178 stats->rx_dropped++;
1179 return; 1179 return;
1180 } 1180 }
1181 1181
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 51670b322409..700b98d9c250 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2,6 +2,7 @@
2 * 2 *
3 * Copyright (C) 2012 - 2014 Xilinx, Inc. 3 * Copyright (C) 2012 - 2014 Xilinx, Inc.
4 * Copyright (C) 2009 PetaLogix. All rights reserved. 4 * Copyright (C) 2009 PetaLogix. All rights reserved.
5 * Copyright (C) 2017 Sandvik Mining and Construction Oy
5 * 6 *
6 * Description: 7 * Description:
7 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. 8 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/netdevice.h> 27#include <linux/netdevice.h>
27#include <linux/of.h> 28#include <linux/of.h>
29#include <linux/of_device.h>
28#include <linux/platform_device.h> 30#include <linux/platform_device.h>
29#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
30#include <linux/string.h> 33#include <linux/string.h>
31#include <linux/types.h> 34#include <linux/types.h>
32#include <linux/can/dev.h> 35#include <linux/can/dev.h>
@@ -100,7 +103,7 @@ enum xcan_reg {
100#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\ 103#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
101 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \ 104 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
102 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \ 105 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
103 XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK) 106 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
104 107
105/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ 108/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
106#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ 109#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
@@ -117,6 +120,7 @@ enum xcan_reg {
117/** 120/**
118 * struct xcan_priv - This definition define CAN driver instance 121 * struct xcan_priv - This definition define CAN driver instance
119 * @can: CAN private data structure. 122 * @can: CAN private data structure.
123 * @tx_lock: Lock for synchronizing TX interrupt handling
120 * @tx_head: Tx CAN packets ready to send on the queue 124 * @tx_head: Tx CAN packets ready to send on the queue
121 * @tx_tail: Tx CAN packets successfully sended on the queue 125 * @tx_tail: Tx CAN packets successfully sended on the queue
122 * @tx_max: Maximum number packets the driver can send 126 * @tx_max: Maximum number packets the driver can send
@@ -131,6 +135,7 @@ enum xcan_reg {
131 */ 135 */
132struct xcan_priv { 136struct xcan_priv {
133 struct can_priv can; 137 struct can_priv can;
138 spinlock_t tx_lock;
134 unsigned int tx_head; 139 unsigned int tx_head;
135 unsigned int tx_tail; 140 unsigned int tx_tail;
136 unsigned int tx_max; 141 unsigned int tx_max;
@@ -158,6 +163,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
158 .brp_inc = 1, 163 .brp_inc = 1,
159}; 164};
160 165
166#define XCAN_CAP_WATERMARK 0x0001
167struct xcan_devtype_data {
168 unsigned int caps;
169};
170
161/** 171/**
162 * xcan_write_reg_le - Write a value to the device register little endian 172 * xcan_write_reg_le - Write a value to the device register little endian
163 * @priv: Driver private data structure 173 * @priv: Driver private data structure
@@ -237,6 +247,10 @@ static int set_reset_mode(struct net_device *ndev)
237 usleep_range(500, 10000); 247 usleep_range(500, 10000);
238 } 248 }
239 249
250 /* reset clears FIFOs */
251 priv->tx_head = 0;
252 priv->tx_tail = 0;
253
240 return 0; 254 return 0;
241} 255}
242 256
@@ -391,6 +405,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
391 struct net_device_stats *stats = &ndev->stats; 405 struct net_device_stats *stats = &ndev->stats;
392 struct can_frame *cf = (struct can_frame *)skb->data; 406 struct can_frame *cf = (struct can_frame *)skb->data;
393 u32 id, dlc, data[2] = {0, 0}; 407 u32 id, dlc, data[2] = {0, 0};
408 unsigned long flags;
394 409
395 if (can_dropped_invalid_skb(ndev, skb)) 410 if (can_dropped_invalid_skb(ndev, skb))
396 return NETDEV_TX_OK; 411 return NETDEV_TX_OK;
@@ -438,6 +453,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
438 data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); 453 data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
439 454
440 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); 455 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
456
457 spin_lock_irqsave(&priv->tx_lock, flags);
458
441 priv->tx_head++; 459 priv->tx_head++;
442 460
443 /* Write the Frame to Xilinx CAN TX FIFO */ 461 /* Write the Frame to Xilinx CAN TX FIFO */
@@ -453,10 +471,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
453 stats->tx_bytes += cf->can_dlc; 471 stats->tx_bytes += cf->can_dlc;
454 } 472 }
455 473
474 /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
475 if (priv->tx_max > 1)
476 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
477
456 /* Check if the TX buffer is full */ 478 /* Check if the TX buffer is full */
457 if ((priv->tx_head - priv->tx_tail) == priv->tx_max) 479 if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
458 netif_stop_queue(ndev); 480 netif_stop_queue(ndev);
459 481
482 spin_unlock_irqrestore(&priv->tx_lock, flags);
483
460 return NETDEV_TX_OK; 484 return NETDEV_TX_OK;
461} 485}
462 486
@@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
529} 553}
530 554
531/** 555/**
556 * xcan_current_error_state - Get current error state from HW
557 * @ndev: Pointer to net_device structure
558 *
559 * Checks the current CAN error state from the HW. Note that this
560 * only checks for ERROR_PASSIVE and ERROR_WARNING.
561 *
562 * Return:
563 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
564 * otherwise.
565 */
566static enum can_state xcan_current_error_state(struct net_device *ndev)
567{
568 struct xcan_priv *priv = netdev_priv(ndev);
569 u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
570
571 if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
572 return CAN_STATE_ERROR_PASSIVE;
573 else if (status & XCAN_SR_ERRWRN_MASK)
574 return CAN_STATE_ERROR_WARNING;
575 else
576 return CAN_STATE_ERROR_ACTIVE;
577}
578
579/**
580 * xcan_set_error_state - Set new CAN error state
581 * @ndev: Pointer to net_device structure
582 * @new_state: The new CAN state to be set
583 * @cf: Error frame to be populated or NULL
584 *
585 * Set new CAN error state for the device, updating statistics and
586 * populating the error frame if given.
587 */
588static void xcan_set_error_state(struct net_device *ndev,
589 enum can_state new_state,
590 struct can_frame *cf)
591{
592 struct xcan_priv *priv = netdev_priv(ndev);
593 u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
594 u32 txerr = ecr & XCAN_ECR_TEC_MASK;
595 u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
596
597 priv->can.state = new_state;
598
599 if (cf) {
600 cf->can_id |= CAN_ERR_CRTL;
601 cf->data[6] = txerr;
602 cf->data[7] = rxerr;
603 }
604
605 switch (new_state) {
606 case CAN_STATE_ERROR_PASSIVE:
607 priv->can.can_stats.error_passive++;
608 if (cf)
609 cf->data[1] = (rxerr > 127) ?
610 CAN_ERR_CRTL_RX_PASSIVE :
611 CAN_ERR_CRTL_TX_PASSIVE;
612 break;
613 case CAN_STATE_ERROR_WARNING:
614 priv->can.can_stats.error_warning++;
615 if (cf)
616 cf->data[1] |= (txerr > rxerr) ?
617 CAN_ERR_CRTL_TX_WARNING :
618 CAN_ERR_CRTL_RX_WARNING;
619 break;
620 case CAN_STATE_ERROR_ACTIVE:
621 if (cf)
622 cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
623 break;
624 default:
625 /* non-ERROR states are handled elsewhere */
626 WARN_ON(1);
627 break;
628 }
629}
630
631/**
632 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
633 * @ndev: Pointer to net_device structure
634 *
635 * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
636 * the performed RX/TX has caused it to drop to a lesser state and set
637 * the interface state accordingly.
638 */
639static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
640{
641 struct xcan_priv *priv = netdev_priv(ndev);
642 enum can_state old_state = priv->can.state;
643 enum can_state new_state;
644
645 /* changing error state due to successful frame RX/TX can only
646 * occur from these states
647 */
648 if (old_state != CAN_STATE_ERROR_WARNING &&
649 old_state != CAN_STATE_ERROR_PASSIVE)
650 return;
651
652 new_state = xcan_current_error_state(ndev);
653
654 if (new_state != old_state) {
655 struct sk_buff *skb;
656 struct can_frame *cf;
657
658 skb = alloc_can_err_skb(ndev, &cf);
659
660 xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
661
662 if (skb) {
663 struct net_device_stats *stats = &ndev->stats;
664
665 stats->rx_packets++;
666 stats->rx_bytes += cf->can_dlc;
667 netif_rx(skb);
668 }
669 }
670}
671
672/**
532 * xcan_err_interrupt - error frame Isr 673 * xcan_err_interrupt - error frame Isr
533 * @ndev: net_device pointer 674 * @ndev: net_device pointer
534 * @isr: interrupt status register value 675 * @isr: interrupt status register value
@@ -543,16 +684,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
543 struct net_device_stats *stats = &ndev->stats; 684 struct net_device_stats *stats = &ndev->stats;
544 struct can_frame *cf; 685 struct can_frame *cf;
545 struct sk_buff *skb; 686 struct sk_buff *skb;
546 u32 err_status, status, txerr = 0, rxerr = 0; 687 u32 err_status;
547 688
548 skb = alloc_can_err_skb(ndev, &cf); 689 skb = alloc_can_err_skb(ndev, &cf);
549 690
550 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); 691 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
551 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); 692 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
552 txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
553 rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
554 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
555 status = priv->read_reg(priv, XCAN_SR_OFFSET);
556 693
557 if (isr & XCAN_IXR_BSOFF_MASK) { 694 if (isr & XCAN_IXR_BSOFF_MASK) {
558 priv->can.state = CAN_STATE_BUS_OFF; 695 priv->can.state = CAN_STATE_BUS_OFF;
@@ -562,28 +699,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
562 can_bus_off(ndev); 699 can_bus_off(ndev);
563 if (skb) 700 if (skb)
564 cf->can_id |= CAN_ERR_BUSOFF; 701 cf->can_id |= CAN_ERR_BUSOFF;
565 } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) { 702 } else {
566 priv->can.state = CAN_STATE_ERROR_PASSIVE; 703 enum can_state new_state = xcan_current_error_state(ndev);
567 priv->can.can_stats.error_passive++; 704
568 if (skb) { 705 xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
569 cf->can_id |= CAN_ERR_CRTL;
570 cf->data[1] = (rxerr > 127) ?
571 CAN_ERR_CRTL_RX_PASSIVE :
572 CAN_ERR_CRTL_TX_PASSIVE;
573 cf->data[6] = txerr;
574 cf->data[7] = rxerr;
575 }
576 } else if (status & XCAN_SR_ERRWRN_MASK) {
577 priv->can.state = CAN_STATE_ERROR_WARNING;
578 priv->can.can_stats.error_warning++;
579 if (skb) {
580 cf->can_id |= CAN_ERR_CRTL;
581 cf->data[1] |= (txerr > rxerr) ?
582 CAN_ERR_CRTL_TX_WARNING :
583 CAN_ERR_CRTL_RX_WARNING;
584 cf->data[6] = txerr;
585 cf->data[7] = rxerr;
586 }
587 } 706 }
588 707
589 /* Check for Arbitration lost interrupt */ 708 /* Check for Arbitration lost interrupt */
@@ -599,7 +718,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
599 if (isr & XCAN_IXR_RXOFLW_MASK) { 718 if (isr & XCAN_IXR_RXOFLW_MASK) {
600 stats->rx_over_errors++; 719 stats->rx_over_errors++;
601 stats->rx_errors++; 720 stats->rx_errors++;
602 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
603 if (skb) { 721 if (skb) {
604 cf->can_id |= CAN_ERR_CRTL; 722 cf->can_id |= CAN_ERR_CRTL;
605 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; 723 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -708,26 +826,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
708 826
709 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 827 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
710 while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { 828 while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
711 if (isr & XCAN_IXR_RXOK_MASK) { 829 work_done += xcan_rx(ndev);
712 priv->write_reg(priv, XCAN_ICR_OFFSET,
713 XCAN_IXR_RXOK_MASK);
714 work_done += xcan_rx(ndev);
715 } else {
716 priv->write_reg(priv, XCAN_ICR_OFFSET,
717 XCAN_IXR_RXNEMP_MASK);
718 break;
719 }
720 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); 830 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
721 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 831 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
722 } 832 }
723 833
724 if (work_done) 834 if (work_done) {
725 can_led_event(ndev, CAN_LED_EVENT_RX); 835 can_led_event(ndev, CAN_LED_EVENT_RX);
836 xcan_update_error_state_after_rxtx(ndev);
837 }
726 838
727 if (work_done < quota) { 839 if (work_done < quota) {
728 napi_complete(napi); 840 napi_complete(napi);
729 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 841 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
730 ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); 842 ier |= XCAN_IXR_RXNEMP_MASK;
731 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 843 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
732 } 844 }
733 return work_done; 845 return work_done;
@@ -742,18 +854,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
742{ 854{
743 struct xcan_priv *priv = netdev_priv(ndev); 855 struct xcan_priv *priv = netdev_priv(ndev);
744 struct net_device_stats *stats = &ndev->stats; 856 struct net_device_stats *stats = &ndev->stats;
857 unsigned int frames_in_fifo;
858 int frames_sent = 1; /* TXOK => at least 1 frame was sent */
859 unsigned long flags;
860 int retries = 0;
861
862 /* Synchronize with xmit as we need to know the exact number
863 * of frames in the FIFO to stay in sync due to the TXFEMP
864 * handling.
865 * This also prevents a race between netif_wake_queue() and
866 * netif_stop_queue().
867 */
868 spin_lock_irqsave(&priv->tx_lock, flags);
745 869
746 while ((priv->tx_head - priv->tx_tail > 0) && 870 frames_in_fifo = priv->tx_head - priv->tx_tail;
747 (isr & XCAN_IXR_TXOK_MASK)) { 871
872 if (WARN_ON_ONCE(frames_in_fifo == 0)) {
873 /* clear TXOK anyway to avoid getting back here */
748 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); 874 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
875 spin_unlock_irqrestore(&priv->tx_lock, flags);
876 return;
877 }
878
879 /* Check if 2 frames were sent (TXOK only means that at least 1
880 * frame was sent).
881 */
882 if (frames_in_fifo > 1) {
883 WARN_ON(frames_in_fifo > priv->tx_max);
884
885 /* Synchronize TXOK and isr so that after the loop:
886 * (1) isr variable is up-to-date at least up to TXOK clear
887 * time. This avoids us clearing a TXOK of a second frame
888 * but not noticing that the FIFO is now empty and thus
889 * marking only a single frame as sent.
890 * (2) No TXOK is left. Having one could mean leaving a
891 * stray TXOK as we might process the associated frame
892 * via TXFEMP handling as we read TXFEMP *after* TXOK
893 * clear to satisfy (1).
894 */
895 while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
896 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
897 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
898 }
899
900 if (isr & XCAN_IXR_TXFEMP_MASK) {
901 /* nothing in FIFO anymore */
902 frames_sent = frames_in_fifo;
903 }
904 } else {
905 /* single frame in fifo, just clear TXOK */
906 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
907 }
908
909 while (frames_sent--) {
749 can_get_echo_skb(ndev, priv->tx_tail % 910 can_get_echo_skb(ndev, priv->tx_tail %
750 priv->tx_max); 911 priv->tx_max);
751 priv->tx_tail++; 912 priv->tx_tail++;
752 stats->tx_packets++; 913 stats->tx_packets++;
753 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
754 } 914 }
755 can_led_event(ndev, CAN_LED_EVENT_TX); 915
756 netif_wake_queue(ndev); 916 netif_wake_queue(ndev);
917
918 spin_unlock_irqrestore(&priv->tx_lock, flags);
919
920 can_led_event(ndev, CAN_LED_EVENT_TX);
921 xcan_update_error_state_after_rxtx(ndev);
757} 922}
758 923
759/** 924/**
@@ -772,6 +937,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
772 struct net_device *ndev = (struct net_device *)dev_id; 937 struct net_device *ndev = (struct net_device *)dev_id;
773 struct xcan_priv *priv = netdev_priv(ndev); 938 struct xcan_priv *priv = netdev_priv(ndev);
774 u32 isr, ier; 939 u32 isr, ier;
940 u32 isr_errors;
775 941
776 /* Get the interrupt status from Xilinx CAN */ 942 /* Get the interrupt status from Xilinx CAN */
777 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 943 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -790,18 +956,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
790 xcan_tx_interrupt(ndev, isr); 956 xcan_tx_interrupt(ndev, isr);
791 957
792 /* Check for the type of error interrupt and Processing it */ 958 /* Check for the type of error interrupt and Processing it */
793 if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | 959 isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
794 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) { 960 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
795 priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK | 961 if (isr_errors) {
796 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK | 962 priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
797 XCAN_IXR_ARBLST_MASK));
798 xcan_err_interrupt(ndev, isr); 963 xcan_err_interrupt(ndev, isr);
799 } 964 }
800 965
801 /* Check for the type of receive interrupt and Processing it */ 966 /* Check for the type of receive interrupt and Processing it */
802 if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) { 967 if (isr & XCAN_IXR_RXNEMP_MASK) {
803 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 968 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
804 ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK); 969 ier &= ~XCAN_IXR_RXNEMP_MASK;
805 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 970 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
806 napi_schedule(&priv->napi); 971 napi_schedule(&priv->napi);
807 } 972 }
@@ -1030,6 +1195,18 @@ static int __maybe_unused xcan_resume(struct device *dev)
1030 1195
1031static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume); 1196static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume);
1032 1197
1198static const struct xcan_devtype_data xcan_zynq_data = {
1199 .caps = XCAN_CAP_WATERMARK,
1200};
1201
1202/* Match table for OF platform binding */
1203static const struct of_device_id xcan_of_match[] = {
1204 { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
1205 { .compatible = "xlnx,axi-can-1.00.a", },
1206 { /* end of list */ },
1207};
1208MODULE_DEVICE_TABLE(of, xcan_of_match);
1209
1033/** 1210/**
1034 * xcan_probe - Platform registration call 1211 * xcan_probe - Platform registration call
1035 * @pdev: Handle to the platform device structure 1212 * @pdev: Handle to the platform device structure
@@ -1044,8 +1221,10 @@ static int xcan_probe(struct platform_device *pdev)
1044 struct resource *res; /* IO mem resources */ 1221 struct resource *res; /* IO mem resources */
1045 struct net_device *ndev; 1222 struct net_device *ndev;
1046 struct xcan_priv *priv; 1223 struct xcan_priv *priv;
1224 const struct of_device_id *of_id;
1225 int caps = 0;
1047 void __iomem *addr; 1226 void __iomem *addr;
1048 int ret, rx_max, tx_max; 1227 int ret, rx_max, tx_max, tx_fifo_depth;
1049 1228
1050 /* Get the virtual base address for the device */ 1229 /* Get the virtual base address for the device */
1051 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1230 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1055,7 +1234,8 @@ static int xcan_probe(struct platform_device *pdev)
1055 goto err; 1234 goto err;
1056 } 1235 }
1057 1236
1058 ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max); 1237 ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1238 &tx_fifo_depth);
1059 if (ret < 0) 1239 if (ret < 0)
1060 goto err; 1240 goto err;
1061 1241
@@ -1063,6 +1243,30 @@ static int xcan_probe(struct platform_device *pdev)
1063 if (ret < 0) 1243 if (ret < 0)
1064 goto err; 1244 goto err;
1065 1245
1246 of_id = of_match_device(xcan_of_match, &pdev->dev);
1247 if (of_id) {
1248 const struct xcan_devtype_data *devtype_data = of_id->data;
1249
1250 if (devtype_data)
1251 caps = devtype_data->caps;
1252 }
1253
1254 /* There is no way to directly figure out how many frames have been
1255 * sent when the TXOK interrupt is processed. If watermark programming
1256 * is supported, we can have 2 frames in the FIFO and use TXFEMP
1257 * to determine if 1 or 2 frames have been sent.
1258 * Theoretically we should be able to use TXFWMEMP to determine up
1259 * to 3 frames, but it seems that after putting a second frame in the
1260 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
1261 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
1262 * sent), which is not a sensible state - possibly TXFWMEMP is not
1263 * completely synchronized with the rest of the bits?
1264 */
1265 if (caps & XCAN_CAP_WATERMARK)
1266 tx_max = min(tx_fifo_depth, 2);
1267 else
1268 tx_max = 1;
1269
1066 /* Create a CAN device instance */ 1270 /* Create a CAN device instance */
1067 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); 1271 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1068 if (!ndev) 1272 if (!ndev)
@@ -1077,6 +1281,7 @@ static int xcan_probe(struct platform_device *pdev)
1077 CAN_CTRLMODE_BERR_REPORTING; 1281 CAN_CTRLMODE_BERR_REPORTING;
1078 priv->reg_base = addr; 1282 priv->reg_base = addr;
1079 priv->tx_max = tx_max; 1283 priv->tx_max = tx_max;
1284 spin_lock_init(&priv->tx_lock);
1080 1285
1081 /* Get IRQ for the device */ 1286 /* Get IRQ for the device */
1082 ndev->irq = platform_get_irq(pdev, 0); 1287 ndev->irq = platform_get_irq(pdev, 0);
@@ -1144,9 +1349,9 @@ static int xcan_probe(struct platform_device *pdev)
1144 devm_can_led_init(ndev); 1349 devm_can_led_init(ndev);
1145 clk_disable_unprepare(priv->bus_clk); 1350 clk_disable_unprepare(priv->bus_clk);
1146 clk_disable_unprepare(priv->can_clk); 1351 clk_disable_unprepare(priv->can_clk);
1147 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", 1352 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
1148 priv->reg_base, ndev->irq, priv->can.clock.freq, 1353 priv->reg_base, ndev->irq, priv->can.clock.freq,
1149 priv->tx_max); 1354 tx_fifo_depth, priv->tx_max);
1150 1355
1151 return 0; 1356 return 0;
1152 1357
@@ -1182,14 +1387,6 @@ static int xcan_remove(struct platform_device *pdev)
1182 return 0; 1387 return 0;
1183} 1388}
1184 1389
1185/* Match table for OF platform binding */
1186static const struct of_device_id xcan_of_match[] = {
1187 { .compatible = "xlnx,zynq-can-1.0", },
1188 { .compatible = "xlnx,axi-can-1.00.a", },
1189 { /* end of list */ },
1190};
1191MODULE_DEVICE_TABLE(of, xcan_of_match);
1192
1193static struct platform_driver xcan_driver = { 1390static struct platform_driver xcan_driver = {
1194 .probe = xcan_probe, 1391 .probe = xcan_probe,
1195 .remove = xcan_remove, 1392 .remove = xcan_remove,
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 4547a1b8b958..7677c745fb30 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -562,7 +562,7 @@ static void el3_common_remove (struct net_device *dev)
562} 562}
563 563
564#ifdef CONFIG_EISA 564#ifdef CONFIG_EISA
565static int __init el3_eisa_probe (struct device *device) 565static int el3_eisa_probe(struct device *device)
566{ 566{
567 short i; 567 short i;
568 int ioaddr, irq, if_port; 568 int ioaddr, irq, if_port;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 2839af00f20c..1c5f3b273e6a 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -907,7 +907,7 @@ static struct eisa_device_id vortex_eisa_ids[] = {
907}; 907};
908MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids); 908MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
909 909
910static int __init vortex_eisa_probe(struct device *device) 910static int vortex_eisa_probe(struct device *device)
911{ 911{
912 void __iomem *ioaddr; 912 void __iomem *ioaddr;
913 struct eisa_device *edev; 913 struct eisa_device *edev;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 618d952c2984..2ef4b4e884ae 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -829,7 +829,7 @@ static int xgbe_remove(struct platform_device *pdev)
829 return 0; 829 return 0;
830} 830}
831 831
832#ifdef CONFIG_PM 832#ifdef CONFIG_PM_SLEEP
833static int xgbe_suspend(struct device *dev) 833static int xgbe_suspend(struct device *dev)
834{ 834{
835 struct net_device *netdev = dev_get_drvdata(dev); 835 struct net_device *netdev = dev_get_drvdata(dev);
@@ -868,7 +868,7 @@ static int xgbe_resume(struct device *dev)
868 868
869 return ret; 869 return ret;
870} 870}
871#endif /* CONFIG_PM */ 871#endif /* CONFIG_PM_SLEEP */
872 872
873#ifdef CONFIG_ACPI 873#ifdef CONFIG_ACPI
874static const struct acpi_device_id xgbe_acpi_match[] = { 874static const struct acpi_device_id xgbe_acpi_match[] = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 446058081866..7a0ab4c44ee4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -872,14 +872,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
872 872
873 if (pdata->tx_pause != pdata->phy.tx_pause) { 873 if (pdata->tx_pause != pdata->phy.tx_pause) {
874 new_state = 1; 874 new_state = 1;
875 pdata->hw_if.config_tx_flow_control(pdata);
876 pdata->tx_pause = pdata->phy.tx_pause; 875 pdata->tx_pause = pdata->phy.tx_pause;
876 pdata->hw_if.config_tx_flow_control(pdata);
877 } 877 }
878 878
879 if (pdata->rx_pause != pdata->phy.rx_pause) { 879 if (pdata->rx_pause != pdata->phy.rx_pause) {
880 new_state = 1; 880 new_state = 1;
881 pdata->hw_if.config_rx_flow_control(pdata);
882 pdata->rx_pause = pdata->phy.rx_pause; 881 pdata->rx_pause = pdata->phy.rx_pause;
882 pdata->hw_if.config_rx_flow_control(pdata);
883 } 883 }
884 884
885 /* Speed support */ 885 /* Speed support */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index c31e691d11fc..e8d31640058d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -604,6 +604,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
604 xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb); 604 xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
605 cb |= CFG_CLE_BYPASS_EN0; 605 cb |= CFG_CLE_BYPASS_EN0;
606 CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); 606 CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
607 CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
607 xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb); 608 xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);
608 609
609 xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb); 610 xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index c153a1dc5ff7..480312105964 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -147,6 +147,7 @@ enum xgene_enet_rm {
147#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3) 147#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3)
148 148
149#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2) 149#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
150#define CFG_CLE_IP_HDR_LEN_SET(dst, val) xgene_set_bits(dst, val, 8, 5)
150#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12) 151#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
151#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4) 152#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4)
152#define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2) 153#define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2)
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index abe1eabc0171..9cc5daed13ed 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -250,39 +250,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
250 continue; 250 continue;
251 } 251 }
252 252
253 pktlen = info & LEN_MASK; 253 /* Prepare the BD for next cycle. netif_receive_skb()
254 stats->rx_packets++; 254 * only if new skb was allocated and mapped to avoid holes
255 stats->rx_bytes += pktlen; 255 * in the RX fifo.
256 skb = rx_buff->skb; 256 */
257 skb_put(skb, pktlen); 257 skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
258 skb->dev = ndev; 258 if (unlikely(!skb)) {
259 skb->protocol = eth_type_trans(skb, ndev); 259 if (net_ratelimit())
260 260 netdev_err(ndev, "cannot allocate skb\n");
261 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), 261 /* Return ownership to EMAC */
262 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); 262 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
263
264 /* Prepare the BD for next cycle */
265 rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
266 EMAC_BUFFER_SIZE);
267 if (unlikely(!rx_buff->skb)) {
268 stats->rx_errors++; 263 stats->rx_errors++;
269 /* Because receive_skb is below, increment rx_dropped */
270 stats->rx_dropped++; 264 stats->rx_dropped++;
271 continue; 265 continue;
272 } 266 }
273 267
274 /* receive_skb only if new skb was allocated to avoid holes */ 268 addr = dma_map_single(&ndev->dev, (void *)skb->data,
275 netif_receive_skb(skb);
276
277 addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
278 EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); 269 EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
279 if (dma_mapping_error(&ndev->dev, addr)) { 270 if (dma_mapping_error(&ndev->dev, addr)) {
280 if (net_ratelimit()) 271 if (net_ratelimit())
281 netdev_err(ndev, "cannot dma map\n"); 272 netdev_err(ndev, "cannot map dma buffer\n");
282 dev_kfree_skb(rx_buff->skb); 273 dev_kfree_skb(skb);
274 /* Return ownership to EMAC */
275 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
283 stats->rx_errors++; 276 stats->rx_errors++;
277 stats->rx_dropped++;
284 continue; 278 continue;
285 } 279 }
280
281 /* unmap previosly mapped skb */
282 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
283 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
284
285 pktlen = info & LEN_MASK;
286 stats->rx_packets++;
287 stats->rx_bytes += pktlen;
288 skb_put(rx_buff->skb, pktlen);
289 rx_buff->skb->dev = ndev;
290 rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
291
292 netif_receive_skb(rx_buff->skb);
293
294 rx_buff->skb = skb;
286 dma_unmap_addr_set(rx_buff, addr, addr); 295 dma_unmap_addr_set(rx_buff, addr, addr);
287 dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); 296 dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
288 297
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index c31c7407b753..425dae560322 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -150,8 +150,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
150 /* Optional regulator for PHY */ 150 /* Optional regulator for PHY */
151 priv->regulator = devm_regulator_get_optional(dev, "phy"); 151 priv->regulator = devm_regulator_get_optional(dev, "phy");
152 if (IS_ERR(priv->regulator)) { 152 if (IS_ERR(priv->regulator)) {
153 if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) 153 if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
154 return -EPROBE_DEFER; 154 err = -EPROBE_DEFER;
155 goto out_clk_disable;
156 }
155 dev_err(dev, "no regulator found\n"); 157 dev_err(dev, "no regulator found\n");
156 priv->regulator = NULL; 158 priv->regulator = NULL;
157 } 159 }
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 8b1929e9f698..ec5834087e4b 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1063,7 +1063,8 @@ static int bcm_enet_open(struct net_device *dev)
1063 val = enet_readl(priv, ENET_CTL_REG); 1063 val = enet_readl(priv, ENET_CTL_REG);
1064 val |= ENET_CTL_ENABLE_MASK; 1064 val |= ENET_CTL_ENABLE_MASK;
1065 enet_writel(priv, val, ENET_CTL_REG); 1065 enet_writel(priv, val, ENET_CTL_REG);
1066 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); 1066 if (priv->dma_has_sram)
1067 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1067 enet_dmac_writel(priv, priv->dma_chan_en_mask, 1068 enet_dmac_writel(priv, priv->dma_chan_en_mask,
1068 ENETDMAC_CHANCFG, priv->rx_chan); 1069 ENETDMAC_CHANCFG, priv->rx_chan);
1069 1070
@@ -1787,7 +1788,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
1787 ret = PTR_ERR(priv->mac_clk); 1788 ret = PTR_ERR(priv->mac_clk);
1788 goto out; 1789 goto out;
1789 } 1790 }
1790 clk_prepare_enable(priv->mac_clk); 1791 ret = clk_prepare_enable(priv->mac_clk);
1792 if (ret)
1793 goto out_put_clk_mac;
1791 1794
1792 /* initialize default and fetch platform data */ 1795 /* initialize default and fetch platform data */
1793 priv->rx_ring_size = BCMENET_DEF_RX_DESC; 1796 priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1819,9 +1822,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
1819 if (IS_ERR(priv->phy_clk)) { 1822 if (IS_ERR(priv->phy_clk)) {
1820 ret = PTR_ERR(priv->phy_clk); 1823 ret = PTR_ERR(priv->phy_clk);
1821 priv->phy_clk = NULL; 1824 priv->phy_clk = NULL;
1822 goto out_put_clk_mac; 1825 goto out_disable_clk_mac;
1823 } 1826 }
1824 clk_prepare_enable(priv->phy_clk); 1827 ret = clk_prepare_enable(priv->phy_clk);
1828 if (ret)
1829 goto out_put_clk_phy;
1825 } 1830 }
1826 1831
1827 /* do minimal hardware init to be able to probe mii bus */ 1832 /* do minimal hardware init to be able to probe mii bus */
@@ -1921,13 +1926,16 @@ out_free_mdio:
1921out_uninit_hw: 1926out_uninit_hw:
1922 /* turn off mdc clock */ 1927 /* turn off mdc clock */
1923 enet_writel(priv, 0, ENET_MIISC_REG); 1928 enet_writel(priv, 0, ENET_MIISC_REG);
1924 if (priv->phy_clk) { 1929 if (priv->phy_clk)
1925 clk_disable_unprepare(priv->phy_clk); 1930 clk_disable_unprepare(priv->phy_clk);
1931
1932out_put_clk_phy:
1933 if (priv->phy_clk)
1926 clk_put(priv->phy_clk); 1934 clk_put(priv->phy_clk);
1927 }
1928 1935
1929out_put_clk_mac: 1936out_disable_clk_mac:
1930 clk_disable_unprepare(priv->mac_clk); 1937 clk_disable_unprepare(priv->mac_clk);
1938out_put_clk_mac:
1931 clk_put(priv->mac_clk); 1939 clk_put(priv->mac_clk);
1932out: 1940out:
1933 free_netdev(dev); 1941 free_netdev(dev);
@@ -2772,7 +2780,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2772 ret = PTR_ERR(priv->mac_clk); 2780 ret = PTR_ERR(priv->mac_clk);
2773 goto out_unmap; 2781 goto out_unmap;
2774 } 2782 }
2775 clk_enable(priv->mac_clk); 2783 ret = clk_prepare_enable(priv->mac_clk);
2784 if (ret)
2785 goto out_put_clk;
2776 2786
2777 priv->rx_chan = 0; 2787 priv->rx_chan = 0;
2778 priv->tx_chan = 1; 2788 priv->tx_chan = 1;
@@ -2793,7 +2803,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2793 2803
2794 ret = register_netdev(dev); 2804 ret = register_netdev(dev);
2795 if (ret) 2805 if (ret)
2796 goto out_put_clk; 2806 goto out_disable_clk;
2797 2807
2798 netif_carrier_off(dev); 2808 netif_carrier_off(dev);
2799 platform_set_drvdata(pdev, dev); 2809 platform_set_drvdata(pdev, dev);
@@ -2802,6 +2812,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2802 2812
2803 return 0; 2813 return 0;
2804 2814
2815out_disable_clk:
2816 clk_disable_unprepare(priv->mac_clk);
2817
2805out_put_clk: 2818out_put_clk:
2806 clk_put(priv->mac_clk); 2819 clk_put(priv->mac_clk);
2807 2820
@@ -2833,6 +2846,9 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
2833 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2846 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2834 release_mem_region(res->start, resource_size(res)); 2847 release_mem_region(res->start, resource_size(res));
2835 2848
2849 clk_disable_unprepare(priv->mac_clk);
2850 clk_put(priv->mac_clk);
2851
2836 free_netdev(dev); 2852 free_netdev(dev);
2837 return 0; 2853 return 0;
2838} 2854}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 027705117086..af9ec57bbebf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -729,37 +729,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
729 struct bcm_sysport_tx_ring *ring) 729 struct bcm_sysport_tx_ring *ring)
730{ 730{
731 struct net_device *ndev = priv->netdev; 731 struct net_device *ndev = priv->netdev;
732 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
733 unsigned int pkts_compl = 0, bytes_compl = 0; 732 unsigned int pkts_compl = 0, bytes_compl = 0;
733 unsigned int txbds_processed = 0;
734 struct bcm_sysport_cb *cb; 734 struct bcm_sysport_cb *cb;
735 unsigned int txbds_ready;
736 unsigned int c_index;
735 u32 hw_ind; 737 u32 hw_ind;
736 738
737 /* Compute how many descriptors have been processed since last call */ 739 /* Compute how many descriptors have been processed since last call */
738 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); 740 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
739 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; 741 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
740 ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); 742 txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
741
742 last_c_index = ring->c_index;
743 num_tx_cbs = ring->size;
744
745 c_index &= (num_tx_cbs - 1);
746
747 if (c_index >= last_c_index)
748 last_tx_cn = c_index - last_c_index;
749 else
750 last_tx_cn = num_tx_cbs - last_c_index + c_index;
751 743
752 netif_dbg(priv, tx_done, ndev, 744 netif_dbg(priv, tx_done, ndev,
753 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", 745 "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
754 ring->index, c_index, last_tx_cn, last_c_index); 746 ring->index, ring->c_index, c_index, txbds_ready);
755 747
756 while (last_tx_cn-- > 0) { 748 while (txbds_processed < txbds_ready) {
757 cb = ring->cbs + last_c_index; 749 cb = &ring->cbs[ring->clean_index];
758 bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl); 750 bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
759 751
760 ring->desc_count++; 752 ring->desc_count++;
761 last_c_index++; 753 txbds_processed++;
762 last_c_index &= (num_tx_cbs - 1); 754
755 if (likely(ring->clean_index < ring->size - 1))
756 ring->clean_index++;
757 else
758 ring->clean_index = 0;
763 } 759 }
764 760
765 ring->c_index = c_index; 761 ring->c_index = c_index;
@@ -1229,6 +1225,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1229 netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); 1225 netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
1230 ring->index = index; 1226 ring->index = index;
1231 ring->size = size; 1227 ring->size = size;
1228 ring->clean_index = 0;
1232 ring->alloc_size = ring->size; 1229 ring->alloc_size = ring->size;
1233 ring->desc_cpu = p; 1230 ring->desc_cpu = p;
1234 ring->desc_count = ring->size; 1231 ring->desc_count = ring->size;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index f28bf545d7f4..8ace6ecb5f79 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -638,7 +638,7 @@ struct bcm_sysport_tx_ring {
638 unsigned int desc_count; /* Number of descriptors */ 638 unsigned int desc_count; /* Number of descriptors */
639 unsigned int curr_desc; /* Current descriptor */ 639 unsigned int curr_desc; /* Current descriptor */
640 unsigned int c_index; /* Last consumer index */ 640 unsigned int c_index; /* Last consumer index */
641 unsigned int p_index; /* Current producer index */ 641 unsigned int clean_index; /* Current clean index */
642 struct bcm_sysport_cb *cbs; /* Transmit control blocks */ 642 struct bcm_sysport_cb *cbs; /* Transmit control blocks */
643 struct dma_desc *desc_cpu; /* CPU view of the descriptor */ 643 struct dma_desc *desc_cpu; /* CPU view of the descriptor */
644 struct bcm_sysport_priv *priv; /* private context backpointer */ 644 struct bcm_sysport_priv *priv; /* private context backpointer */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index a5e4b4b93d1b..ec3766264408 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -531,7 +531,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
531 int i; 531 int i;
532 532
533 for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) { 533 for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
534 int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN; 534 u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
535 unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;
535 536
536 slot = &ring->slots[i]; 537 slot = &ring->slots[i];
537 dev_kfree_skb(slot->skb); 538 dev_kfree_skb(slot->skb);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e5911ccb2148..949a82458a29 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2044,6 +2044,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2044 ETH_OVREHEAD + 2044 ETH_OVREHEAD +
2045 mtu + 2045 mtu +
2046 BNX2X_FW_RX_ALIGN_END; 2046 BNX2X_FW_RX_ALIGN_END;
2047 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2047 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */ 2048 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2048 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) 2049 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2049 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; 2050 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
@@ -3052,7 +3053,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3052 3053
3053 del_timer_sync(&bp->timer); 3054 del_timer_sync(&bp->timer);
3054 3055
3055 if (IS_PF(bp)) { 3056 if (IS_PF(bp) && !BP_NOMCP(bp)) {
3056 /* Set ALWAYS_ALIVE bit in shmem */ 3057 /* Set ALWAYS_ALIVE bit in shmem */
3057 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 3058 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3058 bnx2x_drv_pulse(bp); 3059 bnx2x_drv_pulse(bp);
@@ -3134,7 +3135,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3134 bp->cnic_loaded = false; 3135 bp->cnic_loaded = false;
3135 3136
3136 /* Clear driver version indication in shmem */ 3137 /* Clear driver version indication in shmem */
3137 if (IS_PF(bp)) 3138 if (IS_PF(bp) && !BP_NOMCP(bp))
3138 bnx2x_update_mng_version(bp); 3139 bnx2x_update_mng_version(bp);
3139 3140
3140 /* Check if there are pending parity attentions. If there are - set 3141 /* Check if there are pending parity attentions. If there are - set
@@ -3942,15 +3943,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3942 /* when transmitting in a vf, start bd must hold the ethertype 3943 /* when transmitting in a vf, start bd must hold the ethertype
3943 * for fw to enforce it 3944 * for fw to enforce it
3944 */ 3945 */
3946 u16 vlan_tci = 0;
3945#ifndef BNX2X_STOP_ON_ERROR 3947#ifndef BNX2X_STOP_ON_ERROR
3946 if (IS_VF(bp)) 3948 if (IS_VF(bp)) {
3947#endif 3949#endif
3948 tx_start_bd->vlan_or_ethertype = 3950 /* Still need to consider inband vlan for enforced */
3949 cpu_to_le16(ntohs(eth->h_proto)); 3951 if (__vlan_get_tag(skb, &vlan_tci)) {
3952 tx_start_bd->vlan_or_ethertype =
3953 cpu_to_le16(ntohs(eth->h_proto));
3954 } else {
3955 tx_start_bd->bd_flags.as_bitfield |=
3956 (X_ETH_INBAND_VLAN <<
3957 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3958 tx_start_bd->vlan_or_ethertype =
3959 cpu_to_le16(vlan_tci);
3960 }
3950#ifndef BNX2X_STOP_ON_ERROR 3961#ifndef BNX2X_STOP_ON_ERROR
3951 else 3962 } else {
3952 /* used by FW for packet accounting */ 3963 /* used by FW for packet accounting */
3953 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); 3964 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3965 }
3954#endif 3966#endif
3955 } 3967 }
3956 3968
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d946bba43726..87534c6efd66 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -594,7 +594,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
594 * slots for the highest priority. 594 * slots for the highest priority.
595 */ 595 */
596 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : 596 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
597 NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 597 NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
598 /* Mapping between the CREDIT_WEIGHT registers and actual client 598 /* Mapping between the CREDIT_WEIGHT registers and actual client
599 * numbers 599 * numbers
600 */ 600 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index abb3ff6498dc..8ddb68a3fdb6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9570,6 +9570,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
9570 9570
9571 do { 9571 do {
9572 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9572 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9573
9574 /* If we read all 0xFFs, means we are in PCI error state and
9575 * should bail out to avoid crashes on adapter's FW reads.
9576 */
9577 if (bp->common.shmem_base == 0xFFFFFFFF) {
9578 bp->flags |= NO_MCP_FLAG;
9579 return -ENODEV;
9580 }
9581
9573 if (bp->common.shmem_base) { 9582 if (bp->common.shmem_base) {
9574 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9583 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9575 if (val & SHR_MEM_VALIDITY_MB) 9584 if (val & SHR_MEM_VALIDITY_MB)
@@ -14214,7 +14223,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14214 BNX2X_ERR("IO slot reset --> driver unload\n"); 14223 BNX2X_ERR("IO slot reset --> driver unload\n");
14215 14224
14216 /* MCP should have been reset; Need to wait for validity */ 14225 /* MCP should have been reset; Need to wait for validity */
14217 bnx2x_init_shmem(bp); 14226 if (bnx2x_init_shmem(bp)) {
14227 rtnl_unlock();
14228 return PCI_ERS_RESULT_DISCONNECT;
14229 }
14218 14230
14219 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 14231 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14220 u32 v; 14232 u32 v;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a38a9cb3d544..9904d768a20a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2925,6 +2925,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
2925 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2925 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2926 struct hwrm_vnic_tpa_cfg_input req = {0}; 2926 struct hwrm_vnic_tpa_cfg_input req = {0};
2927 2927
2928 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2929 return 0;
2930
2928 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 2931 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
2929 2932
2930 if (tpa_flags) { 2933 if (tpa_flags) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index ea044bbcd384..3eebb57975e3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -29,7 +29,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
29 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); 29 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
30 return -EINVAL; 30 return -EINVAL;
31 } 31 }
32 if (vf_id >= bp->pf.max_vfs) { 32 if (vf_id >= bp->pf.active_vfs) {
33 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); 33 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
34 return -EINVAL; 34 return -EINVAL;
35 } 35 }
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ab53e0cfb4dc..ce3a56bea6e6 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8722,14 +8722,15 @@ static void tg3_free_consistent(struct tg3 *tp)
8722 tg3_mem_rx_release(tp); 8722 tg3_mem_rx_release(tp);
8723 tg3_mem_tx_release(tp); 8723 tg3_mem_tx_release(tp);
8724 8724
8725 /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */ 8725 /* tp->hw_stats can be referenced safely:
8726 tg3_full_lock(tp, 0); 8726 * 1. under rtnl_lock
8727 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8728 */
8727 if (tp->hw_stats) { 8729 if (tp->hw_stats) {
8728 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), 8730 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8729 tp->hw_stats, tp->stats_mapping); 8731 tp->hw_stats, tp->stats_mapping);
8730 tp->hw_stats = NULL; 8732 tp->hw_stats = NULL;
8731 } 8733 }
8732 tg3_full_unlock(tp);
8733} 8734}
8734 8735
8735/* 8736/*
@@ -9277,6 +9278,15 @@ static int tg3_chip_reset(struct tg3 *tp)
9277 9278
9278 tg3_restore_clk(tp); 9279 tg3_restore_clk(tp);
9279 9280
9281 /* Increase the core clock speed to fix tx timeout issue for 5762
9282 * with 100Mbps link speed.
9283 */
9284 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9285 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9286 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9287 TG3_CPMU_MAC_ORIDE_ENABLE);
9288 }
9289
9280 /* Reprobe ASF enable state. */ 9290 /* Reprobe ASF enable state. */
9281 tg3_flag_clear(tp, ENABLE_ASF); 9291 tg3_flag_clear(tp, ENABLE_ASF);
9282 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 9292 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
@@ -10051,6 +10061,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
10051 10061
10052 tw32(GRC_MODE, tp->grc_mode | val); 10062 tw32(GRC_MODE, tp->grc_mode | val);
10053 10063
10064 /* On one of the AMD platform, MRRS is restricted to 4000 because of
10065 * south bridge limitation. As a workaround, Driver is setting MRRS
10066 * to 2048 instead of default 4096.
10067 */
10068 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10069 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10070 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10071 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10072 }
10073
10054 /* Setup the timer prescalar register. Clock is always 66Mhz. */ 10074 /* Setup the timer prescalar register. Clock is always 66Mhz. */
10055 val = tr32(GRC_MISC_CFG); 10075 val = tr32(GRC_MISC_CFG);
10056 val &= ~0xff; 10076 val &= ~0xff;
@@ -14153,7 +14173,7 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14153 struct tg3 *tp = netdev_priv(dev); 14173 struct tg3 *tp = netdev_priv(dev);
14154 14174
14155 spin_lock_bh(&tp->lock); 14175 spin_lock_bh(&tp->lock);
14156 if (!tp->hw_stats) { 14176 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14157 *stats = tp->net_stats_prev; 14177 *stats = tp->net_stats_prev;
14158 spin_unlock_bh(&tp->lock); 14178 spin_unlock_bh(&tp->lock);
14159 return stats; 14179 return stats;
@@ -14230,7 +14250,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14230 */ 14250 */
14231 if (tg3_asic_rev(tp) == ASIC_REV_57766 || 14251 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14232 tg3_asic_rev(tp) == ASIC_REV_5717 || 14252 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14233 tg3_asic_rev(tp) == ASIC_REV_5719) 14253 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14254 tg3_asic_rev(tp) == ASIC_REV_5720)
14234 reset_phy = true; 14255 reset_phy = true;
14235 14256
14236 err = tg3_restart_hw(tp, reset_phy); 14257 err = tg3_restart_hw(tp, reset_phy);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 31c9f8295953..19532961e173 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -95,6 +95,7 @@
95#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106 95#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106
96#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109 96#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109
97#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a 97#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a
98#define TG3PCI_SUBDEVICE_ID_DELL_5762 0x07f0
98#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ 99#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ
99#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c 100#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c
100#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a 101#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a
@@ -280,6 +281,9 @@
280#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ 281#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
281#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ 282#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
282/* 0xa8 --> 0xb8 unused */ 283/* 0xa8 --> 0xb8 unused */
284#define TG3PCI_DEV_STATUS_CTRL 0x000000b4
285#define MAX_READ_REQ_SIZE_2048 0x00004000
286#define MAX_READ_REQ_MASK 0x00007000
283#define TG3PCI_DUAL_MAC_CTRL 0x000000b8 287#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
284#define DUAL_MAC_CTRL_CH_MASK 0x00000003 288#define DUAL_MAC_CTRL_CH_MASK 0x00000003
285#define DUAL_MAC_CTRL_ID 0x00000004 289#define DUAL_MAC_CTRL_ID 0x00000004
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 0f6811860ad5..a36e38676640 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2845,7 +2845,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2845static void 2845static void
2846bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) 2846bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2847{ 2847{
2848 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); 2848 strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2849} 2849}
2850 2850
2851static void 2851static void
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index cc1725616f9d..50747573f42e 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2823,7 +2823,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2823 if (!g) { 2823 if (!g) {
2824 netif_info(lio, tx_err, lio->netdev, 2824 netif_info(lio, tx_err, lio->netdev,
2825 "Transmit scatter gather: glist null!\n"); 2825 "Transmit scatter gather: glist null!\n");
2826 goto lio_xmit_dma_failed; 2826 goto lio_xmit_failed;
2827 } 2827 }
2828 2828
2829 cmdsetup.s.gather = 1; 2829 cmdsetup.s.gather = 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 8f7aa53a4c4b..7ae8374bff13 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -50,6 +50,7 @@
50#include <linux/stringify.h> 50#include <linux/stringify.h>
51#include <linux/sched.h> 51#include <linux/sched.h>
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/nospec.h>
53#include <asm/uaccess.h> 54#include <asm/uaccess.h>
54 55
55#include "common.h" 56#include "common.h"
@@ -2256,6 +2257,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2256 2257
2257 if (t.qset_idx >= nqsets) 2258 if (t.qset_idx >= nqsets)
2258 return -EINVAL; 2259 return -EINVAL;
2260 t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2259 2261
2260 q = &adapter->params.sge.qset[q1 + t.qset_idx]; 2262 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2261 t.rspq_size = q->rspq_size; 2263 t.rspq_size = q->rspq_size;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cf61a5869c6e..de23f23b41de 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -6076,13 +6076,18 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6076 if (!t4_fw_matches_chip(adap, fw_hdr)) 6076 if (!t4_fw_matches_chip(adap, fw_hdr))
6077 return -EINVAL; 6077 return -EINVAL;
6078 6078
6079 /* Disable FW_OK flag so that mbox commands with FW_OK flag set
6080 * wont be sent when we are flashing FW.
6081 */
6082 adap->flags &= ~FW_OK;
6083
6079 ret = t4_fw_halt(adap, mbox, force); 6084 ret = t4_fw_halt(adap, mbox, force);
6080 if (ret < 0 && !force) 6085 if (ret < 0 && !force)
6081 return ret; 6086 goto out;
6082 6087
6083 ret = t4_load_fw(adap, fw_data, size); 6088 ret = t4_load_fw(adap, fw_data, size);
6084 if (ret < 0) 6089 if (ret < 0)
6085 return ret; 6090 goto out;
6086 6091
6087 /* 6092 /*
6088 * Older versions of the firmware don't understand the new 6093 * Older versions of the firmware don't understand the new
@@ -6093,7 +6098,17 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6093 * its header flags to see if it advertises the capability. 6098 * its header flags to see if it advertises the capability.
6094 */ 6099 */
6095 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); 6100 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6096 return t4_fw_restart(adap, mbox, reset); 6101 ret = t4_fw_restart(adap, mbox, reset);
6102
6103 /* Grab potentially new Firmware Device Log parameters so we can see
6104 * how healthy the new Firmware is. It's okay to contact the new
6105 * Firmware for these parameters even though, as far as it's
6106 * concerned, we've never said "HELLO" to it ...
6107 */
6108 (void)t4_init_devlog_params(adap);
6109out:
6110 adap->flags |= FW_OK;
6111 return ret;
6097} 6112}
6098 6113
6099/** 6114/**
@@ -7696,7 +7711,16 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
7696 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]); 7711 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
7697 if (ret) 7712 if (ret)
7698 break; 7713 break;
7699 idx = (idx + 1) & UPDBGLARDPTR_M; 7714
7715 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
7716 * identify the 32-bit portion of the full 312-bit data
7717 */
7718 if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
7719 idx = (idx & 0xff0) + 0x10;
7720 else
7721 idx++;
7722 /* address can't exceed 0xfff */
7723 idx &= UPDBGLARDPTR_M;
7700 } 7724 }
7701restart: 7725restart:
7702 if (cfg & UPDBGLAEN_F) { 7726 if (cfg & UPDBGLAEN_F) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index fa3786a9d30e..ec8ffd7eae33 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2604,8 +2604,8 @@ void t4vf_sge_stop(struct adapter *adapter)
2604int t4vf_sge_init(struct adapter *adapter) 2604int t4vf_sge_init(struct adapter *adapter)
2605{ 2605{
2606 struct sge_params *sge_params = &adapter->params.sge; 2606 struct sge_params *sge_params = &adapter->params.sge;
2607 u32 fl0 = sge_params->sge_fl_buffer_size[0]; 2607 u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
2608 u32 fl1 = sge_params->sge_fl_buffer_size[1]; 2608 u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
2609 struct sge *s = &adapter->sge; 2609 struct sge *s = &adapter->sge;
2610 unsigned int ingpadboundary, ingpackboundary; 2610 unsigned int ingpadboundary, ingpackboundary;
2611 2611
@@ -2614,9 +2614,20 @@ int t4vf_sge_init(struct adapter *adapter)
2614 * the Physical Function Driver. Ideally we should be able to deal 2614 * the Physical Function Driver. Ideally we should be able to deal
2615 * with _any_ configuration. Practice is different ... 2615 * with _any_ configuration. Practice is different ...
2616 */ 2616 */
2617 if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) { 2617
2618 /* We only bother using the Large Page logic if the Large Page Buffer
2619 * is larger than our Page Size Buffer.
2620 */
2621 if (fl_large_pg <= fl_small_pg)
2622 fl_large_pg = 0;
2623
2624 /* The Page Size Buffer must be exactly equal to our Page Size and the
2625 * Large Page Size Buffer should be 0 (per above) or a power of 2.
2626 */
2627 if (fl_small_pg != PAGE_SIZE ||
2628 (fl_large_pg & (fl_large_pg - 1)) != 0) {
2618 dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", 2629 dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2619 fl0, fl1); 2630 fl_small_pg, fl_large_pg);
2620 return -EINVAL; 2631 return -EINVAL;
2621 } 2632 }
2622 if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) { 2633 if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
@@ -2627,8 +2638,8 @@ int t4vf_sge_init(struct adapter *adapter)
2627 /* 2638 /*
2628 * Now translate the adapter parameters into our internal forms. 2639 * Now translate the adapter parameters into our internal forms.
2629 */ 2640 */
2630 if (fl1) 2641 if (fl_large_pg)
2631 s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; 2642 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2632 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F) 2643 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
2633 ? 128 : 64); 2644 ? 128 : 64);
2634 s->pktshift = PKTSHIFT_G(sge_params->sge_control); 2645 s->pktshift = PKTSHIFT_G(sge_params->sge_control);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b36643ef0593..029fa5bee520 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1726,6 +1726,8 @@ static int enic_open(struct net_device *netdev)
1726 } 1726 }
1727 1727
1728 for (i = 0; i < enic->rq_count; i++) { 1728 for (i = 0; i < enic->rq_count; i++) {
1729 /* enable rq before updating rq desc */
1730 vnic_rq_enable(&enic->rq[i]);
1729 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); 1731 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
1730 /* Need at least one buffer on ring to get going */ 1732 /* Need at least one buffer on ring to get going */
1731 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { 1733 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
@@ -1737,8 +1739,6 @@ static int enic_open(struct net_device *netdev)
1737 1739
1738 for (i = 0; i < enic->wq_count; i++) 1740 for (i = 0; i < enic->wq_count; i++)
1739 vnic_wq_enable(&enic->wq[i]); 1741 vnic_wq_enable(&enic->wq[i]);
1740 for (i = 0; i < enic->rq_count; i++)
1741 vnic_rq_enable(&enic->rq[i]);
1742 1742
1743 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) 1743 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
1744 enic_dev_add_station_addr(enic); 1744 enic_dev_add_station_addr(enic);
@@ -1765,8 +1765,12 @@ static int enic_open(struct net_device *netdev)
1765 return 0; 1765 return 0;
1766 1766
1767err_out_free_rq: 1767err_out_free_rq:
1768 for (i = 0; i < enic->rq_count; i++) 1768 for (i = 0; i < enic->rq_count; i++) {
1769 err = vnic_rq_disable(&enic->rq[i]);
1770 if (err)
1771 return err;
1769 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); 1772 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1773 }
1770 enic_dev_notify_unset(enic); 1774 enic_dev_notify_unset(enic);
1771err_out_free_intr: 1775err_out_free_intr:
1772 enic_unset_affinity_hint(enic); 1776 enic_unset_affinity_hint(enic);
@@ -2539,11 +2543,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2539 pci_set_master(pdev); 2543 pci_set_master(pdev);
2540 2544
2541 /* Query PCI controller on system for DMA addressing 2545 /* Query PCI controller on system for DMA addressing
2542 * limitation for the device. Try 64-bit first, and 2546 * limitation for the device. Try 47-bit first, and
2543 * fail to 32-bit. 2547 * fail to 32-bit.
2544 */ 2548 */
2545 2549
2546 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2550 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
2547 if (err) { 2551 if (err) {
2548 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2552 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2549 if (err) { 2553 if (err) {
@@ -2557,10 +2561,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2557 goto err_out_release_regions; 2561 goto err_out_release_regions;
2558 } 2562 }
2559 } else { 2563 } else {
2560 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2564 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
2561 if (err) { 2565 if (err) {
2562 dev_err(dev, "Unable to obtain %u-bit DMA " 2566 dev_err(dev, "Unable to obtain %u-bit DMA "
2563 "for consistent allocations, aborting\n", 64); 2567 "for consistent allocations, aborting\n", 47);
2564 goto err_out_release_regions; 2568 goto err_out_release_regions;
2565 } 2569 }
2566 using_dac = 1; 2570 using_dac = 1;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 8966f3159bb2..3acde3b9b767 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1990,7 +1990,7 @@ SetMulticastFilter(struct net_device *dev)
1990 1990
1991static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST; 1991static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1992 1992
1993static int __init de4x5_eisa_probe (struct device *gendev) 1993static int de4x5_eisa_probe(struct device *gendev)
1994{ 1994{
1995 struct eisa_device *edev; 1995 struct eisa_device *edev;
1996 u_long iobase; 1996 u_long iobase;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 6d0c5d5eea6d..58c0fccdd8cb 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -28,6 +28,7 @@
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/of.h>
31#include <linux/phy.h> 32#include <linux/phy.h>
32#include <linux/platform_device.h> 33#include <linux/platform_device.h>
33#include <net/ip.h> 34#include <net/ip.h>
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 458e2d97d096..ae8e4fc22e7b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3539,6 +3539,8 @@ fec_drv_remove(struct platform_device *pdev)
3539 fec_enet_mii_remove(fep); 3539 fec_enet_mii_remove(fep);
3540 if (fep->reg_phy) 3540 if (fep->reg_phy)
3541 regulator_disable(fep->reg_phy); 3541 regulator_disable(fep->reg_phy);
3542 pm_runtime_put(&pdev->dev);
3543 pm_runtime_disable(&pdev->dev);
3542 of_node_put(fep->phy_node); 3544 of_node_put(fep->phy_node);
3543 free_netdev(ndev); 3545 free_netdev(ndev);
3544 3546
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 40071dad1c57..9c76f1a2f57b 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -382,7 +382,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
382{ 382{
383 const struct of_device_id *id = 383 const struct of_device_id *id =
384 of_match_device(fsl_pq_mdio_match, &pdev->dev); 384 of_match_device(fsl_pq_mdio_match, &pdev->dev);
385 const struct fsl_pq_mdio_data *data = id->data; 385 const struct fsl_pq_mdio_data *data;
386 struct device_node *np = pdev->dev.of_node; 386 struct device_node *np = pdev->dev.of_node;
387 struct resource res; 387 struct resource res;
388 struct device_node *tbi; 388 struct device_node *tbi;
@@ -390,6 +390,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
390 struct mii_bus *new_bus; 390 struct mii_bus *new_bus;
391 int err; 391 int err;
392 392
393 if (!id) {
394 dev_err(&pdev->dev, "Failed to match device\n");
395 return -ENODEV;
396 }
397
398 data = id->data;
399
393 dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); 400 dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
394 401
395 new_bus = mdiobus_alloc_size(sizeof(*priv)); 402 new_bus = mdiobus_alloc_size(sizeof(*priv));
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7923bfdc9b30..2d61369f586f 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1375,9 +1375,11 @@ static int gfar_probe(struct platform_device *ofdev)
1375 1375
1376 gfar_init_addr_hash_table(priv); 1376 gfar_init_addr_hash_table(priv);
1377 1377
1378 /* Insert receive time stamps into padding alignment bytes */ 1378 /* Insert receive time stamps into padding alignment bytes, and
1379 * plus 2 bytes padding to ensure the cpu alignment.
1380 */
1379 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1381 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1380 priv->padding = 8; 1382 priv->padding = 8 + DEFAULT_PADDING;
1381 1383
1382 if (dev->features & NETIF_F_IP_CSUM || 1384 if (dev->features & NETIF_F_IP_CSUM ||
1383 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1385 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
@@ -3051,9 +3053,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
3051 if (ndev->features & NETIF_F_RXCSUM) 3053 if (ndev->features & NETIF_F_RXCSUM)
3052 gfar_rx_checksum(skb, fcb); 3054 gfar_rx_checksum(skb, fcb);
3053 3055
3054 /* Tell the skb what kind of packet this is */
3055 skb->protocol = eth_type_trans(skb, ndev);
3056
3057 /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. 3056 /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
3058 * Even if vlan rx accel is disabled, on some chips 3057 * Even if vlan rx accel is disabled, on some chips
3059 * RXFCB_VLN is pseudo randomly set. 3058 * RXFCB_VLN is pseudo randomly set.
@@ -3124,13 +3123,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
3124 continue; 3123 continue;
3125 } 3124 }
3126 3125
3126 gfar_process_frame(ndev, skb);
3127
3127 /* Increment the number of packets */ 3128 /* Increment the number of packets */
3128 total_pkts++; 3129 total_pkts++;
3129 total_bytes += skb->len; 3130 total_bytes += skb->len;
3130 3131
3131 skb_record_rx_queue(skb, rx_queue->qindex); 3132 skb_record_rx_queue(skb, rx_queue->qindex);
3132 3133
3133 gfar_process_frame(ndev, skb); 3134 skb->protocol = eth_type_trans(skb, ndev);
3134 3135
3135 /* Send the packet up the stack */ 3136 /* Send the packet up the stack */
3136 napi_gro_receive(&rx_queue->grp->napi_rx, skb); 3137 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index b40fba929d65..d540ee190038 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -314,11 +314,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
314 now = tmr_cnt_read(etsects); 314 now = tmr_cnt_read(etsects);
315 now += delta; 315 now += delta;
316 tmr_cnt_write(etsects, now); 316 tmr_cnt_write(etsects, now);
317 set_fipers(etsects);
317 318
318 spin_unlock_irqrestore(&etsects->lock, flags); 319 spin_unlock_irqrestore(&etsects->lock, flags);
319 320
320 set_fipers(etsects);
321
322 return 0; 321 return 0;
323} 322}
324 323
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 802d55457f19..b1a27aef4425 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -776,7 +776,7 @@ static void hns_xgmac_get_strings(u32 stringset, u8 *data)
776 */ 776 */
777static int hns_xgmac_get_sset_count(int stringset) 777static int hns_xgmac_get_sset_count(int stringset)
778{ 778{
779 if (stringset == ETH_SS_STATS) 779 if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
780 return ARRAY_SIZE(g_xgmac_stats_string); 780 return ARRAY_SIZE(g_xgmac_stats_string);
781 781
782 return 0; 782 return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index a0332129970b..4b91eb70c683 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1000,8 +1000,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
1000 cnt--; 1000 cnt--;
1001 1001
1002 return cnt; 1002 return cnt;
1003 } else { 1003 } else if (stringset == ETH_SS_STATS) {
1004 return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset)); 1004 return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
1005 } else {
1006 return -EOPNOTSUPP;
1005 } 1007 }
1006} 1008}
1007 1009
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index ae6e30d39f0f..3daf2d4a7ca0 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -194,7 +194,6 @@ static const char *hp100_isa_tbl[] = {
194}; 194};
195#endif 195#endif
196 196
197#ifdef CONFIG_EISA
198static struct eisa_device_id hp100_eisa_tbl[] = { 197static struct eisa_device_id hp100_eisa_tbl[] = {
199 { "HWPF180" }, /* HP J2577 rev A */ 198 { "HWPF180" }, /* HP J2577 rev A */
200 { "HWP1920" }, /* HP 27248B */ 199 { "HWP1920" }, /* HP 27248B */
@@ -205,9 +204,7 @@ static struct eisa_device_id hp100_eisa_tbl[] = {
205 { "" } /* Mandatory final entry ! */ 204 { "" } /* Mandatory final entry ! */
206}; 205};
207MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl); 206MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
208#endif
209 207
210#ifdef CONFIG_PCI
211static const struct pci_device_id hp100_pci_tbl[] = { 208static const struct pci_device_id hp100_pci_tbl[] = {
212 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,}, 209 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
213 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,}, 210 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
@@ -219,7 +216,6 @@ static const struct pci_device_id hp100_pci_tbl[] = {
219 {} /* Terminating entry */ 216 {} /* Terminating entry */
220}; 217};
221MODULE_DEVICE_TABLE(pci, hp100_pci_tbl); 218MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
222#endif
223 219
224static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO; 220static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
225static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX; 221static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
@@ -2842,8 +2838,7 @@ static void cleanup_dev(struct net_device *d)
2842 free_netdev(d); 2838 free_netdev(d);
2843} 2839}
2844 2840
2845#ifdef CONFIG_EISA 2841static int hp100_eisa_probe(struct device *gendev)
2846static int __init hp100_eisa_probe (struct device *gendev)
2847{ 2842{
2848 struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); 2843 struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
2849 struct eisa_device *edev = to_eisa_device(gendev); 2844 struct eisa_device *edev = to_eisa_device(gendev);
@@ -2884,9 +2879,7 @@ static struct eisa_driver hp100_eisa_driver = {
2884 .remove = hp100_eisa_remove, 2879 .remove = hp100_eisa_remove,
2885 } 2880 }
2886}; 2881};
2887#endif
2888 2882
2889#ifdef CONFIG_PCI
2890static int hp100_pci_probe(struct pci_dev *pdev, 2883static int hp100_pci_probe(struct pci_dev *pdev,
2891 const struct pci_device_id *ent) 2884 const struct pci_device_id *ent)
2892{ 2885{
@@ -2955,7 +2948,6 @@ static struct pci_driver hp100_pci_driver = {
2955 .probe = hp100_pci_probe, 2948 .probe = hp100_pci_probe,
2956 .remove = hp100_pci_remove, 2949 .remove = hp100_pci_remove,
2957}; 2950};
2958#endif
2959 2951
2960/* 2952/*
2961 * module section 2953 * module section
@@ -3032,23 +3024,17 @@ static int __init hp100_module_init(void)
3032 err = hp100_isa_init(); 3024 err = hp100_isa_init();
3033 if (err && err != -ENODEV) 3025 if (err && err != -ENODEV)
3034 goto out; 3026 goto out;
3035#ifdef CONFIG_EISA
3036 err = eisa_driver_register(&hp100_eisa_driver); 3027 err = eisa_driver_register(&hp100_eisa_driver);
3037 if (err && err != -ENODEV) 3028 if (err && err != -ENODEV)
3038 goto out2; 3029 goto out2;
3039#endif
3040#ifdef CONFIG_PCI
3041 err = pci_register_driver(&hp100_pci_driver); 3030 err = pci_register_driver(&hp100_pci_driver);
3042 if (err && err != -ENODEV) 3031 if (err && err != -ENODEV)
3043 goto out3; 3032 goto out3;
3044#endif
3045 out: 3033 out:
3046 return err; 3034 return err;
3047 out3: 3035 out3:
3048#ifdef CONFIG_EISA
3049 eisa_driver_unregister (&hp100_eisa_driver); 3036 eisa_driver_unregister (&hp100_eisa_driver);
3050 out2: 3037 out2:
3051#endif
3052 hp100_isa_cleanup(); 3038 hp100_isa_cleanup();
3053 goto out; 3039 goto out;
3054} 3040}
@@ -3057,12 +3043,8 @@ static int __init hp100_module_init(void)
3057static void __exit hp100_module_exit(void) 3043static void __exit hp100_module_exit(void)
3058{ 3044{
3059 hp100_isa_cleanup(); 3045 hp100_isa_cleanup();
3060#ifdef CONFIG_EISA
3061 eisa_driver_unregister (&hp100_eisa_driver); 3046 eisa_driver_unregister (&hp100_eisa_driver);
3062#endif
3063#ifdef CONFIG_PCI
3064 pci_unregister_driver (&hp100_pci_driver); 3047 pci_unregister_driver (&hp100_pci_driver);
3065#endif
3066} 3048}
3067 3049
3068module_init(hp100_module_init) 3050module_init(hp100_module_init)
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 5d7db6c01c46..f301c03c527b 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -342,6 +342,7 @@ static int emac_reset(struct emac_instance *dev)
342{ 342{
343 struct emac_regs __iomem *p = dev->emacp; 343 struct emac_regs __iomem *p = dev->emacp;
344 int n = 20; 344 int n = 20;
345 bool __maybe_unused try_internal_clock = false;
345 346
346 DBG(dev, "reset" NL); 347 DBG(dev, "reset" NL);
347 348
@@ -354,6 +355,7 @@ static int emac_reset(struct emac_instance *dev)
354 } 355 }
355 356
356#ifdef CONFIG_PPC_DCR_NATIVE 357#ifdef CONFIG_PPC_DCR_NATIVE
358do_retry:
357 /* 359 /*
358 * PPC460EX/GT Embedded Processor Advanced User's Manual 360 * PPC460EX/GT Embedded Processor Advanced User's Manual
359 * section 28.10.1 Mode Register 0 (EMACx_MR0) states: 361 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
@@ -361,10 +363,19 @@ static int emac_reset(struct emac_instance *dev)
361 * of the EMAC. If none is present, select the internal clock 363 * of the EMAC. If none is present, select the internal clock
362 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). 364 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
363 * After a soft reset, select the external clock. 365 * After a soft reset, select the external clock.
366 *
367 * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
368 * ethernet cable is not attached. This causes the reset to timeout
369 * and the PHY detection code in emac_init_phy() is unable to
370 * communicate and detect the AR8035-A PHY. As a result, the emac
371 * driver bails out early and the user has no ethernet.
372 * In order to stay compatible with existing configurations, the
373 * driver will temporarily switch to the internal clock, after
374 * the first reset fails.
364 */ 375 */
365 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { 376 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
366 if (dev->phy_address == 0xffffffff && 377 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
367 dev->phy_map == 0xffffffff) { 378 dev->phy_map == 0xffffffff)) {
368 /* No PHY: select internal loop clock before reset */ 379 /* No PHY: select internal loop clock before reset */
369 dcri_clrset(SDR0, SDR0_ETH_CFG, 380 dcri_clrset(SDR0, SDR0_ETH_CFG,
370 0, SDR0_ETH_CFG_ECS << dev->cell_index); 381 0, SDR0_ETH_CFG_ECS << dev->cell_index);
@@ -382,8 +393,15 @@ static int emac_reset(struct emac_instance *dev)
382 393
383#ifdef CONFIG_PPC_DCR_NATIVE 394#ifdef CONFIG_PPC_DCR_NATIVE
384 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { 395 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
385 if (dev->phy_address == 0xffffffff && 396 if (!n && !try_internal_clock) {
386 dev->phy_map == 0xffffffff) { 397 /* first attempt has timed out. */
398 n = 20;
399 try_internal_clock = true;
400 goto do_retry;
401 }
402
403 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
404 dev->phy_map == 0xffffffff)) {
387 /* No PHY: restore external clock source after reset */ 405 /* No PHY: restore external clock source after reset */
388 dcri_clrset(SDR0, SDR0_ETH_CFG, 406 dcri_clrset(SDR0, SDR0_ETH_CFG,
389 SDR0_ETH_CFG_ECS << dev->cell_index, 0); 407 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 98fe5a2cd6e3..481e994490ce 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -331,7 +331,8 @@ struct e1000_adapter {
331enum e1000_state_t { 331enum e1000_state_t {
332 __E1000_TESTING, 332 __E1000_TESTING,
333 __E1000_RESETTING, 333 __E1000_RESETTING,
334 __E1000_DOWN 334 __E1000_DOWN,
335 __E1000_DISABLED
335}; 336};
336 337
337#undef pr_fmt 338#undef pr_fmt
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 068023595d84..2a1d4a9d3c19 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -940,7 +940,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
940static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 940static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
941{ 941{
942 struct net_device *netdev; 942 struct net_device *netdev;
943 struct e1000_adapter *adapter; 943 struct e1000_adapter *adapter = NULL;
944 struct e1000_hw *hw; 944 struct e1000_hw *hw;
945 945
946 static int cards_found = 0; 946 static int cards_found = 0;
@@ -950,6 +950,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
950 u16 tmp = 0; 950 u16 tmp = 0;
951 u16 eeprom_apme_mask = E1000_EEPROM_APME; 951 u16 eeprom_apme_mask = E1000_EEPROM_APME;
952 int bars, need_ioport; 952 int bars, need_ioport;
953 bool disable_dev = false;
953 954
954 /* do not allocate ioport bars when not needed */ 955 /* do not allocate ioport bars when not needed */
955 need_ioport = e1000_is_need_ioport(pdev); 956 need_ioport = e1000_is_need_ioport(pdev);
@@ -1250,11 +1251,13 @@ err_mdio_ioremap:
1250 iounmap(hw->ce4100_gbe_mdio_base_virt); 1251 iounmap(hw->ce4100_gbe_mdio_base_virt);
1251 iounmap(hw->hw_addr); 1252 iounmap(hw->hw_addr);
1252err_ioremap: 1253err_ioremap:
1254 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1253 free_netdev(netdev); 1255 free_netdev(netdev);
1254err_alloc_etherdev: 1256err_alloc_etherdev:
1255 pci_release_selected_regions(pdev, bars); 1257 pci_release_selected_regions(pdev, bars);
1256err_pci_reg: 1258err_pci_reg:
1257 pci_disable_device(pdev); 1259 if (!adapter || disable_dev)
1260 pci_disable_device(pdev);
1258 return err; 1261 return err;
1259} 1262}
1260 1263
@@ -1272,6 +1275,7 @@ static void e1000_remove(struct pci_dev *pdev)
1272 struct net_device *netdev = pci_get_drvdata(pdev); 1275 struct net_device *netdev = pci_get_drvdata(pdev);
1273 struct e1000_adapter *adapter = netdev_priv(netdev); 1276 struct e1000_adapter *adapter = netdev_priv(netdev);
1274 struct e1000_hw *hw = &adapter->hw; 1277 struct e1000_hw *hw = &adapter->hw;
1278 bool disable_dev;
1275 1279
1276 e1000_down_and_stop(adapter); 1280 e1000_down_and_stop(adapter);
1277 e1000_release_manageability(adapter); 1281 e1000_release_manageability(adapter);
@@ -1290,9 +1294,11 @@ static void e1000_remove(struct pci_dev *pdev)
1290 iounmap(hw->flash_address); 1294 iounmap(hw->flash_address);
1291 pci_release_selected_regions(pdev, adapter->bars); 1295 pci_release_selected_regions(pdev, adapter->bars);
1292 1296
1297 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1293 free_netdev(netdev); 1298 free_netdev(netdev);
1294 1299
1295 pci_disable_device(pdev); 1300 if (disable_dev)
1301 pci_disable_device(pdev);
1296} 1302}
1297 1303
1298/** 1304/**
@@ -5135,7 +5141,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5135 if (netif_running(netdev)) 5141 if (netif_running(netdev))
5136 e1000_free_irq(adapter); 5142 e1000_free_irq(adapter);
5137 5143
5138 pci_disable_device(pdev); 5144 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5145 pci_disable_device(pdev);
5139 5146
5140 return 0; 5147 return 0;
5141} 5148}
@@ -5179,6 +5186,10 @@ static int e1000_resume(struct pci_dev *pdev)
5179 pr_err("Cannot enable PCI device from suspend\n"); 5186 pr_err("Cannot enable PCI device from suspend\n");
5180 return err; 5187 return err;
5181 } 5188 }
5189
5190 /* flush memory to make sure state is correct */
5191 smp_mb__before_atomic();
5192 clear_bit(__E1000_DISABLED, &adapter->flags);
5182 pci_set_master(pdev); 5193 pci_set_master(pdev);
5183 5194
5184 pci_enable_wake(pdev, PCI_D3hot, 0); 5195 pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5253,7 +5264,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5253 5264
5254 if (netif_running(netdev)) 5265 if (netif_running(netdev))
5255 e1000_down(adapter); 5266 e1000_down(adapter);
5256 pci_disable_device(pdev); 5267
5268 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5269 pci_disable_device(pdev);
5257 5270
5258 /* Request a slot slot reset. */ 5271 /* Request a slot slot reset. */
5259 return PCI_ERS_RESULT_NEED_RESET; 5272 return PCI_ERS_RESULT_NEED_RESET;
@@ -5281,6 +5294,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5281 pr_err("Cannot re-enable PCI device after reset.\n"); 5294 pr_err("Cannot re-enable PCI device after reset.\n");
5282 return PCI_ERS_RESULT_DISCONNECT; 5295 return PCI_ERS_RESULT_DISCONNECT;
5283 } 5296 }
5297
5298 /* flush memory to make sure state is correct */
5299 smp_mb__before_atomic();
5300 clear_bit(__E1000_DISABLED, &adapter->flags);
5284 pci_set_master(pdev); 5301 pci_set_master(pdev);
5285 5302
5286 pci_enable_wake(pdev, PCI_D3hot, 0); 5303 pci_enable_wake(pdev, PCI_D3hot, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 1908a38e7f31..485b9cc53f8b 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1574,7 +1574,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1574 * we have already determined whether we have link or not. 1574 * we have already determined whether we have link or not.
1575 */ 1575 */
1576 if (!mac->autoneg) 1576 if (!mac->autoneg)
1577 return -E1000_ERR_CONFIG; 1577 return 1;
1578 1578
1579 /* Auto-Neg is enabled. Auto Speed Detection takes care 1579 /* Auto-Neg is enabled. Auto Speed Detection takes care
1580 * of MAC speed/duplex configuration. So we only need to 1580 * of MAC speed/duplex configuration. So we only need to
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 645ace74429e..fe133f33a6c6 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -450,7 +450,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
450 * we have already determined whether we have link or not. 450 * we have already determined whether we have link or not.
451 */ 451 */
452 if (!mac->autoneg) 452 if (!mac->autoneg)
453 return -E1000_ERR_CONFIG; 453 return 1;
454 454
455 /* Auto-Neg is enabled. Auto Speed Detection takes care 455 /* Auto-Neg is enabled. Auto Speed Detection takes care
456 * of MAC speed/duplex configuration. So we only need to 456 * of MAC speed/duplex configuration. So we only need to
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 5205f1ebe381..6369d88b81c1 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1182,6 +1182,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
1182 struct e1000_hw *hw = &adapter->hw; 1182 struct e1000_hw *hw = &adapter->hw;
1183 1183
1184 if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { 1184 if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
1185 struct sk_buff *skb = adapter->tx_hwtstamp_skb;
1185 struct skb_shared_hwtstamps shhwtstamps; 1186 struct skb_shared_hwtstamps shhwtstamps;
1186 u64 txstmp; 1187 u64 txstmp;
1187 1188
@@ -1190,9 +1191,14 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
1190 1191
1191 e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp); 1192 e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
1192 1193
1193 skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); 1194 /* Clear the global tx_hwtstamp_skb pointer and force writes
1194 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); 1195 * prior to notifying the stack of a Tx timestamp.
1196 */
1195 adapter->tx_hwtstamp_skb = NULL; 1197 adapter->tx_hwtstamp_skb = NULL;
1198 wmb(); /* force write prior to skb_tstamp_tx */
1199
1200 skb_tstamp_tx(skb, &shhwtstamps);
1201 dev_kfree_skb_any(skb);
1196 } else if (time_after(jiffies, adapter->tx_hwtstamp_start 1202 } else if (time_after(jiffies, adapter->tx_hwtstamp_start
1197 + adapter->tx_timeout_factor * HZ)) { 1203 + adapter->tx_timeout_factor * HZ)) {
1198 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); 1204 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
@@ -2324,8 +2330,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2324{ 2330{
2325 struct pci_dev *pdev = adapter->pdev; 2331 struct pci_dev *pdev = adapter->pdev;
2326 2332
2327 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, 2333 ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
2328 GFP_KERNEL); 2334 GFP_KERNEL);
2329 if (!ring->desc) 2335 if (!ring->desc)
2330 return -ENOMEM; 2336 return -ENOMEM;
2331 2337
@@ -3526,6 +3532,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
3526 3532
3527 switch (hw->mac.type) { 3533 switch (hw->mac.type) {
3528 case e1000_pch2lan: 3534 case e1000_pch2lan:
3535 /* Stable 96MHz frequency */
3536 incperiod = INCPERIOD_96MHz;
3537 incvalue = INCVALUE_96MHz;
3538 shift = INCVALUE_SHIFT_96MHz;
3539 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
3540 break;
3529 case e1000_pch_lpt: 3541 case e1000_pch_lpt:
3530 if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { 3542 if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
3531 /* Stable 96MHz frequency */ 3543 /* Stable 96MHz frequency */
@@ -6583,12 +6595,17 @@ static int e1000e_pm_thaw(struct device *dev)
6583static int e1000e_pm_suspend(struct device *dev) 6595static int e1000e_pm_suspend(struct device *dev)
6584{ 6596{
6585 struct pci_dev *pdev = to_pci_dev(dev); 6597 struct pci_dev *pdev = to_pci_dev(dev);
6598 int rc;
6586 6599
6587 e1000e_flush_lpic(pdev); 6600 e1000e_flush_lpic(pdev);
6588 6601
6589 e1000e_pm_freeze(dev); 6602 e1000e_pm_freeze(dev);
6590 6603
6591 return __e1000_shutdown(pdev, false); 6604 rc = __e1000_shutdown(pdev, false);
6605 if (rc)
6606 e1000e_pm_thaw(dev);
6607
6608 return rc;
6592} 6609}
6593 6610
6594static int e1000e_pm_resume(struct device *dev) 6611static int e1000e_pm_resume(struct device *dev)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 2ce0eba5e040..38431b49020f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -983,7 +983,7 @@ static void fm10k_self_test(struct net_device *dev,
983 983
984 memset(data, 0, sizeof(*data) * FM10K_TEST_LEN); 984 memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);
985 985
986 if (FM10K_REMOVED(hw)) { 986 if (FM10K_REMOVED(hw->hw_addr)) {
987 netif_err(interface, drv, dev, 987 netif_err(interface, drv, dev,
988 "Interface removed - test blocked\n"); 988 "Interface removed - test blocked\n");
989 eth_test->flags |= ETH_TEST_FL_FAILED; 989 eth_test->flags |= ETH_TEST_FL_FAILED;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 488a50d59dca..3da1f206ff84 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1073,6 +1073,11 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
1073 struct i40e_hw *hw = &np->vsi->back->hw; 1073 struct i40e_hw *hw = &np->vsi->back->hw;
1074 u32 val; 1074 u32 val;
1075 1075
1076#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
1077 if (hw->mac.type == I40E_MAC_X722) {
1078 val = X722_EEPROM_SCOPE_LIMIT + 1;
1079 return val;
1080 }
1076 val = (rd32(hw, I40E_GLPCI_LBARCTRL) 1081 val = (rd32(hw, I40E_GLPCI_LBARCTRL)
1077 & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) 1082 & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
1078 >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; 1083 >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 6100cdd9ad13..dd4e6ea9e0e1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
292{ 292{
293 enum i40e_status_code ret_code = 0; 293 enum i40e_status_code ret_code = 0;
294 294
295 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { 295 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
296 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); 296 if (!ret_code) {
297 if (!ret_code) { 297 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
298 ret_code = i40e_read_nvm_word_aq(hw, offset, data); 298 ret_code = i40e_read_nvm_word_aq(hw, offset, data);
299 i40e_release_nvm(hw); 299 } else {
300 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
300 } 301 }
301 } else { 302 i40e_release_nvm(hw);
302 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
303 } 303 }
304 return ret_code; 304 return ret_code;
305} 305}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 53803fd6350c..02b23f6277fb 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3174,7 +3174,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
3174 3174
3175static int igb_close(struct net_device *netdev) 3175static int igb_close(struct net_device *netdev)
3176{ 3176{
3177 if (netif_device_present(netdev)) 3177 if (netif_device_present(netdev) || netdev->dismantle)
3178 return __igb_close(netdev, false); 3178 return __igb_close(netdev, false);
3179 return 0; 3179 return 0;
3180} 3180}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7430dd44019e..ea693bbf56d8 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -818,6 +818,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
818 } 818 }
819 mvreg_write(pp, MVNETA_TXQ_CMD, q_map); 819 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
820 820
821 q_map = 0;
821 /* Enable all initialized RXQs. */ 822 /* Enable all initialized RXQs. */
822 mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def)); 823 mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
823} 824}
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 4f34e1b79705..ac92685dd4e5 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -5666,6 +5666,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
5666 int id = port->id; 5666 int id = port->id;
5667 bool allmulti = dev->flags & IFF_ALLMULTI; 5667 bool allmulti = dev->flags & IFF_ALLMULTI;
5668 5668
5669retry:
5669 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); 5670 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
5670 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); 5671 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
5671 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); 5672 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
@@ -5673,9 +5674,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
5673 /* Remove all port->id's mcast enries */ 5674 /* Remove all port->id's mcast enries */
5674 mvpp2_prs_mcast_del_all(priv, id); 5675 mvpp2_prs_mcast_del_all(priv, id);
5675 5676
5676 if (allmulti && !netdev_mc_empty(dev)) { 5677 if (!allmulti) {
5677 netdev_for_each_mc_addr(ha, dev) 5678 netdev_for_each_mc_addr(ha, dev) {
5678 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); 5679 if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
5680 allmulti = true;
5681 goto retry;
5682 }
5683 }
5679 } 5684 }
5680} 5685}
5681 5686
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 4b62aa1f9ff8..6e5065f0907b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5079,7 +5079,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5079 INIT_WORK(&hw->restart_work, sky2_restart); 5079 INIT_WORK(&hw->restart_work, sky2_restart);
5080 5080
5081 pci_set_drvdata(pdev, hw); 5081 pci_set_drvdata(pdev, hw);
5082 pdev->d3_delay = 150; 5082 pdev->d3_delay = 200;
5083 5083
5084 return 0; 5084 return 0;
5085 5085
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ddb5541882f5..bcfac000199e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -967,6 +967,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
967 if (!coal->tx_max_coalesced_frames_irq) 967 if (!coal->tx_max_coalesced_frames_irq)
968 return -EINVAL; 968 return -EINVAL;
969 969
970 if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
971 coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
972 coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
973 coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
974 netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
975 __func__, MLX4_EN_MAX_COAL_TIME);
976 return -ERANGE;
977 }
978
979 if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
980 coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
981 netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
982 __func__, MLX4_EN_MAX_COAL_PKTS);
983 return -ERANGE;
984 }
985
970 priv->rx_frames = (coal->rx_max_coalesced_frames == 986 priv->rx_frames = (coal->rx_max_coalesced_frames ==
971 MLX4_EN_AUTO_CONF) ? 987 MLX4_EN_AUTO_CONF) ?
972 MLX4_EN_RX_COAL_TARGET : 988 MLX4_EN_RX_COAL_TARGET :
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1d4e2e054647..897d061e4f03 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
35#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
36 36
37#include <linux/mlx4/cmd.h> 37#include <linux/mlx4/cmd.h>
38#include <linux/mlx4/qp.h>
38#include <linux/export.h> 39#include <linux/export.h>
39 40
40#include "mlx4.h" 41#include "mlx4.h"
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
985 if (IS_ERR(mailbox)) 986 if (IS_ERR(mailbox))
986 return PTR_ERR(mailbox); 987 return PTR_ERR(mailbox);
987 988
989 if (!mlx4_qp_lookup(dev, rule->qpn)) {
990 mlx4_err_rule(dev, "QP doesn't exist\n", rule);
991 ret = -EINVAL;
992 goto out;
993 }
994
988 trans_rule_ctrl_to_hw(rule, mailbox->buf); 995 trans_rule_ctrl_to_hw(rule, mailbox->buf);
989 996
990 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 997 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
991 998
992 list_for_each_entry(cur, &rule->list, list) { 999 list_for_each_entry(cur, &rule->list, list) {
993 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 1000 ret = parse_trans_rule(dev, cur, mailbox->buf + size);
994 if (ret < 0) { 1001 if (ret < 0)
995 mlx4_free_cmd_mailbox(dev, mailbox); 1002 goto out;
996 return ret; 1003
997 }
998 size += ret; 1004 size += ret;
999 } 1005 }
1000 1006
@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
1021 } 1027 }
1022 } 1028 }
1023 1029
1030out:
1024 mlx4_free_cmd_mailbox(dev, mailbox); 1031 mlx4_free_cmd_mailbox(dev, mailbox);
1025 1032
1026 return ret; 1033 return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 10aa6544cf4d..607daaffae98 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -140,6 +140,9 @@ enum {
140#define MLX4_EN_TX_COAL_PKTS 16 140#define MLX4_EN_TX_COAL_PKTS 16
141#define MLX4_EN_TX_COAL_TIME 0x10 141#define MLX4_EN_TX_COAL_TIME 0x10
142 142
143#define MLX4_EN_MAX_COAL_PKTS U16_MAX
144#define MLX4_EN_MAX_COAL_TIME U16_MAX
145
143#define MLX4_EN_RX_RATE_LOW 400000 146#define MLX4_EN_RX_RATE_LOW 400000
144#define MLX4_EN_RX_COAL_TIME_LOW 0 147#define MLX4_EN_RX_COAL_TIME_LOW 0
145#define MLX4_EN_RX_RATE_HIGH 450000 148#define MLX4_EN_RX_RATE_HIGH 450000
@@ -518,8 +521,8 @@ struct mlx4_en_priv {
518 u16 rx_usecs_low; 521 u16 rx_usecs_low;
519 u32 pkt_rate_high; 522 u32 pkt_rate_high;
520 u16 rx_usecs_high; 523 u16 rx_usecs_high;
521 u16 sample_interval; 524 u32 sample_interval;
522 u16 adaptive_rx_coal; 525 u32 adaptive_rx_coal;
523 u32 msg_enable; 526 u32 msg_enable;
524 u32 loopback_ok; 527 u32 loopback_ok;
525 u32 validate_loopback; 528 u32 validate_loopback;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 168823dde79f..d6d87dd8a28f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -280,6 +280,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
280 u64 in_param = 0; 280 u64 in_param = 0;
281 int err; 281 int err;
282 282
283 if (!cnt)
284 return;
285
283 if (mlx4_is_mfunc(dev)) { 286 if (mlx4_is_mfunc(dev)) {
284 set_param_l(&in_param, base_qpn); 287 set_param_l(&in_param, base_qpn);
285 set_param_h(&in_param, cnt); 288 set_param_h(&in_param, cnt);
@@ -378,6 +381,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
378 __mlx4_qp_free_icm(dev, qpn); 381 __mlx4_qp_free_icm(dev, qpn);
379} 382}
380 383
384struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
385{
386 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
387 struct mlx4_qp *qp;
388
389 spin_lock_irq(&qp_table->lock);
390
391 qp = __mlx4_qp_lookup(dev, qpn);
392
393 spin_unlock_irq(&qp_table->lock);
394 return qp;
395}
396
381int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) 397int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
382{ 398{
383 struct mlx4_priv *priv = mlx4_priv(dev); 399 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -465,6 +481,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
465 } 481 }
466 482
467 if (attr & MLX4_UPDATE_QP_QOS_VPORT) { 483 if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
484 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
485 mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
486 err = -EOPNOTSUPP;
487 goto out;
488 }
489
468 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; 490 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
469 cmd->qp_context.qos_vport = params->qos_vport; 491 cmd->qp_context.qos_vport = params->qos_vport;
470 } 492 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index d1fc7fa87b05..7911dc3da98e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2891,7 +2891,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2891 u32 srqn = qp_get_srqn(qpc) & 0xffffff; 2891 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2892 int use_srq = (qp_get_srqn(qpc) >> 24) & 1; 2892 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2893 struct res_srq *srq; 2893 struct res_srq *srq;
2894 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; 2894 int local_qpn = vhcr->in_modifier & 0xffffff;
2895 2895
2896 err = adjust_qp_sched_queue(dev, slave, qpc, inbox); 2896 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2897 if (err) 2897 if (err)
@@ -5040,6 +5040,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5040 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 5040 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5041} 5041}
5042 5042
5043static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5044 struct mlx4_vf_immed_vlan_work *work)
5045{
5046 ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5047 ctx->qp_context.qos_vport = work->qos_vport;
5048}
5049
5043void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) 5050void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5044{ 5051{
5045 struct mlx4_vf_immed_vlan_work *work = 5052 struct mlx4_vf_immed_vlan_work *work =
@@ -5144,11 +5151,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5144 qp->sched_queue & 0xC7; 5151 qp->sched_queue & 0xC7;
5145 upd_context->qp_context.pri_path.sched_queue |= 5152 upd_context->qp_context.pri_path.sched_queue |=
5146 ((work->qos & 0x7) << 3); 5153 ((work->qos & 0x7) << 3);
5147 upd_context->qp_mask |= 5154
5148 cpu_to_be64(1ULL << 5155 if (dev->caps.flags2 &
5149 MLX4_UPD_QP_MASK_QOS_VPP); 5156 MLX4_DEV_CAP_FLAG2_QOS_VPP)
5150 upd_context->qp_context.qos_vport = 5157 update_qos_vpp(upd_context, work);
5151 work->qos_vport;
5152 } 5158 }
5153 5159
5154 err = mlx4_cmd(dev, mailbox->dma, 5160 err = mlx4_cmd(dev, mailbox->dma,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 6c66d2979795..9ac14df0ca3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -643,6 +643,7 @@ static void cmd_work_handler(struct work_struct *work)
643 struct semaphore *sem; 643 struct semaphore *sem;
644 unsigned long flags; 644 unsigned long flags;
645 int alloc_ret; 645 int alloc_ret;
646 int cmd_mode;
646 647
647 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; 648 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
648 down(sem); 649 down(sem);
@@ -688,6 +689,7 @@ static void cmd_work_handler(struct work_struct *work)
688 set_signature(ent, !cmd->checksum_disabled); 689 set_signature(ent, !cmd->checksum_disabled);
689 dump_command(dev, ent, 1); 690 dump_command(dev, ent, 1);
690 ent->ts1 = ktime_get_ns(); 691 ent->ts1 = ktime_get_ns();
692 cmd_mode = cmd->mode;
691 693
692 /* ring doorbell after the descriptor is valid */ 694 /* ring doorbell after the descriptor is valid */
693 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); 695 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
@@ -695,7 +697,7 @@ static void cmd_work_handler(struct work_struct *work)
695 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); 697 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
696 mmiowb(); 698 mmiowb();
697 /* if not in polling don't use ent after this point */ 699 /* if not in polling don't use ent after this point */
698 if (cmd->mode == CMD_MODE_POLLING) { 700 if (cmd_mode == CMD_MODE_POLLING) {
699 poll_timeout(ent); 701 poll_timeout(ent);
700 /* make sure we read the descriptor after ownership is SW */ 702 /* make sure we read the descriptor after ownership is SW */
701 rmb(); 703 rmb();
@@ -1126,7 +1128,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
1126{ 1128{
1127 struct mlx5_core_dev *dev = filp->private_data; 1129 struct mlx5_core_dev *dev = filp->private_data;
1128 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1130 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1129 char outlen_str[8]; 1131 char outlen_str[8] = {0};
1130 int outlen; 1132 int outlen;
1131 void *ptr; 1133 void *ptr;
1132 int err; 1134 int err;
@@ -1141,8 +1143,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
1141 if (copy_from_user(outlen_str, buf, count)) 1143 if (copy_from_user(outlen_str, buf, count))
1142 return -EFAULT; 1144 return -EFAULT;
1143 1145
1144 outlen_str[7] = 0;
1145
1146 err = sscanf(outlen_str, "%d", &outlen); 1146 err = sscanf(outlen_str, "%d", &outlen);
1147 if (err < 0) 1147 if (err < 0)
1148 return err; 1148 return err;
@@ -1623,7 +1623,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
1623 1623
1624 cmd->checksum_disabled = 1; 1624 cmd->checksum_disabled = 1;
1625 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1625 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
1626 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; 1626 cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
1627 1627
1628 cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; 1628 cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
1629 if (cmd->cmdif_rev > CMD_IF_REV) { 1629 if (cmd->cmdif_rev > CMD_IF_REV) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f5c1f4acc57b..7c42be586be8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -513,7 +513,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
513 struct mlx5_priv *priv = &mdev->priv; 513 struct mlx5_priv *priv = &mdev->priv;
514 struct msix_entry *msix = priv->msix_arr; 514 struct msix_entry *msix = priv->msix_arr;
515 int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; 515 int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
516 int err;
517 516
518 if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { 517 if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
519 mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); 518 mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
@@ -523,18 +522,11 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
523 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), 522 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
524 priv->irq_info[i].mask); 523 priv->irq_info[i].mask);
525 524
526 err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); 525 if (IS_ENABLED(CONFIG_SMP) &&
527 if (err) { 526 irq_set_affinity_hint(irq, priv->irq_info[i].mask))
528 mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x", 527 mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
529 irq);
530 goto err_clear_mask;
531 }
532 528
533 return 0; 529 return 0;
534
535err_clear_mask:
536 free_cpumask_var(priv->irq_info[i].mask);
537 return err;
538} 530}
539 531
540static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) 532static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 1bd419dbda6d..0798b4adb039 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -71,7 +71,7 @@ static int sonic_open(struct net_device *dev)
71 for (i = 0; i < SONIC_NUM_RRS; i++) { 71 for (i = 0; i < SONIC_NUM_RRS; i++) {
72 dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE), 72 dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
73 SONIC_RBSIZE, DMA_FROM_DEVICE); 73 SONIC_RBSIZE, DMA_FROM_DEVICE);
74 if (!laddr) { 74 if (dma_mapping_error(lp->device, laddr)) {
75 while(i > 0) { /* free any that were mapped successfully */ 75 while(i > 0) { /* free any that were mapped successfully */
76 i--; 76 i--;
77 dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); 77 dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index b8d5270359cd..e30676515529 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
247 cmd.req.arg3 = 0; 247 cmd.req.arg3 = 0;
248 248
249 if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) 249 if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
250 netxen_issue_cmd(adapter, &cmd); 250 rcode = netxen_issue_cmd(adapter, &cmd);
251 251
252 if (rcode != NX_RCODE_SUCCESS) 252 if (rcode != NX_RCODE_SUCCESS)
253 return -EIO; 253 return -EIO;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 7ccdb46c6764..21e0af2620ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -43,7 +43,7 @@
43#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET 43#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
44 44
45/* ILT entry structure */ 45/* ILT entry structure */
46#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL 46#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
47#define ILT_ENTRY_PHY_ADDR_SHIFT 0 47#define ILT_ENTRY_PHY_ADDR_SHIFT 0
48#define ILT_ENTRY_VALID_MASK 0x1ULL 48#define ILT_ENTRY_VALID_MASK 0x1ULL
49#define ILT_ENTRY_VALID_SHIFT 52 49#define ILT_ENTRY_VALID_SHIFT 52
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 174f7341c5c3..688b6da5a9bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -22,6 +22,7 @@
22#include <linux/etherdevice.h> 22#include <linux/etherdevice.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/qed/qed_if.h> 24#include <linux/qed/qed_if.h>
25#include <linux/crash_dump.h>
25 26
26#include "qed.h" 27#include "qed.h"
27#include "qed_sp.h" 28#include "qed_sp.h"
@@ -634,6 +635,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
634 /* We want a minimum of one slowpath and one fastpath vector per hwfn */ 635 /* We want a minimum of one slowpath and one fastpath vector per hwfn */
635 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; 636 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
636 637
638 if (is_kdump_kernel()) {
639 DP_INFO(cdev,
640 "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
641 cdev->int_params.in.min_msix_cnt);
642 cdev->int_params.in.num_vectors =
643 cdev->int_params.in.min_msix_cnt;
644 }
645
637 rc = qed_set_int_mode(cdev, false); 646 rc = qed_set_int_mode(cdev, false);
638 if (rc) { 647 if (rc) {
639 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); 648 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f9640d5ce6ba..b4f3cb55605e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3850,7 +3850,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
3850 struct list_head *head = &mbx->cmd_q; 3850 struct list_head *head = &mbx->cmd_q;
3851 struct qlcnic_cmd_args *cmd = NULL; 3851 struct qlcnic_cmd_args *cmd = NULL;
3852 3852
3853 spin_lock(&mbx->queue_lock); 3853 spin_lock_bh(&mbx->queue_lock);
3854 3854
3855 while (!list_empty(head)) { 3855 while (!list_empty(head)) {
3856 cmd = list_entry(head->next, struct qlcnic_cmd_args, list); 3856 cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
@@ -3861,7 +3861,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
3861 qlcnic_83xx_notify_cmd_completion(adapter, cmd); 3861 qlcnic_83xx_notify_cmd_completion(adapter, cmd);
3862 } 3862 }
3863 3863
3864 spin_unlock(&mbx->queue_lock); 3864 spin_unlock_bh(&mbx->queue_lock);
3865} 3865}
3866 3866
3867static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) 3867static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
@@ -3897,12 +3897,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
3897{ 3897{
3898 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; 3898 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
3899 3899
3900 spin_lock(&mbx->queue_lock); 3900 spin_lock_bh(&mbx->queue_lock);
3901 3901
3902 list_del(&cmd->list); 3902 list_del(&cmd->list);
3903 mbx->num_cmds--; 3903 mbx->num_cmds--;
3904 3904
3905 spin_unlock(&mbx->queue_lock); 3905 spin_unlock_bh(&mbx->queue_lock);
3906 3906
3907 qlcnic_83xx_notify_cmd_completion(adapter, cmd); 3907 qlcnic_83xx_notify_cmd_completion(adapter, cmd);
3908} 3908}
@@ -3967,7 +3967,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
3967 init_completion(&cmd->completion); 3967 init_completion(&cmd->completion);
3968 cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN; 3968 cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
3969 3969
3970 spin_lock(&mbx->queue_lock); 3970 spin_lock_bh(&mbx->queue_lock);
3971 3971
3972 list_add_tail(&cmd->list, &mbx->cmd_q); 3972 list_add_tail(&cmd->list, &mbx->cmd_q);
3973 mbx->num_cmds++; 3973 mbx->num_cmds++;
@@ -3975,7 +3975,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
3975 *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT; 3975 *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
3976 queue_work(mbx->work_q, &mbx->work); 3976 queue_work(mbx->work_q, &mbx->work);
3977 3977
3978 spin_unlock(&mbx->queue_lock); 3978 spin_unlock_bh(&mbx->queue_lock);
3979 3979
3980 return 0; 3980 return 0;
3981 } 3981 }
@@ -4071,15 +4071,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
4071 mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT; 4071 mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
4072 spin_unlock_irqrestore(&mbx->aen_lock, flags); 4072 spin_unlock_irqrestore(&mbx->aen_lock, flags);
4073 4073
4074 spin_lock(&mbx->queue_lock); 4074 spin_lock_bh(&mbx->queue_lock);
4075 4075
4076 if (list_empty(head)) { 4076 if (list_empty(head)) {
4077 spin_unlock(&mbx->queue_lock); 4077 spin_unlock_bh(&mbx->queue_lock);
4078 return; 4078 return;
4079 } 4079 }
4080 cmd = list_entry(head->next, struct qlcnic_cmd_args, list); 4080 cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
4081 4081
4082 spin_unlock(&mbx->queue_lock); 4082 spin_unlock_bh(&mbx->queue_lock);
4083 4083
4084 mbx_ops->encode_cmd(adapter, cmd); 4084 mbx_ops->encode_cmd(adapter, cmd);
4085 mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST); 4085 mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 509b596cf1e8..bd1ec70fb736 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
341 } 341 }
342 return -EIO; 342 return -EIO;
343 } 343 }
344 usleep_range(1000, 1500); 344 udelay(1200);
345 } 345 }
346 346
347 if (id_reg) 347 if (id_reg)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 7327b729ba2e..ffa6885acfc8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -127,6 +127,8 @@ static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
127 return 0; 127 return 0;
128 128
129 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); 129 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
130 if (!pos)
131 return 0;
130 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset); 132 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
131 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride); 133 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
132 134
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index be258d90de9e..e3223f2fe2ff 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -765,7 +765,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
765 sizeof(struct mpi_coredump_global_header); 765 sizeof(struct mpi_coredump_global_header);
766 mpi_coredump->mpi_global_header.imageSize = 766 mpi_coredump->mpi_global_header.imageSize =
767 sizeof(struct ql_mpi_coredump); 767 sizeof(struct ql_mpi_coredump);
768 memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", 768 strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
769 sizeof(mpi_coredump->mpi_global_header.idString)); 769 sizeof(mpi_coredump->mpi_global_header.idString));
770 770
771 /* Get generic NIC reg dump */ 771 /* Get generic NIC reg dump */
@@ -1255,7 +1255,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
1255 sizeof(struct mpi_coredump_global_header); 1255 sizeof(struct mpi_coredump_global_header);
1256 mpi_coredump->mpi_global_header.imageSize = 1256 mpi_coredump->mpi_global_header.imageSize =
1257 sizeof(struct ql_reg_dump); 1257 sizeof(struct ql_reg_dump);
1258 memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", 1258 strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
1259 sizeof(mpi_coredump->mpi_global_header.idString)); 1259 sizeof(mpi_coredump->mpi_global_header.idString));
1260 1260
1261 1261
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 1ef03939d25f..c90ae4d4be7d 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -296,8 +296,9 @@ qcaspi_receive(struct qcaspi *qca)
296 296
297 /* Allocate rx SKB if we don't have one available. */ 297 /* Allocate rx SKB if we don't have one available. */
298 if (!qca->rx_skb) { 298 if (!qca->rx_skb) {
299 qca->rx_skb = netdev_alloc_skb(net_dev, 299 qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
300 net_dev->mtu + VLAN_ETH_HLEN); 300 net_dev->mtu +
301 VLAN_ETH_HLEN);
301 if (!qca->rx_skb) { 302 if (!qca->rx_skb) {
302 netdev_dbg(net_dev, "out of RX resources\n"); 303 netdev_dbg(net_dev, "out of RX resources\n");
303 qca->stats.out_of_mem++; 304 qca->stats.out_of_mem++;
@@ -377,7 +378,7 @@ qcaspi_receive(struct qcaspi *qca)
377 qca->rx_skb, qca->rx_skb->dev); 378 qca->rx_skb, qca->rx_skb->dev);
378 qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; 379 qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
379 netif_rx_ni(qca->rx_skb); 380 netif_rx_ni(qca->rx_skb);
380 qca->rx_skb = netdev_alloc_skb(net_dev, 381 qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
381 net_dev->mtu + VLAN_ETH_HLEN); 382 net_dev->mtu + VLAN_ETH_HLEN);
382 if (!qca->rx_skb) { 383 if (!qca->rx_skb) {
383 netdev_dbg(net_dev, "out of RX resources\n"); 384 netdev_dbg(net_dev, "out of RX resources\n");
@@ -759,7 +760,8 @@ qcaspi_netdev_init(struct net_device *dev)
759 if (!qca->rx_buffer) 760 if (!qca->rx_buffer)
760 return -ENOBUFS; 761 return -ENOBUFS;
761 762
762 qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN); 763 qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
764 VLAN_ETH_HLEN);
763 if (!qca->rx_skb) { 765 if (!qca->rx_skb) {
764 kfree(qca->rx_buffer); 766 kfree(qca->rx_buffer);
765 netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n"); 767 netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index ef668d300800..d987d571fdd6 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2229,7 +2229,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
2229 struct rtl8139_private *tp = netdev_priv(dev); 2229 struct rtl8139_private *tp = netdev_priv(dev);
2230 const int irq = tp->pci_dev->irq; 2230 const int irq = tp->pci_dev->irq;
2231 2231
2232 disable_irq(irq); 2232 disable_irq_nosync(irq);
2233 rtl8139_interrupt(irq, dev); 2233 rtl8139_interrupt(irq, dev);
2234 enable_irq(irq); 2234 enable_irq(irq);
2235} 2235}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c5ea1018cb47..8b4069ea52ce 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1387,7 +1387,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
1387{ 1387{
1388 void __iomem *ioaddr = tp->mmio_addr; 1388 void __iomem *ioaddr = tp->mmio_addr;
1389 1389
1390 return RTL_R8(IBISR0) & 0x02; 1390 return RTL_R8(IBISR0) & 0x20;
1391} 1391}
1392 1392
1393static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) 1393static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
@@ -1395,7 +1395,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
1395 void __iomem *ioaddr = tp->mmio_addr; 1395 void __iomem *ioaddr = tp->mmio_addr;
1396 1396
1397 RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); 1397 RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
1398 rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000); 1398 rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
1399 RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); 1399 RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
1400 RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); 1400 RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
1401} 1401}
@@ -2205,19 +2205,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
2205 void __iomem *ioaddr = tp->mmio_addr; 2205 void __iomem *ioaddr = tp->mmio_addr;
2206 dma_addr_t paddr = tp->counters_phys_addr; 2206 dma_addr_t paddr = tp->counters_phys_addr;
2207 u32 cmd; 2207 u32 cmd;
2208 bool ret;
2209 2208
2210 RTL_W32(CounterAddrHigh, (u64)paddr >> 32); 2209 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
2210 RTL_R32(CounterAddrHigh);
2211 cmd = (u64)paddr & DMA_BIT_MASK(32); 2211 cmd = (u64)paddr & DMA_BIT_MASK(32);
2212 RTL_W32(CounterAddrLow, cmd); 2212 RTL_W32(CounterAddrLow, cmd);
2213 RTL_W32(CounterAddrLow, cmd | counter_cmd); 2213 RTL_W32(CounterAddrLow, cmd | counter_cmd);
2214 2214
2215 ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); 2215 return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
2216
2217 RTL_W32(CounterAddrLow, 0);
2218 RTL_W32(CounterAddrHigh, 0);
2219
2220 return ret;
2221} 2216}
2222 2217
2223static bool rtl8169_reset_counters(struct net_device *dev) 2218static bool rtl8169_reset_counters(struct net_device *dev)
@@ -4837,6 +4832,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
4837static void rtl_pll_power_up(struct rtl8169_private *tp) 4832static void rtl_pll_power_up(struct rtl8169_private *tp)
4838{ 4833{
4839 rtl_generic_op(tp, tp->pll_power_ops.up); 4834 rtl_generic_op(tp, tp->pll_power_ops.up);
4835
4836 /* give MAC/PHY some time to resume */
4837 msleep(20);
4840} 4838}
4841 4839
4842static void rtl_init_pll_power_ops(struct rtl8169_private *tp) 4840static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
@@ -8416,12 +8414,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8416 goto err_out_msi_4; 8414 goto err_out_msi_4;
8417 } 8415 }
8418 8416
8417 pci_set_drvdata(pdev, dev);
8418
8419 rc = register_netdev(dev); 8419 rc = register_netdev(dev);
8420 if (rc < 0) 8420 if (rc < 0)
8421 goto err_out_cnt_5; 8421 goto err_out_cnt_5;
8422 8422
8423 pci_set_drvdata(pdev, dev);
8424
8425 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", 8423 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
8426 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr, 8424 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
8427 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq); 8425 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 424d1dee55c9..afaf79b8761f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3222,7 +3222,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
3222 /* MDIO bus init */ 3222 /* MDIO bus init */
3223 ret = sh_mdio_init(mdp, pd); 3223 ret = sh_mdio_init(mdp, pd);
3224 if (ret) { 3224 if (ret) {
3225 dev_err(&ndev->dev, "failed to initialise MDIO\n"); 3225 dev_err(&pdev->dev, "failed to initialise MDIO\n");
3226 goto out_release; 3226 goto out_release;
3227 } 3227 }
3228 3228
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5adaf537513b..7bba30f24135 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -54,7 +54,7 @@
54#include <linux/reset.h> 54#include <linux/reset.h>
55#include <linux/of_mdio.h> 55#include <linux/of_mdio.h>
56 56
57#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) 57#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
58 58
59/* Module parameters */ 59/* Module parameters */
60#define TX_TIMEO 5000 60#define TX_TIMEO 5000
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ab6051a43134..ccebf89aa1e4 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3442,7 +3442,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3442 3442
3443 len = (val & RCR_ENTRY_L2_LEN) >> 3443 len = (val & RCR_ENTRY_L2_LEN) >>
3444 RCR_ENTRY_L2_LEN_SHIFT; 3444 RCR_ENTRY_L2_LEN_SHIFT;
3445 len -= ETH_FCS_LEN; 3445 append_size = len + ETH_HLEN + ETH_FCS_LEN;
3446 3446
3447 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 3447 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3448 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 3448 RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
@@ -3452,7 +3452,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3452 RCR_ENTRY_PKTBUFSZ_SHIFT]; 3452 RCR_ENTRY_PKTBUFSZ_SHIFT];
3453 3453
3454 off = addr & ~PAGE_MASK; 3454 off = addr & ~PAGE_MASK;
3455 append_size = rcr_size;
3456 if (num_rcr == 1) { 3455 if (num_rcr == 1) {
3457 int ptype; 3456 int ptype;
3458 3457
@@ -3465,7 +3464,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3465 else 3464 else
3466 skb_checksum_none_assert(skb); 3465 skb_checksum_none_assert(skb);
3467 } else if (!(val & RCR_ENTRY_MULTI)) 3466 } else if (!(val & RCR_ENTRY_MULTI))
3468 append_size = len - skb->len; 3467 append_size = append_size - skb->len;
3469 3468
3470 niu_rx_skb_append(skb, page, off, append_size, rcr_size); 3469 niu_rx_skb_append(skb, page, off, append_size, rcr_size);
3471 if ((page->index + rp->rbr_block_size) - rcr_size == addr) { 3470 if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index e23a642357e7..eb4d8df49399 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -60,8 +60,7 @@
60#include <linux/sungem_phy.h> 60#include <linux/sungem_phy.h>
61#include "sungem.h" 61#include "sungem.h"
62 62
63/* Stripping FCS is causing problems, disabled for now */ 63#define STRIP_FCS
64#undef STRIP_FCS
65 64
66#define DEFAULT_MSG (NETIF_MSG_DRV | \ 65#define DEFAULT_MSG (NETIF_MSG_DRV | \
67 NETIF_MSG_PROBE | \ 66 NETIF_MSG_PROBE | \
@@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp)
435 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); 434 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
436 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 435 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
437 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 436 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
438 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 437 (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
439 writel(val, gp->regs + RXDMA_CFG); 438 writel(val, gp->regs + RXDMA_CFG);
440 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) 439 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
441 writel(((5 & RXDMA_BLANK_IPKTS) | 440 writel(((5 & RXDMA_BLANK_IPKTS) |
@@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
760 struct net_device *dev = gp->dev; 759 struct net_device *dev = gp->dev;
761 int entry, drops, work_done = 0; 760 int entry, drops, work_done = 0;
762 u32 done; 761 u32 done;
763 __sum16 csum;
764 762
765 if (netif_msg_rx_status(gp)) 763 if (netif_msg_rx_status(gp))
766 printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", 764 printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
@@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
855 skb = copy_skb; 853 skb = copy_skb;
856 } 854 }
857 855
858 csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); 856 if (likely(dev->features & NETIF_F_RXCSUM)) {
859 skb->csum = csum_unfold(csum); 857 __sum16 csum;
860 skb->ip_summed = CHECKSUM_COMPLETE; 858
859 csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
860 skb->csum = csum_unfold(csum);
861 skb->ip_summed = CHECKSUM_COMPLETE;
862 }
861 skb->protocol = eth_type_trans(skb, gp->dev); 863 skb->protocol = eth_type_trans(skb, gp->dev);
862 864
863 napi_gro_receive(&gp->napi, skb); 865 napi_gro_receive(&gp->napi, skb);
@@ -1755,7 +1757,7 @@ static void gem_init_dma(struct gem *gp)
1755 writel(0, gp->regs + TXDMA_KICK); 1757 writel(0, gp->regs + TXDMA_KICK);
1756 1758
1757 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 1759 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
1758 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 1760 (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
1759 writel(val, gp->regs + RXDMA_CFG); 1761 writel(val, gp->regs + RXDMA_CFG);
1760 1762
1761 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); 1763 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
@@ -2973,8 +2975,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2973 pci_set_drvdata(pdev, dev); 2975 pci_set_drvdata(pdev, dev);
2974 2976
2975 /* We can do scatter/gather and HW checksum */ 2977 /* We can do scatter/gather and HW checksum */
2976 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; 2978 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2977 dev->features |= dev->hw_features | NETIF_F_RXCSUM; 2979 dev->features = dev->hw_features;
2978 if (pci_using_dac) 2980 if (pci_using_dac)
2979 dev->features |= NETIF_F_HIGHDMA; 2981 dev->features |= NETIF_F_HIGHDMA;
2980 2982
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index cc106d892e29..b15e322b8bfe 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1787,7 +1787,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
1787 dev->ethtool_ops = &vnet_ethtool_ops; 1787 dev->ethtool_ops = &vnet_ethtool_ops;
1788 dev->watchdog_timeo = VNET_TX_TIMEOUT; 1788 dev->watchdog_timeo = VNET_TX_TIMEOUT;
1789 1789
1790 dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | 1790 dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO |
1791 NETIF_F_HW_CSUM | NETIF_F_SG; 1791 NETIF_F_HW_CSUM | NETIF_F_SG;
1792 dev->features = dev->hw_features; 1792 dev->features = dev->hw_features;
1793 1793
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f9740377f8bb..1ca720071200 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -283,6 +283,10 @@ struct cpsw_ss_regs {
283/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */ 283/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
284#define CPSW_V1_SEQ_ID_OFS_SHIFT 16 284#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
285 285
286#define CPSW_MAX_BLKS_TX 15
287#define CPSW_MAX_BLKS_TX_SHIFT 4
288#define CPSW_MAX_BLKS_RX 5
289
286struct cpsw_host_regs { 290struct cpsw_host_regs {
287 u32 max_blks; 291 u32 max_blks;
288 u32 blk_cnt; 292 u32 blk_cnt;
@@ -866,7 +870,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
866 /* set speed_in input in case RMII mode is used in 100Mbps */ 870 /* set speed_in input in case RMII mode is used in 100Mbps */
867 if (phy->speed == 100) 871 if (phy->speed == 100)
868 mac_control |= BIT(15); 872 mac_control |= BIT(15);
869 else if (phy->speed == 10) 873 /* in band mode only works in 10Mbps RGMII mode */
874 else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
870 mac_control |= BIT(18); /* In Band mode */ 875 mac_control |= BIT(18); /* In Band mode */
871 876
872 if (priv->rx_pause) 877 if (priv->rx_pause)
@@ -1103,11 +1108,23 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1103 switch (cpsw->version) { 1108 switch (cpsw->version) {
1104 case CPSW_VERSION_1: 1109 case CPSW_VERSION_1:
1105 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); 1110 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
1111 /* Increase RX FIFO size to 5 for supporting fullduplex
1112 * flow control mode
1113 */
1114 slave_write(slave,
1115 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
1116 CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
1106 break; 1117 break;
1107 case CPSW_VERSION_2: 1118 case CPSW_VERSION_2:
1108 case CPSW_VERSION_3: 1119 case CPSW_VERSION_3:
1109 case CPSW_VERSION_4: 1120 case CPSW_VERSION_4:
1110 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); 1121 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
1122 /* Increase RX FIFO size to 5 for supporting fullduplex
1123 * flow control mode
1124 */
1125 slave_write(slave,
1126 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
1127 CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
1111 break; 1128 break;
1112 } 1129 }
1113 1130
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index a274cd49afe9..399a89f30826 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -610,8 +610,8 @@ err_out_regions:
610#ifdef CONFIG_PCI 610#ifdef CONFIG_PCI
611 if (pdev) 611 if (pdev)
612 pci_release_regions(pdev); 612 pci_release_regions(pdev);
613#endif
614err_out: 613err_out:
614#endif
615 if (pdev) 615 if (pdev)
616 pci_disable_device(pdev); 616 pci_disable_device(pdev);
617 return rc; 617 return rc;
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 4f5c024c6192..5d5c0c433f3e 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
34config XILINX_LL_TEMAC 34config XILINX_LL_TEMAC
35 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" 35 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
36 depends on (PPC || MICROBLAZE) 36 depends on (PPC || MICROBLAZE)
37 depends on !64BIT || BROKEN
37 select PHYLIB 38 select PHYLIB
38 ---help--- 39 ---help---
39 This driver supports the Xilinx 10/100/1000 LocalLink TEMAC 40 This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 49fe59b180a8..a75ce9051a7f 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -574,6 +574,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
574 case HDLCDRVCTL_CALIBRATE: 574 case HDLCDRVCTL_CALIBRATE:
575 if(!capable(CAP_SYS_RAWIO)) 575 if(!capable(CAP_SYS_RAWIO))
576 return -EPERM; 576 return -EPERM;
577 if (s->par.bitrate <= 0)
578 return -EINVAL;
577 if (bi.data.calibrate > INT_MAX / s->par.bitrate) 579 if (bi.data.calibrate > INT_MAX / s->par.bitrate)
578 return -EINVAL; 580 return -EINVAL;
579 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; 581 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 95c0b45a68fb..313e006f74fe 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1381,8 +1381,8 @@ static int rr_close(struct net_device *dev)
1381 rrpriv->info_dma); 1381 rrpriv->info_dma);
1382 rrpriv->info = NULL; 1382 rrpriv->info = NULL;
1383 1383
1384 free_irq(pdev->irq, dev);
1385 spin_unlock_irqrestore(&rrpriv->lock, flags); 1384 spin_unlock_irqrestore(&rrpriv->lock, flags);
1385 free_irq(pdev->irq, dev);
1386 1386
1387 return 0; 1387 return 0;
1388} 1388}
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index af827faec7fe..142015af43db 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -282,6 +282,10 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
282 if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) 282 if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
283 success = true; 283 success = true;
284 } else { 284 } else {
285 if (!ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
286 ipvlan->phy_dev->dev_addr))
287 skb->pkt_type = PACKET_OTHERHOST;
288
285 ret = RX_HANDLER_ANOTHER; 289 ret = RX_HANDLER_ANOTHER;
286 success = true; 290 success = true;
287 } 291 }
@@ -353,6 +357,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
353 .flowi4_oif = dev->ifindex, 357 .flowi4_oif = dev->ifindex,
354 .flowi4_tos = RT_TOS(ip4h->tos), 358 .flowi4_tos = RT_TOS(ip4h->tos),
355 .flowi4_flags = FLOWI_FLAG_ANYSRC, 359 .flowi4_flags = FLOWI_FLAG_ANYSRC,
360 .flowi4_mark = skb->mark,
356 .daddr = ip4h->daddr, 361 .daddr = ip4h->daddr,
357 .saddr = ip4h->saddr, 362 .saddr = ip4h->saddr,
358 }; 363 };
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 4e3d2e7c697c..e8c3a8c32534 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -518,7 +518,9 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
518 518
519 mtt = irda_get_mtt(skb); 519 mtt = irda_get_mtt(skb);
520 pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt); 520 pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
521 if (mtt) 521 if (mtt > 1000)
522 mdelay(mtt/1000);
523 else if (mtt)
522 udelay(mtt); 524 udelay(mtt);
523 525
524 /* Enable DMA interrupt */ 526 /* Enable DMA interrupt */
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 49bbc6826883..9a7dca2bb618 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
61 return rc; 61 return rc;
62 62
63 /* make rcal=100, since rdb default is 000 */ 63 /* make rcal=100, since rdb default is 000 */
64 rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10); 64 rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
65 if (rc < 0) 65 if (rc < 0)
66 return rc; 66 return rc;
67 67
68 /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */ 68 /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
69 rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10); 69 rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
70 if (rc < 0) 70 if (rc < 0)
71 return rc; 71 return rc;
72 72
73 /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */ 73 /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
74 rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00); 74 rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);
75 75
76 return 0; 76 return 0;
77} 77}
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index b2091c88b44d..ce16b26d49ff 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -14,11 +14,18 @@
14#ifndef _LINUX_BCM_PHY_LIB_H 14#ifndef _LINUX_BCM_PHY_LIB_H
15#define _LINUX_BCM_PHY_LIB_H 15#define _LINUX_BCM_PHY_LIB_H
16 16
17#include <linux/brcmphy.h>
17#include <linux/phy.h> 18#include <linux/phy.h>
18 19
19int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val); 20int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
20int bcm_phy_read_exp(struct phy_device *phydev, u16 reg); 21int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
21 22
23static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
24 u16 reg, u16 val)
25{
26 return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
27}
28
22int bcm_phy_write_misc(struct phy_device *phydev, 29int bcm_phy_write_misc(struct phy_device *phydev,
23 u16 reg, u16 chl, u16 value); 30 u16 reg, u16 chl, u16 value);
24int bcm_phy_read_misc(struct phy_device *phydev, 31int bcm_phy_read_misc(struct phy_device *phydev,
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 03d4809a9126..bffa70e46202 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -48,10 +48,10 @@
48static void r_rc_cal_reset(struct phy_device *phydev) 48static void r_rc_cal_reset(struct phy_device *phydev)
49{ 49{
50 /* Reset R_CAL/RC_CAL Engine */ 50 /* Reset R_CAL/RC_CAL Engine */
51 bcm_phy_write_exp(phydev, 0x00b0, 0x0010); 51 bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
52 52
53 /* Disable Reset R_AL/RC_CAL Engine */ 53 /* Disable Reset R_AL/RC_CAL Engine */
54 bcm_phy_write_exp(phydev, 0x00b0, 0x0000); 54 bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
55} 55}
56 56
57static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev) 57static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index e83acc608678..dc934347ae28 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1203,6 +1203,23 @@ static void dp83640_remove(struct phy_device *phydev)
1203 kfree(dp83640); 1203 kfree(dp83640);
1204} 1204}
1205 1205
1206static int dp83640_soft_reset(struct phy_device *phydev)
1207{
1208 int ret;
1209
1210 ret = genphy_soft_reset(phydev);
1211 if (ret < 0)
1212 return ret;
1213
1214 /* From DP83640 datasheet: "Software driver code must wait 3 us
1215 * following a software reset before allowing further serial MII
1216 * operations with the DP83640."
1217 */
1218 udelay(10); /* Taking udelay inaccuracy into account */
1219
1220 return 0;
1221}
1222
1206static int dp83640_config_init(struct phy_device *phydev) 1223static int dp83640_config_init(struct phy_device *phydev)
1207{ 1224{
1208 struct dp83640_private *dp83640 = phydev->priv; 1225 struct dp83640_private *dp83640 = phydev->priv;
@@ -1496,6 +1513,7 @@ static struct phy_driver dp83640_driver = {
1496 .flags = PHY_HAS_INTERRUPT, 1513 .flags = PHY_HAS_INTERRUPT,
1497 .probe = dp83640_probe, 1514 .probe = dp83640_probe,
1498 .remove = dp83640_remove, 1515 .remove = dp83640_remove,
1516 .soft_reset = dp83640_soft_reset,
1499 .config_init = dp83640_config_init, 1517 .config_init = dp83640_config_init,
1500 .config_aneg = genphy_config_aneg, 1518 .config_aneg = genphy_config_aneg,
1501 .read_status = genphy_read_status, 1519 .read_status = genphy_read_status,
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 15bc7f9ea224..afd76e07088b 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -128,8 +128,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
128 128
129 data->regulator = devm_regulator_get(&pdev->dev, "phy"); 129 data->regulator = devm_regulator_get(&pdev->dev, "phy");
130 if (IS_ERR(data->regulator)) { 130 if (IS_ERR(data->regulator)) {
131 if (PTR_ERR(data->regulator) == -EPROBE_DEFER) 131 if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
132 return -EPROBE_DEFER; 132 ret = -EPROBE_DEFER;
133 goto err_out_free_mdiobus;
134 }
133 135
134 dev_info(&pdev->dev, "no regulator found\n"); 136 dev_info(&pdev->dev, "no regulator found\n");
135 } else { 137 } else {
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7d0690433ee0..7d2cf015c5e7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -148,6 +148,12 @@ static inline int phy_aneg_done(struct phy_device *phydev)
148 if (phydev->drv->aneg_done) 148 if (phydev->drv->aneg_done)
149 return phydev->drv->aneg_done(phydev); 149 return phydev->drv->aneg_done(phydev);
150 150
151 /* Avoid genphy_aneg_done() if the Clause 45 PHY does not
152 * implement Clause 22 registers
153 */
154 if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
155 return -EINVAL;
156
151 return genphy_aneg_done(phydev); 157 return genphy_aneg_done(phydev);
152} 158}
153 159
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8179727d3423..1f2f25a71d18 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1265,11 +1265,8 @@ static int gen10g_resume(struct phy_device *phydev)
1265 1265
1266static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) 1266static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
1267{ 1267{
1268 /* The default values for phydev->supported are provided by the PHY 1268 phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
1269 * driver "features" member, we want to reset to sane defaults first 1269 PHY_10BT_FEATURES);
1270 * before supporting higher speeds.
1271 */
1272 phydev->supported &= PHY_DEFAULT_FEATURES;
1273 1270
1274 switch (max_speed) { 1271 switch (max_speed) {
1275 default: 1272 default:
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e2decf71c6d1..46448d7e3290 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2952,6 +2952,15 @@ ppp_connect_channel(struct channel *pch, int unit)
2952 goto outl; 2952 goto outl;
2953 2953
2954 ppp_lock(ppp); 2954 ppp_lock(ppp);
2955 spin_lock_bh(&pch->downl);
2956 if (!pch->chan) {
2957 /* Don't connect unregistered channels */
2958 spin_unlock_bh(&pch->downl);
2959 ppp_unlock(ppp);
2960 ret = -ENOTCONN;
2961 goto outl;
2962 }
2963 spin_unlock_bh(&pch->downl);
2955 if (pch->file.hdrlen > ppp->file.hdrlen) 2964 if (pch->file.hdrlen > ppp->file.hdrlen)
2956 ppp->file.hdrlen = pch->file.hdrlen; 2965 ppp->file.hdrlen = pch->file.hdrlen;
2957 hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ 2966 hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 4e0068e775f9..583d50f80b24 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -638,6 +638,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
638 lock_sock(sk); 638 lock_sock(sk);
639 639
640 error = -EINVAL; 640 error = -EINVAL;
641
642 if (sockaddr_len != sizeof(struct sockaddr_pppox))
643 goto end;
644
641 if (sp->sa_protocol != PX_PROTO_OE) 645 if (sp->sa_protocol != PX_PROTO_OE)
642 goto end; 646 goto end;
643 647
@@ -860,6 +864,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
860 struct pppoe_hdr *ph; 864 struct pppoe_hdr *ph;
861 struct net_device *dev; 865 struct net_device *dev;
862 char *start; 866 char *start;
867 int hlen;
863 868
864 lock_sock(sk); 869 lock_sock(sk);
865 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { 870 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
@@ -878,16 +883,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
878 if (total_len > (dev->mtu + dev->hard_header_len)) 883 if (total_len > (dev->mtu + dev->hard_header_len))
879 goto end; 884 goto end;
880 885
881 886 hlen = LL_RESERVED_SPACE(dev);
882 skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32, 887 skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
883 0, GFP_KERNEL); 888 dev->needed_tailroom, 0, GFP_KERNEL);
884 if (!skb) { 889 if (!skb) {
885 error = -ENOMEM; 890 error = -ENOMEM;
886 goto end; 891 goto end;
887 } 892 }
888 893
889 /* Reserve space for headers. */ 894 /* Reserve space for headers. */
890 skb_reserve(skb, dev->hard_header_len); 895 skb_reserve(skb, hlen);
891 skb_reset_network_header(skb); 896 skb_reset_network_header(skb);
892 897
893 skb->dev = dev; 898 skb->dev = dev;
@@ -948,7 +953,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
948 /* Copy the data if there is no space for the header or if it's 953 /* Copy the data if there is no space for the header or if it's
949 * read-only. 954 * read-only.
950 */ 955 */
951 if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len)) 956 if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
952 goto abort; 957 goto abort;
953 958
954 __skb_push(skb, sizeof(*ph)); 959 __skb_push(skb, sizeof(*ph));
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index f7e8c79349ad..12a627fcc02c 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -501,7 +501,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
501 po->chan.mtu = dst_mtu(&rt->dst); 501 po->chan.mtu = dst_mtu(&rt->dst);
502 if (!po->chan.mtu) 502 if (!po->chan.mtu)
503 po->chan.mtu = PPP_MRU; 503 po->chan.mtu = PPP_MRU;
504 ip_rt_put(rt);
505 po->chan.mtu -= PPTP_HEADER_OVERHEAD; 504 po->chan.mtu -= PPTP_HEADER_OVERHEAD;
506 505
507 po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header); 506 po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 27ed25252aac..cfd81eb1b532 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
509 if(x < 0 || x > comp->rslot_limit) 509 if(x < 0 || x > comp->rslot_limit)
510 goto bad; 510 goto bad;
511 511
512 /* Check if the cstate is initialized */
513 if (!comp->rstate[x].initialized)
514 goto bad;
515
512 comp->flags &=~ SLF_TOSS; 516 comp->flags &=~ SLF_TOSS;
513 comp->recv_current = x; 517 comp->recv_current = x;
514 } else { 518 } else {
@@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
673 if (cs->cs_tcp.doff > 5) 677 if (cs->cs_tcp.doff > 5)
674 memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4); 678 memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
675 cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2; 679 cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
680 cs->initialized = true;
676 /* Put headers back on packet 681 /* Put headers back on packet
677 * Neither header checksum is recalculated 682 * Neither header checksum is recalculated
678 */ 683 */
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 61cd53838360..49174837c2ba 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -247,6 +247,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
247 } 247 }
248} 248}
249 249
250static bool __team_option_inst_tmp_find(const struct list_head *opts,
251 const struct team_option_inst *needle)
252{
253 struct team_option_inst *opt_inst;
254
255 list_for_each_entry(opt_inst, opts, tmp_list)
256 if (opt_inst == needle)
257 return true;
258 return false;
259}
260
250static int __team_options_register(struct team *team, 261static int __team_options_register(struct team *team,
251 const struct team_option *option, 262 const struct team_option *option,
252 size_t option_count) 263 size_t option_count)
@@ -972,7 +983,8 @@ static void team_port_disable(struct team *team,
972static void ___team_compute_features(struct team *team) 983static void ___team_compute_features(struct team *team)
973{ 984{
974 struct team_port *port; 985 struct team_port *port;
975 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; 986 netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
987 NETIF_F_ALL_FOR_ALL;
976 unsigned short max_hard_header_len = ETH_HLEN; 988 unsigned short max_hard_header_len = ETH_HLEN;
977 unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | 989 unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
978 IFF_XMIT_DST_RELEASE_PERM; 990 IFF_XMIT_DST_RELEASE_PERM;
@@ -1039,14 +1051,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
1039} 1051}
1040 1052
1041#ifdef CONFIG_NET_POLL_CONTROLLER 1053#ifdef CONFIG_NET_POLL_CONTROLLER
1042static int team_port_enable_netpoll(struct team *team, struct team_port *port) 1054static int __team_port_enable_netpoll(struct team_port *port)
1043{ 1055{
1044 struct netpoll *np; 1056 struct netpoll *np;
1045 int err; 1057 int err;
1046 1058
1047 if (!team->dev->npinfo)
1048 return 0;
1049
1050 np = kzalloc(sizeof(*np), GFP_KERNEL); 1059 np = kzalloc(sizeof(*np), GFP_KERNEL);
1051 if (!np) 1060 if (!np)
1052 return -ENOMEM; 1061 return -ENOMEM;
@@ -1060,6 +1069,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1060 return err; 1069 return err;
1061} 1070}
1062 1071
1072static int team_port_enable_netpoll(struct team_port *port)
1073{
1074 if (!port->team->dev->npinfo)
1075 return 0;
1076
1077 return __team_port_enable_netpoll(port);
1078}
1079
1063static void team_port_disable_netpoll(struct team_port *port) 1080static void team_port_disable_netpoll(struct team_port *port)
1064{ 1081{
1065 struct netpoll *np = port->np; 1082 struct netpoll *np = port->np;
@@ -1074,7 +1091,7 @@ static void team_port_disable_netpoll(struct team_port *port)
1074 kfree(np); 1091 kfree(np);
1075} 1092}
1076#else 1093#else
1077static int team_port_enable_netpoll(struct team *team, struct team_port *port) 1094static int team_port_enable_netpoll(struct team_port *port)
1078{ 1095{
1079 return 0; 1096 return 0;
1080} 1097}
@@ -1181,7 +1198,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1181 goto err_vids_add; 1198 goto err_vids_add;
1182 } 1199 }
1183 1200
1184 err = team_port_enable_netpoll(team, port); 1201 err = team_port_enable_netpoll(port);
1185 if (err) { 1202 if (err) {
1186 netdev_err(dev, "Failed to enable netpoll on device %s\n", 1203 netdev_err(dev, "Failed to enable netpoll on device %s\n",
1187 portname); 1204 portname);
@@ -1889,7 +1906,7 @@ static int team_netpoll_setup(struct net_device *dev,
1889 1906
1890 mutex_lock(&team->lock); 1907 mutex_lock(&team->lock);
1891 list_for_each_entry(port, &team->port_list, list) { 1908 list_for_each_entry(port, &team->port_list, list) {
1892 err = team_port_enable_netpoll(team, port); 1909 err = __team_port_enable_netpoll(port);
1893 if (err) { 1910 if (err) {
1894 __team_netpoll_cleanup(team); 1911 __team_netpoll_cleanup(team);
1895 break; 1912 break;
@@ -2380,7 +2397,7 @@ send_done:
2380 if (!nlh) { 2397 if (!nlh) {
2381 err = __send_and_alloc_skb(&skb, team, portid, send_func); 2398 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2382 if (err) 2399 if (err)
2383 goto errout; 2400 return err;
2384 goto send_done; 2401 goto send_done;
2385 } 2402 }
2386 2403
@@ -2544,6 +2561,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2544 if (err) 2561 if (err)
2545 goto team_put; 2562 goto team_put;
2546 opt_inst->changed = true; 2563 opt_inst->changed = true;
2564
2565 /* dumb/evil user-space can send us duplicate opt,
2566 * keep only the last one
2567 */
2568 if (__team_option_inst_tmp_find(&opt_inst_list,
2569 opt_inst))
2570 continue;
2571
2547 list_add(&opt_inst->tmp_list, &opt_inst_list); 2572 list_add(&opt_inst->tmp_list, &opt_inst_list);
2548 } 2573 }
2549 if (!opt_found) { 2574 if (!opt_found) {
@@ -2660,7 +2685,7 @@ send_done:
2660 if (!nlh) { 2685 if (!nlh) {
2661 err = __send_and_alloc_skb(&skb, team, portid, send_func); 2686 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2662 if (err) 2687 if (err)
2663 goto errout; 2688 return err;
2664 goto send_done; 2689 goto send_done;
2665 } 2690 }
2666 2691
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 1f6893ebce16..3a7286256db0 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -395,6 +395,10 @@ config USB_NET_RNDIS_HOST
395 The protocol specification is incomplete, and is controlled by 395 The protocol specification is incomplete, and is controlled by
396 (and for) Microsoft; it isn't an "Open" ecosystem or market. 396 (and for) Microsoft; it isn't an "Open" ecosystem or market.
397 397
398config USB_NET_CDC_SUBSET_ENABLE
399 tristate
400 depends on USB_NET_CDC_SUBSET
401
398config USB_NET_CDC_SUBSET 402config USB_NET_CDC_SUBSET
399 tristate "Simple USB Network Links (CDC Ethernet subset)" 403 tristate "Simple USB Network Links (CDC Ethernet subset)"
400 depends on USB_USBNET 404 depends on USB_USBNET
@@ -413,6 +417,7 @@ config USB_NET_CDC_SUBSET
413config USB_ALI_M5632 417config USB_ALI_M5632
414 bool "ALi M5632 based 'USB 2.0 Data Link' cables" 418 bool "ALi M5632 based 'USB 2.0 Data Link' cables"
415 depends on USB_NET_CDC_SUBSET 419 depends on USB_NET_CDC_SUBSET
420 select USB_NET_CDC_SUBSET_ENABLE
416 help 421 help
417 Choose this option if you're using a host-to-host cable 422 Choose this option if you're using a host-to-host cable
418 based on this design, which supports USB 2.0 high speed. 423 based on this design, which supports USB 2.0 high speed.
@@ -420,6 +425,7 @@ config USB_ALI_M5632
420config USB_AN2720 425config USB_AN2720
421 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)" 426 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
422 depends on USB_NET_CDC_SUBSET 427 depends on USB_NET_CDC_SUBSET
428 select USB_NET_CDC_SUBSET_ENABLE
423 help 429 help
424 Choose this option if you're using a host-to-host cable 430 Choose this option if you're using a host-to-host cable
425 based on this design. Note that AnchorChips is now a 431 based on this design. Note that AnchorChips is now a
@@ -428,6 +434,7 @@ config USB_AN2720
428config USB_BELKIN 434config USB_BELKIN
429 bool "eTEK based host-to-host cables (Advance, Belkin, ...)" 435 bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
430 depends on USB_NET_CDC_SUBSET 436 depends on USB_NET_CDC_SUBSET
437 select USB_NET_CDC_SUBSET_ENABLE
431 default y 438 default y
432 help 439 help
433 Choose this option if you're using a host-to-host cable 440 Choose this option if you're using a host-to-host cable
@@ -437,6 +444,7 @@ config USB_BELKIN
437config USB_ARMLINUX 444config USB_ARMLINUX
438 bool "Embedded ARM Linux links (iPaq, ...)" 445 bool "Embedded ARM Linux links (iPaq, ...)"
439 depends on USB_NET_CDC_SUBSET 446 depends on USB_NET_CDC_SUBSET
447 select USB_NET_CDC_SUBSET_ENABLE
440 default y 448 default y
441 help 449 help
442 Choose this option to support the "usb-eth" networking driver 450 Choose this option to support the "usb-eth" networking driver
@@ -454,6 +462,7 @@ config USB_ARMLINUX
454config USB_EPSON2888 462config USB_EPSON2888
455 bool "Epson 2888 based firmware (DEVELOPMENT)" 463 bool "Epson 2888 based firmware (DEVELOPMENT)"
456 depends on USB_NET_CDC_SUBSET 464 depends on USB_NET_CDC_SUBSET
465 select USB_NET_CDC_SUBSET_ENABLE
457 help 466 help
458 Choose this option to support the usb networking links used 467 Choose this option to support the usb networking links used
459 by some sample firmware from Epson. 468 by some sample firmware from Epson.
@@ -461,6 +470,7 @@ config USB_EPSON2888
461config USB_KC2190 470config USB_KC2190
462 bool "KT Technology KC2190 based cables (InstaNet)" 471 bool "KT Technology KC2190 based cables (InstaNet)"
463 depends on USB_NET_CDC_SUBSET 472 depends on USB_NET_CDC_SUBSET
473 select USB_NET_CDC_SUBSET_ENABLE
464 help 474 help
465 Choose this option if you're using a host-to-host cable 475 Choose this option if you're using a host-to-host cable
466 with one of these chips. 476 with one of these chips.
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b5f04068dbe4..37fb46aee341 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
23obj-$(CONFIG_USB_NET_NET1080) += net1080.o 23obj-$(CONFIG_USB_NET_NET1080) += net1080.o
24obj-$(CONFIG_USB_NET_PLUSB) += plusb.o 24obj-$(CONFIG_USB_NET_PLUSB) += plusb.o
25obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o 25obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o
26obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o 26obj-$(CONFIG_USB_NET_CDC_SUBSET_ENABLE) += cdc_subset.o
27obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o 27obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
28obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o 28obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
29obj-$(CONFIG_USB_USBNET) += usbnet.o 29obj-$(CONFIG_USB_USBNET) += usbnet.o
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index f9343bee1de3..f71abe50ea6f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -461,6 +461,7 @@ static const struct driver_info wwan_info = {
461#define REALTEK_VENDOR_ID 0x0bda 461#define REALTEK_VENDOR_ID 0x0bda
462#define SAMSUNG_VENDOR_ID 0x04e8 462#define SAMSUNG_VENDOR_ID 0x04e8
463#define LENOVO_VENDOR_ID 0x17ef 463#define LENOVO_VENDOR_ID 0x17ef
464#define LINKSYS_VENDOR_ID 0x13b1
464#define NVIDIA_VENDOR_ID 0x0955 465#define NVIDIA_VENDOR_ID 0x0955
465#define HP_VENDOR_ID 0x03f0 466#define HP_VENDOR_ID 0x03f0
466 467
@@ -650,6 +651,15 @@ static const struct usb_device_id products[] = {
650 .driver_info = 0, 651 .driver_info = 0,
651}, 652},
652 653
654#if IS_ENABLED(CONFIG_USB_RTL8152)
655/* Linksys USB3GIGV1 Ethernet Adapter */
656{
657 USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
658 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
659 .driver_info = 0,
660},
661#endif
662
653/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ 663/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
654{ 664{
655 USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM, 665 USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
@@ -705,6 +715,12 @@ static const struct usb_device_id products[] = {
705 USB_CDC_PROTO_NONE), 715 USB_CDC_PROTO_NONE),
706 .driver_info = (unsigned long)&wwan_info, 716 .driver_info = (unsigned long)&wwan_info,
707}, { 717}, {
718 /* Cinterion AHS3 modem by GEMALTO */
719 USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
720 USB_CDC_SUBCLASS_ETHERNET,
721 USB_CDC_PROTO_NONE),
722 .driver_info = (unsigned long)&wwan_info,
723}, {
708 /* Telit modules */ 724 /* Telit modules */
709 USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM, 725 USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
710 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 726 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 96a5028621c8..8edbccf06b7b 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -593,7 +593,7 @@ static const struct driver_info cdc_mbim_info_zlp = {
593 */ 593 */
594static const struct driver_info cdc_mbim_info_ndp_to_end = { 594static const struct driver_info cdc_mbim_info_ndp_to_end = {
595 .description = "CDC MBIM", 595 .description = "CDC MBIM",
596 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, 596 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
597 .bind = cdc_mbim_bind, 597 .bind = cdc_mbim_bind,
598 .unbind = cdc_mbim_unbind, 598 .unbind = cdc_mbim_unbind,
599 .manage_power = cdc_mbim_manage_power, 599 .manage_power = cdc_mbim_manage_power,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 1228d0da4075..36e1377fc954 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -825,6 +825,9 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
825 goto error2; 825 goto error2;
826 } 826 }
827 827
828 /* Device-specific flags */
829 ctx->drvflags = drvflags;
830
828 /* 831 /*
829 * Some Huawei devices have been observed to come out of reset in NDP32 mode. 832 * Some Huawei devices have been observed to come out of reset in NDP32 mode.
830 * Let's check if this is the case, and set the device to NDP16 mode again if 833 * Let's check if this is the case, and set the device to NDP16 mode again if
@@ -873,9 +876,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
873 /* finish setting up the device specific data */ 876 /* finish setting up the device specific data */
874 cdc_ncm_setup(dev); 877 cdc_ncm_setup(dev);
875 878
876 /* Device-specific flags */
877 ctx->drvflags = drvflags;
878
879 /* Allocate the delayed NDP if needed. */ 879 /* Allocate the delayed NDP if needed. */
880 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { 880 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
881 ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL); 881 ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
@@ -1069,12 +1069,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1069 u16 n = 0, index, ndplen; 1069 u16 n = 0, index, ndplen;
1070 u8 ready2send = 0; 1070 u8 ready2send = 0;
1071 u32 delayed_ndp_size; 1071 u32 delayed_ndp_size;
1072 size_t padding_count;
1072 1073
1073 /* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated 1074 /* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated
1074 * accordingly. Otherwise, we should check here. 1075 * accordingly. Otherwise, we should check here.
1075 */ 1076 */
1076 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) 1077 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
1077 delayed_ndp_size = ctx->max_ndp_size; 1078 delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
1078 else 1079 else
1079 delayed_ndp_size = 0; 1080 delayed_ndp_size = 0;
1080 1081
@@ -1207,7 +1208,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1207 /* If requested, put NDP at end of frame. */ 1208 /* If requested, put NDP at end of frame. */
1208 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { 1209 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
1209 nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; 1210 nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
1210 cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max); 1211 cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max - ctx->max_ndp_size);
1211 nth16->wNdpIndex = cpu_to_le16(skb_out->len); 1212 nth16->wNdpIndex = cpu_to_le16(skb_out->len);
1212 memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size); 1213 memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size);
1213 1214
@@ -1225,11 +1226,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1225 * a ZLP after full sized NTBs. 1226 * a ZLP after full sized NTBs.
1226 */ 1227 */
1227 if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && 1228 if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
1228 skb_out->len > ctx->min_tx_pkt) 1229 skb_out->len > ctx->min_tx_pkt) {
1229 memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, 1230 padding_count = ctx->tx_max - skb_out->len;
1230 ctx->tx_max - skb_out->len); 1231 memset(skb_put(skb_out, padding_count), 0, padding_count);
1231 else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) 1232 } else if (skb_out->len < ctx->tx_max &&
1233 (skb_out->len % dev->maxpacket) == 0) {
1232 *skb_put(skb_out, 1) = 0; /* force short packet */ 1234 *skb_put(skb_out, 1) = 0; /* force short packet */
1235 }
1233 1236
1234 /* set final frame length */ 1237 /* set final frame length */
1235 nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; 1238 nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 41e9ebd7d0a6..acec4b565511 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -618,7 +618,8 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
618 offset += 0x100; 618 offset += 0x100;
619 else 619 else
620 ret = -EINVAL; 620 ret = -EINVAL;
621 ret = lan78xx_read_raw_otp(dev, offset, length, data); 621 if (!ret)
622 ret = lan78xx_read_raw_otp(dev, offset, length, data);
622 } 623 }
623 624
624 return ret; 625 return ret;
@@ -1360,6 +1361,8 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1360 netif_dbg(dev, ifup, dev->net, 1361 netif_dbg(dev, ifup, dev->net,
1361 "MAC address set to random addr"); 1362 "MAC address set to random addr");
1362 } 1363 }
1364
1365 tasklet_schedule(&dev->bh);
1363 } 1366 }
1364 1367
1365 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); 1368 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
@@ -1859,6 +1862,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
1859 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; 1862 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
1860 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; 1863 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1861 dev->rx_qlen = 4; 1864 dev->rx_qlen = 4;
1865 dev->tx_qlen = 4;
1862 } 1866 }
1863 1867
1864 ret = lan78xx_write_reg(dev, BURST_CAP, buf); 1868 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index b0ea8dee5f06..3b67140eed73 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -631,11 +631,16 @@ static const struct usb_device_id products[] = {
631 {QMI_FIXED_INTF(0x05c6, 0x9080, 8)}, 631 {QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
632 {QMI_FIXED_INTF(0x05c6, 0x9083, 3)}, 632 {QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
633 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, 633 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
634 {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */
634 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, 635 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
635 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 636 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
636 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, 637 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
638 {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
637 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 639 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
638 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 640 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
641 {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
642 {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
643 {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
639 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ 644 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
640 {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ 645 {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
641 {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ 646 {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
@@ -712,6 +717,7 @@ static const struct usb_device_id products[] = {
712 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 717 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
713 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ 718 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
714 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ 719 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
720 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
715 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 721 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
716 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 722 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
717 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 723 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@@ -761,6 +767,7 @@ static const struct usb_device_id products[] = {
761 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 767 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
762 {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ 768 {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
763 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ 769 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
770 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
764 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ 771 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
765 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ 772 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
766 773
@@ -854,6 +861,18 @@ static int qmi_wwan_probe(struct usb_interface *intf,
854 id->driver_info = (unsigned long)&qmi_wwan_info; 861 id->driver_info = (unsigned long)&qmi_wwan_info;
855 } 862 }
856 863
864 /* There are devices where the same interface number can be
865 * configured as different functions. We should only bind to
866 * vendor specific functions when matching on interface number
867 */
868 if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
869 desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
870 dev_dbg(&intf->dev,
871 "Rejecting interface number match for class %02x\n",
872 desc->bInterfaceClass);
873 return -ENODEV;
874 }
875
857 /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */ 876 /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
858 if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) { 877 if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
859 dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n"); 878 dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 89950f5cea71..2bb336cb13ee 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -506,6 +506,7 @@ enum rtl8152_flags {
506#define VENDOR_ID_REALTEK 0x0bda 506#define VENDOR_ID_REALTEK 0x0bda
507#define VENDOR_ID_SAMSUNG 0x04e8 507#define VENDOR_ID_SAMSUNG 0x04e8
508#define VENDOR_ID_LENOVO 0x17ef 508#define VENDOR_ID_LENOVO 0x17ef
509#define VENDOR_ID_LINKSYS 0x13b1
509#define VENDOR_ID_NVIDIA 0x0955 510#define VENDOR_ID_NVIDIA 0x0955
510 511
511#define MCU_TYPE_PLA 0x0100 512#define MCU_TYPE_PLA 0x0100
@@ -1609,7 +1610,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1609 1610
1610 tx_data += len; 1611 tx_data += len;
1611 agg->skb_len += len; 1612 agg->skb_len += len;
1612 agg->skb_num++; 1613 agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
1613 1614
1614 dev_kfree_skb_any(skb); 1615 dev_kfree_skb_any(skb);
1615 1616
@@ -3138,7 +3139,8 @@ static int rtl8152_close(struct net_device *netdev)
3138#ifdef CONFIG_PM_SLEEP 3139#ifdef CONFIG_PM_SLEEP
3139 unregister_pm_notifier(&tp->pm_notifier); 3140 unregister_pm_notifier(&tp->pm_notifier);
3140#endif 3141#endif
3141 napi_disable(&tp->napi); 3142 if (!test_bit(RTL8152_UNPLUG, &tp->flags))
3143 napi_disable(&tp->napi);
3142 clear_bit(WORK_ENABLE, &tp->flags); 3144 clear_bit(WORK_ENABLE, &tp->flags);
3143 usb_kill_urb(tp->intr_urb); 3145 usb_kill_urb(tp->intr_urb);
3144 cancel_delayed_work_sync(&tp->schedule); 3146 cancel_delayed_work_sync(&tp->schedule);
@@ -4376,6 +4378,7 @@ static struct usb_device_id rtl8152_table[] = {
4376 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, 4378 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
4377 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, 4379 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
4378 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, 4380 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
4381 {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
4379 {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, 4382 {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
4380 {} 4383 {}
4381}; 4384};
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index c5f375befd2f..7337e6c0e126 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -945,10 +945,11 @@ static int smsc75xx_set_features(struct net_device *netdev,
945 /* it's racing here! */ 945 /* it's racing here! */
946 946
947 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); 947 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
948 if (ret < 0) 948 if (ret < 0) {
949 netdev_warn(dev->net, "Error writing RFE_CTL\n"); 949 netdev_warn(dev->net, "Error writing RFE_CTL\n");
950 950 return ret;
951 return ret; 951 }
952 return 0;
952} 953}
953 954
954static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) 955static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ba21d072be31..6b4cc1c2e6b4 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -399,6 +399,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
399 if (ifmp && (dev->ifindex != 0)) 399 if (ifmp && (dev->ifindex != 0))
400 peer->ifindex = ifmp->ifi_index; 400 peer->ifindex = ifmp->ifi_index;
401 401
402 peer->gso_max_size = dev->gso_max_size;
403 peer->gso_max_segs = dev->gso_max_segs;
404
402 err = register_netdevice(peer); 405 err = register_netdevice(peer);
403 put_net(net); 406 put_net(net);
404 net = NULL; 407 net = NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8dfc75250583..2759d386ade7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -556,7 +556,12 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
556 hdr = skb_vnet_hdr(skb); 556 hdr = skb_vnet_hdr(skb);
557 sg_init_table(rq->sg, 2); 557 sg_init_table(rq->sg, 2);
558 sg_set_buf(rq->sg, hdr, vi->hdr_len); 558 sg_set_buf(rq->sg, hdr, vi->hdr_len);
559 skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); 559
560 err = skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
561 if (unlikely(err < 0)) {
562 dev_kfree_skb(skb);
563 return err;
564 }
560 565
561 err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); 566 err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
562 if (err < 0) 567 if (err < 0)
@@ -858,7 +863,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
858 struct virtio_net_hdr_mrg_rxbuf *hdr; 863 struct virtio_net_hdr_mrg_rxbuf *hdr;
859 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 864 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
860 struct virtnet_info *vi = sq->vq->vdev->priv; 865 struct virtnet_info *vi = sq->vq->vdev->priv;
861 unsigned num_sg; 866 int num_sg;
862 unsigned hdr_len = vi->hdr_len; 867 unsigned hdr_len = vi->hdr_len;
863 bool can_push; 868 bool can_push;
864 869
@@ -911,11 +916,16 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
911 if (can_push) { 916 if (can_push) {
912 __skb_push(skb, hdr_len); 917 __skb_push(skb, hdr_len);
913 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); 918 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
919 if (unlikely(num_sg < 0))
920 return num_sg;
914 /* Pull header back to avoid skew in tx bytes calculations. */ 921 /* Pull header back to avoid skew in tx bytes calculations. */
915 __skb_pull(skb, hdr_len); 922 __skb_pull(skb, hdr_len);
916 } else { 923 } else {
917 sg_set_buf(sq->sg, hdr, hdr_len); 924 sg_set_buf(sq->sg, hdr, hdr_len);
918 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; 925 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
926 if (unlikely(num_sg < 0))
927 return num_sg;
928 num_sg++;
919 } 929 }
920 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); 930 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
921} 931}
@@ -1902,8 +1912,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1902 1912
1903 /* Assume link up if device can't report link status, 1913 /* Assume link up if device can't report link status,
1904 otherwise get link status from config. */ 1914 otherwise get link status from config. */
1915 netif_carrier_off(dev);
1905 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { 1916 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
1906 netif_carrier_off(dev);
1907 schedule_work(&vi->config_work); 1917 schedule_work(&vi->config_work);
1908 } else { 1918 } else {
1909 vi->status = VIRTIO_NET_S_LINK_UP; 1919 vi->status = VIRTIO_NET_S_LINK_UP;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0cbf520cea77..419c045d0752 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1563,7 +1563,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1563 rq->rx_ring[i].basePA); 1563 rq->rx_ring[i].basePA);
1564 rq->rx_ring[i].base = NULL; 1564 rq->rx_ring[i].base = NULL;
1565 } 1565 }
1566 rq->buf_info[i] = NULL;
1567 } 1566 }
1568 1567
1569 if (rq->comp_ring.base) { 1568 if (rq->comp_ring.base) {
@@ -1578,6 +1577,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1578 (rq->rx_ring[0].size + rq->rx_ring[1].size); 1577 (rq->rx_ring[0].size + rq->rx_ring[1].size);
1579 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], 1578 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1580 rq->buf_info_pa); 1579 rq->buf_info_pa);
1580 rq->buf_info[0] = rq->buf_info[1] = NULL;
1581 } 1581 }
1582} 1582}
1583 1583
@@ -2789,6 +2789,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2789 /* we need to enable NAPI, otherwise dev_close will deadlock */ 2789 /* we need to enable NAPI, otherwise dev_close will deadlock */
2790 for (i = 0; i < adapter->num_rx_queues; i++) 2790 for (i = 0; i < adapter->num_rx_queues; i++)
2791 napi_enable(&adapter->rx_queue[i].napi); 2791 napi_enable(&adapter->rx_queue[i].napi);
2792 /*
2793 * Need to clear the quiesce bit to ensure that vmxnet3_close
2794 * can quiesce the device properly
2795 */
2796 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2792 dev_close(adapter->netdev); 2797 dev_close(adapter->netdev);
2793} 2798}
2794 2799
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index ac945f8781ac..d3d59122a357 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -550,13 +550,15 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
550 neigh = __ipv4_neigh_lookup_noref(dev, nexthop); 550 neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
551 if (unlikely(!neigh)) 551 if (unlikely(!neigh))
552 neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); 552 neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
553 if (!IS_ERR(neigh)) 553 if (!IS_ERR(neigh)) {
554 ret = dst_neigh_output(dst, neigh, skb); 554 ret = dst_neigh_output(dst, neigh, skb);
555 rcu_read_unlock_bh();
556 return ret;
557 }
555 558
556 rcu_read_unlock_bh(); 559 rcu_read_unlock_bh();
557err: 560err:
558 if (unlikely(ret < 0)) 561 vrf_tx_error(skb->dev, skb);
559 vrf_tx_error(skb->dev, skb);
560 return ret; 562 return ret;
561} 563}
562 564
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index dab3bf6649e6..c41378214ede 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -962,7 +962,7 @@ static bool vxlan_snoop(struct net_device *dev,
962 return false; 962 return false;
963 963
964 /* Don't migrate static entries, drop packets */ 964 /* Don't migrate static entries, drop packets */
965 if (f->state & NUD_NOARP) 965 if (f->state & (NUD_PERMANENT | NUD_NOARP))
966 return true; 966 return true;
967 967
968 if (net_ratelimit()) 968 if (net_ratelimit())
@@ -2834,6 +2834,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2834 needed_headroom = lowerdev->hard_header_len; 2834 needed_headroom = lowerdev->hard_header_len;
2835 } 2835 }
2836 2836
2837 if (lowerdev) {
2838 dev->gso_max_size = lowerdev->gso_max_size;
2839 dev->gso_max_segs = lowerdev->gso_max_segs;
2840 }
2841
2837 if (conf->mtu) { 2842 if (conf->mtu) {
2838 err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false); 2843 err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
2839 if (err) 2844 if (err)
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 0d7645581f91..4842344a96f1 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg)
574 ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, 574 ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
575 0, NULL); 575 0, NULL);
576 proto->restart_counter--; 576 proto->restart_counter--;
577 } else 577 } else if (netif_carrier_ok(proto->dev))
578 ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
579 0, NULL);
580 else
578 ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, 581 ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
579 0, NULL); 582 0, NULL);
580 break; 583 break;
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index db363856e0b5..2b064998915f 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -347,6 +347,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
347 card->rambase == NULL) { 347 card->rambase == NULL) {
348 pr_err("ioremap() failed\n"); 348 pr_err("ioremap() failed\n");
349 pc300_pci_remove_one(pdev); 349 pc300_pci_remove_one(pdev);
350 return -ENOMEM;
350 } 351 }
351 352
352 /* PLX PCI 9050 workaround for local configuration register read bug */ 353 /* PLX PCI 9050 workaround for local configuration register read bug */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index ee638cb8b48f..0c23768aa1ec 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -67,6 +67,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
67 .board_size = QCA988X_BOARD_DATA_SZ, 67 .board_size = QCA988X_BOARD_DATA_SZ,
68 .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, 68 .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
69 }, 69 },
70 .decap_align_bytes = 4,
70 }, 71 },
71 { 72 {
72 .id = QCA6174_HW_2_1_VERSION, 73 .id = QCA6174_HW_2_1_VERSION,
@@ -85,6 +86,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
85 .board_size = QCA6174_BOARD_DATA_SZ, 86 .board_size = QCA6174_BOARD_DATA_SZ,
86 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, 87 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
87 }, 88 },
89 .decap_align_bytes = 4,
88 }, 90 },
89 { 91 {
90 .id = QCA6174_HW_2_1_VERSION, 92 .id = QCA6174_HW_2_1_VERSION,
@@ -103,6 +105,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
103 .board_size = QCA6174_BOARD_DATA_SZ, 105 .board_size = QCA6174_BOARD_DATA_SZ,
104 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, 106 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
105 }, 107 },
108 .decap_align_bytes = 4,
106 }, 109 },
107 { 110 {
108 .id = QCA6174_HW_3_0_VERSION, 111 .id = QCA6174_HW_3_0_VERSION,
@@ -121,6 +124,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
121 .board_size = QCA6174_BOARD_DATA_SZ, 124 .board_size = QCA6174_BOARD_DATA_SZ,
122 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, 125 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
123 }, 126 },
127 .decap_align_bytes = 4,
124 }, 128 },
125 { 129 {
126 .id = QCA6174_HW_3_2_VERSION, 130 .id = QCA6174_HW_3_2_VERSION,
@@ -140,6 +144,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
140 .board_size = QCA6174_BOARD_DATA_SZ, 144 .board_size = QCA6174_BOARD_DATA_SZ,
141 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, 145 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
142 }, 146 },
147 .decap_align_bytes = 4,
143 }, 148 },
144 { 149 {
145 .id = QCA99X0_HW_2_0_DEV_VERSION, 150 .id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -159,6 +164,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
159 .board_size = QCA99X0_BOARD_DATA_SZ, 164 .board_size = QCA99X0_BOARD_DATA_SZ,
160 .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, 165 .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
161 }, 166 },
167 .decap_align_bytes = 1,
162 }, 168 },
163 { 169 {
164 .id = QCA9377_HW_1_0_DEV_VERSION, 170 .id = QCA9377_HW_1_0_DEV_VERSION,
@@ -177,6 +183,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
177 .board_size = QCA9377_BOARD_DATA_SZ, 183 .board_size = QCA9377_BOARD_DATA_SZ,
178 .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, 184 .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
179 }, 185 },
186 .decap_align_bytes = 4,
180 }, 187 },
181 { 188 {
182 .id = QCA9377_HW_1_1_DEV_VERSION, 189 .id = QCA9377_HW_1_1_DEV_VERSION,
@@ -195,6 +202,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
195 .board_size = QCA9377_BOARD_DATA_SZ, 202 .board_size = QCA9377_BOARD_DATA_SZ,
196 .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, 203 .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
197 }, 204 },
205 .decap_align_bytes = 4,
198 }, 206 },
199}; 207};
200 208
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 858d75f49a9f..257836a0cfbc 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -670,6 +670,10 @@ struct ath10k {
670 size_t board_size; 670 size_t board_size;
671 size_t board_ext_size; 671 size_t board_ext_size;
672 } fw; 672 } fw;
673
674 /* Number of bytes used for alignment in rx_hdr_status */
675 int decap_align_bytes;
676
673 } hw_params; 677 } hw_params;
674 678
675 const struct firmware *board; 679 const struct firmware *board;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 1a88a24ffeac..30c357567054 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1892,6 +1892,15 @@ static ssize_t ath10k_write_simulate_radar(struct file *file,
1892 size_t count, loff_t *ppos) 1892 size_t count, loff_t *ppos)
1893{ 1893{
1894 struct ath10k *ar = file->private_data; 1894 struct ath10k *ar = file->private_data;
1895 struct ath10k_vif *arvif;
1896
1897 /* Just check for for the first vif alone, as all the vifs will be
1898 * sharing the same channel and if the channel is disabled, all the
1899 * vifs will share the same 'is_started' state.
1900 */
1901 arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list);
1902 if (!arvif->is_started)
1903 return -EINVAL;
1895 1904
1896 ieee80211_radar_detected(ar->hw); 1905 ieee80211_radar_detected(ar->hw);
1897 1906
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 6060dda4e910..b32c47fe926d 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -979,7 +979,7 @@ static void ath10k_process_rx(struct ath10k *ar,
979 *status = *rx_status; 979 *status = *rx_status;
980 980
981 ath10k_dbg(ar, ATH10K_DBG_DATA, 981 ath10k_dbg(ar, ATH10K_DBG_DATA,
982 "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 982 "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
983 skb, 983 skb,
984 skb->len, 984 skb->len,
985 ieee80211_get_SA(hdr), 985 ieee80211_get_SA(hdr),
@@ -1076,7 +1076,21 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
1076 hdr = (void *)msdu->data; 1076 hdr = (void *)msdu->data;
1077 1077
1078 /* Tail */ 1078 /* Tail */
1079 skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype)); 1079 if (status->flag & RX_FLAG_IV_STRIPPED) {
1080 skb_trim(msdu, msdu->len -
1081 ath10k_htt_rx_crypto_tail_len(ar, enctype));
1082 } else {
1083 /* MIC */
1084 if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
1085 enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
1086 skb_trim(msdu, msdu->len - 8);
1087
1088 /* ICV */
1089 if (status->flag & RX_FLAG_ICV_STRIPPED &&
1090 enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
1091 skb_trim(msdu, msdu->len -
1092 ath10k_htt_rx_crypto_tail_len(ar, enctype));
1093 }
1080 1094
1081 /* MMIC */ 1095 /* MMIC */
1082 if (!ieee80211_has_morefrags(hdr->frame_control) && 1096 if (!ieee80211_has_morefrags(hdr->frame_control) &&
@@ -1095,12 +1109,14 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
1095static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, 1109static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1096 struct sk_buff *msdu, 1110 struct sk_buff *msdu,
1097 struct ieee80211_rx_status *status, 1111 struct ieee80211_rx_status *status,
1098 const u8 first_hdr[64]) 1112 const u8 first_hdr[64],
1113 enum htt_rx_mpdu_encrypt_type enctype)
1099{ 1114{
1100 struct ieee80211_hdr *hdr; 1115 struct ieee80211_hdr *hdr;
1101 size_t hdr_len; 1116 size_t hdr_len;
1102 u8 da[ETH_ALEN]; 1117 u8 da[ETH_ALEN];
1103 u8 sa[ETH_ALEN]; 1118 u8 sa[ETH_ALEN];
1119 int bytes_aligned = ar->hw_params.decap_align_bytes;
1104 1120
1105 /* Delivered decapped frame: 1121 /* Delivered decapped frame:
1106 * [nwifi 802.11 header] <-- replaced with 802.11 hdr 1122 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1123,6 +1139,14 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1123 /* push original 802.11 header */ 1139 /* push original 802.11 header */
1124 hdr = (struct ieee80211_hdr *)first_hdr; 1140 hdr = (struct ieee80211_hdr *)first_hdr;
1125 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1141 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1142
1143 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1144 memcpy(skb_push(msdu,
1145 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1146 (void *)hdr + round_up(hdr_len, bytes_aligned),
1147 ath10k_htt_rx_crypto_param_len(ar, enctype));
1148 }
1149
1126 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1150 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1127 1151
1128 /* original 802.11 header has a different DA and in 1152 /* original 802.11 header has a different DA and in
@@ -1142,6 +1166,7 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
1142 size_t hdr_len, crypto_len; 1166 size_t hdr_len, crypto_len;
1143 void *rfc1042; 1167 void *rfc1042;
1144 bool is_first, is_last, is_amsdu; 1168 bool is_first, is_last, is_amsdu;
1169 int bytes_aligned = ar->hw_params.decap_align_bytes;
1145 1170
1146 rxd = (void *)msdu->data - sizeof(*rxd); 1171 rxd = (void *)msdu->data - sizeof(*rxd);
1147 hdr = (void *)rxd->rx_hdr_status; 1172 hdr = (void *)rxd->rx_hdr_status;
@@ -1158,8 +1183,8 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
1158 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1183 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1159 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1184 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1160 1185
1161 rfc1042 += round_up(hdr_len, 4) + 1186 rfc1042 += round_up(hdr_len, bytes_aligned) +
1162 round_up(crypto_len, 4); 1187 round_up(crypto_len, bytes_aligned);
1163 } 1188 }
1164 1189
1165 if (is_amsdu) 1190 if (is_amsdu)
@@ -1180,6 +1205,7 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1180 void *rfc1042; 1205 void *rfc1042;
1181 u8 da[ETH_ALEN]; 1206 u8 da[ETH_ALEN];
1182 u8 sa[ETH_ALEN]; 1207 u8 sa[ETH_ALEN];
1208 int bytes_aligned = ar->hw_params.decap_align_bytes;
1183 1209
1184 /* Delivered decapped frame: 1210 /* Delivered decapped frame:
1185 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc 1211 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1203,6 +1229,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1203 /* push original 802.11 header */ 1229 /* push original 802.11 header */
1204 hdr = (struct ieee80211_hdr *)first_hdr; 1230 hdr = (struct ieee80211_hdr *)first_hdr;
1205 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1231 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1232
1233 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1234 memcpy(skb_push(msdu,
1235 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1236 (void *)hdr + round_up(hdr_len, bytes_aligned),
1237 ath10k_htt_rx_crypto_param_len(ar, enctype));
1238 }
1239
1206 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1240 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1207 1241
1208 /* original 802.11 header has a different DA and in 1242 /* original 802.11 header has a different DA and in
@@ -1216,10 +1250,12 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1216static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, 1250static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1217 struct sk_buff *msdu, 1251 struct sk_buff *msdu,
1218 struct ieee80211_rx_status *status, 1252 struct ieee80211_rx_status *status,
1219 const u8 first_hdr[64]) 1253 const u8 first_hdr[64],
1254 enum htt_rx_mpdu_encrypt_type enctype)
1220{ 1255{
1221 struct ieee80211_hdr *hdr; 1256 struct ieee80211_hdr *hdr;
1222 size_t hdr_len; 1257 size_t hdr_len;
1258 int bytes_aligned = ar->hw_params.decap_align_bytes;
1223 1259
1224 /* Delivered decapped frame: 1260 /* Delivered decapped frame:
1225 * [amsdu header] <-- replaced with 802.11 hdr 1261 * [amsdu header] <-- replaced with 802.11 hdr
@@ -1231,6 +1267,14 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1231 1267
1232 hdr = (struct ieee80211_hdr *)first_hdr; 1268 hdr = (struct ieee80211_hdr *)first_hdr;
1233 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1269 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1270
1271 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1272 memcpy(skb_push(msdu,
1273 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1274 (void *)hdr + round_up(hdr_len, bytes_aligned),
1275 ath10k_htt_rx_crypto_param_len(ar, enctype));
1276 }
1277
1234 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1278 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1235} 1279}
1236 1280
@@ -1265,13 +1309,15 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
1265 is_decrypted); 1309 is_decrypted);
1266 break; 1310 break;
1267 case RX_MSDU_DECAP_NATIVE_WIFI: 1311 case RX_MSDU_DECAP_NATIVE_WIFI:
1268 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr); 1312 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
1313 enctype);
1269 break; 1314 break;
1270 case RX_MSDU_DECAP_ETHERNET2_DIX: 1315 case RX_MSDU_DECAP_ETHERNET2_DIX:
1271 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); 1316 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
1272 break; 1317 break;
1273 case RX_MSDU_DECAP_8023_SNAP_LLC: 1318 case RX_MSDU_DECAP_8023_SNAP_LLC:
1274 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr); 1319 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
1320 enctype);
1275 break; 1321 break;
1276 } 1322 }
1277} 1323}
@@ -1314,7 +1360,8 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
1314 1360
1315static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, 1361static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1316 struct sk_buff_head *amsdu, 1362 struct sk_buff_head *amsdu,
1317 struct ieee80211_rx_status *status) 1363 struct ieee80211_rx_status *status,
1364 bool fill_crypt_header)
1318{ 1365{
1319 struct sk_buff *first; 1366 struct sk_buff *first;
1320 struct sk_buff *last; 1367 struct sk_buff *last;
@@ -1324,7 +1371,6 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1324 enum htt_rx_mpdu_encrypt_type enctype; 1371 enum htt_rx_mpdu_encrypt_type enctype;
1325 u8 first_hdr[64]; 1372 u8 first_hdr[64];
1326 u8 *qos; 1373 u8 *qos;
1327 size_t hdr_len;
1328 bool has_fcs_err; 1374 bool has_fcs_err;
1329 bool has_crypto_err; 1375 bool has_crypto_err;
1330 bool has_tkip_err; 1376 bool has_tkip_err;
@@ -1345,15 +1391,17 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1345 * decapped header. It'll be used for undecapping of each MSDU. 1391 * decapped header. It'll be used for undecapping of each MSDU.
1346 */ 1392 */
1347 hdr = (void *)rxd->rx_hdr_status; 1393 hdr = (void *)rxd->rx_hdr_status;
1348 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1394 memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1349 memcpy(first_hdr, hdr, hdr_len);
1350 1395
1351 /* Each A-MSDU subframe will use the original header as the base and be 1396 /* Each A-MSDU subframe will use the original header as the base and be
1352 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 1397 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1353 */ 1398 */
1354 hdr = (void *)first_hdr; 1399 hdr = (void *)first_hdr;
1355 qos = ieee80211_get_qos_ctl(hdr); 1400
1356 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1401 if (ieee80211_is_data_qos(hdr->frame_control)) {
1402 qos = ieee80211_get_qos_ctl(hdr);
1403 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1404 }
1357 1405
1358 /* Some attention flags are valid only in the last MSDU. */ 1406 /* Some attention flags are valid only in the last MSDU. */
1359 last = skb_peek_tail(amsdu); 1407 last = skb_peek_tail(amsdu);
@@ -1387,11 +1435,17 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1387 if (has_tkip_err) 1435 if (has_tkip_err)
1388 status->flag |= RX_FLAG_MMIC_ERROR; 1436 status->flag |= RX_FLAG_MMIC_ERROR;
1389 1437
1390 if (is_decrypted) 1438 if (is_decrypted) {
1391 status->flag |= RX_FLAG_DECRYPTED | 1439 status->flag |= RX_FLAG_DECRYPTED |
1392 RX_FLAG_IV_STRIPPED |
1393 RX_FLAG_MMIC_STRIPPED; 1440 RX_FLAG_MMIC_STRIPPED;
1394 1441
1442 if (fill_crypt_header)
1443 status->flag |= RX_FLAG_MIC_STRIPPED |
1444 RX_FLAG_ICV_STRIPPED;
1445 else
1446 status->flag |= RX_FLAG_IV_STRIPPED;
1447 }
1448
1395 skb_queue_walk(amsdu, msdu) { 1449 skb_queue_walk(amsdu, msdu) {
1396 ath10k_htt_rx_h_csum_offload(msdu); 1450 ath10k_htt_rx_h_csum_offload(msdu);
1397 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, 1451 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
@@ -1404,6 +1458,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1404 if (!is_decrypted) 1458 if (!is_decrypted)
1405 continue; 1459 continue;
1406 1460
1461 if (fill_crypt_header)
1462 continue;
1463
1407 hdr = (void *)msdu->data; 1464 hdr = (void *)msdu->data;
1408 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 1465 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1409 } 1466 }
@@ -1414,6 +1471,9 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1414 struct ieee80211_rx_status *status) 1471 struct ieee80211_rx_status *status)
1415{ 1472{
1416 struct sk_buff *msdu; 1473 struct sk_buff *msdu;
1474 struct sk_buff *first_subframe;
1475
1476 first_subframe = skb_peek(amsdu);
1417 1477
1418 while ((msdu = __skb_dequeue(amsdu))) { 1478 while ((msdu = __skb_dequeue(amsdu))) {
1419 /* Setup per-MSDU flags */ 1479 /* Setup per-MSDU flags */
@@ -1422,6 +1482,13 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1422 else 1482 else
1423 status->flag |= RX_FLAG_AMSDU_MORE; 1483 status->flag |= RX_FLAG_AMSDU_MORE;
1424 1484
1485 if (msdu == first_subframe) {
1486 first_subframe = NULL;
1487 status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
1488 } else {
1489 status->flag |= RX_FLAG_ALLOW_SAME_PN;
1490 }
1491
1425 ath10k_process_rx(ar, status, msdu); 1492 ath10k_process_rx(ar, status, msdu);
1426 } 1493 }
1427} 1494}
@@ -1607,7 +1674,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
1607 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); 1674 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1608 ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); 1675 ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
1609 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); 1676 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1610 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); 1677 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
1611 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); 1678 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1612 } 1679 }
1613 1680
@@ -1653,7 +1720,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
1653 1720
1654 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); 1721 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1655 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); 1722 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1656 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); 1723 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
1657 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); 1724 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1658 1725
1659 if (fw_desc_len > 0) { 1726 if (fw_desc_len > 0) {
@@ -1952,7 +2019,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
1952 */ 2019 */
1953 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); 2020 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
1954 ath10k_htt_rx_h_filter(ar, &amsdu, status); 2021 ath10k_htt_rx_h_filter(ar, &amsdu, status);
1955 ath10k_htt_rx_h_mpdu(ar, &amsdu, status); 2022 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
1956 ath10k_htt_rx_h_deliver(ar, &amsdu, status); 2023 ath10k_htt_rx_h_deliver(ar, &amsdu, status);
1957 break; 2024 break;
1958 case -EAGAIN: 2025 case -EAGAIN:
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index bed8d89fe3a0..916b9b12edd2 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5285,9 +5285,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
5285 sta->addr, smps, err); 5285 sta->addr, smps, err);
5286 } 5286 }
5287 5287
5288 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED || 5288 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
5289 changed & IEEE80211_RC_NSS_CHANGED) { 5289 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
5290 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
5291 sta->addr); 5290 sta->addr);
5292 5291
5293 err = ath10k_station_assoc(ar, arvif->vif, sta, true); 5292 err = ath10k_station_assoc(ar, arvif->vif, sta, true);
@@ -5497,6 +5496,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
5497 "mac vdev %d peer delete %pM (sta gone)\n", 5496 "mac vdev %d peer delete %pM (sta gone)\n",
5498 arvif->vdev_id, sta->addr); 5497 arvif->vdev_id, sta->addr);
5499 5498
5499 if (sta->tdls) {
5500 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
5501 sta,
5502 WMI_TDLS_PEER_STATE_TEARDOWN);
5503 if (ret)
5504 ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
5505 sta->addr,
5506 WMI_TDLS_PEER_STATE_TEARDOWN, ret);
5507 }
5508
5500 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 5509 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5501 if (ret) 5510 if (ret)
5502 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", 5511 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
@@ -6302,10 +6311,20 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
6302{ 6311{
6303 struct ath10k *ar = hw->priv; 6312 struct ath10k *ar = hw->priv;
6304 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6313 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6314 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6315 struct ath10k_peer *peer;
6305 u32 bw, smps; 6316 u32 bw, smps;
6306 6317
6307 spin_lock_bh(&ar->data_lock); 6318 spin_lock_bh(&ar->data_lock);
6308 6319
6320 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
6321 if (!peer) {
6322 spin_unlock_bh(&ar->data_lock);
6323 ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
6324 sta->addr, arvif->vdev_id);
6325 return;
6326 }
6327
6309 ath10k_dbg(ar, ATH10K_DBG_MAC, 6328 ath10k_dbg(ar, ATH10K_DBG_MAC,
6310 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", 6329 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
6311 sta->addr, changed, sta->bandwidth, sta->rx_nss, 6330 sta->addr, changed, sta->bandwidth, sta->rx_nss,
@@ -6427,7 +6446,7 @@ ath10k_mac_update_rx_channel(struct ath10k *ar,
6427 lockdep_assert_held(&ar->data_lock); 6446 lockdep_assert_held(&ar->data_lock);
6428 6447
6429 WARN_ON(ctx && vifs); 6448 WARN_ON(ctx && vifs);
6430 WARN_ON(vifs && n_vifs != 1); 6449 WARN_ON(vifs && !n_vifs);
6431 6450
6432 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are 6451 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
6433 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each 6452 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 72a4ef709577..a8b2553e8988 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -4826,7 +4826,8 @@ enum wmi_10_4_vdev_param {
4826#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3) 4826#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
4827 4827
4828#define WMI_TXBF_STS_CAP_OFFSET_LSB 4 4828#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
4829#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0 4829#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
4830#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7)
4830#define WMI_BF_SOUND_DIM_OFFSET_LSB 8 4831#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
4831#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00 4832#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00
4832 4833
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 654a1e33f827..7c5f189cace7 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -939,7 +939,10 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
939 } 939 }
940 940
941 for (i = 0; i < eesize; ++i) { 941 for (i = 0; i < eesize; ++i) {
942 AR5K_EEPROM_READ(i, val); 942 if (!ath5k_hw_nvram_read(ah, i, &val)) {
943 ret = -EIO;
944 goto freebuf;
945 }
943 buf[i] = val; 946 buf[i] = val;
944 } 947 }
945 948
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 41382f89abe1..4435c7bbb625 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1595,6 +1595,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
1595 int count = 50; 1595 int count = 50;
1596 u32 reg, last_val; 1596 u32 reg, last_val;
1597 1597
1598 /* Check if chip failed to wake up */
1599 if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
1600 return false;
1601
1598 if (AR_SREV_9300(ah)) 1602 if (AR_SREV_9300(ah))
1599 return !ath9k_hw_detect_mac_hang(ah); 1603 return !ath9k_hw_detect_mac_hang(ah);
1600 1604
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 06ea6cc9e30a..62077bda8dde 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -254,8 +254,12 @@ bool ath_is_49ghz_allowed(u16 regdomain)
254EXPORT_SYMBOL(ath_is_49ghz_allowed); 254EXPORT_SYMBOL(ath_is_49ghz_allowed);
255 255
256/* Frequency is one where radar detection is required */ 256/* Frequency is one where radar detection is required */
257static bool ath_is_radar_freq(u16 center_freq) 257static bool ath_is_radar_freq(u16 center_freq,
258 struct ath_regulatory *reg)
259
258{ 260{
261 if (reg->country_code == CTRY_INDIA)
262 return (center_freq >= 5500 && center_freq <= 5700);
259 return (center_freq >= 5260 && center_freq <= 5700); 263 return (center_freq >= 5260 && center_freq <= 5700);
260} 264}
261 265
@@ -306,7 +310,7 @@ __ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
306 enum nl80211_reg_initiator initiator, 310 enum nl80211_reg_initiator initiator,
307 struct ieee80211_channel *ch) 311 struct ieee80211_channel *ch)
308{ 312{
309 if (ath_is_radar_freq(ch->center_freq) || 313 if (ath_is_radar_freq(ch->center_freq, reg) ||
310 (ch->flags & IEEE80211_CHAN_RADAR)) 314 (ch->flags & IEEE80211_CHAN_RADAR))
311 return; 315 return;
312 316
@@ -395,8 +399,9 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy,
395 } 399 }
396} 400}
397 401
398/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */ 402/* Always apply Radar/DFS rules on freq range 5500 MHz - 5700 MHz */
399static void ath_reg_apply_radar_flags(struct wiphy *wiphy) 403static void ath_reg_apply_radar_flags(struct wiphy *wiphy,
404 struct ath_regulatory *reg)
400{ 405{
401 struct ieee80211_supported_band *sband; 406 struct ieee80211_supported_band *sband;
402 struct ieee80211_channel *ch; 407 struct ieee80211_channel *ch;
@@ -409,7 +414,7 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
409 414
410 for (i = 0; i < sband->n_channels; i++) { 415 for (i = 0; i < sband->n_channels; i++) {
411 ch = &sband->channels[i]; 416 ch = &sband->channels[i];
412 if (!ath_is_radar_freq(ch->center_freq)) 417 if (!ath_is_radar_freq(ch->center_freq, reg))
413 continue; 418 continue;
414 /* We always enable radar detection/DFS on this 419 /* We always enable radar detection/DFS on this
415 * frequency range. Additionally we also apply on 420 * frequency range. Additionally we also apply on
@@ -505,7 +510,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
505 struct ath_common *common = container_of(reg, struct ath_common, 510 struct ath_common *common = container_of(reg, struct ath_common,
506 regulatory); 511 regulatory);
507 /* We always apply this */ 512 /* We always apply this */
508 ath_reg_apply_radar_flags(wiphy); 513 ath_reg_apply_radar_flags(wiphy, reg);
509 514
510 /* 515 /*
511 * This would happen when we have sent a custom regulatory request 516 * This would happen when we have sent a custom regulatory request
@@ -653,7 +658,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
653 } 658 }
654 659
655 wiphy_apply_custom_regulatory(wiphy, regd); 660 wiphy_apply_custom_regulatory(wiphy, regd);
656 ath_reg_apply_radar_flags(wiphy); 661 ath_reg_apply_radar_flags(wiphy, reg);
657 ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg); 662 ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
658 return 0; 663 return 0;
659} 664}
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 37f53bd8fcb1..184b6810cde9 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -68,12 +68,14 @@ enum CountryCode {
68 CTRY_AUSTRALIA = 36, 68 CTRY_AUSTRALIA = 36,
69 CTRY_AUSTRIA = 40, 69 CTRY_AUSTRIA = 40,
70 CTRY_AZERBAIJAN = 31, 70 CTRY_AZERBAIJAN = 31,
71 CTRY_BAHAMAS = 44,
71 CTRY_BAHRAIN = 48, 72 CTRY_BAHRAIN = 48,
72 CTRY_BANGLADESH = 50, 73 CTRY_BANGLADESH = 50,
73 CTRY_BARBADOS = 52, 74 CTRY_BARBADOS = 52,
74 CTRY_BELARUS = 112, 75 CTRY_BELARUS = 112,
75 CTRY_BELGIUM = 56, 76 CTRY_BELGIUM = 56,
76 CTRY_BELIZE = 84, 77 CTRY_BELIZE = 84,
78 CTRY_BERMUDA = 60,
77 CTRY_BOLIVIA = 68, 79 CTRY_BOLIVIA = 68,
78 CTRY_BOSNIA_HERZ = 70, 80 CTRY_BOSNIA_HERZ = 70,
79 CTRY_BRAZIL = 76, 81 CTRY_BRAZIL = 76,
@@ -159,6 +161,7 @@ enum CountryCode {
159 CTRY_ROMANIA = 642, 161 CTRY_ROMANIA = 642,
160 CTRY_RUSSIA = 643, 162 CTRY_RUSSIA = 643,
161 CTRY_SAUDI_ARABIA = 682, 163 CTRY_SAUDI_ARABIA = 682,
164 CTRY_SERBIA = 688,
162 CTRY_SERBIA_MONTENEGRO = 891, 165 CTRY_SERBIA_MONTENEGRO = 891,
163 CTRY_SINGAPORE = 702, 166 CTRY_SINGAPORE = 702,
164 CTRY_SLOVAKIA = 703, 167 CTRY_SLOVAKIA = 703,
@@ -170,11 +173,13 @@ enum CountryCode {
170 CTRY_SWITZERLAND = 756, 173 CTRY_SWITZERLAND = 756,
171 CTRY_SYRIA = 760, 174 CTRY_SYRIA = 760,
172 CTRY_TAIWAN = 158, 175 CTRY_TAIWAN = 158,
176 CTRY_TANZANIA = 834,
173 CTRY_THAILAND = 764, 177 CTRY_THAILAND = 764,
174 CTRY_TRINIDAD_Y_TOBAGO = 780, 178 CTRY_TRINIDAD_Y_TOBAGO = 780,
175 CTRY_TUNISIA = 788, 179 CTRY_TUNISIA = 788,
176 CTRY_TURKEY = 792, 180 CTRY_TURKEY = 792,
177 CTRY_UAE = 784, 181 CTRY_UAE = 784,
182 CTRY_UGANDA = 800,
178 CTRY_UKRAINE = 804, 183 CTRY_UKRAINE = 804,
179 CTRY_UNITED_KINGDOM = 826, 184 CTRY_UNITED_KINGDOM = 826,
180 CTRY_UNITED_STATES = 840, 185 CTRY_UNITED_STATES = 840,
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index bdd2b4d61f2f..15bbd1e0d912 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -35,6 +35,7 @@ enum EnumRd {
35 FRANCE_RES = 0x31, 35 FRANCE_RES = 0x31,
36 FCC3_FCCA = 0x3A, 36 FCC3_FCCA = 0x3A,
37 FCC3_WORLD = 0x3B, 37 FCC3_WORLD = 0x3B,
38 FCC3_ETSIC = 0x3F,
38 39
39 ETSI1_WORLD = 0x37, 40 ETSI1_WORLD = 0x37,
40 ETSI3_ETSIA = 0x32, 41 ETSI3_ETSIA = 0x32,
@@ -44,6 +45,7 @@ enum EnumRd {
44 ETSI4_ETSIC = 0x38, 45 ETSI4_ETSIC = 0x38,
45 ETSI5_WORLD = 0x39, 46 ETSI5_WORLD = 0x39,
46 ETSI6_WORLD = 0x34, 47 ETSI6_WORLD = 0x34,
48 ETSI8_WORLD = 0x3D,
47 ETSI_RESERVED = 0x33, 49 ETSI_RESERVED = 0x33,
48 50
49 MKK1_MKKA = 0x40, 51 MKK1_MKKA = 0x40,
@@ -59,6 +61,7 @@ enum EnumRd {
59 MKK1_MKKA1 = 0x4A, 61 MKK1_MKKA1 = 0x4A,
60 MKK1_MKKA2 = 0x4B, 62 MKK1_MKKA2 = 0x4B,
61 MKK1_MKKC = 0x4C, 63 MKK1_MKKC = 0x4C,
64 APL2_FCCA = 0x4D,
62 65
63 APL3_FCCA = 0x50, 66 APL3_FCCA = 0x50,
64 APL1_WORLD = 0x52, 67 APL1_WORLD = 0x52,
@@ -67,6 +70,7 @@ enum EnumRd {
67 APL1_ETSIC = 0x55, 70 APL1_ETSIC = 0x55,
68 APL2_ETSIC = 0x56, 71 APL2_ETSIC = 0x56,
69 APL5_WORLD = 0x58, 72 APL5_WORLD = 0x58,
73 APL13_WORLD = 0x5A,
70 APL6_WORLD = 0x5B, 74 APL6_WORLD = 0x5B,
71 APL7_FCCA = 0x5C, 75 APL7_FCCA = 0x5C,
72 APL8_WORLD = 0x5D, 76 APL8_WORLD = 0x5D,
@@ -168,6 +172,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
168 {FCC2_ETSIC, CTL_FCC, CTL_ETSI}, 172 {FCC2_ETSIC, CTL_FCC, CTL_ETSI},
169 {FCC3_FCCA, CTL_FCC, CTL_FCC}, 173 {FCC3_FCCA, CTL_FCC, CTL_FCC},
170 {FCC3_WORLD, CTL_FCC, CTL_ETSI}, 174 {FCC3_WORLD, CTL_FCC, CTL_ETSI},
175 {FCC3_ETSIC, CTL_FCC, CTL_ETSI},
171 {FCC4_FCCA, CTL_FCC, CTL_FCC}, 176 {FCC4_FCCA, CTL_FCC, CTL_FCC},
172 {FCC5_FCCA, CTL_FCC, CTL_FCC}, 177 {FCC5_FCCA, CTL_FCC, CTL_FCC},
173 {FCC6_FCCA, CTL_FCC, CTL_FCC}, 178 {FCC6_FCCA, CTL_FCC, CTL_FCC},
@@ -179,6 +184,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
179 {ETSI4_WORLD, CTL_ETSI, CTL_ETSI}, 184 {ETSI4_WORLD, CTL_ETSI, CTL_ETSI},
180 {ETSI5_WORLD, CTL_ETSI, CTL_ETSI}, 185 {ETSI5_WORLD, CTL_ETSI, CTL_ETSI},
181 {ETSI6_WORLD, CTL_ETSI, CTL_ETSI}, 186 {ETSI6_WORLD, CTL_ETSI, CTL_ETSI},
187 {ETSI8_WORLD, CTL_ETSI, CTL_ETSI},
182 188
183 /* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */ 189 /* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */
184 {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI}, 190 {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI},
@@ -188,9 +194,11 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
188 {FCC1_FCCA, CTL_FCC, CTL_FCC}, 194 {FCC1_FCCA, CTL_FCC, CTL_FCC},
189 {APL1_WORLD, CTL_FCC, CTL_ETSI}, 195 {APL1_WORLD, CTL_FCC, CTL_ETSI},
190 {APL2_WORLD, CTL_FCC, CTL_ETSI}, 196 {APL2_WORLD, CTL_FCC, CTL_ETSI},
197 {APL2_FCCA, CTL_FCC, CTL_FCC},
191 {APL3_WORLD, CTL_FCC, CTL_ETSI}, 198 {APL3_WORLD, CTL_FCC, CTL_ETSI},
192 {APL4_WORLD, CTL_FCC, CTL_ETSI}, 199 {APL4_WORLD, CTL_FCC, CTL_ETSI},
193 {APL5_WORLD, CTL_FCC, CTL_ETSI}, 200 {APL5_WORLD, CTL_FCC, CTL_ETSI},
201 {APL13_WORLD, CTL_ETSI, CTL_ETSI},
194 {APL6_WORLD, CTL_ETSI, CTL_ETSI}, 202 {APL6_WORLD, CTL_ETSI, CTL_ETSI},
195 {APL8_WORLD, CTL_ETSI, CTL_ETSI}, 203 {APL8_WORLD, CTL_ETSI, CTL_ETSI},
196 {APL9_WORLD, CTL_ETSI, CTL_ETSI}, 204 {APL9_WORLD, CTL_ETSI, CTL_ETSI},
@@ -298,6 +306,7 @@ static struct country_code_to_enum_rd allCountries[] = {
298 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"}, 306 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"},
299 {CTRY_AUSTRIA, ETSI1_WORLD, "AT"}, 307 {CTRY_AUSTRIA, ETSI1_WORLD, "AT"},
300 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"}, 308 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"},
309 {CTRY_BAHAMAS, FCC3_WORLD, "BS"},
301 {CTRY_BAHRAIN, APL6_WORLD, "BH"}, 310 {CTRY_BAHRAIN, APL6_WORLD, "BH"},
302 {CTRY_BANGLADESH, NULL1_WORLD, "BD"}, 311 {CTRY_BANGLADESH, NULL1_WORLD, "BD"},
303 {CTRY_BARBADOS, FCC2_WORLD, "BB"}, 312 {CTRY_BARBADOS, FCC2_WORLD, "BB"},
@@ -305,6 +314,7 @@ static struct country_code_to_enum_rd allCountries[] = {
305 {CTRY_BELGIUM, ETSI1_WORLD, "BE"}, 314 {CTRY_BELGIUM, ETSI1_WORLD, "BE"},
306 {CTRY_BELGIUM2, ETSI4_WORLD, "BL"}, 315 {CTRY_BELGIUM2, ETSI4_WORLD, "BL"},
307 {CTRY_BELIZE, APL1_ETSIC, "BZ"}, 316 {CTRY_BELIZE, APL1_ETSIC, "BZ"},
317 {CTRY_BERMUDA, FCC3_FCCA, "BM"},
308 {CTRY_BOLIVIA, APL1_ETSIC, "BO"}, 318 {CTRY_BOLIVIA, APL1_ETSIC, "BO"},
309 {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"}, 319 {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"},
310 {CTRY_BRAZIL, FCC3_WORLD, "BR"}, 320 {CTRY_BRAZIL, FCC3_WORLD, "BR"},
@@ -444,6 +454,7 @@ static struct country_code_to_enum_rd allCountries[] = {
444 {CTRY_ROMANIA, NULL1_WORLD, "RO"}, 454 {CTRY_ROMANIA, NULL1_WORLD, "RO"},
445 {CTRY_RUSSIA, NULL1_WORLD, "RU"}, 455 {CTRY_RUSSIA, NULL1_WORLD, "RU"},
446 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, 456 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"},
457 {CTRY_SERBIA, ETSI1_WORLD, "RS"},
447 {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"}, 458 {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"},
448 {CTRY_SINGAPORE, APL6_WORLD, "SG"}, 459 {CTRY_SINGAPORE, APL6_WORLD, "SG"},
449 {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"}, 460 {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"},
@@ -455,10 +466,12 @@ static struct country_code_to_enum_rd allCountries[] = {
455 {CTRY_SWITZERLAND, ETSI1_WORLD, "CH"}, 466 {CTRY_SWITZERLAND, ETSI1_WORLD, "CH"},
456 {CTRY_SYRIA, NULL1_WORLD, "SY"}, 467 {CTRY_SYRIA, NULL1_WORLD, "SY"},
457 {CTRY_TAIWAN, APL3_FCCA, "TW"}, 468 {CTRY_TAIWAN, APL3_FCCA, "TW"},
469 {CTRY_TANZANIA, APL1_WORLD, "TZ"},
458 {CTRY_THAILAND, FCC3_WORLD, "TH"}, 470 {CTRY_THAILAND, FCC3_WORLD, "TH"},
459 {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"}, 471 {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"},
460 {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, 472 {CTRY_TUNISIA, ETSI3_WORLD, "TN"},
461 {CTRY_TURKEY, ETSI3_WORLD, "TR"}, 473 {CTRY_TURKEY, ETSI3_WORLD, "TR"},
474 {CTRY_UGANDA, FCC3_WORLD, "UG"},
462 {CTRY_UKRAINE, NULL1_WORLD, "UA"}, 475 {CTRY_UKRAINE, NULL1_WORLD, "UA"},
463 {CTRY_UAE, NULL1_WORLD, "AE"}, 476 {CTRY_UAE, NULL1_WORLD, "AE"},
464 {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"}, 477 {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"},
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 9bec8237231d..99c21aac68bd 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -57,7 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
57 RX_FLAG_MMIC_STRIPPED | 57 RX_FLAG_MMIC_STRIPPED |
58 RX_FLAG_DECRYPTED; 58 RX_FLAG_DECRYPTED;
59 59
60 wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag); 60 wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%llx\n", status.flag);
61 61
62 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 62 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
63 63
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 85bca557a339..f09fafaaaf1a 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -125,9 +125,15 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
125 u32 *d = dst; 125 u32 *d = dst;
126 const volatile u32 __iomem *s = src; 126 const volatile u32 __iomem *s = src;
127 127
128 /* size_t is unsigned, if (count%4 != 0) it will wrap */ 128 for (; count >= 4; count -= 4)
129 for (count += 4; count > 4; count -= 4)
130 *d++ = __raw_readl(s++); 129 *d++ = __raw_readl(s++);
130
131 if (unlikely(count)) {
132 /* count can be 1..3 */
133 u32 tmp = __raw_readl(s);
134
135 memcpy(d, &tmp, count);
136 }
131} 137}
132 138
133void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src, 139void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
@@ -136,8 +142,16 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
136 volatile u32 __iomem *d = dst; 142 volatile u32 __iomem *d = dst;
137 const u32 *s = src; 143 const u32 *s = src;
138 144
139 for (count += 4; count > 4; count -= 4) 145 for (; count >= 4; count -= 4)
140 __raw_writel(*s++, d++); 146 __raw_writel(*s++, d++);
147
148 if (unlikely(count)) {
149 /* count can be 1..3 */
150 u32 tmp = 0;
151
152 memcpy(&tmp, s, count);
153 __raw_writel(tmp, d);
154 }
141} 155}
142 156
143static void wil_disconnect_cid(struct wil6210_priv *wil, int cid, 157static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 59cef6c69fe8..91da67657f81 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -1109,6 +1109,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
1109 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340), 1109 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
1110 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341), 1110 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
1111 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362), 1111 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
1112 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
1112 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339), 1113 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
1113 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430), 1114 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
1114 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345), 1115 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 83e5aa6a9f28..ad35e760ed3f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -6167,7 +6167,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
6167 req->alpha2[0], req->alpha2[1]); 6167 req->alpha2[0], req->alpha2[1]);
6168 6168
6169 /* ignore non-ISO3166 country codes */ 6169 /* ignore non-ISO3166 country codes */
6170 for (i = 0; i < sizeof(req->alpha2); i++) 6170 for (i = 0; i < 2; i++)
6171 if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') { 6171 if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
6172 brcmf_err("not a ISO3166 code\n"); 6172 brcmf_err("not a ISO3166 code\n");
6173 return; 6173 return;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index d224b3dd72ed..3196245ab820 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -461,25 +461,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
461 * @dev_addr: optional device address. 461 * @dev_addr: optional device address.
462 * 462 *
463 * P2P needs mac addresses for P2P device and interface. If no device 463 * P2P needs mac addresses for P2P device and interface. If no device
464 * address it specified, these are derived from the primary net device, ie. 464 * address it specified, these are derived from a random ethernet
465 * the permanent ethernet address of the device. 465 * address.
466 */ 466 */
467static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr) 467static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
468{ 468{
469 struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; 469 bool random_addr = false;
470 bool local_admin = false;
471 470
472 if (!dev_addr || is_zero_ether_addr(dev_addr)) { 471 if (!dev_addr || is_zero_ether_addr(dev_addr))
473 dev_addr = pri_ifp->mac_addr; 472 random_addr = true;
474 local_admin = true;
475 }
476 473
477 /* Generate the P2P Device Address. This consists of the device's 474 /* Generate the P2P Device Address obtaining a random ethernet
478 * primary MAC address with the locally administered bit set. 475 * address with the locally administered bit set.
479 */ 476 */
480 memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); 477 if (random_addr)
481 if (local_admin) 478 eth_random_addr(p2p->dev_addr);
482 p2p->dev_addr[0] |= 0x02; 479 else
480 memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
483 481
484 /* Generate the P2P Interface Address. If the discovery and connection 482 /* Generate the P2P Interface Address. If the discovery and connection
485 * BSSCFGs need to simultaneously co-exist, then this address must be 483 * BSSCFGs need to simultaneously co-exist, then this address must be
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index a740083634d8..63f95e9c2992 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -446,8 +446,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
446 return 0; 446 return 0;
447} 447}
448 448
449#ifdef CONFIG_PM 449static int __maybe_unused cw1200_spi_suspend(struct device *dev)
450static int cw1200_spi_suspend(struct device *dev)
451{ 450{
452 struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev)); 451 struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev));
453 452
@@ -460,16 +459,12 @@ static int cw1200_spi_suspend(struct device *dev)
460 459
461static SIMPLE_DEV_PM_OPS(cw1200_pm_ops, cw1200_spi_suspend, NULL); 460static SIMPLE_DEV_PM_OPS(cw1200_pm_ops, cw1200_spi_suspend, NULL);
462 461
463#endif
464
465static struct spi_driver spi_driver = { 462static struct spi_driver spi_driver = {
466 .probe = cw1200_spi_probe, 463 .probe = cw1200_spi_probe,
467 .remove = cw1200_spi_disconnect, 464 .remove = cw1200_spi_disconnect,
468 .driver = { 465 .driver = {
469 .name = "cw1200_wlan_spi", 466 .name = "cw1200_wlan_spi",
470#ifdef CONFIG_PM 467 .pm = IS_ENABLED(CONFIG_PM) ? &cw1200_pm_ops : NULL,
471 .pm = &cw1200_pm_ops,
472#endif
473 }, 468 },
474}; 469};
475 470
diff --git a/drivers/net/wireless/cw1200/pm.h b/drivers/net/wireless/cw1200/pm.h
index 3ed90ff22bb8..534548470ebc 100644
--- a/drivers/net/wireless/cw1200/pm.h
+++ b/drivers/net/wireless/cw1200/pm.h
@@ -31,13 +31,18 @@ int cw1200_pm_init(struct cw1200_pm_state *pm,
31void cw1200_pm_deinit(struct cw1200_pm_state *pm); 31void cw1200_pm_deinit(struct cw1200_pm_state *pm);
32int cw1200_wow_suspend(struct ieee80211_hw *hw, 32int cw1200_wow_suspend(struct ieee80211_hw *hw,
33 struct cfg80211_wowlan *wowlan); 33 struct cfg80211_wowlan *wowlan);
34int cw1200_wow_resume(struct ieee80211_hw *hw);
35int cw1200_can_suspend(struct cw1200_common *priv); 34int cw1200_can_suspend(struct cw1200_common *priv);
35int cw1200_wow_resume(struct ieee80211_hw *hw);
36void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, 36void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
37 unsigned long tmo); 37 unsigned long tmo);
38#else 38#else
39static inline void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, 39static inline void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
40 unsigned long tmo) { 40 unsigned long tmo)
41{
42}
43static inline int cw1200_can_suspend(struct cw1200_common *priv)
44{
45 return 0;
41} 46}
42#endif 47#endif
43#endif 48#endif
diff --git a/drivers/net/wireless/cw1200/wsm.c b/drivers/net/wireless/cw1200/wsm.c
index 9e0ca3048657..3dd46c78c1cc 100644
--- a/drivers/net/wireless/cw1200/wsm.c
+++ b/drivers/net/wireless/cw1200/wsm.c
@@ -379,7 +379,6 @@ static int wsm_multi_tx_confirm(struct cw1200_common *priv,
379{ 379{
380 int ret; 380 int ret;
381 int count; 381 int count;
382 int i;
383 382
384 count = WSM_GET32(buf); 383 count = WSM_GET32(buf);
385 if (WARN_ON(count <= 0)) 384 if (WARN_ON(count <= 0))
@@ -395,11 +394,10 @@ static int wsm_multi_tx_confirm(struct cw1200_common *priv,
395 } 394 }
396 395
397 cw1200_debug_txed_multi(priv, count); 396 cw1200_debug_txed_multi(priv, count);
398 for (i = 0; i < count; ++i) { 397 do {
399 ret = wsm_tx_confirm(priv, buf, link_id); 398 ret = wsm_tx_confirm(priv, buf, link_id);
400 if (ret) 399 } while (!ret && --count);
401 return ret; 400
402 }
403 return ret; 401 return ret;
404 402
405underflow: 403underflow:
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index e06591f625c4..d6f9858ff2de 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -713,6 +713,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
713 WQ_HIGHPRI | WQ_UNBOUND, 1); 713 WQ_HIGHPRI | WQ_UNBOUND, 1);
714 INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); 714 INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
715 715
716 cancel_work_sync(&rba->rx_alloc);
717
716 spin_lock(&rba->lock); 718 spin_lock(&rba->lock);
717 atomic_set(&rba->req_pending, 0); 719 atomic_set(&rba->req_pending, 0);
718 atomic_set(&rba->req_ready, 0); 720 atomic_set(&rba->req_ready, 0);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index f877fbc7d7af..e8b770a95f7a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -699,16 +699,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
699 val != PS_MANUAL_POLL) 699 val != PS_MANUAL_POLL)
700 return -EINVAL; 700 return -EINVAL;
701 701
702 old_ps = data->ps;
703 data->ps = val;
704
705 local_bh_disable();
706 if (val == PS_MANUAL_POLL) { 702 if (val == PS_MANUAL_POLL) {
703 if (data->ps != PS_ENABLED)
704 return -EINVAL;
705 local_bh_disable();
707 ieee80211_iterate_active_interfaces_atomic( 706 ieee80211_iterate_active_interfaces_atomic(
708 data->hw, IEEE80211_IFACE_ITER_NORMAL, 707 data->hw, IEEE80211_IFACE_ITER_NORMAL,
709 hwsim_send_ps_poll, data); 708 hwsim_send_ps_poll, data);
710 data->ps_poll_pending = true; 709 local_bh_enable();
711 } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { 710 return 0;
711 }
712 old_ps = data->ps;
713 data->ps = val;
714
715 local_bh_disable();
716 if (old_ps == PS_DISABLED && val != PS_DISABLED) {
712 ieee80211_iterate_active_interfaces_atomic( 717 ieee80211_iterate_active_interfaces_atomic(
713 data->hw, IEEE80211_IFACE_ITER_NORMAL, 718 data->hw, IEEE80211_IFACE_ITER_NORMAL,
714 hwsim_send_nullfunc_ps, data); 719 hwsim_send_nullfunc_ps, data);
@@ -2920,8 +2925,10 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
2920 if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) { 2925 if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
2921 u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]); 2926 u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
2922 2927
2923 if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) 2928 if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
2929 kfree(hwname);
2924 return -EINVAL; 2930 return -EINVAL;
2931 }
2925 param.regd = hwsim_world_regdom_custom[idx]; 2932 param.regd = hwsim_world_regdom_custom[idx];
2926 } 2933 }
2927 2934
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
index fbb1986eda3c..686b1b5dd394 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mcu.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -66,8 +66,10 @@ mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len)
66 WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */ 66 WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
67 67
68 skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL); 68 skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
69 skb_reserve(skb, MT_DMA_HDR_LEN); 69 if (skb) {
70 memcpy(skb_put(skb, len), data, len); 70 skb_reserve(skb, MT_DMA_HDR_LEN);
71 memcpy(skb_put(skb, len), data, len);
72 }
71 73
72 return skb; 74 return skb;
73} 75}
@@ -170,6 +172,8 @@ static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
170 }; 172 };
171 173
172 skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); 174 skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
175 if (!skb)
176 return -ENOMEM;
173 return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5); 177 return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
174} 178}
175 179
@@ -205,6 +209,8 @@ mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
205 }; 209 };
206 210
207 skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); 211 skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
212 if (!skb)
213 return -ENOMEM;
208 return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true); 214 return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
209} 215}
210 216
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index e43aff932360..1a1b1de87583 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -624,6 +624,9 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
624 MWIFIEX_FUNC_SHUTDOWN); 624 MWIFIEX_FUNC_SHUTDOWN);
625 } 625 }
626 626
627 if (adapter->workqueue)
628 flush_workqueue(adapter->workqueue);
629
627 mwifiex_usb_free(card); 630 mwifiex_usb_free(card);
628 631
629 mwifiex_dbg(adapter, FATAL, 632 mwifiex_dbg(adapter, FATAL,
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 0cec8a64473e..eb5ffa5b1c6c 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -702,12 +702,14 @@ void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr,
702 s8 nflr) 702 s8 nflr)
703{ 703{
704 struct mwifiex_histogram_data *phist_data = priv->hist_data; 704 struct mwifiex_histogram_data *phist_data = priv->hist_data;
705 s8 nf = -nflr;
706 s8 rssi = snr - nflr;
705 707
706 atomic_inc(&phist_data->num_samples); 708 atomic_inc(&phist_data->num_samples);
707 atomic_inc(&phist_data->rx_rate[rx_rate]); 709 atomic_inc(&phist_data->rx_rate[rx_rate]);
708 atomic_inc(&phist_data->snr[snr]); 710 atomic_inc(&phist_data->snr[snr + 128]);
709 atomic_inc(&phist_data->noise_flr[128 + nflr]); 711 atomic_inc(&phist_data->noise_flr[nf + 128]);
710 atomic_inc(&phist_data->sig_str[nflr - snr]); 712 atomic_inc(&phist_data->sig_str[rssi + 128]);
711} 713}
712 714
713/* function to reset histogram data during init/reset */ 715/* function to reset histogram data during init/reset */
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0881ba8535f4..c78abfc7bd96 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -247,7 +247,10 @@ static const UCHAR b4_default_startup_parms[] = {
247 0x04, 0x08, /* Noise gain, limit offset */ 247 0x04, 0x08, /* Noise gain, limit offset */
248 0x28, 0x28, /* det rssi, med busy offsets */ 248 0x28, 0x28, /* det rssi, med busy offsets */
249 7, /* det sync thresh */ 249 7, /* det sync thresh */
250 0, 2, 2 /* test mode, min, max */ 250 0, 2, 2, /* test mode, min, max */
251 0, /* rx/tx delay */
252 0, 0, 0, 0, 0, 0, /* current BSS id */
253 0 /* hop set */
251}; 254};
252 255
253/*===========================================================================*/ 256/*===========================================================================*/
@@ -598,7 +601,7 @@ static void init_startup_params(ray_dev_t *local)
598 * a_beacon_period = hops a_beacon_period = KuS 601 * a_beacon_period = hops a_beacon_period = KuS
599 *//* 64ms = 010000 */ 602 *//* 64ms = 010000 */
600 if (local->fw_ver == 0x55) { 603 if (local->fw_ver == 0x55) {
601 memcpy((UCHAR *) &local->sparm.b4, b4_default_startup_parms, 604 memcpy(&local->sparm.b4, b4_default_startup_parms,
602 sizeof(struct b4_startup_params)); 605 sizeof(struct b4_startup_params));
603 /* Translate sane kus input values to old build 4/5 format */ 606 /* Translate sane kus input values to old build 4/5 format */
604 /* i = hop time in uS truncated to 3 bytes */ 607 /* i = hop time in uS truncated to 3 bytes */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index b7f72f9c7988..b3691712df61 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1454,6 +1454,7 @@ static int rtl8187_probe(struct usb_interface *intf,
1454 goto err_free_dev; 1454 goto err_free_dev;
1455 } 1455 }
1456 mutex_init(&priv->io_mutex); 1456 mutex_init(&priv->io_mutex);
1457 mutex_init(&priv->conf_mutex);
1457 1458
1458 SET_IEEE80211_DEV(dev, &intf->dev); 1459 SET_IEEE80211_DEV(dev, &intf->dev);
1459 usb_set_intfdata(intf, dev); 1460 usb_set_intfdata(intf, dev);
@@ -1627,7 +1628,6 @@ static int rtl8187_probe(struct usb_interface *intf,
1627 printk(KERN_ERR "rtl8187: Cannot register device\n"); 1628 printk(KERN_ERR "rtl8187: Cannot register device\n");
1628 goto err_free_dmabuf; 1629 goto err_free_dmabuf;
1629 } 1630 }
1630 mutex_init(&priv->conf_mutex);
1631 skb_queue_head_init(&priv->b_tx_status.queue); 1631 skb_queue_head_init(&priv->b_tx_status.queue);
1632 1632
1633 wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n", 1633 wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 8b537a5a4b01..8006f0972ad1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -135,7 +135,6 @@ found_alt:
135 firmware->size); 135 firmware->size);
136 rtlpriv->rtlhal.wowlan_fwsize = firmware->size; 136 rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
137 } 137 }
138 rtlpriv->rtlhal.fwsize = firmware->size;
139 release_firmware(firmware); 138 release_firmware(firmware);
140} 139}
141 140
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index c48b7e8ee0d6..b51815eccdb3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -1572,7 +1572,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1572 dev_kfree_skb_irq(skb); 1572 dev_kfree_skb_irq(skb);
1573 ring->idx = (ring->idx + 1) % ring->entries; 1573 ring->idx = (ring->idx + 1) % ring->entries;
1574 } 1574 }
1575
1576 if (rtlpriv->use_new_trx_flow) {
1577 rtlpci->tx_ring[i].cur_tx_rp = 0;
1578 rtlpci->tx_ring[i].cur_tx_wp = 0;
1579 }
1580
1575 ring->idx = 0; 1581 ring->idx = 0;
1582 ring->entries = rtlpci->txringcount[i];
1576 } 1583 }
1577 } 1584 }
1578 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); 1585 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
index 5624ade92cc0..c2a156a8acec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
@@ -304,9 +304,6 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
304 writeVal = 0x00000000; 304 writeVal = 0x00000000;
305 if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) 305 if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
306 writeVal = writeVal - 0x06060606; 306 writeVal = writeVal - 0x06060606;
307 else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
308 TXHIGHPWRLEVEL_BT2)
309 writeVal = writeVal;
310 *(p_outwriteval + rf) = writeVal; 307 *(p_outwriteval + rf) = writeVal;
311 } 308 }
312} 309}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 5a3df9198ddf..89515f02c353 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -1123,7 +1123,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw)
1123 1123
1124 /* Configuration Space offset 0x70f BIT7 is used to control L0S */ 1124 /* Configuration Space offset 0x70f BIT7 is used to control L0S */
1125 tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); 1125 tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f);
1126 _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7)); 1126 _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) |
1127 ASPM_L1_LATENCY << 3);
1127 1128
1128 /* Configuration Space offset 0x719 Bit3 is for L1 1129 /* Configuration Space offset 0x719 Bit3 is for L1
1129 * BIT4 is for clock request 1130 * BIT4 is for clock request
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index b57cfd965196..7b13962ec9da 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
2488 for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) 2488 for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
2489 rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p]; 2489 rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
2490 2490
2491 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 2491 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
2492 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n", 2492 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
2493 rtldm->thermalvalue, thermal_value); 2493 rtldm->thermalvalue, thermal_value);
2494 /*Record last Power Tracking Thermal Value*/ 2494 /*Record last Power Tracking Thermal Value*/
2495 rtldm->thermalvalue = thermal_value; 2495 rtldm->thermalvalue = thermal_value;
2496 } 2496 }
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 738d541a2255..348ed1b0e58b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1127,7 +1127,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
1127 } 1127 }
1128 if (0 == tmp) { 1128 if (0 == tmp) {
1129 read_addr = REG_DBI_RDATA + addr % 4; 1129 read_addr = REG_DBI_RDATA + addr % 4;
1130 ret = rtl_read_word(rtlpriv, read_addr); 1130 ret = rtl_read_byte(rtlpriv, read_addr);
1131 } 1131 }
1132 return ret; 1132 return ret;
1133} 1133}
@@ -1169,7 +1169,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw)
1169 } 1169 }
1170 1170
1171 tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f); 1171 tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f);
1172 _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7)); 1172 _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) |
1173 ASPM_L1_LATENCY << 3);
1173 1174
1174 tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719); 1175 tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719);
1175 _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4)); 1176 _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4));
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index b6faf624480e..d676d055feda 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -99,6 +99,7 @@
99#define RTL_USB_MAX_RX_COUNT 100 99#define RTL_USB_MAX_RX_COUNT 100
100#define QBSS_LOAD_SIZE 5 100#define QBSS_LOAD_SIZE 5
101#define MAX_WMMELE_LENGTH 64 101#define MAX_WMMELE_LENGTH 64
102#define ASPM_L1_LATENCY 7
102 103
103#define TOTAL_CAM_ENTRY 32 104#define TOTAL_CAM_ENTRY 32
104 105
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index a13d1f2b5912..259590013382 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -3425,6 +3425,10 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
3425 3425
3426 /* because rndis_command() sleeps we need to use workqueue */ 3426 /* because rndis_command() sleeps we need to use workqueue */
3427 priv->workqueue = create_singlethread_workqueue("rndis_wlan"); 3427 priv->workqueue = create_singlethread_workqueue("rndis_wlan");
3428 if (!priv->workqueue) {
3429 wiphy_free(wiphy);
3430 return -ENOMEM;
3431 }
3428 INIT_WORK(&priv->work, rndis_wlan_worker); 3432 INIT_WORK(&priv->work, rndis_wlan_worker);
3429 INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller); 3433 INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller);
3430 INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results); 3434 INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results);
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 8428858204a6..fc895b466ebb 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -155,7 +155,6 @@ static void rsi_reset_card(struct sdio_func *pfunction)
155 int err; 155 int err;
156 struct mmc_card *card = pfunction->card; 156 struct mmc_card *card = pfunction->card;
157 struct mmc_host *host = card->host; 157 struct mmc_host *host = card->host;
158 s32 bit = (fls(host->ocr_avail) - 1);
159 u8 cmd52_resp; 158 u8 cmd52_resp;
160 u32 clock, resp, i; 159 u32 clock, resp, i;
161 u16 rca; 160 u16 rca;
@@ -175,7 +174,6 @@ static void rsi_reset_card(struct sdio_func *pfunction)
175 msleep(20); 174 msleep(20);
176 175
177 /* Initialize the SDIO card */ 176 /* Initialize the SDIO card */
178 host->ios.vdd = bit;
179 host->ios.chip_select = MMC_CS_DONTCARE; 177 host->ios.chip_select = MMC_CS_DONTCARE;
180 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 178 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
181 host->ios.power_mode = MMC_POWER_UP; 179 host->ios.power_mode = MMC_POWER_UP;
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 9bee3f11898a..869411f55d88 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1196,8 +1196,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1196 WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); 1196 WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
1197 1197
1198 enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc; 1198 enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
1199 wl1251_acx_arp_ip_filter(wl, enable, addr); 1199 ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
1200
1201 if (ret < 0) 1200 if (ret < 0)
1202 goto out_sleep; 1201 goto out_sleep;
1203 } 1202 }
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index d0228457befe..031528669510 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -388,6 +388,11 @@ static int wl1271_suspend(struct device *dev)
388 mmc_pm_flag_t sdio_flags; 388 mmc_pm_flag_t sdio_flags;
389 int ret = 0; 389 int ret = 0;
390 390
391 if (!wl) {
392 dev_err(dev, "no wilink module was probed\n");
393 goto out;
394 }
395
391 dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n", 396 dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
392 wl->wow_enabled); 397 wl->wow_enabled);
393 398
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index fd221cc4cb79..68d0a5c9d437 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -86,6 +86,9 @@ struct netfront_cb {
86/* IRQ name is queue name with "-tx" or "-rx" appended */ 86/* IRQ name is queue name with "-tx" or "-rx" appended */
87#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 87#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
88 88
89static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
90static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
91
89struct netfront_stats { 92struct netfront_stats {
90 u64 packets; 93 u64 packets;
91 u64 bytes; 94 u64 bytes;
@@ -236,7 +239,7 @@ static void rx_refill_timeout(unsigned long data)
236static int netfront_tx_slot_available(struct netfront_queue *queue) 239static int netfront_tx_slot_available(struct netfront_queue *queue)
237{ 240{
238 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < 241 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
239 (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2); 242 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
240} 243}
241 244
242static void xennet_maybe_wake_tx(struct netfront_queue *queue) 245static void xennet_maybe_wake_tx(struct netfront_queue *queue)
@@ -340,6 +343,9 @@ static int xennet_open(struct net_device *dev)
340 unsigned int i = 0; 343 unsigned int i = 0;
341 struct netfront_queue *queue = NULL; 344 struct netfront_queue *queue = NULL;
342 345
346 if (!np->queues)
347 return -ENODEV;
348
343 for (i = 0; i < num_queues; ++i) { 349 for (i = 0; i < num_queues; ++i) {
344 queue = &np->queues[i]; 350 queue = &np->queues[i];
345 napi_enable(&queue->napi); 351 napi_enable(&queue->napi);
@@ -770,7 +776,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
770 RING_IDX cons = queue->rx.rsp_cons; 776 RING_IDX cons = queue->rx.rsp_cons;
771 struct sk_buff *skb = xennet_get_rx_skb(queue, cons); 777 struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
772 grant_ref_t ref = xennet_get_rx_ref(queue, cons); 778 grant_ref_t ref = xennet_get_rx_ref(queue, cons);
773 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); 779 int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
774 int slots = 1; 780 int slots = 1;
775 int err = 0; 781 int err = 0;
776 unsigned long ret; 782 unsigned long ret;
@@ -873,7 +879,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
873 struct sk_buff *skb, 879 struct sk_buff *skb,
874 struct sk_buff_head *list) 880 struct sk_buff_head *list)
875{ 881{
876 struct skb_shared_info *shinfo = skb_shinfo(skb);
877 RING_IDX cons = queue->rx.rsp_cons; 882 RING_IDX cons = queue->rx.rsp_cons;
878 struct sk_buff *nskb; 883 struct sk_buff *nskb;
879 884
@@ -882,15 +887,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
882 RING_GET_RESPONSE(&queue->rx, ++cons); 887 RING_GET_RESPONSE(&queue->rx, ++cons);
883 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; 888 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
884 889
885 if (shinfo->nr_frags == MAX_SKB_FRAGS) { 890 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
886 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 891 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
887 892
888 BUG_ON(pull_to <= skb_headlen(skb)); 893 BUG_ON(pull_to <= skb_headlen(skb));
889 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 894 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
890 } 895 }
891 BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); 896 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
892 897
893 skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag), 898 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
899 skb_frag_page(nfrag),
894 rx->offset, rx->status, PAGE_SIZE); 900 rx->offset, rx->status, PAGE_SIZE);
895 901
896 skb_shinfo(nskb)->nr_frags = 0; 902 skb_shinfo(nskb)->nr_frags = 0;
@@ -1329,6 +1335,12 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1329 1335
1330 netif_carrier_off(netdev); 1336 netif_carrier_off(netdev);
1331 1337
1338 xenbus_switch_state(dev, XenbusStateInitialising);
1339 wait_event(module_load_q,
1340 xenbus_read_driver_state(dev->otherend) !=
1341 XenbusStateClosed &&
1342 xenbus_read_driver_state(dev->otherend) !=
1343 XenbusStateUnknown);
1332 return netdev; 1344 return netdev;
1333 1345
1334 exit: 1346 exit:
@@ -1360,18 +1372,8 @@ static int netfront_probe(struct xenbus_device *dev,
1360#ifdef CONFIG_SYSFS 1372#ifdef CONFIG_SYSFS
1361 info->netdev->sysfs_groups[0] = &xennet_dev_group; 1373 info->netdev->sysfs_groups[0] = &xennet_dev_group;
1362#endif 1374#endif
1363 err = register_netdev(info->netdev);
1364 if (err) {
1365 pr_warn("%s: register_netdev err=%d\n", __func__, err);
1366 goto fail;
1367 }
1368 1375
1369 return 0; 1376 return 0;
1370
1371 fail:
1372 xennet_free_netdev(netdev);
1373 dev_set_drvdata(&dev->dev, NULL);
1374 return err;
1375} 1377}
1376 1378
1377static void xennet_end_access(int ref, void *page) 1379static void xennet_end_access(int ref, void *page)
@@ -1740,8 +1742,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
1740{ 1742{
1741 unsigned int i; 1743 unsigned int i;
1742 1744
1743 rtnl_lock();
1744
1745 for (i = 0; i < info->netdev->real_num_tx_queues; i++) { 1745 for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1746 struct netfront_queue *queue = &info->queues[i]; 1746 struct netfront_queue *queue = &info->queues[i];
1747 1747
@@ -1750,8 +1750,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
1750 netif_napi_del(&queue->napi); 1750 netif_napi_del(&queue->napi);
1751 } 1751 }
1752 1752
1753 rtnl_unlock();
1754
1755 kfree(info->queues); 1753 kfree(info->queues);
1756 info->queues = NULL; 1754 info->queues = NULL;
1757} 1755}
@@ -1767,8 +1765,6 @@ static int xennet_create_queues(struct netfront_info *info,
1767 if (!info->queues) 1765 if (!info->queues)
1768 return -ENOMEM; 1766 return -ENOMEM;
1769 1767
1770 rtnl_lock();
1771
1772 for (i = 0; i < *num_queues; i++) { 1768 for (i = 0; i < *num_queues; i++) {
1773 struct netfront_queue *queue = &info->queues[i]; 1769 struct netfront_queue *queue = &info->queues[i];
1774 1770
@@ -1777,7 +1773,7 @@ static int xennet_create_queues(struct netfront_info *info,
1777 1773
1778 ret = xennet_init_queue(queue); 1774 ret = xennet_init_queue(queue);
1779 if (ret < 0) { 1775 if (ret < 0) {
1780 dev_warn(&info->netdev->dev, 1776 dev_warn(&info->xbdev->dev,
1781 "only created %d queues\n", i); 1777 "only created %d queues\n", i);
1782 *num_queues = i; 1778 *num_queues = i;
1783 break; 1779 break;
@@ -1791,10 +1787,8 @@ static int xennet_create_queues(struct netfront_info *info,
1791 1787
1792 netif_set_real_num_tx_queues(info->netdev, *num_queues); 1788 netif_set_real_num_tx_queues(info->netdev, *num_queues);
1793 1789
1794 rtnl_unlock();
1795
1796 if (*num_queues == 0) { 1790 if (*num_queues == 0) {
1797 dev_err(&info->netdev->dev, "no queues\n"); 1791 dev_err(&info->xbdev->dev, "no queues\n");
1798 return -EINVAL; 1792 return -EINVAL;
1799 } 1793 }
1800 return 0; 1794 return 0;
@@ -1836,6 +1830,7 @@ static int talk_to_netback(struct xenbus_device *dev,
1836 goto out; 1830 goto out;
1837 } 1831 }
1838 1832
1833 rtnl_lock();
1839 if (info->queues) 1834 if (info->queues)
1840 xennet_destroy_queues(info); 1835 xennet_destroy_queues(info);
1841 1836
@@ -1846,6 +1841,7 @@ static int talk_to_netback(struct xenbus_device *dev,
1846 info->queues = NULL; 1841 info->queues = NULL;
1847 goto out; 1842 goto out;
1848 } 1843 }
1844 rtnl_unlock();
1849 1845
1850 /* Create shared ring, alloc event channel -- for each queue */ 1846 /* Create shared ring, alloc event channel -- for each queue */
1851 for (i = 0; i < num_queues; ++i) { 1847 for (i = 0; i < num_queues; ++i) {
@@ -1942,8 +1938,10 @@ abort_transaction_no_dev_fatal:
1942 xenbus_transaction_end(xbt, 1); 1938 xenbus_transaction_end(xbt, 1);
1943 destroy_ring: 1939 destroy_ring:
1944 xennet_disconnect_backend(info); 1940 xennet_disconnect_backend(info);
1941 rtnl_lock();
1945 xennet_destroy_queues(info); 1942 xennet_destroy_queues(info);
1946 out: 1943 out:
1944 rtnl_unlock();
1947 device_unregister(&dev->dev); 1945 device_unregister(&dev->dev);
1948 return err; 1946 return err;
1949} 1947}
@@ -1979,6 +1977,15 @@ static int xennet_connect(struct net_device *dev)
1979 netdev_update_features(dev); 1977 netdev_update_features(dev);
1980 rtnl_unlock(); 1978 rtnl_unlock();
1981 1979
1980 if (dev->reg_state == NETREG_UNINITIALIZED) {
1981 err = register_netdev(dev);
1982 if (err) {
1983 pr_warn("%s: register_netdev err=%d\n", __func__, err);
1984 device_unregister(&np->xbdev->dev);
1985 return err;
1986 }
1987 }
1988
1982 /* 1989 /*
1983 * All public and private state should now be sane. Get 1990 * All public and private state should now be sane. Get
1984 * ready to start sending and receiving packets and give the driver 1991 * ready to start sending and receiving packets and give the driver
@@ -2021,7 +2028,10 @@ static void netback_changed(struct xenbus_device *dev,
2021 case XenbusStateInitialised: 2028 case XenbusStateInitialised:
2022 case XenbusStateReconfiguring: 2029 case XenbusStateReconfiguring:
2023 case XenbusStateReconfigured: 2030 case XenbusStateReconfigured:
2031 break;
2032
2024 case XenbusStateUnknown: 2033 case XenbusStateUnknown:
2034 wake_up_all(&module_unload_q);
2025 break; 2035 break;
2026 2036
2027 case XenbusStateInitWait: 2037 case XenbusStateInitWait:
@@ -2037,10 +2047,12 @@ static void netback_changed(struct xenbus_device *dev,
2037 break; 2047 break;
2038 2048
2039 case XenbusStateClosed: 2049 case XenbusStateClosed:
2050 wake_up_all(&module_unload_q);
2040 if (dev->state == XenbusStateClosed) 2051 if (dev->state == XenbusStateClosed)
2041 break; 2052 break;
2042 /* Missed the backend's CLOSING state -- fallthrough */ 2053 /* Missed the backend's CLOSING state -- fallthrough */
2043 case XenbusStateClosing: 2054 case XenbusStateClosing:
2055 wake_up_all(&module_unload_q);
2044 xenbus_frontend_closed(dev); 2056 xenbus_frontend_closed(dev);
2045 break; 2057 break;
2046 } 2058 }
@@ -2146,12 +2158,32 @@ static int xennet_remove(struct xenbus_device *dev)
2146 2158
2147 dev_dbg(&dev->dev, "%s\n", dev->nodename); 2159 dev_dbg(&dev->dev, "%s\n", dev->nodename);
2148 2160
2161 if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
2162 xenbus_switch_state(dev, XenbusStateClosing);
2163 wait_event(module_unload_q,
2164 xenbus_read_driver_state(dev->otherend) ==
2165 XenbusStateClosing ||
2166 xenbus_read_driver_state(dev->otherend) ==
2167 XenbusStateUnknown);
2168
2169 xenbus_switch_state(dev, XenbusStateClosed);
2170 wait_event(module_unload_q,
2171 xenbus_read_driver_state(dev->otherend) ==
2172 XenbusStateClosed ||
2173 xenbus_read_driver_state(dev->otherend) ==
2174 XenbusStateUnknown);
2175 }
2176
2149 xennet_disconnect_backend(info); 2177 xennet_disconnect_backend(info);
2150 2178
2151 unregister_netdev(info->netdev); 2179 if (info->netdev->reg_state == NETREG_REGISTERED)
2180 unregister_netdev(info->netdev);
2152 2181
2153 if (info->queues) 2182 if (info->queues) {
2183 rtnl_lock();
2154 xennet_destroy_queues(info); 2184 xennet_destroy_queues(info);
2185 rtnl_unlock();
2186 }
2155 xennet_free_netdev(info->netdev); 2187 xennet_free_netdev(info->netdev);
2156 2188
2157 return 0; 2189 return 0;
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index af62c4c854f3..b4f31dad40d6 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -17,7 +17,7 @@
17 */ 17 */
18 18
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/unaligned/access_ok.h> 20#include <asm/unaligned.h>
21#include <linux/firmware.h> 21#include <linux/firmware.h>
22#include <linux/nfc.h> 22#include <linux/nfc.h>
23#include <net/nfc/nci.h> 23#include <net/nfc/nci.h>
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index a7faa0bcc01e..fc8e78a29d77 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -96,10 +96,9 @@ static int nfcmrvl_spi_nci_send(struct nfcmrvl_private *priv,
96 /* Send the SPI packet */ 96 /* Send the SPI packet */
97 err = nci_spi_send(drv_data->nci_spi, &drv_data->handshake_completion, 97 err = nci_spi_send(drv_data->nci_spi, &drv_data->handshake_completion,
98 skb); 98 skb);
99 if (err != 0) { 99 if (err)
100 nfc_err(priv->dev, "spi_send failed %d", err); 100 nfc_err(priv->dev, "spi_send failed %d", err);
101 kfree_skb(skb); 101
102 }
103 return err; 102 return err;
104} 103}
105 104
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 3bbdf60f8908..49f3fba75f4d 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -955,6 +955,9 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
955 mw_base = nt->mw_vec[mw_num].phys_addr; 955 mw_base = nt->mw_vec[mw_num].phys_addr;
956 mw_size = nt->mw_vec[mw_num].phys_size; 956 mw_size = nt->mw_vec[mw_num].phys_size;
957 957
958 if (max_mw_size && mw_size > max_mw_size)
959 mw_size = max_mw_size;
960
958 tx_size = (unsigned int)mw_size / num_qps_mw; 961 tx_size = (unsigned int)mw_size / num_qps_mw;
959 qp_offset = tx_size * (qp_num / mw_count); 962 qp_offset = tx_size * (qp_num / mw_count);
960 963
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 254b0ee37039..a71187c783b7 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -237,14 +237,18 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
237{ 237{
238 struct device *dev = disk->driverfs_dev; 238 struct device *dev = disk->driverfs_dev;
239 struct nd_region *nd_region = to_nd_region(dev->parent); 239 struct nd_region *nd_region = to_nd_region(dev->parent);
240 const char *pol = nd_region->ro ? "only" : "write"; 240 int disk_ro = get_disk_ro(disk);
241 241
242 if (nd_region->ro == get_disk_ro(disk)) 242 /*
243 * Upgrade to read-only if the region is read-only preserve as
244 * read-only if the disk is already read-only.
245 */
246 if (disk_ro || nd_region->ro == disk_ro)
243 return 0; 247 return 0;
244 248
245 dev_info(dev, "%s read-%s, marking %s read-%s\n", 249 dev_info(dev, "%s read-only, marking %s read-only\n",
246 dev_name(&nd_region->dev), pol, disk->disk_name, pol); 250 dev_name(&nd_region->dev), disk->disk_name);
247 set_disk_ro(disk, nd_region->ro); 251 set_disk_ro(disk, 1);
248 252
249 return 0; 253 return 0;
250 254
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d6ceb8b91cd6..01f47b68b6e7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1583,23 +1583,26 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1583 nvmeq->cq_vector = qid - 1; 1583 nvmeq->cq_vector = qid - 1;
1584 result = adapter_alloc_cq(dev, qid, nvmeq); 1584 result = adapter_alloc_cq(dev, qid, nvmeq);
1585 if (result < 0) 1585 if (result < 0)
1586 return result; 1586 goto release_vector;
1587 1587
1588 result = adapter_alloc_sq(dev, qid, nvmeq); 1588 result = adapter_alloc_sq(dev, qid, nvmeq);
1589 if (result < 0) 1589 if (result < 0)
1590 goto release_cq; 1590 goto release_cq;
1591 1591
1592 nvme_init_queue(nvmeq, qid);
1592 result = queue_request_irq(dev, nvmeq, nvmeq->irqname); 1593 result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1593 if (result < 0) 1594 if (result < 0)
1594 goto release_sq; 1595 goto release_sq;
1595 1596
1596 nvme_init_queue(nvmeq, qid);
1597 return result; 1597 return result;
1598 1598
1599 release_sq: 1599 release_sq:
1600 dev->online_queues--;
1600 adapter_delete_sq(dev, qid); 1601 adapter_delete_sq(dev, qid);
1601 release_cq: 1602 release_cq:
1602 adapter_delete_cq(dev, qid); 1603 adapter_delete_cq(dev, qid);
1604 release_vector:
1605 nvmeq->cq_vector = -1;
1603 return result; 1606 return result;
1604} 1607}
1605 1608
@@ -1794,6 +1797,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1794 goto free_nvmeq; 1797 goto free_nvmeq;
1795 1798
1796 nvmeq->cq_vector = 0; 1799 nvmeq->cq_vector = 0;
1800 nvme_init_queue(nvmeq, 0);
1797 result = queue_request_irq(dev, nvmeq, nvmeq->irqname); 1801 result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1798 if (result) { 1802 if (result) {
1799 nvmeq->cq_vector = -1; 1803 nvmeq->cq_vector = -1;
@@ -2976,10 +2980,16 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
2976 mutex_unlock(&dev->shutdown_lock); 2980 mutex_unlock(&dev->shutdown_lock);
2977} 2981}
2978 2982
2979static void nvme_dev_remove(struct nvme_dev *dev) 2983static void nvme_remove_namespaces(struct nvme_dev *dev)
2980{ 2984{
2981 struct nvme_ns *ns, *next; 2985 struct nvme_ns *ns, *next;
2982 2986
2987 list_for_each_entry_safe(ns, next, &dev->namespaces, list)
2988 nvme_ns_remove(ns);
2989}
2990
2991static void nvme_dev_remove(struct nvme_dev *dev)
2992{
2983 if (nvme_io_incapable(dev)) { 2993 if (nvme_io_incapable(dev)) {
2984 /* 2994 /*
2985 * If the device is not capable of IO (surprise hot-removal, 2995 * If the device is not capable of IO (surprise hot-removal,
@@ -2989,8 +2999,7 @@ static void nvme_dev_remove(struct nvme_dev *dev)
2989 */ 2999 */
2990 nvme_dev_shutdown(dev); 3000 nvme_dev_shutdown(dev);
2991 } 3001 }
2992 list_for_each_entry_safe(ns, next, &dev->namespaces, list) 3002 nvme_remove_namespaces(dev);
2993 nvme_ns_remove(ns);
2994} 3003}
2995 3004
2996static int nvme_setup_prp_pools(struct nvme_dev *dev) 3005static int nvme_setup_prp_pools(struct nvme_dev *dev)
@@ -3157,7 +3166,6 @@ static void nvme_probe_work(struct work_struct *work)
3157 goto disable; 3166 goto disable;
3158 } 3167 }
3159 3168
3160 nvme_init_queue(dev->queues[0], 0);
3161 result = nvme_alloc_admin_tags(dev); 3169 result = nvme_alloc_admin_tags(dev);
3162 if (result) 3170 if (result)
3163 goto disable; 3171 goto disable;
@@ -3174,7 +3182,7 @@ static void nvme_probe_work(struct work_struct *work)
3174 */ 3182 */
3175 if (dev->online_queues < 2) { 3183 if (dev->online_queues < 2) {
3176 dev_warn(dev->dev, "IO queues not created\n"); 3184 dev_warn(dev->dev, "IO queues not created\n");
3177 nvme_dev_remove(dev); 3185 nvme_remove_namespaces(dev);
3178 } else { 3186 } else {
3179 nvme_unfreeze_queues(dev); 3187 nvme_unfreeze_queues(dev);
3180 nvme_dev_add(dev); 3188 nvme_dev_add(dev);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 97a280d50d6d..7c509bff9295 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -223,7 +223,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
223 str[i] = '_'; 223 str[i] = '_';
224 } 224 }
225 225
226 return tsize; 226 return repend;
227} 227}
228EXPORT_SYMBOL_GPL(of_device_get_modalias); 228EXPORT_SYMBOL_GPL(of_device_get_modalias);
229 229
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index e16ea5717b7f..2a547ca3d443 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -156,20 +156,20 @@ static void __init of_unittest_dynamic(void)
156 /* Add a new property - should pass*/ 156 /* Add a new property - should pass*/
157 prop->name = "new-property"; 157 prop->name = "new-property";
158 prop->value = "new-property-data"; 158 prop->value = "new-property-data";
159 prop->length = strlen(prop->value); 159 prop->length = strlen(prop->value) + 1;
160 unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n"); 160 unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
161 161
162 /* Try to add an existing property - should fail */ 162 /* Try to add an existing property - should fail */
163 prop++; 163 prop++;
164 prop->name = "new-property"; 164 prop->name = "new-property";
165 prop->value = "new-property-data-should-fail"; 165 prop->value = "new-property-data-should-fail";
166 prop->length = strlen(prop->value); 166 prop->length = strlen(prop->value) + 1;
167 unittest(of_add_property(np, prop) != 0, 167 unittest(of_add_property(np, prop) != 0,
168 "Adding an existing property should have failed\n"); 168 "Adding an existing property should have failed\n");
169 169
170 /* Try to modify an existing property - should pass */ 170 /* Try to modify an existing property - should pass */
171 prop->value = "modify-property-data-should-pass"; 171 prop->value = "modify-property-data-should-pass";
172 prop->length = strlen(prop->value); 172 prop->length = strlen(prop->value) + 1;
173 unittest(of_update_property(np, prop) == 0, 173 unittest(of_update_property(np, prop) == 0,
174 "Updating an existing property should have passed\n"); 174 "Updating an existing property should have passed\n");
175 175
@@ -177,7 +177,7 @@ static void __init of_unittest_dynamic(void)
177 prop++; 177 prop++;
178 prop->name = "modify-property"; 178 prop->name = "modify-property";
179 prop->value = "modify-missing-property-data-should-pass"; 179 prop->value = "modify-missing-property-data-should-pass";
180 prop->length = strlen(prop->value); 180 prop->length = strlen(prop->value) + 1;
181 unittest(of_update_property(np, prop) == 0, 181 unittest(of_update_property(np, prop) == 0,
182 "Updating a missing property should have passed\n"); 182 "Updating a missing property should have passed\n");
183 183
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 312cb5b74dec..1d288fa4f4d6 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1365,9 +1365,27 @@ lba_hw_init(struct lba_device *d)
1365 WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG); 1365 WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
1366 } 1366 }
1367 1367
1368 /* Set HF mode as the default (vs. -1 mode). */ 1368
1369 /*
1370 * Hard Fail vs. Soft Fail on PCI "Master Abort".
1371 *
1372 * "Master Abort" means the MMIO transaction timed out - usually due to
1373 * the device not responding to an MMIO read. We would like HF to be
1374 * enabled to find driver problems, though it means the system will
1375 * crash with a HPMC.
1376 *
1377 * In SoftFail mode "~0L" is returned as a result of a timeout on the
1378 * pci bus. This is like how PCI busses on x86 and most other
1379 * architectures behave. In order to increase compatibility with
1380 * existing (x86) PCI hardware and existing Linux drivers we enable
1381 * Soft Faul mode on PA-RISC now too.
1382 */
1369 stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL); 1383 stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
1384#if defined(ENABLE_HARDFAIL)
1370 WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL); 1385 WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
1386#else
1387 WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
1388#endif
1371 1389
1372 /* 1390 /*
1373 ** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal 1391 ** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 78530d1714dc..bdce0679674c 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2646,6 +2646,7 @@ enum parport_pc_pci_cards {
2646 netmos_9901, 2646 netmos_9901,
2647 netmos_9865, 2647 netmos_9865,
2648 quatech_sppxp100, 2648 quatech_sppxp100,
2649 wch_ch382l,
2649}; 2650};
2650 2651
2651 2652
@@ -2708,6 +2709,7 @@ static struct parport_pc_pci {
2708 /* netmos_9901 */ { 1, { { 0, -1 }, } }, 2709 /* netmos_9901 */ { 1, { { 0, -1 }, } },
2709 /* netmos_9865 */ { 1, { { 0, -1 }, } }, 2710 /* netmos_9865 */ { 1, { { 0, -1 }, } },
2710 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, 2711 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
2712 /* wch_ch382l */ { 1, { { 2, -1 }, } },
2711}; 2713};
2712 2714
2713static const struct pci_device_id parport_pc_pci_tbl[] = { 2715static const struct pci_device_id parport_pc_pci_tbl[] = {
@@ -2797,6 +2799,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
2797 /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ 2799 /* Quatech SPPXP-100 Parallel port PCI ExpressCard */
2798 { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, 2800 { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
2799 PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, 2801 PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
2802 /* WCH CH382L PCI-E single parallel port card */
2803 { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
2800 { 0, } /* terminate list */ 2804 { 0, } /* terminate list */
2801}; 2805};
2802MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl); 2806MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
diff --git a/drivers/pci/controller/pci-keystone.c b/drivers/pci/controller/pci-keystone.c
index 81f653cedef2..8eb90a6839c4 100644
--- a/drivers/pci/controller/pci-keystone.c
+++ b/drivers/pci/controller/pci-keystone.c
@@ -183,7 +183,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
183 } 183 }
184 184
185 /* interrupt controller is in a child node */ 185 /* interrupt controller is in a child node */
186 *np_temp = of_find_node_by_name(np_pcie, controller); 186 *np_temp = of_get_child_by_name(np_pcie, controller);
187 if (!(*np_temp)) { 187 if (!(*np_temp)) {
188 dev_err(dev, "Node for %s is absent\n", controller); 188 dev_err(dev, "Node for %s is absent\n", controller);
189 return -EINVAL; 189 return -EINVAL;
@@ -192,6 +192,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
192 temp = of_irq_count(*np_temp); 192 temp = of_irq_count(*np_temp);
193 if (!temp) { 193 if (!temp) {
194 dev_err(dev, "No IRQ entries in %s\n", controller); 194 dev_err(dev, "No IRQ entries in %s\n", controller);
195 of_node_put(*np_temp);
195 return -EINVAL; 196 return -EINVAL;
196 } 197 }
197 198
@@ -209,6 +210,8 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
209 break; 210 break;
210 } 211 }
211 212
213 of_node_put(*np_temp);
214
212 if (temp) { 215 if (temp) {
213 *num_irqs = temp; 216 *num_irqs = temp;
214 return 0; 217 return 0;
diff --git a/drivers/pci/controller/pci-layerscape.c b/drivers/pci/controller/pci-layerscape.c
index 7f57e3f730ea..6cb78844f1b2 100644
--- a/drivers/pci/controller/pci-layerscape.c
+++ b/drivers/pci/controller/pci-layerscape.c
@@ -118,13 +118,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp)
118 118
119 dw_pcie_setup_rc(pp); 119 dw_pcie_setup_rc(pp);
120 120
121 /* 121 ls_pcie_drop_msg_tlp(pcie);
122 * LS1021A Workaround for internal TKT228622
123 * to fix the INTx hang issue
124 */
125 val = ioread32(pcie->dbi + PCIE_STRFMR1);
126 val &= 0xffff;
127 iowrite32(val, pcie->dbi + PCIE_STRFMR1);
128} 122}
129 123
130static int ls_pcie_link_up(struct dw_pcie *pci) 124static int ls_pcie_link_up(struct dw_pcie *pci)
@@ -150,6 +144,7 @@ static void ls_pcie_host_init(struct pcie_port *pp)
150 iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); 144 iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
151 ls_pcie_fix_class(pcie); 145 ls_pcie_fix_class(pcie);
152 ls_pcie_clear_multifunction(pcie); 146 ls_pcie_clear_multifunction(pcie);
147 ls_pcie_drop_msg_tlp(pcie);
153 iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); 148 iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
154} 149}
155 150
@@ -216,6 +211,7 @@ static const struct of_device_id ls_pcie_of_match[] = {
216 { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, 211 { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
217 { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, 212 { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
218 { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, 213 { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
214 { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
219 { }, 215 { },
220}; 216};
221MODULE_DEVICE_TABLE(of, ls_pcie_of_match); 217MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 0b3e0bfa7be5..572ca192cb1f 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -587,6 +587,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
587{ 587{
588 unsigned long long sta = 0; 588 unsigned long long sta = 0;
589 struct acpiphp_func *func; 589 struct acpiphp_func *func;
590 u32 dvid;
590 591
591 list_for_each_entry(func, &slot->funcs, sibling) { 592 list_for_each_entry(func, &slot->funcs, sibling) {
592 if (func->flags & FUNC_HAS_STA) { 593 if (func->flags & FUNC_HAS_STA) {
@@ -597,19 +598,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
597 if (ACPI_SUCCESS(status) && sta) 598 if (ACPI_SUCCESS(status) && sta)
598 break; 599 break;
599 } else { 600 } else {
600 u32 dvid; 601 if (pci_bus_read_dev_vendor_id(slot->bus,
601 602 PCI_DEVFN(slot->device, func->function),
602 pci_bus_read_config_dword(slot->bus, 603 &dvid, 0)) {
603 PCI_DEVFN(slot->device,
604 func->function),
605 PCI_VENDOR_ID, &dvid);
606 if (dvid != 0xffffffff) {
607 sta = ACPI_STA_ALL; 604 sta = ACPI_STA_ALL;
608 break; 605 break;
609 } 606 }
610 } 607 }
611 } 608 }
612 609
610 if (!sta) {
611 /*
612 * Check for the slot itself since it may be that the
613 * ACPI slot is a device below PCIe upstream port so in
614 * that case it may not even be reachable yet.
615 */
616 if (pci_bus_read_dev_vendor_id(slot->bus,
617 PCI_DEVFN(slot->device, 0), &dvid, 0)) {
618 sta = ACPI_STA_ALL;
619 }
620 }
621
613 return (unsigned int)sta; 622 return (unsigned int)sta;
614} 623}
615 624
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 62d6fe6c3714..cbe58480b474 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -134,7 +134,7 @@ struct controller *pcie_init(struct pcie_device *dev);
134int pcie_init_notification(struct controller *ctrl); 134int pcie_init_notification(struct controller *ctrl);
135int pciehp_enable_slot(struct slot *p_slot); 135int pciehp_enable_slot(struct slot *p_slot);
136int pciehp_disable_slot(struct slot *p_slot); 136int pciehp_disable_slot(struct slot *p_slot);
137void pcie_enable_notification(struct controller *ctrl); 137void pcie_reenable_notification(struct controller *ctrl);
138int pciehp_power_on_slot(struct slot *slot); 138int pciehp_power_on_slot(struct slot *slot);
139void pciehp_power_off_slot(struct slot *slot); 139void pciehp_power_off_slot(struct slot *slot);
140void pciehp_get_power_status(struct slot *slot, u8 *status); 140void pciehp_get_power_status(struct slot *slot, u8 *status);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 612b21a14df5..8f6ded43760a 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -295,7 +295,7 @@ static int pciehp_resume(struct pcie_device *dev)
295 ctrl = get_service_data(dev); 295 ctrl = get_service_data(dev);
296 296
297 /* reinitialize the chipset's event detection logic */ 297 /* reinitialize the chipset's event detection logic */
298 pcie_enable_notification(ctrl); 298 pcie_reenable_notification(ctrl);
299 299
300 slot = ctrl->slot; 300 slot = ctrl->slot;
301 301
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 5c24e938042f..63c6c7fce3eb 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -628,7 +628,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
628 return IRQ_HANDLED; 628 return IRQ_HANDLED;
629} 629}
630 630
631void pcie_enable_notification(struct controller *ctrl) 631static void pcie_enable_notification(struct controller *ctrl)
632{ 632{
633 u16 cmd, mask; 633 u16 cmd, mask;
634 634
@@ -666,6 +666,17 @@ void pcie_enable_notification(struct controller *ctrl)
666 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd); 666 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
667} 667}
668 668
669void pcie_reenable_notification(struct controller *ctrl)
670{
671 /*
672 * Clear both Presence and Data Link Layer Changed to make sure
673 * those events still fire after we have re-enabled them.
674 */
675 pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
676 PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
677 pcie_enable_notification(ctrl);
678}
679
669static void pcie_disable_notification(struct controller *ctrl) 680static void pcie_disable_notification(struct controller *ctrl)
670{ 681{
671 u16 mask; 682 u16 mask;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index a32ba753e413..afaf13474796 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -543,7 +543,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
543 union acpi_object *obj; 543 union acpi_object *obj;
544 struct pci_host_bridge *bridge; 544 struct pci_host_bridge *bridge;
545 545
546 if (acpi_pci_disabled || !bus->bridge) 546 if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
547 return; 547 return;
548 548
549 acpi_pci_slot_enumerate(bus); 549 acpi_pci_slot_enumerate(bus);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 32bd8ab79d53..dd9ebdc968c8 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1140,11 +1140,14 @@ static int pci_pm_runtime_suspend(struct device *dev)
1140 int error; 1140 int error;
1141 1141
1142 /* 1142 /*
1143 * If pci_dev->driver is not set (unbound), the device should 1143 * If pci_dev->driver is not set (unbound), we leave the device in D0,
1144 * always remain in D0 regardless of the runtime PM status 1144 * but it may go to D3cold when the bridge above it runtime suspends.
1145 * Save its config space in case that happens.
1145 */ 1146 */
1146 if (!pci_dev->driver) 1147 if (!pci_dev->driver) {
1148 pci_save_state(pci_dev);
1147 return 0; 1149 return 0;
1150 }
1148 1151
1149 if (!pm || !pm->runtime_suspend) 1152 if (!pm || !pm->runtime_suspend)
1150 return -ENOSYS; 1153 return -ENOSYS;
@@ -1195,16 +1198,18 @@ static int pci_pm_runtime_resume(struct device *dev)
1195 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 1198 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
1196 1199
1197 /* 1200 /*
1198 * If pci_dev->driver is not set (unbound), the device should 1201 * Restoring config space is necessary even if the device is not bound
1199 * always remain in D0 regardless of the runtime PM status 1202 * to a driver because although we left it in D0, it may have gone to
1203 * D3cold when the bridge above it runtime suspended.
1200 */ 1204 */
1205 pci_restore_standard_config(pci_dev);
1206
1201 if (!pci_dev->driver) 1207 if (!pci_dev->driver)
1202 return 0; 1208 return 0;
1203 1209
1204 if (!pm || !pm->runtime_resume) 1210 if (!pm || !pm->runtime_resume)
1205 return -ENOSYS; 1211 return -ENOSYS;
1206 1212
1207 pci_restore_standard_config(pci_dev);
1208 pci_fixup_device(pci_fixup_resume_early, pci_dev); 1213 pci_fixup_device(pci_fixup_resume_early, pci_dev);
1209 __pci_enable_wake(pci_dev, PCI_D0, true, false); 1214 __pci_enable_wake(pci_dev, PCI_D0, true, false);
1210 pci_fixup_device(pci_fixup_resume, pci_dev); 1215 pci_fixup_device(pci_fixup_resume, pci_dev);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index ec91cd17bf34..5fb4ed6ea322 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -180,13 +180,16 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
180 if (!capable(CAP_SYS_ADMIN)) 180 if (!capable(CAP_SYS_ADMIN))
181 return -EPERM; 181 return -EPERM;
182 182
183 if (!val) { 183 device_lock(dev);
184 if (pci_is_enabled(pdev)) 184 if (dev->driver)
185 pci_disable_device(pdev); 185 result = -EBUSY;
186 else 186 else if (val)
187 result = -EIO;
188 } else
189 result = pci_enable_device(pdev); 187 result = pci_enable_device(pdev);
188 else if (pci_is_enabled(pdev))
189 pci_disable_device(pdev);
190 else
191 result = -EIO;
192 device_unlock(dev);
190 193
191 return result < 0 ? result : count; 194 return result < 0 ? result : count;
192} 195}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 193ac13de49b..566897f24dee 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -230,7 +230,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
230 res->flags |= IORESOURCE_ROM_ENABLE; 230 res->flags |= IORESOURCE_ROM_ENABLE;
231 l64 = l & PCI_ROM_ADDRESS_MASK; 231 l64 = l & PCI_ROM_ADDRESS_MASK;
232 sz64 = sz & PCI_ROM_ADDRESS_MASK; 232 sz64 = sz & PCI_ROM_ADDRESS_MASK;
233 mask64 = (u32)PCI_ROM_ADDRESS_MASK; 233 mask64 = PCI_ROM_ADDRESS_MASK;
234 } 234 }
235 235
236 if (res->flags & IORESOURCE_MEM_64) { 236 if (res->flags & IORESOURCE_MEM_64) {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 254192b5dad1..5697b32819cb 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3614,6 +3614,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
3614 quirk_dma_func1_alias); 3614 quirk_dma_func1_alias);
3615DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, 3615DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
3616 quirk_dma_func1_alias); 3616 quirk_dma_func1_alias);
3617DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
3618 quirk_dma_func1_alias);
3617/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ 3619/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
3618DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, 3620DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
3619 quirk_dma_func1_alias); 3621 quirk_dma_func1_alias);
@@ -3626,11 +3628,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
3626/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */ 3628/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
3627DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0, 3629DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
3628 quirk_dma_func1_alias); 3630 quirk_dma_func1_alias);
3631/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
3632DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
3633 quirk_dma_func1_alias);
3629/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */ 3634/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
3630DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, 3635DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
3631 quirk_dma_func1_alias); 3636 quirk_dma_func1_alias);
3632DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, 3637DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
3633 quirk_dma_func1_alias); 3638 quirk_dma_func1_alias);
3639DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
3640 quirk_dma_func1_alias);
3634/* https://bugs.gentoo.org/show_bug.cgi?id=497630 */ 3641/* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
3635DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON, 3642DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
3636 PCI_DEVICE_ID_JMICRON_JMB388_ESD, 3643 PCI_DEVICE_ID_JMICRON_JMB388_ESD,
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 25062966cbfa..8b2f8b2a574e 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -63,7 +63,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
63 mask = (u32)PCI_BASE_ADDRESS_IO_MASK; 63 mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
64 new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK; 64 new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
65 } else if (resno == PCI_ROM_RESOURCE) { 65 } else if (resno == PCI_ROM_RESOURCE) {
66 mask = (u32)PCI_ROM_ADDRESS_MASK; 66 mask = PCI_ROM_ADDRESS_MASK;
67 } else { 67 } else {
68 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; 68 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
69 new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; 69 new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 8af1f900ea65..1ba58fb6f796 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -321,10 +321,16 @@ validate_group(struct perf_event *event)
321 return 0; 321 return 0;
322} 322}
323 323
324static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
325{
326 struct platform_device *pdev = armpmu->plat_device;
327
328 return pdev ? dev_get_platdata(&pdev->dev) : NULL;
329}
330
324static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) 331static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
325{ 332{
326 struct arm_pmu *armpmu; 333 struct arm_pmu *armpmu;
327 struct platform_device *plat_device;
328 struct arm_pmu_platdata *plat; 334 struct arm_pmu_platdata *plat;
329 int ret; 335 int ret;
330 u64 start_clock, finish_clock; 336 u64 start_clock, finish_clock;
@@ -336,8 +342,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
336 * dereference. 342 * dereference.
337 */ 343 */
338 armpmu = *(void **)dev; 344 armpmu = *(void **)dev;
339 plat_device = armpmu->plat_device; 345
340 plat = dev_get_platdata(&plat_device->dev); 346 plat = armpmu_get_platdata(armpmu);
341 347
342 start_clock = sched_clock(); 348 start_clock = sched_clock();
343 if (plat && plat->handle_irq) 349 if (plat && plat->handle_irq)
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index a4dcdb48fd56..1ff21b89961e 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -980,19 +980,16 @@ struct pinctrl_state *pinctrl_lookup_state(struct pinctrl *p,
980EXPORT_SYMBOL_GPL(pinctrl_lookup_state); 980EXPORT_SYMBOL_GPL(pinctrl_lookup_state);
981 981
982/** 982/**
983 * pinctrl_select_state() - select/activate/program a pinctrl state to HW 983 * pinctrl_commit_state() - select/activate/program a pinctrl state to HW
984 * @p: the pinctrl handle for the device that requests configuration 984 * @p: the pinctrl handle for the device that requests configuration
985 * @state: the state handle to select/activate/program 985 * @state: the state handle to select/activate/program
986 */ 986 */
987int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state) 987static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
988{ 988{
989 struct pinctrl_setting *setting, *setting2; 989 struct pinctrl_setting *setting, *setting2;
990 struct pinctrl_state *old_state = p->state; 990 struct pinctrl_state *old_state = p->state;
991 int ret; 991 int ret;
992 992
993 if (p->state == state)
994 return 0;
995
996 if (p->state) { 993 if (p->state) {
997 /* 994 /*
998 * For each pinmux setting in the old state, forget SW's record 995 * For each pinmux setting in the old state, forget SW's record
@@ -1056,6 +1053,19 @@ unapply_new_state:
1056 1053
1057 return ret; 1054 return ret;
1058} 1055}
1056
1057/**
1058 * pinctrl_select_state() - select/activate/program a pinctrl state to HW
1059 * @p: the pinctrl handle for the device that requests configuration
1060 * @state: the state handle to select/activate/program
1061 */
1062int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state)
1063{
1064 if (p->state == state)
1065 return 0;
1066
1067 return pinctrl_commit_state(p, state);
1068}
1059EXPORT_SYMBOL_GPL(pinctrl_select_state); 1069EXPORT_SYMBOL_GPL(pinctrl_select_state);
1060 1070
1061static void devm_pinctrl_release(struct device *dev, void *res) 1071static void devm_pinctrl_release(struct device *dev, void *res)
@@ -1224,7 +1234,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
1224int pinctrl_force_sleep(struct pinctrl_dev *pctldev) 1234int pinctrl_force_sleep(struct pinctrl_dev *pctldev)
1225{ 1235{
1226 if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep)) 1236 if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep))
1227 return pinctrl_select_state(pctldev->p, pctldev->hog_sleep); 1237 return pinctrl_commit_state(pctldev->p, pctldev->hog_sleep);
1228 return 0; 1238 return 0;
1229} 1239}
1230EXPORT_SYMBOL_GPL(pinctrl_force_sleep); 1240EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
@@ -1236,7 +1246,7 @@ EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
1236int pinctrl_force_default(struct pinctrl_dev *pctldev) 1246int pinctrl_force_default(struct pinctrl_dev *pctldev)
1237{ 1247{
1238 if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default)) 1248 if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default))
1239 return pinctrl_select_state(pctldev->p, pctldev->hog_default); 1249 return pinctrl_commit_state(pctldev->p, pctldev->hog_default);
1240 return 0; 1250 return 0;
1241} 1251}
1242EXPORT_SYMBOL_GPL(pinctrl_force_default); 1252EXPORT_SYMBOL_GPL(pinctrl_force_default);
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 271cca63e9bd..9aa82a4e9e25 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -568,8 +568,10 @@ static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
568 for_each_child_of_node(np_config, np) { 568 for_each_child_of_node(np_config, np) {
569 ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map, 569 ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map,
570 &reserved_maps, num_maps); 570 &reserved_maps, num_maps);
571 if (ret < 0) 571 if (ret < 0) {
572 of_node_put(np);
572 break; 573 break;
574 }
573 } 575 }
574 } 576 }
575 577
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
index 1b580ba76453..907d7db3fcee 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
@@ -145,19 +145,19 @@ static const struct sunxi_desc_pin sun9i_a80_pins[] = {
145 SUNXI_FUNCTION(0x0, "gpio_in"), 145 SUNXI_FUNCTION(0x0, "gpio_in"),
146 SUNXI_FUNCTION(0x1, "gpio_out"), 146 SUNXI_FUNCTION(0x1, "gpio_out"),
147 SUNXI_FUNCTION(0x3, "mcsi"), /* MCLK */ 147 SUNXI_FUNCTION(0x3, "mcsi"), /* MCLK */
148 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PB_EINT14 */ 148 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)), /* PB_EINT14 */
149 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15), 149 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
150 SUNXI_FUNCTION(0x0, "gpio_in"), 150 SUNXI_FUNCTION(0x0, "gpio_in"),
151 SUNXI_FUNCTION(0x1, "gpio_out"), 151 SUNXI_FUNCTION(0x1, "gpio_out"),
152 SUNXI_FUNCTION(0x3, "mcsi"), /* SCK */ 152 SUNXI_FUNCTION(0x3, "mcsi"), /* SCK */
153 SUNXI_FUNCTION(0x4, "i2c4"), /* SCK */ 153 SUNXI_FUNCTION(0x4, "i2c4"), /* SCK */
154 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PB_EINT15 */ 154 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)), /* PB_EINT15 */
155 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16), 155 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
156 SUNXI_FUNCTION(0x0, "gpio_in"), 156 SUNXI_FUNCTION(0x0, "gpio_in"),
157 SUNXI_FUNCTION(0x1, "gpio_out"), 157 SUNXI_FUNCTION(0x1, "gpio_out"),
158 SUNXI_FUNCTION(0x3, "mcsi"), /* SDA */ 158 SUNXI_FUNCTION(0x3, "mcsi"), /* SDA */
159 SUNXI_FUNCTION(0x4, "i2c4"), /* SDA */ 159 SUNXI_FUNCTION(0x4, "i2c4"), /* SDA */
160 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PB_EINT16 */ 160 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)), /* PB_EINT16 */
161 161
162 /* Hole */ 162 /* Hole */
163 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0), 163 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 92430f781eb7..a0b8c8a8c323 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -59,12 +59,14 @@ static int send_command(struct cros_ec_device *ec_dev,
59 struct cros_ec_command *msg) 59 struct cros_ec_command *msg)
60{ 60{
61 int ret; 61 int ret;
62 int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
62 63
63 if (ec_dev->proto_version > 2) 64 if (ec_dev->proto_version > 2)
64 ret = ec_dev->pkt_xfer(ec_dev, msg); 65 xfer_fxn = ec_dev->pkt_xfer;
65 else 66 else
66 ret = ec_dev->cmd_xfer(ec_dev, msg); 67 xfer_fxn = ec_dev->cmd_xfer;
67 68
69 ret = (*xfer_fxn)(ec_dev, msg);
68 if (msg->result == EC_RES_IN_PROGRESS) { 70 if (msg->result == EC_RES_IN_PROGRESS) {
69 int i; 71 int i;
70 struct cros_ec_command *status_msg; 72 struct cros_ec_command *status_msg;
@@ -87,7 +89,7 @@ static int send_command(struct cros_ec_device *ec_dev,
87 for (i = 0; i < EC_COMMAND_RETRIES; i++) { 89 for (i = 0; i < EC_COMMAND_RETRIES; i++) {
88 usleep_range(10000, 11000); 90 usleep_range(10000, 11000);
89 91
90 ret = ec_dev->cmd_xfer(ec_dev, status_msg); 92 ret = (*xfer_fxn)(ec_dev, status_msg);
91 if (ret < 0) 93 if (ret < 0)
92 break; 94 break;
93 95
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index f3baf9973989..24f1630a8b3f 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -187,7 +187,7 @@ static ssize_t show_ec_version(struct device *dev,
187 count += scnprintf(buf + count, PAGE_SIZE - count, 187 count += scnprintf(buf + count, PAGE_SIZE - count,
188 "Build info: EC error %d\n", msg->result); 188 "Build info: EC error %d\n", msg->result);
189 else { 189 else {
190 msg->data[sizeof(msg->data) - 1] = '\0'; 190 msg->data[EC_HOST_PARAM_SIZE - 1] = '\0';
191 count += scnprintf(buf + count, PAGE_SIZE - count, 191 count += scnprintf(buf + count, PAGE_SIZE - count,
192 "Build info: %s\n", msg->data); 192 "Build info: %s\n", msg->data);
193 } 193 }
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 1089eaa02b00..988ebe9a6b90 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -95,6 +95,7 @@ config DELL_LAPTOP
95 tristate "Dell Laptop Extras" 95 tristate "Dell Laptop Extras"
96 depends on X86 96 depends on X86
97 depends on DCDBAS 97 depends on DCDBAS
98 depends on DMI
98 depends on BACKLIGHT_CLASS_DEVICE 99 depends on BACKLIGHT_CLASS_DEVICE
99 depends on ACPI_VIDEO || ACPI_VIDEO = n 100 depends on ACPI_VIDEO || ACPI_VIDEO = n
100 depends on RFKILL || RFKILL = n 101 depends on RFKILL || RFKILL = n
@@ -110,6 +111,7 @@ config DELL_LAPTOP
110config DELL_WMI 111config DELL_WMI
111 tristate "Dell WMI extras" 112 tristate "Dell WMI extras"
112 depends on ACPI_WMI 113 depends on ACPI_WMI
114 depends on DMI
113 depends on INPUT 115 depends on INPUT
114 depends on ACPI_VIDEO || ACPI_VIDEO = n 116 depends on ACPI_VIDEO || ACPI_VIDEO = n
115 select INPUT_SPARSEKMAP 117 select INPUT_SPARSEKMAP
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index a3661cc44f86..0e0403e024c5 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -101,6 +101,15 @@ static const struct dmi_system_id asus_quirks[] = {
101 }, 101 },
102 { 102 {
103 .callback = dmi_matched, 103 .callback = dmi_matched,
104 .ident = "ASUSTeK COMPUTER INC. X302UA",
105 .matches = {
106 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
107 DMI_MATCH(DMI_PRODUCT_NAME, "X302UA"),
108 },
109 .driver_data = &quirk_asus_wapf4,
110 },
111 {
112 .callback = dmi_matched,
104 .ident = "ASUSTeK COMPUTER INC. X401U", 113 .ident = "ASUSTeK COMPUTER INC. X401U",
105 .matches = { 114 .matches = {
106 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 115 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 5c768c4627d3..78e1bfee698a 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -415,6 +415,7 @@ static struct thermal_device_info *initialize_sensor(int index)
415 return td_info; 415 return td_info;
416} 416}
417 417
418#ifdef CONFIG_PM_SLEEP
418/** 419/**
419 * mid_thermal_resume - resume routine 420 * mid_thermal_resume - resume routine
420 * @dev: device structure 421 * @dev: device structure
@@ -442,6 +443,7 @@ static int mid_thermal_suspend(struct device *dev)
442 */ 443 */
443 return configure_adc(0); 444 return configure_adc(0);
444} 445}
446#endif
445 447
446static SIMPLE_DEV_PM_OPS(mid_thermal_pm, 448static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
447 mid_thermal_suspend, mid_thermal_resume); 449 mid_thermal_suspend, mid_thermal_resume);
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 89aa976f0ab2..65b0a4845ddd 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -52,7 +52,9 @@ struct tc1100_data {
52 u32 jogdial; 52 u32 jogdial;
53}; 53};
54 54
55#ifdef CONFIG_PM
55static struct tc1100_data suspend_data; 56static struct tc1100_data suspend_data;
57#endif
56 58
57/* -------------------------------------------------------------------------- 59/* --------------------------------------------------------------------------
58 Device Management 60 Device Management
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 237d7aa73e8c..9f53fb74ae6f 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -159,6 +159,7 @@ config BATTERY_SBS
159 159
160config BATTERY_BQ27XXX 160config BATTERY_BQ27XXX
161 tristate "BQ27xxx battery driver" 161 tristate "BQ27xxx battery driver"
162 depends on I2C || I2C=n
162 help 163 help
163 Say Y here to enable support for batteries with BQ27xxx (I2C/HDQ) chips. 164 Say Y here to enable support for batteries with BQ27xxx (I2C/HDQ) chips.
164 165
diff --git a/drivers/power/bq27xxx_battery.c b/drivers/power/bq27xxx_battery.c
index 880233ce9343..6c3a447f378b 100644
--- a/drivers/power/bq27xxx_battery.c
+++ b/drivers/power/bq27xxx_battery.c
@@ -285,7 +285,7 @@ static u8 bq27421_regs[] = {
285 0x18, /* AP */ 285 0x18, /* AP */
286}; 286};
287 287
288static u8 *bq27xxx_regs[] = { 288static u8 *bq27xxx_regs[] __maybe_unused = {
289 [BQ27000] = bq27000_regs, 289 [BQ27000] = bq27000_regs,
290 [BQ27010] = bq27010_regs, 290 [BQ27010] = bq27010_regs,
291 [BQ27500] = bq27500_regs, 291 [BQ27500] = bq27500_regs,
@@ -991,7 +991,7 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
991 schedule_delayed_work(&di->work, 0); 991 schedule_delayed_work(&di->work, 0);
992} 992}
993 993
994static int bq27xxx_powersupply_init(struct bq27xxx_device_info *di, 994static int __maybe_unused bq27xxx_powersupply_init(struct bq27xxx_device_info *di,
995 const char *name) 995 const char *name)
996{ 996{
997 int ret; 997 int ret;
@@ -1026,7 +1026,7 @@ static int bq27xxx_powersupply_init(struct bq27xxx_device_info *di,
1026 return 0; 1026 return 0;
1027} 1027}
1028 1028
1029static void bq27xxx_powersupply_unregister(struct bq27xxx_device_info *di) 1029static void __maybe_unused bq27xxx_powersupply_unregister(struct bq27xxx_device_info *di)
1030{ 1030{
1031 /* 1031 /*
1032 * power_supply_unregister call bq27xxx_battery_get_property which 1032 * power_supply_unregister call bq27xxx_battery_get_property which
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index dfe1ee89f7c7..922a86787c5c 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -30,9 +30,9 @@ static inline unsigned int get_irq_flags(struct resource *res)
30static struct device *dev; 30static struct device *dev;
31static struct pda_power_pdata *pdata; 31static struct pda_power_pdata *pdata;
32static struct resource *ac_irq, *usb_irq; 32static struct resource *ac_irq, *usb_irq;
33static struct timer_list charger_timer; 33static struct delayed_work charger_work;
34static struct timer_list supply_timer; 34static struct delayed_work polling_work;
35static struct timer_list polling_timer; 35static struct delayed_work supply_work;
36static int polling; 36static int polling;
37static struct power_supply *pda_psy_ac, *pda_psy_usb; 37static struct power_supply *pda_psy_ac, *pda_psy_usb;
38 38
@@ -140,7 +140,7 @@ static void update_charger(void)
140 } 140 }
141} 141}
142 142
143static void supply_timer_func(unsigned long unused) 143static void supply_work_func(struct work_struct *work)
144{ 144{
145 if (ac_status == PDA_PSY_TO_CHANGE) { 145 if (ac_status == PDA_PSY_TO_CHANGE) {
146 ac_status = new_ac_status; 146 ac_status = new_ac_status;
@@ -161,11 +161,12 @@ static void psy_changed(void)
161 * Okay, charger set. Now wait a bit before notifying supplicants, 161 * Okay, charger set. Now wait a bit before notifying supplicants,
162 * charge power should stabilize. 162 * charge power should stabilize.
163 */ 163 */
164 mod_timer(&supply_timer, 164 cancel_delayed_work(&supply_work);
165 jiffies + msecs_to_jiffies(pdata->wait_for_charger)); 165 schedule_delayed_work(&supply_work,
166 msecs_to_jiffies(pdata->wait_for_charger));
166} 167}
167 168
168static void charger_timer_func(unsigned long unused) 169static void charger_work_func(struct work_struct *work)
169{ 170{
170 update_status(); 171 update_status();
171 psy_changed(); 172 psy_changed();
@@ -184,13 +185,14 @@ static irqreturn_t power_changed_isr(int irq, void *power_supply)
184 * Wait a bit before reading ac/usb line status and setting charger, 185 * Wait a bit before reading ac/usb line status and setting charger,
185 * because ac/usb status readings may lag from irq. 186 * because ac/usb status readings may lag from irq.
186 */ 187 */
187 mod_timer(&charger_timer, 188 cancel_delayed_work(&charger_work);
188 jiffies + msecs_to_jiffies(pdata->wait_for_status)); 189 schedule_delayed_work(&charger_work,
190 msecs_to_jiffies(pdata->wait_for_status));
189 191
190 return IRQ_HANDLED; 192 return IRQ_HANDLED;
191} 193}
192 194
193static void polling_timer_func(unsigned long unused) 195static void polling_work_func(struct work_struct *work)
194{ 196{
195 int changed = 0; 197 int changed = 0;
196 198
@@ -211,8 +213,9 @@ static void polling_timer_func(unsigned long unused)
211 if (changed) 213 if (changed)
212 psy_changed(); 214 psy_changed();
213 215
214 mod_timer(&polling_timer, 216 cancel_delayed_work(&polling_work);
215 jiffies + msecs_to_jiffies(pdata->polling_interval)); 217 schedule_delayed_work(&polling_work,
218 msecs_to_jiffies(pdata->polling_interval));
216} 219}
217 220
218#if IS_ENABLED(CONFIG_USB_PHY) 221#if IS_ENABLED(CONFIG_USB_PHY)
@@ -250,8 +253,9 @@ static int otg_handle_notification(struct notifier_block *nb,
250 * Wait a bit before reading ac/usb line status and setting charger, 253 * Wait a bit before reading ac/usb line status and setting charger,
251 * because ac/usb status readings may lag from irq. 254 * because ac/usb status readings may lag from irq.
252 */ 255 */
253 mod_timer(&charger_timer, 256 cancel_delayed_work(&charger_work);
254 jiffies + msecs_to_jiffies(pdata->wait_for_status)); 257 schedule_delayed_work(&charger_work,
258 msecs_to_jiffies(pdata->wait_for_status));
255 259
256 return NOTIFY_OK; 260 return NOTIFY_OK;
257} 261}
@@ -300,8 +304,8 @@ static int pda_power_probe(struct platform_device *pdev)
300 if (!pdata->ac_max_uA) 304 if (!pdata->ac_max_uA)
301 pdata->ac_max_uA = 500000; 305 pdata->ac_max_uA = 500000;
302 306
303 setup_timer(&charger_timer, charger_timer_func, 0); 307 INIT_DELAYED_WORK(&charger_work, charger_work_func);
304 setup_timer(&supply_timer, supply_timer_func, 0); 308 INIT_DELAYED_WORK(&supply_work, supply_work_func);
305 309
306 ac_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ac"); 310 ac_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ac");
307 usb_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "usb"); 311 usb_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "usb");
@@ -385,9 +389,10 @@ static int pda_power_probe(struct platform_device *pdev)
385 389
386 if (polling) { 390 if (polling) {
387 dev_dbg(dev, "will poll for status\n"); 391 dev_dbg(dev, "will poll for status\n");
388 setup_timer(&polling_timer, polling_timer_func, 0); 392 INIT_DELAYED_WORK(&polling_work, polling_work_func);
389 mod_timer(&polling_timer, 393 cancel_delayed_work(&polling_work);
390 jiffies + msecs_to_jiffies(pdata->polling_interval)); 394 schedule_delayed_work(&polling_work,
395 msecs_to_jiffies(pdata->polling_interval));
391 } 396 }
392 397
393 if (ac_irq || usb_irq) 398 if (ac_irq || usb_irq)
@@ -433,9 +438,9 @@ static int pda_power_remove(struct platform_device *pdev)
433 free_irq(ac_irq->start, pda_psy_ac); 438 free_irq(ac_irq->start, pda_psy_ac);
434 439
435 if (polling) 440 if (polling)
436 del_timer_sync(&polling_timer); 441 cancel_delayed_work_sync(&polling_work);
437 del_timer_sync(&charger_timer); 442 cancel_delayed_work_sync(&charger_work);
438 del_timer_sync(&supply_timer); 443 cancel_delayed_work_sync(&supply_work);
439 444
440 if (pdata->is_usb_online) 445 if (pdata->is_usb_online)
441 power_supply_unregister(pda_psy_usb); 446 power_supply_unregister(pda_psy_usb);
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
index a5b009673d0e..5eb719e73e9e 100644
--- a/drivers/power/reset/zx-reboot.c
+++ b/drivers/power/reset/zx-reboot.c
@@ -78,3 +78,7 @@ static struct platform_driver zx_reboot_driver = {
78 }, 78 },
79}; 79};
80module_platform_driver(zx_reboot_driver); 80module_platform_driver(zx_reboot_driver);
81
82MODULE_DESCRIPTION("ZTE SoCs reset driver");
83MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
84MODULE_LICENSE("GPL v2");
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 84419af16f77..fd12ccc11e26 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone(
538 538
539 power_zone->id = result; 539 power_zone->id = result;
540 idr_init(&power_zone->idr); 540 idr_init(&power_zone->idr);
541 result = -ENOMEM;
541 power_zone->name = kstrdup(name, GFP_KERNEL); 542 power_zone->name = kstrdup(name, GFP_KERNEL);
542 if (!power_zone->name) 543 if (!power_zone->name)
543 goto err_name_alloc; 544 goto err_name_alloc;
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index da7bae991552..d877ff124365 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -88,6 +88,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
88 case PTP_PF_PHYSYNC: 88 case PTP_PF_PHYSYNC:
89 if (chan != 0) 89 if (chan != 0)
90 return -EINVAL; 90 return -EINVAL;
91 break;
91 default: 92 default:
92 return -EINVAL; 93 return -EINVAL;
93 } 94 }
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 2e481b9e8ea5..60a5e0c63a13 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -97,30 +97,26 @@ static s32 scaled_ppm_to_ppb(long ppm)
97 97
98/* posix clock implementation */ 98/* posix clock implementation */
99 99
100static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp) 100static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
101{ 101{
102 tp->tv_sec = 0; 102 tp->tv_sec = 0;
103 tp->tv_nsec = 1; 103 tp->tv_nsec = 1;
104 return 0; 104 return 0;
105} 105}
106 106
107static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp) 107static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
108{ 108{
109 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); 109 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
110 struct timespec64 ts = timespec_to_timespec64(*tp);
111 110
112 return ptp->info->settime64(ptp->info, &ts); 111 return ptp->info->settime64(ptp->info, tp);
113} 112}
114 113
115static int ptp_clock_gettime(struct posix_clock *pc, struct timespec *tp) 114static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
116{ 115{
117 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); 116 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
118 struct timespec64 ts;
119 int err; 117 int err;
120 118
121 err = ptp->info->gettime64(ptp->info, &ts); 119 err = ptp->info->gettime64(ptp->info, tp);
122 if (!err)
123 *tp = timespec64_to_timespec(ts);
124 return err; 120 return err;
125} 121}
126 122
@@ -133,7 +129,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
133 ops = ptp->info; 129 ops = ptp->info;
134 130
135 if (tx->modes & ADJ_SETOFFSET) { 131 if (tx->modes & ADJ_SETOFFSET) {
136 struct timespec ts; 132 struct timespec64 ts;
137 ktime_t kt; 133 ktime_t kt;
138 s64 delta; 134 s64 delta;
139 135
@@ -146,7 +142,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
146 if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC) 142 if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
147 return -EINVAL; 143 return -EINVAL;
148 144
149 kt = timespec_to_ktime(ts); 145 kt = timespec64_to_ktime(ts);
150 delta = ktime_to_ns(kt); 146 delta = ktime_to_ns(kt);
151 err = ops->adjtime(ops, delta); 147 err = ops->adjtime(ops, delta);
152 } else if (tx->modes & ADJ_FREQUENCY) { 148 } else if (tx->modes & ADJ_FREQUENCY) {
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index d4de0607b502..3039fb762893 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -69,6 +69,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
69 struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip); 69 struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
70 unsigned long long c; 70 unsigned long long c;
71 unsigned long rate, hz; 71 unsigned long rate, hz;
72 unsigned long long ns100 = NSEC_PER_SEC;
72 u32 val = 0; 73 u32 val = 0;
73 int err; 74 int err;
74 75
@@ -87,9 +88,11 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
87 * cycles at the PWM clock rate will take period_ns nanoseconds. 88 * cycles at the PWM clock rate will take period_ns nanoseconds.
88 */ 89 */
89 rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH; 90 rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH;
90 hz = NSEC_PER_SEC / period_ns;
91 91
92 rate = (rate + (hz / 2)) / hz; 92 /* Consider precision in PWM_SCALE_WIDTH rate calculation */
93 ns100 *= 100;
94 hz = DIV_ROUND_CLOSEST_ULL(ns100, period_ns);
95 rate = DIV_ROUND_CLOSEST(rate * 100, hz);
93 96
94 /* 97 /*
95 * Since the actual PWM divider is the register's frequency divider 98 * Since the actual PWM divider is the register's frequency divider
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 3a6d0290c54c..c5e272ea4372 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -296,6 +296,11 @@ static int anatop_regulator_probe(struct platform_device *pdev)
296 if (!sreg->sel && !strcmp(sreg->name, "vddpu")) 296 if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
297 sreg->sel = 22; 297 sreg->sel = 22;
298 298
299 /* set the default voltage of the pcie phy to be 1.100v */
300 if (!sreg->sel && rdesc->name &&
301 !strcmp(rdesc->name, "vddpcie"))
302 sreg->sel = 0x10;
303
299 if (!sreg->bypass && !sreg->sel) { 304 if (!sreg->bypass && !sreg->sel) {
300 dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n"); 305 dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
301 return -EINVAL; 306 return -EINVAL;
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 3fe18da3ad52..8132b0e66932 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -277,6 +277,7 @@ int of_regulator_match(struct device *dev, struct device_node *node,
277 dev_err(dev, 277 dev_err(dev,
278 "failed to parse DT for regulator %s\n", 278 "failed to parse DT for regulator %s\n",
279 child->name); 279 child->name);
280 of_node_put(child);
280 return -EINVAL; 281 return -EINVAL;
281 } 282 }
282 match->of_node = of_node_get(child); 283 match->of_node = of_node_get(child);
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 2a44e5dd9c2a..c68556bf6f39 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -152,6 +152,7 @@ static struct regulator_ops pfuze100_sw_regulator_ops = {
152static struct regulator_ops pfuze100_swb_regulator_ops = { 152static struct regulator_ops pfuze100_swb_regulator_ops = {
153 .enable = regulator_enable_regmap, 153 .enable = regulator_enable_regmap,
154 .disable = regulator_disable_regmap, 154 .disable = regulator_disable_regmap,
155 .is_enabled = regulator_is_enabled_regmap,
155 .list_voltage = regulator_list_voltage_table, 156 .list_voltage = regulator_list_voltage_table,
156 .map_voltage = regulator_map_voltage_ascend, 157 .map_voltage = regulator_map_voltage_ascend,
157 .set_voltage_sel = regulator_set_voltage_sel_regmap, 158 .set_voltage_sel = regulator_set_voltage_sel_regmap,
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index e1cfa06810ef..e79f2a181ad2 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -49,6 +49,11 @@ static int __init rtc_hctosys(void)
49 49
50 tv64.tv_sec = rtc_tm_to_time64(&tm); 50 tv64.tv_sec = rtc_tm_to_time64(&tm);
51 51
52#if BITS_PER_LONG == 32
53 if (tv64.tv_sec > INT_MAX)
54 goto err_read;
55#endif
56
52 err = do_settimeofday64(&tv64); 57 err = do_settimeofday64(&tv64);
53 58
54 dev_info(rtc->dev.parent, 59 dev_info(rtc->dev.parent,
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 733d85686639..6e8fa60c1aa2 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -217,6 +217,13 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
217 missing = year; 217 missing = year;
218 } 218 }
219 219
220 /* Can't proceed if alarm is still invalid after replacing
221 * missing fields.
222 */
223 err = rtc_valid_tm(&alarm->time);
224 if (err)
225 goto done;
226
220 /* with luck, no rollover is needed */ 227 /* with luck, no rollover is needed */
221 t_now = rtc_tm_to_time64(&now); 228 t_now = rtc_tm_to_time64(&now);
222 t_alm = rtc_tm_to_time64(&alarm->time); 229 t_alm = rtc_tm_to_time64(&alarm->time);
@@ -268,9 +275,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
268 dev_warn(&rtc->dev, "alarm rollover not handled\n"); 275 dev_warn(&rtc->dev, "alarm rollover not handled\n");
269 } 276 }
270 277
271done:
272 err = rtc_valid_tm(&alarm->time); 278 err = rtc_valid_tm(&alarm->time);
273 279
280done:
274 if (err) { 281 if (err) {
275 dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n", 282 dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n",
276 alarm->time.tm_year + 1900, alarm->time.tm_mon + 1, 283 alarm->time.tm_year + 1900, alarm->time.tm_mon + 1,
@@ -342,6 +349,11 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
342{ 349{
343 int err; 350 int err;
344 351
352 if (!rtc->ops)
353 return -ENODEV;
354 else if (!rtc->ops->set_alarm)
355 return -EINVAL;
356
345 err = rtc_valid_tm(&alarm->time); 357 err = rtc_valid_tm(&alarm->time);
346 if (err != 0) 358 if (err != 0)
347 return err; 359 return err;
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 8f7034ba7d9e..86015b393dd5 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -41,6 +41,9 @@
41#include <linux/pm.h> 41#include <linux/pm.h>
42#include <linux/of.h> 42#include <linux/of.h>
43#include <linux/of_platform.h> 43#include <linux/of_platform.h>
44#ifdef CONFIG_X86
45#include <asm/i8259.h>
46#endif
44 47
45/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ 48/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
46#include <asm-generic/rtc.h> 49#include <asm-generic/rtc.h>
@@ -1058,17 +1061,23 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
1058{ 1061{
1059 cmos_wake_setup(&pnp->dev); 1062 cmos_wake_setup(&pnp->dev);
1060 1063
1061 if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) 1064 if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) {
1065 unsigned int irq = 0;
1066#ifdef CONFIG_X86
1062 /* Some machines contain a PNP entry for the RTC, but 1067 /* Some machines contain a PNP entry for the RTC, but
1063 * don't define the IRQ. It should always be safe to 1068 * don't define the IRQ. It should always be safe to
1064 * hardcode it in these cases 1069 * hardcode it on systems with a legacy PIC.
1065 */ 1070 */
1071 if (nr_legacy_irqs())
1072 irq = 8;
1073#endif
1066 return cmos_do_probe(&pnp->dev, 1074 return cmos_do_probe(&pnp->dev,
1067 pnp_get_resource(pnp, IORESOURCE_IO, 0), 8); 1075 pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
1068 else 1076 } else {
1069 return cmos_do_probe(&pnp->dev, 1077 return cmos_do_probe(&pnp->dev,
1070 pnp_get_resource(pnp, IORESOURCE_IO, 0), 1078 pnp_get_resource(pnp, IORESOURCE_IO, 0),
1071 pnp_irq(pnp, 0)); 1079 pnp_irq(pnp, 0));
1080 }
1072} 1081}
1073 1082
1074static void __exit cmos_pnp_remove(struct pnp_dev *pnp) 1083static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 3b3049c8c9e0..c0eb113588ff 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -527,6 +527,10 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
527 if (get_user(new_margin, (int __user *)arg)) 527 if (get_user(new_margin, (int __user *)arg))
528 return -EFAULT; 528 return -EFAULT;
529 529
530 /* the hardware's tick rate is 4096 Hz, so
531 * the counter value needs to be scaled accordingly
532 */
533 new_margin <<= 12;
530 if (new_margin < 1 || new_margin > 16777216) 534 if (new_margin < 1 || new_margin > 16777216)
531 return -EINVAL; 535 return -EINVAL;
532 536
@@ -535,7 +539,8 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
535 ds1374_wdt_ping(); 539 ds1374_wdt_ping();
536 /* fallthrough */ 540 /* fallthrough */
537 case WDIOC_GETTIMEOUT: 541 case WDIOC_GETTIMEOUT:
538 return put_user(wdt_margin, (int __user *)arg); 542 /* when returning ... inverse is true */
543 return put_user((wdt_margin >> 12), (int __user *)arg);
539 case WDIOC_SETOPTIONS: 544 case WDIOC_SETOPTIONS:
540 if (copy_from_user(&options, (int __user *)arg, sizeof(int))) 545 if (copy_from_user(&options, (int __user *)arg, sizeof(int)))
541 return -EFAULT; 546 return -EFAULT;
@@ -543,14 +548,15 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
543 if (options & WDIOS_DISABLECARD) { 548 if (options & WDIOS_DISABLECARD) {
544 pr_info("disable watchdog\n"); 549 pr_info("disable watchdog\n");
545 ds1374_wdt_disable(); 550 ds1374_wdt_disable();
551 return 0;
546 } 552 }
547 553
548 if (options & WDIOS_ENABLECARD) { 554 if (options & WDIOS_ENABLECARD) {
549 pr_info("enable watchdog\n"); 555 pr_info("enable watchdog\n");
550 ds1374_wdt_settimeout(wdt_margin); 556 ds1374_wdt_settimeout(wdt_margin);
551 ds1374_wdt_ping(); 557 ds1374_wdt_ping();
558 return 0;
552 } 559 }
553
554 return -EINVAL; 560 return -EINVAL;
555 } 561 }
556 return -ENOTTY; 562 return -ENOTTY;
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index df39ce02a99d..c6b0c7ed7a30 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -58,6 +58,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
58static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) 58static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
59{ 59{
60 long rc = OPAL_BUSY; 60 long rc = OPAL_BUSY;
61 int retries = 10;
61 u32 y_m_d; 62 u32 y_m_d;
62 u64 h_m_s_ms; 63 u64 h_m_s_ms;
63 __be32 __y_m_d; 64 __be32 __y_m_d;
@@ -67,8 +68,11 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
67 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); 68 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
68 if (rc == OPAL_BUSY_EVENT) 69 if (rc == OPAL_BUSY_EVENT)
69 opal_poll_events(NULL); 70 opal_poll_events(NULL);
70 else 71 else if (retries-- && (rc == OPAL_HARDWARE
72 || rc == OPAL_INTERNAL_ERROR))
71 msleep(10); 73 msleep(10);
74 else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
75 break;
72 } 76 }
73 77
74 if (rc != OPAL_SUCCESS) 78 if (rc != OPAL_SUCCESS)
@@ -84,6 +88,7 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
84static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) 88static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
85{ 89{
86 long rc = OPAL_BUSY; 90 long rc = OPAL_BUSY;
91 int retries = 10;
87 u32 y_m_d = 0; 92 u32 y_m_d = 0;
88 u64 h_m_s_ms = 0; 93 u64 h_m_s_ms = 0;
89 94
@@ -92,8 +97,11 @@ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
92 rc = opal_rtc_write(y_m_d, h_m_s_ms); 97 rc = opal_rtc_write(y_m_d, h_m_s_ms);
93 if (rc == OPAL_BUSY_EVENT) 98 if (rc == OPAL_BUSY_EVENT)
94 opal_poll_events(NULL); 99 opal_poll_events(NULL);
95 else 100 else if (retries-- && (rc == OPAL_HARDWARE
101 || rc == OPAL_INTERNAL_ERROR))
96 msleep(10); 102 msleep(10);
103 else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
104 break;
97 } 105 }
98 106
99 return rc == OPAL_SUCCESS ? 0 : -EIO; 107 return rc == OPAL_SUCCESS ? 0 : -EIO;
@@ -142,6 +150,16 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
142 150
143 y_m_d = be32_to_cpu(__y_m_d); 151 y_m_d = be32_to_cpu(__y_m_d);
144 h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32); 152 h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32);
153
154 /* check if no alarm is set */
155 if (y_m_d == 0 && h_m_s_ms == 0) {
156 pr_debug("No alarm is set\n");
157 rc = -ENOENT;
158 goto exit;
159 } else {
160 pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms);
161 }
162
145 opal_to_tm(y_m_d, h_m_s_ms, &alarm->time); 163 opal_to_tm(y_m_d, h_m_s_ms, &alarm->time);
146 164
147exit: 165exit:
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 950c5d0b6dca..a161fbf6f172 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -132,20 +132,23 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
132{ 132{
133 struct snvs_rtc_data *data = dev_get_drvdata(dev); 133 struct snvs_rtc_data *data = dev_get_drvdata(dev);
134 unsigned long time; 134 unsigned long time;
135 int ret;
135 136
136 rtc_tm_to_time(tm, &time); 137 rtc_tm_to_time(tm, &time);
137 138
138 /* Disable RTC first */ 139 /* Disable RTC first */
139 snvs_rtc_enable(data, false); 140 ret = snvs_rtc_enable(data, false);
141 if (ret)
142 return ret;
140 143
141 /* Write 32-bit time to 47-bit timer, leaving 15 LSBs blank */ 144 /* Write 32-bit time to 47-bit timer, leaving 15 LSBs blank */
142 regmap_write(data->regmap, data->offset + SNVS_LPSRTCLR, time << CNTR_TO_SECS_SH); 145 regmap_write(data->regmap, data->offset + SNVS_LPSRTCLR, time << CNTR_TO_SECS_SH);
143 regmap_write(data->regmap, data->offset + SNVS_LPSRTCMR, time >> (32 - CNTR_TO_SECS_SH)); 146 regmap_write(data->regmap, data->offset + SNVS_LPSRTCMR, time >> (32 - CNTR_TO_SECS_SH));
144 147
145 /* Enable RTC again */ 148 /* Enable RTC again */
146 snvs_rtc_enable(data, true); 149 ret = snvs_rtc_enable(data, true);
147 150
148 return 0; 151 return ret;
149} 152}
150 153
151static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) 154static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -257,7 +260,7 @@ static int snvs_rtc_probe(struct platform_device *pdev)
257 of_property_read_u32(pdev->dev.of_node, "offset", &data->offset); 260 of_property_read_u32(pdev->dev.of_node, "offset", &data->offset);
258 } 261 }
259 262
260 if (!data->regmap) { 263 if (IS_ERR(data->regmap)) {
261 dev_err(&pdev->dev, "Can't find snvs syscon\n"); 264 dev_err(&pdev->dev, "Can't find snvs syscon\n");
262 return -ENODEV; 265 return -ENODEV;
263 } 266 }
@@ -287,7 +290,11 @@ static int snvs_rtc_probe(struct platform_device *pdev)
287 regmap_write(data->regmap, data->offset + SNVS_LPSR, 0xffffffff); 290 regmap_write(data->regmap, data->offset + SNVS_LPSR, 0xffffffff);
288 291
289 /* Enable RTC */ 292 /* Enable RTC */
290 snvs_rtc_enable(data, true); 293 ret = snvs_rtc_enable(data, true);
294 if (ret) {
295 dev_err(&pdev->dev, "failed to enable rtc %d\n", ret);
296 goto error_rtc_device_register;
297 }
291 298
292 device_init_wakeup(&pdev->dev, true); 299 device_init_wakeup(&pdev->dev, true);
293 300
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 560d9a5e0225..a9528083061d 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -86,7 +86,8 @@ static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm)
86 for (i = 2; i < 6; i++) 86 for (i = 2; i < 6; i++)
87 buf[i] = __raw_readl(&rtcreg->dat); 87 buf[i] = __raw_readl(&rtcreg->dat);
88 spin_unlock_irq(&pdata->lock); 88 spin_unlock_irq(&pdata->lock);
89 sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2]; 89 sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
90 (buf[3] << 8) | buf[2];
90 rtc_time_to_tm(sec, tm); 91 rtc_time_to_tm(sec, tm);
91 return rtc_valid_tm(tm); 92 return rtc_valid_tm(tm);
92} 93}
@@ -147,7 +148,8 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
147 alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0; 148 alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0;
148 alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0; 149 alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0;
149 spin_unlock_irq(&pdata->lock); 150 spin_unlock_irq(&pdata->lock);
150 sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2]; 151 sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
152 (buf[3] << 8) | buf[2];
151 rtc_time_to_tm(sec, &alrm->time); 153 rtc_time_to_tm(sec, &alrm->time);
152 return rtc_valid_tm(&alrm->time); 154 return rtc_valid_tm(&alrm->time);
153} 155}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e7a6f1222642..b76a85d14ef0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1881,8 +1881,12 @@ static int __dasd_device_is_unusable(struct dasd_device *device,
1881{ 1881{
1882 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); 1882 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
1883 1883
1884 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 1884 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
1885 /* dasd is being set offline. */ 1885 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
1886 /*
1887 * dasd is being set offline
1888 * but it is no safe offline where we have to allow I/O
1889 */
1886 return 1; 1890 return 1;
1887 } 1891 }
1888 if (device->stopped) { 1892 if (device->stopped) {
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index d26134713682..d05c553eb552 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2743,6 +2743,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2743 erp = dasd_3990_erp_handle_match_erp(cqr, erp); 2743 erp = dasd_3990_erp_handle_match_erp(cqr, erp);
2744 } 2744 }
2745 2745
2746
2747 /*
2748 * For path verification work we need to stick with the path that was
2749 * originally chosen so that the per path configuration data is
2750 * assigned correctly.
2751 */
2752 if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
2753 erp->lpm = cqr->lpm;
2754 }
2755
2746 if (device->features & DASD_FEATURE_ERPLOG) { 2756 if (device->features & DASD_FEATURE_ERPLOG) {
2747 /* print current erp_chain */ 2757 /* print current erp_chain */
2748 dev_err(&device->cdev->dev, 2758 dev_err(&device->cdev->dev,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 9083247f55a8..21d174e9ebdb 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -518,10 +518,12 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
518 pfxdata->validity.define_extent = 1; 518 pfxdata->validity.define_extent = 1;
519 519
520 /* private uid is kept up to date, conf_data may be outdated */ 520 /* private uid is kept up to date, conf_data may be outdated */
521 if (startpriv->uid.type != UA_BASE_DEVICE) { 521 if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
522 pfxdata->validity.verify_base = 1; 522 pfxdata->validity.verify_base = 1;
523 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) 523
524 pfxdata->validity.hyper_pav = 1; 524 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
525 pfxdata->validity.verify_base = 1;
526 pfxdata->validity.hyper_pav = 1;
525 } 527 }
526 528
527 /* define extend data (mostly)*/ 529 /* define extend data (mostly)*/
@@ -3002,10 +3004,12 @@ static int prepare_itcw(struct itcw *itcw,
3002 pfxdata.validity.define_extent = 1; 3004 pfxdata.validity.define_extent = 1;
3003 3005
3004 /* private uid is kept up to date, conf_data may be outdated */ 3006 /* private uid is kept up to date, conf_data may be outdated */
3005 if (startpriv->uid.type != UA_BASE_DEVICE) { 3007 if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
3008 pfxdata.validity.verify_base = 1;
3009
3010 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
3006 pfxdata.validity.verify_base = 1; 3011 pfxdata.validity.verify_base = 1;
3007 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) 3012 pfxdata.validity.hyper_pav = 1;
3008 pfxdata.validity.hyper_pav = 1;
3009 } 3013 }
3010 3014
3011 switch (cmd) { 3015 switch (cmd) {
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 6fa9364d1c07..835f1054976b 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,6 +2,8 @@
2# S/390 character devices 2# S/390 character devices
3# 3#
4 4
5CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
6
5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ 7obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
6 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ 8 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
7 sclp_early.o 9 sclp_early.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1e16331891a9..f9d6a9f00640 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -451,6 +451,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
451 451
452static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) 452static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
453{ 453{
454 struct channel_path *chp;
454 struct chp_link link; 455 struct chp_link link;
455 struct chp_id chpid; 456 struct chp_id chpid;
456 int status; 457 int status;
@@ -463,10 +464,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
463 chpid.id = sei_area->rsid; 464 chpid.id = sei_area->rsid;
464 /* allocate a new channel path structure, if needed */ 465 /* allocate a new channel path structure, if needed */
465 status = chp_get_status(chpid); 466 status = chp_get_status(chpid);
466 if (status < 0) 467 if (!status)
467 chp_new(chpid);
468 else if (!status)
469 return; 468 return;
469
470 if (status < 0) {
471 chp_new(chpid);
472 } else {
473 chp = chpid_to_chp(chpid);
474 mutex_lock(&chp->lock);
475 chp_update_desc(chp);
476 mutex_unlock(&chp->lock);
477 }
470 memset(&link, 0, sizeof(struct chp_link)); 478 memset(&link, 0, sizeof(struct chp_link));
471 link.chpid = chpid; 479 link.chpid = chpid;
472 if ((sei_area->vf & 0xc0) != 0) { 480 if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 92e03b42e661..3fc73b5894f0 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -822,6 +822,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
822 822
823 ccw_device_set_timeout(cdev, 0); 823 ccw_device_set_timeout(cdev, 0);
824 cdev->private->iretry = 255; 824 cdev->private->iretry = 255;
825 cdev->private->async_kill_io_rc = -ETIMEDOUT;
825 ret = ccw_device_cancel_halt_clear(cdev); 826 ret = ccw_device_cancel_halt_clear(cdev);
826 if (ret == -EBUSY) { 827 if (ret == -EBUSY) {
827 ccw_device_set_timeout(cdev, 3*HZ); 828 ccw_device_set_timeout(cdev, 3*HZ);
@@ -898,7 +899,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
898 /* OK, i/o is dead now. Call interrupt handler. */ 899 /* OK, i/o is dead now. Call interrupt handler. */
899 if (cdev->handler) 900 if (cdev->handler)
900 cdev->handler(cdev, cdev->private->intparm, 901 cdev->handler(cdev, cdev->private->intparm,
901 ERR_PTR(-EIO)); 902 ERR_PTR(cdev->private->async_kill_io_rc));
902} 903}
903 904
904static void 905static void
@@ -915,14 +916,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
915 ccw_device_online_verify(cdev, 0); 916 ccw_device_online_verify(cdev, 0);
916 if (cdev->handler) 917 if (cdev->handler)
917 cdev->handler(cdev, cdev->private->intparm, 918 cdev->handler(cdev, cdev->private->intparm,
918 ERR_PTR(-EIO)); 919 ERR_PTR(cdev->private->async_kill_io_rc));
919} 920}
920 921
921void ccw_device_kill_io(struct ccw_device *cdev) 922void ccw_device_kill_io(struct ccw_device *cdev)
922{ 923{
923 int ret; 924 int ret;
924 925
926 ccw_device_set_timeout(cdev, 0);
925 cdev->private->iretry = 255; 927 cdev->private->iretry = 255;
928 cdev->private->async_kill_io_rc = -EIO;
926 ret = ccw_device_cancel_halt_clear(cdev); 929 ret = ccw_device_cancel_halt_clear(cdev);
927 if (ret == -EBUSY) { 930 if (ret == -EBUSY) {
928 ccw_device_set_timeout(cdev, 3*HZ); 931 ccw_device_set_timeout(cdev, 3*HZ);
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index b108f4a5c7dd..b142c7a389b7 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -155,6 +155,7 @@ struct ccw_device_private {
155 unsigned long intparm; /* user interruption parameter */ 155 unsigned long intparm; /* user interruption parameter */
156 struct qdio_irq *qdio_data; 156 struct qdio_irq *qdio_data;
157 struct irb irb; /* device status */ 157 struct irb irb; /* device status */
158 int async_kill_io_rc;
158 struct senseid senseid; /* SenseID info */ 159 struct senseid senseid; /* SenseID info */
159 struct pgid pgid[8]; /* path group IDs per chpid*/ 160 struct pgid pgid[8]; /* path group IDs per chpid*/
160 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ 161 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4bb5262f7aee..742ca57ece8c 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -126,7 +126,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
126static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, 126static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
127 int start, int count, int auto_ack) 127 int start, int count, int auto_ack)
128{ 128{
129 int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0; 129 int rc, tmp_count = count, tmp_start = start, nr = q->nr;
130 unsigned int ccq = 0; 130 unsigned int ccq = 0;
131 131
132 qperf_inc(q, eqbs); 132 qperf_inc(q, eqbs);
@@ -149,14 +149,7 @@ again:
149 qperf_inc(q, eqbs_partial); 149 qperf_inc(q, eqbs_partial);
150 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", 150 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
151 tmp_count); 151 tmp_count);
152 /* 152 return count - tmp_count;
153 * Retry once, if that fails bail out and process the
154 * extracted buffers before trying again.
155 */
156 if (!retried++)
157 goto again;
158 else
159 return count - tmp_count;
160 } 153 }
161 154
162 DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); 155 DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
@@ -212,7 +205,10 @@ again:
212 return 0; 205 return 0;
213} 206}
214 207
215/* returns number of examined buffers and their common state in *state */ 208/*
209 * Returns number of examined buffers and their common state in *state.
210 * Requested number of buffers-to-examine must be > 0.
211 */
216static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, 212static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
217 unsigned char *state, unsigned int count, 213 unsigned char *state, unsigned int count,
218 int auto_ack, int merge_pending) 214 int auto_ack, int merge_pending)
@@ -223,17 +219,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
223 if (is_qebsm(q)) 219 if (is_qebsm(q))
224 return qdio_do_eqbs(q, state, bufnr, count, auto_ack); 220 return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
225 221
226 for (i = 0; i < count; i++) { 222 /* get initial state: */
227 if (!__state) { 223 __state = q->slsb.val[bufnr];
228 __state = q->slsb.val[bufnr]; 224 if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
229 if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) 225 __state = SLSB_P_OUTPUT_EMPTY;
230 __state = SLSB_P_OUTPUT_EMPTY; 226
231 } else if (merge_pending) { 227 for (i = 1; i < count; i++) {
232 if ((q->slsb.val[bufnr] & __state) != __state)
233 break;
234 } else if (q->slsb.val[bufnr] != __state)
235 break;
236 bufnr = next_buf(bufnr); 228 bufnr = next_buf(bufnr);
229
230 /* merge PENDING into EMPTY: */
231 if (merge_pending &&
232 q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
233 __state == SLSB_P_OUTPUT_EMPTY)
234 continue;
235
236 /* stop if next state differs from initial state: */
237 if (q->slsb.val[bufnr] != __state)
238 break;
237 } 239 }
238 *state = __state; 240 *state = __state;
239 return i; 241 return i;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 48b3866a9ded..35286907c636 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -140,7 +140,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
140 int i; 140 int i;
141 141
142 for (i = 0; i < nr_queues; i++) { 142 for (i = 0; i < nr_queues; i++) {
143 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); 143 q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
144 if (!q) 144 if (!q)
145 return -ENOMEM; 145 return -ENOMEM;
146 146
@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
456{ 456{
457 struct ciw *ciw; 457 struct ciw *ciw;
458 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; 458 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
459 int rc;
460 459
461 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); 460 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
462 memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); 461 memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
493 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); 492 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
494 if (!ciw) { 493 if (!ciw) {
495 DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); 494 DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
496 rc = -EINVAL; 495 return -EINVAL;
497 goto out_err;
498 } 496 }
499 irq_ptr->equeue = *ciw; 497 irq_ptr->equeue = *ciw;
500 498
501 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); 499 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
502 if (!ciw) { 500 if (!ciw) {
503 DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); 501 DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
504 rc = -EINVAL; 502 return -EINVAL;
505 goto out_err;
506 } 503 }
507 irq_ptr->aqueue = *ciw; 504 irq_ptr->aqueue = *ciw;
508 505
@@ -510,9 +507,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
510 irq_ptr->orig_handler = init_data->cdev->handler; 507 irq_ptr->orig_handler = init_data->cdev->handler;
511 init_data->cdev->handler = qdio_int_handler; 508 init_data->cdev->handler = qdio_int_handler;
512 return 0; 509 return 0;
513out_err:
514 qdio_release_memory(irq_ptr);
515 return rc;
516} 510}
517 511
518void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, 512void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 5006cb6ce62d..50030cdf91fb 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -591,6 +591,11 @@ struct qeth_cmd_buffer {
591 void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *); 591 void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
592}; 592};
593 593
594static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
595{
596 return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
597}
598
594/** 599/**
595 * definition of a qeth channel, used for read and write 600 * definition of a qeth channel, used for read and write
596 */ 601 */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e5b9506698b1..95c631125a20 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -517,8 +517,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
517 queue == card->qdio.no_in_queues - 1; 517 queue == card->qdio.no_in_queues - 1;
518} 518}
519 519
520 520static int __qeth_issue_next_read(struct qeth_card *card)
521static int qeth_issue_next_read(struct qeth_card *card)
522{ 521{
523 int rc; 522 int rc;
524 struct qeth_cmd_buffer *iob; 523 struct qeth_cmd_buffer *iob;
@@ -549,6 +548,17 @@ static int qeth_issue_next_read(struct qeth_card *card)
549 return rc; 548 return rc;
550} 549}
551 550
551static int qeth_issue_next_read(struct qeth_card *card)
552{
553 int ret;
554
555 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
556 ret = __qeth_issue_next_read(card);
557 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
558
559 return ret;
560}
561
552static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) 562static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
553{ 563{
554 struct qeth_reply *reply; 564 struct qeth_reply *reply;
@@ -952,7 +962,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
952 spin_lock_irqsave(&card->thread_mask_lock, flags); 962 spin_lock_irqsave(&card->thread_mask_lock, flags);
953 card->thread_running_mask &= ~thread; 963 card->thread_running_mask &= ~thread;
954 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 964 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
955 wake_up(&card->wait_q); 965 wake_up_all(&card->wait_q);
956} 966}
957EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); 967EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
958 968
@@ -1156,6 +1166,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1156 } 1166 }
1157 rc = qeth_get_problem(cdev, irb); 1167 rc = qeth_get_problem(cdev, irb);
1158 if (rc) { 1168 if (rc) {
1169 card->read_or_write_problem = 1;
1159 qeth_clear_ipacmd_list(card); 1170 qeth_clear_ipacmd_list(card);
1160 qeth_schedule_recovery(card); 1171 qeth_schedule_recovery(card);
1161 goto out; 1172 goto out;
@@ -1174,7 +1185,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1174 return; 1185 return;
1175 if (channel == &card->read && 1186 if (channel == &card->read &&
1176 channel->state == CH_STATE_UP) 1187 channel->state == CH_STATE_UP)
1177 qeth_issue_next_read(card); 1188 __qeth_issue_next_read(card);
1178 1189
1179 iob = channel->iob; 1190 iob = channel->iob;
1180 index = channel->buf_no; 1191 index = channel->buf_no;
@@ -2054,7 +2065,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2054 unsigned long flags; 2065 unsigned long flags;
2055 struct qeth_reply *reply = NULL; 2066 struct qeth_reply *reply = NULL;
2056 unsigned long timeout, event_timeout; 2067 unsigned long timeout, event_timeout;
2057 struct qeth_ipa_cmd *cmd; 2068 struct qeth_ipa_cmd *cmd = NULL;
2058 2069
2059 QETH_CARD_TEXT(card, 2, "sendctl"); 2070 QETH_CARD_TEXT(card, 2, "sendctl");
2060 2071
@@ -2068,23 +2079,27 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2068 } 2079 }
2069 reply->callback = reply_cb; 2080 reply->callback = reply_cb;
2070 reply->param = reply_param; 2081 reply->param = reply_param;
2071 if (card->state == CARD_STATE_DOWN) 2082
2072 reply->seqno = QETH_IDX_COMMAND_SEQNO;
2073 else
2074 reply->seqno = card->seqno.ipa++;
2075 init_waitqueue_head(&reply->wait_q); 2083 init_waitqueue_head(&reply->wait_q);
2076 spin_lock_irqsave(&card->lock, flags);
2077 list_add_tail(&reply->list, &card->cmd_waiter_list);
2078 spin_unlock_irqrestore(&card->lock, flags);
2079 QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); 2084 QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
2080 2085
2081 while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; 2086 while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
2082 qeth_prepare_control_data(card, len, iob);
2083 2087
2084 if (IS_IPA(iob->data)) 2088 if (IS_IPA(iob->data)) {
2089 cmd = __ipa_cmd(iob);
2090 cmd->hdr.seqno = card->seqno.ipa++;
2091 reply->seqno = cmd->hdr.seqno;
2085 event_timeout = QETH_IPA_TIMEOUT; 2092 event_timeout = QETH_IPA_TIMEOUT;
2086 else 2093 } else {
2094 reply->seqno = QETH_IDX_COMMAND_SEQNO;
2087 event_timeout = QETH_TIMEOUT; 2095 event_timeout = QETH_TIMEOUT;
2096 }
2097 qeth_prepare_control_data(card, len, iob);
2098
2099 spin_lock_irqsave(&card->lock, flags);
2100 list_add_tail(&reply->list, &card->cmd_waiter_list);
2101 spin_unlock_irqrestore(&card->lock, flags);
2102
2088 timeout = jiffies + event_timeout; 2103 timeout = jiffies + event_timeout;
2089 2104
2090 QETH_CARD_TEXT(card, 6, "noirqpnd"); 2105 QETH_CARD_TEXT(card, 6, "noirqpnd");
@@ -2109,9 +2124,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2109 2124
2110 /* we have only one long running ipassist, since we can ensure 2125 /* we have only one long running ipassist, since we can ensure
2111 process context of this command we can sleep */ 2126 process context of this command we can sleep */
2112 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 2127 if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
2113 if ((cmd->hdr.command == IPA_CMD_SETIP) && 2128 cmd->hdr.prot_version == QETH_PROT_IPV4) {
2114 (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
2115 if (!wait_event_timeout(reply->wait_q, 2129 if (!wait_event_timeout(reply->wait_q,
2116 atomic_read(&reply->received), event_timeout)) 2130 atomic_read(&reply->received), event_timeout))
2117 goto time_err; 2131 goto time_err;
@@ -2877,7 +2891,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
2877 memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); 2891 memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
2878 cmd->hdr.command = command; 2892 cmd->hdr.command = command;
2879 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; 2893 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2880 cmd->hdr.seqno = card->seqno.ipa; 2894 /* cmd->hdr.seqno is set by qeth_send_control_data() */
2881 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); 2895 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2882 cmd->hdr.rel_adapter_no = (__u8) card->info.portno; 2896 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
2883 if (card->options.layer2) 2897 if (card->options.layer2)
@@ -4966,8 +4980,6 @@ static void qeth_core_free_card(struct qeth_card *card)
4966 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 4980 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
4967 qeth_clean_channel(&card->read); 4981 qeth_clean_channel(&card->read);
4968 qeth_clean_channel(&card->write); 4982 qeth_clean_channel(&card->write);
4969 if (card->dev)
4970 free_netdev(card->dev);
4971 kfree(card->ip_tbd_list); 4983 kfree(card->ip_tbd_list);
4972 qeth_free_qdio_buffers(card); 4984 qeth_free_qdio_buffers(card);
4973 unregister_service_level(&card->qeth_service_level); 4985 unregister_service_level(&card->qeth_service_level);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 58bcb3c9a86a..acdb5ccb0ab9 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1062,8 +1062,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
1062 qeth_l2_set_offline(cgdev); 1062 qeth_l2_set_offline(cgdev);
1063 1063
1064 if (card->dev) { 1064 if (card->dev) {
1065 netif_napi_del(&card->napi);
1066 unregister_netdev(card->dev); 1065 unregister_netdev(card->dev);
1066 free_netdev(card->dev);
1067 card->dev = NULL; 1067 card->dev = NULL;
1068 } 1068 }
1069 return; 1069 return;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0d6888cbd96e..bbdb3b6c54bb 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3243,8 +3243,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3243 qeth_l3_set_offline(cgdev); 3243 qeth_l3_set_offline(cgdev);
3244 3244
3245 if (card->dev) { 3245 if (card->dev) {
3246 netif_napi_del(&card->napi);
3247 unregister_netdev(card->dev); 3246 unregister_netdev(card->dev);
3247 free_netdev(card->dev);
3248 card->dev = NULL; 3248 card->dev = NULL;
3249 } 3249 }
3250 3250
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 34367d172961..b6caad0fee24 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Debug traces for zfcp. 4 * Debug traces for zfcp.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2017 6 * Copyright IBM Corp. 2002, 2018
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -287,6 +287,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
287 spin_unlock_irqrestore(&dbf->rec_lock, flags); 287 spin_unlock_irqrestore(&dbf->rec_lock, flags);
288} 288}
289 289
290/**
291 * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
292 * @tag: identifier for event
293 * @adapter: adapter on which the erp_action should run
294 * @port: remote port involved in the erp_action
295 * @sdev: scsi device involved in the erp_action
296 * @want: wanted erp_action
297 * @need: required erp_action
298 *
299 * The adapter->erp_lock must not be held.
300 */
301void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
302 struct zfcp_port *port, struct scsi_device *sdev,
303 u8 want, u8 need)
304{
305 unsigned long flags;
306
307 read_lock_irqsave(&adapter->erp_lock, flags);
308 zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
309 read_unlock_irqrestore(&adapter->erp_lock, flags);
310}
290 311
291/** 312/**
292 * zfcp_dbf_rec_run_lvl - trace event related to running recovery 313 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
@@ -604,6 +625,46 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
604 spin_unlock_irqrestore(&dbf->scsi_lock, flags); 625 spin_unlock_irqrestore(&dbf->scsi_lock, flags);
605} 626}
606 627
628/**
629 * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
630 * @tag: Identifier for event.
631 * @adapter: Pointer to zfcp adapter as context for this event.
632 * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
633 * @ret: Return value of calling function.
634 *
635 * This SCSI trace variant does not depend on any of:
636 * scsi_cmnd, zfcp_fsf_req, scsi_device.
637 */
638void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
639 unsigned int scsi_id, int ret)
640{
641 struct zfcp_dbf *dbf = adapter->dbf;
642 struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
643 unsigned long flags;
644 static int const level = 1;
645
646 if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
647 return;
648
649 spin_lock_irqsave(&dbf->scsi_lock, flags);
650 memset(rec, 0, sizeof(*rec));
651
652 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
653 rec->id = ZFCP_DBF_SCSI_CMND;
654 rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
655 rec->scsi_retries = ~0;
656 rec->scsi_allowed = ~0;
657 rec->fcp_rsp_info = ~0;
658 rec->scsi_id = scsi_id;
659 rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
660 rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
661 rec->host_scribble = ~0;
662 memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
663
664 debug_event(dbf->scsi, level, rec, sizeof(*rec));
665 spin_unlock_irqrestore(&dbf->scsi_lock, flags);
666}
667
607static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size) 668static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
608{ 669{
609 struct debug_info *d; 670 struct debug_info *d;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 3b23d6754598..2abcd331b05d 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -34,11 +34,28 @@ enum zfcp_erp_steps {
34 ZFCP_ERP_STEP_LUN_OPENING = 0x2000, 34 ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
35}; 35};
36 36
37/**
38 * enum zfcp_erp_act_type - Type of ERP action object.
39 * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
40 * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
41 * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
42 * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
43 * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
44 * either of the first four enum values.
45 * Used to indicate that an ERP action could not be
46 * set up despite a detected need for some recovery.
47 * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
48 * either of the first four enum values.
49 * Used to indicate that ERP not needed because
50 * the object has ZFCP_STATUS_COMMON_ERP_FAILED.
51 */
37enum zfcp_erp_act_type { 52enum zfcp_erp_act_type {
38 ZFCP_ERP_ACTION_REOPEN_LUN = 1, 53 ZFCP_ERP_ACTION_REOPEN_LUN = 1,
39 ZFCP_ERP_ACTION_REOPEN_PORT = 2, 54 ZFCP_ERP_ACTION_REOPEN_PORT = 2,
40 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3, 55 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
41 ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4, 56 ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
57 ZFCP_ERP_ACTION_NONE = 0xc0,
58 ZFCP_ERP_ACTION_FAILED = 0xe0,
42}; 59};
43 60
44enum zfcp_erp_act_state { 61enum zfcp_erp_act_state {
@@ -125,6 +142,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
125 } 142 }
126} 143}
127 144
145static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
146 struct zfcp_port *port,
147 struct scsi_device *sdev)
148{
149 int need = want;
150 struct zfcp_scsi_dev *zsdev;
151
152 switch (want) {
153 case ZFCP_ERP_ACTION_REOPEN_LUN:
154 zsdev = sdev_to_zfcp(sdev);
155 if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
156 need = 0;
157 break;
158 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
159 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
160 need = 0;
161 break;
162 case ZFCP_ERP_ACTION_REOPEN_PORT:
163 if (atomic_read(&port->status) &
164 ZFCP_STATUS_COMMON_ERP_FAILED) {
165 need = 0;
166 /* ensure propagation of failed status to new devices */
167 zfcp_erp_set_port_status(
168 port, ZFCP_STATUS_COMMON_ERP_FAILED);
169 }
170 break;
171 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
172 if (atomic_read(&adapter->status) &
173 ZFCP_STATUS_COMMON_ERP_FAILED) {
174 need = 0;
175 /* ensure propagation of failed status to new devices */
176 zfcp_erp_set_adapter_status(
177 adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
178 }
179 break;
180 default:
181 need = 0;
182 break;
183 }
184
185 return need;
186}
187
128static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, 188static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
129 struct zfcp_port *port, 189 struct zfcp_port *port,
130 struct scsi_device *sdev) 190 struct scsi_device *sdev)
@@ -248,16 +308,27 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
248 int retval = 1, need; 308 int retval = 1, need;
249 struct zfcp_erp_action *act; 309 struct zfcp_erp_action *act;
250 310
251 if (!adapter->erp_thread) 311 need = zfcp_erp_handle_failed(want, adapter, port, sdev);
252 return -EIO; 312 if (!need) {
313 need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */
314 goto out;
315 }
316
317 if (!adapter->erp_thread) {
318 need = ZFCP_ERP_ACTION_NONE; /* marker for trace */
319 retval = -EIO;
320 goto out;
321 }
253 322
254 need = zfcp_erp_required_act(want, adapter, port, sdev); 323 need = zfcp_erp_required_act(want, adapter, port, sdev);
255 if (!need) 324 if (!need)
256 goto out; 325 goto out;
257 326
258 act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev); 327 act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
259 if (!act) 328 if (!act) {
329 need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */
260 goto out; 330 goto out;
331 }
261 atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); 332 atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
262 ++adapter->erp_total_count; 333 ++adapter->erp_total_count;
263 list_add_tail(&act->list, &adapter->erp_ready_head); 334 list_add_tail(&act->list, &adapter->erp_ready_head);
@@ -268,18 +339,32 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
268 return retval; 339 return retval;
269} 340}
270 341
342void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
343 u64 port_name, u32 port_id)
344{
345 unsigned long flags;
346 static /* don't waste stack */ struct zfcp_port tmpport;
347
348 write_lock_irqsave(&adapter->erp_lock, flags);
349 /* Stand-in zfcp port with fields just good enough for
350 * zfcp_dbf_rec_trig() and zfcp_dbf_set_common().
351 * Under lock because tmpport is static.
352 */
353 atomic_set(&tmpport.status, -1); /* unknown */
354 tmpport.wwpn = port_name;
355 tmpport.d_id = port_id;
356 zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
357 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
358 ZFCP_ERP_ACTION_NONE);
359 write_unlock_irqrestore(&adapter->erp_lock, flags);
360}
361
271static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, 362static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
272 int clear_mask, char *id) 363 int clear_mask, char *id)
273{ 364{
274 zfcp_erp_adapter_block(adapter, clear_mask); 365 zfcp_erp_adapter_block(adapter, clear_mask);
275 zfcp_scsi_schedule_rports_block(adapter); 366 zfcp_scsi_schedule_rports_block(adapter);
276 367
277 /* ensure propagation of failed status to new devices */
278 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
279 zfcp_erp_set_adapter_status(adapter,
280 ZFCP_STATUS_COMMON_ERP_FAILED);
281 return -EIO;
282 }
283 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, 368 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
284 adapter, NULL, NULL, id, 0); 369 adapter, NULL, NULL, id, 0);
285} 370}
@@ -298,12 +383,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
298 zfcp_scsi_schedule_rports_block(adapter); 383 zfcp_scsi_schedule_rports_block(adapter);
299 384
300 write_lock_irqsave(&adapter->erp_lock, flags); 385 write_lock_irqsave(&adapter->erp_lock, flags);
301 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) 386 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
302 zfcp_erp_set_adapter_status(adapter, 387 NULL, NULL, id, 0);
303 ZFCP_STATUS_COMMON_ERP_FAILED);
304 else
305 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
306 NULL, NULL, id, 0);
307 write_unlock_irqrestore(&adapter->erp_lock, flags); 388 write_unlock_irqrestore(&adapter->erp_lock, flags);
308} 389}
309 390
@@ -344,9 +425,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
344 zfcp_erp_port_block(port, clear); 425 zfcp_erp_port_block(port, clear);
345 zfcp_scsi_schedule_rport_block(port); 426 zfcp_scsi_schedule_rport_block(port);
346 427
347 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
348 return;
349
350 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, 428 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
351 port->adapter, port, NULL, id, 0); 429 port->adapter, port, NULL, id, 0);
352} 430}
@@ -372,12 +450,6 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
372 zfcp_erp_port_block(port, clear); 450 zfcp_erp_port_block(port, clear);
373 zfcp_scsi_schedule_rport_block(port); 451 zfcp_scsi_schedule_rport_block(port);
374 452
375 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
376 /* ensure propagation of failed status to new devices */
377 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
378 return -EIO;
379 }
380
381 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, 453 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
382 port->adapter, port, NULL, id, 0); 454 port->adapter, port, NULL, id, 0);
383} 455}
@@ -417,9 +489,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
417 489
418 zfcp_erp_lun_block(sdev, clear); 490 zfcp_erp_lun_block(sdev, clear);
419 491
420 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
421 return;
422
423 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter, 492 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
424 zfcp_sdev->port, sdev, id, act_status); 493 zfcp_sdev->port, sdev, id, act_status);
425} 494}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 21c8c689b02b..b326f05c7f89 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * External function declarations. 4 * External function declarations.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2016 6 * Copyright IBM Corp. 2002, 2018
7 */ 7 */
8 8
9#ifndef ZFCP_EXT_H 9#ifndef ZFCP_EXT_H
@@ -34,6 +34,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
34extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); 34extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
35extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, 35extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
36 struct zfcp_port *, struct scsi_device *, u8, u8); 36 struct zfcp_port *, struct scsi_device *, u8, u8);
37extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
38 struct zfcp_port *port,
39 struct scsi_device *sdev, u8 want, u8 need);
37extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); 40extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
38extern void zfcp_dbf_rec_run_lvl(int level, char *tag, 41extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
39 struct zfcp_erp_action *erp); 42 struct zfcp_erp_action *erp);
@@ -49,10 +52,15 @@ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
49extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); 52extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
50extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *, 53extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
51 struct zfcp_fsf_req *); 54 struct zfcp_fsf_req *);
55extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
56 unsigned int scsi_id, int ret);
52 57
53/* zfcp_erp.c */ 58/* zfcp_erp.c */
54extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); 59extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
55extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32); 60extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
61extern void zfcp_erp_port_forced_no_port_dbf(char *id,
62 struct zfcp_adapter *adapter,
63 u64 port_name, u32 port_id);
56extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *); 64extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
57extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *); 65extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
58extern void zfcp_erp_set_port_status(struct zfcp_port *, u32); 66extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index a9b8104b982e..3afb200b2829 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Interface to Linux SCSI midlayer. 4 * Interface to Linux SCSI midlayer.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2017 6 * Copyright IBM Corp. 2002, 2018
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -180,6 +180,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
180 if (abrt_req) 180 if (abrt_req)
181 break; 181 break;
182 182
183 zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL);
183 zfcp_erp_wait(adapter); 184 zfcp_erp_wait(adapter);
184 ret = fc_block_scsi_eh(scpnt); 185 ret = fc_block_scsi_eh(scpnt);
185 if (ret) { 186 if (ret) {
@@ -276,6 +277,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
276 if (fsf_req) 277 if (fsf_req)
277 break; 278 break;
278 279
280 zfcp_dbf_scsi_devreset("wait", scpnt, tm_flags, NULL);
279 zfcp_erp_wait(adapter); 281 zfcp_erp_wait(adapter);
280 ret = fc_block_scsi_eh(scpnt); 282 ret = fc_block_scsi_eh(scpnt);
281 if (ret) { 283 if (ret) {
@@ -322,15 +324,16 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
322{ 324{
323 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); 325 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
324 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; 326 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
325 int ret; 327 int ret = SUCCESS, fc_ret;
326 328
327 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); 329 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
328 zfcp_erp_wait(adapter); 330 zfcp_erp_wait(adapter);
329 ret = fc_block_scsi_eh(scpnt); 331 fc_ret = fc_block_scsi_eh(scpnt);
330 if (ret) 332 if (fc_ret)
331 return ret; 333 ret = fc_ret;
332 334
333 return SUCCESS; 335 zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
336 return ret;
334} 337}
335 338
336struct scsi_transport_template *zfcp_scsi_transport_template; 339struct scsi_transport_template *zfcp_scsi_transport_template;
@@ -600,6 +603,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
600 if (port) { 603 if (port) {
601 zfcp_erp_port_forced_reopen(port, 0, "sctrpi1"); 604 zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
602 put_device(&port->dev); 605 put_device(&port->dev);
606 } else {
607 zfcp_erp_port_forced_no_port_dbf(
608 "sctrpin", adapter,
609 rport->port_name /* zfcp_scsi_rport_register */,
610 rport->port_id /* zfcp_scsi_rport_register */);
603 } 611 }
604} 612}
605 613
@@ -616,9 +624,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
616 ids.port_id = port->d_id; 624 ids.port_id = port->d_id;
617 ids.roles = FC_RPORT_ROLE_FCP_TARGET; 625 ids.roles = FC_RPORT_ROLE_FCP_TARGET;
618 626
619 zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL, 627 zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
620 ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD, 628 ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
621 ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD); 629 ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
622 rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); 630 rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
623 if (!rport) { 631 if (!rport) {
624 dev_err(&port->adapter->ccw_device->dev, 632 dev_err(&port->adapter->ccw_device->dev,
@@ -640,9 +648,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
640 struct fc_rport *rport = port->rport; 648 struct fc_rport *rport = port->rport;
641 649
642 if (rport) { 650 if (rport) {
643 zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL, 651 zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
644 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL, 652 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
645 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL); 653 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
646 fc_remote_port_delete(rport); 654 fc_remote_port_delete(rport);
647 port->rport = NULL; 655 port->rport = NULL;
648 } 656 }
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a56a7b243e91..5466246c69b4 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -889,6 +889,11 @@ static int twa_chrdev_open(struct inode *inode, struct file *file)
889 unsigned int minor_number; 889 unsigned int minor_number;
890 int retval = TW_IOCTL_ERROR_OS_ENODEV; 890 int retval = TW_IOCTL_ERROR_OS_ENODEV;
891 891
892 if (!capable(CAP_SYS_ADMIN)) {
893 retval = -EACCES;
894 goto out;
895 }
896
892 minor_number = iminor(inode); 897 minor_number = iminor(inode);
893 if (minor_number >= twa_device_extension_count) 898 if (minor_number >= twa_device_extension_count)
894 goto out; 899 goto out;
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 2940bd769936..14af38036287 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1034,6 +1034,9 @@ static int tw_chrdev_open(struct inode *inode, struct file *file)
1034 1034
1035 dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n"); 1035 dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n");
1036 1036
1037 if (!capable(CAP_SYS_ADMIN))
1038 return -EACCES;
1039
1037 minor_number = iminor(inode); 1040 minor_number = iminor(inode);
1038 if (minor_number >= tw_device_extension_count) 1041 if (minor_number >= tw_device_extension_count)
1039 return -ENODEV; 1042 return -ENODEV;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 8c758c36fc70..cf531ad8b6ee 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1321,9 +1321,10 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1321 host = aac->scsi_host_ptr; 1321 host = aac->scsi_host_ptr;
1322 scsi_block_requests(host); 1322 scsi_block_requests(host);
1323 aac_adapter_disable_int(aac); 1323 aac_adapter_disable_int(aac);
1324 if (aac->thread->pid != current->pid) { 1324 if (aac->thread && aac->thread->pid != current->pid) {
1325 spin_unlock_irq(host->host_lock); 1325 spin_unlock_irq(host->host_lock);
1326 kthread_stop(aac->thread); 1326 kthread_stop(aac->thread);
1327 aac->thread = NULL;
1327 jafo = 1; 1328 jafo = 1;
1328 } 1329 }
1329 1330
@@ -1363,13 +1364,13 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1363 * will ensure that i/o is queisced and the card is flushed in that 1364 * will ensure that i/o is queisced and the card is flushed in that
1364 * case. 1365 * case.
1365 */ 1366 */
1367 aac_free_irq(aac);
1366 aac_fib_map_free(aac); 1368 aac_fib_map_free(aac);
1367 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 1369 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
1368 aac->comm_addr = NULL; 1370 aac->comm_addr = NULL;
1369 aac->comm_phys = 0; 1371 aac->comm_phys = 0;
1370 kfree(aac->queues); 1372 kfree(aac->queues);
1371 aac->queues = NULL; 1373 aac->queues = NULL;
1372 aac_free_irq(aac);
1373 kfree(aac->fsa_dev); 1374 kfree(aac->fsa_dev);
1374 aac->fsa_dev = NULL; 1375 aac->fsa_dev = NULL;
1375 quirks = aac_get_driver_ident(index)->quirks; 1376 quirks = aac_get_driver_ident(index)->quirks;
@@ -1392,6 +1393,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1392 aac->name); 1393 aac->name);
1393 if (IS_ERR(aac->thread)) { 1394 if (IS_ERR(aac->thread)) {
1394 retval = PTR_ERR(aac->thread); 1395 retval = PTR_ERR(aac->thread);
1396 aac->thread = NULL;
1395 goto out; 1397 goto out;
1396 } 1398 }
1397 } 1399 }
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index aa6eccb8940b..8da8b46da722 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1085,6 +1085,7 @@ static void __aac_shutdown(struct aac_dev * aac)
1085 up(&fib->event_wait); 1085 up(&fib->event_wait);
1086 } 1086 }
1087 kthread_stop(aac->thread); 1087 kthread_stop(aac->thread);
1088 aac->thread = NULL;
1088 } 1089 }
1089 aac_send_shutdown(aac); 1090 aac_send_shutdown(aac);
1090 aac_adapter_disable_int(aac); 1091 aac_adapter_disable_int(aac);
@@ -1189,8 +1190,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1189 * Map in the registers from the adapter. 1190 * Map in the registers from the adapter.
1190 */ 1191 */
1191 aac->base_size = AAC_MIN_FOOTPRINT_SIZE; 1192 aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
1192 if ((*aac_drivers[index].init)(aac)) 1193 if ((*aac_drivers[index].init)(aac)) {
1194 error = -ENODEV;
1193 goto out_unmap; 1195 goto out_unmap;
1196 }
1194 1197
1195 if (aac->sync_mode) { 1198 if (aac->sync_mode) {
1196 if (aac_sync_mode) 1199 if (aac_sync_mode)
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index febbd83e2ecd..24e57e770432 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -6291,18 +6291,17 @@ static uchar AscGetSynPeriodIndex(ASC_DVC_VAR *asc_dvc, uchar syn_time)
6291static uchar 6291static uchar
6292AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar sdtr_offset) 6292AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar sdtr_offset)
6293{ 6293{
6294 EXT_MSG sdtr_buf; 6294 PortAddr iop_base = asc_dvc->iop_base;
6295 uchar sdtr_period_index; 6295 uchar sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
6296 PortAddr iop_base; 6296 EXT_MSG sdtr_buf = {
6297 6297 .msg_type = EXTENDED_MESSAGE,
6298 iop_base = asc_dvc->iop_base; 6298 .msg_len = MS_SDTR_LEN,
6299 sdtr_buf.msg_type = EXTENDED_MESSAGE; 6299 .msg_req = EXTENDED_SDTR,
6300 sdtr_buf.msg_len = MS_SDTR_LEN; 6300 .xfer_period = sdtr_period,
6301 sdtr_buf.msg_req = EXTENDED_SDTR; 6301 .req_ack_offset = sdtr_offset,
6302 sdtr_buf.xfer_period = sdtr_period; 6302 };
6303 sdtr_offset &= ASC_SYN_MAX_OFFSET; 6303 sdtr_offset &= ASC_SYN_MAX_OFFSET;
6304 sdtr_buf.req_ack_offset = sdtr_offset; 6304
6305 sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
6306 if (sdtr_period_index <= asc_dvc->max_sdtr_index) { 6305 if (sdtr_period_index <= asc_dvc->max_sdtr_index) {
6307 AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, 6306 AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG,
6308 (uchar *)&sdtr_buf, 6307 (uchar *)&sdtr_buf,
@@ -11030,6 +11029,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
11030 ASC_DBG(2, "AdvInitGetConfig()\n"); 11029 ASC_DBG(2, "AdvInitGetConfig()\n");
11031 11030
11032 ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0; 11031 ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0;
11032#else
11033 share_irq = 0;
11034 ret = -ENODEV;
11033#endif /* CONFIG_PCI */ 11035#endif /* CONFIG_PCI */
11034 } 11036 }
11035 11037
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index decdc71b6b86..f6d7c4712e66 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2009,7 +2009,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
2009 * have valid data in the sense buffer that could 2009 * have valid data in the sense buffer that could
2010 * confuse the higher levels. 2010 * confuse the higher levels.
2011 */ 2011 */
2012 memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); 2012 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2013//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id); 2013//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
2014//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); } 2014//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
2015 /* 2015 /*
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 499e369eabf0..8bc1625337f6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -191,6 +191,7 @@ struct bnx2fc_hba {
191 struct bnx2fc_cmd_mgr *cmd_mgr; 191 struct bnx2fc_cmd_mgr *cmd_mgr;
192 spinlock_t hba_lock; 192 spinlock_t hba_lock;
193 struct mutex hba_mutex; 193 struct mutex hba_mutex;
194 struct mutex hba_stats_mutex;
194 unsigned long adapter_state; 195 unsigned long adapter_state;
195 #define ADAPTER_STATE_UP 0 196 #define ADAPTER_STATE_UP 0
196 #define ADAPTER_STATE_GOING_DOWN 1 197 #define ADAPTER_STATE_GOING_DOWN 1
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 67405c628864..d0b227ffbd5f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -641,15 +641,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
641 if (!fw_stats) 641 if (!fw_stats)
642 return NULL; 642 return NULL;
643 643
644 mutex_lock(&hba->hba_stats_mutex);
645
644 bnx2fc_stats = fc_get_host_stats(shost); 646 bnx2fc_stats = fc_get_host_stats(shost);
645 647
646 init_completion(&hba->stat_req_done); 648 init_completion(&hba->stat_req_done);
647 if (bnx2fc_send_stat_req(hba)) 649 if (bnx2fc_send_stat_req(hba))
648 return bnx2fc_stats; 650 goto unlock_stats_mutex;
649 rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); 651 rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
650 if (!rc) { 652 if (!rc) {
651 BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); 653 BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
652 return bnx2fc_stats; 654 goto unlock_stats_mutex;
653 } 655 }
654 BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); 656 BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
655 bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; 657 bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
@@ -671,6 +673,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
671 673
672 memcpy(&hba->prev_stats, hba->stats_buffer, 674 memcpy(&hba->prev_stats, hba->stats_buffer,
673 sizeof(struct fcoe_statistics_params)); 675 sizeof(struct fcoe_statistics_params));
676
677unlock_stats_mutex:
678 mutex_unlock(&hba->hba_stats_mutex);
674 return bnx2fc_stats; 679 return bnx2fc_stats;
675} 680}
676 681
@@ -1302,6 +1307,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1302 } 1307 }
1303 spin_lock_init(&hba->hba_lock); 1308 spin_lock_init(&hba->hba_lock);
1304 mutex_init(&hba->hba_mutex); 1309 mutex_init(&hba->hba_mutex);
1310 mutex_init(&hba->hba_stats_mutex);
1305 1311
1306 hba->cnic = cnic; 1312 hba->cnic = cnic;
1307 1313
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 0002caf687dd..eb3b5c0f299f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1858,6 +1858,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1858 /* we will not receive ABTS response for this IO */ 1858 /* we will not receive ABTS response for this IO */
1859 BNX2FC_IO_DBG(io_req, "Timer context finished processing " 1859 BNX2FC_IO_DBG(io_req, "Timer context finished processing "
1860 "this scsi cmd\n"); 1860 "this scsi cmd\n");
1861 return;
1861 } 1862 }
1862 1863
1863 /* Cancel the timeout_work, as we received IO completion */ 1864 /* Cancel the timeout_work, as we received IO completion */
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 622bdabc8894..dab195f04da7 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
1769 goto bye; 1769 goto bye;
1770 } 1770 }
1771 1771
1772 mempool_free(mbp, hw->mb_mempool);
1773 if (finicsum != cfcsum) { 1772 if (finicsum != cfcsum) {
1774 csio_warn(hw, 1773 csio_warn(hw,
1775 "Config File checksum mismatch: csum=%#x, computed=%#x\n", 1774 "Config File checksum mismatch: csum=%#x, computed=%#x\n",
@@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
1780 rv = csio_hw_validate_caps(hw, mbp); 1779 rv = csio_hw_validate_caps(hw, mbp);
1781 if (rv != 0) 1780 if (rv != 0)
1782 goto bye; 1781 goto bye;
1782
1783 mempool_free(mbp, hw->mb_mempool);
1784 mbp = NULL;
1785
1783 /* 1786 /*
1784 * Note that we're operating with parameters 1787 * Note that we're operating with parameters
1785 * not supplied by the driver, rather than from hard-wired 1788 * not supplied by the driver, rather than from hard-wired
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index d4cda5e9600e..21c8d210c456 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -180,11 +180,14 @@ static u8 adpt_read_blink_led(adpt_hba* host)
180 *============================================================================ 180 *============================================================================
181 */ 181 */
182 182
183#ifdef MODULE
183static struct pci_device_id dptids[] = { 184static struct pci_device_id dptids[] = {
184 { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 185 { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
185 { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 186 { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
186 { 0, } 187 { 0, }
187}; 188};
189#endif
190
188MODULE_DEVICE_TABLE(pci,dptids); 191MODULE_DEVICE_TABLE(pci,dptids);
189 192
190static int adpt_detect(struct scsi_host_template* sht) 193static int adpt_detect(struct scsi_host_template* sht)
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index eefe14d453db..b87ab38a4530 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1768,7 +1768,7 @@ struct scsi_host_template fdomain_driver_template = {
1768}; 1768};
1769 1769
1770#ifndef PCMCIA 1770#ifndef PCMCIA
1771#ifdef CONFIG_PCI 1771#if defined(CONFIG_PCI) && defined(MODULE)
1772 1772
1773static struct pci_device_id fdomain_pci_tbl[] = { 1773static struct pci_device_id fdomain_pci_tbl[] = {
1774 { PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70, 1774 { PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70,
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index f8d2478b11cc..87e081f8a386 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -538,7 +538,10 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst,
538 printk(KERN_ERR "53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks); 538 printk(KERN_ERR "53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
539 return -1; 539 return -1;
540 } 540 }
541 while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY); 541 while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
542 {
543 // FIXME - no timeout
544 }
542 545
543#ifndef SCSI_G_NCR5380_MEM 546#ifndef SCSI_G_NCR5380_MEM
544 { 547 {
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 8fae03215a85..543c10266984 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -366,7 +366,7 @@ enum ibmvfc_fcp_rsp_info_codes {
366}; 366};
367 367
368struct ibmvfc_fcp_rsp_info { 368struct ibmvfc_fcp_rsp_info {
369 __be16 reserved; 369 u8 reserved[3];
370 u8 rsp_code; 370 u8 rsp_code;
371 u8 reserved2[4]; 371 u8 reserved2[4];
372}__attribute__((packed, aligned (2))); 372}__attribute__((packed, aligned (2)));
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 6a926bae76b2..7a91cf3ff173 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -110,11 +110,6 @@
110#define i91u_MAXQUEUE 2 110#define i91u_MAXQUEUE 2
111#define i91u_REVID "Initio INI-9X00U/UW SCSI device driver; Revision: 1.04a" 111#define i91u_REVID "Initio INI-9X00U/UW SCSI device driver; Revision: 1.04a"
112 112
113#define I950_DEVICE_ID 0x9500 /* Initio's inic-950 product ID */
114#define I940_DEVICE_ID 0x9400 /* Initio's inic-940 product ID */
115#define I935_DEVICE_ID 0x9401 /* Initio's inic-935 product ID */
116#define I920_DEVICE_ID 0x0002 /* Initio's other product ID */
117
118#ifdef DEBUG_i91u 113#ifdef DEBUG_i91u
119static unsigned int i91u_debug = DEBUG_DEFAULT; 114static unsigned int i91u_debug = DEBUG_DEFAULT;
120#endif 115#endif
@@ -127,17 +122,6 @@ static int setup_debug = 0;
127 122
128static void i91uSCBPost(u8 * pHcb, u8 * pScb); 123static void i91uSCBPost(u8 * pHcb, u8 * pScb);
129 124
130/* PCI Devices supported by this driver */
131static struct pci_device_id i91u_pci_devices[] = {
132 { PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
133 { PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
134 { PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
135 { PCI_VENDOR_ID_INIT, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
136 { PCI_VENDOR_ID_DOMEX, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
137 { }
138};
139MODULE_DEVICE_TABLE(pci, i91u_pci_devices);
140
141#define DEBUG_INTERRUPT 0 125#define DEBUG_INTERRUPT 0
142#define DEBUG_QUEUE 0 126#define DEBUG_QUEUE 0
143#define DEBUG_STATE 0 127#define DEBUG_STATE 0
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 7a58128a0000..2f61d8cd5882 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -835,8 +835,10 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
835 835
836 qc->err_mask |= AC_ERR_OTHER; 836 qc->err_mask |= AC_ERR_OTHER;
837 sata_port->ioasa.status |= ATA_BUSY; 837 sata_port->ioasa.status |= ATA_BUSY;
838 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
839 ata_qc_complete(qc); 838 ata_qc_complete(qc);
839 if (ipr_cmd->eh_comp)
840 complete(ipr_cmd->eh_comp);
841 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
840} 842}
841 843
842/** 844/**
@@ -5864,8 +5866,10 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5864 res->in_erp = 0; 5866 res->in_erp = 0;
5865 } 5867 }
5866 scsi_dma_unmap(ipr_cmd->scsi_cmd); 5868 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5867 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5868 scsi_cmd->scsi_done(scsi_cmd); 5869 scsi_cmd->scsi_done(scsi_cmd);
5870 if (ipr_cmd->eh_comp)
5871 complete(ipr_cmd->eh_comp);
5872 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5869} 5873}
5870 5874
5871/** 5875/**
@@ -6255,8 +6259,10 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6255 } 6259 }
6256 6260
6257 scsi_dma_unmap(ipr_cmd->scsi_cmd); 6261 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6258 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6259 scsi_cmd->scsi_done(scsi_cmd); 6262 scsi_cmd->scsi_done(scsi_cmd);
6263 if (ipr_cmd->eh_comp)
6264 complete(ipr_cmd->eh_comp);
6265 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6260} 6266}
6261 6267
6262/** 6268/**
@@ -6282,8 +6288,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6282 scsi_dma_unmap(scsi_cmd); 6288 scsi_dma_unmap(scsi_cmd);
6283 6289
6284 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); 6290 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6285 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6286 scsi_cmd->scsi_done(scsi_cmd); 6291 scsi_cmd->scsi_done(scsi_cmd);
6292 if (ipr_cmd->eh_comp)
6293 complete(ipr_cmd->eh_comp);
6294 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6287 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); 6295 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6288 } else { 6296 } else {
6289 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 6297 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c1ccf1ee99ea..9f0b00c38658 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1695,6 +1695,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1695 */ 1695 */
1696 switch (session->state) { 1696 switch (session->state) {
1697 case ISCSI_STATE_FAILED: 1697 case ISCSI_STATE_FAILED:
1698 /*
1699 * cmds should fail during shutdown, if the session
1700 * state is bad, allowing completion to happen
1701 */
1702 if (unlikely(system_state != SYSTEM_RUNNING)) {
1703 reason = FAILURE_SESSION_FAILED;
1704 sc->result = DID_NO_CONNECT << 16;
1705 break;
1706 }
1698 case ISCSI_STATE_IN_RECOVERY: 1707 case ISCSI_STATE_IN_RECOVERY:
1699 reason = FAILURE_SESSION_IN_RECOVERY; 1708 reason = FAILURE_SESSION_IN_RECOVERY;
1700 sc->result = DID_IMM_RETRY << 16; 1709 sc->result = DID_IMM_RETRY << 16;
@@ -1727,7 +1736,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1727 1736
1728 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { 1737 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1729 reason = FAILURE_SESSION_IN_RECOVERY; 1738 reason = FAILURE_SESSION_IN_RECOVERY;
1730 sc->result = DID_REQUEUE; 1739 sc->result = DID_REQUEUE << 16;
1731 goto fault; 1740 goto fault;
1732 } 1741 }
1733 1742
@@ -1980,6 +1989,19 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1980 1989
1981 if (session->state != ISCSI_STATE_LOGGED_IN) { 1990 if (session->state != ISCSI_STATE_LOGGED_IN) {
1982 /* 1991 /*
1992 * During shutdown, if session is prematurely disconnected,
1993 * recovery won't happen and there will be hung cmds. Not
1994 * handling cmds would trigger EH, also bad in this case.
1995 * Instead, handle cmd, allow completion to happen and let
1996 * upper layer to deal with the result.
1997 */
1998 if (unlikely(system_state != SYSTEM_RUNNING)) {
1999 sc->result = DID_NO_CONNECT << 16;
2000 ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
2001 rc = BLK_EH_HANDLED;
2002 goto done;
2003 }
2004 /*
1983 * We are probably in the middle of iscsi recovery so let 2005 * We are probably in the middle of iscsi recovery so let
1984 * that complete and handle the error. 2006 * that complete and handle the error.
1985 */ 2007 */
@@ -2083,7 +2105,7 @@ done:
2083 task->last_timeout = jiffies; 2105 task->last_timeout = jiffies;
2084 spin_unlock(&session->frwd_lock); 2106 spin_unlock(&session->frwd_lock);
2085 ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? 2107 ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
2086 "timer reset" : "nh"); 2108 "timer reset" : "shutdown or nh");
2087 return rc; 2109 return rc;
2088} 2110}
2089 2111
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 022bb6e10d98..12886f96b286 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -282,6 +282,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
282 phy->phy->minimum_linkrate = dr->pmin_linkrate; 282 phy->phy->minimum_linkrate = dr->pmin_linkrate;
283 phy->phy->maximum_linkrate = dr->pmax_linkrate; 283 phy->phy->maximum_linkrate = dr->pmax_linkrate;
284 phy->phy->negotiated_linkrate = phy->linkrate; 284 phy->phy->negotiated_linkrate = phy->linkrate;
285 phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
285 286
286 skip: 287 skip:
287 if (new_phy) 288 if (new_phy)
@@ -675,7 +676,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
675 res = smp_execute_task(dev, req, RPEL_REQ_SIZE, 676 res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
676 resp, RPEL_RESP_SIZE); 677 resp, RPEL_RESP_SIZE);
677 678
678 if (!res) 679 if (res)
679 goto out; 680 goto out;
680 681
681 phy->invalid_dword_count = scsi_to_u32(&resp[12]); 682 phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -684,6 +685,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
684 phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); 685 phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
685 686
686 out: 687 out:
688 kfree(req);
687 kfree(resp); 689 kfree(resp);
688 return res; 690 return res;
689 691
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 519dac4e341e..9a8c2f97ed70 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -222,6 +222,7 @@ out_done:
222static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) 222static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
223{ 223{
224 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); 224 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
225 struct domain_device *dev = cmd_to_domain_dev(cmd);
225 struct sas_task *task = TO_SAS_TASK(cmd); 226 struct sas_task *task = TO_SAS_TASK(cmd);
226 227
227 /* At this point, we only get called following an actual abort 228 /* At this point, we only get called following an actual abort
@@ -230,6 +231,14 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
230 */ 231 */
231 sas_end_task(cmd, task); 232 sas_end_task(cmd, task);
232 233
234 if (dev_is_sata(dev)) {
235 /* defer commands to libata so that libata EH can
236 * handle ata qcs correctly
237 */
238 list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
239 return;
240 }
241
233 /* now finish the command and move it on to the error 242 /* now finish the command and move it on to the error
234 * handler done list, this also takes it off the 243 * handler done list, this also takes it off the
235 * error handler pending list. 244 * error handler pending list.
@@ -237,22 +246,6 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
237 scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q); 246 scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
238} 247}
239 248
240static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
241{
242 struct domain_device *dev = cmd_to_domain_dev(cmd);
243 struct sas_ha_struct *ha = dev->port->ha;
244 struct sas_task *task = TO_SAS_TASK(cmd);
245
246 if (!dev_is_sata(dev)) {
247 sas_eh_finish_cmd(cmd);
248 return;
249 }
250
251 /* report the timeout to libata */
252 sas_end_task(cmd, task);
253 list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
254}
255
256static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) 249static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
257{ 250{
258 struct scsi_cmnd *cmd, *n; 251 struct scsi_cmnd *cmd, *n;
@@ -260,7 +253,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd
260 list_for_each_entry_safe(cmd, n, error_q, eh_entry) { 253 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
261 if (cmd->device->sdev_target == my_cmd->device->sdev_target && 254 if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
262 cmd->device->lun == my_cmd->device->lun) 255 cmd->device->lun == my_cmd->device->lun)
263 sas_eh_defer_cmd(cmd); 256 sas_eh_finish_cmd(cmd);
264 } 257 }
265} 258}
266 259
@@ -622,12 +615,12 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
622 case TASK_IS_DONE: 615 case TASK_IS_DONE:
623 SAS_DPRINTK("%s: task 0x%p is done\n", __func__, 616 SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
624 task); 617 task);
625 sas_eh_defer_cmd(cmd); 618 sas_eh_finish_cmd(cmd);
626 continue; 619 continue;
627 case TASK_IS_ABORTED: 620 case TASK_IS_ABORTED:
628 SAS_DPRINTK("%s: task 0x%p is aborted\n", 621 SAS_DPRINTK("%s: task 0x%p is aborted\n",
629 __func__, task); 622 __func__, task);
630 sas_eh_defer_cmd(cmd); 623 sas_eh_finish_cmd(cmd);
631 continue; 624 continue;
632 case TASK_IS_AT_LU: 625 case TASK_IS_AT_LU:
633 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); 626 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@@ -638,7 +631,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
638 "recovered\n", 631 "recovered\n",
639 SAS_ADDR(task->dev), 632 SAS_ADDR(task->dev),
640 cmd->device->lun); 633 cmd->device->lun);
641 sas_eh_defer_cmd(cmd); 634 sas_eh_finish_cmd(cmd);
642 sas_scsi_clear_queue_lu(work_q, cmd); 635 sas_scsi_clear_queue_lu(work_q, cmd);
643 goto Again; 636 goto Again;
644 } 637 }
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4639dac64e7f..f096766150bc 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -634,7 +634,12 @@ lpfc_issue_lip(struct Scsi_Host *shost)
634 LPFC_MBOXQ_t *pmboxq; 634 LPFC_MBOXQ_t *pmboxq;
635 int mbxstatus = MBXERR_ERROR; 635 int mbxstatus = MBXERR_ERROR;
636 636
637 /*
638 * If the link is offline, disabled or BLOCK_MGMT_IO
639 * it doesn't make any sense to allow issue_lip
640 */
637 if ((vport->fc_flag & FC_OFFLINE_MODE) || 641 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
642 (phba->hba_flag & LINK_DISABLED) ||
638 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) 643 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
639 return -EPERM; 644 return -EPERM;
640 645
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index be901f6db6d3..4131addfb872 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -691,8 +691,9 @@ lpfc_work_done(struct lpfc_hba *phba)
691 (phba->hba_flag & HBA_SP_QUEUE_EVT)) { 691 (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
692 if (pring->flag & LPFC_STOP_IOCB_EVENT) { 692 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
693 pring->flag |= LPFC_DEFERRED_RING_EVENT; 693 pring->flag |= LPFC_DEFERRED_RING_EVENT;
694 /* Set the lpfc data pending flag */ 694 /* Preserve legacy behavior. */
695 set_bit(LPFC_DATA_READY, &phba->data_flags); 695 if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
696 set_bit(LPFC_DATA_READY, &phba->data_flags);
696 } else { 697 } else {
697 if (phba->link_state >= LPFC_LINK_UP) { 698 if (phba->link_state >= LPFC_LINK_UP) {
698 pring->flag &= ~LPFC_DEFERRED_RING_EVENT; 699 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8379fbbc60db..3406586b9201 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -115,6 +115,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
115 /* set consumption flag every once in a while */ 115 /* set consumption flag every once in a while */
116 if (!((q->host_index + 1) % q->entry_repost)) 116 if (!((q->host_index + 1) % q->entry_repost))
117 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); 117 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
118 else
119 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
118 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) 120 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
119 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); 121 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
120 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); 122 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
@@ -13493,6 +13495,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
13493 case LPFC_Q_CREATE_VERSION_1: 13495 case LPFC_Q_CREATE_VERSION_1:
13494 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 13496 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
13495 wq->entry_count); 13497 wq->entry_count);
13498 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13499 LPFC_Q_CREATE_VERSION_1);
13500
13496 switch (wq->entry_size) { 13501 switch (wq->entry_size) {
13497 default: 13502 default:
13498 case 64: 13503 case 64:
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 14c0334f41e4..26c67c42985c 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -55,6 +55,7 @@ struct mac_esp_priv {
55 int error; 55 int error;
56}; 56};
57static struct esp *esp_chips[2]; 57static struct esp *esp_chips[2];
58static DEFINE_SPINLOCK(esp_chips_lock);
58 59
59#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \ 60#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
60 platform_get_drvdata((struct platform_device *) \ 61 platform_get_drvdata((struct platform_device *) \
@@ -562,15 +563,18 @@ static int esp_mac_probe(struct platform_device *dev)
562 } 563 }
563 564
564 host->irq = IRQ_MAC_SCSI; 565 host->irq = IRQ_MAC_SCSI;
565 esp_chips[dev->id] = esp; 566
566 mb(); 567 /* The request_irq() call is intended to succeed for the first device
567 if (esp_chips[!dev->id] == NULL) { 568 * and fail for the second device.
568 err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL); 569 */
569 if (err < 0) { 570 err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
570 esp_chips[dev->id] = NULL; 571 spin_lock(&esp_chips_lock);
571 goto fail_free_priv; 572 if (err < 0 && esp_chips[!dev->id] == NULL) {
572 } 573 spin_unlock(&esp_chips_lock);
574 goto fail_free_priv;
573 } 575 }
576 esp_chips[dev->id] = esp;
577 spin_unlock(&esp_chips_lock);
574 578
575 err = scsi_esp_register(esp, &dev->dev); 579 err = scsi_esp_register(esp, &dev->dev);
576 if (err) 580 if (err)
@@ -579,8 +583,13 @@ static int esp_mac_probe(struct platform_device *dev)
579 return 0; 583 return 0;
580 584
581fail_free_irq: 585fail_free_irq:
582 if (esp_chips[!dev->id] == NULL) 586 spin_lock(&esp_chips_lock);
587 esp_chips[dev->id] = NULL;
588 if (esp_chips[!dev->id] == NULL) {
589 spin_unlock(&esp_chips_lock);
583 free_irq(host->irq, esp); 590 free_irq(host->irq, esp);
591 } else
592 spin_unlock(&esp_chips_lock);
584fail_free_priv: 593fail_free_priv:
585 kfree(mep); 594 kfree(mep);
586fail_free_command_block: 595fail_free_command_block:
@@ -599,9 +608,13 @@ static int esp_mac_remove(struct platform_device *dev)
599 608
600 scsi_esp_unregister(esp); 609 scsi_esp_unregister(esp);
601 610
611 spin_lock(&esp_chips_lock);
602 esp_chips[dev->id] = NULL; 612 esp_chips[dev->id] = NULL;
603 if (!(esp_chips[0] || esp_chips[1])) 613 if (esp_chips[!dev->id] == NULL) {
614 spin_unlock(&esp_chips_lock);
604 free_irq(irq, NULL); 615 free_irq(irq, NULL);
616 } else
617 spin_unlock(&esp_chips_lock);
605 618
606 kfree(mep); 619 kfree(mep);
607 620
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 9d05302a3bcd..19bffe0b2cc0 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4197,6 +4197,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4197 int irq, i, j; 4197 int irq, i, j;
4198 int error = -ENODEV; 4198 int error = -ENODEV;
4199 4199
4200 if (hba_count >= MAX_CONTROLLERS)
4201 goto out;
4202
4200 if (pci_enable_device(pdev)) 4203 if (pci_enable_device(pdev))
4201 goto out; 4204 goto out;
4202 pci_set_master(pdev); 4205 pci_set_master(pdev);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 96007633ad39..213944ed64d9 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1886,6 +1886,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
1886 pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value); 1886 pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
1887 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 1887 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1888 } else { 1888 } else {
1889 if (os_timeout_value)
1890 os_timeout_value++;
1891
1889 /* system pd Fast Path */ 1892 /* system pd Fast Path */
1890 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1893 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1891 timeout_limit = (scmd->device->type == TYPE_DISK) ? 1894 timeout_limit = (scmd->device->type == TYPE_DISK) ?
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index e111c3d8c5d6..7d67a68bcc62 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3886,19 +3886,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3886 return 0; 3886 return 0;
3887 } 3887 }
3888 3888
3889 /*
3890 * Bug work around for firmware SATL handling. The loop
3891 * is based on atomic operations and ensures consistency
3892 * since we're lockless at this point
3893 */
3894 do {
3895 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
3896 scmd->result = SAM_STAT_BUSY;
3897 scmd->scsi_done(scmd);
3898 return 0;
3899 }
3900 } while (_scsih_set_satl_pending(scmd, true));
3901
3902 sas_target_priv_data = sas_device_priv_data->sas_target; 3889 sas_target_priv_data = sas_device_priv_data->sas_target;
3903 3890
3904 /* invalid device handle */ 3891 /* invalid device handle */
@@ -3924,6 +3911,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3924 sas_device_priv_data->block) 3911 sas_device_priv_data->block)
3925 return SCSI_MLQUEUE_DEVICE_BUSY; 3912 return SCSI_MLQUEUE_DEVICE_BUSY;
3926 3913
3914 /*
3915 * Bug work around for firmware SATL handling. The loop
3916 * is based on atomic operations and ensures consistency
3917 * since we're lockless at this point
3918 */
3919 do {
3920 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
3921 scmd->result = SAM_STAT_BUSY;
3922 scmd->scsi_done(scmd);
3923 return 0;
3924 }
3925 } while (_scsih_set_satl_pending(scmd, true));
3926
3927 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 3927 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
3928 mpi_control = MPI2_SCSIIO_CONTROL_READ; 3928 mpi_control = MPI2_SCSIIO_CONTROL_READ;
3929 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 3929 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
@@ -3945,6 +3945,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3945 if (!smid) { 3945 if (!smid) {
3946 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 3946 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3947 ioc->name, __func__); 3947 ioc->name, __func__);
3948 _scsih_set_satl_pending(scmd, false);
3948 goto out; 3949 goto out;
3949 } 3950 }
3950 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 3951 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
@@ -3975,6 +3976,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3975 if (mpi_request->DataLength) { 3976 if (mpi_request->DataLength) {
3976 if (ioc->build_sg_scmd(ioc, scmd, smid)) { 3977 if (ioc->build_sg_scmd(ioc, scmd, smid)) {
3977 mpt3sas_base_free_smid(ioc, smid); 3978 mpt3sas_base_free_smid(ioc, smid);
3979 _scsih_set_satl_pending(scmd, false);
3978 goto out; 3980 goto out;
3979 } 3981 }
3980 } else 3982 } else
@@ -8635,7 +8637,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8635 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 8637 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
8636 "fw_event_%s%d", ioc->driver_name, ioc->id); 8638 "fw_event_%s%d", ioc->driver_name, ioc->id);
8637 ioc->firmware_event_thread = alloc_ordered_workqueue( 8639 ioc->firmware_event_thread = alloc_ordered_workqueue(
8638 ioc->firmware_event_name, WQ_MEM_RECLAIM); 8640 ioc->firmware_event_name, 0);
8639 if (!ioc->firmware_event_thread) { 8641 if (!ioc->firmware_event_thread) {
8640 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 8642 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
8641 ioc->name, __FILE__, __LINE__, __func__); 8643 ioc->name, __FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 02360de6b7e0..39285070f3b5 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2629,7 +2629,7 @@ static void mvumi_shutdown(struct pci_dev *pdev)
2629 mvumi_flush_cache(mhba); 2629 mvumi_flush_cache(mhba);
2630} 2630}
2631 2631
2632static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state) 2632static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
2633{ 2633{
2634 struct mvumi_hba *mhba = NULL; 2634 struct mvumi_hba *mhba = NULL;
2635 2635
@@ -2648,7 +2648,7 @@ static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
2648 return 0; 2648 return 0;
2649} 2649}
2650 2650
2651static int mvumi_resume(struct pci_dev *pdev) 2651static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
2652{ 2652{
2653 int ret; 2653 int ret;
2654 struct mvumi_hba *mhba = NULL; 2654 struct mvumi_hba *mhba = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e197c6f39de2..41a646696bab 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -325,11 +325,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
325 325
326 wait_for_completion(&tm_iocb->u.tmf.comp); 326 wait_for_completion(&tm_iocb->u.tmf.comp);
327 327
328 rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ? 328 rval = tm_iocb->u.tmf.data;
329 QLA_SUCCESS : QLA_FUNCTION_FAILED;
330 329
331 if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) { 330 if (rval != QLA_SUCCESS) {
332 ql_dbg(ql_dbg_taskm, vha, 0x8030, 331 ql_log(ql_log_warn, vha, 0x8030,
333 "TM IOCB failed (%x).\n", rval); 332 "TM IOCB failed (%x).\n", rval);
334 } 333 }
335 334
@@ -365,6 +364,7 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res)
365 srb_t *sp = (srb_t *)ptr; 364 srb_t *sp = (srb_t *)ptr;
366 struct srb_iocb *abt = &sp->u.iocb_cmd; 365 struct srb_iocb *abt = &sp->u.iocb_cmd;
367 366
367 del_timer(&sp->u.iocb_cmd.timer);
368 complete(&abt->u.abt.comp); 368 complete(&abt->u.abt.comp);
369} 369}
370 370
@@ -3260,7 +3260,8 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3260 return; 3260 return;
3261 3261
3262 if (fcport->fp_speed == PORT_SPEED_UNKNOWN || 3262 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
3263 fcport->fp_speed > ha->link_data_rate) 3263 fcport->fp_speed > ha->link_data_rate ||
3264 !ha->flags.gpsc_supported)
3264 return; 3265 return;
3265 3266
3266 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, 3267 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 1f6a3b86965f..440d79e6aea5 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -268,7 +268,8 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
268 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 268 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
269 269
270 /* Read all mbox registers? */ 270 /* Read all mbox registers? */
271 mboxes = (1 << ha->mbx_count) - 1; 271 WARN_ON_ONCE(ha->mbx_count > 32);
272 mboxes = (1ULL << ha->mbx_count) - 1;
272 if (!ha->mcp) 273 if (!ha->mcp)
273 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); 274 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
274 else 275 else
@@ -2495,7 +2496,8 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2495 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2496 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2496 2497
2497 /* Read all mbox registers? */ 2498 /* Read all mbox registers? */
2498 mboxes = (1 << ha->mbx_count) - 1; 2499 WARN_ON_ONCE(ha->mbx_count > 32);
2500 mboxes = (1ULL << ha->mbx_count) - 1;
2499 if (!ha->mcp) 2501 if (!ha->mcp)
2500 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 2502 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2501 else 2503 else
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5cbf20ab94aa..18b19744398a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4938,8 +4938,9 @@ qla2x00_do_dpc(void *data)
4938 } 4938 }
4939 } 4939 }
4940 4940
4941 if (test_and_clear_bit(ISP_ABORT_NEEDED, 4941 if (test_and_clear_bit
4942 &base_vha->dpc_flags)) { 4942 (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
4943 !test_bit(UNLOADING, &base_vha->dpc_flags)) {
4943 4944
4944 ql_dbg(ql_dbg_dpc, base_vha, 0x4007, 4945 ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
4945 "ISP abort scheduled.\n"); 4946 "ISP abort scheduled.\n");
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index e6faa0b050d1..824e27eec7a1 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -5502,7 +5502,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
5502 fc_port_t *fcport; 5502 fc_port_t *fcport;
5503 int rc; 5503 int rc;
5504 5504
5505 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); 5505 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5506 if (!fcport) { 5506 if (!fcport) {
5507 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, 5507 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
5508 "qla_target(%d): Allocation of tmp FC port failed", 5508 "qla_target(%d): Allocation of tmp FC port failed",
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a7cfc270bd08..ce1d063f3e83 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -168,6 +168,8 @@
168#define DEV_DB_NON_PERSISTENT 0 168#define DEV_DB_NON_PERSISTENT 0
169#define DEV_DB_PERSISTENT 1 169#define DEV_DB_PERSISTENT 1
170 170
171#define QL4_ISP_REG_DISCONNECT 0xffffffffU
172
171#define COPY_ISID(dst_isid, src_isid) { \ 173#define COPY_ISID(dst_isid, src_isid) { \
172 int i, j; \ 174 int i, j; \
173 for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \ 175 for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 01c3610a60cf..d8c03431d0aa 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -262,6 +262,24 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
262 262
263static struct scsi_transport_template *qla4xxx_scsi_transport; 263static struct scsi_transport_template *qla4xxx_scsi_transport;
264 264
265static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
266{
267 u32 reg_val = 0;
268 int rval = QLA_SUCCESS;
269
270 if (is_qla8022(ha))
271 reg_val = readl(&ha->qla4_82xx_reg->host_status);
272 else if (is_qla8032(ha) || is_qla8042(ha))
273 reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
274 else
275 reg_val = readw(&ha->reg->ctrl_status);
276
277 if (reg_val == QL4_ISP_REG_DISCONNECT)
278 rval = QLA_ERROR;
279
280 return rval;
281}
282
265static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, 283static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
266 uint32_t iface_type, uint32_t payload_size, 284 uint32_t iface_type, uint32_t payload_size,
267 uint32_t pid, struct sockaddr *dst_addr) 285 uint32_t pid, struct sockaddr *dst_addr)
@@ -9196,10 +9214,17 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
9196 struct srb *srb = NULL; 9214 struct srb *srb = NULL;
9197 int ret = SUCCESS; 9215 int ret = SUCCESS;
9198 int wait = 0; 9216 int wait = 0;
9217 int rval;
9199 9218
9200 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", 9219 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
9201 ha->host_no, id, lun, cmd, cmd->cmnd[0]); 9220 ha->host_no, id, lun, cmd, cmd->cmnd[0]);
9202 9221
9222 rval = qla4xxx_isp_check_reg(ha);
9223 if (rval != QLA_SUCCESS) {
9224 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9225 return FAILED;
9226 }
9227
9203 spin_lock_irqsave(&ha->hardware_lock, flags); 9228 spin_lock_irqsave(&ha->hardware_lock, flags);
9204 srb = (struct srb *) CMD_SP(cmd); 9229 srb = (struct srb *) CMD_SP(cmd);
9205 if (!srb) { 9230 if (!srb) {
@@ -9251,6 +9276,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
9251 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9276 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9252 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9277 struct ddb_entry *ddb_entry = cmd->device->hostdata;
9253 int ret = FAILED, stat; 9278 int ret = FAILED, stat;
9279 int rval;
9254 9280
9255 if (!ddb_entry) 9281 if (!ddb_entry)
9256 return ret; 9282 return ret;
@@ -9270,6 +9296,12 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
9270 cmd, jiffies, cmd->request->timeout / HZ, 9296 cmd, jiffies, cmd->request->timeout / HZ,
9271 ha->dpc_flags, cmd->result, cmd->allowed)); 9297 ha->dpc_flags, cmd->result, cmd->allowed));
9272 9298
9299 rval = qla4xxx_isp_check_reg(ha);
9300 if (rval != QLA_SUCCESS) {
9301 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9302 return FAILED;
9303 }
9304
9273 /* FIXME: wait for hba to go online */ 9305 /* FIXME: wait for hba to go online */
9274 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); 9306 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
9275 if (stat != QLA_SUCCESS) { 9307 if (stat != QLA_SUCCESS) {
@@ -9313,6 +9345,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
9313 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9345 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9314 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9346 struct ddb_entry *ddb_entry = cmd->device->hostdata;
9315 int stat, ret; 9347 int stat, ret;
9348 int rval;
9316 9349
9317 if (!ddb_entry) 9350 if (!ddb_entry)
9318 return FAILED; 9351 return FAILED;
@@ -9330,6 +9363,12 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
9330 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, 9363 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
9331 ha->dpc_flags, cmd->result, cmd->allowed)); 9364 ha->dpc_flags, cmd->result, cmd->allowed));
9332 9365
9366 rval = qla4xxx_isp_check_reg(ha);
9367 if (rval != QLA_SUCCESS) {
9368 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9369 return FAILED;
9370 }
9371
9333 stat = qla4xxx_reset_target(ha, ddb_entry); 9372 stat = qla4xxx_reset_target(ha, ddb_entry);
9334 if (stat != QLA_SUCCESS) { 9373 if (stat != QLA_SUCCESS) {
9335 starget_printk(KERN_INFO, scsi_target(cmd->device), 9374 starget_printk(KERN_INFO, scsi_target(cmd->device),
@@ -9384,9 +9423,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
9384{ 9423{
9385 int return_status = FAILED; 9424 int return_status = FAILED;
9386 struct scsi_qla_host *ha; 9425 struct scsi_qla_host *ha;
9426 int rval;
9387 9427
9388 ha = to_qla_host(cmd->device->host); 9428 ha = to_qla_host(cmd->device->host);
9389 9429
9430 rval = qla4xxx_isp_check_reg(ha);
9431 if (rval != QLA_SUCCESS) {
9432 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9433 return FAILED;
9434 }
9435
9390 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) 9436 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
9391 qla4_83xx_set_idc_dontreset(ha); 9437 qla4_83xx_set_idc_dontreset(ha);
9392 9438
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 60720e5b1ebc..6b61b09b3226 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -180,7 +180,7 @@ static struct {
180 {"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 180 {"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
181 {"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 181 {"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
182 {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ 182 {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
183 {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */ 183 {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */
184 {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, 184 {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
185 {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, 185 {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
186 {"HP", "C1557A", NULL, BLIST_FORCELUN}, 186 {"HP", "C1557A", NULL, BLIST_FORCELUN},
@@ -589,17 +589,12 @@ int scsi_get_device_flags_keyed(struct scsi_device *sdev,
589 int key) 589 int key)
590{ 590{
591 struct scsi_dev_info_list *devinfo; 591 struct scsi_dev_info_list *devinfo;
592 int err;
593 592
594 devinfo = scsi_dev_info_list_find(vendor, model, key); 593 devinfo = scsi_dev_info_list_find(vendor, model, key);
595 if (!IS_ERR(devinfo)) 594 if (!IS_ERR(devinfo))
596 return devinfo->flags; 595 return devinfo->flags;
597 596
598 err = PTR_ERR(devinfo); 597 /* key or device not found: return nothing */
599 if (err != -ENOENT)
600 return err;
601
602 /* nothing found, return nothing */
603 if (key != SCSI_DEVINFO_GLOBAL) 598 if (key != SCSI_DEVINFO_GLOBAL)
604 return 0; 599 return 0;
605 600
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 4d655b568269..a8ebaeace154 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -56,10 +56,16 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
56 {"IBM", "1815", "rdac", }, 56 {"IBM", "1815", "rdac", },
57 {"IBM", "1818", "rdac", }, 57 {"IBM", "1818", "rdac", },
58 {"IBM", "3526", "rdac", }, 58 {"IBM", "3526", "rdac", },
59 {"SGI", "TP9", "rdac", }, 59 {"IBM", "3542", "rdac", },
60 {"IBM", "3552", "rdac", },
61 {"SGI", "TP9300", "rdac", },
62 {"SGI", "TP9400", "rdac", },
63 {"SGI", "TP9500", "rdac", },
64 {"SGI", "TP9700", "rdac", },
60 {"SGI", "IS", "rdac", }, 65 {"SGI", "IS", "rdac", },
61 {"STK", "OPENstorage D280", "rdac", }, 66 {"STK", "OPENstorage", "rdac", },
62 {"STK", "FLEXLINE 380", "rdac", }, 67 {"STK", "FLEXLINE 380", "rdac", },
68 {"STK", "BladeCtlr", "rdac", },
63 {"SUN", "CSM", "rdac", }, 69 {"SUN", "CSM", "rdac", },
64 {"SUN", "LCSM100", "rdac", }, 70 {"SUN", "LCSM100", "rdac", },
65 {"SUN", "STK6580_6780", "rdac", }, 71 {"SUN", "STK6580_6780", "rdac", },
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index e3cd3ece4412..c3d1891d2d3f 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -52,6 +52,8 @@ struct srp_internal {
52 struct transport_container rport_attr_cont; 52 struct transport_container rport_attr_cont;
53}; 53};
54 54
55static int scsi_is_srp_rport(const struct device *dev);
56
55#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t) 57#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
56 58
57#define dev_to_rport(d) container_of(d, struct srp_rport, dev) 59#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
@@ -61,9 +63,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
61 return dev_to_shost(r->dev.parent); 63 return dev_to_shost(r->dev.parent);
62} 64}
63 65
66static int find_child_rport(struct device *dev, void *data)
67{
68 struct device **child = data;
69
70 if (scsi_is_srp_rport(dev)) {
71 WARN_ON_ONCE(*child);
72 *child = dev;
73 }
74 return 0;
75}
76
64static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost) 77static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
65{ 78{
66 return transport_class_to_srp_rport(&shost->shost_gendev); 79 struct device *child = NULL;
80
81 WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
82 find_child_rport) < 0);
83 return child ? dev_to_rport(child) : NULL;
67} 84}
68 85
69/** 86/**
@@ -637,7 +654,8 @@ static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
637 struct srp_rport *rport = shost_to_rport(shost); 654 struct srp_rport *rport = shost_to_rport(shost);
638 655
639 pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); 656 pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
640 return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 && 657 return rport && rport->fast_io_fail_tmo < 0 &&
658 rport->dev_loss_tmo < 0 &&
641 i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? 659 i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
642 BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; 660 BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
643} 661}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index dd72205ba298..6fffb73766de 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1929,6 +1929,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
1929 break; /* standby */ 1929 break; /* standby */
1930 if (sshdr.asc == 4 && sshdr.ascq == 0xc) 1930 if (sshdr.asc == 4 && sshdr.ascq == 0xc)
1931 break; /* unavailable */ 1931 break; /* unavailable */
1932 if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
1933 break; /* sanitize in progress */
1932 /* 1934 /*
1933 * Issue command to spin up drive when not ready 1935 * Issue command to spin up drive when not ready
1934 */ 1936 */
@@ -2393,6 +2395,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
2393 int res; 2395 int res;
2394 struct scsi_device *sdp = sdkp->device; 2396 struct scsi_device *sdp = sdkp->device;
2395 struct scsi_mode_data data; 2397 struct scsi_mode_data data;
2398 int disk_ro = get_disk_ro(sdkp->disk);
2396 int old_wp = sdkp->write_prot; 2399 int old_wp = sdkp->write_prot;
2397 2400
2398 set_disk_ro(sdkp->disk, 0); 2401 set_disk_ro(sdkp->disk, 0);
@@ -2433,7 +2436,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
2433 "Test WP failed, assume Write Enabled\n"); 2436 "Test WP failed, assume Write Enabled\n");
2434 } else { 2437 } else {
2435 sdkp->write_prot = ((data.device_specific & 0x80) != 0); 2438 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
2436 set_disk_ro(sdkp->disk, sdkp->write_prot); 2439 set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
2437 if (sdkp->first_scan || old_wp != sdkp->write_prot) { 2440 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
2438 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", 2441 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
2439 sdkp->write_prot ? "on" : "off"); 2442 sdkp->write_prot ? "on" : "off");
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 044d06410d4c..01168acc864d 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -546,7 +546,6 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
546 ecomp = &edev->component[components++]; 546 ecomp = &edev->component[components++];
547 547
548 if (!IS_ERR(ecomp)) { 548 if (!IS_ERR(ecomp)) {
549 ses_get_power_status(edev, ecomp);
550 if (addl_desc_ptr) 549 if (addl_desc_ptr)
551 ses_process_descriptor( 550 ses_process_descriptor(
552 ecomp, 551 ecomp,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0f0ff75755e0..e1639e80db53 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */
51#include <linux/atomic.h> 51#include <linux/atomic.h>
52#include <linux/ratelimit.h> 52#include <linux/ratelimit.h>
53#include <linux/uio.h> 53#include <linux/uio.h>
54#include <linux/cred.h> /* for sg_check_file_access() */
54 55
55#include "scsi.h" 56#include "scsi.h"
56#include <scsi/scsi_dbg.h> 57#include <scsi/scsi_dbg.h>
@@ -221,6 +222,33 @@ static void sg_device_destroy(struct kref *kref);
221 sdev_prefix_printk(prefix, (sdp)->device, \ 222 sdev_prefix_printk(prefix, (sdp)->device, \
222 (sdp)->disk->disk_name, fmt, ##a) 223 (sdp)->disk->disk_name, fmt, ##a)
223 224
225/*
226 * The SCSI interfaces that use read() and write() as an asynchronous variant of
227 * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
228 * to trigger read() and write() calls from various contexts with elevated
229 * privileges. This can lead to kernel memory corruption (e.g. if these
230 * interfaces are called through splice()) and privilege escalation inside
231 * userspace (e.g. if a process with access to such a device passes a file
232 * descriptor to a SUID binary as stdin/stdout/stderr).
233 *
234 * This function provides protection for the legacy API by restricting the
235 * calling context.
236 */
237static int sg_check_file_access(struct file *filp, const char *caller)
238{
239 if (filp->f_cred != current_real_cred()) {
240 pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
241 caller, task_tgid_vnr(current), current->comm);
242 return -EPERM;
243 }
244 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
245 pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
246 caller, task_tgid_vnr(current), current->comm);
247 return -EACCES;
248 }
249 return 0;
250}
251
224static int sg_allow_access(struct file *filp, unsigned char *cmd) 252static int sg_allow_access(struct file *filp, unsigned char *cmd)
225{ 253{
226 struct sg_fd *sfp = filp->private_data; 254 struct sg_fd *sfp = filp->private_data;
@@ -405,6 +433,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
405 struct sg_header *old_hdr = NULL; 433 struct sg_header *old_hdr = NULL;
406 int retval = 0; 434 int retval = 0;
407 435
436 /*
437 * This could cause a response to be stranded. Close the associated
438 * file descriptor to free up any resources being held.
439 */
440 retval = sg_check_file_access(filp, __func__);
441 if (retval)
442 return retval;
443
408 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 444 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
409 return -ENXIO; 445 return -ENXIO;
410 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, 446 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
@@ -535,6 +571,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
535 } else 571 } else
536 count = (old_hdr->result == 0) ? 0 : -EIO; 572 count = (old_hdr->result == 0) ? 0 : -EIO;
537 sg_finish_rem_req(srp); 573 sg_finish_rem_req(srp);
574 sg_remove_request(sfp, srp);
538 retval = count; 575 retval = count;
539free_old_hdr: 576free_old_hdr:
540 kfree(old_hdr); 577 kfree(old_hdr);
@@ -575,6 +612,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
575 } 612 }
576err_out: 613err_out:
577 err2 = sg_finish_rem_req(srp); 614 err2 = sg_finish_rem_req(srp);
615 sg_remove_request(sfp, srp);
578 return err ? : err2 ? : count; 616 return err ? : err2 ? : count;
579} 617}
580 618
@@ -590,9 +628,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
590 struct sg_header old_hdr; 628 struct sg_header old_hdr;
591 sg_io_hdr_t *hp; 629 sg_io_hdr_t *hp;
592 unsigned char cmnd[SG_MAX_CDB_SIZE]; 630 unsigned char cmnd[SG_MAX_CDB_SIZE];
631 int retval;
593 632
594 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) 633 retval = sg_check_file_access(filp, __func__);
595 return -EINVAL; 634 if (retval)
635 return retval;
596 636
597 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 637 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
598 return -ENXIO; 638 return -ENXIO;
@@ -674,18 +714,14 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
674 * is a non-zero input_size, so emit a warning. 714 * is a non-zero input_size, so emit a warning.
675 */ 715 */
676 if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { 716 if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
677 static char cmd[TASK_COMM_LEN]; 717 printk_ratelimited(KERN_WARNING
678 if (strcmp(current->comm, cmd)) { 718 "sg_write: data in/out %d/%d bytes "
679 printk_ratelimited(KERN_WARNING 719 "for SCSI command 0x%x-- guessing "
680 "sg_write: data in/out %d/%d bytes " 720 "data in;\n program %s not setting "
681 "for SCSI command 0x%x-- guessing " 721 "count and/or reply_len properly\n",
682 "data in;\n program %s not setting " 722 old_hdr.reply_len - (int)SZ_SG_HEADER,
683 "count and/or reply_len properly\n", 723 input_size, (unsigned int) cmnd[0],
684 old_hdr.reply_len - (int)SZ_SG_HEADER, 724 current->comm);
685 input_size, (unsigned int) cmnd[0],
686 current->comm);
687 strcpy(cmd, current->comm);
688 }
689 } 725 }
690 k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); 726 k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
691 return (k < 0) ? k : count; 727 return (k < 0) ? k : count;
@@ -784,11 +820,15 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
784 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", 820 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
785 (int) cmnd[0], (int) hp->cmd_len)); 821 (int) cmnd[0], (int) hp->cmd_len));
786 822
823 if (hp->dxfer_len >= SZ_256M)
824 return -EINVAL;
825
787 k = sg_start_req(srp, cmnd); 826 k = sg_start_req(srp, cmnd);
788 if (k) { 827 if (k) {
789 SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, 828 SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
790 "sg_common_write: start_req err=%d\n", k)); 829 "sg_common_write: start_req err=%d\n", k));
791 sg_finish_rem_req(srp); 830 sg_finish_rem_req(srp);
831 sg_remove_request(sfp, srp);
792 return k; /* probably out of space --> ENOMEM */ 832 return k; /* probably out of space --> ENOMEM */
793 } 833 }
794 if (atomic_read(&sdp->detaching)) { 834 if (atomic_read(&sdp->detaching)) {
@@ -801,6 +841,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
801 } 841 }
802 842
803 sg_finish_rem_req(srp); 843 sg_finish_rem_req(srp);
844 sg_remove_request(sfp, srp);
804 return -ENODEV; 845 return -ENODEV;
805 } 846 }
806 847
@@ -1290,6 +1331,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
1290 struct sg_fd *sfp = srp->parentfp; 1331 struct sg_fd *sfp = srp->parentfp;
1291 1332
1292 sg_finish_rem_req(srp); 1333 sg_finish_rem_req(srp);
1334 sg_remove_request(sfp, srp);
1293 kref_put(&sfp->f_ref, sg_remove_sfp); 1335 kref_put(&sfp->f_ref, sg_remove_sfp);
1294} 1336}
1295 1337
@@ -1834,8 +1876,6 @@ sg_finish_rem_req(Sg_request *srp)
1834 else 1876 else
1835 sg_remove_scat(sfp, req_schp); 1877 sg_remove_scat(sfp, req_schp);
1836 1878
1837 sg_remove_request(sfp, srp);
1838
1839 return ret; 1879 return ret;
1840} 1880}
1841 1881
@@ -1901,7 +1941,7 @@ retry:
1901 num = (rem_sz > scatter_elem_sz_prev) ? 1941 num = (rem_sz > scatter_elem_sz_prev) ?
1902 scatter_elem_sz_prev : rem_sz; 1942 scatter_elem_sz_prev : rem_sz;
1903 1943
1904 schp->pages[k] = alloc_pages(gfp_mask, order); 1944 schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
1905 if (!schp->pages[k]) 1945 if (!schp->pages[k])
1906 goto out; 1946 goto out;
1907 1947
@@ -2072,11 +2112,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2072 if ((1 == resp->done) && (!resp->sg_io_owned) && 2112 if ((1 == resp->done) && (!resp->sg_io_owned) &&
2073 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { 2113 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2074 resp->done = 2; /* guard against other readers */ 2114 resp->done = 2; /* guard against other readers */
2075 break; 2115 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2116 return resp;
2076 } 2117 }
2077 } 2118 }
2078 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2119 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2079 return resp; 2120 return NULL;
2080} 2121}
2081 2122
2082/* always adds to end of list */ 2123/* always adds to end of list */
@@ -2154,6 +2195,7 @@ sg_add_sfp(Sg_device * sdp)
2154 write_lock_irqsave(&sdp->sfd_lock, iflags); 2195 write_lock_irqsave(&sdp->sfd_lock, iflags);
2155 if (atomic_read(&sdp->detaching)) { 2196 if (atomic_read(&sdp->detaching)) {
2156 write_unlock_irqrestore(&sdp->sfd_lock, iflags); 2197 write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2198 kfree(sfp);
2157 return ERR_PTR(-ENODEV); 2199 return ERR_PTR(-ENODEV);
2158 } 2200 }
2159 list_add_tail(&sfp->sfd_siblings, &sdp->sfds); 2201 list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
@@ -2182,12 +2224,17 @@ sg_remove_sfp_usercontext(struct work_struct *work)
2182 struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); 2224 struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
2183 struct sg_device *sdp = sfp->parentdp; 2225 struct sg_device *sdp = sfp->parentdp;
2184 Sg_request *srp; 2226 Sg_request *srp;
2227 unsigned long iflags;
2185 2228
2186 /* Cleanup any responses which were never read(). */ 2229 /* Cleanup any responses which were never read(). */
2230 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2187 while (!list_empty(&sfp->rq_list)) { 2231 while (!list_empty(&sfp->rq_list)) {
2188 srp = list_first_entry(&sfp->rq_list, Sg_request, entry); 2232 srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
2189 sg_finish_rem_req(srp); 2233 sg_finish_rem_req(srp);
2234 list_del(&srp->entry);
2235 srp->parentfp = NULL;
2190 } 2236 }
2237 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2191 2238
2192 if (sfp->reserve.bufflen > 0) { 2239 if (sfp->reserve.bufflen > 0) {
2193 SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, 2240 SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 3b3b56f4a830..82ed99848378 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -176,8 +176,7 @@ static struct eisa_device_id sim710_eisa_ids[] = {
176}; 176};
177MODULE_DEVICE_TABLE(eisa, sim710_eisa_ids); 177MODULE_DEVICE_TABLE(eisa, sim710_eisa_ids);
178 178
179static __init int 179static int sim710_eisa_probe(struct device *dev)
180sim710_eisa_probe(struct device *dev)
181{ 180{
182 struct eisa_device *edev = to_eisa_device(dev); 181 struct eisa_device *edev = to_eisa_device(dev);
183 unsigned long io_addr = edev->base_addr; 182 unsigned long io_addr = edev->base_addr;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 804586aeaffe..5dc288fecace 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -520,16 +520,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
520static int sr_block_open(struct block_device *bdev, fmode_t mode) 520static int sr_block_open(struct block_device *bdev, fmode_t mode)
521{ 521{
522 struct scsi_cd *cd; 522 struct scsi_cd *cd;
523 struct scsi_device *sdev;
523 int ret = -ENXIO; 524 int ret = -ENXIO;
524 525
525 mutex_lock(&sr_mutex);
526 cd = scsi_cd_get(bdev->bd_disk); 526 cd = scsi_cd_get(bdev->bd_disk);
527 if (cd) { 527 if (!cd)
528 ret = cdrom_open(&cd->cdi, bdev, mode); 528 goto out;
529 if (ret) 529
530 scsi_cd_put(cd); 530 sdev = cd->device;
531 } 531 scsi_autopm_get_device(sdev);
532 check_disk_change(bdev);
533
534 mutex_lock(&sr_mutex);
535 ret = cdrom_open(&cd->cdi, bdev, mode);
532 mutex_unlock(&sr_mutex); 536 mutex_unlock(&sr_mutex);
537
538 scsi_autopm_put_device(sdev);
539 if (ret)
540 scsi_cd_put(cd);
541
542out:
533 return ret; 543 return ret;
534} 544}
535 545
@@ -557,6 +567,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
557 if (ret) 567 if (ret)
558 goto out; 568 goto out;
559 569
570 scsi_autopm_get_device(sdev);
571
560 /* 572 /*
561 * Send SCSI addressing ioctls directly to mid level, send other 573 * Send SCSI addressing ioctls directly to mid level, send other
562 * ioctls to cdrom/block level. 574 * ioctls to cdrom/block level.
@@ -565,15 +577,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
565 case SCSI_IOCTL_GET_IDLUN: 577 case SCSI_IOCTL_GET_IDLUN:
566 case SCSI_IOCTL_GET_BUS_NUMBER: 578 case SCSI_IOCTL_GET_BUS_NUMBER:
567 ret = scsi_ioctl(sdev, cmd, argp); 579 ret = scsi_ioctl(sdev, cmd, argp);
568 goto out; 580 goto put;
569 } 581 }
570 582
571 ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); 583 ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
572 if (ret != -ENOSYS) 584 if (ret != -ENOSYS)
573 goto out; 585 goto put;
574 586
575 ret = scsi_ioctl(sdev, cmd, argp); 587 ret = scsi_ioctl(sdev, cmd, argp);
576 588
589put:
590 scsi_autopm_put_device(sdev);
591
577out: 592out:
578 mutex_unlock(&sr_mutex); 593 mutex_unlock(&sr_mutex);
579 return ret; 594 return ret;
@@ -582,18 +597,28 @@ out:
582static unsigned int sr_block_check_events(struct gendisk *disk, 597static unsigned int sr_block_check_events(struct gendisk *disk,
583 unsigned int clearing) 598 unsigned int clearing)
584{ 599{
585 struct scsi_cd *cd = scsi_cd(disk); 600 unsigned int ret = 0;
601 struct scsi_cd *cd;
586 602
587 if (atomic_read(&cd->device->disk_events_disable_depth)) 603 cd = scsi_cd_get(disk);
604 if (!cd)
588 return 0; 605 return 0;
589 606
590 return cdrom_check_events(&cd->cdi, clearing); 607 if (!atomic_read(&cd->device->disk_events_disable_depth))
608 ret = cdrom_check_events(&cd->cdi, clearing);
609
610 scsi_cd_put(cd);
611 return ret;
591} 612}
592 613
593static int sr_block_revalidate_disk(struct gendisk *disk) 614static int sr_block_revalidate_disk(struct gendisk *disk)
594{ 615{
595 struct scsi_cd *cd = scsi_cd(disk);
596 struct scsi_sense_hdr sshdr; 616 struct scsi_sense_hdr sshdr;
617 struct scsi_cd *cd;
618
619 cd = scsi_cd_get(disk);
620 if (!cd)
621 return -ENXIO;
597 622
598 /* if the unit is not ready, nothing more to do */ 623 /* if the unit is not ready, nothing more to do */
599 if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) 624 if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
@@ -602,6 +627,7 @@ static int sr_block_revalidate_disk(struct gendisk *disk)
602 sr_cd_check(&cd->cdi); 627 sr_cd_check(&cd->cdi);
603 get_sectorsize(cd); 628 get_sectorsize(cd);
604out: 629out:
630 scsi_cd_put(cd);
605 return 0; 631 return 0;
606} 632}
607 633
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 5e4e1ba96f10..44b7a69d022a 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -890,10 +890,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
890 case TEST_UNIT_READY: 890 case TEST_UNIT_READY:
891 break; 891 break;
892 default: 892 default:
893 set_host_byte(scmnd, DID_TARGET_FAILURE); 893 set_host_byte(scmnd, DID_ERROR);
894 } 894 }
895 break; 895 break;
896 case SRB_STATUS_INVALID_LUN: 896 case SRB_STATUS_INVALID_LUN:
897 set_host_byte(scmnd, DID_NO_CONNECT);
897 do_work = true; 898 do_work = true;
898 process_err_fn = storvsc_remove_lun; 899 process_err_fn = storvsc_remove_lun;
899 break; 900 break;
@@ -1537,7 +1538,7 @@ static struct scsi_host_template scsi_driver = {
1537 .eh_timed_out = storvsc_eh_timed_out, 1538 .eh_timed_out = storvsc_eh_timed_out,
1538 .slave_alloc = storvsc_device_alloc, 1539 .slave_alloc = storvsc_device_alloc,
1539 .slave_configure = storvsc_device_configure, 1540 .slave_configure = storvsc_device_configure,
1540 .cmd_per_lun = 255, 1541 .cmd_per_lun = 2048,
1541 .this_id = -1, 1542 .this_id = -1,
1542 .use_clustering = ENABLE_CLUSTERING, 1543 .use_clustering = ENABLE_CLUSTERING,
1543 /* Make sure we dont get a sg segment crosses a page boundary */ 1544 /* Make sure we dont get a sg segment crosses a page boundary */
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 6b349e301869..c6425e3df5a0 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -536,7 +536,7 @@ sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fa
536 * Look for the greatest clock divisor that allows an 536 * Look for the greatest clock divisor that allows an
537 * input speed faster than the period. 537 * input speed faster than the period.
538 */ 538 */
539 while (div-- > 0) 539 while (--div > 0)
540 if (kpc >= (div_10M[div] << 2)) break; 540 if (kpc >= (div_10M[div] << 2)) break;
541 541
542 /* 542 /*
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 0c2482ec7d21..8c58adadb728 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2923,6 +2923,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
2923 /* REPORT SUPPORTED OPERATION CODES is not supported */ 2923 /* REPORT SUPPORTED OPERATION CODES is not supported */
2924 sdev->no_report_opcodes = 1; 2924 sdev->no_report_opcodes = 1;
2925 2925
2926 /* WRITE_SAME command is not supported */
2927 sdev->no_write_same = 1;
2926 2928
2927 ufshcd_set_queue_depth(sdev); 2929 ufshcd_set_queue_depth(sdev);
2928 2930
@@ -3445,6 +3447,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
3445 hba = container_of(work, struct ufs_hba, eeh_work); 3447 hba = container_of(work, struct ufs_hba, eeh_work);
3446 3448
3447 pm_runtime_get_sync(hba->dev); 3449 pm_runtime_get_sync(hba->dev);
3450 scsi_block_requests(hba->host);
3448 err = ufshcd_get_ee_status(hba, &status); 3451 err = ufshcd_get_ee_status(hba, &status);
3449 if (err) { 3452 if (err) {
3450 dev_err(hba->dev, "%s: failed to get exception status %d\n", 3453 dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -3460,6 +3463,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
3460 __func__, err); 3463 __func__, err);
3461 } 3464 }
3462out: 3465out:
3466 scsi_unblock_requests(hba->host);
3463 pm_runtime_put_sync(hba->dev); 3467 pm_runtime_put_sync(hba->dev);
3464 return; 3468 return;
3465} 3469}
@@ -4392,12 +4396,15 @@ static int ufshcd_config_vreg(struct device *dev,
4392 struct ufs_vreg *vreg, bool on) 4396 struct ufs_vreg *vreg, bool on)
4393{ 4397{
4394 int ret = 0; 4398 int ret = 0;
4395 struct regulator *reg = vreg->reg; 4399 struct regulator *reg;
4396 const char *name = vreg->name; 4400 const char *name;
4397 int min_uV, uA_load; 4401 int min_uV, uA_load;
4398 4402
4399 BUG_ON(!vreg); 4403 BUG_ON(!vreg);
4400 4404
4405 reg = vreg->reg;
4406 name = vreg->name;
4407
4401 if (regulator_count_voltages(reg) > 0) { 4408 if (regulator_count_voltages(reg) > 0) {
4402 min_uV = on ? vreg->min_uV : 0; 4409 min_uV = on ? vreg->min_uV : 0;
4403 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); 4410 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 03a2aadf0d3c..8ef905cbfc9c 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -28,6 +28,7 @@
28#include <scsi/scsi_device.h> 28#include <scsi/scsi_device.h>
29#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
30#include <scsi/scsi_tcq.h> 30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_devinfo.h>
31#include <linux/seqlock.h> 32#include <linux/seqlock.h>
32 33
33#define VIRTIO_SCSI_MEMPOOL_SZ 64 34#define VIRTIO_SCSI_MEMPOOL_SZ 64
@@ -704,6 +705,28 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
704 return virtscsi_tmf(vscsi, cmd); 705 return virtscsi_tmf(vscsi, cmd);
705} 706}
706 707
708static int virtscsi_device_alloc(struct scsi_device *sdevice)
709{
710 /*
711 * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
712 * may have transfer limits which come from the host SCSI
713 * controller or something on the host side other than the
714 * target itself.
715 *
716 * To make this work properly, the hypervisor can adjust the
717 * target's VPD information to advertise these limits. But
718 * for that to work, the guest has to look at the VPD pages,
719 * which we won't do by default if it is an SPC-2 device, even
720 * if it does actually support it.
721 *
722 * So, set the blist to always try to read the VPD pages.
723 */
724 sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;
725
726 return 0;
727}
728
729
707/** 730/**
708 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth 731 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
709 * @sdev: Virtscsi target whose queue depth to change 732 * @sdev: Virtscsi target whose queue depth to change
@@ -775,6 +798,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
775 .change_queue_depth = virtscsi_change_queue_depth, 798 .change_queue_depth = virtscsi_change_queue_depth,
776 .eh_abort_handler = virtscsi_abort, 799 .eh_abort_handler = virtscsi_abort,
777 .eh_device_reset_handler = virtscsi_device_reset, 800 .eh_device_reset_handler = virtscsi_device_reset,
801 .slave_alloc = virtscsi_device_alloc,
778 802
779 .can_queue = 1024, 803 .can_queue = 1024,
780 .dma_boundary = UINT_MAX, 804 .dma_boundary = UINT_MAX,
@@ -795,6 +819,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
795 .eh_abort_handler = virtscsi_abort, 819 .eh_abort_handler = virtscsi_abort,
796 .eh_device_reset_handler = virtscsi_device_reset, 820 .eh_device_reset_handler = virtscsi_device_reset,
797 821
822 .slave_alloc = virtscsi_device_alloc,
798 .can_queue = 1024, 823 .can_queue = 1024,
799 .dma_boundary = UINT_MAX, 824 .dma_boundary = UINT_MAX,
800 .use_clustering = ENABLE_CLUSTERING, 825 .use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 8feac599e9ab..44be6b593b30 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1669,12 +1669,12 @@ static int atmel_spi_remove(struct platform_device *pdev)
1669 pm_runtime_get_sync(&pdev->dev); 1669 pm_runtime_get_sync(&pdev->dev);
1670 1670
1671 /* reset the hardware and block queue progress */ 1671 /* reset the hardware and block queue progress */
1672 spin_lock_irq(&as->lock);
1673 if (as->use_dma) { 1672 if (as->use_dma) {
1674 atmel_spi_stop_dma(as); 1673 atmel_spi_stop_dma(as);
1675 atmel_spi_release_dma(as); 1674 atmel_spi_release_dma(as);
1676 } 1675 }
1677 1676
1677 spin_lock_irq(&as->lock);
1678 spi_writel(as, CR, SPI_BIT(SWRST)); 1678 spi_writel(as, CR, SPI_BIT(SWRST));
1679 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1679 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
1680 spi_readl(as, SR); 1680 spi_readl(as, SR);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 02fb96797ac8..0d8f43a17edb 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
646 buf = t->rx_buf; 646 buf = t->rx_buf;
647 t->rx_dma = dma_map_single(&spi->dev, buf, 647 t->rx_dma = dma_map_single(&spi->dev, buf,
648 t->len, DMA_FROM_DEVICE); 648 t->len, DMA_FROM_DEVICE);
649 if (dma_mapping_error(&spi->dev, !t->rx_dma)) { 649 if (dma_mapping_error(&spi->dev, t->rx_dma)) {
650 ret = -EFAULT; 650 ret = -EFAULT;
651 goto err_rx_map; 651 goto err_rx_map;
652 } 652 }
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index a6d7029a85ac..581df3ebfc88 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -120,8 +120,8 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
120{ 120{
121 struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev); 121 struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
122 122
123 clk_disable_unprepare(dwsmmio->clk);
124 dw_spi_remove_host(&dwsmmio->dws); 123 dw_spi_remove_host(&dwsmmio->dws);
124 clk_disable_unprepare(dwsmmio->clk);
125 125
126 return 0; 126 return 0;
127} 127}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 0e5723ab47f0..d17ec6775718 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1228,12 +1228,23 @@ static int spi_imx_remove(struct platform_device *pdev)
1228{ 1228{
1229 struct spi_master *master = platform_get_drvdata(pdev); 1229 struct spi_master *master = platform_get_drvdata(pdev);
1230 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 1230 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1231 int ret;
1231 1232
1232 spi_bitbang_stop(&spi_imx->bitbang); 1233 spi_bitbang_stop(&spi_imx->bitbang);
1233 1234
1235 ret = clk_enable(spi_imx->clk_per);
1236 if (ret)
1237 return ret;
1238
1239 ret = clk_enable(spi_imx->clk_ipg);
1240 if (ret) {
1241 clk_disable(spi_imx->clk_per);
1242 return ret;
1243 }
1244
1234 writel(0, spi_imx->base + MXC_CSPICTRL); 1245 writel(0, spi_imx->base + MXC_CSPICTRL);
1235 clk_unprepare(spi_imx->clk_ipg); 1246 clk_disable_unprepare(spi_imx->clk_ipg);
1236 clk_unprepare(spi_imx->clk_per); 1247 clk_disable_unprepare(spi_imx->clk_per);
1237 spi_imx_sdma_exit(spi_imx); 1248 spi_imx_sdma_exit(spi_imx);
1238 spi_master_put(master); 1249 spi_master_put(master);
1239 1250
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 1a7a67495507..651381495f7a 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -454,6 +454,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
454 int elements = 0; 454 int elements = 0;
455 int word_len, element_count; 455 int word_len, element_count;
456 struct omap2_mcspi_cs *cs = spi->controller_state; 456 struct omap2_mcspi_cs *cs = spi->controller_state;
457 void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
458
457 mcspi = spi_master_get_devdata(spi->master); 459 mcspi = spi_master_get_devdata(spi->master);
458 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 460 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
459 count = xfer->len; 461 count = xfer->len;
@@ -549,8 +551,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
549 if (l & OMAP2_MCSPI_CHCONF_TURBO) { 551 if (l & OMAP2_MCSPI_CHCONF_TURBO) {
550 elements--; 552 elements--;
551 553
552 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) 554 if (!mcspi_wait_for_reg_bit(chstat_reg,
553 & OMAP2_MCSPI_CHSTAT_RXS)) { 555 OMAP2_MCSPI_CHSTAT_RXS)) {
554 u32 w; 556 u32 w;
555 557
556 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); 558 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
@@ -568,8 +570,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
568 return count; 570 return count;
569 } 571 }
570 } 572 }
571 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) 573 if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) {
572 & OMAP2_MCSPI_CHSTAT_RXS)) {
573 u32 w; 574 u32 w;
574 575
575 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); 576 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 58efa98313aa..24c07fea9de2 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -38,7 +38,7 @@ struct driver_data {
38 38
39 /* SSP register addresses */ 39 /* SSP register addresses */
40 void __iomem *ioaddr; 40 void __iomem *ioaddr;
41 u32 ssdr_physical; 41 phys_addr_t ssdr_physical;
42 42
43 /* SSP masks*/ 43 /* SSP masks*/
44 u32 dma_cr1; 44 u32 dma_cr1;
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index 39d7c7c70112..2eea3de5a668 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -458,7 +458,7 @@ err_free_master:
458 458
459static int sun4i_spi_remove(struct platform_device *pdev) 459static int sun4i_spi_remove(struct platform_device *pdev)
460{ 460{
461 pm_runtime_disable(&pdev->dev); 461 pm_runtime_force_suspend(&pdev->dev);
462 462
463 return 0; 463 return 0;
464} 464}
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index e77add01b0e9..48888ab630c2 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -457,7 +457,7 @@ err_free_master:
457 457
458static int sun6i_spi_remove(struct platform_device *pdev) 458static int sun6i_spi_remove(struct platform_device *pdev)
459{ 459{
460 pm_runtime_disable(&pdev->dev); 460 pm_runtime_force_suspend(&pdev->dev);
461 461
462 return 0; 462 return 0;
463} 463}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index d39a202696c5..6638832e3bd8 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -707,8 +707,14 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
707 for (i = 0; i < sgs; i++) { 707 for (i = 0; i < sgs; i++) {
708 708
709 if (vmalloced_buf) { 709 if (vmalloced_buf) {
710 min = min_t(size_t, 710 /*
711 len, desc_len - offset_in_page(buf)); 711 * Next scatterlist entry size is the minimum between
712 * the desc_len and the remaining buffer length that
713 * fits in a page.
714 */
715 min = min_t(size_t, desc_len,
716 min_t(size_t, len,
717 PAGE_SIZE - offset_in_page(buf)));
712 vm_page = vmalloc_to_page(buf); 718 vm_page = vmalloc_to_page(buf);
713 if (!vm_page) { 719 if (!vm_page) {
714 sg_free_table(sgt); 720 sg_free_table(sgt);
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 5d1e9a0fc389..e2ff6b5b2094 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -613,9 +613,10 @@ out:
613 return err; 613 return err;
614} 614}
615 615
616static int ssb_bus_register(struct ssb_bus *bus, 616static int __maybe_unused
617 ssb_invariants_func_t get_invariants, 617ssb_bus_register(struct ssb_bus *bus,
618 unsigned long baseaddr) 618 ssb_invariants_func_t get_invariants,
619 unsigned long baseaddr)
619{ 620{
620 int err; 621 int err;
621 622
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index b64327722660..013b33760639 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -330,24 +330,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
330 mutex_lock(&ashmem_mutex); 330 mutex_lock(&ashmem_mutex);
331 331
332 if (asma->size == 0) { 332 if (asma->size == 0) {
333 ret = -EINVAL; 333 mutex_unlock(&ashmem_mutex);
334 goto out; 334 return -EINVAL;
335 } 335 }
336 336
337 if (!asma->file) { 337 if (!asma->file) {
338 ret = -EBADF; 338 mutex_unlock(&ashmem_mutex);
339 goto out; 339 return -EBADF;
340 } 340 }
341 341
342 mutex_unlock(&ashmem_mutex);
343
342 ret = vfs_llseek(asma->file, offset, origin); 344 ret = vfs_llseek(asma->file, offset, origin);
343 if (ret < 0) 345 if (ret < 0)
344 goto out; 346 return ret;
345 347
346 /** Copy f_pos from backing file, since f_ops->llseek() sets it */ 348 /** Copy f_pos from backing file, since f_ops->llseek() sets it */
347 file->f_pos = asma->file->f_pos; 349 file->f_pos = asma->file->f_pos;
348
349out:
350 mutex_unlock(&ashmem_mutex);
351 return ret; 350 return ret;
352} 351}
353 352
@@ -704,30 +703,30 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
704 size_t pgstart, pgend; 703 size_t pgstart, pgend;
705 int ret = -EINVAL; 704 int ret = -EINVAL;
706 705
707 if (unlikely(!asma->file))
708 return -EINVAL;
709
710 if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) 706 if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
711 return -EFAULT; 707 return -EFAULT;
712 708
709 mutex_lock(&ashmem_mutex);
710
711 if (unlikely(!asma->file))
712 goto out_unlock;
713
713 /* per custom, you can pass zero for len to mean "everything onward" */ 714 /* per custom, you can pass zero for len to mean "everything onward" */
714 if (!pin.len) 715 if (!pin.len)
715 pin.len = PAGE_ALIGN(asma->size) - pin.offset; 716 pin.len = PAGE_ALIGN(asma->size) - pin.offset;
716 717
717 if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) 718 if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
718 return -EINVAL; 719 goto out_unlock;
719 720
720 if (unlikely(((__u32)-1) - pin.offset < pin.len)) 721 if (unlikely(((__u32)-1) - pin.offset < pin.len))
721 return -EINVAL; 722 goto out_unlock;
722 723
723 if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) 724 if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
724 return -EINVAL; 725 goto out_unlock;
725 726
726 pgstart = pin.offset / PAGE_SIZE; 727 pgstart = pin.offset / PAGE_SIZE;
727 pgend = pgstart + (pin.len / PAGE_SIZE) - 1; 728 pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
728 729
729 mutex_lock(&ashmem_mutex);
730
731 switch (cmd) { 730 switch (cmd) {
732 case ASHMEM_PIN: 731 case ASHMEM_PIN:
733 ret = ashmem_pin(asma, pgstart, pgend); 732 ret = ashmem_pin(asma, pgstart, pgend);
@@ -740,6 +739,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
740 break; 739 break;
741 } 740 }
742 741
742out_unlock:
743 mutex_unlock(&ashmem_mutex); 743 mutex_unlock(&ashmem_mutex);
744 744
745 return ret; 745 return ret;
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index ca15a87f6fd3..13a9b4c42b26 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
38 struct page **tmp = pages; 38 struct page **tmp = pages;
39 39
40 if (!pages) 40 if (!pages)
41 return NULL; 41 return ERR_PTR(-ENOMEM);
42 42
43 if (buffer->flags & ION_FLAG_CACHED) 43 if (buffer->flags & ION_FLAG_CACHED)
44 pgprot = PAGE_KERNEL; 44 pgprot = PAGE_KERNEL;
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index d4c3e5512dd5..b69dfc706440 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -27,7 +27,7 @@
27#include "ion_priv.h" 27#include "ion_priv.h"
28 28
29static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | 29static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
30 __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM; 30 __GFP_NORETRY) & ~__GFP_RECLAIM;
31static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN); 31static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
32static const unsigned int orders[] = {8, 4, 0}; 32static const unsigned int orders[] = {8, 4, 0};
33static const int num_orders = ARRAY_SIZE(orders); 33static const int num_orders = ARRAY_SIZE(orders);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index b63dd2ef78b5..1f398d06f4ee 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -484,8 +484,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
484 struct comedi_cmd *cmd = &async->cmd; 484 struct comedi_cmd *cmd = &async->cmd;
485 485
486 if (cmd->stop_src == TRIG_COUNT) { 486 if (cmd->stop_src == TRIG_COUNT) {
487 unsigned int nscans = nsamples / cmd->scan_end_arg; 487 unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg);
488 unsigned int scans_left = __comedi_nscans_left(s, nscans);
489 unsigned int scan_pos = 488 unsigned int scan_pos =
490 comedi_bytes_to_samples(s, async->scan_progress); 489 comedi_bytes_to_samples(s, async->scan_progress);
491 unsigned long long samples_left = 0; 490 unsigned long long samples_left = 0;
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index c975f6e8be49..8f181caffca3 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1348,6 +1348,8 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status)
1348 ack |= NISTC_INTA_ACK_AI_START; 1348 ack |= NISTC_INTA_ACK_AI_START;
1349 if (a_status & NISTC_AI_STATUS1_STOP) 1349 if (a_status & NISTC_AI_STATUS1_STOP)
1350 ack |= NISTC_INTA_ACK_AI_STOP; 1350 ack |= NISTC_INTA_ACK_AI_STOP;
1351 if (a_status & NISTC_AI_STATUS1_OVER)
1352 ack |= NISTC_INTA_ACK_AI_ERR;
1351 if (ack) 1353 if (ack)
1352 ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG); 1354 ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG);
1353} 1355}
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index e9e43139157d..769a94015117 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
642 /* Make sure D/A update mode is direct update */ 642 /* Make sure D/A update mode is direct update */
643 outb(0, dev->iobase + DAQP_AUX_REG); 643 outb(0, dev->iobase + DAQP_AUX_REG);
644 644
645 for (i = 0; i > insn->n; i++) { 645 for (i = 0; i < insn->n; i++) {
646 unsigned val = data[i]; 646 unsigned val = data[i];
647 int ret; 647 int ret;
648 648
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index abc66908681d..6f032009f93f 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -124,6 +124,8 @@
124#define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */ 124#define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */
125#define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */ 125#define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */
126 126
127#define AD7192_EXT_FREQ_MHZ_MIN 2457600
128#define AD7192_EXT_FREQ_MHZ_MAX 5120000
127#define AD7192_INT_FREQ_MHZ 4915200 129#define AD7192_INT_FREQ_MHZ 4915200
128 130
129/* NOTE: 131/* NOTE:
@@ -199,6 +201,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st)
199 ARRAY_SIZE(ad7192_calib_arr)); 201 ARRAY_SIZE(ad7192_calib_arr));
200} 202}
201 203
204static inline bool ad7192_valid_external_frequency(u32 freq)
205{
206 return (freq >= AD7192_EXT_FREQ_MHZ_MIN &&
207 freq <= AD7192_EXT_FREQ_MHZ_MAX);
208}
209
202static int ad7192_setup(struct ad7192_state *st, 210static int ad7192_setup(struct ad7192_state *st,
203 const struct ad7192_platform_data *pdata) 211 const struct ad7192_platform_data *pdata)
204{ 212{
@@ -224,17 +232,20 @@ static int ad7192_setup(struct ad7192_state *st,
224 id); 232 id);
225 233
226 switch (pdata->clock_source_sel) { 234 switch (pdata->clock_source_sel) {
227 case AD7192_CLK_EXT_MCLK1_2:
228 case AD7192_CLK_EXT_MCLK2:
229 st->mclk = AD7192_INT_FREQ_MHZ;
230 break;
231 case AD7192_CLK_INT: 235 case AD7192_CLK_INT:
232 case AD7192_CLK_INT_CO: 236 case AD7192_CLK_INT_CO:
233 if (pdata->ext_clk_hz) 237 st->mclk = AD7192_INT_FREQ_MHZ;
234 st->mclk = pdata->ext_clk_hz;
235 else
236 st->mclk = AD7192_INT_FREQ_MHZ;
237 break; 238 break;
239 case AD7192_CLK_EXT_MCLK1_2:
240 case AD7192_CLK_EXT_MCLK2:
241 if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) {
242 st->mclk = pdata->ext_clk_hz;
243 break;
244 }
245 dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n",
246 pdata->ext_clk_hz);
247 ret = -EINVAL;
248 goto out;
238 default: 249 default:
239 ret = -EINVAL; 250 ret = -EINVAL;
240 goto out; 251 goto out;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index 39f5261c9854..5cf5b7334089 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -824,7 +824,7 @@ void sptlrpc_request_out_callback(struct ptlrpc_request *req)
824 if (req->rq_pool || !req->rq_reqbuf) 824 if (req->rq_pool || !req->rq_reqbuf)
825 return; 825 return;
826 826
827 kfree(req->rq_reqbuf); 827 kvfree(req->rq_reqbuf);
828 req->rq_reqbuf = NULL; 828 req->rq_reqbuf = NULL;
829 req->rq_reqbuf_len = 0; 829 req->rq_reqbuf_len = 0;
830} 830}
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index a076ede50b22..ec90f2781085 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -1399,19 +1399,13 @@ static int rtw_wx_get_essid(struct net_device *dev,
1399 if ((check_fwstate(pmlmepriv, _FW_LINKED)) || 1399 if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
1400 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) { 1400 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
1401 len = pcur_bss->Ssid.SsidLength; 1401 len = pcur_bss->Ssid.SsidLength;
1402
1403 wrqu->essid.length = len;
1404
1405 memcpy(extra, pcur_bss->Ssid.Ssid, len); 1402 memcpy(extra, pcur_bss->Ssid.Ssid, len);
1406
1407 wrqu->essid.flags = 1;
1408 } else { 1403 } else {
1409 ret = -1; 1404 len = 0;
1410 goto exit; 1405 *extra = 0;
1411 } 1406 }
1412 1407 wrqu->essid.length = len;
1413exit: 1408 wrqu->essid.flags = 1;
1414
1415 1409
1416 return ret; 1410 return ret;
1417} 1411}
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index e06864f64beb..0f6bc6b8e4c6 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1749,6 +1749,8 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
1749 1749
1750 priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL); 1750 priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL);
1751 priv->oldaddr = kmalloc(16, GFP_KERNEL); 1751 priv->oldaddr = kmalloc(16, GFP_KERNEL);
1752 if (!priv->oldaddr)
1753 return -ENOMEM;
1752 oldaddr = priv->oldaddr; 1754 oldaddr = priv->oldaddr;
1753 align = ((long)oldaddr) & 3; 1755 align = ((long)oldaddr) & 3;
1754 if (align) { 1756 if (align) {
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index fdfeb42b2b8f..06ef26872462 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -831,7 +831,9 @@ static ssize_t message_show(struct kobject *kobj,
831 struct msg_group_t *group = spk_find_msg_group(attr->attr.name); 831 struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
832 unsigned long flags; 832 unsigned long flags;
833 833
834 BUG_ON(!group); 834 if (WARN_ON(!group))
835 return -EINVAL;
836
835 spin_lock_irqsave(&speakup_info.spinlock, flags); 837 spin_lock_irqsave(&speakup_info.spinlock, flags);
836 retval = message_show_helper(buf, group->start, group->end); 838 retval = message_show_helper(buf, group->start, group->end);
837 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 839 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
@@ -843,7 +845,9 @@ static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr,
843{ 845{
844 struct msg_group_t *group = spk_find_msg_group(attr->attr.name); 846 struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
845 847
846 BUG_ON(!group); 848 if (WARN_ON(!group))
849 return -EINVAL;
850
847 return message_store_helper(buf, count, group); 851 return message_store_helper(buf, count, group);
848} 852}
849 853
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index 824d460911ec..58ccafb97344 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -1039,7 +1039,6 @@ static int synaptics_rmi4_remove(struct i2c_client *client)
1039 return 0; 1039 return 0;
1040} 1040}
1041 1041
1042#ifdef CONFIG_PM
1043/** 1042/**
1044 * synaptics_rmi4_suspend() - suspend the touch screen controller 1043 * synaptics_rmi4_suspend() - suspend the touch screen controller
1045 * @dev: pointer to device structure 1044 * @dev: pointer to device structure
@@ -1047,7 +1046,7 @@ static int synaptics_rmi4_remove(struct i2c_client *client)
1047 * This function is used to suspend the 1046 * This function is used to suspend the
1048 * touch panel controller and returns integer 1047 * touch panel controller and returns integer
1049 */ 1048 */
1050static int synaptics_rmi4_suspend(struct device *dev) 1049static int __maybe_unused synaptics_rmi4_suspend(struct device *dev)
1051{ 1050{
1052 /* Touch sleep mode */ 1051 /* Touch sleep mode */
1053 int retval; 1052 int retval;
@@ -1081,7 +1080,7 @@ static int synaptics_rmi4_suspend(struct device *dev)
1081 * This function is used to resume the touch panel 1080 * This function is used to resume the touch panel
1082 * controller and returns integer. 1081 * controller and returns integer.
1083 */ 1082 */
1084static int synaptics_rmi4_resume(struct device *dev) 1083static int __maybe_unused synaptics_rmi4_resume(struct device *dev)
1085{ 1084{
1086 int retval; 1085 int retval;
1087 unsigned char intr_status; 1086 unsigned char intr_status;
@@ -1112,8 +1111,6 @@ static int synaptics_rmi4_resume(struct device *dev)
1112 return 0; 1111 return 0;
1113} 1112}
1114 1113
1115#endif
1116
1117static SIMPLE_DEV_PM_OPS(synaptics_rmi4_dev_pm_ops, synaptics_rmi4_suspend, 1114static SIMPLE_DEV_PM_OPS(synaptics_rmi4_dev_pm_ops, synaptics_rmi4_suspend,
1118 synaptics_rmi4_resume); 1115 synaptics_rmi4_resume);
1119 1116
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index c119f20dfd44..3f2ccf9d7358 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -792,7 +792,7 @@ static void
792do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) 792do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
793{ 793{
794 struct scsi_device *scsidev; 794 struct scsi_device *scsidev;
795 unsigned char buf[36]; 795 unsigned char *buf;
796 struct scatterlist *sg; 796 struct scatterlist *sg;
797 unsigned int i; 797 unsigned int i;
798 char *this_page; 798 char *this_page;
@@ -807,6 +807,10 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
807 if (cmdrsp->scsi.no_disk_result == 0) 807 if (cmdrsp->scsi.no_disk_result == 0)
808 return; 808 return;
809 809
810 buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
811 if (!buf)
812 return;
813
810 /* Linux scsi code wants a device at Lun 0 814 /* Linux scsi code wants a device at Lun 0
811 * to issue report luns, but we don't want 815 * to issue report luns, but we don't want
812 * a disk there so we'll present a processor 816 * a disk there so we'll present a processor
@@ -820,6 +824,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
820 if (scsi_sg_count(scsicmd) == 0) { 824 if (scsi_sg_count(scsicmd) == 0) {
821 memcpy(scsi_sglist(scsicmd), buf, 825 memcpy(scsi_sglist(scsicmd), buf,
822 cmdrsp->scsi.bufflen); 826 cmdrsp->scsi.bufflen);
827 kfree(buf);
823 return; 828 return;
824 } 829 }
825 830
@@ -831,6 +836,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
831 memcpy(this_page, buf + bufind, sg[i].length); 836 memcpy(this_page, buf + bufind, sg[i].length);
832 kunmap_atomic(this_page_orig); 837 kunmap_atomic(this_page_orig);
833 } 838 }
839 kfree(buf);
834 } else { 840 } else {
835 devdata = (struct visorhba_devdata *)scsidev->host->hostdata; 841 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
836 for_each_vdisk_match(vdisk, devdata, scsidev) { 842 for_each_vdisk_match(vdisk, devdata, scsidev) {
diff --git a/drivers/staging/unisys/visorinput/Kconfig b/drivers/staging/unisys/visorinput/Kconfig
index d83deb4137e8..6baba2795ce7 100644
--- a/drivers/staging/unisys/visorinput/Kconfig
+++ b/drivers/staging/unisys/visorinput/Kconfig
@@ -4,7 +4,7 @@
4 4
5config UNISYS_VISORINPUT 5config UNISYS_VISORINPUT
6 tristate "Unisys visorinput driver" 6 tristate "Unisys visorinput driver"
7 depends on UNISYSSPAR && UNISYS_VISORBUS && FB 7 depends on UNISYSSPAR && UNISYS_VISORBUS && FB && INPUT
8 ---help--- 8 ---help---
9 If you say Y here, you will enable the Unisys visorinput driver. 9 If you say Y here, you will enable the Unisys visorinput driver.
10 10
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index dbbe72c7e255..f78353ddeea5 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -2179,6 +2179,8 @@ static s32 Handle_Get_InActiveTime(struct host_if_drv *hif_drv,
2179 wid.type = WID_STR; 2179 wid.type = WID_STR;
2180 wid.size = ETH_ALEN; 2180 wid.size = ETH_ALEN;
2181 wid.val = kmalloc(wid.size, GFP_KERNEL); 2181 wid.val = kmalloc(wid.size, GFP_KERNEL);
2182 if (!wid.val)
2183 return -ENOMEM;
2182 2184
2183 stamac = wid.val; 2185 stamac = wid.val;
2184 memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN); 2186 memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index 450af1b77f99..b2092c5ec7f3 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -251,6 +251,8 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
251 251
252 if (skb->data[0] == 0xc0 && (!(memcmp(broadcast, &skb->data[4], 6)))) { 252 if (skb->data[0] == 0xc0 && (!(memcmp(broadcast, &skb->data[4], 6)))) {
253 skb2 = dev_alloc_skb(skb->len + sizeof(struct wilc_wfi_radiotap_cb_hdr)); 253 skb2 = dev_alloc_skb(skb->len + sizeof(struct wilc_wfi_radiotap_cb_hdr));
254 if (!skb2)
255 return -ENOMEM;
254 256
255 memcpy(skb_put(skb2, skb->len), skb->data, skb->len); 257 memcpy(skb_put(skb2, skb->len), skb->data, skb->len);
256 258
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index be972afe6e62..bfc3e96d8d25 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -12,6 +12,7 @@
12 12
13#include <linux/semaphore.h> 13#include <linux/semaphore.h>
14#include "linux_wlan_common.h" 14#include "linux_wlan_common.h"
15#include <linux/netdevice.h>
15 16
16/******************************************** 17/********************************************
17 * 18 *
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 013a6240f193..c1ad0aea23b9 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -169,7 +169,7 @@ int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp)
169 hw->ident_sta_fw.variant) > 169 hw->ident_sta_fw.variant) >
170 HFA384x_FIRMWARE_VERSION(1, 5, 0)) { 170 HFA384x_FIRMWARE_VERSION(1, 5, 0)) {
171 if (msg->scantype.data != P80211ENUM_scantype_active) 171 if (msg->scantype.data != P80211ENUM_scantype_active)
172 word = cpu_to_le16(msg->maxchanneltime.data); 172 word = msg->maxchanneltime.data;
173 else 173 else
174 word = 0; 174 word = 0;
175 175
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 2e35db7f4aac..c15af2fcf2ba 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -276,12 +276,11 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
276 else 276 else
277 ret = vfs_iter_read(fd, &iter, &pos); 277 ret = vfs_iter_read(fd, &iter, &pos);
278 278
279 kfree(bvec);
280
281 if (is_write) { 279 if (is_write) {
282 if (ret < 0 || ret != data_length) { 280 if (ret < 0 || ret != data_length) {
283 pr_err("%s() write returned %d\n", __func__, ret); 281 pr_err("%s() write returned %d\n", __func__, ret);
284 return (ret < 0 ? ret : -EINVAL); 282 if (ret >= 0)
283 ret = -EINVAL;
285 } 284 }
286 } else { 285 } else {
287 /* 286 /*
@@ -294,17 +293,29 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
294 pr_err("%s() returned %d, expecting %u for " 293 pr_err("%s() returned %d, expecting %u for "
295 "S_ISBLK\n", __func__, ret, 294 "S_ISBLK\n", __func__, ret,
296 data_length); 295 data_length);
297 return (ret < 0 ? ret : -EINVAL); 296 if (ret >= 0)
297 ret = -EINVAL;
298 } 298 }
299 } else { 299 } else {
300 if (ret < 0) { 300 if (ret < 0) {
301 pr_err("%s() returned %d for non S_ISBLK\n", 301 pr_err("%s() returned %d for non S_ISBLK\n",
302 __func__, ret); 302 __func__, ret);
303 return ret; 303 } else if (ret != data_length) {
304 /*
305 * Short read case:
306 * Probably some one truncate file under us.
307 * We must explicitly zero sg-pages to prevent
308 * expose uninizialized pages to userspace.
309 */
310 if (ret < data_length)
311 ret += iov_iter_zero(data_length - ret, &iter);
312 else
313 ret = -EINVAL;
304 } 314 }
305 } 315 }
306 } 316 }
307 return 1; 317 kfree(bvec);
318 return ret;
308} 319}
309 320
310static sense_reason_t 321static sense_reason_t
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index a7d30e894cab..c43c942e1f87 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -900,7 +900,7 @@ static int tcmu_configure_device(struct se_device *dev)
900 info->version = __stringify(TCMU_MAILBOX_VERSION); 900 info->version = __stringify(TCMU_MAILBOX_VERSION);
901 901
902 info->mem[0].name = "tcm-user command & data buffer"; 902 info->mem[0].name = "tcm-user command & data buffer";
903 info->mem[0].addr = (phys_addr_t) udev->mb_addr; 903 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
904 info->mem[0].size = TCMU_RING_SIZE; 904 info->mem[0].size = TCMU_RING_SIZE;
905 info->mem[0].memtype = UIO_MEM_VIRTUAL; 905 info->mem[0].memtype = UIO_MEM_VIRTUAL;
906 906
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 8cc4ac64a91c..4b660b5beb98 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -299,7 +299,7 @@ config X86_PKG_TEMP_THERMAL
299 299
300config INTEL_SOC_DTS_IOSF_CORE 300config INTEL_SOC_DTS_IOSF_CORE
301 tristate 301 tristate
302 depends on X86 302 depends on X86 && PCI
303 select IOSF_MBI 303 select IOSF_MBI
304 help 304 help
305 This is becoming a common feature for Intel SoCs to expose the additional 305 This is becoming a common feature for Intel SoCs to expose the additional
@@ -309,7 +309,7 @@ config INTEL_SOC_DTS_IOSF_CORE
309 309
310config INTEL_SOC_DTS_THERMAL 310config INTEL_SOC_DTS_THERMAL
311 tristate "Intel SoCs DTS thermal driver" 311 tristate "Intel SoCs DTS thermal driver"
312 depends on X86 312 depends on X86 && PCI
313 select INTEL_SOC_DTS_IOSF_CORE 313 select INTEL_SOC_DTS_IOSF_CORE
314 select THERMAL_WRITABLE_TRIPS 314 select THERMAL_WRITABLE_TRIPS
315 help 315 help
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index c5547bd711db..6a8300108148 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -589,6 +589,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
589 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); 589 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
590 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP); 590 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
591 591
592 data->irq_enabled = true;
593 data->mode = THERMAL_DEVICE_ENABLED;
594
592 ret = devm_request_threaded_irq(&pdev->dev, data->irq, 595 ret = devm_request_threaded_irq(&pdev->dev, data->irq,
593 imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread, 596 imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
594 0, "imx_thermal", data); 597 0, "imx_thermal", data);
@@ -600,9 +603,6 @@ static int imx_thermal_probe(struct platform_device *pdev)
600 return ret; 603 return ret;
601 } 604 }
602 605
603 data->irq_enabled = true;
604 data->mode = THERMAL_DEVICE_ENABLED;
605
606 return 0; 606 return 0;
607} 607}
608 608
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 1246aa6fcab0..737635f0bec0 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -523,6 +523,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
523 struct thermal_instance *instance; 523 struct thermal_instance *instance;
524 struct power_allocator_params *params = tz->governor_data; 524 struct power_allocator_params *params = tz->governor_data;
525 525
526 mutex_lock(&tz->lock);
526 list_for_each_entry(instance, &tz->thermal_instances, tz_node) { 527 list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
527 if ((instance->trip != params->trip_max_desired_temperature) || 528 if ((instance->trip != params->trip_max_desired_temperature) ||
528 (!cdev_is_power_actor(instance->cdev))) 529 (!cdev_is_power_actor(instance->cdev)))
@@ -532,6 +533,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
532 instance->cdev->updated = false; 533 instance->cdev->updated = false;
533 thermal_cdev_update(instance->cdev); 534 thermal_cdev_update(instance->cdev);
534 } 535 }
536 mutex_unlock(&tz->lock);
535} 537}
536 538
537/** 539/**
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index fa61eff88496..16d45a25284f 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -585,6 +585,7 @@ static int exynos5433_tmu_initialize(struct platform_device *pdev)
585 threshold_code = temp_to_code(data, temp); 585 threshold_code = temp_to_code(data, temp);
586 586
587 rising_threshold = readl(data->base + rising_reg_offset); 587 rising_threshold = readl(data->base + rising_reg_offset);
588 rising_threshold &= ~(0xff << j * 8);
588 rising_threshold |= (threshold_code << j * 8); 589 rising_threshold |= (threshold_code << j * 8);
589 writel(rising_threshold, data->base + rising_reg_offset); 590 writel(rising_threshold, data->base + rising_reg_offset);
590 591
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 534dd9136662..81b35aace9de 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -54,8 +54,7 @@ static struct thermal_zone_device_ops ops = {
54 .get_temp = thermal_get_temp, 54 .get_temp = thermal_get_temp,
55}; 55};
56 56
57#ifdef CONFIG_PM 57static int __maybe_unused spear_thermal_suspend(struct device *dev)
58static int spear_thermal_suspend(struct device *dev)
59{ 58{
60 struct platform_device *pdev = to_platform_device(dev); 59 struct platform_device *pdev = to_platform_device(dev);
61 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); 60 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -72,7 +71,7 @@ static int spear_thermal_suspend(struct device *dev)
72 return 0; 71 return 0;
73} 72}
74 73
75static int spear_thermal_resume(struct device *dev) 74static int __maybe_unused spear_thermal_resume(struct device *dev)
76{ 75{
77 struct platform_device *pdev = to_platform_device(dev); 76 struct platform_device *pdev = to_platform_device(dev);
78 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); 77 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -94,7 +93,6 @@ static int spear_thermal_resume(struct device *dev)
94 93
95 return 0; 94 return 0;
96} 95}
97#endif
98 96
99static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend, 97static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
100 spear_thermal_resume); 98 spear_thermal_resume);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 20a41f7de76f..6713fd1958e7 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -627,6 +627,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
627 * we just disable hotplug, the 627 * we just disable hotplug, the
628 * pci-tunnels stay alive. 628 * pci-tunnels stay alive.
629 */ 629 */
630 .thaw_noirq = nhi_resume_noirq,
630 .restore_noirq = nhi_resume_noirq, 631 .restore_noirq = nhi_resume_noirq,
631}; 632};
632 633
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index c01f45095877..82c4d2e45319 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -226,7 +226,7 @@ config CYCLADES
226 226
227config CYZ_INTR 227config CYZ_INTR
228 bool "Cyclades-Z interrupt mode operation" 228 bool "Cyclades-Z interrupt mode operation"
229 depends on CYCLADES 229 depends on CYCLADES && PCI
230 help 230 help
231 The Cyclades-Z family of multiport cards allows 2 (two) driver op 231 The Cyclades-Z family of multiport cards allows 2 (two) driver op
232 modes: polling and interrupt. In polling mode, the driver will check 232 modes: polling and interrupt. In polling mode, the driver will check
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 47b54c6aefd2..9f660e55d1ba 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -323,7 +323,6 @@ static void udbg_init_opal_common(void)
323 udbg_putc = udbg_opal_putc; 323 udbg_putc = udbg_opal_putc;
324 udbg_getc = udbg_opal_getc; 324 udbg_getc = udbg_opal_getc;
325 udbg_getc_poll = udbg_opal_getc_poll; 325 udbg_getc_poll = udbg_opal_getc_poll;
326 tb_ticks_per_usec = 0x200; /* Make udelay not suck */
327} 326}
328 327
329void __init hvc_opal_init_early(void) 328void __init hvc_opal_init_early(void)
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index fa816b7193b6..11725422dacb 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -323,6 +323,7 @@ void xen_console_resume(void)
323 } 323 }
324} 324}
325 325
326#ifdef CONFIG_HVC_XEN_FRONTEND
326static void xencons_disconnect_backend(struct xencons_info *info) 327static void xencons_disconnect_backend(struct xencons_info *info)
327{ 328{
328 if (info->irq > 0) 329 if (info->irq > 0)
@@ -363,7 +364,6 @@ static int xen_console_remove(struct xencons_info *info)
363 return 0; 364 return 0;
364} 365}
365 366
366#ifdef CONFIG_HVC_XEN_FRONTEND
367static int xencons_remove(struct xenbus_device *dev) 367static int xencons_remove(struct xenbus_device *dev)
368{ 368{
369 return xen_console_remove(dev_get_drvdata(&dev->dev)); 369 return xen_console_remove(dev_get_drvdata(&dev->dev));
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 9aff37186246..6060c3e8925e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -137,6 +137,9 @@ struct gsm_dlci {
137 struct mutex mutex; 137 struct mutex mutex;
138 138
139 /* Link layer */ 139 /* Link layer */
140 int mode;
141#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */
142#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */
140 spinlock_t lock; /* Protects the internal state */ 143 spinlock_t lock; /* Protects the internal state */
141 struct timer_list t1; /* Retransmit timer for SABM and UA */ 144 struct timer_list t1; /* Retransmit timer for SABM and UA */
142 int retries; 145 int retries;
@@ -1380,7 +1383,13 @@ retry:
1380 ctrl->data = data; 1383 ctrl->data = data;
1381 ctrl->len = clen; 1384 ctrl->len = clen;
1382 gsm->pending_cmd = ctrl; 1385 gsm->pending_cmd = ctrl;
1383 gsm->cretries = gsm->n2; 1386
1387 /* If DLCI0 is in ADM mode skip retries, it won't respond */
1388 if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
1389 gsm->cretries = 1;
1390 else
1391 gsm->cretries = gsm->n2;
1392
1384 mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); 1393 mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
1385 gsm_control_transmit(gsm, ctrl); 1394 gsm_control_transmit(gsm, ctrl);
1386 spin_unlock_irqrestore(&gsm->control_lock, flags); 1395 spin_unlock_irqrestore(&gsm->control_lock, flags);
@@ -1467,6 +1476,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
1467 * in which case an opening port goes back to closed and a closing port 1476 * in which case an opening port goes back to closed and a closing port
1468 * is simply put into closed state (any further frames from the other 1477 * is simply put into closed state (any further frames from the other
1469 * end will get a DM response) 1478 * end will get a DM response)
1479 *
1480 * Some control dlci can stay in ADM mode with other dlci working just
1481 * fine. In that case we can just keep the control dlci open after the
1482 * DLCI_OPENING retries time out.
1470 */ 1483 */
1471 1484
1472static void gsm_dlci_t1(unsigned long data) 1485static void gsm_dlci_t1(unsigned long data)
@@ -1480,8 +1493,16 @@ static void gsm_dlci_t1(unsigned long data)
1480 if (dlci->retries) { 1493 if (dlci->retries) {
1481 gsm_command(dlci->gsm, dlci->addr, SABM|PF); 1494 gsm_command(dlci->gsm, dlci->addr, SABM|PF);
1482 mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); 1495 mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
1483 } else 1496 } else if (!dlci->addr && gsm->control == (DM | PF)) {
1497 if (debug & 8)
1498 pr_info("DLCI %d opening in ADM mode.\n",
1499 dlci->addr);
1500 dlci->mode = DLCI_MODE_ADM;
1501 gsm_dlci_open(dlci);
1502 } else {
1484 gsm_dlci_close(dlci); 1503 gsm_dlci_close(dlci);
1504 }
1505
1485 break; 1506 break;
1486 case DLCI_CLOSING: 1507 case DLCI_CLOSING:
1487 dlci->retries--; 1508 dlci->retries--;
@@ -1499,8 +1520,8 @@ static void gsm_dlci_t1(unsigned long data)
1499 * @dlci: DLCI to open 1520 * @dlci: DLCI to open
1500 * 1521 *
1501 * Commence opening a DLCI from the Linux side. We issue SABM messages 1522 * Commence opening a DLCI from the Linux side. We issue SABM messages
1502 * to the modem which should then reply with a UA, at which point we 1523 * to the modem which should then reply with a UA or ADM, at which point
1503 * will move into open state. Opening is done asynchronously with retry 1524 * we will move into open state. Opening is done asynchronously with retry
1504 * running off timers and the responses. 1525 * running off timers and the responses.
1505 */ 1526 */
1506 1527
@@ -2870,11 +2891,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
2870static int gsm_carrier_raised(struct tty_port *port) 2891static int gsm_carrier_raised(struct tty_port *port)
2871{ 2892{
2872 struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); 2893 struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
2894 struct gsm_mux *gsm = dlci->gsm;
2895
2873 /* Not yet open so no carrier info */ 2896 /* Not yet open so no carrier info */
2874 if (dlci->state != DLCI_OPEN) 2897 if (dlci->state != DLCI_OPEN)
2875 return 0; 2898 return 0;
2876 if (debug & 2) 2899 if (debug & 2)
2877 return 1; 2900 return 1;
2901
2902 /*
2903 * Basic mode with control channel in ADM mode may not respond
2904 * to CMD_MSC at all and modem_rx is empty.
2905 */
2906 if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
2907 !dlci->modem_rx)
2908 return 1;
2909
2878 return dlci->modem_rx & TIOCM_CD; 2910 return dlci->modem_rx & TIOCM_CD;
2879} 2911}
2880 2912
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 41dda25da049..b1ec202099b2 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -128,6 +128,8 @@ struct n_tty_data {
128 struct mutex output_lock; 128 struct mutex output_lock;
129}; 129};
130 130
131#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
132
131static inline size_t read_cnt(struct n_tty_data *ldata) 133static inline size_t read_cnt(struct n_tty_data *ldata)
132{ 134{
133 return ldata->read_head - ldata->read_tail; 135 return ldata->read_head - ldata->read_tail;
@@ -145,6 +147,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
145 147
146static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i) 148static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
147{ 149{
150 smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
148 return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)]; 151 return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
149} 152}
150 153
@@ -322,9 +325,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
322static void reset_buffer_flags(struct n_tty_data *ldata) 325static void reset_buffer_flags(struct n_tty_data *ldata)
323{ 326{
324 ldata->read_head = ldata->canon_head = ldata->read_tail = 0; 327 ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
325 ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
326 ldata->commit_head = 0; 328 ldata->commit_head = 0;
327 ldata->echo_mark = 0;
328 ldata->line_start = 0; 329 ldata->line_start = 0;
329 330
330 ldata->erasing = 0; 331 ldata->erasing = 0;
@@ -645,13 +646,20 @@ static size_t __process_echoes(struct tty_struct *tty)
645 old_space = space = tty_write_room(tty); 646 old_space = space = tty_write_room(tty);
646 647
647 tail = ldata->echo_tail; 648 tail = ldata->echo_tail;
648 while (ldata->echo_commit != tail) { 649 while (MASK(ldata->echo_commit) != MASK(tail)) {
649 c = echo_buf(ldata, tail); 650 c = echo_buf(ldata, tail);
650 if (c == ECHO_OP_START) { 651 if (c == ECHO_OP_START) {
651 unsigned char op; 652 unsigned char op;
652 int no_space_left = 0; 653 int no_space_left = 0;
653 654
654 /* 655 /*
656 * Since add_echo_byte() is called without holding
657 * output_lock, we might see only portion of multi-byte
658 * operation.
659 */
660 if (MASK(ldata->echo_commit) == MASK(tail + 1))
661 goto not_yet_stored;
662 /*
655 * If the buffer byte is the start of a multi-byte 663 * If the buffer byte is the start of a multi-byte
656 * operation, get the next byte, which is either the 664 * operation, get the next byte, which is either the
657 * op code or a control character value. 665 * op code or a control character value.
@@ -662,6 +670,8 @@ static size_t __process_echoes(struct tty_struct *tty)
662 unsigned int num_chars, num_bs; 670 unsigned int num_chars, num_bs;
663 671
664 case ECHO_OP_ERASE_TAB: 672 case ECHO_OP_ERASE_TAB:
673 if (MASK(ldata->echo_commit) == MASK(tail + 2))
674 goto not_yet_stored;
665 num_chars = echo_buf(ldata, tail + 2); 675 num_chars = echo_buf(ldata, tail + 2);
666 676
667 /* 677 /*
@@ -756,7 +766,8 @@ static size_t __process_echoes(struct tty_struct *tty)
756 /* If the echo buffer is nearly full (so that the possibility exists 766 /* If the echo buffer is nearly full (so that the possibility exists
757 * of echo overrun before the next commit), then discard enough 767 * of echo overrun before the next commit), then discard enough
758 * data at the tail to prevent a subsequent overrun */ 768 * data at the tail to prevent a subsequent overrun */
759 while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { 769 while (ldata->echo_commit > tail &&
770 ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
760 if (echo_buf(ldata, tail) == ECHO_OP_START) { 771 if (echo_buf(ldata, tail) == ECHO_OP_START) {
761 if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB) 772 if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
762 tail += 3; 773 tail += 3;
@@ -766,6 +777,7 @@ static size_t __process_echoes(struct tty_struct *tty)
766 tail++; 777 tail++;
767 } 778 }
768 779
780 not_yet_stored:
769 ldata->echo_tail = tail; 781 ldata->echo_tail = tail;
770 return old_space - space; 782 return old_space - space;
771} 783}
@@ -776,6 +788,7 @@ static void commit_echoes(struct tty_struct *tty)
776 size_t nr, old, echoed; 788 size_t nr, old, echoed;
777 size_t head; 789 size_t head;
778 790
791 mutex_lock(&ldata->output_lock);
779 head = ldata->echo_head; 792 head = ldata->echo_head;
780 ldata->echo_mark = head; 793 ldata->echo_mark = head;
781 old = ldata->echo_commit - ldata->echo_tail; 794 old = ldata->echo_commit - ldata->echo_tail;
@@ -784,10 +797,12 @@ static void commit_echoes(struct tty_struct *tty)
784 * is over the threshold (and try again each time another 797 * is over the threshold (and try again each time another
785 * block is accumulated) */ 798 * block is accumulated) */
786 nr = head - ldata->echo_tail; 799 nr = head - ldata->echo_tail;
787 if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK)) 800 if (nr < ECHO_COMMIT_WATERMARK ||
801 (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
802 mutex_unlock(&ldata->output_lock);
788 return; 803 return;
804 }
789 805
790 mutex_lock(&ldata->output_lock);
791 ldata->echo_commit = head; 806 ldata->echo_commit = head;
792 echoed = __process_echoes(tty); 807 echoed = __process_echoes(tty);
793 mutex_unlock(&ldata->output_lock); 808 mutex_unlock(&ldata->output_lock);
@@ -838,7 +853,9 @@ static void flush_echoes(struct tty_struct *tty)
838 853
839static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata) 854static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
840{ 855{
841 *echo_buf_addr(ldata, ldata->echo_head++) = c; 856 *echo_buf_addr(ldata, ldata->echo_head) = c;
857 smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
858 ldata->echo_head++;
842} 859}
843 860
844/** 861/**
@@ -1006,14 +1023,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1006 } 1023 }
1007 1024
1008 seen_alnums = 0; 1025 seen_alnums = 0;
1009 while (ldata->read_head != ldata->canon_head) { 1026 while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
1010 head = ldata->read_head; 1027 head = ldata->read_head;
1011 1028
1012 /* erase a single possibly multibyte character */ 1029 /* erase a single possibly multibyte character */
1013 do { 1030 do {
1014 head--; 1031 head--;
1015 c = read_buf(ldata, head); 1032 c = read_buf(ldata, head);
1016 } while (is_continuation(c, tty) && head != ldata->canon_head); 1033 } while (is_continuation(c, tty) &&
1034 MASK(head) != MASK(ldata->canon_head));
1017 1035
1018 /* do not partially erase */ 1036 /* do not partially erase */
1019 if (is_continuation(c, tty)) 1037 if (is_continuation(c, tty))
@@ -1055,7 +1073,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1055 * This info is used to go back the correct 1073 * This info is used to go back the correct
1056 * number of columns. 1074 * number of columns.
1057 */ 1075 */
1058 while (tail != ldata->canon_head) { 1076 while (MASK(tail) != MASK(ldata->canon_head)) {
1059 tail--; 1077 tail--;
1060 c = read_buf(ldata, tail); 1078 c = read_buf(ldata, tail);
1061 if (c == '\t') { 1079 if (c == '\t') {
@@ -1332,7 +1350,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
1332 finish_erasing(ldata); 1350 finish_erasing(ldata);
1333 echo_char(c, tty); 1351 echo_char(c, tty);
1334 echo_char_raw('\n', ldata); 1352 echo_char_raw('\n', ldata);
1335 while (tail != ldata->read_head) { 1353 while (MASK(tail) != MASK(ldata->read_head)) {
1336 echo_char(read_buf(ldata, tail), tty); 1354 echo_char(read_buf(ldata, tail), tty);
1337 tail++; 1355 tail++;
1338 } 1356 }
@@ -1917,31 +1935,22 @@ static int n_tty_open(struct tty_struct *tty)
1917 struct n_tty_data *ldata; 1935 struct n_tty_data *ldata;
1918 1936
1919 /* Currently a malloc failure here can panic */ 1937 /* Currently a malloc failure here can panic */
1920 ldata = vmalloc(sizeof(*ldata)); 1938 ldata = vzalloc(sizeof(*ldata));
1921 if (!ldata) 1939 if (!ldata)
1922 goto err; 1940 return -ENOMEM;
1923 1941
1924 ldata->overrun_time = jiffies; 1942 ldata->overrun_time = jiffies;
1925 mutex_init(&ldata->atomic_read_lock); 1943 mutex_init(&ldata->atomic_read_lock);
1926 mutex_init(&ldata->output_lock); 1944 mutex_init(&ldata->output_lock);
1927 1945
1928 tty->disc_data = ldata; 1946 tty->disc_data = ldata;
1929 reset_buffer_flags(tty->disc_data);
1930 ldata->column = 0;
1931 ldata->canon_column = 0;
1932 ldata->minimum_to_wake = 1; 1947 ldata->minimum_to_wake = 1;
1933 ldata->num_overrun = 0;
1934 ldata->no_room = 0;
1935 ldata->lnext = 0;
1936 tty->closing = 0; 1948 tty->closing = 0;
1937 /* indicate buffer work may resume */ 1949 /* indicate buffer work may resume */
1938 clear_bit(TTY_LDISC_HALTED, &tty->flags); 1950 clear_bit(TTY_LDISC_HALTED, &tty->flags);
1939 n_tty_set_termios(tty, NULL); 1951 n_tty_set_termios(tty, NULL);
1940 tty_unthrottle(tty); 1952 tty_unthrottle(tty);
1941
1942 return 0; 1953 return 0;
1943err:
1944 return -ENOMEM;
1945} 1954}
1946 1955
1947static inline int input_available_p(struct tty_struct *tty, int poll) 1956static inline int input_available_p(struct tty_struct *tty, int poll)
@@ -2238,6 +2247,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2238 } 2247 }
2239 if (tty_hung_up_p(file)) 2248 if (tty_hung_up_p(file))
2240 break; 2249 break;
2250 /*
2251 * Abort readers for ttys which never actually
2252 * get hung up. See __tty_hangup().
2253 */
2254 if (test_bit(TTY_HUPPING, &tty->flags))
2255 break;
2241 if (!timeout) 2256 if (!timeout)
2242 break; 2257 break;
2243 if (file->f_flags & O_NONBLOCK) { 2258 if (file->f_flags & O_NONBLOCK) {
@@ -2473,7 +2488,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
2473 tail = ldata->read_tail; 2488 tail = ldata->read_tail;
2474 nr = head - tail; 2489 nr = head - tail;
2475 /* Skip EOF-chars.. */ 2490 /* Skip EOF-chars.. */
2476 while (head != tail) { 2491 while (MASK(head) != MASK(tail)) {
2477 if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) && 2492 if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
2478 read_buf(ldata, tail) == __DISABLED_CHAR) 2493 read_buf(ldata, tail) == __DISABLED_CHAR)
2479 nr--; 2494 nr--;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 96aa0ad32497..c8a2e5b0eff7 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -106,16 +106,19 @@ static void pty_unthrottle(struct tty_struct *tty)
106static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) 106static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
107{ 107{
108 struct tty_struct *to = tty->link; 108 struct tty_struct *to = tty->link;
109 unsigned long flags;
109 110
110 if (tty->stopped) 111 if (tty->stopped)
111 return 0; 112 return 0;
112 113
113 if (c > 0) { 114 if (c > 0) {
115 spin_lock_irqsave(&to->port->lock, flags);
114 /* Stuff the data into the input queue of the other end */ 116 /* Stuff the data into the input queue of the other end */
115 c = tty_insert_flip_string(to->port, buf, c); 117 c = tty_insert_flip_string(to->port, buf, c);
116 /* And shovel */ 118 /* And shovel */
117 if (c) 119 if (c)
118 tty_flip_buffer_push(to->port); 120 tty_flip_buffer_push(to->port);
121 spin_unlock_irqrestore(&to->port->lock, flags);
119 } 122 }
120 return c; 123 return c;
121} 124}
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 7025f47fa284..746c76b358a0 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5300,6 +5300,17 @@ static struct pci_device_id serial_pci_tbl[] = {
5300 PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ 5300 PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
5301 pbn_b2_4_115200 }, 5301 pbn_b2_4_115200 },
5302 /* 5302 /*
5303 * BrainBoxes UC-260
5304 */
5305 { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
5306 PCI_ANY_ID, PCI_ANY_ID,
5307 PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
5308 pbn_b2_4_115200 },
5309 { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
5310 PCI_ANY_ID, PCI_ANY_ID,
5311 PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
5312 pbn_b2_4_115200 },
5313 /*
5303 * Perle PCI-RAS cards 5314 * Perle PCI-RAS cards
5304 */ 5315 */
5305 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, 5316 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 4d94d59884e7..a19cfed2858a 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -372,7 +372,7 @@ config SERIAL_8250_MID
372 tristate "Support for serial ports on Intel MID platforms" 372 tristate "Support for serial ports on Intel MID platforms"
373 depends on SERIAL_8250 && PCI 373 depends on SERIAL_8250 && PCI
374 select HSU_DMA if SERIAL_8250_DMA 374 select HSU_DMA if SERIAL_8250_DMA
375 select HSU_DMA_PCI if X86_INTEL_MID 375 select HSU_DMA_PCI if (HSU_DMA && X86_INTEL_MID)
376 select RATIONAL 376 select RATIONAL
377 help 377 help
378 Selecting this option will enable handling of the extra features 378 Selecting this option will enable handling of the extra features
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 03ebe401fff7..040018d59608 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -597,6 +597,11 @@ static int arc_serial_probe(struct platform_device *pdev)
597 if (dev_id < 0) 597 if (dev_id < 0)
598 dev_id = 0; 598 dev_id = 0;
599 599
600 if (dev_id >= ARRAY_SIZE(arc_uart_ports)) {
601 dev_err(&pdev->dev, "serial%d out of range\n", dev_id);
602 return -EINVAL;
603 }
604
600 uart = &arc_uart_ports[dev_id]; 605 uart = &arc_uart_ports[dev_id];
601 port = &uart->port; 606 port = &uart->port;
602 607
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 53e4d5056db7..e0277cf0bf58 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1783,6 +1783,7 @@ static void atmel_get_ip_name(struct uart_port *port)
1783 switch (version) { 1783 switch (version) {
1784 case 0x302: 1784 case 0x302:
1785 case 0x10213: 1785 case 0x10213:
1786 case 0x10302:
1786 dev_dbg(port->dev, "This version is usart\n"); 1787 dev_dbg(port->dev, "This version is usart\n");
1787 atmel_port->is_usart = true; 1788 atmel_port->is_usart = true;
1788 break; 1789 break;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 3d790033744e..01e2274b23f2 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1818,6 +1818,10 @@ static int lpuart_probe(struct platform_device *pdev)
1818 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); 1818 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
1819 return ret; 1819 return ret;
1820 } 1820 }
1821 if (ret >= ARRAY_SIZE(lpuart_ports)) {
1822 dev_err(&pdev->dev, "serial%d out of range\n", ret);
1823 return -EINVAL;
1824 }
1821 sport->port.line = ret; 1825 sport->port.line = ret;
1822 sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart"); 1826 sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart");
1823 1827
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 016e4be05cec..07ede982b472 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1923,6 +1923,12 @@ static int serial_imx_probe(struct platform_device *pdev)
1923 else if (ret < 0) 1923 else if (ret < 0)
1924 return ret; 1924 return ret;
1925 1925
1926 if (sport->port.line >= ARRAY_SIZE(imx_ports)) {
1927 dev_err(&pdev->dev, "serial%d out of range\n",
1928 sport->port.line);
1929 return -EINVAL;
1930 }
1931
1926 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1932 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1927 base = devm_ioremap_resource(&pdev->dev, res); 1933 base = devm_ioremap_resource(&pdev->dev, res);
1928 if (IS_ERR(base)) 1934 if (IS_ERR(base))
@@ -2057,12 +2063,14 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
2057 val &= ~UCR3_AWAKEN; 2063 val &= ~UCR3_AWAKEN;
2058 writel(val, sport->port.membase + UCR3); 2064 writel(val, sport->port.membase + UCR3);
2059 2065
2060 val = readl(sport->port.membase + UCR1); 2066 if (sport->have_rtscts) {
2061 if (on) 2067 val = readl(sport->port.membase + UCR1);
2062 val |= UCR1_RTSDEN; 2068 if (on)
2063 else 2069 val |= UCR1_RTSDEN;
2064 val &= ~UCR1_RTSDEN; 2070 else
2065 writel(val, sport->port.membase + UCR1); 2071 val &= ~UCR1_RTSDEN;
2072 writel(val, sport->port.membase + UCR1);
2073 }
2066} 2074}
2067 2075
2068static int imx_serial_port_suspend_noirq(struct device *dev) 2076static int imx_serial_port_suspend_noirq(struct device *dev)
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index cd0414bbe094..daa4a65ef6ff 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1274,6 +1274,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
1274 s->port.line = pdev->id < 0 ? 0 : pdev->id; 1274 s->port.line = pdev->id < 0 ? 0 : pdev->id;
1275 else if (ret < 0) 1275 else if (ret < 0)
1276 return ret; 1276 return ret;
1277 if (s->port.line >= ARRAY_SIZE(auart_port)) {
1278 dev_err(&pdev->dev, "serial%d out of range\n", s->port.line);
1279 return -EINVAL;
1280 }
1277 1281
1278 if (of_id) { 1282 if (of_id) {
1279 pdev->id_entry = of_id->data; 1283 pdev->id_entry = of_id->data;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index e6bc1a6be4a4..4d532a085db9 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -860,15 +860,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
860 dma->rx_conf.direction = DMA_DEV_TO_MEM; 860 dma->rx_conf.direction = DMA_DEV_TO_MEM;
861 dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 861 dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
862 dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH; 862 dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
863 dma->rx_conf.src_maxburst = 16; 863 dma->rx_conf.src_maxburst = 1;
864 864
865 dma->tx_conf.direction = DMA_MEM_TO_DEV; 865 dma->tx_conf.direction = DMA_MEM_TO_DEV;
866 dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 866 dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
867 dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH; 867 dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
868 if (dma_get_cache_alignment() >= 16) 868 dma->tx_conf.dst_maxburst = 1;
869 dma->tx_conf.dst_maxburst = 16;
870 else
871 dma->tx_conf.dst_maxburst = 1;
872 869
873 dma_cap_zero(mask); 870 dma_cap_zero(mask);
874 dma_cap_set(DMA_SLAVE, mask); 871 dma_cap_set(DMA_SLAVE, mask);
@@ -1807,6 +1804,10 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
1807 1804
1808 dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index); 1805 dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
1809 1806
1807 if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
1808 dev_err(&pdev->dev, "serial%d out of range\n", index);
1809 return -EINVAL;
1810 }
1810 ourport = &s3c24xx_serial_ports[index]; 1811 ourport = &s3c24xx_serial_ports[index];
1811 1812
1812 ourport->drv_data = s3c24xx_get_driver_data(pdev); 1813 ourport->drv_data = s3c24xx_get_driver_data(pdev);
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index fcf803ffad19..cdd2f942317c 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -884,14 +884,19 @@ static int sccnxp_probe(struct platform_device *pdev)
884 884
885 clk = devm_clk_get(&pdev->dev, NULL); 885 clk = devm_clk_get(&pdev->dev, NULL);
886 if (IS_ERR(clk)) { 886 if (IS_ERR(clk)) {
887 if (PTR_ERR(clk) == -EPROBE_DEFER) { 887 ret = PTR_ERR(clk);
888 ret = -EPROBE_DEFER; 888 if (ret == -EPROBE_DEFER)
889 goto err_out; 889 goto err_out;
890 } 890 uartclk = 0;
891 } else {
892 clk_prepare_enable(clk);
893 uartclk = clk_get_rate(clk);
894 }
895
896 if (!uartclk) {
891 dev_notice(&pdev->dev, "Using default clock frequency\n"); 897 dev_notice(&pdev->dev, "Using default clock frequency\n");
892 uartclk = s->chip->freq_std; 898 uartclk = s->chip->freq_std;
893 } else 899 }
894 uartclk = clk_get_rate(clk);
895 900
896 /* Check input frequency */ 901 /* Check input frequency */
897 if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) { 902 if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) {
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 3eb57eb532f1..02147361eaa9 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -20,6 +20,7 @@
20#include <linux/gpio/consumer.h> 20#include <linux/gpio/consumer.h>
21#include <linux/termios.h> 21#include <linux/termios.h>
22#include <linux/serial_core.h> 22#include <linux/serial_core.h>
23#include <linux/module.h>
23 24
24#include "serial_mctrl_gpio.h" 25#include "serial_mctrl_gpio.h"
25 26
@@ -193,6 +194,7 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
193 194
194 return gpios; 195 return gpios;
195} 196}
197EXPORT_SYMBOL_GPL(mctrl_gpio_init);
196 198
197void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios) 199void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
198{ 200{
@@ -247,3 +249,6 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
247 disable_irq(gpios->irq[i]); 249 disable_irq(gpios->irq[i]);
248 } 250 }
249} 251}
252EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms);
253
254MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 80d0ffe7abc1..b63920481b1d 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -847,6 +847,8 @@ static void sci_receive_chars(struct uart_port *port)
847 /* Tell the rest of the system the news. New characters! */ 847 /* Tell the rest of the system the news. New characters! */
848 tty_flip_buffer_push(tport); 848 tty_flip_buffer_push(tport);
849 } else { 849 } else {
850 /* TTY buffers full; read from RX reg to prevent lockup */
851 serial_port_in(port, SCxRDR);
850 serial_port_in(port, SCxSR); /* dummy read */ 852 serial_port_in(port, SCxSR); /* dummy read */
851 sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); 853 sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
852 } 854 }
@@ -1455,7 +1457,16 @@ static void sci_free_dma(struct uart_port *port)
1455 if (s->chan_rx) 1457 if (s->chan_rx)
1456 sci_rx_dma_release(s, false); 1458 sci_rx_dma_release(s, false);
1457} 1459}
1458#else 1460
1461static void sci_flush_buffer(struct uart_port *port)
1462{
1463 /*
1464 * In uart_flush_buffer(), the xmit circular buffer has just been
1465 * cleared, so we have to reset tx_dma_len accordingly.
1466 */
1467 to_sci_port(port)->tx_dma_len = 0;
1468}
1469#else /* !CONFIG_SERIAL_SH_SCI_DMA */
1459static inline void sci_request_dma(struct uart_port *port) 1470static inline void sci_request_dma(struct uart_port *port)
1460{ 1471{
1461} 1472}
@@ -1463,7 +1474,9 @@ static inline void sci_request_dma(struct uart_port *port)
1463static inline void sci_free_dma(struct uart_port *port) 1474static inline void sci_free_dma(struct uart_port *port)
1464{ 1475{
1465} 1476}
1466#endif 1477
1478#define sci_flush_buffer NULL
1479#endif /* !CONFIG_SERIAL_SH_SCI_DMA */
1467 1480
1468static irqreturn_t sci_rx_interrupt(int irq, void *ptr) 1481static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
1469{ 1482{
@@ -2203,6 +2216,7 @@ static struct uart_ops sci_uart_ops = {
2203 .break_ctl = sci_break_ctl, 2216 .break_ctl = sci_break_ctl,
2204 .startup = sci_startup, 2217 .startup = sci_startup,
2205 .shutdown = sci_shutdown, 2218 .shutdown = sci_shutdown,
2219 .flush_buffer = sci_flush_buffer,
2206 .set_termios = sci_set_termios, 2220 .set_termios = sci_set_termios,
2207 .pm = sci_pm, 2221 .pm = sci_pm,
2208 .type = sci_type, 2222 .type = sci_type,
@@ -2405,13 +2419,12 @@ static void serial_console_write(struct console *co, const char *s,
2405 unsigned long flags; 2419 unsigned long flags;
2406 int locked = 1; 2420 int locked = 1;
2407 2421
2408 local_irq_save(flags);
2409 if (port->sysrq) 2422 if (port->sysrq)
2410 locked = 0; 2423 locked = 0;
2411 else if (oops_in_progress) 2424 else if (oops_in_progress)
2412 locked = spin_trylock(&port->lock); 2425 locked = spin_trylock_irqsave(&port->lock, flags);
2413 else 2426 else
2414 spin_lock(&port->lock); 2427 spin_lock_irqsave(&port->lock, flags);
2415 2428
2416 /* first save the SCSCR then disable the interrupts */ 2429 /* first save the SCSCR then disable the interrupts */
2417 ctrl = serial_port_in(port, SCSCR); 2430 ctrl = serial_port_in(port, SCSCR);
@@ -2428,8 +2441,7 @@ static void serial_console_write(struct console *co, const char *s,
2428 serial_port_out(port, SCSCR, ctrl); 2441 serial_port_out(port, SCSCR, ctrl);
2429 2442
2430 if (locked) 2443 if (locked)
2431 spin_unlock(&port->lock); 2444 spin_unlock_irqrestore(&port->lock, flags);
2432 local_irq_restore(flags);
2433} 2445}
2434 2446
2435static int serial_console_setup(struct console *co, char *options) 2447static int serial_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 009e0dbc12d2..4f2f4aca8d2e 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1026,7 +1026,7 @@ static struct uart_port *cdns_uart_get_port(int id)
1026 struct uart_port *port; 1026 struct uart_port *port;
1027 1027
1028 /* Try the given port id if failed use default method */ 1028 /* Try the given port id if failed use default method */
1029 if (cdns_uart_port[id].mapbase != 0) { 1029 if (id < CDNS_UART_NR_PORTS && cdns_uart_port[id].mapbase != 0) {
1030 /* Find the next unused port */ 1030 /* Find the next unused port */
1031 for (id = 0; id < CDNS_UART_NR_PORTS; id++) 1031 for (id = 0; id < CDNS_UART_NR_PORTS; id++)
1032 if (cdns_uart_port[id].mapbase == 0) 1032 if (cdns_uart_port[id].mapbase == 0)
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 1bb629ab8ecc..198451fa9e5d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -702,6 +702,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
702 return; 702 return;
703 } 703 }
704 704
705 /*
706 * Some console devices aren't actually hung up for technical and
707 * historical reasons, which can lead to indefinite interruptible
708 * sleep in n_tty_read(). The following explicitly tells
709 * n_tty_read() to abort readers.
710 */
711 set_bit(TTY_HUPPING, &tty->flags);
712
705 /* inuse_filps is protected by the single tty lock, 713 /* inuse_filps is protected by the single tty lock,
706 this really needs to change if we want to flush the 714 this really needs to change if we want to flush the
707 workqueue with the lock held */ 715 workqueue with the lock held */
@@ -757,6 +765,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
757 * can't yet guarantee all that. 765 * can't yet guarantee all that.
758 */ 766 */
759 set_bit(TTY_HUPPED, &tty->flags); 767 set_bit(TTY_HUPPED, &tty->flags);
768 clear_bit(TTY_HUPPING, &tty->flags);
760 tty_unlock(tty); 769 tty_unlock(tty);
761 770
762 if (f) 771 if (f)
@@ -1694,6 +1703,8 @@ static void release_tty(struct tty_struct *tty, int idx)
1694 if (tty->link) 1703 if (tty->link)
1695 tty->link->port->itty = NULL; 1704 tty->link->port->itty = NULL;
1696 tty_buffer_cancel_work(tty->port); 1705 tty_buffer_cancel_work(tty->port);
1706 if (tty->link)
1707 tty_buffer_cancel_work(tty->link->port);
1697 1708
1698 tty_kref_put(tty->link); 1709 tty_kref_put(tty->link);
1699 tty_kref_put(tty); 1710 tty_kref_put(tty);
@@ -3143,7 +3154,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
3143 3154
3144 kref_init(&tty->kref); 3155 kref_init(&tty->kref);
3145 tty->magic = TTY_MAGIC; 3156 tty->magic = TTY_MAGIC;
3146 tty_ldisc_init(tty); 3157 if (tty_ldisc_init(tty)) {
3158 kfree(tty);
3159 return NULL;
3160 }
3147 tty->session = NULL; 3161 tty->session = NULL;
3148 tty->pgrp = NULL; 3162 tty->pgrp = NULL;
3149 mutex_init(&tty->legacy_mutex); 3163 mutex_init(&tty->legacy_mutex);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 9bee25cfa0be..d9e013dc2c08 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -168,12 +168,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
168 return ERR_CAST(ldops); 168 return ERR_CAST(ldops);
169 } 169 }
170 170
171 ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL); 171 /*
172 if (ld == NULL) { 172 * There is no way to handle allocation failure of only 16 bytes.
173 put_ldops(ldops); 173 * Let's simplify error handling and save more memory.
174 return ERR_PTR(-ENOMEM); 174 */
175 } 175 ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
176
177 ld->ops = ldops; 176 ld->ops = ldops;
178 ld->tty = tty; 177 ld->tty = tty;
179 178
@@ -804,12 +803,13 @@ void tty_ldisc_release(struct tty_struct *tty)
804 * the tty structure is not completely set up when this call is made. 803 * the tty structure is not completely set up when this call is made.
805 */ 804 */
806 805
807void tty_ldisc_init(struct tty_struct *tty) 806int tty_ldisc_init(struct tty_struct *tty)
808{ 807{
809 struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY); 808 struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
810 if (IS_ERR(ld)) 809 if (IS_ERR(ld))
811 panic("n_tty: init_tty"); 810 return PTR_ERR(ld);
812 tty->ldisc = ld; 811 tty->ldisc = ld;
812 return 0;
813} 813}
814 814
815/** 815/**
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index e4f69bddcfb1..ff3286fc22d8 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1312,6 +1312,11 @@ static void csi_m(struct vc_data *vc)
1312 case 3: 1312 case 3:
1313 vc->vc_italic = 1; 1313 vc->vc_italic = 1;
1314 break; 1314 break;
1315 case 21:
1316 /*
1317 * No console drivers support double underline, so
1318 * convert it to a single underline.
1319 */
1315 case 4: 1320 case 4:
1316 vc->vc_underline = 1; 1321 vc->vc_underline = 1;
1317 break; 1322 break;
@@ -1348,7 +1353,6 @@ static void csi_m(struct vc_data *vc)
1348 vc->vc_disp_ctrl = 1; 1353 vc->vc_disp_ctrl = 1;
1349 vc->vc_toggle_meta = 1; 1354 vc->vc_toggle_meta = 1;
1350 break; 1355 break;
1351 case 21:
1352 case 22: 1356 case 22:
1353 vc->vc_intensity = 1; 1357 vc->vc_intensity = 1;
1354 break; 1358 break;
@@ -1725,7 +1729,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
1725 default_attr(vc); 1729 default_attr(vc);
1726 update_attr(vc); 1730 update_attr(vc);
1727 1731
1728 vc->vc_tab_stop[0] = 0x01010100; 1732 vc->vc_tab_stop[0] =
1729 vc->vc_tab_stop[1] = 1733 vc->vc_tab_stop[1] =
1730 vc->vc_tab_stop[2] = 1734 vc->vc_tab_stop[2] =
1731 vc->vc_tab_stop[3] = 1735 vc->vc_tab_stop[3] =
@@ -1769,7 +1773,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1769 vc->vc_pos -= (vc->vc_x << 1); 1773 vc->vc_pos -= (vc->vc_x << 1);
1770 while (vc->vc_x < vc->vc_cols - 1) { 1774 while (vc->vc_x < vc->vc_cols - 1) {
1771 vc->vc_x++; 1775 vc->vc_x++;
1772 if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31))) 1776 if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31)))
1773 break; 1777 break;
1774 } 1778 }
1775 vc->vc_pos += (vc->vc_x << 1); 1779 vc->vc_pos += (vc->vc_x << 1);
@@ -1829,7 +1833,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1829 lf(vc); 1833 lf(vc);
1830 return; 1834 return;
1831 case 'H': 1835 case 'H':
1832 vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31)); 1836 vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31));
1833 return; 1837 return;
1834 case 'Z': 1838 case 'Z':
1835 respond_ID(tty); 1839 respond_ID(tty);
@@ -2022,7 +2026,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
2022 return; 2026 return;
2023 case 'g': 2027 case 'g':
2024 if (!vc->vc_par[0]) 2028 if (!vc->vc_par[0])
2025 vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31)); 2029 vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31));
2026 else if (vc->vc_par[0] == 3) { 2030 else if (vc->vc_par[0] == 3) {
2027 vc->vc_tab_stop[0] = 2031 vc->vc_tab_stop[0] =
2028 vc->vc_tab_stop[1] = 2032 vc->vc_tab_stop[1] =
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 939c6ad71068..57ee43512992 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -851,7 +851,7 @@ static inline void ci_role_destroy(struct ci_hdrc *ci)
851{ 851{
852 ci_hdrc_gadget_destroy(ci); 852 ci_hdrc_gadget_destroy(ci);
853 ci_hdrc_host_destroy(ci); 853 ci_hdrc_host_destroy(ci);
854 if (ci->is_otg) 854 if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
855 ci_hdrc_otg_destroy(ci); 855 ci_hdrc_otg_destroy(ci);
856} 856}
857 857
@@ -951,27 +951,35 @@ static int ci_hdrc_probe(struct platform_device *pdev)
951 /* initialize role(s) before the interrupt is requested */ 951 /* initialize role(s) before the interrupt is requested */
952 if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) { 952 if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
953 ret = ci_hdrc_host_init(ci); 953 ret = ci_hdrc_host_init(ci);
954 if (ret) 954 if (ret) {
955 dev_info(dev, "doesn't support host\n"); 955 if (ret == -ENXIO)
956 dev_info(dev, "doesn't support host\n");
957 else
958 goto deinit_phy;
959 }
956 } 960 }
957 961
958 if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) { 962 if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
959 ret = ci_hdrc_gadget_init(ci); 963 ret = ci_hdrc_gadget_init(ci);
960 if (ret) 964 if (ret) {
961 dev_info(dev, "doesn't support gadget\n"); 965 if (ret == -ENXIO)
966 dev_info(dev, "doesn't support gadget\n");
967 else
968 goto deinit_host;
969 }
962 } 970 }
963 971
964 if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) { 972 if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
965 dev_err(dev, "no supported roles\n"); 973 dev_err(dev, "no supported roles\n");
966 ret = -ENODEV; 974 ret = -ENODEV;
967 goto deinit_phy; 975 goto deinit_gadget;
968 } 976 }
969 977
970 if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) { 978 if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) {
971 ret = ci_hdrc_otg_init(ci); 979 ret = ci_hdrc_otg_init(ci);
972 if (ret) { 980 if (ret) {
973 dev_err(dev, "init otg fails, ret = %d\n", ret); 981 dev_err(dev, "init otg fails, ret = %d\n", ret);
974 goto stop; 982 goto deinit_gadget;
975 } 983 }
976 } 984 }
977 985
@@ -1036,7 +1044,12 @@ static int ci_hdrc_probe(struct platform_device *pdev)
1036 1044
1037 ci_extcon_unregister(ci); 1045 ci_extcon_unregister(ci);
1038stop: 1046stop:
1039 ci_role_destroy(ci); 1047 if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
1048 ci_hdrc_otg_destroy(ci);
1049deinit_gadget:
1050 ci_hdrc_gadget_destroy(ci);
1051deinit_host:
1052 ci_hdrc_host_destroy(ci);
1040deinit_phy: 1053deinit_phy:
1041 ci_usb_phy_exit(ci); 1054 ci_usb_phy_exit(ci);
1042 1055
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 3f6bb3fff890..a501f3ba6a3f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -377,7 +377,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
377 377
378 res = usb_submit_urb(acm->read_urbs[index], mem_flags); 378 res = usb_submit_urb(acm->read_urbs[index], mem_flags);
379 if (res) { 379 if (res) {
380 if (res != -EPERM) { 380 if (res != -EPERM && res != -ENODEV) {
381 dev_err(&acm->data->dev, 381 dev_err(&acm->data->dev,
382 "%s - usb_submit_urb failed: %d\n", 382 "%s - usb_submit_urb failed: %d\n",
383 __func__, res); 383 __func__, res);
@@ -1695,6 +1695,12 @@ static const struct usb_device_id acm_ids[] = {
1695 { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */ 1695 { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
1696 .driver_info = SINGLE_RX_URB, /* firmware bug */ 1696 .driver_info = SINGLE_RX_URB, /* firmware bug */
1697 }, 1697 },
1698 { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
1699 .driver_info = SINGLE_RX_URB,
1700 },
1701 { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
1702 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1703 },
1698 { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */ 1704 { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
1699 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1705 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1700 }, 1706 },
@@ -1765,6 +1771,9 @@ static const struct usb_device_id acm_ids[] = {
1765 { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */ 1771 { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
1766 .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */ 1772 .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
1767 }, 1773 },
1774 { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
1775 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1776 },
1768 1777
1769 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ 1778 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
1770 .driver_info = CLEAR_HALT_CONDITIONS, 1779 .driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 22dcccf2d286..6a287c81a7be 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -157,7 +157,9 @@ static const unsigned short full_speed_maxpacket_maxes[4] = {
157static const unsigned short high_speed_maxpacket_maxes[4] = { 157static const unsigned short high_speed_maxpacket_maxes[4] = {
158 [USB_ENDPOINT_XFER_CONTROL] = 64, 158 [USB_ENDPOINT_XFER_CONTROL] = 64,
159 [USB_ENDPOINT_XFER_ISOC] = 1024, 159 [USB_ENDPOINT_XFER_ISOC] = 1024,
160 [USB_ENDPOINT_XFER_BULK] = 512, 160
161 /* Bulk should be 512, but some devices use 1024: we will warn below */
162 [USB_ENDPOINT_XFER_BULK] = 1024,
161 [USB_ENDPOINT_XFER_INT] = 1024, 163 [USB_ENDPOINT_XFER_INT] = 1024,
162}; 164};
163static const unsigned short super_speed_maxpacket_maxes[4] = { 165static const unsigned short super_speed_maxpacket_maxes[4] = {
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 358ca8dd784f..a5240b4d7ab9 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -208,8 +208,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
208 if (!udev->parent) 208 if (!udev->parent)
209 rc = hcd_bus_suspend(udev, msg); 209 rc = hcd_bus_suspend(udev, msg);
210 210
211 /* Non-root devices don't need to do anything for FREEZE or PRETHAW */ 211 /*
212 else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW) 212 * Non-root USB2 devices don't need to do anything for FREEZE
213 * or PRETHAW. USB3 devices don't support global suspend and
214 * needs to be selectively suspended.
215 */
216 else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
217 && (udev->speed < USB_SPEED_SUPER))
213 rc = 0; 218 rc = 0;
214 else 219 else
215 rc = usb_port_suspend(udev, msg); 220 rc = usb_port_suspend(udev, msg);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 24b084748b63..d90d08e3c8de 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2340,6 +2340,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
2340 2340
2341 spin_lock_irqsave (&hcd_root_hub_lock, flags); 2341 spin_lock_irqsave (&hcd_root_hub_lock, flags);
2342 if (hcd->rh_registered) { 2342 if (hcd->rh_registered) {
2343 pm_wakeup_event(&hcd->self.root_hub->dev, 0);
2343 set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); 2344 set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
2344 queue_work(pm_wq, &hcd->wakeup_work); 2345 queue_work(pm_wq, &hcd->wakeup_work);
2345 } 2346 }
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 926e9ef10f0b..4e032545a420 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -632,12 +632,17 @@ void usb_wakeup_notification(struct usb_device *hdev,
632 unsigned int portnum) 632 unsigned int portnum)
633{ 633{
634 struct usb_hub *hub; 634 struct usb_hub *hub;
635 struct usb_port *port_dev;
635 636
636 if (!hdev) 637 if (!hdev)
637 return; 638 return;
638 639
639 hub = usb_hub_to_struct_hub(hdev); 640 hub = usb_hub_to_struct_hub(hdev);
640 if (hub) { 641 if (hub) {
642 port_dev = hub->ports[portnum - 1];
643 if (port_dev && port_dev->child)
644 pm_wakeup_event(&port_dev->child->dev, 0);
645
641 set_bit(portnum, hub->wakeup_bits); 646 set_bit(portnum, hub->wakeup_bits);
642 kick_hub_wq(hub); 647 kick_hub_wq(hub);
643 } 648 }
@@ -1118,10 +1123,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1118 1123
1119 if (!udev || udev->state == USB_STATE_NOTATTACHED) { 1124 if (!udev || udev->state == USB_STATE_NOTATTACHED) {
1120 /* Tell hub_wq to disconnect the device or 1125 /* Tell hub_wq to disconnect the device or
1121 * check for a new connection 1126 * check for a new connection or over current condition.
1127 * Based on USB2.0 Spec Section 11.12.5,
1128 * C_PORT_OVER_CURRENT could be set while
1129 * PORT_OVER_CURRENT is not. So check for any of them.
1122 */ 1130 */
1123 if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || 1131 if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
1124 (portstatus & USB_PORT_STAT_OVERCURRENT)) 1132 (portstatus & USB_PORT_STAT_OVERCURRENT) ||
1133 (portchange & USB_PORT_STAT_C_OVERCURRENT))
1125 set_bit(port1, hub->change_bits); 1134 set_bit(port1, hub->change_bits);
1126 1135
1127 } else if (portstatus & USB_PORT_STAT_ENABLE) { 1136 } else if (portstatus & USB_PORT_STAT_ENABLE) {
@@ -3306,6 +3315,10 @@ static int wait_for_ss_port_enable(struct usb_device *udev,
3306 while (delay_ms < 2000) { 3315 while (delay_ms < 2000) {
3307 if (status || *portstatus & USB_PORT_STAT_CONNECTION) 3316 if (status || *portstatus & USB_PORT_STAT_CONNECTION)
3308 break; 3317 break;
3318 if (!port_is_power_on(hub, *portstatus)) {
3319 status = -ENODEV;
3320 break;
3321 }
3309 msleep(20); 3322 msleep(20);
3310 delay_ms += 20; 3323 delay_ms += 20;
3311 status = hub_port_status(hub, *port1, portstatus, portchange); 3324 status = hub_port_status(hub, *port1, portstatus, portchange);
@@ -3368,8 +3381,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
3368 3381
3369 /* Skip the initial Clear-Suspend step for a remote wakeup */ 3382 /* Skip the initial Clear-Suspend step for a remote wakeup */
3370 status = hub_port_status(hub, port1, &portstatus, &portchange); 3383 status = hub_port_status(hub, port1, &portstatus, &portchange);
3371 if (status == 0 && !port_is_suspended(hub, portstatus)) 3384 if (status == 0 && !port_is_suspended(hub, portstatus)) {
3385 if (portchange & USB_PORT_STAT_C_SUSPEND)
3386 pm_wakeup_event(&udev->dev, 0);
3372 goto SuspendCleared; 3387 goto SuspendCleared;
3388 }
3373 3389
3374 /* see 7.1.7.7; affects power usage, but not budgeting */ 3390 /* see 7.1.7.7; affects power usage, but not budgeting */
3375 if (hub_is_superspeed(hub->hdev)) 3391 if (hub_is_superspeed(hub->hdev))
@@ -4446,7 +4462,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
4446 * reset. But only on the first attempt, 4462 * reset. But only on the first attempt,
4447 * lest we get into a time out/reset loop 4463 * lest we get into a time out/reset loop
4448 */ 4464 */
4449 if (r == 0 || (r == -ETIMEDOUT && retries == 0)) 4465 if (r == 0 || (r == -ETIMEDOUT &&
4466 retries == 0 &&
4467 udev->speed > USB_SPEED_FULL))
4450 break; 4468 break;
4451 } 4469 }
4452 udev->descriptor.bMaxPacketSize0 = 4470 udev->descriptor.bMaxPacketSize0 =
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index f73a0305b3f5..60ff61915d25 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -147,6 +147,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
147 147
148 ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); 148 ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout);
149 149
150 /* Linger a bit, prior to the next control message. */
151 if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
152 msleep(200);
153
150 kfree(dr); 154 kfree(dr);
151 155
152 return ret; 156 return ret;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index c05c4f877750..99f67764765f 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -45,6 +45,9 @@ static const struct usb_device_id usb_quirk_list[] = {
45 { USB_DEVICE(0x03f0, 0x0701), .driver_info = 45 { USB_DEVICE(0x03f0, 0x0701), .driver_info =
46 USB_QUIRK_STRING_FETCH_255 }, 46 USB_QUIRK_STRING_FETCH_255 },
47 47
48 /* HP v222w 16GB Mini USB Drive */
49 { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
50
48 /* Creative SB Audigy 2 NX */ 51 /* Creative SB Audigy 2 NX */
49 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, 52 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
50 53
@@ -225,8 +228,16 @@ static const struct usb_device_id usb_quirk_list[] = {
225 { USB_DEVICE(0x1a0a, 0x0200), .driver_info = 228 { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
226 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, 229 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
227 230
231 /* Corsair K70 RGB */
232 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
233
234 /* Corsair Strafe */
235 { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
236 USB_QUIRK_DELAY_CTRL_MSG },
237
228 /* Corsair Strafe RGB */ 238 /* Corsair Strafe RGB */
229 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, 239 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
240 USB_QUIRK_DELAY_CTRL_MSG },
230 241
231 /* Corsair K70 LUX */ 242 /* Corsair K70 LUX */
232 { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, 243 { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index a738a68d2292..a899d47c2a7c 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -187,7 +187,7 @@ struct dwc2_hsotg_ep {
187 unsigned char dir_in; 187 unsigned char dir_in;
188 unsigned char index; 188 unsigned char index;
189 unsigned char mc; 189 unsigned char mc;
190 unsigned char interval; 190 u16 interval;
191 191
192 unsigned int halted:1; 192 unsigned int halted:1;
193 unsigned int periodic:1; 193 unsigned int periodic:1;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 0abf73c91beb..98705b83d2dc 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2424,12 +2424,6 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
2424 dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | 2424 dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
2425 DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0); 2425 DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);
2426 2426
2427 dwc2_hsotg_enqueue_setup(hsotg);
2428
2429 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2430 dwc2_readl(hsotg->regs + DIEPCTL0),
2431 dwc2_readl(hsotg->regs + DOEPCTL0));
2432
2433 /* clear global NAKs */ 2427 /* clear global NAKs */
2434 val = DCTL_CGOUTNAK | DCTL_CGNPINNAK; 2428 val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
2435 if (!is_usb_reset) 2429 if (!is_usb_reset)
@@ -2440,6 +2434,12 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
2440 mdelay(3); 2434 mdelay(3);
2441 2435
2442 hsotg->lx_state = DWC2_L0; 2436 hsotg->lx_state = DWC2_L0;
2437
2438 dwc2_hsotg_enqueue_setup(hsotg);
2439
2440 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2441 dwc2_readl(hsotg->regs + DIEPCTL0),
2442 dwc2_readl(hsotg->regs + DOEPCTL0));
2443} 2443}
2444 2444
2445static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) 2445static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 571c21727ff9..85fb6226770c 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -1402,8 +1402,12 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
1402 if (count > 250) 1402 if (count > 250)
1403 dev_err(hsotg->dev, 1403 dev_err(hsotg->dev,
1404 "Connection id status change timed out\n"); 1404 "Connection id status change timed out\n");
1405 hsotg->op_state = OTG_STATE_A_HOST;
1406 1405
1406 spin_lock_irqsave(&hsotg->lock, flags);
1407 dwc2_hsotg_disconnect(hsotg);
1408 spin_unlock_irqrestore(&hsotg->lock, flags);
1409
1410 hsotg->op_state = OTG_STATE_A_HOST;
1407 /* Initialize the Core for Host mode */ 1411 /* Initialize the Core for Host mode */
1408 dwc2_core_init(hsotg, false, -1); 1412 dwc2_core_init(hsotg, false, -1);
1409 dwc2_enable_global_interrupts(hsotg); 1413 dwc2_enable_global_interrupts(hsotg);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index b66723a77db3..1ef92df8e3e7 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -214,6 +214,8 @@
214#define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1) 214#define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1)
215 215
216/* Global TX Fifo Size Register */ 216/* Global TX Fifo Size Register */
217#define DWC31_GTXFIFOSIZ_TXFRAMNUM BIT(15) /* DWC_usb31 only */
218#define DWC31_GTXFIFOSIZ_TXFDEF(n) ((n) & 0x7fff) /* DWC_usb31 only */
217#define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff) 219#define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff)
218#define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000) 220#define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000)
219 221
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 72664700b8a2..12ee23f53cdd 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -107,6 +107,10 @@ static int kdwc3_probe(struct platform_device *pdev)
107 return PTR_ERR(kdwc->usbss); 107 return PTR_ERR(kdwc->usbss);
108 108
109 kdwc->clk = devm_clk_get(kdwc->dev, "usb"); 109 kdwc->clk = devm_clk_get(kdwc->dev, "usb");
110 if (IS_ERR(kdwc->clk)) {
111 dev_err(kdwc->dev, "unable to get usb clock\n");
112 return PTR_ERR(kdwc->clk);
113 }
110 114
111 error = clk_prepare_enable(kdwc->clk); 115 error = clk_prepare_enable(kdwc->clk);
112 if (error < 0) { 116 if (error < 0) {
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index d2c0c1a8d979..68230adf2449 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -167,7 +167,7 @@ static int dwc3_pci_probe(struct pci_dev *pci,
167 ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res)); 167 ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
168 if (ret) { 168 if (ret) {
169 dev_err(dev, "couldn't add resources to dwc3 device\n"); 169 dev_err(dev, "couldn't add resources to dwc3 device\n");
170 return ret; 170 goto err;
171 } 171 }
172 172
173 pci_set_drvdata(pci, dwc3); 173 pci_set_drvdata(pci, dwc3);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index c778c511ea90..3e35a67b6369 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2532,6 +2532,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2532 break; 2532 break;
2533 } 2533 }
2534 2534
2535 dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;
2536
2535 /* Enable USB2 LPM Capability */ 2537 /* Enable USB2 LPM Capability */
2536 2538
2537 if ((dwc->revision > DWC3_REVISION_194A) 2539 if ((dwc->revision > DWC3_REVISION_194A)
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d186d0282a38..eb445c2ab15e 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -104,7 +104,6 @@ int config_ep_by_speed(struct usb_gadget *g,
104 struct usb_function *f, 104 struct usb_function *f,
105 struct usb_ep *_ep) 105 struct usb_ep *_ep)
106{ 106{
107 struct usb_composite_dev *cdev = get_gadget_data(g);
108 struct usb_endpoint_descriptor *chosen_desc = NULL; 107 struct usb_endpoint_descriptor *chosen_desc = NULL;
109 struct usb_descriptor_header **speed_desc = NULL; 108 struct usb_descriptor_header **speed_desc = NULL;
110 109
@@ -176,8 +175,12 @@ ep_found:
176 _ep->maxburst = comp_desc->bMaxBurst + 1; 175 _ep->maxburst = comp_desc->bMaxBurst + 1;
177 break; 176 break;
178 default: 177 default:
179 if (comp_desc->bMaxBurst != 0) 178 if (comp_desc->bMaxBurst != 0) {
179 struct usb_composite_dev *cdev;
180
181 cdev = get_gadget_data(g);
180 ERROR(cdev, "ep0 bMaxBurst must be 0\n"); 182 ERROR(cdev, "ep0 bMaxBurst must be 0\n");
183 }
181 _ep->maxburst = 1; 184 _ep->maxburst = 1;
182 break; 185 break;
183 } 186 }
@@ -1325,7 +1328,7 @@ static int count_ext_compat(struct usb_configuration *c)
1325 return res; 1328 return res;
1326} 1329}
1327 1330
1328static void fill_ext_compat(struct usb_configuration *c, u8 *buf) 1331static int fill_ext_compat(struct usb_configuration *c, u8 *buf)
1329{ 1332{
1330 int i, count; 1333 int i, count;
1331 1334
@@ -1352,10 +1355,12 @@ static void fill_ext_compat(struct usb_configuration *c, u8 *buf)
1352 buf += 23; 1355 buf += 23;
1353 } 1356 }
1354 count += 24; 1357 count += 24;
1355 if (count >= 4096) 1358 if (count + 24 >= USB_COMP_EP0_OS_DESC_BUFSIZ)
1356 return; 1359 return count;
1357 } 1360 }
1358 } 1361 }
1362
1363 return count;
1359} 1364}
1360 1365
1361static int count_ext_prop(struct usb_configuration *c, int interface) 1366static int count_ext_prop(struct usb_configuration *c, int interface)
@@ -1400,25 +1405,20 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf)
1400 struct usb_os_desc *d; 1405 struct usb_os_desc *d;
1401 struct usb_os_desc_ext_prop *ext_prop; 1406 struct usb_os_desc_ext_prop *ext_prop;
1402 int j, count, n, ret; 1407 int j, count, n, ret;
1403 u8 *start = buf;
1404 1408
1405 f = c->interface[interface]; 1409 f = c->interface[interface];
1410 count = 10; /* header length */
1406 for (j = 0; j < f->os_desc_n; ++j) { 1411 for (j = 0; j < f->os_desc_n; ++j) {
1407 if (interface != f->os_desc_table[j].if_id) 1412 if (interface != f->os_desc_table[j].if_id)
1408 continue; 1413 continue;
1409 d = f->os_desc_table[j].os_desc; 1414 d = f->os_desc_table[j].os_desc;
1410 if (d) 1415 if (d)
1411 list_for_each_entry(ext_prop, &d->ext_prop, entry) { 1416 list_for_each_entry(ext_prop, &d->ext_prop, entry) {
1412 /* 4kB minus header length */ 1417 n = ext_prop->data_len +
1413 n = buf - start;
1414 if (n >= 4086)
1415 return 0;
1416
1417 count = ext_prop->data_len +
1418 ext_prop->name_len + 14; 1418 ext_prop->name_len + 14;
1419 if (count > 4086 - n) 1419 if (count + n >= USB_COMP_EP0_OS_DESC_BUFSIZ)
1420 return -EINVAL; 1420 return count;
1421 usb_ext_prop_put_size(buf, count); 1421 usb_ext_prop_put_size(buf, n);
1422 usb_ext_prop_put_type(buf, ext_prop->type); 1422 usb_ext_prop_put_type(buf, ext_prop->type);
1423 ret = usb_ext_prop_put_name(buf, ext_prop->name, 1423 ret = usb_ext_prop_put_name(buf, ext_prop->name,
1424 ext_prop->name_len); 1424 ext_prop->name_len);
@@ -1444,11 +1444,12 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf)
1444 default: 1444 default:
1445 return -EINVAL; 1445 return -EINVAL;
1446 } 1446 }
1447 buf += count; 1447 buf += n;
1448 count += n;
1448 } 1449 }
1449 } 1450 }
1450 1451
1451 return 0; 1452 return count;
1452} 1453}
1453 1454
1454/* 1455/*
@@ -1717,6 +1718,7 @@ unknown:
1717 req->complete = composite_setup_complete; 1718 req->complete = composite_setup_complete;
1718 buf = req->buf; 1719 buf = req->buf;
1719 os_desc_cfg = cdev->os_desc_config; 1720 os_desc_cfg = cdev->os_desc_config;
1721 w_length = min_t(u16, w_length, USB_COMP_EP0_OS_DESC_BUFSIZ);
1720 memset(buf, 0, w_length); 1722 memset(buf, 0, w_length);
1721 buf[5] = 0x01; 1723 buf[5] = 0x01;
1722 switch (ctrl->bRequestType & USB_RECIP_MASK) { 1724 switch (ctrl->bRequestType & USB_RECIP_MASK) {
@@ -1740,8 +1742,8 @@ unknown:
1740 count += 16; /* header */ 1742 count += 16; /* header */
1741 put_unaligned_le32(count, buf); 1743 put_unaligned_le32(count, buf);
1742 buf += 16; 1744 buf += 16;
1743 fill_ext_compat(os_desc_cfg, buf); 1745 value = fill_ext_compat(os_desc_cfg, buf);
1744 value = w_length; 1746 value = min_t(u16, w_length, value);
1745 } 1747 }
1746 break; 1748 break;
1747 case USB_RECIP_INTERFACE: 1749 case USB_RECIP_INTERFACE:
@@ -1770,8 +1772,7 @@ unknown:
1770 interface, buf); 1772 interface, buf);
1771 if (value < 0) 1773 if (value < 0)
1772 return value; 1774 return value;
1773 1775 value = min_t(u16, w_length, value);
1774 value = w_length;
1775 } 1776 }
1776 break; 1777 break;
1777 } 1778 }
@@ -2035,8 +2036,8 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
2035 goto end; 2036 goto end;
2036 } 2037 }
2037 2038
2038 /* OS feature descriptor length <= 4kB */ 2039 cdev->os_desc_req->buf = kmalloc(USB_COMP_EP0_OS_DESC_BUFSIZ,
2039 cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); 2040 GFP_KERNEL);
2040 if (!cdev->os_desc_req->buf) { 2041 if (!cdev->os_desc_req->buf) {
2041 ret = PTR_ERR(cdev->os_desc_req->buf); 2042 ret = PTR_ERR(cdev->os_desc_req->buf);
2042 kfree(cdev->os_desc_req); 2043 kfree(cdev->os_desc_req);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 39bb65265bff..4800bb22cdd6 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -649,11 +649,15 @@ static void ffs_user_copy_worker(struct work_struct *work)
649 bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD; 649 bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
650 650
651 if (io_data->read && ret > 0) { 651 if (io_data->read && ret > 0) {
652 mm_segment_t oldfs = get_fs();
653
654 set_fs(USER_DS);
652 use_mm(io_data->mm); 655 use_mm(io_data->mm);
653 ret = copy_to_iter(io_data->buf, ret, &io_data->data); 656 ret = copy_to_iter(io_data->buf, ret, &io_data->data);
654 if (ret != io_data->req->actual && iov_iter_count(&io_data->data)) 657 if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
655 ret = -EFAULT; 658 ret = -EFAULT;
656 unuse_mm(io_data->mm); 659 unuse_mm(io_data->mm);
660 set_fs(oldfs);
657 } 661 }
658 662
659 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret); 663 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
@@ -1333,7 +1337,6 @@ ffs_fs_kill_sb(struct super_block *sb)
1333 if (sb->s_fs_info) { 1337 if (sb->s_fs_info) {
1334 ffs_release_dev(sb->s_fs_info); 1338 ffs_release_dev(sb->s_fs_info);
1335 ffs_data_closed(sb->s_fs_info); 1339 ffs_data_closed(sb->s_fs_info);
1336 ffs_data_put(sb->s_fs_info);
1337 } 1340 }
1338} 1341}
1339 1342
@@ -2756,10 +2759,8 @@ static int _ffs_func_bind(struct usb_configuration *c,
2756 struct ffs_data *ffs = func->ffs; 2759 struct ffs_data *ffs = func->ffs;
2757 2760
2758 const int full = !!func->ffs->fs_descs_count; 2761 const int full = !!func->ffs->fs_descs_count;
2759 const int high = gadget_is_dualspeed(func->gadget) && 2762 const int high = !!func->ffs->hs_descs_count;
2760 func->ffs->hs_descs_count; 2763 const int super = !!func->ffs->ss_descs_count;
2761 const int super = gadget_is_superspeed(func->gadget) &&
2762 func->ffs->ss_descs_count;
2763 2764
2764 int fs_len, hs_len, ss_len, ret, i; 2765 int fs_len, hs_len, ss_len, ret, i;
2765 struct ffs_ep *eps_ptr; 2766 struct ffs_ep *eps_ptr;
@@ -3036,7 +3037,7 @@ static int ffs_func_setup(struct usb_function *f,
3036 __ffs_event_add(ffs, FUNCTIONFS_SETUP); 3037 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
3037 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); 3038 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
3038 3039
3039 return 0; 3040 return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
3040} 3041}
3041 3042
3042static void ffs_func_suspend(struct usb_function *f) 3043static void ffs_func_suspend(struct usb_function *f)
@@ -3490,7 +3491,8 @@ static void ffs_closed(struct ffs_data *ffs)
3490 ci = opts->func_inst.group.cg_item.ci_parent->ci_parent; 3491 ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
3491 ffs_dev_unlock(); 3492 ffs_dev_unlock();
3492 3493
3493 unregister_gadget_item(ci); 3494 if (test_bit(FFS_FL_BOUND, &ffs->flags))
3495 unregister_gadget_item(ci);
3494 return; 3496 return;
3495done: 3497done:
3496 ffs_dev_unlock(); 3498 ffs_dev_unlock();
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index ee579ba2b59e..a5dae5bb62ab 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -223,6 +223,13 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
223 /* pick the first one */ 223 /* pick the first one */
224 list = list_first_entry(&hidg->completed_out_req, 224 list = list_first_entry(&hidg->completed_out_req,
225 struct f_hidg_req_list, list); 225 struct f_hidg_req_list, list);
226
227 /*
228 * Remove this from list to protect it from beign free()
229 * while host disables our function
230 */
231 list_del(&list->list);
232
226 req = list->req; 233 req = list->req;
227 count = min_t(unsigned int, count, req->actual - list->pos); 234 count = min_t(unsigned int, count, req->actual - list->pos);
228 spin_unlock_irqrestore(&hidg->spinlock, flags); 235 spin_unlock_irqrestore(&hidg->spinlock, flags);
@@ -238,15 +245,20 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
238 * call, taking into account its current read position. 245 * call, taking into account its current read position.
239 */ 246 */
240 if (list->pos == req->actual) { 247 if (list->pos == req->actual) {
241 spin_lock_irqsave(&hidg->spinlock, flags);
242 list_del(&list->list);
243 kfree(list); 248 kfree(list);
244 spin_unlock_irqrestore(&hidg->spinlock, flags);
245 249
246 req->length = hidg->report_length; 250 req->length = hidg->report_length;
247 ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL); 251 ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL);
248 if (ret < 0) 252 if (ret < 0) {
253 free_ep_req(hidg->out_ep, req);
249 return ret; 254 return ret;
255 }
256 } else {
257 spin_lock_irqsave(&hidg->spinlock, flags);
258 list_add(&list->list, &hidg->completed_out_req);
259 spin_unlock_irqrestore(&hidg->spinlock, flags);
260
261 wake_up(&hidg->read_queue);
250 } 262 }
251 263
252 return count; 264 return count;
@@ -490,14 +502,18 @@ static void hidg_disable(struct usb_function *f)
490{ 502{
491 struct f_hidg *hidg = func_to_hidg(f); 503 struct f_hidg *hidg = func_to_hidg(f);
492 struct f_hidg_req_list *list, *next; 504 struct f_hidg_req_list *list, *next;
505 unsigned long flags;
493 506
494 usb_ep_disable(hidg->in_ep); 507 usb_ep_disable(hidg->in_ep);
495 usb_ep_disable(hidg->out_ep); 508 usb_ep_disable(hidg->out_ep);
496 509
510 spin_lock_irqsave(&hidg->spinlock, flags);
497 list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) { 511 list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
512 free_ep_req(hidg->out_ep, list->req);
498 list_del(&list->list); 513 list_del(&list->list);
499 kfree(list); 514 kfree(list);
500 } 515 }
516 spin_unlock_irqrestore(&hidg->spinlock, flags);
501} 517}
502 518
503static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 519static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index af60cc3714c1..5ead414586a1 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -201,12 +201,6 @@ static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep,
201 return alloc_ep_req(ep, length, length); 201 return alloc_ep_req(ep, length, length);
202} 202}
203 203
204static void free_ep_req(struct usb_ep *ep, struct usb_request *req)
205{
206 kfree(req->buf);
207 usb_ep_free_request(ep, req);
208}
209
210static const uint8_t f_midi_cin_length[] = { 204static const uint8_t f_midi_cin_length[] = {
211 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1 205 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1
212}; 206};
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 9f3ced62d916..67b243989938 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -303,12 +303,6 @@ static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
303 return alloc_ep_req(ep, len, ss->buflen); 303 return alloc_ep_req(ep, len, ss->buflen);
304} 304}
305 305
306void free_ep_req(struct usb_ep *ep, struct usb_request *req)
307{
308 kfree(req->buf);
309 usb_ep_free_request(ep, req);
310}
311
312static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep) 306static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
313{ 307{
314 int value; 308 int value;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 12064d3bddf6..b5dab103be38 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1052,6 +1052,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
1052 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); 1052 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
1053 return ret; 1053 return ret;
1054 } 1054 }
1055 iad_desc.bFirstInterface = ret;
1056
1055 std_ac_if_desc.bInterfaceNumber = ret; 1057 std_ac_if_desc.bInterfaceNumber = ret;
1056 agdev->ac_intf = ret; 1058 agdev->ac_intf = ret;
1057 agdev->ac_alt = 0; 1059 agdev->ac_alt = 0;
diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h
index 15f180904f8a..5ed90b437f18 100644
--- a/drivers/usb/gadget/function/g_zero.h
+++ b/drivers/usb/gadget/function/g_zero.h
@@ -59,7 +59,6 @@ void lb_modexit(void);
59int lb_modinit(void); 59int lb_modinit(void);
60 60
61/* common utilities */ 61/* common utilities */
62void free_ep_req(struct usb_ep *ep, struct usb_request *req);
63void disable_endpoints(struct usb_composite_dev *cdev, 62void disable_endpoints(struct usb_composite_dev *cdev,
64 struct usb_ep *in, struct usb_ep *out, 63 struct usb_ep *in, struct usb_ep *out,
65 struct usb_ep *iso_in, struct usb_ep *iso_out); 64 struct usb_ep *iso_in, struct usb_ep *iso_out);
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index ad8c9b05572d..01656f1c6d65 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -2202,7 +2202,7 @@ static struct configfs_item_operations uvc_item_ops = {
2202 .release = uvc_attr_release, 2202 .release = uvc_attr_release,
2203}; 2203};
2204 2204
2205#define UVCG_OPTS_ATTR(cname, conv, str2u, uxx, vnoc, limit) \ 2205#define UVCG_OPTS_ATTR(cname, aname, conv, str2u, uxx, vnoc, limit) \
2206static ssize_t f_uvc_opts_##cname##_show( \ 2206static ssize_t f_uvc_opts_##cname##_show( \
2207 struct config_item *item, char *page) \ 2207 struct config_item *item, char *page) \
2208{ \ 2208{ \
@@ -2245,16 +2245,16 @@ end: \
2245 return ret; \ 2245 return ret; \
2246} \ 2246} \
2247 \ 2247 \
2248UVC_ATTR(f_uvc_opts_, cname, aname) 2248UVC_ATTR(f_uvc_opts_, cname, cname)
2249 2249
2250#define identity_conv(x) (x) 2250#define identity_conv(x) (x)
2251 2251
2252UVCG_OPTS_ATTR(streaming_interval, identity_conv, kstrtou8, u8, identity_conv, 2252UVCG_OPTS_ATTR(streaming_interval, streaming_interval, identity_conv,
2253 16); 2253 kstrtou8, u8, identity_conv, 16);
2254UVCG_OPTS_ATTR(streaming_maxpacket, le16_to_cpu, kstrtou16, u16, le16_to_cpu, 2254UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, le16_to_cpu,
2255 3072); 2255 kstrtou16, u16, le16_to_cpu, 3072);
2256UVCG_OPTS_ATTR(streaming_maxburst, identity_conv, kstrtou8, u8, identity_conv, 2256UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, identity_conv,
2257 15); 2257 kstrtou8, u8, identity_conv, 15);
2258 2258
2259#undef identity_conv 2259#undef identity_conv
2260 2260
diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c
index c6276f0268ae..907f8144813c 100644
--- a/drivers/usb/gadget/u_f.c
+++ b/drivers/usb/gadget/u_f.c
@@ -11,16 +11,18 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/usb/gadget.h>
15#include "u_f.h" 14#include "u_f.h"
15#include <linux/usb/ch9.h>
16 16
17struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len) 17struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len)
18{ 18{
19 struct usb_request *req; 19 struct usb_request *req;
20 20
21 req = usb_ep_alloc_request(ep, GFP_ATOMIC); 21 req = usb_ep_alloc_request(ep, GFP_ATOMIC);
22 if (req) { 22 if (req) {
23 req->length = len ?: default_len; 23 req->length = len ?: default_len;
24 if (usb_endpoint_dir_out(ep->desc))
25 req->length = usb_ep_align(ep, req->length);
24 req->buf = kmalloc(req->length, GFP_ATOMIC); 26 req->buf = kmalloc(req->length, GFP_ATOMIC);
25 if (!req->buf) { 27 if (!req->buf) {
26 usb_ep_free_request(ep, req); 28 usb_ep_free_request(ep, req);
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
index 1d5f0eb68552..69a1d10df04f 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/drivers/usb/gadget/u_f.h
@@ -16,6 +16,8 @@
16#ifndef __U_F_H__ 16#ifndef __U_F_H__
17#define __U_F_H__ 17#define __U_F_H__
18 18
19#include <linux/usb/gadget.h>
20
19/* Variable Length Array Macros **********************************************/ 21/* Variable Length Array Macros **********************************************/
20#define vla_group(groupname) size_t groupname##__next = 0 22#define vla_group(groupname) size_t groupname##__next = 0
21#define vla_group_size(groupname) groupname##__next 23#define vla_group_size(groupname) groupname##__next
@@ -45,8 +47,26 @@
45struct usb_ep; 47struct usb_ep;
46struct usb_request; 48struct usb_request;
47 49
48struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len); 50/**
49 51 * alloc_ep_req - returns a usb_request allocated by the gadget driver and
50#endif /* __U_F_H__ */ 52 * allocates the request's buffer.
53 *
54 * @ep: the endpoint to allocate a usb_request
55 * @len: usb_requests's buffer suggested size
56 * @default_len: used if @len is not provided, ie, is 0
57 *
58 * In case @ep direction is OUT, the @len will be aligned to ep's
59 * wMaxPacketSize. In order to avoid memory leaks or drops, *always* use
60 * usb_requests's length (req->length) to refer to the allocated buffer size.
61 * Requests allocated via alloc_ep_req() *must* be freed by free_ep_req().
62 */
63struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len);
51 64
65/* Frees a usb_request previously allocated by alloc_ep_req() */
66static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
67{
68 kfree(req->buf);
69 usb_ep_free_request(ep, req);
70}
52 71
72#endif /* __U_F_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index ccb9c213cc9f..e9bd8d4abca0 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -475,7 +475,7 @@ static int bdc_probe(struct platform_device *pdev)
475 bdc->dev = dev; 475 bdc->dev = dev;
476 dev_dbg(bdc->dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq); 476 dev_dbg(bdc->dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);
477 477
478 temp = bdc_readl(bdc->regs, BDC_BDCSC); 478 temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
479 if ((temp & BDC_P64) && 479 if ((temp & BDC_P64) &&
480 !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) { 480 !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
481 dev_dbg(bdc->dev, "Using 64-bit address\n"); 481 dev_dbg(bdc->dev, "Using 64-bit address\n");
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
index 02968842b359..708e36f530d8 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_pci.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c
@@ -82,6 +82,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
82 if (ret) { 82 if (ret) {
83 dev_err(&pci->dev, 83 dev_err(&pci->dev,
84 "couldn't add resources to bdc device\n"); 84 "couldn't add resources to bdc device\n");
85 platform_device_put(bdc);
85 return ret; 86 return ret;
86 } 87 }
87 88
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 8080a11947b7..eb876ed96861 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2105,16 +2105,13 @@ static int dummy_hub_control(
2105 } 2105 }
2106 break; 2106 break;
2107 case USB_PORT_FEAT_POWER: 2107 case USB_PORT_FEAT_POWER:
2108 if (hcd->speed == HCD_USB3) { 2108 dev_dbg(dummy_dev(dum_hcd), "power-off\n");
2109 if (dum_hcd->port_status & USB_PORT_STAT_POWER) 2109 if (hcd->speed == HCD_USB3)
2110 dev_dbg(dummy_dev(dum_hcd), 2110 dum_hcd->port_status &= ~USB_SS_PORT_STAT_POWER;
2111 "power-off\n"); 2111 else
2112 } else 2112 dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
2113 if (dum_hcd->port_status & 2113 set_link_state(dum_hcd);
2114 USB_SS_PORT_STAT_POWER) 2114 break;
2115 dev_dbg(dummy_dev(dum_hcd),
2116 "power-off\n");
2117 /* FALLS THROUGH */
2118 default: 2115 default:
2119 dum_hcd->port_status &= ~(1 << wValue); 2116 dum_hcd->port_status &= ~(1 << wValue);
2120 set_link_state(dum_hcd); 2117 set_link_state(dum_hcd);
@@ -2285,14 +2282,13 @@ static int dummy_hub_control(
2285 if ((dum_hcd->port_status & 2282 if ((dum_hcd->port_status &
2286 USB_SS_PORT_STAT_POWER) != 0) { 2283 USB_SS_PORT_STAT_POWER) != 0) {
2287 dum_hcd->port_status |= (1 << wValue); 2284 dum_hcd->port_status |= (1 << wValue);
2288 set_link_state(dum_hcd);
2289 } 2285 }
2290 } else 2286 } else
2291 if ((dum_hcd->port_status & 2287 if ((dum_hcd->port_status &
2292 USB_PORT_STAT_POWER) != 0) { 2288 USB_PORT_STAT_POWER) != 0) {
2293 dum_hcd->port_status |= (1 << wValue); 2289 dum_hcd->port_status |= (1 << wValue);
2294 set_link_state(dum_hcd);
2295 } 2290 }
2291 set_link_state(dum_hcd);
2296 } 2292 }
2297 break; 2293 break;
2298 case GetPortErrorCount: 2294 case GetPortErrorCount:
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index aac0ce8aeb0b..8991a4070792 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1310,7 +1310,7 @@ static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
1310{ 1310{
1311 struct fsl_ep *ep = get_ep_by_pipe(udc, pipe); 1311 struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
1312 1312
1313 if (ep->name) 1313 if (ep->ep.name)
1314 nuke(ep, -ESHUTDOWN); 1314 nuke(ep, -ESHUTDOWN);
1315} 1315}
1316 1316
@@ -1698,7 +1698,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
1698 curr_ep = get_ep_by_pipe(udc, i); 1698 curr_ep = get_ep_by_pipe(udc, i);
1699 1699
1700 /* If the ep is configured */ 1700 /* If the ep is configured */
1701 if (curr_ep->name == NULL) { 1701 if (!curr_ep->ep.name) {
1702 WARNING("Invalid EP?"); 1702 WARNING("Invalid EP?");
1703 continue; 1703 continue;
1704 } 1704 }
diff --git a/drivers/usb/gadget/udc/goku_udc.h b/drivers/usb/gadget/udc/goku_udc.h
index 86d2adafe149..64eb0f2b5ea0 100644
--- a/drivers/usb/gadget/udc/goku_udc.h
+++ b/drivers/usb/gadget/udc/goku_udc.h
@@ -28,7 +28,7 @@ struct goku_udc_regs {
28# define INT_EP1DATASET 0x00040 28# define INT_EP1DATASET 0x00040
29# define INT_EP2DATASET 0x00080 29# define INT_EP2DATASET 0x00080
30# define INT_EP3DATASET 0x00100 30# define INT_EP3DATASET 0x00100
31#define INT_EPnNAK(n) (0x00100 < (n)) /* 0 < n < 4 */ 31#define INT_EPnNAK(n) (0x00100 << (n)) /* 0 < n < 4 */
32# define INT_EP1NAK 0x00200 32# define INT_EP1NAK 0x00200
33# define INT_EP2NAK 0x00400 33# define INT_EP2NAK 0x00400
34# define INT_EP3NAK 0x00800 34# define INT_EP3NAK 0x00800
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 3bb08870148f..95e72d75e0a0 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -220,6 +220,8 @@ config USB_EHCI_TEGRA
220 depends on ARCH_TEGRA 220 depends on ARCH_TEGRA
221 select USB_EHCI_ROOT_HUB_TT 221 select USB_EHCI_ROOT_HUB_TT
222 select USB_PHY 222 select USB_PHY
223 select USB_ULPI
224 select USB_ULPI_VIEWPORT
223 help 225 help
224 This driver enables support for the internal USB Host Controllers 226 This driver enables support for the internal USB Host Controllers
225 found in NVIDIA Tegra SoCs. The controllers are EHCI compliant. 227 found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 9d1192aea9d0..602c6e42c34d 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -444,7 +444,8 @@ static int ohci_init (struct ohci_hcd *ohci)
444 struct usb_hcd *hcd = ohci_to_hcd(ohci); 444 struct usb_hcd *hcd = ohci_to_hcd(ohci);
445 445
446 /* Accept arbitrarily long scatter-gather lists */ 446 /* Accept arbitrarily long scatter-gather lists */
447 hcd->self.sg_tablesize = ~0; 447 if (!(hcd->driver->flags & HCD_LOCAL_MEM))
448 hcd->self.sg_tablesize = ~0;
448 449
449 if (distrust_firmware) 450 if (distrust_firmware)
450 ohci->flags |= OHCI_QUIRK_HUB_POWER; 451 ohci->flags |= OHCI_QUIRK_HUB_POWER;
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 641fed609911..24edb7674710 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -1018,6 +1018,8 @@ skip_ed:
1018 * have modified this list. normally it's just prepending 1018 * have modified this list. normally it's just prepending
1019 * entries (which we'd ignore), but paranoia won't hurt. 1019 * entries (which we'd ignore), but paranoia won't hurt.
1020 */ 1020 */
1021 *last = ed->ed_next;
1022 ed->ed_next = NULL;
1021 modified = 0; 1023 modified = 0;
1022 1024
1023 /* unlink urbs as requested, but rescan the list after 1025 /* unlink urbs as requested, but rescan the list after
@@ -1076,21 +1078,22 @@ rescan_this:
1076 goto rescan_this; 1078 goto rescan_this;
1077 1079
1078 /* 1080 /*
1079 * If no TDs are queued, take ED off the ed_rm_list. 1081 * If no TDs are queued, ED is now idle.
1080 * Otherwise, if the HC is running, reschedule. 1082 * Otherwise, if the HC is running, reschedule.
1081 * If not, leave it on the list for further dequeues. 1083 * If the HC isn't running, add ED back to the
1084 * start of the list for later processing.
1082 */ 1085 */
1083 if (list_empty(&ed->td_list)) { 1086 if (list_empty(&ed->td_list)) {
1084 *last = ed->ed_next;
1085 ed->ed_next = NULL;
1086 ed->state = ED_IDLE; 1087 ed->state = ED_IDLE;
1087 list_del(&ed->in_use_list); 1088 list_del(&ed->in_use_list);
1088 } else if (ohci->rh_state == OHCI_RH_RUNNING) { 1089 } else if (ohci->rh_state == OHCI_RH_RUNNING) {
1089 *last = ed->ed_next;
1090 ed->ed_next = NULL;
1091 ed_schedule(ohci, ed); 1090 ed_schedule(ohci, ed);
1092 } else { 1091 } else {
1093 last = &ed->ed_next; 1092 ed->ed_next = ohci->ed_rm_list;
1093 ohci->ed_rm_list = ed;
1094 /* Don't loop on the same ED */
1095 if (last == &ohci->ed_rm_list)
1096 last = &ed->ed_next;
1094 } 1097 }
1095 1098
1096 if (modified) 1099 if (modified)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d9363713b7f1..0ec809a35a3f 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -638,7 +638,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
638 if (!ep->stream_info) 638 if (!ep->stream_info)
639 return NULL; 639 return NULL;
640 640
641 if (stream_id > ep->stream_info->num_streams) 641 if (stream_id >= ep->stream_info->num_streams)
642 return NULL; 642 return NULL;
643 return ep->stream_info->stream_rings[stream_id]; 643 return ep->stream_info->stream_rings[stream_id];
644} 644}
@@ -960,6 +960,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
960 if (dev->out_ctx) 960 if (dev->out_ctx)
961 xhci_free_container_ctx(xhci, dev->out_ctx); 961 xhci_free_container_ctx(xhci, dev->out_ctx);
962 962
963 if (dev->udev && dev->udev->slot_id)
964 dev->udev->slot_id = 0;
963 kfree(xhci->devs[slot_id]); 965 kfree(xhci->devs[slot_id]);
964 xhci->devs[slot_id] = NULL; 966 xhci->devs[slot_id] = NULL;
965} 967}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 64e722c59a18..e01d353a5978 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -290,7 +290,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
290static struct platform_driver usb_xhci_driver = { 290static struct platform_driver usb_xhci_driver = {
291 .probe = xhci_plat_probe, 291 .probe = xhci_plat_probe,
292 .remove = xhci_plat_remove, 292 .remove = xhci_plat_remove,
293 .shutdown = usb_hcd_platform_shutdown,
294 .driver = { 293 .driver = {
295 .name = "xhci-hcd", 294 .name = "xhci-hcd",
296 .pm = DEV_PM_OPS, 295 .pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 82d7419b2c16..d90580a750ba 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -887,6 +887,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
887 spin_unlock_irqrestore(&xhci->lock, flags); 887 spin_unlock_irqrestore(&xhci->lock, flags);
888} 888}
889 889
890static bool xhci_pending_portevent(struct xhci_hcd *xhci)
891{
892 __le32 __iomem **port_array;
893 int port_index;
894 u32 status;
895 u32 portsc;
896
897 status = readl(&xhci->op_regs->status);
898 if (status & STS_EINT)
899 return true;
900 /*
901 * Checking STS_EINT is not enough as there is a lag between a change
902 * bit being set and the Port Status Change Event that it generated
903 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
904 */
905
906 port_index = xhci->num_usb2_ports;
907 port_array = xhci->usb2_ports;
908 while (port_index--) {
909 portsc = readl(port_array[port_index]);
910 if (portsc & PORT_CHANGE_MASK ||
911 (portsc & PORT_PLS_MASK) == XDEV_RESUME)
912 return true;
913 }
914 port_index = xhci->num_usb3_ports;
915 port_array = xhci->usb3_ports;
916 while (port_index--) {
917 portsc = readl(port_array[port_index]);
918 if (portsc & PORT_CHANGE_MASK ||
919 (portsc & PORT_PLS_MASK) == XDEV_RESUME)
920 return true;
921 }
922 return false;
923}
924
890/* 925/*
891 * Stop HC (not bus-specific) 926 * Stop HC (not bus-specific)
892 * 927 *
@@ -983,7 +1018,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
983 */ 1018 */
984int xhci_resume(struct xhci_hcd *xhci, bool hibernated) 1019int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
985{ 1020{
986 u32 command, temp = 0, status; 1021 u32 command, temp = 0;
987 struct usb_hcd *hcd = xhci_to_hcd(xhci); 1022 struct usb_hcd *hcd = xhci_to_hcd(xhci);
988 struct usb_hcd *secondary_hcd; 1023 struct usb_hcd *secondary_hcd;
989 int retval = 0; 1024 int retval = 0;
@@ -1105,8 +1140,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1105 done: 1140 done:
1106 if (retval == 0) { 1141 if (retval == 0) {
1107 /* Resume root hubs only when have pending events. */ 1142 /* Resume root hubs only when have pending events. */
1108 status = readl(&xhci->op_regs->status); 1143 if (xhci_pending_portevent(xhci)) {
1109 if (status & STS_EINT) {
1110 usb_hcd_resume_root_hub(xhci->shared_hcd); 1144 usb_hcd_resume_root_hub(xhci->shared_hcd);
1111 usb_hcd_resume_root_hub(hcd); 1145 usb_hcd_resume_root_hub(hcd);
1112 } 1146 }
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 9ed3de1a1d8c..260a50f6070e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -382,6 +382,10 @@ struct xhci_op_regs {
382#define PORT_PLC (1 << 22) 382#define PORT_PLC (1 << 22)
383/* port configure error change - port failed to configure its link partner */ 383/* port configure error change - port failed to configure its link partner */
384#define PORT_CEC (1 << 23) 384#define PORT_CEC (1 << 23)
385#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
386 PORT_RC | PORT_PLC | PORT_CEC)
387
388
385/* Cold Attach Status - xHC can set this bit to report device attached during 389/* Cold Attach Status - xHC can set this bit to report device attached during
386 * Sx state. Warm port reset should be perfomed to clear this bit and move port 390 * Sx state. Warm port reset should be perfomed to clear this bit and move port
387 * to connected state. 391 * to connected state.
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index cce22ff1c2eb..e9113238d9e3 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -46,6 +46,9 @@
46#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */ 46#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */
47#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */ 47#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */
48#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */ 48#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */
49#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 /* USB Product ID of Power Analyser CASSY */
50#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 /* USB Product ID of Converter Controller CASSY */
51#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 /* USB Product ID of Machine Test CASSY */
49#define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */ 52#define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */
50#define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */ 53#define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */
51#define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */ 54#define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */
@@ -88,6 +91,9 @@ static const struct usb_device_id ld_usb_table[] = {
88 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, 91 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
89 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, 92 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
90 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, 93 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
94 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
95 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
96 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
91 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, 97 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
92 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, 98 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
93 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, 99 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 343fa6ff9f4b..512c84adcace 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -414,8 +414,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
414 loff_t *ppos) 414 loff_t *ppos)
415{ 415{
416 struct usb_yurex *dev; 416 struct usb_yurex *dev;
417 int retval = 0; 417 int len = 0;
418 int bytes_read = 0;
419 char in_buffer[20]; 418 char in_buffer[20];
420 unsigned long flags; 419 unsigned long flags;
421 420
@@ -423,26 +422,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
423 422
424 mutex_lock(&dev->io_mutex); 423 mutex_lock(&dev->io_mutex);
425 if (!dev->interface) { /* already disconnected */ 424 if (!dev->interface) { /* already disconnected */
426 retval = -ENODEV; 425 mutex_unlock(&dev->io_mutex);
427 goto exit; 426 return -ENODEV;
428 } 427 }
429 428
430 spin_lock_irqsave(&dev->lock, flags); 429 spin_lock_irqsave(&dev->lock, flags);
431 bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu); 430 len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
432 spin_unlock_irqrestore(&dev->lock, flags); 431 spin_unlock_irqrestore(&dev->lock, flags);
433
434 if (*ppos < bytes_read) {
435 if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
436 retval = -EFAULT;
437 else {
438 retval = bytes_read - *ppos;
439 *ppos += bytes_read;
440 }
441 }
442
443exit:
444 mutex_unlock(&dev->io_mutex); 432 mutex_unlock(&dev->io_mutex);
445 return retval; 433
434 return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
446} 435}
447 436
448static ssize_t yurex_write(struct file *file, const char __user *user_buffer, 437static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index ad408251d955..108dcc5f5350 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -82,6 +82,8 @@ struct mon_reader_text {
82 82
83 wait_queue_head_t wait; 83 wait_queue_head_t wait;
84 int printf_size; 84 int printf_size;
85 size_t printf_offset;
86 size_t printf_togo;
85 char *printf_buf; 87 char *printf_buf;
86 struct mutex printf_lock; 88 struct mutex printf_lock;
87 89
@@ -373,73 +375,103 @@ err_alloc:
373 return rc; 375 return rc;
374} 376}
375 377
376/* 378static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp,
377 * For simplicity, we read one record in one system call and throw out 379 char __user * const buf, const size_t nbytes)
378 * what does not fit. This means that the following does not work: 380{
379 * dd if=/dbg/usbmon/0t bs=10 381 const size_t togo = min(nbytes, rp->printf_togo);
380 * Also, we do not allow seeks and do not bother advancing the offset. 382
381 */ 383 if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo))
384 return -EFAULT;
385 rp->printf_togo -= togo;
386 rp->printf_offset += togo;
387 return togo;
388}
389
390/* ppos is not advanced since the llseek operation is not permitted. */
382static ssize_t mon_text_read_t(struct file *file, char __user *buf, 391static ssize_t mon_text_read_t(struct file *file, char __user *buf,
383 size_t nbytes, loff_t *ppos) 392 size_t nbytes, loff_t *ppos)
384{ 393{
385 struct mon_reader_text *rp = file->private_data; 394 struct mon_reader_text *rp = file->private_data;
386 struct mon_event_text *ep; 395 struct mon_event_text *ep;
387 struct mon_text_ptr ptr; 396 struct mon_text_ptr ptr;
397 ssize_t ret;
388 398
389 if (IS_ERR(ep = mon_text_read_wait(rp, file)))
390 return PTR_ERR(ep);
391 mutex_lock(&rp->printf_lock); 399 mutex_lock(&rp->printf_lock);
392 ptr.cnt = 0; 400
393 ptr.pbuf = rp->printf_buf; 401 if (rp->printf_togo == 0) {
394 ptr.limit = rp->printf_size; 402
395 403 ep = mon_text_read_wait(rp, file);
396 mon_text_read_head_t(rp, &ptr, ep); 404 if (IS_ERR(ep)) {
397 mon_text_read_statset(rp, &ptr, ep); 405 mutex_unlock(&rp->printf_lock);
398 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, 406 return PTR_ERR(ep);
399 " %d", ep->length); 407 }
400 mon_text_read_data(rp, &ptr, ep); 408 ptr.cnt = 0;
401 409 ptr.pbuf = rp->printf_buf;
402 if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) 410 ptr.limit = rp->printf_size;
403 ptr.cnt = -EFAULT; 411
412 mon_text_read_head_t(rp, &ptr, ep);
413 mon_text_read_statset(rp, &ptr, ep);
414 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
415 " %d", ep->length);
416 mon_text_read_data(rp, &ptr, ep);
417
418 rp->printf_togo = ptr.cnt;
419 rp->printf_offset = 0;
420
421 kmem_cache_free(rp->e_slab, ep);
422 }
423
424 ret = mon_text_copy_to_user(rp, buf, nbytes);
404 mutex_unlock(&rp->printf_lock); 425 mutex_unlock(&rp->printf_lock);
405 kmem_cache_free(rp->e_slab, ep); 426 return ret;
406 return ptr.cnt;
407} 427}
408 428
429/* ppos is not advanced since the llseek operation is not permitted. */
409static ssize_t mon_text_read_u(struct file *file, char __user *buf, 430static ssize_t mon_text_read_u(struct file *file, char __user *buf,
410 size_t nbytes, loff_t *ppos) 431 size_t nbytes, loff_t *ppos)
411{ 432{
412 struct mon_reader_text *rp = file->private_data; 433 struct mon_reader_text *rp = file->private_data;
413 struct mon_event_text *ep; 434 struct mon_event_text *ep;
414 struct mon_text_ptr ptr; 435 struct mon_text_ptr ptr;
436 ssize_t ret;
415 437
416 if (IS_ERR(ep = mon_text_read_wait(rp, file)))
417 return PTR_ERR(ep);
418 mutex_lock(&rp->printf_lock); 438 mutex_lock(&rp->printf_lock);
419 ptr.cnt = 0;
420 ptr.pbuf = rp->printf_buf;
421 ptr.limit = rp->printf_size;
422 439
423 mon_text_read_head_u(rp, &ptr, ep); 440 if (rp->printf_togo == 0) {
424 if (ep->type == 'E') { 441
425 mon_text_read_statset(rp, &ptr, ep); 442 ep = mon_text_read_wait(rp, file);
426 } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { 443 if (IS_ERR(ep)) {
427 mon_text_read_isostat(rp, &ptr, ep); 444 mutex_unlock(&rp->printf_lock);
428 mon_text_read_isodesc(rp, &ptr, ep); 445 return PTR_ERR(ep);
429 } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { 446 }
430 mon_text_read_intstat(rp, &ptr, ep); 447 ptr.cnt = 0;
431 } else { 448 ptr.pbuf = rp->printf_buf;
432 mon_text_read_statset(rp, &ptr, ep); 449 ptr.limit = rp->printf_size;
450
451 mon_text_read_head_u(rp, &ptr, ep);
452 if (ep->type == 'E') {
453 mon_text_read_statset(rp, &ptr, ep);
454 } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
455 mon_text_read_isostat(rp, &ptr, ep);
456 mon_text_read_isodesc(rp, &ptr, ep);
457 } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
458 mon_text_read_intstat(rp, &ptr, ep);
459 } else {
460 mon_text_read_statset(rp, &ptr, ep);
461 }
462 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
463 " %d", ep->length);
464 mon_text_read_data(rp, &ptr, ep);
465
466 rp->printf_togo = ptr.cnt;
467 rp->printf_offset = 0;
468
469 kmem_cache_free(rp->e_slab, ep);
433 } 470 }
434 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
435 " %d", ep->length);
436 mon_text_read_data(rp, &ptr, ep);
437 471
438 if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) 472 ret = mon_text_copy_to_user(rp, buf, nbytes);
439 ptr.cnt = -EFAULT;
440 mutex_unlock(&rp->printf_lock); 473 mutex_unlock(&rp->printf_lock);
441 kmem_cache_free(rp->e_slab, ep); 474 return ret;
442 return ptr.cnt;
443} 475}
444 476
445static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, 477static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 054d8adff5ab..4fa5565f11aa 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1781,6 +1781,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1781 int vbus; 1781 int vbus;
1782 u8 devctl; 1782 u8 devctl;
1783 1783
1784 pm_runtime_get_sync(dev);
1784 spin_lock_irqsave(&musb->lock, flags); 1785 spin_lock_irqsave(&musb->lock, flags);
1785 val = musb->a_wait_bcon; 1786 val = musb->a_wait_bcon;
1786 vbus = musb_platform_get_vbus_status(musb); 1787 vbus = musb_platform_get_vbus_status(musb);
@@ -1794,6 +1795,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1794 vbus = 0; 1795 vbus = 0;
1795 } 1796 }
1796 spin_unlock_irqrestore(&musb->lock, flags); 1797 spin_unlock_irqrestore(&musb->lock, flags);
1798 pm_runtime_put_sync(dev);
1797 1799
1798 return sprintf(buf, "Vbus %s, timeout %lu msec\n", 1800 return sprintf(buf, "Vbus %s, timeout %lu msec\n",
1799 vbus ? "on" : "off", val); 1801 vbus ? "on" : "off", val);
@@ -2528,7 +2530,8 @@ static int musb_resume(struct device *dev)
2528 pm_runtime_set_active(dev); 2530 pm_runtime_set_active(dev);
2529 pm_runtime_enable(dev); 2531 pm_runtime_enable(dev);
2530 2532
2531 musb_start(musb); 2533 musb_enable_interrupts(musb);
2534 musb_platform_enable(musb);
2532 2535
2533 return 0; 2536 return 0;
2534} 2537}
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 10d30afe4a3c..a0d1417362cd 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -114,15 +114,19 @@ static int service_tx_status_request(
114 } 114 }
115 115
116 is_in = epnum & USB_DIR_IN; 116 is_in = epnum & USB_DIR_IN;
117 if (is_in) { 117 epnum &= 0x0f;
118 epnum &= 0x0f; 118 if (epnum >= MUSB_C_NUM_EPS) {
119 handled = -EINVAL;
120 break;
121 }
122
123 if (is_in)
119 ep = &musb->endpoints[epnum].ep_in; 124 ep = &musb->endpoints[epnum].ep_in;
120 } else { 125 else
121 ep = &musb->endpoints[epnum].ep_out; 126 ep = &musb->endpoints[epnum].ep_out;
122 }
123 regs = musb->endpoints[epnum].regs; 127 regs = musb->endpoints[epnum].regs;
124 128
125 if (epnum >= MUSB_C_NUM_EPS || !ep->desc) { 129 if (!ep->desc) {
126 handled = -EINVAL; 130 handled = -EINVAL;
127 break; 131 break;
128 } 132 }
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 22358ab524b0..4926d89c787d 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1054,7 +1054,9 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
1054 /* set tx_reinit and schedule the next qh */ 1054 /* set tx_reinit and schedule the next qh */
1055 ep->tx_reinit = 1; 1055 ep->tx_reinit = 1;
1056 } 1056 }
1057 musb_start_urb(musb, is_in, next_qh); 1057
1058 if (next_qh)
1059 musb_start_urb(musb, is_in, next_qh);
1058 } 1060 }
1059} 1061}
1060 1062
@@ -2587,8 +2589,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
2587{ 2589{
2588 struct musb *musb = hcd_to_musb(hcd); 2590 struct musb *musb = hcd_to_musb(hcd);
2589 u8 devctl; 2591 u8 devctl;
2592 int ret;
2590 2593
2591 musb_port_suspend(musb, true); 2594 ret = musb_port_suspend(musb, true);
2595 if (ret)
2596 return ret;
2592 2597
2593 if (!is_host_active(musb)) 2598 if (!is_host_active(musb))
2594 return 0; 2599 return 0;
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 7bbf01bf4bb0..54d02ed032df 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -92,7 +92,7 @@ extern void musb_host_rx(struct musb *, u8);
92extern void musb_root_disconnect(struct musb *musb); 92extern void musb_root_disconnect(struct musb *musb);
93extern void musb_host_resume_root_hub(struct musb *musb); 93extern void musb_host_resume_root_hub(struct musb *musb);
94extern void musb_host_poke_root_hub(struct musb *musb); 94extern void musb_host_poke_root_hub(struct musb *musb);
95extern void musb_port_suspend(struct musb *musb, bool do_suspend); 95extern int musb_port_suspend(struct musb *musb, bool do_suspend);
96extern void musb_port_reset(struct musb *musb, bool do_reset); 96extern void musb_port_reset(struct musb *musb, bool do_reset);
97extern void musb_host_finish_resume(struct work_struct *work); 97extern void musb_host_finish_resume(struct work_struct *work);
98#else 98#else
@@ -124,7 +124,10 @@ static inline void musb_root_disconnect(struct musb *musb) {}
124static inline void musb_host_resume_root_hub(struct musb *musb) {} 124static inline void musb_host_resume_root_hub(struct musb *musb) {}
125static inline void musb_host_poll_rh_status(struct musb *musb) {} 125static inline void musb_host_poll_rh_status(struct musb *musb) {}
126static inline void musb_host_poke_root_hub(struct musb *musb) {} 126static inline void musb_host_poke_root_hub(struct musb *musb) {}
127static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {} 127static inline int musb_port_suspend(struct musb *musb, bool do_suspend)
128{
129 return 0;
130}
128static inline void musb_port_reset(struct musb *musb, bool do_reset) {} 131static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
129static inline void musb_host_finish_resume(struct work_struct *work) {} 132static inline void musb_host_finish_resume(struct work_struct *work) {}
130#endif 133#endif
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 92d5f718659b..ac5458a69de5 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -74,14 +74,14 @@ void musb_host_finish_resume(struct work_struct *work)
74 spin_unlock_irqrestore(&musb->lock, flags); 74 spin_unlock_irqrestore(&musb->lock, flags);
75} 75}
76 76
77void musb_port_suspend(struct musb *musb, bool do_suspend) 77int musb_port_suspend(struct musb *musb, bool do_suspend)
78{ 78{
79 struct usb_otg *otg = musb->xceiv->otg; 79 struct usb_otg *otg = musb->xceiv->otg;
80 u8 power; 80 u8 power;
81 void __iomem *mbase = musb->mregs; 81 void __iomem *mbase = musb->mregs;
82 82
83 if (!is_host_active(musb)) 83 if (!is_host_active(musb))
84 return; 84 return 0;
85 85
86 /* NOTE: this doesn't necessarily put PHY into low power mode, 86 /* NOTE: this doesn't necessarily put PHY into low power mode,
87 * turning off its clock; that's a function of PHY integration and 87 * turning off its clock; that's a function of PHY integration and
@@ -92,16 +92,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
92 if (do_suspend) { 92 if (do_suspend) {
93 int retries = 10000; 93 int retries = 10000;
94 94
95 power &= ~MUSB_POWER_RESUME; 95 if (power & MUSB_POWER_RESUME)
96 power |= MUSB_POWER_SUSPENDM; 96 return -EBUSY;
97 musb_writeb(mbase, MUSB_POWER, power);
98 97
99 /* Needed for OPT A tests */ 98 if (!(power & MUSB_POWER_SUSPENDM)) {
100 power = musb_readb(mbase, MUSB_POWER); 99 power |= MUSB_POWER_SUSPENDM;
101 while (power & MUSB_POWER_SUSPENDM) { 100 musb_writeb(mbase, MUSB_POWER, power);
101
102 /* Needed for OPT A tests */
102 power = musb_readb(mbase, MUSB_POWER); 103 power = musb_readb(mbase, MUSB_POWER);
103 if (retries-- < 1) 104 while (power & MUSB_POWER_SUSPENDM) {
104 break; 105 power = musb_readb(mbase, MUSB_POWER);
106 if (retries-- < 1)
107 break;
108 }
105 } 109 }
106 110
107 dev_dbg(musb->controller, "Root port suspended, power %02x\n", power); 111 dev_dbg(musb->controller, "Root port suspended, power %02x\n", power);
@@ -138,6 +142,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
138 schedule_delayed_work(&musb->finish_resume_work, 142 schedule_delayed_work(&musb->finish_resume_work,
139 msecs_to_jiffies(USB_RESUME_TIMEOUT)); 143 msecs_to_jiffies(USB_RESUME_TIMEOUT));
140 } 144 }
145 return 0;
141} 146}
142 147
143void musb_port_reset(struct musb *musb, bool do_reset) 148void musb_port_reset(struct musb *musb, bool do_reset)
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index d0b6a1cd7f62..c92a295049ad 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -207,9 +207,6 @@ static int ux500_dma_channel_program(struct dma_channel *channel,
207 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || 207 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
208 channel->status == MUSB_DMA_STATUS_BUSY); 208 channel->status == MUSB_DMA_STATUS_BUSY);
209 209
210 if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len))
211 return false;
212
213 channel->status = MUSB_DMA_STATUS_BUSY; 210 channel->status = MUSB_DMA_STATUS_BUSY;
214 channel->actual_len = 0; 211 channel->actual_len = 0;
215 ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len); 212 ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 8fbb08e3e754..03b1aade253a 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -141,6 +141,7 @@ config USB_MSM_OTG
141 tristate "Qualcomm on-chip USB OTG controller support" 141 tristate "Qualcomm on-chip USB OTG controller support"
142 depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST) 142 depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
143 depends on RESET_CONTROLLER 143 depends on RESET_CONTROLLER
144 depends on REGULATOR
144 depends on EXTCON 145 depends on EXTCON
145 select USB_PHY 146 select USB_PHY
146 help 147 help
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 8bb9367ada45..6f37966ea54b 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -999,6 +999,10 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
999 if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1)) 999 if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
1000 goto usbhsf_pio_prepare_pop; 1000 goto usbhsf_pio_prepare_pop;
1001 1001
1002 /* return at this time if the pipe is running */
1003 if (usbhs_pipe_is_running(pipe))
1004 return 0;
1005
1002 usbhs_pipe_config_change_bfre(pipe, 1); 1006 usbhs_pipe_config_change_bfre(pipe, 1);
1003 1007
1004 ret = usbhsf_fifo_select(pipe, fifo, 0); 1008 ret = usbhsf_fifo_select(pipe, fifo, 0);
@@ -1189,6 +1193,7 @@ static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
1189 usbhsf_fifo_clear(pipe, fifo); 1193 usbhsf_fifo_clear(pipe, fifo);
1190 pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len); 1194 pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);
1191 1195
1196 usbhs_pipe_running(pipe, 0);
1192 usbhsf_dma_stop(pipe, fifo); 1197 usbhsf_dma_stop(pipe, fifo);
1193 usbhsf_dma_unmap(pkt); 1198 usbhsf_dma_unmap(pkt);
1194 usbhsf_fifo_unselect(pipe, pipe->fifo); 1199 usbhsf_fifo_unselect(pipe, pipe->fifo);
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 56ecb8b5115d..77c3ebe860c5 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -62,7 +62,9 @@ config USB_SERIAL_SIMPLE
62 - Fundamental Software dongle. 62 - Fundamental Software dongle.
63 - Google USB serial devices 63 - Google USB serial devices
64 - HP4x calculators 64 - HP4x calculators
65 - Libtransistor USB console
65 - a number of Motorola phones 66 - a number of Motorola phones
67 - Motorola Tetra devices
66 - Novatel Wireless GPS receivers 68 - Novatel Wireless GPS receivers
67 - Siemens USB/MPI adapter. 69 - Siemens USB/MPI adapter.
68 - ViVOtech ViVOpay USB device. 70 - ViVOtech ViVOpay USB device.
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 71133d96f97d..f73ea14e8173 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -118,7 +118,7 @@ static int ch341_control_in(struct usb_device *dev,
118 r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, 118 r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
119 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 119 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
120 value, index, buf, bufsize, DEFAULT_TIMEOUT); 120 value, index, buf, bufsize, DEFAULT_TIMEOUT);
121 if (r < bufsize) { 121 if (r < (int)bufsize) {
122 if (r >= 0) { 122 if (r >= 0) {
123 dev_err(&dev->dev, 123 dev_err(&dev->dev,
124 "short control message received (%d < %u)\n", 124 "short control message received (%d < %u)\n",
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index a4ab4fdf5ba3..97382301c393 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -33,7 +33,7 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *);
33static void cp210x_close(struct usb_serial_port *); 33static void cp210x_close(struct usb_serial_port *);
34static void cp210x_get_termios(struct tty_struct *, struct usb_serial_port *); 34static void cp210x_get_termios(struct tty_struct *, struct usb_serial_port *);
35static void cp210x_get_termios_port(struct usb_serial_port *port, 35static void cp210x_get_termios_port(struct usb_serial_port *port,
36 unsigned int *cflagp, unsigned int *baudp); 36 tcflag_t *cflagp, unsigned int *baudp);
37static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *, 37static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
38 struct ktermios *); 38 struct ktermios *);
39static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *, 39static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
@@ -91,6 +91,9 @@ static const struct usb_device_id id_table[] = {
91 { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ 91 { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
92 { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ 92 { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
93 { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */ 93 { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
94 { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
95 { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
96 { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
94 { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ 97 { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
95 { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ 98 { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
96 { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ 99 { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -108,6 +111,9 @@ static const struct usb_device_id id_table[] = {
108 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ 111 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
109 { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ 112 { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
110 { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ 113 { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
114 { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
115 { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
116 { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
111 { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */ 117 { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
112 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ 118 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
113 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ 119 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
@@ -120,7 +126,9 @@ static const struct usb_device_id id_table[] = {
120 { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ 126 { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
121 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ 127 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
122 { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ 128 { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
129 { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
123 { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */ 130 { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
131 { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
124 { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ 132 { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
125 { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ 133 { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
126 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 134 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -130,17 +138,24 @@ static const struct usb_device_id id_table[] = {
130 { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ 138 { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
131 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ 139 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
132 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ 140 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
141 { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
142 { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
133 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ 143 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
134 { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ 144 { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
135 { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ 145 { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
136 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ 146 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
147 { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
148 { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
137 { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ 149 { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
138 { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ 150 { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
139 { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ 151 { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
140 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 152 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
141 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 153 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
154 { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
142 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ 155 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
143 { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ 156 { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
157 { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
158 { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
144 { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ 159 { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
145 { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ 160 { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
146 { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */ 161 { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
@@ -151,6 +166,7 @@ static const struct usb_device_id id_table[] = {
151 { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */ 166 { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
152 { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ 167 { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
153 { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ 168 { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
169 { USB_DEVICE(0x155A, 0x1006) }, /* ELDAT Easywave RX09 */
154 { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */ 170 { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
155 { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */ 171 { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
156 { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ 172 { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
@@ -209,6 +225,7 @@ static const struct usb_device_id id_table[] = {
209 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ 225 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
210 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ 226 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
211 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ 227 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
228 { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
212 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ 229 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
213 { } /* Terminating Entry */ 230 { } /* Terminating Entry */
214}; 231};
@@ -513,7 +530,7 @@ static void cp210x_get_termios(struct tty_struct *tty,
513 &tty->termios.c_cflag, &baud); 530 &tty->termios.c_cflag, &baud);
514 tty_encode_baud_rate(tty, baud, baud); 531 tty_encode_baud_rate(tty, baud, baud);
515 } else { 532 } else {
516 unsigned int cflag; 533 tcflag_t cflag;
517 cflag = 0; 534 cflag = 0;
518 cp210x_get_termios_port(port, &cflag, &baud); 535 cp210x_get_termios_port(port, &cflag, &baud);
519 } 536 }
@@ -524,10 +541,11 @@ static void cp210x_get_termios(struct tty_struct *tty,
524 * This is the heart of cp210x_get_termios which always uses a &usb_serial_port. 541 * This is the heart of cp210x_get_termios which always uses a &usb_serial_port.
525 */ 542 */
526static void cp210x_get_termios_port(struct usb_serial_port *port, 543static void cp210x_get_termios_port(struct usb_serial_port *port,
527 unsigned int *cflagp, unsigned int *baudp) 544 tcflag_t *cflagp, unsigned int *baudp)
528{ 545{
529 struct device *dev = &port->dev; 546 struct device *dev = &port->dev;
530 unsigned int cflag, modem_ctl[4]; 547 tcflag_t cflag;
548 unsigned int modem_ctl[4];
531 unsigned int baud; 549 unsigned int baud;
532 unsigned int bits; 550 unsigned int bits;
533 551
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 64fe9dc25ed4..3e5b189a79b4 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -773,6 +773,7 @@ static const struct usb_device_id id_table_combined[] = {
773 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, 773 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
774 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, 774 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
775 { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) }, 775 { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
776 { USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) },
776 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, 777 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
777 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, 778 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
778 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, 779 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
@@ -935,6 +936,7 @@ static const struct usb_device_id id_table_combined[] = {
935 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, 936 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
936 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, 937 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
937 { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) }, 938 { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) },
939 { USB_DEVICE(FTDI_VID, FTDI_FHE_PID) },
938 { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, 940 { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
939 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), 941 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
940 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 942 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
@@ -1909,7 +1911,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
1909 return ftdi_jtag_probe(serial); 1911 return ftdi_jtag_probe(serial);
1910 1912
1911 if (udev->product && 1913 if (udev->product &&
1912 (!strcmp(udev->product, "BeagleBone/XDS100V2") || 1914 (!strcmp(udev->product, "Arrow USB Blaster") ||
1915 !strcmp(udev->product, "BeagleBone/XDS100V2") ||
1913 !strcmp(udev->product, "SNAP Connect E10"))) 1916 !strcmp(udev->product, "SNAP Connect E10")))
1914 return ftdi_jtag_probe(serial); 1917 return ftdi_jtag_probe(serial);
1915 1918
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 543d2801632b..76a10b222ff9 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -922,6 +922,9 @@
922/* 922/*
923 * RT Systems programming cables for various ham radios 923 * RT Systems programming cables for various ham radios
924 */ 924 */
925/* This device uses the VID of FTDI */
926#define RTSYSTEMS_USB_VX8_PID 0x9e50 /* USB-VX8 USB to 7 pin modular plug for Yaesu VX-8 radio */
927
925#define RTSYSTEMS_VID 0x2100 /* Vendor ID */ 928#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
926#define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */ 929#define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */
927#define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */ 930#define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */
@@ -1441,6 +1444,12 @@
1441#define FTDI_CINTERION_MC55I_PID 0xA951 1444#define FTDI_CINTERION_MC55I_PID 0xA951
1442 1445
1443/* 1446/*
1447 * Product: FirmwareHubEmulator
1448 * Manufacturer: Harman Becker Automotive Systems
1449 */
1450#define FTDI_FHE_PID 0xA9A0
1451
1452/*
1444 * Product: Comet Caller ID decoder 1453 * Product: Comet Caller ID decoder
1445 * Manufacturer: Crucible Technologies 1454 * Manufacturer: Crucible Technologies
1446 */ 1455 */
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 749e1b674145..6947985ccfb0 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2219,7 +2219,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port,
2219 /* something went wrong */ 2219 /* something went wrong */
2220 dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n", 2220 dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n",
2221 __func__, status); 2221 __func__, status);
2222 usb_kill_urb(urb);
2223 usb_free_urb(urb); 2222 usb_free_urb(urb);
2224 atomic_dec(&CmdUrbs); 2223 atomic_dec(&CmdUrbs);
2225 return status; 2224 return status;
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 6b0942428917..8a4047de43dc 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -373,8 +373,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
373 3, /* get pins */ 373 3, /* get pins */
374 USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN, 374 USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
375 0, 0, data, 1, 2000); 375 0, 0, data, 1, 2000);
376 if (rc >= 0) 376 if (rc == 1)
377 *value = *data; 377 *value = *data;
378 else if (rc >= 0)
379 rc = -EIO;
378 380
379 kfree(data); 381 kfree(data);
380 return rc; 382 return rc;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index ed883a7ad533..58ba6904a087 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -482,6 +482,9 @@ static void mos7840_control_callback(struct urb *urb)
482 } 482 }
483 483
484 dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length); 484 dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
485 if (urb->actual_length < 1)
486 goto out;
487
485 dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__, 488 dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
486 mos7840_port->MsrLsr, mos7840_port->port_num); 489 mos7840_port->MsrLsr, mos7840_port->port_num);
487 data = urb->transfer_buffer; 490 data = urb->transfer_buffer;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index a818c43a02ec..d982c455e18e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -236,6 +236,8 @@ static void option_instat_callback(struct urb *urb);
236/* These Quectel products use Qualcomm's vendor ID */ 236/* These Quectel products use Qualcomm's vendor ID */
237#define QUECTEL_PRODUCT_UC20 0x9003 237#define QUECTEL_PRODUCT_UC20 0x9003
238#define QUECTEL_PRODUCT_UC15 0x9090 238#define QUECTEL_PRODUCT_UC15 0x9090
239/* These u-blox products use Qualcomm's vendor ID */
240#define UBLOX_PRODUCT_R410M 0x90b2
239/* These Yuga products use Qualcomm's vendor ID */ 241/* These Yuga products use Qualcomm's vendor ID */
240#define YUGA_PRODUCT_CLM920_NC5 0x9625 242#define YUGA_PRODUCT_CLM920_NC5 0x9625
241 243
@@ -244,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
244#define QUECTEL_PRODUCT_EC21 0x0121 246#define QUECTEL_PRODUCT_EC21 0x0121
245#define QUECTEL_PRODUCT_EC25 0x0125 247#define QUECTEL_PRODUCT_EC25 0x0125
246#define QUECTEL_PRODUCT_BG96 0x0296 248#define QUECTEL_PRODUCT_BG96 0x0296
249#define QUECTEL_PRODUCT_EP06 0x0306
247 250
248#define CMOTECH_VENDOR_ID 0x16d8 251#define CMOTECH_VENDOR_ID 0x16d8
249#define CMOTECH_PRODUCT_6001 0x6001 252#define CMOTECH_PRODUCT_6001 0x6001
@@ -383,6 +386,9 @@ static void option_instat_callback(struct urb *urb);
383#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 386#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
384#define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01 387#define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01
385 388
389/* Fujisoft products */
390#define FUJISOFT_PRODUCT_FS040U 0x9b02
391
386/* iBall 3.5G connect wireless modem */ 392/* iBall 3.5G connect wireless modem */
387#define IBALL_3_5G_CONNECT 0x9605 393#define IBALL_3_5G_CONNECT 0x9605
388 394
@@ -547,147 +553,15 @@ static void option_instat_callback(struct urb *urb);
547#define WETELECOM_PRODUCT_6802 0x6802 553#define WETELECOM_PRODUCT_6802 0x6802
548#define WETELECOM_PRODUCT_WMD300 0x6803 554#define WETELECOM_PRODUCT_WMD300 0x6803
549 555
550struct option_blacklist_info {
551 /* bitmask of interface numbers blacklisted for send_setup */
552 const unsigned long sendsetup;
553 /* bitmask of interface numbers that are reserved */
554 const unsigned long reserved;
555};
556
557static const struct option_blacklist_info four_g_w14_blacklist = {
558 .sendsetup = BIT(0) | BIT(1),
559};
560
561static const struct option_blacklist_info four_g_w100_blacklist = {
562 .sendsetup = BIT(1) | BIT(2),
563 .reserved = BIT(3),
564};
565
566static const struct option_blacklist_info alcatel_x200_blacklist = {
567 .sendsetup = BIT(0) | BIT(1),
568 .reserved = BIT(4),
569};
570
571static const struct option_blacklist_info zte_0037_blacklist = {
572 .sendsetup = BIT(0) | BIT(1),
573};
574
575static const struct option_blacklist_info zte_k3765_z_blacklist = {
576 .sendsetup = BIT(0) | BIT(1) | BIT(2),
577 .reserved = BIT(4),
578};
579
580static const struct option_blacklist_info zte_ad3812_z_blacklist = {
581 .sendsetup = BIT(0) | BIT(1) | BIT(2),
582};
583
584static const struct option_blacklist_info zte_mc2718_z_blacklist = {
585 .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
586};
587
588static const struct option_blacklist_info zte_mc2716_z_blacklist = {
589 .sendsetup = BIT(1) | BIT(2) | BIT(3),
590};
591
592static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
593 .reserved = BIT(2) | BIT(3) | BIT(4),
594};
595
596static const struct option_blacklist_info zte_me3620_xl_blacklist = {
597 .reserved = BIT(3) | BIT(4) | BIT(5),
598};
599
600static const struct option_blacklist_info zte_zm8620_x_blacklist = {
601 .reserved = BIT(3) | BIT(4) | BIT(5),
602};
603
604static const struct option_blacklist_info huawei_cdc12_blacklist = {
605 .reserved = BIT(1) | BIT(2),
606};
607
608static const struct option_blacklist_info net_intf0_blacklist = {
609 .reserved = BIT(0),
610};
611
612static const struct option_blacklist_info net_intf1_blacklist = {
613 .reserved = BIT(1),
614};
615
616static const struct option_blacklist_info net_intf2_blacklist = {
617 .reserved = BIT(2),
618};
619
620static const struct option_blacklist_info net_intf3_blacklist = {
621 .reserved = BIT(3),
622};
623
624static const struct option_blacklist_info net_intf4_blacklist = {
625 .reserved = BIT(4),
626};
627
628static const struct option_blacklist_info net_intf5_blacklist = {
629 .reserved = BIT(5),
630};
631
632static const struct option_blacklist_info net_intf6_blacklist = {
633 .reserved = BIT(6),
634};
635
636static const struct option_blacklist_info zte_mf626_blacklist = {
637 .sendsetup = BIT(0) | BIT(1),
638 .reserved = BIT(4),
639};
640
641static const struct option_blacklist_info zte_1255_blacklist = {
642 .reserved = BIT(3) | BIT(4),
643};
644
645static const struct option_blacklist_info simcom_sim7100e_blacklist = {
646 .reserved = BIT(5) | BIT(6),
647};
648
649static const struct option_blacklist_info telit_me910_blacklist = {
650 .sendsetup = BIT(0),
651 .reserved = BIT(1) | BIT(3),
652};
653
654static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
655 .sendsetup = BIT(0),
656 .reserved = BIT(3),
657};
658
659static const struct option_blacklist_info telit_le910_blacklist = {
660 .sendsetup = BIT(0),
661 .reserved = BIT(1) | BIT(2),
662};
663
664static const struct option_blacklist_info telit_le920_blacklist = {
665 .sendsetup = BIT(0),
666 .reserved = BIT(1) | BIT(5),
667};
668
669static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
670 .sendsetup = BIT(0),
671 .reserved = BIT(1),
672};
673 556
674static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { 557/* Device flags */
675 .sendsetup = BIT(2),
676 .reserved = BIT(0) | BIT(1) | BIT(3),
677};
678 558
679static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = { 559/* Interface does not support modem-control requests */
680 .sendsetup = BIT(0), 560#define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8)
681 .reserved = BIT(1) | BIT(2) | BIT(3),
682};
683 561
684static const struct option_blacklist_info cinterion_rmnet2_blacklist = { 562/* Interface is reserved */
685 .reserved = BIT(4) | BIT(5), 563#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
686};
687 564
688static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
689 .reserved = BIT(1) | BIT(4),
690};
691 565
692static const struct usb_device_id option_ids[] = { 566static const struct usb_device_id option_ids[] = {
693 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 567 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -721,26 +595,26 @@ static const struct usb_device_id option_ids[] = {
721 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, 595 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
722 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, 596 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
723 { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42), 597 { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
724 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 598 .driver_info = RSVD(4) },
725 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) }, 599 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
726 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) }, 600 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) },
727 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, 601 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
728 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), 602 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
729 .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, 603 .driver_info = RSVD(1) },
730 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff), 604 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
731 .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, 605 .driver_info = RSVD(1) },
732 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff), 606 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
733 .driver_info = (kernel_ulong_t) &net_intf2_blacklist }, 607 .driver_info = RSVD(2) },
734 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, 608 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
735 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, 609 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
736 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), 610 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
737 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, 611 .driver_info = RSVD(1) | RSVD(2) },
738 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), 612 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
739 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, 613 .driver_info = RSVD(1) | RSVD(2) },
740 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */ 614 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */
741 .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, 615 .driver_info = RSVD(1) },
742 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), 616 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
743 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, 617 .driver_info = RSVD(1) | RSVD(2) },
744 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) }, 618 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
745 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) }, 619 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
746 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) }, 620 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
@@ -1185,65 +1059,70 @@ static const struct usb_device_id option_ids[] = {
1185 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 1059 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
1186 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ 1060 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
1187 { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */ 1061 { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
1188 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 1062 .driver_info = RSVD(3) },
1189 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1063 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1190 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1064 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1191 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1065 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1192 /* Quectel products using Qualcomm vendor ID */ 1066 /* Quectel products using Qualcomm vendor ID */
1193 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, 1067 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
1194 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), 1068 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
1195 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1069 .driver_info = RSVD(4) },
1196 /* Yuga products use Qualcomm vendor ID */ 1070 /* Yuga products use Qualcomm vendor ID */
1197 { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5), 1071 { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
1198 .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist }, 1072 .driver_info = RSVD(1) | RSVD(4) },
1073 /* u-blox products using Qualcomm vendor ID */
1074 { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
1075 .driver_info = RSVD(1) | RSVD(3) },
1199 /* Quectel products using Quectel vendor ID */ 1076 /* Quectel products using Quectel vendor ID */
1200 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), 1077 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
1201 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1078 .driver_info = RSVD(4) },
1202 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), 1079 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
1203 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1080 .driver_info = RSVD(4) },
1204 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), 1081 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
1205 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1082 .driver_info = RSVD(4) },
1083 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
1084 .driver_info = RSVD(4) | RSVD(5) },
1206 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1085 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1207 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1086 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1208 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1087 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
1209 .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, 1088 .driver_info = RSVD(0) },
1210 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) }, 1089 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
1211 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) }, 1090 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
1212 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) }, 1091 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
1213 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S), 1092 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
1214 .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, 1093 .driver_info = RSVD(0) },
1215 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301), 1094 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
1216 .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, 1095 .driver_info = RSVD(0) },
1217 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628), 1096 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
1218 .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, 1097 .driver_info = RSVD(0) },
1219 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) }, 1098 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
1220 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) }, 1099 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
1221 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) }, 1100 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
1222 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S), 1101 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
1223 .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, 1102 .driver_info = RSVD(0) },
1224 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002), 1103 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
1225 .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, 1104 .driver_info = RSVD(0) },
1226 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K), 1105 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
1227 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1106 .driver_info = RSVD(4) },
1228 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004), 1107 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
1229 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 1108 .driver_info = RSVD(3) },
1230 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) }, 1109 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
1231 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629), 1110 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
1232 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 1111 .driver_info = RSVD(5) },
1233 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S), 1112 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
1234 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1113 .driver_info = RSVD(4) },
1235 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I), 1114 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
1236 .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, 1115 .driver_info = RSVD(0) },
1237 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212), 1116 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
1238 .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, 1117 .driver_info = RSVD(0) },
1239 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213), 1118 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
1240 .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, 1119 .driver_info = RSVD(0) },
1241 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251), 1120 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
1242 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, 1121 .driver_info = RSVD(1) },
1243 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252), 1122 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
1244 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, 1123 .driver_info = RSVD(1) },
1245 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253), 1124 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
1246 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, 1125 .driver_info = RSVD(1) },
1247 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, 1126 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
1248 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, 1127 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
1249 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, 1128 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
@@ -1251,38 +1130,38 @@ static const struct usb_device_id option_ids[] = {
1251 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, 1130 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
1252 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, 1131 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
1253 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), 1132 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
1254 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, 1133 .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
1255 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1), 1134 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
1256 .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, 1135 .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
1257 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2), 1136 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
1258 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, 1137 .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
1259 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), 1138 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
1260 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, 1139 .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
1261 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), 1140 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
1262 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, 1141 .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
1263 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), 1142 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
1264 .driver_info = (kernel_ulong_t)&telit_me910_blacklist }, 1143 .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
1265 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), 1144 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
1266 .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist }, 1145 .driver_info = NCTRL(0) | RSVD(3) },
1267 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), 1146 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1268 .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, 1147 .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
1269 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), 1148 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
1270 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, 1149 .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
1271 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), 1150 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
1272 .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, 1151 .driver_info = NCTRL(0) | RSVD(1) | RSVD(5) },
1273 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) }, 1152 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
1274 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208), 1153 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
1275 .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, 1154 .driver_info = NCTRL(0) | RSVD(1) },
1276 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211), 1155 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
1277 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, 1156 .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
1278 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212), 1157 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
1279 .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, 1158 .driver_info = NCTRL(0) | RSVD(1) },
1280 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, 1159 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
1281 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), 1160 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
1282 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, 1161 .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
1283 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ 1162 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
1284 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), 1163 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
1285 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, 1164 .driver_info = RSVD(1) },
1286 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) }, 1165 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
1287 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) }, 1166 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
1288 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) }, 1167 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
@@ -1298,58 +1177,58 @@ static const struct usb_device_id option_ids[] = {
1298 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) }, 1177 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
1299 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) }, 1178 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
1300 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff), 1179 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
1301 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, 1180 .driver_info = RSVD(1) },
1302 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, 1181 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
1303 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, 1182 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
1304 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, 1183 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
1305 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff), 1184 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
1306 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 1185 .driver_info = RSVD(3) },
1307 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, 1186 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
1308 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff), 1187 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
1309 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 1188 .driver_info = RSVD(3) },
1310 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, 1189 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
1311 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff), 1190 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
1312 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1191 .driver_info = RSVD(4) },
1313 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) }, 1192 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
1314 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) }, 1193 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
1315 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, 1194 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
1316 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff), 1195 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
1317 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, 1196 .driver_info = RSVD(1) },
1318 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, 1197 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
1319 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, 1198 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
1320 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, 1199 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
1321 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 1200 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff),
1322 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist }, 1201 .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
1323 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, 1202 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
1324 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, 1203 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
1325 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) }, 1204 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
1326 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff), 1205 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
1327 .driver_info = (kernel_ulong_t)&zte_0037_blacklist }, 1206 .driver_info = NCTRL(0) | NCTRL(1) },
1328 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) }, 1207 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
1329 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) }, 1208 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
1330 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) }, 1209 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
1331 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff), 1210 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
1332 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1211 .driver_info = RSVD(4) },
1333 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) }, 1212 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
1334 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) }, 1213 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
1335 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) }, 1214 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
1336 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff), 1215 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
1337 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 1216 .driver_info = RSVD(5) },
1338 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) }, 1217 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
1339 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, 1218 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
1340 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff), 1219 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
1341 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1220 .driver_info = RSVD(4) },
1342 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, 1221 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
1343 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff), 1222 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
1344 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, 1223 .driver_info = RSVD(1) },
1345 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) }, 1224 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
1346 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, 1225 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
1347 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff), 1226 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
1348 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1227 .driver_info = RSVD(4) },
1349 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, 1228 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
1350 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, 1229 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
1351 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff), 1230 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
1352 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1231 .driver_info = RSVD(4) },
1353 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) }, 1232 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
1354 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) }, 1233 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
1355 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, 1234 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
@@ -1374,26 +1253,26 @@ static const struct usb_device_id option_ids[] = {
1374 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) }, 1253 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
1375 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) }, 1254 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
1376 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff), 1255 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
1377 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1256 .driver_info = RSVD(4) },
1378 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) }, 1257 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
1379 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) }, 1258 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
1380 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) }, 1259 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
1381 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff), 1260 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
1382 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 1261 .driver_info = RSVD(5) },
1383 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) }, 1262 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
1384 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff), 1263 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
1385 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 1264 .driver_info = RSVD(5) },
1386 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff), 1265 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
1387 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 1266 .driver_info = RSVD(5) },
1388 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) }, 1267 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
1389 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff), 1268 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
1390 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1269 .driver_info = RSVD(4) },
1391 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff), 1270 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
1392 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 1271 .driver_info = RSVD(5) },
1393 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff), 1272 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
1394 .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, 1273 .driver_info = RSVD(6) },
1395 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff), 1274 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
1396 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 1275 .driver_info = RSVD(5) },
1397 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, 1276 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
1398 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) }, 1277 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) },
1399 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) }, 1278 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) },
@@ -1409,50 +1288,50 @@ static const struct usb_device_id option_ids[] = {
1409 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) }, 1288 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
1410 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, 1289 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
1411 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff), 1290 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
1412 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 1291 .driver_info = RSVD(5) },
1413 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff), 1292 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
1414 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 1293 .driver_info = RSVD(3) },
1415 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, 1294 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
1416 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, 1295 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
1417 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, 1296 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
1418 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) }, 1297 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
1419 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, 1298 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
1420 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), 1299 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
1421 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1300 .driver_info = RSVD(4) },
1422 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) }, 1301 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
1423 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */ 1302 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
1424 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1303 .driver_info = RSVD(4) },
1425 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) }, 1304 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
1426 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) }, 1305 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) },
1427 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */ 1306 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
1428 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, 1307 .driver_info = RSVD(1) },
1429 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) }, 1308 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) },
1430 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) }, 1309 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) },
1431 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) }, 1310 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
1432 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ 1311 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
1433 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 1312 .driver_info = RSVD(3) },
1434 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */ 1313 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
1435 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1314 .driver_info = RSVD(4) },
1436 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */ 1315 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
1437 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1316 .driver_info = RSVD(4) },
1438 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, 1317 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
1439 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff), 1318 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
1440 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1319 .driver_info = RSVD(4) },
1441 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, 1320 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
1442 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, 1321 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
1443 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */ 1322 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
1444 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1323 .driver_info = RSVD(4) },
1445 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, 1324 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
1446 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, 1325 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
1447 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), 1326 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
1448 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1327 .driver_info = RSVD(4) },
1449 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), 1328 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
1450 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1329 .driver_info = RSVD(4) },
1451 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff), 1330 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
1452 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1331 .driver_info = RSVD(4) },
1453 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) }, 1332 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) },
1454 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff), 1333 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
1455 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1334 .driver_info = RSVD(2) },
1456 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, 1335 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
1457 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, 1336 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
1458 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, 1337 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1569,23 +1448,23 @@ static const struct usb_device_id option_ids[] = {
1569 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) }, 1448 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
1570 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) }, 1449 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
1571 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff), 1450 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
1572 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1451 .driver_info = RSVD(4) },
1573 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) }, 1452 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
1574 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff), 1453 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
1575 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1454 .driver_info = RSVD(4) },
1576 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) }, 1455 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
1577 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) }, 1456 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
1578 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) }, 1457 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
1579 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) }, 1458 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
1580 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff), 1459 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
1581 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1460 .driver_info = RSVD(4) },
1582 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) }, 1461 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
1583 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff), 1462 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
1584 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1463 .driver_info = RSVD(4) },
1585 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff), 1464 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
1586 .driver_info = (kernel_ulong_t)&zte_1255_blacklist }, 1465 .driver_info = RSVD(3) | RSVD(4) },
1587 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff), 1466 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
1588 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1467 .driver_info = RSVD(4) },
1589 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) }, 1468 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
1590 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) }, 1469 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
1591 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) }, 1470 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
@@ -1600,7 +1479,7 @@ static const struct usb_device_id option_ids[] = {
1600 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, 1479 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
1601 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, 1480 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
1602 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff), 1481 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
1603 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 1482 .driver_info = RSVD(5) },
1604 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, 1483 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
1605 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, 1484 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
1606 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, 1485 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
@@ -1636,17 +1515,17 @@ static const struct usb_device_id option_ids[] = {
1636 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) }, 1515 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) },
1637 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) }, 1516 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) },
1638 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff), 1517 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
1639 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1518 .driver_info = RSVD(2) },
1640 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff), 1519 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
1641 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1520 .driver_info = RSVD(2) },
1642 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff), 1521 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
1643 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1522 .driver_info = RSVD(2) },
1644 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff), 1523 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
1645 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1524 .driver_info = RSVD(2) },
1646 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ 1525 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
1647 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1526 .driver_info = RSVD(2) },
1648 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ 1527 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
1649 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1528 .driver_info = RSVD(2) },
1650 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, 1529 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
1651 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, 1530 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
1652 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, 1531 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1664,8 +1543,8 @@ static const struct usb_device_id option_ids[] = {
1664 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) }, 1543 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
1665 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) }, 1544 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
1666 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) }, 1545 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
1667 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 1546 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff),
1668 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist }, 1547 .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4) },
1669 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, 1548 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
1670 1549
1671 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ 1550 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
@@ -1676,20 +1555,20 @@ static const struct usb_device_id option_ids[] = {
1676 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, 1555 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
1677 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, 1556 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
1678 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff), 1557 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
1679 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, 1558 .driver_info = RSVD(1) },
1680 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff), 1559 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
1681 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 1560 .driver_info = RSVD(3) },
1682 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff), 1561 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
1683 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 1562 .driver_info = RSVD(5) },
1684 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, 1563 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
1685 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, 1564 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
1686 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff), 1565 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
1687 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1566 .driver_info = RSVD(4) },
1688 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) }, 1567 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
1689 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff), 1568 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
1690 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 1569 .driver_info = RSVD(3) },
1691 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff), 1570 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
1692 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 1571 .driver_info = RSVD(3) },
1693 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) }, 1572 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
1694 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) }, 1573 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
1695 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) }, 1574 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
@@ -1841,19 +1720,19 @@ static const struct usb_device_id option_ids[] = {
1841 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, 1720 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
1842 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, 1721 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
1843 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), 1722 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
1844 .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist }, 1723 .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) | NCTRL(4) },
1845 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff), 1724 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
1846 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, 1725 .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) },
1847 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), 1726 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
1848 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, 1727 .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) },
1849 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L), 1728 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
1850 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist }, 1729 .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
1851 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM), 1730 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
1852 .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist }, 1731 .driver_info = RSVD(2) | RSVD(3) | RSVD(4) },
1853 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X), 1732 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
1854 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist }, 1733 .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
1855 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X), 1734 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
1856 .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist }, 1735 .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
1857 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, 1736 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
1858 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, 1737 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
1859 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, 1738 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
@@ -1873,35 +1752,34 @@ static const struct usb_device_id option_ids[] = {
1873 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, 1752 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
1874 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, 1753 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
1875 { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), 1754 { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1876 .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist }, 1755 .driver_info = RSVD(5) | RSVD(6) },
1877 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), 1756 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1878 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist 1757 .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
1879 },
1880 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), 1758 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
1881 .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, 1759 .driver_info = RSVD(6) },
1882 { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052), 1760 { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
1883 .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, 1761 .driver_info = RSVD(6) },
1884 { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6), 1762 { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
1885 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 1763 .driver_info = RSVD(3) },
1886 { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7), 1764 { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
1887 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 1765 .driver_info = RSVD(5) },
1888 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V), 1766 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
1889 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1767 .driver_info = RSVD(4) },
1890 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA), 1768 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
1891 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1769 .driver_info = RSVD(2) },
1892 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, 1770 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
1893 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, 1771 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
1894 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), 1772 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
1895 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist 1773 .driver_info = NCTRL(0) | NCTRL(1) },
1896 },
1897 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), 1774 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
1898 .driver_info = (kernel_ulong_t)&four_g_w100_blacklist 1775 .driver_info = NCTRL(1) | NCTRL(2) | RSVD(3) },
1899 }, 1776 {USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
1777 .driver_info = RSVD(3)},
1900 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, 1778 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
1901 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff), 1779 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
1902 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 1780 .driver_info = RSVD(3) },
1903 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff), 1781 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
1904 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1782 .driver_info = RSVD(4) },
1905 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, 1783 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
1906 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, 1784 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
1907 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, 1785 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -1927,14 +1805,14 @@ static const struct usb_device_id option_ids[] = {
1927 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, 1805 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
1928 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, 1806 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
1929 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8), 1807 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
1930 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1808 .driver_info = RSVD(4) },
1931 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) }, 1809 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
1932 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), 1810 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
1933 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1811 .driver_info = RSVD(4) },
1934 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff), 1812 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
1935 .driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist }, 1813 .driver_info = RSVD(4) | RSVD(5) },
1936 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff), 1814 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
1937 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1815 .driver_info = RSVD(4) },
1938 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) }, 1816 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
1939 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) }, 1817 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
1940 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 1818 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
@@ -1944,20 +1822,20 @@ static const struct usb_device_id option_ids[] = {
1944 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ 1822 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
1945 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, 1823 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
1946 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), 1824 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
1947 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1825 .driver_info = RSVD(4) },
1948 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), 1826 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
1949 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1827 .driver_info = RSVD(4) },
1950 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140), 1828 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
1951 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1829 .driver_info = RSVD(4) },
1952 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, 1830 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
1953 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155), 1831 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
1954 .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, 1832 .driver_info = RSVD(6) },
1955 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200), 1833 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
1956 .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, 1834 .driver_info = RSVD(6) },
1957 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160), 1835 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
1958 .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, 1836 .driver_info = RSVD(6) },
1959 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500), 1837 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
1960 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1838 .driver_info = RSVD(4) },
1961 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 1839 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1962 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ 1840 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1963 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, 1841 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -2034,9 +1912,9 @@ static const struct usb_device_id option_ids[] = {
2034 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, 1912 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
2035 { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */ 1913 { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
2036 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), 1914 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
2037 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1915 .driver_info = RSVD(4) },
2038 { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ 1916 { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
2039 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1917 .driver_info = RSVD(4) },
2040 { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, 1918 { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
2041 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */ 1919 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
2042 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */ 1920 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
@@ -2047,9 +1925,9 @@ static const struct usb_device_id option_ids[] = {
2047 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ 1925 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
2048 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */ 1926 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
2049 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ 1927 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
2050 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1928 .driver_info = RSVD(4) },
2051 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ 1929 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
2052 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1930 .driver_info = RSVD(4) },
2053 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1931 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
2054 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1932 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
2055 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ 1933 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
@@ -2109,7 +1987,7 @@ static int option_probe(struct usb_serial *serial,
2109 struct usb_interface_descriptor *iface_desc = 1987 struct usb_interface_descriptor *iface_desc =
2110 &serial->interface->cur_altsetting->desc; 1988 &serial->interface->cur_altsetting->desc;
2111 struct usb_device_descriptor *dev_desc = &serial->dev->descriptor; 1989 struct usb_device_descriptor *dev_desc = &serial->dev->descriptor;
2112 const struct option_blacklist_info *blacklist; 1990 unsigned long device_flags = id->driver_info;
2113 1991
2114 /* Never bind to the CD-Rom emulation interface */ 1992 /* Never bind to the CD-Rom emulation interface */
2115 if (iface_desc->bInterfaceClass == 0x08) 1993 if (iface_desc->bInterfaceClass == 0x08)
@@ -2120,9 +1998,7 @@ static int option_probe(struct usb_serial *serial,
2120 * the same class/subclass/protocol as the serial interfaces. Look at 1998 * the same class/subclass/protocol as the serial interfaces. Look at
2121 * the Windows driver .INF files for reserved interface numbers. 1999 * the Windows driver .INF files for reserved interface numbers.
2122 */ 2000 */
2123 blacklist = (void *)id->driver_info; 2001 if (device_flags & RSVD(iface_desc->bInterfaceNumber))
2124 if (blacklist && test_bit(iface_desc->bInterfaceNumber,
2125 &blacklist->reserved))
2126 return -ENODEV; 2002 return -ENODEV;
2127 /* 2003 /*
2128 * Don't bind network interface on Samsung GT-B3730, it is handled by 2004 * Don't bind network interface on Samsung GT-B3730, it is handled by
@@ -2133,8 +2009,8 @@ static int option_probe(struct usb_serial *serial,
2133 iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA) 2009 iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA)
2134 return -ENODEV; 2010 return -ENODEV;
2135 2011
2136 /* Store the blacklist info so we can use it during attach. */ 2012 /* Store the device flags so we can use them during attach. */
2137 usb_set_serial_data(serial, (void *)blacklist); 2013 usb_set_serial_data(serial, (void *)device_flags);
2138 2014
2139 return 0; 2015 return 0;
2140} 2016}
@@ -2142,22 +2018,21 @@ static int option_probe(struct usb_serial *serial,
2142static int option_attach(struct usb_serial *serial) 2018static int option_attach(struct usb_serial *serial)
2143{ 2019{
2144 struct usb_interface_descriptor *iface_desc; 2020 struct usb_interface_descriptor *iface_desc;
2145 const struct option_blacklist_info *blacklist;
2146 struct usb_wwan_intf_private *data; 2021 struct usb_wwan_intf_private *data;
2022 unsigned long device_flags;
2147 2023
2148 data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); 2024 data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
2149 if (!data) 2025 if (!data)
2150 return -ENOMEM; 2026 return -ENOMEM;
2151 2027
2152 /* Retrieve blacklist info stored at probe. */ 2028 /* Retrieve device flags stored at probe. */
2153 blacklist = usb_get_serial_data(serial); 2029 device_flags = (unsigned long)usb_get_serial_data(serial);
2154 2030
2155 iface_desc = &serial->interface->cur_altsetting->desc; 2031 iface_desc = &serial->interface->cur_altsetting->desc;
2156 2032
2157 if (!blacklist || !test_bit(iface_desc->bInterfaceNumber, 2033 if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
2158 &blacklist->sendsetup)) {
2159 data->use_send_setup = 1; 2034 data->use_send_setup = 1;
2160 } 2035
2161 spin_lock_init(&data->susp_lock); 2036 spin_lock_init(&data->susp_lock);
2162 2037
2163 usb_set_serial_data(serial, data); 2038 usb_set_serial_data(serial, data);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index a51b28379850..3da25ad267a2 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -39,6 +39,7 @@ static const struct usb_device_id id_table[] = {
39 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) }, 39 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
40 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) }, 40 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
41 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) }, 41 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
42 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) },
42 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) }, 43 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
43 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) }, 44 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
44 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) }, 45 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 3b5a15d1dc0d..123289085ee2 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -17,6 +17,7 @@
17#define PL2303_PRODUCT_ID_DCU11 0x1234 17#define PL2303_PRODUCT_ID_DCU11 0x1234
18#define PL2303_PRODUCT_ID_PHAROS 0xaaa0 18#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
19#define PL2303_PRODUCT_ID_RSAQ3 0xaaa2 19#define PL2303_PRODUCT_ID_RSAQ3 0xaaa2
20#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8
20#define PL2303_PRODUCT_ID_ALDIGA 0x0611 21#define PL2303_PRODUCT_ID_ALDIGA 0x0611
21#define PL2303_PRODUCT_ID_MMX 0x0612 22#define PL2303_PRODUCT_ID_MMX 0x0612
22#define PL2303_PRODUCT_ID_GPRS 0x0609 23#define PL2303_PRODUCT_ID_GPRS 0x0609
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index e98b6e57b703..2674da40d9cd 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -66,6 +66,11 @@ DEVICE(flashloader, FLASHLOADER_IDS);
66 0x01) } 66 0x01) }
67DEVICE(google, GOOGLE_IDS); 67DEVICE(google, GOOGLE_IDS);
68 68
69/* Libtransistor USB console */
70#define LIBTRANSISTOR_IDS() \
71 { USB_DEVICE(0x1209, 0x8b00) }
72DEVICE(libtransistor, LIBTRANSISTOR_IDS);
73
69/* ViVOpay USB Serial Driver */ 74/* ViVOpay USB Serial Driver */
70#define VIVOPAY_IDS() \ 75#define VIVOPAY_IDS() \
71 { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ 76 { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
@@ -80,6 +85,11 @@ DEVICE(vivopay, VIVOPAY_IDS);
80 { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */ 85 { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
81DEVICE(moto_modem, MOTO_IDS); 86DEVICE(moto_modem, MOTO_IDS);
82 87
88/* Motorola Tetra driver */
89#define MOTOROLA_TETRA_IDS() \
90 { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
91DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
92
83/* Novatel Wireless GPS driver */ 93/* Novatel Wireless GPS driver */
84#define NOVATEL_IDS() \ 94#define NOVATEL_IDS() \
85 { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ 95 { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
@@ -108,8 +118,10 @@ static struct usb_serial_driver * const serial_drivers[] = {
108 &funsoft_device, 118 &funsoft_device,
109 &flashloader_device, 119 &flashloader_device,
110 &google_device, 120 &google_device,
121 &libtransistor_device,
111 &vivopay_device, 122 &vivopay_device,
112 &moto_modem_device, 123 &moto_modem_device,
124 &motorola_tetra_device,
113 &novatel_gps_device, 125 &novatel_gps_device,
114 &hp4x_device, 126 &hp4x_device,
115 &suunto_device, 127 &suunto_device,
@@ -123,8 +135,10 @@ static const struct usb_device_id id_table[] = {
123 FUNSOFT_IDS(), 135 FUNSOFT_IDS(),
124 FLASHLOADER_IDS(), 136 FLASHLOADER_IDS(),
125 GOOGLE_IDS(), 137 GOOGLE_IDS(),
138 LIBTRANSISTOR_IDS(),
126 VIVOPAY_IDS(), 139 VIVOPAY_IDS(),
127 MOTO_IDS(), 140 MOTO_IDS(),
141 MOTOROLA_TETRA_IDS(),
128 NOVATEL_IDS(), 142 NOVATEL_IDS(),
129 HP4X_IDS(), 143 HP4X_IDS(),
130 SUUNTO_IDS(), 144 SUUNTO_IDS(),
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 337a0be89fcf..dbc3801b43eb 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -338,47 +338,48 @@ static int palm_os_3_probe(struct usb_serial *serial,
338 goto exit; 338 goto exit;
339 } 339 }
340 340
341 if (retval == sizeof(*connection_info)) { 341 if (retval != sizeof(*connection_info)) {
342 connection_info = (struct visor_connection_info *) 342 dev_err(dev, "Invalid connection information received from device\n");
343 transfer_buffer; 343 retval = -ENODEV;
344 344 goto exit;
345 num_ports = le16_to_cpu(connection_info->num_ports);
346 for (i = 0; i < num_ports; ++i) {
347 switch (
348 connection_info->connections[i].port_function_id) {
349 case VISOR_FUNCTION_GENERIC:
350 string = "Generic";
351 break;
352 case VISOR_FUNCTION_DEBUGGER:
353 string = "Debugger";
354 break;
355 case VISOR_FUNCTION_HOTSYNC:
356 string = "HotSync";
357 break;
358 case VISOR_FUNCTION_CONSOLE:
359 string = "Console";
360 break;
361 case VISOR_FUNCTION_REMOTE_FILE_SYS:
362 string = "Remote File System";
363 break;
364 default:
365 string = "unknown";
366 break;
367 }
368 dev_info(dev, "%s: port %d, is for %s use\n",
369 serial->type->description,
370 connection_info->connections[i].port, string);
371 }
372 } 345 }
373 /* 346
374 * Handle devices that report invalid stuff here. 347 connection_info = (struct visor_connection_info *)transfer_buffer;
375 */ 348
349 num_ports = le16_to_cpu(connection_info->num_ports);
350
351 /* Handle devices that report invalid stuff here. */
376 if (num_ports == 0 || num_ports > 2) { 352 if (num_ports == 0 || num_ports > 2) {
377 dev_warn(dev, "%s: No valid connect info available\n", 353 dev_warn(dev, "%s: No valid connect info available\n",
378 serial->type->description); 354 serial->type->description);
379 num_ports = 2; 355 num_ports = 2;
380 } 356 }
381 357
358 for (i = 0; i < num_ports; ++i) {
359 switch (connection_info->connections[i].port_function_id) {
360 case VISOR_FUNCTION_GENERIC:
361 string = "Generic";
362 break;
363 case VISOR_FUNCTION_DEBUGGER:
364 string = "Debugger";
365 break;
366 case VISOR_FUNCTION_HOTSYNC:
367 string = "HotSync";
368 break;
369 case VISOR_FUNCTION_CONSOLE:
370 string = "Console";
371 break;
372 case VISOR_FUNCTION_REMOTE_FILE_SYS:
373 string = "Remote File System";
374 break;
375 default:
376 string = "unknown";
377 break;
378 }
379 dev_info(dev, "%s: port %d, is for %s use\n",
380 serial->type->description,
381 connection_info->connections[i].port, string);
382 }
382 dev_info(dev, "%s: Number of ports: %d\n", serial->type->description, 383 dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
383 num_ports); 384 num_ports);
384 385
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 091e8ec7a6c0..962bb6376b0c 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1953,6 +1953,8 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag)
1953 bcb->CDB[0] = 0xEF; 1953 bcb->CDB[0] = 0xEF;
1954 1954
1955 result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0); 1955 result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
1956 if (us->srb != NULL)
1957 scsi_set_resid(us->srb, 0);
1956 info->BIN_FLAG = flag; 1958 info->BIN_FLAG = flag;
1957 kfree(buf); 1959 kfree(buf);
1958 1960
@@ -2306,21 +2308,22 @@ static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
2306 2308
2307static int ene_transport(struct scsi_cmnd *srb, struct us_data *us) 2309static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
2308{ 2310{
2309 int result = 0; 2311 int result = USB_STOR_XFER_GOOD;
2310 struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); 2312 struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
2311 2313
2312 /*US_DEBUG(usb_stor_show_command(us, srb)); */ 2314 /*US_DEBUG(usb_stor_show_command(us, srb)); */
2313 scsi_set_resid(srb, 0); 2315 scsi_set_resid(srb, 0);
2314 if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) { 2316 if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready)))
2315 result = ene_init(us); 2317 result = ene_init(us);
2316 } else { 2318 if (result == USB_STOR_XFER_GOOD) {
2319 result = USB_STOR_TRANSPORT_ERROR;
2317 if (info->SD_Status.Ready) 2320 if (info->SD_Status.Ready)
2318 result = sd_scsi_irp(us, srb); 2321 result = sd_scsi_irp(us, srb);
2319 2322
2320 if (info->MS_Status.Ready) 2323 if (info->MS_Status.Ready)
2321 result = ms_scsi_irp(us, srb); 2324 result = ms_scsi_irp(us, srb);
2322 } 2325 }
2323 return 0; 2326 return result;
2324} 2327}
2325 2328
2326static struct scsi_host_template ene_ub6250_host_template; 2329static struct scsi_host_template ene_ub6250_host_template;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index f952635ebe5f..6cac8f26b97a 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -1052,20 +1052,19 @@ static int uas_post_reset(struct usb_interface *intf)
1052 return 0; 1052 return 0;
1053 1053
1054 err = uas_configure_endpoints(devinfo); 1054 err = uas_configure_endpoints(devinfo);
1055 if (err) { 1055 if (err && err != -ENODEV)
1056 shost_printk(KERN_ERR, shost, 1056 shost_printk(KERN_ERR, shost,
1057 "%s: alloc streams error %d after reset", 1057 "%s: alloc streams error %d after reset",
1058 __func__, err); 1058 __func__, err);
1059 return 1;
1060 }
1061 1059
1060 /* we must unblock the host in every case lest we deadlock */
1062 spin_lock_irqsave(shost->host_lock, flags); 1061 spin_lock_irqsave(shost->host_lock, flags);
1063 scsi_report_bus_reset(shost, 0); 1062 scsi_report_bus_reset(shost, 0);
1064 spin_unlock_irqrestore(shost->host_lock, flags); 1063 spin_unlock_irqrestore(shost->host_lock, flags);
1065 1064
1066 scsi_unblock_requests(shost); 1065 scsi_unblock_requests(shost);
1067 1066
1068 return 0; 1067 return err ? 1 : 0;
1069} 1068}
1070 1069
1071static int uas_suspend(struct usb_interface *intf, pm_message_t message) 1070static int uas_suspend(struct usb_interface *intf, pm_message_t message)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c10eceb76c39..1a34d2a89de6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2142,6 +2142,13 @@ UNUSUAL_DEV( 0x22b8, 0x3010, 0x0001, 0x0001,
2142 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2142 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2143 US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ), 2143 US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ),
2144 2144
2145/* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */
2146UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117,
2147 "JMicron",
2148 "USB to ATA/ATAPI Bridge",
2149 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2150 US_FL_BROKEN_FUA ),
2151
2145/* Reported-by George Cherian <george.cherian@cavium.com> */ 2152/* Reported-by George Cherian <george.cherian@cavium.com> */
2146UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, 2153UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
2147 "JMicron", 2154 "JMicron",
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 266e2b0ce9a8..47ccd73a74f0 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -88,6 +88,7 @@ struct bus_id_priv {
88 struct stub_device *sdev; 88 struct stub_device *sdev;
89 struct usb_device *udev; 89 struct usb_device *udev;
90 char shutdown_busid; 90 char shutdown_busid;
91 spinlock_t busid_lock;
91}; 92};
92 93
93/* stub_priv is allocated from stub_priv_cache */ 94/* stub_priv is allocated from stub_priv_cache */
@@ -98,6 +99,7 @@ extern struct usb_device_driver stub_driver;
98 99
99/* stub_main.c */ 100/* stub_main.c */
100struct bus_id_priv *get_busid_priv(const char *busid); 101struct bus_id_priv *get_busid_priv(const char *busid);
102void put_busid_priv(struct bus_id_priv *bid);
101int del_match_busid(char *busid); 103int del_match_busid(char *busid);
102void stub_device_cleanup_urbs(struct stub_device *sdev); 104void stub_device_cleanup_urbs(struct stub_device *sdev);
103 105
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index a3ec49bdc1e6..4aad99a59958 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -87,6 +87,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
87 goto err; 87 goto err;
88 88
89 sdev->ud.tcp_socket = socket; 89 sdev->ud.tcp_socket = socket;
90 sdev->ud.sockfd = sockfd;
90 91
91 spin_unlock_irq(&sdev->ud.lock); 92 spin_unlock_irq(&sdev->ud.lock);
92 93
@@ -163,8 +164,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
163 * step 1? 164 * step 1?
164 */ 165 */
165 if (ud->tcp_socket) { 166 if (ud->tcp_socket) {
166 dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n", 167 dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
167 ud->tcp_socket);
168 kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); 168 kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
169 } 169 }
170 170
@@ -187,6 +187,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
187 if (ud->tcp_socket) { 187 if (ud->tcp_socket) {
188 sockfd_put(ud->tcp_socket); 188 sockfd_put(ud->tcp_socket);
189 ud->tcp_socket = NULL; 189 ud->tcp_socket = NULL;
190 ud->sockfd = -1;
190 } 191 }
191 192
192 /* 3. free used data */ 193 /* 3. free used data */
@@ -281,6 +282,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
281 sdev->ud.status = SDEV_ST_AVAILABLE; 282 sdev->ud.status = SDEV_ST_AVAILABLE;
282 spin_lock_init(&sdev->ud.lock); 283 spin_lock_init(&sdev->ud.lock);
283 sdev->ud.tcp_socket = NULL; 284 sdev->ud.tcp_socket = NULL;
285 sdev->ud.sockfd = -1;
284 286
285 INIT_LIST_HEAD(&sdev->priv_init); 287 INIT_LIST_HEAD(&sdev->priv_init);
286 INIT_LIST_HEAD(&sdev->priv_tx); 288 INIT_LIST_HEAD(&sdev->priv_tx);
@@ -312,9 +314,9 @@ static int stub_probe(struct usb_device *udev)
312 struct stub_device *sdev = NULL; 314 struct stub_device *sdev = NULL;
313 const char *udev_busid = dev_name(&udev->dev); 315 const char *udev_busid = dev_name(&udev->dev);
314 struct bus_id_priv *busid_priv; 316 struct bus_id_priv *busid_priv;
315 int rc; 317 int rc = 0;
316 318
317 dev_dbg(&udev->dev, "Enter\n"); 319 dev_dbg(&udev->dev, "Enter probe\n");
318 320
319 /* check we should claim or not by busid_table */ 321 /* check we should claim or not by busid_table */
320 busid_priv = get_busid_priv(udev_busid); 322 busid_priv = get_busid_priv(udev_busid);
@@ -329,13 +331,15 @@ static int stub_probe(struct usb_device *udev)
329 * other matched drivers by the driver core. 331 * other matched drivers by the driver core.
330 * See driver_probe_device() in driver/base/dd.c 332 * See driver_probe_device() in driver/base/dd.c
331 */ 333 */
332 return -ENODEV; 334 rc = -ENODEV;
335 goto call_put_busid_priv;
333 } 336 }
334 337
335 if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) { 338 if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
336 dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n", 339 dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
337 udev_busid); 340 udev_busid);
338 return -ENODEV; 341 rc = -ENODEV;
342 goto call_put_busid_priv;
339 } 343 }
340 344
341 if (!strcmp(udev->bus->bus_name, "vhci_hcd")) { 345 if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
@@ -343,13 +347,16 @@ static int stub_probe(struct usb_device *udev)
343 "%s is attached on vhci_hcd... skip!\n", 347 "%s is attached on vhci_hcd... skip!\n",
344 udev_busid); 348 udev_busid);
345 349
346 return -ENODEV; 350 rc = -ENODEV;
351 goto call_put_busid_priv;
347 } 352 }
348 353
349 /* ok, this is my device */ 354 /* ok, this is my device */
350 sdev = stub_device_alloc(udev); 355 sdev = stub_device_alloc(udev);
351 if (!sdev) 356 if (!sdev) {
352 return -ENOMEM; 357 rc = -ENOMEM;
358 goto call_put_busid_priv;
359 }
353 360
354 dev_info(&udev->dev, 361 dev_info(&udev->dev,
355 "usbip-host: register new device (bus %u dev %u)\n", 362 "usbip-host: register new device (bus %u dev %u)\n",
@@ -381,7 +388,9 @@ static int stub_probe(struct usb_device *udev)
381 } 388 }
382 busid_priv->status = STUB_BUSID_ALLOC; 389 busid_priv->status = STUB_BUSID_ALLOC;
383 390
384 return 0; 391 rc = 0;
392 goto call_put_busid_priv;
393
385err_files: 394err_files:
386 usb_hub_release_port(udev->parent, udev->portnum, 395 usb_hub_release_port(udev->parent, udev->portnum,
387 (struct usb_dev_state *) udev); 396 (struct usb_dev_state *) udev);
@@ -392,6 +401,9 @@ err_port:
392 401
393 busid_priv->sdev = NULL; 402 busid_priv->sdev = NULL;
394 stub_device_free(sdev); 403 stub_device_free(sdev);
404
405call_put_busid_priv:
406 put_busid_priv(busid_priv);
395 return rc; 407 return rc;
396} 408}
397 409
@@ -417,7 +429,7 @@ static void stub_disconnect(struct usb_device *udev)
417 struct bus_id_priv *busid_priv; 429 struct bus_id_priv *busid_priv;
418 int rc; 430 int rc;
419 431
420 dev_dbg(&udev->dev, "Enter\n"); 432 dev_dbg(&udev->dev, "Enter disconnect\n");
421 433
422 busid_priv = get_busid_priv(udev_busid); 434 busid_priv = get_busid_priv(udev_busid);
423 if (!busid_priv) { 435 if (!busid_priv) {
@@ -430,7 +442,7 @@ static void stub_disconnect(struct usb_device *udev)
430 /* get stub_device */ 442 /* get stub_device */
431 if (!sdev) { 443 if (!sdev) {
432 dev_err(&udev->dev, "could not get device"); 444 dev_err(&udev->dev, "could not get device");
433 return; 445 goto call_put_busid_priv;
434 } 446 }
435 447
436 dev_set_drvdata(&udev->dev, NULL); 448 dev_set_drvdata(&udev->dev, NULL);
@@ -445,12 +457,12 @@ static void stub_disconnect(struct usb_device *udev)
445 (struct usb_dev_state *) udev); 457 (struct usb_dev_state *) udev);
446 if (rc) { 458 if (rc) {
447 dev_dbg(&udev->dev, "unable to release port\n"); 459 dev_dbg(&udev->dev, "unable to release port\n");
448 return; 460 goto call_put_busid_priv;
449 } 461 }
450 462
451 /* If usb reset is called from event handler */ 463 /* If usb reset is called from event handler */
452 if (busid_priv->sdev->ud.eh == current) 464 if (busid_priv->sdev->ud.eh == current)
453 return; 465 goto call_put_busid_priv;
454 466
455 /* shutdown the current connection */ 467 /* shutdown the current connection */
456 shutdown_busid(busid_priv); 468 shutdown_busid(busid_priv);
@@ -461,12 +473,11 @@ static void stub_disconnect(struct usb_device *udev)
461 busid_priv->sdev = NULL; 473 busid_priv->sdev = NULL;
462 stub_device_free(sdev); 474 stub_device_free(sdev);
463 475
464 if (busid_priv->status == STUB_BUSID_ALLOC) { 476 if (busid_priv->status == STUB_BUSID_ALLOC)
465 busid_priv->status = STUB_BUSID_ADDED; 477 busid_priv->status = STUB_BUSID_ADDED;
466 } else { 478
467 busid_priv->status = STUB_BUSID_OTHER; 479call_put_busid_priv:
468 del_match_busid((char *)udev_busid); 480 put_busid_priv(busid_priv);
469 }
470} 481}
471 482
472#ifdef CONFIG_PM 483#ifdef CONFIG_PM
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 325b4c05acdd..fa90496ca7a8 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -28,6 +28,7 @@
28#define DRIVER_DESC "USB/IP Host Driver" 28#define DRIVER_DESC "USB/IP Host Driver"
29 29
30struct kmem_cache *stub_priv_cache; 30struct kmem_cache *stub_priv_cache;
31
31/* 32/*
32 * busid_tables defines matching busids that usbip can grab. A user can change 33 * busid_tables defines matching busids that usbip can grab. A user can change
33 * dynamically what device is locally used and what device is exported to a 34 * dynamically what device is locally used and what device is exported to a
@@ -39,6 +40,8 @@ static spinlock_t busid_table_lock;
39 40
40static void init_busid_table(void) 41static void init_busid_table(void)
41{ 42{
43 int i;
44
42 /* 45 /*
43 * This also sets the bus_table[i].status to 46 * This also sets the bus_table[i].status to
44 * STUB_BUSID_OTHER, which is 0. 47 * STUB_BUSID_OTHER, which is 0.
@@ -46,6 +49,9 @@ static void init_busid_table(void)
46 memset(busid_table, 0, sizeof(busid_table)); 49 memset(busid_table, 0, sizeof(busid_table));
47 50
48 spin_lock_init(&busid_table_lock); 51 spin_lock_init(&busid_table_lock);
52
53 for (i = 0; i < MAX_BUSID; i++)
54 spin_lock_init(&busid_table[i].busid_lock);
49} 55}
50 56
51/* 57/*
@@ -57,15 +63,20 @@ static int get_busid_idx(const char *busid)
57 int i; 63 int i;
58 int idx = -1; 64 int idx = -1;
59 65
60 for (i = 0; i < MAX_BUSID; i++) 66 for (i = 0; i < MAX_BUSID; i++) {
67 spin_lock(&busid_table[i].busid_lock);
61 if (busid_table[i].name[0]) 68 if (busid_table[i].name[0])
62 if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) { 69 if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
63 idx = i; 70 idx = i;
71 spin_unlock(&busid_table[i].busid_lock);
64 break; 72 break;
65 } 73 }
74 spin_unlock(&busid_table[i].busid_lock);
75 }
66 return idx; 76 return idx;
67} 77}
68 78
79/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
69struct bus_id_priv *get_busid_priv(const char *busid) 80struct bus_id_priv *get_busid_priv(const char *busid)
70{ 81{
71 int idx; 82 int idx;
@@ -73,13 +84,22 @@ struct bus_id_priv *get_busid_priv(const char *busid)
73 84
74 spin_lock(&busid_table_lock); 85 spin_lock(&busid_table_lock);
75 idx = get_busid_idx(busid); 86 idx = get_busid_idx(busid);
76 if (idx >= 0) 87 if (idx >= 0) {
77 bid = &(busid_table[idx]); 88 bid = &(busid_table[idx]);
89 /* get busid_lock before returning */
90 spin_lock(&bid->busid_lock);
91 }
78 spin_unlock(&busid_table_lock); 92 spin_unlock(&busid_table_lock);
79 93
80 return bid; 94 return bid;
81} 95}
82 96
97void put_busid_priv(struct bus_id_priv *bid)
98{
99 if (bid)
100 spin_unlock(&bid->busid_lock);
101}
102
83static int add_match_busid(char *busid) 103static int add_match_busid(char *busid)
84{ 104{
85 int i; 105 int i;
@@ -92,15 +112,19 @@ static int add_match_busid(char *busid)
92 goto out; 112 goto out;
93 } 113 }
94 114
95 for (i = 0; i < MAX_BUSID; i++) 115 for (i = 0; i < MAX_BUSID; i++) {
116 spin_lock(&busid_table[i].busid_lock);
96 if (!busid_table[i].name[0]) { 117 if (!busid_table[i].name[0]) {
97 strlcpy(busid_table[i].name, busid, BUSID_SIZE); 118 strlcpy(busid_table[i].name, busid, BUSID_SIZE);
98 if ((busid_table[i].status != STUB_BUSID_ALLOC) && 119 if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
99 (busid_table[i].status != STUB_BUSID_REMOV)) 120 (busid_table[i].status != STUB_BUSID_REMOV))
100 busid_table[i].status = STUB_BUSID_ADDED; 121 busid_table[i].status = STUB_BUSID_ADDED;
101 ret = 0; 122 ret = 0;
123 spin_unlock(&busid_table[i].busid_lock);
102 break; 124 break;
103 } 125 }
126 spin_unlock(&busid_table[i].busid_lock);
127 }
104 128
105out: 129out:
106 spin_unlock(&busid_table_lock); 130 spin_unlock(&busid_table_lock);
@@ -121,6 +145,8 @@ int del_match_busid(char *busid)
121 /* found */ 145 /* found */
122 ret = 0; 146 ret = 0;
123 147
148 spin_lock(&busid_table[idx].busid_lock);
149
124 if (busid_table[idx].status == STUB_BUSID_OTHER) 150 if (busid_table[idx].status == STUB_BUSID_OTHER)
125 memset(busid_table[idx].name, 0, BUSID_SIZE); 151 memset(busid_table[idx].name, 0, BUSID_SIZE);
126 152
@@ -128,6 +154,7 @@ int del_match_busid(char *busid)
128 (busid_table[idx].status != STUB_BUSID_ADDED)) 154 (busid_table[idx].status != STUB_BUSID_ADDED))
129 busid_table[idx].status = STUB_BUSID_REMOV; 155 busid_table[idx].status = STUB_BUSID_REMOV;
130 156
157 spin_unlock(&busid_table[idx].busid_lock);
131out: 158out:
132 spin_unlock(&busid_table_lock); 159 spin_unlock(&busid_table_lock);
133 160
@@ -140,9 +167,12 @@ static ssize_t show_match_busid(struct device_driver *drv, char *buf)
140 char *out = buf; 167 char *out = buf;
141 168
142 spin_lock(&busid_table_lock); 169 spin_lock(&busid_table_lock);
143 for (i = 0; i < MAX_BUSID; i++) 170 for (i = 0; i < MAX_BUSID; i++) {
171 spin_lock(&busid_table[i].busid_lock);
144 if (busid_table[i].name[0]) 172 if (busid_table[i].name[0])
145 out += sprintf(out, "%s ", busid_table[i].name); 173 out += sprintf(out, "%s ", busid_table[i].name);
174 spin_unlock(&busid_table[i].busid_lock);
175 }
146 spin_unlock(&busid_table_lock); 176 spin_unlock(&busid_table_lock);
147 out += sprintf(out, "\n"); 177 out += sprintf(out, "\n");
148 178
@@ -184,6 +214,51 @@ static ssize_t store_match_busid(struct device_driver *dev, const char *buf,
184static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid, 214static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid,
185 store_match_busid); 215 store_match_busid);
186 216
217static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
218{
219 int ret;
220
221 /* device_attach() callers should hold parent lock for USB */
222 if (busid_priv->udev->dev.parent)
223 device_lock(busid_priv->udev->dev.parent);
224 ret = device_attach(&busid_priv->udev->dev);
225 if (busid_priv->udev->dev.parent)
226 device_unlock(busid_priv->udev->dev.parent);
227 if (ret < 0) {
228 dev_err(&busid_priv->udev->dev, "rebind failed\n");
229 return ret;
230 }
231 return 0;
232}
233
234static void stub_device_rebind(void)
235{
236#if IS_MODULE(CONFIG_USBIP_HOST)
237 struct bus_id_priv *busid_priv;
238 int i;
239
240 /* update status to STUB_BUSID_OTHER so probe ignores the device */
241 spin_lock(&busid_table_lock);
242 for (i = 0; i < MAX_BUSID; i++) {
243 if (busid_table[i].name[0] &&
244 busid_table[i].shutdown_busid) {
245 busid_priv = &(busid_table[i]);
246 busid_priv->status = STUB_BUSID_OTHER;
247 }
248 }
249 spin_unlock(&busid_table_lock);
250
251 /* now run rebind - no need to hold locks. driver files are removed */
252 for (i = 0; i < MAX_BUSID; i++) {
253 if (busid_table[i].name[0] &&
254 busid_table[i].shutdown_busid) {
255 busid_priv = &(busid_table[i]);
256 do_rebind(busid_table[i].name, busid_priv);
257 }
258 }
259#endif
260}
261
187static ssize_t rebind_store(struct device_driver *dev, const char *buf, 262static ssize_t rebind_store(struct device_driver *dev, const char *buf,
188 size_t count) 263 size_t count)
189{ 264{
@@ -201,11 +276,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
201 if (!bid) 276 if (!bid)
202 return -ENODEV; 277 return -ENODEV;
203 278
204 ret = device_attach(&bid->udev->dev); 279 /* mark the device for deletion so probe ignores it during rescan */
205 if (ret < 0) { 280 bid->status = STUB_BUSID_OTHER;
206 dev_err(&bid->udev->dev, "rebind failed\n"); 281 /* release the busid lock */
282 put_busid_priv(bid);
283
284 ret = do_rebind((char *) buf, bid);
285 if (ret < 0)
207 return ret; 286 return ret;
208 } 287
288 /* delete device from busid_table */
289 del_match_busid((char *) buf);
209 290
210 return count; 291 return count;
211} 292}
@@ -328,6 +409,9 @@ static void __exit usbip_host_exit(void)
328 */ 409 */
329 usb_deregister_device_driver(&stub_driver); 410 usb_deregister_device_driver(&stub_driver);
330 411
412 /* initiate scan to attach devices */
413 stub_device_rebind();
414
331 kmem_cache_destroy(stub_priv_cache); 415 kmem_cache_destroy(stub_priv_cache);
332} 416}
333 417
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 7de54a66044f..56cacb68040c 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -338,23 +338,26 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
338 return priv; 338 return priv;
339} 339}
340 340
341static int get_pipe(struct stub_device *sdev, int epnum, int dir) 341static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
342{ 342{
343 struct usb_device *udev = sdev->udev; 343 struct usb_device *udev = sdev->udev;
344 struct usb_host_endpoint *ep; 344 struct usb_host_endpoint *ep;
345 struct usb_endpoint_descriptor *epd = NULL; 345 struct usb_endpoint_descriptor *epd = NULL;
346 int epnum = pdu->base.ep;
347 int dir = pdu->base.direction;
348
349 if (epnum < 0 || epnum > 15)
350 goto err_ret;
346 351
347 if (dir == USBIP_DIR_IN) 352 if (dir == USBIP_DIR_IN)
348 ep = udev->ep_in[epnum & 0x7f]; 353 ep = udev->ep_in[epnum & 0x7f];
349 else 354 else
350 ep = udev->ep_out[epnum & 0x7f]; 355 ep = udev->ep_out[epnum & 0x7f];
351 if (!ep) { 356 if (!ep)
352 dev_err(&sdev->interface->dev, "no such endpoint?, %d\n", 357 goto err_ret;
353 epnum);
354 BUG();
355 }
356 358
357 epd = &ep->desc; 359 epd = &ep->desc;
360
358 if (usb_endpoint_xfer_control(epd)) { 361 if (usb_endpoint_xfer_control(epd)) {
359 if (dir == USBIP_DIR_OUT) 362 if (dir == USBIP_DIR_OUT)
360 return usb_sndctrlpipe(udev, epnum); 363 return usb_sndctrlpipe(udev, epnum);
@@ -377,15 +380,37 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
377 } 380 }
378 381
379 if (usb_endpoint_xfer_isoc(epd)) { 382 if (usb_endpoint_xfer_isoc(epd)) {
383 /* validate packet size and number of packets */
384 unsigned int maxp, packets, bytes;
385
386#define USB_EP_MAXP_MULT_SHIFT 11
387#define USB_EP_MAXP_MULT_MASK (3 << USB_EP_MAXP_MULT_SHIFT)
388#define USB_EP_MAXP_MULT(m) \
389 (((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
390
391 maxp = usb_endpoint_maxp(epd);
392 maxp *= (USB_EP_MAXP_MULT(
393 __le16_to_cpu(epd->wMaxPacketSize)) + 1);
394 bytes = pdu->u.cmd_submit.transfer_buffer_length;
395 packets = DIV_ROUND_UP(bytes, maxp);
396
397 if (pdu->u.cmd_submit.number_of_packets < 0 ||
398 pdu->u.cmd_submit.number_of_packets > packets) {
399 dev_err(&sdev->udev->dev,
400 "CMD_SUBMIT: isoc invalid num packets %d\n",
401 pdu->u.cmd_submit.number_of_packets);
402 return -1;
403 }
380 if (dir == USBIP_DIR_OUT) 404 if (dir == USBIP_DIR_OUT)
381 return usb_sndisocpipe(udev, epnum); 405 return usb_sndisocpipe(udev, epnum);
382 else 406 else
383 return usb_rcvisocpipe(udev, epnum); 407 return usb_rcvisocpipe(udev, epnum);
384 } 408 }
385 409
410err_ret:
386 /* NOT REACHED */ 411 /* NOT REACHED */
387 dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum); 412 dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
388 return 0; 413 return -1;
389} 414}
390 415
391static void masking_bogus_flags(struct urb *urb) 416static void masking_bogus_flags(struct urb *urb)
@@ -449,7 +474,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
449 struct stub_priv *priv; 474 struct stub_priv *priv;
450 struct usbip_device *ud = &sdev->ud; 475 struct usbip_device *ud = &sdev->ud;
451 struct usb_device *udev = sdev->udev; 476 struct usb_device *udev = sdev->udev;
452 int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction); 477 int pipe = get_pipe(sdev, pdu);
478
479 if (pipe == -1)
480 return;
453 481
454 priv = stub_priv_alloc(sdev, pdu); 482 priv = stub_priv_alloc(sdev, pdu);
455 if (!priv) 483 if (!priv)
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 9752b93f754e..1838f1b2c2fa 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -317,18 +317,14 @@ int usbip_recv(struct socket *sock, void *buf, int size)
317 struct msghdr msg; 317 struct msghdr msg;
318 struct kvec iov; 318 struct kvec iov;
319 int total = 0; 319 int total = 0;
320
321 /* for blocks of if (usbip_dbg_flag_xmit) */ 320 /* for blocks of if (usbip_dbg_flag_xmit) */
322 char *bp = buf; 321 char *bp = buf;
323 int osize = size; 322 int osize = size;
324 323
325 usbip_dbg_xmit("enter\n"); 324 if (!sock || !buf || !size)
326
327 if (!sock || !buf || !size) {
328 pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
329 size);
330 return -EINVAL; 325 return -EINVAL;
331 } 326
327 usbip_dbg_xmit("enter\n");
332 328
333 do { 329 do {
334 sock->sk->sk_allocation = GFP_NOIO; 330 sock->sk->sk_allocation = GFP_NOIO;
@@ -341,11 +337,8 @@ int usbip_recv(struct socket *sock, void *buf, int size)
341 msg.msg_flags = MSG_NOSIGNAL; 337 msg.msg_flags = MSG_NOSIGNAL;
342 338
343 result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL); 339 result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
344 if (result <= 0) { 340 if (result <= 0)
345 pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
346 sock, buf, size, result, total);
347 goto err; 341 goto err;
348 }
349 342
350 size -= result; 343 size -= result;
351 buf += result; 344 buf += result;
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index 86b08475c254..0fc5ace57c0e 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -248,7 +248,7 @@ enum usbip_side {
248#define SDEV_EVENT_ERROR_SUBMIT (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) 248#define SDEV_EVENT_ERROR_SUBMIT (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
249#define SDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) 249#define SDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
250 250
251#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE) 251#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
252#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) 252#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
253#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) 253#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
254#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) 254#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
@@ -261,6 +261,7 @@ struct usbip_device {
261 /* lock for status */ 261 /* lock for status */
262 spinlock_t lock; 262 spinlock_t lock;
263 263
264 int sockfd;
264 struct socket *tcp_socket; 265 struct socket *tcp_socket;
265 266
266 struct task_struct *tcp_rx; 267 struct task_struct *tcp_rx;
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index 64933b993d7a..2580a32bcdff 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -117,11 +117,12 @@ EXPORT_SYMBOL_GPL(usbip_event_add);
117int usbip_event_happened(struct usbip_device *ud) 117int usbip_event_happened(struct usbip_device *ud)
118{ 118{
119 int happened = 0; 119 int happened = 0;
120 unsigned long flags;
120 121
121 spin_lock(&ud->lock); 122 spin_lock_irqsave(&ud->lock, flags);
122 if (ud->event != 0) 123 if (ud->event != 0)
123 happened = 1; 124 happened = 1;
124 spin_unlock(&ud->lock); 125 spin_unlock_irqrestore(&ud->lock, flags);
125 126
126 return happened; 127 return happened;
127} 128}
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index f9af04d7f02f..4d68a1e9e878 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -121,9 +121,11 @@ static void dump_port_status_diff(u32 prev_status, u32 new_status)
121 121
122void rh_port_connect(int rhport, enum usb_device_speed speed) 122void rh_port_connect(int rhport, enum usb_device_speed speed)
123{ 123{
124 unsigned long flags;
125
124 usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport); 126 usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport);
125 127
126 spin_lock(&the_controller->lock); 128 spin_lock_irqsave(&the_controller->lock, flags);
127 129
128 the_controller->port_status[rhport] |= USB_PORT_STAT_CONNECTION 130 the_controller->port_status[rhport] |= USB_PORT_STAT_CONNECTION
129 | (1 << USB_PORT_FEAT_C_CONNECTION); 131 | (1 << USB_PORT_FEAT_C_CONNECTION);
@@ -139,22 +141,24 @@ void rh_port_connect(int rhport, enum usb_device_speed speed)
139 break; 141 break;
140 } 142 }
141 143
142 spin_unlock(&the_controller->lock); 144 spin_unlock_irqrestore(&the_controller->lock, flags);
143 145
144 usb_hcd_poll_rh_status(vhci_to_hcd(the_controller)); 146 usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
145} 147}
146 148
147static void rh_port_disconnect(int rhport) 149static void rh_port_disconnect(int rhport)
148{ 150{
151 unsigned long flags;
152
149 usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport); 153 usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport);
150 154
151 spin_lock(&the_controller->lock); 155 spin_lock_irqsave(&the_controller->lock, flags);
152 156
153 the_controller->port_status[rhport] &= ~USB_PORT_STAT_CONNECTION; 157 the_controller->port_status[rhport] &= ~USB_PORT_STAT_CONNECTION;
154 the_controller->port_status[rhport] |= 158 the_controller->port_status[rhport] |=
155 (1 << USB_PORT_FEAT_C_CONNECTION); 159 (1 << USB_PORT_FEAT_C_CONNECTION);
156 160
157 spin_unlock(&the_controller->lock); 161 spin_unlock_irqrestore(&the_controller->lock, flags);
158 usb_hcd_poll_rh_status(vhci_to_hcd(the_controller)); 162 usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
159} 163}
160 164
@@ -182,13 +186,14 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
182 int retval; 186 int retval;
183 int rhport; 187 int rhport;
184 int changed = 0; 188 int changed = 0;
189 unsigned long flags;
185 190
186 retval = DIV_ROUND_UP(VHCI_NPORTS + 1, 8); 191 retval = DIV_ROUND_UP(VHCI_NPORTS + 1, 8);
187 memset(buf, 0, retval); 192 memset(buf, 0, retval);
188 193
189 vhci = hcd_to_vhci(hcd); 194 vhci = hcd_to_vhci(hcd);
190 195
191 spin_lock(&vhci->lock); 196 spin_lock_irqsave(&vhci->lock, flags);
192 if (!HCD_HW_ACCESSIBLE(hcd)) { 197 if (!HCD_HW_ACCESSIBLE(hcd)) {
193 usbip_dbg_vhci_rh("hw accessible flag not on?\n"); 198 usbip_dbg_vhci_rh("hw accessible flag not on?\n");
194 goto done; 199 goto done;
@@ -209,7 +214,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
209 usb_hcd_resume_root_hub(hcd); 214 usb_hcd_resume_root_hub(hcd);
210 215
211done: 216done:
212 spin_unlock(&vhci->lock); 217 spin_unlock_irqrestore(&vhci->lock, flags);
213 return changed ? retval : 0; 218 return changed ? retval : 0;
214} 219}
215 220
@@ -236,6 +241,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
236 struct vhci_hcd *dum; 241 struct vhci_hcd *dum;
237 int retval = 0; 242 int retval = 0;
238 int rhport; 243 int rhport;
244 unsigned long flags;
239 245
240 u32 prev_port_status[VHCI_NPORTS]; 246 u32 prev_port_status[VHCI_NPORTS];
241 247
@@ -254,7 +260,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
254 260
255 dum = hcd_to_vhci(hcd); 261 dum = hcd_to_vhci(hcd);
256 262
257 spin_lock(&dum->lock); 263 spin_lock_irqsave(&dum->lock, flags);
258 264
259 /* store old status and compare now and old later */ 265 /* store old status and compare now and old later */
260 if (usbip_dbg_flag_vhci_rh) { 266 if (usbip_dbg_flag_vhci_rh) {
@@ -279,7 +285,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
279 case USB_PORT_FEAT_POWER: 285 case USB_PORT_FEAT_POWER:
280 usbip_dbg_vhci_rh( 286 usbip_dbg_vhci_rh(
281 " ClearPortFeature: USB_PORT_FEAT_POWER\n"); 287 " ClearPortFeature: USB_PORT_FEAT_POWER\n");
282 dum->port_status[rhport] = 0; 288 dum->port_status[rhport] &= ~USB_PORT_STAT_POWER;
283 dum->resuming = 0; 289 dum->resuming = 0;
284 break; 290 break;
285 case USB_PORT_FEAT_C_RESET: 291 case USB_PORT_FEAT_C_RESET:
@@ -408,7 +414,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
408 } 414 }
409 usbip_dbg_vhci_rh(" bye\n"); 415 usbip_dbg_vhci_rh(" bye\n");
410 416
411 spin_unlock(&dum->lock); 417 spin_unlock_irqrestore(&dum->lock, flags);
412 418
413 return retval; 419 return retval;
414} 420}
@@ -431,6 +437,7 @@ static void vhci_tx_urb(struct urb *urb)
431{ 437{
432 struct vhci_device *vdev = get_vdev(urb->dev); 438 struct vhci_device *vdev = get_vdev(urb->dev);
433 struct vhci_priv *priv; 439 struct vhci_priv *priv;
440 unsigned long flags;
434 441
435 if (!vdev) { 442 if (!vdev) {
436 pr_err("could not get virtual device"); 443 pr_err("could not get virtual device");
@@ -443,7 +450,7 @@ static void vhci_tx_urb(struct urb *urb)
443 return; 450 return;
444 } 451 }
445 452
446 spin_lock(&vdev->priv_lock); 453 spin_lock_irqsave(&vdev->priv_lock, flags);
447 454
448 priv->seqnum = atomic_inc_return(&the_controller->seqnum); 455 priv->seqnum = atomic_inc_return(&the_controller->seqnum);
449 if (priv->seqnum == 0xffff) 456 if (priv->seqnum == 0xffff)
@@ -457,7 +464,7 @@ static void vhci_tx_urb(struct urb *urb)
457 list_add_tail(&priv->list, &vdev->priv_tx); 464 list_add_tail(&priv->list, &vdev->priv_tx);
458 465
459 wake_up(&vdev->waitq_tx); 466 wake_up(&vdev->waitq_tx);
460 spin_unlock(&vdev->priv_lock); 467 spin_unlock_irqrestore(&vdev->priv_lock, flags);
461} 468}
462 469
463static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, 470static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
@@ -466,15 +473,16 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
466 struct device *dev = &urb->dev->dev; 473 struct device *dev = &urb->dev->dev;
467 int ret = 0; 474 int ret = 0;
468 struct vhci_device *vdev; 475 struct vhci_device *vdev;
476 unsigned long flags;
469 477
470 /* patch to usb_sg_init() is in 2.5.60 */ 478 /* patch to usb_sg_init() is in 2.5.60 */
471 BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length); 479 BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
472 480
473 spin_lock(&the_controller->lock); 481 spin_lock_irqsave(&the_controller->lock, flags);
474 482
475 if (urb->status != -EINPROGRESS) { 483 if (urb->status != -EINPROGRESS) {
476 dev_err(dev, "URB already unlinked!, status %d\n", urb->status); 484 dev_err(dev, "URB already unlinked!, status %d\n", urb->status);
477 spin_unlock(&the_controller->lock); 485 spin_unlock_irqrestore(&the_controller->lock, flags);
478 return urb->status; 486 return urb->status;
479 } 487 }
480 488
@@ -486,7 +494,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
486 vdev->ud.status == VDEV_ST_ERROR) { 494 vdev->ud.status == VDEV_ST_ERROR) {
487 dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport); 495 dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport);
488 spin_unlock(&vdev->ud.lock); 496 spin_unlock(&vdev->ud.lock);
489 spin_unlock(&the_controller->lock); 497 spin_unlock_irqrestore(&the_controller->lock, flags);
490 return -ENODEV; 498 return -ENODEV;
491 } 499 }
492 spin_unlock(&vdev->ud.lock); 500 spin_unlock(&vdev->ud.lock);
@@ -559,14 +567,14 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
559 567
560out: 568out:
561 vhci_tx_urb(urb); 569 vhci_tx_urb(urb);
562 spin_unlock(&the_controller->lock); 570 spin_unlock_irqrestore(&the_controller->lock, flags);
563 571
564 return 0; 572 return 0;
565 573
566no_need_xmit: 574no_need_xmit:
567 usb_hcd_unlink_urb_from_ep(hcd, urb); 575 usb_hcd_unlink_urb_from_ep(hcd, urb);
568no_need_unlink: 576no_need_unlink:
569 spin_unlock(&the_controller->lock); 577 spin_unlock_irqrestore(&the_controller->lock, flags);
570 if (!ret) 578 if (!ret)
571 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), 579 usb_hcd_giveback_urb(vhci_to_hcd(the_controller),
572 urb, urb->status); 580 urb, urb->status);
@@ -623,14 +631,15 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
623{ 631{
624 struct vhci_priv *priv; 632 struct vhci_priv *priv;
625 struct vhci_device *vdev; 633 struct vhci_device *vdev;
634 unsigned long flags;
626 635
627 spin_lock(&the_controller->lock); 636 spin_lock_irqsave(&the_controller->lock, flags);
628 637
629 priv = urb->hcpriv; 638 priv = urb->hcpriv;
630 if (!priv) { 639 if (!priv) {
631 /* URB was never linked! or will be soon given back by 640 /* URB was never linked! or will be soon given back by
632 * vhci_rx. */ 641 * vhci_rx. */
633 spin_unlock(&the_controller->lock); 642 spin_unlock_irqrestore(&the_controller->lock, flags);
634 return -EIDRM; 643 return -EIDRM;
635 } 644 }
636 645
@@ -639,7 +648,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
639 648
640 ret = usb_hcd_check_unlink_urb(hcd, urb, status); 649 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
641 if (ret) { 650 if (ret) {
642 spin_unlock(&the_controller->lock); 651 spin_unlock_irqrestore(&the_controller->lock, flags);
643 return ret; 652 return ret;
644 } 653 }
645 } 654 }
@@ -664,10 +673,10 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
664 */ 673 */
665 usb_hcd_unlink_urb_from_ep(hcd, urb); 674 usb_hcd_unlink_urb_from_ep(hcd, urb);
666 675
667 spin_unlock(&the_controller->lock); 676 spin_unlock_irqrestore(&the_controller->lock, flags);
668 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, 677 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
669 urb->status); 678 urb->status);
670 spin_lock(&the_controller->lock); 679 spin_lock_irqsave(&the_controller->lock, flags);
671 680
672 } else { 681 } else {
673 /* tcp connection is alive */ 682 /* tcp connection is alive */
@@ -679,7 +688,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
679 unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC); 688 unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC);
680 if (!unlink) { 689 if (!unlink) {
681 spin_unlock(&vdev->priv_lock); 690 spin_unlock(&vdev->priv_lock);
682 spin_unlock(&the_controller->lock); 691 spin_unlock_irqrestore(&the_controller->lock, flags);
683 usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC); 692 usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
684 return -ENOMEM; 693 return -ENOMEM;
685 } 694 }
@@ -698,7 +707,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
698 spin_unlock(&vdev->priv_lock); 707 spin_unlock(&vdev->priv_lock);
699 } 708 }
700 709
701 spin_unlock(&the_controller->lock); 710 spin_unlock_irqrestore(&the_controller->lock, flags);
702 711
703 usbip_dbg_vhci_hc("leave\n"); 712 usbip_dbg_vhci_hc("leave\n");
704 return 0; 713 return 0;
@@ -707,8 +716,9 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
707static void vhci_device_unlink_cleanup(struct vhci_device *vdev) 716static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
708{ 717{
709 struct vhci_unlink *unlink, *tmp; 718 struct vhci_unlink *unlink, *tmp;
719 unsigned long flags;
710 720
711 spin_lock(&the_controller->lock); 721 spin_lock_irqsave(&the_controller->lock, flags);
712 spin_lock(&vdev->priv_lock); 722 spin_lock(&vdev->priv_lock);
713 723
714 list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) { 724 list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
@@ -742,19 +752,19 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
742 list_del(&unlink->list); 752 list_del(&unlink->list);
743 753
744 spin_unlock(&vdev->priv_lock); 754 spin_unlock(&vdev->priv_lock);
745 spin_unlock(&the_controller->lock); 755 spin_unlock_irqrestore(&the_controller->lock, flags);
746 756
747 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, 757 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
748 urb->status); 758 urb->status);
749 759
750 spin_lock(&the_controller->lock); 760 spin_lock_irqsave(&the_controller->lock, flags);
751 spin_lock(&vdev->priv_lock); 761 spin_lock(&vdev->priv_lock);
752 762
753 kfree(unlink); 763 kfree(unlink);
754 } 764 }
755 765
756 spin_unlock(&vdev->priv_lock); 766 spin_unlock(&vdev->priv_lock);
757 spin_unlock(&the_controller->lock); 767 spin_unlock_irqrestore(&the_controller->lock, flags);
758} 768}
759 769
760/* 770/*
@@ -768,7 +778,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
768 778
769 /* need this? see stub_dev.c */ 779 /* need this? see stub_dev.c */
770 if (ud->tcp_socket) { 780 if (ud->tcp_socket) {
771 pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket); 781 pr_debug("shutdown sockfd %d\n", ud->sockfd);
772 kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); 782 kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
773 } 783 }
774 784
@@ -787,6 +797,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
787 if (vdev->ud.tcp_socket) { 797 if (vdev->ud.tcp_socket) {
788 sockfd_put(vdev->ud.tcp_socket); 798 sockfd_put(vdev->ud.tcp_socket);
789 vdev->ud.tcp_socket = NULL; 799 vdev->ud.tcp_socket = NULL;
800 vdev->ud.sockfd = -1;
790 } 801 }
791 pr_info("release socket\n"); 802 pr_info("release socket\n");
792 803
@@ -821,8 +832,9 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
821static void vhci_device_reset(struct usbip_device *ud) 832static void vhci_device_reset(struct usbip_device *ud)
822{ 833{
823 struct vhci_device *vdev = container_of(ud, struct vhci_device, ud); 834 struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
835 unsigned long flags;
824 836
825 spin_lock(&ud->lock); 837 spin_lock_irqsave(&ud->lock, flags);
826 838
827 vdev->speed = 0; 839 vdev->speed = 0;
828 vdev->devid = 0; 840 vdev->devid = 0;
@@ -833,17 +845,20 @@ static void vhci_device_reset(struct usbip_device *ud)
833 if (ud->tcp_socket) { 845 if (ud->tcp_socket) {
834 sockfd_put(ud->tcp_socket); 846 sockfd_put(ud->tcp_socket);
835 ud->tcp_socket = NULL; 847 ud->tcp_socket = NULL;
848 ud->sockfd = -1;
836 } 849 }
837 ud->status = VDEV_ST_NULL; 850 ud->status = VDEV_ST_NULL;
838 851
839 spin_unlock(&ud->lock); 852 spin_unlock_irqrestore(&ud->lock, flags);
840} 853}
841 854
842static void vhci_device_unusable(struct usbip_device *ud) 855static void vhci_device_unusable(struct usbip_device *ud)
843{ 856{
844 spin_lock(&ud->lock); 857 unsigned long flags;
858
859 spin_lock_irqsave(&ud->lock, flags);
845 ud->status = VDEV_ST_ERROR; 860 ud->status = VDEV_ST_ERROR;
846 spin_unlock(&ud->lock); 861 spin_unlock_irqrestore(&ud->lock, flags);
847} 862}
848 863
849static void vhci_device_init(struct vhci_device *vdev) 864static void vhci_device_init(struct vhci_device *vdev)
@@ -933,12 +948,13 @@ static int vhci_get_frame_number(struct usb_hcd *hcd)
933static int vhci_bus_suspend(struct usb_hcd *hcd) 948static int vhci_bus_suspend(struct usb_hcd *hcd)
934{ 949{
935 struct vhci_hcd *vhci = hcd_to_vhci(hcd); 950 struct vhci_hcd *vhci = hcd_to_vhci(hcd);
951 unsigned long flags;
936 952
937 dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__); 953 dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
938 954
939 spin_lock(&vhci->lock); 955 spin_lock_irqsave(&vhci->lock, flags);
940 hcd->state = HC_STATE_SUSPENDED; 956 hcd->state = HC_STATE_SUSPENDED;
941 spin_unlock(&vhci->lock); 957 spin_unlock_irqrestore(&vhci->lock, flags);
942 958
943 return 0; 959 return 0;
944} 960}
@@ -947,15 +963,16 @@ static int vhci_bus_resume(struct usb_hcd *hcd)
947{ 963{
948 struct vhci_hcd *vhci = hcd_to_vhci(hcd); 964 struct vhci_hcd *vhci = hcd_to_vhci(hcd);
949 int rc = 0; 965 int rc = 0;
966 unsigned long flags;
950 967
951 dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__); 968 dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
952 969
953 spin_lock(&vhci->lock); 970 spin_lock_irqsave(&vhci->lock, flags);
954 if (!HCD_HW_ACCESSIBLE(hcd)) 971 if (!HCD_HW_ACCESSIBLE(hcd))
955 rc = -ESHUTDOWN; 972 rc = -ESHUTDOWN;
956 else 973 else
957 hcd->state = HC_STATE_RUNNING; 974 hcd->state = HC_STATE_RUNNING;
958 spin_unlock(&vhci->lock); 975 spin_unlock_irqrestore(&vhci->lock, flags);
959 976
960 return rc; 977 return rc;
961} 978}
@@ -1053,17 +1070,18 @@ static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
1053 int rhport = 0; 1070 int rhport = 0;
1054 int connected = 0; 1071 int connected = 0;
1055 int ret = 0; 1072 int ret = 0;
1073 unsigned long flags;
1056 1074
1057 hcd = platform_get_drvdata(pdev); 1075 hcd = platform_get_drvdata(pdev);
1058 1076
1059 spin_lock(&the_controller->lock); 1077 spin_lock_irqsave(&the_controller->lock, flags);
1060 1078
1061 for (rhport = 0; rhport < VHCI_NPORTS; rhport++) 1079 for (rhport = 0; rhport < VHCI_NPORTS; rhport++)
1062 if (the_controller->port_status[rhport] & 1080 if (the_controller->port_status[rhport] &
1063 USB_PORT_STAT_CONNECTION) 1081 USB_PORT_STAT_CONNECTION)
1064 connected += 1; 1082 connected += 1;
1065 1083
1066 spin_unlock(&the_controller->lock); 1084 spin_unlock_irqrestore(&the_controller->lock, flags);
1067 1085
1068 if (connected > 0) { 1086 if (connected > 0) {
1069 dev_info(&pdev->dev, 1087 dev_info(&pdev->dev,
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index bc4eb0855314..323aa7789989 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -71,10 +71,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
71{ 71{
72 struct usbip_device *ud = &vdev->ud; 72 struct usbip_device *ud = &vdev->ud;
73 struct urb *urb; 73 struct urb *urb;
74 unsigned long flags;
74 75
75 spin_lock(&vdev->priv_lock); 76 spin_lock_irqsave(&vdev->priv_lock, flags);
76 urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum); 77 urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
77 spin_unlock(&vdev->priv_lock); 78 spin_unlock_irqrestore(&vdev->priv_lock, flags);
78 79
79 if (!urb) { 80 if (!urb) {
80 pr_err("cannot find a urb of seqnum %u max seqnum %d\n", 81 pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
@@ -103,9 +104,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
103 104
104 usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum); 105 usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
105 106
106 spin_lock(&the_controller->lock); 107 spin_lock_irqsave(&the_controller->lock, flags);
107 usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); 108 usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
108 spin_unlock(&the_controller->lock); 109 spin_unlock_irqrestore(&the_controller->lock, flags);
109 110
110 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status); 111 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
111 112
@@ -116,8 +117,9 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
116 struct usbip_header *pdu) 117 struct usbip_header *pdu)
117{ 118{
118 struct vhci_unlink *unlink, *tmp; 119 struct vhci_unlink *unlink, *tmp;
120 unsigned long flags;
119 121
120 spin_lock(&vdev->priv_lock); 122 spin_lock_irqsave(&vdev->priv_lock, flags);
121 123
122 list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) { 124 list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
123 pr_info("unlink->seqnum %lu\n", unlink->seqnum); 125 pr_info("unlink->seqnum %lu\n", unlink->seqnum);
@@ -126,12 +128,12 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
126 unlink->seqnum); 128 unlink->seqnum);
127 list_del(&unlink->list); 129 list_del(&unlink->list);
128 130
129 spin_unlock(&vdev->priv_lock); 131 spin_unlock_irqrestore(&vdev->priv_lock, flags);
130 return unlink; 132 return unlink;
131 } 133 }
132 } 134 }
133 135
134 spin_unlock(&vdev->priv_lock); 136 spin_unlock_irqrestore(&vdev->priv_lock, flags);
135 137
136 return NULL; 138 return NULL;
137} 139}
@@ -141,6 +143,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
141{ 143{
142 struct vhci_unlink *unlink; 144 struct vhci_unlink *unlink;
143 struct urb *urb; 145 struct urb *urb;
146 unsigned long flags;
144 147
145 usbip_dump_header(pdu); 148 usbip_dump_header(pdu);
146 149
@@ -151,9 +154,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
151 return; 154 return;
152 } 155 }
153 156
154 spin_lock(&vdev->priv_lock); 157 spin_lock_irqsave(&vdev->priv_lock, flags);
155 urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum); 158 urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
156 spin_unlock(&vdev->priv_lock); 159 spin_unlock_irqrestore(&vdev->priv_lock, flags);
157 160
158 if (!urb) { 161 if (!urb) {
159 /* 162 /*
@@ -170,9 +173,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
170 urb->status = pdu->u.ret_unlink.status; 173 urb->status = pdu->u.ret_unlink.status;
171 pr_info("urb->status %d\n", urb->status); 174 pr_info("urb->status %d\n", urb->status);
172 175
173 spin_lock(&the_controller->lock); 176 spin_lock_irqsave(&the_controller->lock, flags);
174 usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); 177 usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
175 spin_unlock(&the_controller->lock); 178 spin_unlock_irqrestore(&the_controller->lock, flags);
176 179
177 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, 180 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
178 urb->status); 181 urb->status);
@@ -184,10 +187,11 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
184static int vhci_priv_tx_empty(struct vhci_device *vdev) 187static int vhci_priv_tx_empty(struct vhci_device *vdev)
185{ 188{
186 int empty = 0; 189 int empty = 0;
190 unsigned long flags;
187 191
188 spin_lock(&vdev->priv_lock); 192 spin_lock_irqsave(&vdev->priv_lock, flags);
189 empty = list_empty(&vdev->priv_rx); 193 empty = list_empty(&vdev->priv_rx);
190 spin_unlock(&vdev->priv_lock); 194 spin_unlock_irqrestore(&vdev->priv_lock, flags);
191 195
192 return empty; 196 return empty;
193} 197}
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index 211f43f67ea2..b9432fdec775 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -32,23 +32,28 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
32{ 32{
33 char *s = out; 33 char *s = out;
34 int i = 0; 34 int i = 0;
35 unsigned long flags;
35 36
36 BUG_ON(!the_controller || !out); 37 BUG_ON(!the_controller || !out);
37 38
38 spin_lock(&the_controller->lock); 39 spin_lock_irqsave(&the_controller->lock, flags);
39 40
40 /* 41 /*
41 * output example: 42 * output example:
42 * prt sta spd dev socket local_busid 43 * port sta spd dev sockfd local_busid
43 * 000 004 000 000 c5a7bb80 1-2.3 44 * 0000 004 000 00000000 000003 1-2.3
44 * 001 004 000 000 d8cee980 2-3.4 45 * 0001 004 000 00000000 000004 2-3.4
45 * 46 *
46 * IP address can be retrieved from a socket pointer address by looking 47 * Output includes socket fd instead of socket pointer address to
47 * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a 48 * avoid leaking kernel memory address in:
48 * port number and its peer IP address. 49 * /sys/devices/platform/vhci_hcd.0/status and in debug output.
50 * The socket pointer address is not used at the moment and it was
51 * made visible as a convenient way to find IP address from socket
52 * pointer address by looking up /proc/net/{tcp,tcp6}. As this opens
53 * a security hole, the change is made to use sockfd instead.
49 */ 54 */
50 out += sprintf(out, 55 out += sprintf(out,
51 "prt sta spd bus dev socket local_busid\n"); 56 "prt sta spd dev sockfd local_busid\n");
52 57
53 for (i = 0; i < VHCI_NPORTS; i++) { 58 for (i = 0; i < VHCI_NPORTS; i++) {
54 struct vhci_device *vdev = port_to_vdev(i); 59 struct vhci_device *vdev = port_to_vdev(i);
@@ -59,18 +64,17 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
59 if (vdev->ud.status == VDEV_ST_USED) { 64 if (vdev->ud.status == VDEV_ST_USED) {
60 out += sprintf(out, "%03u %08x ", 65 out += sprintf(out, "%03u %08x ",
61 vdev->speed, vdev->devid); 66 vdev->speed, vdev->devid);
62 out += sprintf(out, "%16p ", vdev->ud.tcp_socket); 67 out += sprintf(out, "%06u ", vdev->ud.sockfd);
63 out += sprintf(out, "%s", dev_name(&vdev->udev->dev)); 68 out += sprintf(out, "%s", dev_name(&vdev->udev->dev));
64 69
65 } else { 70 } else
66 out += sprintf(out, "000 000 000 0000000000000000 0-0"); 71 out += sprintf(out, "000 00000000 000000 0-0");
67 }
68 72
69 out += sprintf(out, "\n"); 73 out += sprintf(out, "\n");
70 spin_unlock(&vdev->ud.lock); 74 spin_unlock(&vdev->ud.lock);
71 } 75 }
72 76
73 spin_unlock(&the_controller->lock); 77 spin_unlock_irqrestore(&the_controller->lock, flags);
74 78
75 return out - s; 79 return out - s;
76} 80}
@@ -80,11 +84,12 @@ static DEVICE_ATTR_RO(status);
80static int vhci_port_disconnect(__u32 rhport) 84static int vhci_port_disconnect(__u32 rhport)
81{ 85{
82 struct vhci_device *vdev; 86 struct vhci_device *vdev;
87 unsigned long flags;
83 88
84 usbip_dbg_vhci_sysfs("enter\n"); 89 usbip_dbg_vhci_sysfs("enter\n");
85 90
86 /* lock */ 91 /* lock */
87 spin_lock(&the_controller->lock); 92 spin_lock_irqsave(&the_controller->lock, flags);
88 93
89 vdev = port_to_vdev(rhport); 94 vdev = port_to_vdev(rhport);
90 95
@@ -94,14 +99,14 @@ static int vhci_port_disconnect(__u32 rhport)
94 99
95 /* unlock */ 100 /* unlock */
96 spin_unlock(&vdev->ud.lock); 101 spin_unlock(&vdev->ud.lock);
97 spin_unlock(&the_controller->lock); 102 spin_unlock_irqrestore(&the_controller->lock, flags);
98 103
99 return -EINVAL; 104 return -EINVAL;
100 } 105 }
101 106
102 /* unlock */ 107 /* unlock */
103 spin_unlock(&vdev->ud.lock); 108 spin_unlock(&vdev->ud.lock);
104 spin_unlock(&the_controller->lock); 109 spin_unlock_irqrestore(&the_controller->lock, flags);
105 110
106 usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN); 111 usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
107 112
@@ -177,6 +182,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
177 int sockfd = 0; 182 int sockfd = 0;
178 __u32 rhport = 0, devid = 0, speed = 0; 183 __u32 rhport = 0, devid = 0, speed = 0;
179 int err; 184 int err;
185 unsigned long flags;
180 186
181 /* 187 /*
182 * @rhport: port number of vhci_hcd 188 * @rhport: port number of vhci_hcd
@@ -202,14 +208,14 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
202 /* now need lock until setting vdev status as used */ 208 /* now need lock until setting vdev status as used */
203 209
204 /* begin a lock */ 210 /* begin a lock */
205 spin_lock(&the_controller->lock); 211 spin_lock_irqsave(&the_controller->lock, flags);
206 vdev = port_to_vdev(rhport); 212 vdev = port_to_vdev(rhport);
207 spin_lock(&vdev->ud.lock); 213 spin_lock(&vdev->ud.lock);
208 214
209 if (vdev->ud.status != VDEV_ST_NULL) { 215 if (vdev->ud.status != VDEV_ST_NULL) {
210 /* end of the lock */ 216 /* end of the lock */
211 spin_unlock(&vdev->ud.lock); 217 spin_unlock(&vdev->ud.lock);
212 spin_unlock(&the_controller->lock); 218 spin_unlock_irqrestore(&the_controller->lock, flags);
213 219
214 sockfd_put(socket); 220 sockfd_put(socket);
215 221
@@ -223,11 +229,12 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
223 229
224 vdev->devid = devid; 230 vdev->devid = devid;
225 vdev->speed = speed; 231 vdev->speed = speed;
232 vdev->ud.sockfd = sockfd;
226 vdev->ud.tcp_socket = socket; 233 vdev->ud.tcp_socket = socket;
227 vdev->ud.status = VDEV_ST_NOTASSIGNED; 234 vdev->ud.status = VDEV_ST_NOTASSIGNED;
228 235
229 spin_unlock(&vdev->ud.lock); 236 spin_unlock(&vdev->ud.lock);
230 spin_unlock(&the_controller->lock); 237 spin_unlock_irqrestore(&the_controller->lock, flags);
231 /* end the lock */ 238 /* end the lock */
232 239
233 vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx"); 240 vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
index 3c5796c8633a..a9a663a578b6 100644
--- a/drivers/usb/usbip/vhci_tx.c
+++ b/drivers/usb/usbip/vhci_tx.c
@@ -47,16 +47,17 @@ static void setup_cmd_submit_pdu(struct usbip_header *pdup, struct urb *urb)
47static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev) 47static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev)
48{ 48{
49 struct vhci_priv *priv, *tmp; 49 struct vhci_priv *priv, *tmp;
50 unsigned long flags;
50 51
51 spin_lock(&vdev->priv_lock); 52 spin_lock_irqsave(&vdev->priv_lock, flags);
52 53
53 list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) { 54 list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) {
54 list_move_tail(&priv->list, &vdev->priv_rx); 55 list_move_tail(&priv->list, &vdev->priv_rx);
55 spin_unlock(&vdev->priv_lock); 56 spin_unlock_irqrestore(&vdev->priv_lock, flags);
56 return priv; 57 return priv;
57 } 58 }
58 59
59 spin_unlock(&vdev->priv_lock); 60 spin_unlock_irqrestore(&vdev->priv_lock, flags);
60 61
61 return NULL; 62 return NULL;
62} 63}
@@ -137,16 +138,17 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
137static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev) 138static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev)
138{ 139{
139 struct vhci_unlink *unlink, *tmp; 140 struct vhci_unlink *unlink, *tmp;
141 unsigned long flags;
140 142
141 spin_lock(&vdev->priv_lock); 143 spin_lock_irqsave(&vdev->priv_lock, flags);
142 144
143 list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) { 145 list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
144 list_move_tail(&unlink->list, &vdev->unlink_rx); 146 list_move_tail(&unlink->list, &vdev->unlink_rx);
145 spin_unlock(&vdev->priv_lock); 147 spin_unlock_irqrestore(&vdev->priv_lock, flags);
146 return unlink; 148 return unlink;
147 } 149 }
148 150
149 spin_unlock(&vdev->priv_lock); 151 spin_unlock_irqrestore(&vdev->priv_lock, flags);
150 152
151 return NULL; 153 return NULL;
152} 154}
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index fe2b470d7ec6..c55c632a3b24 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -752,6 +752,62 @@ static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
752 return 0; 752 return 0;
753} 753}
754 754
755static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
756 int count, struct perm_bits *perm,
757 int offset, __le32 val)
758{
759 __le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
760 offset + PCI_EXP_DEVCTL);
761 int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
762
763 count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
764 if (count < 0)
765 return count;
766
767 /*
768 * The FLR bit is virtualized, if set and the device supports PCIe
769 * FLR, issue a reset_function. Regardless, clear the bit, the spec
770 * requires it to be always read as zero. NB, reset_function might
771 * not use a PCIe FLR, we don't have that level of granularity.
772 */
773 if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
774 u32 cap;
775 int ret;
776
777 *ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
778
779 ret = pci_user_read_config_dword(vdev->pdev,
780 pos - offset + PCI_EXP_DEVCAP,
781 &cap);
782
783 if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
784 pci_try_reset_function(vdev->pdev);
785 }
786
787 /*
788 * MPS is virtualized to the user, writes do not change the physical
789 * register since determining a proper MPS value requires a system wide
790 * device view. The MRRS is largely independent of MPS, but since the
791 * user does not have that system-wide view, they might set a safe, but
792 * inefficiently low value. Here we allow writes through to hardware,
793 * but we set the floor to the physical device MPS setting, so that
794 * we can at least use full TLPs, as defined by the MPS value.
795 *
796 * NB, if any devices actually depend on an artificially low MRRS
797 * setting, this will need to be revisited, perhaps with a quirk
798 * though pcie_set_readrq().
799 */
800 if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
801 readrq = 128 <<
802 ((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
803 readrq = max(readrq, pcie_get_mps(vdev->pdev));
804
805 pcie_set_readrq(vdev->pdev, readrq);
806 }
807
808 return count;
809}
810
755/* Permissions for PCI Express capability */ 811/* Permissions for PCI Express capability */
756static int __init init_pci_cap_exp_perm(struct perm_bits *perm) 812static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
757{ 813{
@@ -759,26 +815,67 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
759 if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2)) 815 if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
760 return -ENOMEM; 816 return -ENOMEM;
761 817
818 perm->writefn = vfio_exp_config_write;
819
762 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); 820 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
763 821
764 /* 822 /*
765 * Allow writes to device control fields (includes FLR!) 823 * Allow writes to device control fields, except devctl_phantom,
766 * but not to devctl_phantom which could confuse IOMMU 824 * which could confuse IOMMU, MPS, which can break communication
767 * or to the ARI bit in devctl2 which is set at probe time 825 * with other physical devices, and the ARI bit in devctl2, which
826 * is set at probe time. FLR and MRRS get virtualized via our
827 * writefn.
768 */ 828 */
769 p_setw(perm, PCI_EXP_DEVCTL, NO_VIRT, ~PCI_EXP_DEVCTL_PHANTOM); 829 p_setw(perm, PCI_EXP_DEVCTL,
830 PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
831 PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
770 p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI); 832 p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
771 return 0; 833 return 0;
772} 834}
773 835
836static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
837 int count, struct perm_bits *perm,
838 int offset, __le32 val)
839{
840 u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
841
842 count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
843 if (count < 0)
844 return count;
845
846 /*
847 * The FLR bit is virtualized, if set and the device supports AF
848 * FLR, issue a reset_function. Regardless, clear the bit, the spec
849 * requires it to be always read as zero. NB, reset_function might
850 * not use an AF FLR, we don't have that level of granularity.
851 */
852 if (*ctrl & PCI_AF_CTRL_FLR) {
853 u8 cap;
854 int ret;
855
856 *ctrl &= ~PCI_AF_CTRL_FLR;
857
858 ret = pci_user_read_config_byte(vdev->pdev,
859 pos - offset + PCI_AF_CAP,
860 &cap);
861
862 if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
863 pci_try_reset_function(vdev->pdev);
864 }
865
866 return count;
867}
868
774/* Permissions for Advanced Function capability */ 869/* Permissions for Advanced Function capability */
775static int __init init_pci_cap_af_perm(struct perm_bits *perm) 870static int __init init_pci_cap_af_perm(struct perm_bits *perm)
776{ 871{
777 if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF])) 872 if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
778 return -ENOMEM; 873 return -ENOMEM;
779 874
875 perm->writefn = vfio_af_config_write;
876
780 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); 877 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
781 p_setb(perm, PCI_AF_CTRL, NO_VIRT, PCI_AF_CTRL_FLR); 878 p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
782 return 0; 879 return 0;
783} 880}
784 881
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9eda69e40678..645b2197930e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -955,7 +955,8 @@ err_used:
955 if (ubufs) 955 if (ubufs)
956 vhost_net_ubuf_put_wait_and_free(ubufs); 956 vhost_net_ubuf_put_wait_and_free(ubufs);
957err_ubufs: 957err_ubufs:
958 sockfd_put(sock); 958 if (sock)
959 sockfd_put(sock);
959err_vq: 960err_vq:
960 mutex_unlock(&vq->mutex); 961 mutex_unlock(&vq->mutex);
961err: 962err:
@@ -981,6 +982,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
981 } 982 }
982 vhost_net_stop(n, &tx_sock, &rx_sock); 983 vhost_net_stop(n, &tx_sock, &rx_sock);
983 vhost_net_flush(n); 984 vhost_net_flush(n);
985 vhost_dev_stop(&n->dev);
984 vhost_dev_reset_owner(&n->dev, memory); 986 vhost_dev_reset_owner(&n->dev, memory);
985 vhost_net_vq_reset(n); 987 vhost_net_vq_reset(n);
986done: 988done:
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ad2146a9ab2d..675819a1af37 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -173,8 +173,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
173 if (mask) 173 if (mask)
174 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); 174 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
175 if (mask & POLLERR) { 175 if (mask & POLLERR) {
176 if (poll->wqh) 176 vhost_poll_stop(poll);
177 remove_wait_queue(poll->wqh, &poll->wait);
178 ret = -EINVAL; 177 ret = -EINVAL;
179 } 178 }
180 179
diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
index 734a9158946b..e55304d5cf07 100644
--- a/drivers/video/backlight/as3711_bl.c
+++ b/drivers/video/backlight/as3711_bl.c
@@ -262,10 +262,10 @@ static int as3711_bl_register(struct platform_device *pdev,
262static int as3711_backlight_parse_dt(struct device *dev) 262static int as3711_backlight_parse_dt(struct device *dev)
263{ 263{
264 struct as3711_bl_pdata *pdata = dev_get_platdata(dev); 264 struct as3711_bl_pdata *pdata = dev_get_platdata(dev);
265 struct device_node *bl = 265 struct device_node *bl, *fb;
266 of_find_node_by_name(dev->parent->of_node, "backlight"), *fb;
267 int ret; 266 int ret;
268 267
268 bl = of_get_child_by_name(dev->parent->of_node, "backlight");
269 if (!bl) { 269 if (!bl) {
270 dev_dbg(dev, "backlight node not found\n"); 270 dev_dbg(dev, "backlight node not found\n");
271 return -ENODEV; 271 return -ENODEV;
@@ -279,7 +279,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
279 if (pdata->su1_max_uA <= 0) 279 if (pdata->su1_max_uA <= 0)
280 ret = -EINVAL; 280 ret = -EINVAL;
281 if (ret < 0) 281 if (ret < 0)
282 return ret; 282 goto err_put_bl;
283 } 283 }
284 284
285 fb = of_parse_phandle(bl, "su2-dev", 0); 285 fb = of_parse_phandle(bl, "su2-dev", 0);
@@ -292,7 +292,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
292 if (pdata->su2_max_uA <= 0) 292 if (pdata->su2_max_uA <= 0)
293 ret = -EINVAL; 293 ret = -EINVAL;
294 if (ret < 0) 294 if (ret < 0)
295 return ret; 295 goto err_put_bl;
296 296
297 if (of_find_property(bl, "su2-feedback-voltage", NULL)) { 297 if (of_find_property(bl, "su2-feedback-voltage", NULL)) {
298 pdata->su2_feedback = AS3711_SU2_VOLTAGE; 298 pdata->su2_feedback = AS3711_SU2_VOLTAGE;
@@ -314,8 +314,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
314 pdata->su2_feedback = AS3711_SU2_CURR_AUTO; 314 pdata->su2_feedback = AS3711_SU2_CURR_AUTO;
315 count++; 315 count++;
316 } 316 }
317 if (count != 1) 317 if (count != 1) {
318 return -EINVAL; 318 ret = -EINVAL;
319 goto err_put_bl;
320 }
319 321
320 count = 0; 322 count = 0;
321 if (of_find_property(bl, "su2-fbprot-lx-sd4", NULL)) { 323 if (of_find_property(bl, "su2-fbprot-lx-sd4", NULL)) {
@@ -334,8 +336,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
334 pdata->su2_fbprot = AS3711_SU2_GPIO4; 336 pdata->su2_fbprot = AS3711_SU2_GPIO4;
335 count++; 337 count++;
336 } 338 }
337 if (count != 1) 339 if (count != 1) {
338 return -EINVAL; 340 ret = -EINVAL;
341 goto err_put_bl;
342 }
339 343
340 count = 0; 344 count = 0;
341 if (of_find_property(bl, "su2-auto-curr1", NULL)) { 345 if (of_find_property(bl, "su2-auto-curr1", NULL)) {
@@ -355,11 +359,20 @@ static int as3711_backlight_parse_dt(struct device *dev)
355 * At least one su2-auto-curr* must be specified iff 359 * At least one su2-auto-curr* must be specified iff
356 * AS3711_SU2_CURR_AUTO is used 360 * AS3711_SU2_CURR_AUTO is used
357 */ 361 */
358 if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO)) 362 if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO)) {
359 return -EINVAL; 363 ret = -EINVAL;
364 goto err_put_bl;
365 }
360 } 366 }
361 367
368 of_node_put(bl);
369
362 return 0; 370 return 0;
371
372err_put_bl:
373 of_node_put(bl);
374
375 return ret;
363} 376}
364 377
365static int as3711_backlight_probe(struct platform_device *pdev) 378static int as3711_backlight_probe(struct platform_device *pdev)
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 7b738d60ecc2..f3aa6088f1d9 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -116,7 +116,7 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
116 if (!pdata) 116 if (!pdata)
117 return; 117 return;
118 118
119 np = of_find_node_by_name(nproot, "backlight"); 119 np = of_get_child_by_name(nproot, "backlight");
120 if (!np) { 120 if (!np) {
121 dev_err(&pdev->dev, "failed to find backlight node\n"); 121 dev_err(&pdev->dev, "failed to find backlight node\n");
122 return; 122 return;
@@ -125,6 +125,8 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
125 if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val)) 125 if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val))
126 pdata->dual_string = val; 126 pdata->dual_string = val;
127 127
128 of_node_put(np);
129
128 pdev->dev.platform_data = pdata; 130 pdev->dev.platform_data = pdata;
129} 131}
130 132
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index 61d72bffd402..dc920e2aa094 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -184,11 +184,11 @@ static struct tps65217_bl_pdata *
184tps65217_bl_parse_dt(struct platform_device *pdev) 184tps65217_bl_parse_dt(struct platform_device *pdev)
185{ 185{
186 struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent); 186 struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
187 struct device_node *node = of_node_get(tps->dev->of_node); 187 struct device_node *node;
188 struct tps65217_bl_pdata *pdata, *err; 188 struct tps65217_bl_pdata *pdata, *err;
189 u32 val; 189 u32 val;
190 190
191 node = of_find_node_by_name(node, "backlight"); 191 node = of_get_child_by_name(tps->dev->of_node, "backlight");
192 if (!node) 192 if (!node)
193 return ERR_PTR(-ENODEV); 193 return ERR_PTR(-ENODEV);
194 194
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index 0efc52f11ad0..b30e7d87804b 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -68,7 +68,6 @@ const struct consw dummy_con = {
68 .con_switch = DUMMY, 68 .con_switch = DUMMY,
69 .con_blank = DUMMY, 69 .con_blank = DUMMY,
70 .con_font_set = DUMMY, 70 .con_font_set = DUMMY,
71 .con_font_get = DUMMY,
72 .con_font_default = DUMMY, 71 .con_font_default = DUMMY,
73 .con_font_copy = DUMMY, 72 .con_font_copy = DUMMY,
74 .con_set_palette = DUMMY, 73 .con_set_palette = DUMMY,
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 517f565b65d7..598ec7545e84 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -409,7 +409,10 @@ static const char *vgacon_startup(void)
409 vga_video_port_val = VGA_CRT_DM; 409 vga_video_port_val = VGA_CRT_DM;
410 if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { 410 if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) {
411 static struct resource ega_console_resource = 411 static struct resource ega_console_resource =
412 { .name = "ega", .start = 0x3B0, .end = 0x3BF }; 412 { .name = "ega",
413 .flags = IORESOURCE_IO,
414 .start = 0x3B0,
415 .end = 0x3BF };
413 vga_video_type = VIDEO_TYPE_EGAM; 416 vga_video_type = VIDEO_TYPE_EGAM;
414 vga_vram_size = 0x8000; 417 vga_vram_size = 0x8000;
415 display_desc = "EGA+"; 418 display_desc = "EGA+";
@@ -417,9 +420,15 @@ static const char *vgacon_startup(void)
417 &ega_console_resource); 420 &ega_console_resource);
418 } else { 421 } else {
419 static struct resource mda1_console_resource = 422 static struct resource mda1_console_resource =
420 { .name = "mda", .start = 0x3B0, .end = 0x3BB }; 423 { .name = "mda",
424 .flags = IORESOURCE_IO,
425 .start = 0x3B0,
426 .end = 0x3BB };
421 static struct resource mda2_console_resource = 427 static struct resource mda2_console_resource =
422 { .name = "mda", .start = 0x3BF, .end = 0x3BF }; 428 { .name = "mda",
429 .flags = IORESOURCE_IO,
430 .start = 0x3BF,
431 .end = 0x3BF };
423 vga_video_type = VIDEO_TYPE_MDA; 432 vga_video_type = VIDEO_TYPE_MDA;
424 vga_vram_size = 0x2000; 433 vga_vram_size = 0x2000;
425 display_desc = "*MDA"; 434 display_desc = "*MDA";
@@ -441,15 +450,21 @@ static const char *vgacon_startup(void)
441 vga_vram_size = 0x8000; 450 vga_vram_size = 0x8000;
442 451
443 if (!screen_info.orig_video_isVGA) { 452 if (!screen_info.orig_video_isVGA) {
444 static struct resource ega_console_resource 453 static struct resource ega_console_resource =
445 = { .name = "ega", .start = 0x3C0, .end = 0x3DF }; 454 { .name = "ega",
455 .flags = IORESOURCE_IO,
456 .start = 0x3C0,
457 .end = 0x3DF };
446 vga_video_type = VIDEO_TYPE_EGAC; 458 vga_video_type = VIDEO_TYPE_EGAC;
447 display_desc = "EGA"; 459 display_desc = "EGA";
448 request_resource(&ioport_resource, 460 request_resource(&ioport_resource,
449 &ega_console_resource); 461 &ega_console_resource);
450 } else { 462 } else {
451 static struct resource vga_console_resource 463 static struct resource vga_console_resource =
452 = { .name = "vga+", .start = 0x3C0, .end = 0x3DF }; 464 { .name = "vga+",
465 .flags = IORESOURCE_IO,
466 .start = 0x3C0,
467 .end = 0x3DF };
453 vga_video_type = VIDEO_TYPE_VGAC; 468 vga_video_type = VIDEO_TYPE_VGAC;
454 display_desc = "VGA+"; 469 display_desc = "VGA+";
455 request_resource(&ioport_resource, 470 request_resource(&ioport_resource,
@@ -493,7 +508,10 @@ static const char *vgacon_startup(void)
493 } 508 }
494 } else { 509 } else {
495 static struct resource cga_console_resource = 510 static struct resource cga_console_resource =
496 { .name = "cga", .start = 0x3D4, .end = 0x3D5 }; 511 { .name = "cga",
512 .flags = IORESOURCE_IO,
513 .start = 0x3D4,
514 .end = 0x3D5 };
497 vga_video_type = VIDEO_TYPE_CGA; 515 vga_video_type = VIDEO_TYPE_CGA;
498 vga_vram_size = 0x2000; 516 vga_vram_size = 0x2000;
499 display_desc = "*CGA"; 517 display_desc = "*CGA";
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 9362424c2340..924b3d6c3e9b 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -759,8 +759,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb)
759 if (err) 759 if (err)
760 return err; 760 return err;
761 761
762 framesize = fb->panel->mode.xres * fb->panel->mode.yres * 762 framesize = PAGE_ALIGN(fb->panel->mode.xres * fb->panel->mode.yres *
763 fb->panel->bpp / 8; 763 fb->panel->bpp / 8);
764 fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize, 764 fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize,
765 &dma, GFP_KERNEL); 765 &dma, GFP_KERNEL);
766 if (!fb->fb.screen_base) 766 if (!fb->fb.screen_base)
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 19eb42b57d87..a6da82648c92 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1120,7 +1120,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
1120 goto put_display_node; 1120 goto put_display_node;
1121 } 1121 }
1122 1122
1123 timings_np = of_find_node_by_name(display_np, "display-timings"); 1123 timings_np = of_get_child_by_name(display_np, "display-timings");
1124 if (!timings_np) { 1124 if (!timings_np) {
1125 dev_err(dev, "failed to find display-timings node\n"); 1125 dev_err(dev, "failed to find display-timings node\n");
1126 ret = -ENODEV; 1126 ret = -ENODEV;
@@ -1141,6 +1141,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
1141 fb_add_videomode(&fb_vm, &info->modelist); 1141 fb_add_videomode(&fb_vm, &info->modelist);
1142 } 1142 }
1143 1143
1144 /*
1145 * FIXME: Make sure we are not referencing any fields in display_np
1146 * and timings_np and drop our references to them before returning to
1147 * avoid leaking the nodes on probe deferral and driver unbind.
1148 */
1149
1144 return 0; 1150 return 0;
1145 1151
1146put_timings_node: 1152put_timings_node:
diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c
index 95873f26e39c..de2f3e793786 100644
--- a/drivers/video/fbdev/exynos/s6e8ax0.c
+++ b/drivers/video/fbdev/exynos/s6e8ax0.c
@@ -829,8 +829,7 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
829 return 0; 829 return 0;
830} 830}
831 831
832#ifdef CONFIG_PM 832static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
833static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
834{ 833{
835 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); 834 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
836 835
@@ -843,7 +842,7 @@ static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
843 return 0; 842 return 0;
844} 843}
845 844
846static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev) 845static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
847{ 846{
848 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); 847 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
849 848
@@ -855,10 +854,6 @@ static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
855 854
856 return 0; 855 return 0;
857} 856}
858#else
859#define s6e8ax0_suspend NULL
860#define s6e8ax0_resume NULL
861#endif
862 857
863static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = { 858static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
864 .name = "s6e8ax0", 859 .name = "s6e8ax0",
@@ -867,8 +862,8 @@ static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
867 .power_on = s6e8ax0_power_on, 862 .power_on = s6e8ax0_power_on,
868 .set_sequence = s6e8ax0_set_sequence, 863 .set_sequence = s6e8ax0_set_sequence,
869 .probe = s6e8ax0_probe, 864 .probe = s6e8ax0_probe,
870 .suspend = s6e8ax0_suspend, 865 .suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL,
871 .resume = s6e8ax0_resume, 866 .resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL,
872}; 867};
873 868
874static int s6e8ax0_init(void) 869static int s6e8ax0_init(void)
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index bbec737eef30..bf207444ba0c 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -302,7 +302,7 @@ static __inline__ int get_opt_int(const char *this_opt, const char *name,
302} 302}
303 303
304static __inline__ int get_opt_bool(const char *this_opt, const char *name, 304static __inline__ int get_opt_bool(const char *this_opt, const char *name,
305 int *ret) 305 bool *ret)
306{ 306{
307 if (!ret) 307 if (!ret)
308 return 0; 308 return 0;
diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c
index a0f496049db7..3a6bb6561ba0 100644
--- a/drivers/video/fbdev/mmp/core.c
+++ b/drivers/video/fbdev/mmp/core.c
@@ -23,6 +23,7 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
25#include <linux/export.h> 25#include <linux/export.h>
26#include <linux/module.h>
26#include <video/mmp_disp.h> 27#include <video/mmp_disp.h>
27 28
28static struct mmp_overlay *path_get_overlay(struct mmp_path *path, 29static struct mmp_overlay *path_get_overlay(struct mmp_path *path,
@@ -249,3 +250,7 @@ void mmp_unregister_path(struct mmp_path *path)
249 mutex_unlock(&disp_lock); 250 mutex_unlock(&disp_lock);
250} 251}
251EXPORT_SYMBOL_GPL(mmp_unregister_path); 252EXPORT_SYMBOL_GPL(mmp_unregister_path);
253
254MODULE_AUTHOR("Zhou Zhu <zzhu3@marvell.com>");
255MODULE_DESCRIPTION("Marvell MMP display framework");
256MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index a350209ffbd3..31c301d6be62 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -121,7 +121,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
121 unsigned char __user *ured; 121 unsigned char __user *ured;
122 unsigned char __user *ugreen; 122 unsigned char __user *ugreen;
123 unsigned char __user *ublue; 123 unsigned char __user *ublue;
124 int index, count, i; 124 unsigned int index, count, i;
125 125
126 if (get_user(index, &c->index) || 126 if (get_user(index, &c->index) ||
127 __get_user(count, &c->count) || 127 __get_user(count, &c->count) ||
@@ -160,7 +160,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
160 unsigned char __user *ugreen; 160 unsigned char __user *ugreen;
161 unsigned char __user *ublue; 161 unsigned char __user *ublue;
162 struct fb_cmap *cmap = &info->cmap; 162 struct fb_cmap *cmap = &info->cmap;
163 int index, count, i; 163 unsigned int index, count, i;
164 u8 red, green, blue; 164 u8 red, green, blue;
165 165
166 if (get_user(index, &c->index) || 166 if (get_user(index, &c->index) ||
diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
index 295e0dedaf1f..20f7234e809e 100644
--- a/drivers/video/fbdev/sis/init301.c
+++ b/drivers/video/fbdev/sis/init301.c
@@ -2151,17 +2151,15 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor
2151 unsigned short RefreshRateTableIndex) 2151 unsigned short RefreshRateTableIndex)
2152{ 2152{
2153 unsigned short CRT2Index, VCLKIndex = 0, VCLKIndexGEN = 0, VCLKIndexGENCRT = 0; 2153 unsigned short CRT2Index, VCLKIndex = 0, VCLKIndexGEN = 0, VCLKIndexGENCRT = 0;
2154 unsigned short modeflag, resinfo, tempbx; 2154 unsigned short resinfo, tempbx;
2155 const unsigned char *CHTVVCLKPtr = NULL; 2155 const unsigned char *CHTVVCLKPtr = NULL;
2156 2156
2157 if(ModeNo <= 0x13) { 2157 if(ModeNo <= 0x13) {
2158 modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag;
2159 resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo; 2158 resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo;
2160 CRT2Index = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC; 2159 CRT2Index = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC;
2161 VCLKIndexGEN = (SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02)) >> 2) & 0x03; 2160 VCLKIndexGEN = (SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02)) >> 2) & 0x03;
2162 VCLKIndexGENCRT = VCLKIndexGEN; 2161 VCLKIndexGENCRT = VCLKIndexGEN;
2163 } else { 2162 } else {
2164 modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
2165 resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; 2163 resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO;
2166 CRT2Index = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC; 2164 CRT2Index = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC;
2167 VCLKIndexGEN = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK; 2165 VCLKIndexGEN = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
@@ -7270,7 +7268,7 @@ SiS_ShiftXPos(struct SiS_Private *SiS_Pr, int shift)
7270static void 7268static void
7271SiS_SetGroup4_C_ELV(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) 7269SiS_SetGroup4_C_ELV(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex)
7272{ 7270{
7273 unsigned short temp, temp1, resinfo = 0; 7271 unsigned short temp, temp1;
7274 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; 7272 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
7275 7273
7276 if(!(SiS_Pr->SiS_VBType & VB_SIS30xCLV)) return; 7274 if(!(SiS_Pr->SiS_VBType & VB_SIS30xCLV)) return;
@@ -7282,10 +7280,6 @@ SiS_SetGroup4_C_ELV(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
7282 if(!(ROMAddr[0x61] & 0x04)) return; 7280 if(!(ROMAddr[0x61] & 0x04)) return;
7283 } 7281 }
7284 7282
7285 if(ModeNo > 0x13) {
7286 resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO;
7287 }
7288
7289 SiS_SetRegOR(SiS_Pr->SiS_Part4Port,0x3a,0x08); 7283 SiS_SetRegOR(SiS_Pr->SiS_Part4Port,0x3a,0x08);
7290 temp = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x3a); 7284 temp = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x3a);
7291 if(!(temp & 0x01)) { 7285 if(!(temp & 0x01)) {
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index d0a4e2f79a57..d215faacce04 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1600,6 +1600,7 @@ static int sm501fb_start(struct sm501fb_info *info,
1600 info->fbmem = ioremap(res->start, resource_size(res)); 1600 info->fbmem = ioremap(res->start, resource_size(res));
1601 if (info->fbmem == NULL) { 1601 if (info->fbmem == NULL) {
1602 dev_err(dev, "cannot remap framebuffer\n"); 1602 dev_err(dev, "cannot remap framebuffer\n");
1603 ret = -ENXIO;
1603 goto err_mem_res; 1604 goto err_mem_res;
1604 } 1605 }
1605 1606
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 53326badfb61..2add8def83be 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1487,15 +1487,25 @@ static struct device_attribute fb_device_attrs[] = {
1487static int dlfb_select_std_channel(struct dlfb_data *dev) 1487static int dlfb_select_std_channel(struct dlfb_data *dev)
1488{ 1488{
1489 int ret; 1489 int ret;
1490 u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7, 1490 void *buf;
1491 static const u8 set_def_chn[] = {
1492 0x57, 0xCD, 0xDC, 0xA7,
1491 0x1C, 0x88, 0x5E, 0x15, 1493 0x1C, 0x88, 0x5E, 0x15,
1492 0x60, 0xFE, 0xC6, 0x97, 1494 0x60, 0xFE, 0xC6, 0x97,
1493 0x16, 0x3D, 0x47, 0xF2 }; 1495 0x16, 0x3D, 0x47, 0xF2 };
1494 1496
1497 buf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
1498
1499 if (!buf)
1500 return -ENOMEM;
1501
1495 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 1502 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
1496 NR_USB_REQUEST_CHANNEL, 1503 NR_USB_REQUEST_CHANNEL,
1497 (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, 1504 (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
1498 set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT); 1505 buf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
1506
1507 kfree(buf);
1508
1499 return ret; 1509 return ret;
1500} 1510}
1501 1511
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 178ae93b7ebd..381236ff34d9 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1059,7 +1059,8 @@ static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
1059 info->cmap.len || cmap->start < info->cmap.start) 1059 info->cmap.len || cmap->start < info->cmap.start)
1060 return -EINVAL; 1060 return -EINVAL;
1061 1061
1062 entries = kmalloc(sizeof(*entries) * cmap->len, GFP_KERNEL); 1062 entries = kmalloc_array(cmap->len, sizeof(*entries),
1063 GFP_KERNEL);
1063 if (!entries) 1064 if (!entries)
1064 return -ENOMEM; 1065 return -ENOMEM;
1065 1066
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index b9c2f81fb6b9..556c39997aab 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -291,8 +291,23 @@ static int vfb_check_var(struct fb_var_screeninfo *var,
291 */ 291 */
292static int vfb_set_par(struct fb_info *info) 292static int vfb_set_par(struct fb_info *info)
293{ 293{
294 switch (info->var.bits_per_pixel) {
295 case 1:
296 info->fix.visual = FB_VISUAL_MONO01;
297 break;
298 case 8:
299 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
300 break;
301 case 16:
302 case 24:
303 case 32:
304 info->fix.visual = FB_VISUAL_TRUECOLOR;
305 break;
306 }
307
294 info->fix.line_length = get_line_length(info->var.xres_virtual, 308 info->fix.line_length = get_line_length(info->var.xres_virtual,
295 info->var.bits_per_pixel); 309 info->var.bits_per_pixel);
310
296 return 0; 311 return 0;
297} 312}
298 313
@@ -525,6 +540,8 @@ static int vfb_probe(struct platform_device *dev)
525 goto err2; 540 goto err2;
526 platform_set_drvdata(dev, info); 541 platform_set_drvdata(dev, info);
527 542
543 vfb_set_par(info);
544
528 fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n", 545 fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n",
529 videomemorysize >> 10); 546 videomemorysize >> 10);
530 return 0; 547 return 0;
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index f9718f012aae..badee04ef496 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1630,16 +1630,14 @@ static void viafb_init_proc(struct viafb_shared *shared)
1630} 1630}
1631static void viafb_remove_proc(struct viafb_shared *shared) 1631static void viafb_remove_proc(struct viafb_shared *shared)
1632{ 1632{
1633 struct proc_dir_entry *viafb_entry = shared->proc_entry, 1633 struct proc_dir_entry *viafb_entry = shared->proc_entry;
1634 *iga1_entry = shared->iga1_proc_entry,
1635 *iga2_entry = shared->iga2_proc_entry;
1636 1634
1637 if (!viafb_entry) 1635 if (!viafb_entry)
1638 return; 1636 return;
1639 1637
1640 remove_proc_entry("output_devices", iga2_entry); 1638 remove_proc_entry("output_devices", shared->iga2_proc_entry);
1641 remove_proc_entry("iga2", viafb_entry); 1639 remove_proc_entry("iga2", viafb_entry);
1642 remove_proc_entry("output_devices", iga1_entry); 1640 remove_proc_entry("output_devices", shared->iga1_proc_entry);
1643 remove_proc_entry("iga1", viafb_entry); 1641 remove_proc_entry("iga1", viafb_entry);
1644 remove_proc_entry("supported_output_devices", viafb_entry); 1642 remove_proc_entry("supported_output_devices", viafb_entry);
1645 1643
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 162689227a23..b73520aaf697 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -321,6 +321,17 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
321} 321}
322EXPORT_SYMBOL(hdmi_vendor_infoframe_init); 322EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
323 323
324static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *frame)
325{
326 /* for side by side (half) we also need to provide 3D_Ext_Data */
327 if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
328 return 6;
329 else if (frame->vic != 0 || frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
330 return 5;
331 else
332 return 4;
333}
334
324/** 335/**
325 * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer 336 * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
326 * @frame: HDMI infoframe 337 * @frame: HDMI infoframe
@@ -341,19 +352,11 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
341 u8 *ptr = buffer; 352 u8 *ptr = buffer;
342 size_t length; 353 size_t length;
343 354
344 /* empty info frame */
345 if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
346 return -EINVAL;
347
348 /* only one of those can be supplied */ 355 /* only one of those can be supplied */
349 if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) 356 if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
350 return -EINVAL; 357 return -EINVAL;
351 358
352 /* for side by side (half) we also need to provide 3D_Ext_Data */ 359 frame->length = hdmi_vendor_infoframe_length(frame);
353 if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
354 frame->length = 6;
355 else
356 frame->length = 5;
357 360
358 length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; 361 length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
359 362
@@ -372,14 +375,16 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
372 ptr[5] = 0x0c; 375 ptr[5] = 0x0c;
373 ptr[6] = 0x00; 376 ptr[6] = 0x00;
374 377
375 if (frame->vic) { 378 if (frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) {
376 ptr[7] = 0x1 << 5; /* video format */
377 ptr[8] = frame->vic;
378 } else {
379 ptr[7] = 0x2 << 5; /* video format */ 379 ptr[7] = 0x2 << 5; /* video format */
380 ptr[8] = (frame->s3d_struct & 0xf) << 4; 380 ptr[8] = (frame->s3d_struct & 0xf) << 4;
381 if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) 381 if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
382 ptr[9] = (frame->s3d_ext_data & 0xf) << 4; 382 ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
383 } else if (frame->vic) {
384 ptr[7] = 0x1 << 5; /* video format */
385 ptr[8] = frame->vic;
386 } else {
387 ptr[7] = 0x0 << 5; /* video format */
383 } 388 }
384 389
385 hdmi_infoframe_set_checksum(buffer, length); 390 hdmi_infoframe_set_checksum(buffer, length);
@@ -1161,7 +1166,7 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
1161 1166
1162 if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR || 1167 if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR ||
1163 ptr[1] != 1 || 1168 ptr[1] != 1 ||
1164 (ptr[2] != 5 && ptr[2] != 6)) 1169 (ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6))
1165 return -EINVAL; 1170 return -EINVAL;
1166 1171
1167 length = ptr[2]; 1172 length = ptr[2];
@@ -1189,16 +1194,22 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
1189 1194
1190 hvf->length = length; 1195 hvf->length = length;
1191 1196
1192 if (hdmi_video_format == 0x1) { 1197 if (hdmi_video_format == 0x2) {
1193 hvf->vic = ptr[4]; 1198 if (length != 5 && length != 6)
1194 } else if (hdmi_video_format == 0x2) { 1199 return -EINVAL;
1195 hvf->s3d_struct = ptr[4] >> 4; 1200 hvf->s3d_struct = ptr[4] >> 4;
1196 if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) { 1201 if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) {
1197 if (length == 6) 1202 if (length != 6)
1198 hvf->s3d_ext_data = ptr[5] >> 4;
1199 else
1200 return -EINVAL; 1203 return -EINVAL;
1204 hvf->s3d_ext_data = ptr[5] >> 4;
1201 } 1205 }
1206 } else if (hdmi_video_format == 0x1) {
1207 if (length != 5)
1208 return -EINVAL;
1209 hvf->vic = ptr[4];
1210 } else {
1211 if (length != 4)
1212 return -EINVAL;
1202 } 1213 }
1203 1214
1204 return 0; 1215 return 0;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0c5533813cde..e3ac3e157485 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -239,12 +239,14 @@ static void update_balloon_stats(struct virtio_balloon *vb)
239 all_vm_events(events); 239 all_vm_events(events);
240 si_meminfo(&i); 240 si_meminfo(&i);
241 241
242#ifdef CONFIG_VM_EVENT_COUNTERS
242 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, 243 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
243 pages_to_bytes(events[PSWPIN])); 244 pages_to_bytes(events[PSWPIN]));
244 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, 245 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
245 pages_to_bytes(events[PSWPOUT])); 246 pages_to_bytes(events[PSWPOUT]));
246 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); 247 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
247 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); 248 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
249#endif
248 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, 250 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
249 pages_to_bytes(i.freeram)); 251 pages_to_bytes(i.freeram));
250 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, 252 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
@@ -477,7 +479,9 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
477 tell_host(vb, vb->inflate_vq); 479 tell_host(vb, vb->inflate_vq);
478 480
479 /* balloon's page migration 2nd step -- deflate "page" */ 481 /* balloon's page migration 2nd step -- deflate "page" */
482 spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
480 balloon_page_delete(page); 483 balloon_page_delete(page);
484 spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
481 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; 485 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
482 set_page_pfns(vb, vb->pfns, page); 486 set_page_pfns(vb, vb->pfns, page);
483 tell_host(vb, vb->deflate_vq); 487 tell_host(vb, vb->deflate_vq);
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index a4621757a47f..dacb5919970c 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -113,6 +113,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
113 if (IS_ERR(mdev->clk)) 113 if (IS_ERR(mdev->clk))
114 return PTR_ERR(mdev->clk); 114 return PTR_ERR(mdev->clk);
115 115
116 err = clk_prepare_enable(mdev->clk);
117 if (err)
118 return err;
119
116 clkrate = clk_get_rate(mdev->clk); 120 clkrate = clk_get_rate(mdev->clk);
117 if (clkrate < 10000000) 121 if (clkrate < 10000000)
118 dev_warn(&pdev->dev, 122 dev_warn(&pdev->dev,
@@ -126,12 +130,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
126 130
127 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 131 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
128 mdev->regs = devm_ioremap_resource(&pdev->dev, res); 132 mdev->regs = devm_ioremap_resource(&pdev->dev, res);
129 if (IS_ERR(mdev->regs)) 133 if (IS_ERR(mdev->regs)) {
130 return PTR_ERR(mdev->regs); 134 err = PTR_ERR(mdev->regs);
131 135 goto out_disable_clk;
132 err = clk_prepare_enable(mdev->clk); 136 }
133 if (err)
134 return err;
135 137
136 /* Software reset 1-Wire module */ 138 /* Software reset 1-Wire module */
137 writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET); 139 writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
@@ -147,8 +149,12 @@ static int mxc_w1_probe(struct platform_device *pdev)
147 149
148 err = w1_add_master_device(&mdev->bus_master); 150 err = w1_add_master_device(&mdev->bus_master);
149 if (err) 151 if (err)
150 clk_disable_unprepare(mdev->clk); 152 goto out_disable_clk;
151 153
154 return 0;
155
156out_disable_clk:
157 clk_disable_unprepare(mdev->clk);
152 return err; 158 return err;
153} 159}
154 160
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 39886edfa222..88c1b8c01473 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -741,7 +741,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
741 741
742 /* slave modules need to be loaded in a context with unlocked mutex */ 742 /* slave modules need to be loaded in a context with unlocked mutex */
743 mutex_unlock(&dev->mutex); 743 mutex_unlock(&dev->mutex);
744 request_module("w1-family-0x%02x", rn->family); 744 request_module("w1-family-0x%02X", rn->family);
745 mutex_lock(&dev->mutex); 745 mutex_lock(&dev->mutex);
746 746
747 spin_lock(&w1_flock); 747 spin_lock(&w1_flock);
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 016bd9355190..2048aad91add 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -450,7 +450,7 @@ static bool watchdog_is_running(void)
450 450
451 is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0)) 451 is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
452 && (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF) 452 && (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
453 & F71808FG_FLAG_WD_EN); 453 & BIT(F71808FG_FLAG_WD_EN));
454 454
455 superio_exit(watchdog.sioaddr); 455 superio_exit(watchdog.sioaddr);
456 456
@@ -520,7 +520,8 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf,
520 char c; 520 char c;
521 if (get_user(c, buf + i)) 521 if (get_user(c, buf + i))
522 return -EFAULT; 522 return -EFAULT;
523 expect_close = (c == 'V'); 523 if (c == 'V')
524 expect_close = true;
524 } 525 }
525 526
526 /* Properly order writes across fork()ed processes */ 527 /* Properly order writes across fork()ed processes */
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 286369d4f0f5..be99112fad00 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -51,6 +51,7 @@ static char expect_release;
51static unsigned long hpwdt_is_open; 51static unsigned long hpwdt_is_open;
52 52
53static void __iomem *pci_mem_addr; /* the PCI-memory address */ 53static void __iomem *pci_mem_addr; /* the PCI-memory address */
54static unsigned long __iomem *hpwdt_nmistat;
54static unsigned long __iomem *hpwdt_timer_reg; 55static unsigned long __iomem *hpwdt_timer_reg;
55static unsigned long __iomem *hpwdt_timer_con; 56static unsigned long __iomem *hpwdt_timer_con;
56 57
@@ -474,6 +475,11 @@ static int hpwdt_time_left(void)
474} 475}
475 476
476#ifdef CONFIG_HPWDT_NMI_DECODING 477#ifdef CONFIG_HPWDT_NMI_DECODING
478static int hpwdt_my_nmi(void)
479{
480 return ioread8(hpwdt_nmistat) & 0x6;
481}
482
477/* 483/*
478 * NMI Handler 484 * NMI Handler
479 */ 485 */
@@ -485,6 +491,9 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
485 if (!hpwdt_nmi_decoding) 491 if (!hpwdt_nmi_decoding)
486 goto out; 492 goto out;
487 493
494 if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
495 return NMI_DONE;
496
488 spin_lock_irqsave(&rom_lock, rom_pl); 497 spin_lock_irqsave(&rom_lock, rom_pl);
489 if (!die_nmi_called && !is_icru && !is_uefi) 498 if (!die_nmi_called && !is_icru && !is_uefi)
490 asminline_call(&cmn_regs, cru_rom_addr); 499 asminline_call(&cmn_regs, cru_rom_addr);
@@ -700,7 +709,7 @@ static void dmi_find_icru(const struct dmi_header *dm, void *dummy)
700 smbios_proliant_ptr = (struct smbios_proliant_info *) dm; 709 smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
701 if (smbios_proliant_ptr->misc_features & 0x01) 710 if (smbios_proliant_ptr->misc_features & 0x01)
702 is_icru = 1; 711 is_icru = 1;
703 if (smbios_proliant_ptr->misc_features & 0x408) 712 if (smbios_proliant_ptr->misc_features & 0x1400)
704 is_uefi = 1; 713 is_uefi = 1;
705 } 714 }
706} 715}
@@ -840,6 +849,7 @@ static int hpwdt_init_one(struct pci_dev *dev,
840 retval = -ENOMEM; 849 retval = -ENOMEM;
841 goto error_pci_iomap; 850 goto error_pci_iomap;
842 } 851 }
852 hpwdt_nmistat = pci_mem_addr + 0x6e;
843 hpwdt_timer_reg = pci_mem_addr + 0x70; 853 hpwdt_timer_reg = pci_mem_addr + 0x70;
844 hpwdt_timer_con = pci_mem_addr + 0x72; 854 hpwdt_timer_con = pci_mem_addr + 0x72;
845 855
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 29ef719a6a3c..d69ab1e28d7d 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -161,15 +161,21 @@ static void imx2_wdt_timer_ping(unsigned long arg)
161 mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2); 161 mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2);
162} 162}
163 163
164static int imx2_wdt_set_timeout(struct watchdog_device *wdog, 164static void __imx2_wdt_set_timeout(struct watchdog_device *wdog,
165 unsigned int new_timeout) 165 unsigned int new_timeout)
166{ 166{
167 struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); 167 struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
168 168
169 wdog->timeout = new_timeout;
170
171 regmap_update_bits(wdev->regmap, IMX2_WDT_WCR, IMX2_WDT_WCR_WT, 169 regmap_update_bits(wdev->regmap, IMX2_WDT_WCR, IMX2_WDT_WCR_WT,
172 WDOG_SEC_TO_COUNT(new_timeout)); 170 WDOG_SEC_TO_COUNT(new_timeout));
171}
172
173static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
174 unsigned int new_timeout)
175{
176 __imx2_wdt_set_timeout(wdog, new_timeout);
177
178 wdog->timeout = new_timeout;
173 return 0; 179 return 0;
174} 180}
175 181
@@ -353,7 +359,11 @@ static int imx2_wdt_suspend(struct device *dev)
353 359
354 /* The watchdog IP block is running */ 360 /* The watchdog IP block is running */
355 if (imx2_wdt_is_running(wdev)) { 361 if (imx2_wdt_is_running(wdev)) {
356 imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); 362 /*
363 * Don't update wdog->timeout, we'll restore the current value
364 * during resume.
365 */
366 __imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
357 imx2_wdt_ping(wdog); 367 imx2_wdt_ping(wdog);
358 368
359 /* The watchdog is not active */ 369 /* The watchdog is not active */
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index 2b28c00da0df..dfe20b81ced5 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -54,7 +54,7 @@
54#define SB800_PM_WATCHDOG_CONFIG 0x4C 54#define SB800_PM_WATCHDOG_CONFIG 0x4C
55 55
56#define SB800_PCI_WATCHDOG_DECODE_EN (1 << 0) 56#define SB800_PCI_WATCHDOG_DECODE_EN (1 << 0)
57#define SB800_PM_WATCHDOG_DISABLE (1 << 2) 57#define SB800_PM_WATCHDOG_DISABLE (1 << 1)
58#define SB800_PM_WATCHDOG_SECOND_RES (3 << 0) 58#define SB800_PM_WATCHDOG_SECOND_RES (3 << 0)
59#define SB800_ACPI_MMIO_DECODE_EN (1 << 0) 59#define SB800_ACPI_MMIO_DECODE_EN (1 << 0)
60#define SB800_ACPI_MMIO_SEL (1 << 1) 60#define SB800_ACPI_MMIO_SEL (1 << 1)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 73708acce3ca..3a14948269b1 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -239,7 +239,7 @@ config XEN_ACPI_HOTPLUG_CPU
239 239
240config XEN_ACPI_PROCESSOR 240config XEN_ACPI_PROCESSOR
241 tristate "Xen ACPI processor" 241 tristate "Xen ACPI processor"
242 depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ 242 depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
243 default m 243 default m
244 help 244 help
245 This ACPI processor uploads Power Management information to the Xen 245 This ACPI processor uploads Power Management information to the Xen
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 83ec7b89d308..21d679f88dfa 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -637,8 +637,6 @@ static void __unbind_from_irq(unsigned int irq)
637 xen_irq_info_cleanup(info); 637 xen_irq_info_cleanup(info);
638 } 638 }
639 639
640 BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
641
642 xen_free_irq(irq); 640 xen_free_irq(irq);
643} 641}
644 642
@@ -764,8 +762,8 @@ out:
764 mutex_unlock(&irq_mapping_update_lock); 762 mutex_unlock(&irq_mapping_update_lock);
765 return irq; 763 return irq;
766error_irq: 764error_irq:
767 for (; i >= 0; i--) 765 while (nvec--)
768 __unbind_from_irq(irq + i); 766 __unbind_from_irq(irq + nvec);
769 mutex_unlock(&irq_mapping_update_lock); 767 mutex_unlock(&irq_mapping_update_lock);
770 return ret; 768 return ret;
771} 769}
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index a4d749665c9f..1865bcfa869b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -378,10 +378,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
378 } 378 }
379 range = 0; 379 range = 0;
380 while (range < pages) { 380 while (range < pages) {
381 if (map->unmap_ops[offset+range].handle == -1) { 381 if (map->unmap_ops[offset+range].handle == -1)
382 range--;
383 break; 382 break;
384 }
385 range++; 383 range++;
386 } 384 }
387 err = __unmap_grant_pages(map, offset, range); 385 err = __unmap_grant_pages(map, offset, range);
@@ -876,8 +874,10 @@ unlock_out:
876out_unlock_put: 874out_unlock_put:
877 mutex_unlock(&priv->lock); 875 mutex_unlock(&priv->lock);
878out_put_map: 876out_put_map:
879 if (use_ptemod) 877 if (use_ptemod) {
880 map->vma = NULL; 878 map->vma = NULL;
879 unmap_grant_pages(map, 0, map->count);
880 }
881 gntdev_put_map(priv, map); 881 gntdev_put_map(priv, map);
882 return err; 882 return err;
883} 883}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index c49f79ed58c5..4b7ce442d8e5 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -328,7 +328,7 @@ static void gnttab_handle_deferred(unsigned long unused)
328 if (entry->page) { 328 if (entry->page) {
329 pr_debug("freeing g.e. %#x (pfn %#lx)\n", 329 pr_debug("freeing g.e. %#x (pfn %#lx)\n",
330 entry->ref, page_to_pfn(entry->page)); 330 entry->ref, page_to_pfn(entry->page));
331 __free_page(entry->page); 331 put_page(entry->page);
332 } else 332 } else
333 pr_info("freeing g.e. %#x\n", entry->ref); 333 pr_info("freeing g.e. %#x\n", entry->ref);
334 kfree(entry); 334 kfree(entry);
@@ -384,7 +384,7 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
384 if (gnttab_end_foreign_access_ref(ref, readonly)) { 384 if (gnttab_end_foreign_access_ref(ref, readonly)) {
385 put_free_entry(ref); 385 put_free_entry(ref);
386 if (page != 0) 386 if (page != 0)
387 free_page(page); 387 put_page(virt_to_page(page));
388 } else 388 } else
389 gnttab_add_deferred(ref, readonly, 389 gnttab_add_deferred(ref, readonly,
390 page ? virt_to_page(page) : NULL); 390 page ? virt_to_page(page) : NULL);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f7b19c25c3a4..1889e928a0da 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -359,7 +359,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
359 * physical address */ 359 * physical address */
360 phys = xen_bus_to_phys(dev_addr); 360 phys = xen_bus_to_phys(dev_addr);
361 361
362 if (((dev_addr + size - 1 > dma_mask)) || 362 if (((dev_addr + size - 1 <= dma_mask)) ||
363 range_straddles_page_boundary(phys, size)) 363 range_straddles_page_boundary(phys, size))
364 xen_destroy_contiguous_region(phys, order); 364 xen_destroy_contiguous_region(phys, order);
365 365
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 2e319d0c395d..84cc98f3cabe 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -362,9 +362,9 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
362 } 362 }
363 /* There are more ACPI Processor objects than in x2APIC or MADT. 363 /* There are more ACPI Processor objects than in x2APIC or MADT.
364 * This can happen with incorrect ACPI SSDT declerations. */ 364 * This can happen with incorrect ACPI SSDT declerations. */
365 if (acpi_id > nr_acpi_bits) { 365 if (acpi_id >= nr_acpi_bits) {
366 pr_debug("We only have %u, trying to set %u\n", 366 pr_debug("max acpi id %u, trying to set %u\n",
367 nr_acpi_bits, acpi_id); 367 nr_acpi_bits - 1, acpi_id);
368 return AE_OK; 368 return AE_OK;
369 } 369 }
370 /* OK, There is a ACPI Processor object */ 370 /* OK, There is a ACPI Processor object */
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 33a31cfef55d..c2d447687e33 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -470,8 +470,11 @@ int xenbus_probe_node(struct xen_bus_type *bus,
470 470
471 /* Register with generic device framework. */ 471 /* Register with generic device framework. */
472 err = device_register(&xendev->dev); 472 err = device_register(&xendev->dev);
473 if (err) 473 if (err) {
474 put_device(&xendev->dev);
475 xendev = NULL;
474 goto fail; 476 goto fail;
477 }
475 478
476 return 0; 479 return 0;
477fail: 480fail:
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index d295d9878dff..8ec79385d3cc 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -16,6 +16,7 @@
16#include <linux/bitops.h> 16#include <linux/bitops.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20 21
21#include <asm/byteorder.h> 22#include <asm/byteorder.h>
@@ -185,6 +186,17 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
185 z->dev.parent = &bus->dev; 186 z->dev.parent = &bus->dev;
186 z->dev.bus = &zorro_bus_type; 187 z->dev.bus = &zorro_bus_type;
187 z->dev.id = i; 188 z->dev.id = i;
189 switch (z->rom.er_Type & ERT_TYPEMASK) {
190 case ERT_ZORROIII:
191 z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
192 break;
193
194 case ERT_ZORROII:
195 default:
196 z->dev.coherent_dma_mask = DMA_BIT_MASK(24);
197 break;
198 }
199 z->dev.dma_mask = &z->dev.coherent_dma_mask;
188 } 200 }
189 201
190 /* ... then register them */ 202 /* ... then register them */
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 181e05b46e72..92448d0ad900 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -224,9 +224,10 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
224 224
225 affs_lock_dir(dir); 225 affs_lock_dir(dir);
226 bh = affs_find_entry(dir, dentry); 226 bh = affs_find_entry(dir, dentry);
227 affs_unlock_dir(dir); 227 if (IS_ERR(bh)) {
228 if (IS_ERR(bh)) 228 affs_unlock_dir(dir);
229 return ERR_CAST(bh); 229 return ERR_CAST(bh);
230 }
230 if (bh) { 231 if (bh) {
231 u32 ino = bh->b_blocknr; 232 u32 ino = bh->b_blocknr;
232 233
@@ -240,10 +241,13 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
240 } 241 }
241 affs_brelse(bh); 242 affs_brelse(bh);
242 inode = affs_iget(sb, ino); 243 inode = affs_iget(sb, ino);
243 if (IS_ERR(inode)) 244 if (IS_ERR(inode)) {
245 affs_unlock_dir(dir);
244 return ERR_CAST(inode); 246 return ERR_CAST(inode);
247 }
245 } 248 }
246 d_add(dentry, inode); 249 d_add(dentry, inode);
250 affs_unlock_dir(dir);
247 return NULL; 251 return NULL;
248} 252}
249 253
diff --git a/fs/aio.c b/fs/aio.c
index fe4f49212b99..c283eb03cb38 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -68,9 +68,9 @@ struct aio_ring {
68#define AIO_RING_PAGES 8 68#define AIO_RING_PAGES 8
69 69
70struct kioctx_table { 70struct kioctx_table {
71 struct rcu_head rcu; 71 struct rcu_head rcu;
72 unsigned nr; 72 unsigned nr;
73 struct kioctx *table[]; 73 struct kioctx __rcu *table[];
74}; 74};
75 75
76struct kioctx_cpu { 76struct kioctx_cpu {
@@ -115,7 +115,8 @@ struct kioctx {
115 struct page **ring_pages; 115 struct page **ring_pages;
116 long nr_pages; 116 long nr_pages;
117 117
118 struct work_struct free_work; 118 struct rcu_head free_rcu;
119 struct work_struct free_work; /* see free_ioctx() */
119 120
120 /* 121 /*
121 * signals when all in-flight requests are done 122 * signals when all in-flight requests are done
@@ -326,7 +327,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
326 for (i = 0; i < table->nr; i++) { 327 for (i = 0; i < table->nr; i++) {
327 struct kioctx *ctx; 328 struct kioctx *ctx;
328 329
329 ctx = table->table[i]; 330 ctx = rcu_dereference(table->table[i]);
330 if (ctx && ctx->aio_ring_file == file) { 331 if (ctx && ctx->aio_ring_file == file) {
331 if (!atomic_read(&ctx->dead)) { 332 if (!atomic_read(&ctx->dead)) {
332 ctx->user_id = ctx->mmap_base = vma->vm_start; 333 ctx->user_id = ctx->mmap_base = vma->vm_start;
@@ -573,6 +574,12 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
573 return cancel(&kiocb->common); 574 return cancel(&kiocb->common);
574} 575}
575 576
577/*
578 * free_ioctx() should be RCU delayed to synchronize against the RCU
579 * protected lookup_ioctx() and also needs process context to call
580 * aio_free_ring(), so the double bouncing through kioctx->free_rcu and
581 * ->free_work.
582 */
576static void free_ioctx(struct work_struct *work) 583static void free_ioctx(struct work_struct *work)
577{ 584{
578 struct kioctx *ctx = container_of(work, struct kioctx, free_work); 585 struct kioctx *ctx = container_of(work, struct kioctx, free_work);
@@ -586,6 +593,14 @@ static void free_ioctx(struct work_struct *work)
586 kmem_cache_free(kioctx_cachep, ctx); 593 kmem_cache_free(kioctx_cachep, ctx);
587} 594}
588 595
596static void free_ioctx_rcufn(struct rcu_head *head)
597{
598 struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);
599
600 INIT_WORK(&ctx->free_work, free_ioctx);
601 schedule_work(&ctx->free_work);
602}
603
589static void free_ioctx_reqs(struct percpu_ref *ref) 604static void free_ioctx_reqs(struct percpu_ref *ref)
590{ 605{
591 struct kioctx *ctx = container_of(ref, struct kioctx, reqs); 606 struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
@@ -594,8 +609,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
594 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) 609 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
595 complete(&ctx->rq_wait->comp); 610 complete(&ctx->rq_wait->comp);
596 611
597 INIT_WORK(&ctx->free_work, free_ioctx); 612 /* Synchronize against RCU protected table->table[] dereferences */
598 schedule_work(&ctx->free_work); 613 call_rcu(&ctx->free_rcu, free_ioctx_rcufn);
599} 614}
600 615
601/* 616/*
@@ -613,9 +628,8 @@ static void free_ioctx_users(struct percpu_ref *ref)
613 while (!list_empty(&ctx->active_reqs)) { 628 while (!list_empty(&ctx->active_reqs)) {
614 req = list_first_entry(&ctx->active_reqs, 629 req = list_first_entry(&ctx->active_reqs,
615 struct aio_kiocb, ki_list); 630 struct aio_kiocb, ki_list);
616
617 list_del_init(&req->ki_list);
618 kiocb_cancel(req); 631 kiocb_cancel(req);
632 list_del_init(&req->ki_list);
619 } 633 }
620 634
621 spin_unlock_irq(&ctx->ctx_lock); 635 spin_unlock_irq(&ctx->ctx_lock);
@@ -636,9 +650,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
636 while (1) { 650 while (1) {
637 if (table) 651 if (table)
638 for (i = 0; i < table->nr; i++) 652 for (i = 0; i < table->nr; i++)
639 if (!table->table[i]) { 653 if (!rcu_access_pointer(table->table[i])) {
640 ctx->id = i; 654 ctx->id = i;
641 table->table[i] = ctx; 655 rcu_assign_pointer(table->table[i], ctx);
642 spin_unlock(&mm->ioctx_lock); 656 spin_unlock(&mm->ioctx_lock);
643 657
644 /* While kioctx setup is in progress, 658 /* While kioctx setup is in progress,
@@ -813,11 +827,11 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
813 } 827 }
814 828
815 table = rcu_dereference_raw(mm->ioctx_table); 829 table = rcu_dereference_raw(mm->ioctx_table);
816 WARN_ON(ctx != table->table[ctx->id]); 830 WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
817 table->table[ctx->id] = NULL; 831 RCU_INIT_POINTER(table->table[ctx->id], NULL);
818 spin_unlock(&mm->ioctx_lock); 832 spin_unlock(&mm->ioctx_lock);
819 833
820 /* percpu_ref_kill() will do the necessary call_rcu() */ 834 /* free_ioctx_reqs() will do the necessary RCU synchronization */
821 wake_up_all(&ctx->wait); 835 wake_up_all(&ctx->wait);
822 836
823 /* 837 /*
@@ -859,7 +873,8 @@ void exit_aio(struct mm_struct *mm)
859 873
860 skipped = 0; 874 skipped = 0;
861 for (i = 0; i < table->nr; ++i) { 875 for (i = 0; i < table->nr; ++i) {
862 struct kioctx *ctx = table->table[i]; 876 struct kioctx *ctx =
877 rcu_dereference_protected(table->table[i], true);
863 878
864 if (!ctx) { 879 if (!ctx) {
865 skipped++; 880 skipped++;
@@ -1048,10 +1063,10 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1048 if (!table || id >= table->nr) 1063 if (!table || id >= table->nr)
1049 goto out; 1064 goto out;
1050 1065
1051 ctx = table->table[id]; 1066 ctx = rcu_dereference(table->table[id]);
1052 if (ctx && ctx->user_id == ctx_id) { 1067 if (ctx && ctx->user_id == ctx_id) {
1053 percpu_ref_get(&ctx->users); 1068 if (percpu_ref_tryget_live(&ctx->users))
1054 ret = ctx; 1069 ret = ctx;
1055 } 1070 }
1056out: 1071out:
1057 rcu_read_unlock(); 1072 rcu_read_unlock();
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 7a54c6a867c8..500098cdb960 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -746,7 +746,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, umode_t m
746 746
747 autofs4_del_active(dentry); 747 autofs4_del_active(dentry);
748 748
749 inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555); 749 inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
750 if (!inode) 750 if (!inode)
751 return -ENOMEM; 751 return -ENOMEM;
752 d_add(dentry, inode); 752 d_add(dentry, inode);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 78f005f37847..dd784bcf7c96 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -369,8 +369,13 @@ static Node *create_entry(const char __user *buffer, size_t count)
369 s = strchr(p, del); 369 s = strchr(p, del);
370 if (!s) 370 if (!s)
371 goto einval; 371 goto einval;
372 *s++ = '\0'; 372 *s = '\0';
373 e->offset = simple_strtoul(p, &p, 10); 373 if (p != s) {
374 int r = kstrtoint(p, 10, &e->offset);
375 if (r != 0 || e->offset < 0)
376 goto einval;
377 }
378 p = s;
374 if (*p++) 379 if (*p++)
375 goto einval; 380 goto einval;
376 pr_debug("register: offset: %#x\n", e->offset); 381 pr_debug("register: offset: %#x\n", e->offset);
@@ -410,7 +415,8 @@ static Node *create_entry(const char __user *buffer, size_t count)
410 if (e->mask && 415 if (e->mask &&
411 string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size) 416 string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size)
412 goto einval; 417 goto einval;
413 if (e->size + e->offset > BINPRM_BUF_SIZE) 418 if (e->size > BINPRM_BUF_SIZE ||
419 BINPRM_BUF_SIZE - e->size < e->offset)
414 goto einval; 420 goto einval;
415 pr_debug("register: magic/mask length: %i\n", e->size); 421 pr_debug("register: magic/mask length: %i\n", e->size);
416 if (USE_DEBUG) { 422 if (USE_DEBUG) {
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index fb3e64d37cb4..6b16b8653d98 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -82,12 +82,6 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
82 switch (type) { 82 switch (type) {
83 case ACL_TYPE_ACCESS: 83 case ACL_TYPE_ACCESS:
84 name = POSIX_ACL_XATTR_ACCESS; 84 name = POSIX_ACL_XATTR_ACCESS;
85 if (acl) {
86 ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
87 if (ret)
88 return ret;
89 }
90 ret = 0;
91 break; 85 break;
92 case ACL_TYPE_DEFAULT: 86 case ACL_TYPE_DEFAULT:
93 if (!S_ISDIR(inode->i_mode)) 87 if (!S_ISDIR(inode->i_mode))
@@ -123,7 +117,18 @@ out:
123 117
124int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) 118int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
125{ 119{
126 return __btrfs_set_acl(NULL, inode, acl, type); 120 int ret;
121 umode_t old_mode = inode->i_mode;
122
123 if (type == ACL_TYPE_ACCESS && acl) {
124 ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
125 if (ret)
126 return ret;
127 }
128 ret = __btrfs_set_acl(NULL, inode, acl, type);
129 if (ret)
130 inode->i_mode = old_mode;
131 return ret;
127} 132}
128 133
129/* 134/*
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0f2b7c622ce3..38ee08675468 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2497,10 +2497,8 @@ read_block_for_search(struct btrfs_trans_handle *trans,
2497 if (p->reada) 2497 if (p->reada)
2498 reada_for_search(root, p, level, slot, key->objectid); 2498 reada_for_search(root, p, level, slot, key->objectid);
2499 2499
2500 btrfs_release_path(p);
2501
2502 ret = -EAGAIN; 2500 ret = -EAGAIN;
2503 tmp = read_tree_block(root, blocknr, 0); 2501 tmp = read_tree_block(root, blocknr, gen);
2504 if (!IS_ERR(tmp)) { 2502 if (!IS_ERR(tmp)) {
2505 /* 2503 /*
2506 * If the read above didn't mark this buffer up to date, 2504 * If the read above didn't mark this buffer up to date,
@@ -2512,6 +2510,8 @@ read_block_for_search(struct btrfs_trans_handle *trans,
2512 ret = -EIO; 2510 ret = -EIO;
2513 free_extent_buffer(tmp); 2511 free_extent_buffer(tmp);
2514 } 2512 }
2513
2514 btrfs_release_path(p);
2515 return ret; 2515 return ret;
2516} 2516}
2517 2517
@@ -2769,6 +2769,8 @@ again:
2769 * contention with the cow code 2769 * contention with the cow code
2770 */ 2770 */
2771 if (cow) { 2771 if (cow) {
2772 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2773
2772 /* 2774 /*
2773 * if we don't really need to cow this block 2775 * if we don't really need to cow this block
2774 * then we don't want to set the path blocking, 2776 * then we don't want to set the path blocking,
@@ -2793,9 +2795,13 @@ again:
2793 } 2795 }
2794 2796
2795 btrfs_set_path_blocking(p); 2797 btrfs_set_path_blocking(p);
2796 err = btrfs_cow_block(trans, root, b, 2798 if (last_level)
2797 p->nodes[level + 1], 2799 err = btrfs_cow_block(trans, root, b, NULL, 0,
2798 p->slots[level + 1], &b); 2800 &b);
2801 else
2802 err = btrfs_cow_block(trans, root, b,
2803 p->nodes[level + 1],
2804 p->slots[level + 1], &b);
2799 if (err) { 2805 if (err) {
2800 ret = err; 2806 ret = err;
2801 goto done; 2807 goto done;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 85b207d19aa5..d106b981d86f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -923,7 +923,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
923 if (bio_flags & EXTENT_BIO_TREE_LOG) 923 if (bio_flags & EXTENT_BIO_TREE_LOG)
924 return 0; 924 return 0;
925#ifdef CONFIG_X86 925#ifdef CONFIG_X86
926 if (cpu_has_xmm4_2) 926 if (static_cpu_has(X86_FEATURE_XMM4_2))
927 return 0; 927 return 0;
928#endif 928#endif
929 return 1; 929 return 1;
@@ -1196,7 +1196,7 @@ static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
1196 if (!writers) 1196 if (!writers)
1197 return ERR_PTR(-ENOMEM); 1197 return ERR_PTR(-ENOMEM);
1198 1198
1199 ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL); 1199 ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
1200 if (ret < 0) { 1200 if (ret < 0) {
1201 kfree(writers); 1201 kfree(writers);
1202 return ERR_PTR(ret); 1202 return ERR_PTR(ret);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 260f94b019c9..982a9d509817 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4392,6 +4392,7 @@ again:
4392 if (wait_for_alloc) { 4392 if (wait_for_alloc) {
4393 mutex_unlock(&fs_info->chunk_mutex); 4393 mutex_unlock(&fs_info->chunk_mutex);
4394 wait_for_alloc = 0; 4394 wait_for_alloc = 0;
4395 cond_resched();
4395 goto again; 4396 goto again;
4396 } 4397 }
4397 4398
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e767f347f2b1..88bee6703cc0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2534,7 +2534,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2534 if (!uptodate) { 2534 if (!uptodate) {
2535 ClearPageUptodate(page); 2535 ClearPageUptodate(page);
2536 SetPageError(page); 2536 SetPageError(page);
2537 ret = ret < 0 ? ret : -EIO; 2537 ret = err < 0 ? err : -EIO;
2538 mapping_set_error(page->mapping, ret); 2538 mapping_set_error(page->mapping, ret);
2539 } 2539 }
2540 return 0; 2540 return 0;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index d4a6eef31854..052973620595 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1861,10 +1861,19 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
1861static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end) 1861static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
1862{ 1862{
1863 int ret; 1863 int ret;
1864 struct blk_plug plug;
1864 1865
1866 /*
1867 * This is only called in fsync, which would do synchronous writes, so
1868 * a plug can merge adjacent IOs as much as possible. Esp. in case of
1869 * multiple disks using raid profile, a large IO can be split to
1870 * several segments of stripe length (currently 64K).
1871 */
1872 blk_start_plug(&plug);
1865 atomic_inc(&BTRFS_I(inode)->sync_writers); 1873 atomic_inc(&BTRFS_I(inode)->sync_writers);
1866 ret = btrfs_fdatawrite_range(inode, start, end); 1874 ret = btrfs_fdatawrite_range(inode, start, end);
1867 atomic_dec(&BTRFS_I(inode)->sync_writers); 1875 atomic_dec(&BTRFS_I(inode)->sync_writers);
1876 blk_finish_plug(&plug);
1868 1877
1869 return ret; 1878 return ret;
1870} 1879}
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index cfe99bec49de..45934deacfd7 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1258,7 +1258,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
1258 /* Lock all pages first so we can lock the extent safely. */ 1258 /* Lock all pages first so we can lock the extent safely. */
1259 ret = io_ctl_prepare_pages(io_ctl, inode, 0); 1259 ret = io_ctl_prepare_pages(io_ctl, inode, 0);
1260 if (ret) 1260 if (ret)
1261 goto out; 1261 goto out_unlock;
1262 1262
1263 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, 1263 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
1264 0, &cached_state); 1264 0, &cached_state);
@@ -1351,6 +1351,7 @@ out_nospc_locked:
1351out_nospc: 1351out_nospc:
1352 cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list); 1352 cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);
1353 1353
1354out_unlock:
1354 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) 1355 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
1355 up_write(&block_group->data_rwsem); 1356 up_write(&block_group->data_rwsem);
1356 1357
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index af1da85da509..b895be3d4311 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1202,6 +1202,8 @@ static noinline int csum_exist_in_range(struct btrfs_root *root,
1202 list_del(&sums->list); 1202 list_del(&sums->list);
1203 kfree(sums); 1203 kfree(sums);
1204 } 1204 }
1205 if (ret < 0)
1206 return ret;
1205 return 1; 1207 return 1;
1206} 1208}
1207 1209
@@ -1292,8 +1294,11 @@ next_slot:
1292 leaf = path->nodes[0]; 1294 leaf = path->nodes[0];
1293 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 1295 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1294 ret = btrfs_next_leaf(root, path); 1296 ret = btrfs_next_leaf(root, path);
1295 if (ret < 0) 1297 if (ret < 0) {
1298 if (cow_start != (u64)-1)
1299 cur_offset = cow_start;
1296 goto error; 1300 goto error;
1301 }
1297 if (ret > 0) 1302 if (ret > 0)
1298 break; 1303 break;
1299 leaf = path->nodes[0]; 1304 leaf = path->nodes[0];
@@ -1348,10 +1353,23 @@ next_slot:
1348 goto out_check; 1353 goto out_check;
1349 if (btrfs_extent_readonly(root, disk_bytenr)) 1354 if (btrfs_extent_readonly(root, disk_bytenr))
1350 goto out_check; 1355 goto out_check;
1351 if (btrfs_cross_ref_exist(trans, root, ino, 1356 ret = btrfs_cross_ref_exist(trans, root, ino,
1352 found_key.offset - 1357 found_key.offset -
1353 extent_offset, disk_bytenr)) 1358 extent_offset, disk_bytenr);
1359 if (ret) {
1360 /*
1361 * ret could be -EIO if the above fails to read
1362 * metadata.
1363 */
1364 if (ret < 0) {
1365 if (cow_start != (u64)-1)
1366 cur_offset = cow_start;
1367 goto error;
1368 }
1369
1370 WARN_ON_ONCE(nolock);
1354 goto out_check; 1371 goto out_check;
1372 }
1355 disk_bytenr += extent_offset; 1373 disk_bytenr += extent_offset;
1356 disk_bytenr += cur_offset - found_key.offset; 1374 disk_bytenr += cur_offset - found_key.offset;
1357 num_bytes = min(end + 1, extent_end) - cur_offset; 1375 num_bytes = min(end + 1, extent_end) - cur_offset;
@@ -1369,8 +1387,20 @@ next_slot:
1369 * this ensure that csum for a given extent are 1387 * this ensure that csum for a given extent are
1370 * either valid or do not exist. 1388 * either valid or do not exist.
1371 */ 1389 */
1372 if (csum_exist_in_range(root, disk_bytenr, num_bytes)) 1390 ret = csum_exist_in_range(root, disk_bytenr, num_bytes);
1391 if (ret) {
1392 /*
1393 * ret could be -EIO if the above fails to read
1394 * metadata.
1395 */
1396 if (ret < 0) {
1397 if (cow_start != (u64)-1)
1398 cur_offset = cow_start;
1399 goto error;
1400 }
1401 WARN_ON_ONCE(nolock);
1373 goto out_check; 1402 goto out_check;
1403 }
1374 nocow = 1; 1404 nocow = 1;
1375 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 1405 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1376 extent_end = found_key.offset + 1406 extent_end = found_key.offset +
@@ -2015,7 +2045,15 @@ again:
2015 goto out; 2045 goto out;
2016 } 2046 }
2017 2047
2018 btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); 2048 ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
2049 &cached_state);
2050 if (ret) {
2051 mapping_set_error(page->mapping, ret);
2052 end_extent_writepage(page, ret, page_start, page_end);
2053 ClearPageChecked(page);
2054 goto out;
2055 }
2056
2019 ClearPageChecked(page); 2057 ClearPageChecked(page);
2020 set_page_dirty(page); 2058 set_page_dirty(page);
2021out: 2059out:
@@ -6402,8 +6440,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6402 goto out_unlock_inode; 6440 goto out_unlock_inode;
6403 } else { 6441 } else {
6404 btrfs_update_inode(trans, root, inode); 6442 btrfs_update_inode(trans, root, inode);
6405 unlock_new_inode(inode); 6443 d_instantiate_new(dentry, inode);
6406 d_instantiate(dentry, inode);
6407 } 6444 }
6408 6445
6409out_unlock: 6446out_unlock:
@@ -6478,8 +6515,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
6478 goto out_unlock_inode; 6515 goto out_unlock_inode;
6479 6516
6480 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 6517 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6481 unlock_new_inode(inode); 6518 d_instantiate_new(dentry, inode);
6482 d_instantiate(dentry, inode);
6483 6519
6484out_unlock: 6520out_unlock:
6485 btrfs_end_transaction(trans, root); 6521 btrfs_end_transaction(trans, root);
@@ -6622,12 +6658,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6622 if (err) 6658 if (err)
6623 goto out_fail_inode; 6659 goto out_fail_inode;
6624 6660
6625 d_instantiate(dentry, inode); 6661 d_instantiate_new(dentry, inode);
6626 /*
6627 * mkdir is special. We're unlocking after we call d_instantiate
6628 * to avoid a race with nfsd calling d_instantiate.
6629 */
6630 unlock_new_inode(inode);
6631 drop_on_err = 0; 6662 drop_on_err = 0;
6632 6663
6633out_fail: 6664out_fail:
@@ -9778,8 +9809,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
9778 goto out_unlock_inode; 9809 goto out_unlock_inode;
9779 } 9810 }
9780 9811
9781 unlock_new_inode(inode); 9812 d_instantiate_new(dentry, inode);
9782 d_instantiate(dentry, inode);
9783 9813
9784out_unlock: 9814out_unlock:
9785 btrfs_end_transaction(trans, root); 9815 btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 9c3b9d07f341..6caeb946fc1d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2231,7 +2231,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
2231 if (!path) 2231 if (!path)
2232 return -ENOMEM; 2232 return -ENOMEM;
2233 2233
2234 ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX]; 2234 ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];
2235 2235
2236 key.objectid = tree_id; 2236 key.objectid = tree_id;
2237 key.type = BTRFS_ROOT_ITEM_KEY; 2237 key.type = BTRFS_ROOT_ITEM_KEY;
@@ -3923,11 +3923,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
3923 if (!(src_file.file->f_mode & FMODE_READ)) 3923 if (!(src_file.file->f_mode & FMODE_READ))
3924 goto out_fput; 3924 goto out_fput;
3925 3925
3926 /* don't make the dst file partly checksummed */
3927 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3928 (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
3929 goto out_fput;
3930
3931 ret = -EISDIR; 3926 ret = -EISDIR;
3932 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode)) 3927 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
3933 goto out_fput; 3928 goto out_fput;
@@ -3942,6 +3937,13 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
3942 mutex_lock(&src->i_mutex); 3937 mutex_lock(&src->i_mutex);
3943 } 3938 }
3944 3939
3940 /* don't make the dst file partly checksummed */
3941 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3942 (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
3943 ret = -EINVAL;
3944 goto out_unlock;
3945 }
3946
3945 /* determine range to clone */ 3947 /* determine range to clone */
3946 ret = -EINVAL; 3948 ret = -EINVAL;
3947 if (off + len > src->i_size || off + len < off) 3949 if (off + len > src->i_size || off + len < off)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 88d9b66e2207..a751937dded5 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2186,6 +2186,21 @@ void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
2186} 2186}
2187 2187
2188/* 2188/*
2189 * Check if the leaf is the last leaf. Which means all node pointers
2190 * are at their last position.
2191 */
2192static bool is_last_leaf(struct btrfs_path *path)
2193{
2194 int i;
2195
2196 for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
2197 if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
2198 return false;
2199 }
2200 return true;
2201}
2202
2203/*
2189 * returns < 0 on error, 0 when more leafs are to be scanned. 2204 * returns < 0 on error, 0 when more leafs are to be scanned.
2190 * returns 1 when done. 2205 * returns 1 when done.
2191 */ 2206 */
@@ -2198,6 +2213,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
2198 struct ulist *roots = NULL; 2213 struct ulist *roots = NULL;
2199 struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem); 2214 struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
2200 u64 num_bytes; 2215 u64 num_bytes;
2216 bool done;
2201 int slot; 2217 int slot;
2202 int ret; 2218 int ret;
2203 2219
@@ -2225,6 +2241,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
2225 mutex_unlock(&fs_info->qgroup_rescan_lock); 2241 mutex_unlock(&fs_info->qgroup_rescan_lock);
2226 return ret; 2242 return ret;
2227 } 2243 }
2244 done = is_last_leaf(path);
2228 2245
2229 btrfs_item_key_to_cpu(path->nodes[0], &found, 2246 btrfs_item_key_to_cpu(path->nodes[0], &found,
2230 btrfs_header_nritems(path->nodes[0]) - 1); 2247 btrfs_header_nritems(path->nodes[0]) - 1);
@@ -2271,6 +2288,8 @@ out:
2271 } 2288 }
2272 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem); 2289 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2273 2290
2291 if (done && !ret)
2292 ret = 1;
2274 return ret; 2293 return ret;
2275} 2294}
2276 2295
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 1a33d3eb36de..b9fa99577bf7 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2160,11 +2160,21 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2160 } 2160 }
2161 2161
2162 /* 2162 /*
2163 * reconstruct from the q stripe if they are 2163 * Loop retry:
2164 * asking for mirror 3 2164 * for 'mirror == 2', reconstruct from all other stripes.
2165 * for 'mirror_num > 2', select a stripe to fail on every retry.
2165 */ 2166 */
2166 if (mirror_num == 3) 2167 if (mirror_num > 2) {
2167 rbio->failb = rbio->real_stripes - 2; 2168 /*
2169 * 'mirror == 3' is to fail the p stripe and
2170 * reconstruct from the q stripe. 'mirror > 3' is to
2171 * fail a data stripe and reconstruct from p+q stripe.
2172 */
2173 rbio->failb = rbio->real_stripes - (mirror_num - 1);
2174 ASSERT(rbio->failb > 0);
2175 if (rbio->failb <= rbio->faila)
2176 rbio->failb--;
2177 }
2168 2178
2169 ret = lock_stripe_add(rbio); 2179 ret = lock_stripe_add(rbio);
2170 2180
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index b091d94ceef6..6dca9f937bf6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2513,7 +2513,7 @@ static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2513 have_csum = scrub_find_csum(sctx, logical, csum); 2513 have_csum = scrub_find_csum(sctx, logical, csum);
2514 if (have_csum == 0) 2514 if (have_csum == 0)
2515 ++sctx->stat.no_csum; 2515 ++sctx->stat.no_csum;
2516 if (sctx->is_dev_replace && !have_csum) { 2516 if (0 && sctx->is_dev_replace && !have_csum) {
2517 ret = copy_nocow_pages(sctx, logical, l, 2517 ret = copy_nocow_pages(sctx, logical, l,
2518 mirror_num, 2518 mirror_num,
2519 physical_for_dev_replace); 2519 physical_for_dev_replace);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index c5bbb5300658..83c73738165e 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4674,6 +4674,9 @@ static int send_hole(struct send_ctx *sctx, u64 end)
4674 u64 len; 4674 u64 len;
4675 int ret = 0; 4675 int ret = 0;
4676 4676
4677 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
4678 return send_update_extent(sctx, offset, end - offset);
4679
4677 p = fs_path_alloc(); 4680 p = fs_path_alloc();
4678 if (!p) 4681 if (!p)
4679 return -ENOMEM; 4682 return -ENOMEM;
@@ -5008,13 +5011,19 @@ static int is_extent_unchanged(struct send_ctx *sctx,
5008 while (key.offset < ekey->offset + left_len) { 5011 while (key.offset < ekey->offset + left_len) {
5009 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 5012 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5010 right_type = btrfs_file_extent_type(eb, ei); 5013 right_type = btrfs_file_extent_type(eb, ei);
5011 if (right_type != BTRFS_FILE_EXTENT_REG) { 5014 if (right_type != BTRFS_FILE_EXTENT_REG &&
5015 right_type != BTRFS_FILE_EXTENT_INLINE) {
5012 ret = 0; 5016 ret = 0;
5013 goto out; 5017 goto out;
5014 } 5018 }
5015 5019
5016 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); 5020 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5017 right_len = btrfs_file_extent_num_bytes(eb, ei); 5021 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5022 right_len = btrfs_file_extent_inline_len(eb, slot, ei);
5023 right_len = PAGE_ALIGN(right_len);
5024 } else {
5025 right_len = btrfs_file_extent_num_bytes(eb, ei);
5026 }
5018 right_offset = btrfs_file_extent_offset(eb, ei); 5027 right_offset = btrfs_file_extent_offset(eb, ei);
5019 right_gen = btrfs_file_extent_generation(eb, ei); 5028 right_gen = btrfs_file_extent_generation(eb, ei);
5020 5029
@@ -5028,6 +5037,19 @@ static int is_extent_unchanged(struct send_ctx *sctx,
5028 goto out; 5037 goto out;
5029 } 5038 }
5030 5039
5040 /*
5041 * We just wanted to see if when we have an inline extent, what
5042 * follows it is a regular extent (wanted to check the above
5043 * condition for inline extents too). This should normally not
5044 * happen but it's possible for example when we have an inline
5045 * compressed extent representing data with a size matching
5046 * the page size (currently the same as sector size).
5047 */
5048 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5049 ret = 0;
5050 goto out;
5051 }
5052
5031 left_offset_fixed = left_offset; 5053 left_offset_fixed = left_offset;
5032 if (key.offset < ekey->offset) { 5054 if (key.offset < ekey->offset) {
5033 /* Fix the right offset for 2a and 7. */ 5055 /* Fix the right offset for 2a and 7. */
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 846d277b1901..2b2978c04e80 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -70,7 +70,7 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
70 btrfs_set_extent_generation(leaf, item, 1); 70 btrfs_set_extent_generation(leaf, item, 1);
71 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK); 71 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK);
72 block_info = (struct btrfs_tree_block_info *)(item + 1); 72 block_info = (struct btrfs_tree_block_info *)(item + 1);
73 btrfs_set_tree_block_level(leaf, block_info, 1); 73 btrfs_set_tree_block_level(leaf, block_info, 0);
74 iref = (struct btrfs_extent_inline_ref *)(block_info + 1); 74 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
75 if (parent > 0) { 75 if (parent > 0) {
76 btrfs_set_extent_inline_ref_type(leaf, iref, 76 btrfs_set_extent_inline_ref_type(leaf, iref,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index ee7832e2d39d..2c7f9a5f8717 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -26,6 +26,7 @@
26#include "print-tree.h" 26#include "print-tree.h"
27#include "backref.h" 27#include "backref.h"
28#include "hash.h" 28#include "hash.h"
29#include "inode-map.h"
29 30
30/* magic values for the inode_only field in btrfs_log_inode: 31/* magic values for the inode_only field in btrfs_log_inode:
31 * 32 *
@@ -2222,8 +2223,10 @@ again:
2222 nritems = btrfs_header_nritems(path->nodes[0]); 2223 nritems = btrfs_header_nritems(path->nodes[0]);
2223 if (path->slots[0] >= nritems) { 2224 if (path->slots[0] >= nritems) {
2224 ret = btrfs_next_leaf(root, path); 2225 ret = btrfs_next_leaf(root, path);
2225 if (ret) 2226 if (ret == 1)
2226 break; 2227 break;
2228 else if (ret < 0)
2229 goto out;
2227 } 2230 }
2228 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2231 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2229 path->slots[0]); 2232 path->slots[0]);
@@ -2445,6 +2448,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2445 next); 2448 next);
2446 btrfs_wait_tree_block_writeback(next); 2449 btrfs_wait_tree_block_writeback(next);
2447 btrfs_tree_unlock(next); 2450 btrfs_tree_unlock(next);
2451 } else {
2452 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2453 clear_extent_buffer_dirty(next);
2448 } 2454 }
2449 2455
2450 WARN_ON(root_owner != 2456 WARN_ON(root_owner !=
@@ -2524,6 +2530,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2524 next); 2530 next);
2525 btrfs_wait_tree_block_writeback(next); 2531 btrfs_wait_tree_block_writeback(next);
2526 btrfs_tree_unlock(next); 2532 btrfs_tree_unlock(next);
2533 } else {
2534 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2535 clear_extent_buffer_dirty(next);
2527 } 2536 }
2528 2537
2529 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); 2538 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
@@ -2600,6 +2609,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
2600 clean_tree_block(trans, log->fs_info, next); 2609 clean_tree_block(trans, log->fs_info, next);
2601 btrfs_wait_tree_block_writeback(next); 2610 btrfs_wait_tree_block_writeback(next);
2602 btrfs_tree_unlock(next); 2611 btrfs_tree_unlock(next);
2612 } else {
2613 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2614 clear_extent_buffer_dirty(next);
2603 } 2615 }
2604 2616
2605 WARN_ON(log->root_key.objectid != 2617 WARN_ON(log->root_key.objectid !=
@@ -2949,8 +2961,11 @@ out_wake_log_root:
2949 mutex_unlock(&log_root_tree->log_mutex); 2961 mutex_unlock(&log_root_tree->log_mutex);
2950 2962
2951 /* 2963 /*
2952 * The barrier before waitqueue_active is implied by mutex_unlock 2964 * The barrier before waitqueue_active is needed so all the updates
2965 * above are seen by the woken threads. It might not be necessary, but
2966 * proving that seems to be hard.
2953 */ 2967 */
2968 smp_mb();
2954 if (waitqueue_active(&log_root_tree->log_commit_wait[index2])) 2969 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2955 wake_up(&log_root_tree->log_commit_wait[index2]); 2970 wake_up(&log_root_tree->log_commit_wait[index2]);
2956out: 2971out:
@@ -2961,8 +2976,11 @@ out:
2961 mutex_unlock(&root->log_mutex); 2976 mutex_unlock(&root->log_mutex);
2962 2977
2963 /* 2978 /*
2964 * The barrier before waitqueue_active is implied by mutex_unlock 2979 * The barrier before waitqueue_active is needed so all the updates
2980 * above are seen by the woken threads. It might not be necessary, but
2981 * proving that seems to be hard.
2965 */ 2982 */
2983 smp_mb();
2966 if (waitqueue_active(&root->log_commit_wait[index1])) 2984 if (waitqueue_active(&root->log_commit_wait[index1]))
2967 wake_up(&root->log_commit_wait[index1]); 2985 wake_up(&root->log_commit_wait[index1]);
2968 return ret; 2986 return ret;
@@ -3368,8 +3386,11 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3368 * from this directory and from this transaction 3386 * from this directory and from this transaction
3369 */ 3387 */
3370 ret = btrfs_next_leaf(root, path); 3388 ret = btrfs_next_leaf(root, path);
3371 if (ret == 1) { 3389 if (ret) {
3372 last_offset = (u64)-1; 3390 if (ret == 1)
3391 last_offset = (u64)-1;
3392 else
3393 err = ret;
3373 goto done; 3394 goto done;
3374 } 3395 }
3375 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3396 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
@@ -3820,6 +3841,7 @@ fill_holes:
3820 ASSERT(ret == 0); 3841 ASSERT(ret == 0);
3821 src = src_path->nodes[0]; 3842 src = src_path->nodes[0];
3822 i = 0; 3843 i = 0;
3844 need_find_last_extent = true;
3823 } 3845 }
3824 3846
3825 btrfs_item_key_to_cpu(src, &key, i); 3847 btrfs_item_key_to_cpu(src, &key, i);
@@ -4558,6 +4580,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4558 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 4580 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4559 u64 logged_isize = 0; 4581 u64 logged_isize = 0;
4560 bool need_log_inode_item = true; 4582 bool need_log_inode_item = true;
4583 bool xattrs_logged = false;
4561 4584
4562 path = btrfs_alloc_path(); 4585 path = btrfs_alloc_path();
4563 if (!path) 4586 if (!path)
@@ -4798,6 +4821,7 @@ next_slot:
4798 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); 4821 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
4799 if (err) 4822 if (err)
4800 goto out_unlock; 4823 goto out_unlock;
4824 xattrs_logged = true;
4801 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { 4825 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
4802 btrfs_release_path(path); 4826 btrfs_release_path(path);
4803 btrfs_release_path(dst_path); 4827 btrfs_release_path(dst_path);
@@ -4810,6 +4834,11 @@ log_extents:
4810 btrfs_release_path(dst_path); 4834 btrfs_release_path(dst_path);
4811 if (need_log_inode_item) { 4835 if (need_log_inode_item) {
4812 err = log_inode_item(trans, log, dst_path, inode); 4836 err = log_inode_item(trans, log, dst_path, inode);
4837 if (!err && !xattrs_logged) {
4838 err = btrfs_log_all_xattrs(trans, root, inode, path,
4839 dst_path);
4840 btrfs_release_path(path);
4841 }
4813 if (err) 4842 if (err)
4814 goto out_unlock; 4843 goto out_unlock;
4815 } 4844 }
@@ -5514,6 +5543,23 @@ again:
5514 path); 5543 path);
5515 } 5544 }
5516 5545
5546 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
5547 struct btrfs_root *root = wc.replay_dest;
5548
5549 btrfs_release_path(path);
5550
5551 /*
5552 * We have just replayed everything, and the highest
5553 * objectid of fs roots probably has changed in case
5554 * some inode_item's got replayed.
5555 *
5556 * root->objectid_mutex is not acquired as log replay
5557 * could only happen during mount.
5558 */
5559 ret = btrfs_find_highest_objectid(root,
5560 &root->highest_objectid);
5561 }
5562
5517 key.offset = found_key.offset - 1; 5563 key.offset = found_key.offset - 1;
5518 wc.replay_dest->log_root = NULL; 5564 wc.replay_dest->log_root = NULL;
5519 free_extent_buffer(log->node); 5565 free_extent_buffer(log->node);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 600c67ef8a03..b4d63a9842fa 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -568,6 +568,7 @@ void btrfs_free_stale_device(struct btrfs_device *cur_dev)
568 btrfs_sysfs_remove_fsid(fs_devs); 568 btrfs_sysfs_remove_fsid(fs_devs);
569 list_del(&fs_devs->list); 569 list_del(&fs_devs->list);
570 free_fs_devices(fs_devs); 570 free_fs_devices(fs_devs);
571 break;
571 } else { 572 } else {
572 fs_devs->num_devices--; 573 fs_devs->num_devices--;
573 list_del(&dev->dev_list); 574 list_del(&dev->dev_list);
@@ -3849,6 +3850,15 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3849 return 0; 3850 return 0;
3850 } 3851 }
3851 3852
3853 /*
3854 * A ro->rw remount sequence should continue with the paused balance
3855 * regardless of who pauses it, system or the user as of now, so set
3856 * the resume flag.
3857 */
3858 spin_lock(&fs_info->balance_lock);
3859 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
3860 spin_unlock(&fs_info->balance_lock);
3861
3852 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 3862 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3853 return PTR_ERR_OR_ZERO(tsk); 3863 return PTR_ERR_OR_ZERO(tsk);
3854} 3864}
@@ -4638,10 +4648,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4638 if (devs_max && ndevs > devs_max) 4648 if (devs_max && ndevs > devs_max)
4639 ndevs = devs_max; 4649 ndevs = devs_max;
4640 /* 4650 /*
4641 * the primary goal is to maximize the number of stripes, so use as many 4651 * The primary goal is to maximize the number of stripes, so use as
4642 * devices as possible, even if the stripes are not maximum sized. 4652 * many devices as possible, even if the stripes are not maximum sized.
4653 *
4654 * The DUP profile stores more than one stripe per device, the
4655 * max_avail is the total size so we have to adjust.
4643 */ 4656 */
4644 stripe_size = devices_info[ndevs-1].max_avail; 4657 stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
4645 num_stripes = ndevs * dev_stripes; 4658 num_stripes = ndevs * dev_stripes;
4646 4659
4647 /* 4660 /*
@@ -4681,8 +4694,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4681 stripe_size = devices_info[ndevs-1].max_avail; 4694 stripe_size = devices_info[ndevs-1].max_avail;
4682 } 4695 }
4683 4696
4684 stripe_size = div_u64(stripe_size, dev_stripes);
4685
4686 /* align to BTRFS_STRIPE_LEN */ 4697 /* align to BTRFS_STRIPE_LEN */
4687 stripe_size = div_u64(stripe_size, raid_stripe_len); 4698 stripe_size = div_u64(stripe_size, raid_stripe_len);
4688 stripe_size *= raid_stripe_len; 4699 stripe_size *= raid_stripe_len;
@@ -5045,7 +5056,14 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5045 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) 5056 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5046 ret = 2; 5057 ret = 2;
5047 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) 5058 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5048 ret = 3; 5059 /*
5060 * There could be two corrupted data stripes, we need
5061 * to loop retry in order to rebuild the correct data.
5062 *
5063 * Fail a stripe at a time on every retry except the
5064 * stripe under reconstruction.
5065 */
5066 ret = map->num_stripes;
5049 else 5067 else
5050 ret = 1; 5068 ret = 1;
5051 free_extent_map(em); 5069 free_extent_map(em);
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 4acbc390a7d6..1d707a67f8ac 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -306,9 +306,8 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
306{ 306{
307 int i; 307 int i;
308 int rc; 308 int rc;
309 char password_with_pad[CIFS_ENCPWD_SIZE]; 309 char password_with_pad[CIFS_ENCPWD_SIZE] = {0};
310 310
311 memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
312 if (password) 311 if (password)
313 strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE); 312 strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE);
314 313
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 0c92af11f4f4..63aea21e6298 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -150,8 +150,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
150 * greater than cifs socket timeout which is 7 seconds 150 * greater than cifs socket timeout which is 7 seconds
151 */ 151 */
152 while (server->tcpStatus == CifsNeedReconnect) { 152 while (server->tcpStatus == CifsNeedReconnect) {
153 wait_event_interruptible_timeout(server->response_q, 153 rc = wait_event_interruptible_timeout(server->response_q,
154 (server->tcpStatus != CifsNeedReconnect), 10 * HZ); 154 (server->tcpStatus != CifsNeedReconnect),
155 10 * HZ);
156 if (rc < 0) {
157 cifs_dbg(FYI, "%s: aborting reconnect due to a received"
158 " signal by the process\n", __func__);
159 return -ERESTARTSYS;
160 }
155 161
156 /* are we still trying to reconnect? */ 162 /* are we still trying to reconnect? */
157 if (server->tcpStatus != CifsNeedReconnect) 163 if (server->tcpStatus != CifsNeedReconnect)
@@ -6421,9 +6427,7 @@ SetEARetry:
6421 pSMB->InformationLevel = 6427 pSMB->InformationLevel =
6422 cpu_to_le16(SMB_SET_FILE_EA); 6428 cpu_to_le16(SMB_SET_FILE_EA);
6423 6429
6424 parm_data = 6430 parm_data = (void *)pSMB + offsetof(struct smb_hdr, Protocol) + offset;
6425 (struct fealist *) (((char *) &pSMB->hdr.Protocol) +
6426 offset);
6427 pSMB->ParameterOffset = cpu_to_le16(param_offset); 6431 pSMB->ParameterOffset = cpu_to_le16(param_offset);
6428 pSMB->DataOffset = cpu_to_le16(offset); 6432 pSMB->DataOffset = cpu_to_le16(offset);
6429 pSMB->SetupCount = 1; 6433 pSMB->SetupCount = 1;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0a2bf9462637..077ad3a06c9a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1695,7 +1695,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1695 tmp_end++; 1695 tmp_end++;
1696 if (!(tmp_end < end && tmp_end[1] == delim)) { 1696 if (!(tmp_end < end && tmp_end[1] == delim)) {
1697 /* No it is not. Set the password to NULL */ 1697 /* No it is not. Set the password to NULL */
1698 kfree(vol->password); 1698 kzfree(vol->password);
1699 vol->password = NULL; 1699 vol->password = NULL;
1700 break; 1700 break;
1701 } 1701 }
@@ -1733,7 +1733,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1733 options = end; 1733 options = end;
1734 } 1734 }
1735 1735
1736 kfree(vol->password); 1736 kzfree(vol->password);
1737 /* Now build new password string */ 1737 /* Now build new password string */
1738 temp_len = strlen(value); 1738 temp_len = strlen(value);
1739 vol->password = kzalloc(temp_len+1, GFP_KERNEL); 1739 vol->password = kzalloc(temp_len+1, GFP_KERNEL);
@@ -4148,7 +4148,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
4148 reset_cifs_unix_caps(0, tcon, NULL, vol_info); 4148 reset_cifs_unix_caps(0, tcon, NULL, vol_info);
4149out: 4149out:
4150 kfree(vol_info->username); 4150 kfree(vol_info->username);
4151 kfree(vol_info->password); 4151 kzfree(vol_info->password);
4152 kfree(vol_info); 4152 kfree(vol_info);
4153 4153
4154 return tcon; 4154 return tcon;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 49a0d6b027c1..76dacd5307b9 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -673,6 +673,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
673 goto mknod_out; 673 goto mknod_out;
674 } 674 }
675 675
676 if (!S_ISCHR(mode) && !S_ISBLK(mode))
677 goto mknod_out;
678
676 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) 679 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
677 goto mknod_out; 680 goto mknod_out;
678 681
@@ -681,10 +684,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
681 684
682 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); 685 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
683 if (buf == NULL) { 686 if (buf == NULL) {
684 kfree(full_path);
685 rc = -ENOMEM; 687 rc = -ENOMEM;
686 free_xid(xid); 688 goto mknod_out;
687 return rc;
688 } 689 }
689 690
690 if (backup_cred(cifs_sb)) 691 if (backup_cred(cifs_sb))
@@ -731,7 +732,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
731 pdev->minor = cpu_to_le64(MINOR(device_number)); 732 pdev->minor = cpu_to_le64(MINOR(device_number));
732 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, 733 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
733 &bytes_written, iov, 1); 734 &bytes_written, iov, 1);
734 } /* else if (S_ISFIFO) */ 735 }
735 tcon->ses->server->ops->close(xid, tcon, &fid); 736 tcon->ses->server->ops->close(xid, tcon, &fid);
736 d_drop(direntry); 737 d_drop(direntry);
737 738
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ec2d07bb9beb..0141aba9eca6 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -589,7 +589,7 @@ cifs_relock_file(struct cifsFileInfo *cfile)
589 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 589 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
590 int rc = 0; 590 int rc = 0;
591 591
592 down_read(&cinode->lock_sem); 592 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
593 if (cinode->can_cache_brlcks) { 593 if (cinode->can_cache_brlcks) {
594 /* can cache locks - no need to relock */ 594 /* can cache locks - no need to relock */
595 up_read(&cinode->lock_sem); 595 up_read(&cinode->lock_sem);
@@ -3241,20 +3241,18 @@ static const struct vm_operations_struct cifs_file_vm_ops = {
3241 3241
3242int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) 3242int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3243{ 3243{
3244 int rc, xid; 3244 int xid, rc = 0;
3245 struct inode *inode = file_inode(file); 3245 struct inode *inode = file_inode(file);
3246 3246
3247 xid = get_xid(); 3247 xid = get_xid();
3248 3248
3249 if (!CIFS_CACHE_READ(CIFS_I(inode))) { 3249 if (!CIFS_CACHE_READ(CIFS_I(inode)))
3250 rc = cifs_zap_mapping(inode); 3250 rc = cifs_zap_mapping(inode);
3251 if (rc) 3251 if (!rc)
3252 return rc; 3252 rc = generic_file_mmap(file, vma);
3253 } 3253 if (!rc)
3254
3255 rc = generic_file_mmap(file, vma);
3256 if (rc == 0)
3257 vma->vm_ops = &cifs_file_vm_ops; 3254 vma->vm_ops = &cifs_file_vm_ops;
3255
3258 free_xid(xid); 3256 free_xid(xid);
3259 return rc; 3257 return rc;
3260} 3258}
@@ -3264,16 +3262,16 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3264 int rc, xid; 3262 int rc, xid;
3265 3263
3266 xid = get_xid(); 3264 xid = get_xid();
3265
3267 rc = cifs_revalidate_file(file); 3266 rc = cifs_revalidate_file(file);
3268 if (rc) { 3267 if (rc)
3269 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n", 3268 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3270 rc); 3269 rc);
3271 free_xid(xid); 3270 if (!rc)
3272 return rc; 3271 rc = generic_file_mmap(file, vma);
3273 } 3272 if (!rc)
3274 rc = generic_file_mmap(file, vma);
3275 if (rc == 0)
3276 vma->vm_ops = &cifs_file_vm_ops; 3273 vma->vm_ops = &cifs_file_vm_ops;
3274
3277 free_xid(xid); 3275 free_xid(xid);
3278 return rc; 3276 return rc;
3279} 3277}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 2396ab099849..0cc699d9b932 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -99,14 +99,11 @@ sesInfoFree(struct cifs_ses *buf_to_free)
99 kfree(buf_to_free->serverOS); 99 kfree(buf_to_free->serverOS);
100 kfree(buf_to_free->serverDomain); 100 kfree(buf_to_free->serverDomain);
101 kfree(buf_to_free->serverNOS); 101 kfree(buf_to_free->serverNOS);
102 if (buf_to_free->password) { 102 kzfree(buf_to_free->password);
103 memset(buf_to_free->password, 0, strlen(buf_to_free->password));
104 kfree(buf_to_free->password);
105 }
106 kfree(buf_to_free->user_name); 103 kfree(buf_to_free->user_name);
107 kfree(buf_to_free->domainName); 104 kfree(buf_to_free->domainName);
108 kfree(buf_to_free->auth_key.response); 105 kzfree(buf_to_free->auth_key.response);
109 kfree(buf_to_free); 106 kzfree(buf_to_free);
110} 107}
111 108
112struct cifs_tcon * 109struct cifs_tcon *
@@ -137,10 +134,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
137 } 134 }
138 atomic_dec(&tconInfoAllocCount); 135 atomic_dec(&tconInfoAllocCount);
139 kfree(buf_to_free->nativeFileSystem); 136 kfree(buf_to_free->nativeFileSystem);
140 if (buf_to_free->password) { 137 kzfree(buf_to_free->password);
141 memset(buf_to_free->password, 0, strlen(buf_to_free->password));
142 kfree(buf_to_free->password);
143 }
144 kfree(buf_to_free); 138 kfree(buf_to_free);
145} 139}
146 140
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index abae6dd2c6b9..cc88f4f0325e 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -980,10 +980,10 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
980 cifs_dbg(VFS, "illegal hours %d\n", st->Hours); 980 cifs_dbg(VFS, "illegal hours %d\n", st->Hours);
981 days = sd->Day; 981 days = sd->Day;
982 month = sd->Month; 982 month = sd->Month;
983 if ((days > 31) || (month > 12)) { 983 if (days < 1 || days > 31 || month < 1 || month > 12) {
984 cifs_dbg(VFS, "illegal date, month %d day: %d\n", month, days); 984 cifs_dbg(VFS, "illegal date, month %d day: %d\n", month, days);
985 if (month > 12) 985 days = clamp(days, 1, 31);
986 month = 12; 986 month = clamp(month, 1, 12);
987 } 987 }
988 month -= 1; 988 month -= 1;
989 days += total_days_of_prev_months[month]; 989 days += total_days_of_prev_months[month];
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index e88ffe1da045..a035d1a95882 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -344,13 +344,12 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
344 /* BB is NTLMV2 session security format easier to use here? */ 344 /* BB is NTLMV2 session security format easier to use here? */
345 flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | 345 flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
346 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | 346 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
347 NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; 347 NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
348 if (ses->server->sign) { 348 NTLMSSP_NEGOTIATE_SEAL;
349 if (ses->server->sign)
349 flags |= NTLMSSP_NEGOTIATE_SIGN; 350 flags |= NTLMSSP_NEGOTIATE_SIGN;
350 if (!ses->server->session_estab || 351 if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
351 ses->ntlmssp->sesskey_per_smbsess) 352 flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
352 flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
353 }
354 353
355 sec_blob->NegotiateFlags = cpu_to_le32(flags); 354 sec_blob->NegotiateFlags = cpu_to_le32(flags);
356 355
@@ -407,13 +406,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
407 flags = NTLMSSP_NEGOTIATE_56 | 406 flags = NTLMSSP_NEGOTIATE_56 |
408 NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | 407 NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
409 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | 408 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
410 NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; 409 NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
411 if (ses->server->sign) { 410 NTLMSSP_NEGOTIATE_SEAL;
411 if (ses->server->sign)
412 flags |= NTLMSSP_NEGOTIATE_SIGN; 412 flags |= NTLMSSP_NEGOTIATE_SIGN;
413 if (!ses->server->session_estab || 413 if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
414 ses->ntlmssp->sesskey_per_smbsess) 414 flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
415 flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
416 }
417 415
418 tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE); 416 tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
419 sec_blob->NegotiateFlags = cpu_to_le32(flags); 417 sec_blob->NegotiateFlags = cpu_to_le32(flags);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index f2ff60e58ec8..5f5ba807b414 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -158,7 +158,7 @@ out:
158static int 158static int
159smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) 159smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
160{ 160{
161 int rc = 0; 161 int rc;
162 struct nls_table *nls_codepage; 162 struct nls_table *nls_codepage;
163 struct cifs_ses *ses; 163 struct cifs_ses *ses;
164 struct TCP_Server_Info *server; 164 struct TCP_Server_Info *server;
@@ -169,10 +169,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
169 * for those three - in the calling routine. 169 * for those three - in the calling routine.
170 */ 170 */
171 if (tcon == NULL) 171 if (tcon == NULL)
172 return rc; 172 return 0;
173 173
174 if (smb2_command == SMB2_TREE_CONNECT) 174 if (smb2_command == SMB2_TREE_CONNECT)
175 return rc; 175 return 0;
176 176
177 if (tcon->tidStatus == CifsExiting) { 177 if (tcon->tidStatus == CifsExiting) {
178 /* 178 /*
@@ -215,8 +215,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
215 return -EAGAIN; 215 return -EAGAIN;
216 } 216 }
217 217
218 wait_event_interruptible_timeout(server->response_q, 218 rc = wait_event_interruptible_timeout(server->response_q,
219 (server->tcpStatus != CifsNeedReconnect), 10 * HZ); 219 (server->tcpStatus != CifsNeedReconnect),
220 10 * HZ);
221 if (rc < 0) {
222 cifs_dbg(FYI, "%s: aborting reconnect due to a received"
223 " signal by the process\n", __func__);
224 return -ERESTARTSYS;
225 }
220 226
221 /* are we still trying to reconnect? */ 227 /* are we still trying to reconnect? */
222 if (server->tcpStatus != CifsNeedReconnect) 228 if (server->tcpStatus != CifsNeedReconnect)
@@ -234,7 +240,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
234 } 240 }
235 241
236 if (!tcon->ses->need_reconnect && !tcon->need_reconnect) 242 if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
237 return rc; 243 return 0;
238 244
239 nls_codepage = load_nls_default(); 245 nls_codepage = load_nls_default();
240 246
@@ -580,8 +586,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
580 } 586 }
581 587
582 /* check validate negotiate info response matches what we got earlier */ 588 /* check validate negotiate info response matches what we got earlier */
583 if (pneg_rsp->Dialect != 589 if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect))
584 cpu_to_le16(tcon->ses->server->vals->protocol_id))
585 goto vneg_out; 590 goto vneg_out;
586 591
587 if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode)) 592 if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
@@ -833,10 +838,8 @@ ssetup_exit:
833 838
834 if (!rc) { 839 if (!rc) {
835 mutex_lock(&server->srv_mutex); 840 mutex_lock(&server->srv_mutex);
836 if (server->sign && server->ops->generate_signingkey) { 841 if (server->ops->generate_signingkey) {
837 rc = server->ops->generate_signingkey(ses); 842 rc = server->ops->generate_signingkey(ses);
838 kfree(ses->auth_key.response);
839 ses->auth_key.response = NULL;
840 if (rc) { 843 if (rc) {
841 cifs_dbg(FYI, 844 cifs_dbg(FYI,
842 "SMB3 session key generation failed\n"); 845 "SMB3 session key generation failed\n");
@@ -858,10 +861,6 @@ ssetup_exit:
858 } 861 }
859 862
860keygen_exit: 863keygen_exit:
861 if (!server->sign) {
862 kfree(ses->auth_key.response);
863 ses->auth_key.response = NULL;
864 }
865 if (spnego_key) { 864 if (spnego_key) {
866 key_invalidate(spnego_key); 865 key_invalidate(spnego_key);
867 key_put(spnego_key); 866 key_put(spnego_key);
@@ -1006,15 +1005,19 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1006 goto tcon_exit; 1005 goto tcon_exit;
1007 } 1006 }
1008 1007
1009 if (rsp->ShareType & SMB2_SHARE_TYPE_DISK) 1008 switch (rsp->ShareType) {
1009 case SMB2_SHARE_TYPE_DISK:
1010 cifs_dbg(FYI, "connection to disk share\n"); 1010 cifs_dbg(FYI, "connection to disk share\n");
1011 else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) { 1011 break;
1012 case SMB2_SHARE_TYPE_PIPE:
1012 tcon->ipc = true; 1013 tcon->ipc = true;
1013 cifs_dbg(FYI, "connection to pipe share\n"); 1014 cifs_dbg(FYI, "connection to pipe share\n");
1014 } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) { 1015 break;
1015 tcon->print = true; 1016 case SMB2_SHARE_TYPE_PRINT:
1017 tcon->ipc = true;
1016 cifs_dbg(FYI, "connection to printer\n"); 1018 cifs_dbg(FYI, "connection to printer\n");
1017 } else { 1019 break;
1020 default:
1018 cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType); 1021 cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
1019 rc = -EOPNOTSUPP; 1022 rc = -EOPNOTSUPP;
1020 goto tcon_error_exit; 1023 goto tcon_error_exit;
@@ -1559,6 +1562,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1559 } else 1562 } else
1560 iov[0].iov_len = get_rfc1002_length(req) + 4; 1563 iov[0].iov_len = get_rfc1002_length(req) + 4;
1561 1564
1565 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
1566 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
1567 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
1562 1568
1563 rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0); 1569 rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
1564 rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; 1570 rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index 4d24d17bcfc1..943be5ecfcd9 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -51,6 +51,7 @@
51#define elf_prstatus compat_elf_prstatus 51#define elf_prstatus compat_elf_prstatus
52#define elf_prpsinfo compat_elf_prpsinfo 52#define elf_prpsinfo compat_elf_prpsinfo
53 53
54#ifdef CONFIG_ELF_CORE
54/* 55/*
55 * Compat version of cputime_to_compat_timeval, perhaps this 56 * Compat version of cputime_to_compat_timeval, perhaps this
56 * should be an inline in <linux/compat.h>. 57 * should be an inline in <linux/compat.h>.
@@ -63,6 +64,7 @@ static void cputime_to_compat_timeval(const cputime_t cputime,
63 value->tv_sec = tv.tv_sec; 64 value->tv_sec = tv.tv_sec;
64 value->tv_usec = tv.tv_usec; 65 value->tv_usec = tv.tv_usec;
65} 66}
67#endif
66 68
67#undef cputime_to_timeval 69#undef cputime_to_timeval
68#define cputime_to_timeval cputime_to_compat_timeval 70#define cputime_to_timeval cputime_to_compat_timeval
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index dcf26537c935..a52ca5cba015 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -811,7 +811,7 @@ static int compat_ioctl_preallocate(struct file *file,
811 */ 811 */
812#define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff) 812#define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)
813 813
814#define COMPATIBLE_IOCTL(cmd) XFORM(cmd), 814#define COMPATIBLE_IOCTL(cmd) XFORM((u32)cmd),
815/* ioctl should not be warned about even if it's not implemented. 815/* ioctl should not be warned about even if it's not implemented.
816 Valid reasons to use this: 816 Valid reasons to use this:
817 - It is implemented with ->compat_ioctl on some device, but programs 817 - It is implemented with ->compat_ioctl on some device, but programs
diff --git a/fs/dcache.c b/fs/dcache.c
index 3ed642e0a0c2..807efaab838e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -634,11 +634,16 @@ again:
634 spin_unlock(&parent->d_lock); 634 spin_unlock(&parent->d_lock);
635 goto again; 635 goto again;
636 } 636 }
637 rcu_read_unlock(); 637 if (parent != dentry) {
638 if (parent != dentry)
639 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 638 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
640 else 639 if (unlikely(dentry->d_lockref.count < 0)) {
640 spin_unlock(&parent->d_lock);
641 parent = NULL;
642 }
643 } else {
641 parent = NULL; 644 parent = NULL;
645 }
646 rcu_read_unlock();
642 return parent; 647 return parent;
643} 648}
644 649
@@ -1892,6 +1897,28 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1892 1897
1893EXPORT_SYMBOL(d_instantiate_unique); 1898EXPORT_SYMBOL(d_instantiate_unique);
1894 1899
1900/*
1901 * This should be equivalent to d_instantiate() + unlock_new_inode(),
1902 * with lockdep-related part of unlock_new_inode() done before
1903 * anything else. Use that instead of open-coding d_instantiate()/
1904 * unlock_new_inode() combinations.
1905 */
1906void d_instantiate_new(struct dentry *entry, struct inode *inode)
1907{
1908 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1909 BUG_ON(!inode);
1910 lockdep_annotate_inode_mutex_key(inode);
1911 security_d_instantiate(entry, inode);
1912 spin_lock(&inode->i_lock);
1913 __d_instantiate(entry, inode);
1914 WARN_ON(!(inode->i_state & I_NEW));
1915 inode->i_state &= ~I_NEW;
1916 smp_mb();
1917 wake_up_bit(&inode->i_state, __I_NEW);
1918 spin_unlock(&inode->i_lock);
1919}
1920EXPORT_SYMBOL(d_instantiate_new);
1921
1895/** 1922/**
1896 * d_instantiate_no_diralias - instantiate a non-aliased dentry 1923 * d_instantiate_no_diralias - instantiate a non-aliased dentry
1897 * @entry: dentry to complete 1924 * @entry: dentry to complete
@@ -1927,10 +1954,12 @@ struct dentry *d_make_root(struct inode *root_inode)
1927 static const struct qstr name = QSTR_INIT("/", 1); 1954 static const struct qstr name = QSTR_INIT("/", 1);
1928 1955
1929 res = __d_alloc(root_inode->i_sb, &name); 1956 res = __d_alloc(root_inode->i_sb, &name);
1930 if (res) 1957 if (res) {
1958 res->d_flags |= DCACHE_RCUACCESS;
1931 d_instantiate(res, root_inode); 1959 d_instantiate(res, root_inode);
1932 else 1960 } else {
1933 iput(root_inode); 1961 iput(root_inode);
1962 }
1934 } 1963 }
1935 return res; 1964 return res;
1936} 1965}
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index e2e47ba5d313..844d0c4da84f 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -287,8 +287,7 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
287 iput(ecryptfs_inode); 287 iput(ecryptfs_inode);
288 goto out; 288 goto out;
289 } 289 }
290 unlock_new_inode(ecryptfs_inode); 290 d_instantiate_new(ecryptfs_dentry, ecryptfs_inode);
291 d_instantiate(ecryptfs_dentry, ecryptfs_inode);
292out: 291out:
293 return rc; 292 return rc;
294} 293}
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index d6aeb84e90b6..d882d873c5a3 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -178,11 +178,8 @@ ext2_get_acl(struct inode *inode, int type)
178 return acl; 178 return acl;
179} 179}
180 180
181/* 181static int
182 * inode->i_mutex: down 182__ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
183 */
184int
185ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
186{ 183{
187 int name_index; 184 int name_index;
188 void *value = NULL; 185 void *value = NULL;
@@ -192,13 +189,6 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
192 switch(type) { 189 switch(type) {
193 case ACL_TYPE_ACCESS: 190 case ACL_TYPE_ACCESS:
194 name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; 191 name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
195 if (acl) {
196 error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
197 if (error)
198 return error;
199 inode->i_ctime = CURRENT_TIME_SEC;
200 mark_inode_dirty(inode);
201 }
202 break; 192 break;
203 193
204 case ACL_TYPE_DEFAULT: 194 case ACL_TYPE_DEFAULT:
@@ -225,6 +215,24 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
225} 215}
226 216
227/* 217/*
218 * inode->i_mutex: down
219 */
220int
221ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
222{
223 int error;
224
225 if (type == ACL_TYPE_ACCESS && acl) {
226 error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
227 if (error)
228 return error;
229 inode->i_ctime = CURRENT_TIME_SEC;
230 mark_inode_dirty(inode);
231 }
232 return __ext2_set_acl(inode, acl, type);
233}
234
235/*
228 * Initialize the ACLs of a new inode. Called from ext2_new_inode. 236 * Initialize the ACLs of a new inode. Called from ext2_new_inode.
229 * 237 *
230 * dir->i_mutex: down 238 * dir->i_mutex: down
@@ -241,12 +249,12 @@ ext2_init_acl(struct inode *inode, struct inode *dir)
241 return error; 249 return error;
242 250
243 if (default_acl) { 251 if (default_acl) {
244 error = ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); 252 error = __ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
245 posix_acl_release(default_acl); 253 posix_acl_release(default_acl);
246 } 254 }
247 if (acl) { 255 if (acl) {
248 if (!error) 256 if (!error)
249 error = ext2_set_acl(inode, acl, ACL_TYPE_ACCESS); 257 error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
250 posix_acl_release(acl); 258 posix_acl_release(acl);
251 } 259 }
252 return error; 260 return error;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 0aa9bf6e6e53..f600c43f0047 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1175,21 +1175,11 @@ do_indirects:
1175 1175
1176static void ext2_truncate_blocks(struct inode *inode, loff_t offset) 1176static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
1177{ 1177{
1178 /*
1179 * XXX: it seems like a bug here that we don't allow
1180 * IS_APPEND inode to have blocks-past-i_size trimmed off.
1181 * review and fix this.
1182 *
1183 * Also would be nice to be able to handle IO errors and such,
1184 * but that's probably too much to ask.
1185 */
1186 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1178 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1187 S_ISLNK(inode->i_mode))) 1179 S_ISLNK(inode->i_mode)))
1188 return; 1180 return;
1189 if (ext2_inode_is_fast_symlink(inode)) 1181 if (ext2_inode_is_fast_symlink(inode))
1190 return; 1182 return;
1191 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1192 return;
1193 1183
1194 dax_sem_down_write(EXT2_I(inode)); 1184 dax_sem_down_write(EXT2_I(inode));
1195 __ext2_truncate_blocks(inode, offset); 1185 __ext2_truncate_blocks(inode, offset);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 3267a80dbbe2..da3d40ef1668 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -40,8 +40,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
40{ 40{
41 int err = ext2_add_link(dentry, inode); 41 int err = ext2_add_link(dentry, inode);
42 if (!err) { 42 if (!err) {
43 unlock_new_inode(inode); 43 d_instantiate_new(dentry, inode);
44 d_instantiate(dentry, inode);
45 return 0; 44 return 0;
46 } 45 }
47 inode_dec_link_count(inode); 46 inode_dec_link_count(inode);
@@ -267,8 +266,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
267 if (err) 266 if (err)
268 goto out_fail; 267 goto out_fail;
269 268
270 unlock_new_inode(inode); 269 d_instantiate_new(dentry, inode);
271 d_instantiate(dentry, inode);
272out: 270out:
273 return err; 271 return err;
274 272
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index f97110461c19..e0fb7cdcee89 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -183,7 +183,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
183 unsigned int bit, bit_max; 183 unsigned int bit, bit_max;
184 struct ext4_sb_info *sbi = EXT4_SB(sb); 184 struct ext4_sb_info *sbi = EXT4_SB(sb);
185 ext4_fsblk_t start, tmp; 185 ext4_fsblk_t start, tmp;
186 int flex_bg = 0;
187 struct ext4_group_info *grp; 186 struct ext4_group_info *grp;
188 187
189 J_ASSERT_BH(bh, buffer_locked(bh)); 188 J_ASSERT_BH(bh, buffer_locked(bh));
@@ -216,22 +215,19 @@ static int ext4_init_block_bitmap(struct super_block *sb,
216 215
217 start = ext4_group_first_block_no(sb, block_group); 216 start = ext4_group_first_block_no(sb, block_group);
218 217
219 if (ext4_has_feature_flex_bg(sb))
220 flex_bg = 1;
221
222 /* Set bits for block and inode bitmaps, and inode table */ 218 /* Set bits for block and inode bitmaps, and inode table */
223 tmp = ext4_block_bitmap(sb, gdp); 219 tmp = ext4_block_bitmap(sb, gdp);
224 if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) 220 if (ext4_block_in_group(sb, tmp, block_group))
225 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); 221 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
226 222
227 tmp = ext4_inode_bitmap(sb, gdp); 223 tmp = ext4_inode_bitmap(sb, gdp);
228 if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) 224 if (ext4_block_in_group(sb, tmp, block_group))
229 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); 225 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
230 226
231 tmp = ext4_inode_table(sb, gdp); 227 tmp = ext4_inode_table(sb, gdp);
232 for (; tmp < ext4_inode_table(sb, gdp) + 228 for (; tmp < ext4_inode_table(sb, gdp) +
233 sbi->s_itb_per_group; tmp++) { 229 sbi->s_itb_per_group; tmp++) {
234 if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) 230 if (ext4_block_in_group(sb, tmp, block_group))
235 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); 231 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
236 } 232 }
237 233
@@ -242,8 +238,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
242 */ 238 */
243 ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group), 239 ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
244 sb->s_blocksize * 8, bh->b_data); 240 sb->s_blocksize * 8, bh->b_data);
245 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
246 ext4_group_desc_csum_set(sb, block_group, gdp);
247 return 0; 241 return 0;
248} 242}
249 243
@@ -322,6 +316,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
322 struct ext4_sb_info *sbi = EXT4_SB(sb); 316 struct ext4_sb_info *sbi = EXT4_SB(sb);
323 ext4_grpblk_t offset; 317 ext4_grpblk_t offset;
324 ext4_grpblk_t next_zero_bit; 318 ext4_grpblk_t next_zero_bit;
319 ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
325 ext4_fsblk_t blk; 320 ext4_fsblk_t blk;
326 ext4_fsblk_t group_first_block; 321 ext4_fsblk_t group_first_block;
327 322
@@ -339,20 +334,25 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
339 /* check whether block bitmap block number is set */ 334 /* check whether block bitmap block number is set */
340 blk = ext4_block_bitmap(sb, desc); 335 blk = ext4_block_bitmap(sb, desc);
341 offset = blk - group_first_block; 336 offset = blk - group_first_block;
342 if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) 337 if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
338 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
343 /* bad block bitmap */ 339 /* bad block bitmap */
344 return blk; 340 return blk;
345 341
346 /* check whether the inode bitmap block number is set */ 342 /* check whether the inode bitmap block number is set */
347 blk = ext4_inode_bitmap(sb, desc); 343 blk = ext4_inode_bitmap(sb, desc);
348 offset = blk - group_first_block; 344 offset = blk - group_first_block;
349 if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) 345 if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
346 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
350 /* bad block bitmap */ 347 /* bad block bitmap */
351 return blk; 348 return blk;
352 349
353 /* check whether the inode table block number is set */ 350 /* check whether the inode table block number is set */
354 blk = ext4_inode_table(sb, desc); 351 blk = ext4_inode_table(sb, desc);
355 offset = blk - group_first_block; 352 offset = blk - group_first_block;
353 if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
354 EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
355 return blk;
356 next_zero_bit = ext4_find_next_zero_bit(bh->b_data, 356 next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
357 EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group), 357 EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
358 EXT4_B2C(sbi, offset)); 358 EXT4_B2C(sbi, offset));
@@ -378,6 +378,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
378 return -EFSCORRUPTED; 378 return -EFSCORRUPTED;
379 379
380 ext4_lock_group(sb, block_group); 380 ext4_lock_group(sb, block_group);
381 if (buffer_verified(bh))
382 goto verified;
381 if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group, 383 if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
382 desc, bh))) { 384 desc, bh))) {
383 ext4_unlock_group(sb, block_group); 385 ext4_unlock_group(sb, block_group);
@@ -400,6 +402,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
400 return -EFSCORRUPTED; 402 return -EFSCORRUPTED;
401 } 403 }
402 set_buffer_verified(bh); 404 set_buffer_verified(bh);
405verified:
403 ext4_unlock_group(sb, block_group); 406 ext4_unlock_group(sb, block_group);
404 return 0; 407 return 0;
405} 408}
@@ -418,6 +421,7 @@ struct buffer_head *
418ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) 421ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
419{ 422{
420 struct ext4_group_desc *desc; 423 struct ext4_group_desc *desc;
424 struct ext4_sb_info *sbi = EXT4_SB(sb);
421 struct buffer_head *bh; 425 struct buffer_head *bh;
422 ext4_fsblk_t bitmap_blk; 426 ext4_fsblk_t bitmap_blk;
423 int err; 427 int err;
@@ -426,6 +430,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
426 if (!desc) 430 if (!desc)
427 return ERR_PTR(-EFSCORRUPTED); 431 return ERR_PTR(-EFSCORRUPTED);
428 bitmap_blk = ext4_block_bitmap(sb, desc); 432 bitmap_blk = ext4_block_bitmap(sb, desc);
433 if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
434 (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
435 ext4_error(sb, "Invalid block bitmap block %llu in "
436 "block_group %u", bitmap_blk, block_group);
437 return ERR_PTR(-EFSCORRUPTED);
438 }
429 bh = sb_getblk(sb, bitmap_blk); 439 bh = sb_getblk(sb, bitmap_blk);
430 if (unlikely(!bh)) { 440 if (unlikely(!bh)) {
431 ext4_error(sb, "Cannot get buffer for block bitmap - " 441 ext4_error(sb, "Cannot get buffer for block bitmap - "
@@ -443,10 +453,20 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
443 goto verify; 453 goto verify;
444 } 454 }
445 ext4_lock_group(sb, block_group); 455 ext4_lock_group(sb, block_group);
446 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 456 if (ext4_has_group_desc_csum(sb) &&
457 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
458 if (block_group == 0) {
459 ext4_unlock_group(sb, block_group);
460 unlock_buffer(bh);
461 ext4_error(sb, "Block bitmap for bg 0 marked "
462 "uninitialized");
463 err = -EFSCORRUPTED;
464 goto out;
465 }
447 err = ext4_init_block_bitmap(sb, bh, block_group, desc); 466 err = ext4_init_block_bitmap(sb, bh, block_group, desc);
448 set_bitmap_uptodate(bh); 467 set_bitmap_uptodate(bh);
449 set_buffer_uptodate(bh); 468 set_buffer_uptodate(bh);
469 set_buffer_verified(bh);
450 ext4_unlock_group(sb, block_group); 470 ext4_unlock_group(sb, block_group);
451 unlock_buffer(bh); 471 unlock_buffer(bh);
452 if (err) { 472 if (err) {
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c8ad14c697c4..f5d9f82b173a 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1468,11 +1468,6 @@ static inline struct timespec ext4_current_time(struct inode *inode)
1468static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) 1468static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
1469{ 1469{
1470 return ino == EXT4_ROOT_INO || 1470 return ino == EXT4_ROOT_INO ||
1471 ino == EXT4_USR_QUOTA_INO ||
1472 ino == EXT4_GRP_QUOTA_INO ||
1473 ino == EXT4_BOOT_LOADER_INO ||
1474 ino == EXT4_JOURNAL_INO ||
1475 ino == EXT4_RESIZE_INO ||
1476 (ino >= EXT4_FIRST_INO(sb) && 1471 (ino >= EXT4_FIRST_INO(sb) &&
1477 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); 1472 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
1478} 1473}
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 3c9381547094..2d8e73793512 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -103,6 +103,7 @@ struct ext4_extent_header {
103}; 103};
104 104
105#define EXT4_EXT_MAGIC cpu_to_le16(0xf30a) 105#define EXT4_EXT_MAGIC cpu_to_le16(0xf30a)
106#define EXT4_MAX_EXTENT_DEPTH 5
106 107
107#define EXT4_EXTENT_TAIL_OFFSET(hdr) \ 108#define EXT4_EXTENT_TAIL_OFFSET(hdr) \
108 (sizeof(struct ext4_extent_header) + \ 109 (sizeof(struct ext4_extent_header) + \
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 403c4bae3e18..1708597659a1 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -876,6 +876,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
876 876
877 eh = ext_inode_hdr(inode); 877 eh = ext_inode_hdr(inode);
878 depth = ext_depth(inode); 878 depth = ext_depth(inode);
879 if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
880 EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
881 depth);
882 ret = -EFSCORRUPTED;
883 goto err;
884 }
879 885
880 if (path) { 886 if (path) {
881 ext4_ext_drop_refs(path); 887 ext4_ext_drop_refs(path);
@@ -5380,8 +5386,9 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5380 stop = le32_to_cpu(extent->ee_block); 5386 stop = le32_to_cpu(extent->ee_block);
5381 5387
5382 /* 5388 /*
5383 * In case of left shift, Don't start shifting extents until we make 5389 * For left shifts, make sure the hole on the left is big enough to
5384 * sure the hole is big enough to accommodate the shift. 5390 * accommodate the shift. For right shifts, make sure the last extent
5391 * won't be shifted beyond EXT_MAX_BLOCKS.
5385 */ 5392 */
5386 if (SHIFT == SHIFT_LEFT) { 5393 if (SHIFT == SHIFT_LEFT) {
5387 path = ext4_find_extent(inode, start - 1, &path, 5394 path = ext4_find_extent(inode, start - 1, &path,
@@ -5401,9 +5408,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5401 5408
5402 if ((start == ex_start && shift > ex_start) || 5409 if ((start == ex_start && shift > ex_start) ||
5403 (shift > start - ex_end)) { 5410 (shift > start - ex_end)) {
5404 ext4_ext_drop_refs(path); 5411 ret = -EINVAL;
5405 kfree(path); 5412 goto out;
5406 return -EINVAL; 5413 }
5414 } else {
5415 if (shift > EXT_MAX_BLOCKS -
5416 (stop + ext4_ext_get_actual_len(extent))) {
5417 ret = -EINVAL;
5418 goto out;
5407 } 5419 }
5408 } 5420 }
5409 5421
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index a8b1749d79a8..debf0707789d 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -460,7 +460,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
460 int i, num; 460 int i, num;
461 unsigned long nr_pages; 461 unsigned long nr_pages;
462 462
463 num = min_t(pgoff_t, end - index, PAGEVEC_SIZE); 463 num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
464 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, 464 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
465 (pgoff_t)num); 465 (pgoff_t)num);
466 if (nr_pages == 0) 466 if (nr_pages == 0)
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 5388207d2832..0963213e9cd3 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -63,44 +63,6 @@ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
63 memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3); 63 memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
64} 64}
65 65
66/* Initializes an uninitialized inode bitmap */
67static int ext4_init_inode_bitmap(struct super_block *sb,
68 struct buffer_head *bh,
69 ext4_group_t block_group,
70 struct ext4_group_desc *gdp)
71{
72 struct ext4_group_info *grp;
73 struct ext4_sb_info *sbi = EXT4_SB(sb);
74 J_ASSERT_BH(bh, buffer_locked(bh));
75
76 /* If checksum is bad mark all blocks and inodes use to prevent
77 * allocation, essentially implementing a per-group read-only flag. */
78 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
79 grp = ext4_get_group_info(sb, block_group);
80 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
81 percpu_counter_sub(&sbi->s_freeclusters_counter,
82 grp->bb_free);
83 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
84 if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
85 int count;
86 count = ext4_free_inodes_count(sb, gdp);
87 percpu_counter_sub(&sbi->s_freeinodes_counter,
88 count);
89 }
90 set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
91 return -EFSBADCRC;
92 }
93
94 memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
95 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
96 bh->b_data);
97 ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
98 EXT4_INODES_PER_GROUP(sb) / 8);
99 ext4_group_desc_csum_set(sb, block_group, gdp);
100
101 return 0;
102}
103
104void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate) 66void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
105{ 67{
106 if (uptodate) { 68 if (uptodate) {
@@ -126,6 +88,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
126 return -EFSCORRUPTED; 88 return -EFSCORRUPTED;
127 89
128 ext4_lock_group(sb, block_group); 90 ext4_lock_group(sb, block_group);
91 if (buffer_verified(bh))
92 goto verified;
129 blk = ext4_inode_bitmap(sb, desc); 93 blk = ext4_inode_bitmap(sb, desc);
130 if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh, 94 if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
131 EXT4_INODES_PER_GROUP(sb) / 8)) { 95 EXT4_INODES_PER_GROUP(sb) / 8)) {
@@ -143,6 +107,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
143 return -EFSBADCRC; 107 return -EFSBADCRC;
144 } 108 }
145 set_buffer_verified(bh); 109 set_buffer_verified(bh);
110verified:
146 ext4_unlock_group(sb, block_group); 111 ext4_unlock_group(sb, block_group);
147 return 0; 112 return 0;
148} 113}
@@ -157,6 +122,7 @@ static struct buffer_head *
157ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) 122ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
158{ 123{
159 struct ext4_group_desc *desc; 124 struct ext4_group_desc *desc;
125 struct ext4_sb_info *sbi = EXT4_SB(sb);
160 struct buffer_head *bh = NULL; 126 struct buffer_head *bh = NULL;
161 ext4_fsblk_t bitmap_blk; 127 ext4_fsblk_t bitmap_blk;
162 int err; 128 int err;
@@ -166,6 +132,12 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
166 return ERR_PTR(-EFSCORRUPTED); 132 return ERR_PTR(-EFSCORRUPTED);
167 133
168 bitmap_blk = ext4_inode_bitmap(sb, desc); 134 bitmap_blk = ext4_inode_bitmap(sb, desc);
135 if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
136 (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
137 ext4_error(sb, "Invalid inode bitmap blk %llu in "
138 "block_group %u", bitmap_blk, block_group);
139 return ERR_PTR(-EFSCORRUPTED);
140 }
169 bh = sb_getblk(sb, bitmap_blk); 141 bh = sb_getblk(sb, bitmap_blk);
170 if (unlikely(!bh)) { 142 if (unlikely(!bh)) {
171 ext4_error(sb, "Cannot read inode bitmap - " 143 ext4_error(sb, "Cannot read inode bitmap - "
@@ -183,18 +155,24 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
183 } 155 }
184 156
185 ext4_lock_group(sb, block_group); 157 ext4_lock_group(sb, block_group);
186 if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { 158 if (ext4_has_group_desc_csum(sb) &&
187 err = ext4_init_inode_bitmap(sb, bh, block_group, desc); 159 (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
160 if (block_group == 0) {
161 ext4_unlock_group(sb, block_group);
162 unlock_buffer(bh);
163 ext4_error(sb, "Inode bitmap for bg 0 marked "
164 "uninitialized");
165 err = -EFSCORRUPTED;
166 goto out;
167 }
168 memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
169 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
170 sb->s_blocksize * 8, bh->b_data);
188 set_bitmap_uptodate(bh); 171 set_bitmap_uptodate(bh);
189 set_buffer_uptodate(bh); 172 set_buffer_uptodate(bh);
190 set_buffer_verified(bh); 173 set_buffer_verified(bh);
191 ext4_unlock_group(sb, block_group); 174 ext4_unlock_group(sb, block_group);
192 unlock_buffer(bh); 175 unlock_buffer(bh);
193 if (err) {
194 ext4_error(sb, "Failed to init inode bitmap for group "
195 "%u: %d", block_group, err);
196 goto out;
197 }
198 return bh; 176 return bh;
199 } 177 }
200 ext4_unlock_group(sb, block_group); 178 ext4_unlock_group(sb, block_group);
@@ -953,7 +931,8 @@ got:
953 931
954 /* recheck and clear flag under lock if we still need to */ 932 /* recheck and clear flag under lock if we still need to */
955 ext4_lock_group(sb, group); 933 ext4_lock_group(sb, group);
956 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 934 if (ext4_has_group_desc_csum(sb) &&
935 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
957 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 936 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
958 ext4_free_group_clusters_set(sb, gdp, 937 ext4_free_group_clusters_set(sb, gdp,
959 ext4_free_clusters_after_init(sb, group, gdp)); 938 ext4_free_clusters_after_init(sb, group, gdp));
@@ -1329,7 +1308,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
1329 ext4_itable_unused_count(sb, gdp)), 1308 ext4_itable_unused_count(sb, gdp)),
1330 sbi->s_inodes_per_block); 1309 sbi->s_inodes_per_block);
1331 1310
1332 if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) { 1311 if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
1312 ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
1313 ext4_itable_unused_count(sb, gdp)) <
1314 EXT4_FIRST_INO(sb)))) {
1333 ext4_error(sb, "Something is wrong with group %u: " 1315 ext4_error(sb, "Something is wrong with group %u: "
1334 "used itable blocks: %d; " 1316 "used itable blocks: %d; "
1335 "itable unused count: %u", 1317 "itable unused count: %u",
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index dad8e7bdf0a6..c449bc089c94 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -376,7 +376,7 @@ out:
376static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode, 376static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
377 unsigned int len) 377 unsigned int len)
378{ 378{
379 int ret, size; 379 int ret, size, no_expand;
380 struct ext4_inode_info *ei = EXT4_I(inode); 380 struct ext4_inode_info *ei = EXT4_I(inode);
381 381
382 if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) 382 if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
@@ -386,15 +386,14 @@ static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
386 if (size < len) 386 if (size < len)
387 return -ENOSPC; 387 return -ENOSPC;
388 388
389 down_write(&EXT4_I(inode)->xattr_sem); 389 ext4_write_lock_xattr(inode, &no_expand);
390 390
391 if (ei->i_inline_off) 391 if (ei->i_inline_off)
392 ret = ext4_update_inline_data(handle, inode, len); 392 ret = ext4_update_inline_data(handle, inode, len);
393 else 393 else
394 ret = ext4_create_inline_data(handle, inode, len); 394 ret = ext4_create_inline_data(handle, inode, len);
395 395
396 up_write(&EXT4_I(inode)->xattr_sem); 396 ext4_write_unlock_xattr(inode, &no_expand);
397
398 return ret; 397 return ret;
399} 398}
400 399
@@ -435,6 +434,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
435 434
436 memset((void *)ext4_raw_inode(&is.iloc)->i_block, 435 memset((void *)ext4_raw_inode(&is.iloc)->i_block,
437 0, EXT4_MIN_INLINE_DATA_SIZE); 436 0, EXT4_MIN_INLINE_DATA_SIZE);
437 memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE);
438 438
439 if (ext4_has_feature_extents(inode->i_sb)) { 439 if (ext4_has_feature_extents(inode->i_sb)) {
440 if (S_ISDIR(inode->i_mode) || 440 if (S_ISDIR(inode->i_mode) ||
@@ -523,7 +523,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
523 struct inode *inode, 523 struct inode *inode,
524 unsigned flags) 524 unsigned flags)
525{ 525{
526 int ret, needed_blocks; 526 int ret, needed_blocks, no_expand;
527 handle_t *handle = NULL; 527 handle_t *handle = NULL;
528 int retries = 0, sem_held = 0; 528 int retries = 0, sem_held = 0;
529 struct page *page = NULL; 529 struct page *page = NULL;
@@ -563,7 +563,7 @@ retry:
563 goto out; 563 goto out;
564 } 564 }
565 565
566 down_write(&EXT4_I(inode)->xattr_sem); 566 ext4_write_lock_xattr(inode, &no_expand);
567 sem_held = 1; 567 sem_held = 1;
568 /* If some one has already done this for us, just exit. */ 568 /* If some one has already done this for us, just exit. */
569 if (!ext4_has_inline_data(inode)) { 569 if (!ext4_has_inline_data(inode)) {
@@ -599,7 +599,7 @@ retry:
599 page_cache_release(page); 599 page_cache_release(page);
600 page = NULL; 600 page = NULL;
601 ext4_orphan_add(handle, inode); 601 ext4_orphan_add(handle, inode);
602 up_write(&EXT4_I(inode)->xattr_sem); 602 ext4_write_unlock_xattr(inode, &no_expand);
603 sem_held = 0; 603 sem_held = 0;
604 ext4_journal_stop(handle); 604 ext4_journal_stop(handle);
605 handle = NULL; 605 handle = NULL;
@@ -625,7 +625,7 @@ out:
625 page_cache_release(page); 625 page_cache_release(page);
626 } 626 }
627 if (sem_held) 627 if (sem_held)
628 up_write(&EXT4_I(inode)->xattr_sem); 628 ext4_write_unlock_xattr(inode, &no_expand);
629 if (handle) 629 if (handle)
630 ext4_journal_stop(handle); 630 ext4_journal_stop(handle);
631 brelse(iloc.bh); 631 brelse(iloc.bh);
@@ -678,6 +678,10 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
678 goto convert; 678 goto convert;
679 } 679 }
680 680
681 ret = ext4_journal_get_write_access(handle, iloc.bh);
682 if (ret)
683 goto out;
684
681 flags |= AOP_FLAG_NOFS; 685 flags |= AOP_FLAG_NOFS;
682 686
683 page = grab_cache_page_write_begin(mapping, 0, flags); 687 page = grab_cache_page_write_begin(mapping, 0, flags);
@@ -706,7 +710,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
706out_up_read: 710out_up_read:
707 up_read(&EXT4_I(inode)->xattr_sem); 711 up_read(&EXT4_I(inode)->xattr_sem);
708out: 712out:
709 if (handle) 713 if (handle && (ret != 1))
710 ext4_journal_stop(handle); 714 ext4_journal_stop(handle);
711 brelse(iloc.bh); 715 brelse(iloc.bh);
712 return ret; 716 return ret;
@@ -718,7 +722,7 @@ convert:
718int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, 722int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
719 unsigned copied, struct page *page) 723 unsigned copied, struct page *page)
720{ 724{
721 int ret; 725 int ret, no_expand;
722 void *kaddr; 726 void *kaddr;
723 struct ext4_iloc iloc; 727 struct ext4_iloc iloc;
724 728
@@ -736,7 +740,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
736 goto out; 740 goto out;
737 } 741 }
738 742
739 down_write(&EXT4_I(inode)->xattr_sem); 743 ext4_write_lock_xattr(inode, &no_expand);
740 BUG_ON(!ext4_has_inline_data(inode)); 744 BUG_ON(!ext4_has_inline_data(inode));
741 745
742 kaddr = kmap_atomic(page); 746 kaddr = kmap_atomic(page);
@@ -746,8 +750,9 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
746 /* clear page dirty so that writepages wouldn't work for us. */ 750 /* clear page dirty so that writepages wouldn't work for us. */
747 ClearPageDirty(page); 751 ClearPageDirty(page);
748 752
749 up_write(&EXT4_I(inode)->xattr_sem); 753 ext4_write_unlock_xattr(inode, &no_expand);
750 brelse(iloc.bh); 754 brelse(iloc.bh);
755 mark_inode_dirty(inode);
751out: 756out:
752 return copied; 757 return copied;
753} 758}
@@ -757,7 +762,7 @@ ext4_journalled_write_inline_data(struct inode *inode,
757 unsigned len, 762 unsigned len,
758 struct page *page) 763 struct page *page)
759{ 764{
760 int ret; 765 int ret, no_expand;
761 void *kaddr; 766 void *kaddr;
762 struct ext4_iloc iloc; 767 struct ext4_iloc iloc;
763 768
@@ -767,11 +772,11 @@ ext4_journalled_write_inline_data(struct inode *inode,
767 return NULL; 772 return NULL;
768 } 773 }
769 774
770 down_write(&EXT4_I(inode)->xattr_sem); 775 ext4_write_lock_xattr(inode, &no_expand);
771 kaddr = kmap_atomic(page); 776 kaddr = kmap_atomic(page);
772 ext4_write_inline_data(inode, &iloc, kaddr, 0, len); 777 ext4_write_inline_data(inode, &iloc, kaddr, 0, len);
773 kunmap_atomic(kaddr); 778 kunmap_atomic(kaddr);
774 up_write(&EXT4_I(inode)->xattr_sem); 779 ext4_write_unlock_xattr(inode, &no_expand);
775 780
776 return iloc.bh; 781 return iloc.bh;
777} 782}
@@ -894,7 +899,6 @@ retry_journal:
894 goto out; 899 goto out;
895 } 900 }
896 901
897
898 page = grab_cache_page_write_begin(mapping, 0, flags); 902 page = grab_cache_page_write_begin(mapping, 0, flags);
899 if (!page) { 903 if (!page) {
900 ret = -ENOMEM; 904 ret = -ENOMEM;
@@ -912,6 +916,9 @@ retry_journal:
912 if (ret < 0) 916 if (ret < 0)
913 goto out_release_page; 917 goto out_release_page;
914 } 918 }
919 ret = ext4_journal_get_write_access(handle, iloc.bh);
920 if (ret)
921 goto out_release_page;
915 922
916 up_read(&EXT4_I(inode)->xattr_sem); 923 up_read(&EXT4_I(inode)->xattr_sem);
917 *pagep = page; 924 *pagep = page;
@@ -932,7 +939,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
932 unsigned len, unsigned copied, 939 unsigned len, unsigned copied,
933 struct page *page) 940 struct page *page)
934{ 941{
935 int i_size_changed = 0;
936 int ret; 942 int ret;
937 943
938 ret = ext4_write_inline_data_end(inode, pos, len, copied, page); 944 ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
@@ -950,10 +956,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
950 * But it's important to update i_size while still holding page lock: 956 * But it's important to update i_size while still holding page lock:
951 * page writeout could otherwise come in and zero beyond i_size. 957 * page writeout could otherwise come in and zero beyond i_size.
952 */ 958 */
953 if (pos+copied > inode->i_size) { 959 if (pos+copied > inode->i_size)
954 i_size_write(inode, pos+copied); 960 i_size_write(inode, pos+copied);
955 i_size_changed = 1;
956 }
957 unlock_page(page); 961 unlock_page(page);
958 page_cache_release(page); 962 page_cache_release(page);
959 963
@@ -963,8 +967,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
963 * ordering of page lock and transaction start for journaling 967 * ordering of page lock and transaction start for journaling
964 * filesystems. 968 * filesystems.
965 */ 969 */
966 if (i_size_changed) 970 mark_inode_dirty(inode);
967 mark_inode_dirty(inode);
968 971
969 return copied; 972 return copied;
970} 973}
@@ -1255,7 +1258,7 @@ out:
1255int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname, 1258int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
1256 struct dentry *dentry, struct inode *inode) 1259 struct dentry *dentry, struct inode *inode)
1257{ 1260{
1258 int ret, inline_size; 1261 int ret, inline_size, no_expand;
1259 void *inline_start; 1262 void *inline_start;
1260 struct ext4_iloc iloc; 1263 struct ext4_iloc iloc;
1261 struct inode *dir = d_inode(dentry->d_parent); 1264 struct inode *dir = d_inode(dentry->d_parent);
@@ -1264,7 +1267,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
1264 if (ret) 1267 if (ret)
1265 return ret; 1268 return ret;
1266 1269
1267 down_write(&EXT4_I(dir)->xattr_sem); 1270 ext4_write_lock_xattr(dir, &no_expand);
1268 if (!ext4_has_inline_data(dir)) 1271 if (!ext4_has_inline_data(dir))
1269 goto out; 1272 goto out;
1270 1273
@@ -1310,7 +1313,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
1310 1313
1311out: 1314out:
1312 ext4_mark_inode_dirty(handle, dir); 1315 ext4_mark_inode_dirty(handle, dir);
1313 up_write(&EXT4_I(dir)->xattr_sem); 1316 ext4_write_unlock_xattr(dir, &no_expand);
1314 brelse(iloc.bh); 1317 brelse(iloc.bh);
1315 return ret; 1318 return ret;
1316} 1319}
@@ -1670,7 +1673,7 @@ int ext4_delete_inline_entry(handle_t *handle,
1670 struct buffer_head *bh, 1673 struct buffer_head *bh,
1671 int *has_inline_data) 1674 int *has_inline_data)
1672{ 1675{
1673 int err, inline_size; 1676 int err, inline_size, no_expand;
1674 struct ext4_iloc iloc; 1677 struct ext4_iloc iloc;
1675 void *inline_start; 1678 void *inline_start;
1676 1679
@@ -1678,7 +1681,7 @@ int ext4_delete_inline_entry(handle_t *handle,
1678 if (err) 1681 if (err)
1679 return err; 1682 return err;
1680 1683
1681 down_write(&EXT4_I(dir)->xattr_sem); 1684 ext4_write_lock_xattr(dir, &no_expand);
1682 if (!ext4_has_inline_data(dir)) { 1685 if (!ext4_has_inline_data(dir)) {
1683 *has_inline_data = 0; 1686 *has_inline_data = 0;
1684 goto out; 1687 goto out;
@@ -1713,7 +1716,7 @@ int ext4_delete_inline_entry(handle_t *handle,
1713 1716
1714 ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size); 1717 ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size);
1715out: 1718out:
1716 up_write(&EXT4_I(dir)->xattr_sem); 1719 ext4_write_unlock_xattr(dir, &no_expand);
1717 brelse(iloc.bh); 1720 brelse(iloc.bh);
1718 if (err != -ENOENT) 1721 if (err != -ENOENT)
1719 ext4_std_error(dir->i_sb, err); 1722 ext4_std_error(dir->i_sb, err);
@@ -1812,11 +1815,11 @@ out:
1812 1815
1813int ext4_destroy_inline_data(handle_t *handle, struct inode *inode) 1816int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
1814{ 1817{
1815 int ret; 1818 int ret, no_expand;
1816 1819
1817 down_write(&EXT4_I(inode)->xattr_sem); 1820 ext4_write_lock_xattr(inode, &no_expand);
1818 ret = ext4_destroy_inline_data_nolock(handle, inode); 1821 ret = ext4_destroy_inline_data_nolock(handle, inode);
1819 up_write(&EXT4_I(inode)->xattr_sem); 1822 ext4_write_unlock_xattr(inode, &no_expand);
1820 1823
1821 return ret; 1824 return ret;
1822} 1825}
@@ -1901,7 +1904,7 @@ out:
1901void ext4_inline_data_truncate(struct inode *inode, int *has_inline) 1904void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
1902{ 1905{
1903 handle_t *handle; 1906 handle_t *handle;
1904 int inline_size, value_len, needed_blocks; 1907 int inline_size, value_len, needed_blocks, no_expand;
1905 size_t i_size; 1908 size_t i_size;
1906 void *value = NULL; 1909 void *value = NULL;
1907 struct ext4_xattr_ibody_find is = { 1910 struct ext4_xattr_ibody_find is = {
@@ -1918,7 +1921,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
1918 if (IS_ERR(handle)) 1921 if (IS_ERR(handle))
1919 return; 1922 return;
1920 1923
1921 down_write(&EXT4_I(inode)->xattr_sem); 1924 ext4_write_lock_xattr(inode, &no_expand);
1922 if (!ext4_has_inline_data(inode)) { 1925 if (!ext4_has_inline_data(inode)) {
1923 *has_inline = 0; 1926 *has_inline = 0;
1924 ext4_journal_stop(handle); 1927 ext4_journal_stop(handle);
@@ -1976,7 +1979,7 @@ out_error:
1976 up_write(&EXT4_I(inode)->i_data_sem); 1979 up_write(&EXT4_I(inode)->i_data_sem);
1977out: 1980out:
1978 brelse(is.iloc.bh); 1981 brelse(is.iloc.bh);
1979 up_write(&EXT4_I(inode)->xattr_sem); 1982 ext4_write_unlock_xattr(inode, &no_expand);
1980 kfree(value); 1983 kfree(value);
1981 if (inode->i_nlink) 1984 if (inode->i_nlink)
1982 ext4_orphan_del(handle, inode); 1985 ext4_orphan_del(handle, inode);
@@ -1992,7 +1995,7 @@ out:
1992 1995
1993int ext4_convert_inline_data(struct inode *inode) 1996int ext4_convert_inline_data(struct inode *inode)
1994{ 1997{
1995 int error, needed_blocks; 1998 int error, needed_blocks, no_expand;
1996 handle_t *handle; 1999 handle_t *handle;
1997 struct ext4_iloc iloc; 2000 struct ext4_iloc iloc;
1998 2001
@@ -2014,15 +2017,10 @@ int ext4_convert_inline_data(struct inode *inode)
2014 goto out_free; 2017 goto out_free;
2015 } 2018 }
2016 2019
2017 down_write(&EXT4_I(inode)->xattr_sem); 2020 ext4_write_lock_xattr(inode, &no_expand);
2018 if (!ext4_has_inline_data(inode)) { 2021 if (ext4_has_inline_data(inode))
2019 up_write(&EXT4_I(inode)->xattr_sem); 2022 error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
2020 goto out; 2023 ext4_write_unlock_xattr(inode, &no_expand);
2021 }
2022
2023 error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
2024 up_write(&EXT4_I(inode)->xattr_sem);
2025out:
2026 ext4_journal_stop(handle); 2024 ext4_journal_stop(handle);
2027out_free: 2025out_free:
2028 brelse(iloc.bh); 2026 brelse(iloc.bh);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4df1cb19a243..181db3c7f5d1 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -380,9 +380,9 @@ static int __check_block_validity(struct inode *inode, const char *func,
380 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, 380 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
381 map->m_len)) { 381 map->m_len)) {
382 ext4_error_inode(inode, func, line, map->m_pblk, 382 ext4_error_inode(inode, func, line, map->m_pblk,
383 "lblock %lu mapped to illegal pblock " 383 "lblock %lu mapped to illegal pblock %llu "
384 "(length %d)", (unsigned long) map->m_lblk, 384 "(length %d)", (unsigned long) map->m_lblk,
385 map->m_len); 385 map->m_pblk, map->m_len);
386 return -EFSCORRUPTED; 386 return -EFSCORRUPTED;
387 } 387 }
388 return 0; 388 return 0;
@@ -1164,9 +1164,10 @@ static int ext4_write_end(struct file *file,
1164 loff_t old_size = inode->i_size; 1164 loff_t old_size = inode->i_size;
1165 int ret = 0, ret2; 1165 int ret = 0, ret2;
1166 int i_size_changed = 0; 1166 int i_size_changed = 0;
1167 int inline_data = ext4_has_inline_data(inode);
1167 1168
1168 trace_ext4_write_end(inode, pos, len, copied); 1169 trace_ext4_write_end(inode, pos, len, copied);
1169 if (ext4_has_inline_data(inode)) { 1170 if (inline_data) {
1170 ret = ext4_write_inline_data_end(inode, pos, len, 1171 ret = ext4_write_inline_data_end(inode, pos, len,
1171 copied, page); 1172 copied, page);
1172 if (ret < 0) { 1173 if (ret < 0) {
@@ -1194,7 +1195,7 @@ static int ext4_write_end(struct file *file,
1194 * ordering of page lock and transaction start for journaling 1195 * ordering of page lock and transaction start for journaling
1195 * filesystems. 1196 * filesystems.
1196 */ 1197 */
1197 if (i_size_changed) 1198 if (i_size_changed || inline_data)
1198 ext4_mark_inode_dirty(handle, inode); 1199 ext4_mark_inode_dirty(handle, inode);
1199 1200
1200 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1201 if (pos + len > inode->i_size && ext4_can_truncate(inode))
@@ -1268,6 +1269,7 @@ static int ext4_journalled_write_end(struct file *file,
1268 int partial = 0; 1269 int partial = 0;
1269 unsigned from, to; 1270 unsigned from, to;
1270 int size_changed = 0; 1271 int size_changed = 0;
1272 int inline_data = ext4_has_inline_data(inode);
1271 1273
1272 trace_ext4_journalled_write_end(inode, pos, len, copied); 1274 trace_ext4_journalled_write_end(inode, pos, len, copied);
1273 from = pos & (PAGE_CACHE_SIZE - 1); 1275 from = pos & (PAGE_CACHE_SIZE - 1);
@@ -1275,7 +1277,7 @@ static int ext4_journalled_write_end(struct file *file,
1275 1277
1276 BUG_ON(!ext4_handle_valid(handle)); 1278 BUG_ON(!ext4_handle_valid(handle));
1277 1279
1278 if (ext4_has_inline_data(inode)) { 1280 if (inline_data) {
1279 ret = ext4_write_inline_data_end(inode, pos, len, 1281 ret = ext4_write_inline_data_end(inode, pos, len,
1280 copied, page); 1282 copied, page);
1281 if (ret < 0) { 1283 if (ret < 0) {
@@ -1306,7 +1308,7 @@ static int ext4_journalled_write_end(struct file *file,
1306 if (old_size < pos) 1308 if (old_size < pos)
1307 pagecache_isize_extended(inode, old_size, pos); 1309 pagecache_isize_extended(inode, old_size, pos);
1308 1310
1309 if (size_changed) { 1311 if (size_changed || inline_data) {
1310 ret2 = ext4_mark_inode_dirty(handle, inode); 1312 ret2 = ext4_mark_inode_dirty(handle, inode);
1311 if (!ret) 1313 if (!ret)
1312 ret = ret2; 1314 ret = ret2;
@@ -1515,6 +1517,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1515 BUG_ON(!PageLocked(page)); 1517 BUG_ON(!PageLocked(page));
1516 BUG_ON(PageWriteback(page)); 1518 BUG_ON(PageWriteback(page));
1517 if (invalidate) { 1519 if (invalidate) {
1520 if (page_mapped(page))
1521 clear_page_dirty_for_io(page);
1518 block_invalidatepage(page, 0, PAGE_CACHE_SIZE); 1522 block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
1519 ClearPageUptodate(page); 1523 ClearPageUptodate(page);
1520 } 1524 }
@@ -1802,11 +1806,7 @@ static int __ext4_journalled_writepage(struct page *page,
1802 } 1806 }
1803 1807
1804 if (inline_data) { 1808 if (inline_data) {
1805 BUFFER_TRACE(inode_bh, "get write access"); 1809 ret = ext4_mark_inode_dirty(handle, inode);
1806 ret = ext4_journal_get_write_access(handle, inode_bh);
1807
1808 err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
1809
1810 } else { 1810 } else {
1811 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 1811 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1812 do_journal_get_write_access); 1812 do_journal_get_write_access);
@@ -3256,29 +3256,29 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
3256 * case, we allocate an io_end structure to hook to the iocb. 3256 * case, we allocate an io_end structure to hook to the iocb.
3257 */ 3257 */
3258 iocb->private = NULL; 3258 iocb->private = NULL;
3259 ext4_inode_aio_set(inode, NULL);
3260 if (!is_sync_kiocb(iocb)) {
3261 io_end = ext4_init_io_end(inode, GFP_NOFS);
3262 if (!io_end) {
3263 ret = -ENOMEM;
3264 goto retake_lock;
3265 }
3266 /*
3267 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
3268 */
3269 iocb->private = ext4_get_io_end(io_end);
3270 /*
3271 * we save the io structure for current async direct
3272 * IO, so that later ext4_map_blocks() could flag the
3273 * io structure whether there is a unwritten extents
3274 * needs to be converted when IO is completed.
3275 */
3276 ext4_inode_aio_set(inode, io_end);
3277 }
3278
3279 if (overwrite) { 3259 if (overwrite) {
3280 get_block_func = ext4_get_block_write_nolock; 3260 get_block_func = ext4_get_block_write_nolock;
3281 } else { 3261 } else {
3262 ext4_inode_aio_set(inode, NULL);
3263 if (!is_sync_kiocb(iocb)) {
3264 io_end = ext4_init_io_end(inode, GFP_NOFS);
3265 if (!io_end) {
3266 ret = -ENOMEM;
3267 goto retake_lock;
3268 }
3269 /*
3270 * Grab reference for DIO. Will be dropped in
3271 * ext4_end_io_dio()
3272 */
3273 iocb->private = ext4_get_io_end(io_end);
3274 /*
3275 * we save the io structure for current async direct
3276 * IO, so that later ext4_map_blocks() could flag the
3277 * io structure whether there is a unwritten extents
3278 * needs to be converted when IO is completed.
3279 */
3280 ext4_inode_aio_set(inode, io_end);
3281 }
3282 get_block_func = ext4_get_block_write; 3282 get_block_func = ext4_get_block_write;
3283 dio_flags = DIO_LOCKING; 3283 dio_flags = DIO_LOCKING;
3284 } 3284 }
@@ -3785,28 +3785,28 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
3785 EXT4_BLOCK_SIZE_BITS(sb); 3785 EXT4_BLOCK_SIZE_BITS(sb);
3786 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); 3786 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
3787 3787
3788 /* If there are no blocks to remove, return now */ 3788 /* If there are blocks to remove, do it */
3789 if (first_block >= stop_block) 3789 if (stop_block > first_block) {
3790 goto out_stop;
3791 3790
3792 down_write(&EXT4_I(inode)->i_data_sem); 3791 down_write(&EXT4_I(inode)->i_data_sem);
3793 ext4_discard_preallocations(inode); 3792 ext4_discard_preallocations(inode);
3794 3793
3795 ret = ext4_es_remove_extent(inode, first_block, 3794 ret = ext4_es_remove_extent(inode, first_block,
3796 stop_block - first_block); 3795 stop_block - first_block);
3797 if (ret) { 3796 if (ret) {
3798 up_write(&EXT4_I(inode)->i_data_sem); 3797 up_write(&EXT4_I(inode)->i_data_sem);
3799 goto out_stop; 3798 goto out_stop;
3800 } 3799 }
3801 3800
3802 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3801 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3803 ret = ext4_ext_remove_space(inode, first_block, 3802 ret = ext4_ext_remove_space(inode, first_block,
3804 stop_block - 1); 3803 stop_block - 1);
3805 else 3804 else
3806 ret = ext4_ind_remove_space(handle, inode, first_block, 3805 ret = ext4_ind_remove_space(handle, inode, first_block,
3807 stop_block); 3806 stop_block);
3808 3807
3809 up_write(&EXT4_I(inode)->i_data_sem); 3808 up_write(&EXT4_I(inode)->i_data_sem);
3809 }
3810 if (IS_SYNC(inode)) 3810 if (IS_SYNC(inode))
3811 ext4_handle_sync(handle); 3811 ext4_handle_sync(handle);
3812 3812
@@ -3989,7 +3989,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
3989 int inodes_per_block, inode_offset; 3989 int inodes_per_block, inode_offset;
3990 3990
3991 iloc->bh = NULL; 3991 iloc->bh = NULL;
3992 if (!ext4_valid_inum(sb, inode->i_ino)) 3992 if (inode->i_ino < EXT4_ROOT_INO ||
3993 inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
3993 return -EFSCORRUPTED; 3994 return -EFSCORRUPTED;
3994 3995
3995 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 3996 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
@@ -4231,6 +4232,12 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4231 goto bad_inode; 4232 goto bad_inode;
4232 raw_inode = ext4_raw_inode(&iloc); 4233 raw_inode = ext4_raw_inode(&iloc);
4233 4234
4235 if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
4236 EXT4_ERROR_INODE(inode, "root inode unallocated");
4237 ret = -EFSCORRUPTED;
4238 goto bad_inode;
4239 }
4240
4234 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4241 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4235 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4242 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4236 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4243 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
@@ -4417,6 +4424,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4417 inode->i_op = &ext4_symlink_inode_operations; 4424 inode->i_op = &ext4_symlink_inode_operations;
4418 ext4_set_aops(inode); 4425 ext4_set_aops(inode);
4419 } 4426 }
4427 inode_nohighmem(inode);
4420 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 4428 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4421 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 4429 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4422 inode->i_op = &ext4_special_inode_operations; 4430 inode->i_op = &ext4_special_inode_operations;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 1ba82dc5afa3..75f79ff29ce0 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2445,7 +2445,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2445 * initialize bb_free to be able to skip 2445 * initialize bb_free to be able to skip
2446 * empty groups without initialization 2446 * empty groups without initialization
2447 */ 2447 */
2448 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2448 if (ext4_has_group_desc_csum(sb) &&
2449 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
2449 meta_group_info[i]->bb_free = 2450 meta_group_info[i]->bb_free =
2450 ext4_free_clusters_after_init(sb, group, desc); 2451 ext4_free_clusters_after_init(sb, group, desc);
2451 } else { 2452 } else {
@@ -2966,7 +2967,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2966#endif 2967#endif
2967 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 2968 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2968 ac->ac_b_ex.fe_len); 2969 ac->ac_b_ex.fe_len);
2969 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2970 if (ext4_has_group_desc_csum(sb) &&
2971 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
2970 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 2972 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2971 ext4_free_group_clusters_set(sb, gdp, 2973 ext4_free_group_clusters_set(sb, gdp,
2972 ext4_free_clusters_after_init(sb, 2974 ext4_free_clusters_after_init(sb,
@@ -3874,7 +3876,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
3874 3876
3875 err = ext4_mb_load_buddy(sb, group, &e4b); 3877 err = ext4_mb_load_buddy(sb, group, &e4b);
3876 if (err) { 3878 if (err) {
3877 ext4_error(sb, "Error loading buddy information for %u", group); 3879 ext4_warning(sb, "Error %d loading buddy information for %u",
3880 err, group);
3878 put_bh(bitmap_bh); 3881 put_bh(bitmap_bh);
3879 return 0; 3882 return 0;
3880 } 3883 }
@@ -4031,10 +4034,11 @@ repeat:
4031 BUG_ON(pa->pa_type != MB_INODE_PA); 4034 BUG_ON(pa->pa_type != MB_INODE_PA);
4032 group = ext4_get_group_number(sb, pa->pa_pstart); 4035 group = ext4_get_group_number(sb, pa->pa_pstart);
4033 4036
4034 err = ext4_mb_load_buddy(sb, group, &e4b); 4037 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4038 GFP_NOFS|__GFP_NOFAIL);
4035 if (err) { 4039 if (err) {
4036 ext4_error(sb, "Error loading buddy information for %u", 4040 ext4_error(sb, "Error %d loading buddy information for %u",
4037 group); 4041 err, group);
4038 continue; 4042 continue;
4039 } 4043 }
4040 4044
@@ -4290,11 +4294,14 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
4290 spin_unlock(&lg->lg_prealloc_lock); 4294 spin_unlock(&lg->lg_prealloc_lock);
4291 4295
4292 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 4296 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4297 int err;
4293 4298
4294 group = ext4_get_group_number(sb, pa->pa_pstart); 4299 group = ext4_get_group_number(sb, pa->pa_pstart);
4295 if (ext4_mb_load_buddy(sb, group, &e4b)) { 4300 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4296 ext4_error(sb, "Error loading buddy information for %u", 4301 GFP_NOFS|__GFP_NOFAIL);
4297 group); 4302 if (err) {
4303 ext4_error(sb, "Error %d loading buddy information for %u",
4304 err, group);
4298 continue; 4305 continue;
4299 } 4306 }
4300 ext4_lock_group(sb, group); 4307 ext4_lock_group(sb, group);
@@ -5116,8 +5123,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5116 5123
5117 ret = ext4_mb_load_buddy(sb, group, &e4b); 5124 ret = ext4_mb_load_buddy(sb, group, &e4b);
5118 if (ret) { 5125 if (ret) {
5119 ext4_error(sb, "Error in loading buddy " 5126 ext4_warning(sb, "Error %d loading buddy information for %u",
5120 "information for %u", group); 5127 ret, group);
5121 return ret; 5128 return ret;
5122 } 5129 }
5123 bitmap = e4b.bd_bitmap; 5130 bitmap = e4b.bd_bitmap;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 4c36dca486cc..97472088d65a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2429,8 +2429,7 @@ static int ext4_add_nondir(handle_t *handle,
2429 int err = ext4_add_entry(handle, dentry, inode); 2429 int err = ext4_add_entry(handle, dentry, inode);
2430 if (!err) { 2430 if (!err) {
2431 ext4_mark_inode_dirty(handle, inode); 2431 ext4_mark_inode_dirty(handle, inode);
2432 unlock_new_inode(inode); 2432 d_instantiate_new(dentry, inode);
2433 d_instantiate(dentry, inode);
2434 return 0; 2433 return 0;
2435 } 2434 }
2436 drop_nlink(inode); 2435 drop_nlink(inode);
@@ -2669,8 +2668,7 @@ out_clear_inode:
2669 err = ext4_mark_inode_dirty(handle, dir); 2668 err = ext4_mark_inode_dirty(handle, dir);
2670 if (err) 2669 if (err)
2671 goto out_clear_inode; 2670 goto out_clear_inode;
2672 unlock_new_inode(inode); 2671 d_instantiate_new(dentry, inode);
2673 d_instantiate(dentry, inode);
2674 if (IS_DIRSYNC(dir)) 2672 if (IS_DIRSYNC(dir))
2675 ext4_handle_sync(handle); 2673 ext4_handle_sync(handle);
2676 2674
@@ -3151,6 +3149,7 @@ static int ext4_symlink(struct inode *dir,
3151 if ((disk_link.len > EXT4_N_BLOCKS * 4)) { 3149 if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
3152 if (!encryption_required) 3150 if (!encryption_required)
3153 inode->i_op = &ext4_symlink_inode_operations; 3151 inode->i_op = &ext4_symlink_inode_operations;
3152 inode_nohighmem(inode);
3154 ext4_set_aops(inode); 3153 ext4_set_aops(inode);
3155 /* 3154 /*
3156 * We cannot call page_symlink() with transaction started 3155 * We cannot call page_symlink() with transaction started
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 74516efd874c..d2421fd38833 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1903,7 +1903,7 @@ retry:
1903 return 0; 1903 return 0;
1904 1904
1905 n_group = ext4_get_group_number(sb, n_blocks_count - 1); 1905 n_group = ext4_get_group_number(sb, n_blocks_count - 1);
1906 if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) { 1906 if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
1907 ext4_warning(sb, "resize would cause inodes_count overflow"); 1907 ext4_warning(sb, "resize would cause inodes_count overflow");
1908 return -EINVAL; 1908 return -EINVAL;
1909 } 1909 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8bdb0cc2722f..8d18f6142da5 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -688,6 +688,7 @@ __acquires(bitlock)
688 } 688 }
689 689
690 ext4_unlock_group(sb, grp); 690 ext4_unlock_group(sb, grp);
691 ext4_commit_super(sb, 1);
691 ext4_handle_error(sb); 692 ext4_handle_error(sb);
692 /* 693 /*
693 * We only get here in the ERRORS_RO case; relocking the group 694 * We only get here in the ERRORS_RO case; relocking the group
@@ -2101,6 +2102,7 @@ static int ext4_check_descriptors(struct super_block *sb,
2101 struct ext4_sb_info *sbi = EXT4_SB(sb); 2102 struct ext4_sb_info *sbi = EXT4_SB(sb);
2102 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); 2103 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
2103 ext4_fsblk_t last_block; 2104 ext4_fsblk_t last_block;
2105 ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
2104 ext4_fsblk_t block_bitmap; 2106 ext4_fsblk_t block_bitmap;
2105 ext4_fsblk_t inode_bitmap; 2107 ext4_fsblk_t inode_bitmap;
2106 ext4_fsblk_t inode_table; 2108 ext4_fsblk_t inode_table;
@@ -2130,6 +2132,16 @@ static int ext4_check_descriptors(struct super_block *sb,
2130 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 2132 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2131 "Block bitmap for group %u overlaps " 2133 "Block bitmap for group %u overlaps "
2132 "superblock", i); 2134 "superblock", i);
2135 if (!(sb->s_flags & MS_RDONLY))
2136 return 0;
2137 }
2138 if (block_bitmap >= sb_block + 1 &&
2139 block_bitmap <= last_bg_block) {
2140 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2141 "Block bitmap for group %u overlaps "
2142 "block group descriptors", i);
2143 if (!(sb->s_flags & MS_RDONLY))
2144 return 0;
2133 } 2145 }
2134 if (block_bitmap < first_block || block_bitmap > last_block) { 2146 if (block_bitmap < first_block || block_bitmap > last_block) {
2135 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 2147 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2142,6 +2154,16 @@ static int ext4_check_descriptors(struct super_block *sb,
2142 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 2154 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2143 "Inode bitmap for group %u overlaps " 2155 "Inode bitmap for group %u overlaps "
2144 "superblock", i); 2156 "superblock", i);
2157 if (!(sb->s_flags & MS_RDONLY))
2158 return 0;
2159 }
2160 if (inode_bitmap >= sb_block + 1 &&
2161 inode_bitmap <= last_bg_block) {
2162 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2163 "Inode bitmap for group %u overlaps "
2164 "block group descriptors", i);
2165 if (!(sb->s_flags & MS_RDONLY))
2166 return 0;
2145 } 2167 }
2146 if (inode_bitmap < first_block || inode_bitmap > last_block) { 2168 if (inode_bitmap < first_block || inode_bitmap > last_block) {
2147 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 2169 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2154,6 +2176,16 @@ static int ext4_check_descriptors(struct super_block *sb,
2154 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 2176 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2155 "Inode table for group %u overlaps " 2177 "Inode table for group %u overlaps "
2156 "superblock", i); 2178 "superblock", i);
2179 if (!(sb->s_flags & MS_RDONLY))
2180 return 0;
2181 }
2182 if (inode_table >= sb_block + 1 &&
2183 inode_table <= last_bg_block) {
2184 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2185 "Inode table for group %u overlaps "
2186 "block group descriptors", i);
2187 if (!(sb->s_flags & MS_RDONLY))
2188 return 0;
2157 } 2189 }
2158 if (inode_table < first_block || 2190 if (inode_table < first_block ||
2159 inode_table + sbi->s_itb_per_group - 1 > last_block) { 2191 inode_table + sbi->s_itb_per_group - 1 > last_block) {
@@ -2835,6 +2867,9 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
2835 ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; 2867 ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
2836 struct ext4_group_desc *gdp = NULL; 2868 struct ext4_group_desc *gdp = NULL;
2837 2869
2870 if (!ext4_has_group_desc_csum(sb))
2871 return ngroups;
2872
2838 for (group = 0; group < ngroups; group++) { 2873 for (group = 0; group < ngroups; group++) {
2839 gdp = ext4_get_group_desc(sb, group, NULL); 2874 gdp = ext4_get_group_desc(sb, group, NULL);
2840 if (!gdp) 2875 if (!gdp)
@@ -3444,6 +3479,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3444 le32_to_cpu(es->s_log_block_size)); 3479 le32_to_cpu(es->s_log_block_size));
3445 goto failed_mount; 3480 goto failed_mount;
3446 } 3481 }
3482 if (le32_to_cpu(es->s_log_cluster_size) >
3483 (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
3484 ext4_msg(sb, KERN_ERR,
3485 "Invalid log cluster size: %u",
3486 le32_to_cpu(es->s_log_cluster_size));
3487 goto failed_mount;
3488 }
3447 3489
3448 if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) { 3490 if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
3449 ext4_msg(sb, KERN_ERR, 3491 ext4_msg(sb, KERN_ERR,
@@ -3508,6 +3550,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3508 } else { 3550 } else {
3509 sbi->s_inode_size = le16_to_cpu(es->s_inode_size); 3551 sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
3510 sbi->s_first_ino = le32_to_cpu(es->s_first_ino); 3552 sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
3553 if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
3554 ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
3555 sbi->s_first_ino);
3556 goto failed_mount;
3557 }
3511 if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || 3558 if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
3512 (!is_power_of_2(sbi->s_inode_size)) || 3559 (!is_power_of_2(sbi->s_inode_size)) ||
3513 (sbi->s_inode_size > blocksize)) { 3560 (sbi->s_inode_size > blocksize)) {
@@ -3584,13 +3631,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3584 "block size (%d)", clustersize, blocksize); 3631 "block size (%d)", clustersize, blocksize);
3585 goto failed_mount; 3632 goto failed_mount;
3586 } 3633 }
3587 if (le32_to_cpu(es->s_log_cluster_size) >
3588 (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
3589 ext4_msg(sb, KERN_ERR,
3590 "Invalid log cluster size: %u",
3591 le32_to_cpu(es->s_log_cluster_size));
3592 goto failed_mount;
3593 }
3594 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - 3634 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
3595 le32_to_cpu(es->s_log_block_size); 3635 le32_to_cpu(es->s_log_block_size);
3596 sbi->s_clusters_per_group = 3636 sbi->s_clusters_per_group =
@@ -3611,10 +3651,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3611 } 3651 }
3612 } else { 3652 } else {
3613 if (clustersize != blocksize) { 3653 if (clustersize != blocksize) {
3614 ext4_warning(sb, "fragment/cluster size (%d) != " 3654 ext4_msg(sb, KERN_ERR,
3615 "block size (%d)", clustersize, 3655 "fragment/cluster size (%d) != "
3616 blocksize); 3656 "block size (%d)", clustersize, blocksize);
3617 clustersize = blocksize; 3657 goto failed_mount;
3618 } 3658 }
3619 if (sbi->s_blocks_per_group > blocksize * 8) { 3659 if (sbi->s_blocks_per_group > blocksize * 8) {
3620 ext4_msg(sb, KERN_ERR, 3660 ext4_msg(sb, KERN_ERR,
@@ -3668,6 +3708,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3668 ext4_blocks_count(es)); 3708 ext4_blocks_count(es));
3669 goto failed_mount; 3709 goto failed_mount;
3670 } 3710 }
3711 if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
3712 (sbi->s_cluster_ratio == 1)) {
3713 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
3714 "block is 0 with a 1k block and cluster size");
3715 goto failed_mount;
3716 }
3717
3671 blocks_count = (ext4_blocks_count(es) - 3718 blocks_count = (ext4_blocks_count(es) -
3672 le32_to_cpu(es->s_first_data_block) + 3719 le32_to_cpu(es->s_first_data_block) +
3673 EXT4_BLOCKS_PER_GROUP(sb) - 1); 3720 EXT4_BLOCKS_PER_GROUP(sb) - 1);
@@ -3703,6 +3750,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3703 ret = -ENOMEM; 3750 ret = -ENOMEM;
3704 goto failed_mount; 3751 goto failed_mount;
3705 } 3752 }
3753 if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
3754 le32_to_cpu(es->s_inodes_count)) {
3755 ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
3756 le32_to_cpu(es->s_inodes_count),
3757 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
3758 ret = -EINVAL;
3759 goto failed_mount;
3760 }
3706 3761
3707 bgl_lock_init(sbi->s_blockgroup_lock); 3762 bgl_lock_init(sbi->s_blockgroup_lock);
3708 3763
@@ -3716,13 +3771,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3716 goto failed_mount2; 3771 goto failed_mount2;
3717 } 3772 }
3718 } 3773 }
3774 sbi->s_gdb_count = db_count;
3719 if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) { 3775 if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
3720 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); 3776 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
3721 ret = -EFSCORRUPTED; 3777 ret = -EFSCORRUPTED;
3722 goto failed_mount2; 3778 goto failed_mount2;
3723 } 3779 }
3724 3780
3725 sbi->s_gdb_count = db_count;
3726 get_random_bytes(&sbi->s_next_generation, sizeof(u32)); 3781 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
3727 spin_lock_init(&sbi->s_next_gen_lock); 3782 spin_lock_init(&sbi->s_next_gen_lock);
3728 3783
@@ -4381,6 +4436,14 @@ static int ext4_commit_super(struct super_block *sb, int sync)
4381 4436
4382 if (!sbh || block_device_ejected(sb)) 4437 if (!sbh || block_device_ejected(sb))
4383 return error; 4438 return error;
4439
4440 /*
4441 * The superblock bh should be mapped, but it might not be if the
4442 * device was hot-removed. Not much we can do but fail the I/O.
4443 */
4444 if (!buffer_mapped(sbh))
4445 return error;
4446
4384 if (buffer_write_io_error(sbh)) { 4447 if (buffer_write_io_error(sbh)) {
4385 /* 4448 /*
4386 * Oh, dear. A previous attempt to write the 4449 * Oh, dear. A previous attempt to write the
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index e8e7af62ac95..287c3980fa0b 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -45,7 +45,7 @@ static const char *ext4_encrypted_follow_link(struct dentry *dentry, void **cook
45 cpage = read_mapping_page(inode->i_mapping, 0, NULL); 45 cpage = read_mapping_page(inode->i_mapping, 0, NULL);
46 if (IS_ERR(cpage)) 46 if (IS_ERR(cpage))
47 return ERR_CAST(cpage); 47 return ERR_CAST(cpage);
48 caddr = kmap(cpage); 48 caddr = page_address(cpage);
49 caddr[size] = 0; 49 caddr[size] = 0;
50 } 50 }
51 51
@@ -75,16 +75,12 @@ static const char *ext4_encrypted_follow_link(struct dentry *dentry, void **cook
75 /* Null-terminate the name */ 75 /* Null-terminate the name */
76 if (res <= plen) 76 if (res <= plen)
77 paddr[res] = '\0'; 77 paddr[res] = '\0';
78 if (cpage) { 78 if (cpage)
79 kunmap(cpage);
80 page_cache_release(cpage); 79 page_cache_release(cpage);
81 }
82 return *cookie = paddr; 80 return *cookie = paddr;
83errout: 81errout:
84 if (cpage) { 82 if (cpage)
85 kunmap(cpage);
86 page_cache_release(cpage); 83 page_cache_release(cpage);
87 }
88 kfree(paddr); 84 kfree(paddr);
89 return ERR_PTR(res); 85 return ERR_PTR(res);
90} 86}
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 7c23363ecf19..c7cad05aed27 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -828,8 +828,6 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
828 if (!IS_LAST_ENTRY(s->first)) 828 if (!IS_LAST_ENTRY(s->first))
829 ext4_xattr_rehash(header(s->base), 829 ext4_xattr_rehash(header(s->base),
830 s->here); 830 s->here);
831 ext4_xattr_cache_insert(ext4_mb_cache,
832 bs->bh);
833 } 831 }
834 unlock_buffer(bs->bh); 832 unlock_buffer(bs->bh);
835 if (error == -EFSCORRUPTED) 833 if (error == -EFSCORRUPTED)
@@ -918,6 +916,7 @@ inserted:
918 } else if (bs->bh && s->base == bs->bh->b_data) { 916 } else if (bs->bh && s->base == bs->bh->b_data) {
919 /* We were modifying this block in-place. */ 917 /* We were modifying this block in-place. */
920 ea_bdebug(bs->bh, "keeping this block"); 918 ea_bdebug(bs->bh, "keeping this block");
919 ext4_xattr_cache_insert(ext4_mb_cache, bs->bh);
921 new_bh = bs->bh; 920 new_bh = bs->bh;
922 get_bh(new_bh); 921 get_bh(new_bh);
923 } else { 922 } else {
@@ -1118,16 +1117,14 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
1118 struct ext4_xattr_block_find bs = { 1117 struct ext4_xattr_block_find bs = {
1119 .s = { .not_found = -ENODATA, }, 1118 .s = { .not_found = -ENODATA, },
1120 }; 1119 };
1121 unsigned long no_expand; 1120 int no_expand;
1122 int error; 1121 int error;
1123 1122
1124 if (!name) 1123 if (!name)
1125 return -EINVAL; 1124 return -EINVAL;
1126 if (strlen(name) > 255) 1125 if (strlen(name) > 255)
1127 return -ERANGE; 1126 return -ERANGE;
1128 down_write(&EXT4_I(inode)->xattr_sem); 1127 ext4_write_lock_xattr(inode, &no_expand);
1129 no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
1130 ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
1131 1128
1132 error = ext4_reserve_inode_write(handle, inode, &is.iloc); 1129 error = ext4_reserve_inode_write(handle, inode, &is.iloc);
1133 if (error) 1130 if (error)
@@ -1188,7 +1185,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
1188 ext4_xattr_update_super_block(handle, inode->i_sb); 1185 ext4_xattr_update_super_block(handle, inode->i_sb);
1189 inode->i_ctime = ext4_current_time(inode); 1186 inode->i_ctime = ext4_current_time(inode);
1190 if (!value) 1187 if (!value)
1191 ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); 1188 no_expand = 0;
1192 error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); 1189 error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
1193 /* 1190 /*
1194 * The bh is consumed by ext4_mark_iloc_dirty, even with 1191 * The bh is consumed by ext4_mark_iloc_dirty, even with
@@ -1202,9 +1199,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
1202cleanup: 1199cleanup:
1203 brelse(is.iloc.bh); 1200 brelse(is.iloc.bh);
1204 brelse(bs.bh); 1201 brelse(bs.bh);
1205 if (no_expand == 0) 1202 ext4_write_unlock_xattr(inode, &no_expand);
1206 ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
1207 up_write(&EXT4_I(inode)->xattr_sem);
1208 return error; 1203 return error;
1209} 1204}
1210 1205
@@ -1288,12 +1283,11 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
1288 int error = 0, tried_min_extra_isize = 0; 1283 int error = 0, tried_min_extra_isize = 0;
1289 int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize); 1284 int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
1290 int isize_diff; /* How much do we need to grow i_extra_isize */ 1285 int isize_diff; /* How much do we need to grow i_extra_isize */
1286 int no_expand;
1287
1288 if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
1289 return 0;
1291 1290
1292 down_write(&EXT4_I(inode)->xattr_sem);
1293 /*
1294 * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
1295 */
1296 ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
1297retry: 1291retry:
1298 isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize; 1292 isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
1299 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) 1293 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
@@ -1487,8 +1481,7 @@ retry:
1487 } 1481 }
1488 brelse(bh); 1482 brelse(bh);
1489out: 1483out:
1490 ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); 1484 ext4_write_unlock_xattr(inode, &no_expand);
1491 up_write(&EXT4_I(inode)->xattr_sem);
1492 return 0; 1485 return 0;
1493 1486
1494cleanup: 1487cleanup:
@@ -1500,10 +1493,10 @@ cleanup:
1500 kfree(bs); 1493 kfree(bs);
1501 brelse(bh); 1494 brelse(bh);
1502 /* 1495 /*
1503 * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode 1496 * Inode size expansion failed; don't try again
1504 * size expansion failed.
1505 */ 1497 */
1506 up_write(&EXT4_I(inode)->xattr_sem); 1498 no_expand = 1;
1499 ext4_write_unlock_xattr(inode, &no_expand);
1507 return error; 1500 return error;
1508} 1501}
1509 1502
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index ddc0957760ba..c000ed398555 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -101,6 +101,38 @@ extern const struct xattr_handler ext4_xattr_security_handler;
101 101
102#define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c" 102#define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
103 103
104/*
105 * The EXT4_STATE_NO_EXPAND is overloaded and used for two purposes.
106 * The first is to signal that there the inline xattrs and data are
107 * taking up so much space that we might as well not keep trying to
108 * expand it. The second is that xattr_sem is taken for writing, so
109 * we shouldn't try to recurse into the inode expansion. For this
110 * second case, we need to make sure that we take save and restore the
111 * NO_EXPAND state flag appropriately.
112 */
113static inline void ext4_write_lock_xattr(struct inode *inode, int *save)
114{
115 down_write(&EXT4_I(inode)->xattr_sem);
116 *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
117 ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
118}
119
120static inline int ext4_write_trylock_xattr(struct inode *inode, int *save)
121{
122 if (down_write_trylock(&EXT4_I(inode)->xattr_sem) == 0)
123 return 0;
124 *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
125 ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
126 return 1;
127}
128
129static inline void ext4_write_unlock_xattr(struct inode *inode, int *save)
130{
131 if (*save == 0)
132 ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
133 up_write(&EXT4_I(inode)->xattr_sem);
134}
135
104extern ssize_t ext4_listxattr(struct dentry *, char *, size_t); 136extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
105 137
106extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t); 138extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 98b2fc2678ff..f6ccb21f286b 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -721,7 +721,7 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
721 if (!ret) { 721 if (!ret) {
722 map_bh(bh, inode->i_sb, map.m_pblk); 722 map_bh(bh, inode->i_sb, map.m_pblk);
723 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags; 723 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
724 bh->b_size = map.m_len << inode->i_blkbits; 724 bh->b_size = (u64)map.m_len << inode->i_blkbits;
725 } 725 }
726 return ret; 726 return ret;
727} 727}
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 7ddba812e11b..6827b9c942dc 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -172,7 +172,7 @@ void f2fs_drop_largest_extent(struct inode *inode, pgoff_t fofs)
172 __drop_largest_extent(inode, fofs, 1); 172 __drop_largest_extent(inode, fofs, 1);
173} 173}
174 174
175void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext) 175static void __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
176{ 176{
177 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 177 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
178 struct extent_tree *et; 178 struct extent_tree *et;
@@ -204,6 +204,14 @@ out:
204 write_unlock(&et->lock); 204 write_unlock(&et->lock);
205} 205}
206 206
207void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
208{
209 __f2fs_init_extent_tree(inode, i_ext);
210
211 if (!F2FS_I(inode)->extent_tree)
212 set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
213}
214
207static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs, 215static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
208 struct extent_info *ei) 216 struct extent_info *ei)
209{ 217{
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index fedbf67a0842..928b9e046d8a 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -522,8 +522,10 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
522 get_node_info(sbi, nid, dni); 522 get_node_info(sbi, nid, dni);
523 523
524 if (sum->version != dni->version) { 524 if (sum->version != dni->version) {
525 f2fs_put_page(node_page, 1); 525 f2fs_msg(sbi->sb, KERN_WARNING,
526 return false; 526 "%s: valid data with mismatched node version.",
527 __func__);
528 set_sbi_flag(sbi, SBI_NEED_FSCK);
527 } 529 }
528 530
529 *nofs = ofs_of_node(node_page); 531 *nofs = ofs_of_node(node_page);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 97e20decacb4..5528801a5baf 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -202,6 +202,7 @@ make_now:
202 inode->i_op = &f2fs_encrypted_symlink_inode_operations; 202 inode->i_op = &f2fs_encrypted_symlink_inode_operations;
203 else 203 else
204 inode->i_op = &f2fs_symlink_inode_operations; 204 inode->i_op = &f2fs_symlink_inode_operations;
205 inode_nohighmem(inode);
205 inode->i_mapping->a_ops = &f2fs_dblock_aops; 206 inode->i_mapping->a_ops = &f2fs_dblock_aops;
206 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 207 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
207 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 208 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 2c32110f9fc0..e5553cd8fe4e 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -150,8 +150,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
150 150
151 alloc_nid_done(sbi, ino); 151 alloc_nid_done(sbi, ino);
152 152
153 d_instantiate(dentry, inode); 153 d_instantiate_new(dentry, inode);
154 unlock_new_inode(inode);
155 154
156 if (IS_DIRSYNC(dir)) 155 if (IS_DIRSYNC(dir))
157 f2fs_sync_fs(sbi->sb, 1); 156 f2fs_sync_fs(sbi->sb, 1);
@@ -351,6 +350,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
351 inode->i_op = &f2fs_encrypted_symlink_inode_operations; 350 inode->i_op = &f2fs_encrypted_symlink_inode_operations;
352 else 351 else
353 inode->i_op = &f2fs_symlink_inode_operations; 352 inode->i_op = &f2fs_symlink_inode_operations;
353 inode_nohighmem(inode);
354 inode->i_mapping->a_ops = &f2fs_dblock_aops; 354 inode->i_mapping->a_ops = &f2fs_dblock_aops;
355 355
356 f2fs_lock_op(sbi); 356 f2fs_lock_op(sbi);
@@ -398,8 +398,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
398 err = page_symlink(inode, p_str, p_len); 398 err = page_symlink(inode, p_str, p_len);
399 399
400err_out: 400err_out:
401 d_instantiate(dentry, inode); 401 d_instantiate_new(dentry, inode);
402 unlock_new_inode(inode);
403 402
404 /* 403 /*
405 * Let's flush symlink data in order to avoid broken symlink as much as 404 * Let's flush symlink data in order to avoid broken symlink as much as
@@ -453,8 +452,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
453 452
454 alloc_nid_done(sbi, inode->i_ino); 453 alloc_nid_done(sbi, inode->i_ino);
455 454
456 d_instantiate(dentry, inode); 455 d_instantiate_new(dentry, inode);
457 unlock_new_inode(inode);
458 456
459 if (IS_DIRSYNC(dir)) 457 if (IS_DIRSYNC(dir))
460 f2fs_sync_fs(sbi->sb, 1); 458 f2fs_sync_fs(sbi->sb, 1);
@@ -498,8 +496,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
498 496
499 alloc_nid_done(sbi, inode->i_ino); 497 alloc_nid_done(sbi, inode->i_ino);
500 498
501 d_instantiate(dentry, inode); 499 d_instantiate_new(dentry, inode);
502 unlock_new_inode(inode);
503 500
504 if (IS_DIRSYNC(dir)) 501 if (IS_DIRSYNC(dir))
505 f2fs_sync_fs(sbi->sb, 1); 502 f2fs_sync_fs(sbi->sb, 1);
@@ -942,7 +939,7 @@ static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cook
942 cpage = read_mapping_page(inode->i_mapping, 0, NULL); 939 cpage = read_mapping_page(inode->i_mapping, 0, NULL);
943 if (IS_ERR(cpage)) 940 if (IS_ERR(cpage))
944 return ERR_CAST(cpage); 941 return ERR_CAST(cpage);
945 caddr = kmap(cpage); 942 caddr = page_address(cpage);
946 caddr[size] = 0; 943 caddr[size] = 0;
947 944
948 /* Symlink is encrypted */ 945 /* Symlink is encrypted */
@@ -982,13 +979,11 @@ static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cook
982 /* Null-terminate the name */ 979 /* Null-terminate the name */
983 paddr[res] = '\0'; 980 paddr[res] = '\0';
984 981
985 kunmap(cpage);
986 page_cache_release(cpage); 982 page_cache_release(cpage);
987 return *cookie = paddr; 983 return *cookie = paddr;
988errout: 984errout:
989 kfree(cstr.name); 985 kfree(cstr.name);
990 f2fs_fname_crypto_free_buffer(&pstr); 986 f2fs_fname_crypto_free_buffer(&pstr);
991 kunmap(cpage);
992 page_cache_release(cpage); 987 page_cache_release(cpage);
993 return ERR_PTR(res); 988 return ERR_PTR(res);
994} 989}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index f77b3258454a..2bba0c4ef4b7 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -295,6 +295,9 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
295 295
296void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) 296void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
297{ 297{
298 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
299 return;
300
298 /* try to shrink extent cache when there is no enough memory */ 301 /* try to shrink extent cache when there is no enough memory */
299 if (!available_free_memory(sbi, EXTENT_CACHE)) 302 if (!available_free_memory(sbi, EXTENT_CACHE))
300 f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER); 303 f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 4f666368aa85..6cc67e1bbb41 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1566,6 +1566,12 @@ static int __init init_f2fs_fs(void)
1566{ 1566{
1567 int err; 1567 int err;
1568 1568
1569 if (PAGE_SIZE != F2FS_BLKSIZE) {
1570 printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
1571 PAGE_SIZE, F2FS_BLKSIZE);
1572 return -EINVAL;
1573 }
1574
1569 f2fs_build_trace_ios(); 1575 f2fs_build_trace_ios();
1570 1576
1571 err = init_inodecache(); 1577 err = init_inodecache();
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index cf644d52c0cf..c81cfb79a339 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -613,13 +613,21 @@ static void fat_set_state(struct super_block *sb,
613 brelse(bh); 613 brelse(bh);
614} 614}
615 615
616static void fat_reset_iocharset(struct fat_mount_options *opts)
617{
618 if (opts->iocharset != fat_default_iocharset) {
619 /* Note: opts->iocharset can be NULL here */
620 kfree(opts->iocharset);
621 opts->iocharset = fat_default_iocharset;
622 }
623}
624
616static void delayed_free(struct rcu_head *p) 625static void delayed_free(struct rcu_head *p)
617{ 626{
618 struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu); 627 struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
619 unload_nls(sbi->nls_disk); 628 unload_nls(sbi->nls_disk);
620 unload_nls(sbi->nls_io); 629 unload_nls(sbi->nls_io);
621 if (sbi->options.iocharset != fat_default_iocharset) 630 fat_reset_iocharset(&sbi->options);
622 kfree(sbi->options.iocharset);
623 kfree(sbi); 631 kfree(sbi);
624} 632}
625 633
@@ -1034,7 +1042,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
1034 opts->fs_fmask = opts->fs_dmask = current_umask(); 1042 opts->fs_fmask = opts->fs_dmask = current_umask();
1035 opts->allow_utime = -1; 1043 opts->allow_utime = -1;
1036 opts->codepage = fat_default_codepage; 1044 opts->codepage = fat_default_codepage;
1037 opts->iocharset = fat_default_iocharset; 1045 fat_reset_iocharset(opts);
1038 if (is_vfat) { 1046 if (is_vfat) {
1039 opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95; 1047 opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
1040 opts->rodir = 0; 1048 opts->rodir = 0;
@@ -1184,8 +1192,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
1184 1192
1185 /* vfat specific */ 1193 /* vfat specific */
1186 case Opt_charset: 1194 case Opt_charset:
1187 if (opts->iocharset != fat_default_iocharset) 1195 fat_reset_iocharset(opts);
1188 kfree(opts->iocharset);
1189 iocharset = match_strdup(&args[0]); 1196 iocharset = match_strdup(&args[0]);
1190 if (!iocharset) 1197 if (!iocharset)
1191 return -ENOMEM; 1198 return -ENOMEM;
@@ -1776,8 +1783,7 @@ out_fail:
1776 iput(fat_inode); 1783 iput(fat_inode);
1777 unload_nls(sbi->nls_io); 1784 unload_nls(sbi->nls_io);
1778 unload_nls(sbi->nls_disk); 1785 unload_nls(sbi->nls_disk);
1779 if (sbi->options.iocharset != fat_default_iocharset) 1786 fat_reset_iocharset(&sbi->options);
1780 kfree(sbi->options.iocharset);
1781 sb->s_fs_info = NULL; 1787 sb->s_fs_info = NULL;
1782 kfree(sbi); 1788 kfree(sbi);
1783 return error; 1789 return error;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 62376451bbce..5df914943d96 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -113,6 +113,10 @@ void f_setown(struct file *filp, unsigned long arg, int force)
113 int who = arg; 113 int who = arg;
114 type = PIDTYPE_PID; 114 type = PIDTYPE_PID;
115 if (who < 0) { 115 if (who < 0) {
116 /* avoid overflow below */
117 if (who == INT_MIN)
118 return;
119
116 type = PIDTYPE_PGID; 120 type = PIDTYPE_PGID;
117 who = -who; 121 who = -who;
118 } 122 }
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 22b30249fbcb..cfb75dbb96f5 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -747,11 +747,12 @@ int inode_congested(struct inode *inode, int cong_bits)
747 */ 747 */
748 if (inode && inode_to_wb_is_valid(inode)) { 748 if (inode && inode_to_wb_is_valid(inode)) {
749 struct bdi_writeback *wb; 749 struct bdi_writeback *wb;
750 bool locked, congested; 750 struct wb_lock_cookie lock_cookie = {};
751 bool congested;
751 752
752 wb = unlocked_inode_to_wb_begin(inode, &locked); 753 wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
753 congested = wb_congested(wb, cong_bits); 754 congested = wb_congested(wb, cong_bits);
754 unlocked_inode_to_wb_end(inode, locked); 755 unlocked_inode_to_wb_end(inode, &lock_cookie);
755 return congested; 756 return congested;
756 } 757 }
757 758
@@ -1905,7 +1906,7 @@ void wb_workfn(struct work_struct *work)
1905 } 1906 }
1906 1907
1907 if (!list_empty(&wb->work_list)) 1908 if (!list_empty(&wb->work_list))
1908 mod_delayed_work(bdi_wq, &wb->dwork, 0); 1909 wb_wakeup(wb);
1909 else if (wb_has_dirty_io(wb) && dirty_writeback_interval) 1910 else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
1910 wb_wakeup_delayed(wb); 1911 wb_wakeup_delayed(wb);
1911 1912
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 6b35fc4860a0..1de16a5a5c4e 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -776,6 +776,7 @@ static void fscache_write_op(struct fscache_operation *_op)
776 776
777 _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage)); 777 _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
778 778
779again:
779 spin_lock(&object->lock); 780 spin_lock(&object->lock);
780 cookie = object->cookie; 781 cookie = object->cookie;
781 782
@@ -816,10 +817,6 @@ static void fscache_write_op(struct fscache_operation *_op)
816 goto superseded; 817 goto superseded;
817 page = results[0]; 818 page = results[0];
818 _debug("gang %d [%lx]", n, page->index); 819 _debug("gang %d [%lx]", n, page->index);
819 if (page->index >= op->store_limit) {
820 fscache_stat(&fscache_n_store_pages_over_limit);
821 goto superseded;
822 }
823 820
824 radix_tree_tag_set(&cookie->stores, page->index, 821 radix_tree_tag_set(&cookie->stores, page->index,
825 FSCACHE_COOKIE_STORING_TAG); 822 FSCACHE_COOKIE_STORING_TAG);
@@ -829,6 +826,9 @@ static void fscache_write_op(struct fscache_operation *_op)
829 spin_unlock(&cookie->stores_lock); 826 spin_unlock(&cookie->stores_lock);
830 spin_unlock(&object->lock); 827 spin_unlock(&object->lock);
831 828
829 if (page->index >= op->store_limit)
830 goto discard_page;
831
832 fscache_stat(&fscache_n_store_pages); 832 fscache_stat(&fscache_n_store_pages);
833 fscache_stat(&fscache_n_cop_write_page); 833 fscache_stat(&fscache_n_cop_write_page);
834 ret = object->cache->ops->write_page(op, page); 834 ret = object->cache->ops->write_page(op, page);
@@ -844,6 +844,11 @@ static void fscache_write_op(struct fscache_operation *_op)
844 _leave(""); 844 _leave("");
845 return; 845 return;
846 846
847discard_page:
848 fscache_stat(&fscache_n_store_pages_over_limit);
849 fscache_end_page_write(object, page);
850 goto again;
851
847superseded: 852superseded:
848 /* this writer is going away and there aren't any more things to 853 /* this writer is going away and there aren't any more things to
849 * write */ 854 * write */
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index f863ac6647ac..89a4b231e79c 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -211,10 +211,11 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
211 if (!dentry) 211 if (!dentry)
212 return NULL; 212 return NULL;
213 213
214 fc->ctl_dentry[fc->ctl_ndents++] = dentry;
215 inode = new_inode(fuse_control_sb); 214 inode = new_inode(fuse_control_sb);
216 if (!inode) 215 if (!inode) {
216 dput(dentry);
217 return NULL; 217 return NULL;
218 }
218 219
219 inode->i_ino = get_next_ino(); 220 inode->i_ino = get_next_ino();
220 inode->i_mode = mode; 221 inode->i_mode = mode;
@@ -228,6 +229,9 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
228 set_nlink(inode, nlink); 229 set_nlink(inode, nlink);
229 inode->i_private = fc; 230 inode->i_private = fc;
230 d_add(dentry, inode); 231 d_add(dentry, inode);
232
233 fc->ctl_dentry[fc->ctl_ndents++] = dentry;
234
231 return dentry; 235 return dentry;
232} 236}
233 237
@@ -284,7 +288,10 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
284 for (i = fc->ctl_ndents - 1; i >= 0; i--) { 288 for (i = fc->ctl_ndents - 1; i >= 0; i--) {
285 struct dentry *dentry = fc->ctl_dentry[i]; 289 struct dentry *dentry = fc->ctl_dentry[i];
286 d_inode(dentry)->i_private = NULL; 290 d_inode(dentry)->i_private = NULL;
287 d_drop(dentry); 291 if (!i) {
292 /* Get rid of submounts: */
293 d_invalidate(dentry);
294 }
288 dput(dentry); 295 dput(dentry);
289 } 296 }
290 drop_nlink(d_inode(fuse_control_sb->s_root)); 297 drop_nlink(d_inode(fuse_control_sb->s_root));
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 5068dbf80ff8..49b7b40f7598 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1609,8 +1609,19 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
1609 return err; 1609 return err;
1610 1610
1611 if (attr->ia_valid & ATTR_OPEN) { 1611 if (attr->ia_valid & ATTR_OPEN) {
1612 if (fc->atomic_o_trunc) 1612 /* This is coming from open(..., ... | O_TRUNC); */
1613 WARN_ON(!(attr->ia_valid & ATTR_SIZE));
1614 WARN_ON(attr->ia_size != 0);
1615 if (fc->atomic_o_trunc) {
1616 /*
1617 * No need to send request to userspace, since actual
1618 * truncation has already been done by OPEN. But still
1619 * need to truncate page cache.
1620 */
1621 i_size_write(inode, 0);
1622 truncate_pagecache(inode, 0);
1613 return 0; 1623 return 0;
1624 }
1614 file = NULL; 1625 file = NULL;
1615 } 1626 }
1616 1627
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 0d5e8e59b390..f0b73e0c6d48 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1158,6 +1158,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1158 err_put_conn: 1158 err_put_conn:
1159 fuse_bdi_destroy(fc); 1159 fuse_bdi_destroy(fc);
1160 fuse_conn_put(fc); 1160 fuse_conn_put(fc);
1161 sb->s_fs_info = NULL;
1161 err_fput: 1162 err_fput:
1162 fput(file); 1163 fput(file);
1163 err: 1164 err:
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 1543aa1b2a93..8744bd773823 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -806,7 +806,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
806 struct gfs2_inode *ip = GFS2_I(inode); 806 struct gfs2_inode *ip = GFS2_I(inode);
807 struct gfs2_alloc_parms ap = { .aflags = 0, }; 807 struct gfs2_alloc_parms ap = { .aflags = 0, };
808 unsigned int data_blocks = 0, ind_blocks = 0, rblocks; 808 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
809 loff_t bytes, max_bytes, max_blks = UINT_MAX; 809 loff_t bytes, max_bytes, max_blks;
810 int error; 810 int error;
811 const loff_t pos = offset; 811 const loff_t pos = offset;
812 const loff_t count = len; 812 const loff_t count = len;
@@ -858,7 +858,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
858 return error; 858 return error;
859 /* ap.allowed tells us how many blocks quota will allow 859 /* ap.allowed tells us how many blocks quota will allow
860 * us to write. Check if this reduces max_blks */ 860 * us to write. Check if this reduces max_blks */
861 if (ap.allowed && ap.allowed < max_blks) 861 max_blks = UINT_MAX;
862 if (ap.allowed)
862 max_blks = ap.allowed; 863 max_blks = ap.allowed;
863 864
864 error = gfs2_inplace_reserve(ip, &ap); 865 error = gfs2_inplace_reserve(ip, &ap);
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index ad04b3acae2b..a81ed38d8442 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -43,6 +43,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
43{ 43{
44 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 44 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
45 int ret; 45 int ret;
46
47 ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
46 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 48 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
47 return 0; 49 return 0;
48 ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); 50 ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 7302d96ae8bf..fa40e756c501 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -585,6 +585,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
585 return 0; 585 return 0;
586 586
587out_put_hidden_dir: 587out_put_hidden_dir:
588 cancel_delayed_work_sync(&sbi->sync_work);
588 iput(sbi->hidden_dir); 589 iput(sbi->hidden_dir);
589out_put_root: 590out_put_root:
590 dput(sb->s_root); 591 dput(sb->s_root);
diff --git a/fs/inode.c b/fs/inode.c
index 2c16b758831d..b5c3a6473aaa 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1943,8 +1943,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
1943 inode->i_uid = current_fsuid(); 1943 inode->i_uid = current_fsuid();
1944 if (dir && dir->i_mode & S_ISGID) { 1944 if (dir && dir->i_mode & S_ISGID) {
1945 inode->i_gid = dir->i_gid; 1945 inode->i_gid = dir->i_gid;
1946
1947 /* Directories are special, and always inherit S_ISGID */
1946 if (S_ISDIR(mode)) 1948 if (S_ISDIR(mode))
1947 mode |= S_ISGID; 1949 mode |= S_ISGID;
1950 else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
1951 !in_group_p(inode->i_gid) &&
1952 !capable_wrt_inode_uidgid(dir, CAP_FSETID))
1953 mode &= ~S_ISGID;
1948 } else 1954 } else
1949 inode->i_gid = current_fsgid(); 1955 inode->i_gid = current_fsgid();
1950 inode->i_mode = mode; 1956 inode->i_mode = mode;
@@ -2034,3 +2040,9 @@ void inode_set_flags(struct inode *inode, unsigned int flags,
2034 new_flags) != old_flags)); 2040 new_flags) != old_flags));
2035} 2041}
2036EXPORT_SYMBOL(inode_set_flags); 2042EXPORT_SYMBOL(inode_set_flags);
2043
2044void inode_nohighmem(struct inode *inode)
2045{
2046 mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2047}
2048EXPORT_SYMBOL(inode_nohighmem);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 624a57a9c4aa..9398d1b70545 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -275,11 +275,11 @@ loop:
275 goto loop; 275 goto loop;
276 276
277end_loop: 277end_loop:
278 write_unlock(&journal->j_state_lock);
279 del_timer_sync(&journal->j_commit_timer); 278 del_timer_sync(&journal->j_commit_timer);
280 journal->j_task = NULL; 279 journal->j_task = NULL;
281 wake_up(&journal->j_wait_done_commit); 280 wake_up(&journal->j_wait_done_commit);
282 jbd_debug(1, "Journal thread exiting.\n"); 281 jbd_debug(1, "Journal thread exiting.\n");
282 write_unlock(&journal->j_state_lock);
283 return 0; 283 return 0;
284} 284}
285 285
@@ -914,7 +914,7 @@ out:
914} 914}
915 915
916/* 916/*
917 * This is a variaon of __jbd2_update_log_tail which checks for validity of 917 * This is a variation of __jbd2_update_log_tail which checks for validity of
918 * provided log tail and locks j_checkpoint_mutex. So it is safe against races 918 * provided log tail and locks j_checkpoint_mutex. So it is safe against races
919 * with other threads updating log tail. 919 * with other threads updating log tail.
920 */ 920 */
@@ -1384,6 +1384,9 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
1384 journal_superblock_t *sb = journal->j_superblock; 1384 journal_superblock_t *sb = journal->j_superblock;
1385 int ret; 1385 int ret;
1386 1386
1387 if (is_journal_aborted(journal))
1388 return -EIO;
1389
1387 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); 1390 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
1388 jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n", 1391 jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
1389 tail_block, tail_tid); 1392 tail_block, tail_tid);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index a2e724053919..bce343febb9e 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -527,6 +527,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
527 */ 527 */
528 ret = start_this_handle(journal, handle, GFP_NOFS); 528 ret = start_this_handle(journal, handle, GFP_NOFS);
529 if (ret < 0) { 529 if (ret < 0) {
530 handle->h_journal = journal;
530 jbd2_journal_free_reserved(handle); 531 jbd2_journal_free_reserved(handle);
531 return ret; 532 return ret;
532 } 533 }
@@ -1362,6 +1363,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1362 if (jh->b_transaction == transaction && 1363 if (jh->b_transaction == transaction &&
1363 jh->b_jlist != BJ_Metadata) { 1364 jh->b_jlist != BJ_Metadata) {
1364 jbd_lock_bh_state(bh); 1365 jbd_lock_bh_state(bh);
1366 if (jh->b_transaction == transaction &&
1367 jh->b_jlist != BJ_Metadata)
1368 pr_err("JBD2: assertion failure: h_type=%u "
1369 "h_line_no=%u block_no=%llu jlist=%u\n",
1370 handle->h_type, handle->h_line_no,
1371 (unsigned long long) bh->b_blocknr,
1372 jh->b_jlist);
1365 J_ASSERT_JH(jh, jh->b_transaction != transaction || 1373 J_ASSERT_JH(jh, jh->b_transaction != transaction ||
1366 jh->b_jlist == BJ_Metadata); 1374 jh->b_jlist == BJ_Metadata);
1367 jbd_unlock_bh_state(bh); 1375 jbd_unlock_bh_state(bh);
@@ -1381,11 +1389,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1381 * of the transaction. This needs to be done 1389 * of the transaction. This needs to be done
1382 * once a transaction -bzzz 1390 * once a transaction -bzzz
1383 */ 1391 */
1384 jh->b_modified = 1;
1385 if (handle->h_buffer_credits <= 0) { 1392 if (handle->h_buffer_credits <= 0) {
1386 ret = -ENOSPC; 1393 ret = -ENOSPC;
1387 goto out_unlock_bh; 1394 goto out_unlock_bh;
1388 } 1395 }
1396 jh->b_modified = 1;
1389 handle->h_buffer_credits--; 1397 handle->h_buffer_credits--;
1390 } 1398 }
1391 1399
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 30c4c9ebb693..e27317169697 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -207,8 +207,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
207 __func__, inode->i_ino, inode->i_mode, inode->i_nlink, 207 __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
208 f->inocache->pino_nlink, inode->i_mapping->nrpages); 208 f->inocache->pino_nlink, inode->i_mapping->nrpages);
209 209
210 unlock_new_inode(inode); 210 d_instantiate_new(dentry, inode);
211 d_instantiate(dentry, inode);
212 return 0; 211 return 0;
213 212
214 fail: 213 fail:
@@ -428,8 +427,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
428 mutex_unlock(&dir_f->sem); 427 mutex_unlock(&dir_f->sem);
429 jffs2_complete_reservation(c); 428 jffs2_complete_reservation(c);
430 429
431 unlock_new_inode(inode); 430 d_instantiate_new(dentry, inode);
432 d_instantiate(dentry, inode);
433 return 0; 431 return 0;
434 432
435 fail: 433 fail:
@@ -573,8 +571,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode
573 mutex_unlock(&dir_f->sem); 571 mutex_unlock(&dir_f->sem);
574 jffs2_complete_reservation(c); 572 jffs2_complete_reservation(c);
575 573
576 unlock_new_inode(inode); 574 d_instantiate_new(dentry, inode);
577 d_instantiate(dentry, inode);
578 return 0; 575 return 0;
579 576
580 fail: 577 fail:
@@ -745,8 +742,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
745 mutex_unlock(&dir_f->sem); 742 mutex_unlock(&dir_f->sem);
746 jffs2_complete_reservation(c); 743 jffs2_complete_reservation(c);
747 744
748 unlock_new_inode(inode); 745 d_instantiate_new(dentry, inode);
749 d_instantiate(dentry, inode);
750 return 0; 746 return 0;
751 747
752 fail: 748 fail:
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 2caf1682036d..85e2594fe95c 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -361,7 +361,6 @@ error_io:
361 ret = -EIO; 361 ret = -EIO;
362error: 362error:
363 mutex_unlock(&f->sem); 363 mutex_unlock(&f->sem);
364 jffs2_do_clear_inode(c, f);
365 iget_failed(inode); 364 iget_failed(inode);
366 return ERR_PTR(ret); 365 return ERR_PTR(ret);
367} 366}
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index d86c5e3176a1..600da1a4df29 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -345,7 +345,7 @@ static void jffs2_put_super (struct super_block *sb)
345static void jffs2_kill_sb(struct super_block *sb) 345static void jffs2_kill_sb(struct super_block *sb)
346{ 346{
347 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); 347 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
348 if (!(sb->s_flags & MS_RDONLY)) 348 if (c && !(sb->s_flags & MS_RDONLY))
349 jffs2_stop_garbage_collect_thread(c); 349 jffs2_stop_garbage_collect_thread(c);
350 kill_mtd_super(sb); 350 kill_mtd_super(sb);
351 kfree(c); 351 kfree(c);
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 9d7551f5c32a..f217ae750adb 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -178,8 +178,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
178 unlock_new_inode(ip); 178 unlock_new_inode(ip);
179 iput(ip); 179 iput(ip);
180 } else { 180 } else {
181 unlock_new_inode(ip); 181 d_instantiate_new(dentry, ip);
182 d_instantiate(dentry, ip);
183 } 182 }
184 183
185 out2: 184 out2:
@@ -313,8 +312,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
313 unlock_new_inode(ip); 312 unlock_new_inode(ip);
314 iput(ip); 313 iput(ip);
315 } else { 314 } else {
316 unlock_new_inode(ip); 315 d_instantiate_new(dentry, ip);
317 d_instantiate(dentry, ip);
318 } 316 }
319 317
320 out2: 318 out2:
@@ -1058,8 +1056,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
1058 unlock_new_inode(ip); 1056 unlock_new_inode(ip);
1059 iput(ip); 1057 iput(ip);
1060 } else { 1058 } else {
1061 unlock_new_inode(ip); 1059 d_instantiate_new(dentry, ip);
1062 d_instantiate(dentry, ip);
1063 } 1060 }
1064 1061
1065 out2: 1062 out2:
@@ -1443,8 +1440,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
1443 unlock_new_inode(ip); 1440 unlock_new_inode(ip);
1444 iput(ip); 1441 iput(ip);
1445 } else { 1442 } else {
1446 unlock_new_inode(ip); 1443 d_instantiate_new(dentry, ip);
1447 d_instantiate(dentry, ip);
1448 } 1444 }
1449 1445
1450 out1: 1446 out1:
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 48b15a6e5558..40a26a542341 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -493,15 +493,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
493 if (size > PSIZE) { 493 if (size > PSIZE) {
494 /* 494 /*
495 * To keep the rest of the code simple. Allocate a 495 * To keep the rest of the code simple. Allocate a
496 * contiguous buffer to work with 496 * contiguous buffer to work with. Make the buffer large
497 * enough to make use of the whole extent.
497 */ 498 */
498 ea_buf->xattr = kmalloc(size, GFP_KERNEL); 499 ea_buf->max_size = (size + sb->s_blocksize - 1) &
500 ~(sb->s_blocksize - 1);
501
502 ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
499 if (ea_buf->xattr == NULL) 503 if (ea_buf->xattr == NULL)
500 return -ENOMEM; 504 return -ENOMEM;
501 505
502 ea_buf->flag = EA_MALLOC; 506 ea_buf->flag = EA_MALLOC;
503 ea_buf->max_size = (size + sb->s_blocksize - 1) &
504 ~(sb->s_blocksize - 1);
505 507
506 if (ea_size == 0) 508 if (ea_size == 0)
507 return 0; 509 return 0;
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 6e9a912d394c..6875bd5d35f6 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -272,7 +272,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
272{ 272{
273 struct kernfs_open_file *of = kernfs_of(file); 273 struct kernfs_open_file *of = kernfs_of(file);
274 const struct kernfs_ops *ops; 274 const struct kernfs_ops *ops;
275 size_t len; 275 ssize_t len;
276 char *buf; 276 char *buf;
277 277
278 if (of->atomic_write_len) { 278 if (of->atomic_write_len) {
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 5f31ebd96c06..f038d4ac9aec 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -129,6 +129,8 @@ lockd(void *vrqstp)
129{ 129{
130 int err = 0; 130 int err = 0;
131 struct svc_rqst *rqstp = vrqstp; 131 struct svc_rqst *rqstp = vrqstp;
132 struct net *net = &init_net;
133 struct lockd_net *ln = net_generic(net, lockd_net_id);
132 134
133 /* try_to_freeze() is called from svc_recv() */ 135 /* try_to_freeze() is called from svc_recv() */
134 set_freezable(); 136 set_freezable();
@@ -173,6 +175,8 @@ lockd(void *vrqstp)
173 if (nlmsvc_ops) 175 if (nlmsvc_ops)
174 nlmsvc_invalidate_all(); 176 nlmsvc_invalidate_all();
175 nlm_shutdown_hosts(); 177 nlm_shutdown_hosts();
178 cancel_delayed_work_sync(&ln->grace_period_end);
179 locks_end_grace(&ln->lockd_manager);
176 return 0; 180 return 0;
177} 181}
178 182
diff --git a/fs/namei.c b/fs/namei.c
index 3f96ae087488..de57dd59d95f 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -219,9 +219,10 @@ getname_kernel(const char * filename)
219 if (len <= EMBEDDED_NAME_MAX) { 219 if (len <= EMBEDDED_NAME_MAX) {
220 result->name = (char *)result->iname; 220 result->name = (char *)result->iname;
221 } else if (len <= PATH_MAX) { 221 } else if (len <= PATH_MAX) {
222 const size_t size = offsetof(struct filename, iname[1]);
222 struct filename *tmp; 223 struct filename *tmp;
223 224
224 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); 225 tmp = kmalloc(size, GFP_KERNEL);
225 if (unlikely(!tmp)) { 226 if (unlikely(!tmp)) {
226 __putname(result); 227 __putname(result);
227 return ERR_PTR(-ENOMEM); 228 return ERR_PTR(-ENOMEM);
@@ -570,9 +571,10 @@ static int __nd_alloc_stack(struct nameidata *nd)
570static bool path_connected(const struct path *path) 571static bool path_connected(const struct path *path)
571{ 572{
572 struct vfsmount *mnt = path->mnt; 573 struct vfsmount *mnt = path->mnt;
574 struct super_block *sb = mnt->mnt_sb;
573 575
574 /* Only bind mounts can have disconnected paths */ 576 /* Bind mounts and multi-root filesystems can have disconnected paths */
575 if (mnt->mnt_root == mnt->mnt_sb->s_root) 577 if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root))
576 return true; 578 return true;
577 579
578 return is_subdir(path->dentry, mnt->mnt_root); 580 return is_subdir(path->dentry, mnt->mnt_root);
@@ -2000,6 +2002,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
2000 int retval = 0; 2002 int retval = 0;
2001 const char *s = nd->name->name; 2003 const char *s = nd->name->name;
2002 2004
2005 if (!*s)
2006 flags &= ~LOOKUP_RCU;
2007
2003 nd->last_type = LAST_ROOT; /* if there are only slashes... */ 2008 nd->last_type = LAST_ROOT; /* if there are only slashes... */
2004 nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; 2009 nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
2005 nd->depth = 0; 2010 nd->depth = 0;
diff --git a/fs/namespace.c b/fs/namespace.c
index ec4078d16eb7..b56b50e3da11 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -603,12 +603,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
603 return 0; 603 return 0;
604 mnt = real_mount(bastard); 604 mnt = real_mount(bastard);
605 mnt_add_count(mnt, 1); 605 mnt_add_count(mnt, 1);
606 smp_mb(); // see mntput_no_expire()
606 if (likely(!read_seqretry(&mount_lock, seq))) 607 if (likely(!read_seqretry(&mount_lock, seq)))
607 return 0; 608 return 0;
608 if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { 609 if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
609 mnt_add_count(mnt, -1); 610 mnt_add_count(mnt, -1);
610 return 1; 611 return 1;
611 } 612 }
613 lock_mount_hash();
614 if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
615 mnt_add_count(mnt, -1);
616 unlock_mount_hash();
617 return 1;
618 }
619 unlock_mount_hash();
620 /* caller will mntput() */
612 return -1; 621 return -1;
613} 622}
614 623
@@ -1018,7 +1027,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
1018 goto out_free; 1027 goto out_free;
1019 } 1028 }
1020 1029
1021 mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED); 1030 mnt->mnt.mnt_flags = old->mnt.mnt_flags;
1031 mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
1022 /* Don't allow unprivileged users to change mount flags */ 1032 /* Don't allow unprivileged users to change mount flags */
1023 if (flag & CL_UNPRIVILEGED) { 1033 if (flag & CL_UNPRIVILEGED) {
1024 mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; 1034 mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
@@ -1123,12 +1133,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
1123static void mntput_no_expire(struct mount *mnt) 1133static void mntput_no_expire(struct mount *mnt)
1124{ 1134{
1125 rcu_read_lock(); 1135 rcu_read_lock();
1126 mnt_add_count(mnt, -1); 1136 if (likely(READ_ONCE(mnt->mnt_ns))) {
1127 if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */ 1137 /*
1138 * Since we don't do lock_mount_hash() here,
1139 * ->mnt_ns can change under us. However, if it's
1140 * non-NULL, then there's a reference that won't
1141 * be dropped until after an RCU delay done after
1142 * turning ->mnt_ns NULL. So if we observe it
1143 * non-NULL under rcu_read_lock(), the reference
1144 * we are dropping is not the final one.
1145 */
1146 mnt_add_count(mnt, -1);
1128 rcu_read_unlock(); 1147 rcu_read_unlock();
1129 return; 1148 return;
1130 } 1149 }
1131 lock_mount_hash(); 1150 lock_mount_hash();
1151 /*
1152 * make sure that if __legitimize_mnt() has not seen us grab
1153 * mount_lock, we'll see their refcount increment here.
1154 */
1155 smp_mb();
1156 mnt_add_count(mnt, -1);
1132 if (mnt_get_count(mnt)) { 1157 if (mnt_get_count(mnt)) {
1133 rcu_read_unlock(); 1158 rcu_read_unlock();
1134 unlock_mount_hash(); 1159 unlock_mount_hash();
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 03446c5a3ec1..4e1144512522 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -133,12 +133,11 @@ ncp_hash_dentry(const struct dentry *dentry, struct qstr *this)
133 return 0; 133 return 0;
134 134
135 if (!ncp_case_sensitive(inode)) { 135 if (!ncp_case_sensitive(inode)) {
136 struct super_block *sb = dentry->d_sb;
137 struct nls_table *t; 136 struct nls_table *t;
138 unsigned long hash; 137 unsigned long hash;
139 int i; 138 int i;
140 139
141 t = NCP_IO_TABLE(sb); 140 t = NCP_IO_TABLE(dentry->d_sb);
142 hash = init_name_hash(); 141 hash = init_name_hash();
143 for (i=0; i<this->len ; i++) 142 for (i=0; i<this->len ; i++)
144 hash = partial_name_hash(ncp_tolower(t, this->name[i]), 143 hash = partial_name_hash(ncp_tolower(t, this->name[i]),
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index 88dbbc9fcf4d..f571570a2e72 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -980,6 +980,10 @@ ncp_read_kernel(struct ncp_server *server, const char *file_id,
980 goto out; 980 goto out;
981 } 981 }
982 *bytes_read = ncp_reply_be16(server, 0); 982 *bytes_read = ncp_reply_be16(server, 0);
983 if (*bytes_read > to_read) {
984 result = -EINVAL;
985 goto out;
986 }
983 source = ncp_reply_data(server, 2 + (offset & 1)); 987 source = ncp_reply_data(server, 2 + (offset & 1));
984 988
985 memcpy(target, source, *bytes_read); 989 memcpy(target, source, *bytes_read);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 4b1d08f56aba..211440722e24 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -86,9 +86,9 @@ struct nfs_direct_req {
86 struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX]; 86 struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
87 int mirror_count; 87 int mirror_count;
88 88
89 loff_t io_start; /* Start offset for I/O */
89 ssize_t count, /* bytes actually processed */ 90 ssize_t count, /* bytes actually processed */
90 bytes_left, /* bytes left to be sent */ 91 bytes_left, /* bytes left to be sent */
91 io_start, /* start of IO */
92 error; /* any reported error */ 92 error; /* any reported error */
93 struct completion completion; /* wait for i/o completion */ 93 struct completion completion; /* wait for i/o completion */
94 94
@@ -787,10 +787,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
787 787
788 spin_lock(&dreq->lock); 788 spin_lock(&dreq->lock);
789 789
790 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { 790 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
791 dreq->flags = 0;
792 dreq->error = hdr->error; 791 dreq->error = hdr->error;
793 }
794 if (dreq->error == 0) { 792 if (dreq->error == 0) {
795 nfs_direct_good_bytes(dreq, hdr); 793 nfs_direct_good_bytes(dreq, hdr);
796 if (nfs_write_need_commit(hdr)) { 794 if (nfs_write_need_commit(hdr)) {
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 54313322ee5b..c8e90152b61b 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -461,6 +461,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
461 goto out_err_free; 461 goto out_err_free;
462 462
463 /* fh */ 463 /* fh */
464 rc = -EIO;
464 p = xdr_inline_decode(&stream, 4); 465 p = xdr_inline_decode(&stream, 4);
465 if (!p) 466 if (!p)
466 goto out_err_free; 467 goto out_err_free;
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 5ba22c6b0ffa..c99a887100db 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -343,7 +343,7 @@ static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf,
343 int id_len; 343 int id_len;
344 ssize_t ret; 344 ssize_t ret;
345 345
346 id_len = snprintf(id_str, sizeof(id_str), "%u", id); 346 id_len = nfs_map_numeric_to_string(id, id_str, sizeof(id_str));
347 ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap); 347 ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap);
348 if (ret < 0) 348 if (ret < 0)
349 return -EINVAL; 349 return -EINVAL;
@@ -567,9 +567,13 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
567 struct idmap_msg *im; 567 struct idmap_msg *im;
568 struct idmap *idmap = (struct idmap *)aux; 568 struct idmap *idmap = (struct idmap *)aux;
569 struct key *key = cons->key; 569 struct key *key = cons->key;
570 int ret = -ENOMEM; 570 int ret = -ENOKEY;
571
572 if (!aux)
573 goto out1;
571 574
572 /* msg and im are freed in idmap_pipe_destroy_msg */ 575 /* msg and im are freed in idmap_pipe_destroy_msg */
576 ret = -ENOMEM;
573 data = kzalloc(sizeof(*data), GFP_KERNEL); 577 data = kzalloc(sizeof(*data), GFP_KERNEL);
574 if (!data) 578 if (!data)
575 goto out1; 579 goto out1;
@@ -622,7 +626,8 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
622 if (strcmp(upcall->im_name, im->im_name) != 0) 626 if (strcmp(upcall->im_name, im->im_name) != 0)
623 break; 627 break;
624 /* Note: here we store the NUL terminator too */ 628 /* Note: here we store the NUL terminator too */
625 len = sprintf(id_str, "%d", im->im_id) + 1; 629 len = 1 + nfs_map_numeric_to_string(im->im_id, id_str,
630 sizeof(id_str));
626 ret = nfs_idmap_instantiate(key, authkey, id_str, len); 631 ret = nfs_idmap_instantiate(key, authkey, id_str, len);
627 break; 632 break;
628 case IDMAP_CONV_IDTONAME: 633 case IDMAP_CONV_IDTONAME:
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8ef6f70c9e25..41c8ddbc80dc 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1780,7 +1780,7 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
1780 return ret; 1780 return ret;
1781} 1781}
1782 1782
1783static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err) 1783static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
1784{ 1784{
1785 switch (err) { 1785 switch (err) {
1786 default: 1786 default:
@@ -1827,7 +1827,11 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
1827 return -EAGAIN; 1827 return -EAGAIN;
1828 case -ENOMEM: 1828 case -ENOMEM:
1829 case -NFS4ERR_DENIED: 1829 case -NFS4ERR_DENIED:
1830 /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 1830 if (fl) {
1831 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
1832 if (lsp)
1833 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
1834 }
1831 return 0; 1835 return 0;
1832 } 1836 }
1833 return err; 1837 return err;
@@ -1863,7 +1867,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
1863 err = nfs4_open_recover_helper(opendata, FMODE_READ); 1867 err = nfs4_open_recover_helper(opendata, FMODE_READ);
1864 } 1868 }
1865 nfs4_opendata_put(opendata); 1869 nfs4_opendata_put(opendata);
1866 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 1870 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
1867} 1871}
1868 1872
1869static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 1873static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
@@ -3025,6 +3029,7 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
3025 .rpc_resp = &res, 3029 .rpc_resp = &res,
3026 }; 3030 };
3027 int status; 3031 int status;
3032 int i;
3028 3033
3029 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3034 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3030 FATTR4_WORD0_FH_EXPIRE_TYPE | 3035 FATTR4_WORD0_FH_EXPIRE_TYPE |
@@ -3090,8 +3095,13 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
3090 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 3095 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3091 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 3096 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3092 server->cache_consistency_bitmask[2] = 0; 3097 server->cache_consistency_bitmask[2] = 0;
3098
3099 /* Avoid a regression due to buggy server */
3100 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
3101 res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
3093 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 3102 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3094 sizeof(server->exclcreat_bitmask)); 3103 sizeof(server->exclcreat_bitmask));
3104
3095 server->acl_bitmask = res.acl_bitmask; 3105 server->acl_bitmask = res.acl_bitmask;
3096 server->fh_expire_type = res.fh_expire_type; 3106 server->fh_expire_type = res.fh_expire_type;
3097 } 3107 }
@@ -6151,7 +6161,7 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
6151 if (err != 0) 6161 if (err != 0)
6152 return err; 6162 return err;
6153 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 6163 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6154 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 6164 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
6155} 6165}
6156 6166
6157struct nfs_release_lockowner_data { 6167struct nfs_release_lockowner_data {
@@ -7670,6 +7680,12 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
7670 /* fall through */ 7680 /* fall through */
7671 case -NFS4ERR_RETRY_UNCACHED_REP: 7681 case -NFS4ERR_RETRY_UNCACHED_REP:
7672 return -EAGAIN; 7682 return -EAGAIN;
7683 case -NFS4ERR_BADSESSION:
7684 case -NFS4ERR_DEADSESSION:
7685 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
7686 nfs4_schedule_session_recovery(clp->cl_session,
7687 task->tk_status);
7688 break;
7673 default: 7689 default:
7674 nfs4_schedule_lease_recovery(clp); 7690 nfs4_schedule_lease_recovery(clp);
7675 } 7691 }
@@ -7748,7 +7764,6 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7748 if (status == 0) 7764 if (status == 0)
7749 status = task->tk_status; 7765 status = task->tk_status;
7750 rpc_put_task(task); 7766 rpc_put_task(task);
7751 return 0;
7752out: 7767out:
7753 dprintk("<-- %s status=%d\n", __func__, status); 7768 dprintk("<-- %s status=%d\n", __func__, status);
7754 return status; 7769 return status;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 9a0b219ff74d..44f5cea49699 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1386,6 +1386,7 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
1386 struct inode *inode = state->inode; 1386 struct inode *inode = state->inode;
1387 struct nfs_inode *nfsi = NFS_I(inode); 1387 struct nfs_inode *nfsi = NFS_I(inode);
1388 struct file_lock *fl; 1388 struct file_lock *fl;
1389 struct nfs4_lock_state *lsp;
1389 int status = 0; 1390 int status = 0;
1390 struct file_lock_context *flctx = inode->i_flctx; 1391 struct file_lock_context *flctx = inode->i_flctx;
1391 struct list_head *list; 1392 struct list_head *list;
@@ -1426,7 +1427,9 @@ restart:
1426 case -NFS4ERR_DENIED: 1427 case -NFS4ERR_DENIED:
1427 case -NFS4ERR_RECLAIM_BAD: 1428 case -NFS4ERR_RECLAIM_BAD:
1428 case -NFS4ERR_RECLAIM_CONFLICT: 1429 case -NFS4ERR_RECLAIM_CONFLICT:
1429 /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 1430 lsp = fl->fl_u.nfs4_fl.owner;
1431 if (lsp)
1432 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
1430 status = 0; 1433 status = 0;
1431 } 1434 }
1432 spin_lock(&flctx->flc_lock); 1435 spin_lock(&flctx->flc_lock);
@@ -1593,13 +1596,14 @@ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
1593 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot); 1596 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
1594} 1597}
1595 1598
1596static void nfs4_reclaim_complete(struct nfs_client *clp, 1599static int nfs4_reclaim_complete(struct nfs_client *clp,
1597 const struct nfs4_state_recovery_ops *ops, 1600 const struct nfs4_state_recovery_ops *ops,
1598 struct rpc_cred *cred) 1601 struct rpc_cred *cred)
1599{ 1602{
1600 /* Notify the server we're done reclaiming our state */ 1603 /* Notify the server we're done reclaiming our state */
1601 if (ops->reclaim_complete) 1604 if (ops->reclaim_complete)
1602 (void)ops->reclaim_complete(clp, cred); 1605 return ops->reclaim_complete(clp, cred);
1606 return 0;
1603} 1607}
1604 1608
1605static void nfs4_clear_reclaim_server(struct nfs_server *server) 1609static void nfs4_clear_reclaim_server(struct nfs_server *server)
@@ -1646,13 +1650,16 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
1646{ 1650{
1647 const struct nfs4_state_recovery_ops *ops; 1651 const struct nfs4_state_recovery_ops *ops;
1648 struct rpc_cred *cred; 1652 struct rpc_cred *cred;
1653 int err;
1649 1654
1650 if (!nfs4_state_clear_reclaim_reboot(clp)) 1655 if (!nfs4_state_clear_reclaim_reboot(clp))
1651 return; 1656 return;
1652 ops = clp->cl_mvops->reboot_recovery_ops; 1657 ops = clp->cl_mvops->reboot_recovery_ops;
1653 cred = nfs4_get_clid_cred(clp); 1658 cred = nfs4_get_clid_cred(clp);
1654 nfs4_reclaim_complete(clp, ops, cred); 1659 err = nfs4_reclaim_complete(clp, ops, cred);
1655 put_rpccred(cred); 1660 put_rpccred(cred);
1661 if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
1662 set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
1656} 1663}
1657 1664
1658static void nfs_delegation_clear_all(struct nfs_client *clp) 1665static void nfs_delegation_clear_all(struct nfs_client *clp)
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
index 0fbd3ab1be22..44a7bbbf92f8 100644
--- a/fs/nfs/nfs4sysctl.c
+++ b/fs/nfs/nfs4sysctl.c
@@ -31,7 +31,7 @@ static struct ctl_table nfs4_cb_sysctls[] = {
31 .data = &nfs_idmap_cache_timeout, 31 .data = &nfs_idmap_cache_timeout,
32 .maxlen = sizeof(int), 32 .maxlen = sizeof(int),
33 .mode = 0644, 33 .mode = 0644,
34 .proc_handler = proc_dointvec_jiffies, 34 .proc_handler = proc_dointvec,
35 }, 35 },
36 { } 36 { }
37}; 37};
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 8ebfdd00044b..4bdc2fc86280 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -1273,8 +1273,10 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
1273 mirror = &desc->pg_mirrors[midx]; 1273 mirror = &desc->pg_mirrors[midx];
1274 if (!list_empty(&mirror->pg_list)) { 1274 if (!list_empty(&mirror->pg_list)) {
1275 prev = nfs_list_entry(mirror->pg_list.prev); 1275 prev = nfs_list_entry(mirror->pg_list.prev);
1276 if (index != prev->wb_index + 1) 1276 if (index != prev->wb_index + 1) {
1277 nfs_pageio_complete_mirror(desc, midx); 1277 nfs_pageio_complete(desc);
1278 break;
1279 }
1278 } 1280 }
1279 } 1281 }
1280} 1282}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 7af7bedd7c02..c8e75e5e6a67 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1943,7 +1943,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
1943 nfs_pageio_reset_write_mds(desc); 1943 nfs_pageio_reset_write_mds(desc);
1944 mirror->pg_recoalesce = 1; 1944 mirror->pg_recoalesce = 1;
1945 } 1945 }
1946 hdr->release(hdr); 1946 hdr->completion_ops->completion(hdr);
1947} 1947}
1948 1948
1949static enum pnfs_try_status 1949static enum pnfs_try_status
@@ -2058,7 +2058,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
2058 nfs_pageio_reset_read_mds(desc); 2058 nfs_pageio_reset_read_mds(desc);
2059 mirror->pg_recoalesce = 1; 2059 mirror->pg_recoalesce = 1;
2060 } 2060 }
2061 hdr->release(hdr); 2061 hdr->completion_ops->completion(hdr);
2062} 2062}
2063 2063
2064/* 2064/*
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 3149f7e58d6f..62f358f67764 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2581,6 +2581,8 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
2581 /* initial superblock/root creation */ 2581 /* initial superblock/root creation */
2582 mount_info->fill_super(s, mount_info); 2582 mount_info->fill_super(s, mount_info);
2583 nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned); 2583 nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
2584 if (!(server->flags & NFS_MOUNT_UNSHARED))
2585 s->s_iflags |= SB_I_MULTIROOT;
2584 } 2586 }
2585 2587
2586 mntroot = nfs_get_root(s, mount_info->mntfh, dev_name); 2588 mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 7a9b6e347249..6e81a5b5858e 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1746,6 +1746,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
1746 set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); 1746 set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
1747 next: 1747 next:
1748 nfs_unlock_and_release_request(req); 1748 nfs_unlock_and_release_request(req);
1749 /* Latency breaker */
1750 cond_resched();
1749 } 1751 }
1750 nfss = NFS_SERVER(data->inode); 1752 nfss = NFS_SERVER(data->inode);
1751 if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) 1753 if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
index fd8c9a5bcac4..77d136ac8909 100644
--- a/fs/nfs_common/grace.c
+++ b/fs/nfs_common/grace.c
@@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm)
30 struct list_head *grace_list = net_generic(net, grace_net_id); 30 struct list_head *grace_list = net_generic(net, grace_net_id);
31 31
32 spin_lock(&grace_lock); 32 spin_lock(&grace_lock);
33 list_add(&lm->list, grace_list); 33 if (list_empty(&lm->list))
34 list_add(&lm->list, grace_list);
35 else
36 WARN(1, "double list_add attempt detected in net %x %s\n",
37 net->ns.inum, (net == &init_net) ? "(init_net)" : "");
34 spin_unlock(&grace_lock); 38 spin_unlock(&grace_lock);
35} 39}
36EXPORT_SYMBOL_GPL(locks_start_grace); 40EXPORT_SYMBOL_GPL(locks_start_grace);
@@ -104,7 +108,9 @@ grace_exit_net(struct net *net)
104{ 108{
105 struct list_head *grace_list = net_generic(net, grace_net_id); 109 struct list_head *grace_list = net_generic(net, grace_net_id);
106 110
107 BUG_ON(!list_empty(grace_list)); 111 WARN_ONCE(!list_empty(grace_list),
112 "net %x %s: grace_list is not empty\n",
113 net->ns.inum, __func__);
108} 114}
109 115
110static struct pernet_operations grace_net_ops = { 116static struct pernet_operations grace_net_ops = {
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index a260060042ad..67eb154af881 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -60,9 +60,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
60 else 60 else
61 GROUP_AT(gi, i) = GROUP_AT(rqgi, i); 61 GROUP_AT(gi, i) = GROUP_AT(rqgi, i);
62 62
63 /* Each thread allocates its own gi, no race */
64 groups_sort(gi);
65 } 63 }
64
65 /* Each thread allocates its own gi, no race */
66 groups_sort(gi);
66 } else { 67 } else {
67 gi = get_group_info(rqgi); 68 gi = get_group_info(rqgi);
68 } 69 }
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 209dbfc50cd4..bfbee8ddf978 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1245,14 +1245,14 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
1245 const struct nfsd4_layout_ops *ops; 1245 const struct nfsd4_layout_ops *ops;
1246 struct nfs4_layout_stateid *ls; 1246 struct nfs4_layout_stateid *ls;
1247 __be32 nfserr; 1247 __be32 nfserr;
1248 int accmode; 1248 int accmode = NFSD_MAY_READ_IF_EXEC;
1249 1249
1250 switch (lgp->lg_seg.iomode) { 1250 switch (lgp->lg_seg.iomode) {
1251 case IOMODE_READ: 1251 case IOMODE_READ:
1252 accmode = NFSD_MAY_READ; 1252 accmode |= NFSD_MAY_READ;
1253 break; 1253 break;
1254 case IOMODE_RW: 1254 case IOMODE_RW:
1255 accmode = NFSD_MAY_READ | NFSD_MAY_WRITE; 1255 accmode |= NFSD_MAY_READ | NFSD_MAY_WRITE;
1256 break; 1256 break;
1257 default: 1257 default:
1258 dprintk("%s: invalid iomode %d\n", 1258 dprintk("%s: invalid iomode %d\n",
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 11c67e8b939d..ba27a5ff8677 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -63,12 +63,16 @@ static const stateid_t zero_stateid = {
63static const stateid_t currentstateid = { 63static const stateid_t currentstateid = {
64 .si_generation = 1, 64 .si_generation = 1,
65}; 65};
66static const stateid_t close_stateid = {
67 .si_generation = 0xffffffffU,
68};
66 69
67static u64 current_sessionid = 1; 70static u64 current_sessionid = 1;
68 71
69#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t))) 72#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
70#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t))) 73#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
71#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t))) 74#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
75#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
72 76
73/* forward declarations */ 77/* forward declarations */
74static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner); 78static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
@@ -4701,7 +4705,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4701 struct nfs4_stid *s; 4705 struct nfs4_stid *s;
4702 __be32 status = nfserr_bad_stateid; 4706 __be32 status = nfserr_bad_stateid;
4703 4707
4704 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 4708 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
4709 CLOSE_STATEID(stateid))
4705 return status; 4710 return status;
4706 /* Client debugging aid. */ 4711 /* Client debugging aid. */
4707 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { 4712 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
@@ -4759,7 +4764,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
4759 else if (typemask & NFS4_DELEG_STID) 4764 else if (typemask & NFS4_DELEG_STID)
4760 typemask |= NFS4_REVOKED_DELEG_STID; 4765 typemask |= NFS4_REVOKED_DELEG_STID;
4761 4766
4762 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 4767 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
4768 CLOSE_STATEID(stateid))
4763 return nfserr_bad_stateid; 4769 return nfserr_bad_stateid;
4764 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn); 4770 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
4765 if (status == nfserr_stale_clientid) { 4771 if (status == nfserr_stale_clientid) {
@@ -5011,15 +5017,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
5011 status = nfsd4_check_seqid(cstate, sop, seqid); 5017 status = nfsd4_check_seqid(cstate, sop, seqid);
5012 if (status) 5018 if (status)
5013 return status; 5019 return status;
5014 if (stp->st_stid.sc_type == NFS4_CLOSED_STID 5020 status = nfsd4_lock_ol_stateid(stp);
5015 || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID) 5021 if (status != nfs_ok)
5016 /* 5022 return status;
5017 * "Closed" stateid's exist *only* to return
5018 * nfserr_replay_me from the previous step, and
5019 * revoked delegations are kept only for free_stateid.
5020 */
5021 return nfserr_bad_stateid;
5022 mutex_lock(&stp->st_mutex);
5023 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 5023 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5024 if (status == nfs_ok) 5024 if (status == nfs_ok)
5025 status = nfs4_check_fh(current_fh, &stp->st_stid); 5025 status = nfs4_check_fh(current_fh, &stp->st_stid);
@@ -5243,6 +5243,11 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5243 nfsd4_close_open_stateid(stp); 5243 nfsd4_close_open_stateid(stp);
5244 mutex_unlock(&stp->st_mutex); 5244 mutex_unlock(&stp->st_mutex);
5245 5245
5246 /* See RFC5661 sectionm 18.2.4 */
5247 if (stp->st_stid.sc_client->cl_minorversion)
5248 memcpy(&close->cl_stateid, &close_stateid,
5249 sizeof(close->cl_stateid));
5250
5246 /* put reference from nfs4_preprocess_seqid_op */ 5251 /* put reference from nfs4_preprocess_seqid_op */
5247 nfs4_put_stid(&stp->st_stid); 5252 nfs4_put_stid(&stp->st_stid);
5248out: 5253out:
@@ -6787,6 +6792,10 @@ static int nfs4_state_create_net(struct net *net)
6787 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); 6792 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
6788 nn->conf_name_tree = RB_ROOT; 6793 nn->conf_name_tree = RB_ROOT;
6789 nn->unconf_name_tree = RB_ROOT; 6794 nn->unconf_name_tree = RB_ROOT;
6795 nn->boot_time = get_seconds();
6796 nn->grace_ended = false;
6797 nn->nfsd4_manager.block_opens = true;
6798 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
6790 INIT_LIST_HEAD(&nn->client_lru); 6799 INIT_LIST_HEAD(&nn->client_lru);
6791 INIT_LIST_HEAD(&nn->close_lru); 6800 INIT_LIST_HEAD(&nn->close_lru);
6792 INIT_LIST_HEAD(&nn->del_recall_lru); 6801 INIT_LIST_HEAD(&nn->del_recall_lru);
@@ -6841,9 +6850,6 @@ nfs4_state_start_net(struct net *net)
6841 ret = nfs4_state_create_net(net); 6850 ret = nfs4_state_create_net(net);
6842 if (ret) 6851 if (ret)
6843 return ret; 6852 return ret;
6844 nn->boot_time = get_seconds();
6845 nn->grace_ended = false;
6846 nn->nfsd4_manager.block_opens = true;
6847 locks_start_grace(net, &nn->nfsd4_manager); 6853 locks_start_grace(net, &nn->nfsd4_manager);
6848 nfsd4_client_tracking_init(net); 6854 nfsd4_client_tracking_init(net);
6849 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n", 6855 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 544672b440de..ee0da259a3d3 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1538,6 +1538,8 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
1538 gdev->gd_maxcount = be32_to_cpup(p++); 1538 gdev->gd_maxcount = be32_to_cpup(p++);
1539 num = be32_to_cpup(p++); 1539 num = be32_to_cpup(p++);
1540 if (num) { 1540 if (num) {
1541 if (num > 1000)
1542 goto xdr_error;
1541 READ_BUF(4 * num); 1543 READ_BUF(4 * num);
1542 gdev->gd_notify_types = be32_to_cpup(p++); 1544 gdev->gd_notify_types = be32_to_cpup(p++);
1543 for (i = 1; i < num; i++) { 1545 for (i = 1; i < num; i++) {
@@ -3595,7 +3597,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
3595 nfserr = nfserr_resource; 3597 nfserr = nfserr_resource;
3596 goto err_no_verf; 3598 goto err_no_verf;
3597 } 3599 }
3598 maxcount = min_t(u32, readdir->rd_maxcount, INT_MAX); 3600 maxcount = svc_max_payload(resp->rqstp);
3601 maxcount = min_t(u32, readdir->rd_maxcount, maxcount);
3599 /* 3602 /*
3600 * Note the rfc defines rd_maxcount as the size of the 3603 * Note the rfc defines rd_maxcount as the size of the
3601 * READDIR4resok structure, which includes the verifier above 3604 * READDIR4resok structure, which includes the verifier above
@@ -3609,7 +3612,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
3609 3612
3610 /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */ 3613 /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
3611 if (!readdir->rd_dircount) 3614 if (!readdir->rd_dircount)
3612 readdir->rd_dircount = INT_MAX; 3615 readdir->rd_dircount = svc_max_payload(resp->rqstp);
3613 3616
3614 readdir->xdr = xdr; 3617 readdir->xdr = xdr;
3615 readdir->rd_maxcount = maxcount; 3618 readdir->rd_maxcount = maxcount;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 91e0c5429b4d..17138a97f306 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -92,6 +92,12 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
92 err = follow_down(&path); 92 err = follow_down(&path);
93 if (err < 0) 93 if (err < 0)
94 goto out; 94 goto out;
95 if (path.mnt == exp->ex_path.mnt && path.dentry == dentry &&
96 nfsd_mountpoint(dentry, exp) == 2) {
97 /* This is only a mountpoint in some other namespace */
98 path_put(&path);
99 goto out;
100 }
95 101
96 exp2 = rqst_exp_get_by_name(rqstp, &path); 102 exp2 = rqst_exp_get_by_name(rqstp, &path);
97 if (IS_ERR(exp2)) { 103 if (IS_ERR(exp2)) {
@@ -165,16 +171,26 @@ static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, st
165/* 171/*
166 * For nfsd purposes, we treat V4ROOT exports as though there was an 172 * For nfsd purposes, we treat V4ROOT exports as though there was an
167 * export at *every* directory. 173 * export at *every* directory.
174 * We return:
175 * '1' if this dentry *must* be an export point,
176 * '2' if it might be, if there is really a mount here, and
177 * '0' if there is no chance of an export point here.
168 */ 178 */
169int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp) 179int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
170{ 180{
171 if (d_mountpoint(dentry)) 181 if (!d_inode(dentry))
182 return 0;
183 if (exp->ex_flags & NFSEXP_V4ROOT)
172 return 1; 184 return 1;
173 if (nfsd4_is_junction(dentry)) 185 if (nfsd4_is_junction(dentry))
174 return 1; 186 return 1;
175 if (!(exp->ex_flags & NFSEXP_V4ROOT)) 187 if (d_mountpoint(dentry))
176 return 0; 188 /*
177 return d_inode(dentry) != NULL; 189 * Might only be a mountpoint in a different namespace,
190 * but we need to check.
191 */
192 return 2;
193 return 0;
178} 194}
179 195
180__be32 196__be32
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index c9a1a491aa91..cd7f5b0abe84 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -50,8 +50,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
50{ 50{
51 int err = nilfs_add_link(dentry, inode); 51 int err = nilfs_add_link(dentry, inode);
52 if (!err) { 52 if (!err) {
53 d_instantiate(dentry, inode); 53 d_instantiate_new(dentry, inode);
54 unlock_new_inode(inode);
55 return 0; 54 return 0;
56 } 55 }
57 inode_dec_link_count(inode); 56 inode_dec_link_count(inode);
@@ -246,8 +245,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
246 goto out_fail; 245 goto out_fail;
247 246
248 nilfs_mark_inode_dirty(inode); 247 nilfs_mark_inode_dirty(inode);
249 d_instantiate(dentry, inode); 248 d_instantiate_new(dentry, inode);
250 unlock_new_inode(inode);
251out: 249out:
252 if (!err) 250 if (!err)
253 err = nilfs_transaction_commit(dir->i_sb); 251 err = nilfs_transaction_commit(dir->i_sb);
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index e0e5f7c3c99f..8a459b179183 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
92 u32 event_mask, 92 u32 event_mask,
93 void *data, int data_type) 93 void *data, int data_type)
94{ 94{
95 __u32 marks_mask, marks_ignored_mask; 95 __u32 marks_mask = 0, marks_ignored_mask = 0;
96 struct path *path = data; 96 struct path *path = data;
97 97
98 pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p" 98 pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
108 !d_can_lookup(path->dentry)) 108 !d_can_lookup(path->dentry))
109 return false; 109 return false;
110 110
111 if (inode_mark && vfsmnt_mark) { 111 /*
112 marks_mask = (vfsmnt_mark->mask | inode_mark->mask); 112 * if the event is for a child and this inode doesn't care about
113 marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask); 113 * events on the child, don't send it!
114 } else if (inode_mark) { 114 */
115 /* 115 if (inode_mark &&
116 * if the event is for a child and this inode doesn't care about 116 (!(event_mask & FS_EVENT_ON_CHILD) ||
117 * events on the child, don't send it! 117 (inode_mark->mask & FS_EVENT_ON_CHILD))) {
118 */ 118 marks_mask |= inode_mark->mask;
119 if ((event_mask & FS_EVENT_ON_CHILD) && 119 marks_ignored_mask |= inode_mark->ignored_mask;
120 !(inode_mark->mask & FS_EVENT_ON_CHILD)) 120 }
121 return false; 121
122 marks_mask = inode_mark->mask; 122 if (vfsmnt_mark) {
123 marks_ignored_mask = inode_mark->ignored_mask; 123 marks_mask |= vfsmnt_mark->mask;
124 } else if (vfsmnt_mark) { 124 marks_ignored_mask |= vfsmnt_mark->ignored_mask;
125 marks_mask = vfsmnt_mark->mask;
126 marks_ignored_mask = vfsmnt_mark->ignored_mask;
127 } else {
128 BUG();
129 } 125 }
130 126
131 if (d_is_dir(path->dentry) && 127 if (d_is_dir(path->dentry) &&
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 8f20d6016e20..914ca6b2794d 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -95,6 +95,7 @@ slow:
95 return ERR_PTR(-ENOMEM); 95 return ERR_PTR(-ENOMEM);
96 } 96 }
97 d_instantiate(dentry, inode); 97 d_instantiate(dentry, inode);
98 dentry->d_flags |= DCACHE_RCUACCESS;
98 dentry->d_fsdata = (void *)ns_ops; 99 dentry->d_fsdata = (void *)ns_ops;
99 d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry); 100 d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
100 if (d) { 101 if (d) {
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 164307b99405..1e0d8da0d3cd 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -314,7 +314,9 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
314 return ERR_PTR(ret); 314 return ERR_PTR(ret);
315 } 315 }
316 316
317 down_read(&OCFS2_I(inode)->ip_xattr_sem);
317 acl = ocfs2_get_acl_nolock(inode, type, di_bh); 318 acl = ocfs2_get_acl_nolock(inode, type, di_bh);
319 up_read(&OCFS2_I(inode)->ip_xattr_sem);
318 320
319 ocfs2_inode_unlock(inode, 0); 321 ocfs2_inode_unlock(inode, 0);
320 brelse(di_bh); 322 brelse(di_bh);
@@ -333,7 +335,9 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
333 if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) 335 if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
334 return 0; 336 return 0;
335 337
338 down_read(&OCFS2_I(inode)->ip_xattr_sem);
336 acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh); 339 acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
340 up_read(&OCFS2_I(inode)->ip_xattr_sem);
337 if (IS_ERR(acl) || !acl) 341 if (IS_ERR(acl) || !acl)
338 return PTR_ERR(acl); 342 return PTR_ERR(acl);
339 ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); 343 ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
@@ -364,8 +368,10 @@ int ocfs2_init_acl(handle_t *handle,
364 368
365 if (!S_ISLNK(inode->i_mode)) { 369 if (!S_ISLNK(inode->i_mode)) {
366 if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) { 370 if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
371 down_read(&OCFS2_I(dir)->ip_xattr_sem);
367 acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT, 372 acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
368 dir_bh); 373 dir_bh);
374 up_read(&OCFS2_I(dir)->ip_xattr_sem);
369 if (IS_ERR(acl)) 375 if (IS_ERR(acl))
370 return PTR_ERR(acl); 376 return PTR_ERR(acl);
371 } 377 }
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 72afdca3cea7..3c45a9301a09 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -40,6 +40,9 @@ char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
40 "panic", /* O2NM_FENCE_PANIC */ 40 "panic", /* O2NM_FENCE_PANIC */
41}; 41};
42 42
43static inline void o2nm_lock_subsystem(void);
44static inline void o2nm_unlock_subsystem(void);
45
43struct o2nm_node *o2nm_get_node_by_num(u8 node_num) 46struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
44{ 47{
45 struct o2nm_node *node = NULL; 48 struct o2nm_node *node = NULL;
@@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
181{ 184{
182 /* through the first node_set .parent 185 /* through the first node_set .parent
183 * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ 186 * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
184 return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); 187 if (node->nd_item.ci_parent)
188 return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
189 else
190 return NULL;
185} 191}
186 192
187enum { 193enum {
@@ -194,7 +200,7 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
194 size_t count) 200 size_t count)
195{ 201{
196 struct o2nm_node *node = to_o2nm_node(item); 202 struct o2nm_node *node = to_o2nm_node(item);
197 struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); 203 struct o2nm_cluster *cluster;
198 unsigned long tmp; 204 unsigned long tmp;
199 char *p = (char *)page; 205 char *p = (char *)page;
200 int ret = 0; 206 int ret = 0;
@@ -214,6 +220,13 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
214 !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) 220 !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
215 return -EINVAL; /* XXX */ 221 return -EINVAL; /* XXX */
216 222
223 o2nm_lock_subsystem();
224 cluster = to_o2nm_cluster_from_node(node);
225 if (!cluster) {
226 o2nm_unlock_subsystem();
227 return -EINVAL;
228 }
229
217 write_lock(&cluster->cl_nodes_lock); 230 write_lock(&cluster->cl_nodes_lock);
218 if (cluster->cl_nodes[tmp]) 231 if (cluster->cl_nodes[tmp])
219 ret = -EEXIST; 232 ret = -EEXIST;
@@ -226,6 +239,8 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
226 set_bit(tmp, cluster->cl_nodes_bitmap); 239 set_bit(tmp, cluster->cl_nodes_bitmap);
227 } 240 }
228 write_unlock(&cluster->cl_nodes_lock); 241 write_unlock(&cluster->cl_nodes_lock);
242 o2nm_unlock_subsystem();
243
229 if (ret) 244 if (ret)
230 return ret; 245 return ret;
231 246
@@ -269,7 +284,7 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
269 size_t count) 284 size_t count)
270{ 285{
271 struct o2nm_node *node = to_o2nm_node(item); 286 struct o2nm_node *node = to_o2nm_node(item);
272 struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); 287 struct o2nm_cluster *cluster;
273 int ret, i; 288 int ret, i;
274 struct rb_node **p, *parent; 289 struct rb_node **p, *parent;
275 unsigned int octets[4]; 290 unsigned int octets[4];
@@ -286,6 +301,13 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
286 be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); 301 be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
287 } 302 }
288 303
304 o2nm_lock_subsystem();
305 cluster = to_o2nm_cluster_from_node(node);
306 if (!cluster) {
307 o2nm_unlock_subsystem();
308 return -EINVAL;
309 }
310
289 ret = 0; 311 ret = 0;
290 write_lock(&cluster->cl_nodes_lock); 312 write_lock(&cluster->cl_nodes_lock);
291 if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) 313 if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
@@ -298,6 +320,8 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
298 rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); 320 rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
299 } 321 }
300 write_unlock(&cluster->cl_nodes_lock); 322 write_unlock(&cluster->cl_nodes_lock);
323 o2nm_unlock_subsystem();
324
301 if (ret) 325 if (ret)
302 return ret; 326 return ret;
303 327
@@ -315,7 +339,7 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
315 size_t count) 339 size_t count)
316{ 340{
317 struct o2nm_node *node = to_o2nm_node(item); 341 struct o2nm_node *node = to_o2nm_node(item);
318 struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); 342 struct o2nm_cluster *cluster;
319 unsigned long tmp; 343 unsigned long tmp;
320 char *p = (char *)page; 344 char *p = (char *)page;
321 ssize_t ret; 345 ssize_t ret;
@@ -333,17 +357,26 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
333 !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) 357 !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
334 return -EINVAL; /* XXX */ 358 return -EINVAL; /* XXX */
335 359
360 o2nm_lock_subsystem();
361 cluster = to_o2nm_cluster_from_node(node);
362 if (!cluster) {
363 ret = -EINVAL;
364 goto out;
365 }
366
336 /* the only failure case is trying to set a new local node 367 /* the only failure case is trying to set a new local node
337 * when a different one is already set */ 368 * when a different one is already set */
338 if (tmp && tmp == cluster->cl_has_local && 369 if (tmp && tmp == cluster->cl_has_local &&
339 cluster->cl_local_node != node->nd_num) 370 cluster->cl_local_node != node->nd_num) {
340 return -EBUSY; 371 ret = -EBUSY;
372 goto out;
373 }
341 374
342 /* bring up the rx thread if we're setting the new local node. */ 375 /* bring up the rx thread if we're setting the new local node. */
343 if (tmp && !cluster->cl_has_local) { 376 if (tmp && !cluster->cl_has_local) {
344 ret = o2net_start_listening(node); 377 ret = o2net_start_listening(node);
345 if (ret) 378 if (ret)
346 return ret; 379 goto out;
347 } 380 }
348 381
349 if (!tmp && cluster->cl_has_local && 382 if (!tmp && cluster->cl_has_local &&
@@ -358,7 +391,11 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
358 cluster->cl_local_node = node->nd_num; 391 cluster->cl_local_node = node->nd_num;
359 } 392 }
360 393
361 return count; 394 ret = count;
395
396out:
397 o2nm_unlock_subsystem();
398 return ret;
362} 399}
363 400
364CONFIGFS_ATTR(o2nm_node_, num); 401CONFIGFS_ATTR(o2nm_node_, num);
@@ -750,6 +787,16 @@ static struct o2nm_cluster_group o2nm_cluster_group = {
750 }, 787 },
751}; 788};
752 789
790static inline void o2nm_lock_subsystem(void)
791{
792 mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
793}
794
795static inline void o2nm_unlock_subsystem(void)
796{
797 mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
798}
799
753int o2nm_depend_item(struct config_item *item) 800int o2nm_depend_item(struct config_item *item)
754{ 801{
755 return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); 802 return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 2ee7fe747cea..c55a9c47ac17 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -674,20 +674,6 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm)
674 spin_unlock(&dlm->spinlock); 674 spin_unlock(&dlm->spinlock);
675} 675}
676 676
677int dlm_shutting_down(struct dlm_ctxt *dlm)
678{
679 int ret = 0;
680
681 spin_lock(&dlm_domain_lock);
682
683 if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
684 ret = 1;
685
686 spin_unlock(&dlm_domain_lock);
687
688 return ret;
689}
690
691void dlm_unregister_domain(struct dlm_ctxt *dlm) 677void dlm_unregister_domain(struct dlm_ctxt *dlm)
692{ 678{
693 int leave = 0; 679 int leave = 0;
diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
index fd6122a38dbd..8a9281411c18 100644
--- a/fs/ocfs2/dlm/dlmdomain.h
+++ b/fs/ocfs2/dlm/dlmdomain.h
@@ -28,7 +28,30 @@
28extern spinlock_t dlm_domain_lock; 28extern spinlock_t dlm_domain_lock;
29extern struct list_head dlm_domains; 29extern struct list_head dlm_domains;
30 30
31int dlm_shutting_down(struct dlm_ctxt *dlm); 31static inline int dlm_joined(struct dlm_ctxt *dlm)
32{
33 int ret = 0;
34
35 spin_lock(&dlm_domain_lock);
36 if (dlm->dlm_state == DLM_CTXT_JOINED)
37 ret = 1;
38 spin_unlock(&dlm_domain_lock);
39
40 return ret;
41}
42
43static inline int dlm_shutting_down(struct dlm_ctxt *dlm)
44{
45 int ret = 0;
46
47 spin_lock(&dlm_domain_lock);
48 if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
49 ret = 1;
50 spin_unlock(&dlm_domain_lock);
51
52 return ret;
53}
54
32void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm, 55void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
33 int node_num); 56 int node_num);
34 57
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 4a338803e7e9..88149b4387c2 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1377,6 +1377,15 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
1377 if (!dlm_grab(dlm)) 1377 if (!dlm_grab(dlm))
1378 return -EINVAL; 1378 return -EINVAL;
1379 1379
1380 if (!dlm_joined(dlm)) {
1381 mlog(ML_ERROR, "Domain %s not joined! "
1382 "lockres %.*s, master %u\n",
1383 dlm->name, mres->lockname_len,
1384 mres->lockname, mres->master);
1385 dlm_put(dlm);
1386 return -EINVAL;
1387 }
1388
1380 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); 1389 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1381 1390
1382 real_master = mres->master; 1391 real_master = mres->master;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 13534f4fe5b5..722eb5bc9b8f 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -666,23 +666,24 @@ static int __ocfs2_journal_access(handle_t *handle,
666 /* we can safely remove this assertion after testing. */ 666 /* we can safely remove this assertion after testing. */
667 if (!buffer_uptodate(bh)) { 667 if (!buffer_uptodate(bh)) {
668 mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n"); 668 mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
669 mlog(ML_ERROR, "b_blocknr=%llu\n", 669 mlog(ML_ERROR, "b_blocknr=%llu, b_state=0x%lx\n",
670 (unsigned long long)bh->b_blocknr); 670 (unsigned long long)bh->b_blocknr, bh->b_state);
671 671
672 lock_buffer(bh); 672 lock_buffer(bh);
673 /* 673 /*
674 * A previous attempt to write this buffer head failed. 674 * A previous transaction with a couple of buffer heads fail
675 * Nothing we can do but to retry the write and hope for 675 * to checkpoint, so all the bhs are marked as BH_Write_EIO.
676 * the best. 676 * For current transaction, the bh is just among those error
677 * bhs which previous transaction handle. We can't just clear
678 * its BH_Write_EIO and reuse directly, since other bhs are
679 * not written to disk yet and that will cause metadata
680 * inconsistency. So we should set fs read-only to avoid
681 * further damage.
677 */ 682 */
678 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) { 683 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
679 clear_buffer_write_io_error(bh);
680 set_buffer_uptodate(bh);
681 }
682
683 if (!buffer_uptodate(bh)) {
684 unlock_buffer(bh); 684 unlock_buffer(bh);
685 return -EIO; 685 return ocfs2_error(osb->sb, "A previous attempt to "
686 "write this buffer head failed\n");
686 } 687 }
687 unlock_buffer(bh); 688 unlock_buffer(bh);
688 } 689 }
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 2de4c8a9340c..4f5141350af8 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -477,9 +477,8 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
477 new = ocfs2_get_system_file_inode(osb, i, osb->slot_num); 477 new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
478 if (!new) { 478 if (!new) {
479 ocfs2_release_system_inodes(osb); 479 ocfs2_release_system_inodes(osb);
480 status = -EINVAL; 480 status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
481 mlog_errno(status); 481 mlog_errno(status);
482 /* FIXME: Should ERROR_RO_FS */
483 mlog(ML_ERROR, "Unable to load system inode %d, " 482 mlog(ML_ERROR, "Unable to load system inode %d, "
484 "possibly corrupt fs?", i); 483 "possibly corrupt fs?", i);
485 goto bail; 484 goto bail;
@@ -508,7 +507,7 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
508 new = ocfs2_get_system_file_inode(osb, i, osb->slot_num); 507 new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
509 if (!new) { 508 if (!new) {
510 ocfs2_release_system_inodes(osb); 509 ocfs2_release_system_inodes(osb);
511 status = -EINVAL; 510 status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
512 mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n", 511 mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n",
513 status, i, osb->slot_num); 512 status, i, osb->slot_num);
514 goto bail; 513 goto bail;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 877830b05e12..4f0788232f2f 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -639,9 +639,11 @@ int ocfs2_calc_xattr_init(struct inode *dir,
639 si->value_len); 639 si->value_len);
640 640
641 if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) { 641 if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
642 down_read(&OCFS2_I(dir)->ip_xattr_sem);
642 acl_len = ocfs2_xattr_get_nolock(dir, dir_bh, 643 acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
643 OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT, 644 OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
644 "", NULL, 0); 645 "", NULL, 0);
646 up_read(&OCFS2_I(dir)->ip_xattr_sem);
645 if (acl_len > 0) { 647 if (acl_len > 0) {
646 a_size = ocfs2_xattr_entry_real_size(0, acl_len); 648 a_size = ocfs2_xattr_entry_real_size(0, acl_len);
647 if (S_ISDIR(mode)) 649 if (S_ISDIR(mode))
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 220b04f04523..985a4cdae06d 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -272,6 +272,16 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
272 return vfs_getxattr(realpath.dentry, name, value, size); 272 return vfs_getxattr(realpath.dentry, name, value, size);
273} 273}
274 274
275static bool ovl_can_list(const char *s)
276{
277 /* List all non-trusted xatts */
278 if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
279 return true;
280
281 /* Never list trusted.overlay, list other trusted for superuser only */
282 return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
283}
284
275ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) 285ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
276{ 286{
277 struct path realpath; 287 struct path realpath;
@@ -296,7 +306,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
296 return -EIO; 306 return -EIO;
297 307
298 len -= slen; 308 len -= slen;
299 if (ovl_is_private_xattr(s)) { 309 if (!ovl_can_list(s)) {
300 res -= slen; 310 res -= slen;
301 memmove(s, s + slen, len); 311 memmove(s, s + slen, len);
302 } else { 312 } else {
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index adcb1398c481..299a6e1d6b77 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -441,10 +441,14 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
441 struct dentry *dentry = file->f_path.dentry; 441 struct dentry *dentry = file->f_path.dentry;
442 struct file *realfile = od->realfile; 442 struct file *realfile = od->realfile;
443 443
444 /* Nothing to sync for lower */
445 if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
446 return 0;
447
444 /* 448 /*
445 * Need to check if we started out being a lower dir, but got copied up 449 * Need to check if we started out being a lower dir, but got copied up
446 */ 450 */
447 if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) { 451 if (!od->is_upper) {
448 struct inode *inode = file_inode(file); 452 struct inode *inode = file_inode(file);
449 453
450 realfile = lockless_dereference(od->upperfile); 454 realfile = lockless_dereference(od->upperfile);
diff --git a/fs/pipe.c b/fs/pipe.c
index 39eff9a67253..1e7263bb837a 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -616,6 +616,9 @@ struct pipe_inode_info *alloc_pipe_info(void)
616 unsigned long pipe_bufs = PIPE_DEF_BUFFERS; 616 unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
617 struct user_struct *user = get_current_user(); 617 struct user_struct *user = get_current_user();
618 618
619 if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
620 pipe_bufs = pipe_max_size >> PAGE_SHIFT;
621
619 if (!too_many_pipe_buffers_hard(user)) { 622 if (!too_many_pipe_buffers_hard(user)) {
620 if (too_many_pipe_buffers_soft(user)) 623 if (too_many_pipe_buffers_soft(user))
621 pipe_bufs = 1; 624 pipe_bufs = 1;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index b6c00ce0e29e..cb71cbae606d 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -79,6 +79,7 @@
79#include <linux/delayacct.h> 79#include <linux/delayacct.h>
80#include <linux/seq_file.h> 80#include <linux/seq_file.h>
81#include <linux/pid_namespace.h> 81#include <linux/pid_namespace.h>
82#include <linux/prctl.h>
82#include <linux/ptrace.h> 83#include <linux/ptrace.h>
83#include <linux/tracehook.h> 84#include <linux/tracehook.h>
84#include <linux/string_helpers.h> 85#include <linux/string_helpers.h>
@@ -332,6 +333,31 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
332#ifdef CONFIG_SECCOMP 333#ifdef CONFIG_SECCOMP
333 seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode); 334 seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode);
334#endif 335#endif
336 seq_printf(m, "\nSpeculation_Store_Bypass:\t");
337 switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
338 case -EINVAL:
339 seq_printf(m, "unknown");
340 break;
341 case PR_SPEC_NOT_AFFECTED:
342 seq_printf(m, "not vulnerable");
343 break;
344 case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
345 seq_printf(m, "thread force mitigated");
346 break;
347 case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
348 seq_printf(m, "thread mitigated");
349 break;
350 case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
351 seq_printf(m, "thread vulnerable");
352 break;
353 case PR_SPEC_DISABLE:
354 seq_printf(m, "globally mitigated");
355 break;
356 default:
357 seq_printf(m, "vulnerable");
358 break;
359 }
360 seq_putc(m, '\n');
335} 361}
336 362
337static inline void task_context_switch_counts(struct seq_file *m, 363static inline void task_context_switch_counts(struct seq_file *m,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index dd732400578e..5f9cec2db6c3 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -94,6 +94,8 @@
94#include "internal.h" 94#include "internal.h"
95#include "fd.h" 95#include "fd.h"
96 96
97#include "../../lib/kstrtox.h"
98
97/* NOTE: 99/* NOTE:
98 * Implementing inode permission operations in /proc is almost 100 * Implementing inode permission operations in /proc is almost
99 * certainly an error. Permission checks need to happen during 101 * certainly an error. Permission checks need to happen during
@@ -953,6 +955,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
953 unsigned long src = *ppos; 955 unsigned long src = *ppos;
954 int ret = 0; 956 int ret = 0;
955 struct mm_struct *mm = file->private_data; 957 struct mm_struct *mm = file->private_data;
958 unsigned long env_start, env_end;
956 959
957 /* Ensure the process spawned far enough to have an environment. */ 960 /* Ensure the process spawned far enough to have an environment. */
958 if (!mm || !mm->env_end) 961 if (!mm || !mm->env_end)
@@ -965,19 +968,25 @@ static ssize_t environ_read(struct file *file, char __user *buf,
965 ret = 0; 968 ret = 0;
966 if (!atomic_inc_not_zero(&mm->mm_users)) 969 if (!atomic_inc_not_zero(&mm->mm_users))
967 goto free; 970 goto free;
971
972 down_read(&mm->mmap_sem);
973 env_start = mm->env_start;
974 env_end = mm->env_end;
975 up_read(&mm->mmap_sem);
976
968 while (count > 0) { 977 while (count > 0) {
969 size_t this_len, max_len; 978 size_t this_len, max_len;
970 int retval; 979 int retval;
971 980
972 if (src >= (mm->env_end - mm->env_start)) 981 if (src >= (env_end - env_start))
973 break; 982 break;
974 983
975 this_len = mm->env_end - (mm->env_start + src); 984 this_len = env_end - (env_start + src);
976 985
977 max_len = min_t(size_t, PAGE_SIZE, count); 986 max_len = min_t(size_t, PAGE_SIZE, count);
978 this_len = min(max_len, this_len); 987 this_len = min(max_len, this_len);
979 988
980 retval = access_remote_vm(mm, (mm->env_start + src), 989 retval = access_remote_vm(mm, (env_start + src),
981 page, this_len, 0); 990 page, this_len, 0);
982 991
983 if (retval <= 0) { 992 if (retval <= 0) {
@@ -1829,8 +1838,33 @@ end_instantiate:
1829static int dname_to_vma_addr(struct dentry *dentry, 1838static int dname_to_vma_addr(struct dentry *dentry,
1830 unsigned long *start, unsigned long *end) 1839 unsigned long *start, unsigned long *end)
1831{ 1840{
1832 if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2) 1841 const char *str = dentry->d_name.name;
1842 unsigned long long sval, eval;
1843 unsigned int len;
1844
1845 len = _parse_integer(str, 16, &sval);
1846 if (len & KSTRTOX_OVERFLOW)
1833 return -EINVAL; 1847 return -EINVAL;
1848 if (sval != (unsigned long)sval)
1849 return -EINVAL;
1850 str += len;
1851
1852 if (*str != '-')
1853 return -EINVAL;
1854 str++;
1855
1856 len = _parse_integer(str, 16, &eval);
1857 if (len & KSTRTOX_OVERFLOW)
1858 return -EINVAL;
1859 if (eval != (unsigned long)eval)
1860 return -EINVAL;
1861 str += len;
1862
1863 if (*str != '\0')
1864 return -EINVAL;
1865
1866 *start = sval;
1867 *end = eval;
1834 1868
1835 return 0; 1869 return 0;
1836} 1870}
@@ -3076,6 +3110,44 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
3076} 3110}
3077 3111
3078/* 3112/*
3113 * proc_tid_comm_permission is a special permission function exclusively
3114 * used for the node /proc/<pid>/task/<tid>/comm.
3115 * It bypasses generic permission checks in the case where a task of the same
3116 * task group attempts to access the node.
3117 * The rationale behind this is that glibc and bionic access this node for
3118 * cross thread naming (pthread_set/getname_np(!self)). However, if
3119 * PR_SET_DUMPABLE gets set to 0 this node among others becomes uid=0 gid=0,
3120 * which locks out the cross thread naming implementation.
3121 * This function makes sure that the node is always accessible for members of
3122 * same thread group.
3123 */
3124static int proc_tid_comm_permission(struct inode *inode, int mask)
3125{
3126 bool is_same_tgroup;
3127 struct task_struct *task;
3128
3129 task = get_proc_task(inode);
3130 if (!task)
3131 return -ESRCH;
3132 is_same_tgroup = same_thread_group(current, task);
3133 put_task_struct(task);
3134
3135 if (likely(is_same_tgroup && !(mask & MAY_EXEC))) {
3136 /* This file (/proc/<pid>/task/<tid>/comm) can always be
3137 * read or written by the members of the corresponding
3138 * thread group.
3139 */
3140 return 0;
3141 }
3142
3143 return generic_permission(inode, mask);
3144}
3145
3146static const struct inode_operations proc_tid_comm_inode_operations = {
3147 .permission = proc_tid_comm_permission,
3148};
3149
3150/*
3079 * Tasks 3151 * Tasks
3080 */ 3152 */
3081static const struct pid_entry tid_base_stuff[] = { 3153static const struct pid_entry tid_base_stuff[] = {
@@ -3093,7 +3165,9 @@ static const struct pid_entry tid_base_stuff[] = {
3093#ifdef CONFIG_SCHED_DEBUG 3165#ifdef CONFIG_SCHED_DEBUG
3094 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 3166 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
3095#endif 3167#endif
3096 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), 3168 NOD("comm", S_IFREG|S_IRUGO|S_IWUSR,
3169 &proc_tid_comm_inode_operations,
3170 &proc_pid_set_comm_operations, {}),
3097#ifdef CONFIG_HAVE_ARCH_TRACEHOOK 3171#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
3098 ONE("syscall", S_IRUSR, proc_pid_syscall), 3172 ONE("syscall", S_IRUSR, proc_pid_syscall),
3099#endif 3173#endif
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 9155a5a0d3b9..df4661abadc4 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -57,11 +57,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
57 /* 57 /*
58 * Estimate the amount of memory available for userspace allocations, 58 * Estimate the amount of memory available for userspace allocations,
59 * without causing swapping. 59 * without causing swapping.
60 *
61 * Free memory cannot be taken below the low watermark, before the
62 * system starts swapping.
63 */ 60 */
64 available = i.freeram - wmark_low; 61 available = i.freeram - totalreserve_pages;
65 62
66 /* 63 /*
67 * Not all the page cache can be freed, otherwise the system will 64 * Not all the page cache can be freed, otherwise the system will
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 4dbe1e2daeca..5e1054f028af 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -654,7 +654,10 @@ static bool proc_sys_link_fill_cache(struct file *file,
654 struct ctl_table *table) 654 struct ctl_table *table)
655{ 655{
656 bool ret = true; 656 bool ret = true;
657
657 head = sysctl_head_grab(head); 658 head = sysctl_head_grab(head);
659 if (IS_ERR(head))
660 return false;
658 661
659 if (S_ISLNK(table->mode)) { 662 if (S_ISLNK(table->mode)) {
660 /* It is not an error if we can not follow the link ignore it */ 663 /* It is not an error if we can not follow the link ignore it */
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 07ef85e19fbc..75691a20313c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -253,24 +253,15 @@ static int do_maps_open(struct inode *inode, struct file *file,
253 * /proc/PID/maps that is the stack of the main task. 253 * /proc/PID/maps that is the stack of the main task.
254 */ 254 */
255static int is_stack(struct proc_maps_private *priv, 255static int is_stack(struct proc_maps_private *priv,
256 struct vm_area_struct *vma, int is_pid) 256 struct vm_area_struct *vma)
257{ 257{
258 int stack = 0; 258 /*
259 259 * We make no effort to guess what a given thread considers to be
260 if (is_pid) { 260 * its "stack". It's not even well-defined for programs written
261 stack = vma->vm_start <= vma->vm_mm->start_stack && 261 * languages like Go.
262 vma->vm_end >= vma->vm_mm->start_stack; 262 */
263 } else { 263 return vma->vm_start <= vma->vm_mm->start_stack &&
264 struct inode *inode = priv->inode; 264 vma->vm_end >= vma->vm_mm->start_stack;
265 struct task_struct *task;
266
267 rcu_read_lock();
268 task = pid_task(proc_pid(inode), PIDTYPE_PID);
269 if (task)
270 stack = vma_is_stack_for_task(vma, task);
271 rcu_read_unlock();
272 }
273 return stack;
274} 265}
275 266
276static void 267static void
@@ -337,7 +328,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
337 goto done; 328 goto done;
338 } 329 }
339 330
340 if (is_stack(priv, vma, is_pid)) 331 if (is_stack(priv, vma))
341 name = "[stack]"; 332 name = "[stack]";
342 } 333 }
343 334
@@ -1560,7 +1551,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1560 seq_file_path(m, file, "\n\t= "); 1551 seq_file_path(m, file, "\n\t= ");
1561 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { 1552 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1562 seq_puts(m, " heap"); 1553 seq_puts(m, " heap");
1563 } else if (is_stack(proc_priv, vma, is_pid)) { 1554 } else if (is_stack(proc_priv, vma)) {
1564 seq_puts(m, " stack"); 1555 seq_puts(m, " stack");
1565 } 1556 }
1566 1557
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index faacb0c0d857..37175621e890 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -124,25 +124,17 @@ unsigned long task_statm(struct mm_struct *mm,
124} 124}
125 125
126static int is_stack(struct proc_maps_private *priv, 126static int is_stack(struct proc_maps_private *priv,
127 struct vm_area_struct *vma, int is_pid) 127 struct vm_area_struct *vma)
128{ 128{
129 struct mm_struct *mm = vma->vm_mm; 129 struct mm_struct *mm = vma->vm_mm;
130 int stack = 0; 130
131 131 /*
132 if (is_pid) { 132 * We make no effort to guess what a given thread considers to be
133 stack = vma->vm_start <= mm->start_stack && 133 * its "stack". It's not even well-defined for programs written
134 vma->vm_end >= mm->start_stack; 134 * languages like Go.
135 } else { 135 */
136 struct inode *inode = priv->inode; 136 return vma->vm_start <= mm->start_stack &&
137 struct task_struct *task; 137 vma->vm_end >= mm->start_stack;
138
139 rcu_read_lock();
140 task = pid_task(proc_pid(inode), PIDTYPE_PID);
141 if (task)
142 stack = vma_is_stack_for_task(vma, task);
143 rcu_read_unlock();
144 }
145 return stack;
146} 138}
147 139
148/* 140/*
@@ -184,7 +176,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
184 if (file) { 176 if (file) {
185 seq_pad(m, ' '); 177 seq_pad(m, ' ');
186 seq_file_path(m, file, ""); 178 seq_file_path(m, file, "");
187 } else if (mm && is_stack(priv, vma, is_pid)) { 179 } else if (mm && is_stack(priv, vma)) {
188 seq_pad(m, ' '); 180 seq_pad(m, ' ');
189 seq_printf(m, "[stack]"); 181 seq_printf(m, "[stack]");
190 } 182 }
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 353ff31dcee1..1cb1d02c5937 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2919,7 +2919,8 @@ static int __init dquot_init(void)
2919 pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld," 2919 pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
2920 " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order)); 2920 " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
2921 2921
2922 register_shrinker(&dqcache_shrinker); 2922 if (register_shrinker(&dqcache_shrinker))
2923 panic("Cannot register dquot shrinker");
2923 2924
2924 return 0; 2925 return 0;
2925} 2926}
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index dc198bc64c61..edc8ef78b63f 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -513,9 +513,17 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th,
513 "inode has negative prealloc blocks count."); 513 "inode has negative prealloc blocks count.");
514#endif 514#endif
515 while (ei->i_prealloc_count > 0) { 515 while (ei->i_prealloc_count > 0) {
516 reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block); 516 b_blocknr_t block_to_free;
517 ei->i_prealloc_block++; 517
518 /*
519 * reiserfs_free_prealloc_block can drop the write lock,
520 * which could allow another caller to free the same block.
521 * We can protect against it by modifying the prealloc
522 * state before calling it.
523 */
524 block_to_free = ei->i_prealloc_block++;
518 ei->i_prealloc_count--; 525 ei->i_prealloc_count--;
526 reiserfs_free_prealloc_block(th, inode, block_to_free);
519 dirty = 1; 527 dirty = 1;
520 } 528 }
521 if (dirty) 529 if (dirty)
@@ -1128,7 +1136,7 @@ static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
1128 hint->prealloc_size = 0; 1136 hint->prealloc_size = 0;
1129 1137
1130 if (!hint->formatted_node && hint->preallocate) { 1138 if (!hint->formatted_node && hint->preallocate) {
1131 if (S_ISREG(hint->inode->i_mode) 1139 if (S_ISREG(hint->inode->i_mode) && !IS_PRIVATE(hint->inode)
1132 && hint->inode->i_size >= 1140 && hint->inode->i_size >=
1133 REISERFS_SB(hint->th->t_super)->s_alloc_options. 1141 REISERFS_SB(hint->th->t_super)->s_alloc_options.
1134 preallocmin * hint->inode->i_sb->s_blocksize) 1142 preallocmin * hint->inode->i_sb->s_blocksize)
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 9d6486d416a3..00985f9db9f7 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1961,7 +1961,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
1961 * will be requeued because superblock is being shutdown and doesn't 1961 * will be requeued because superblock is being shutdown and doesn't
1962 * have MS_ACTIVE set. 1962 * have MS_ACTIVE set.
1963 */ 1963 */
1964 cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work); 1964 reiserfs_cancel_old_flush(sb);
1965 /* wait for all commits to finish */ 1965 /* wait for all commits to finish */
1966 cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work); 1966 cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
1967 1967
@@ -2643,7 +2643,7 @@ static int journal_init_dev(struct super_block *super,
2643 if (IS_ERR(journal->j_dev_bd)) { 2643 if (IS_ERR(journal->j_dev_bd)) {
2644 result = PTR_ERR(journal->j_dev_bd); 2644 result = PTR_ERR(journal->j_dev_bd);
2645 journal->j_dev_bd = NULL; 2645 journal->j_dev_bd = NULL;
2646 reiserfs_warning(super, 2646 reiserfs_warning(super, "sh-457",
2647 "journal_init_dev: Cannot open '%s': %i", 2647 "journal_init_dev: Cannot open '%s': %i",
2648 jdev_name, result); 2648 jdev_name, result);
2649 return result; 2649 return result;
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
index 249594a821e0..f5cebd70d903 100644
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -475,7 +475,7 @@ static void leaf_item_bottle(struct buffer_info *dest_bi,
475 * 'cpy_bytes'; create new item header; 475 * 'cpy_bytes'; create new item header;
476 * n_ih = new item_header; 476 * n_ih = new item_header;
477 */ 477 */
478 memcpy(&n_ih, ih, SHORT_KEY_SIZE); 478 memcpy(&n_ih.ih_key, &ih->ih_key, KEY_SIZE);
479 479
480 /* Endian safe, both le */ 480 /* Endian safe, both le */
481 n_ih.ih_version = ih->ih_version; 481 n_ih.ih_version = ih->ih_version;
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 3ebc70167e41..eb611bdd4725 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -687,8 +687,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
687 reiserfs_update_inode_transaction(inode); 687 reiserfs_update_inode_transaction(inode);
688 reiserfs_update_inode_transaction(dir); 688 reiserfs_update_inode_transaction(dir);
689 689
690 unlock_new_inode(inode); 690 d_instantiate_new(dentry, inode);
691 d_instantiate(dentry, inode);
692 retval = journal_end(&th); 691 retval = journal_end(&th);
693 692
694out_failed: 693out_failed:
@@ -771,8 +770,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
771 goto out_failed; 770 goto out_failed;
772 } 771 }
773 772
774 unlock_new_inode(inode); 773 d_instantiate_new(dentry, inode);
775 d_instantiate(dentry, inode);
776 retval = journal_end(&th); 774 retval = journal_end(&th);
777 775
778out_failed: 776out_failed:
@@ -871,8 +869,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
871 /* the above add_entry did not update dir's stat data */ 869 /* the above add_entry did not update dir's stat data */
872 reiserfs_update_sd(&th, dir); 870 reiserfs_update_sd(&th, dir);
873 871
874 unlock_new_inode(inode); 872 d_instantiate_new(dentry, inode);
875 d_instantiate(dentry, inode);
876 retval = journal_end(&th); 873 retval = journal_end(&th);
877out_failed: 874out_failed:
878 reiserfs_write_unlock(dir->i_sb); 875 reiserfs_write_unlock(dir->i_sb);
@@ -1186,8 +1183,7 @@ static int reiserfs_symlink(struct inode *parent_dir,
1186 goto out_failed; 1183 goto out_failed;
1187 } 1184 }
1188 1185
1189 unlock_new_inode(inode); 1186 d_instantiate_new(dentry, inode);
1190 d_instantiate(dentry, inode);
1191 retval = journal_end(&th); 1187 retval = journal_end(&th);
1192out_failed: 1188out_failed:
1193 reiserfs_write_unlock(parent_dir->i_sb); 1189 reiserfs_write_unlock(parent_dir->i_sb);
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 2adcde137c3f..6ca00471afbf 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -1326,7 +1326,6 @@ struct cpu_key {
1326#define KEY_NOT_FOUND 0 1326#define KEY_NOT_FOUND 0
1327 1327
1328#define KEY_SIZE (sizeof(struct reiserfs_key)) 1328#define KEY_SIZE (sizeof(struct reiserfs_key))
1329#define SHORT_KEY_SIZE (sizeof (__u32) + sizeof (__u32))
1330 1329
1331/* return values for search_by_key and clones */ 1330/* return values for search_by_key and clones */
1332#define ITEM_FOUND 1 1331#define ITEM_FOUND 1
@@ -2949,6 +2948,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *s,
2949 struct reiserfs_list_bitmap *, unsigned int); 2948 struct reiserfs_list_bitmap *, unsigned int);
2950 2949
2951void reiserfs_schedule_old_flush(struct super_block *s); 2950void reiserfs_schedule_old_flush(struct super_block *s);
2951void reiserfs_cancel_old_flush(struct super_block *s);
2952void add_save_link(struct reiserfs_transaction_handle *th, 2952void add_save_link(struct reiserfs_transaction_handle *th,
2953 struct inode *inode, int truncate); 2953 struct inode *inode, int truncate);
2954int remove_save_link(struct inode *inode, int truncate); 2954int remove_save_link(struct inode *inode, int truncate);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f9f3be50081a..ee095246da4e 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -90,7 +90,9 @@ static void flush_old_commits(struct work_struct *work)
90 s = sbi->s_journal->j_work_sb; 90 s = sbi->s_journal->j_work_sb;
91 91
92 spin_lock(&sbi->old_work_lock); 92 spin_lock(&sbi->old_work_lock);
93 sbi->work_queued = 0; 93 /* Avoid clobbering the cancel state... */
94 if (sbi->work_queued == 1)
95 sbi->work_queued = 0;
94 spin_unlock(&sbi->old_work_lock); 96 spin_unlock(&sbi->old_work_lock);
95 97
96 reiserfs_sync_fs(s, 1); 98 reiserfs_sync_fs(s, 1);
@@ -117,21 +119,22 @@ void reiserfs_schedule_old_flush(struct super_block *s)
117 spin_unlock(&sbi->old_work_lock); 119 spin_unlock(&sbi->old_work_lock);
118} 120}
119 121
120static void cancel_old_flush(struct super_block *s) 122void reiserfs_cancel_old_flush(struct super_block *s)
121{ 123{
122 struct reiserfs_sb_info *sbi = REISERFS_SB(s); 124 struct reiserfs_sb_info *sbi = REISERFS_SB(s);
123 125
124 cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
125 spin_lock(&sbi->old_work_lock); 126 spin_lock(&sbi->old_work_lock);
126 sbi->work_queued = 0; 127 /* Make sure no new flushes will be queued */
128 sbi->work_queued = 2;
127 spin_unlock(&sbi->old_work_lock); 129 spin_unlock(&sbi->old_work_lock);
130 cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
128} 131}
129 132
130static int reiserfs_freeze(struct super_block *s) 133static int reiserfs_freeze(struct super_block *s)
131{ 134{
132 struct reiserfs_transaction_handle th; 135 struct reiserfs_transaction_handle th;
133 136
134 cancel_old_flush(s); 137 reiserfs_cancel_old_flush(s);
135 138
136 reiserfs_write_lock(s); 139 reiserfs_write_lock(s);
137 if (!(s->s_flags & MS_RDONLY)) { 140 if (!(s->s_flags & MS_RDONLY)) {
@@ -152,7 +155,13 @@ static int reiserfs_freeze(struct super_block *s)
152 155
153static int reiserfs_unfreeze(struct super_block *s) 156static int reiserfs_unfreeze(struct super_block *s)
154{ 157{
158 struct reiserfs_sb_info *sbi = REISERFS_SB(s);
159
155 reiserfs_allow_writes(s); 160 reiserfs_allow_writes(s);
161 spin_lock(&sbi->old_work_lock);
162 /* Allow old_work to run again */
163 sbi->work_queued = 0;
164 spin_unlock(&sbi->old_work_lock);
156 return 0; 165 return 0;
157} 166}
158 167
@@ -2187,7 +2196,7 @@ error_unlocked:
2187 if (sbi->commit_wq) 2196 if (sbi->commit_wq)
2188 destroy_workqueue(sbi->commit_wq); 2197 destroy_workqueue(sbi->commit_wq);
2189 2198
2190 cancel_delayed_work_sync(&REISERFS_SB(s)->old_work); 2199 reiserfs_cancel_old_flush(s);
2191 2200
2192 reiserfs_free_bitmap_cache(s); 2201 reiserfs_free_bitmap_cache(s);
2193 if (SB_BUFFER_WITH_SB(s)) 2202 if (SB_BUFFER_WITH_SB(s))
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 9b1824f35501..91b036902a17 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -37,7 +37,14 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
37 error = journal_begin(&th, inode->i_sb, jcreate_blocks); 37 error = journal_begin(&th, inode->i_sb, jcreate_blocks);
38 reiserfs_write_unlock(inode->i_sb); 38 reiserfs_write_unlock(inode->i_sb);
39 if (error == 0) { 39 if (error == 0) {
40 if (type == ACL_TYPE_ACCESS && acl) {
41 error = posix_acl_update_mode(inode, &inode->i_mode,
42 &acl);
43 if (error)
44 goto unlock;
45 }
40 error = __reiserfs_set_acl(&th, inode, type, acl); 46 error = __reiserfs_set_acl(&th, inode, type, acl);
47unlock:
41 reiserfs_write_lock(inode->i_sb); 48 reiserfs_write_lock(inode->i_sb);
42 error2 = journal_end(&th); 49 error2 = journal_end(&th);
43 reiserfs_write_unlock(inode->i_sb); 50 reiserfs_write_unlock(inode->i_sb);
@@ -245,11 +252,6 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
245 switch (type) { 252 switch (type) {
246 case ACL_TYPE_ACCESS: 253 case ACL_TYPE_ACCESS:
247 name = POSIX_ACL_XATTR_ACCESS; 254 name = POSIX_ACL_XATTR_ACCESS;
248 if (acl) {
249 error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
250 if (error)
251 return error;
252 }
253 break; 255 break;
254 case ACL_TYPE_DEFAULT: 256 case ACL_TYPE_DEFAULT:
255 name = POSIX_ACL_XATTR_DEFAULT; 257 name = POSIX_ACL_XATTR_DEFAULT;
diff --git a/fs/select.c b/fs/select.c
index 015547330e88..f4dd55fc638c 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -29,6 +29,7 @@
29#include <linux/sched/rt.h> 29#include <linux/sched/rt.h>
30#include <linux/freezer.h> 30#include <linux/freezer.h>
31#include <net/busy_poll.h> 31#include <net/busy_poll.h>
32#include <linux/vmalloc.h>
32 33
33#include <asm/uaccess.h> 34#include <asm/uaccess.h>
34 35
@@ -550,7 +551,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
550 fd_set_bits fds; 551 fd_set_bits fds;
551 void *bits; 552 void *bits;
552 int ret, max_fds; 553 int ret, max_fds;
553 unsigned int size; 554 size_t size, alloc_size;
554 struct fdtable *fdt; 555 struct fdtable *fdt;
555 /* Allocate small arguments on the stack to save memory and be faster */ 556 /* Allocate small arguments on the stack to save memory and be faster */
556 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; 557 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
@@ -577,7 +578,14 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
577 if (size > sizeof(stack_fds) / 6) { 578 if (size > sizeof(stack_fds) / 6) {
578 /* Not enough space in on-stack array; must use kmalloc */ 579 /* Not enough space in on-stack array; must use kmalloc */
579 ret = -ENOMEM; 580 ret = -ENOMEM;
580 bits = kmalloc(6 * size, GFP_KERNEL); 581 if (size > (SIZE_MAX / 6))
582 goto out_nofds;
583
584 alloc_size = 6 * size;
585 bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
586 if (!bits && alloc_size > PAGE_SIZE)
587 bits = vmalloc(alloc_size);
588
581 if (!bits) 589 if (!bits)
582 goto out_nofds; 590 goto out_nofds;
583 } 591 }
@@ -614,7 +622,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
614 622
615out: 623out:
616 if (bits != stack_fds) 624 if (bits != stack_fds)
617 kfree(bits); 625 kvfree(bits);
618out_nofds: 626out_nofds:
619 return ret; 627 return ret;
620} 628}
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 0cea9b9236d0..82bc942fc437 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -166,6 +166,8 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
166 } 166 }
167 167
168 if (compressed) { 168 if (compressed) {
169 if (!msblk->stream)
170 goto read_failure;
169 length = squashfs_decompress(msblk, bh, b, offset, length, 171 length = squashfs_decompress(msblk, bh, b, offset, length,
170 output); 172 output);
171 if (length < 0) 173 if (length < 0)
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 1cb70a0b2168..91ce49c05b7c 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer,
350 350
351 TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset); 351 TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
352 352
353 if (unlikely(length < 0))
354 return -EIO;
355
353 while (length) { 356 while (length) {
354 entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); 357 entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
355 if (entry->error) { 358 if (entry->error) {
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index e5c9689062ba..1ec7bae2751d 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -194,7 +194,11 @@ static long long read_indexes(struct super_block *sb, int n,
194 } 194 }
195 195
196 for (i = 0; i < blocks; i++) { 196 for (i = 0; i < blocks; i++) {
197 int size = le32_to_cpu(blist[i]); 197 int size = squashfs_block_size(blist[i]);
198 if (size < 0) {
199 err = size;
200 goto failure;
201 }
198 block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size); 202 block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
199 } 203 }
200 n -= blocks; 204 n -= blocks;
@@ -367,7 +371,7 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
367 sizeof(size)); 371 sizeof(size));
368 if (res < 0) 372 if (res < 0)
369 return res; 373 return res;
370 return le32_to_cpu(size); 374 return squashfs_block_size(size);
371} 375}
372 376
373/* Copy data into page cache */ 377/* Copy data into page cache */
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index 0ed6edbc5c71..0681feab4a84 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -49,11 +49,16 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
49 u64 *fragment_block) 49 u64 *fragment_block)
50{ 50{
51 struct squashfs_sb_info *msblk = sb->s_fs_info; 51 struct squashfs_sb_info *msblk = sb->s_fs_info;
52 int block = SQUASHFS_FRAGMENT_INDEX(fragment); 52 int block, offset, size;
53 int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
54 u64 start_block = le64_to_cpu(msblk->fragment_index[block]);
55 struct squashfs_fragment_entry fragment_entry; 53 struct squashfs_fragment_entry fragment_entry;
56 int size; 54 u64 start_block;
55
56 if (fragment >= msblk->fragments)
57 return -EIO;
58 block = SQUASHFS_FRAGMENT_INDEX(fragment);
59 offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
60
61 start_block = le64_to_cpu(msblk->fragment_index[block]);
57 62
58 size = squashfs_read_metadata(sb, &fragment_entry, &start_block, 63 size = squashfs_read_metadata(sb, &fragment_entry, &start_block,
59 &offset, sizeof(fragment_entry)); 64 &offset, sizeof(fragment_entry));
@@ -61,9 +66,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
61 return size; 66 return size;
62 67
63 *fragment_block = le64_to_cpu(fragment_entry.start_block); 68 *fragment_block = le64_to_cpu(fragment_entry.start_block);
64 size = le32_to_cpu(fragment_entry.size); 69 return squashfs_block_size(fragment_entry.size);
65
66 return size;
67} 70}
68 71
69 72
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 506f4ba5b983..e66486366f02 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -129,6 +129,12 @@
129 129
130#define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK)) 130#define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
131 131
132static inline int squashfs_block_size(__le32 raw)
133{
134 u32 size = le32_to_cpu(raw);
135 return (size >> 25) ? -EIO : size;
136}
137
132/* 138/*
133 * Inode number ops. Inodes consist of a compressed block number, and an 139 * Inode number ops. Inodes consist of a compressed block number, and an
134 * uncompressed offset within that block 140 * uncompressed offset within that block
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 1da565cb50c3..ef69c31947bf 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -75,6 +75,7 @@ struct squashfs_sb_info {
75 unsigned short block_log; 75 unsigned short block_log;
76 long long bytes_used; 76 long long bytes_used;
77 unsigned int inodes; 77 unsigned int inodes;
78 unsigned int fragments;
78 int xattr_ids; 79 int xattr_ids;
79}; 80};
80#endif 81#endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 5056babe00df..93aa3e23c845 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -176,6 +176,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
176 msblk->inode_table = le64_to_cpu(sblk->inode_table_start); 176 msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
177 msblk->directory_table = le64_to_cpu(sblk->directory_table_start); 177 msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
178 msblk->inodes = le32_to_cpu(sblk->inodes); 178 msblk->inodes = le32_to_cpu(sblk->inodes);
179 msblk->fragments = le32_to_cpu(sblk->fragments);
179 flags = le16_to_cpu(sblk->flags); 180 flags = le16_to_cpu(sblk->flags);
180 181
181 TRACE("Found valid superblock on %s\n", bdevname(sb->s_bdev, b)); 182 TRACE("Found valid superblock on %s\n", bdevname(sb->s_bdev, b));
@@ -186,7 +187,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
186 TRACE("Filesystem size %lld bytes\n", msblk->bytes_used); 187 TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
187 TRACE("Block size %d\n", msblk->block_size); 188 TRACE("Block size %d\n", msblk->block_size);
188 TRACE("Number of inodes %d\n", msblk->inodes); 189 TRACE("Number of inodes %d\n", msblk->inodes);
189 TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments)); 190 TRACE("Number of fragments %d\n", msblk->fragments);
190 TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); 191 TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
191 TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); 192 TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
192 TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); 193 TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
@@ -273,7 +274,7 @@ allocate_id_index_table:
273 sb->s_export_op = &squashfs_export_ops; 274 sb->s_export_op = &squashfs_export_ops;
274 275
275handle_fragments: 276handle_fragments:
276 fragments = le32_to_cpu(sblk->fragments); 277 fragments = msblk->fragments;
277 if (fragments == 0) 278 if (fragments == 0)
278 goto check_directory_table; 279 goto check_directory_table;
279 280
diff --git a/fs/super.c b/fs/super.c
index d4d2591b77c8..09b526a50986 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -497,7 +497,11 @@ retry:
497 hlist_add_head(&s->s_instances, &type->fs_supers); 497 hlist_add_head(&s->s_instances, &type->fs_supers);
498 spin_unlock(&sb_lock); 498 spin_unlock(&sb_lock);
499 get_filesystem(type); 499 get_filesystem(type);
500 register_shrinker(&s->s_shrink); 500 err = register_shrinker(&s->s_shrink);
501 if (err) {
502 deactivate_locked_super(s);
503 s = ERR_PTR(err);
504 }
501 return s; 505 return s;
502} 506}
503 507
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 0b9da5b6e0f9..22dba8837a86 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -1107,7 +1107,7 @@ static int recomp_data_node(const struct ubifs_info *c,
1107 int err, len, compr_type, out_len; 1107 int err, len, compr_type, out_len;
1108 1108
1109 out_len = le32_to_cpu(dn->size); 1109 out_len = le32_to_cpu(dn->size);
1110 buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS); 1110 buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
1111 if (!buf) 1111 if (!buf)
1112 return -ENOMEM; 1112 return -ENOMEM;
1113 1113
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1fd90c079537..0bb6de356451 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1728,8 +1728,11 @@ static void ubifs_remount_ro(struct ubifs_info *c)
1728 1728
1729 dbg_save_space_info(c); 1729 dbg_save_space_info(c);
1730 1730
1731 for (i = 0; i < c->jhead_cnt; i++) 1731 for (i = 0; i < c->jhead_cnt; i++) {
1732 ubifs_wbuf_sync(&c->jheads[i].wbuf); 1732 err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
1733 if (err)
1734 ubifs_ro_mode(c, err);
1735 }
1733 1736
1734 c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); 1737 c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
1735 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); 1738 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
@@ -1795,8 +1798,11 @@ static void ubifs_put_super(struct super_block *sb)
1795 int err; 1798 int err;
1796 1799
1797 /* Synchronize write-buffers */ 1800 /* Synchronize write-buffers */
1798 for (i = 0; i < c->jhead_cnt; i++) 1801 for (i = 0; i < c->jhead_cnt; i++) {
1799 ubifs_wbuf_sync(&c->jheads[i].wbuf); 1802 err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
1803 if (err)
1804 ubifs_ro_mode(c, err);
1805 }
1800 1806
1801 /* 1807 /*
1802 * We are being cleanly unmounted which means the 1808 * We are being cleanly unmounted which means the
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index c763fda257bf..637114e8c7fd 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -150,6 +150,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
150 sizeof(struct fileIdentDesc)); 150 sizeof(struct fileIdentDesc));
151 } 151 }
152 } 152 }
153 /* Got last entry outside of dir size - fs is corrupted! */
154 if (*nf_pos > dir->i_size)
155 return NULL;
153 return fi; 156 return fi;
154} 157}
155 158
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index c97b5a8d1e24..f34c545f4e54 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -611,8 +611,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
611 if (fibh.sbh != fibh.ebh) 611 if (fibh.sbh != fibh.ebh)
612 brelse(fibh.ebh); 612 brelse(fibh.ebh);
613 brelse(fibh.sbh); 613 brelse(fibh.sbh);
614 unlock_new_inode(inode); 614 d_instantiate_new(dentry, inode);
615 d_instantiate(dentry, inode);
616 615
617 return 0; 616 return 0;
618} 617}
@@ -722,8 +721,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
722 inc_nlink(dir); 721 inc_nlink(dir);
723 dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb); 722 dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
724 mark_inode_dirty(dir); 723 mark_inode_dirty(dir);
725 unlock_new_inode(inode); 724 d_instantiate_new(dentry, inode);
726 d_instantiate(dentry, inode);
727 if (fibh.sbh != fibh.ebh) 725 if (fibh.sbh != fibh.ebh)
728 brelse(fibh.ebh); 726 brelse(fibh.ebh);
729 brelse(fibh.sbh); 727 brelse(fibh.sbh);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index ee09c97f3ab2..159977ec8e54 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -2073,8 +2073,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2073 bool lvid_open = false; 2073 bool lvid_open = false;
2074 2074
2075 uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT); 2075 uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
2076 uopt.uid = INVALID_UID; 2076 /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
2077 uopt.gid = INVALID_GID; 2077 uopt.uid = make_kuid(current_user_ns(), overflowuid);
2078 uopt.gid = make_kgid(current_user_ns(), overflowgid);
2078 uopt.umask = 0; 2079 uopt.umask = 0;
2079 uopt.fmode = UDF_INVALID_MODE; 2080 uopt.fmode = UDF_INVALID_MODE;
2080 uopt.dmode = UDF_INVALID_MODE; 2081 uopt.dmode = UDF_INVALID_MODE;
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 47966554317c..2ec7689c25cf 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -38,8 +38,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
38{ 38{
39 int err = ufs_add_link(dentry, inode); 39 int err = ufs_add_link(dentry, inode);
40 if (!err) { 40 if (!err) {
41 unlock_new_inode(inode); 41 d_instantiate_new(dentry, inode);
42 d_instantiate(dentry, inode);
43 return 0; 42 return 0;
44 } 43 }
45 inode_dec_link_count(inode); 44 inode_dec_link_count(inode);
@@ -191,8 +190,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
191 if (err) 190 if (err)
192 goto out_fail; 191 goto out_fail;
193 192
194 unlock_new_inode(inode); 193 d_instantiate_new(dentry, inode);
195 d_instantiate(dentry, inode);
196 return 0; 194 return 0;
197 195
198out_fail: 196out_fail:
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index e1e7fe3b5424..b663b756f552 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1924,6 +1924,93 @@ xfs_alloc_space_available(
1924} 1924}
1925 1925
1926/* 1926/*
1927 * Check the agfl fields of the agf for inconsistency or corruption. The purpose
1928 * is to detect an agfl header padding mismatch between current and early v5
1929 * kernels. This problem manifests as a 1-slot size difference between the
1930 * on-disk flcount and the active [first, last] range of a wrapped agfl. This
1931 * may also catch variants of agfl count corruption unrelated to padding. Either
1932 * way, we'll reset the agfl and warn the user.
1933 *
1934 * Return true if a reset is required before the agfl can be used, false
1935 * otherwise.
1936 */
1937static bool
1938xfs_agfl_needs_reset(
1939 struct xfs_mount *mp,
1940 struct xfs_agf *agf)
1941{
1942 uint32_t f = be32_to_cpu(agf->agf_flfirst);
1943 uint32_t l = be32_to_cpu(agf->agf_fllast);
1944 uint32_t c = be32_to_cpu(agf->agf_flcount);
1945 int agfl_size = XFS_AGFL_SIZE(mp);
1946 int active;
1947
1948 /* no agfl header on v4 supers */
1949 if (!xfs_sb_version_hascrc(&mp->m_sb))
1950 return false;
1951
1952 /*
1953 * The agf read verifier catches severe corruption of these fields.
1954 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
1955 * the verifier allows it.
1956 */
1957 if (f >= agfl_size || l >= agfl_size)
1958 return true;
1959 if (c > agfl_size)
1960 return true;
1961
1962 /*
1963 * Check consistency between the on-disk count and the active range. An
1964 * agfl padding mismatch manifests as an inconsistent flcount.
1965 */
1966 if (c && l >= f)
1967 active = l - f + 1;
1968 else if (c)
1969 active = agfl_size - f + l + 1;
1970 else
1971 active = 0;
1972
1973 return active != c;
1974}
1975
1976/*
1977 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
1978 * agfl content cannot be trusted. Warn the user that a repair is required to
1979 * recover leaked blocks.
1980 *
1981 * The purpose of this mechanism is to handle filesystems affected by the agfl
1982 * header padding mismatch problem. A reset keeps the filesystem online with a
1983 * relatively minor free space accounting inconsistency rather than suffer the
1984 * inevitable crash from use of an invalid agfl block.
1985 */
1986static void
1987xfs_agfl_reset(
1988 struct xfs_trans *tp,
1989 struct xfs_buf *agbp,
1990 struct xfs_perag *pag)
1991{
1992 struct xfs_mount *mp = tp->t_mountp;
1993 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
1994
1995 ASSERT(pag->pagf_agflreset);
1996 trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
1997
1998 xfs_warn(mp,
1999 "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2000 "Please unmount and run xfs_repair.",
2001 pag->pag_agno, pag->pagf_flcount);
2002
2003 agf->agf_flfirst = 0;
2004 agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
2005 agf->agf_flcount = 0;
2006 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2007 XFS_AGF_FLCOUNT);
2008
2009 pag->pagf_flcount = 0;
2010 pag->pagf_agflreset = false;
2011}
2012
2013/*
1927 * Decide whether to use this allocation group for this allocation. 2014 * Decide whether to use this allocation group for this allocation.
1928 * If so, fix up the btree freelist's size. 2015 * If so, fix up the btree freelist's size.
1929 */ 2016 */
@@ -1983,6 +2070,10 @@ xfs_alloc_fix_freelist(
1983 } 2070 }
1984 } 2071 }
1985 2072
2073 /* reset a padding mismatched agfl before final free space check */
2074 if (pag->pagf_agflreset)
2075 xfs_agfl_reset(tp, agbp, pag);
2076
1986 /* If there isn't enough total space or single-extent, reject it. */ 2077 /* If there isn't enough total space or single-extent, reject it. */
1987 need = xfs_alloc_min_freelist(mp, pag); 2078 need = xfs_alloc_min_freelist(mp, pag);
1988 if (!xfs_alloc_space_available(args, need, flags)) 2079 if (!xfs_alloc_space_available(args, need, flags))
@@ -2121,6 +2212,7 @@ xfs_alloc_get_freelist(
2121 agf->agf_flfirst = 0; 2212 agf->agf_flfirst = 0;
2122 2213
2123 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); 2214 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2215 ASSERT(!pag->pagf_agflreset);
2124 be32_add_cpu(&agf->agf_flcount, -1); 2216 be32_add_cpu(&agf->agf_flcount, -1);
2125 xfs_trans_agflist_delta(tp, -1); 2217 xfs_trans_agflist_delta(tp, -1);
2126 pag->pagf_flcount--; 2218 pag->pagf_flcount--;
@@ -2226,6 +2318,7 @@ xfs_alloc_put_freelist(
2226 agf->agf_fllast = 0; 2318 agf->agf_fllast = 0;
2227 2319
2228 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); 2320 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2321 ASSERT(!pag->pagf_agflreset);
2229 be32_add_cpu(&agf->agf_flcount, 1); 2322 be32_add_cpu(&agf->agf_flcount, 1);
2230 xfs_trans_agflist_delta(tp, 1); 2323 xfs_trans_agflist_delta(tp, 1);
2231 pag->pagf_flcount++; 2324 pag->pagf_flcount++;
@@ -2417,6 +2510,7 @@ xfs_alloc_read_agf(
2417 pag->pagb_count = 0; 2510 pag->pagb_count = 0;
2418 pag->pagb_tree = RB_ROOT; 2511 pag->pagb_tree = RB_ROOT;
2419 pag->pagf_init = 1; 2512 pag->pagf_init = 1;
2513 pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
2420 } 2514 }
2421#ifdef DEBUG 2515#ifdef DEBUG
2422 else if (!XFS_FORCED_SHUTDOWN(mp)) { 2516 else if (!XFS_FORCED_SHUTDOWN(mp)) {
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index f949818fa1c7..fb9636cc927c 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -130,9 +130,6 @@ xfs_attr_get(
130 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 130 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
131 return -EIO; 131 return -EIO;
132 132
133 if (!xfs_inode_hasattr(ip))
134 return -ENOATTR;
135
136 error = xfs_attr_args_init(&args, ip, name, flags); 133 error = xfs_attr_args_init(&args, ip, name, flags);
137 if (error) 134 if (error)
138 return error; 135 return error;
@@ -417,9 +414,6 @@ xfs_attr_remove(
417 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 414 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
418 return -EIO; 415 return -EIO;
419 416
420 if (!xfs_inode_hasattr(dp))
421 return -ENOATTR;
422
423 error = xfs_attr_args_init(&args, dp, name, flags); 417 error = xfs_attr_args_init(&args, dp, name, flags);
424 if (error) 418 if (error)
425 return error; 419 return error;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index a9063ac50c4e..da72090b9ce7 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -310,7 +310,7 @@ xfs_map_blocks(
310 (ip->i_df.if_flags & XFS_IFEXTENTS)); 310 (ip->i_df.if_flags & XFS_IFEXTENTS));
311 ASSERT(offset <= mp->m_super->s_maxbytes); 311 ASSERT(offset <= mp->m_super->s_maxbytes);
312 312
313 if (offset + count > mp->m_super->s_maxbytes) 313 if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
314 count = mp->m_super->s_maxbytes - offset; 314 count = mp->m_super->s_maxbytes - offset;
315 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 315 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
316 offset_fsb = XFS_B_TO_FSBT(mp, offset); 316 offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -1360,7 +1360,7 @@ xfs_map_trim_size(
1360 if (mapping_size > size) 1360 if (mapping_size > size)
1361 mapping_size = size; 1361 mapping_size = size;
1362 if (offset < i_size_read(inode) && 1362 if (offset < i_size_read(inode) &&
1363 offset + mapping_size >= i_size_read(inode)) { 1363 (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
1364 /* limit mapping to block that spans EOF */ 1364 /* limit mapping to block that spans EOF */
1365 mapping_size = roundup_64(i_size_read(inode) - offset, 1365 mapping_size = roundup_64(i_size_read(inode) - offset,
1366 i_blocksize(inode)); 1366 i_blocksize(inode));
@@ -1416,7 +1416,7 @@ __xfs_get_blocks(
1416 } 1416 }
1417 1417
1418 ASSERT(offset <= mp->m_super->s_maxbytes); 1418 ASSERT(offset <= mp->m_super->s_maxbytes);
1419 if (offset + size > mp->m_super->s_maxbytes) 1419 if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
1420 size = mp->m_super->s_maxbytes - offset; 1420 size = mp->m_super->s_maxbytes - offset;
1421 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); 1421 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1422 offset_fsb = XFS_B_TO_FSBT(mp, offset); 1422 offset_fsb = XFS_B_TO_FSBT(mp, offset);
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index e85a9519a5ae..64ad05cb831a 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -50,19 +50,19 @@ xfs_trim_extents(
50 50
51 pag = xfs_perag_get(mp, agno); 51 pag = xfs_perag_get(mp, agno);
52 52
53 error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
54 if (error || !agbp)
55 goto out_put_perag;
56
57 cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
58
59 /* 53 /*
60 * Force out the log. This means any transactions that might have freed 54 * Force out the log. This means any transactions that might have freed
61 * space before we took the AGF buffer lock are now on disk, and the 55 * space before we take the AGF buffer lock are now on disk, and the
62 * volatile disk cache is flushed. 56 * volatile disk cache is flushed.
63 */ 57 */
64 xfs_log_force(mp, XFS_LOG_SYNC); 58 xfs_log_force(mp, XFS_LOG_SYNC);
65 59
60 error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
61 if (error || !agbp)
62 goto out_put_perag;
63
64 cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
65
66 /* 66 /*
67 * Look up the longest btree in the AGF and start with it. 67 * Look up the longest btree in the AGF and start with it.
68 */ 68 */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 3dd47307363f..e917aec4babe 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -969,22 +969,26 @@ xfs_file_fallocate(
969 if (error) 969 if (error)
970 goto out_unlock; 970 goto out_unlock;
971 } else if (mode & FALLOC_FL_INSERT_RANGE) { 971 } else if (mode & FALLOC_FL_INSERT_RANGE) {
972 unsigned int blksize_mask = i_blocksize(inode) - 1; 972 unsigned int blksize_mask = i_blocksize(inode) - 1;
973 loff_t isize = i_size_read(inode);
973 974
974 new_size = i_size_read(inode) + len;
975 if (offset & blksize_mask || len & blksize_mask) { 975 if (offset & blksize_mask || len & blksize_mask) {
976 error = -EINVAL; 976 error = -EINVAL;
977 goto out_unlock; 977 goto out_unlock;
978 } 978 }
979 979
980 /* check the new inode size does not wrap through zero */ 980 /*
981 if (new_size > inode->i_sb->s_maxbytes) { 981 * New inode size must not exceed ->s_maxbytes, accounting for
982 * possible signed overflow.
983 */
984 if (inode->i_sb->s_maxbytes - isize < len) {
982 error = -EFBIG; 985 error = -EFBIG;
983 goto out_unlock; 986 goto out_unlock;
984 } 987 }
988 new_size = isize + len;
985 989
986 /* Offset should be less than i_size */ 990 /* Offset should be less than i_size */
987 if (offset >= i_size_read(inode)) { 991 if (offset >= isize) {
988 error = -EINVAL; 992 error = -EINVAL;
989 goto out_unlock; 993 goto out_unlock;
990 } 994 }
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index f52c72a1a06f..73b725f965eb 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3323,8 +3323,6 @@ maybe_sleep:
3323 */ 3323 */
3324 if (iclog->ic_state & XLOG_STATE_IOERROR) 3324 if (iclog->ic_state & XLOG_STATE_IOERROR)
3325 return -EIO; 3325 return -EIO;
3326 if (log_flushed)
3327 *log_flushed = 1;
3328 } else { 3326 } else {
3329 3327
3330no_sleep: 3328no_sleep:
@@ -3432,8 +3430,6 @@ try_again:
3432 3430
3433 xlog_wait(&iclog->ic_prev->ic_write_wait, 3431 xlog_wait(&iclog->ic_prev->ic_write_wait,
3434 &log->l_icloglock); 3432 &log->l_icloglock);
3435 if (log_flushed)
3436 *log_flushed = 1;
3437 already_slept = 1; 3433 already_slept = 1;
3438 goto try_again; 3434 goto try_again;
3439 } 3435 }
@@ -3467,9 +3463,6 @@ try_again:
3467 */ 3463 */
3468 if (iclog->ic_state & XLOG_STATE_IOERROR) 3464 if (iclog->ic_state & XLOG_STATE_IOERROR)
3469 return -EIO; 3465 return -EIO;
3470
3471 if (log_flushed)
3472 *log_flushed = 1;
3473 } else { /* just return */ 3466 } else { /* just return */
3474 spin_unlock(&log->l_icloglock); 3467 spin_unlock(&log->l_icloglock);
3475 } 3468 }
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index b57098481c10..ae3e52749f20 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -278,6 +278,7 @@ typedef struct xfs_perag {
278 char pagi_inodeok; /* The agi is ok for inodes */ 278 char pagi_inodeok; /* The agi is ok for inodes */
279 __uint8_t pagf_levels[XFS_BTNUM_AGF]; 279 __uint8_t pagf_levels[XFS_BTNUM_AGF];
280 /* # of levels in bno & cnt btree */ 280 /* # of levels in bno & cnt btree */
281 bool pagf_agflreset; /* agfl requires reset before use */
281 __uint32_t pagf_flcount; /* count of blocks in freelist */ 282 __uint32_t pagf_flcount; /* count of blocks in freelist */
282 xfs_extlen_t pagf_freeblks; /* total free blocks */ 283 xfs_extlen_t pagf_freeblks; /* total free blocks */
283 xfs_extlen_t pagf_longest; /* longest free space */ 284 xfs_extlen_t pagf_longest; /* longest free space */
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 572b64a135b3..b148aa0e10f7 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -47,7 +47,7 @@
47STATIC int xfs_qm_init_quotainos(xfs_mount_t *); 47STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
48STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); 48STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
49 49
50 50STATIC void xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
51STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp); 51STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
52/* 52/*
53 * We use the batch lookup interface to iterate over the dquots as it 53 * We use the batch lookup interface to iterate over the dquots as it
@@ -660,9 +660,17 @@ xfs_qm_init_quotainfo(
660 qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan; 660 qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
661 qinf->qi_shrinker.seeks = DEFAULT_SEEKS; 661 qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
662 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE; 662 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
663 register_shrinker(&qinf->qi_shrinker); 663
664 error = register_shrinker(&qinf->qi_shrinker);
665 if (error)
666 goto out_free_inos;
667
664 return 0; 668 return 0;
665 669
670out_free_inos:
671 mutex_destroy(&qinf->qi_quotaofflock);
672 mutex_destroy(&qinf->qi_tree_lock);
673 xfs_qm_destroy_quotainos(qinf);
666out_free_lru: 674out_free_lru:
667 list_lru_destroy(&qinf->qi_lru); 675 list_lru_destroy(&qinf->qi_lru);
668out_free_qinf: 676out_free_qinf:
@@ -671,7 +679,6 @@ out_free_qinf:
671 return error; 679 return error;
672} 680}
673 681
674
675/* 682/*
676 * Gets called when unmounting a filesystem or when all quotas get 683 * Gets called when unmounting a filesystem or when all quotas get
677 * turned off. 684 * turned off.
@@ -688,19 +695,8 @@ xfs_qm_destroy_quotainfo(
688 695
689 unregister_shrinker(&qi->qi_shrinker); 696 unregister_shrinker(&qi->qi_shrinker);
690 list_lru_destroy(&qi->qi_lru); 697 list_lru_destroy(&qi->qi_lru);
691 698 xfs_qm_destroy_quotainos(qi);
692 if (qi->qi_uquotaip) { 699 mutex_destroy(&qi->qi_tree_lock);
693 IRELE(qi->qi_uquotaip);
694 qi->qi_uquotaip = NULL; /* paranoia */
695 }
696 if (qi->qi_gquotaip) {
697 IRELE(qi->qi_gquotaip);
698 qi->qi_gquotaip = NULL;
699 }
700 if (qi->qi_pquotaip) {
701 IRELE(qi->qi_pquotaip);
702 qi->qi_pquotaip = NULL;
703 }
704 mutex_destroy(&qi->qi_quotaofflock); 700 mutex_destroy(&qi->qi_quotaofflock);
705 kmem_free(qi); 701 kmem_free(qi);
706 mp->m_quotainfo = NULL; 702 mp->m_quotainfo = NULL;
@@ -1562,6 +1558,24 @@ error_rele:
1562} 1558}
1563 1559
1564STATIC void 1560STATIC void
1561xfs_qm_destroy_quotainos(
1562 xfs_quotainfo_t *qi)
1563{
1564 if (qi->qi_uquotaip) {
1565 IRELE(qi->qi_uquotaip);
1566 qi->qi_uquotaip = NULL; /* paranoia */
1567 }
1568 if (qi->qi_gquotaip) {
1569 IRELE(qi->qi_gquotaip);
1570 qi->qi_gquotaip = NULL;
1571 }
1572 if (qi->qi_pquotaip) {
1573 IRELE(qi->qi_pquotaip);
1574 qi->qi_pquotaip = NULL;
1575 }
1576}
1577
1578STATIC void
1565xfs_qm_dqfree_one( 1579xfs_qm_dqfree_one(
1566 struct xfs_dquot *dqp) 1580 struct xfs_dquot *dqp)
1567{ 1581{
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 877079eb0f8f..cc6fa64821d2 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -1485,7 +1485,7 @@ TRACE_EVENT(xfs_trans_commit_lsn,
1485 __entry->lsn) 1485 __entry->lsn)
1486); 1486);
1487 1487
1488TRACE_EVENT(xfs_agf, 1488DECLARE_EVENT_CLASS(xfs_agf_class,
1489 TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, 1489 TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
1490 unsigned long caller_ip), 1490 unsigned long caller_ip),
1491 TP_ARGS(mp, agf, flags, caller_ip), 1491 TP_ARGS(mp, agf, flags, caller_ip),
@@ -1541,6 +1541,13 @@ TRACE_EVENT(xfs_agf,
1541 __entry->longest, 1541 __entry->longest,
1542 (void *)__entry->caller_ip) 1542 (void *)__entry->caller_ip)
1543); 1543);
1544#define DEFINE_AGF_EVENT(name) \
1545DEFINE_EVENT(xfs_agf_class, name, \
1546 TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \
1547 unsigned long caller_ip), \
1548 TP_ARGS(mp, agf, flags, caller_ip))
1549DEFINE_AGF_EVENT(xfs_agf);
1550DEFINE_AGF_EVENT(xfs_agfl_reset);
1544 1551
1545TRACE_EVENT(xfs_free_extent, 1552TRACE_EVENT(xfs_free_extent,
1546 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, 1553 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index bf2d34c9d804..f0d8b1c51343 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -13,7 +13,7 @@
13 */ 13 */
14 14
15/** 15/**
16 * futex_atomic_op_inuser() - Atomic arithmetic operation with constant 16 * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
17 * argument and comparison of the previous 17 * argument and comparison of the previous
18 * futex value with another constant. 18 * futex value with another constant.
19 * 19 *
@@ -25,18 +25,11 @@
25 * <0 - On error 25 * <0 - On error
26 */ 26 */
27static inline int 27static inline int
28futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 28arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
29{ 29{
30 int op = (encoded_op >> 28) & 7;
31 int cmp = (encoded_op >> 24) & 15;
32 int oparg = (encoded_op << 8) >> 20;
33 int cmparg = (encoded_op << 20) >> 20;
34 int oldval, ret; 30 int oldval, ret;
35 u32 tmp; 31 u32 tmp;
36 32
37 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
38 oparg = 1 << oparg;
39
40 preempt_disable(); 33 preempt_disable();
41 pagefault_disable(); 34 pagefault_disable();
42 35
@@ -74,17 +67,9 @@ out_pagefault_enable:
74 pagefault_enable(); 67 pagefault_enable();
75 preempt_enable(); 68 preempt_enable();
76 69
77 if (ret == 0) { 70 if (ret == 0)
78 switch (cmp) { 71 *oval = oldval;
79 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 72
80 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
81 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
82 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
83 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
84 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
85 default: ret = -ENOSYS;
86 }
87 }
88 return ret; 73 return ret;
89} 74}
90 75
@@ -126,18 +111,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
126 111
127#else 112#else
128static inline int 113static inline int
129futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) 114arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
130{ 115{
131 int op = (encoded_op >> 28) & 7;
132 int cmp = (encoded_op >> 24) & 15;
133 int oparg = (encoded_op << 8) >> 20;
134 int cmparg = (encoded_op << 20) >> 20;
135 int oldval = 0, ret; 116 int oldval = 0, ret;
136 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
137 oparg = 1 << oparg;
138
139 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
140 return -EFAULT;
141 117
142 pagefault_disable(); 118 pagefault_disable();
143 119
@@ -153,17 +129,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
153 129
154 pagefault_enable(); 130 pagefault_enable();
155 131
156 if (!ret) { 132 if (!ret)
157 switch (cmp) { 133 *oval = oldval;
158 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 134
159 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
160 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
161 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
162 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
163 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
164 default: ret = -ENOSYS;
165 }
166 }
167 return ret; 135 return ret;
168} 136}
169 137
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 14b0ff32fb9f..53a47d75cc43 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -237,6 +237,21 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
237extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); 237extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
238#endif 238#endif
239 239
240#ifdef CONFIG_TRANSPARENT_HUGEPAGE
241/*
242 * This is an implementation of pmdp_establish() that is only suitable for an
243 * architecture that doesn't have hardware dirty/accessed bits. In this case we
244 * can't race with CPU which sets these bits and non-atomic aproach is fine.
245 */
246static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
247 unsigned long address, pmd_t *pmdp, pmd_t pmd)
248{
249 pmd_t old_pmd = *pmdp;
250 set_pmd_at(vma->vm_mm, address, pmdp, pmd);
251 return old_pmd;
252}
253#endif
254
240#ifndef __HAVE_ARCH_PMDP_INVALIDATE 255#ifndef __HAVE_ARCH_PMDP_INVALIDATE
241extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 256extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
242 pmd_t *pmdp); 257 pmd_t *pmdp);
@@ -755,6 +770,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
755int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); 770int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
756int pud_clear_huge(pud_t *pud); 771int pud_clear_huge(pud_t *pud);
757int pmd_clear_huge(pmd_t *pmd); 772int pmd_clear_huge(pmd_t *pmd);
773int pud_free_pmd_page(pud_t *pud, unsigned long addr);
774int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
758#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ 775#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
759static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) 776static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
760{ 777{
@@ -772,8 +789,28 @@ static inline int pmd_clear_huge(pmd_t *pmd)
772{ 789{
773 return 0; 790 return 0;
774} 791}
792static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
793{
794 return 0;
795}
796static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
797{
798 return 0;
799}
775#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ 800#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
776 801
802#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
803static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
804{
805 return true;
806}
807
808static inline bool arch_has_pfn_modify_check(void)
809{
810 return false;
811}
812#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
813
777#endif /* !__ASSEMBLY__ */ 814#endif /* !__ASSEMBLY__ */
778 815
779#ifndef io_remap_pfn_range 816#ifndef io_remap_pfn_range
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 9779c35f8454..dab9569f22bf 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -91,6 +91,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
91 return alg->setkey != shash_no_setkey; 91 return alg->setkey != shash_no_setkey;
92} 92}
93 93
94bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
95
94int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, 96int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
95 struct hash_alg_common *alg, 97 struct hash_alg_common *alg,
96 struct crypto_instance *inst); 98 struct crypto_instance *inst);
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index 894df59b74e4..d586f741cab5 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -30,8 +30,6 @@ struct poly1305_desc_ctx {
30}; 30};
31 31
32int crypto_poly1305_init(struct shash_desc *desc); 32int crypto_poly1305_init(struct shash_desc *desc);
33int crypto_poly1305_setkey(struct crypto_shash *tfm,
34 const u8 *key, unsigned int keylen);
35unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, 33unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
36 const u8 *src, unsigned int srclen); 34 const u8 *src, unsigned int srclen);
37int crypto_poly1305_update(struct shash_desc *desc, 35int crypto_poly1305_update(struct shash_desc *desc,
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
deleted file mode 100644
index 6b700c7b2fe1..000000000000
--- a/include/crypto/vmac.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Modified to interface to the Linux kernel
3 * Copyright (c) 2009, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 */
18
19#ifndef __CRYPTO_VMAC_H
20#define __CRYPTO_VMAC_H
21
22/* --------------------------------------------------------------------------
23 * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
24 * This implementation is herby placed in the public domain.
25 * The authors offers no warranty. Use at your own risk.
26 * Please send bug reports to the authors.
27 * Last modified: 17 APR 08, 1700 PDT
28 * ----------------------------------------------------------------------- */
29
30/*
31 * User definable settings.
32 */
33#define VMAC_TAG_LEN 64
34#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
35#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
36#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
37
38/*
39 * This implementation uses u32 and u64 as names for unsigned 32-
40 * and 64-bit integer types. These are defined in C99 stdint.h. The
41 * following may need adaptation if you are not running a C99 or
42 * Microsoft C environment.
43 */
44struct vmac_ctx {
45 u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
46 u64 polykey[2*VMAC_TAG_LEN/64];
47 u64 l3key[2*VMAC_TAG_LEN/64];
48 u64 polytmp[2*VMAC_TAG_LEN/64];
49 u64 cached_nonce[2];
50 u64 cached_aes[2];
51 int first_block_processed;
52};
53
54typedef u64 vmac_t;
55
56struct vmac_ctx_t {
57 struct crypto_cipher *child;
58 struct vmac_ctx __vmac_ctx;
59 u8 partial[VMAC_NHBYTES]; /* partial block */
60 int partial_size; /* size of the partial block */
61};
62
63#endif /* __CRYPTO_VMAC_H */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 3febb4b9fce9..d842bec3d271 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -241,5 +241,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
241extern void drm_kms_helper_poll_disable(struct drm_device *dev); 241extern void drm_kms_helper_poll_disable(struct drm_device *dev);
242extern void drm_kms_helper_poll_enable(struct drm_device *dev); 242extern void drm_kms_helper_poll_enable(struct drm_device *dev);
243extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); 243extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
244extern bool drm_kms_helper_is_poll_worker(void);
244 245
245#endif 246#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index bb9d0deca07c..0fb4975fae91 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -342,6 +342,7 @@
342# define DP_PSR_FRAME_CAPTURE (1 << 3) 342# define DP_PSR_FRAME_CAPTURE (1 << 3)
343# define DP_PSR_SELECTIVE_UPDATE (1 << 4) 343# define DP_PSR_SELECTIVE_UPDATE (1 << 4)
344# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5) 344# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
345# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */
345 346
346#define DP_ADAPTER_CTRL 0x1a0 347#define DP_ADAPTER_CTRL 0x1a0
347# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) 348# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
diff --git a/include/linux/audit.h b/include/linux/audit.h
index faac391badac..9b95bb222e73 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -26,6 +26,7 @@
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <linux/ptrace.h> 27#include <linux/ptrace.h>
28#include <uapi/linux/audit.h> 28#include <uapi/linux/audit.h>
29#include <linux/tty.h>
29 30
30#define AUDIT_INO_UNSET ((unsigned long)-1) 31#define AUDIT_INO_UNSET ((unsigned long)-1)
31#define AUDIT_DEV_UNSET ((dev_t)-1) 32#define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -239,6 +240,23 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
239 return tsk->sessionid; 240 return tsk->sessionid;
240} 241}
241 242
243static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
244{
245 struct tty_struct *tty = NULL;
246 unsigned long flags;
247
248 spin_lock_irqsave(&tsk->sighand->siglock, flags);
249 if (tsk->signal)
250 tty = tty_kref_get(tsk->signal->tty);
251 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
252 return tty;
253}
254
255static inline void audit_put_tty(struct tty_struct *tty)
256{
257 tty_kref_put(tty);
258}
259
242extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); 260extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
243extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); 261extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
244extern void __audit_bprm(struct linux_binprm *bprm); 262extern void __audit_bprm(struct linux_binprm *bprm);
@@ -410,6 +428,12 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
410{ 428{
411 return -1; 429 return -1;
412} 430}
431static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
432{
433 return NULL;
434}
435static inline void audit_put_tty(struct tty_struct *tty)
436{ }
413static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) 437static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
414{ } 438{ }
415static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, 439static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 140c29635069..a307c37c2e6c 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -191,6 +191,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
191 set_wb_congested(bdi->wb.congested, sync); 191 set_wb_congested(bdi->wb.congested, sync);
192} 192}
193 193
194struct wb_lock_cookie {
195 bool locked;
196 unsigned long flags;
197};
198
194#ifdef CONFIG_CGROUP_WRITEBACK 199#ifdef CONFIG_CGROUP_WRITEBACK
195 200
196/** 201/**
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 89d3de3e096b..361274ce5815 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -366,7 +366,7 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
366/** 366/**
367 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction 367 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
368 * @inode: target inode 368 * @inode: target inode
369 * @lockedp: temp bool output param, to be passed to the end function 369 * @cookie: output param, to be passed to the end function
370 * 370 *
371 * The caller wants to access the wb associated with @inode but isn't 371 * The caller wants to access the wb associated with @inode but isn't
372 * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This 372 * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
@@ -374,12 +374,12 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
374 * association doesn't change until the transaction is finished with 374 * association doesn't change until the transaction is finished with
375 * unlocked_inode_to_wb_end(). 375 * unlocked_inode_to_wb_end().
376 * 376 *
377 * The caller must call unlocked_inode_to_wb_end() with *@lockdep 377 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
378 * afterwards and can't sleep during transaction. IRQ may or may not be 378 * can't sleep during the transaction. IRQs may or may not be disabled on
379 * disabled on return. 379 * return.
380 */ 380 */
381static inline struct bdi_writeback * 381static inline struct bdi_writeback *
382unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) 382unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
383{ 383{
384 rcu_read_lock(); 384 rcu_read_lock();
385 385
@@ -387,10 +387,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
387 * Paired with store_release in inode_switch_wb_work_fn() and 387 * Paired with store_release in inode_switch_wb_work_fn() and
388 * ensures that we see the new wb if we see cleared I_WB_SWITCH. 388 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
389 */ 389 */
390 *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; 390 cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
391 391
392 if (unlikely(*lockedp)) 392 if (unlikely(cookie->locked))
393 spin_lock_irq(&inode->i_mapping->tree_lock); 393 spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);
394 394
395 /* 395 /*
396 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock. 396 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
@@ -402,12 +402,14 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
402/** 402/**
403 * unlocked_inode_to_wb_end - end inode wb access transaction 403 * unlocked_inode_to_wb_end - end inode wb access transaction
404 * @inode: target inode 404 * @inode: target inode
405 * @locked: *@lockedp from unlocked_inode_to_wb_begin() 405 * @cookie: @cookie from unlocked_inode_to_wb_begin()
406 */ 406 */
407static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) 407static inline void unlocked_inode_to_wb_end(struct inode *inode,
408 struct wb_lock_cookie *cookie)
408{ 409{
409 if (unlikely(locked)) 410 if (unlikely(cookie->locked))
410 spin_unlock_irq(&inode->i_mapping->tree_lock); 411 spin_unlock_irqrestore(&inode->i_mapping->tree_lock,
412 cookie->flags);
411 413
412 rcu_read_unlock(); 414 rcu_read_unlock();
413} 415}
@@ -454,12 +456,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
454} 456}
455 457
456static inline struct bdi_writeback * 458static inline struct bdi_writeback *
457unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) 459unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
458{ 460{
459 return inode_to_wb(inode); 461 return inode_to_wb(inode);
460} 462}
461 463
462static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) 464static inline void unlocked_inode_to_wb_end(struct inode *inode,
465 struct wb_lock_cookie *cookie)
463{ 466{
464} 467}
465 468
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fe14382f9664..1383e1c03ff2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -882,8 +882,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
882 if (!q->limits.chunk_sectors) 882 if (!q->limits.chunk_sectors)
883 return q->limits.max_sectors; 883 return q->limits.max_sectors;
884 884
885 return q->limits.chunk_sectors - 885 return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
886 (offset & (q->limits.chunk_sectors - 1)); 886 (offset & (q->limits.chunk_sectors - 1))));
887} 887}
888 888
889static inline unsigned int blk_rq_get_max_sectors(struct request *rq) 889static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f2157159b26f..132585a7fbd8 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -31,17 +31,25 @@ struct bpf_map_ops {
31}; 31};
32 32
33struct bpf_map { 33struct bpf_map {
34 atomic_t refcnt; 34 /* 1st cacheline with read-mostly members of which some
35 * are also accessed in fast-path (e.g. ops, max_entries).
36 */
37 const struct bpf_map_ops *ops ____cacheline_aligned;
35 enum bpf_map_type map_type; 38 enum bpf_map_type map_type;
36 u32 key_size; 39 u32 key_size;
37 u32 value_size; 40 u32 value_size;
38 u32 max_entries; 41 u32 max_entries;
39 u32 pages; 42 u32 pages;
40 bool unpriv_array; 43 bool unpriv_array;
41 struct user_struct *user; 44 /* 7 bytes hole */
42 const struct bpf_map_ops *ops; 45
43 struct work_struct work; 46 /* 2nd cacheline with misc members to avoid false sharing
47 * particularly with refcounting.
48 */
49 struct user_struct *user ____cacheline_aligned;
50 atomic_t refcnt;
44 atomic_t usercnt; 51 atomic_t usercnt;
52 struct work_struct work;
45}; 53};
46 54
47struct bpf_map_type_list { 55struct bpf_map_type_list {
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 2189935075b4..a951fd10aaaa 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -71,6 +71,7 @@ struct cpu_cacheinfo {
71 struct cacheinfo *info_list; 71 struct cacheinfo *info_list;
72 unsigned int num_levels; 72 unsigned int num_levels;
73 unsigned int num_leaves; 73 unsigned int num_leaves;
74 bool cpu_map_populated;
74}; 75};
75 76
76/* 77/*
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index d1e49d52b640..de179993e039 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -10,3 +10,8 @@
10#undef uninitialized_var 10#undef uninitialized_var
11#define uninitialized_var(x) x = *(&(x)) 11#define uninitialized_var(x) x = *(&(x))
12#endif 12#endif
13
14/* same as gcc, this was present in clang-2.6 so we can assume it works
15 * with any version that can compile the kernel
16 */
17#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 287e698c28de..143d40e8a1ea 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -65,21 +65,40 @@
65#endif 65#endif
66 66
67/* 67/*
68 * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
69 * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
70 * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
71 * defined so the gnu89 semantics are the default.
72 */
73#ifdef __GNUC_STDC_INLINE__
74# define __gnu_inline __attribute__((gnu_inline))
75#else
76# define __gnu_inline
77#endif
78
79/*
68 * Force always-inline if the user requests it so via the .config, 80 * Force always-inline if the user requests it so via the .config,
69 * or if gcc is too old: 81 * or if gcc is too old.
82 * GCC does not warn about unused static inline functions for
83 * -Wunused-function. This turns out to avoid the need for complex #ifdef
84 * directives. Suppress the warning in clang as well by using "unused"
85 * function attribute, which is redundant but not harmful for gcc.
86 * Prefer gnu_inline, so that extern inline functions do not emit an
87 * externally visible function. This makes extern inline behave as per gnu89
88 * semantics rather than c99. This prevents multiple symbol definition errors
89 * of extern inline functions at link time.
90 * A lot of inline functions can cause havoc with function tracing.
70 */ 91 */
71#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ 92#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
72 !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) 93 !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
73#define inline inline __attribute__((always_inline)) notrace 94#define inline \
74#define __inline__ __inline__ __attribute__((always_inline)) notrace 95 inline __attribute__((always_inline, unused)) notrace __gnu_inline
75#define __inline __inline __attribute__((always_inline)) notrace
76#else 96#else
77/* A lot of inline functions can cause havoc with function tracing */ 97#define inline inline __attribute__((unused)) notrace __gnu_inline
78#define inline inline notrace
79#define __inline__ __inline__ notrace
80#define __inline __inline notrace
81#endif 98#endif
82 99
100#define __inline__ inline
101#define __inline inline
83#define __always_inline inline __attribute__((always_inline)) 102#define __always_inline inline __attribute__((always_inline))
84#define noinline __attribute__((noinline)) 103#define noinline __attribute__((noinline))
85 104
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 6fc9a6dd5ed2..0db1fa621d8a 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -111,7 +111,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
111#define unlikely_notrace(x) __builtin_expect(!!(x), 0) 111#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
112 112
113#define __branch_check__(x, expect) ({ \ 113#define __branch_check__(x, expect) ({ \
114 int ______r; \ 114 long ______r; \
115 static struct ftrace_branch_data \ 115 static struct ftrace_branch_data \
116 __attribute__((__aligned__(4))) \ 116 __attribute__((__aligned__(4))) \
117 __attribute__((section("_ftrace_annotated_branch"))) \ 117 __attribute__((section("_ftrace_annotated_branch"))) \
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7e04bcd9af8e..063c73ed6d78 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -46,6 +46,10 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev,
46 struct device_attribute *attr, char *buf); 46 struct device_attribute *attr, char *buf);
47extern ssize_t cpu_show_spectre_v2(struct device *dev, 47extern ssize_t cpu_show_spectre_v2(struct device *dev,
48 struct device_attribute *attr, char *buf); 48 struct device_attribute *attr, char *buf);
49extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
50 struct device_attribute *attr, char *buf);
51extern ssize_t cpu_show_l1tf(struct device *dev,
52 struct device_attribute *attr, char *buf);
49 53
50extern __printf(4, 5) 54extern __printf(4, 5)
51struct device *cpu_device_create(struct device *parent, void *drvdata, 55struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index a91b3b75da0f..bb3a4bb35183 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -661,6 +661,11 @@ void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
661void free_cpumask_var(cpumask_var_t mask); 661void free_cpumask_var(cpumask_var_t mask);
662void free_bootmem_cpumask_var(cpumask_var_t mask); 662void free_bootmem_cpumask_var(cpumask_var_t mask);
663 663
664static inline bool cpumask_available(cpumask_var_t mask)
665{
666 return mask != NULL;
667}
668
664#else 669#else
665typedef struct cpumask cpumask_var_t[1]; 670typedef struct cpumask cpumask_var_t[1];
666 671
@@ -701,6 +706,11 @@ static inline void free_cpumask_var(cpumask_var_t mask)
701static inline void free_bootmem_cpumask_var(cpumask_var_t mask) 706static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
702{ 707{
703} 708}
709
710static inline bool cpumask_available(cpumask_var_t mask)
711{
712 return true;
713}
704#endif /* CONFIG_CPUMASK_OFFSTACK */ 714#endif /* CONFIG_CPUMASK_OFFSTACK */
705 715
706/* It's common to want to use cpu_all_mask in struct member initializers, 716/* It's common to want to use cpu_all_mask in struct member initializers,
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d516847e0fae..11f4334ab177 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -236,6 +236,7 @@ extern seqlock_t rename_lock;
236 * These are the low-level FS interfaces to the dcache.. 236 * These are the low-level FS interfaces to the dcache..
237 */ 237 */
238extern void d_instantiate(struct dentry *, struct inode *); 238extern void d_instantiate(struct dentry *, struct inode *);
239extern void d_instantiate_new(struct dentry *, struct inode *);
239extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); 240extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
240extern int d_instantiate_no_diralias(struct dentry *, struct inode *); 241extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
241extern void __d_drop(struct dentry *dentry); 242extern void __d_drop(struct dentry *dentry);
diff --git a/include/linux/device.h b/include/linux/device.h
index 7075a2485ed3..834000903525 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1272,8 +1272,11 @@ do { \
1272 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ 1272 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
1273} while (0) 1273} while (0)
1274#else 1274#else
1275#define dev_dbg_ratelimited(dev, fmt, ...) \ 1275#define dev_dbg_ratelimited(dev, fmt, ...) \
1276 no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) 1276do { \
1277 if (0) \
1278 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
1279} while (0)
1277#endif 1280#endif
1278 1281
1279#ifdef VERBOSE_DEBUG 1282#ifdef VERBOSE_DEBUG
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index fc481037478a..19baa7f4f403 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -17,6 +17,7 @@
17#define __DMA_IOMMU_H 17#define __DMA_IOMMU_H
18 18
19#ifdef __KERNEL__ 19#ifdef __KERNEL__
20#include <linux/types.h>
20#include <asm/errno.h> 21#include <asm/errno.h>
21 22
22#ifdef CONFIG_IOMMU_DMA 23#ifdef CONFIG_IOMMU_DMA
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 16a1cad30c33..cff20d515efe 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -800,6 +800,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
800 sg_dma_address(&sg) = buf; 800 sg_dma_address(&sg) = buf;
801 sg_dma_len(&sg) = len; 801 sg_dma_len(&sg) = len;
802 802
803 if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
804 return NULL;
805
803 return chan->device->device_prep_slave_sg(chan, &sg, 1, 806 return chan->device->device_prep_slave_sg(chan, &sg, 1,
804 dir, flags, NULL); 807 dir, flags, NULL);
805} 808}
@@ -808,6 +811,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
808 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 811 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
809 enum dma_transfer_direction dir, unsigned long flags) 812 enum dma_transfer_direction dir, unsigned long flags)
810{ 813{
814 if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
815 return NULL;
816
811 return chan->device->device_prep_slave_sg(chan, sgl, sg_len, 817 return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
812 dir, flags, NULL); 818 dir, flags, NULL);
813} 819}
@@ -819,6 +825,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
819 enum dma_transfer_direction dir, unsigned long flags, 825 enum dma_transfer_direction dir, unsigned long flags,
820 struct rio_dma_ext *rio_ext) 826 struct rio_dma_ext *rio_ext)
821{ 827{
828 if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
829 return NULL;
830
822 return chan->device->device_prep_slave_sg(chan, sgl, sg_len, 831 return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
823 dir, flags, rio_ext); 832 dir, flags, rio_ext);
824} 833}
@@ -829,6 +838,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
829 size_t period_len, enum dma_transfer_direction dir, 838 size_t period_len, enum dma_transfer_direction dir,
830 unsigned long flags) 839 unsigned long flags)
831{ 840{
841 if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
842 return NULL;
843
832 return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, 844 return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
833 period_len, dir, flags); 845 period_len, dir, flags);
834} 846}
@@ -837,6 +849,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
837 struct dma_chan *chan, struct dma_interleaved_template *xt, 849 struct dma_chan *chan, struct dma_interleaved_template *xt,
838 unsigned long flags) 850 unsigned long flags)
839{ 851{
852 if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
853 return NULL;
854
840 return chan->device->device_prep_interleaved_dma(chan, xt, flags); 855 return chan->device->device_prep_interleaved_dma(chan, xt, flags);
841} 856}
842 857
@@ -844,7 +859,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
844 struct dma_chan *chan, dma_addr_t dest, int value, size_t len, 859 struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
845 unsigned long flags) 860 unsigned long flags)
846{ 861{
847 if (!chan || !chan->device) 862 if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
848 return NULL; 863 return NULL;
849 864
850 return chan->device->device_prep_dma_memset(chan, dest, value, 865 return chan->device->device_prep_dma_memset(chan, dest, value,
@@ -857,6 +872,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
857 struct scatterlist *src_sg, unsigned int src_nents, 872 struct scatterlist *src_sg, unsigned int src_nents,
858 unsigned long flags) 873 unsigned long flags)
859{ 874{
875 if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
876 return NULL;
877
860 return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, 878 return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
861 src_sg, src_nents, flags); 879 src_sg, src_nents, flags);
862} 880}
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 333d0ca6940f..b558bf8f1a7b 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -364,8 +364,8 @@ typedef struct {
364 u32 attributes; 364 u32 attributes;
365 u32 get_bar_attributes; 365 u32 get_bar_attributes;
366 u32 set_bar_attributes; 366 u32 set_bar_attributes;
367 uint64_t romsize; 367 u64 romsize;
368 void *romimage; 368 u32 romimage;
369} efi_pci_io_protocol_32; 369} efi_pci_io_protocol_32;
370 370
371typedef struct { 371typedef struct {
@@ -384,8 +384,8 @@ typedef struct {
384 u64 attributes; 384 u64 attributes;
385 u64 get_bar_attributes; 385 u64 get_bar_attributes;
386 u64 set_bar_attributes; 386 u64 set_bar_attributes;
387 uint64_t romsize; 387 u64 romsize;
388 void *romimage; 388 u64 romimage;
389} efi_pci_io_protocol_64; 389} efi_pci_io_protocol_64;
390 390
391typedef struct { 391typedef struct {
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 5295535b60c6..a7b7a050bfa8 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -9,6 +9,7 @@
9#include <linux/compiler.h> 9#include <linux/compiler.h>
10#include <linux/spinlock.h> 10#include <linux/spinlock.h>
11#include <linux/rcupdate.h> 11#include <linux/rcupdate.h>
12#include <linux/nospec.h>
12#include <linux/types.h> 13#include <linux/types.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/fs.h> 15#include <linux/fs.h>
@@ -81,8 +82,10 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
81{ 82{
82 struct fdtable *fdt = rcu_dereference_raw(files->fdt); 83 struct fdtable *fdt = rcu_dereference_raw(files->fdt);
83 84
84 if (fd < fdt->max_fds) 85 if (fd < fdt->max_fds) {
86 fd = array_index_nospec(fd, fdt->max_fds);
85 return rcu_dereference_raw(fdt->fd[fd]); 87 return rcu_dereference_raw(fdt->fd[fd]);
88 }
86 return NULL; 89 return NULL;
87} 90}
88 91
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c8decb7075d6..240cbaee819f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -926,9 +926,9 @@ static inline struct file *get_file(struct file *f)
926/* Page cache limit. The filesystems should put that into their s_maxbytes 926/* Page cache limit. The filesystems should put that into their s_maxbytes
927 limits, otherwise bad things can happen in VM. */ 927 limits, otherwise bad things can happen in VM. */
928#if BITS_PER_LONG==32 928#if BITS_PER_LONG==32
929#define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 929#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
930#elif BITS_PER_LONG==64 930#elif BITS_PER_LONG==64
931#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) 931#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
932#endif 932#endif
933 933
934#define FL_POSIX 1 934#define FL_POSIX 1
@@ -1295,6 +1295,7 @@ struct mm_struct;
1295/* sb->s_iflags */ 1295/* sb->s_iflags */
1296#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ 1296#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
1297#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */ 1297#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
1298#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */
1298 1299
1299/* Possible states of 'frozen' field */ 1300/* Possible states of 'frozen' field */
1300enum { 1301enum {
@@ -3066,5 +3067,6 @@ static inline bool dir_relax(struct inode *inode)
3066} 3067}
3067 3068
3068extern bool path_noexec(const struct path *path); 3069extern bool path_noexec(const struct path *path);
3070extern void inode_nohighmem(struct inode *inode);
3069 3071
3070#endif /* _LINUX_FS_H */ 3072#endif /* _LINUX_FS_H */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 251a1d382e23..fd86687f8119 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -793,7 +793,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
793extern void hidinput_disconnect(struct hid_device *); 793extern void hidinput_disconnect(struct hid_device *);
794 794
795int hid_set_field(struct hid_field *, unsigned, __s32); 795int hid_set_field(struct hid_field *, unsigned, __s32);
796int hid_input_report(struct hid_device *, int type, u8 *, int, int); 796int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
797int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field); 797int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
798struct hid_field *hidinput_get_led_field(struct hid_device *hid); 798struct hid_field *hidinput_get_led_field(struct hid_device *hid);
799unsigned int hidinput_count_leds(struct hid_device *hid); 799unsigned int hidinput_count_leds(struct hid_device *hid);
@@ -1098,13 +1098,13 @@ static inline void hid_hw_wait(struct hid_device *hdev)
1098 * 1098 *
1099 * @report: the report we want to know the length 1099 * @report: the report we want to know the length
1100 */ 1100 */
1101static inline int hid_report_len(struct hid_report *report) 1101static inline u32 hid_report_len(struct hid_report *report)
1102{ 1102{
1103 /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ 1103 /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
1104 return ((report->size - 1) >> 3) + 1 + (report->id > 0); 1104 return ((report->size - 1) >> 3) + 1 + (report->id > 0);
1105} 1105}
1106 1106
1107int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, 1107int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
1108 int interrupt); 1108 int interrupt);
1109 1109
1110/* HID quirks API */ 1110/* HID quirks API */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 19db03dbbd00..dd676ba758ee 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -585,7 +585,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
585 * Returns true if the skb is tagged with multiple vlan headers, regardless 585 * Returns true if the skb is tagged with multiple vlan headers, regardless
586 * of whether it is hardware accelerated or not. 586 * of whether it is hardware accelerated or not.
587 */ 587 */
588static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) 588static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
589{ 589{
590 __be16 protocol = skb->protocol; 590 __be16 protocol = skb->protocol;
591 591
@@ -596,6 +596,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
596 protocol != htons(ETH_P_8021AD))) 596 protocol != htons(ETH_P_8021AD)))
597 return false; 597 return false;
598 598
599 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
600 return false;
601
599 veh = (struct vlan_ethhdr *)skb->data; 602 veh = (struct vlan_ethhdr *)skb->data;
600 protocol = veh->h_vlan_encapsulated_proto; 603 protocol = veh->h_vlan_encapsulated_proto;
601 } 604 }
@@ -613,7 +616,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
613 * 616 *
614 * Returns features without unsafe ones if the skb has multiple tags. 617 * Returns features without unsafe ones if the skb has multiple tags.
615 */ 618 */
616static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, 619static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
617 netdev_features_t features) 620 netdev_features_t features)
618{ 621{
619 if (skb_vlan_tagged_multi(skb)) { 622 if (skb_vlan_tagged_multi(skb)) {
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 1600c55828e0..93a774ce4922 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -49,7 +49,7 @@ struct iio_buffer_access_funcs {
49 int (*request_update)(struct iio_buffer *buffer); 49 int (*request_update)(struct iio_buffer *buffer);
50 50
51 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); 51 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
52 int (*set_length)(struct iio_buffer *buffer, int length); 52 int (*set_length)(struct iio_buffer *buffer, unsigned int length);
53 53
54 void (*release)(struct iio_buffer *buffer); 54 void (*release)(struct iio_buffer *buffer);
55 55
@@ -78,8 +78,8 @@ struct iio_buffer_access_funcs {
78 * @watermark: [INTERN] number of datums to wait for poll/read. 78 * @watermark: [INTERN] number of datums to wait for poll/read.
79 */ 79 */
80struct iio_buffer { 80struct iio_buffer {
81 int length; 81 unsigned int length;
82 int bytes_per_datum; 82 size_t bytes_per_datum;
83 struct attribute_group *scan_el_attrs; 83 struct attribute_group *scan_el_attrs;
84 long *scan_mask; 84 long *scan_mask;
85 bool scan_timestamp; 85 bool scan_timestamp;
diff --git a/include/linux/init.h b/include/linux/init.h
index aedb254abc37..3561ea30ed8c 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -4,6 +4,13 @@
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <linux/types.h> 5#include <linux/types.h>
6 6
7/* Built-in __init functions needn't be compiled with retpoline */
8#if defined(RETPOLINE) && !defined(MODULE)
9#define __noretpoline __attribute__((indirect_branch("keep")))
10#else
11#define __noretpoline
12#endif
13
7/* These macros are used to mark some functions or 14/* These macros are used to mark some functions or
8 * initialized data (doesn't apply to uninitialized data) 15 * initialized data (doesn't apply to uninitialized data)
9 * as `initialization' functions. The kernel can take this 16 * as `initialization' functions. The kernel can take this
@@ -39,7 +46,7 @@
39 46
40/* These are for everybody (although not all archs will actually 47/* These are for everybody (although not all archs will actually
41 discard it in modules) */ 48 discard it in modules) */
42#define __init __section(.init.text) __cold notrace 49#define __init __section(.init.text) __cold notrace __noretpoline
43#define __initdata __section(.init.data) 50#define __initdata __section(.init.data)
44#define __initconst __constsection(.init.rodata) 51#define __initconst __constsection(.init.rodata)
45#define __exitdata __section(.exit.data) 52#define __exitdata __section(.exit.data)
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 5fdc55312334..2fb10601febe 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -1,6 +1,7 @@
1#ifndef _LINUX_JIFFIES_H 1#ifndef _LINUX_JIFFIES_H
2#define _LINUX_JIFFIES_H 2#define _LINUX_JIFFIES_H
3 3
4#include <linux/cache.h>
4#include <linux/math64.h> 5#include <linux/math64.h>
5#include <linux/kernel.h> 6#include <linux/kernel.h>
6#include <linux/types.h> 7#include <linux/types.h>
@@ -63,19 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate);
63/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ 64/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
64#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) 65#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
65 66
66/* some arch's have a small-data section that can be accessed register-relative 67#ifndef __jiffy_arch_data
67 * but that can only take up to, say, 4-byte variables. jiffies being part of 68#define __jiffy_arch_data
68 * an 8-byte variable may not be correctly accessed unless we force the issue 69#endif
69 */
70#define __jiffy_data __attribute__((section(".data")))
71 70
72/* 71/*
73 * The 64-bit value is not atomic - you MUST NOT read it 72 * The 64-bit value is not atomic - you MUST NOT read it
74 * without sampling the sequence number in jiffies_lock. 73 * without sampling the sequence number in jiffies_lock.
75 * get_jiffies_64() will do this for you as appropriate. 74 * get_jiffies_64() will do this for you as appropriate.
76 */ 75 */
77extern u64 __jiffy_data jiffies_64; 76extern u64 __cacheline_aligned_in_smp jiffies_64;
78extern unsigned long volatile __jiffy_data jiffies; 77extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
79 78
80#if (BITS_PER_LONG < 64) 79#if (BITS_PER_LONG < 64)
81u64 get_jiffies_64(void); 80u64 get_jiffies_64(void);
diff --git a/include/linux/kaiser.h b/include/linux/kaiser.h
index 58c55b1589d0..b56c19010480 100644
--- a/include/linux/kaiser.h
+++ b/include/linux/kaiser.h
@@ -32,7 +32,7 @@ static inline void kaiser_init(void)
32{ 32{
33} 33}
34static inline int kaiser_add_mapping(unsigned long addr, 34static inline int kaiser_add_mapping(unsigned long addr,
35 unsigned long size, unsigned long flags) 35 unsigned long size, u64 flags)
36{ 36{
37 return 0; 37 return 0;
38} 38}
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 2b6a204bd8d4..3ffc69ebe967 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -64,6 +64,13 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
64 ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; }) 64 ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
65 65
66/* 66/*
67 * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
68 * this means that you must check the result for overflow yourself.
69 */
70#define ktime_add_unsafe(lhs, rhs) \
71 ({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })
72
73/*
67 * Add a ktime_t variable and a scalar nanosecond value. 74 * Add a ktime_t variable and a scalar nanosecond value.
68 * res = kt + nsval: 75 * res = kt + nsval:
69 */ 76 */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b20a2752f934..6428ac4746de 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -210,6 +210,7 @@ enum {
210 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ 210 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
211 /* (doesn't imply presence) */ 211 /* (doesn't imply presence) */
212 ATA_FLAG_SATA = (1 << 1), 212 ATA_FLAG_SATA = (1 << 1),
213 ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */
213 ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */ 214 ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
214 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ 215 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
215 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ 216 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fd4ca0b4fe0f..ac6796138ba0 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -88,6 +88,23 @@ static inline void init_llist_head(struct llist_head *list)
88 container_of(ptr, type, member) 88 container_of(ptr, type, member)
89 89
90/** 90/**
91 * member_address_is_nonnull - check whether the member address is not NULL
92 * @ptr: the object pointer (struct type * that contains the llist_node)
93 * @member: the name of the llist_node within the struct.
94 *
95 * This macro is conceptually the same as
96 * &ptr->member != NULL
97 * but it works around the fact that compilers can decide that taking a member
98 * address is never a NULL pointer.
99 *
100 * Real objects that start at a high address and have a member at NULL are
101 * unlikely to exist, but such pointers may be returned e.g. by the
102 * container_of() macro.
103 */
104#define member_address_is_nonnull(ptr, member) \
105 ((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0)
106
107/**
91 * llist_for_each - iterate over some deleted entries of a lock-less list 108 * llist_for_each - iterate over some deleted entries of a lock-less list
92 * @pos: the &struct llist_node to use as a loop cursor 109 * @pos: the &struct llist_node to use as a loop cursor
93 * @node: the first entry of deleted list entries 110 * @node: the first entry of deleted list entries
@@ -121,7 +138,7 @@ static inline void init_llist_head(struct llist_head *list)
121 */ 138 */
122#define llist_for_each_entry(pos, node, member) \ 139#define llist_for_each_entry(pos, node, member) \
123 for ((pos) = llist_entry((node), typeof(*(pos)), member); \ 140 for ((pos) = llist_entry((node), typeof(*(pos)), member); \
124 &(pos)->member != NULL; \ 141 member_address_is_nonnull(pos, member); \
125 (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) 142 (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
126 143
127/** 144/**
@@ -143,7 +160,7 @@ static inline void init_llist_head(struct llist_head *list)
143 */ 160 */
144#define llist_for_each_entry_safe(pos, n, node, member) \ 161#define llist_for_each_entry_safe(pos, n, node, member) \
145 for (pos = llist_entry((node), typeof(*pos), member); \ 162 for (pos = llist_entry((node), typeof(*pos), member); \
146 &pos->member != NULL && \ 163 member_address_is_nonnull(pos, member) && \
147 (n = llist_entry(pos->member.next, typeof(*n), member), true); \ 164 (n = llist_entry(pos->member.next, typeof(*n), member), true); \
148 pos = n) 165 pos = n)
149 166
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index fe052e234906..bb1018882199 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -465,6 +465,7 @@ struct mlx4_update_qp_params {
465 u16 rate_val; 465 u16 rate_val;
466}; 466};
467 467
468struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
468int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, 469int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
469 enum mlx4_update_qp_attr attr, 470 enum mlx4_update_qp_attr attr,
470 struct mlx4_update_qp_params *params); 471 struct mlx4_update_qp_params *params);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index a91b67b18a73..5c93f4a89afa 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -635,8 +635,14 @@ enum {
635}; 635};
636 636
637enum { 637enum {
638 CQE_RSS_HTYPE_IP = 0x3 << 6, 638 CQE_RSS_HTYPE_IP = 0x3 << 2,
639 CQE_RSS_HTYPE_L4 = 0x3 << 2, 639 /* cqe->rss_hash_type[3:2] - IP destination selected for hash
640 * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
641 */
642 CQE_RSS_HTYPE_L4 = 0x3 << 6,
643 /* cqe->rss_hash_type[7:6] - L4 destination selected for hash
644 * (00 = none, 01 = TCP. 10 = UDP, 11 = IPSEC.SPI
645 */
640}; 646};
641 647
642enum { 648enum {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5c2bf9096b7a..5c105bf78aa2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -225,10 +225,14 @@ extern pgprot_t protection_map[16];
225 * ->fault function. The vma's ->fault is responsible for returning a bitmask 225 * ->fault function. The vma's ->fault is responsible for returning a bitmask
226 * of VM_FAULT_xxx flags that give details about how the fault was handled. 226 * of VM_FAULT_xxx flags that give details about how the fault was handled.
227 * 227 *
228 * MM layer fills up gfp_mask for page allocations but fault handler might
229 * alter it if its implementation requires a different allocation context.
230 *
228 * pgoff should be used in favour of virtual_address, if possible. 231 * pgoff should be used in favour of virtual_address, if possible.
229 */ 232 */
230struct vm_fault { 233struct vm_fault {
231 unsigned int flags; /* FAULT_FLAG_xxx flags */ 234 unsigned int flags; /* FAULT_FLAG_xxx flags */
235 gfp_t gfp_mask; /* gfp mask to be used for allocations */
232 pgoff_t pgoff; /* Logical page offset based on vma */ 236 pgoff_t pgoff; /* Logical page offset based on vma */
233 void __user *virtual_address; /* Faulting virtual address */ 237 void __user *virtual_address; /* Faulting virtual address */
234 238
@@ -2079,6 +2083,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2079int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); 2083int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2080int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2084int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2081 unsigned long pfn); 2085 unsigned long pfn);
2086int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2087 unsigned long pfn, pgprot_t pgprot);
2082int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 2088int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2083 unsigned long pfn); 2089 unsigned long pfn);
2084int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); 2090int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 83430f2ea757..e0325706b76d 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -33,6 +33,7 @@
33#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d 33#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
34#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 34#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
35#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 35#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
36#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4
36#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 37#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
37#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 38#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
38#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 39#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
diff --git a/include/linux/module.h b/include/linux/module.h
index b229a9961d02..c9f2f85017ad 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -789,6 +789,15 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
789static inline void module_bug_cleanup(struct module *mod) {} 789static inline void module_bug_cleanup(struct module *mod) {}
790#endif /* CONFIG_GENERIC_BUG */ 790#endif /* CONFIG_GENERIC_BUG */
791 791
792#ifdef RETPOLINE
793extern bool retpoline_module_ok(bool has_retpoline);
794#else
795static inline bool retpoline_module_ok(bool has_retpoline)
796{
797 return true;
798}
799#endif
800
792#ifdef CONFIG_MODULE_SIG 801#ifdef CONFIG_MODULE_SIG
793static inline bool module_sig_ok(struct module *module) 802static inline bool module_sig_ok(struct module *module)
794{ 803{
diff --git a/include/linux/msi.h b/include/linux/msi.h
index f0f43ec45ee7..d0d50cf00b4d 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -17,7 +17,13 @@ struct msi_desc;
17struct pci_dev; 17struct pci_dev;
18struct platform_msi_priv_data; 18struct platform_msi_priv_data;
19void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 19void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
20#ifdef CONFIG_GENERIC_MSI_IRQ
20void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); 21void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
22#else
23static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
24{
25}
26#endif
21 27
22typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc, 28typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
23 struct msi_msg *msg); 29 struct msi_msg *msg);
@@ -105,18 +111,21 @@ struct msi_desc {
105 111
106struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); 112struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
107void *msi_desc_to_pci_sysdata(struct msi_desc *desc); 113void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
114void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
108#else /* CONFIG_PCI_MSI */ 115#else /* CONFIG_PCI_MSI */
109static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc) 116static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
110{ 117{
111 return NULL; 118 return NULL;
112} 119}
120static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
121{
122}
113#endif /* CONFIG_PCI_MSI */ 123#endif /* CONFIG_PCI_MSI */
114 124
115struct msi_desc *alloc_msi_entry(struct device *dev); 125struct msi_desc *alloc_msi_entry(struct device *dev);
116void free_msi_entry(struct msi_desc *entry); 126void free_msi_entry(struct msi_desc *entry);
117void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 127void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
118void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 128void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
119void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
120 129
121u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); 130u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
122u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); 131u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa457febd..3529683f691e 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -85,6 +85,7 @@ struct flchip {
85 unsigned int write_suspended:1; 85 unsigned int write_suspended:1;
86 unsigned int erase_suspended:1; 86 unsigned int erase_suspended:1;
87 unsigned long in_progress_block_addr; 87 unsigned long in_progress_block_addr;
88 unsigned long in_progress_block_mask;
88 89
89 struct mutex mutex; 90 struct mutex mutex;
90 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip 91 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 806d0ab845e0..676d3d2a1a0a 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -265,75 +265,67 @@ void map_destroy(struct mtd_info *mtd);
265#define INVALIDATE_CACHED_RANGE(map, from, size) \ 265#define INVALIDATE_CACHED_RANGE(map, from, size) \
266 do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) 266 do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
267 267
268 268#define map_word_equal(map, val1, val2) \
269static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2) 269({ \
270{ 270 int i, ret = 1; \
271 int i; 271 for (i = 0; i < map_words(map); i++) \
272 272 if ((val1).x[i] != (val2).x[i]) { \
273 for (i = 0; i < map_words(map); i++) { 273 ret = 0; \
274 if (val1.x[i] != val2.x[i]) 274 break; \
275 return 0; 275 } \
276 } 276 ret; \
277 277})
278 return 1; 278
279} 279#define map_word_and(map, val1, val2) \
280 280({ \
281static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2) 281 map_word r; \
282{ 282 int i; \
283 map_word r; 283 for (i = 0; i < map_words(map); i++) \
284 int i; 284 r.x[i] = (val1).x[i] & (val2).x[i]; \
285 285 r; \
286 for (i = 0; i < map_words(map); i++) 286})
287 r.x[i] = val1.x[i] & val2.x[i]; 287
288 288#define map_word_clr(map, val1, val2) \
289 return r; 289({ \
290} 290 map_word r; \
291 291 int i; \
292static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2) 292 for (i = 0; i < map_words(map); i++) \
293{ 293 r.x[i] = (val1).x[i] & ~(val2).x[i]; \
294 map_word r; 294 r; \
295 int i; 295})
296 296
297 for (i = 0; i < map_words(map); i++) 297#define map_word_or(map, val1, val2) \
298 r.x[i] = val1.x[i] & ~val2.x[i]; 298({ \
299 299 map_word r; \
300 return r; 300 int i; \
301} 301 for (i = 0; i < map_words(map); i++) \
302 302 r.x[i] = (val1).x[i] | (val2).x[i]; \
303static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2) 303 r; \
304{ 304})
305 map_word r; 305
306 int i; 306#define map_word_andequal(map, val1, val2, val3) \
307 307({ \
308 for (i = 0; i < map_words(map); i++) 308 int i, ret = 1; \
309 r.x[i] = val1.x[i] | val2.x[i]; 309 for (i = 0; i < map_words(map); i++) { \
310 310 if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \
311 return r; 311 ret = 0; \
312} 312 break; \
313 313 } \
314static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3) 314 } \
315{ 315 ret; \
316 int i; 316})
317 317
318 for (i = 0; i < map_words(map); i++) { 318#define map_word_bitsset(map, val1, val2) \
319 if ((val1.x[i] & val2.x[i]) != val3.x[i]) 319({ \
320 return 0; 320 int i, ret = 0; \
321 } 321 for (i = 0; i < map_words(map); i++) { \
322 322 if ((val1).x[i] & (val2).x[i]) { \
323 return 1; 323 ret = 1; \
324} 324 break; \
325 325 } \
326static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2) 326 } \
327{ 327 ret; \
328 int i; 328})
329
330 for (i = 0; i < map_words(map); i++) {
331 if (val1.x[i] & val2.x[i])
332 return 1;
333 }
334
335 return 0;
336}
337 329
338static inline map_word map_word_load(struct map_info *map, const void *ptr) 330static inline map_word map_word_load(struct map_info *map, const void *ptr)
339{ 331{
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index 1c28f8879b1c..067b37aff4a1 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -148,6 +148,7 @@ struct sh_flctl {
148 struct platform_device *pdev; 148 struct platform_device *pdev;
149 struct dev_pm_qos_request pm_qos; 149 struct dev_pm_qos_request pm_qos;
150 void __iomem *reg; 150 void __iomem *reg;
151 resource_size_t fifo;
151 152
152 uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */ 153 uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
153 int read_bytes; 154 int read_bytes;
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 1d6a935c1ac5..8793f5a7b820 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -65,8 +65,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value)
65static inline u32 65static inline u32
66ip_set_timeout_get(unsigned long *timeout) 66ip_set_timeout_get(unsigned long *timeout)
67{ 67{
68 return *timeout == IPSET_ELEM_PERMANENT ? 0 : 68 u32 t;
69 jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; 69
70 if (*timeout == IPSET_ELEM_PERMANENT)
71 return 0;
72
73 t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
74 /* Zero value in userspace means no timeout */
75 return t == 0 ? 1 : t;
70} 76}
71 77
72#endif /* __KERNEL__ */ 78#endif /* __KERNEL__ */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 04078e8a4803..6923e4049de3 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -243,6 +243,12 @@ int xt_check_entry_offsets(const void *base, const char *elems,
243 unsigned int target_offset, 243 unsigned int target_offset,
244 unsigned int next_offset); 244 unsigned int next_offset);
245 245
246unsigned int *xt_alloc_entry_offsets(unsigned int size);
247bool xt_find_jump_offset(const unsigned int *offsets,
248 unsigned int target, unsigned int size);
249
250int xt_check_proc_name(const char *name, unsigned int size);
251
246int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, 252int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
247 bool inv_proto); 253 bool inv_proto);
248int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, 254int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
@@ -364,38 +370,14 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
364 return ret; 370 return ret;
365} 371}
366 372
373struct xt_percpu_counter_alloc_state {
374 unsigned int off;
375 const char __percpu *mem;
376};
367 377
368/* On SMP, ip(6)t_entry->counters.pcnt holds address of the 378bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
369 * real (percpu) counter. On !SMP, its just the packet count, 379 struct xt_counters *counter);
370 * so nothing needs to be done there. 380void xt_percpu_counter_free(struct xt_counters *cnt);
371 *
372 * xt_percpu_counter_alloc returns the address of the percpu
373 * counter, or 0 on !SMP. We force an alignment of 16 bytes
374 * so that bytes/packets share a common cache line.
375 *
376 * Hence caller must use IS_ERR_VALUE to check for error, this
377 * allows us to return 0 for single core systems without forcing
378 * callers to deal with SMP vs. NONSMP issues.
379 */
380static inline u64 xt_percpu_counter_alloc(void)
381{
382 if (nr_cpu_ids > 1) {
383 void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
384 sizeof(struct xt_counters));
385
386 if (res == NULL)
387 return (u64) -ENOMEM;
388
389 return (u64) (__force unsigned long) res;
390 }
391
392 return 0;
393}
394static inline void xt_percpu_counter_free(u64 pcnt)
395{
396 if (nr_cpu_ids > 1)
397 free_percpu((void __percpu *) (unsigned long) pcnt);
398}
399 381
400static inline struct xt_counters * 382static inline struct xt_counters *
401xt_get_this_cpu_counter(struct xt_counters *cnt) 383xt_get_this_cpu_counter(struct xt_counters *cnt)
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
new file mode 100644
index 000000000000..0c5ef54fd416
--- /dev/null
+++ b/include/linux/nospec.h
@@ -0,0 +1,68 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright(c) 2018 Linus Torvalds. All rights reserved.
3// Copyright(c) 2018 Alexei Starovoitov. All rights reserved.
4// Copyright(c) 2018 Intel Corporation. All rights reserved.
5
6#ifndef _LINUX_NOSPEC_H
7#define _LINUX_NOSPEC_H
8#include <asm/barrier.h>
9
10struct task_struct;
11
12/**
13 * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
14 * @index: array element index
15 * @size: number of elements in array
16 *
17 * When @index is out of bounds (@index >= @size), the sign bit will be
18 * set. Extend the sign bit to all bits and invert, giving a result of
19 * zero for an out of bounds index, or ~0 if within bounds [0, @size).
20 */
21#ifndef array_index_mask_nospec
22static inline unsigned long array_index_mask_nospec(unsigned long index,
23 unsigned long size)
24{
25 /*
26 * Always calculate and emit the mask even if the compiler
27 * thinks the mask is not needed. The compiler does not take
28 * into account the value of @index under speculation.
29 */
30 OPTIMIZER_HIDE_VAR(index);
31 return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
32}
33#endif
34
35/*
36 * array_index_nospec - sanitize an array index after a bounds check
37 *
38 * For a code sequence like:
39 *
40 * if (index < size) {
41 * index = array_index_nospec(index, size);
42 * val = array[index];
43 * }
44 *
45 * ...if the CPU speculates past the bounds check then
46 * array_index_nospec() will clamp the index within the range of [0,
47 * size).
48 */
49#define array_index_nospec(index, size) \
50({ \
51 typeof(index) _i = (index); \
52 typeof(size) _s = (size); \
53 unsigned long _mask = array_index_mask_nospec(_i, _s); \
54 \
55 BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
56 BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
57 \
58 (typeof(_i)) (_i & _mask); \
59})
60
61/* Speculation control prctl */
62int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
63int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
64 unsigned long ctrl);
65/* Speculation control for seccomp enforced mitigation */
66void arch_seccomp_spec_mitigate(struct task_struct *task);
67
68#endif /* _LINUX_NOSPEC_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index fbfadba81c5a..771774e13f10 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -153,7 +153,7 @@ static inline int page_cache_get_speculative(struct page *page)
153 153
154#ifdef CONFIG_TINY_RCU 154#ifdef CONFIG_TINY_RCU
155# ifdef CONFIG_PREEMPT_COUNT 155# ifdef CONFIG_PREEMPT_COUNT
156 VM_BUG_ON(!in_atomic()); 156 VM_BUG_ON(!in_atomic() && !irqs_disabled());
157# endif 157# endif
158 /* 158 /*
159 * Preempt must be disabled here - we rely on rcu_read_lock doing 159 * Preempt must be disabled here - we rely on rcu_read_lock doing
@@ -191,7 +191,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
191 191
192#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) 192#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
193# ifdef CONFIG_PREEMPT_COUNT 193# ifdef CONFIG_PREEMPT_COUNT
194 VM_BUG_ON(!in_atomic()); 194 VM_BUG_ON(!in_atomic() && !irqs_disabled());
195# endif 195# endif
196 VM_BUG_ON_PAGE(page_count(page) == 0, page); 196 VM_BUG_ON_PAGE(page_count(page) == 0, page);
197 atomic_add(count, &page->_count); 197 atomic_add(count, &page->_count);
diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h
index 1419133fa69e..4ac1a070af0a 100644
--- a/include/linux/platform_data/isl9305.h
+++ b/include/linux/platform_data/isl9305.h
@@ -24,7 +24,7 @@
24struct regulator_init_data; 24struct regulator_init_data;
25 25
26struct isl9305_pdata { 26struct isl9305_pdata {
27 struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR]; 27 struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR + 1];
28}; 28};
29 29
30#endif 30#endif
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 34c4498b800f..83b22ae9ae12 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -59,23 +59,23 @@ struct posix_clock_operations {
59 59
60 int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx); 60 int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx);
61 61
62 int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts); 62 int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts);
63 63
64 int (*clock_getres) (struct posix_clock *pc, struct timespec *ts); 64 int (*clock_getres) (struct posix_clock *pc, struct timespec64 *ts);
65 65
66 int (*clock_settime)(struct posix_clock *pc, 66 int (*clock_settime)(struct posix_clock *pc,
67 const struct timespec *ts); 67 const struct timespec64 *ts);
68 68
69 int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit); 69 int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit);
70 70
71 int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit); 71 int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit);
72 72
73 void (*timer_gettime)(struct posix_clock *pc, 73 void (*timer_gettime)(struct posix_clock *pc,
74 struct k_itimer *kit, struct itimerspec *tsp); 74 struct k_itimer *kit, struct itimerspec64 *tsp);
75 75
76 int (*timer_settime)(struct posix_clock *pc, 76 int (*timer_settime)(struct posix_clock *pc,
77 struct k_itimer *kit, int flags, 77 struct k_itimer *kit, int flags,
78 struct itimerspec *tsp, struct itimerspec *old); 78 struct itimerspec64 *tsp, struct itimerspec64 *old);
79 /* 79 /*
80 * Optional character device methods: 80 * Optional character device methods:
81 */ 81 */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 4acc552e9279..19d0778ec382 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -162,6 +162,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
162void ring_buffer_record_off(struct ring_buffer *buffer); 162void ring_buffer_record_off(struct ring_buffer *buffer);
163void ring_buffer_record_on(struct ring_buffer *buffer); 163void ring_buffer_record_on(struct ring_buffer *buffer);
164int ring_buffer_record_is_on(struct ring_buffer *buffer); 164int ring_buffer_record_is_on(struct ring_buffer *buffer);
165int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
165void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); 166void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
166void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); 167void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
167 168
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e887c8d6f395..725498cc5d30 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1313,6 +1313,7 @@ struct sched_dl_entity {
1313 u64 dl_deadline; /* relative deadline of each instance */ 1313 u64 dl_deadline; /* relative deadline of each instance */
1314 u64 dl_period; /* separation of two instances (period) */ 1314 u64 dl_period; /* separation of two instances (period) */
1315 u64 dl_bw; /* dl_runtime / dl_deadline */ 1315 u64 dl_bw; /* dl_runtime / dl_deadline */
1316 u64 dl_density; /* dl_runtime / dl_deadline */
1316 1317
1317 /* 1318 /*
1318 * Actual scheduling parameters. Initialized with the values above, 1319 * Actual scheduling parameters. Initialized with the values above,
@@ -2166,6 +2167,8 @@ static inline void memalloc_noio_restore(unsigned int flags)
2166#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ 2167#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
2167#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ 2168#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
2168#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 2169#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
2170#define PFA_SPEC_SSB_DISABLE 4 /* Speculative Store Bypass disabled */
2171#define PFA_SPEC_SSB_FORCE_DISABLE 5 /* Speculative Store Bypass force disabled*/
2169 2172
2170 2173
2171#define TASK_PFA_TEST(name, func) \ 2174#define TASK_PFA_TEST(name, func) \
@@ -2189,6 +2192,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2189TASK_PFA_SET(SPREAD_SLAB, spread_slab) 2192TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2190TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) 2193TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2191 2194
2195TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
2196TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
2197TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
2198
2199TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
2200TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
2201
2192/* 2202/*
2193 * task->jobctl flags 2203 * task->jobctl flags
2194 */ 2204 */
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 2296e6b2f690..5a53d34bba26 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -3,7 +3,8 @@
3 3
4#include <uapi/linux/seccomp.h> 4#include <uapi/linux/seccomp.h>
5 5
6#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC) 6#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
7 SECCOMP_FILTER_FLAG_SPEC_ALLOW)
7 8
8#ifdef CONFIG_SECCOMP 9#ifdef CONFIG_SECCOMP
9 10
diff --git a/include/linux/signal.h b/include/linux/signal.h
index d80259afb9e5..bcc094cb697c 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -97,6 +97,23 @@ static inline int sigisemptyset(sigset_t *set)
97 } 97 }
98} 98}
99 99
100static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
101{
102 switch (_NSIG_WORDS) {
103 case 4:
104 return (set1->sig[3] == set2->sig[3]) &&
105 (set1->sig[2] == set2->sig[2]) &&
106 (set1->sig[1] == set2->sig[1]) &&
107 (set1->sig[0] == set2->sig[0]);
108 case 2:
109 return (set1->sig[1] == set2->sig[1]) &&
110 (set1->sig[0] == set2->sig[0]);
111 case 1:
112 return set1->sig[0] == set2->sig[0];
113 }
114 return 0;
115}
116
100#define sigmask(sig) (1UL << ((sig) - 1)) 117#define sigmask(sig) (1UL << ((sig) - 1))
101 118
102#ifndef __HAVE_ARCH_SIG_SETOPS 119#ifndef __HAVE_ARCH_SIG_SETOPS
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b5421f6f155a..c28bd8be290a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -514,6 +514,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
514 * @hash: the packet hash 514 * @hash: the packet hash
515 * @queue_mapping: Queue mapping for multiqueue devices 515 * @queue_mapping: Queue mapping for multiqueue devices
516 * @xmit_more: More SKBs are pending for this queue 516 * @xmit_more: More SKBs are pending for this queue
517 * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
517 * @ndisc_nodetype: router type (from link layer) 518 * @ndisc_nodetype: router type (from link layer)
518 * @ooo_okay: allow the mapping of a socket to a queue to be changed 519 * @ooo_okay: allow the mapping of a socket to a queue to be changed
519 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport 520 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -594,8 +595,8 @@ struct sk_buff {
594 fclone:2, 595 fclone:2,
595 peeked:1, 596 peeked:1,
596 head_frag:1, 597 head_frag:1,
597 xmit_more:1; 598 xmit_more:1,
598 /* one bit hole */ 599 pfmemalloc:1;
599 kmemcheck_bitfield_end(flags1); 600 kmemcheck_bitfield_end(flags1);
600 601
601 /* fields enclosed in headers_start/headers_end are copied 602 /* fields enclosed in headers_start/headers_end are copied
@@ -615,19 +616,18 @@ struct sk_buff {
615 616
616 __u8 __pkt_type_offset[0]; 617 __u8 __pkt_type_offset[0];
617 __u8 pkt_type:3; 618 __u8 pkt_type:3;
618 __u8 pfmemalloc:1;
619 __u8 ignore_df:1; 619 __u8 ignore_df:1;
620 __u8 nfctinfo:3; 620 __u8 nfctinfo:3;
621
622 __u8 nf_trace:1; 621 __u8 nf_trace:1;
622
623 __u8 ip_summed:2; 623 __u8 ip_summed:2;
624 __u8 ooo_okay:1; 624 __u8 ooo_okay:1;
625 __u8 l4_hash:1; 625 __u8 l4_hash:1;
626 __u8 sw_hash:1; 626 __u8 sw_hash:1;
627 __u8 wifi_acked_valid:1; 627 __u8 wifi_acked_valid:1;
628 __u8 wifi_acked:1; 628 __u8 wifi_acked:1;
629
630 __u8 no_fcs:1; 629 __u8 no_fcs:1;
630
631 /* Indicates the inner headers are valid in the skbuff. */ 631 /* Indicates the inner headers are valid in the skbuff. */
632 __u8 encapsulation:1; 632 __u8 encapsulation:1;
633 __u8 encap_hdr_csum:1; 633 __u8 encap_hdr_csum:1;
@@ -635,11 +635,11 @@ struct sk_buff {
635 __u8 csum_complete_sw:1; 635 __u8 csum_complete_sw:1;
636 __u8 csum_level:2; 636 __u8 csum_level:2;
637 __u8 csum_bad:1; 637 __u8 csum_bad:1;
638
639#ifdef CONFIG_IPV6_NDISC_NODETYPE 638#ifdef CONFIG_IPV6_NDISC_NODETYPE
640 __u8 ndisc_nodetype:2; 639 __u8 ndisc_nodetype:2;
641#endif 640#endif
642 __u8 ipvs_property:1; 641 __u8 ipvs_property:1;
642
643 __u8 inner_protocol_type:1; 643 __u8 inner_protocol_type:1;
644 __u8 remcsum_offload:1; 644 __u8 remcsum_offload:1;
645 /* 3 or 5 bit hole */ 645 /* 3 or 5 bit hole */
@@ -879,10 +879,10 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
879 unsigned int headroom); 879 unsigned int headroom);
880struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, 880struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
881 int newtailroom, gfp_t priority); 881 int newtailroom, gfp_t priority);
882int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 882int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
883 int offset, int len); 883 int offset, int len);
884int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, 884int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
885 int len); 885 int offset, int len);
886int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); 886int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
887int skb_pad(struct sk_buff *skb, int pad); 887int skb_pad(struct sk_buff *skb, int pad);
888#define dev_kfree_skb(a) consume_skb(a) 888#define dev_kfree_skb(a) consume_skb(a)
diff --git a/include/linux/string.h b/include/linux/string.h
index aa30789b0f65..98bb781a2eff 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -122,6 +122,7 @@ extern char *kstrdup(const char *s, gfp_t gfp);
122extern const char *kstrdup_const(const char *s, gfp_t gfp); 122extern const char *kstrdup_const(const char *s, gfp_t gfp);
123extern char *kstrndup(const char *s, size_t len, gfp_t gfp); 123extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
124extern void *kmemdup(const void *src, size_t len, gfp_t gfp); 124extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
125extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
125 126
126extern char **argv_split(gfp_t gfp, const char *str, int *argcp); 127extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
127extern void argv_free(char **argv); 128extern void argv_free(char **argv);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 8b6ec7ef0854..4a69bca7c6ab 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -377,6 +377,8 @@ extern int swsusp_page_is_forbidden(struct page *);
377extern void swsusp_set_page_free(struct page *); 377extern void swsusp_set_page_free(struct page *);
378extern void swsusp_unset_page_free(struct page *); 378extern void swsusp_unset_page_free(struct page *);
379extern unsigned long get_safe_page(gfp_t gfp_mask); 379extern unsigned long get_safe_page(gfp_t gfp_mask);
380extern asmlinkage int swsusp_arch_suspend(void);
381extern asmlinkage int swsusp_arch_resume(void);
380 382
381extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); 383extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
382extern int hibernate(void); 384extern int hibernate(void);
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 388293a91e8c..e4594de79bc4 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -9,5 +9,7 @@ extern spinlock_t swap_lock;
9extern struct plist_head swap_active_head; 9extern struct plist_head swap_active_head;
10extern struct swap_info_struct *swap_info[]; 10extern struct swap_info_struct *swap_info[];
11extern int try_to_unuse(unsigned int, bool, unsigned long); 11extern int try_to_unuse(unsigned int, bool, unsigned long);
12extern unsigned long generic_max_swapfile_size(void);
13extern unsigned long max_swapfile_size(void);
12 14
13#endif /* _LINUX_SWAPFILE_H */ 15#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 318c24612458..5b6df1a8dc74 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
29 return (struct tcphdr *)skb_transport_header(skb); 29 return (struct tcphdr *)skb_transport_header(skb);
30} 30}
31 31
32static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
33{
34 return th->doff * 4;
35}
36
32static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) 37static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
33{ 38{
34 return tcp_hdr(skb)->doff * 4; 39 return __tcp_hdrlen(tcp_hdr(skb));
35} 40}
36 41
37static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb) 42static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
@@ -319,7 +324,7 @@ struct tcp_sock {
319 324
320/* Receiver queue space */ 325/* Receiver queue space */
321 struct { 326 struct {
322 int space; 327 u32 space;
323 u32 seq; 328 u32 seq;
324 u32 time; 329 u32 time;
325 } rcvq_space; 330 } rcvq_space;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index eded095fe81e..1cfa07c86bc5 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -55,11 +55,7 @@ extern long do_no_restart_syscall(struct restart_block *parm);
55 55
56#ifdef __KERNEL__ 56#ifdef __KERNEL__
57 57
58#ifdef CONFIG_DEBUG_STACK_USAGE 58#define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
59# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
60#else
61# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK)
62#endif
63 59
64/* 60/*
65 * flag set/clear/test wrappers 61 * flag set/clear/test wrappers
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index f0f1793cfa49..115216ec7cfe 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -56,7 +56,7 @@ struct tk_read_base {
56 * interval. 56 * interval.
57 * @xtime_remainder: Shifted nano seconds left over when rounding 57 * @xtime_remainder: Shifted nano seconds left over when rounding
58 * @cycle_interval 58 * @cycle_interval
59 * @raw_interval: Raw nano seconds accumulated per NTP interval. 59 * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
60 * @ntp_error: Difference between accumulated time and NTP time in ntp 60 * @ntp_error: Difference between accumulated time and NTP time in ntp
61 * shifted nano seconds. 61 * shifted nano seconds.
62 * @ntp_error_shift: Shift conversion between clock shifted nano seconds and 62 * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
@@ -97,7 +97,7 @@ struct timekeeper {
97 cycle_t cycle_interval; 97 cycle_t cycle_interval;
98 u64 xtime_interval; 98 u64 xtime_interval;
99 s64 xtime_remainder; 99 s64 xtime_remainder;
100 u32 raw_interval; 100 u64 raw_interval;
101 /* The ntp_tick_length() value currently being used. 101 /* The ntp_tick_length() value currently being used.
102 * This cached copy ensures we consistently apply the tick 102 * This cached copy ensures we consistently apply the tick
103 * length for an entire tick, as ntp_tick_length may change 103 * length for an entire tick, as ntp_tick_length may change
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 83b264c52898..812cdd8cff22 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -342,6 +342,7 @@ struct tty_file_private {
342#define TTY_PTY_LOCK 16 /* pty private */ 342#define TTY_PTY_LOCK 16 /* pty private */
343#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ 343#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
344#define TTY_HUPPED 18 /* Post driver->hangup() */ 344#define TTY_HUPPED 18 /* Post driver->hangup() */
345#define TTY_HUPPING 19 /* Hangup in progress */
345#define TTY_LDISC_HALTED 22 /* Line discipline is halted */ 346#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
346 347
347#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) 348#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
@@ -372,6 +373,7 @@ extern void proc_clear_tty(struct task_struct *p);
372extern struct tty_struct *get_current_tty(void); 373extern struct tty_struct *get_current_tty(void);
373/* tty_io.c */ 374/* tty_io.c */
374extern int __init tty_init(void); 375extern int __init tty_init(void);
376extern const char *tty_name(const struct tty_struct *tty);
375#else 377#else
376static inline void console_init(void) 378static inline void console_init(void)
377{ } 379{ }
@@ -392,6 +394,8 @@ static inline struct tty_struct *get_current_tty(void)
392/* tty_io.c */ 394/* tty_io.c */
393static inline int __init tty_init(void) 395static inline int __init tty_init(void)
394{ return 0; } 396{ return 0; }
397static inline const char *tty_name(const struct tty_struct *tty)
398{ return "(none)"; }
395#endif 399#endif
396 400
397extern void tty_write_flush(struct tty_struct *); 401extern void tty_write_flush(struct tty_struct *);
@@ -420,7 +424,6 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
420 424
421extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, 425extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
422 const char *routine); 426 const char *routine);
423extern const char *tty_name(const struct tty_struct *tty);
424extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); 427extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
425extern int __tty_check_change(struct tty_struct *tty, int sig); 428extern int __tty_check_change(struct tty_struct *tty, int sig);
426extern int tty_check_change(struct tty_struct *tty); 429extern int tty_check_change(struct tty_struct *tty);
@@ -583,7 +586,7 @@ extern int tty_unregister_ldisc(int disc);
583extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); 586extern int tty_set_ldisc(struct tty_struct *tty, int ldisc);
584extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); 587extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
585extern void tty_ldisc_release(struct tty_struct *tty); 588extern void tty_ldisc_release(struct tty_struct *tty);
586extern void tty_ldisc_init(struct tty_struct *tty); 589extern int __must_check tty_ldisc_init(struct tty_struct *tty);
587extern void tty_ldisc_deinit(struct tty_struct *tty); 590extern void tty_ldisc_deinit(struct tty_struct *tty);
588extern void tty_ldisc_begin(void); 591extern void tty_ldisc_begin(void);
589 592
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 1074b8921a5d..69c728883266 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -53,6 +53,9 @@
53/* big enough to hold our biggest descriptor */ 53/* big enough to hold our biggest descriptor */
54#define USB_COMP_EP0_BUFSIZ 1024 54#define USB_COMP_EP0_BUFSIZ 1024
55 55
56/* OS feature descriptor length <= 4kB */
57#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096
58
56#define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1) 59#define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1)
57struct usb_configuration; 60struct usb_configuration;
58 61
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 59af12e0f767..f3e6b6fd1c68 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -663,8 +663,20 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
663 list_for_each_entry(tmp, &(gadget)->ep_list, ep_list) 663 list_for_each_entry(tmp, &(gadget)->ep_list, ep_list)
664 664
665/** 665/**
666 * usb_ep_align - returns @len aligned to ep's maxpacketsize.
667 * @ep: the endpoint whose maxpacketsize is used to align @len
668 * @len: buffer size's length to align to @ep's maxpacketsize
669 *
670 * This helper is used to align buffer's size to an ep's maxpacketsize.
671 */
672static inline size_t usb_ep_align(struct usb_ep *ep, size_t len)
673{
674 return round_up(len, (size_t)le16_to_cpu(ep->desc->wMaxPacketSize));
675}
676
677/**
666 * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget 678 * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
667 * requires quirk_ep_out_aligned_size, otherwise reguens len. 679 * requires quirk_ep_out_aligned_size, otherwise returns len.
668 * @g: controller to check for quirk 680 * @g: controller to check for quirk
669 * @ep: the endpoint whose maxpacketsize is used to align @len 681 * @ep: the endpoint whose maxpacketsize is used to align @len
670 * @len: buffer size's length to align to @ep's maxpacketsize 682 * @len: buffer size's length to align to @ep's maxpacketsize
@@ -675,8 +687,7 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
675static inline size_t 687static inline size_t
676usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len) 688usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len)
677{ 689{
678 return !g->quirk_ep_out_aligned_size ? len : 690 return g->quirk_ep_out_aligned_size ? usb_ep_align(ep, len) : len;
679 round_up(len, (size_t)ep->desc->wMaxPacketSize);
680} 691}
681 692
682/** 693/**
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index de2a722fe3cf..ea4f81c2a6d5 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -56,4 +56,7 @@
56 */ 56 */
57#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11) 57#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)
58 58
59/* Device needs a pause after every control message. */
60#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
61
59#endif /* __LINUX_USB_QUIRKS_H */ 62#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index a3d04934aa96..6f8fbcf10dfb 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -24,16 +24,10 @@
24#ifndef MODULE_ARCH_VERMAGIC 24#ifndef MODULE_ARCH_VERMAGIC
25#define MODULE_ARCH_VERMAGIC "" 25#define MODULE_ARCH_VERMAGIC ""
26#endif 26#endif
27#ifdef RETPOLINE
28#define MODULE_VERMAGIC_RETPOLINE "retpoline "
29#else
30#define MODULE_VERMAGIC_RETPOLINE ""
31#endif
32 27
33#define VERMAGIC_STRING \ 28#define VERMAGIC_STRING \
34 UTS_RELEASE " " \ 29 UTS_RELEASE " " \
35 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ 30 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
36 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ 31 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
37 MODULE_ARCH_VERMAGIC \ 32 MODULE_ARCH_VERMAGIC
38 MODULE_VERMAGIC_RETPOLINE
39 33
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 24e6d3db70da..1fcaf70c8fb7 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -134,6 +134,9 @@ int virtio_device_freeze(struct virtio_device *dev);
134int virtio_device_restore(struct virtio_device *dev); 134int virtio_device_restore(struct virtio_device *dev);
135#endif 135#endif
136 136
137#define virtio_device_for_each_vq(vdev, vq) \
138 list_for_each_entry(vq, &vdev->vqs, list)
139
137/** 140/**
138 * virtio_driver - operations for a virtio I/O driver 141 * virtio_driver - operations for a virtio I/O driver
139 * @driver: underlying device driver (populate name and owner). 142 * @driver: underlying device driver (populate name and owner).
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 217abe56e711..f63ce973b27b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -451,6 +451,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
451 451
452extern void workqueue_set_max_active(struct workqueue_struct *wq, 452extern void workqueue_set_max_active(struct workqueue_struct *wq,
453 int max_active); 453 int max_active);
454extern struct work_struct *current_work(void);
454extern bool current_is_workqueue_rescuer(void); 455extern bool current_is_workqueue_rescuer(void);
455extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); 456extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
456extern unsigned int work_busy(struct work_struct *work); 457extern unsigned int work_busy(struct work_struct *work);
diff --git a/include/net/arp.h b/include/net/arp.h
index 5e0f891d476c..1b3f86981757 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -19,6 +19,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
19 19
20static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) 20static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
21{ 21{
22 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
23 key = INADDR_ANY;
24
22 return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev); 25 return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
23} 26}
24 27
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 1878d0a96333..876688b5a356 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -878,7 +878,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
878 u16 conn_timeout, u8 role); 878 u16 conn_timeout, u8 role);
879struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, 879struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
880 u8 dst_type, u8 sec_level, u16 conn_timeout, 880 u8 dst_type, u8 sec_level, u16 conn_timeout,
881 u8 role); 881 u8 role, bdaddr_t *direct_rpa);
882struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, 882struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
883 u8 sec_level, u8 auth_type); 883 u8 sec_level, u8 auth_type);
884struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, 884struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index b5f3693fe5b6..c05748cc1b20 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -933,9 +933,9 @@ enum rate_info_flags {
933 * @RATE_INFO_BW_160: 160 MHz bandwidth 933 * @RATE_INFO_BW_160: 160 MHz bandwidth
934 */ 934 */
935enum rate_info_bw { 935enum rate_info_bw {
936 RATE_INFO_BW_20 = 0,
936 RATE_INFO_BW_5, 937 RATE_INFO_BW_5,
937 RATE_INFO_BW_10, 938 RATE_INFO_BW_10,
938 RATE_INFO_BW_20,
939 RATE_INFO_BW_40, 939 RATE_INFO_BW_40,
940 RATE_INFO_BW_80, 940 RATE_INFO_BW_80,
941 RATE_INFO_BW_160, 941 RATE_INFO_BW_160,
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h
new file mode 100644
index 000000000000..151accae708b
--- /dev/null
+++ b/include/net/dst_cache.h
@@ -0,0 +1,97 @@
1#ifndef _NET_DST_CACHE_H
2#define _NET_DST_CACHE_H
3
4#include <linux/jiffies.h>
5#include <net/dst.h>
6#if IS_ENABLED(CONFIG_IPV6)
7#include <net/ip6_fib.h>
8#endif
9
10struct dst_cache {
11 struct dst_cache_pcpu __percpu *cache;
12 unsigned long reset_ts;
13};
14
15/**
16 * dst_cache_get - perform cache lookup
17 * @dst_cache: the cache
18 *
19 * The caller should use dst_cache_get_ip4() if it need to retrieve the
20 * source address to be used when xmitting to the cached dst.
21 * local BH must be disabled.
22 */
23struct dst_entry *dst_cache_get(struct dst_cache *dst_cache);
24
25/**
26 * dst_cache_get_ip4 - perform cache lookup and fetch ipv4 source address
27 * @dst_cache: the cache
28 * @saddr: return value for the retrieved source address
29 *
30 * local BH must be disabled.
31 */
32struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr);
33
34/**
35 * dst_cache_set_ip4 - store the ipv4 dst into the cache
36 * @dst_cache: the cache
37 * @dst: the entry to be cached
38 * @saddr: the source address to be stored inside the cache
39 *
40 * local BH must be disabled.
41 */
42void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst,
43 __be32 saddr);
44
45#if IS_ENABLED(CONFIG_IPV6)
46
47/**
48 * dst_cache_set_ip6 - store the ipv6 dst into the cache
49 * @dst_cache: the cache
50 * @dst: the entry to be cached
51 * @saddr: the source address to be stored inside the cache
52 *
53 * local BH must be disabled.
54 */
55void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
56 const struct in6_addr *addr);
57
58/**
59 * dst_cache_get_ip6 - perform cache lookup and fetch ipv6 source address
60 * @dst_cache: the cache
61 * @saddr: return value for the retrieved source address
62 *
63 * local BH must be disabled.
64 */
65struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache,
66 struct in6_addr *saddr);
67#endif
68
69/**
70 * dst_cache_reset - invalidate the cache contents
71 * @dst_cache: the cache
72 *
73 * This do not free the cached dst to avoid races and contentions.
74 * the dst will be freed on later cache lookup.
75 */
76static inline void dst_cache_reset(struct dst_cache *dst_cache)
77{
78 dst_cache->reset_ts = jiffies;
79}
80
81/**
82 * dst_cache_init - initialize the cache, allocating the required storage
83 * @dst_cache: the cache
84 * @gfp: allocation flags
85 */
86int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp);
87
88/**
89 * dst_cache_destroy - empty the cache and free the allocated storage
90 * @dst_cache: the cache
91 *
92 * No synchronization is enforced: it must be called only when the cache
93 * is unsed.
94 */
95void dst_cache_destroy(struct dst_cache *dst_cache);
96
97#endif
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index c9b3eb70f340..567017b5fc9e 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -55,6 +55,7 @@ struct inet_timewait_sock {
55#define tw_family __tw_common.skc_family 55#define tw_family __tw_common.skc_family
56#define tw_state __tw_common.skc_state 56#define tw_state __tw_common.skc_state
57#define tw_reuse __tw_common.skc_reuse 57#define tw_reuse __tw_common.skc_reuse
58#define tw_reuseport __tw_common.skc_reuseport
58#define tw_ipv6only __tw_common.skc_ipv6only 59#define tw_ipv6only __tw_common.skc_ipv6only
59#define tw_bound_dev_if __tw_common.skc_bound_dev_if 60#define tw_bound_dev_if __tw_common.skc_bound_dev_if
60#define tw_node __tw_common.skc_nulls_node 61#define tw_node __tw_common.skc_nulls_node
diff --git a/include/net/ip.h b/include/net/ip.h
index 639398af273b..0530bcdbc212 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -279,6 +279,13 @@ int ip_decrease_ttl(struct iphdr *iph)
279 return --iph->ttl; 279 return --iph->ttl;
280} 280}
281 281
282static inline int ip_mtu_locked(const struct dst_entry *dst)
283{
284 const struct rtable *rt = (const struct rtable *)dst;
285
286 return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
287}
288
282static inline 289static inline
283int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst) 290int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
284{ 291{
@@ -286,7 +293,7 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
286 293
287 return pmtudisc == IP_PMTUDISC_DO || 294 return pmtudisc == IP_PMTUDISC_DO ||
288 (pmtudisc == IP_PMTUDISC_WANT && 295 (pmtudisc == IP_PMTUDISC_WANT &&
289 !(dst_metric_locked(dst, RTAX_MTU))); 296 !ip_mtu_locked(dst));
290} 297}
291 298
292static inline bool ip_sk_accept_pmtu(const struct sock *sk) 299static inline bool ip_sk_accept_pmtu(const struct sock *sk)
@@ -312,7 +319,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
312 struct net *net = dev_net(dst->dev); 319 struct net *net = dev_net(dst->dev);
313 320
314 if (net->ipv4.sysctl_ip_fwd_use_pmtu || 321 if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
315 dst_metric_locked(dst, RTAX_MTU) || 322 ip_mtu_locked(dst) ||
316 !forwarding) 323 !forwarding)
317 return dst_mtu(dst); 324 return dst_mtu(dst);
318 325
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 9c2c044153f6..d143c8480681 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -5,6 +5,8 @@
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6#include <linux/if_tunnel.h> 6#include <linux/if_tunnel.h>
7#include <linux/ip6_tunnel.h> 7#include <linux/ip6_tunnel.h>
8#include <net/ip_tunnels.h>
9#include <net/dst_cache.h>
8 10
9#define IP6TUNNEL_ERR_TIMEO (30*HZ) 11#define IP6TUNNEL_ERR_TIMEO (30*HZ)
10 12
@@ -32,12 +34,6 @@ struct __ip6_tnl_parm {
32 __be32 o_key; 34 __be32 o_key;
33}; 35};
34 36
35struct ip6_tnl_dst {
36 seqlock_t lock;
37 struct dst_entry __rcu *dst;
38 u32 cookie;
39};
40
41/* IPv6 tunnel */ 37/* IPv6 tunnel */
42struct ip6_tnl { 38struct ip6_tnl {
43 struct ip6_tnl __rcu *next; /* next tunnel in list */ 39 struct ip6_tnl __rcu *next; /* next tunnel in list */
@@ -45,7 +41,7 @@ struct ip6_tnl {
45 struct net *net; /* netns for packet i/o */ 41 struct net *net; /* netns for packet i/o */
46 struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ 42 struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
47 struct flowi fl; /* flowi template for xmit */ 43 struct flowi fl; /* flowi template for xmit */
48 struct ip6_tnl_dst __percpu *dst_cache; /* cached dst */ 44 struct dst_cache dst_cache; /* cached dst */
49 45
50 int err_count; 46 int err_count;
51 unsigned long err_time; 47 unsigned long err_time;
@@ -65,11 +61,6 @@ struct ipv6_tlv_tnl_enc_lim {
65 __u8 encap_limit; /* tunnel encapsulation limit */ 61 __u8 encap_limit; /* tunnel encapsulation limit */
66} __packed; 62} __packed;
67 63
68struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t);
69int ip6_tnl_dst_init(struct ip6_tnl *t);
70void ip6_tnl_dst_destroy(struct ip6_tnl *t);
71void ip6_tnl_dst_reset(struct ip6_tnl *t);
72void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst);
73int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, 64int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
74 const struct in6_addr *raddr); 65 const struct in6_addr *raddr);
75int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, 66int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index bda1721e9622..3afb7c4c7098 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -56,6 +56,7 @@ struct fib_nh_exception {
56 int fnhe_genid; 56 int fnhe_genid;
57 __be32 fnhe_daddr; 57 __be32 fnhe_daddr;
58 u32 fnhe_pmtu; 58 u32 fnhe_pmtu;
59 bool fnhe_mtu_locked;
59 __be32 fnhe_gw; 60 __be32 fnhe_gw;
60 unsigned long fnhe_expires; 61 unsigned long fnhe_expires;
61 struct rtable __rcu *fnhe_rth_input; 62 struct rtable __rcu *fnhe_rth_input;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 86a7bdd61d1a..74bc08d82e14 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -13,6 +13,7 @@
13#include <net/netns/generic.h> 13#include <net/netns/generic.h>
14#include <net/rtnetlink.h> 14#include <net/rtnetlink.h>
15#include <net/lwtunnel.h> 15#include <net/lwtunnel.h>
16#include <net/dst_cache.h>
16 17
17#if IS_ENABLED(CONFIG_IPV6) 18#if IS_ENABLED(CONFIG_IPV6)
18#include <net/ipv6.h> 19#include <net/ipv6.h>
@@ -85,11 +86,6 @@ struct ip_tunnel_prl_entry {
85 struct rcu_head rcu_head; 86 struct rcu_head rcu_head;
86}; 87};
87 88
88struct ip_tunnel_dst {
89 struct dst_entry __rcu *dst;
90 __be32 saddr;
91};
92
93struct metadata_dst; 89struct metadata_dst;
94 90
95struct ip_tunnel { 91struct ip_tunnel {
@@ -108,7 +104,7 @@ struct ip_tunnel {
108 int tun_hlen; /* Precalculated header length */ 104 int tun_hlen; /* Precalculated header length */
109 int mlink; 105 int mlink;
110 106
111 struct ip_tunnel_dst __percpu *dst_cache; 107 struct dst_cache dst_cache;
112 108
113 struct ip_tunnel_parm parms; 109 struct ip_tunnel_parm parms;
114 110
@@ -248,7 +244,6 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
248int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], 244int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
249 struct ip_tunnel_parm *p); 245 struct ip_tunnel_parm *p);
250void ip_tunnel_setup(struct net_device *dev, int net_id); 246void ip_tunnel_setup(struct net_device *dev, int net_id);
251void ip_tunnel_dst_reset_all(struct ip_tunnel *t);
252int ip_tunnel_encap_setup(struct ip_tunnel *t, 247int ip_tunnel_encap_setup(struct ip_tunnel *t,
253 struct ip_tunnel_encap *ipencap); 248 struct ip_tunnel_encap *ipencap);
254 249
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 7a8066b90289..0e01d570fa22 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -281,6 +281,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
281 int flags); 281 int flags);
282int ip6_flowlabel_init(void); 282int ip6_flowlabel_init(void);
283void ip6_flowlabel_cleanup(void); 283void ip6_flowlabel_cleanup(void);
284bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
284 285
285static inline void fl6_sock_release(struct ip6_flowlabel *fl) 286static inline void fl6_sock_release(struct ip6_flowlabel *fl)
286{ 287{
@@ -761,7 +762,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
761 * to minimize possbility that any useful information to an 762 * to minimize possbility that any useful information to an
762 * attacker is leaked. Only lower 20 bits are relevant. 763 * attacker is leaked. Only lower 20 bits are relevant.
763 */ 764 */
764 rol32(hash, 16); 765 hash = rol32(hash, 16);
765 766
766 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; 767 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
767 768
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index fe994d2e5286..df528a623548 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -97,13 +97,14 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
97 97
98struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, 98struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
99 struct proto *prot, int kern); 99 struct proto *prot, int kern);
100void llc_sk_stop_all_timers(struct sock *sk, bool sync);
100void llc_sk_free(struct sock *sk); 101void llc_sk_free(struct sock *sk);
101 102
102void llc_sk_reset(struct sock *sk); 103void llc_sk_reset(struct sock *sk);
103 104
104/* Access to a connection */ 105/* Access to a connection */
105int llc_conn_state_process(struct sock *sk, struct sk_buff *skb); 106int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
106void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); 107int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
107void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb); 108void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
108void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit); 109void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
109void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit); 110void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index c493b7c0d3c8..4889398a0f62 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -975,7 +975,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
975 * @RX_FLAG_DECRYPTED: This frame was decrypted in hardware. 975 * @RX_FLAG_DECRYPTED: This frame was decrypted in hardware.
976 * @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame, 976 * @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame,
977 * verification has been done by the hardware. 977 * verification has been done by the hardware.
978 * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame. 978 * @RX_FLAG_IV_STRIPPED: The IV and ICV are stripped from this frame.
979 * If this flag is set, the stack cannot do any replay detection 979 * If this flag is set, the stack cannot do any replay detection
980 * hence the driver or hardware will have to do that. 980 * hence the driver or hardware will have to do that.
981 * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this 981 * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
@@ -1013,6 +1013,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
1013 * on this subframe 1013 * on this subframe
1014 * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC 1014 * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
1015 * is stored in the @ampdu_delimiter_crc field) 1015 * is stored in the @ampdu_delimiter_crc field)
1016 * @RX_FLAG_MIC_STRIPPED: The mic was stripped of this packet. Decryption was
1017 * done by the hardware
1016 * @RX_FLAG_LDPC: LDPC was used 1018 * @RX_FLAG_LDPC: LDPC was used
1017 * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3 1019 * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
1018 * @RX_FLAG_10MHZ: 10 MHz (half channel) was used 1020 * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
@@ -1029,6 +1031,11 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
1029 * @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific 1031 * @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific
1030 * radiotap data in the skb->data (before the frame) as described by 1032 * radiotap data in the skb->data (before the frame) as described by
1031 * the &struct ieee80211_vendor_radiotap. 1033 * the &struct ieee80211_vendor_radiotap.
1034 * @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as same packet before.
1035 * This is used for AMSDU subframes which can have the same PN as
1036 * the first subframe.
1037 * @RX_FLAG_ICV_STRIPPED: The ICV is stripped from this frame. CRC checking must
1038 * be done in the hardware.
1032 */ 1039 */
1033enum mac80211_rx_flags { 1040enum mac80211_rx_flags {
1034 RX_FLAG_MMIC_ERROR = BIT(0), 1041 RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1059,6 +1066,9 @@ enum mac80211_rx_flags {
1059 RX_FLAG_5MHZ = BIT(29), 1066 RX_FLAG_5MHZ = BIT(29),
1060 RX_FLAG_AMSDU_MORE = BIT(30), 1067 RX_FLAG_AMSDU_MORE = BIT(30),
1061 RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31), 1068 RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31),
1069 RX_FLAG_MIC_STRIPPED = BIT_ULL(32),
1070 RX_FLAG_ALLOW_SAME_PN = BIT_ULL(33),
1071 RX_FLAG_ICV_STRIPPED = BIT_ULL(34),
1062}; 1072};
1063 1073
1064#define RX_FLAG_STBC_SHIFT 26 1074#define RX_FLAG_STBC_SHIFT 26
@@ -1113,7 +1123,7 @@ struct ieee80211_rx_status {
1113 u64 mactime; 1123 u64 mactime;
1114 u32 device_timestamp; 1124 u32 device_timestamp;
1115 u32 ampdu_reference; 1125 u32 ampdu_reference;
1116 u32 flag; 1126 u64 flag;
1117 u16 freq; 1127 u16 freq;
1118 u8 vht_flag; 1128 u8 vht_flag;
1119 u8 rate_idx; 1129 u8 rate_idx;
@@ -3888,7 +3898,7 @@ static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta,
3888 * The TX headroom reserved by mac80211 for its own tx_status functions. 3898 * The TX headroom reserved by mac80211 for its own tx_status functions.
3889 * This is enough for the radiotap header. 3899 * This is enough for the radiotap header.
3890 */ 3900 */
3891#define IEEE80211_TX_STATUS_HEADROOM 14 3901#define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4)
3892 3902
3893/** 3903/**
3894 * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames 3904 * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 2dcea635ecce..93328c61934a 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -209,6 +209,11 @@ int net_eq(const struct net *net1, const struct net *net2)
209 return net1 == net2; 209 return net1 == net2;
210} 210}
211 211
212static inline int check_net(const struct net *net)
213{
214 return atomic_read(&net->count) != 0;
215}
216
212void net_drop_ns(void *); 217void net_drop_ns(void *);
213 218
214#else 219#else
@@ -233,6 +238,11 @@ int net_eq(const struct net *net1, const struct net *net2)
233 return 1; 238 return 1;
234} 239}
235 240
241static inline int check_net(const struct net *net)
242{
243 return 1;
244}
245
236#define net_drop_ns NULL 246#define net_drop_ns NULL
237#endif 247#endif
238 248
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 9c5638ad872e..0dbce55437f2 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -28,8 +28,8 @@ struct nf_queue_handler {
28 struct nf_hook_ops *ops); 28 struct nf_hook_ops *ops);
29}; 29};
30 30
31void nf_register_queue_handler(const struct nf_queue_handler *qh); 31void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
32void nf_unregister_queue_handler(void); 32void nf_unregister_queue_handler(struct net *net);
33void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); 33void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
34 34
35void nf_queue_entry_get_refs(struct nf_queue_entry *entry); 35void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 0e3172751755..5ffaea4665f8 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -745,7 +745,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
745 */ 745 */
746static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value) 746static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
747{ 747{
748 return nla_put(skb, attrtype, sizeof(u8), &value); 748 /* temporary variables to work around GCC PR81715 with asan-stack=1 */
749 u8 tmp = value;
750
751 return nla_put(skb, attrtype, sizeof(u8), &tmp);
749} 752}
750 753
751/** 754/**
@@ -756,7 +759,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
756 */ 759 */
757static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value) 760static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
758{ 761{
759 return nla_put(skb, attrtype, sizeof(u16), &value); 762 u16 tmp = value;
763
764 return nla_put(skb, attrtype, sizeof(u16), &tmp);
760} 765}
761 766
762/** 767/**
@@ -767,7 +772,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
767 */ 772 */
768static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) 773static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
769{ 774{
770 return nla_put(skb, attrtype, sizeof(__be16), &value); 775 __be16 tmp = value;
776
777 return nla_put(skb, attrtype, sizeof(__be16), &tmp);
771} 778}
772 779
773/** 780/**
@@ -778,7 +785,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
778 */ 785 */
779static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value) 786static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
780{ 787{
781 return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value); 788 __be16 tmp = value;
789
790 return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
782} 791}
783 792
784/** 793/**
@@ -789,7 +798,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
789 */ 798 */
790static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value) 799static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
791{ 800{
792 return nla_put(skb, attrtype, sizeof(__le16), &value); 801 __le16 tmp = value;
802
803 return nla_put(skb, attrtype, sizeof(__le16), &tmp);
793} 804}
794 805
795/** 806/**
@@ -800,7 +811,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
800 */ 811 */
801static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) 812static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
802{ 813{
803 return nla_put(skb, attrtype, sizeof(u32), &value); 814 u32 tmp = value;
815
816 return nla_put(skb, attrtype, sizeof(u32), &tmp);
804} 817}
805 818
806/** 819/**
@@ -811,7 +824,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
811 */ 824 */
812static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value) 825static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
813{ 826{
814 return nla_put(skb, attrtype, sizeof(__be32), &value); 827 __be32 tmp = value;
828
829 return nla_put(skb, attrtype, sizeof(__be32), &tmp);
815} 830}
816 831
817/** 832/**
@@ -822,7 +837,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
822 */ 837 */
823static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value) 838static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
824{ 839{
825 return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value); 840 __be32 tmp = value;
841
842 return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
826} 843}
827 844
828/** 845/**
@@ -833,7 +850,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
833 */ 850 */
834static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) 851static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
835{ 852{
836 return nla_put(skb, attrtype, sizeof(__le32), &value); 853 __le32 tmp = value;
854
855 return nla_put(skb, attrtype, sizeof(__le32), &tmp);
837} 856}
838 857
839/** 858/**
@@ -844,7 +863,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
844 */ 863 */
845static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value) 864static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
846{ 865{
847 return nla_put(skb, attrtype, sizeof(u64), &value); 866 u64 tmp = value;
867
868 return nla_put(skb, attrtype, sizeof(u64), &tmp);
848} 869}
849 870
850/** 871/**
@@ -855,7 +876,9 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
855 */ 876 */
856static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value) 877static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
857{ 878{
858 return nla_put(skb, attrtype, sizeof(__be64), &value); 879 __be64 tmp = value;
880
881 return nla_put(skb, attrtype, sizeof(__be64), &tmp);
859} 882}
860 883
861/** 884/**
@@ -866,7 +889,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
866 */ 889 */
867static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value) 890static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
868{ 891{
869 return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value); 892 __be64 tmp = value;
893
894 return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
870} 895}
871 896
872/** 897/**
@@ -877,7 +902,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
877 */ 902 */
878static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value) 903static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
879{ 904{
880 return nla_put(skb, attrtype, sizeof(__le64), &value); 905 __le64 tmp = value;
906
907 return nla_put(skb, attrtype, sizeof(__le64), &tmp);
881} 908}
882 909
883/** 910/**
@@ -888,7 +915,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
888 */ 915 */
889static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value) 916static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
890{ 917{
891 return nla_put(skb, attrtype, sizeof(s8), &value); 918 s8 tmp = value;
919
920 return nla_put(skb, attrtype, sizeof(s8), &tmp);
892} 921}
893 922
894/** 923/**
@@ -899,7 +928,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
899 */ 928 */
900static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value) 929static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
901{ 930{
902 return nla_put(skb, attrtype, sizeof(s16), &value); 931 s16 tmp = value;
932
933 return nla_put(skb, attrtype, sizeof(s16), &tmp);
903} 934}
904 935
905/** 936/**
@@ -910,7 +941,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
910 */ 941 */
911static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value) 942static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
912{ 943{
913 return nla_put(skb, attrtype, sizeof(s32), &value); 944 s32 tmp = value;
945
946 return nla_put(skb, attrtype, sizeof(s32), &tmp);
914} 947}
915 948
916/** 949/**
@@ -921,7 +954,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
921 */ 954 */
922static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value) 955static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
923{ 956{
924 return nla_put(skb, attrtype, sizeof(s64), &value); 957 s64 tmp = value;
958
959 return nla_put(skb, attrtype, sizeof(s64), &tmp);
925} 960}
926 961
927/** 962/**
@@ -969,7 +1004,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
969static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype, 1004static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
970 __be32 addr) 1005 __be32 addr)
971{ 1006{
972 return nla_put_be32(skb, attrtype, addr); 1007 __be32 tmp = addr;
1008
1009 return nla_put_be32(skb, attrtype, tmp);
973} 1010}
974 1011
975/** 1012/**
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 38aa4983e2a9..36d723579af2 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -5,11 +5,13 @@
5 5
6struct proc_dir_entry; 6struct proc_dir_entry;
7struct nf_logger; 7struct nf_logger;
8struct nf_queue_handler;
8 9
9struct netns_nf { 10struct netns_nf {
10#if defined CONFIG_PROC_FS 11#if defined CONFIG_PROC_FS
11 struct proc_dir_entry *proc_netfilter; 12 struct proc_dir_entry *proc_netfilter;
12#endif 13#endif
14 const struct nf_queue_handler __rcu *queue_handler;
13 const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; 15 const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
14#ifdef CONFIG_SYSCTL 16#ifdef CONFIG_SYSCTL
15 struct ctl_table_header *nf_log_dir_header; 17 struct ctl_table_header *nf_log_dir_header;
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 3334dbfa5aa4..7fc78663ec9d 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -6,7 +6,7 @@
6 6
7static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining) 7static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
8{ 8{
9 return remaining >= sizeof(*rtnh) && 9 return remaining >= (int)sizeof(*rtnh) &&
10 rtnh->rtnh_len >= sizeof(*rtnh) && 10 rtnh->rtnh_len >= sizeof(*rtnh) &&
11 rtnh->rtnh_len <= remaining; 11 rtnh->rtnh_len <= remaining;
12} 12}
diff --git a/include/net/red.h b/include/net/red.h
index 76e0b5f922c6..3618cdfec884 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -167,6 +167,17 @@ static inline void red_set_vars(struct red_vars *v)
167 v->qcount = -1; 167 v->qcount = -1;
168} 168}
169 169
170static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
171{
172 if (fls(qth_min) + Wlog > 32)
173 return false;
174 if (fls(qth_max) + Wlog > 32)
175 return false;
176 if (qth_max < qth_min)
177 return false;
178 return true;
179}
180
170static inline void red_set_parms(struct red_parms *p, 181static inline void red_set_parms(struct red_parms *p,
171 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog, 182 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
172 u8 Scell_log, u8 *stab, u32 max_P) 183 u8 Scell_log, u8 *stab, u32 max_P)
@@ -178,7 +189,7 @@ static inline void red_set_parms(struct red_parms *p,
178 p->qth_max = qth_max << Wlog; 189 p->qth_max = qth_max << Wlog;
179 p->Wlog = Wlog; 190 p->Wlog = Wlog;
180 p->Plog = Plog; 191 p->Plog = Plog;
181 if (delta < 0) 192 if (delta <= 0)
182 delta = 1; 193 delta = 1;
183 p->qth_delta = delta; 194 p->qth_delta = delta;
184 if (!max_P) { 195 if (!max_P) {
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index ebc5a2ed8631..f83cacce3308 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -78,7 +78,7 @@ struct regulatory_request {
78 int wiphy_idx; 78 int wiphy_idx;
79 enum nl80211_reg_initiator initiator; 79 enum nl80211_reg_initiator initiator;
80 enum nl80211_user_reg_hint_type user_reg_hint_type; 80 enum nl80211_user_reg_hint_type user_reg_hint_type;
81 char alpha2[2]; 81 char alpha2[3];
82 enum nl80211_dfs_regions dfs_region; 82 enum nl80211_dfs_regions dfs_region;
83 bool intersect; 83 bool intersect;
84 bool processed; 84 bool processed;
diff --git a/include/net/route.h b/include/net/route.h
index a3b9ef74a389..d2a92d94ff72 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -64,7 +64,8 @@ struct rtable {
64 __be32 rt_gateway; 64 __be32 rt_gateway;
65 65
66 /* Miscellaneous cached information */ 66 /* Miscellaneous cached information */
67 u32 rt_pmtu; 67 u32 rt_mtu_locked:1,
68 rt_pmtu:31;
68 69
69 u32 rt_table_id; 70 u32 rt_table_id;
70 71
diff --git a/include/net/slhc_vj.h b/include/net/slhc_vj.h
index 8716d5942b65..8fcf8908a694 100644
--- a/include/net/slhc_vj.h
+++ b/include/net/slhc_vj.h
@@ -127,6 +127,7 @@ typedef __u32 int32;
127 */ 127 */
128struct cstate { 128struct cstate {
129 byte_t cs_this; /* connection id number (xmit) */ 129 byte_t cs_this; /* connection id number (xmit) */
130 bool initialized; /* true if initialized */
130 struct cstate *next; /* next in ring (xmit) */ 131 struct cstate *next; /* next in ring (xmit) */
131 struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */ 132 struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */
132 struct tcphdr cs_tcp; 133 struct tcphdr cs_tcp;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cecb0e0eff06..cac4a6ad5db3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -376,6 +376,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
376 struct pipe_inode_info *pipe, size_t len, 376 struct pipe_inode_info *pipe, size_t len,
377 unsigned int flags); 377 unsigned int flags);
378 378
379void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
379static inline void tcp_dec_quickack_mode(struct sock *sk, 380static inline void tcp_dec_quickack_mode(struct sock *sk,
380 const unsigned int pkts) 381 const unsigned int pkts)
381{ 382{
@@ -559,6 +560,7 @@ void tcp_send_fin(struct sock *sk);
559void tcp_send_active_reset(struct sock *sk, gfp_t priority); 560void tcp_send_active_reset(struct sock *sk, gfp_t priority);
560int tcp_send_synack(struct sock *); 561int tcp_send_synack(struct sock *);
561void tcp_push_one(struct sock *, unsigned int mss_now); 562void tcp_push_one(struct sock *, unsigned int mss_now);
563void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
562void tcp_send_ack(struct sock *sk); 564void tcp_send_ack(struct sock *sk);
563void tcp_send_delayed_ack(struct sock *sk); 565void tcp_send_delayed_ack(struct sock *sk);
564void tcp_send_loss_probe(struct sock *sk); 566void tcp_send_loss_probe(struct sock *sk);
@@ -1199,9 +1201,11 @@ void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
1199 1201
1200static inline int tcp_win_from_space(int space) 1202static inline int tcp_win_from_space(int space)
1201{ 1203{
1202 return sysctl_tcp_adv_win_scale<=0 ? 1204 int tcp_adv_win_scale = sysctl_tcp_adv_win_scale;
1203 (space>>(-sysctl_tcp_adv_win_scale)) : 1205
1204 space - (space>>sysctl_tcp_adv_win_scale); 1206 return tcp_adv_win_scale <= 0 ?
1207 (space>>(-tcp_adv_win_scale)) :
1208 space - (space>>tcp_adv_win_scale);
1205} 1209}
1206 1210
1207/* Note: caller must be prepared to deal with negative returns */ 1211/* Note: caller must be prepared to deal with negative returns */
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 80761938b9a7..8228155b305e 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -62,6 +62,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
62 UDP_SKB_CB(skb)->cscov = cscov; 62 UDP_SKB_CB(skb)->cscov = cscov;
63 if (skb->ip_summed == CHECKSUM_COMPLETE) 63 if (skb->ip_summed == CHECKSUM_COMPLETE)
64 skb->ip_summed = CHECKSUM_NONE; 64 skb->ip_summed = CHECKSUM_NONE;
65 skb->csum_valid = 0;
65 } 66 }
66 67
67 return 0; 68 return 0;
diff --git a/include/net/x25.h b/include/net/x25.h
index c383aa4edbf0..6d30a01d281d 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -298,10 +298,10 @@ void x25_check_rbuf(struct sock *);
298 298
299/* sysctl_net_x25.c */ 299/* sysctl_net_x25.c */
300#ifdef CONFIG_SYSCTL 300#ifdef CONFIG_SYSCTL
301void x25_register_sysctl(void); 301int x25_register_sysctl(void);
302void x25_unregister_sysctl(void); 302void x25_unregister_sysctl(void);
303#else 303#else
304static inline void x25_register_sysctl(void) {}; 304static inline int x25_register_sysctl(void) { return 0; };
305static inline void x25_unregister_sysctl(void) {}; 305static inline void x25_unregister_sysctl(void) {};
306#endif /* CONFIG_SYSCTL */ 306#endif /* CONFIG_SYSCTL */
307 307
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index a78ff97eb249..d77416963f05 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -123,6 +123,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
123 const unsigned char *dst_dev_addr); 123 const unsigned char *dst_dev_addr);
124 124
125int rdma_addr_size(struct sockaddr *addr); 125int rdma_addr_size(struct sockaddr *addr);
126int rdma_addr_size_in6(struct sockaddr_in6 *addr);
127int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
126 128
127int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id); 129int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
128int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid, 130int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 120da1d7f57e..10fefb0dc640 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -3007,6 +3007,20 @@ static inline int ib_check_mr_access(int flags)
3007 return 0; 3007 return 0;
3008} 3008}
3009 3009
3010static inline bool ib_access_writable(int access_flags)
3011{
3012 /*
3013 * We have writable memory backing the MR if any of the following
3014 * access flags are set. "Local write" and "remote write" obviously
3015 * require write access. "Remote atomic" can do things like fetch and
3016 * add, which will modify memory, and "MW bind" can change permissions
3017 * by binding a window.
3018 */
3019 return access_flags &
3020 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
3021 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
3022}
3023
3010/** 3024/**
3011 * ib_check_mr_status: lightweight check of MR status. 3025 * ib_check_mr_status: lightweight check of MR status.
3012 * This routine may provide status checks on a selected 3026 * This routine may provide status checks on a selected
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 44202ff897fd..f759e0918037 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -99,6 +99,8 @@ struct tegra_mc_soc {
99 u8 client_id_mask; 99 u8 client_id_mask;
100 100
101 const struct tegra_smmu_soc *smmu; 101 const struct tegra_smmu_soc *smmu;
102
103 u32 intmask;
102}; 104};
103 105
104struct tegra_mc { 106struct tegra_mc {
diff --git a/include/sound/control.h b/include/sound/control.h
index 21d047f229a1..4142757080f8 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -22,6 +22,7 @@
22 * 22 *
23 */ 23 */
24 24
25#include <linux/nospec.h>
25#include <sound/asound.h> 26#include <sound/asound.h>
26 27
27#define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data) 28#define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
@@ -147,12 +148,14 @@ int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
147 148
148static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 149static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
149{ 150{
150 return id->numid - kctl->id.numid; 151 unsigned int ioff = id->numid - kctl->id.numid;
152 return array_index_nospec(ioff, kctl->count);
151} 153}
152 154
153static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 155static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
154{ 156{
155 return id->index - kctl->id.index; 157 unsigned int ioff = id->index - kctl->id.index;
158 return array_index_nospec(ioff, kctl->count);
156} 159}
157 160
158static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 161static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
index 760c969d885d..12bbf8c81112 100644
--- a/include/sound/pcm_oss.h
+++ b/include/sound/pcm_oss.h
@@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime {
57 char *buffer; /* vmallocated period */ 57 char *buffer; /* vmallocated period */
58 size_t buffer_used; /* used length from period buffer */ 58 size_t buffer_used; /* used length from period buffer */
59 struct mutex params_lock; 59 struct mutex params_lock;
60 atomic_t rw_ref; /* concurrent read/write accesses */
60#ifdef CONFIG_SND_PCM_OSS_PLUGINS 61#ifdef CONFIG_SND_PCM_OSS_PLUGINS
61 struct snd_pcm_plugin *plugin_first; 62 struct snd_pcm_plugin *plugin_first;
62 struct snd_pcm_plugin *plugin_last; 63 struct snd_pcm_plugin *plugin_last;
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index 758607226bfd..2cd449328aee 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -134,12 +134,12 @@ DECLARE_EVENT_CLASS(clk_parent,
134 134
135 TP_STRUCT__entry( 135 TP_STRUCT__entry(
136 __string( name, core->name ) 136 __string( name, core->name )
137 __string( pname, parent->name ) 137 __string( pname, parent ? parent->name : "none" )
138 ), 138 ),
139 139
140 TP_fast_assign( 140 TP_fast_assign(
141 __assign_str(name, core->name); 141 __assign_str(name, core->name);
142 __assign_str(pname, parent->name); 142 __assign_str(pname, parent ? parent->name : "none");
143 ), 143 ),
144 144
145 TP_printk("%s %s", __get_str(name), __get_str(pname)) 145 TP_printk("%s %s", __get_str(name), __get_str(pname))
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 073b9ac245ba..e844556794dc 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -125,6 +125,20 @@ DEFINE_EVENT(timer_class, timer_cancel,
125 TP_ARGS(timer) 125 TP_ARGS(timer)
126); 126);
127 127
128#define decode_clockid(type) \
129 __print_symbolic(type, \
130 { CLOCK_REALTIME, "CLOCK_REALTIME" }, \
131 { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
132 { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
133 { CLOCK_TAI, "CLOCK_TAI" })
134
135#define decode_hrtimer_mode(mode) \
136 __print_symbolic(mode, \
137 { HRTIMER_MODE_ABS, "ABS" }, \
138 { HRTIMER_MODE_REL, "REL" }, \
139 { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
140 { HRTIMER_MODE_REL_PINNED, "REL|PINNED" })
141
128/** 142/**
129 * hrtimer_init - called when the hrtimer is initialized 143 * hrtimer_init - called when the hrtimer is initialized
130 * @hrtimer: pointer to struct hrtimer 144 * @hrtimer: pointer to struct hrtimer
@@ -151,10 +165,8 @@ TRACE_EVENT(hrtimer_init,
151 ), 165 ),
152 166
153 TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer, 167 TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
154 __entry->clockid == CLOCK_REALTIME ? 168 decode_clockid(__entry->clockid),
155 "CLOCK_REALTIME" : "CLOCK_MONOTONIC", 169 decode_hrtimer_mode(__entry->mode))
156 __entry->mode == HRTIMER_MODE_ABS ?
157 "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
158); 170);
159 171
160/** 172/**
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index bce990f5a35d..d6be935caa50 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -377,22 +377,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
377DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin); 377DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
378DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin); 378DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
379 379
380TRACE_EVENT(xen_mmu_flush_tlb_all,
381 TP_PROTO(int x),
382 TP_ARGS(x),
383 TP_STRUCT__entry(__array(char, x, 0)),
384 TP_fast_assign((void)x),
385 TP_printk("%s", "")
386 );
387
388TRACE_EVENT(xen_mmu_flush_tlb,
389 TP_PROTO(int x),
390 TP_ARGS(x),
391 TP_STRUCT__entry(__array(char, x, 0)),
392 TP_fast_assign((void)x),
393 TP_printk("%s", "")
394 );
395
396TRACE_EVENT(xen_mmu_flush_tlb_single, 380TRACE_EVENT(xen_mmu_flush_tlb_single,
397 TP_PROTO(unsigned long addr), 381 TP_PROTO(unsigned long addr),
398 TP_ARGS(addr), 382 TP_ARGS(addr),
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index fc9e2d6e5e2f..232367124712 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -60,6 +60,7 @@ struct drm_virtgpu_execbuffer {
60}; 60};
61 61
62#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ 62#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
63#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
63 64
64struct drm_virtgpu_getparam { 65struct drm_virtgpu_getparam {
65 uint64_t param; 66 uint64_t param;
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index bc81fb2e1f0e..6f04cb419115 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -26,6 +26,19 @@
26#define EPOLL_CTL_DEL 2 26#define EPOLL_CTL_DEL 2
27#define EPOLL_CTL_MOD 3 27#define EPOLL_CTL_MOD 3
28 28
29/* Epoll event masks */
30#define EPOLLIN 0x00000001
31#define EPOLLPRI 0x00000002
32#define EPOLLOUT 0x00000004
33#define EPOLLERR 0x00000008
34#define EPOLLHUP 0x00000010
35#define EPOLLRDNORM 0x00000040
36#define EPOLLRDBAND 0x00000080
37#define EPOLLWRNORM 0x00000100
38#define EPOLLWRBAND 0x00000200
39#define EPOLLMSG 0x00000400
40#define EPOLLRDHUP 0x00002000
41
29/* 42/*
30 * Request the handling of system wakeup events so as to prevent system suspends 43 * Request the handling of system wakeup events so as to prevent system suspends
31 * from happening while those events are being processed. 44 * from happening while those events are being processed.
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index ea9221b0331a..064d2026ab38 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -29,6 +29,7 @@
29 */ 29 */
30 30
31#define ETH_ALEN 6 /* Octets in one ethernet addr */ 31#define ETH_ALEN 6 /* Octets in one ethernet addr */
32#define ETH_TLEN 2 /* Octets in ethernet type field */
32#define ETH_HLEN 14 /* Total octets in header. */ 33#define ETH_HLEN 14 /* Total octets in header. */
33#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */ 34#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
34#define ETH_DATA_LEN 1500 /* Max. octets in payload */ 35#define ETH_DATA_LEN 1500 /* Max. octets in payload */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 03f3618612aa..376d0ab5b9f2 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -831,6 +831,7 @@ struct kvm_ppc_smmu_info {
831#define KVM_CAP_GUEST_DEBUG_HW_WPS 120 831#define KVM_CAP_GUEST_DEBUG_HW_WPS 120
832#define KVM_CAP_SPLIT_IRQCHIP 121 832#define KVM_CAP_SPLIT_IRQCHIP 121
833#define KVM_CAP_IOEVENTFD_ANY_LENGTH 122 833#define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
834#define KVM_CAP_S390_BPB 152
834 835
835#ifdef KVM_CAP_IRQ_ROUTING 836#ifdef KVM_CAP_IRQ_ROUTING
836 837
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 1f0b4cf5dd03..d3aea4f10faf 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2195,6 +2195,8 @@ enum nl80211_attrs {
2195#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS 2195#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
2196#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS 2196#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
2197 2197
2198#define NL80211_WIPHY_NAME_MAXLEN 64
2199
2198#define NL80211_MAX_SUPP_RATES 32 2200#define NL80211_MAX_SUPP_RATES 32
2199#define NL80211_MAX_SUPP_HT_RATES 77 2201#define NL80211_MAX_SUPP_HT_RATES 77
2200#define NL80211_MAX_SUPP_REG_RULES 64 2202#define NL80211_MAX_SUPP_REG_RULES 64
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 1becea86c73c..eb3c786afa70 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -106,7 +106,7 @@
106#define PCI_SUBSYSTEM_ID 0x2e 106#define PCI_SUBSYSTEM_ID 0x2e
107#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */ 107#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
108#define PCI_ROM_ADDRESS_ENABLE 0x01 108#define PCI_ROM_ADDRESS_ENABLE 0x01
109#define PCI_ROM_ADDRESS_MASK (~0x7ffUL) 109#define PCI_ROM_ADDRESS_MASK (~0x7ffU)
110 110
111#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */ 111#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
112 112
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index a8d0759a9e40..64776b72e1eb 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -197,4 +197,16 @@ struct prctl_mm_map {
197# define PR_CAP_AMBIENT_LOWER 3 197# define PR_CAP_AMBIENT_LOWER 3
198# define PR_CAP_AMBIENT_CLEAR_ALL 4 198# define PR_CAP_AMBIENT_CLEAR_ALL 4
199 199
200/* Per task speculation control */
201#define PR_GET_SPECULATION_CTRL 52
202#define PR_SET_SPECULATION_CTRL 53
203/* Speculation control variants */
204# define PR_SPEC_STORE_BYPASS 0
205/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
206# define PR_SPEC_NOT_AFFECTED 0
207# define PR_SPEC_PRCTL (1UL << 0)
208# define PR_SPEC_ENABLE (1UL << 1)
209# define PR_SPEC_DISABLE (1UL << 2)
210# define PR_SPEC_FORCE_DISABLE (1UL << 3)
211
200#endif /* _LINUX_PRCTL_H */ 212#endif /* _LINUX_PRCTL_H */
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 0f238a43ff1e..e4acb615792b 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -15,7 +15,9 @@
15#define SECCOMP_SET_MODE_FILTER 1 15#define SECCOMP_SET_MODE_FILTER 1
16 16
17/* Valid flags for SECCOMP_SET_MODE_FILTER */ 17/* Valid flags for SECCOMP_SET_MODE_FILTER */
18#define SECCOMP_FILTER_FLAG_TSYNC 1 18#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
19/* In v4.14+ SECCOMP_FILTER_FLAG_LOG is (1UL << 1) */
20#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
19 21
20/* 22/*
21 * All BPF programs must return a 32-bit value. 23 * All BPF programs must return a 32-bit value.
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index d2314be4f0c0..19f9dc2c06f6 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -369,7 +369,7 @@ static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_d
369{ 369{
370 return (protocol == UAC_VERSION_1) ? 370 return (protocol == UAC_VERSION_1) ?
371 desc->baSourceID[desc->bNrInPins + 4] : 371 desc->baSourceID[desc->bNrInPins + 4] :
372 desc->baSourceID[desc->bNrInPins + 6]; 372 2; /* in UAC2, this value is constant */
373} 373}
374 374
375static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc, 375static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc,
@@ -377,7 +377,7 @@ static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_de
377{ 377{
378 return (protocol == UAC_VERSION_1) ? 378 return (protocol == UAC_VERSION_1) ?
379 &desc->baSourceID[desc->bNrInPins + 5] : 379 &desc->baSourceID[desc->bNrInPins + 5] :
380 &desc->baSourceID[desc->bNrInPins + 7]; 380 &desc->baSourceID[desc->bNrInPins + 6];
381} 381}
382 382
383static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc, 383static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc,
diff --git a/init/Kconfig b/init/Kconfig
index e1d1d6936f92..9f9b1f6734b8 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1308,6 +1308,17 @@ source "usr/Kconfig"
1308 1308
1309endif 1309endif
1310 1310
1311choice
1312 prompt "Compiler optimization level"
1313 default CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
1314
1315config CC_OPTIMIZE_FOR_PERFORMANCE
1316 bool "Optimize for performance"
1317 help
1318 This is the default optimization level for the kernel, building
1319 with the "-O2" compiler flag for best performance and most
1320 helpful compile-time warnings.
1321
1311config CC_OPTIMIZE_FOR_SIZE 1322config CC_OPTIMIZE_FOR_SIZE
1312 bool "Optimize for size" 1323 bool "Optimize for size"
1313 help 1324 help
@@ -1316,6 +1327,8 @@ config CC_OPTIMIZE_FOR_SIZE
1316 1327
1317 If unsure, say N. 1328 If unsure, say N.
1318 1329
1330endchoice
1331
1319config SYSCTL 1332config SYSCTL
1320 bool 1333 bool
1321 1334
@@ -1556,6 +1569,13 @@ config BPF_SYSCALL
1556 Enable the bpf() system call that allows to manipulate eBPF 1569 Enable the bpf() system call that allows to manipulate eBPF
1557 programs and maps via file descriptors. 1570 programs and maps via file descriptors.
1558 1571
1572config BPF_JIT_ALWAYS_ON
1573 bool "Permanently enable BPF JIT and remove BPF interpreter"
1574 depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
1575 help
1576 Enables BPF JIT and removes BPF interpreter to avoid
1577 speculative execution of BPF instructions by the interpreter
1578
1559config SHMEM 1579config SHMEM
1560 bool "Use full shmem filesystem" if EXPERT 1580 bool "Use full shmem filesystem" if EXPERT
1561 default y 1581 default y
diff --git a/ipc/msg.c b/ipc/msg.c
index c6521c205cb4..f993f441f852 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -742,7 +742,10 @@ static inline int convert_mode(long *msgtyp, int msgflg)
742 if (*msgtyp == 0) 742 if (*msgtyp == 0)
743 return SEARCH_ANY; 743 return SEARCH_ANY;
744 if (*msgtyp < 0) { 744 if (*msgtyp < 0) {
745 *msgtyp = -*msgtyp; 745 if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */
746 *msgtyp = LONG_MAX;
747 else
748 *msgtyp = -*msgtyp;
746 return SEARCH_LESSEQUAL; 749 return SEARCH_LESSEQUAL;
747 } 750 }
748 if (msgflg & MSG_EXCEPT) 751 if (msgflg & MSG_EXCEPT)
diff --git a/ipc/shm.c b/ipc/shm.c
index 4982a4e7f009..32974cfe5947 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -198,6 +198,12 @@ static int __shm_open(struct vm_area_struct *vma)
198 if (IS_ERR(shp)) 198 if (IS_ERR(shp))
199 return PTR_ERR(shp); 199 return PTR_ERR(shp);
200 200
201 if (shp->shm_file != sfd->file) {
202 /* ID was reused */
203 shm_unlock(shp);
204 return -EINVAL;
205 }
206
201 shp->shm_atim = get_seconds(); 207 shp->shm_atim = get_seconds();
202 shp->shm_lprid = task_tgid_vnr(current); 208 shp->shm_lprid = task_tgid_vnr(current);
203 shp->shm_nattch++; 209 shp->shm_nattch++;
@@ -414,8 +420,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
414 int ret; 420 int ret;
415 421
416 /* 422 /*
417 * In case of remap_file_pages() emulation, the file can represent 423 * In case of remap_file_pages() emulation, the file can represent an
418 * removed IPC ID: propogate shm_lock() error to caller. 424 * IPC ID that was removed, and possibly even reused by another shm
425 * segment already. Propagate this case as an error to caller.
419 */ 426 */
420 ret =__shm_open(vma); 427 ret =__shm_open(vma);
421 if (ret) 428 if (ret)
@@ -439,6 +446,7 @@ static int shm_release(struct inode *ino, struct file *file)
439 struct shm_file_data *sfd = shm_file_data(file); 446 struct shm_file_data *sfd = shm_file_data(file);
440 447
441 put_ipc_ns(sfd->ns); 448 put_ipc_ns(sfd->ns);
449 fput(sfd->file);
442 shm_file_data(file) = NULL; 450 shm_file_data(file) = NULL;
443 kfree(sfd); 451 kfree(sfd);
444 return 0; 452 return 0;
@@ -1105,14 +1113,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1105 goto out; 1113 goto out;
1106 else if ((addr = (ulong)shmaddr)) { 1114 else if ((addr = (ulong)shmaddr)) {
1107 if (addr & (shmlba - 1)) { 1115 if (addr & (shmlba - 1)) {
1108 /* 1116 if (shmflg & SHM_RND) {
1109 * Round down to the nearest multiple of shmlba. 1117 addr &= ~(shmlba - 1); /* round down */
1110 * For sane do_mmap_pgoff() parameters, avoid 1118
1111 * round downs that trigger nil-page and MAP_FIXED. 1119 /*
1112 */ 1120 * Ensure that the round-down is non-nil
1113 if ((shmflg & SHM_RND) && addr >= shmlba) 1121 * when remapping. This can happen for
1114 addr &= ~(shmlba - 1); 1122 * cases when addr < shmlba.
1115 else 1123 */
1124 if (!addr && (shmflg & SHM_REMAP))
1125 goto out;
1126 } else
1116#ifndef __ARCH_FORCE_SHMLBA 1127#ifndef __ARCH_FORCE_SHMLBA
1117 if (addr & ~PAGE_MASK) 1128 if (addr & ~PAGE_MASK)
1118#endif 1129#endif
@@ -1198,7 +1209,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1198 file->f_mapping = shp->shm_file->f_mapping; 1209 file->f_mapping = shp->shm_file->f_mapping;
1199 sfd->id = shp->shm_perm.id; 1210 sfd->id = shp->shm_perm.id;
1200 sfd->ns = get_ipc_ns(ns); 1211 sfd->ns = get_ipc_ns(ns);
1201 sfd->file = shp->shm_file; 1212 /*
1213 * We need to take a reference to the real shm file to prevent the
1214 * pointer from becoming stale in cases where the lifetime of the outer
1215 * file extends beyond that of the shm segment. It's not usually
1216 * possible, but it can happen during remap_file_pages() emulation as
1217 * that unmaps the memory, then does ->mmap() via file reference only.
1218 * We'll deny the ->mmap() if the shm segment was since removed, but to
1219 * detect shm ID reuse we need to compare the file pointers.
1220 */
1221 sfd->file = get_file(shp->shm_file);
1202 sfd->vm_ops = NULL; 1222 sfd->vm_ops = NULL;
1203 1223
1204 err = security_mmap_file(file, prot, flags); 1224 err = security_mmap_file(file, prot, flags);
diff --git a/kernel/async.c b/kernel/async.c
index 4c3773c0bf63..f1fd155abff6 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -84,20 +84,24 @@ static atomic_t entry_count;
84 84
85static async_cookie_t lowest_in_progress(struct async_domain *domain) 85static async_cookie_t lowest_in_progress(struct async_domain *domain)
86{ 86{
87 struct list_head *pending; 87 struct async_entry *first = NULL;
88 async_cookie_t ret = ASYNC_COOKIE_MAX; 88 async_cookie_t ret = ASYNC_COOKIE_MAX;
89 unsigned long flags; 89 unsigned long flags;
90 90
91 spin_lock_irqsave(&async_lock, flags); 91 spin_lock_irqsave(&async_lock, flags);
92 92
93 if (domain) 93 if (domain) {
94 pending = &domain->pending; 94 if (!list_empty(&domain->pending))
95 else 95 first = list_first_entry(&domain->pending,
96 pending = &async_global_pending; 96 struct async_entry, domain_list);
97 } else {
98 if (!list_empty(&async_global_pending))
99 first = list_first_entry(&async_global_pending,
100 struct async_entry, global_list);
101 }
97 102
98 if (!list_empty(pending)) 103 if (first)
99 ret = list_first_entry(pending, struct async_entry, 104 ret = first->cookie;
100 domain_list)->cookie;
101 105
102 spin_unlock_irqrestore(&async_lock, flags); 106 spin_unlock_irqrestore(&async_lock, flags);
103 return ret; 107 return ret;
diff --git a/kernel/audit.c b/kernel/audit.c
index 41f9a38bb800..bdf0cf463815 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -64,7 +64,6 @@
64#include <linux/security.h> 64#include <linux/security.h>
65#endif 65#endif
66#include <linux/freezer.h> 66#include <linux/freezer.h>
67#include <linux/tty.h>
68#include <linux/pid_namespace.h> 67#include <linux/pid_namespace.h>
69#include <net/netns/generic.h> 68#include <net/netns/generic.h>
70 69
@@ -745,6 +744,8 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
745 return; 744 return;
746 745
747 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE); 746 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
747 if (!ab)
748 return;
748 audit_log_task_info(ab, current); 749 audit_log_task_info(ab, current);
749 audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d", 750 audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
750 audit_feature_names[which], !!old_feature, !!new_feature, 751 audit_feature_names[which], !!old_feature, !!new_feature,
@@ -1876,21 +1877,14 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
1876{ 1877{
1877 const struct cred *cred; 1878 const struct cred *cred;
1878 char comm[sizeof(tsk->comm)]; 1879 char comm[sizeof(tsk->comm)];
1879 char *tty; 1880 struct tty_struct *tty;
1880 1881
1881 if (!ab) 1882 if (!ab)
1882 return; 1883 return;
1883 1884
1884 /* tsk == current */ 1885 /* tsk == current */
1885 cred = current_cred(); 1886 cred = current_cred();
1886 1887 tty = audit_get_tty(tsk);
1887 spin_lock_irq(&tsk->sighand->siglock);
1888 if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
1889 tty = tsk->signal->tty->name;
1890 else
1891 tty = "(none)";
1892 spin_unlock_irq(&tsk->sighand->siglock);
1893
1894 audit_log_format(ab, 1888 audit_log_format(ab,
1895 " ppid=%d pid=%d auid=%u uid=%u gid=%u" 1889 " ppid=%d pid=%d auid=%u uid=%u gid=%u"
1896 " euid=%u suid=%u fsuid=%u" 1890 " euid=%u suid=%u fsuid=%u"
@@ -1906,11 +1900,11 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
1906 from_kgid(&init_user_ns, cred->egid), 1900 from_kgid(&init_user_ns, cred->egid),
1907 from_kgid(&init_user_ns, cred->sgid), 1901 from_kgid(&init_user_ns, cred->sgid),
1908 from_kgid(&init_user_ns, cred->fsgid), 1902 from_kgid(&init_user_ns, cred->fsgid),
1909 tty, audit_get_sessionid(tsk)); 1903 tty ? tty_name(tty) : "(none)",
1910 1904 audit_get_sessionid(tsk));
1905 audit_put_tty(tty);
1911 audit_log_format(ab, " comm="); 1906 audit_log_format(ab, " comm=");
1912 audit_log_untrustedstring(ab, get_task_comm(comm, tsk)); 1907 audit_log_untrustedstring(ab, get_task_comm(comm, tsk));
1913
1914 audit_log_d_path_exe(ab, tsk->mm); 1908 audit_log_d_path_exe(ab, tsk->mm);
1915 audit_log_task_context(ab); 1909 audit_log_task_context(ab);
1916} 1910}
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index b8ff9e193753..b57f929f1b46 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -406,7 +406,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
406 return -EINVAL; 406 return -EINVAL;
407 break; 407 break;
408 case AUDIT_EXE: 408 case AUDIT_EXE:
409 if (f->op != Audit_equal) 409 if (f->op != Audit_not_equal && f->op != Audit_equal)
410 return -EINVAL; 410 return -EINVAL;
411 if (entry->rule.listnr != AUDIT_FILTER_EXIT) 411 if (entry->rule.listnr != AUDIT_FILTER_EXIT)
412 return -EINVAL; 412 return -EINVAL;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 48f45987dc6c..0fe8b337291a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -470,6 +470,8 @@ static int audit_filter_rules(struct task_struct *tsk,
470 break; 470 break;
471 case AUDIT_EXE: 471 case AUDIT_EXE:
472 result = audit_exe_compare(tsk, rule->exe); 472 result = audit_exe_compare(tsk, rule->exe);
473 if (f->op == Audit_not_equal)
474 result = !result;
473 break; 475 break;
474 case AUDIT_UID: 476 case AUDIT_UID:
475 result = audit_uid_comparator(cred->uid, f->op, f->uid); 477 result = audit_uid_comparator(cred->uid, f->op, f->uid);
@@ -1976,21 +1978,26 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
1976{ 1978{
1977 struct audit_buffer *ab; 1979 struct audit_buffer *ab;
1978 uid_t uid, oldloginuid, loginuid; 1980 uid_t uid, oldloginuid, loginuid;
1981 struct tty_struct *tty;
1979 1982
1980 if (!audit_enabled) 1983 if (!audit_enabled)
1981 return; 1984 return;
1982 1985
1986 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
1987 if (!ab)
1988 return;
1989
1983 uid = from_kuid(&init_user_ns, task_uid(current)); 1990 uid = from_kuid(&init_user_ns, task_uid(current));
1984 oldloginuid = from_kuid(&init_user_ns, koldloginuid); 1991 oldloginuid = from_kuid(&init_user_ns, koldloginuid);
1985 loginuid = from_kuid(&init_user_ns, kloginuid), 1992 loginuid = from_kuid(&init_user_ns, kloginuid),
1993 tty = audit_get_tty(current);
1986 1994
1987 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
1988 if (!ab)
1989 return;
1990 audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid); 1995 audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
1991 audit_log_task_context(ab); 1996 audit_log_task_context(ab);
1992 audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d", 1997 audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d",
1993 oldloginuid, loginuid, oldsessionid, sessionid, !rc); 1998 oldloginuid, loginuid, tty ? tty_name(tty) : "(none)",
1999 oldsessionid, sessionid, !rc);
2000 audit_put_tty(tty);
1994 audit_log_end(ab); 2001 audit_log_end(ab);
1995} 2002}
1996 2003
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 3608fa1aec8a..0eb11b4ac4c7 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -102,7 +102,7 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
102static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) 102static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
103{ 103{
104 struct bpf_array *array = container_of(map, struct bpf_array, map); 104 struct bpf_array *array = container_of(map, struct bpf_array, map);
105 u32 index = *(u32 *)key; 105 u32 index = key ? *(u32 *)key : U32_MAX;
106 u32 *next = (u32 *)next_key; 106 u32 *next = (u32 *)next_key;
107 107
108 if (index >= array->map.max_entries) { 108 if (index >= array->map.max_entries) {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 3fd76cf0c21e..eb52d11fdaa7 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -256,6 +256,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
256} 256}
257EXPORT_SYMBOL_GPL(__bpf_call_base); 257EXPORT_SYMBOL_GPL(__bpf_call_base);
258 258
259#ifndef CONFIG_BPF_JIT_ALWAYS_ON
259/** 260/**
260 * __bpf_prog_run - run eBPF program on a given context 261 * __bpf_prog_run - run eBPF program on a given context
261 * @ctx: is the data we are operating on 262 * @ctx: is the data we are operating on
@@ -443,7 +444,7 @@ select_insn:
443 DST = tmp; 444 DST = tmp;
444 CONT; 445 CONT;
445 ALU_MOD_X: 446 ALU_MOD_X:
446 if (unlikely(SRC == 0)) 447 if (unlikely((u32)SRC == 0))
447 return 0; 448 return 0;
448 tmp = (u32) DST; 449 tmp = (u32) DST;
449 DST = do_div(tmp, (u32) SRC); 450 DST = do_div(tmp, (u32) SRC);
@@ -462,7 +463,7 @@ select_insn:
462 DST = div64_u64(DST, SRC); 463 DST = div64_u64(DST, SRC);
463 CONT; 464 CONT;
464 ALU_DIV_X: 465 ALU_DIV_X:
465 if (unlikely(SRC == 0)) 466 if (unlikely((u32)SRC == 0))
466 return 0; 467 return 0;
467 tmp = (u32) DST; 468 tmp = (u32) DST;
468 do_div(tmp, (u32) SRC); 469 do_div(tmp, (u32) SRC);
@@ -517,7 +518,7 @@ select_insn:
517 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; 518 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
518 struct bpf_array *array = container_of(map, struct bpf_array, map); 519 struct bpf_array *array = container_of(map, struct bpf_array, map);
519 struct bpf_prog *prog; 520 struct bpf_prog *prog;
520 u64 index = BPF_R3; 521 u32 index = BPF_R3;
521 522
522 if (unlikely(index >= array->map.max_entries)) 523 if (unlikely(index >= array->map.max_entries))
523 goto out; 524 goto out;
@@ -725,6 +726,13 @@ load_byte:
725 return 0; 726 return 0;
726} 727}
727 728
729#else
730static unsigned int __bpf_prog_ret0(void *ctx, const struct bpf_insn *insn)
731{
732 return 0;
733}
734#endif
735
728bool bpf_prog_array_compatible(struct bpf_array *array, 736bool bpf_prog_array_compatible(struct bpf_array *array,
729 const struct bpf_prog *fp) 737 const struct bpf_prog *fp)
730{ 738{
@@ -771,9 +779,23 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
771 */ 779 */
772int bpf_prog_select_runtime(struct bpf_prog *fp) 780int bpf_prog_select_runtime(struct bpf_prog *fp)
773{ 781{
782#ifndef CONFIG_BPF_JIT_ALWAYS_ON
774 fp->bpf_func = (void *) __bpf_prog_run; 783 fp->bpf_func = (void *) __bpf_prog_run;
775 784#else
785 fp->bpf_func = (void *) __bpf_prog_ret0;
786#endif
787
788 /* eBPF JITs can rewrite the program in case constant
789 * blinding is active. However, in case of error during
790 * blinding, bpf_int_jit_compile() must always return a
791 * valid program, which in this case would simply not
792 * be JITed, but falls back to the interpreter.
793 */
776 bpf_int_jit_compile(fp); 794 bpf_int_jit_compile(fp);
795#ifdef CONFIG_BPF_JIT_ALWAYS_ON
796 if (!fp->jited)
797 return -ENOTSUPP;
798#endif
777 bpf_prog_lock_ro(fp); 799 bpf_prog_lock_ro(fp);
778 800
779 /* The tail call compatibility check can only be done at 801 /* The tail call compatibility check can only be done at
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 34777b3746fa..a35abe048239 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -169,12 +169,15 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
169 struct hlist_head *head; 169 struct hlist_head *head;
170 struct htab_elem *l, *next_l; 170 struct htab_elem *l, *next_l;
171 u32 hash, key_size; 171 u32 hash, key_size;
172 int i; 172 int i = 0;
173 173
174 WARN_ON_ONCE(!rcu_read_lock_held()); 174 WARN_ON_ONCE(!rcu_read_lock_held());
175 175
176 key_size = map->key_size; 176 key_size = map->key_size;
177 177
178 if (!key)
179 goto find_first_elem;
180
178 hash = htab_map_hash(key, key_size); 181 hash = htab_map_hash(key, key_size);
179 182
180 head = select_bucket(htab, hash); 183 head = select_bucket(htab, hash);
@@ -182,10 +185,8 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
182 /* lookup the key */ 185 /* lookup the key */
183 l = lookup_elem_raw(head, hash, key, key_size); 186 l = lookup_elem_raw(head, hash, key, key_size);
184 187
185 if (!l) { 188 if (!l)
186 i = 0;
187 goto find_first_elem; 189 goto find_first_elem;
188 }
189 190
190 /* key was found, get next key in the same bucket */ 191 /* key was found, get next key in the same bucket */
191 next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)), 192 next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 424accd20c2d..4b9bbfe764e8 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -390,14 +390,18 @@ static int map_get_next_key(union bpf_attr *attr)
390 if (IS_ERR(map)) 390 if (IS_ERR(map))
391 return PTR_ERR(map); 391 return PTR_ERR(map);
392 392
393 err = -ENOMEM; 393 if (ukey) {
394 key = kmalloc(map->key_size, GFP_USER); 394 err = -ENOMEM;
395 if (!key) 395 key = kmalloc(map->key_size, GFP_USER);
396 goto err_put; 396 if (!key)
397 397 goto err_put;
398 err = -EFAULT; 398
399 if (copy_from_user(key, ukey, map->key_size) != 0) 399 err = -EFAULT;
400 goto free_key; 400 if (copy_from_user(key, ukey, map->key_size) != 0)
401 goto free_key;
402 } else {
403 key = NULL;
404 }
401 405
402 err = -ENOMEM; 406 err = -ENOMEM;
403 next_key = kmalloc(map->key_size, GFP_USER); 407 next_key = kmalloc(map->key_size, GFP_USER);
@@ -673,7 +677,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
673 union bpf_attr attr = {}; 677 union bpf_attr attr = {};
674 int err; 678 int err;
675 679
676 if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled) 680 if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
677 return -EPERM; 681 return -EPERM;
678 682
679 if (!access_ok(VERIFY_READ, uattr, 1)) 683 if (!access_ok(VERIFY_READ, uattr, 1))
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 014c2d759916..35dfa9e9d69e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -191,6 +191,7 @@ struct bpf_insn_aux_data {
191 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ 191 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
192 struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ 192 struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
193 }; 193 };
194 bool seen; /* this insn was processed by the verifier */
194}; 195};
195 196
196#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ 197#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -682,6 +683,13 @@ static bool is_pointer_value(struct verifier_env *env, int regno)
682 } 683 }
683} 684}
684 685
686static bool is_ctx_reg(struct verifier_env *env, int regno)
687{
688 const struct reg_state *reg = &env->cur_state.regs[regno];
689
690 return reg->type == PTR_TO_CTX;
691}
692
685/* check whether memory at (regno + off) is accessible for t = (read | write) 693/* check whether memory at (regno + off) is accessible for t = (read | write)
686 * if t==write, value_regno is a register which value is stored into memory 694 * if t==write, value_regno is a register which value is stored into memory
687 * if t==read, value_regno is a register which will receive the value from memory 695 * if t==read, value_regno is a register which will receive the value from memory
@@ -778,6 +786,12 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
778 return -EACCES; 786 return -EACCES;
779 } 787 }
780 788
789 if (is_ctx_reg(env, insn->dst_reg)) {
790 verbose("BPF_XADD stores into R%d context is not allowed\n",
791 insn->dst_reg);
792 return -EACCES;
793 }
794
781 /* check whether atomic_add can read the memory */ 795 /* check whether atomic_add can read the memory */
782 err = check_mem_access(env, insn->dst_reg, insn->off, 796 err = check_mem_access(env, insn->dst_reg, insn->off,
783 BPF_SIZE(insn->code), BPF_READ, -1); 797 BPF_SIZE(insn->code), BPF_READ, -1);
@@ -1121,7 +1135,8 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
1121 regs[insn->dst_reg].type = UNKNOWN_VALUE; 1135 regs[insn->dst_reg].type = UNKNOWN_VALUE;
1122 regs[insn->dst_reg].map_ptr = NULL; 1136 regs[insn->dst_reg].map_ptr = NULL;
1123 } 1137 }
1124 } else { 1138 } else if (BPF_CLASS(insn->code) == BPF_ALU64 ||
1139 insn->imm >= 0) {
1125 /* case: R = imm 1140 /* case: R = imm
1126 * remember the value we stored into this reg 1141 * remember the value we stored into this reg
1127 */ 1142 */
@@ -1164,6 +1179,11 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
1164 return -EINVAL; 1179 return -EINVAL;
1165 } 1180 }
1166 1181
1182 if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
1183 verbose("BPF_ARSH not supported for 32 bit ALU\n");
1184 return -EINVAL;
1185 }
1186
1167 if ((opcode == BPF_LSH || opcode == BPF_RSH || 1187 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
1168 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 1188 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
1169 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 1189 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
@@ -1793,6 +1813,7 @@ static int do_check(struct verifier_env *env)
1793 print_bpf_insn(env, insn); 1813 print_bpf_insn(env, insn);
1794 } 1814 }
1795 1815
1816 env->insn_aux_data[insn_idx].seen = true;
1796 if (class == BPF_ALU || class == BPF_ALU64) { 1817 if (class == BPF_ALU || class == BPF_ALU64) {
1797 err = check_alu_op(env, insn); 1818 err = check_alu_op(env, insn);
1798 if (err) 1819 if (err)
@@ -1902,6 +1923,12 @@ static int do_check(struct verifier_env *env)
1902 if (err) 1923 if (err)
1903 return err; 1924 return err;
1904 1925
1926 if (is_ctx_reg(env, insn->dst_reg)) {
1927 verbose("BPF_ST stores into R%d context is not allowed\n",
1928 insn->dst_reg);
1929 return -EACCES;
1930 }
1931
1905 /* check that memory (dst_reg + off) is writeable */ 1932 /* check that memory (dst_reg + off) is writeable */
1906 err = check_mem_access(env, insn->dst_reg, insn->off, 1933 err = check_mem_access(env, insn->dst_reg, insn->off,
1907 BPF_SIZE(insn->code), BPF_WRITE, 1934 BPF_SIZE(insn->code), BPF_WRITE,
@@ -1988,6 +2015,7 @@ process_bpf_exit:
1988 return err; 2015 return err;
1989 2016
1990 insn_idx++; 2017 insn_idx++;
2018 env->insn_aux_data[insn_idx].seen = true;
1991 } else { 2019 } else {
1992 verbose("invalid BPF_LD mode\n"); 2020 verbose("invalid BPF_LD mode\n");
1993 return -EINVAL; 2021 return -EINVAL;
@@ -2073,7 +2101,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
2073 /* hold the map. If the program is rejected by verifier, 2101 /* hold the map. If the program is rejected by verifier,
2074 * the map will be released by release_maps() or it 2102 * the map will be released by release_maps() or it
2075 * will be used by the valid program until it's unloaded 2103 * will be used by the valid program until it's unloaded
2076 * and all maps are released in free_bpf_prog_info() 2104 * and all maps are released in free_used_maps()
2077 */ 2105 */
2078 map = bpf_map_inc(map, false); 2106 map = bpf_map_inc(map, false);
2079 if (IS_ERR(map)) { 2107 if (IS_ERR(map)) {
@@ -2125,6 +2153,7 @@ static int adjust_insn_aux_data(struct verifier_env *env, u32 prog_len,
2125 u32 off, u32 cnt) 2153 u32 off, u32 cnt)
2126{ 2154{
2127 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; 2155 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
2156 int i;
2128 2157
2129 if (cnt == 1) 2158 if (cnt == 1)
2130 return 0; 2159 return 0;
@@ -2134,6 +2163,8 @@ static int adjust_insn_aux_data(struct verifier_env *env, u32 prog_len,
2134 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 2163 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
2135 memcpy(new_data + off + cnt - 1, old_data + off, 2164 memcpy(new_data + off + cnt - 1, old_data + off,
2136 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 2165 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
2166 for (i = off; i < off + cnt - 1; i++)
2167 new_data[i].seen = true;
2137 env->insn_aux_data = new_data; 2168 env->insn_aux_data = new_data;
2138 vfree(old_data); 2169 vfree(old_data);
2139 return 0; 2170 return 0;
@@ -2152,6 +2183,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct verifier_env *env, u32 off,
2152 return new_prog; 2183 return new_prog;
2153} 2184}
2154 2185
2186/* The verifier does more data flow analysis than llvm and will not explore
2187 * branches that are dead at run time. Malicious programs can have dead code
2188 * too. Therefore replace all dead at-run-time code with nops.
2189 */
2190static void sanitize_dead_code(struct verifier_env *env)
2191{
2192 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
2193 struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
2194 struct bpf_insn *insn = env->prog->insnsi;
2195 const int insn_cnt = env->prog->len;
2196 int i;
2197
2198 for (i = 0; i < insn_cnt; i++) {
2199 if (aux_data[i].seen)
2200 continue;
2201 memcpy(insn + i, &nop, sizeof(nop));
2202 }
2203}
2204
2155/* convert load instructions that access fields of 'struct __sk_buff' 2205/* convert load instructions that access fields of 'struct __sk_buff'
2156 * into sequence of instructions that access fields of 'struct sk_buff' 2206 * into sequence of instructions that access fields of 'struct sk_buff'
2157 */ 2207 */
@@ -2218,6 +2268,24 @@ static int fixup_bpf_calls(struct verifier_env *env)
2218 int i, cnt, delta = 0; 2268 int i, cnt, delta = 0;
2219 2269
2220 for (i = 0; i < insn_cnt; i++, insn++) { 2270 for (i = 0; i < insn_cnt; i++, insn++) {
2271 if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
2272 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
2273 /* due to JIT bugs clear upper 32-bits of src register
2274 * before div/mod operation
2275 */
2276 insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
2277 insn_buf[1] = *insn;
2278 cnt = 2;
2279 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
2280 if (!new_prog)
2281 return -ENOMEM;
2282
2283 delta += cnt - 1;
2284 env->prog = prog = new_prog;
2285 insn = new_prog->insnsi + i + delta;
2286 continue;
2287 }
2288
2221 if (insn->code != (BPF_JMP | BPF_CALL)) 2289 if (insn->code != (BPF_JMP | BPF_CALL))
2222 continue; 2290 continue;
2223 2291
@@ -2371,6 +2439,9 @@ skip_full_check:
2371 free_states(env); 2439 free_states(env);
2372 2440
2373 if (ret == 0) 2441 if (ret == 0)
2442 sanitize_dead_code(env);
2443
2444 if (ret == 0)
2374 /* program is valid, convert *(u32*)(ctx + off) accesses */ 2445 /* program is valid, convert *(u32*)(ctx + off) accesses */
2375 ret = convert_ctx_accesses(env); 2446 ret = convert_ctx_accesses(env);
2376 2447
@@ -2416,7 +2487,7 @@ free_log_buf:
2416 vfree(log_buf); 2487 vfree(log_buf);
2417 if (!env->prog->aux->used_maps) 2488 if (!env->prog->aux->used_maps)
2418 /* if we didn't copy map pointers into bpf_prog_info, release 2489 /* if we didn't copy map pointers into bpf_prog_info, release
2419 * them now. Otherwise free_bpf_prog_info() will release them. 2490 * them now. Otherwise free_used_maps() will release them.
2420 */ 2491 */
2421 release_maps(env); 2492 release_maps(env);
2422 *prog = env->prog; 2493 *prog = env->prog;
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 4121345498e0..ebc52c7bd8a6 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1564,6 +1564,7 @@ static int kdb_md(int argc, const char **argv)
1564 int symbolic = 0; 1564 int symbolic = 0;
1565 int valid = 0; 1565 int valid = 0;
1566 int phys = 0; 1566 int phys = 0;
1567 int raw = 0;
1567 1568
1568 kdbgetintenv("MDCOUNT", &mdcount); 1569 kdbgetintenv("MDCOUNT", &mdcount);
1569 kdbgetintenv("RADIX", &radix); 1570 kdbgetintenv("RADIX", &radix);
@@ -1573,9 +1574,10 @@ static int kdb_md(int argc, const char **argv)
1573 repeat = mdcount * 16 / bytesperword; 1574 repeat = mdcount * 16 / bytesperword;
1574 1575
1575 if (strcmp(argv[0], "mdr") == 0) { 1576 if (strcmp(argv[0], "mdr") == 0) {
1576 if (argc != 2) 1577 if (argc == 2 || (argc == 0 && last_addr != 0))
1578 valid = raw = 1;
1579 else
1577 return KDB_ARGCOUNT; 1580 return KDB_ARGCOUNT;
1578 valid = 1;
1579 } else if (isdigit(argv[0][2])) { 1581 } else if (isdigit(argv[0][2])) {
1580 bytesperword = (int)(argv[0][2] - '0'); 1582 bytesperword = (int)(argv[0][2] - '0');
1581 if (bytesperword == 0) { 1583 if (bytesperword == 0) {
@@ -1611,7 +1613,10 @@ static int kdb_md(int argc, const char **argv)
1611 radix = last_radix; 1613 radix = last_radix;
1612 bytesperword = last_bytesperword; 1614 bytesperword = last_bytesperword;
1613 repeat = last_repeat; 1615 repeat = last_repeat;
1614 mdcount = ((repeat * bytesperword) + 15) / 16; 1616 if (raw)
1617 mdcount = repeat;
1618 else
1619 mdcount = ((repeat * bytesperword) + 15) / 16;
1615 } 1620 }
1616 1621
1617 if (argc) { 1622 if (argc) {
@@ -1628,7 +1633,10 @@ static int kdb_md(int argc, const char **argv)
1628 diag = kdbgetularg(argv[nextarg], &val); 1633 diag = kdbgetularg(argv[nextarg], &val);
1629 if (!diag) { 1634 if (!diag) {
1630 mdcount = (int) val; 1635 mdcount = (int) val;
1631 repeat = mdcount * 16 / bytesperword; 1636 if (raw)
1637 repeat = mdcount;
1638 else
1639 repeat = mdcount * 16 / bytesperword;
1632 } 1640 }
1633 } 1641 }
1634 if (argc >= nextarg+1) { 1642 if (argc >= nextarg+1) {
@@ -1638,8 +1646,15 @@ static int kdb_md(int argc, const char **argv)
1638 } 1646 }
1639 } 1647 }
1640 1648
1641 if (strcmp(argv[0], "mdr") == 0) 1649 if (strcmp(argv[0], "mdr") == 0) {
1642 return kdb_mdr(addr, mdcount); 1650 int ret;
1651 last_addr = addr;
1652 ret = kdb_mdr(addr, mdcount);
1653 last_addr += mdcount;
1654 last_repeat = mdcount;
1655 last_bytesperword = bytesperword; // to make REPEAT happy
1656 return ret;
1657 }
1643 1658
1644 switch (radix) { 1659 switch (radix) {
1645 case 10: 1660 case 10:
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 9c418002b8c1..75f835d353db 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -107,14 +107,8 @@ int get_callchain_buffers(void)
107 goto exit; 107 goto exit;
108 } 108 }
109 109
110 if (count > 1) { 110 if (count == 1)
111 /* If the allocation failed, give up */ 111 err = alloc_callchain_buffers();
112 if (!callchain_cpus_entries)
113 err = -ENOMEM;
114 goto exit;
115 }
116
117 err = alloc_callchain_buffers();
118exit: 112exit:
119 if (err) 113 if (err)
120 atomic_dec(&nr_callchain_events); 114 atomic_dec(&nr_callchain_events);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 936419f24652..0d800be8959a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -229,7 +229,7 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
229 void __user *buffer, size_t *lenp, 229 void __user *buffer, size_t *lenp,
230 loff_t *ppos) 230 loff_t *ppos)
231{ 231{
232 int ret = proc_dointvec(table, write, buffer, lenp, ppos); 232 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
233 233
234 if (ret || !write) 234 if (ret || !write)
235 return ret; 235 return ret;
@@ -419,9 +419,15 @@ static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
419 419
420static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) 420static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
421{ 421{
422 struct perf_cgroup *cgrp_out = cpuctx->cgrp; 422 struct perf_cgroup *cgrp = cpuctx->cgrp;
423 if (cgrp_out) 423 struct cgroup_subsys_state *css;
424 __update_cgrp_time(cgrp_out); 424
425 if (cgrp) {
426 for (css = &cgrp->css; css; css = css->parent) {
427 cgrp = container_of(css, struct perf_cgroup, css);
428 __update_cgrp_time(cgrp);
429 }
430 }
425} 431}
426 432
427static inline void update_cgrp_time_from_event(struct perf_event *event) 433static inline void update_cgrp_time_from_event(struct perf_event *event)
@@ -449,6 +455,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
449{ 455{
450 struct perf_cgroup *cgrp; 456 struct perf_cgroup *cgrp;
451 struct perf_cgroup_info *info; 457 struct perf_cgroup_info *info;
458 struct cgroup_subsys_state *css;
452 459
453 /* 460 /*
454 * ctx->lock held by caller 461 * ctx->lock held by caller
@@ -459,8 +466,12 @@ perf_cgroup_set_timestamp(struct task_struct *task,
459 return; 466 return;
460 467
461 cgrp = perf_cgroup_from_task(task, ctx); 468 cgrp = perf_cgroup_from_task(task, ctx);
462 info = this_cpu_ptr(cgrp->info); 469
463 info->timestamp = ctx->timestamp; 470 for (css = &cgrp->css; css; css = css->parent) {
471 cgrp = container_of(css, struct perf_cgroup, css);
472 info = this_cpu_ptr(cgrp->info);
473 info->timestamp = ctx->timestamp;
474 }
464} 475}
465 476
466#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */ 477#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
@@ -5322,9 +5333,6 @@ static void perf_output_read_one(struct perf_output_handle *handle,
5322 __output_copy(handle, values, n * sizeof(u64)); 5333 __output_copy(handle, values, n * sizeof(u64));
5323} 5334}
5324 5335
5325/*
5326 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
5327 */
5328static void perf_output_read_group(struct perf_output_handle *handle, 5336static void perf_output_read_group(struct perf_output_handle *handle,
5329 struct perf_event *event, 5337 struct perf_event *event,
5330 u64 enabled, u64 running) 5338 u64 enabled, u64 running)
@@ -5342,7 +5350,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
5342 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 5350 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
5343 values[n++] = running; 5351 values[n++] = running;
5344 5352
5345 if (leader != event) 5353 if ((leader != event) &&
5354 (leader->state == PERF_EVENT_STATE_ACTIVE))
5346 leader->pmu->read(leader); 5355 leader->pmu->read(leader);
5347 5356
5348 values[n++] = perf_event_count(leader); 5357 values[n++] = perf_event_count(leader);
@@ -5369,6 +5378,13 @@ static void perf_output_read_group(struct perf_output_handle *handle,
5369#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ 5378#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
5370 PERF_FORMAT_TOTAL_TIME_RUNNING) 5379 PERF_FORMAT_TOTAL_TIME_RUNNING)
5371 5380
5381/*
5382 * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
5383 *
5384 * The problem is that its both hard and excessively expensive to iterate the
5385 * child list, not to mention that its impossible to IPI the children running
5386 * on another CPU, from interrupt/NMI context.
5387 */
5372static void perf_output_read(struct perf_output_handle *handle, 5388static void perf_output_read(struct perf_output_handle *handle,
5373 struct perf_event *event) 5389 struct perf_event *event)
5374{ 5390{
@@ -8093,9 +8109,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
8093 local64_set(&hwc->period_left, hwc->sample_period); 8109 local64_set(&hwc->period_left, hwc->sample_period);
8094 8110
8095 /* 8111 /*
8096 * we currently do not support PERF_FORMAT_GROUP on inherited events 8112 * We currently do not support PERF_SAMPLE_READ on inherited events.
8113 * See perf_output_read().
8097 */ 8114 */
8098 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 8115 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
8099 goto err_ns; 8116 goto err_ns;
8100 8117
8101 if (!has_branch_stack(event)) 8118 if (!has_branch_stack(event))
@@ -8263,9 +8280,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
8263 * __u16 sample size limit. 8280 * __u16 sample size limit.
8264 */ 8281 */
8265 if (attr->sample_stack_user >= USHRT_MAX) 8282 if (attr->sample_stack_user >= USHRT_MAX)
8266 ret = -EINVAL; 8283 return -EINVAL;
8267 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) 8284 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
8268 ret = -EINVAL; 8285 return -EINVAL;
8269 } 8286 }
8270 8287
8271 if (attr->sample_type & PERF_SAMPLE_REGS_INTR) 8288 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 92ce5f4ccc26..a27245fdcd81 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -427,16 +427,9 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
427 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint 427 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
428 * @bp: the breakpoint structure to modify 428 * @bp: the breakpoint structure to modify
429 * @attr: new breakpoint attributes 429 * @attr: new breakpoint attributes
430 * @triggered: callback to trigger when we hit the breakpoint
431 * @tsk: pointer to 'task_struct' of the process to which the address belongs
432 */ 430 */
433int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) 431int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
434{ 432{
435 u64 old_addr = bp->attr.bp_addr;
436 u64 old_len = bp->attr.bp_len;
437 int old_type = bp->attr.bp_type;
438 int err = 0;
439
440 /* 433 /*
441 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it 434 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
442 * will not be possible to raise IPIs that invoke __perf_event_disable. 435 * will not be possible to raise IPIs that invoke __perf_event_disable.
@@ -451,27 +444,18 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
451 bp->attr.bp_addr = attr->bp_addr; 444 bp->attr.bp_addr = attr->bp_addr;
452 bp->attr.bp_type = attr->bp_type; 445 bp->attr.bp_type = attr->bp_type;
453 bp->attr.bp_len = attr->bp_len; 446 bp->attr.bp_len = attr->bp_len;
447 bp->attr.disabled = 1;
454 448
455 if (attr->disabled) 449 if (!attr->disabled) {
456 goto end; 450 int err = validate_hw_breakpoint(bp);
457
458 err = validate_hw_breakpoint(bp);
459 if (!err)
460 perf_event_enable(bp);
461 451
462 if (err) { 452 if (err)
463 bp->attr.bp_addr = old_addr; 453 return err;
464 bp->attr.bp_type = old_type;
465 bp->attr.bp_len = old_len;
466 if (!bp->attr.disabled)
467 perf_event_enable(bp);
468 454
469 return err; 455 perf_event_enable(bp);
456 bp->attr.disabled = 0;
470 } 457 }
471 458
472end:
473 bp->attr.disabled = attr->disabled;
474
475 return 0; 459 return 0;
476} 460}
477EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); 461EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 8c60a4eb4080..f4b9a369c8c3 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/circ_buf.h> 15#include <linux/circ_buf.h>
16#include <linux/poll.h> 16#include <linux/poll.h>
17#include <linux/nospec.h>
17 18
18#include "internal.h" 19#include "internal.h"
19 20
@@ -781,8 +782,10 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
781 return NULL; 782 return NULL;
782 783
783 /* AUX space */ 784 /* AUX space */
784 if (pgoff >= rb->aux_pgoff) 785 if (pgoff >= rb->aux_pgoff) {
785 return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]); 786 int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
787 return virt_to_page(rb->aux_pages[aux_pgoff]);
788 }
786 } 789 }
787 790
788 return __perf_mmap_to_page(rb, pgoff); 791 return __perf_mmap_to_page(rb, pgoff);
diff --git a/kernel/exit.c b/kernel/exit.c
index ffba5df4abd5..f20e6339761b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1608,6 +1608,10 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
1608 __WNOTHREAD|__WCLONE|__WALL)) 1608 __WNOTHREAD|__WCLONE|__WALL))
1609 return -EINVAL; 1609 return -EINVAL;
1610 1610
1611 /* -INT_MIN is not defined */
1612 if (upid == INT_MIN)
1613 return -ESRCH;
1614
1611 if (upid == -1) 1615 if (upid == -1)
1612 type = PIDTYPE_MAX; 1616 type = PIDTYPE_MAX;
1613 else if (upid < 0) { 1617 else if (upid < 0) {
diff --git a/kernel/futex.c b/kernel/futex.c
index a09c1dd1f659..aedb36c0fd92 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -470,6 +470,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
470 unsigned long address = (unsigned long)uaddr; 470 unsigned long address = (unsigned long)uaddr;
471 struct mm_struct *mm = current->mm; 471 struct mm_struct *mm = current->mm;
472 struct page *page, *page_head; 472 struct page *page, *page_head;
473 struct address_space *mapping;
473 int err, ro = 0; 474 int err, ro = 0;
474 475
475 /* 476 /*
@@ -555,7 +556,19 @@ again:
555 } 556 }
556#endif 557#endif
557 558
558 lock_page(page_head); 559 /*
560 * The treatment of mapping from this point on is critical. The page
561 * lock protects many things but in this context the page lock
562 * stabilizes mapping, prevents inode freeing in the shared
563 * file-backed region case and guards against movement to swap cache.
564 *
565 * Strictly speaking the page lock is not needed in all cases being
566 * considered here and page lock forces unnecessarily serialization
567 * From this point on, mapping will be re-verified if necessary and
568 * page lock will be acquired only if it is unavoidable
569 */
570
571 mapping = READ_ONCE(page_head->mapping);
559 572
560 /* 573 /*
561 * If page_head->mapping is NULL, then it cannot be a PageAnon 574 * If page_head->mapping is NULL, then it cannot be a PageAnon
@@ -572,18 +585,31 @@ again:
572 * shmem_writepage move it from filecache to swapcache beneath us: 585 * shmem_writepage move it from filecache to swapcache beneath us:
573 * an unlikely race, but we do need to retry for page_head->mapping. 586 * an unlikely race, but we do need to retry for page_head->mapping.
574 */ 587 */
575 if (!page_head->mapping) { 588 if (unlikely(!mapping)) {
576 int shmem_swizzled = PageSwapCache(page_head); 589 int shmem_swizzled;
590
591 /*
592 * Page lock is required to identify which special case above
593 * applies. If this is really a shmem page then the page lock
594 * will prevent unexpected transitions.
595 */
596 lock_page(page);
597 shmem_swizzled = PageSwapCache(page) || page->mapping;
577 unlock_page(page_head); 598 unlock_page(page_head);
578 put_page(page_head); 599 put_page(page_head);
600
579 if (shmem_swizzled) 601 if (shmem_swizzled)
580 goto again; 602 goto again;
603
581 return -EFAULT; 604 return -EFAULT;
582 } 605 }
583 606
584 /* 607 /*
585 * Private mappings are handled in a simple way. 608 * Private mappings are handled in a simple way.
586 * 609 *
610 * If the futex key is stored on an anonymous page, then the associated
611 * object is the mm which is implicitly pinned by the calling process.
612 *
587 * NOTE: When userspace waits on a MAP_SHARED mapping, even if 613 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
588 * it's a read-only handle, it's expected that futexes attach to 614 * it's a read-only handle, it's expected that futexes attach to
589 * the object not the particular process. 615 * the object not the particular process.
@@ -601,16 +627,75 @@ again:
601 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ 627 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
602 key->private.mm = mm; 628 key->private.mm = mm;
603 key->private.address = address; 629 key->private.address = address;
630
631 get_futex_key_refs(key); /* implies smp_mb(); (B) */
632
604 } else { 633 } else {
634 struct inode *inode;
635
636 /*
637 * The associated futex object in this case is the inode and
638 * the page->mapping must be traversed. Ordinarily this should
639 * be stabilised under page lock but it's not strictly
640 * necessary in this case as we just want to pin the inode, not
641 * update the radix tree or anything like that.
642 *
643 * The RCU read lock is taken as the inode is finally freed
644 * under RCU. If the mapping still matches expectations then the
645 * mapping->host can be safely accessed as being a valid inode.
646 */
647 rcu_read_lock();
648
649 if (READ_ONCE(page_head->mapping) != mapping) {
650 rcu_read_unlock();
651 put_page(page_head);
652
653 goto again;
654 }
655
656 inode = READ_ONCE(mapping->host);
657 if (!inode) {
658 rcu_read_unlock();
659 put_page(page_head);
660
661 goto again;
662 }
663
664 /*
665 * Take a reference unless it is about to be freed. Previously
666 * this reference was taken by ihold under the page lock
667 * pinning the inode in place so i_lock was unnecessary. The
668 * only way for this check to fail is if the inode was
669 * truncated in parallel which is almost certainly an
670 * application bug. In such a case, just retry.
671 *
672 * We are not calling into get_futex_key_refs() in file-backed
673 * cases, therefore a successful atomic_inc return below will
674 * guarantee that get_futex_key() will still imply smp_mb(); (B).
675 */
676 if (!atomic_inc_not_zero(&inode->i_count)) {
677 rcu_read_unlock();
678 put_page(page_head);
679
680 goto again;
681 }
682
683 /* Should be impossible but lets be paranoid for now */
684 if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
685 err = -EFAULT;
686 rcu_read_unlock();
687 iput(inode);
688
689 goto out;
690 }
691
605 key->both.offset |= FUT_OFF_INODE; /* inode-based key */ 692 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
606 key->shared.inode = page_head->mapping->host; 693 key->shared.inode = inode;
607 key->shared.pgoff = basepage_index(page); 694 key->shared.pgoff = basepage_index(page);
695 rcu_read_unlock();
608 } 696 }
609 697
610 get_futex_key_refs(key); /* implies MB (B) */
611
612out: 698out:
613 unlock_page(page_head);
614 put_page(page_head); 699 put_page(page_head);
615 return err; 700 return err;
616} 701}
@@ -1368,6 +1453,45 @@ out:
1368 return ret; 1453 return ret;
1369} 1454}
1370 1455
1456static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
1457{
1458 unsigned int op = (encoded_op & 0x70000000) >> 28;
1459 unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
1460 int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
1461 int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
1462 int oldval, ret;
1463
1464 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
1465 if (oparg < 0 || oparg > 31)
1466 return -EINVAL;
1467 oparg = 1 << oparg;
1468 }
1469
1470 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1471 return -EFAULT;
1472
1473 ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
1474 if (ret)
1475 return ret;
1476
1477 switch (cmp) {
1478 case FUTEX_OP_CMP_EQ:
1479 return oldval == cmparg;
1480 case FUTEX_OP_CMP_NE:
1481 return oldval != cmparg;
1482 case FUTEX_OP_CMP_LT:
1483 return oldval < cmparg;
1484 case FUTEX_OP_CMP_GE:
1485 return oldval >= cmparg;
1486 case FUTEX_OP_CMP_LE:
1487 return oldval <= cmparg;
1488 case FUTEX_OP_CMP_GT:
1489 return oldval > cmparg;
1490 default:
1491 return -ENOSYS;
1492 }
1493}
1494
1371/* 1495/*
1372 * Wake up all waiters hashed on the physical page that is mapped 1496 * Wake up all waiters hashed on the physical page that is mapped
1373 * to this virtual address: 1497 * to this virtual address:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a079ed14f230..0df2b44dac7c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -836,7 +836,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
836 * This code is triggered unconditionally. Check the affinity 836 * This code is triggered unconditionally. Check the affinity
837 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. 837 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
838 */ 838 */
839 if (desc->irq_common_data.affinity) 839 if (cpumask_available(desc->irq_common_data.affinity))
840 cpumask_copy(mask, desc->irq_common_data.affinity); 840 cpumask_copy(mask, desc->irq_common_data.affinity);
841 else 841 else
842 valid = false; 842 valid = false;
@@ -1012,6 +1012,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
1012 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) 1012 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1013 return 0; 1013 return 0;
1014 1014
1015 /*
1016 * No further action required for interrupts which are requested as
1017 * threaded interrupts already
1018 */
1019 if (new->handler == irq_default_primary_handler)
1020 return 0;
1021
1015 new->flags |= IRQF_ONESHOT; 1022 new->flags |= IRQF_ONESHOT;
1016 1023
1017 /* 1024 /*
@@ -1019,7 +1026,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
1019 * thread handler. We force thread them as well by creating a 1026 * thread handler. We force thread them as well by creating a
1020 * secondary action. 1027 * secondary action.
1021 */ 1028 */
1022 if (new->handler != irq_default_primary_handler && new->thread_fn) { 1029 if (new->handler && new->thread_fn) {
1023 /* Allocate the secondary action */ 1030 /* Allocate the secondary action */
1024 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 1031 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1025 if (!new->secondary) 1032 if (!new->secondary)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 695763516908..bbe9dd0886bd 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -125,7 +125,7 @@ static void *alloc_insn_page(void)
125 return module_alloc(PAGE_SIZE); 125 return module_alloc(PAGE_SIZE);
126} 126}
127 127
128static void free_insn_page(void *page) 128void __weak free_insn_page(void *page)
129{ 129{
130 module_memfree(page); 130 module_memfree(page);
131} 131}
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 8173bc7fec92..3b40c8809e52 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -423,6 +423,14 @@ queue:
423 tail = encode_tail(smp_processor_id(), idx); 423 tail = encode_tail(smp_processor_id(), idx);
424 424
425 node += idx; 425 node += idx;
426
427 /*
428 * Ensure that we increment the head node->count before initialising
429 * the actual node. If the compiler is kind enough to reorder these
430 * stores, then an IRQ could overwrite our assignments.
431 */
432 barrier();
433
426 node->locked = 0; 434 node->locked = 0;
427 node->next = NULL; 435 node->next = NULL;
428 pv_init_node(node); 436 pv_init_node(node);
diff --git a/kernel/module.c b/kernel/module.c
index 0a56098d3738..aa81f41f2b19 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2869,6 +2869,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
2869 return mod; 2869 return mod;
2870} 2870}
2871 2871
2872static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
2873{
2874 if (retpoline_module_ok(get_modinfo(info, "retpoline")))
2875 return;
2876
2877 pr_warn("%s: loading module not compiled with retpoline compiler.\n",
2878 mod->name);
2879}
2880
2872static int check_modinfo(struct module *mod, struct load_info *info, int flags) 2881static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2873{ 2882{
2874 const char *modmagic = get_modinfo(info, "vermagic"); 2883 const char *modmagic = get_modinfo(info, "vermagic");
@@ -2895,6 +2904,8 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2895 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); 2904 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
2896 } 2905 }
2897 2906
2907 check_modinfo_retpoline(mod, info);
2908
2898 if (get_modinfo(info, "staging")) { 2909 if (get_modinfo(info, "staging")) {
2899 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); 2910 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
2900 pr_warn("%s: module is from the staging directory, the quality " 2911 pr_warn("%s: module is from the staging directory, the quality "
diff --git a/kernel/pid.c b/kernel/pid.c
index b17263be9082..5fe7cdb6d05f 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -322,8 +322,10 @@ struct pid *alloc_pid(struct pid_namespace *ns)
322 } 322 }
323 323
324 if (unlikely(is_child_reaper(pid))) { 324 if (unlikely(is_child_reaper(pid))) {
325 if (pid_ns_prepare_proc(ns)) 325 if (pid_ns_prepare_proc(ns)) {
326 disable_pid_allocation(ns);
326 goto out_free; 327 goto out_free;
328 }
327 } 329 }
328 330
329 get_pid_ns(ns); 331 get_pid_ns(ns);
diff --git a/kernel/power/power.h b/kernel/power/power.h
index efe1b3b17c88..9557977f58b2 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -94,9 +94,6 @@ extern int in_suspend;
94extern dev_t swsusp_resume_device; 94extern dev_t swsusp_resume_device;
95extern sector_t swsusp_resume_block; 95extern sector_t swsusp_resume_block;
96 96
97extern asmlinkage int swsusp_arch_suspend(void);
98extern asmlinkage int swsusp_arch_resume(void);
99
100extern int create_basic_memory_bitmaps(void); 97extern int create_basic_memory_bitmaps(void);
101extern void free_basic_memory_bitmaps(void); 98extern void free_basic_memory_bitmaps(void);
102extern int hibernate_preallocate_memory(void); 99extern int hibernate_preallocate_memory(void);
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 526e8911460a..f83c1876b39c 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -184,6 +184,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
184 res = PAGE_SIZE - pg_offp; 184 res = PAGE_SIZE - pg_offp;
185 } 185 }
186 186
187 if (!data_of(data->handle)) {
188 res = -EINVAL;
189 goto unlock;
190 }
191
187 res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp, 192 res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
188 buf, count); 193 buf, count);
189 if (res > 0) 194 if (res > 0)
diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
index d5760c42f042..61d41ca41844 100644
--- a/kernel/printk/braille.c
+++ b/kernel/printk/braille.c
@@ -2,12 +2,13 @@
2 2
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/console.h> 4#include <linux/console.h>
5#include <linux/errno.h>
5#include <linux/string.h> 6#include <linux/string.h>
6 7
7#include "console_cmdline.h" 8#include "console_cmdline.h"
8#include "braille.h" 9#include "braille.h"
9 10
10char *_braille_console_setup(char **str, char **brl_options) 11int _braille_console_setup(char **str, char **brl_options)
11{ 12{
12 if (!strncmp(*str, "brl,", 4)) { 13 if (!strncmp(*str, "brl,", 4)) {
13 *brl_options = ""; 14 *brl_options = "";
@@ -15,14 +16,14 @@ char *_braille_console_setup(char **str, char **brl_options)
15 } else if (!strncmp(*str, "brl=", 4)) { 16 } else if (!strncmp(*str, "brl=", 4)) {
16 *brl_options = *str + 4; 17 *brl_options = *str + 4;
17 *str = strchr(*brl_options, ','); 18 *str = strchr(*brl_options, ',');
18 if (!*str) 19 if (!*str) {
19 pr_err("need port name after brl=\n"); 20 pr_err("need port name after brl=\n");
20 else 21 return -EINVAL;
21 *((*str)++) = 0; 22 }
22 } else 23 *((*str)++) = 0;
23 return NULL; 24 }
24 25
25 return *str; 26 return 0;
26} 27}
27 28
28int 29int
diff --git a/kernel/printk/braille.h b/kernel/printk/braille.h
index 769d771145c8..749a6756843a 100644
--- a/kernel/printk/braille.h
+++ b/kernel/printk/braille.h
@@ -9,7 +9,14 @@ braille_set_options(struct console_cmdline *c, char *brl_options)
9 c->brl_options = brl_options; 9 c->brl_options = brl_options;
10} 10}
11 11
12char * 12/*
13 * Setup console according to braille options.
14 * Return -EINVAL on syntax error, 0 on success (or no braille option was
15 * actually given).
16 * Modifies str to point to the serial options
17 * Sets brl_options to the parsed braille options.
18 */
19int
13_braille_console_setup(char **str, char **brl_options); 20_braille_console_setup(char **str, char **brl_options);
14 21
15int 22int
@@ -25,10 +32,10 @@ braille_set_options(struct console_cmdline *c, char *brl_options)
25{ 32{
26} 33}
27 34
28static inline char * 35static inline int
29_braille_console_setup(char **str, char **brl_options) 36_braille_console_setup(char **str, char **brl_options)
30{ 37{
31 return NULL; 38 return 0;
32} 39}
33 40
34static inline int 41static inline int
diff --git a/kernel/profile.c b/kernel/profile.c
index 99513e1160e5..9cd8e18e6f18 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -44,7 +44,7 @@ int prof_on __read_mostly;
44EXPORT_SYMBOL_GPL(prof_on); 44EXPORT_SYMBOL_GPL(prof_on);
45 45
46static cpumask_var_t prof_cpu_mask; 46static cpumask_var_t prof_cpu_mask;
47#ifdef CONFIG_SMP 47#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
48static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); 48static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
49static DEFINE_PER_CPU(int, cpu_profile_flip); 49static DEFINE_PER_CPU(int, cpu_profile_flip);
50static DEFINE_MUTEX(profile_flip_mutex); 50static DEFINE_MUTEX(profile_flip_mutex);
@@ -201,7 +201,7 @@ int profile_event_unregister(enum profile_type type, struct notifier_block *n)
201} 201}
202EXPORT_SYMBOL_GPL(profile_event_unregister); 202EXPORT_SYMBOL_GPL(profile_event_unregister);
203 203
204#ifdef CONFIG_SMP 204#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
205/* 205/*
206 * Each cpu has a pair of open-addressed hashtables for pending 206 * Each cpu has a pair of open-addressed hashtables for pending
207 * profile hits. read_profile() IPI's all cpus to request them 207 * profile hits. read_profile() IPI's all cpus to request them
diff --git a/kernel/relay.c b/kernel/relay.c
index 0b4570cfacae..f6d5f08bdfaa 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -163,7 +163,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
163{ 163{
164 struct rchan_buf *buf; 164 struct rchan_buf *buf;
165 165
166 if (chan->n_subbufs > UINT_MAX / sizeof(size_t *)) 166 if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
167 return NULL; 167 return NULL;
168 168
169 buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); 169 buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
diff --git a/kernel/resource.c b/kernel/resource.c
index a4a94e700fb9..41718cd8cab5 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -611,7 +611,8 @@ static int __find_resource(struct resource *root, struct resource *old,
611 alloc.start = constraint->alignf(constraint->alignf_data, &avail, 611 alloc.start = constraint->alignf(constraint->alignf_data, &avail,
612 size, constraint->align); 612 size, constraint->align);
613 alloc.end = alloc.start + size - 1; 613 alloc.end = alloc.start + size - 1;
614 if (resource_contains(&avail, &alloc)) { 614 if (alloc.start <= alloc.end &&
615 resource_contains(&avail, &alloc)) {
615 new->start = alloc.start; 616 new->start = alloc.start;
616 new->end = alloc.end; 617 new->end = alloc.end;
617 return 0; 618 return 0;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9d6b3d869592..65ed3501c2ca 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -601,7 +601,8 @@ void resched_cpu(int cpu)
601 unsigned long flags; 601 unsigned long flags;
602 602
603 raw_spin_lock_irqsave(&rq->lock, flags); 603 raw_spin_lock_irqsave(&rq->lock, flags);
604 resched_curr(rq); 604 if (cpu_online(cpu) || cpu == smp_processor_id())
605 resched_curr(rq);
605 raw_spin_unlock_irqrestore(&rq->lock, flags); 606 raw_spin_unlock_irqrestore(&rq->lock, flags);
606} 607}
607 608
@@ -2109,6 +2110,7 @@ void __dl_clear_params(struct task_struct *p)
2109 dl_se->dl_period = 0; 2110 dl_se->dl_period = 0;
2110 dl_se->flags = 0; 2111 dl_se->flags = 0;
2111 dl_se->dl_bw = 0; 2112 dl_se->dl_bw = 0;
2113 dl_se->dl_density = 0;
2112 2114
2113 dl_se->dl_throttled = 0; 2115 dl_se->dl_throttled = 0;
2114 dl_se->dl_new = 1; 2116 dl_se->dl_new = 1;
@@ -3647,6 +3649,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3647 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 3649 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3648 dl_se->flags = attr->sched_flags; 3650 dl_se->flags = attr->sched_flags;
3649 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 3651 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3652 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
3650 3653
3651 /* 3654 /*
3652 * Changing the parameters of a task is 'tricky' and we're not doing 3655 * Changing the parameters of a task is 'tricky' and we're not doing
@@ -5894,6 +5897,19 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5894 call_rcu_sched(&old_rd->rcu, free_rootdomain); 5897 call_rcu_sched(&old_rd->rcu, free_rootdomain);
5895} 5898}
5896 5899
5900void sched_get_rd(struct root_domain *rd)
5901{
5902 atomic_inc(&rd->refcount);
5903}
5904
5905void sched_put_rd(struct root_domain *rd)
5906{
5907 if (!atomic_dec_and_test(&rd->refcount))
5908 return;
5909
5910 call_rcu_sched(&rd->rcu, free_rootdomain);
5911}
5912
5897static int init_rootdomain(struct root_domain *rd) 5913static int init_rootdomain(struct root_domain *rd)
5898{ 5914{
5899 memset(rd, 0, sizeof(*rd)); 5915 memset(rd, 0, sizeof(*rd));
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 6be2afd9bfd6..e12b0a4df891 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -480,13 +480,84 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
480} 480}
481 481
482/* 482/*
483 * When a -deadline entity is queued back on the runqueue, its runtime and 483 * Revised wakeup rule [1]: For self-suspending tasks, rather then
484 * deadline might need updating. 484 * re-initializing task's runtime and deadline, the revised wakeup
485 * rule adjusts the task's runtime to avoid the task to overrun its
486 * density.
485 * 487 *
486 * The policy here is that we update the deadline of the entity only if: 488 * Reasoning: a task may overrun the density if:
487 * - the current deadline is in the past, 489 * runtime / (deadline - t) > dl_runtime / dl_deadline
488 * - using the remaining runtime with the current deadline would make 490 *
489 * the entity exceed its bandwidth. 491 * Therefore, runtime can be adjusted to:
492 * runtime = (dl_runtime / dl_deadline) * (deadline - t)
493 *
494 * In such way that runtime will be equal to the maximum density
495 * the task can use without breaking any rule.
496 *
497 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
498 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
499 */
500static void
501update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
502{
503 u64 laxity = dl_se->deadline - rq_clock(rq);
504
505 /*
506 * If the task has deadline < period, and the deadline is in the past,
507 * it should already be throttled before this check.
508 *
509 * See update_dl_entity() comments for further details.
510 */
511 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
512
513 dl_se->runtime = (dl_se->dl_density * laxity) >> 20;
514}
515
516/*
517 * Regarding the deadline, a task with implicit deadline has a relative
518 * deadline == relative period. A task with constrained deadline has a
519 * relative deadline <= relative period.
520 *
521 * We support constrained deadline tasks. However, there are some restrictions
522 * applied only for tasks which do not have an implicit deadline. See
523 * update_dl_entity() to know more about such restrictions.
524 *
525 * The dl_is_implicit() returns true if the task has an implicit deadline.
526 */
527static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
528{
529 return dl_se->dl_deadline == dl_se->dl_period;
530}
531
532/*
533 * When a deadline entity is placed in the runqueue, its runtime and deadline
534 * might need to be updated. This is done by a CBS wake up rule. There are two
535 * different rules: 1) the original CBS; and 2) the Revisited CBS.
536 *
537 * When the task is starting a new period, the Original CBS is used. In this
538 * case, the runtime is replenished and a new absolute deadline is set.
539 *
540 * When a task is queued before the begin of the next period, using the
541 * remaining runtime and deadline could make the entity to overflow, see
542 * dl_entity_overflow() to find more about runtime overflow. When such case
543 * is detected, the runtime and deadline need to be updated.
544 *
545 * If the task has an implicit deadline, i.e., deadline == period, the Original
546 * CBS is applied. the runtime is replenished and a new absolute deadline is
547 * set, as in the previous cases.
548 *
549 * However, the Original CBS does not work properly for tasks with
550 * deadline < period, which are said to have a constrained deadline. By
551 * applying the Original CBS, a constrained deadline task would be able to run
552 * runtime/deadline in a period. With deadline < period, the task would
553 * overrun the runtime/period allowed bandwidth, breaking the admission test.
554 *
555 * In order to prevent this misbehave, the Revisited CBS is used for
556 * constrained deadline tasks when a runtime overflow is detected. In the
557 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
558 * the remaining runtime of the task is reduced to avoid runtime overflow.
559 * Please refer to the comments update_dl_revised_wakeup() function to find
560 * more about the Revised CBS rule.
490 */ 561 */
491static void update_dl_entity(struct sched_dl_entity *dl_se, 562static void update_dl_entity(struct sched_dl_entity *dl_se,
492 struct sched_dl_entity *pi_se) 563 struct sched_dl_entity *pi_se)
@@ -505,6 +576,14 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
505 576
506 if (dl_time_before(dl_se->deadline, rq_clock(rq)) || 577 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
507 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) { 578 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
579
580 if (unlikely(!dl_is_implicit(dl_se) &&
581 !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
582 !dl_se->dl_boosted)){
583 update_dl_revised_wakeup(dl_se, rq);
584 return;
585 }
586
508 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; 587 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
509 dl_se->runtime = pi_se->dl_runtime; 588 dl_se->runtime = pi_se->dl_runtime;
510 } 589 }
@@ -991,11 +1070,6 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
991 __dequeue_dl_entity(dl_se); 1070 __dequeue_dl_entity(dl_se);
992} 1071}
993 1072
994static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
995{
996 return dl_se->dl_deadline < dl_se->dl_period;
997}
998
999static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) 1073static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1000{ 1074{
1001 struct task_struct *pi_task = rt_mutex_get_top_task(p); 1075 struct task_struct *pi_task = rt_mutex_get_top_task(p);
@@ -1027,7 +1101,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1027 * If that is the case, the task will be throttled and 1101 * If that is the case, the task will be throttled and
1028 * the replenishment timer will be set to the next period. 1102 * the replenishment timer will be set to the next period.
1029 */ 1103 */
1030 if (!p->dl.dl_throttled && dl_is_constrained(&p->dl)) 1104 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1031 dl_check_constrained_dl(&p->dl); 1105 dl_check_constrained_dl(&p->dl);
1032 1106
1033 /* 1107 /*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 812069b66f47..3b136fb4422c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2223,7 +2223,8 @@ void task_numa_work(struct callback_head *work)
2223 return; 2223 return;
2224 2224
2225 2225
2226 down_read(&mm->mmap_sem); 2226 if (!down_read_trylock(&mm->mmap_sem))
2227 return;
2227 vma = find_vma(mm, start); 2228 vma = find_vma(mm, start);
2228 if (!vma) { 2229 if (!vma) {
2229 reset_ptenuma_scan(p); 2230 reset_ptenuma_scan(p);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 95fefb364dab..801b4ec40702 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -822,6 +822,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
822 struct rq *rq = rq_of_rt_rq(rt_rq); 822 struct rq *rq = rq_of_rt_rq(rt_rq);
823 823
824 raw_spin_lock(&rq->lock); 824 raw_spin_lock(&rq->lock);
825 update_rq_clock(rq);
826
825 if (rt_rq->rt_time) { 827 if (rt_rq->rt_time) {
826 u64 runtime; 828 u64 runtime;
827 829
@@ -1833,9 +1835,8 @@ static void push_rt_tasks(struct rq *rq)
1833 * the rt_loop_next will cause the iterator to perform another scan. 1835 * the rt_loop_next will cause the iterator to perform another scan.
1834 * 1836 *
1835 */ 1837 */
1836static int rto_next_cpu(struct rq *rq) 1838static int rto_next_cpu(struct root_domain *rd)
1837{ 1839{
1838 struct root_domain *rd = rq->rd;
1839 int next; 1840 int next;
1840 int cpu; 1841 int cpu;
1841 1842
@@ -1911,19 +1912,24 @@ static void tell_cpu_to_push(struct rq *rq)
1911 * Otherwise it is finishing up and an ipi needs to be sent. 1912 * Otherwise it is finishing up and an ipi needs to be sent.
1912 */ 1913 */
1913 if (rq->rd->rto_cpu < 0) 1914 if (rq->rd->rto_cpu < 0)
1914 cpu = rto_next_cpu(rq); 1915 cpu = rto_next_cpu(rq->rd);
1915 1916
1916 raw_spin_unlock(&rq->rd->rto_lock); 1917 raw_spin_unlock(&rq->rd->rto_lock);
1917 1918
1918 rto_start_unlock(&rq->rd->rto_loop_start); 1919 rto_start_unlock(&rq->rd->rto_loop_start);
1919 1920
1920 if (cpu >= 0) 1921 if (cpu >= 0) {
1922 /* Make sure the rd does not get freed while pushing */
1923 sched_get_rd(rq->rd);
1921 irq_work_queue_on(&rq->rd->rto_push_work, cpu); 1924 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
1925 }
1922} 1926}
1923 1927
1924/* Called from hardirq context */ 1928/* Called from hardirq context */
1925void rto_push_irq_work_func(struct irq_work *work) 1929void rto_push_irq_work_func(struct irq_work *work)
1926{ 1930{
1931 struct root_domain *rd =
1932 container_of(work, struct root_domain, rto_push_work);
1927 struct rq *rq; 1933 struct rq *rq;
1928 int cpu; 1934 int cpu;
1929 1935
@@ -1939,18 +1945,20 @@ void rto_push_irq_work_func(struct irq_work *work)
1939 raw_spin_unlock(&rq->lock); 1945 raw_spin_unlock(&rq->lock);
1940 } 1946 }
1941 1947
1942 raw_spin_lock(&rq->rd->rto_lock); 1948 raw_spin_lock(&rd->rto_lock);
1943 1949
1944 /* Pass the IPI to the next rt overloaded queue */ 1950 /* Pass the IPI to the next rt overloaded queue */
1945 cpu = rto_next_cpu(rq); 1951 cpu = rto_next_cpu(rd);
1946 1952
1947 raw_spin_unlock(&rq->rd->rto_lock); 1953 raw_spin_unlock(&rd->rto_lock);
1948 1954
1949 if (cpu < 0) 1955 if (cpu < 0) {
1956 sched_put_rd(rd);
1950 return; 1957 return;
1958 }
1951 1959
1952 /* Try the next RT overloaded CPU */ 1960 /* Try the next RT overloaded CPU */
1953 irq_work_queue_on(&rq->rd->rto_push_work, cpu); 1961 irq_work_queue_on(&rd->rto_push_work, cpu);
1954} 1962}
1955#endif /* HAVE_RT_PUSH_IPI */ 1963#endif /* HAVE_RT_PUSH_IPI */
1956 1964
@@ -2138,7 +2146,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
2138 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) 2146 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2139 queue_push_tasks(rq); 2147 queue_push_tasks(rq);
2140#endif /* CONFIG_SMP */ 2148#endif /* CONFIG_SMP */
2141 if (p->prio < rq->curr->prio) 2149 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2142 resched_curr(rq); 2150 resched_curr(rq);
2143 } 2151 }
2144} 2152}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 448a8266ceea..0c9ebd82a684 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -553,6 +553,8 @@ struct root_domain {
553}; 553};
554 554
555extern struct root_domain def_root_domain; 555extern struct root_domain def_root_domain;
556extern void sched_get_rd(struct root_domain *rd);
557extern void sched_put_rd(struct root_domain *rd);
556 558
557#ifdef HAVE_RT_PUSH_IPI 559#ifdef HAVE_RT_PUSH_IPI
558extern void rto_push_irq_work_func(struct irq_work *work); 560extern void rto_push_irq_work_func(struct irq_work *work);
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index efd384f3f852..9a9203b15cde 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -16,6 +16,8 @@
16#include <linux/atomic.h> 16#include <linux/atomic.h>
17#include <linux/audit.h> 17#include <linux/audit.h>
18#include <linux/compat.h> 18#include <linux/compat.h>
19#include <linux/nospec.h>
20#include <linux/prctl.h>
19#include <linux/sched.h> 21#include <linux/sched.h>
20#include <linux/seccomp.h> 22#include <linux/seccomp.h>
21#include <linux/slab.h> 23#include <linux/slab.h>
@@ -214,8 +216,11 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
214 return true; 216 return true;
215} 217}
216 218
219void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
220
217static inline void seccomp_assign_mode(struct task_struct *task, 221static inline void seccomp_assign_mode(struct task_struct *task,
218 unsigned long seccomp_mode) 222 unsigned long seccomp_mode,
223 unsigned long flags)
219{ 224{
220 assert_spin_locked(&task->sighand->siglock); 225 assert_spin_locked(&task->sighand->siglock);
221 226
@@ -225,6 +230,9 @@ static inline void seccomp_assign_mode(struct task_struct *task,
225 * filter) is set. 230 * filter) is set.
226 */ 231 */
227 smp_mb__before_atomic(); 232 smp_mb__before_atomic();
233 /* Assume default seccomp processes want spec flaw mitigation. */
234 if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
235 arch_seccomp_spec_mitigate(task);
228 set_tsk_thread_flag(task, TIF_SECCOMP); 236 set_tsk_thread_flag(task, TIF_SECCOMP);
229} 237}
230 238
@@ -292,7 +300,7 @@ static inline pid_t seccomp_can_sync_threads(void)
292 * without dropping the locks. 300 * without dropping the locks.
293 * 301 *
294 */ 302 */
295static inline void seccomp_sync_threads(void) 303static inline void seccomp_sync_threads(unsigned long flags)
296{ 304{
297 struct task_struct *thread, *caller; 305 struct task_struct *thread, *caller;
298 306
@@ -333,7 +341,8 @@ static inline void seccomp_sync_threads(void)
333 * allow one thread to transition the other. 341 * allow one thread to transition the other.
334 */ 342 */
335 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) 343 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
336 seccomp_assign_mode(thread, SECCOMP_MODE_FILTER); 344 seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
345 flags);
337 } 346 }
338} 347}
339 348
@@ -452,7 +461,7 @@ static long seccomp_attach_filter(unsigned int flags,
452 461
453 /* Now that the new filter is in place, synchronize to all threads. */ 462 /* Now that the new filter is in place, synchronize to all threads. */
454 if (flags & SECCOMP_FILTER_FLAG_TSYNC) 463 if (flags & SECCOMP_FILTER_FLAG_TSYNC)
455 seccomp_sync_threads(); 464 seccomp_sync_threads(flags);
456 465
457 return 0; 466 return 0;
458} 467}
@@ -747,7 +756,7 @@ static long seccomp_set_mode_strict(void)
747#ifdef TIF_NOTSC 756#ifdef TIF_NOTSC
748 disable_TSC(); 757 disable_TSC();
749#endif 758#endif
750 seccomp_assign_mode(current, seccomp_mode); 759 seccomp_assign_mode(current, seccomp_mode, 0);
751 ret = 0; 760 ret = 0;
752 761
753out: 762out:
@@ -805,7 +814,7 @@ static long seccomp_set_mode_filter(unsigned int flags,
805 /* Do not free the successfully attached filter. */ 814 /* Do not free the successfully attached filter. */
806 prepared = NULL; 815 prepared = NULL;
807 816
808 seccomp_assign_mode(current, seccomp_mode); 817 seccomp_assign_mode(current, seccomp_mode, flags);
809out: 818out:
810 spin_unlock_irq(&current->sighand->siglock); 819 spin_unlock_irq(&current->sighand->siglock);
811 if (flags & SECCOMP_FILTER_FLAG_TSYNC) 820 if (flags & SECCOMP_FILTER_FLAG_TSYNC)
diff --git a/kernel/signal.c b/kernel/signal.c
index 4a548c6a4118..8bfbc47f0a23 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1392,6 +1392,10 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1392 return ret; 1392 return ret;
1393 } 1393 }
1394 1394
1395 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1396 if (pid == INT_MIN)
1397 return -ESRCH;
1398
1395 read_lock(&tasklist_lock); 1399 read_lock(&tasklist_lock);
1396 if (pid != -1) { 1400 if (pid != -1) {
1397 ret = __kill_pgrp_info(sig, info, 1401 ret = __kill_pgrp_info(sig, info,
@@ -2495,6 +2499,13 @@ void __set_current_blocked(const sigset_t *newset)
2495{ 2499{
2496 struct task_struct *tsk = current; 2500 struct task_struct *tsk = current;
2497 2501
2502 /*
2503 * In case the signal mask hasn't changed, there is nothing we need
2504 * to do. The current->blocked shouldn't be modified by other task.
2505 */
2506 if (sigequalsets(&tsk->blocked, newset))
2507 return;
2508
2498 spin_lock_irq(&tsk->sighand->siglock); 2509 spin_lock_irq(&tsk->sighand->siglock);
2499 __set_task_blocked(tsk, newset); 2510 __set_task_blocked(tsk, newset);
2500 spin_unlock_irq(&tsk->sighand->siglock); 2511 spin_unlock_irq(&tsk->sighand->siglock);
diff --git a/kernel/sys.c b/kernel/sys.c
index 78947de6f969..f718742e55e6 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -53,6 +53,8 @@
53#include <linux/uidgid.h> 53#include <linux/uidgid.h>
54#include <linux/cred.h> 54#include <linux/cred.h>
55 55
56#include <linux/nospec.h>
57
56#include <linux/kmsg_dump.h> 58#include <linux/kmsg_dump.h>
57/* Move somewhere else to avoid recompiling? */ 59/* Move somewhere else to avoid recompiling? */
58#include <generated/utsrelease.h> 60#include <generated/utsrelease.h>
@@ -1311,6 +1313,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1311 if (resource >= RLIM_NLIMITS) 1313 if (resource >= RLIM_NLIMITS)
1312 return -EINVAL; 1314 return -EINVAL;
1313 1315
1316 resource = array_index_nospec(resource, RLIM_NLIMITS);
1314 task_lock(current->group_leader); 1317 task_lock(current->group_leader);
1315 x = current->signal->rlim[resource]; 1318 x = current->signal->rlim[resource];
1316 task_unlock(current->group_leader); 1319 task_unlock(current->group_leader);
@@ -2072,6 +2075,17 @@ static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2072} 2075}
2073#endif 2076#endif
2074 2077
2078int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2079{
2080 return -EINVAL;
2081}
2082
2083int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2084 unsigned long ctrl)
2085{
2086 return -EINVAL;
2087}
2088
2075SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, 2089SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2076 unsigned long, arg4, unsigned long, arg5) 2090 unsigned long, arg4, unsigned long, arg5)
2077{ 2091{
@@ -2266,6 +2280,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2266 case PR_GET_FP_MODE: 2280 case PR_GET_FP_MODE:
2267 error = GET_FP_MODE(me); 2281 error = GET_FP_MODE(me);
2268 break; 2282 break;
2283 case PR_GET_SPECULATION_CTRL:
2284 if (arg3 || arg4 || arg5)
2285 return -EINVAL;
2286 error = arch_prctl_spec_ctrl_get(me, arg2);
2287 break;
2288 case PR_SET_SPECULATION_CTRL:
2289 if (arg4 || arg5)
2290 return -EINVAL;
2291 error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2292 break;
2269 default: 2293 default:
2270 error = -EINVAL; 2294 error = -EINVAL;
2271 break; 2295 break;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 17f7bcff1e02..8c4e27cbfe7f 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(__ktime_divns);
312 */ 312 */
313ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs) 313ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
314{ 314{
315 ktime_t res = ktime_add(lhs, rhs); 315 ktime_t res = ktime_add_unsafe(lhs, rhs);
316 316
317 /* 317 /*
318 * We use KTIME_SEC_MAX here, the maximum timeout which we can 318 * We use KTIME_SEC_MAX here, the maximum timeout which we can
@@ -669,7 +669,9 @@ static void hrtimer_reprogram(struct hrtimer *timer,
669static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) 669static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
670{ 670{
671 base->expires_next.tv64 = KTIME_MAX; 671 base->expires_next.tv64 = KTIME_MAX;
672 base->hang_detected = 0;
672 base->hres_active = 0; 673 base->hres_active = 0;
674 base->next_timer = NULL;
673} 675}
674 676
675/* 677/*
@@ -1137,7 +1139,12 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1137 1139
1138 cpu_base = raw_cpu_ptr(&hrtimer_bases); 1140 cpu_base = raw_cpu_ptr(&hrtimer_bases);
1139 1141
1140 if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) 1142 /*
1143 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
1144 * clock modifications, so they needs to become CLOCK_MONOTONIC to
1145 * ensure POSIX compliance.
1146 */
1147 if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
1141 clock_id = CLOCK_MONOTONIC; 1148 clock_id = CLOCK_MONOTONIC;
1142 1149
1143 base = hrtimer_clockid_to_base(clock_id); 1150 base = hrtimer_clockid_to_base(clock_id);
@@ -1615,6 +1622,7 @@ static void init_hrtimers_cpu(int cpu)
1615 timerqueue_init_head(&cpu_base->clock_base[i].active); 1622 timerqueue_init_head(&cpu_base->clock_base[i].active);
1616 } 1623 }
1617 1624
1625 cpu_base->active_bases = 0;
1618 cpu_base->cpu = cpu; 1626 cpu_base->cpu = cpu;
1619 hrtimer_init_hres(cpu_base); 1627 hrtimer_init_hres(cpu_base);
1620} 1628}
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 9cff0ab82b63..e24008c098c6 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -300,14 +300,17 @@ out:
300static int pc_clock_gettime(clockid_t id, struct timespec *ts) 300static int pc_clock_gettime(clockid_t id, struct timespec *ts)
301{ 301{
302 struct posix_clock_desc cd; 302 struct posix_clock_desc cd;
303 struct timespec64 ts64;
303 int err; 304 int err;
304 305
305 err = get_clock_desc(id, &cd); 306 err = get_clock_desc(id, &cd);
306 if (err) 307 if (err)
307 return err; 308 return err;
308 309
309 if (cd.clk->ops.clock_gettime) 310 if (cd.clk->ops.clock_gettime) {
310 err = cd.clk->ops.clock_gettime(cd.clk, ts); 311 err = cd.clk->ops.clock_gettime(cd.clk, &ts64);
312 *ts = timespec64_to_timespec(ts64);
313 }
311 else 314 else
312 err = -EOPNOTSUPP; 315 err = -EOPNOTSUPP;
313 316
@@ -319,14 +322,17 @@ static int pc_clock_gettime(clockid_t id, struct timespec *ts)
319static int pc_clock_getres(clockid_t id, struct timespec *ts) 322static int pc_clock_getres(clockid_t id, struct timespec *ts)
320{ 323{
321 struct posix_clock_desc cd; 324 struct posix_clock_desc cd;
325 struct timespec64 ts64;
322 int err; 326 int err;
323 327
324 err = get_clock_desc(id, &cd); 328 err = get_clock_desc(id, &cd);
325 if (err) 329 if (err)
326 return err; 330 return err;
327 331
328 if (cd.clk->ops.clock_getres) 332 if (cd.clk->ops.clock_getres) {
329 err = cd.clk->ops.clock_getres(cd.clk, ts); 333 err = cd.clk->ops.clock_getres(cd.clk, &ts64);
334 *ts = timespec64_to_timespec(ts64);
335 }
330 else 336 else
331 err = -EOPNOTSUPP; 337 err = -EOPNOTSUPP;
332 338
@@ -337,6 +343,7 @@ static int pc_clock_getres(clockid_t id, struct timespec *ts)
337 343
338static int pc_clock_settime(clockid_t id, const struct timespec *ts) 344static int pc_clock_settime(clockid_t id, const struct timespec *ts)
339{ 345{
346 struct timespec64 ts64 = timespec_to_timespec64(*ts);
340 struct posix_clock_desc cd; 347 struct posix_clock_desc cd;
341 int err; 348 int err;
342 349
@@ -350,7 +357,7 @@ static int pc_clock_settime(clockid_t id, const struct timespec *ts)
350 } 357 }
351 358
352 if (cd.clk->ops.clock_settime) 359 if (cd.clk->ops.clock_settime)
353 err = cd.clk->ops.clock_settime(cd.clk, ts); 360 err = cd.clk->ops.clock_settime(cd.clk, &ts64);
354 else 361 else
355 err = -EOPNOTSUPP; 362 err = -EOPNOTSUPP;
356out: 363out:
@@ -403,29 +410,36 @@ static void pc_timer_gettime(struct k_itimer *kit, struct itimerspec *ts)
403{ 410{
404 clockid_t id = kit->it_clock; 411 clockid_t id = kit->it_clock;
405 struct posix_clock_desc cd; 412 struct posix_clock_desc cd;
413 struct itimerspec64 ts64;
406 414
407 if (get_clock_desc(id, &cd)) 415 if (get_clock_desc(id, &cd))
408 return; 416 return;
409 417
410 if (cd.clk->ops.timer_gettime) 418 if (cd.clk->ops.timer_gettime) {
411 cd.clk->ops.timer_gettime(cd.clk, kit, ts); 419 cd.clk->ops.timer_gettime(cd.clk, kit, &ts64);
412 420 *ts = itimerspec64_to_itimerspec(&ts64);
421 }
413 put_clock_desc(&cd); 422 put_clock_desc(&cd);
414} 423}
415 424
416static int pc_timer_settime(struct k_itimer *kit, int flags, 425static int pc_timer_settime(struct k_itimer *kit, int flags,
417 struct itimerspec *ts, struct itimerspec *old) 426 struct itimerspec *ts, struct itimerspec *old)
418{ 427{
428 struct itimerspec64 ts64 = itimerspec_to_itimerspec64(ts);
419 clockid_t id = kit->it_clock; 429 clockid_t id = kit->it_clock;
420 struct posix_clock_desc cd; 430 struct posix_clock_desc cd;
431 struct itimerspec64 old64;
421 int err; 432 int err;
422 433
423 err = get_clock_desc(id, &cd); 434 err = get_clock_desc(id, &cd);
424 if (err) 435 if (err)
425 return err; 436 return err;
426 437
427 if (cd.clk->ops.timer_settime) 438 if (cd.clk->ops.timer_settime) {
428 err = cd.clk->ops.timer_settime(cd.clk, kit, flags, ts, old); 439 err = cd.clk->ops.timer_settime(cd.clk, kit, flags, &ts64, &old64);
440 if (old)
441 *old = itimerspec64_to_itimerspec(&old64);
442 }
429 else 443 else
430 err = -EOPNOTSUPP; 444 err = -EOPNOTSUPP;
431 445
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index f2826c35e918..fc7c37ad90a0 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -507,17 +507,22 @@ static struct pid *good_sigevent(sigevent_t * event)
507{ 507{
508 struct task_struct *rtn = current->group_leader; 508 struct task_struct *rtn = current->group_leader;
509 509
510 if ((event->sigev_notify & SIGEV_THREAD_ID ) && 510 switch (event->sigev_notify) {
511 (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || 511 case SIGEV_SIGNAL | SIGEV_THREAD_ID:
512 !same_thread_group(rtn, current) || 512 rtn = find_task_by_vpid(event->sigev_notify_thread_id);
513 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL)) 513 if (!rtn || !same_thread_group(rtn, current))
514 return NULL;
515 /* FALLTHRU */
516 case SIGEV_SIGNAL:
517 case SIGEV_THREAD:
518 if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
519 return NULL;
520 /* FALLTHRU */
521 case SIGEV_NONE:
522 return task_pid(rtn);
523 default:
514 return NULL; 524 return NULL;
515 525 }
516 if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
517 ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
518 return NULL;
519
520 return task_pid(rtn);
521} 526}
522 527
523void posix_timers_register_clock(const clockid_t clock_id, 528void posix_timers_register_clock(const clockid_t clock_id,
@@ -745,8 +750,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
745 /* interval timer ? */ 750 /* interval timer ? */
746 if (iv.tv64) 751 if (iv.tv64)
747 cur_setting->it_interval = ktime_to_timespec(iv); 752 cur_setting->it_interval = ktime_to_timespec(iv);
748 else if (!hrtimer_active(timer) && 753 else if (!hrtimer_active(timer) && timr->it_sigev_notify != SIGEV_NONE)
749 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
750 return; 754 return;
751 755
752 now = timer->base->get_time(); 756 now = timer->base->get_time();
@@ -757,7 +761,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
757 * expiry is > now. 761 * expiry is > now.
758 */ 762 */
759 if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING || 763 if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
760 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) 764 timr->it_sigev_notify == SIGEV_NONE))
761 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); 765 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
762 766
763 remaining = __hrtimer_expires_remaining_adjusted(timer, now); 767 remaining = __hrtimer_expires_remaining_adjusted(timer, now);
@@ -767,7 +771,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
767 * A single shot SIGEV_NONE timer must return 0, when 771 * A single shot SIGEV_NONE timer must return 0, when
768 * it is expired ! 772 * it is expired !
769 */ 773 */
770 if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) 774 if (timr->it_sigev_notify != SIGEV_NONE)
771 cur_setting->it_value.tv_nsec = 1; 775 cur_setting->it_value.tv_nsec = 1;
772 } else 776 } else
773 cur_setting->it_value = ktime_to_timespec(remaining); 777 cur_setting->it_value = ktime_to_timespec(remaining);
@@ -865,7 +869,7 @@ common_timer_set(struct k_itimer *timr, int flags,
865 timr->it.real.interval = timespec_to_ktime(new_setting->it_interval); 869 timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
866 870
867 /* SIGEV_NONE timers are not queued ! See common_timer_get */ 871 /* SIGEV_NONE timers are not queued ! See common_timer_get */
868 if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) { 872 if (timr->it_sigev_notify == SIGEV_NONE) {
869 /* Setup correct expiry time for relative timers */ 873 /* Setup correct expiry time for relative timers */
870 if (mode == HRTIMER_MODE_REL) { 874 if (mode == HRTIMER_MODE_REL) {
871 hrtimer_add_expires(timer, timer->base->get_time()); 875 hrtimer_add_expires(timer, timer->base->get_time());
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a26036d37a38..382b159d8592 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -205,6 +205,11 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
205 205
206 update_clock_read_data(&rd); 206 update_clock_read_data(&rd);
207 207
208 if (sched_clock_timer.function != NULL) {
209 /* update timeout for clock wrap */
210 hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
211 }
212
208 r = rate; 213 r = rate;
209 if (r >= 4000000) { 214 if (r >= 4000000) {
210 r /= 1000000; 215 r /= 1000000;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d2a20e83ebae..22d7454b387b 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -610,6 +610,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
610 now = ktime_get(); 610 now = ktime_get();
611 /* Find all expired events */ 611 /* Find all expired events */
612 for_each_cpu(cpu, tick_broadcast_oneshot_mask) { 612 for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
613 /*
614 * Required for !SMP because for_each_cpu() reports
615 * unconditionally CPU0 as set on UP kernels.
616 */
617 if (!IS_ENABLED(CONFIG_SMP) &&
618 cpumask_empty(tick_broadcast_oneshot_mask))
619 break;
620
613 td = &per_cpu(tick_cpu_device, cpu); 621 td = &per_cpu(tick_cpu_device, cpu);
614 if (td->evtdev->next_event.tv64 <= now.tv64) { 622 if (td->evtdev->next_event.tv64 <= now.tv64) {
615 cpumask_set_cpu(cpu, tmpmask); 623 cpumask_set_cpu(cpu, tmpmask);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e5d228f7224c..5ad2e852e9f6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -570,7 +570,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
570 570
571static inline bool local_timer_softirq_pending(void) 571static inline bool local_timer_softirq_pending(void)
572{ 572{
573 return local_softirq_pending() & TIMER_SOFTIRQ; 573 return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
574} 574}
575 575
576static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, 576static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 86751c68e08d..de70ac1f84d0 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -28,6 +28,7 @@
28 */ 28 */
29 29
30#include <linux/export.h> 30#include <linux/export.h>
31#include <linux/kernel.h>
31#include <linux/timex.h> 32#include <linux/timex.h>
32#include <linux/capability.h> 33#include <linux/capability.h>
33#include <linux/timekeeper_internal.h> 34#include <linux/timekeeper_internal.h>
@@ -258,9 +259,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
258 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); 259 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
259#else 260#else
260# if BITS_PER_LONG == 32 261# if BITS_PER_LONG == 32
261 return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32; 262 return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
263 HZ_TO_MSEC_SHR32;
262# else 264# else
263 return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN; 265 return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
264# endif 266# endif
265#endif 267#endif
266} 268}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6e4866834d26..fed86b2dfc89 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -277,8 +277,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
277 /* Go back from cycles -> shifted ns */ 277 /* Go back from cycles -> shifted ns */
278 tk->xtime_interval = (u64) interval * clock->mult; 278 tk->xtime_interval = (u64) interval * clock->mult;
279 tk->xtime_remainder = ntpinterval - tk->xtime_interval; 279 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
280 tk->raw_interval = 280 tk->raw_interval = interval * clock->mult;
281 ((u64) interval * clock->mult) >> clock->shift;
282 281
283 /* if changing clocks, convert xtime_nsec shift units */ 282 /* if changing clocks, convert xtime_nsec shift units */
284 if (old_clock) { 283 if (old_clock) {
@@ -1767,7 +1766,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1767 unsigned int *clock_set) 1766 unsigned int *clock_set)
1768{ 1767{
1769 cycle_t interval = tk->cycle_interval << shift; 1768 cycle_t interval = tk->cycle_interval << shift;
1770 u64 raw_nsecs; 1769 u64 snsec_per_sec;
1771 1770
1772 /* If the offset is smaller than a shifted interval, do nothing */ 1771 /* If the offset is smaller than a shifted interval, do nothing */
1773 if (offset < interval) 1772 if (offset < interval)
@@ -1782,14 +1781,15 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1782 *clock_set |= accumulate_nsecs_to_secs(tk); 1781 *clock_set |= accumulate_nsecs_to_secs(tk);
1783 1782
1784 /* Accumulate raw time */ 1783 /* Accumulate raw time */
1785 raw_nsecs = (u64)tk->raw_interval << shift; 1784 tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
1786 raw_nsecs += tk->raw_time.tv_nsec; 1785 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
1787 if (raw_nsecs >= NSEC_PER_SEC) { 1786 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
1788 u64 raw_secs = raw_nsecs; 1787 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
1789 raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); 1788 tk->tkr_raw.xtime_nsec -= snsec_per_sec;
1790 tk->raw_time.tv_sec += raw_secs; 1789 tk->raw_time.tv_sec++;
1791 } 1790 }
1792 tk->raw_time.tv_nsec = raw_nsecs; 1791 tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
1792 tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
1793 1793
1794 /* Accumulate error between NTP and clock interval */ 1794 /* Accumulate error between NTP and clock interval */
1795 tk->ntp_error += tk->ntp_tick << shift; 1795 tk->ntp_error += tk->ntp_tick << shift;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 125407144c01..3d7588a2e97c 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -764,8 +764,15 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
764 __acquires(timer->base->lock) 764 __acquires(timer->base->lock)
765{ 765{
766 for (;;) { 766 for (;;) {
767 u32 tf = timer->flags;
768 struct tvec_base *base; 767 struct tvec_base *base;
768 u32 tf;
769
770 /*
771 * We need to use READ_ONCE() here, otherwise the compiler
772 * might re-read @tf between the check for TIMER_MIGRATING
773 * and spin_lock().
774 */
775 tf = READ_ONCE(timer->flags);
769 776
770 if (!(tf & TIMER_MIGRATING)) { 777 if (!(tf & TIMER_MIGRATING)) {
771 base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK); 778 base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index ba7d8b288bb3..ef4f16e81283 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -16,6 +16,7 @@
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/seq_file.h> 17#include <linux/seq_file.h>
18#include <linux/kallsyms.h> 18#include <linux/kallsyms.h>
19#include <linux/nmi.h>
19 20
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
21 22
@@ -96,6 +97,9 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
96 97
97next_one: 98next_one:
98 i = 0; 99 i = 0;
100
101 touch_nmi_watchdog();
102
99 raw_spin_lock_irqsave(&base->cpu_base->lock, flags); 103 raw_spin_lock_irqsave(&base->cpu_base->lock, flags);
100 104
101 curr = timerqueue_getnext(&base->active); 105 curr = timerqueue_getnext(&base->active);
@@ -207,6 +211,8 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
207{ 211{
208 struct clock_event_device *dev = td->evtdev; 212 struct clock_event_device *dev = td->evtdev;
209 213
214 touch_nmi_watchdog();
215
210 SEQ_printf(m, "Tick Device: mode: %d\n", td->mode); 216 SEQ_printf(m, "Tick Device: mode: %d\n", td->mode);
211 if (cpu < 0) 217 if (cpu < 0)
212 SEQ_printf(m, "Broadcast device\n"); 218 SEQ_printf(m, "Broadcast device\n");
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index a990824c8604..7ab5eafea8b2 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -57,7 +57,8 @@ static struct tracer_flags blk_tracer_flags = {
57}; 57};
58 58
59/* Global reference count of probes */ 59/* Global reference count of probes */
60static atomic_t blk_probes_ref = ATOMIC_INIT(0); 60static DEFINE_MUTEX(blk_probe_mutex);
61static int blk_probes_ref;
61 62
62static void blk_register_tracepoints(void); 63static void blk_register_tracepoints(void);
63static void blk_unregister_tracepoints(void); 64static void blk_unregister_tracepoints(void);
@@ -300,11 +301,26 @@ static void blk_trace_free(struct blk_trace *bt)
300 kfree(bt); 301 kfree(bt);
301} 302}
302 303
304static void get_probe_ref(void)
305{
306 mutex_lock(&blk_probe_mutex);
307 if (++blk_probes_ref == 1)
308 blk_register_tracepoints();
309 mutex_unlock(&blk_probe_mutex);
310}
311
312static void put_probe_ref(void)
313{
314 mutex_lock(&blk_probe_mutex);
315 if (!--blk_probes_ref)
316 blk_unregister_tracepoints();
317 mutex_unlock(&blk_probe_mutex);
318}
319
303static void blk_trace_cleanup(struct blk_trace *bt) 320static void blk_trace_cleanup(struct blk_trace *bt)
304{ 321{
305 blk_trace_free(bt); 322 blk_trace_free(bt);
306 if (atomic_dec_and_test(&blk_probes_ref)) 323 put_probe_ref();
307 blk_unregister_tracepoints();
308} 324}
309 325
310int blk_trace_remove(struct request_queue *q) 326int blk_trace_remove(struct request_queue *q)
@@ -522,8 +538,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
522 if (cmpxchg(&q->blk_trace, NULL, bt)) 538 if (cmpxchg(&q->blk_trace, NULL, bt))
523 goto err; 539 goto err;
524 540
525 if (atomic_inc_return(&blk_probes_ref) == 1) 541 get_probe_ref();
526 blk_register_tracepoints();
527 542
528 return 0; 543 return 0;
529err: 544err:
@@ -1466,9 +1481,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
1466 if (bt == NULL) 1481 if (bt == NULL)
1467 return -EINVAL; 1482 return -EINVAL;
1468 1483
1469 if (atomic_dec_and_test(&blk_probes_ref)) 1484 put_probe_ref();
1470 blk_unregister_tracepoints();
1471
1472 blk_trace_free(bt); 1485 blk_trace_free(bt);
1473 return 0; 1486 return 0;
1474} 1487}
@@ -1499,8 +1512,7 @@ static int blk_trace_setup_queue(struct request_queue *q,
1499 if (cmpxchg(&q->blk_trace, NULL, bt)) 1512 if (cmpxchg(&q->blk_trace, NULL, bt))
1500 goto free_bt; 1513 goto free_bt;
1501 1514
1502 if (atomic_inc_return(&blk_probes_ref) == 1) 1515 get_probe_ref();
1503 blk_register_tracepoints();
1504 return 0; 1516 return 0;
1505 1517
1506free_bt: 1518free_bt:
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fc0051fd672d..ac758a53fcea 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3845,7 +3845,6 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3845 func_g.type = filter_parse_regex(glob, strlen(glob), 3845 func_g.type = filter_parse_regex(glob, strlen(glob),
3846 &func_g.search, &not); 3846 &func_g.search, &not);
3847 func_g.len = strlen(func_g.search); 3847 func_g.len = strlen(func_g.search);
3848 func_g.search = glob;
3849 3848
3850 /* we do not support '!' for function probes */ 3849 /* we do not support '!' for function probes */
3851 if (WARN_ON(not)) 3850 if (WARN_ON(not))
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d9cd6191760b..fdaa88f38aec 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3142,6 +3142,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
3142} 3142}
3143 3143
3144/** 3144/**
3145 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
3146 * @buffer: The ring buffer to see if write is set enabled
3147 *
3148 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
3149 * Note that this does NOT mean it is in a writable state.
3150 *
3151 * It may return true when the ring buffer has been disabled by
3152 * ring_buffer_record_disable(), as that is a temporary disabling of
3153 * the ring buffer.
3154 */
3155int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
3156{
3157 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
3158}
3159
3160/**
3145 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 3161 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3146 * @buffer: The ring buffer to stop writes to. 3162 * @buffer: The ring buffer to stop writes to.
3147 * @cpu: The CPU buffer to stop 3163 * @cpu: The CPU buffer to stop
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8aef4e63ac57..1b980a8ef791 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1088,6 +1088,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1088 1088
1089 arch_spin_lock(&tr->max_lock); 1089 arch_spin_lock(&tr->max_lock);
1090 1090
1091 /* Inherit the recordable setting from trace_buffer */
1092 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1093 ring_buffer_record_on(tr->max_buffer.buffer);
1094 else
1095 ring_buffer_record_off(tr->max_buffer.buffer);
1096
1091 buf = tr->trace_buffer.buffer; 1097 buf = tr->trace_buffer.buffer;
1092 tr->trace_buffer.buffer = tr->max_buffer.buffer; 1098 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1093 tr->max_buffer.buffer = buf; 1099 tr->max_buffer.buffer = buf;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index f0e5408499b6..1ab2db6c127b 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -322,6 +322,9 @@ static int regex_match_full(char *str, struct regex *r, int len)
322 322
323static int regex_match_front(char *str, struct regex *r, int len) 323static int regex_match_front(char *str, struct regex *r, int len)
324{ 324{
325 if (len < r->len)
326 return 0;
327
325 if (strncmp(str, r->pattern, r->len) == 0) 328 if (strncmp(str, r->pattern, r->len) == 0)
326 return 1; 329 return 1;
327 return 0; 330 return 0;
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 42a4009fd75a..b8a894adab2c 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -469,9 +469,10 @@ clear_event_triggers(struct trace_array *tr)
469 struct trace_event_file *file; 469 struct trace_event_file *file;
470 470
471 list_for_each_entry(file, &tr->events, list) { 471 list_for_each_entry(file, &tr->events, list) {
472 struct event_trigger_data *data; 472 struct event_trigger_data *data, *n;
473 list_for_each_entry_rcu(data, &file->triggers, list) { 473 list_for_each_entry_safe(data, n, &file->triggers, list) {
474 trace_event_trigger_enable_disable(file, 0); 474 trace_event_trigger_enable_disable(file, 0);
475 list_del_rcu(&data->list);
475 if (data->ops->free) 476 if (data->ops->free)
476 data->ops->free(data->ops, data); 477 data->ops->free(data->ops, data);
477 } 478 }
@@ -662,6 +663,8 @@ event_trigger_callback(struct event_command *cmd_ops,
662 goto out_free; 663 goto out_free;
663 664
664 out_reg: 665 out_reg:
666 /* Up the trigger_data count to make sure reg doesn't free it on failure */
667 event_trigger_init(trigger_ops, trigger_data);
665 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); 668 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
666 /* 669 /*
667 * The above returns on success the # of functions enabled, 670 * The above returns on success the # of functions enabled,
@@ -669,11 +672,13 @@ event_trigger_callback(struct event_command *cmd_ops,
669 * Consider no functions a failure too. 672 * Consider no functions a failure too.
670 */ 673 */
671 if (!ret) { 674 if (!ret) {
675 cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
672 ret = -ENOENT; 676 ret = -ENOENT;
673 goto out_free; 677 } else if (ret > 0)
674 } else if (ret < 0) 678 ret = 0;
675 goto out_free; 679
676 ret = 0; 680 /* Down the counter of trigger_data or free it if not used anymore */
681 event_trigger_free(trigger_ops, trigger_data);
677 out: 682 out:
678 return ret; 683 return ret;
679 684
@@ -1226,6 +1231,9 @@ event_enable_trigger_func(struct event_command *cmd_ops,
1226 goto out; 1231 goto out;
1227 } 1232 }
1228 1233
1234 /* Up the trigger_data count to make sure nothing frees it on failure */
1235 event_trigger_init(trigger_ops, trigger_data);
1236
1229 if (trigger) { 1237 if (trigger) {
1230 number = strsep(&trigger, ":"); 1238 number = strsep(&trigger, ":");
1231 1239
@@ -1276,6 +1284,7 @@ event_enable_trigger_func(struct event_command *cmd_ops,
1276 goto out_disable; 1284 goto out_disable;
1277 /* Just return zero, not the number of enabled functions */ 1285 /* Just return zero, not the number of enabled functions */
1278 ret = 0; 1286 ret = 0;
1287 event_trigger_free(trigger_ops, trigger_data);
1279 out: 1288 out:
1280 return ret; 1289 return ret;
1281 1290
@@ -1286,7 +1295,7 @@ event_enable_trigger_func(struct event_command *cmd_ops,
1286 out_free: 1295 out_free:
1287 if (cmd_ops->set_filter) 1296 if (cmd_ops->set_filter)
1288 cmd_ops->set_filter(NULL, trigger_data, NULL); 1297 cmd_ops->set_filter(NULL, trigger_data, NULL);
1289 kfree(trigger_data); 1298 event_trigger_free(trigger_ops, trigger_data);
1290 kfree(enable_data); 1299 kfree(enable_data);
1291 goto out; 1300 goto out;
1292} 1301}
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 7fd6f5a26143..e212ec4cfb4e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -768,6 +768,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
768 struct ftrace_graph_ret *graph_ret; 768 struct ftrace_graph_ret *graph_ret;
769 struct ftrace_graph_ent *call; 769 struct ftrace_graph_ent *call;
770 unsigned long long duration; 770 unsigned long long duration;
771 int cpu = iter->cpu;
771 int i; 772 int i;
772 773
773 graph_ret = &ret_entry->ret; 774 graph_ret = &ret_entry->ret;
@@ -776,7 +777,6 @@ print_graph_entry_leaf(struct trace_iterator *iter,
776 777
777 if (data) { 778 if (data) {
778 struct fgraph_cpu_data *cpu_data; 779 struct fgraph_cpu_data *cpu_data;
779 int cpu = iter->cpu;
780 780
781 cpu_data = per_cpu_ptr(data->cpu_data, cpu); 781 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
782 782
@@ -806,6 +806,9 @@ print_graph_entry_leaf(struct trace_iterator *iter,
806 806
807 trace_seq_printf(s, "%ps();\n", (void *)call->func); 807 trace_seq_printf(s, "%ps();\n", (void *)call->func);
808 808
809 print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
810 cpu, iter->ent->pid, flags);
811
809 return trace_handle_return(s); 812 return trace_handle_return(s);
810} 813}
811 814
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index e9092a0247bf..f0ee722be520 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -349,11 +349,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
349static int 349static int
350enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) 350enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
351{ 351{
352 struct event_file_link *link = NULL;
352 int ret = 0; 353 int ret = 0;
353 354
354 if (file) { 355 if (file) {
355 struct event_file_link *link;
356
357 link = kmalloc(sizeof(*link), GFP_KERNEL); 356 link = kmalloc(sizeof(*link), GFP_KERNEL);
358 if (!link) { 357 if (!link) {
359 ret = -ENOMEM; 358 ret = -ENOMEM;
@@ -373,6 +372,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
373 else 372 else
374 ret = enable_kprobe(&tk->rp.kp); 373 ret = enable_kprobe(&tk->rp.kp);
375 } 374 }
375
376 if (ret) {
377 if (file) {
378 /* Notice the if is true on not WARN() */
379 if (!WARN_ON_ONCE(!link))
380 list_del_rcu(&link->list);
381 kfree(link);
382 tk->tp.flags &= ~TP_FLAG_TRACE;
383 } else {
384 tk->tp.flags &= ~TP_FLAG_PROFILE;
385 }
386 }
376 out: 387 out:
377 return ret; 388 return ret;
378} 389}
@@ -599,7 +610,7 @@ static int create_trace_kprobe(int argc, char **argv)
599 bool is_return = false, is_delete = false; 610 bool is_return = false, is_delete = false;
600 char *symbol = NULL, *event = NULL, *group = NULL; 611 char *symbol = NULL, *event = NULL, *group = NULL;
601 char *arg; 612 char *arg;
602 unsigned long offset = 0; 613 long offset = 0;
603 void *addr = NULL; 614 void *addr = NULL;
604 char buf[MAX_EVENT_NAME_LEN]; 615 char buf[MAX_EVENT_NAME_LEN];
605 616
@@ -667,7 +678,7 @@ static int create_trace_kprobe(int argc, char **argv)
667 symbol = argv[1]; 678 symbol = argv[1];
668 /* TODO: support .init module functions */ 679 /* TODO: support .init module functions */
669 ret = traceprobe_split_symbol_offset(symbol, &offset); 680 ret = traceprobe_split_symbol_offset(symbol, &offset);
670 if (ret) { 681 if (ret || offset < 0 || offset > UINT_MAX) {
671 pr_info("Failed to parse either an address or a symbol.\n"); 682 pr_info("Failed to parse either an address or a symbol.\n");
672 return ret; 683 return ret;
673 } 684 }
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 1769a81da8a7..741c00b90fdc 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -293,7 +293,7 @@ static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
293} 293}
294 294
295/* Split symbol and offset. */ 295/* Split symbol and offset. */
296int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset) 296int traceprobe_split_symbol_offset(char *symbol, long *offset)
297{ 297{
298 char *tmp; 298 char *tmp;
299 int ret; 299 int ret;
@@ -301,13 +301,11 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
301 if (!offset) 301 if (!offset)
302 return -EINVAL; 302 return -EINVAL;
303 303
304 tmp = strchr(symbol, '+'); 304 tmp = strpbrk(symbol, "+-");
305 if (tmp) { 305 if (tmp) {
306 /* skip sign because kstrtoul doesn't accept '+' */ 306 ret = kstrtol(tmp, 0, offset);
307 ret = kstrtoul(tmp + 1, 0, offset);
308 if (ret) 307 if (ret)
309 return ret; 308 return ret;
310
311 *tmp = '\0'; 309 *tmp = '\0';
312 } else 310 } else
313 *offset = 0; 311 *offset = 0;
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index f6398db09114..0afe921df8c8 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -335,7 +335,7 @@ extern int traceprobe_conflict_field_name(const char *name,
335extern void traceprobe_update_arg(struct probe_arg *arg); 335extern void traceprobe_update_arg(struct probe_arg *arg);
336extern void traceprobe_free_probe_arg(struct probe_arg *arg); 336extern void traceprobe_free_probe_arg(struct probe_arg *arg);
337 337
338extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset); 338extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
339 339
340extern ssize_t traceprobe_probes_write(struct file *file, 340extern ssize_t traceprobe_probes_write(struct file *file,
341 const char __user *buffer, size_t count, loff_t *ppos, 341 const char __user *buffer, size_t count, loff_t *ppos,
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index d2f6d0be3503..68bb89ad9d28 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -149,6 +149,8 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
149 return; 149 return;
150 150
151 ret = strncpy_from_user(dst, src, maxlen); 151 ret = strncpy_from_user(dst, src, maxlen);
152 if (ret == maxlen)
153 dst[--ret] = '\0';
152 154
153 if (ret < 0) { /* Failed to fetch string */ 155 if (ret < 0) { /* Failed to fetch string */
154 ((u8 *)get_rloc_data(dest))[0] = '\0'; 156 ((u8 *)get_rloc_data(dest))[0] = '\0';
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index ecd536de603a..eda85bbf1c2e 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -202,7 +202,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
202 lockdep_is_held(&tracepoints_mutex)); 202 lockdep_is_held(&tracepoints_mutex));
203 old = func_add(&tp_funcs, func, prio); 203 old = func_add(&tp_funcs, func, prio);
204 if (IS_ERR(old)) { 204 if (IS_ERR(old)) {
205 WARN_ON_ONCE(1); 205 WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
206 return PTR_ERR(old); 206 return PTR_ERR(old);
207 } 207 }
208 208
@@ -235,7 +235,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
235 lockdep_is_held(&tracepoints_mutex)); 235 lockdep_is_held(&tracepoints_mutex));
236 old = func_remove(&tp_funcs, func); 236 old = func_remove(&tp_funcs, func);
237 if (IS_ERR(old)) { 237 if (IS_ERR(old)) {
238 WARN_ON_ONCE(1); 238 WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
239 return PTR_ERR(old); 239 return PTR_ERR(old);
240 } 240 }
241 241
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 85555eb4d3cb..d8a2084b88db 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4048,6 +4048,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4048EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4048EXPORT_SYMBOL_GPL(workqueue_set_max_active);
4049 4049
4050/** 4050/**
4051 * current_work - retrieve %current task's work struct
4052 *
4053 * Determine if %current task is a workqueue worker and what it's working on.
4054 * Useful to find out the context that the %current task is running in.
4055 *
4056 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4057 */
4058struct work_struct *current_work(void)
4059{
4060 struct worker *worker = current_wq_worker();
4061
4062 return worker ? worker->current_work : NULL;
4063}
4064EXPORT_SYMBOL(current_work);
4065
4066/**
4051 * current_is_workqueue_rescuer - is %current workqueue rescuer? 4067 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4052 * 4068 *
4053 * Determine whether %current is a workqueue rescuer. Can be used from 4069 * Determine whether %current is a workqueue rescuer. Can be used from
@@ -5183,7 +5199,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
5183 5199
5184 ret = device_register(&wq_dev->dev); 5200 ret = device_register(&wq_dev->dev);
5185 if (ret) { 5201 if (ret) {
5186 kfree(wq_dev); 5202 put_device(&wq_dev->dev);
5187 wq->wq_dev = NULL; 5203 wq->wq_dev = NULL;
5188 return ret; 5204 return ret;
5189 } 5205 }
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b53b375e14bd..f0602beeba26 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -197,7 +197,7 @@ config ENABLE_MUST_CHECK
197config FRAME_WARN 197config FRAME_WARN
198 int "Warn for stack frames larger than (needs gcc 4.4)" 198 int "Warn for stack frames larger than (needs gcc 4.4)"
199 range 0 8192 199 range 0 8192
200 default 0 if KASAN 200 default 2048 if GCC_PLUGIN_LATENT_ENTROPY
201 default 1024 if !64BIT 201 default 1024 if !64BIT
202 default 2048 if 64BIT 202 default 2048 if 64BIT
203 help 203 help
diff --git a/lib/Makefile b/lib/Makefile
index 7f1de26613d2..cb4f6aa95013 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -58,8 +58,6 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
58obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o 58obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
59obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o 59obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
60 60
61GCOV_PROFILE_hweight.o := n
62CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
63obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 61obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
64 62
65obj-$(CONFIG_BTREE) += btree.o 63obj-$(CONFIG_BTREE) += btree.o
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 83c33a5bcffb..de67fea3cf46 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -16,6 +16,10 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/atomic.h> 17#include <linux/atomic.h>
18 18
19#ifdef CONFIG_X86
20#include <asm/cpufeature.h> /* for boot_cpu_has below */
21#endif
22
19#define TEST(bit, op, c_op, val) \ 23#define TEST(bit, op, c_op, val) \
20do { \ 24do { \
21 atomic##bit##_set(&v, v0); \ 25 atomic##bit##_set(&v, v0); \
diff --git a/lib/hweight.c b/lib/hweight.c
index 9a5c1f221558..43273a7d83cf 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -9,6 +9,7 @@
9 * The Hamming Weight of a number is the total number of bits set in it. 9 * The Hamming Weight of a number is the total number of bits set in it.
10 */ 10 */
11 11
12#ifndef __HAVE_ARCH_SW_HWEIGHT
12unsigned int __sw_hweight32(unsigned int w) 13unsigned int __sw_hweight32(unsigned int w)
13{ 14{
14#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER 15#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
@@ -25,6 +26,7 @@ unsigned int __sw_hweight32(unsigned int w)
25#endif 26#endif
26} 27}
27EXPORT_SYMBOL(__sw_hweight32); 28EXPORT_SYMBOL(__sw_hweight32);
29#endif
28 30
29unsigned int __sw_hweight16(unsigned int w) 31unsigned int __sw_hweight16(unsigned int w)
30{ 32{
@@ -43,6 +45,7 @@ unsigned int __sw_hweight8(unsigned int w)
43} 45}
44EXPORT_SYMBOL(__sw_hweight8); 46EXPORT_SYMBOL(__sw_hweight8);
45 47
48#ifndef __HAVE_ARCH_SW_HWEIGHT
46unsigned long __sw_hweight64(__u64 w) 49unsigned long __sw_hweight64(__u64 w)
47{ 50{
48#if BITS_PER_LONG == 32 51#if BITS_PER_LONG == 32
@@ -65,3 +68,4 @@ unsigned long __sw_hweight64(__u64 w)
65#endif 68#endif
66} 69}
67EXPORT_SYMBOL(__sw_hweight64); 70EXPORT_SYMBOL(__sw_hweight64);
71#endif
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 86c8911b0e3a..b9462037868d 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -83,7 +83,8 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
83 83
84 if (ioremap_pmd_enabled() && 84 if (ioremap_pmd_enabled() &&
85 ((next - addr) == PMD_SIZE) && 85 ((next - addr) == PMD_SIZE) &&
86 IS_ALIGNED(phys_addr + addr, PMD_SIZE)) { 86 IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
87 pmd_free_pte_page(pmd, addr)) {
87 if (pmd_set_huge(pmd, phys_addr + addr, prot)) 88 if (pmd_set_huge(pmd, phys_addr + addr, prot))
88 continue; 89 continue;
89 } 90 }
@@ -109,7 +110,8 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
109 110
110 if (ioremap_pud_enabled() && 111 if (ioremap_pud_enabled() &&
111 ((next - addr) == PUD_SIZE) && 112 ((next - addr) == PUD_SIZE) &&
112 IS_ALIGNED(phys_addr + addr, PUD_SIZE)) { 113 IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
114 pud_free_pmd_page(pud, addr)) {
113 if (pud_set_huge(pud, phys_addr + addr, prot)) 115 if (pud_set_huge(pud, phys_addr + addr, prot))
114 continue; 116 continue;
115 } 117 }
diff --git a/lib/kobject.c b/lib/kobject.c
index 7cbccd2b4c72..895edb63fba4 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -234,14 +234,12 @@ static int kobject_add_internal(struct kobject *kobj)
234 234
235 /* be noisy on error issues */ 235 /* be noisy on error issues */
236 if (error == -EEXIST) 236 if (error == -EEXIST)
237 WARN(1, "%s failed for %s with " 237 pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
238 "-EEXIST, don't try to register things with " 238 __func__, kobject_name(kobj));
239 "the same name in the same directory.\n",
240 __func__, kobject_name(kobj));
241 else 239 else
242 WARN(1, "%s failed for %s (error: %d parent: %s)\n", 240 pr_err("%s failed for %s (error: %d parent: %s)\n",
243 __func__, kobject_name(kobj), error, 241 __func__, kobject_name(kobj), error,
244 parent ? kobject_name(parent) : "'none'"); 242 parent ? kobject_name(parent) : "'none'");
245 } else 243 } else
246 kobj->state_in_sysfs = 1; 244 kobj->state_in_sysfs = 1;
247 245
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index b90e255c2a68..d2ecf0a09180 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -671,7 +671,23 @@ do { \
671 ************** MIPS/64 ************** 671 ************** MIPS/64 **************
672 ***************************************/ 672 ***************************************/
673#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 673#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
674#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) 674#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
675/*
676 * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
677 * code below, so we special case MIPS64r6 until the compiler can do better.
678 */
679#define umul_ppmm(w1, w0, u, v) \
680do { \
681 __asm__ ("dmulu %0,%1,%2" \
682 : "=d" ((UDItype)(w0)) \
683 : "d" ((UDItype)(u)), \
684 "d" ((UDItype)(v))); \
685 __asm__ ("dmuhu %0,%1,%2" \
686 : "=d" ((UDItype)(w1)) \
687 : "d" ((UDItype)(u)), \
688 "d" ((UDItype)(v))); \
689} while (0)
690#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
675#define umul_ppmm(w1, w0, u, v) \ 691#define umul_ppmm(w1, w0, u, v) \
676do { \ 692do { \
677 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ 693 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index 318f382a010d..150e04d70303 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -116,7 +116,7 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
116 int count; 116 int count;
117 117
118 if (v >= end) 118 if (v >= end)
119 return -EBADMSG; 119 goto bad;
120 120
121 n = *v++; 121 n = *v++;
122 ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40); 122 ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40);
@@ -134,7 +134,7 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
134 num = n & 0x7f; 134 num = n & 0x7f;
135 do { 135 do {
136 if (v >= end) 136 if (v >= end)
137 return -EBADMSG; 137 goto bad;
138 n = *v++; 138 n = *v++;
139 num <<= 7; 139 num <<= 7;
140 num |= n & 0x7f; 140 num |= n & 0x7f;
@@ -148,6 +148,10 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
148 } 148 }
149 149
150 return ret; 150 return ret;
151
152bad:
153 snprintf(buffer, bufsize, "(bad)");
154 return -EBADMSG;
151} 155}
152EXPORT_SYMBOL_GPL(sprint_oid); 156EXPORT_SYMBOL_GPL(sprint_oid);
153 157
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 51282f579760..37ea94b636a3 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -670,8 +670,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
670 670
671static size_t rounded_hashtable_size(const struct rhashtable_params *params) 671static size_t rounded_hashtable_size(const struct rhashtable_params *params)
672{ 672{
673 return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), 673 size_t retsize;
674 (unsigned long)params->min_size); 674
675 if (params->nelem_hint)
676 retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
677 (unsigned long)params->min_size);
678 else
679 retsize = max(HASH_DEFAULT_SIZE,
680 (unsigned long)params->min_size);
681
682 return retsize;
675} 683}
676 684
677static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) 685static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
@@ -728,8 +736,6 @@ int rhashtable_init(struct rhashtable *ht,
728 struct bucket_table *tbl; 736 struct bucket_table *tbl;
729 size_t size; 737 size_t size;
730 738
731 size = HASH_DEFAULT_SIZE;
732
733 if ((!params->key_len && !params->obj_hashfn) || 739 if ((!params->key_len && !params->obj_hashfn) ||
734 (params->obj_hashfn && !params->obj_cmpfn)) 740 (params->obj_hashfn && !params->obj_cmpfn))
735 return -EINVAL; 741 return -EINVAL;
@@ -756,8 +762,7 @@ int rhashtable_init(struct rhashtable *ht,
756 762
757 ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); 763 ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
758 764
759 if (params->nelem_hint) 765 size = rounded_hashtable_size(&ht->p);
760 size = rounded_hashtable_size(&ht->p);
761 766
762 /* The maximum (not average) chain length grows with the 767 /* The maximum (not average) chain length grows with the
763 * size of the hash table, at a rate of (log N)/(log log N). 768 * size of the hash table, at a rate of (log N)/(log log N).
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 7e26aea3e404..b1495f586f29 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -83,6 +83,7 @@ struct bpf_test {
83 __u32 result; 83 __u32 result;
84 } test[MAX_SUBTESTS]; 84 } test[MAX_SUBTESTS];
85 int (*fill_helper)(struct bpf_test *self); 85 int (*fill_helper)(struct bpf_test *self);
86 int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
86 __u8 frag_data[MAX_DATA]; 87 __u8 frag_data[MAX_DATA];
87}; 88};
88 89
@@ -1780,7 +1781,9 @@ static struct bpf_test tests[] = {
1780 }, 1781 },
1781 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, 1782 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1782 { }, 1783 { },
1783 { } 1784 { },
1785 .fill_helper = NULL,
1786 .expected_errcode = -EINVAL,
1784 }, 1787 },
1785 { 1788 {
1786 "check: div_k_0", 1789 "check: div_k_0",
@@ -1790,7 +1793,9 @@ static struct bpf_test tests[] = {
1790 }, 1793 },
1791 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, 1794 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1792 { }, 1795 { },
1793 { } 1796 { },
1797 .fill_helper = NULL,
1798 .expected_errcode = -EINVAL,
1794 }, 1799 },
1795 { 1800 {
1796 "check: unknown insn", 1801 "check: unknown insn",
@@ -1801,7 +1806,9 @@ static struct bpf_test tests[] = {
1801 }, 1806 },
1802 CLASSIC | FLAG_EXPECTED_FAIL, 1807 CLASSIC | FLAG_EXPECTED_FAIL,
1803 { }, 1808 { },
1804 { } 1809 { },
1810 .fill_helper = NULL,
1811 .expected_errcode = -EINVAL,
1805 }, 1812 },
1806 { 1813 {
1807 "check: out of range spill/fill", 1814 "check: out of range spill/fill",
@@ -1811,7 +1818,9 @@ static struct bpf_test tests[] = {
1811 }, 1818 },
1812 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, 1819 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1813 { }, 1820 { },
1814 { } 1821 { },
1822 .fill_helper = NULL,
1823 .expected_errcode = -EINVAL,
1815 }, 1824 },
1816 { 1825 {
1817 "JUMPS + HOLES", 1826 "JUMPS + HOLES",
@@ -1903,6 +1912,8 @@ static struct bpf_test tests[] = {
1903 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, 1912 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1904 { }, 1913 { },
1905 { }, 1914 { },
1915 .fill_helper = NULL,
1916 .expected_errcode = -EINVAL,
1906 }, 1917 },
1907 { 1918 {
1908 "check: LDX + RET X", 1919 "check: LDX + RET X",
@@ -1913,6 +1924,8 @@ static struct bpf_test tests[] = {
1913 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, 1924 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1914 { }, 1925 { },
1915 { }, 1926 { },
1927 .fill_helper = NULL,
1928 .expected_errcode = -EINVAL,
1916 }, 1929 },
1917 { /* Mainly checking JIT here. */ 1930 { /* Mainly checking JIT here. */
1918 "M[]: alt STX + LDX", 1931 "M[]: alt STX + LDX",
@@ -2087,6 +2100,8 @@ static struct bpf_test tests[] = {
2087 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, 2100 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
2088 { }, 2101 { },
2089 { }, 2102 { },
2103 .fill_helper = NULL,
2104 .expected_errcode = -EINVAL,
2090 }, 2105 },
2091 { /* Passes checker but fails during runtime. */ 2106 { /* Passes checker but fails during runtime. */
2092 "LD [SKF_AD_OFF-1]", 2107 "LD [SKF_AD_OFF-1]",
@@ -4462,6 +4477,7 @@ static struct bpf_test tests[] = {
4462 { }, 4477 { },
4463 { }, 4478 { },
4464 .fill_helper = bpf_fill_maxinsns4, 4479 .fill_helper = bpf_fill_maxinsns4,
4480 .expected_errcode = -EINVAL,
4465 }, 4481 },
4466 { /* Mainly checking JIT here. */ 4482 { /* Mainly checking JIT here. */
4467 "BPF_MAXINSNS: Very long jump", 4483 "BPF_MAXINSNS: Very long jump",
@@ -4517,10 +4533,15 @@ static struct bpf_test tests[] = {
4517 { 4533 {
4518 "BPF_MAXINSNS: Jump, gap, jump, ...", 4534 "BPF_MAXINSNS: Jump, gap, jump, ...",
4519 { }, 4535 { },
4536#ifdef CONFIG_BPF_JIT_ALWAYS_ON
4537 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
4538#else
4520 CLASSIC | FLAG_NO_DATA, 4539 CLASSIC | FLAG_NO_DATA,
4540#endif
4521 { }, 4541 { },
4522 { { 0, 0xababcbac } }, 4542 { { 0, 0xababcbac } },
4523 .fill_helper = bpf_fill_maxinsns11, 4543 .fill_helper = bpf_fill_maxinsns11,
4544 .expected_errcode = -ENOTSUPP,
4524 }, 4545 },
4525 { 4546 {
4526 "BPF_MAXINSNS: ld_abs+get_processor_id", 4547 "BPF_MAXINSNS: ld_abs+get_processor_id",
@@ -5290,7 +5311,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
5290 5311
5291 *err = bpf_prog_create(&fp, &fprog); 5312 *err = bpf_prog_create(&fp, &fprog);
5292 if (tests[which].aux & FLAG_EXPECTED_FAIL) { 5313 if (tests[which].aux & FLAG_EXPECTED_FAIL) {
5293 if (*err == -EINVAL) { 5314 if (*err == tests[which].expected_errcode) {
5294 pr_cont("PASS\n"); 5315 pr_cont("PASS\n");
5295 /* Verifier rejected filter as expected. */ 5316 /* Verifier rejected filter as expected. */
5296 *err = 0; 5317 *err = 0;
@@ -5304,9 +5325,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
5304 return NULL; 5325 return NULL;
5305 } 5326 }
5306 } 5327 }
5307 /* We don't expect to fail. */
5308 if (*err) { 5328 if (*err) {
5309 pr_cont("FAIL to attach err=%d len=%d\n", 5329 pr_cont("FAIL to prog_create err=%d len=%d\n",
5310 *err, fprog.len); 5330 *err, fprog.len);
5311 return NULL; 5331 return NULL;
5312 } 5332 }
@@ -5325,7 +5345,11 @@ static struct bpf_prog *generate_filter(int which, int *err)
5325 fp->type = BPF_PROG_TYPE_SOCKET_FILTER; 5345 fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
5326 memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn)); 5346 memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
5327 5347
5328 bpf_prog_select_runtime(fp); 5348 *err = bpf_prog_select_runtime(fp);
5349 if (*err) {
5350 pr_cont("FAIL to select_runtime err=%d\n", *err);
5351 return NULL;
5352 }
5329 break; 5353 break;
5330 } 5354 }
5331 5355
@@ -5511,8 +5535,8 @@ static __init int test_bpf(void)
5511 pass_cnt++; 5535 pass_cnt++;
5512 continue; 5536 continue;
5513 } 5537 }
5514 5538 err_cnt++;
5515 return err; 5539 continue;
5516 } 5540 }
5517 5541
5518 pr_cont("jited:%u ", fp->jited); 5542 pr_cont("jited:%u ", fp->jited);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index f9cee8e1233c..646009db4198 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1345,9 +1345,6 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
1345 return string(buf, end, NULL, spec); 1345 return string(buf, end, NULL, spec);
1346 1346
1347 switch (fmt[1]) { 1347 switch (fmt[1]) {
1348 case 'r':
1349 return number(buf, end, clk_get_rate(clk), spec);
1350
1351 case 'n': 1348 case 'n':
1352 default: 1349 default:
1353#ifdef CONFIG_COMMON_CLK 1350#ifdef CONFIG_COMMON_CLK
diff --git a/mm/Kconfig b/mm/Kconfig
index 97a4e06b15c0..5753f69b23f4 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -628,6 +628,7 @@ config DEFERRED_STRUCT_PAGE_INIT
628 default n 628 default n
629 depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT 629 depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
630 depends on MEMORY_HOTPLUG 630 depends on MEMORY_HOTPLUG
631 depends on !NEED_PER_CPU_KM
631 help 632 help
632 Ordinarily all struct pages are initialised during early boot in a 633 Ordinarily all struct pages are initialised during early boot in a
633 single thread. On very large machines this can take a considerable 634 single thread. On very large machines this can take a considerable
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index a988d4ef39da..7f80b1a1bc34 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -922,7 +922,7 @@ static atomic_t nr_wb_congested[2];
922void clear_wb_congested(struct bdi_writeback_congested *congested, int sync) 922void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
923{ 923{
924 wait_queue_head_t *wqh = &congestion_wqh[sync]; 924 wait_queue_head_t *wqh = &congestion_wqh[sync];
925 enum wb_state bit; 925 enum wb_congested_state bit;
926 926
927 bit = sync ? WB_sync_congested : WB_async_congested; 927 bit = sync ? WB_sync_congested : WB_async_congested;
928 if (test_and_clear_bit(bit, &congested->state)) 928 if (test_and_clear_bit(bit, &congested->state))
@@ -935,7 +935,7 @@ EXPORT_SYMBOL(clear_wb_congested);
935 935
936void set_wb_congested(struct bdi_writeback_congested *congested, int sync) 936void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
937{ 937{
938 enum wb_state bit; 938 enum wb_congested_state bit;
939 939
940 bit = sync ? WB_sync_congested : WB_async_congested; 940 bit = sync ? WB_sync_congested : WB_async_congested;
941 if (!test_and_set_bit(bit, &congested->state)) 941 if (!test_and_set_bit(bit, &congested->state))
diff --git a/mm/cma.c b/mm/cma.c
index 384c2cb51b56..488b6c62463b 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -54,7 +54,7 @@ unsigned long cma_get_size(const struct cma *cma)
54} 54}
55 55
56static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, 56static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
57 int align_order) 57 unsigned int align_order)
58{ 58{
59 if (align_order <= cma->order_per_bit) 59 if (align_order <= cma->order_per_bit)
60 return 0; 60 return 0;
@@ -62,17 +62,14 @@ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
62} 62}
63 63
64/* 64/*
65 * Find a PFN aligned to the specified order and return an offset represented in 65 * Find the offset of the base PFN from the specified align_order.
66 * order_per_bits. 66 * The value returned is represented in order_per_bits.
67 */ 67 */
68static unsigned long cma_bitmap_aligned_offset(const struct cma *cma, 68static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
69 int align_order) 69 unsigned int align_order)
70{ 70{
71 if (align_order <= cma->order_per_bit) 71 return (cma->base_pfn & ((1UL << align_order) - 1))
72 return 0; 72 >> cma->order_per_bit;
73
74 return (ALIGN(cma->base_pfn, (1UL << align_order))
75 - cma->base_pfn) >> cma->order_per_bit;
76} 73}
77 74
78static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, 75static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
index 6d5717bd7197..57540de2b44c 100644
--- a/mm/early_ioremap.c
+++ b/mm/early_ioremap.c
@@ -103,7 +103,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
103 enum fixed_addresses idx; 103 enum fixed_addresses idx;
104 int i, slot; 104 int i, slot;
105 105
106 WARN_ON(system_state != SYSTEM_BOOTING); 106 WARN_ON(system_state >= SYSTEM_RUNNING);
107 107
108 slot = -1; 108 slot = -1;
109 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { 109 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
diff --git a/mm/filemap.c b/mm/filemap.c
index 69f75c77c098..21e750b6e810 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -571,7 +571,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
571 VM_BUG_ON_PAGE(!PageLocked(new), new); 571 VM_BUG_ON_PAGE(!PageLocked(new), new);
572 VM_BUG_ON_PAGE(new->mapping, new); 572 VM_BUG_ON_PAGE(new->mapping, new);
573 573
574 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); 574 error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
575 if (!error) { 575 if (!error) {
576 struct address_space *mapping = old->mapping; 576 struct address_space *mapping = old->mapping;
577 void (*freepage)(struct page *); 577 void (*freepage)(struct page *);
@@ -630,7 +630,7 @@ static int __add_to_page_cache_locked(struct page *page,
630 return error; 630 return error;
631 } 631 }
632 632
633 error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); 633 error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
634 if (error) { 634 if (error) {
635 if (!huge) 635 if (!huge)
636 mem_cgroup_cancel_charge(page, memcg); 636 mem_cgroup_cancel_charge(page, memcg);
@@ -1192,8 +1192,7 @@ no_page:
1192 if (fgp_flags & FGP_ACCESSED) 1192 if (fgp_flags & FGP_ACCESSED)
1193 __SetPageReferenced(page); 1193 __SetPageReferenced(page);
1194 1194
1195 err = add_to_page_cache_lru(page, mapping, offset, 1195 err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
1196 gfp_mask & GFP_RECLAIM_MASK);
1197 if (unlikely(err)) { 1196 if (unlikely(err)) {
1198 page_cache_release(page); 1197 page_cache_release(page);
1199 page = NULL; 1198 page = NULL;
@@ -1582,6 +1581,15 @@ find_page:
1582 index, last_index - index); 1581 index, last_index - index);
1583 } 1582 }
1584 if (!PageUptodate(page)) { 1583 if (!PageUptodate(page)) {
1584 /*
1585 * See comment in do_read_cache_page on why
1586 * wait_on_page_locked is used to avoid unnecessarily
1587 * serialisations and why it's safe.
1588 */
1589 wait_on_page_locked_killable(page);
1590 if (PageUptodate(page))
1591 goto page_ok;
1592
1585 if (inode->i_blkbits == PAGE_CACHE_SHIFT || 1593 if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
1586 !mapping->a_ops->is_partially_uptodate) 1594 !mapping->a_ops->is_partially_uptodate)
1587 goto page_not_up_to_date; 1595 goto page_not_up_to_date;
@@ -1827,19 +1835,18 @@ EXPORT_SYMBOL(generic_file_read_iter);
1827 * This adds the requested page to the page cache if it isn't already there, 1835 * This adds the requested page to the page cache if it isn't already there,
1828 * and schedules an I/O to read in its contents from disk. 1836 * and schedules an I/O to read in its contents from disk.
1829 */ 1837 */
1830static int page_cache_read(struct file *file, pgoff_t offset) 1838static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
1831{ 1839{
1832 struct address_space *mapping = file->f_mapping; 1840 struct address_space *mapping = file->f_mapping;
1833 struct page *page; 1841 struct page *page;
1834 int ret; 1842 int ret;
1835 1843
1836 do { 1844 do {
1837 page = page_cache_alloc_cold(mapping); 1845 page = __page_cache_alloc(gfp_mask|__GFP_COLD);
1838 if (!page) 1846 if (!page)
1839 return -ENOMEM; 1847 return -ENOMEM;
1840 1848
1841 ret = add_to_page_cache_lru(page, mapping, offset, 1849 ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
1842 mapping_gfp_constraint(mapping, GFP_KERNEL));
1843 if (ret == 0) 1850 if (ret == 0)
1844 ret = mapping->a_ops->readpage(file, page); 1851 ret = mapping->a_ops->readpage(file, page);
1845 else if (ret == -EEXIST) 1852 else if (ret == -EEXIST)
@@ -2020,7 +2027,7 @@ no_cached_page:
2020 * We're only likely to ever get here if MADV_RANDOM is in 2027 * We're only likely to ever get here if MADV_RANDOM is in
2021 * effect. 2028 * effect.
2022 */ 2029 */
2023 error = page_cache_read(file, offset); 2030 error = page_cache_read(file, offset, vmf->gfp_mask);
2024 2031
2025 /* 2032 /*
2026 * The page we want has now been added to the page cache. 2033 * The page we want has now been added to the page cache.
@@ -2217,7 +2224,7 @@ static struct page *wait_on_page_read(struct page *page)
2217 return page; 2224 return page;
2218} 2225}
2219 2226
2220static struct page *__read_cache_page(struct address_space *mapping, 2227static struct page *do_read_cache_page(struct address_space *mapping,
2221 pgoff_t index, 2228 pgoff_t index,
2222 int (*filler)(void *, struct page *), 2229 int (*filler)(void *, struct page *),
2223 void *data, 2230 void *data,
@@ -2239,53 +2246,74 @@ repeat:
2239 /* Presumably ENOMEM for radix tree node */ 2246 /* Presumably ENOMEM for radix tree node */
2240 return ERR_PTR(err); 2247 return ERR_PTR(err);
2241 } 2248 }
2249
2250filler:
2242 err = filler(data, page); 2251 err = filler(data, page);
2243 if (err < 0) { 2252 if (err < 0) {
2244 page_cache_release(page); 2253 page_cache_release(page);
2245 page = ERR_PTR(err); 2254 return ERR_PTR(err);
2246 } else {
2247 page = wait_on_page_read(page);
2248 } 2255 }
2249 }
2250 return page;
2251}
2252 2256
2253static struct page *do_read_cache_page(struct address_space *mapping, 2257 page = wait_on_page_read(page);
2254 pgoff_t index, 2258 if (IS_ERR(page))
2255 int (*filler)(void *, struct page *), 2259 return page;
2256 void *data, 2260 goto out;
2257 gfp_t gfp) 2261 }
2258 2262 if (PageUptodate(page))
2259{ 2263 goto out;
2260 struct page *page;
2261 int err;
2262 2264
2263retry: 2265 /*
2264 page = __read_cache_page(mapping, index, filler, data, gfp); 2266 * Page is not up to date and may be locked due one of the following
2265 if (IS_ERR(page)) 2267 * case a: Page is being filled and the page lock is held
2266 return page; 2268 * case b: Read/write error clearing the page uptodate status
2269 * case c: Truncation in progress (page locked)
2270 * case d: Reclaim in progress
2271 *
2272 * Case a, the page will be up to date when the page is unlocked.
2273 * There is no need to serialise on the page lock here as the page
2274 * is pinned so the lock gives no additional protection. Even if the
2275 * the page is truncated, the data is still valid if PageUptodate as
2276 * it's a race vs truncate race.
2277 * Case b, the page will not be up to date
2278 * Case c, the page may be truncated but in itself, the data may still
2279 * be valid after IO completes as it's a read vs truncate race. The
2280 * operation must restart if the page is not uptodate on unlock but
2281 * otherwise serialising on page lock to stabilise the mapping gives
2282 * no additional guarantees to the caller as the page lock is
2283 * released before return.
2284 * Case d, similar to truncation. If reclaim holds the page lock, it
2285 * will be a race with remove_mapping that determines if the mapping
2286 * is valid on unlock but otherwise the data is valid and there is
2287 * no need to serialise with page lock.
2288 *
2289 * As the page lock gives no additional guarantee, we optimistically
2290 * wait on the page to be unlocked and check if it's up to date and
2291 * use the page if it is. Otherwise, the page lock is required to
2292 * distinguish between the different cases. The motivation is that we
2293 * avoid spurious serialisations and wakeups when multiple processes
2294 * wait on the same page for IO to complete.
2295 */
2296 wait_on_page_locked(page);
2267 if (PageUptodate(page)) 2297 if (PageUptodate(page))
2268 goto out; 2298 goto out;
2269 2299
2300 /* Distinguish between all the cases under the safety of the lock */
2270 lock_page(page); 2301 lock_page(page);
2302
2303 /* Case c or d, restart the operation */
2271 if (!page->mapping) { 2304 if (!page->mapping) {
2272 unlock_page(page); 2305 unlock_page(page);
2273 page_cache_release(page); 2306 page_cache_release(page);
2274 goto retry; 2307 goto repeat;
2275 } 2308 }
2309
2310 /* Someone else locked and filled the page in a very small window */
2276 if (PageUptodate(page)) { 2311 if (PageUptodate(page)) {
2277 unlock_page(page); 2312 unlock_page(page);
2278 goto out; 2313 goto out;
2279 } 2314 }
2280 err = filler(data, page); 2315 goto filler;
2281 if (err < 0) { 2316
2282 page_cache_release(page);
2283 return ERR_PTR(err);
2284 } else {
2285 page = wait_on_page_read(page);
2286 if (IS_ERR(page))
2287 return page;
2288 }
2289out: 2317out:
2290 mark_page_accessed(page); 2318 mark_page_accessed(page);
2291 return page; 2319 return page;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7294301d8495..a813b03021b7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2038,6 +2038,7 @@ static void __init gather_bootmem_prealloc(void)
2038 */ 2038 */
2039 if (hstate_is_gigantic(h)) 2039 if (hstate_is_gigantic(h))
2040 adjust_managed_page_count(page, 1 << h->order); 2040 adjust_managed_page_count(page, 1 << h->order);
2041 cond_resched();
2041 } 2042 }
2042} 2043}
2043 2044
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index bc0a8d8b8f42..ba9adce1422a 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -548,5 +548,5 @@ static int __init kasan_memhotplug_init(void)
548 return 0; 548 return 0;
549} 549}
550 550
551module_init(kasan_memhotplug_init); 551core_initcall(kasan_memhotplug_init);
552#endif 552#endif
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index ae113cf8f3b9..b403bf406b41 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1441,6 +1441,8 @@ static void kmemleak_scan(void)
1441 if (page_count(page) == 0) 1441 if (page_count(page) == 0)
1442 continue; 1442 continue;
1443 scan_block(page, page + 1, NULL); 1443 scan_block(page, page + 1, NULL);
1444 if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
1445 cond_resched();
1444 } 1446 }
1445 } 1447 }
1446 put_online_mems(); 1448 put_online_mems();
@@ -1569,8 +1571,7 @@ static void start_scan_thread(void)
1569} 1571}
1570 1572
1571/* 1573/*
1572 * Stop the automatic memory scanning thread. This function must be called 1574 * Stop the automatic memory scanning thread.
1573 * with the scan_mutex held.
1574 */ 1575 */
1575static void stop_scan_thread(void) 1576static void stop_scan_thread(void)
1576{ 1577{
@@ -1833,12 +1834,15 @@ static void kmemleak_do_cleanup(struct work_struct *work)
1833{ 1834{
1834 stop_scan_thread(); 1835 stop_scan_thread();
1835 1836
1837 mutex_lock(&scan_mutex);
1836 /* 1838 /*
1837 * Once the scan thread has stopped, it is safe to no longer track 1839 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
1838 * object freeing. Ordering of the scan thread stopping and the memory 1840 * longer track object freeing. Ordering of the scan thread stopping and
1839 * accesses below is guaranteed by the kthread_stop() function. 1841 * the memory accesses below is guaranteed by the kthread_stop()
1842 * function.
1840 */ 1843 */
1841 kmemleak_free_enabled = 0; 1844 kmemleak_free_enabled = 0;
1845 mutex_unlock(&scan_mutex);
1842 1846
1843 if (!kmemleak_found_leaks) 1847 if (!kmemleak_found_leaks)
1844 __kmemleak_do_cleanup(); 1848 __kmemleak_do_cleanup();
diff --git a/mm/ksm.c b/mm/ksm.c
index 2f028e6d0831..0b496edc704b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1494,8 +1494,22 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
1494 tree_rmap_item = 1494 tree_rmap_item =
1495 unstable_tree_search_insert(rmap_item, page, &tree_page); 1495 unstable_tree_search_insert(rmap_item, page, &tree_page);
1496 if (tree_rmap_item) { 1496 if (tree_rmap_item) {
1497 bool split;
1498
1497 kpage = try_to_merge_two_pages(rmap_item, page, 1499 kpage = try_to_merge_two_pages(rmap_item, page,
1498 tree_rmap_item, tree_page); 1500 tree_rmap_item, tree_page);
1501 /*
1502 * If both pages we tried to merge belong to the same compound
1503 * page, then we actually ended up increasing the reference
1504 * count of the same compound page twice, and split_huge_page
1505 * failed.
1506 * Here we set a flag if that happened, and we use it later to
1507 * try split_huge_page again. Since we call put_page right
1508 * afterwards, the reference count will be correct and
1509 * split_huge_page should succeed.
1510 */
1511 split = PageTransCompound(page)
1512 && compound_head(page) == compound_head(tree_page);
1499 put_page(tree_page); 1513 put_page(tree_page);
1500 if (kpage) { 1514 if (kpage) {
1501 /* 1515 /*
@@ -1520,6 +1534,20 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
1520 break_cow(tree_rmap_item); 1534 break_cow(tree_rmap_item);
1521 break_cow(rmap_item); 1535 break_cow(rmap_item);
1522 } 1536 }
1537 } else if (split) {
1538 /*
1539 * We are here if we tried to merge two pages and
1540 * failed because they both belonged to the same
1541 * compound page. We will split the page now, but no
1542 * merging will take place.
1543 * We do not want to add the cost of a full lock; if
1544 * the page is locked, it is better to skip it and
1545 * perhaps try again later.
1546 */
1547 if (!trylock_page(page))
1548 return;
1549 split_huge_page(page);
1550 unlock_page(page);
1523 } 1551 }
1524 } 1552 }
1525} 1553}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e25b93a4267d..9a8e688724b1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -996,7 +996,7 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
996 int nid, zid; 996 int nid, zid;
997 int i; 997 int i;
998 998
999 while ((memcg = parent_mem_cgroup(memcg))) { 999 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1000 for_each_node(nid) { 1000 for_each_node(nid) {
1001 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1001 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1002 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; 1002 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
@@ -5576,7 +5576,7 @@ static void uncharge_list(struct list_head *page_list)
5576 next = page->lru.next; 5576 next = page->lru.next;
5577 5577
5578 VM_BUG_ON_PAGE(PageLRU(page), page); 5578 VM_BUG_ON_PAGE(PageLRU(page), page);
5579 VM_BUG_ON_PAGE(page_count(page), page); 5579 VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);
5580 5580
5581 if (!page->mem_cgroup) 5581 if (!page->mem_cgroup)
5582 continue; 5582 continue;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 091fe9b06663..92a647957f91 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -539,6 +539,13 @@ static int delete_from_lru_cache(struct page *p)
539 */ 539 */
540 ClearPageActive(p); 540 ClearPageActive(p);
541 ClearPageUnevictable(p); 541 ClearPageUnevictable(p);
542
543 /*
544 * Poisoned page might never drop its ref count to 0 so we have
545 * to uncharge it manually from its memcg.
546 */
547 mem_cgroup_uncharge(p);
548
542 /* 549 /*
543 * drop the page count elevated by isolate_lru_page() 550 * drop the page count elevated by isolate_lru_page()
544 */ 551 */
diff --git a/mm/memory.c b/mm/memory.c
index 9ac55172aa7b..d5bb1465d30c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -72,7 +72,7 @@
72 72
73#include "internal.h" 73#include "internal.h"
74 74
75#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS 75#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
76#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. 76#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
77#endif 77#endif
78 78
@@ -1605,8 +1605,29 @@ out:
1605int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 1605int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1606 unsigned long pfn) 1606 unsigned long pfn)
1607{ 1607{
1608 return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
1609}
1610EXPORT_SYMBOL(vm_insert_pfn);
1611
1612/**
1613 * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
1614 * @vma: user vma to map to
1615 * @addr: target user address of this page
1616 * @pfn: source kernel pfn
1617 * @pgprot: pgprot flags for the inserted page
1618 *
1619 * This is exactly like vm_insert_pfn, except that it allows drivers to
1620 * to override pgprot on a per-page basis.
1621 *
1622 * This only makes sense for IO mappings, and it makes no sense for
1623 * cow mappings. In general, using multiple vmas is preferable;
1624 * vm_insert_pfn_prot should only be used if using multiple VMAs is
1625 * impractical.
1626 */
1627int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
1628 unsigned long pfn, pgprot_t pgprot)
1629{
1608 int ret; 1630 int ret;
1609 pgprot_t pgprot = vma->vm_page_prot;
1610 /* 1631 /*
1611 * Technically, architectures with pte_special can avoid all these 1632 * Technically, architectures with pte_special can avoid all these
1612 * restrictions (same for remap_pfn_range). However we would like 1633 * restrictions (same for remap_pfn_range). However we would like
@@ -1624,19 +1645,29 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1624 if (track_pfn_insert(vma, &pgprot, pfn)) 1645 if (track_pfn_insert(vma, &pgprot, pfn))
1625 return -EINVAL; 1646 return -EINVAL;
1626 1647
1648 if (!pfn_modify_allowed(pfn, pgprot))
1649 return -EACCES;
1650
1627 ret = insert_pfn(vma, addr, pfn, pgprot); 1651 ret = insert_pfn(vma, addr, pfn, pgprot);
1628 1652
1629 return ret; 1653 return ret;
1630} 1654}
1631EXPORT_SYMBOL(vm_insert_pfn); 1655EXPORT_SYMBOL(vm_insert_pfn_prot);
1632 1656
1633int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 1657int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1634 unsigned long pfn) 1658 unsigned long pfn)
1635{ 1659{
1660 pgprot_t pgprot = vma->vm_page_prot;
1661
1636 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); 1662 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
1637 1663
1638 if (addr < vma->vm_start || addr >= vma->vm_end) 1664 if (addr < vma->vm_start || addr >= vma->vm_end)
1639 return -EFAULT; 1665 return -EFAULT;
1666 if (track_pfn_insert(vma, &pgprot, pfn))
1667 return -EINVAL;
1668
1669 if (!pfn_modify_allowed(pfn, pgprot))
1670 return -EACCES;
1640 1671
1641 /* 1672 /*
1642 * If we don't have pte special, then we have to use the pfn_valid() 1673 * If we don't have pte special, then we have to use the pfn_valid()
@@ -1649,9 +1680,9 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1649 struct page *page; 1680 struct page *page;
1650 1681
1651 page = pfn_to_page(pfn); 1682 page = pfn_to_page(pfn);
1652 return insert_page(vma, addr, page, vma->vm_page_prot); 1683 return insert_page(vma, addr, page, pgprot);
1653 } 1684 }
1654 return insert_pfn(vma, addr, pfn, vma->vm_page_prot); 1685 return insert_pfn(vma, addr, pfn, pgprot);
1655} 1686}
1656EXPORT_SYMBOL(vm_insert_mixed); 1687EXPORT_SYMBOL(vm_insert_mixed);
1657 1688
@@ -1666,6 +1697,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1666{ 1697{
1667 pte_t *pte; 1698 pte_t *pte;
1668 spinlock_t *ptl; 1699 spinlock_t *ptl;
1700 int err = 0;
1669 1701
1670 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); 1702 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1671 if (!pte) 1703 if (!pte)
@@ -1673,12 +1705,16 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1673 arch_enter_lazy_mmu_mode(); 1705 arch_enter_lazy_mmu_mode();
1674 do { 1706 do {
1675 BUG_ON(!pte_none(*pte)); 1707 BUG_ON(!pte_none(*pte));
1708 if (!pfn_modify_allowed(pfn, prot)) {
1709 err = -EACCES;
1710 break;
1711 }
1676 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); 1712 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
1677 pfn++; 1713 pfn++;
1678 } while (pte++, addr += PAGE_SIZE, addr != end); 1714 } while (pte++, addr += PAGE_SIZE, addr != end);
1679 arch_leave_lazy_mmu_mode(); 1715 arch_leave_lazy_mmu_mode();
1680 pte_unmap_unlock(pte - 1, ptl); 1716 pte_unmap_unlock(pte - 1, ptl);
1681 return 0; 1717 return err;
1682} 1718}
1683 1719
1684static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, 1720static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -1687,6 +1723,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1687{ 1723{
1688 pmd_t *pmd; 1724 pmd_t *pmd;
1689 unsigned long next; 1725 unsigned long next;
1726 int err;
1690 1727
1691 pfn -= addr >> PAGE_SHIFT; 1728 pfn -= addr >> PAGE_SHIFT;
1692 pmd = pmd_alloc(mm, pud, addr); 1729 pmd = pmd_alloc(mm, pud, addr);
@@ -1695,9 +1732,10 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1695 VM_BUG_ON(pmd_trans_huge(*pmd)); 1732 VM_BUG_ON(pmd_trans_huge(*pmd));
1696 do { 1733 do {
1697 next = pmd_addr_end(addr, end); 1734 next = pmd_addr_end(addr, end);
1698 if (remap_pte_range(mm, pmd, addr, next, 1735 err = remap_pte_range(mm, pmd, addr, next,
1699 pfn + (addr >> PAGE_SHIFT), prot)) 1736 pfn + (addr >> PAGE_SHIFT), prot);
1700 return -ENOMEM; 1737 if (err)
1738 return err;
1701 } while (pmd++, addr = next, addr != end); 1739 } while (pmd++, addr = next, addr != end);
1702 return 0; 1740 return 0;
1703} 1741}
@@ -1708,6 +1746,7 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1708{ 1746{
1709 pud_t *pud; 1747 pud_t *pud;
1710 unsigned long next; 1748 unsigned long next;
1749 int err;
1711 1750
1712 pfn -= addr >> PAGE_SHIFT; 1751 pfn -= addr >> PAGE_SHIFT;
1713 pud = pud_alloc(mm, pgd, addr); 1752 pud = pud_alloc(mm, pgd, addr);
@@ -1715,9 +1754,10 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1715 return -ENOMEM; 1754 return -ENOMEM;
1716 do { 1755 do {
1717 next = pud_addr_end(addr, end); 1756 next = pud_addr_end(addr, end);
1718 if (remap_pmd_range(mm, pud, addr, next, 1757 err = remap_pmd_range(mm, pud, addr, next,
1719 pfn + (addr >> PAGE_SHIFT), prot)) 1758 pfn + (addr >> PAGE_SHIFT), prot);
1720 return -ENOMEM; 1759 if (err)
1760 return err;
1721 } while (pud++, addr = next, addr != end); 1761 } while (pud++, addr = next, addr != end);
1722 return 0; 1762 return 0;
1723} 1763}
@@ -1990,6 +2030,20 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
1990 copy_user_highpage(dst, src, va, vma); 2030 copy_user_highpage(dst, src, va, vma);
1991} 2031}
1992 2032
2033static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2034{
2035 struct file *vm_file = vma->vm_file;
2036
2037 if (vm_file)
2038 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2039
2040 /*
2041 * Special mappings (e.g. VDSO) do not have any file so fake
2042 * a default GFP_KERNEL for them.
2043 */
2044 return GFP_KERNEL;
2045}
2046
1993/* 2047/*
1994 * Notify the address space that the page is about to become writable so that 2048 * Notify the address space that the page is about to become writable so that
1995 * it can prohibit this or wait for the page to get into an appropriate state. 2049 * it can prohibit this or wait for the page to get into an appropriate state.
@@ -2005,6 +2059,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2005 vmf.virtual_address = (void __user *)(address & PAGE_MASK); 2059 vmf.virtual_address = (void __user *)(address & PAGE_MASK);
2006 vmf.pgoff = page->index; 2060 vmf.pgoff = page->index;
2007 vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; 2061 vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2062 vmf.gfp_mask = __get_fault_gfp_mask(vma);
2008 vmf.page = page; 2063 vmf.page = page;
2009 vmf.cow_page = NULL; 2064 vmf.cow_page = NULL;
2010 2065
@@ -2770,6 +2825,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
2770 vmf.pgoff = pgoff; 2825 vmf.pgoff = pgoff;
2771 vmf.flags = flags; 2826 vmf.flags = flags;
2772 vmf.page = NULL; 2827 vmf.page = NULL;
2828 vmf.gfp_mask = __get_fault_gfp_mask(vma);
2773 vmf.cow_page = cow_page; 2829 vmf.cow_page = cow_page;
2774 2830
2775 ret = vma->vm_ops->fault(vma, &vmf); 2831 ret = vma->vm_ops->fault(vma, &vmf);
@@ -2936,6 +2992,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
2936 vmf.pgoff = pgoff; 2992 vmf.pgoff = pgoff;
2937 vmf.max_pgoff = max_pgoff; 2993 vmf.max_pgoff = max_pgoff;
2938 vmf.flags = flags; 2994 vmf.flags = flags;
2995 vmf.gfp_mask = __get_fault_gfp_mask(vma);
2939 vma->vm_ops->map_pages(vma, &vmf); 2996 vma->vm_ops->map_pages(vma, &vmf);
2940} 2997}
2941 2998
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index c947014d128a..b777590c3e13 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1232,6 +1232,7 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1232 unsigned long maxnode) 1232 unsigned long maxnode)
1233{ 1233{
1234 unsigned long k; 1234 unsigned long k;
1235 unsigned long t;
1235 unsigned long nlongs; 1236 unsigned long nlongs;
1236 unsigned long endmask; 1237 unsigned long endmask;
1237 1238
@@ -1248,13 +1249,19 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1248 else 1249 else
1249 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 1250 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1250 1251
1251 /* When the user specified more nodes than supported just check 1252 /*
1252 if the non supported part is all zero. */ 1253 * When the user specified more nodes than supported just check
1254 * if the non supported part is all zero.
1255 *
1256 * If maxnode have more longs than MAX_NUMNODES, check
1257 * the bits in that area first. And then go through to
1258 * check the rest bits which equal or bigger than MAX_NUMNODES.
1259 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
1260 */
1253 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 1261 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1254 if (nlongs > PAGE_SIZE/sizeof(long)) 1262 if (nlongs > PAGE_SIZE/sizeof(long))
1255 return -EINVAL; 1263 return -EINVAL;
1256 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 1264 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1257 unsigned long t;
1258 if (get_user(t, nmask + k)) 1265 if (get_user(t, nmask + k))
1259 return -EFAULT; 1266 return -EFAULT;
1260 if (k == nlongs - 1) { 1267 if (k == nlongs - 1) {
@@ -1267,6 +1274,16 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1267 endmask = ~0UL; 1274 endmask = ~0UL;
1268 } 1275 }
1269 1276
1277 if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1278 unsigned long valid_mask = endmask;
1279
1280 valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1281 if (get_user(t, nmask + nlongs - 1))
1282 return -EFAULT;
1283 if (t & valid_mask)
1284 return -EINVAL;
1285 }
1286
1270 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 1287 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1271 return -EFAULT; 1288 return -EFAULT;
1272 nodes_addr(*nodes)[nlongs-1] &= endmask; 1289 nodes_addr(*nodes)[nlongs-1] &= endmask;
@@ -1393,10 +1410,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1393 goto out_put; 1410 goto out_put;
1394 } 1411 }
1395 1412
1396 if (!nodes_subset(*new, node_states[N_MEMORY])) { 1413 task_nodes = cpuset_mems_allowed(current);
1397 err = -EINVAL; 1414 nodes_and(*new, *new, task_nodes);
1415 if (nodes_empty(*new))
1416 goto out_put;
1417
1418 nodes_and(*new, *new, node_states[N_MEMORY]);
1419 if (nodes_empty(*new))
1398 goto out_put; 1420 goto out_put;
1399 }
1400 1421
1401 err = security_task_movememory(task); 1422 err = security_task_movememory(task);
1402 if (err) 1423 if (err)
@@ -2121,6 +2142,9 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2121 case MPOL_INTERLEAVE: 2142 case MPOL_INTERLEAVE:
2122 return !!nodes_equal(a->v.nodes, b->v.nodes); 2143 return !!nodes_equal(a->v.nodes, b->v.nodes);
2123 case MPOL_PREFERRED: 2144 case MPOL_PREFERRED:
2145 /* a's ->flags is the same as b's */
2146 if (a->flags & MPOL_F_LOCAL)
2147 return true;
2124 return a->v.preferred_node == b->v.preferred_node; 2148 return a->v.preferred_node == b->v.preferred_node;
2125 default: 2149 default:
2126 BUG(); 2150 BUG();
diff --git a/mm/mmap.c b/mm/mmap.c
index eaa460ddcaf9..39f5fbd07486 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1275,6 +1275,35 @@ static inline int mlock_future_check(struct mm_struct *mm,
1275 return 0; 1275 return 0;
1276} 1276}
1277 1277
1278static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
1279{
1280 if (S_ISREG(inode->i_mode))
1281 return MAX_LFS_FILESIZE;
1282
1283 if (S_ISBLK(inode->i_mode))
1284 return MAX_LFS_FILESIZE;
1285
1286 /* Special "we do even unsigned file positions" case */
1287 if (file->f_mode & FMODE_UNSIGNED_OFFSET)
1288 return 0;
1289
1290 /* Yes, random drivers might want more. But I'm tired of buggy drivers */
1291 return ULONG_MAX;
1292}
1293
1294static inline bool file_mmap_ok(struct file *file, struct inode *inode,
1295 unsigned long pgoff, unsigned long len)
1296{
1297 u64 maxsize = file_mmap_size_max(file, inode);
1298
1299 if (maxsize && len > maxsize)
1300 return false;
1301 maxsize -= len;
1302 if (pgoff > maxsize >> PAGE_SHIFT)
1303 return false;
1304 return true;
1305}
1306
1278/* 1307/*
1279 * The caller must hold down_write(&current->mm->mmap_sem). 1308 * The caller must hold down_write(&current->mm->mmap_sem).
1280 */ 1309 */
@@ -1340,6 +1369,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
1340 if (file) { 1369 if (file) {
1341 struct inode *inode = file_inode(file); 1370 struct inode *inode = file_inode(file);
1342 1371
1372 if (!file_mmap_ok(file, inode, pgoff, len))
1373 return -EOVERFLOW;
1374
1343 switch (flags & MAP_TYPE) { 1375 switch (flags & MAP_TYPE) {
1344 case MAP_SHARED: 1376 case MAP_SHARED:
1345 if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE)) 1377 if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
@@ -2188,7 +2220,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2188 gap_addr = TASK_SIZE; 2220 gap_addr = TASK_SIZE;
2189 2221
2190 next = vma->vm_next; 2222 next = vma->vm_next;
2191 if (next && next->vm_start < gap_addr) { 2223 if (next && next->vm_start < gap_addr &&
2224 (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
2192 if (!(next->vm_flags & VM_GROWSUP)) 2225 if (!(next->vm_flags & VM_GROWSUP))
2193 return -ENOMEM; 2226 return -ENOMEM;
2194 /* Check that both stack segments have the same anon_vma? */ 2227 /* Check that both stack segments have the same anon_vma? */
@@ -2273,7 +2306,8 @@ int expand_downwards(struct vm_area_struct *vma,
2273 if (gap_addr > address) 2306 if (gap_addr > address)
2274 return -ENOMEM; 2307 return -ENOMEM;
2275 prev = vma->vm_prev; 2308 prev = vma->vm_prev;
2276 if (prev && prev->vm_end > gap_addr) { 2309 if (prev && prev->vm_end > gap_addr &&
2310 (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
2277 if (!(prev->vm_flags & VM_GROWSDOWN)) 2311 if (!(prev->vm_flags & VM_GROWSDOWN))
2278 return -ENOMEM; 2312 return -ENOMEM;
2279 /* Check that both stack segments have the same anon_vma? */ 2313 /* Check that both stack segments have the same anon_vma? */
diff --git a/mm/mprotect.c b/mm/mprotect.c
index c0b4b2a49462..a277f3412a5d 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -255,6 +255,42 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
255 return pages; 255 return pages;
256} 256}
257 257
258static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
259 unsigned long next, struct mm_walk *walk)
260{
261 return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
262 0 : -EACCES;
263}
264
265static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
266 unsigned long addr, unsigned long next,
267 struct mm_walk *walk)
268{
269 return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
270 0 : -EACCES;
271}
272
273static int prot_none_test(unsigned long addr, unsigned long next,
274 struct mm_walk *walk)
275{
276 return 0;
277}
278
279static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
280 unsigned long end, unsigned long newflags)
281{
282 pgprot_t new_pgprot = vm_get_page_prot(newflags);
283 struct mm_walk prot_none_walk = {
284 .pte_entry = prot_none_pte_entry,
285 .hugetlb_entry = prot_none_hugetlb_entry,
286 .test_walk = prot_none_test,
287 .mm = current->mm,
288 .private = &new_pgprot,
289 };
290
291 return walk_page_range(start, end, &prot_none_walk);
292}
293
258int 294int
259mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, 295mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
260 unsigned long start, unsigned long end, unsigned long newflags) 296 unsigned long start, unsigned long end, unsigned long newflags)
@@ -273,6 +309,19 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
273 } 309 }
274 310
275 /* 311 /*
312 * Do PROT_NONE PFN permission checks here when we can still
313 * bail out without undoing a lot of state. This is a rather
314 * uncommon case, so doesn't need to be very optimized.
315 */
316 if (arch_has_pfn_modify_check() &&
317 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
318 (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
319 error = prot_none_walk(vma, start, end, newflags);
320 if (error)
321 return error;
322 }
323
324 /*
276 * If we make a private mapping writable we increase our commit; 325 * If we make a private mapping writable we increase our commit;
277 * but (without finer accounting) cannot reduce our commit if we 326 * but (without finer accounting) cannot reduce our commit if we
278 * make it unwritable again. hugetlb mapping were accounted for 327 * make it unwritable again. hugetlb mapping were accounted for
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 71b0f525180a..8e80ea58c7e7 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2520,13 +2520,13 @@ void account_page_redirty(struct page *page)
2520 if (mapping && mapping_cap_account_dirty(mapping)) { 2520 if (mapping && mapping_cap_account_dirty(mapping)) {
2521 struct inode *inode = mapping->host; 2521 struct inode *inode = mapping->host;
2522 struct bdi_writeback *wb; 2522 struct bdi_writeback *wb;
2523 bool locked; 2523 struct wb_lock_cookie cookie = {};
2524 2524
2525 wb = unlocked_inode_to_wb_begin(inode, &locked); 2525 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2526 current->nr_dirtied--; 2526 current->nr_dirtied--;
2527 dec_zone_page_state(page, NR_DIRTIED); 2527 dec_zone_page_state(page, NR_DIRTIED);
2528 dec_wb_stat(wb, WB_DIRTIED); 2528 dec_wb_stat(wb, WB_DIRTIED);
2529 unlocked_inode_to_wb_end(inode, locked); 2529 unlocked_inode_to_wb_end(inode, &cookie);
2530 } 2530 }
2531} 2531}
2532EXPORT_SYMBOL(account_page_redirty); 2532EXPORT_SYMBOL(account_page_redirty);
@@ -2632,15 +2632,15 @@ void cancel_dirty_page(struct page *page)
2632 struct inode *inode = mapping->host; 2632 struct inode *inode = mapping->host;
2633 struct bdi_writeback *wb; 2633 struct bdi_writeback *wb;
2634 struct mem_cgroup *memcg; 2634 struct mem_cgroup *memcg;
2635 bool locked; 2635 struct wb_lock_cookie cookie = {};
2636 2636
2637 memcg = mem_cgroup_begin_page_stat(page); 2637 memcg = mem_cgroup_begin_page_stat(page);
2638 wb = unlocked_inode_to_wb_begin(inode, &locked); 2638 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2639 2639
2640 if (TestClearPageDirty(page)) 2640 if (TestClearPageDirty(page))
2641 account_page_cleaned(page, mapping, memcg, wb); 2641 account_page_cleaned(page, mapping, memcg, wb);
2642 2642
2643 unlocked_inode_to_wb_end(inode, locked); 2643 unlocked_inode_to_wb_end(inode, &cookie);
2644 mem_cgroup_end_page_stat(memcg); 2644 mem_cgroup_end_page_stat(memcg);
2645 } else { 2645 } else {
2646 ClearPageDirty(page); 2646 ClearPageDirty(page);
@@ -2673,7 +2673,7 @@ int clear_page_dirty_for_io(struct page *page)
2673 struct inode *inode = mapping->host; 2673 struct inode *inode = mapping->host;
2674 struct bdi_writeback *wb; 2674 struct bdi_writeback *wb;
2675 struct mem_cgroup *memcg; 2675 struct mem_cgroup *memcg;
2676 bool locked; 2676 struct wb_lock_cookie cookie = {};
2677 2677
2678 /* 2678 /*
2679 * Yes, Virginia, this is indeed insane. 2679 * Yes, Virginia, this is indeed insane.
@@ -2711,14 +2711,14 @@ int clear_page_dirty_for_io(struct page *page)
2711 * exclusion. 2711 * exclusion.
2712 */ 2712 */
2713 memcg = mem_cgroup_begin_page_stat(page); 2713 memcg = mem_cgroup_begin_page_stat(page);
2714 wb = unlocked_inode_to_wb_begin(inode, &locked); 2714 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2715 if (TestClearPageDirty(page)) { 2715 if (TestClearPageDirty(page)) {
2716 mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY); 2716 mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
2717 dec_zone_page_state(page, NR_FILE_DIRTY); 2717 dec_zone_page_state(page, NR_FILE_DIRTY);
2718 dec_wb_stat(wb, WB_RECLAIMABLE); 2718 dec_wb_stat(wb, WB_RECLAIMABLE);
2719 ret = 1; 2719 ret = 1;
2720 } 2720 }
2721 unlocked_inode_to_wb_end(inode, locked); 2721 unlocked_inode_to_wb_end(inode, &cookie);
2722 mem_cgroup_end_page_stat(memcg); 2722 mem_cgroup_end_page_stat(memcg);
2723 return ret; 2723 return ret;
2724 } 2724 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 417700241e52..bf2d9272b7ba 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2461,9 +2461,6 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
2461 if (!area->nr_free) 2461 if (!area->nr_free)
2462 continue; 2462 continue;
2463 2463
2464 if (alloc_harder)
2465 return true;
2466
2467 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 2464 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2468 if (!list_empty(&area->free_list[mt])) 2465 if (!list_empty(&area->free_list[mt]))
2469 return true; 2466 return true;
@@ -2475,6 +2472,9 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
2475 return true; 2472 return true;
2476 } 2473 }
2477#endif 2474#endif
2475 if (alloc_harder &&
2476 !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
2477 return true;
2478 } 2478 }
2479 return false; 2479 return false;
2480} 2480}
@@ -3102,8 +3102,6 @@ retry:
3102 * the allocation is high priority and these type of 3102 * the allocation is high priority and these type of
3103 * allocations are system rather than user orientated 3103 * allocations are system rather than user orientated
3104 */ 3104 */
3105 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3106
3107 page = __alloc_pages_high_priority(gfp_mask, order, ac); 3105 page = __alloc_pages_high_priority(gfp_mask, order, ac);
3108 3106
3109 if (page) { 3107 if (page) {
diff --git a/mm/percpu.c b/mm/percpu.c
index ef6353f0adbd..1c784df3bdfe 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -68,6 +68,7 @@
68#include <linux/vmalloc.h> 68#include <linux/vmalloc.h>
69#include <linux/workqueue.h> 69#include <linux/workqueue.h>
70#include <linux/kmemleak.h> 70#include <linux/kmemleak.h>
71#include <linux/sched.h>
71 72
72#include <asm/cacheflush.h> 73#include <asm/cacheflush.h>
73#include <asm/sections.h> 74#include <asm/sections.h>
diff --git a/mm/slab.c b/mm/slab.c
index 462938fc7cb9..78f082453481 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3919,7 +3919,8 @@ next:
3919 next_reap_node(); 3919 next_reap_node();
3920out: 3920out:
3921 /* Set up the next iteration */ 3921 /* Set up the next iteration */
3922 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC)); 3922 schedule_delayed_work_on(smp_processor_id(), work,
3923 round_jiffies_relative(REAPTIMEOUT_AC));
3923} 3924}
3924 3925
3925#ifdef CONFIG_SLABINFO 3926#ifdef CONFIG_SLABINFO
diff --git a/mm/slub.c b/mm/slub.c
index d6fe997c0577..490825fd931a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -689,7 +689,7 @@ void object_err(struct kmem_cache *s, struct page *page,
689 print_trailer(s, page, object); 689 print_trailer(s, page, object);
690} 690}
691 691
692static void slab_err(struct kmem_cache *s, struct page *page, 692static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
693 const char *fmt, ...) 693 const char *fmt, ...)
694{ 694{
695 va_list args; 695 va_list args;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c1a0f3dea8b5..8e25ff2b693a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2206,6 +2206,35 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2206 return 0; 2206 return 0;
2207} 2207}
2208 2208
2209
2210/*
2211 * Find out how many pages are allowed for a single swap device. There
2212 * are two limiting factors:
2213 * 1) the number of bits for the swap offset in the swp_entry_t type, and
2214 * 2) the number of bits in the swap pte, as defined by the different
2215 * architectures.
2216 *
2217 * In order to find the largest possible bit mask, a swap entry with
2218 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
2219 * decoded to a swp_entry_t again, and finally the swap offset is
2220 * extracted.
2221 *
2222 * This will mask all the bits from the initial ~0UL mask that can't
2223 * be encoded in either the swp_entry_t or the architecture definition
2224 * of a swap pte.
2225 */
2226unsigned long generic_max_swapfile_size(void)
2227{
2228 return swp_offset(pte_to_swp_entry(
2229 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2230}
2231
2232/* Can be overridden by an architecture for additional checks. */
2233__weak unsigned long max_swapfile_size(void)
2234{
2235 return generic_max_swapfile_size();
2236}
2237
2209static unsigned long read_swap_header(struct swap_info_struct *p, 2238static unsigned long read_swap_header(struct swap_info_struct *p,
2210 union swap_header *swap_header, 2239 union swap_header *swap_header,
2211 struct inode *inode) 2240 struct inode *inode)
@@ -2241,23 +2270,12 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
2241 p->cluster_next = 1; 2270 p->cluster_next = 1;
2242 p->cluster_nr = 0; 2271 p->cluster_nr = 0;
2243 2272
2244 /* 2273 maxpages = max_swapfile_size();
2245 * Find out how many pages are allowed for a single swap
2246 * device. There are two limiting factors: 1) the number
2247 * of bits for the swap offset in the swp_entry_t type, and
2248 * 2) the number of bits in the swap pte as defined by the
2249 * different architectures. In order to find the
2250 * largest possible bit mask, a swap entry with swap type 0
2251 * and swap offset ~0UL is created, encoded to a swap pte,
2252 * decoded to a swp_entry_t again, and finally the swap
2253 * offset is extracted. This will mask all the bits from
2254 * the initial ~0UL mask that can't be encoded in either
2255 * the swp_entry_t or the architecture definition of a
2256 * swap pte.
2257 */
2258 maxpages = swp_offset(pte_to_swp_entry(
2259 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2260 last_page = swap_header->info.last_page; 2274 last_page = swap_header->info.last_page;
2275 if (!last_page) {
2276 pr_warn("Empty swap-file\n");
2277 return 0;
2278 }
2261 if (last_page > maxpages) { 2279 if (last_page > maxpages) {
2262 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n", 2280 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2263 maxpages << (PAGE_SHIFT - 10), 2281 maxpages << (PAGE_SHIFT - 10),
diff --git a/mm/util.c b/mm/util.c
index d5259b62f8d7..5fae5b9c2885 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -80,6 +80,8 @@ EXPORT_SYMBOL(kstrdup_const);
80 * @s: the string to duplicate 80 * @s: the string to duplicate
81 * @max: read at most @max chars from @s 81 * @max: read at most @max chars from @s
82 * @gfp: the GFP mask used in the kmalloc() call when allocating memory 82 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
83 *
84 * Note: Use kmemdup_nul() instead if the size is known exactly.
83 */ 85 */
84char *kstrndup(const char *s, size_t max, gfp_t gfp) 86char *kstrndup(const char *s, size_t max, gfp_t gfp)
85{ 87{
@@ -118,6 +120,28 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
118EXPORT_SYMBOL(kmemdup); 120EXPORT_SYMBOL(kmemdup);
119 121
120/** 122/**
123 * kmemdup_nul - Create a NUL-terminated string from unterminated data
124 * @s: The data to stringify
125 * @len: The size of the data
126 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
127 */
128char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
129{
130 char *buf;
131
132 if (!s)
133 return NULL;
134
135 buf = kmalloc_track_caller(len + 1, gfp);
136 if (buf) {
137 memcpy(buf, s, len);
138 buf[len] = '\0';
139 }
140 return buf;
141}
142EXPORT_SYMBOL(kmemdup_nul);
143
144/**
121 * memdup_user - duplicate memory region from user space 145 * memdup_user - duplicate memory region from user space
122 * 146 *
123 * @src: source address in user space 147 * @src: source address in user space
@@ -404,17 +428,25 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
404 int res = 0; 428 int res = 0;
405 unsigned int len; 429 unsigned int len;
406 struct mm_struct *mm = get_task_mm(task); 430 struct mm_struct *mm = get_task_mm(task);
431 unsigned long arg_start, arg_end, env_start, env_end;
407 if (!mm) 432 if (!mm)
408 goto out; 433 goto out;
409 if (!mm->arg_end) 434 if (!mm->arg_end)
410 goto out_mm; /* Shh! No looking before we're done */ 435 goto out_mm; /* Shh! No looking before we're done */
411 436
412 len = mm->arg_end - mm->arg_start; 437 down_read(&mm->mmap_sem);
438 arg_start = mm->arg_start;
439 arg_end = mm->arg_end;
440 env_start = mm->env_start;
441 env_end = mm->env_end;
442 up_read(&mm->mmap_sem);
443
444 len = arg_end - arg_start;
413 445
414 if (len > buflen) 446 if (len > buflen)
415 len = buflen; 447 len = buflen;
416 448
417 res = access_process_vm(task, mm->arg_start, buffer, len, 0); 449 res = access_process_vm(task, arg_start, buffer, len, 0);
418 450
419 /* 451 /*
420 * If the nul at the end of args has been overwritten, then 452 * If the nul at the end of args has been overwritten, then
@@ -425,10 +457,10 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
425 if (len < res) { 457 if (len < res) {
426 res = len; 458 res = len;
427 } else { 459 } else {
428 len = mm->env_end - mm->env_start; 460 len = env_end - env_start;
429 if (len > buflen - res) 461 if (len > buflen - res)
430 len = buflen - res; 462 len = buflen - res;
431 res += access_process_vm(task, mm->env_start, 463 res += access_process_vm(task, env_start,
432 buffer+res, len, 0); 464 buffer+res, len, 0);
433 res = strnlen(buffer, res); 465 res = strnlen(buffer, res);
434 } 466 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8e3c9c5a3042..de8e372ece04 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1460,7 +1460,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
1460 addr)) 1460 addr))
1461 return; 1461 return;
1462 1462
1463 area = remove_vm_area(addr); 1463 area = find_vmap_area((unsigned long)addr)->vm;
1464 if (unlikely(!area)) { 1464 if (unlikely(!area)) {
1465 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 1465 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1466 addr); 1466 addr);
@@ -1470,6 +1470,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
1470 debug_check_no_locks_freed(addr, get_vm_area_size(area)); 1470 debug_check_no_locks_freed(addr, get_vm_area_size(area));
1471 debug_check_no_obj_freed(addr, get_vm_area_size(area)); 1471 debug_check_no_obj_freed(addr, get_vm_area_size(area));
1472 1472
1473 remove_vm_area(addr);
1473 if (deallocate_pages) { 1474 if (deallocate_pages) {
1474 int i; 1475 int i;
1475 1476
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 440c2df9be82..76853088f66b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -254,10 +254,13 @@ EXPORT_SYMBOL(register_shrinker);
254 */ 254 */
255void unregister_shrinker(struct shrinker *shrinker) 255void unregister_shrinker(struct shrinker *shrinker)
256{ 256{
257 if (!shrinker->nr_deferred)
258 return;
257 down_write(&shrinker_rwsem); 259 down_write(&shrinker_rwsem);
258 list_del(&shrinker->list); 260 list_del(&shrinker->list);
259 up_write(&shrinker_rwsem); 261 up_write(&shrinker_rwsem);
260 kfree(shrinker->nr_deferred); 262 kfree(shrinker->nr_deferred);
263 shrinker->nr_deferred = NULL;
261} 264}
262EXPORT_SYMBOL(unregister_shrinker); 265EXPORT_SYMBOL(unregister_shrinker);
263 266
@@ -1309,6 +1312,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1309 1312
1310 if (PageDirty(page)) { 1313 if (PageDirty(page)) {
1311 struct address_space *mapping; 1314 struct address_space *mapping;
1315 bool migrate_dirty;
1312 1316
1313 /* ISOLATE_CLEAN means only clean pages */ 1317 /* ISOLATE_CLEAN means only clean pages */
1314 if (mode & ISOLATE_CLEAN) 1318 if (mode & ISOLATE_CLEAN)
@@ -1317,10 +1321,19 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1317 /* 1321 /*
1318 * Only pages without mappings or that have a 1322 * Only pages without mappings or that have a
1319 * ->migratepage callback are possible to migrate 1323 * ->migratepage callback are possible to migrate
1320 * without blocking 1324 * without blocking. However, we can be racing with
1325 * truncation so it's necessary to lock the page
1326 * to stabilise the mapping as truncation holds
1327 * the page lock until after the page is removed
1328 * from the page cache.
1321 */ 1329 */
1330 if (!trylock_page(page))
1331 return ret;
1332
1322 mapping = page_mapping(page); 1333 mapping = page_mapping(page);
1323 if (mapping && !mapping->a_ops->migratepage) 1334 migrate_dirty = !mapping || mapping->a_ops->migratepage;
1335 unlock_page(page);
1336 if (!migrate_dirty)
1324 return ret; 1337 return ret;
1325 } 1338 }
1326 } 1339 }
@@ -2054,10 +2067,16 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness,
2054 } 2067 }
2055 2068
2056 /* 2069 /*
2057 * There is enough inactive page cache, do not reclaim 2070 * If there is enough inactive page cache, i.e. if the size of the
2058 * anything from the anonymous working set right now. 2071 * inactive list is greater than that of the active list *and* the
2072 * inactive list actually has some pages to scan on this priority, we
2073 * do not reclaim anything from the anonymous working set right now.
2074 * Without the second condition we could end up never scanning an
2075 * lruvec even if it has plenty of old anonymous pages unless the
2076 * system is under heavy pressure.
2059 */ 2077 */
2060 if (!inactive_file_is_low(lruvec)) { 2078 if (!inactive_file_is_low(lruvec) &&
2079 get_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
2061 scan_balance = SCAN_FILE; 2080 scan_balance = SCAN_FILE;
2062 goto out; 2081 goto out;
2063 } 2082 }
@@ -3822,7 +3841,13 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3822 */ 3841 */
3823int page_evictable(struct page *page) 3842int page_evictable(struct page *page)
3824{ 3843{
3825 return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); 3844 int ret;
3845
3846 /* Prevent address_space of inode and swap cache from being freed */
3847 rcu_read_lock();
3848 ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
3849 rcu_read_unlock();
3850 return ret;
3826} 3851}
3827 3852
3828#ifdef CONFIG_SHMEM 3853#ifdef CONFIG_SHMEM
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ca4dc9031073..ac9791dd4768 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -29,6 +29,7 @@
29#include <linux/net_tstamp.h> 29#include <linux/net_tstamp.h>
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <linux/ethtool.h> 31#include <linux/ethtool.h>
32#include <linux/phy.h>
32#include <net/arp.h> 33#include <net/arp.h>
33 34
34#include "vlan.h" 35#include "vlan.h"
@@ -559,8 +560,7 @@ static int vlan_dev_init(struct net_device *dev)
559 NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM | 560 NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
560 NETIF_F_ALL_FCOE; 561 NETIF_F_ALL_FCOE;
561 562
562 dev->features |= real_dev->vlan_features | NETIF_F_LLTX | 563 dev->features |= dev->hw_features | NETIF_F_LLTX;
563 NETIF_F_GSO_SOFTWARE;
564 dev->gso_max_size = real_dev->gso_max_size; 564 dev->gso_max_size = real_dev->gso_max_size;
565 if (dev->features & NETIF_F_VLAN_FEATURES) 565 if (dev->features & NETIF_F_VLAN_FEATURES)
566 netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n"); 566 netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
@@ -655,8 +655,11 @@ static int vlan_ethtool_get_ts_info(struct net_device *dev,
655{ 655{
656 const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); 656 const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
657 const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops; 657 const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
658 struct phy_device *phydev = vlan->real_dev->phydev;
658 659
659 if (ops->get_ts_info) { 660 if (phydev && phydev->drv && phydev->drv->ts_info) {
661 return phydev->drv->ts_info(phydev, info);
662 } else if (ops->get_ts_info) {
660 return ops->get_ts_info(vlan->real_dev, info); 663 return ops->get_ts_info(vlan->real_dev, info);
661 } else { 664 } else {
662 info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | 665 info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/net/Kconfig b/net/Kconfig
index 127da94ae25e..129b9fcbf1d0 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -383,8 +383,15 @@ config LWTUNNEL
383 weight tunnel endpoint. Tunnel encapsulation parameters are stored 383 weight tunnel endpoint. Tunnel encapsulation parameters are stored
384 with light weight tunnel state associated with fib routes. 384 with light weight tunnel state associated with fib routes.
385 385
386config DST_CACHE
387 bool
388 default n
389
386endif # if NET 390endif # if NET
387 391
388# Used by archs to tell that they support BPF_JIT 392# Used by archs to tell that they support BPF_JIT
389config HAVE_BPF_JIT 393config HAVE_BPF_JIT
390 bool 394 bool
395
396config HAVE_EBPF_JIT
397 bool
diff --git a/net/atm/lec.c b/net/atm/lec.c
index cd3b37989057..10e4066991b8 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -41,6 +41,9 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
41#include <linux/module.h> 41#include <linux/module.h>
42#include <linux/init.h> 42#include <linux/init.h>
43 43
44/* Hardening for Spectre-v1 */
45#include <linux/nospec.h>
46
44#include "lec.h" 47#include "lec.h"
45#include "lec_arpc.h" 48#include "lec_arpc.h"
46#include "resources.h" 49#include "resources.h"
@@ -697,8 +700,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
697 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); 700 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
698 if (bytes_left != 0) 701 if (bytes_left != 0)
699 pr_info("copy from user failed for %d bytes\n", bytes_left); 702 pr_info("copy from user failed for %d bytes\n", bytes_left);
700 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF || 703 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
701 !dev_lec[ioc_data.dev_num]) 704 return -EINVAL;
705 ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
706 if (!dev_lec[ioc_data.dev_num])
702 return -EINVAL; 707 return -EINVAL;
703 vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL); 708 vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
704 if (!vpriv) 709 if (!vpriv)
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index f5d2fe5e31cc..c5208136e3fc 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1603,10 +1603,22 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1603 /* if yes, the client has roamed and we have 1603 /* if yes, the client has roamed and we have
1604 * to unclaim it. 1604 * to unclaim it.
1605 */ 1605 */
1606 batadv_handle_unclaim(bat_priv, primary_if, 1606 if (batadv_has_timed_out(claim->lasttime, 100)) {
1607 primary_if->net_dev->dev_addr, 1607 /* only unclaim if the last claim entry is
1608 ethhdr->h_source, vid); 1608 * older than 100 ms to make sure we really
1609 goto allow; 1609 * have a roaming client here.
1610 */
1611 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Roaming client %pM detected. Unclaim it.\n",
1612 ethhdr->h_source);
1613 batadv_handle_unclaim(bat_priv, primary_if,
1614 primary_if->net_dev->dev_addr,
1615 ethhdr->h_source, vid);
1616 goto allow;
1617 } else {
1618 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Race for claim %pM detected. Drop packet.\n",
1619 ethhdr->h_source);
1620 goto handled;
1621 }
1610 } 1622 }
1611 1623
1612 /* check if it is a multicast/broadcast frame */ 1624 /* check if it is a multicast/broadcast frame */
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 5f19133c5530..c2dff7c6e960 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -374,7 +374,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
374 batadv_arp_hw_src(skb, hdr_size), &ip_src, 374 batadv_arp_hw_src(skb, hdr_size), &ip_src,
375 batadv_arp_hw_dst(skb, hdr_size), &ip_dst); 375 batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
376 376
377 if (hdr_size == 0) 377 if (hdr_size < sizeof(struct batadv_unicast_packet))
378 return; 378 return;
379 379
380 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; 380 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 700c96c82a15..5d2f9d4879b2 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -278,7 +278,8 @@ batadv_frag_merge_packets(struct hlist_head *chain)
278 /* Move the existing MAC header to just before the payload. (Override 278 /* Move the existing MAC header to just before the payload. (Override
279 * the fragment header.) 279 * the fragment header.)
280 */ 280 */
281 skb_pull_rcsum(skb_out, hdr_size); 281 skb_pull(skb_out, hdr_size);
282 skb_out->ip_summed = CHECKSUM_NONE;
282 memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN); 283 memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
283 skb_set_mac_header(skb_out, -ETH_HLEN); 284 skb_set_mac_header(skb_out, -ETH_HLEN);
284 skb_reset_network_header(skb_out); 285 skb_reset_network_header(skb_out);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index e6c8382c79ba..6abfba1e227f 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -798,6 +798,9 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
798 798
799 vid = batadv_get_vid(skb, 0); 799 vid = batadv_get_vid(skb, 0);
800 800
801 if (is_multicast_ether_addr(ethhdr->h_dest))
802 goto out;
803
801 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, 804 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
802 ethhdr->h_dest, vid); 805 ethhdr->h_dest, vid);
803 if (!orig_dst_node) 806 if (!orig_dst_node)
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index eb76386f8d4b..8aa2d65df86f 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -428,8 +428,8 @@ static struct batadv_orig_node *
428batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv, 428batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
429 struct ethhdr *ethhdr) 429 struct ethhdr *ethhdr)
430{ 430{
431 return batadv_transtable_search(bat_priv, ethhdr->h_source, 431 return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
432 ethhdr->h_dest, BATADV_NO_FLAGS); 432 BATADV_NO_FLAGS);
433} 433}
434 434
435/** 435/**
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 720f1a5b81ac..9f1fe6169bef 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -430,13 +430,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
430 430
431 /* skb->dev & skb->pkt_type are set here */ 431 /* skb->dev & skb->pkt_type are set here */
432 skb->protocol = eth_type_trans(skb, soft_iface); 432 skb->protocol = eth_type_trans(skb, soft_iface);
433 433 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
434 /* should not be necessary anymore as we use skb_pull_rcsum()
435 * TODO: please verify this and remove this TODO
436 * -- Dec 21st 2009, Simon Wunderlich
437 */
438
439 /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
440 434
441 batadv_inc_counter(bat_priv, BATADV_CNT_RX); 435 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
442 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, 436 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 24e9410923d0..80be0ee17ff3 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -708,7 +708,8 @@ done:
708} 708}
709 709
710static void hci_req_add_le_create_conn(struct hci_request *req, 710static void hci_req_add_le_create_conn(struct hci_request *req,
711 struct hci_conn *conn) 711 struct hci_conn *conn,
712 bdaddr_t *direct_rpa)
712{ 713{
713 struct hci_cp_le_create_conn cp; 714 struct hci_cp_le_create_conn cp;
714 struct hci_dev *hdev = conn->hdev; 715 struct hci_dev *hdev = conn->hdev;
@@ -716,11 +717,23 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
716 717
717 memset(&cp, 0, sizeof(cp)); 718 memset(&cp, 0, sizeof(cp));
718 719
719 /* Update random address, but set require_privacy to false so 720 /* If direct address was provided we use it instead of current
720 * that we never connect with an non-resolvable address. 721 * address.
721 */ 722 */
722 if (hci_update_random_address(req, false, &own_addr_type)) 723 if (direct_rpa) {
723 return; 724 if (bacmp(&req->hdev->random_addr, direct_rpa))
725 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
726 direct_rpa);
727
728 /* direct address is always RPA */
729 own_addr_type = ADDR_LE_DEV_RANDOM;
730 } else {
731 /* Update random address, but set require_privacy to false so
732 * that we never connect with an non-resolvable address.
733 */
734 if (hci_update_random_address(req, false, &own_addr_type))
735 return;
736 }
724 737
725 /* Set window to be the same value as the interval to enable 738 /* Set window to be the same value as the interval to enable
726 * continuous scanning. 739 * continuous scanning.
@@ -782,7 +795,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
782 795
783struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, 796struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
784 u8 dst_type, u8 sec_level, u16 conn_timeout, 797 u8 dst_type, u8 sec_level, u16 conn_timeout,
785 u8 role) 798 u8 role, bdaddr_t *direct_rpa)
786{ 799{
787 struct hci_conn_params *params; 800 struct hci_conn_params *params;
788 struct hci_conn *conn, *conn_unfinished; 801 struct hci_conn *conn, *conn_unfinished;
@@ -913,7 +926,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
913 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); 926 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
914 } 927 }
915 928
916 hci_req_add_le_create_conn(&req, conn); 929 hci_req_add_le_create_conn(&req, conn, direct_rpa);
917 930
918create_conn: 931create_conn:
919 err = hci_req_run(&req, create_le_conn_complete); 932 err = hci_req_run(&req, create_le_conn_complete);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 62edbf1b114e..5d0b1358c754 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -716,6 +716,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
716{ 716{
717 struct hci_dev *hdev = req->hdev; 717 struct hci_dev *hdev = req->hdev;
718 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 718 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
719 bool changed = false;
719 720
720 /* If Connectionless Slave Broadcast master role is supported 721 /* If Connectionless Slave Broadcast master role is supported
721 * enable all necessary events for it. 722 * enable all necessary events for it.
@@ -725,6 +726,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
725 events[1] |= 0x80; /* Synchronization Train Complete */ 726 events[1] |= 0x80; /* Synchronization Train Complete */
726 events[2] |= 0x10; /* Slave Page Response Timeout */ 727 events[2] |= 0x10; /* Slave Page Response Timeout */
727 events[2] |= 0x20; /* CSB Channel Map Change */ 728 events[2] |= 0x20; /* CSB Channel Map Change */
729 changed = true;
728 } 730 }
729 731
730 /* If Connectionless Slave Broadcast slave role is supported 732 /* If Connectionless Slave Broadcast slave role is supported
@@ -735,13 +737,24 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
735 events[2] |= 0x02; /* CSB Receive */ 737 events[2] |= 0x02; /* CSB Receive */
736 events[2] |= 0x04; /* CSB Timeout */ 738 events[2] |= 0x04; /* CSB Timeout */
737 events[2] |= 0x08; /* Truncated Page Complete */ 739 events[2] |= 0x08; /* Truncated Page Complete */
740 changed = true;
738 } 741 }
739 742
740 /* Enable Authenticated Payload Timeout Expired event if supported */ 743 /* Enable Authenticated Payload Timeout Expired event if supported */
741 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) 744 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
742 events[2] |= 0x80; 745 events[2] |= 0x80;
746 changed = true;
747 }
743 748
744 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events); 749 /* Some Broadcom based controllers indicate support for Set Event
750 * Mask Page 2 command, but then actually do not support it. Since
751 * the default value is all bits set to zero, the command is only
752 * required if the event mask has to be changed. In case no change
753 * to the event mask is needed, skip this command.
754 */
755 if (changed)
756 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
757 sizeof(events), events);
745} 758}
746 759
747static void hci_init3_req(struct hci_request *req, unsigned long opt) 760static void hci_init3_req(struct hci_request *req, unsigned long opt)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d57c11c1c6b5..d40d32a2c12d 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -4632,7 +4632,8 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4632/* This function requires the caller holds hdev->lock */ 4632/* This function requires the caller holds hdev->lock */
4633static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 4633static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4634 bdaddr_t *addr, 4634 bdaddr_t *addr,
4635 u8 addr_type, u8 adv_type) 4635 u8 addr_type, u8 adv_type,
4636 bdaddr_t *direct_rpa)
4636{ 4637{
4637 struct hci_conn *conn; 4638 struct hci_conn *conn;
4638 struct hci_conn_params *params; 4639 struct hci_conn_params *params;
@@ -4683,7 +4684,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4683 } 4684 }
4684 4685
4685 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, 4686 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4686 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER); 4687 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
4688 direct_rpa);
4687 if (!IS_ERR(conn)) { 4689 if (!IS_ERR(conn)) {
4688 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 4690 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
4689 * by higher layer that tried to connect, if no then 4691 * by higher layer that tried to connect, if no then
@@ -4780,8 +4782,13 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4780 bdaddr_type = irk->addr_type; 4782 bdaddr_type = irk->addr_type;
4781 } 4783 }
4782 4784
4783 /* Check if we have been requested to connect to this device */ 4785 /* Check if we have been requested to connect to this device.
4784 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type); 4786 *
4787 * direct_addr is set only for directed advertising reports (it is NULL
4788 * for advertising reports) and is already verified to be RPA above.
4789 */
4790 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
4791 direct_addr);
4785 if (conn && type == LE_ADV_IND) { 4792 if (conn && type == LE_ADV_IND) {
4786 /* Store report for later inclusion by 4793 /* Store report for later inclusion by
4787 * mgmt_device_connected 4794 * mgmt_device_connected
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 1fc076420d1e..1811f8e7ddf4 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session)
431 del_timer(&session->timer); 431 del_timer(&session->timer);
432} 432}
433 433
434static void hidp_process_report(struct hidp_session *session, 434static void hidp_process_report(struct hidp_session *session, int type,
435 int type, const u8 *data, int len, int intr) 435 const u8 *data, unsigned int len, int intr)
436{ 436{
437 if (len > HID_MAX_BUFFER_SIZE) 437 if (len > HID_MAX_BUFFER_SIZE)
438 len = HID_MAX_BUFFER_SIZE; 438 len = HID_MAX_BUFFER_SIZE;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 906f88550cd8..0dc27d2e8f18 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2251,8 +2251,14 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
2251 else 2251 else
2252 sec_level = authreq_to_seclevel(auth); 2252 sec_level = authreq_to_seclevel(auth);
2253 2253
2254 if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) 2254 if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) {
2255 /* If link is already encrypted with sufficient security we
2256 * still need refresh encryption as per Core Spec 5.0 Vol 3,
2257 * Part H 2.4.6
2258 */
2259 smp_ltk_encrypt(conn, hcon->sec_level);
2255 return 0; 2260 return 0;
2261 }
2256 2262
2257 if (sec_level > hcon->pending_sec_level) 2263 if (sec_level > hcon->pending_sec_level)
2258 hcon->pending_sec_level = sec_level; 2264 hcon->pending_sec_level = sec_level;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ec02f5869a78..3400b1e47668 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -456,8 +456,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
456 if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) 456 if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
457 return -ELOOP; 457 return -ELOOP;
458 458
459 /* Device is already being bridged */ 459 /* Device has master upper dev */
460 if (br_port_exists(dev)) 460 if (netdev_master_upper_dev_get(dev))
461 return -EBUSY; 461 return -EBUSY;
462 462
463 /* No bridging devices that dislike that (e.g. wireless) */ 463 /* No bridging devices that dislike that (e.g. wireless) */
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index efe415ad842a..83bb695f9645 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -229,6 +229,9 @@ static ssize_t brport_show(struct kobject *kobj,
229 struct brport_attribute *brport_attr = to_brport_attr(attr); 229 struct brport_attribute *brport_attr = to_brport_attr(attr);
230 struct net_bridge_port *p = to_brport(kobj); 230 struct net_bridge_port *p = to_brport(kobj);
231 231
232 if (!brport_attr->show)
233 return -EINVAL;
234
232 return brport_attr->show(p, buf); 235 return brport_attr->show(p, buf);
233} 236}
234 237
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index 9024283d2bca..9adf16258cab 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -172,18 +172,69 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
172 return true; 172 return true;
173} 173}
174 174
175static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
176{
177 return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
178}
179
180static bool wormhash_offset_invalid(int off, unsigned int len)
181{
182 if (off == 0) /* not present */
183 return false;
184
185 if (off < (int)sizeof(struct ebt_among_info) ||
186 off % __alignof__(struct ebt_mac_wormhash))
187 return true;
188
189 off += sizeof(struct ebt_mac_wormhash);
190
191 return off > len;
192}
193
194static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b)
195{
196 if (a == 0)
197 a = sizeof(struct ebt_among_info);
198
199 return ebt_mac_wormhash_size(wh) + a == b;
200}
201
175static int ebt_among_mt_check(const struct xt_mtchk_param *par) 202static int ebt_among_mt_check(const struct xt_mtchk_param *par)
176{ 203{
177 const struct ebt_among_info *info = par->matchinfo; 204 const struct ebt_among_info *info = par->matchinfo;
178 const struct ebt_entry_match *em = 205 const struct ebt_entry_match *em =
179 container_of(par->matchinfo, const struct ebt_entry_match, data); 206 container_of(par->matchinfo, const struct ebt_entry_match, data);
180 int expected_length = sizeof(struct ebt_among_info); 207 unsigned int expected_length = sizeof(struct ebt_among_info);
181 const struct ebt_mac_wormhash *wh_dst, *wh_src; 208 const struct ebt_mac_wormhash *wh_dst, *wh_src;
182 int err; 209 int err;
183 210
211 if (expected_length > em->match_size)
212 return -EINVAL;
213
214 if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) ||
215 wormhash_offset_invalid(info->wh_src_ofs, em->match_size))
216 return -EINVAL;
217
184 wh_dst = ebt_among_wh_dst(info); 218 wh_dst = ebt_among_wh_dst(info);
185 wh_src = ebt_among_wh_src(info); 219 if (poolsize_invalid(wh_dst))
220 return -EINVAL;
221
186 expected_length += ebt_mac_wormhash_size(wh_dst); 222 expected_length += ebt_mac_wormhash_size(wh_dst);
223 if (expected_length > em->match_size)
224 return -EINVAL;
225
226 wh_src = ebt_among_wh_src(info);
227 if (poolsize_invalid(wh_src))
228 return -EINVAL;
229
230 if (info->wh_src_ofs < info->wh_dst_ofs) {
231 if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs))
232 return -EINVAL;
233 } else {
234 if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs))
235 return -EINVAL;
236 }
237
187 expected_length += ebt_mac_wormhash_size(wh_src); 238 expected_length += ebt_mac_wormhash_size(wh_src);
188 239
189 if (em->match_size != EBT_ALIGN(expected_length)) { 240 if (em->match_size != EBT_ALIGN(expected_length)) {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index f46ca417bf2d..8b8a43fda6ca 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -404,6 +404,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
404 watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); 404 watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
405 if (IS_ERR(watcher)) 405 if (IS_ERR(watcher))
406 return PTR_ERR(watcher); 406 return PTR_ERR(watcher);
407
408 if (watcher->family != NFPROTO_BRIDGE) {
409 module_put(watcher->me);
410 return -ENOENT;
411 }
412
407 w->u.watcher = watcher; 413 w->u.watcher = watcher;
408 414
409 par->target = watcher; 415 par->target = watcher;
@@ -701,6 +707,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
701 } 707 }
702 i = 0; 708 i = 0;
703 709
710 memset(&mtpar, 0, sizeof(mtpar));
711 memset(&tgpar, 0, sizeof(tgpar));
704 mtpar.net = tgpar.net = net; 712 mtpar.net = tgpar.net = net;
705 mtpar.table = tgpar.table = name; 713 mtpar.table = tgpar.table = name;
706 mtpar.entryinfo = tgpar.entryinfo = e; 714 mtpar.entryinfo = tgpar.entryinfo = e;
@@ -722,6 +730,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
722 goto cleanup_watchers; 730 goto cleanup_watchers;
723 } 731 }
724 732
733 /* Reject UNSPEC, xtables verdicts/return values are incompatible */
734 if (target->family != NFPROTO_BRIDGE) {
735 module_put(target->me);
736 ret = -ENOENT;
737 goto cleanup_watchers;
738 }
739
725 t->u.target = target; 740 t->u.target = target;
726 if (t->u.target == &ebt_standard_target) { 741 if (t->u.target == &ebt_standard_target) {
727 if (gap < sizeof(struct ebt_standard_target)) { 742 if (gap < sizeof(struct ebt_standard_target)) {
@@ -1614,7 +1629,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1614 int off = ebt_compat_match_offset(match, m->match_size); 1629 int off = ebt_compat_match_offset(match, m->match_size);
1615 compat_uint_t msize = m->match_size - off; 1630 compat_uint_t msize = m->match_size - off;
1616 1631
1617 BUG_ON(off >= m->match_size); 1632 if (WARN_ON(off >= m->match_size))
1633 return -EINVAL;
1618 1634
1619 if (copy_to_user(cm->u.name, match->name, 1635 if (copy_to_user(cm->u.name, match->name,
1620 strlen(match->name) + 1) || put_user(msize, &cm->match_size)) 1636 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
@@ -1641,7 +1657,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
1641 int off = xt_compat_target_offset(target); 1657 int off = xt_compat_target_offset(target);
1642 compat_uint_t tsize = t->target_size - off; 1658 compat_uint_t tsize = t->target_size - off;
1643 1659
1644 BUG_ON(off >= t->target_size); 1660 if (WARN_ON(off >= t->target_size))
1661 return -EINVAL;
1645 1662
1646 if (copy_to_user(cm->u.name, target->name, 1663 if (copy_to_user(cm->u.name, target->name,
1647 strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) 1664 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
@@ -1869,7 +1886,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state,
1869 if (state->buf_kern_start == NULL) 1886 if (state->buf_kern_start == NULL)
1870 goto count_only; 1887 goto count_only;
1871 1888
1872 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len); 1889 if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
1890 return -EINVAL;
1873 1891
1874 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); 1892 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1875 1893
@@ -1882,7 +1900,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1882{ 1900{
1883 char *b = state->buf_kern_start; 1901 char *b = state->buf_kern_start;
1884 1902
1885 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len); 1903 if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
1904 return -EINVAL;
1886 1905
1887 if (b != NULL && sz > 0) 1906 if (b != NULL && sz > 0)
1888 memset(b + state->buf_kern_offset, 0, sz); 1907 memset(b + state->buf_kern_offset, 0, sz);
@@ -1908,7 +1927,8 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1908 int off, pad = 0; 1927 int off, pad = 0;
1909 unsigned int size_kern, match_size = mwt->match_size; 1928 unsigned int size_kern, match_size = mwt->match_size;
1910 1929
1911 strlcpy(name, mwt->u.name, sizeof(name)); 1930 if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
1931 return -EINVAL;
1912 1932
1913 if (state->buf_kern_start) 1933 if (state->buf_kern_start)
1914 dst = state->buf_kern_start + state->buf_kern_offset; 1934 dst = state->buf_kern_start + state->buf_kern_offset;
@@ -1959,8 +1979,10 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1959 pad = XT_ALIGN(size_kern) - size_kern; 1979 pad = XT_ALIGN(size_kern) - size_kern;
1960 1980
1961 if (pad > 0 && dst) { 1981 if (pad > 0 && dst) {
1962 BUG_ON(state->buf_kern_len <= pad); 1982 if (WARN_ON(state->buf_kern_len <= pad))
1963 BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad); 1983 return -EINVAL;
1984 if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
1985 return -EINVAL;
1964 memset(dst + size_kern, 0, pad); 1986 memset(dst + size_kern, 0, pad);
1965 } 1987 }
1966 return off + match_size; 1988 return off + match_size;
@@ -2011,7 +2033,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
2011 if (ret < 0) 2033 if (ret < 0)
2012 return ret; 2034 return ret;
2013 2035
2014 BUG_ON(ret < match32->match_size); 2036 if (WARN_ON(ret < match32->match_size))
2037 return -EINVAL;
2015 growth += ret - match32->match_size; 2038 growth += ret - match32->match_size;
2016 growth += ebt_compat_entry_padsize(); 2039 growth += ebt_compat_entry_padsize();
2017 2040
@@ -2021,7 +2044,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
2021 if (match_kern) 2044 if (match_kern)
2022 match_kern->match_size = ret; 2045 match_kern->match_size = ret;
2023 2046
2024 WARN_ON(type == EBT_COMPAT_TARGET && size_left); 2047 if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
2048 return -EINVAL;
2049
2025 match32 = (struct compat_ebt_entry_mwt *) buf; 2050 match32 = (struct compat_ebt_entry_mwt *) buf;
2026 } 2051 }
2027 2052
@@ -2078,6 +2103,19 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2078 * 2103 *
2079 * offsets are relative to beginning of struct ebt_entry (i.e., 0). 2104 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2080 */ 2105 */
2106 for (i = 0; i < 4 ; ++i) {
2107 if (offsets[i] > *total)
2108 return -EINVAL;
2109
2110 if (i < 3 && offsets[i] == *total)
2111 return -EINVAL;
2112
2113 if (i == 0)
2114 continue;
2115 if (offsets[i-1] > offsets[i])
2116 return -EINVAL;
2117 }
2118
2081 for (i = 0, j = 1 ; j < 4 ; j++, i++) { 2119 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2082 struct compat_ebt_entry_mwt *match32; 2120 struct compat_ebt_entry_mwt *match32;
2083 unsigned int size; 2121 unsigned int size;
@@ -2110,7 +2148,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2110 2148
2111 startoff = state->buf_user_offset - startoff; 2149 startoff = state->buf_user_offset - startoff;
2112 2150
2113 BUG_ON(*total < startoff); 2151 if (WARN_ON(*total < startoff))
2152 return -EINVAL;
2114 *total -= startoff; 2153 *total -= startoff;
2115 return 0; 2154 return 0;
2116} 2155}
@@ -2238,7 +2277,8 @@ static int compat_do_replace(struct net *net, void __user *user,
2238 state.buf_kern_len = size64; 2277 state.buf_kern_len = size64;
2239 2278
2240 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2279 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2241 BUG_ON(ret < 0); /* parses same data again */ 2280 if (WARN_ON(ret < 0))
2281 goto out_unlock;
2242 2282
2243 vfree(entries_tmp); 2283 vfree(entries_tmp);
2244 tmp.entries_size = size64; 2284 tmp.entries_size = size64;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 928f58064098..c866e761651a 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -722,13 +722,12 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
722 if (unlikely(!net_eq(dev_net(dev), &init_net))) 722 if (unlikely(!net_eq(dev_net(dev), &init_net)))
723 goto drop; 723 goto drop;
724 724
725 if (WARN_ONCE(dev->type != ARPHRD_CAN || 725 if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
726 skb->len != CAN_MTU || 726 cfd->len > CAN_MAX_DLEN)) {
727 cfd->len > CAN_MAX_DLEN, 727 pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
728 "PF_CAN: dropped non conform CAN skbuf: " 728 dev->type, skb->len, cfd->len);
729 "dev type %d, len %d, datalen %d\n",
730 dev->type, skb->len, cfd->len))
731 goto drop; 729 goto drop;
730 }
732 731
733 can_receive(skb, dev); 732 can_receive(skb, dev);
734 return NET_RX_SUCCESS; 733 return NET_RX_SUCCESS;
@@ -746,13 +745,12 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
746 if (unlikely(!net_eq(dev_net(dev), &init_net))) 745 if (unlikely(!net_eq(dev_net(dev), &init_net)))
747 goto drop; 746 goto drop;
748 747
749 if (WARN_ONCE(dev->type != ARPHRD_CAN || 748 if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
750 skb->len != CANFD_MTU || 749 cfd->len > CANFD_MAX_DLEN)) {
751 cfd->len > CANFD_MAX_DLEN, 750 pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
752 "PF_CAN: dropped non conform CAN FD skbuf: " 751 dev->type, skb->len, cfd->len);
753 "dev type %d, len %d, datalen %d\n",
754 dev->type, skb->len, cfd->len))
755 goto drop; 752 goto drop;
753 }
756 754
757 can_receive(skb, dev); 755 can_receive(skb, dev);
758 return NET_RX_SUCCESS; 756 return NET_RX_SUCCESS;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index a6b2f2138c9d..ad3c9e96a275 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2531,6 +2531,11 @@ static int try_write(struct ceph_connection *con)
2531 int ret = 1; 2531 int ret = 1;
2532 2532
2533 dout("try_write start %p state %lu\n", con, con->state); 2533 dout("try_write start %p state %lu\n", con, con->state);
2534 if (con->state != CON_STATE_PREOPEN &&
2535 con->state != CON_STATE_CONNECTING &&
2536 con->state != CON_STATE_NEGOTIATING &&
2537 con->state != CON_STATE_OPEN)
2538 return 0;
2534 2539
2535more: 2540more:
2536 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); 2541 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
@@ -2556,6 +2561,8 @@ more:
2556 } 2561 }
2557 2562
2558more_kvec: 2563more_kvec:
2564 BUG_ON(!con->sock);
2565
2559 /* kvec data queued? */ 2566 /* kvec data queued? */
2560 if (con->out_kvec_left) { 2567 if (con->out_kvec_left) {
2561 ret = write_partial_kvec(con); 2568 ret = write_partial_kvec(con);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index bc95e48d5cfb..378c9ed00d40 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -295,6 +295,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
295 u32 yes; 295 u32 yes;
296 struct crush_rule *r; 296 struct crush_rule *r;
297 297
298 err = -EINVAL;
298 ceph_decode_32_safe(p, end, yes, bad); 299 ceph_decode_32_safe(p, end, yes, bad);
299 if (!yes) { 300 if (!yes) {
300 dout("crush_decode NO rule %d off %x %p to %p\n", 301 dout("crush_decode NO rule %d off %x %p to %p\n",
diff --git a/net/compat.c b/net/compat.c
index 0ccf3ecf6bbb..17e97b106458 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -358,7 +358,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
358 if (optname == SO_ATTACH_FILTER) 358 if (optname == SO_ATTACH_FILTER)
359 return do_set_attach_filter(sock, level, optname, 359 return do_set_attach_filter(sock, level, optname,
360 optval, optlen); 360 optval, optlen);
361 if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) 361 if (!COMPAT_USE_64BIT_TIME &&
362 (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
362 return do_set_sock_timeout(sock, level, optname, optval, optlen); 363 return do_set_sock_timeout(sock, level, optname, optval, optlen);
363 364
364 return sock_setsockopt(sock, level, optname, optval, optlen); 365 return sock_setsockopt(sock, level, optname, optval, optlen);
@@ -423,7 +424,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
423static int compat_sock_getsockopt(struct socket *sock, int level, int optname, 424static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
424 char __user *optval, int __user *optlen) 425 char __user *optval, int __user *optlen)
425{ 426{
426 if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) 427 if (!COMPAT_USE_64BIT_TIME &&
428 (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
427 return do_get_sock_timeout(sock, level, optname, optval, optlen); 429 return do_get_sock_timeout(sock, level, optname, optval, optlen);
428 return sock_getsockopt(sock, level, optname, optval, optlen); 430 return sock_getsockopt(sock, level, optname, optval, optlen);
429} 431}
diff --git a/net/core/Makefile b/net/core/Makefile
index 086b01fbe1bd..0d8ad4d0261b 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
24obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o 24obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
25obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o 25obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
26obj-$(CONFIG_LWTUNNEL) += lwtunnel.o 26obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
27obj-$(CONFIG_DST_CACHE) += dst_cache.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 3b67c1e5756f..3bcbf931a910 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -989,7 +989,7 @@ bool dev_valid_name(const char *name)
989{ 989{
990 if (*name == '\0') 990 if (*name == '\0')
991 return false; 991 return false;
992 if (strlen(name) >= IFNAMSIZ) 992 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
993 return false; 993 return false;
994 if (!strcmp(name, ".") || !strcmp(name, "..")) 994 if (!strcmp(name, ".") || !strcmp(name, ".."))
995 return false; 995 return false;
@@ -2183,8 +2183,11 @@ EXPORT_SYMBOL(netif_set_xps_queue);
2183 */ 2183 */
2184int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2184int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2185{ 2185{
2186 bool disabling;
2186 int rc; 2187 int rc;
2187 2188
2189 disabling = txq < dev->real_num_tx_queues;
2190
2188 if (txq < 1 || txq > dev->num_tx_queues) 2191 if (txq < 1 || txq > dev->num_tx_queues)
2189 return -EINVAL; 2192 return -EINVAL;
2190 2193
@@ -2200,15 +2203,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2200 if (dev->num_tc) 2203 if (dev->num_tc)
2201 netif_setup_tc(dev, txq); 2204 netif_setup_tc(dev, txq);
2202 2205
2203 if (txq < dev->real_num_tx_queues) { 2206 dev->real_num_tx_queues = txq;
2207
2208 if (disabling) {
2209 synchronize_net();
2204 qdisc_reset_all_tx_gt(dev, txq); 2210 qdisc_reset_all_tx_gt(dev, txq);
2205#ifdef CONFIG_XPS 2211#ifdef CONFIG_XPS
2206 netif_reset_xps_queues_gt(dev, txq); 2212 netif_reset_xps_queues_gt(dev, txq);
2207#endif 2213#endif
2208 } 2214 }
2215 } else {
2216 dev->real_num_tx_queues = txq;
2209 } 2217 }
2210 2218
2211 dev->real_num_tx_queues = txq;
2212 return 0; 2219 return 0;
2213} 2220}
2214EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2221EXPORT_SYMBOL(netif_set_real_num_tx_queues);
@@ -2508,7 +2515,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2508 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 2515 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2509 return 0; 2516 return 0;
2510 2517
2511 eth = (struct ethhdr *)skb_mac_header(skb); 2518 eth = (struct ethhdr *)skb->data;
2512 type = eth->h_proto; 2519 type = eth->h_proto;
2513 } 2520 }
2514 2521
@@ -2598,7 +2605,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2598 2605
2599 segs = skb_mac_gso_segment(skb, features); 2606 segs = skb_mac_gso_segment(skb, features);
2600 2607
2601 if (unlikely(skb_needs_check(skb, tx_path))) 2608 if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
2602 skb_warn_bad_offload(skb); 2609 skb_warn_bad_offload(skb);
2603 2610
2604 return segs; 2611 return segs;
@@ -2699,7 +2706,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
2699} 2706}
2700EXPORT_SYMBOL(passthru_features_check); 2707EXPORT_SYMBOL(passthru_features_check);
2701 2708
2702static netdev_features_t dflt_features_check(const struct sk_buff *skb, 2709static netdev_features_t dflt_features_check(struct sk_buff *skb,
2703 struct net_device *dev, 2710 struct net_device *dev,
2704 netdev_features_t features) 2711 netdev_features_t features)
2705{ 2712{
@@ -2889,10 +2896,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
2889 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 2896 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2890 2897
2891 /* + transport layer */ 2898 /* + transport layer */
2892 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 2899 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
2893 hdr_len += tcp_hdrlen(skb); 2900 const struct tcphdr *th;
2894 else 2901 struct tcphdr _tcphdr;
2895 hdr_len += sizeof(struct udphdr); 2902
2903 th = skb_header_pointer(skb, skb_transport_offset(skb),
2904 sizeof(_tcphdr), &_tcphdr);
2905 if (likely(th))
2906 hdr_len += __tcp_hdrlen(th);
2907 } else {
2908 struct udphdr _udphdr;
2909
2910 if (skb_header_pointer(skb, skb_transport_offset(skb),
2911 sizeof(_udphdr), &_udphdr))
2912 hdr_len += sizeof(struct udphdr);
2913 }
2896 2914
2897 if (shinfo->gso_type & SKB_GSO_DODGY) 2915 if (shinfo->gso_type & SKB_GSO_DODGY)
2898 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 2916 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index c0548d268e1a..e3e6a3e2ca22 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -57,8 +57,8 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
57 return -EINVAL; 57 return -EINVAL;
58 58
59 list_for_each_entry(ha, &list->list, list) { 59 list_for_each_entry(ha, &list->list, list) {
60 if (!memcmp(ha->addr, addr, addr_len) && 60 if (ha->type == addr_type &&
61 ha->type == addr_type) { 61 !memcmp(ha->addr, addr, addr_len)) {
62 if (global) { 62 if (global) {
63 /* check if addr is already used as global */ 63 /* check if addr is already used as global */
64 if (ha->global_use) 64 if (ha->global_use)
diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c
new file mode 100644
index 000000000000..554d36449231
--- /dev/null
+++ b/net/core/dst_cache.c
@@ -0,0 +1,168 @@
1/*
2 * net/core/dst_cache.c - dst entry cache
3 *
4 * Copyright (c) 2016 Paolo Abeni <pabeni@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/percpu.h>
14#include <net/dst_cache.h>
15#include <net/route.h>
16#if IS_ENABLED(CONFIG_IPV6)
17#include <net/ip6_fib.h>
18#endif
19#include <uapi/linux/in.h>
20
21struct dst_cache_pcpu {
22 unsigned long refresh_ts;
23 struct dst_entry *dst;
24 u32 cookie;
25 union {
26 struct in_addr in_saddr;
27 struct in6_addr in6_saddr;
28 };
29};
30
31static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache,
32 struct dst_entry *dst, u32 cookie)
33{
34 dst_release(dst_cache->dst);
35 if (dst)
36 dst_hold(dst);
37
38 dst_cache->cookie = cookie;
39 dst_cache->dst = dst;
40}
41
42static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache,
43 struct dst_cache_pcpu *idst)
44{
45 struct dst_entry *dst;
46
47 dst = idst->dst;
48 if (!dst)
49 goto fail;
50
51 /* the cache already hold a dst reference; it can't go away */
52 dst_hold(dst);
53
54 if (unlikely(!time_after(idst->refresh_ts, dst_cache->reset_ts) ||
55 (dst->obsolete && !dst->ops->check(dst, idst->cookie)))) {
56 dst_cache_per_cpu_dst_set(idst, NULL, 0);
57 dst_release(dst);
58 goto fail;
59 }
60 return dst;
61
62fail:
63 idst->refresh_ts = jiffies;
64 return NULL;
65}
66
67struct dst_entry *dst_cache_get(struct dst_cache *dst_cache)
68{
69 if (!dst_cache->cache)
70 return NULL;
71
72 return dst_cache_per_cpu_get(dst_cache, this_cpu_ptr(dst_cache->cache));
73}
74EXPORT_SYMBOL_GPL(dst_cache_get);
75
76struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr)
77{
78 struct dst_cache_pcpu *idst;
79 struct dst_entry *dst;
80
81 if (!dst_cache->cache)
82 return NULL;
83
84 idst = this_cpu_ptr(dst_cache->cache);
85 dst = dst_cache_per_cpu_get(dst_cache, idst);
86 if (!dst)
87 return NULL;
88
89 *saddr = idst->in_saddr.s_addr;
90 return container_of(dst, struct rtable, dst);
91}
92EXPORT_SYMBOL_GPL(dst_cache_get_ip4);
93
94void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst,
95 __be32 saddr)
96{
97 struct dst_cache_pcpu *idst;
98
99 if (!dst_cache->cache)
100 return;
101
102 idst = this_cpu_ptr(dst_cache->cache);
103 dst_cache_per_cpu_dst_set(idst, dst, 0);
104 idst->in_saddr.s_addr = saddr;
105}
106EXPORT_SYMBOL_GPL(dst_cache_set_ip4);
107
108#if IS_ENABLED(CONFIG_IPV6)
109void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
110 const struct in6_addr *addr)
111{
112 struct dst_cache_pcpu *idst;
113
114 if (!dst_cache->cache)
115 return;
116
117 idst = this_cpu_ptr(dst_cache->cache);
118 dst_cache_per_cpu_dst_set(this_cpu_ptr(dst_cache->cache), dst,
119 rt6_get_cookie((struct rt6_info *)dst));
120 idst->in6_saddr = *addr;
121}
122EXPORT_SYMBOL_GPL(dst_cache_set_ip6);
123
124struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache,
125 struct in6_addr *saddr)
126{
127 struct dst_cache_pcpu *idst;
128 struct dst_entry *dst;
129
130 if (!dst_cache->cache)
131 return NULL;
132
133 idst = this_cpu_ptr(dst_cache->cache);
134 dst = dst_cache_per_cpu_get(dst_cache, idst);
135 if (!dst)
136 return NULL;
137
138 *saddr = idst->in6_saddr;
139 return dst;
140}
141EXPORT_SYMBOL_GPL(dst_cache_get_ip6);
142#endif
143
144int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp)
145{
146 dst_cache->cache = alloc_percpu_gfp(struct dst_cache_pcpu,
147 gfp | __GFP_ZERO);
148 if (!dst_cache->cache)
149 return -ENOMEM;
150
151 dst_cache_reset(dst_cache);
152 return 0;
153}
154EXPORT_SYMBOL_GPL(dst_cache_init);
155
156void dst_cache_destroy(struct dst_cache *dst_cache)
157{
158 int i;
159
160 if (!dst_cache->cache)
161 return;
162
163 for_each_possible_cpu(i)
164 dst_release(per_cpu_ptr(dst_cache->cache, i)->dst);
165
166 free_percpu(dst_cache->cache);
167}
168EXPORT_SYMBOL_GPL(dst_cache_destroy);
diff --git a/net/core/filter.c b/net/core/filter.c
index e94355452166..1a9ded6af138 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -430,6 +430,10 @@ do_pass:
430 convert_bpf_extensions(fp, &insn)) 430 convert_bpf_extensions(fp, &insn))
431 break; 431 break;
432 432
433 if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
434 fp->code == (BPF_ALU | BPF_MOD | BPF_X))
435 *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
436
433 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); 437 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
434 break; 438 break;
435 439
@@ -984,7 +988,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
984 */ 988 */
985 goto out_err_free; 989 goto out_err_free;
986 990
987 bpf_prog_select_runtime(fp); 991 err = bpf_prog_select_runtime(fp);
992 if (err)
993 goto out_err_free;
988 994
989 kfree(old_prog); 995 kfree(old_prog);
990 return fp; 996 return fp;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index ee9082792530..4d14908afaec 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -492,8 +492,8 @@ ip_proto_again:
492out_good: 492out_good:
493 ret = true; 493 ret = true;
494 494
495 key_control->thoff = (u16)nhoff;
496out: 495out:
496 key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
497 key_basic->n_proto = proto; 497 key_basic->n_proto = proto;
498 key_basic->ip_proto = ip_proto; 498 key_basic->ip_proto = ip_proto;
499 499
@@ -501,7 +501,6 @@ out:
501 501
502out_bad: 502out_bad:
503 ret = false; 503 ret = false;
504 key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
505 goto out; 504 goto out;
506} 505}
507EXPORT_SYMBOL(__skb_flow_dissect); 506EXPORT_SYMBOL(__skb_flow_dissect);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ae92131c4f89..f60b93627876 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -54,7 +54,8 @@ do { \
54static void neigh_timer_handler(unsigned long arg); 54static void neigh_timer_handler(unsigned long arg);
55static void __neigh_notify(struct neighbour *n, int type, int flags); 55static void __neigh_notify(struct neighbour *n, int type, int flags);
56static void neigh_update_notify(struct neighbour *neigh); 56static void neigh_update_notify(struct neighbour *neigh);
57static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); 57static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
58 struct net_device *dev);
58 59
59#ifdef CONFIG_PROC_FS 60#ifdef CONFIG_PROC_FS
60static const struct file_operations neigh_stat_seq_fops; 61static const struct file_operations neigh_stat_seq_fops;
@@ -254,8 +255,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
254{ 255{
255 write_lock_bh(&tbl->lock); 256 write_lock_bh(&tbl->lock);
256 neigh_flush_dev(tbl, dev); 257 neigh_flush_dev(tbl, dev);
257 pneigh_ifdown(tbl, dev); 258 pneigh_ifdown_and_unlock(tbl, dev);
258 write_unlock_bh(&tbl->lock);
259 259
260 del_timer_sync(&tbl->proxy_timer); 260 del_timer_sync(&tbl->proxy_timer);
261 pneigh_queue_purge(&tbl->proxy_queue); 261 pneigh_queue_purge(&tbl->proxy_queue);
@@ -496,7 +496,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
496 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift)) 496 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
497 nht = neigh_hash_grow(tbl, nht->hash_shift + 1); 497 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
498 498
499 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); 499 hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
500 500
501 if (n->parms->dead) { 501 if (n->parms->dead) {
502 rc = ERR_PTR(-EINVAL); 502 rc = ERR_PTR(-EINVAL);
@@ -508,7 +508,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
508 n1 != NULL; 508 n1 != NULL;
509 n1 = rcu_dereference_protected(n1->next, 509 n1 = rcu_dereference_protected(n1->next,
510 lockdep_is_held(&tbl->lock))) { 510 lockdep_is_held(&tbl->lock))) {
511 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) { 511 if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
512 if (want_ref) 512 if (want_ref)
513 neigh_hold(n1); 513 neigh_hold(n1);
514 rc = n1; 514 rc = n1;
@@ -645,9 +645,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
645 return -ENOENT; 645 return -ENOENT;
646} 646}
647 647
648static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) 648static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
649 struct net_device *dev)
649{ 650{
650 struct pneigh_entry *n, **np; 651 struct pneigh_entry *n, **np, *freelist = NULL;
651 u32 h; 652 u32 h;
652 653
653 for (h = 0; h <= PNEIGH_HASHMASK; h++) { 654 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
@@ -655,16 +656,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
655 while ((n = *np) != NULL) { 656 while ((n = *np) != NULL) {
656 if (!dev || n->dev == dev) { 657 if (!dev || n->dev == dev) {
657 *np = n->next; 658 *np = n->next;
658 if (tbl->pdestructor) 659 n->next = freelist;
659 tbl->pdestructor(n); 660 freelist = n;
660 if (n->dev)
661 dev_put(n->dev);
662 kfree(n);
663 continue; 661 continue;
664 } 662 }
665 np = &n->next; 663 np = &n->next;
666 } 664 }
667 } 665 }
666 write_unlock_bh(&tbl->lock);
667 while ((n = freelist)) {
668 freelist = n->next;
669 n->next = NULL;
670 if (tbl->pdestructor)
671 tbl->pdestructor(n);
672 if (n->dev)
673 dev_put(n->dev);
674 kfree(n);
675 }
668 return -ENOENT; 676 return -ENOENT;
669} 677}
670 678
@@ -1132,10 +1140,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1132 lladdr = neigh->ha; 1140 lladdr = neigh->ha;
1133 } 1141 }
1134 1142
1135 if (new & NUD_CONNECTED)
1136 neigh->confirmed = jiffies;
1137 neigh->updated = jiffies;
1138
1139 /* If entry was valid and address is not changed, 1143 /* If entry was valid and address is not changed,
1140 do not change entry state, if new one is STALE. 1144 do not change entry state, if new one is STALE.
1141 */ 1145 */
@@ -1159,6 +1163,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1159 } 1163 }
1160 } 1164 }
1161 1165
1166 /* Update timestamps only once we know we will make a change to the
1167 * neighbour entry. Otherwise we risk to move the locktime window with
1168 * noop updates and ignore relevant ARP updates.
1169 */
1170 if (new != old || lladdr != neigh->ha) {
1171 if (new & NUD_CONNECTED)
1172 neigh->confirmed = jiffies;
1173 neigh->updated = jiffies;
1174 }
1175
1162 if (new != old) { 1176 if (new != old) {
1163 neigh_del_timer(neigh); 1177 neigh_del_timer(neigh);
1164 if (new & NUD_PROBE) 1178 if (new & NUD_PROBE)
@@ -2274,12 +2288,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2274 2288
2275 err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL); 2289 err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
2276 if (!err) { 2290 if (!err) {
2277 if (tb[NDA_IFINDEX]) 2291 if (tb[NDA_IFINDEX]) {
2292 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
2293 return -EINVAL;
2278 filter_idx = nla_get_u32(tb[NDA_IFINDEX]); 2294 filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2279 2295 }
2280 if (tb[NDA_MASTER]) 2296 if (tb[NDA_MASTER]) {
2297 if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
2298 return -EINVAL;
2281 filter_master_idx = nla_get_u32(tb[NDA_MASTER]); 2299 filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2282 2300 }
2283 if (filter_idx || filter_master_idx) 2301 if (filter_idx || filter_master_idx)
2284 flags |= NLM_F_DUMP_FILTERED; 2302 flags |= NLM_F_DUMP_FILTERED;
2285 } 2303 }
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b5c351d2830b..ccd20669ac00 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -310,6 +310,25 @@ out_undo:
310 goto out; 310 goto out;
311} 311}
312 312
313static int __net_init net_defaults_init_net(struct net *net)
314{
315 net->core.sysctl_somaxconn = SOMAXCONN;
316 return 0;
317}
318
319static struct pernet_operations net_defaults_ops = {
320 .init = net_defaults_init_net,
321};
322
323static __init int net_defaults_init(void)
324{
325 if (register_pernet_subsys(&net_defaults_ops))
326 panic("Cannot initialize net default settings");
327
328 return 0;
329}
330
331core_initcall(net_defaults_init);
313 332
314#ifdef CONFIG_NET_NS 333#ifdef CONFIG_NET_NS
315static struct kmem_cache *net_cachep; 334static struct kmem_cache *net_cachep;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5b3d611d8b5f..96c9c0f0905a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1691,6 +1691,10 @@ static int do_setlink(const struct sk_buff *skb,
1691 const struct net_device_ops *ops = dev->netdev_ops; 1691 const struct net_device_ops *ops = dev->netdev_ops;
1692 int err; 1692 int err;
1693 1693
1694 err = validate_linkmsg(dev, tb);
1695 if (err < 0)
1696 return err;
1697
1694 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) { 1698 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
1695 struct net *net = rtnl_link_get_net(dev_net(dev), tb); 1699 struct net *net = rtnl_link_get_net(dev_net(dev), tb);
1696 if (IS_ERR(net)) { 1700 if (IS_ERR(net)) {
@@ -1982,10 +1986,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
1982 goto errout; 1986 goto errout;
1983 } 1987 }
1984 1988
1985 err = validate_linkmsg(dev, tb);
1986 if (err < 0)
1987 goto errout;
1988
1989 err = do_setlink(skb, dev, ifm, tb, ifname, 0); 1989 err = do_setlink(skb, dev, ifm, tb, ifname, 0);
1990errout: 1990errout:
1991 return err; 1991 return err;
@@ -2087,9 +2087,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2087 return err; 2087 return err;
2088 } 2088 }
2089 2089
2090 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 2090 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
2091 2091 __dev_notify_flags(dev, old_flags, 0U);
2092 __dev_notify_flags(dev, old_flags, ~0U); 2092 } else {
2093 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
2094 __dev_notify_flags(dev, old_flags, ~0U);
2095 }
2093 return 0; 2096 return 0;
2094} 2097}
2095EXPORT_SYMBOL(rtnl_configure_link); 2098EXPORT_SYMBOL(rtnl_configure_link);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 86b619501350..55be076706e5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -827,6 +827,8 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
827 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; 827 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
828 n->cloned = 1; 828 n->cloned = 1;
829 n->nohdr = 0; 829 n->nohdr = 0;
830 n->peeked = 0;
831 C(pfmemalloc);
830 n->destructor = NULL; 832 n->destructor = NULL;
831 C(tail); 833 C(tail);
832 C(end); 834 C(end);
@@ -2551,7 +2553,8 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2551{ 2553{
2552 int pos = skb_headlen(skb); 2554 int pos = skb_headlen(skb);
2553 2555
2554 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 2556 skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
2557 SKBTX_SHARED_FRAG;
2555 if (len < pos) /* Split line is inside header. */ 2558 if (len < pos) /* Split line is inside header. */
2556 skb_split_inside_header(skb, skb1, len, pos); 2559 skb_split_inside_header(skb, skb1, len, pos);
2557 else /* Second chunk has no header, nothing to copy. */ 2560 else /* Second chunk has no header, nothing to copy. */
@@ -3115,8 +3118,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
3115 skb_copy_from_linear_data_offset(head_skb, offset, 3118 skb_copy_from_linear_data_offset(head_skb, offset,
3116 skb_put(nskb, hsize), hsize); 3119 skb_put(nskb, hsize), hsize);
3117 3120
3118 skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & 3121 skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3119 SKBTX_SHARED_FRAG; 3122 SKBTX_SHARED_FRAG;
3120 3123
3121 while (pos < offset + len) { 3124 while (pos < offset + len) {
3122 if (i >= nfrags) { 3125 if (i >= nfrags) {
@@ -3329,24 +3332,18 @@ void __init skb_init(void)
3329 NULL); 3332 NULL);
3330} 3333}
3331 3334
3332/**
3333 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
3334 * @skb: Socket buffer containing the buffers to be mapped
3335 * @sg: The scatter-gather list to map into
3336 * @offset: The offset into the buffer's contents to start mapping
3337 * @len: Length of buffer space to be mapped
3338 *
3339 * Fill the specified scatter-gather list with mappings/pointers into a
3340 * region of the buffer space attached to a socket buffer.
3341 */
3342static int 3335static int
3343__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3336__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
3337 unsigned int recursion_level)
3344{ 3338{
3345 int start = skb_headlen(skb); 3339 int start = skb_headlen(skb);
3346 int i, copy = start - offset; 3340 int i, copy = start - offset;
3347 struct sk_buff *frag_iter; 3341 struct sk_buff *frag_iter;
3348 int elt = 0; 3342 int elt = 0;
3349 3343
3344 if (unlikely(recursion_level >= 24))
3345 return -EMSGSIZE;
3346
3350 if (copy > 0) { 3347 if (copy > 0) {
3351 if (copy > len) 3348 if (copy > len)
3352 copy = len; 3349 copy = len;
@@ -3365,6 +3362,8 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3365 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3362 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3366 if ((copy = end - offset) > 0) { 3363 if ((copy = end - offset) > 0) {
3367 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3364 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3365 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3366 return -EMSGSIZE;
3368 3367
3369 if (copy > len) 3368 if (copy > len)
3370 copy = len; 3369 copy = len;
@@ -3379,16 +3378,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3379 } 3378 }
3380 3379
3381 skb_walk_frags(skb, frag_iter) { 3380 skb_walk_frags(skb, frag_iter) {
3382 int end; 3381 int end, ret;
3383 3382
3384 WARN_ON(start > offset + len); 3383 WARN_ON(start > offset + len);
3385 3384
3386 end = start + frag_iter->len; 3385 end = start + frag_iter->len;
3387 if ((copy = end - offset) > 0) { 3386 if ((copy = end - offset) > 0) {
3387 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3388 return -EMSGSIZE;
3389
3388 if (copy > len) 3390 if (copy > len)
3389 copy = len; 3391 copy = len;
3390 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 3392 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3391 copy); 3393 copy, recursion_level + 1);
3394 if (unlikely(ret < 0))
3395 return ret;
3396 elt += ret;
3392 if ((len -= copy) == 0) 3397 if ((len -= copy) == 0)
3393 return elt; 3398 return elt;
3394 offset += copy; 3399 offset += copy;
@@ -3399,6 +3404,31 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3399 return elt; 3404 return elt;
3400} 3405}
3401 3406
3407/**
3408 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
3409 * @skb: Socket buffer containing the buffers to be mapped
3410 * @sg: The scatter-gather list to map into
3411 * @offset: The offset into the buffer's contents to start mapping
3412 * @len: Length of buffer space to be mapped
3413 *
3414 * Fill the specified scatter-gather list with mappings/pointers into a
3415 * region of the buffer space attached to a socket buffer. Returns either
3416 * the number of scatterlist items used, or -EMSGSIZE if the contents
3417 * could not fit.
3418 */
3419int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3420{
3421 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
3422
3423 if (nsg <= 0)
3424 return nsg;
3425
3426 sg_mark_end(&sg[nsg - 1]);
3427
3428 return nsg;
3429}
3430EXPORT_SYMBOL_GPL(skb_to_sgvec);
3431
3402/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given 3432/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given
3403 * sglist without mark the sg which contain last skb data as the end. 3433 * sglist without mark the sg which contain last skb data as the end.
3404 * So the caller can mannipulate sg list as will when padding new data after 3434 * So the caller can mannipulate sg list as will when padding new data after
@@ -3421,19 +3451,11 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3421int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 3451int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
3422 int offset, int len) 3452 int offset, int len)
3423{ 3453{
3424 return __skb_to_sgvec(skb, sg, offset, len); 3454 return __skb_to_sgvec(skb, sg, offset, len, 0);
3425} 3455}
3426EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 3456EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
3427 3457
3428int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3429{
3430 int nsg = __skb_to_sgvec(skb, sg, offset, len);
3431 3458
3432 sg_mark_end(&sg[nsg - 1]);
3433
3434 return nsg;
3435}
3436EXPORT_SYMBOL_GPL(skb_to_sgvec);
3437 3459
3438/** 3460/**
3439 * skb_cow_data - Check that a socket buffer's data buffers are writable 3461 * skb_cow_data - Check that a socket buffer's data buffers are writable
@@ -3571,7 +3593,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3571 3593
3572 skb_queue_tail(&sk->sk_error_queue, skb); 3594 skb_queue_tail(&sk->sk_error_queue, skb);
3573 if (!sock_flag(sk, SOCK_DEAD)) 3595 if (!sock_flag(sk, SOCK_DEAD))
3574 sk->sk_data_ready(sk); 3596 sk->sk_error_report(sk);
3575 return 0; 3597 return 0;
3576} 3598}
3577EXPORT_SYMBOL(sock_queue_err_skb); 3599EXPORT_SYMBOL(sock_queue_err_skb);
@@ -3715,7 +3737,8 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
3715 return; 3737 return;
3716 3738
3717 if (tsonly) { 3739 if (tsonly) {
3718 skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags; 3740 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
3741 SKBTX_ANY_TSTAMP;
3719 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 3742 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
3720 } 3743 }
3721 3744
@@ -4273,13 +4296,18 @@ EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
4273 4296
4274static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 4297static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
4275{ 4298{
4299 int mac_len;
4300
4276 if (skb_cow(skb, skb_headroom(skb)) < 0) { 4301 if (skb_cow(skb, skb_headroom(skb)) < 0) {
4277 kfree_skb(skb); 4302 kfree_skb(skb);
4278 return NULL; 4303 return NULL;
4279 } 4304 }
4280 4305
4281 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, 4306 mac_len = skb->data - skb_mac_header(skb);
4282 2 * ETH_ALEN); 4307 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
4308 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
4309 mac_len - VLAN_HLEN - ETH_TLEN);
4310 }
4283 skb->mac_header += VLAN_HLEN; 4311 skb->mac_header += VLAN_HLEN;
4284 return skb; 4312 return skb;
4285} 4313}
diff --git a/net/core/sock.c b/net/core/sock.c
index cd12cb6fe366..4238835a0e4e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1474,7 +1474,7 @@ void sk_destruct(struct sock *sk)
1474 1474
1475static void __sk_free(struct sock *sk) 1475static void __sk_free(struct sock *sk)
1476{ 1476{
1477 if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt)) 1477 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
1478 sock_diag_broadcast_destroy(sk); 1478 sock_diag_broadcast_destroy(sk);
1479 else 1479 else
1480 sk_destruct(sk); 1480 sk_destruct(sk);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index f5ef2115871f..32898247d8bf 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -292,7 +292,13 @@ static struct ctl_table net_core_table[] = {
292 .data = &bpf_jit_enable, 292 .data = &bpf_jit_enable,
293 .maxlen = sizeof(int), 293 .maxlen = sizeof(int),
294 .mode = 0644, 294 .mode = 0644,
295#ifndef CONFIG_BPF_JIT_ALWAYS_ON
295 .proc_handler = proc_dointvec 296 .proc_handler = proc_dointvec
297#else
298 .proc_handler = proc_dointvec_minmax,
299 .extra1 = &one,
300 .extra2 = &one,
301#endif
296 }, 302 },
297#endif 303#endif
298 { 304 {
@@ -423,8 +429,6 @@ static __net_init int sysctl_core_net_init(struct net *net)
423{ 429{
424 struct ctl_table *tbl; 430 struct ctl_table *tbl;
425 431
426 net->core.sysctl_somaxconn = SOMAXCONN;
427
428 tbl = netns_core_table; 432 tbl = netns_core_table;
429 if (!net_eq(net, &init_net)) { 433 if (!net_eq(net, &init_net)) {
430 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); 434 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 5e3a7302f774..86a2ed0fb219 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
126 DCCPF_SEQ_WMAX)); 126 DCCPF_SEQ_WMAX));
127} 127}
128 128
129static void dccp_tasklet_schedule(struct sock *sk)
130{
131 struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
132
133 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
134 sock_hold(sk);
135 __tasklet_schedule(t);
136 }
137}
138
129static void ccid2_hc_tx_rto_expire(unsigned long data) 139static void ccid2_hc_tx_rto_expire(unsigned long data)
130{ 140{
131 struct sock *sk = (struct sock *)data; 141 struct sock *sk = (struct sock *)data;
@@ -140,6 +150,9 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
140 150
141 ccid2_pr_debug("RTO_EXPIRE\n"); 151 ccid2_pr_debug("RTO_EXPIRE\n");
142 152
153 if (sk->sk_state == DCCP_CLOSED)
154 goto out;
155
143 /* back-off timer */ 156 /* back-off timer */
144 hc->tx_rto <<= 1; 157 hc->tx_rto <<= 1;
145 if (hc->tx_rto > DCCP_RTO_MAX) 158 if (hc->tx_rto > DCCP_RTO_MAX)
@@ -163,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
163 176
164 /* if we were blocked before, we may now send cwnd=1 packet */ 177 /* if we were blocked before, we may now send cwnd=1 packet */
165 if (sender_was_blocked) 178 if (sender_was_blocked)
166 tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); 179 dccp_tasklet_schedule(sk);
167 /* restart backed-off timer */ 180 /* restart backed-off timer */
168 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); 181 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
169out: 182out:
@@ -703,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
703done: 716done:
704 /* check if incoming Acks allow pending packets to be sent */ 717 /* check if incoming Acks allow pending packets to be sent */
705 if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) 718 if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
706 tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); 719 dccp_tasklet_schedule(sk);
707 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); 720 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
708} 721}
709 722
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 119c04317d48..03fcf3ee1534 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -599,7 +599,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
599{ 599{
600 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); 600 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
601 struct dccp_sock *dp = dccp_sk(sk); 601 struct dccp_sock *dp = dccp_sk(sk);
602 ktime_t now = ktime_get_real(); 602 ktime_t now = ktime_get();
603 s64 delta = 0; 603 s64 delta = 0;
604 604
605 switch (fbtype) { 605 switch (fbtype) {
@@ -624,15 +624,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
624 case CCID3_FBACK_PERIODIC: 624 case CCID3_FBACK_PERIODIC:
625 delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback); 625 delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
626 if (delta <= 0) 626 if (delta <= 0)
627 DCCP_BUG("delta (%ld) <= 0", (long)delta); 627 delta = 1;
628 else 628 hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
629 hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
630 break; 629 break;
631 default: 630 default:
632 return; 631 return;
633 } 632 }
634 633
635 ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta, 634 ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
636 hc->rx_x_recv, hc->rx_pinv); 635 hc->rx_x_recv, hc->rx_pinv);
637 636
638 hc->rx_tstamp_last_feedback = now; 637 hc->rx_tstamp_last_feedback = now;
@@ -679,7 +678,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
679static u32 ccid3_first_li(struct sock *sk) 678static u32 ccid3_first_li(struct sock *sk)
680{ 679{
681 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); 680 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
682 u32 x_recv, p, delta; 681 u32 x_recv, p;
682 s64 delta;
683 u64 fval; 683 u64 fval;
684 684
685 if (hc->rx_rtt == 0) { 685 if (hc->rx_rtt == 0) {
@@ -687,7 +687,9 @@ static u32 ccid3_first_li(struct sock *sk)
687 hc->rx_rtt = DCCP_FALLBACK_RTT; 687 hc->rx_rtt = DCCP_FALLBACK_RTT;
688 } 688 }
689 689
690 delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback)); 690 delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
691 if (delta <= 0)
692 delta = 1;
691 x_recv = scaled_div32(hc->rx_bytes_recv, delta); 693 x_recv = scaled_div32(hc->rx_bytes_recv, delta);
692 if (x_recv == 0) { /* would also trigger divide-by-zero */ 694 if (x_recv == 0) { /* would also trigger divide-by-zero */
693 DCCP_WARN("X_recv==0\n"); 695 DCCP_WARN("X_recv==0\n");
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 6eb2bbf9873b..45fd82e61e79 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -618,6 +618,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
618 ireq = inet_rsk(req); 618 ireq = inet_rsk(req);
619 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); 619 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
620 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); 620 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
621 ireq->ir_mark = inet_request_mark(sk, skb);
621 ireq->ireq_family = AF_INET; 622 ireq->ireq_family = AF_INET;
622 ireq->ir_iif = sk->sk_bound_dev_if; 623 ireq->ir_iif = sk->sk_bound_dev_if;
623 624
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 09a9ab65f4e1..0bf41faeffc4 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -345,6 +345,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
345 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; 345 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
346 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; 346 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
347 ireq->ireq_family = AF_INET6; 347 ireq->ireq_family = AF_INET6;
348 ireq->ir_mark = inet_request_mark(sk, skb);
348 349
349 if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) || 350 if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
350 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 351 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b68168fcc06a..936dab12f99f 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -259,6 +259,7 @@ int dccp_disconnect(struct sock *sk, int flags)
259{ 259{
260 struct inet_connection_sock *icsk = inet_csk(sk); 260 struct inet_connection_sock *icsk = inet_csk(sk);
261 struct inet_sock *inet = inet_sk(sk); 261 struct inet_sock *inet = inet_sk(sk);
262 struct dccp_sock *dp = dccp_sk(sk);
262 int err = 0; 263 int err = 0;
263 const int old_state = sk->sk_state; 264 const int old_state = sk->sk_state;
264 265
@@ -278,6 +279,8 @@ int dccp_disconnect(struct sock *sk, int flags)
278 sk->sk_err = ECONNRESET; 279 sk->sk_err = ECONNRESET;
279 280
280 dccp_clear_xmit_timers(sk); 281 dccp_clear_xmit_timers(sk);
282 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
283 dp->dccps_hc_rx_ccid = NULL;
281 284
282 __skb_queue_purge(&sk->sk_receive_queue); 285 __skb_queue_purge(&sk->sk_receive_queue);
283 __skb_queue_purge(&sk->sk_write_queue); 286 __skb_queue_purge(&sk->sk_write_queue);
@@ -784,6 +787,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
784 if (skb == NULL) 787 if (skb == NULL)
785 goto out_release; 788 goto out_release;
786 789
790 if (sk->sk_state == DCCP_CLOSED) {
791 rc = -ENOTCONN;
792 goto out_discard;
793 }
794
787 skb_reserve(skb, sk->sk_prot->max_header); 795 skb_reserve(skb, sk->sk_prot->max_header);
788 rc = memcpy_from_msg(skb_put(skb, len), msg, len); 796 rc = memcpy_from_msg(skb_put(skb, len), msg, len);
789 if (rc != 0) 797 if (rc != 0)
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 3ef7acef3ce8..aa7c7dad7f96 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -230,12 +230,12 @@ static void dccp_write_xmitlet(unsigned long data)
230 else 230 else
231 dccp_write_xmit(sk); 231 dccp_write_xmit(sk);
232 bh_unlock_sock(sk); 232 bh_unlock_sock(sk);
233 sock_put(sk);
233} 234}
234 235
235static void dccp_write_xmit_timer(unsigned long data) 236static void dccp_write_xmit_timer(unsigned long data)
236{ 237{
237 dccp_write_xmitlet(data); 238 dccp_write_xmitlet(data);
238 sock_put((struct sock *)data);
239} 239}
240 240
241void dccp_init_xmit_timers(struct sock *sk) 241void dccp_init_xmit_timers(struct sock *sk)
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 13d6b1a6e0fc..9d8fcdefefc0 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1337,6 +1337,12 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
1337 lock_sock(sk); 1337 lock_sock(sk);
1338 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0); 1338 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
1339 release_sock(sk); 1339 release_sock(sk);
1340#ifdef CONFIG_NETFILTER
1341 /* we need to exclude all possible ENOPROTOOPTs except default case */
1342 if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
1343 optname != DSO_STREAM && optname != DSO_SEQPACKET)
1344 err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
1345#endif
1340 1346
1341 return err; 1347 return err;
1342} 1348}
@@ -1444,15 +1450,6 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
1444 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation); 1450 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
1445 break; 1451 break;
1446 1452
1447 default:
1448#ifdef CONFIG_NETFILTER
1449 return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
1450#endif
1451 case DSO_LINKINFO:
1452 case DSO_STREAM:
1453 case DSO_SEQPACKET:
1454 return -ENOPROTOOPT;
1455
1456 case DSO_MAXWINDOW: 1453 case DSO_MAXWINDOW:
1457 if (optlen != sizeof(unsigned long)) 1454 if (optlen != sizeof(unsigned long))
1458 return -EINVAL; 1455 return -EINVAL;
@@ -1500,6 +1497,12 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
1500 return -EINVAL; 1497 return -EINVAL;
1501 scp->info_loc = u.info; 1498 scp->info_loc = u.info;
1502 break; 1499 break;
1500
1501 case DSO_LINKINFO:
1502 case DSO_STREAM:
1503 case DSO_SEQPACKET:
1504 default:
1505 return -ENOPROTOOPT;
1503 } 1506 }
1504 1507
1505 return 0; 1508 return 0;
@@ -1513,6 +1516,20 @@ static int dn_getsockopt(struct socket *sock, int level, int optname, char __use
1513 lock_sock(sk); 1516 lock_sock(sk);
1514 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0); 1517 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
1515 release_sock(sk); 1518 release_sock(sk);
1519#ifdef CONFIG_NETFILTER
1520 if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
1521 optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
1522 optname != DSO_CONREJECT) {
1523 int len;
1524
1525 if (get_user(len, optlen))
1526 return -EFAULT;
1527
1528 err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
1529 if (err >= 0)
1530 err = put_user(len, optlen);
1531 }
1532#endif
1516 1533
1517 return err; 1534 return err;
1518} 1535}
@@ -1578,26 +1595,6 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1578 r_data = &link; 1595 r_data = &link;
1579 break; 1596 break;
1580 1597
1581 default:
1582#ifdef CONFIG_NETFILTER
1583 {
1584 int ret, len;
1585
1586 if (get_user(len, optlen))
1587 return -EFAULT;
1588
1589 ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
1590 if (ret >= 0)
1591 ret = put_user(len, optlen);
1592 return ret;
1593 }
1594#endif
1595 case DSO_STREAM:
1596 case DSO_SEQPACKET:
1597 case DSO_CONACCEPT:
1598 case DSO_CONREJECT:
1599 return -ENOPROTOOPT;
1600
1601 case DSO_MAXWINDOW: 1598 case DSO_MAXWINDOW:
1602 if (r_len > sizeof(unsigned long)) 1599 if (r_len > sizeof(unsigned long))
1603 r_len = sizeof(unsigned long); 1600 r_len = sizeof(unsigned long);
@@ -1629,6 +1626,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1629 r_len = sizeof(unsigned char); 1626 r_len = sizeof(unsigned char);
1630 r_data = &scp->info_rem; 1627 r_data = &scp->info_rem;
1631 break; 1628 break;
1629
1630 case DSO_STREAM:
1631 case DSO_SEQPACKET:
1632 case DSO_CONACCEPT:
1633 case DSO_CONREJECT:
1634 default:
1635 return -ENOPROTOOPT;
1632 } 1636 }
1633 1637
1634 if (r_data) { 1638 if (r_data) {
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 6abc5012200b..1689c7bdf1c9 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -25,6 +25,7 @@
25#include <linux/moduleparam.h> 25#include <linux/moduleparam.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/string.h> 27#include <linux/string.h>
28#include <linux/ratelimit.h>
28#include <linux/kernel.h> 29#include <linux/kernel.h>
29#include <linux/keyctl.h> 30#include <linux/keyctl.h>
30#include <linux/err.h> 31#include <linux/err.h>
@@ -86,35 +87,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
86 opt++; 87 opt++;
87 kdebug("options: '%s'", opt); 88 kdebug("options: '%s'", opt);
88 do { 89 do {
90 int opt_len, opt_nlen;
89 const char *eq; 91 const char *eq;
90 int opt_len, opt_nlen, opt_vlen, tmp; 92 char optval[128];
91 93
92 next_opt = memchr(opt, '#', end - opt) ?: end; 94 next_opt = memchr(opt, '#', end - opt) ?: end;
93 opt_len = next_opt - opt; 95 opt_len = next_opt - opt;
94 if (!opt_len) { 96 if (opt_len <= 0 || opt_len > sizeof(optval)) {
95 printk(KERN_WARNING 97 pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
96 "Empty option to dns_resolver key\n"); 98 opt_len);
97 return -EINVAL; 99 return -EINVAL;
98 } 100 }
99 101
100 eq = memchr(opt, '=', opt_len) ?: end; 102 eq = memchr(opt, '=', opt_len);
101 opt_nlen = eq - opt; 103 if (eq) {
102 eq++; 104 opt_nlen = eq - opt;
103 opt_vlen = next_opt - eq; /* will be -1 if no value */ 105 eq++;
106 memcpy(optval, eq, next_opt - eq);
107 optval[next_opt - eq] = '\0';
108 } else {
109 opt_nlen = opt_len;
110 optval[0] = '\0';
111 }
104 112
105 tmp = opt_vlen >= 0 ? opt_vlen : 0; 113 kdebug("option '%*.*s' val '%s'",
106 kdebug("option '%*.*s' val '%*.*s'", 114 opt_nlen, opt_nlen, opt, optval);
107 opt_nlen, opt_nlen, opt, tmp, tmp, eq);
108 115
109 /* see if it's an error number representing a DNS error 116 /* see if it's an error number representing a DNS error
110 * that's to be recorded as the result in this key */ 117 * that's to be recorded as the result in this key */
111 if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 && 118 if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
112 memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) { 119 memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
113 kdebug("dns error number option"); 120 kdebug("dns error number option");
114 if (opt_vlen <= 0)
115 goto bad_option_value;
116 121
117 ret = kstrtoul(eq, 10, &derrno); 122 ret = kstrtoul(optval, 10, &derrno);
118 if (ret < 0) 123 if (ret < 0)
119 goto bad_option_value; 124 goto bad_option_value;
120 125
@@ -127,10 +132,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
127 } 132 }
128 133
129 bad_option_value: 134 bad_option_value:
130 printk(KERN_WARNING 135 pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
131 "Option '%*.*s' to dns_resolver key:" 136 opt_nlen, opt_nlen, opt);
132 " bad/missing value\n",
133 opt_nlen, opt_nlen, opt);
134 return -EINVAL; 137 return -EINVAL;
135 } while (opt = next_opt + 1, opt < end); 138 } while (opt = next_opt + 1, opt < end);
136 } 139 }
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 554c2a961ad5..48b28a7ecc7a 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1099,6 +1099,9 @@ int dsa_slave_suspend(struct net_device *slave_dev)
1099{ 1099{
1100 struct dsa_slave_priv *p = netdev_priv(slave_dev); 1100 struct dsa_slave_priv *p = netdev_priv(slave_dev);
1101 1101
1102 if (!netif_running(slave_dev))
1103 return 0;
1104
1102 netif_device_detach(slave_dev); 1105 netif_device_detach(slave_dev);
1103 1106
1104 if (p->phy) { 1107 if (p->phy) {
@@ -1116,6 +1119,9 @@ int dsa_slave_resume(struct net_device *slave_dev)
1116{ 1119{
1117 struct dsa_slave_priv *p = netdev_priv(slave_dev); 1120 struct dsa_slave_priv *p = netdev_priv(slave_dev);
1118 1121
1122 if (!netif_running(slave_dev))
1123 return 0;
1124
1119 netif_device_attach(slave_dev); 1125 netif_device_attach(slave_dev);
1120 1126
1121 if (p->phy) { 1127 if (p->phy) {
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 20c49c724ba0..e8b279443d37 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -206,9 +206,13 @@ static inline void lowpan_netlink_fini(void)
206static int lowpan_device_event(struct notifier_block *unused, 206static int lowpan_device_event(struct notifier_block *unused,
207 unsigned long event, void *ptr) 207 unsigned long event, void *ptr)
208{ 208{
209 struct net_device *wdev = netdev_notifier_info_to_dev(ptr); 209 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
210 struct wpan_dev *wpan_dev;
210 211
211 if (wdev->type != ARPHRD_IEEE802154) 212 if (ndev->type != ARPHRD_IEEE802154)
213 return NOTIFY_DONE;
214 wpan_dev = ndev->ieee802154_ptr;
215 if (!wpan_dev)
212 goto out; 216 goto out;
213 217
214 switch (event) { 218 switch (event) {
@@ -217,8 +221,8 @@ static int lowpan_device_event(struct notifier_block *unused,
217 * also delete possible lowpan interfaces which belongs 221 * also delete possible lowpan interfaces which belongs
218 * to the wpan interface. 222 * to the wpan interface.
219 */ 223 */
220 if (wdev->ieee802154_ptr->lowpan_dev) 224 if (wpan_dev->lowpan_dev)
221 lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL); 225 lowpan_dellink(wpan_dev->lowpan_dev, NULL);
222 break; 226 break;
223 default: 227 default:
224 break; 228 break;
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index a548be247e15..47b397264f24 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -302,12 +302,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
302 skb->sk = sk; 302 skb->sk = sk;
303 skb->protocol = htons(ETH_P_IEEE802154); 303 skb->protocol = htons(ETH_P_IEEE802154);
304 304
305 dev_put(dev);
306
307 err = dev_queue_xmit(skb); 305 err = dev_queue_xmit(skb);
308 if (err > 0) 306 if (err > 0)
309 err = net_xmit_errno(err); 307 err = net_xmit_errno(err);
310 308
309 dev_put(dev);
310
311 return err ?: size; 311 return err ?: size;
312 312
313out_skb: 313out_skb:
@@ -689,12 +689,12 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
689 skb->sk = sk; 689 skb->sk = sk;
690 skb->protocol = htons(ETH_P_IEEE802154); 690 skb->protocol = htons(ETH_P_IEEE802154);
691 691
692 dev_put(dev);
693
694 err = dev_queue_xmit(skb); 692 err = dev_queue_xmit(skb);
695 if (err > 0) 693 if (err > 0)
696 err = net_xmit_errno(err); 694 err = net_xmit_errno(err);
697 695
696 dev_put(dev);
697
698 return err ?: size; 698 return err ?: size;
699 699
700out_skb: 700out_skb:
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 416dfa004cfb..09d6c4a6b53d 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -186,6 +186,7 @@ config NET_IPGRE_DEMUX
186 186
187config NET_IP_TUNNEL 187config NET_IP_TUNNEL
188 tristate 188 tristate
189 select DST_CACHE
189 default n 190 default n
190 191
191config NET_IPGRE 192config NET_IPGRE
@@ -353,6 +354,7 @@ config INET_ESP
353 select CRYPTO_CBC 354 select CRYPTO_CBC
354 select CRYPTO_SHA1 355 select CRYPTO_SHA1
355 select CRYPTO_DES 356 select CRYPTO_DES
357 select CRYPTO_ECHAINIV
356 ---help--- 358 ---help---
357 Support for IPsec ESP. 359 Support for IPsec ESP.
358 360
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 22377c8ff14b..e8f862358518 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -220,7 +220,9 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
220 ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); 220 ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
221 221
222 sg_init_table(sg, nfrags + sglists); 222 sg_init_table(sg, nfrags + sglists);
223 skb_to_sgvec_nomark(skb, sg, 0, skb->len); 223 err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
224 if (unlikely(err < 0))
225 goto out_free;
224 226
225 if (x->props.flags & XFRM_STATE_ESN) { 227 if (x->props.flags & XFRM_STATE_ESN) {
226 /* Attach seqhi sg right after packet payload */ 228 /* Attach seqhi sg right after packet payload */
@@ -393,7 +395,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
393 skb_push(skb, ihl); 395 skb_push(skb, ihl);
394 396
395 sg_init_table(sg, nfrags + sglists); 397 sg_init_table(sg, nfrags + sglists);
396 skb_to_sgvec_nomark(skb, sg, 0, skb->len); 398 err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
399 if (unlikely(err < 0))
400 goto out_free;
397 401
398 if (x->props.flags & XFRM_STATE_ESN) { 402 if (x->props.flags & XFRM_STATE_ESN) {
399 /* Attach seqhi sg right after packet payload */ 403 /* Attach seqhi sg right after packet payload */
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 711b4dfa17c3..bfa79831873f 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
223 223
224static int arp_constructor(struct neighbour *neigh) 224static int arp_constructor(struct neighbour *neigh)
225{ 225{
226 __be32 addr = *(__be32 *)neigh->primary_key; 226 __be32 addr;
227 struct net_device *dev = neigh->dev; 227 struct net_device *dev = neigh->dev;
228 struct in_device *in_dev; 228 struct in_device *in_dev;
229 struct neigh_parms *parms; 229 struct neigh_parms *parms;
230 u32 inaddr_any = INADDR_ANY;
230 231
232 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
233 memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
234
235 addr = *(__be32 *)neigh->primary_key;
231 rcu_read_lock(); 236 rcu_read_lock();
232 in_dev = __in_dev_get_rcu(dev); 237 in_dev = __in_dev_get_rcu(dev);
233 if (!in_dev) { 238 if (!in_dev) {
@@ -432,7 +437,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
432 /*unsigned long now; */ 437 /*unsigned long now; */
433 struct net *net = dev_net(dev); 438 struct net *net = dev_net(dev);
434 439
435 rt = ip_route_output(net, sip, tip, 0, 0); 440 rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
436 if (IS_ERR(rt)) 441 if (IS_ERR(rt))
437 return 1; 442 return 1;
438 if (rt->dst.dev != dev) { 443 if (rt->dst.dev != dev) {
@@ -653,6 +658,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
653 unsigned char *arp_ptr; 658 unsigned char *arp_ptr;
654 struct rtable *rt; 659 struct rtable *rt;
655 unsigned char *sha; 660 unsigned char *sha;
661 unsigned char *tha = NULL;
656 __be32 sip, tip; 662 __be32 sip, tip;
657 u16 dev_type = dev->type; 663 u16 dev_type = dev->type;
658 int addr_type; 664 int addr_type;
@@ -724,6 +730,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
724 break; 730 break;
725#endif 731#endif
726 default: 732 default:
733 tha = arp_ptr;
727 arp_ptr += dev->addr_len; 734 arp_ptr += dev->addr_len;
728 } 735 }
729 memcpy(&tip, arp_ptr, 4); 736 memcpy(&tip, arp_ptr, 4);
@@ -834,8 +841,18 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
834 It is possible, that this option should be enabled for some 841 It is possible, that this option should be enabled for some
835 devices (strip is candidate) 842 devices (strip is candidate)
836 */ 843 */
837 is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip && 844 is_garp = tip == sip && addr_type == RTN_UNICAST;
838 addr_type == RTN_UNICAST; 845
846 /* Unsolicited ARP _replies_ also require target hwaddr to be
847 * the same as source.
848 */
849 if (is_garp && arp->ar_op == htons(ARPOP_REPLY))
850 is_garp =
851 /* IPv4 over IEEE 1394 doesn't provide target
852 * hardware address field in its ARP payload.
853 */
854 tha &&
855 !memcmp(tha, sha, dev->addr_len);
839 856
840 if (!n && 857 if (!n &&
841 ((arp->ar_op == htons(ARPOP_REPLY) && 858 ((arp->ar_op == htons(ARPOP_REPLY) &&
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 20fb25e3027b..3d8021d55336 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -268,10 +268,11 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
268 esph->spi = x->id.spi; 268 esph->spi = x->id.spi;
269 269
270 sg_init_table(sg, nfrags); 270 sg_init_table(sg, nfrags);
271 skb_to_sgvec(skb, sg, 271 err = skb_to_sgvec(skb, sg,
272 (unsigned char *)esph - skb->data, 272 (unsigned char *)esph - skb->data,
273 assoclen + ivlen + clen + alen); 273 assoclen + ivlen + clen + alen);
274 274 if (unlikely(err < 0))
275 goto error;
275 aead_request_set_crypt(req, sg, sg, ivlen + clen, iv); 276 aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
276 aead_request_set_ad(req, assoclen); 277 aead_request_set_ad(req, assoclen);
277 278
@@ -481,7 +482,9 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
481 } 482 }
482 483
483 sg_init_table(sg, nfrags); 484 sg_init_table(sg, nfrags);
484 skb_to_sgvec(skb, sg, 0, skb->len); 485 err = skb_to_sgvec(skb, sg, 0, skb->len);
486 if (unlikely(err < 0))
487 goto out;
485 488
486 aead_request_set_crypt(req, sg, sg, elen + ivlen, iv); 489 aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
487 aead_request_set_ad(req, assoclen); 490 aead_request_set_ad(req, assoclen);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c9e68ff48a72..015c33712803 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -289,18 +289,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
289 return ip_hdr(skb)->daddr; 289 return ip_hdr(skb)->daddr;
290 290
291 in_dev = __in_dev_get_rcu(dev); 291 in_dev = __in_dev_get_rcu(dev);
292 BUG_ON(!in_dev);
293 292
294 net = dev_net(dev); 293 net = dev_net(dev);
295 294
296 scope = RT_SCOPE_UNIVERSE; 295 scope = RT_SCOPE_UNIVERSE;
297 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { 296 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
297 bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
298 struct flowi4 fl4 = { 298 struct flowi4 fl4 = {
299 .flowi4_iif = LOOPBACK_IFINDEX, 299 .flowi4_iif = LOOPBACK_IFINDEX,
300 .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
300 .daddr = ip_hdr(skb)->saddr, 301 .daddr = ip_hdr(skb)->saddr,
301 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), 302 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
302 .flowi4_scope = scope, 303 .flowi4_scope = scope,
303 .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0, 304 .flowi4_mark = vmark ? skb->mark : 0,
304 }; 305 };
305 if (!fib_lookup(net, &fl4, &res, 0)) 306 if (!fib_lookup(net, &fl4, &res, 0))
306 return FIB_RES_PREFSRC(net, res); 307 return FIB_RES_PREFSRC(net, res);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 313e3c11a15a..03ebff3950d8 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -640,6 +640,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
640 fi->fib_nh, cfg)) 640 fi->fib_nh, cfg))
641 return 1; 641 return 1;
642 } 642 }
643#ifdef CONFIG_IP_ROUTE_CLASSID
644 if (cfg->fc_flow &&
645 cfg->fc_flow != fi->fib_nh->nh_tclassid)
646 return 1;
647#endif
643 if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) && 648 if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
644 (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw)) 649 (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
645 return 0; 650 return 0;
@@ -974,6 +979,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
974 if (val == TCP_CA_UNSPEC) 979 if (val == TCP_CA_UNSPEC)
975 return -EINVAL; 980 return -EINVAL;
976 } else { 981 } else {
982 if (nla_len(nla) != sizeof(u32))
983 return -EINVAL;
977 val = nla_get_u32(nla); 984 val = nla_get_u32(nla);
978 } 985 }
979 if (type == RTAX_ADVMSS && val > 65535 - 40) 986 if (type == RTAX_ADVMSS && val > 65535 - 40)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index b60106d34346..c67efa3e79dd 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -338,7 +338,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
338 return htonl(INADDR_ANY); 338 return htonl(INADDR_ANY);
339 339
340 for_ifa(in_dev) { 340 for_ifa(in_dev) {
341 if (inet_ifa_match(fl4->saddr, ifa)) 341 if (fl4->saddr == ifa->ifa_local)
342 return fl4->saddr; 342 return fl4->saddr;
343 } endfor_ifa(in_dev); 343 } endfor_ifa(in_dev);
344 344
@@ -392,7 +392,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
392 pip->frag_off = htons(IP_DF); 392 pip->frag_off = htons(IP_DF);
393 pip->ttl = 1; 393 pip->ttl = 1;
394 pip->daddr = fl4.daddr; 394 pip->daddr = fl4.daddr;
395
396 rcu_read_lock();
395 pip->saddr = igmpv3_get_srcaddr(dev, &fl4); 397 pip->saddr = igmpv3_get_srcaddr(dev, &fl4);
398 rcu_read_unlock();
399
396 pip->protocol = IPPROTO_IGMP; 400 pip->protocol = IPPROTO_IGMP;
397 pip->tot_len = 0; /* filled in later */ 401 pip->tot_len = 0; /* filled in later */
398 ip_select_ident(net, skb, NULL); 402 ip_select_ident(net, skb, NULL);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index c5fb2f694ed0..b2001b20e029 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -119,6 +119,9 @@ out:
119 119
120static bool inet_fragq_should_evict(const struct inet_frag_queue *q) 120static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
121{ 121{
122 if (!hlist_unhashed(&q->list_evictor))
123 return false;
124
122 return q->net->low_thresh == 0 || 125 return q->net->low_thresh == 0 ||
123 frag_mem_limit(q->net) >= q->net->low_thresh; 126 frag_mem_limit(q->net) >= q->net->low_thresh;
124} 127}
@@ -361,11 +364,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
361{ 364{
362 struct inet_frag_queue *q; 365 struct inet_frag_queue *q;
363 366
364 if (frag_mem_limit(nf) > nf->high_thresh) {
365 inet_frag_schedule_worker(f);
366 return NULL;
367 }
368
369 q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); 367 q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
370 if (!q) 368 if (!q)
371 return NULL; 369 return NULL;
@@ -402,6 +400,11 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
402 struct inet_frag_queue *q; 400 struct inet_frag_queue *q;
403 int depth = 0; 401 int depth = 0;
404 402
403 if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
404 inet_frag_schedule_worker(f);
405 return NULL;
406 }
407
405 if (frag_mem_limit(nf) > nf->low_thresh) 408 if (frag_mem_limit(nf) > nf->low_thresh)
406 inet_frag_schedule_worker(f); 409 inet_frag_schedule_worker(f);
407 410
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c67f9bd7699c..d8316869947a 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -182,6 +182,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
182 tw->tw_dport = inet->inet_dport; 182 tw->tw_dport = inet->inet_dport;
183 tw->tw_family = sk->sk_family; 183 tw->tw_family = sk->sk_family;
184 tw->tw_reuse = sk->sk_reuse; 184 tw->tw_reuse = sk->sk_reuse;
185 tw->tw_reuseport = sk->sk_reuseport;
185 tw->tw_hash = sk->sk_hash; 186 tw->tw_hash = sk->sk_hash;
186 tw->tw_ipv6only = 0; 187 tw->tw_ipv6only = 0;
187 tw->tw_transparent = inet->transparent; 188 tw->tw_transparent = inet->transparent;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 09c73dd541c5..c11bb6d2d00a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -480,6 +480,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
480 to->dev = from->dev; 480 to->dev = from->dev;
481 to->mark = from->mark; 481 to->mark = from->mark;
482 482
483 skb_copy_hash(to, from);
484
483 /* Copy the flags to each fragment. */ 485 /* Copy the flags to each fragment. */
484 IPCB(to)->flags = IPCB(from)->flags; 486 IPCB(to)->flags = IPCB(from)->flags;
485 487
@@ -1062,7 +1064,8 @@ alloc_new_skb:
1062 if (copy > length) 1064 if (copy > length)
1063 copy = length; 1065 copy = length;
1064 1066
1065 if (!(rt->dst.dev->features&NETIF_F_SG)) { 1067 if (!(rt->dst.dev->features&NETIF_F_SG) &&
1068 skb_tailroom(skb) >= copy) {
1066 unsigned int off; 1069 unsigned int off;
1067 1070
1068 off = skb->len; 1071 off = skb->len;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 097a1243c16c..88426a6a7a85 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -135,15 +135,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
135{ 135{
136 struct sockaddr_in sin; 136 struct sockaddr_in sin;
137 const struct iphdr *iph = ip_hdr(skb); 137 const struct iphdr *iph = ip_hdr(skb);
138 __be16 *ports = (__be16 *)skb_transport_header(skb); 138 __be16 *ports;
139 int end;
139 140
140 if (skb_transport_offset(skb) + 4 > skb->len) 141 end = skb_transport_offset(skb) + 4;
142 if (end > 0 && !pskb_may_pull(skb, end))
141 return; 143 return;
142 144
143 /* All current transport protocols have the port numbers in the 145 /* All current transport protocols have the port numbers in the
144 * first four bytes of the transport header and this function is 146 * first four bytes of the transport header and this function is
145 * written with this assumption in mind. 147 * written with this assumption in mind.
146 */ 148 */
149 ports = (__be16 *)skb_transport_header(skb);
147 150
148 sin.sin_family = AF_INET; 151 sin.sin_family = AF_INET;
149 sin.sin_addr.s_addr = iph->daddr; 152 sin.sin_addr.s_addr = iph->daddr;
@@ -241,7 +244,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
241 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg); 244 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
242 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr)) 245 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
243 return -EINVAL; 246 return -EINVAL;
244 ipc->oif = src_info->ipi6_ifindex; 247 if (src_info->ipi6_ifindex)
248 ipc->oif = src_info->ipi6_ifindex;
245 ipc->addr = src_info->ipi6_addr.s6_addr32[3]; 249 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
246 continue; 250 continue;
247 } 251 }
@@ -264,7 +268,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
264 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) 268 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
265 return -EINVAL; 269 return -EINVAL;
266 info = (struct in_pktinfo *)CMSG_DATA(cmsg); 270 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
267 ipc->oif = info->ipi_ifindex; 271 if (info->ipi_ifindex)
272 ipc->oif = info->ipi_ifindex;
268 ipc->addr = info->ipi_spec_dst.s_addr; 273 ipc->addr = info->ipi_spec_dst.s_addr;
269 break; 274 break;
270 } 275 }
@@ -491,8 +496,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
491 int err; 496 int err;
492 int copied; 497 int copied;
493 498
494 WARN_ON_ONCE(sk->sk_family == AF_INET6);
495
496 err = -EAGAIN; 499 err = -EAGAIN;
497 skb = sock_dequeue_err_skb(sk); 500 skb = sock_dequeue_err_skb(sk);
498 if (!skb) 501 if (!skb)
@@ -1221,11 +1224,8 @@ int ip_setsockopt(struct sock *sk, int level,
1221 if (err == -ENOPROTOOPT && optname != IP_HDRINCL && 1224 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1222 optname != IP_IPSEC_POLICY && 1225 optname != IP_IPSEC_POLICY &&
1223 optname != IP_XFRM_POLICY && 1226 optname != IP_XFRM_POLICY &&
1224 !ip_mroute_opt(optname)) { 1227 !ip_mroute_opt(optname))
1225 lock_sock(sk);
1226 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); 1228 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1227 release_sock(sk);
1228 }
1229#endif 1229#endif
1230 return err; 1230 return err;
1231} 1231}
@@ -1250,12 +1250,9 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
1250 if (err == -ENOPROTOOPT && optname != IP_HDRINCL && 1250 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1251 optname != IP_IPSEC_POLICY && 1251 optname != IP_IPSEC_POLICY &&
1252 optname != IP_XFRM_POLICY && 1252 optname != IP_XFRM_POLICY &&
1253 !ip_mroute_opt(optname)) { 1253 !ip_mroute_opt(optname))
1254 lock_sock(sk); 1254 err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
1255 err = compat_nf_setsockopt(sk, PF_INET, optname, 1255 optlen);
1256 optval, optlen);
1257 release_sock(sk);
1258 }
1259#endif 1256#endif
1260 return err; 1257 return err;
1261} 1258}
@@ -1533,10 +1530,7 @@ int ip_getsockopt(struct sock *sk, int level,
1533 if (get_user(len, optlen)) 1530 if (get_user(len, optlen))
1534 return -EFAULT; 1531 return -EFAULT;
1535 1532
1536 lock_sock(sk); 1533 err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
1537 err = nf_getsockopt(sk, PF_INET, optname, optval,
1538 &len);
1539 release_sock(sk);
1540 if (err >= 0) 1534 if (err >= 0)
1541 err = put_user(len, optlen); 1535 err = put_user(len, optlen);
1542 return err; 1536 return err;
@@ -1568,9 +1562,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1568 if (get_user(len, optlen)) 1562 if (get_user(len, optlen))
1569 return -EFAULT; 1563 return -EFAULT;
1570 1564
1571 lock_sock(sk);
1572 err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len); 1565 err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
1573 release_sock(sk);
1574 if (err >= 0) 1566 if (err >= 0)
1575 err = put_user(len, optlen); 1567 err = put_user(len, optlen);
1576 return err; 1568 return err;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c18245e05d26..3d62feb65932 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -69,61 +69,6 @@ static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
69 IP_TNL_HASH_BITS); 69 IP_TNL_HASH_BITS);
70} 70}
71 71
72static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
73 struct dst_entry *dst, __be32 saddr)
74{
75 struct dst_entry *old_dst;
76
77 dst_clone(dst);
78 old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
79 dst_release(old_dst);
80 idst->saddr = saddr;
81}
82
83static noinline void tunnel_dst_set(struct ip_tunnel *t,
84 struct dst_entry *dst, __be32 saddr)
85{
86 __tunnel_dst_set(raw_cpu_ptr(t->dst_cache), dst, saddr);
87}
88
89static void tunnel_dst_reset(struct ip_tunnel *t)
90{
91 tunnel_dst_set(t, NULL, 0);
92}
93
94void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
95{
96 int i;
97
98 for_each_possible_cpu(i)
99 __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0);
100}
101EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
102
103static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
104 u32 cookie, __be32 *saddr)
105{
106 struct ip_tunnel_dst *idst;
107 struct dst_entry *dst;
108
109 rcu_read_lock();
110 idst = raw_cpu_ptr(t->dst_cache);
111 dst = rcu_dereference(idst->dst);
112 if (dst && !atomic_inc_not_zero(&dst->__refcnt))
113 dst = NULL;
114 if (dst) {
115 if (!dst->obsolete || dst->ops->check(dst, cookie)) {
116 *saddr = idst->saddr;
117 } else {
118 tunnel_dst_reset(t);
119 dst_release(dst);
120 dst = NULL;
121 }
122 }
123 rcu_read_unlock();
124 return (struct rtable *)dst;
125}
126
127static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p, 72static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
128 __be16 flags, __be32 key) 73 __be16 flags, __be32 key)
129{ 74{
@@ -308,13 +253,14 @@ static struct net_device *__ip_tunnel_create(struct net *net,
308 struct net_device *dev; 253 struct net_device *dev;
309 char name[IFNAMSIZ]; 254 char name[IFNAMSIZ];
310 255
311 if (parms->name[0]) 256 err = -E2BIG;
257 if (parms->name[0]) {
258 if (!dev_valid_name(parms->name))
259 goto failed;
312 strlcpy(name, parms->name, IFNAMSIZ); 260 strlcpy(name, parms->name, IFNAMSIZ);
313 else { 261 } else {
314 if (strlen(ops->kind) > (IFNAMSIZ - 3)) { 262 if (strlen(ops->kind) > (IFNAMSIZ - 3))
315 err = -E2BIG;
316 goto failed; 263 goto failed;
317 }
318 strlcpy(name, ops->kind, IFNAMSIZ); 264 strlcpy(name, ops->kind, IFNAMSIZ);
319 strncat(name, "%d", 2); 265 strncat(name, "%d", 2);
320 } 266 }
@@ -382,11 +328,12 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
382 328
383 if (!IS_ERR(rt)) { 329 if (!IS_ERR(rt)) {
384 tdev = rt->dst.dev; 330 tdev = rt->dst.dev;
385 tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
386 ip_rt_put(rt); 331 ip_rt_put(rt);
387 } 332 }
388 if (dev->type != ARPHRD_ETHER) 333 if (dev->type != ARPHRD_ETHER)
389 dev->flags |= IFF_POINTOPOINT; 334 dev->flags |= IFF_POINTOPOINT;
335
336 dst_cache_reset(&tunnel->dst_cache);
390 } 337 }
391 338
392 if (!tdev && tunnel->parms.link) 339 if (!tdev && tunnel->parms.link)
@@ -733,7 +680,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
733 if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) 680 if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
734 goto tx_error; 681 goto tx_error;
735 682
736 rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL; 683 rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr) :
684 NULL;
737 685
738 if (!rt) { 686 if (!rt) {
739 rt = ip_route_output_key(tunnel->net, &fl4); 687 rt = ip_route_output_key(tunnel->net, &fl4);
@@ -743,7 +691,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
743 goto tx_error; 691 goto tx_error;
744 } 692 }
745 if (connected) 693 if (connected)
746 tunnel_dst_set(tunnel, &rt->dst, fl4.saddr); 694 dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
695 fl4.saddr);
747 } 696 }
748 697
749 if (rt->dst.dev == dev) { 698 if (rt->dst.dev == dev) {
@@ -841,7 +790,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
841 if (set_mtu) 790 if (set_mtu)
842 dev->mtu = mtu; 791 dev->mtu = mtu;
843 } 792 }
844 ip_tunnel_dst_reset_all(t); 793 dst_cache_reset(&t->dst_cache);
845 netdev_state_change(dev); 794 netdev_state_change(dev);
846} 795}
847 796
@@ -980,7 +929,7 @@ static void ip_tunnel_dev_free(struct net_device *dev)
980 struct ip_tunnel *tunnel = netdev_priv(dev); 929 struct ip_tunnel *tunnel = netdev_priv(dev);
981 930
982 gro_cells_destroy(&tunnel->gro_cells); 931 gro_cells_destroy(&tunnel->gro_cells);
983 free_percpu(tunnel->dst_cache); 932 dst_cache_destroy(&tunnel->dst_cache);
984 free_percpu(dev->tstats); 933 free_percpu(dev->tstats);
985 free_netdev(dev); 934 free_netdev(dev);
986} 935}
@@ -1174,15 +1123,15 @@ int ip_tunnel_init(struct net_device *dev)
1174 if (!dev->tstats) 1123 if (!dev->tstats)
1175 return -ENOMEM; 1124 return -ENOMEM;
1176 1125
1177 tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst); 1126 err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1178 if (!tunnel->dst_cache) { 1127 if (err) {
1179 free_percpu(dev->tstats); 1128 free_percpu(dev->tstats);
1180 return -ENOMEM; 1129 return err;
1181 } 1130 }
1182 1131
1183 err = gro_cells_init(&tunnel->gro_cells, dev); 1132 err = gro_cells_init(&tunnel->gro_cells, dev);
1184 if (err) { 1133 if (err) {
1185 free_percpu(tunnel->dst_cache); 1134 dst_cache_destroy(&tunnel->dst_cache);
1186 free_percpu(dev->tstats); 1135 free_percpu(dev->tstats);
1187 return err; 1136 return err;
1188 } 1137 }
@@ -1212,7 +1161,7 @@ void ip_tunnel_uninit(struct net_device *dev)
1212 if (itn->fb_tunnel_dev != dev) 1161 if (itn->fb_tunnel_dev != dev)
1213 ip_tunnel_del(itn, netdev_priv(dev)); 1162 ip_tunnel_del(itn, netdev_priv(dev));
1214 1163
1215 ip_tunnel_dst_reset_all(tunnel); 1164 dst_cache_reset(&tunnel->dst_cache);
1216} 1165}
1217EXPORT_SYMBOL_GPL(ip_tunnel_uninit); 1166EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
1218 1167
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index a03f834f16d5..4b7c81f88abf 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -366,7 +366,6 @@ static int vti_tunnel_init(struct net_device *dev)
366 memcpy(dev->dev_addr, &iph->saddr, 4); 366 memcpy(dev->dev_addr, &iph->saddr, 4);
367 memcpy(dev->broadcast, &iph->daddr, 4); 367 memcpy(dev->broadcast, &iph->daddr, 4);
368 368
369 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
370 dev->mtu = ETH_DATA_LEN; 369 dev->mtu = ETH_DATA_LEN;
371 dev->flags = IFF_NOARP; 370 dev->flags = IFF_NOARP;
372 dev->addr_len = 4; 371 dev->addr_len = 4;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 0bc7412d9e14..60f564db25a3 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -152,7 +152,11 @@ static char dhcp_client_identifier[253] __initdata;
152 152
153/* Persistent data: */ 153/* Persistent data: */
154 154
155#ifdef IPCONFIG_DYNAMIC
155static int ic_proto_used; /* Protocol used, if any */ 156static int ic_proto_used; /* Protocol used, if any */
157#else
158#define ic_proto_used 0
159#endif
156static __be32 ic_nameservers[CONF_NAMESERVERS_MAX]; /* DNS Server IP addresses */ 160static __be32 ic_nameservers[CONF_NAMESERVERS_MAX]; /* DNS Server IP addresses */
157static u8 ic_domain[64]; /* DNS (not NIS) domain name */ 161static u8 ic_domain[64]; /* DNS (not NIS) domain name */
158 162
@@ -786,6 +790,11 @@ static void __init ic_bootp_init_ext(u8 *e)
786 */ 790 */
787static inline void __init ic_bootp_init(void) 791static inline void __init ic_bootp_init(void)
788{ 792{
793 /* Re-initialise all name servers to NONE, in case any were set via the
794 * "ip=" or "nfsaddrs=" kernel command line parameters: any IP addresses
795 * specified there will already have been decoded but are no longer
796 * needed
797 */
789 ic_nameservers_predef(); 798 ic_nameservers_predef();
790 799
791 dev_add_pack(&bootp_packet_type); 800 dev_add_pack(&bootp_packet_type);
@@ -1419,6 +1428,13 @@ static int __init ip_auto_config(void)
1419 int err; 1428 int err;
1420 unsigned int i; 1429 unsigned int i;
1421 1430
1431 /* Initialise all name servers to NONE (but only if the "ip=" or
1432 * "nfsaddrs=" kernel command line parameters weren't decoded, otherwise
1433 * we'll overwrite the IP addresses specified there)
1434 */
1435 if (ic_set_manually == 0)
1436 ic_nameservers_predef();
1437
1422#ifdef CONFIG_PROC_FS 1438#ifdef CONFIG_PROC_FS
1423 proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops); 1439 proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops);
1424#endif /* CONFIG_PROC_FS */ 1440#endif /* CONFIG_PROC_FS */
@@ -1636,6 +1652,7 @@ static int __init ip_auto_config_setup(char *addrs)
1636 return 1; 1652 return 1;
1637 } 1653 }
1638 1654
1655 /* Initialise all name servers to NONE */
1639 ic_nameservers_predef(); 1656 ic_nameservers_predef();
1640 1657
1641 /* Parse string for static IP assignment. */ 1658 /* Parse string for static IP assignment. */
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index c3776ff6749f..699f8a5457a3 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -23,7 +23,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
23 struct rtable *rt; 23 struct rtable *rt;
24 struct flowi4 fl4 = {}; 24 struct flowi4 fl4 = {};
25 __be32 saddr = iph->saddr; 25 __be32 saddr = iph->saddr;
26 __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; 26 const struct sock *sk = skb_to_full_sk(skb);
27 __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0;
27 unsigned int hh_len; 28 unsigned int hh_len;
28 29
29 if (addr_type == RTN_UNSPEC) 30 if (addr_type == RTN_UNSPEC)
@@ -39,7 +40,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
39 fl4.daddr = iph->daddr; 40 fl4.daddr = iph->daddr;
40 fl4.saddr = saddr; 41 fl4.saddr = saddr;
41 fl4.flowi4_tos = RT_TOS(iph->tos); 42 fl4.flowi4_tos = RT_TOS(iph->tos);
42 fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; 43 fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0;
43 fl4.flowi4_mark = skb->mark; 44 fl4.flowi4_mark = skb->mark;
44 fl4.flowi4_flags = flags; 45 fl4.flowi4_flags = flags;
45 rt = ip_route_output_key(net, &fl4); 46 rt = ip_route_output_key(net, &fl4);
@@ -58,7 +59,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
58 xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { 59 xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) {
59 struct dst_entry *dst = skb_dst(skb); 60 struct dst_entry *dst = skb_dst(skb);
60 skb_dst_set(skb, NULL); 61 skb_dst_set(skb, NULL);
61 dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0); 62 dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0);
62 if (IS_ERR(dst)) 63 if (IS_ERR(dst))
63 return PTR_ERR(dst); 64 return PTR_ERR(dst);
64 skb_dst_set(skb, dst); 65 skb_dst_set(skb, dst);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 6e3e0e8b1ce3..f51b32ed353c 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -329,6 +329,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
329 } 329 }
330 if (table_base + v 330 if (table_base + v
331 != arpt_next_entry(e)) { 331 != arpt_next_entry(e)) {
332 if (unlikely(stackidx >= private->stacksize)) {
333 verdict = NF_DROP;
334 break;
335 }
332 jumpstack[stackidx++] = e; 336 jumpstack[stackidx++] = e;
333 } 337 }
334 338
@@ -367,23 +371,12 @@ static inline bool unconditional(const struct arpt_entry *e)
367 memcmp(&e->arp, &uncond, sizeof(uncond)) == 0; 371 memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
368} 372}
369 373
370static bool find_jump_target(const struct xt_table_info *t,
371 const struct arpt_entry *target)
372{
373 struct arpt_entry *iter;
374
375 xt_entry_foreach(iter, t->entries, t->size) {
376 if (iter == target)
377 return true;
378 }
379 return false;
380}
381
382/* Figures out from what hook each rule can be called: returns 0 if 374/* Figures out from what hook each rule can be called: returns 0 if
383 * there are loops. Puts hook bitmask in comefrom. 375 * there are loops. Puts hook bitmask in comefrom.
384 */ 376 */
385static int mark_source_chains(const struct xt_table_info *newinfo, 377static int mark_source_chains(const struct xt_table_info *newinfo,
386 unsigned int valid_hooks, void *entry0) 378 unsigned int valid_hooks, void *entry0,
379 unsigned int *offsets)
387{ 380{
388 unsigned int hook; 381 unsigned int hook;
389 382
@@ -472,10 +465,11 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
472 /* This a jump; chase it. */ 465 /* This a jump; chase it. */
473 duprintf("Jump rule %u -> %u\n", 466 duprintf("Jump rule %u -> %u\n",
474 pos, newpos); 467 pos, newpos);
468 if (!xt_find_jump_offset(offsets, newpos,
469 newinfo->number))
470 return 0;
475 e = (struct arpt_entry *) 471 e = (struct arpt_entry *)
476 (entry0 + newpos); 472 (entry0 + newpos);
477 if (!find_jump_target(newinfo, e))
478 return 0;
479 } else { 473 } else {
480 /* ... this is a fallthru */ 474 /* ... this is a fallthru */
481 newpos = pos + e->next_offset; 475 newpos = pos + e->next_offset;
@@ -517,14 +511,14 @@ static inline int check_target(struct arpt_entry *e, const char *name)
517} 511}
518 512
519static inline int 513static inline int
520find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) 514find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
515 struct xt_percpu_counter_alloc_state *alloc_state)
521{ 516{
522 struct xt_entry_target *t; 517 struct xt_entry_target *t;
523 struct xt_target *target; 518 struct xt_target *target;
524 int ret; 519 int ret;
525 520
526 e->counters.pcnt = xt_percpu_counter_alloc(); 521 if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
527 if (IS_ERR_VALUE(e->counters.pcnt))
528 return -ENOMEM; 522 return -ENOMEM;
529 523
530 t = arpt_get_target(e); 524 t = arpt_get_target(e);
@@ -544,7 +538,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
544err: 538err:
545 module_put(t->u.kernel.target->me); 539 module_put(t->u.kernel.target->me);
546out: 540out:
547 xt_percpu_counter_free(e->counters.pcnt); 541 xt_percpu_counter_free(&e->counters);
548 542
549 return ret; 543 return ret;
550} 544}
@@ -632,7 +626,7 @@ static inline void cleanup_entry(struct arpt_entry *e)
632 if (par.target->destroy != NULL) 626 if (par.target->destroy != NULL)
633 par.target->destroy(&par); 627 par.target->destroy(&par);
634 module_put(par.target->me); 628 module_put(par.target->me);
635 xt_percpu_counter_free(e->counters.pcnt); 629 xt_percpu_counter_free(&e->counters);
636} 630}
637 631
638/* Checks and translates the user-supplied table segment (held in 632/* Checks and translates the user-supplied table segment (held in
@@ -641,7 +635,9 @@ static inline void cleanup_entry(struct arpt_entry *e)
641static int translate_table(struct xt_table_info *newinfo, void *entry0, 635static int translate_table(struct xt_table_info *newinfo, void *entry0,
642 const struct arpt_replace *repl) 636 const struct arpt_replace *repl)
643{ 637{
638 struct xt_percpu_counter_alloc_state alloc_state = { 0 };
644 struct arpt_entry *iter; 639 struct arpt_entry *iter;
640 unsigned int *offsets;
645 unsigned int i; 641 unsigned int i;
646 int ret = 0; 642 int ret = 0;
647 643
@@ -655,6 +651,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
655 } 651 }
656 652
657 duprintf("translate_table: size %u\n", newinfo->size); 653 duprintf("translate_table: size %u\n", newinfo->size);
654 offsets = xt_alloc_entry_offsets(newinfo->number);
655 if (!offsets)
656 return -ENOMEM;
658 i = 0; 657 i = 0;
659 658
660 /* Walk through entries, checking offsets. */ 659 /* Walk through entries, checking offsets. */
@@ -665,7 +664,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
665 repl->underflow, 664 repl->underflow,
666 repl->valid_hooks); 665 repl->valid_hooks);
667 if (ret != 0) 666 if (ret != 0)
668 break; 667 goto out_free;
668 if (i < repl->num_entries)
669 offsets[i] = (void *)iter - entry0;
669 ++i; 670 ++i;
670 if (strcmp(arpt_get_target(iter)->u.user.name, 671 if (strcmp(arpt_get_target(iter)->u.user.name,
671 XT_ERROR_TARGET) == 0) 672 XT_ERROR_TARGET) == 0)
@@ -673,12 +674,13 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
673 } 674 }
674 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); 675 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
675 if (ret != 0) 676 if (ret != 0)
676 return ret; 677 goto out_free;
677 678
679 ret = -EINVAL;
678 if (i != repl->num_entries) { 680 if (i != repl->num_entries) {
679 duprintf("translate_table: %u not %u entries\n", 681 duprintf("translate_table: %u not %u entries\n",
680 i, repl->num_entries); 682 i, repl->num_entries);
681 return -EINVAL; 683 goto out_free;
682 } 684 }
683 685
684 /* Check hooks all assigned */ 686 /* Check hooks all assigned */
@@ -689,22 +691,26 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
689 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 691 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
690 duprintf("Invalid hook entry %u %u\n", 692 duprintf("Invalid hook entry %u %u\n",
691 i, repl->hook_entry[i]); 693 i, repl->hook_entry[i]);
692 return -EINVAL; 694 goto out_free;
693 } 695 }
694 if (newinfo->underflow[i] == 0xFFFFFFFF) { 696 if (newinfo->underflow[i] == 0xFFFFFFFF) {
695 duprintf("Invalid underflow %u %u\n", 697 duprintf("Invalid underflow %u %u\n",
696 i, repl->underflow[i]); 698 i, repl->underflow[i]);
697 return -EINVAL; 699 goto out_free;
698 } 700 }
699 } 701 }
700 702
701 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) 703 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
702 return -ELOOP; 704 ret = -ELOOP;
705 goto out_free;
706 }
707 kvfree(offsets);
703 708
704 /* Finally, each sanity check must pass */ 709 /* Finally, each sanity check must pass */
705 i = 0; 710 i = 0;
706 xt_entry_foreach(iter, entry0, newinfo->size) { 711 xt_entry_foreach(iter, entry0, newinfo->size) {
707 ret = find_check_entry(iter, repl->name, repl->size); 712 ret = find_check_entry(iter, repl->name, repl->size,
713 &alloc_state);
708 if (ret != 0) 714 if (ret != 0)
709 break; 715 break;
710 ++i; 716 ++i;
@@ -720,6 +726,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
720 } 726 }
721 727
722 return ret; 728 return ret;
729 out_free:
730 kvfree(offsets);
731 return ret;
723} 732}
724 733
725static void get_counters(const struct xt_table_info *t, 734static void get_counters(const struct xt_table_info *t,
@@ -1336,8 +1345,8 @@ static int translate_compat_table(struct xt_table_info **pinfo,
1336 1345
1337 newinfo->number = compatr->num_entries; 1346 newinfo->number = compatr->num_entries;
1338 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 1347 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
1339 newinfo->hook_entry[i] = info->hook_entry[i]; 1348 newinfo->hook_entry[i] = compatr->hook_entry[i];
1340 newinfo->underflow[i] = info->underflow[i]; 1349 newinfo->underflow[i] = compatr->underflow[i];
1341 } 1350 }
1342 entry1 = newinfo->entries; 1351 entry1 = newinfo->entries;
1343 pos = entry1; 1352 pos = entry1;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index a399c5419622..9363c1a70f16 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -408,6 +408,10 @@ ipt_do_table(struct sk_buff *skb,
408 } 408 }
409 if (table_base + v != ipt_next_entry(e) && 409 if (table_base + v != ipt_next_entry(e) &&
410 !(e->ip.flags & IPT_F_GOTO)) { 410 !(e->ip.flags & IPT_F_GOTO)) {
411 if (unlikely(stackidx >= private->stacksize)) {
412 verdict = NF_DROP;
413 break;
414 }
411 jumpstack[stackidx++] = e; 415 jumpstack[stackidx++] = e;
412 pr_debug("Pushed %p into pos %u\n", 416 pr_debug("Pushed %p into pos %u\n",
413 e, stackidx - 1); 417 e, stackidx - 1);
@@ -443,23 +447,12 @@ ipt_do_table(struct sk_buff *skb,
443#endif 447#endif
444} 448}
445 449
446static bool find_jump_target(const struct xt_table_info *t,
447 const struct ipt_entry *target)
448{
449 struct ipt_entry *iter;
450
451 xt_entry_foreach(iter, t->entries, t->size) {
452 if (iter == target)
453 return true;
454 }
455 return false;
456}
457
458/* Figures out from what hook each rule can be called: returns 0 if 450/* Figures out from what hook each rule can be called: returns 0 if
459 there are loops. Puts hook bitmask in comefrom. */ 451 there are loops. Puts hook bitmask in comefrom. */
460static int 452static int
461mark_source_chains(const struct xt_table_info *newinfo, 453mark_source_chains(const struct xt_table_info *newinfo,
462 unsigned int valid_hooks, void *entry0) 454 unsigned int valid_hooks, void *entry0,
455 unsigned int *offsets)
463{ 456{
464 unsigned int hook; 457 unsigned int hook;
465 458
@@ -552,10 +545,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
552 /* This a jump; chase it. */ 545 /* This a jump; chase it. */
553 duprintf("Jump rule %u -> %u\n", 546 duprintf("Jump rule %u -> %u\n",
554 pos, newpos); 547 pos, newpos);
548 if (!xt_find_jump_offset(offsets, newpos,
549 newinfo->number))
550 return 0;
555 e = (struct ipt_entry *) 551 e = (struct ipt_entry *)
556 (entry0 + newpos); 552 (entry0 + newpos);
557 if (!find_jump_target(newinfo, e))
558 return 0;
559 } else { 553 } else {
560 /* ... this is a fallthru */ 554 /* ... this is a fallthru */
561 newpos = pos + e->next_offset; 555 newpos = pos + e->next_offset;
@@ -655,7 +649,8 @@ static int check_target(struct ipt_entry *e, struct net *net, const char *name)
655 649
656static int 650static int
657find_check_entry(struct ipt_entry *e, struct net *net, const char *name, 651find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
658 unsigned int size) 652 unsigned int size,
653 struct xt_percpu_counter_alloc_state *alloc_state)
659{ 654{
660 struct xt_entry_target *t; 655 struct xt_entry_target *t;
661 struct xt_target *target; 656 struct xt_target *target;
@@ -664,11 +659,11 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
664 struct xt_mtchk_param mtpar; 659 struct xt_mtchk_param mtpar;
665 struct xt_entry_match *ematch; 660 struct xt_entry_match *ematch;
666 661
667 e->counters.pcnt = xt_percpu_counter_alloc(); 662 if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
668 if (IS_ERR_VALUE(e->counters.pcnt))
669 return -ENOMEM; 663 return -ENOMEM;
670 664
671 j = 0; 665 j = 0;
666 memset(&mtpar, 0, sizeof(mtpar));
672 mtpar.net = net; 667 mtpar.net = net;
673 mtpar.table = name; 668 mtpar.table = name;
674 mtpar.entryinfo = &e->ip; 669 mtpar.entryinfo = &e->ip;
@@ -705,7 +700,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
705 cleanup_match(ematch, net); 700 cleanup_match(ematch, net);
706 } 701 }
707 702
708 xt_percpu_counter_free(e->counters.pcnt); 703 xt_percpu_counter_free(&e->counters);
709 704
710 return ret; 705 return ret;
711} 706}
@@ -801,7 +796,7 @@ cleanup_entry(struct ipt_entry *e, struct net *net)
801 if (par.target->destroy != NULL) 796 if (par.target->destroy != NULL)
802 par.target->destroy(&par); 797 par.target->destroy(&par);
803 module_put(par.target->me); 798 module_put(par.target->me);
804 xt_percpu_counter_free(e->counters.pcnt); 799 xt_percpu_counter_free(&e->counters);
805} 800}
806 801
807/* Checks and translates the user-supplied table segment (held in 802/* Checks and translates the user-supplied table segment (held in
@@ -810,7 +805,9 @@ static int
810translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, 805translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
811 const struct ipt_replace *repl) 806 const struct ipt_replace *repl)
812{ 807{
808 struct xt_percpu_counter_alloc_state alloc_state = { 0 };
813 struct ipt_entry *iter; 809 struct ipt_entry *iter;
810 unsigned int *offsets;
814 unsigned int i; 811 unsigned int i;
815 int ret = 0; 812 int ret = 0;
816 813
@@ -824,6 +821,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
824 } 821 }
825 822
826 duprintf("translate_table: size %u\n", newinfo->size); 823 duprintf("translate_table: size %u\n", newinfo->size);
824 offsets = xt_alloc_entry_offsets(newinfo->number);
825 if (!offsets)
826 return -ENOMEM;
827 i = 0; 827 i = 0;
828 /* Walk through entries, checking offsets. */ 828 /* Walk through entries, checking offsets. */
829 xt_entry_foreach(iter, entry0, newinfo->size) { 829 xt_entry_foreach(iter, entry0, newinfo->size) {
@@ -833,17 +833,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
833 repl->underflow, 833 repl->underflow,
834 repl->valid_hooks); 834 repl->valid_hooks);
835 if (ret != 0) 835 if (ret != 0)
836 return ret; 836 goto out_free;
837 if (i < repl->num_entries)
838 offsets[i] = (void *)iter - entry0;
837 ++i; 839 ++i;
838 if (strcmp(ipt_get_target(iter)->u.user.name, 840 if (strcmp(ipt_get_target(iter)->u.user.name,
839 XT_ERROR_TARGET) == 0) 841 XT_ERROR_TARGET) == 0)
840 ++newinfo->stacksize; 842 ++newinfo->stacksize;
841 } 843 }
842 844
845 ret = -EINVAL;
843 if (i != repl->num_entries) { 846 if (i != repl->num_entries) {
844 duprintf("translate_table: %u not %u entries\n", 847 duprintf("translate_table: %u not %u entries\n",
845 i, repl->num_entries); 848 i, repl->num_entries);
846 return -EINVAL; 849 goto out_free;
847 } 850 }
848 851
849 /* Check hooks all assigned */ 852 /* Check hooks all assigned */
@@ -854,22 +857,26 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
854 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 857 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
855 duprintf("Invalid hook entry %u %u\n", 858 duprintf("Invalid hook entry %u %u\n",
856 i, repl->hook_entry[i]); 859 i, repl->hook_entry[i]);
857 return -EINVAL; 860 goto out_free;
858 } 861 }
859 if (newinfo->underflow[i] == 0xFFFFFFFF) { 862 if (newinfo->underflow[i] == 0xFFFFFFFF) {
860 duprintf("Invalid underflow %u %u\n", 863 duprintf("Invalid underflow %u %u\n",
861 i, repl->underflow[i]); 864 i, repl->underflow[i]);
862 return -EINVAL; 865 goto out_free;
863 } 866 }
864 } 867 }
865 868
866 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) 869 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
867 return -ELOOP; 870 ret = -ELOOP;
871 goto out_free;
872 }
873 kvfree(offsets);
868 874
869 /* Finally, each sanity check must pass */ 875 /* Finally, each sanity check must pass */
870 i = 0; 876 i = 0;
871 xt_entry_foreach(iter, entry0, newinfo->size) { 877 xt_entry_foreach(iter, entry0, newinfo->size) {
872 ret = find_check_entry(iter, net, repl->name, repl->size); 878 ret = find_check_entry(iter, net, repl->name, repl->size,
879 &alloc_state);
873 if (ret != 0) 880 if (ret != 0)
874 break; 881 break;
875 ++i; 882 ++i;
@@ -885,6 +892,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
885 } 892 }
886 893
887 return ret; 894 return ret;
895 out_free:
896 kvfree(offsets);
897 return ret;
888} 898}
889 899
890static void 900static void
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 4a9e6db9df8d..16599bae11dd 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -365,7 +365,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
365 struct ipt_clusterip_tgt_info *cipinfo = par->targinfo; 365 struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
366 const struct ipt_entry *e = par->entryinfo; 366 const struct ipt_entry *e = par->entryinfo;
367 struct clusterip_config *config; 367 struct clusterip_config *config;
368 int ret; 368 int ret, i;
369 369
370 if (par->nft_compat) { 370 if (par->nft_compat) {
371 pr_err("cannot use CLUSTERIP target from nftables compat\n"); 371 pr_err("cannot use CLUSTERIP target from nftables compat\n");
@@ -384,8 +384,18 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
384 pr_info("Please specify destination IP\n"); 384 pr_info("Please specify destination IP\n");
385 return -EINVAL; 385 return -EINVAL;
386 } 386 }
387 387 if (cipinfo->num_local_nodes > ARRAY_SIZE(cipinfo->local_nodes)) {
388 /* FIXME: further sanity checks */ 388 pr_info("bad num_local_nodes %u\n", cipinfo->num_local_nodes);
389 return -EINVAL;
390 }
391 for (i = 0; i < cipinfo->num_local_nodes; i++) {
392 if (cipinfo->local_nodes[i] - 1 >=
393 sizeof(config->local_nodes) * 8) {
394 pr_info("bad local_nodes[%d] %u\n",
395 i, cipinfo->local_nodes[i]);
396 return -EINVAL;
397 }
398 }
389 399
390 config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1); 400 config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1);
391 if (!config) { 401 if (!config) {
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 6a20195a3a2a..3fe8c951f427 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -259,15 +259,19 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
259 struct nf_conntrack_tuple tuple; 259 struct nf_conntrack_tuple tuple;
260 260
261 memset(&tuple, 0, sizeof(tuple)); 261 memset(&tuple, 0, sizeof(tuple));
262
263 lock_sock(sk);
262 tuple.src.u3.ip = inet->inet_rcv_saddr; 264 tuple.src.u3.ip = inet->inet_rcv_saddr;
263 tuple.src.u.tcp.port = inet->inet_sport; 265 tuple.src.u.tcp.port = inet->inet_sport;
264 tuple.dst.u3.ip = inet->inet_daddr; 266 tuple.dst.u3.ip = inet->inet_daddr;
265 tuple.dst.u.tcp.port = inet->inet_dport; 267 tuple.dst.u.tcp.port = inet->inet_dport;
266 tuple.src.l3num = PF_INET; 268 tuple.src.l3num = PF_INET;
267 tuple.dst.protonum = sk->sk_protocol; 269 tuple.dst.protonum = sk->sk_protocol;
270 release_sock(sk);
268 271
269 /* We only do TCP and SCTP at the moment: is there a better way? */ 272 /* We only do TCP and SCTP at the moment: is there a better way? */
270 if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) { 273 if (tuple.dst.protonum != IPPROTO_TCP &&
274 tuple.dst.protonum != IPPROTO_SCTP) {
271 pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n"); 275 pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
272 return -ENOPROTOOPT; 276 return -ENOPROTOOPT;
273 } 277 }
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 574f7ebba0b6..ac8342dcb55e 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -252,16 +252,16 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
252 if (set_h245_addr(skb, protoff, data, dataoff, taddr, 252 if (set_h245_addr(skb, protoff, data, dataoff, taddr,
253 &ct->tuplehash[!dir].tuple.dst.u3, 253 &ct->tuplehash[!dir].tuple.dst.u3,
254 htons((port & htons(1)) ? nated_port + 1 : 254 htons((port & htons(1)) ? nated_port + 1 :
255 nated_port)) == 0) { 255 nated_port))) {
256 /* Save ports */
257 info->rtp_port[i][dir] = rtp_port;
258 info->rtp_port[i][!dir] = htons(nated_port);
259 } else {
260 nf_ct_unexpect_related(rtp_exp); 256 nf_ct_unexpect_related(rtp_exp);
261 nf_ct_unexpect_related(rtcp_exp); 257 nf_ct_unexpect_related(rtcp_exp);
262 return -1; 258 return -1;
263 } 259 }
264 260
261 /* Save ports */
262 info->rtp_port[i][dir] = rtp_port;
263 info->rtp_port[i][!dir] = htons(nated_port);
264
265 /* Success */ 265 /* Success */
266 pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n", 266 pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n",
267 &rtp_exp->tuple.src.u3.ip, 267 &rtp_exp->tuple.src.u3.ip,
@@ -370,15 +370,15 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
370 /* Modify signal */ 370 /* Modify signal */
371 if (set_h225_addr(skb, protoff, data, dataoff, taddr, 371 if (set_h225_addr(skb, protoff, data, dataoff, taddr,
372 &ct->tuplehash[!dir].tuple.dst.u3, 372 &ct->tuplehash[!dir].tuple.dst.u3,
373 htons(nated_port)) == 0) { 373 htons(nated_port))) {
374 /* Save ports */
375 info->sig_port[dir] = port;
376 info->sig_port[!dir] = htons(nated_port);
377 } else {
378 nf_ct_unexpect_related(exp); 374 nf_ct_unexpect_related(exp);
379 return -1; 375 return -1;
380 } 376 }
381 377
378 /* Save ports */
379 info->sig_port[dir] = port;
380 info->sig_port[!dir] = htons(nated_port);
381
382 pr_debug("nf_nat_q931: expect H.245 %pI4:%hu->%pI4:%hu\n", 382 pr_debug("nf_nat_q931: expect H.245 %pI4:%hu->%pI4:%hu\n",
383 &exp->tuple.src.u3.ip, 383 &exp->tuple.src.u3.ip,
384 ntohs(exp->tuple.src.u.tcp.port), 384 ntohs(exp->tuple.src.u.tcp.port),
@@ -462,24 +462,27 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
462 /* Modify signal */ 462 /* Modify signal */
463 if (set_h225_addr(skb, protoff, data, 0, &taddr[idx], 463 if (set_h225_addr(skb, protoff, data, 0, &taddr[idx],
464 &ct->tuplehash[!dir].tuple.dst.u3, 464 &ct->tuplehash[!dir].tuple.dst.u3,
465 htons(nated_port)) == 0) { 465 htons(nated_port))) {
466 /* Save ports */
467 info->sig_port[dir] = port;
468 info->sig_port[!dir] = htons(nated_port);
469
470 /* Fix for Gnomemeeting */
471 if (idx > 0 &&
472 get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
473 (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
474 set_h225_addr(skb, protoff, data, 0, &taddr[0],
475 &ct->tuplehash[!dir].tuple.dst.u3,
476 info->sig_port[!dir]);
477 }
478 } else {
479 nf_ct_unexpect_related(exp); 466 nf_ct_unexpect_related(exp);
480 return -1; 467 return -1;
481 } 468 }
482 469
470 /* Save ports */
471 info->sig_port[dir] = port;
472 info->sig_port[!dir] = htons(nated_port);
473
474 /* Fix for Gnomemeeting */
475 if (idx > 0 &&
476 get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
477 (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
478 if (set_h225_addr(skb, protoff, data, 0, &taddr[0],
479 &ct->tuplehash[!dir].tuple.dst.u3,
480 info->sig_port[!dir])) {
481 nf_ct_unexpect_related(exp);
482 return -1;
483 }
484 }
485
483 /* Success */ 486 /* Success */
484 pr_debug("nf_nat_ras: expect Q.931 %pI4:%hu->%pI4:%hu\n", 487 pr_debug("nf_nat_ras: expect Q.931 %pI4:%hu->%pI4:%hu\n",
485 &exp->tuple.src.u3.ip, 488 &exp->tuple.src.u3.ip,
@@ -550,9 +553,9 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
550 } 553 }
551 554
552 /* Modify signal */ 555 /* Modify signal */
553 if (!set_h225_addr(skb, protoff, data, dataoff, taddr, 556 if (set_h225_addr(skb, protoff, data, dataoff, taddr,
554 &ct->tuplehash[!dir].tuple.dst.u3, 557 &ct->tuplehash[!dir].tuple.dst.u3,
555 htons(nated_port)) == 0) { 558 htons(nated_port))) {
556 nf_ct_unexpect_related(exp); 559 nf_ct_unexpect_related(exp);
557 return -1; 560 return -1;
558 } 561 }
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index c747b2d9eb77..d4acf38b60fd 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -124,6 +124,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
124 /* ip_route_me_harder expects skb->dst to be set */ 124 /* ip_route_me_harder expects skb->dst to be set */
125 skb_dst_set_noref(nskb, skb_dst(oldskb)); 125 skb_dst_set_noref(nskb, skb_dst(oldskb));
126 126
127 nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
128
127 skb_reserve(nskb, LL_MAX_HEADER); 129 skb_reserve(nskb, LL_MAX_HEADER);
128 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, 130 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
129 ip4_dst_hoplimit(skb_dst(nskb))); 131 ip4_dst_hoplimit(skb_dst(nskb)));
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 37a3b05d175c..82c878224bfc 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -777,8 +777,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
777 ipc.addr = faddr = daddr; 777 ipc.addr = faddr = daddr;
778 778
779 if (ipc.opt && ipc.opt->opt.srr) { 779 if (ipc.opt && ipc.opt->opt.srr) {
780 if (!daddr) 780 if (!daddr) {
781 return -EINVAL; 781 err = -EINVAL;
782 goto out_free;
783 }
782 faddr = ipc.opt->opt.faddr; 784 faddr = ipc.opt->opt.faddr;
783 } 785 }
784 tos = get_rttos(&ipc, inet); 786 tos = get_rttos(&ipc, inet);
@@ -843,6 +845,7 @@ back_from_confirm:
843 845
844out: 846out:
845 ip_rt_put(rt); 847 ip_rt_put(rt);
848out_free:
846 if (free) 849 if (free)
847 kfree(ipc.opt); 850 kfree(ipc.opt);
848 if (!err) { 851 if (!err) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 52d718e3f077..3251dede1815 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -126,10 +126,13 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
126static int ip_rt_error_cost __read_mostly = HZ; 126static int ip_rt_error_cost __read_mostly = HZ;
127static int ip_rt_error_burst __read_mostly = 5 * HZ; 127static int ip_rt_error_burst __read_mostly = 5 * HZ;
128static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; 128static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
129static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 129static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
130static int ip_rt_min_advmss __read_mostly = 256; 130static int ip_rt_min_advmss __read_mostly = 256;
131 131
132static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; 132static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
133
134static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
135
133/* 136/*
134 * Interface to generic destination cache. 137 * Interface to generic destination cache.
135 */ 138 */
@@ -609,6 +612,7 @@ static inline u32 fnhe_hashfun(__be32 daddr)
609static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) 612static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
610{ 613{
611 rt->rt_pmtu = fnhe->fnhe_pmtu; 614 rt->rt_pmtu = fnhe->fnhe_pmtu;
615 rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
612 rt->dst.expires = fnhe->fnhe_expires; 616 rt->dst.expires = fnhe->fnhe_expires;
613 617
614 if (fnhe->fnhe_gw) { 618 if (fnhe->fnhe_gw) {
@@ -619,7 +623,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
619} 623}
620 624
621static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, 625static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
622 u32 pmtu, unsigned long expires) 626 u32 pmtu, bool lock, unsigned long expires)
623{ 627{
624 struct fnhe_hash_bucket *hash; 628 struct fnhe_hash_bucket *hash;
625 struct fib_nh_exception *fnhe; 629 struct fib_nh_exception *fnhe;
@@ -656,8 +660,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
656 fnhe->fnhe_genid = genid; 660 fnhe->fnhe_genid = genid;
657 if (gw) 661 if (gw)
658 fnhe->fnhe_gw = gw; 662 fnhe->fnhe_gw = gw;
659 if (pmtu) 663 if (pmtu) {
660 fnhe->fnhe_pmtu = pmtu; 664 fnhe->fnhe_pmtu = pmtu;
665 fnhe->fnhe_mtu_locked = lock;
666 }
661 fnhe->fnhe_expires = max(1UL, expires); 667 fnhe->fnhe_expires = max(1UL, expires);
662 /* Update all cached dsts too */ 668 /* Update all cached dsts too */
663 rt = rcu_dereference(fnhe->fnhe_rth_input); 669 rt = rcu_dereference(fnhe->fnhe_rth_input);
@@ -681,6 +687,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
681 fnhe->fnhe_daddr = daddr; 687 fnhe->fnhe_daddr = daddr;
682 fnhe->fnhe_gw = gw; 688 fnhe->fnhe_gw = gw;
683 fnhe->fnhe_pmtu = pmtu; 689 fnhe->fnhe_pmtu = pmtu;
690 fnhe->fnhe_mtu_locked = lock;
684 fnhe->fnhe_expires = expires; 691 fnhe->fnhe_expires = expires;
685 692
686 /* Exception created; mark the cached routes for the nexthop 693 /* Exception created; mark the cached routes for the nexthop
@@ -762,7 +769,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
762 struct fib_nh *nh = &FIB_RES_NH(res); 769 struct fib_nh *nh = &FIB_RES_NH(res);
763 770
764 update_or_create_fnhe(nh, fl4->daddr, new_gw, 771 update_or_create_fnhe(nh, fl4->daddr, new_gw,
765 0, jiffies + ip_rt_gc_timeout); 772 0, false,
773 jiffies + ip_rt_gc_timeout);
766 } 774 }
767 if (kill_route) 775 if (kill_route)
768 rt->dst.obsolete = DST_OBSOLETE_KILL; 776 rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -974,15 +982,18 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
974{ 982{
975 struct dst_entry *dst = &rt->dst; 983 struct dst_entry *dst = &rt->dst;
976 struct fib_result res; 984 struct fib_result res;
985 bool lock = false;
977 986
978 if (dst_metric_locked(dst, RTAX_MTU)) 987 if (ip_mtu_locked(dst))
979 return; 988 return;
980 989
981 if (ipv4_mtu(dst) < mtu) 990 if (ipv4_mtu(dst) < mtu)
982 return; 991 return;
983 992
984 if (mtu < ip_rt_min_pmtu) 993 if (mtu < ip_rt_min_pmtu) {
994 lock = true;
985 mtu = ip_rt_min_pmtu; 995 mtu = ip_rt_min_pmtu;
996 }
986 997
987 if (rt->rt_pmtu == mtu && 998 if (rt->rt_pmtu == mtu &&
988 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) 999 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
@@ -992,7 +1003,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
992 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { 1003 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
993 struct fib_nh *nh = &FIB_RES_NH(res); 1004 struct fib_nh *nh = &FIB_RES_NH(res);
994 1005
995 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, 1006 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
996 jiffies + ip_rt_mtu_expires); 1007 jiffies + ip_rt_mtu_expires);
997 } 1008 }
998 rcu_read_unlock(); 1009 rcu_read_unlock();
@@ -1247,7 +1258,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
1247 1258
1248 mtu = READ_ONCE(dst->dev->mtu); 1259 mtu = READ_ONCE(dst->dev->mtu);
1249 1260
1250 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { 1261 if (unlikely(ip_mtu_locked(dst))) {
1251 if (rt->rt_uses_gateway && mtu > 576) 1262 if (rt->rt_uses_gateway && mtu > 576)
1252 mtu = 576; 1263 mtu = 576;
1253 } 1264 }
@@ -1470,6 +1481,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
1470 rt->rt_is_input = 0; 1481 rt->rt_is_input = 0;
1471 rt->rt_iif = 0; 1482 rt->rt_iif = 0;
1472 rt->rt_pmtu = 0; 1483 rt->rt_pmtu = 0;
1484 rt->rt_mtu_locked = 0;
1473 rt->rt_gateway = 0; 1485 rt->rt_gateway = 0;
1474 rt->rt_uses_gateway = 0; 1486 rt->rt_uses_gateway = 0;
1475 rt->rt_table_id = 0; 1487 rt->rt_table_id = 0;
@@ -2390,6 +2402,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2390 rt->rt_is_input = ort->rt_is_input; 2402 rt->rt_is_input = ort->rt_is_input;
2391 rt->rt_iif = ort->rt_iif; 2403 rt->rt_iif = ort->rt_iif;
2392 rt->rt_pmtu = ort->rt_pmtu; 2404 rt->rt_pmtu = ort->rt_pmtu;
2405 rt->rt_mtu_locked = ort->rt_mtu_locked;
2393 2406
2394 rt->rt_genid = rt_genid_ipv4(net); 2407 rt->rt_genid = rt_genid_ipv4(net);
2395 rt->rt_flags = ort->rt_flags; 2408 rt->rt_flags = ort->rt_flags;
@@ -2492,6 +2505,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
2492 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); 2505 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2493 if (rt->rt_pmtu && expires) 2506 if (rt->rt_pmtu && expires)
2494 metrics[RTAX_MTU - 1] = rt->rt_pmtu; 2507 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2508 if (rt->rt_mtu_locked && expires)
2509 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2495 if (rtnetlink_put_metrics(skb, metrics) < 0) 2510 if (rtnetlink_put_metrics(skb, metrics) < 0)
2496 goto nla_put_failure; 2511 goto nla_put_failure;
2497 2512
@@ -2765,7 +2780,8 @@ static struct ctl_table ipv4_route_table[] = {
2765 .data = &ip_rt_min_pmtu, 2780 .data = &ip_rt_min_pmtu,
2766 .maxlen = sizeof(int), 2781 .maxlen = sizeof(int),
2767 .mode = 0644, 2782 .mode = 0644,
2768 .proc_handler = proc_dointvec, 2783 .proc_handler = proc_dointvec_minmax,
2784 .extra1 = &ip_min_valid_pmtu,
2769 }, 2785 },
2770 { 2786 {
2771 .procname = "min_adv_mss", 2787 .procname = "min_adv_mss",
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 70fb352e317f..da90c74d12ef 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -141,8 +141,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
141 if (write && ret == 0) { 141 if (write && ret == 0) {
142 low = make_kgid(user_ns, urange[0]); 142 low = make_kgid(user_ns, urange[0]);
143 high = make_kgid(user_ns, urange[1]); 143 high = make_kgid(user_ns, urange[1]);
144 if (!gid_valid(low) || !gid_valid(high) || 144 if (!gid_valid(low) || !gid_valid(high))
145 (urange[1] < urange[0]) || gid_lt(high, low)) { 145 return -EINVAL;
146 if (urange[1] < urange[0] || gid_lt(high, low)) {
146 low = make_kgid(&init_user_ns, 1); 147 low = make_kgid(&init_user_ns, 1);
147 high = make_kgid(&init_user_ns, 0); 148 high = make_kgid(&init_user_ns, 0);
148 } 149 }
@@ -213,8 +214,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
213{ 214{
214 struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) }; 215 struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
215 struct tcp_fastopen_context *ctxt; 216 struct tcp_fastopen_context *ctxt;
216 int ret;
217 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */ 217 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
218 __le32 key[4];
219 int ret, i;
218 220
219 tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL); 221 tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
220 if (!tbl.data) 222 if (!tbl.data)
@@ -223,11 +225,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
223 rcu_read_lock(); 225 rcu_read_lock();
224 ctxt = rcu_dereference(tcp_fastopen_ctx); 226 ctxt = rcu_dereference(tcp_fastopen_ctx);
225 if (ctxt) 227 if (ctxt)
226 memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); 228 memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
227 else 229 else
228 memset(user_key, 0, sizeof(user_key)); 230 memset(key, 0, sizeof(key));
229 rcu_read_unlock(); 231 rcu_read_unlock();
230 232
233 for (i = 0; i < ARRAY_SIZE(key); i++)
234 user_key[i] = le32_to_cpu(key[i]);
235
231 snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", 236 snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
232 user_key[0], user_key[1], user_key[2], user_key[3]); 237 user_key[0], user_key[1], user_key[2], user_key[3]);
233 ret = proc_dostring(&tbl, write, buffer, lenp, ppos); 238 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
@@ -243,12 +248,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
243 * first invocation of tcp_fastopen_cookie_gen 248 * first invocation of tcp_fastopen_cookie_gen
244 */ 249 */
245 tcp_fastopen_init_key_once(false); 250 tcp_fastopen_init_key_once(false);
246 tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH); 251
252 for (i = 0; i < ARRAY_SIZE(user_key); i++)
253 key[i] = cpu_to_le32(user_key[i]);
254
255 tcp_fastopen_reset_cipher(key, TCP_FASTOPEN_KEY_LENGTH);
247 } 256 }
248 257
249bad_key: 258bad_key:
250 pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n", 259 pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
251 user_key[0], user_key[1], user_key[2], user_key[3], 260 user_key[0], user_key[1], user_key[2], user_key[3],
252 (char *)tbl.data, ret); 261 (char *)tbl.data, ret);
253 kfree(tbl.data); 262 kfree(tbl.data);
254 return ret; 263 return ret;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5597120c8ffd..a0f0a7db946b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1108 lock_sock(sk); 1108 lock_sock(sk);
1109 1109
1110 flags = msg->msg_flags; 1110 flags = msg->msg_flags;
1111 if (flags & MSG_FASTOPEN) { 1111 if ((flags & MSG_FASTOPEN) && !tp->repair) {
1112 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); 1112 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
1113 if (err == -EINPROGRESS && copied_syn > 0) 1113 if (err == -EINPROGRESS && copied_syn > 0)
1114 goto out; 1114 goto out;
@@ -2176,6 +2176,9 @@ adjudge_to_death:
2176 tcp_send_active_reset(sk, GFP_ATOMIC); 2176 tcp_send_active_reset(sk, GFP_ATOMIC);
2177 NET_INC_STATS_BH(sock_net(sk), 2177 NET_INC_STATS_BH(sock_net(sk),
2178 LINUX_MIB_TCPABORTONMEMORY); 2178 LINUX_MIB_TCPABORTONMEMORY);
2179 } else if (!check_net(sock_net(sk))) {
2180 /* Not possible to send reset; just close */
2181 tcp_set_state(sk, TCP_CLOSE);
2179 } 2182 }
2180 } 2183 }
2181 2184
@@ -2273,6 +2276,12 @@ int tcp_disconnect(struct sock *sk, int flags)
2273 2276
2274 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 2277 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2275 2278
2279 if (sk->sk_frag.page) {
2280 put_page(sk->sk_frag.page);
2281 sk->sk_frag.page = NULL;
2282 sk->sk_frag.offset = 0;
2283 }
2284
2276 sk->sk_error_report(sk); 2285 sk->sk_error_report(sk);
2277 return err; 2286 return err;
2278} 2287}
@@ -2441,7 +2450,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2441 case TCP_REPAIR_QUEUE: 2450 case TCP_REPAIR_QUEUE:
2442 if (!tp->repair) 2451 if (!tp->repair)
2443 err = -EPERM; 2452 err = -EPERM;
2444 else if (val < TCP_QUEUES_NR) 2453 else if ((unsigned int)val < TCP_QUEUES_NR)
2445 tp->repair_queue = val; 2454 tp->repair_queue = val;
2446 else 2455 else
2447 err = -EINVAL; 2456 err = -EINVAL;
@@ -2580,8 +2589,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2580 2589
2581#ifdef CONFIG_TCP_MD5SIG 2590#ifdef CONFIG_TCP_MD5SIG
2582 case TCP_MD5SIG: 2591 case TCP_MD5SIG:
2583 /* Read the IP->Key mappings from userspace */ 2592 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
2584 err = tp->af_specific->md5_parse(sk, optval, optlen); 2593 err = tp->af_specific->md5_parse(sk, optval, optlen);
2594 else
2595 err = -EINVAL;
2585 break; 2596 break;
2586#endif 2597#endif
2587 case TCP_USER_TIMEOUT: 2598 case TCP_USER_TIMEOUT:
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 55d7da1d2ce9..6300edf90e60 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -131,23 +131,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
131 struct dctcp *ca = inet_csk_ca(sk); 131 struct dctcp *ca = inet_csk_ca(sk);
132 struct tcp_sock *tp = tcp_sk(sk); 132 struct tcp_sock *tp = tcp_sk(sk);
133 133
134 /* State has changed from CE=0 to CE=1 and delayed 134 if (!ca->ce_state) {
135 * ACK has not sent yet. 135 /* State has changed from CE=0 to CE=1, force an immediate
136 */ 136 * ACK to reflect the new CE state. If an ACK was delayed,
137 if (!ca->ce_state && ca->delayed_ack_reserved) { 137 * send that first to reflect the prior CE state.
138 u32 tmp_rcv_nxt; 138 */
139 139 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
140 /* Save current rcv_nxt. */ 140 __tcp_send_ack(sk, ca->prior_rcv_nxt);
141 tmp_rcv_nxt = tp->rcv_nxt; 141 tcp_enter_quickack_mode(sk, 1);
142
143 /* Generate previous ack with CE=0. */
144 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
145 tp->rcv_nxt = ca->prior_rcv_nxt;
146
147 tcp_send_ack(sk);
148
149 /* Recover current rcv_nxt. */
150 tp->rcv_nxt = tmp_rcv_nxt;
151 } 142 }
152 143
153 ca->prior_rcv_nxt = tp->rcv_nxt; 144 ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -161,23 +152,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
161 struct dctcp *ca = inet_csk_ca(sk); 152 struct dctcp *ca = inet_csk_ca(sk);
162 struct tcp_sock *tp = tcp_sk(sk); 153 struct tcp_sock *tp = tcp_sk(sk);
163 154
164 /* State has changed from CE=1 to CE=0 and delayed 155 if (ca->ce_state) {
165 * ACK has not sent yet. 156 /* State has changed from CE=1 to CE=0, force an immediate
166 */ 157 * ACK to reflect the new CE state. If an ACK was delayed,
167 if (ca->ce_state && ca->delayed_ack_reserved) { 158 * send that first to reflect the prior CE state.
168 u32 tmp_rcv_nxt; 159 */
169 160 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
170 /* Save current rcv_nxt. */ 161 __tcp_send_ack(sk, ca->prior_rcv_nxt);
171 tmp_rcv_nxt = tp->rcv_nxt; 162 tcp_enter_quickack_mode(sk, 1);
172
173 /* Generate previous ack with CE=1. */
174 tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
175 tp->rcv_nxt = ca->prior_rcv_nxt;
176
177 tcp_send_ack(sk);
178
179 /* Recover current rcv_nxt. */
180 tp->rcv_nxt = tmp_rcv_nxt;
181 } 163 }
182 164
183 ca->prior_rcv_nxt = tp->rcv_nxt; 165 ca->prior_rcv_nxt = tp->rcv_nxt;
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 2ab9bbb6faff..5ed6a89894fd 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -6,7 +6,7 @@
6 * The algorithm is described in: 6 * The algorithm is described in:
7 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm 7 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
8 * for High-Speed Networks" 8 * for High-Speed Networks"
9 * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf 9 * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf
10 * 10 *
11 * Implemented from description in paper and ns-2 simulation. 11 * Implemented from description in paper and ns-2 simulation.
12 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org> 12 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 71290fb7d500..9c4c6cd0316e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -117,6 +117,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
117#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ 117#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
118#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ 118#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
119#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ 119#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
120#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */
120 121
121#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) 122#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
122#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) 123#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -175,24 +176,27 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
175 } 176 }
176} 177}
177 178
178static void tcp_incr_quickack(struct sock *sk) 179static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
179{ 180{
180 struct inet_connection_sock *icsk = inet_csk(sk); 181 struct inet_connection_sock *icsk = inet_csk(sk);
181 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); 182 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
182 183
183 if (quickacks == 0) 184 if (quickacks == 0)
184 quickacks = 2; 185 quickacks = 2;
186 quickacks = min(quickacks, max_quickacks);
185 if (quickacks > icsk->icsk_ack.quick) 187 if (quickacks > icsk->icsk_ack.quick)
186 icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); 188 icsk->icsk_ack.quick = quickacks;
187} 189}
188 190
189static void tcp_enter_quickack_mode(struct sock *sk) 191void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
190{ 192{
191 struct inet_connection_sock *icsk = inet_csk(sk); 193 struct inet_connection_sock *icsk = inet_csk(sk);
192 tcp_incr_quickack(sk); 194
195 tcp_incr_quickack(sk, max_quickacks);
193 icsk->icsk_ack.pingpong = 0; 196 icsk->icsk_ack.pingpong = 0;
194 icsk->icsk_ack.ato = TCP_ATO_MIN; 197 icsk->icsk_ack.ato = TCP_ATO_MIN;
195} 198}
199EXPORT_SYMBOL(tcp_enter_quickack_mode);
196 200
197/* Send ACKs quickly, if "quick" count is not exhausted 201/* Send ACKs quickly, if "quick" count is not exhausted
198 * and the session is not interactive. 202 * and the session is not interactive.
@@ -224,8 +228,10 @@ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
224 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; 228 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
225} 229}
226 230
227static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) 231static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
228{ 232{
233 struct tcp_sock *tp = tcp_sk(sk);
234
229 switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { 235 switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
230 case INET_ECN_NOT_ECT: 236 case INET_ECN_NOT_ECT:
231 /* Funny extension: if ECT is not set on a segment, 237 /* Funny extension: if ECT is not set on a segment,
@@ -233,31 +239,31 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
233 * it is probably a retransmit. 239 * it is probably a retransmit.
234 */ 240 */
235 if (tp->ecn_flags & TCP_ECN_SEEN) 241 if (tp->ecn_flags & TCP_ECN_SEEN)
236 tcp_enter_quickack_mode((struct sock *)tp); 242 tcp_enter_quickack_mode(sk, 2);
237 break; 243 break;
238 case INET_ECN_CE: 244 case INET_ECN_CE:
239 if (tcp_ca_needs_ecn((struct sock *)tp)) 245 if (tcp_ca_needs_ecn(sk))
240 tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE); 246 tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
241 247
242 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { 248 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
243 /* Better not delay acks, sender can have a very low cwnd */ 249 /* Better not delay acks, sender can have a very low cwnd */
244 tcp_enter_quickack_mode((struct sock *)tp); 250 tcp_enter_quickack_mode(sk, 2);
245 tp->ecn_flags |= TCP_ECN_DEMAND_CWR; 251 tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
246 } 252 }
247 tp->ecn_flags |= TCP_ECN_SEEN; 253 tp->ecn_flags |= TCP_ECN_SEEN;
248 break; 254 break;
249 default: 255 default:
250 if (tcp_ca_needs_ecn((struct sock *)tp)) 256 if (tcp_ca_needs_ecn(sk))
251 tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE); 257 tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
252 tp->ecn_flags |= TCP_ECN_SEEN; 258 tp->ecn_flags |= TCP_ECN_SEEN;
253 break; 259 break;
254 } 260 }
255} 261}
256 262
257static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) 263static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
258{ 264{
259 if (tp->ecn_flags & TCP_ECN_OK) 265 if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
260 __tcp_ecn_check_ce(tp, skb); 266 __tcp_ecn_check_ce(sk, skb);
261} 267}
262 268
263static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) 269static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
@@ -556,8 +562,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
556void tcp_rcv_space_adjust(struct sock *sk) 562void tcp_rcv_space_adjust(struct sock *sk)
557{ 563{
558 struct tcp_sock *tp = tcp_sk(sk); 564 struct tcp_sock *tp = tcp_sk(sk);
565 u32 copied;
559 int time; 566 int time;
560 int copied;
561 567
562 time = tcp_time_stamp - tp->rcvq_space.time; 568 time = tcp_time_stamp - tp->rcvq_space.time;
563 if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0) 569 if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
@@ -579,12 +585,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
579 585
580 if (sysctl_tcp_moderate_rcvbuf && 586 if (sysctl_tcp_moderate_rcvbuf &&
581 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 587 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
582 int rcvwin, rcvmem, rcvbuf; 588 int rcvmem, rcvbuf;
589 u64 rcvwin;
583 590
584 /* minimal window to cope with packet losses, assuming 591 /* minimal window to cope with packet losses, assuming
585 * steady state. Add some cushion because of small variations. 592 * steady state. Add some cushion because of small variations.
586 */ 593 */
587 rcvwin = (copied << 1) + 16 * tp->advmss; 594 rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
588 595
589 /* If rate increased by 25%, 596 /* If rate increased by 25%,
590 * assume slow start, rcvwin = 3 * copied 597 * assume slow start, rcvwin = 3 * copied
@@ -604,12 +611,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
604 while (tcp_win_from_space(rcvmem) < tp->advmss) 611 while (tcp_win_from_space(rcvmem) < tp->advmss)
605 rcvmem += 128; 612 rcvmem += 128;
606 613
607 rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]); 614 do_div(rcvwin, tp->advmss);
615 rcvbuf = min_t(u64, rcvwin * rcvmem, sysctl_tcp_rmem[2]);
608 if (rcvbuf > sk->sk_rcvbuf) { 616 if (rcvbuf > sk->sk_rcvbuf) {
609 sk->sk_rcvbuf = rcvbuf; 617 sk->sk_rcvbuf = rcvbuf;
610 618
611 /* Make the window clamp follow along. */ 619 /* Make the window clamp follow along. */
612 tp->window_clamp = rcvwin; 620 tp->window_clamp = tcp_win_from_space(rcvbuf);
613 } 621 }
614 } 622 }
615 tp->rcvq_space.space = copied; 623 tp->rcvq_space.space = copied;
@@ -647,7 +655,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
647 /* The _first_ data packet received, initialize 655 /* The _first_ data packet received, initialize
648 * delayed ACK engine. 656 * delayed ACK engine.
649 */ 657 */
650 tcp_incr_quickack(sk); 658 tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
651 icsk->icsk_ack.ato = TCP_ATO_MIN; 659 icsk->icsk_ack.ato = TCP_ATO_MIN;
652 } else { 660 } else {
653 int m = now - icsk->icsk_ack.lrcvtime; 661 int m = now - icsk->icsk_ack.lrcvtime;
@@ -663,13 +671,13 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
663 /* Too long gap. Apparently sender failed to 671 /* Too long gap. Apparently sender failed to
664 * restart window, so that we send ACKs quickly. 672 * restart window, so that we send ACKs quickly.
665 */ 673 */
666 tcp_incr_quickack(sk); 674 tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
667 sk_mem_reclaim(sk); 675 sk_mem_reclaim(sk);
668 } 676 }
669 } 677 }
670 icsk->icsk_ack.lrcvtime = now; 678 icsk->icsk_ack.lrcvtime = now;
671 679
672 tcp_ecn_check_ce(tp, skb); 680 tcp_ecn_check_ce(sk, skb);
673 681
674 if (skb->len >= 128) 682 if (skb->len >= 128)
675 tcp_grow_window(sk, skb); 683 tcp_grow_window(sk, skb);
@@ -3215,6 +3223,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3215 3223
3216 if (tcp_is_reno(tp)) { 3224 if (tcp_is_reno(tp)) {
3217 tcp_remove_reno_sacks(sk, pkts_acked); 3225 tcp_remove_reno_sacks(sk, pkts_acked);
3226
3227 /* If any of the cumulatively ACKed segments was
3228 * retransmitted, non-SACK case cannot confirm that
3229 * progress was due to original transmission due to
3230 * lack of TCPCB_SACKED_ACKED bits even if some of
3231 * the packets may have been never retransmitted.
3232 */
3233 if (flag & FLAG_RETRANS_DATA_ACKED)
3234 flag &= ~FLAG_ORIG_SACK_ACKED;
3218 } else { 3235 } else {
3219 int delta; 3236 int delta;
3220 3237
@@ -3543,7 +3560,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3543 if (before(ack, prior_snd_una)) { 3560 if (before(ack, prior_snd_una)) {
3544 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ 3561 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
3545 if (before(ack, prior_snd_una - tp->max_window)) { 3562 if (before(ack, prior_snd_una - tp->max_window)) {
3546 tcp_send_challenge_ack(sk, skb); 3563 if (!(flag & FLAG_NO_CHALLENGE_ACK))
3564 tcp_send_challenge_ack(sk, skb);
3547 return -1; 3565 return -1;
3548 } 3566 }
3549 goto old_ack; 3567 goto old_ack;
@@ -3867,11 +3885,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
3867 int length = (th->doff << 2) - sizeof(*th); 3885 int length = (th->doff << 2) - sizeof(*th);
3868 const u8 *ptr = (const u8 *)(th + 1); 3886 const u8 *ptr = (const u8 *)(th + 1);
3869 3887
3870 /* If the TCP option is too short, we can short cut */ 3888 /* If not enough data remaining, we can short cut */
3871 if (length < TCPOLEN_MD5SIG) 3889 while (length >= TCPOLEN_MD5SIG) {
3872 return NULL;
3873
3874 while (length > 0) {
3875 int opcode = *ptr++; 3890 int opcode = *ptr++;
3876 int opsize; 3891 int opsize;
3877 3892
@@ -4125,7 +4140,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
4125 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 4140 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
4126 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4141 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4127 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4142 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
4128 tcp_enter_quickack_mode(sk); 4143 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
4129 4144
4130 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 4145 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
4131 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4146 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -4353,7 +4368,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4353 struct sk_buff *skb1; 4368 struct sk_buff *skb1;
4354 u32 seq, end_seq; 4369 u32 seq, end_seq;
4355 4370
4356 tcp_ecn_check_ce(tp, skb); 4371 tcp_ecn_check_ce(sk, skb);
4357 4372
4358 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { 4373 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
4359 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); 4374 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
@@ -4627,7 +4642,7 @@ queue_and_out:
4627 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4642 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
4628 4643
4629out_of_window: 4644out_of_window:
4630 tcp_enter_quickack_mode(sk); 4645 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
4631 inet_csk_schedule_ack(sk); 4646 inet_csk_schedule_ack(sk);
4632drop: 4647drop:
4633 __kfree_skb(skb); 4648 __kfree_skb(skb);
@@ -4638,8 +4653,6 @@ drop:
4638 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 4653 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
4639 goto out_of_window; 4654 goto out_of_window;
4640 4655
4641 tcp_enter_quickack_mode(sk);
4642
4643 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4656 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4644 /* Partial packet, seq < rcv_next < end_seq */ 4657 /* Partial packet, seq < rcv_next < end_seq */
4645 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", 4658 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
@@ -4778,6 +4791,7 @@ restart:
4778static void tcp_collapse_ofo_queue(struct sock *sk) 4791static void tcp_collapse_ofo_queue(struct sock *sk)
4779{ 4792{
4780 struct tcp_sock *tp = tcp_sk(sk); 4793 struct tcp_sock *tp = tcp_sk(sk);
4794 u32 range_truesize, sum_tiny = 0;
4781 struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); 4795 struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
4782 struct sk_buff *head; 4796 struct sk_buff *head;
4783 u32 start, end; 4797 u32 start, end;
@@ -4787,6 +4801,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
4787 4801
4788 start = TCP_SKB_CB(skb)->seq; 4802 start = TCP_SKB_CB(skb)->seq;
4789 end = TCP_SKB_CB(skb)->end_seq; 4803 end = TCP_SKB_CB(skb)->end_seq;
4804 range_truesize = skb->truesize;
4790 head = skb; 4805 head = skb;
4791 4806
4792 for (;;) { 4807 for (;;) {
@@ -4801,15 +4816,26 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
4801 if (!skb || 4816 if (!skb ||
4802 after(TCP_SKB_CB(skb)->seq, end) || 4817 after(TCP_SKB_CB(skb)->seq, end) ||
4803 before(TCP_SKB_CB(skb)->end_seq, start)) { 4818 before(TCP_SKB_CB(skb)->end_seq, start)) {
4804 tcp_collapse(sk, &tp->out_of_order_queue, 4819 /* Do not attempt collapsing tiny skbs */
4805 head, skb, start, end); 4820 if (range_truesize != head->truesize ||
4821 end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
4822 tcp_collapse(sk, &tp->out_of_order_queue,
4823 head, skb, start, end);
4824 } else {
4825 sum_tiny += range_truesize;
4826 if (sum_tiny > sk->sk_rcvbuf >> 3)
4827 return;
4828 }
4829
4806 head = skb; 4830 head = skb;
4807 if (!skb) 4831 if (!skb)
4808 break; 4832 break;
4809 /* Start new segment */ 4833 /* Start new segment */
4810 start = TCP_SKB_CB(skb)->seq; 4834 start = TCP_SKB_CB(skb)->seq;
4811 end = TCP_SKB_CB(skb)->end_seq; 4835 end = TCP_SKB_CB(skb)->end_seq;
4836 range_truesize = skb->truesize;
4812 } else { 4837 } else {
4838 range_truesize += skb->truesize;
4813 if (before(TCP_SKB_CB(skb)->seq, start)) 4839 if (before(TCP_SKB_CB(skb)->seq, start))
4814 start = TCP_SKB_CB(skb)->seq; 4840 start = TCP_SKB_CB(skb)->seq;
4815 if (after(TCP_SKB_CB(skb)->end_seq, end)) 4841 if (after(TCP_SKB_CB(skb)->end_seq, end))
@@ -4864,6 +4890,9 @@ static int tcp_prune_queue(struct sock *sk)
4864 else if (tcp_under_memory_pressure(sk)) 4890 else if (tcp_under_memory_pressure(sk))
4865 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 4891 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
4866 4892
4893 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
4894 return 0;
4895
4867 tcp_collapse_ofo_queue(sk); 4896 tcp_collapse_ofo_queue(sk);
4868 if (!skb_queue_empty(&sk->sk_receive_queue)) 4897 if (!skb_queue_empty(&sk->sk_receive_queue))
4869 tcp_collapse(sk, &sk->sk_receive_queue, 4898 tcp_collapse(sk, &sk->sk_receive_queue,
@@ -5464,10 +5493,6 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
5464 else 5493 else
5465 tp->pred_flags = 0; 5494 tp->pred_flags = 0;
5466 5495
5467 if (!sock_flag(sk, SOCK_DEAD)) {
5468 sk->sk_state_change(sk);
5469 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
5470 }
5471} 5496}
5472 5497
5473static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, 5498static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
@@ -5531,6 +5556,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5531 struct tcp_sock *tp = tcp_sk(sk); 5556 struct tcp_sock *tp = tcp_sk(sk);
5532 struct tcp_fastopen_cookie foc = { .len = -1 }; 5557 struct tcp_fastopen_cookie foc = { .len = -1 };
5533 int saved_clamp = tp->rx_opt.mss_clamp; 5558 int saved_clamp = tp->rx_opt.mss_clamp;
5559 bool fastopen_fail;
5534 5560
5535 tcp_parse_options(skb, &tp->rx_opt, 0, &foc); 5561 tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
5536 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 5562 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
@@ -5633,10 +5659,15 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5633 5659
5634 tcp_finish_connect(sk, skb); 5660 tcp_finish_connect(sk, skb);
5635 5661
5636 if ((tp->syn_fastopen || tp->syn_data) && 5662 fastopen_fail = (tp->syn_fastopen || tp->syn_data) &&
5637 tcp_rcv_fastopen_synack(sk, skb, &foc)) 5663 tcp_rcv_fastopen_synack(sk, skb, &foc);
5638 return -1;
5639 5664
5665 if (!sock_flag(sk, SOCK_DEAD)) {
5666 sk->sk_state_change(sk);
5667 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
5668 }
5669 if (fastopen_fail)
5670 return -1;
5640 if (sk->sk_write_pending || 5671 if (sk->sk_write_pending ||
5641 icsk->icsk_accept_queue.rskq_defer_accept || 5672 icsk->icsk_accept_queue.rskq_defer_accept ||
5642 icsk->icsk_ack.pingpong) { 5673 icsk->icsk_ack.pingpong) {
@@ -5648,7 +5679,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5648 * to stand against the temptation 8) --ANK 5679 * to stand against the temptation 8) --ANK
5649 */ 5680 */
5650 inet_csk_schedule_ack(sk); 5681 inet_csk_schedule_ack(sk);
5651 tcp_enter_quickack_mode(sk); 5682 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
5652 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5683 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
5653 TCP_DELACK_MAX, TCP_RTO_MAX); 5684 TCP_DELACK_MAX, TCP_RTO_MAX);
5654 5685
@@ -5830,13 +5861,17 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
5830 5861
5831 /* step 5: check the ACK field */ 5862 /* step 5: check the ACK field */
5832 acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | 5863 acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
5833 FLAG_UPDATE_TS_RECENT) > 0; 5864 FLAG_UPDATE_TS_RECENT |
5865 FLAG_NO_CHALLENGE_ACK) > 0;
5834 5866
5867 if (!acceptable) {
5868 if (sk->sk_state == TCP_SYN_RECV)
5869 return 1; /* send one RST */
5870 tcp_send_challenge_ack(sk, skb);
5871 goto discard;
5872 }
5835 switch (sk->sk_state) { 5873 switch (sk->sk_state) {
5836 case TCP_SYN_RECV: 5874 case TCP_SYN_RECV:
5837 if (!acceptable)
5838 return 1;
5839
5840 if (!tp->srtt_us) 5875 if (!tp->srtt_us)
5841 tcp_synack_rtt_meas(sk, req); 5876 tcp_synack_rtt_meas(sk, req);
5842 5877
@@ -5905,14 +5940,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
5905 * our SYNACK so stop the SYNACK timer. 5940 * our SYNACK so stop the SYNACK timer.
5906 */ 5941 */
5907 if (req) { 5942 if (req) {
5908 /* Return RST if ack_seq is invalid.
5909 * Note that RFC793 only says to generate a
5910 * DUPACK for it but for TCP Fast Open it seems
5911 * better to treat this case like TCP_SYN_RECV
5912 * above.
5913 */
5914 if (!acceptable)
5915 return 1;
5916 /* We no longer need the request sock. */ 5943 /* We no longer need the request sock. */
5917 reqsk_fastopen_remove(sk, req, false); 5944 reqsk_fastopen_remove(sk, req, false);
5918 tcp_rearm_rto(sk); 5945 tcp_rearm_rto(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 61c93a93f228..eeda67c3dd11 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1627,6 +1627,10 @@ process:
1627 reqsk_put(req); 1627 reqsk_put(req);
1628 goto discard_it; 1628 goto discard_it;
1629 } 1629 }
1630 if (tcp_checksum_complete(skb)) {
1631 reqsk_put(req);
1632 goto csum_error;
1633 }
1630 if (unlikely(sk->sk_state != TCP_LISTEN)) { 1634 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1631 inet_csk_reqsk_queue_drop_and_put(sk, req); 1635 inet_csk_reqsk_queue_drop_and_put(sk, req);
1632 goto lookup; 1636 goto lookup;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 39c2919fe0d3..6fa749ce231f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -177,8 +177,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
177} 177}
178 178
179/* Account for an ACK we sent. */ 179/* Account for an ACK we sent. */
180static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) 180static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
181 u32 rcv_nxt)
181{ 182{
183 struct tcp_sock *tp = tcp_sk(sk);
184
185 if (unlikely(rcv_nxt != tp->rcv_nxt))
186 return; /* Special ACK sent by DCTCP to reflect ECN */
182 tcp_dec_quickack_mode(sk, pkts); 187 tcp_dec_quickack_mode(sk, pkts);
183 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); 188 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
184} 189}
@@ -901,8 +906,8 @@ out:
901 * We are working here with either a clone of the original 906 * We are working here with either a clone of the original
902 * SKB, or a fresh unique copy made by the retransmit engine. 907 * SKB, or a fresh unique copy made by the retransmit engine.
903 */ 908 */
904static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, 909static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
905 gfp_t gfp_mask) 910 int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
906{ 911{
907 const struct inet_connection_sock *icsk = inet_csk(sk); 912 const struct inet_connection_sock *icsk = inet_csk(sk);
908 struct inet_sock *inet; 913 struct inet_sock *inet;
@@ -962,7 +967,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
962 th->source = inet->inet_sport; 967 th->source = inet->inet_sport;
963 th->dest = inet->inet_dport; 968 th->dest = inet->inet_dport;
964 th->seq = htonl(tcb->seq); 969 th->seq = htonl(tcb->seq);
965 th->ack_seq = htonl(tp->rcv_nxt); 970 th->ack_seq = htonl(rcv_nxt);
966 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 971 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
967 tcb->tcp_flags); 972 tcb->tcp_flags);
968 973
@@ -1005,7 +1010,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1005 icsk->icsk_af_ops->send_check(sk, skb); 1010 icsk->icsk_af_ops->send_check(sk, skb);
1006 1011
1007 if (likely(tcb->tcp_flags & TCPHDR_ACK)) 1012 if (likely(tcb->tcp_flags & TCPHDR_ACK))
1008 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 1013 tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
1009 1014
1010 if (skb->len != tcp_header_size) 1015 if (skb->len != tcp_header_size)
1011 tcp_event_data_sent(tp, sk); 1016 tcp_event_data_sent(tp, sk);
@@ -1036,6 +1041,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1036 return net_xmit_eval(err); 1041 return net_xmit_eval(err);
1037} 1042}
1038 1043
1044static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1045 gfp_t gfp_mask)
1046{
1047 return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
1048 tcp_sk(sk)->rcv_nxt);
1049}
1050
1039/* This routine just queues the buffer for sending. 1051/* This routine just queues the buffer for sending.
1040 * 1052 *
1041 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, 1053 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -2587,8 +2599,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2587 return -EBUSY; 2599 return -EBUSY;
2588 2600
2589 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 2601 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2590 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 2602 if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
2591 BUG(); 2603 WARN_ON_ONCE(1);
2604 return -EINVAL;
2605 }
2592 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 2606 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
2593 return -ENOMEM; 2607 return -ENOMEM;
2594 } 2608 }
@@ -3117,6 +3131,7 @@ static void tcp_connect_init(struct sock *sk)
3117 sock_reset_flag(sk, SOCK_DONE); 3131 sock_reset_flag(sk, SOCK_DONE);
3118 tp->snd_wnd = 0; 3132 tp->snd_wnd = 0;
3119 tcp_init_wl(tp, 0); 3133 tcp_init_wl(tp, 0);
3134 tcp_write_queue_purge(sk);
3120 tp->snd_una = tp->write_seq; 3135 tp->snd_una = tp->write_seq;
3121 tp->snd_sml = tp->write_seq; 3136 tp->snd_sml = tp->write_seq;
3122 tp->snd_up = tp->write_seq; 3137 tp->snd_up = tp->write_seq;
@@ -3351,7 +3366,7 @@ void tcp_send_delayed_ack(struct sock *sk)
3351} 3366}
3352 3367
3353/* This routine sends an ack and also updates the window. */ 3368/* This routine sends an ack and also updates the window. */
3354void tcp_send_ack(struct sock *sk) 3369void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
3355{ 3370{
3356 struct sk_buff *buff; 3371 struct sk_buff *buff;
3357 3372
@@ -3388,9 +3403,14 @@ void tcp_send_ack(struct sock *sk)
3388 3403
3389 /* Send it off, this clears delayed acks for us. */ 3404 /* Send it off, this clears delayed acks for us. */
3390 skb_mstamp_get(&buff->skb_mstamp); 3405 skb_mstamp_get(&buff->skb_mstamp);
3391 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); 3406 __tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC), rcv_nxt);
3407}
3408EXPORT_SYMBOL_GPL(__tcp_send_ack);
3409
3410void tcp_send_ack(struct sock *sk)
3411{
3412 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
3392} 3413}
3393EXPORT_SYMBOL_GPL(tcp_send_ack);
3394 3414
3395/* This routine sends a packet with an out of date sequence 3415/* This routine sends a packet with an out of date sequence
3396 * number. It assumes the other end will try to ack it. 3416 * number. It assumes the other end will try to ack it.
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 1ec12a4f327e..35f638cfc675 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -46,11 +46,19 @@ static void tcp_write_err(struct sock *sk)
46 * to prevent DoS attacks. It is called when a retransmission timeout 46 * to prevent DoS attacks. It is called when a retransmission timeout
47 * or zero probe timeout occurs on orphaned socket. 47 * or zero probe timeout occurs on orphaned socket.
48 * 48 *
49 * Also close if our net namespace is exiting; in that case there is no
50 * hope of ever communicating again since all netns interfaces are already
51 * down (or about to be down), and we need to release our dst references,
52 * which have been moved to the netns loopback interface, so the namespace
53 * can finish exiting. This condition is only possible if we are a kernel
54 * socket, as those do not hold references to the namespace.
55 *
49 * Criteria is still not confirmed experimentally and may change. 56 * Criteria is still not confirmed experimentally and may change.
50 * We kill the socket, if: 57 * We kill the socket, if:
51 * 1. If number of orphaned sockets exceeds an administratively configured 58 * 1. If number of orphaned sockets exceeds an administratively configured
52 * limit. 59 * limit.
53 * 2. If we have strong memory pressure. 60 * 2. If we have strong memory pressure.
61 * 3. If our net namespace is exiting.
54 */ 62 */
55static int tcp_out_of_resources(struct sock *sk, bool do_reset) 63static int tcp_out_of_resources(struct sock *sk, bool do_reset)
56{ 64{
@@ -79,6 +87,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
79 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); 87 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
80 return 1; 88 return 1;
81 } 89 }
90
91 if (!check_net(sock_net(sk))) {
92 /* Not possible to send reset; just close */
93 tcp_done(sk);
94 return 1;
95 }
96
82 return 0; 97 return 0;
83} 98}
84 99
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 301e60829c7e..6f929689fd03 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -991,8 +991,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
991 ipc.addr = faddr = daddr; 991 ipc.addr = faddr = daddr;
992 992
993 if (ipc.opt && ipc.opt->opt.srr) { 993 if (ipc.opt && ipc.opt->opt.srr) {
994 if (!daddr) 994 if (!daddr) {
995 return -EINVAL; 995 err = -EINVAL;
996 goto out_free;
997 }
996 faddr = ipc.opt->opt.faddr; 998 faddr = ipc.opt->opt.faddr;
997 connected = 0; 999 connected = 0;
998 } 1000 }
@@ -1105,6 +1107,7 @@ do_append_data:
1105 1107
1106out: 1108out:
1107 ip_rt_put(rt); 1109 ip_rt_put(rt);
1110out_free:
1108 if (free) 1111 if (free)
1109 kfree(ipc.opt); 1112 kfree(ipc.opt);
1110 if (!err) 1113 if (!err)
@@ -1744,6 +1747,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
1744 err = udplite_checksum_init(skb, uh); 1747 err = udplite_checksum_init(skb, uh);
1745 if (err) 1748 if (err)
1746 return err; 1749 return err;
1750
1751 if (UDP_SKB_CB(skb)->partial_cov) {
1752 skb->csum = inet_compute_pseudo(skb, proto);
1753 return 0;
1754 }
1747 } 1755 }
1748 1756
1749 return skb_checksum_init_zero_check(skb, proto, uh->check, 1757 return skb_checksum_init_zero_check(skb, proto, uh->check,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 7b0edb37a115..fddae0164b91 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -97,6 +97,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
97 xdst->u.rt.rt_gateway = rt->rt_gateway; 97 xdst->u.rt.rt_gateway = rt->rt_gateway;
98 xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; 98 xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
99 xdst->u.rt.rt_pmtu = rt->rt_pmtu; 99 xdst->u.rt.rt_pmtu = rt->rt_pmtu;
100 xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
100 xdst->u.rt.rt_table_id = rt->rt_table_id; 101 xdst->u.rt.rt_table_id = rt->rt_table_id;
101 INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); 102 INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
102 103
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 983bb999738c..0f50248bad17 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -69,6 +69,7 @@ config INET6_ESP
69 select CRYPTO_CBC 69 select CRYPTO_CBC
70 select CRYPTO_SHA1 70 select CRYPTO_SHA1
71 select CRYPTO_DES 71 select CRYPTO_DES
72 select CRYPTO_ECHAINIV
72 ---help--- 73 ---help---
73 Support for IPsec ESP. 74 Support for IPsec ESP.
74 75
@@ -205,6 +206,7 @@ config IPV6_NDISC_NODETYPE
205config IPV6_TUNNEL 206config IPV6_TUNNEL
206 tristate "IPv6: IP-in-IPv6 tunnel (RFC2473)" 207 tristate "IPv6: IP-in-IPv6 tunnel (RFC2473)"
207 select INET6_TUNNEL 208 select INET6_TUNNEL
209 select DST_CACHE
208 ---help--- 210 ---help---
209 Support for IPv6-in-IPv6 and IPv4-in-IPv6 tunnels described in 211 Support for IPv6-in-IPv6 and IPv4-in-IPv6 tunnels described in
210 RFC 2473. 212 RFC 2473.
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 92174881844d..0613be57513e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -957,7 +957,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
957 INIT_HLIST_NODE(&ifa->addr_lst); 957 INIT_HLIST_NODE(&ifa->addr_lst);
958 ifa->scope = scope; 958 ifa->scope = scope;
959 ifa->prefix_len = pfxlen; 959 ifa->prefix_len = pfxlen;
960 ifa->flags = flags | IFA_F_TENTATIVE; 960 ifa->flags = flags;
961 /* No need to add the TENTATIVE flag for addresses with NODAD */
962 if (!(flags & IFA_F_NODAD))
963 ifa->flags |= IFA_F_TENTATIVE;
961 ifa->valid_lft = valid_lft; 964 ifa->valid_lft = valid_lft;
962 ifa->prefered_lft = prefered_lft; 965 ifa->prefered_lft = prefered_lft;
963 ifa->cstamp = ifa->tstamp = jiffies; 966 ifa->cstamp = ifa->tstamp = jiffies;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 0630a4d5daaa..0edc44cb254e 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -423,7 +423,9 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
423 ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); 423 ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
424 424
425 sg_init_table(sg, nfrags + sglists); 425 sg_init_table(sg, nfrags + sglists);
426 skb_to_sgvec_nomark(skb, sg, 0, skb->len); 426 err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
427 if (unlikely(err < 0))
428 goto out_free;
427 429
428 if (x->props.flags & XFRM_STATE_ESN) { 430 if (x->props.flags & XFRM_STATE_ESN) {
429 /* Attach seqhi sg right after packet payload */ 431 /* Attach seqhi sg right after packet payload */
@@ -603,7 +605,9 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
603 ip6h->hop_limit = 0; 605 ip6h->hop_limit = 0;
604 606
605 sg_init_table(sg, nfrags + sglists); 607 sg_init_table(sg, nfrags + sglists);
606 skb_to_sgvec_nomark(skb, sg, 0, skb->len); 608 err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
609 if (unlikely(err < 0))
610 goto out_free;
607 611
608 if (x->props.flags & XFRM_STATE_ESN) { 612 if (x->props.flags & XFRM_STATE_ESN) {
609 /* Attach seqhi sg right after packet payload */ 613 /* Attach seqhi sg right after packet payload */
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index cae37bfd12ab..9f6e57ded338 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -657,13 +657,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
657 } 657 }
658 if (np->rxopt.bits.rxorigdstaddr) { 658 if (np->rxopt.bits.rxorigdstaddr) {
659 struct sockaddr_in6 sin6; 659 struct sockaddr_in6 sin6;
660 __be16 *ports = (__be16 *) skb_transport_header(skb); 660 __be16 *ports;
661 int end;
661 662
662 if (skb_transport_offset(skb) + 4 <= skb->len) { 663 end = skb_transport_offset(skb) + 4;
664 if (end <= 0 || pskb_may_pull(skb, end)) {
663 /* All current transport protocols have the port numbers in the 665 /* All current transport protocols have the port numbers in the
664 * first four bytes of the transport header and this function is 666 * first four bytes of the transport header and this function is
665 * written with this assumption in mind. 667 * written with this assumption in mind.
666 */ 668 */
669 ports = (__be16 *)skb_transport_header(skb);
667 670
668 sin6.sin6_family = AF_INET6; 671 sin6.sin6_family = AF_INET6;
669 sin6.sin6_addr = ipv6_hdr(skb)->daddr; 672 sin6.sin6_addr = ipv6_hdr(skb)->daddr;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 111ba55fd512..6a924be66e37 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -248,9 +248,11 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
248 esph->spi = x->id.spi; 248 esph->spi = x->id.spi;
249 249
250 sg_init_table(sg, nfrags); 250 sg_init_table(sg, nfrags);
251 skb_to_sgvec(skb, sg, 251 err = skb_to_sgvec(skb, sg,
252 (unsigned char *)esph - skb->data, 252 (unsigned char *)esph - skb->data,
253 assoclen + ivlen + clen + alen); 253 assoclen + ivlen + clen + alen);
254 if (unlikely(err < 0))
255 goto error;
254 256
255 aead_request_set_crypt(req, sg, sg, ivlen + clen, iv); 257 aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
256 aead_request_set_ad(req, assoclen); 258 aead_request_set_ad(req, assoclen);
@@ -423,7 +425,9 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
423 } 425 }
424 426
425 sg_init_table(sg, nfrags); 427 sg_init_table(sg, nfrags);
426 skb_to_sgvec(skb, sg, 0, skb->len); 428 ret = skb_to_sgvec(skb, sg, 0, skb->len);
429 if (unlikely(ret < 0))
430 goto out;
427 431
428 aead_request_set_crypt(req, sg, sg, elen + ivlen, iv); 432 aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
429 aead_request_set_ad(req, assoclen); 433 aead_request_set_ad(req, assoclen);
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index 9a4d7322fb22..391a8fedb27e 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
73 err = udplite_checksum_init(skb, uh); 73 err = udplite_checksum_init(skb, uh);
74 if (err) 74 if (err)
75 return err; 75 return err;
76
77 if (UDP_SKB_CB(skb)->partial_cov) {
78 skb->csum = ip6_compute_pseudo(skb, proto);
79 return 0;
80 }
76 } 81 }
77 82
78 /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels) 83 /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c878cbf65485..b25f4ad28b03 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -320,11 +320,13 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
320 if (t || !create) 320 if (t || !create)
321 return t; 321 return t;
322 322
323 if (parms->name[0]) 323 if (parms->name[0]) {
324 if (!dev_valid_name(parms->name))
325 return NULL;
324 strlcpy(name, parms->name, IFNAMSIZ); 326 strlcpy(name, parms->name, IFNAMSIZ);
325 else 327 } else {
326 strcpy(name, "ip6gre%d"); 328 strcpy(name, "ip6gre%d");
327 329 }
328 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, 330 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
329 ip6gre_tunnel_setup); 331 ip6gre_tunnel_setup);
330 if (!dev) 332 if (!dev)
@@ -362,7 +364,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
362 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); 364 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
363 365
364 ip6gre_tunnel_unlink(ign, t); 366 ip6gre_tunnel_unlink(ign, t);
365 ip6_tnl_dst_reset(t); 367 dst_cache_reset(&t->dst_cache);
366 dev_put(dev); 368 dev_put(dev);
367} 369}
368 370
@@ -640,7 +642,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
640 } 642 }
641 643
642 if (!fl6->flowi6_mark) 644 if (!fl6->flowi6_mark)
643 dst = ip6_tnl_dst_get(tunnel); 645 dst = dst_cache_get(&tunnel->dst_cache);
644 646
645 if (!dst) { 647 if (!dst) {
646 dst = ip6_route_output(net, NULL, fl6); 648 dst = ip6_route_output(net, NULL, fl6);
@@ -709,7 +711,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
709 } 711 }
710 712
711 if (!fl6->flowi6_mark && ndst) 713 if (!fl6->flowi6_mark && ndst)
712 ip6_tnl_dst_set(tunnel, ndst); 714 dst_cache_set_ip6(&tunnel->dst_cache, ndst, &fl6->saddr);
713 skb_dst_set(skb, dst); 715 skb_dst_set(skb, dst);
714 716
715 proto = NEXTHDR_GRE; 717 proto = NEXTHDR_GRE;
@@ -1017,7 +1019,7 @@ static int ip6gre_tnl_change(struct ip6_tnl *t,
1017 t->parms.o_key = p->o_key; 1019 t->parms.o_key = p->o_key;
1018 t->parms.i_flags = p->i_flags; 1020 t->parms.i_flags = p->i_flags;
1019 t->parms.o_flags = p->o_flags; 1021 t->parms.o_flags = p->o_flags;
1020 ip6_tnl_dst_reset(t); 1022 dst_cache_reset(&t->dst_cache);
1021 ip6gre_tnl_link_config(t, set_mtu); 1023 ip6gre_tnl_link_config(t, set_mtu);
1022 return 0; 1024 return 0;
1023} 1025}
@@ -1228,7 +1230,7 @@ static void ip6gre_dev_free(struct net_device *dev)
1228{ 1230{
1229 struct ip6_tnl *t = netdev_priv(dev); 1231 struct ip6_tnl *t = netdev_priv(dev);
1230 1232
1231 ip6_tnl_dst_destroy(t); 1233 dst_cache_destroy(&t->dst_cache);
1232 free_percpu(dev->tstats); 1234 free_percpu(dev->tstats);
1233 free_netdev(dev); 1235 free_netdev(dev);
1234} 1236}
@@ -1266,7 +1268,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
1266 if (!dev->tstats) 1268 if (!dev->tstats)
1267 return -ENOMEM; 1269 return -ENOMEM;
1268 1270
1269 ret = ip6_tnl_dst_init(tunnel); 1271 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1270 if (ret) { 1272 if (ret) {
1271 free_percpu(dev->tstats); 1273 free_percpu(dev->tstats);
1272 dev->tstats = NULL; 1274 dev->tstats = NULL;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index b809958f7388..0feede45bd28 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -148,7 +148,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
148 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); 148 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
149} 149}
150 150
151static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np) 151bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
152{ 152{
153 if (!np->autoflowlabel_set) 153 if (!np->autoflowlabel_set)
154 return ip6_default_np_autolabel(net); 154 return ip6_default_np_autolabel(net);
@@ -340,6 +340,10 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
340static inline int ip6_forward_finish(struct net *net, struct sock *sk, 340static inline int ip6_forward_finish(struct net *net, struct sock *sk,
341 struct sk_buff *skb) 341 struct sk_buff *skb)
342{ 342{
343 struct dst_entry *dst = skb_dst(skb);
344
345 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
346 IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
343 skb_sender_cpu_clear(skb); 347 skb_sender_cpu_clear(skb);
344 return dst_output(net, sk, skb); 348 return dst_output(net, sk, skb);
345} 349}
@@ -534,8 +538,6 @@ int ip6_forward(struct sk_buff *skb)
534 538
535 hdr->hop_limit--; 539 hdr->hop_limit--;
536 540
537 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
538 IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
539 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, 541 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
540 net, NULL, skb, skb->dev, dst->dev, 542 net, NULL, skb, skb->dev, dst->dev,
541 ip6_forward_finish); 543 ip6_forward_finish);
@@ -557,6 +559,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
557 to->dev = from->dev; 559 to->dev = from->dev;
558 to->mark = from->mark; 560 to->mark = from->mark;
559 561
562 skb_copy_hash(to, from);
563
560#ifdef CONFIG_NET_SCHED 564#ifdef CONFIG_NET_SCHED
561 to->tc_index = from->tc_index; 565 to->tc_index = from->tc_index;
562#endif 566#endif
@@ -1246,14 +1250,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1246 v6_cork->tclass = tclass; 1250 v6_cork->tclass = tclass;
1247 if (rt->dst.flags & DST_XFRM_TUNNEL) 1251 if (rt->dst.flags & DST_XFRM_TUNNEL)
1248 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? 1252 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1249 rt->dst.dev->mtu : dst_mtu(&rt->dst); 1253 READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
1250 else 1254 else
1251 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? 1255 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1252 rt->dst.dev->mtu : dst_mtu(rt->dst.path); 1256 READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
1253 if (np->frag_size < mtu) { 1257 if (np->frag_size < mtu) {
1254 if (np->frag_size) 1258 if (np->frag_size)
1255 mtu = np->frag_size; 1259 mtu = np->frag_size;
1256 } 1260 }
1261 if (mtu < IPV6_MIN_MTU)
1262 return -EINVAL;
1257 cork->base.fragsize = mtu; 1263 cork->base.fragsize = mtu;
1258 if (dst_allfrag(rt->dst.path)) 1264 if (dst_allfrag(rt->dst.path))
1259 cork->base.flags |= IPCORK_ALLFRAG; 1265 cork->base.flags |= IPCORK_ALLFRAG;
@@ -1274,7 +1280,7 @@ static int __ip6_append_data(struct sock *sk,
1274 unsigned int flags, int dontfrag) 1280 unsigned int flags, int dontfrag)
1275{ 1281{
1276 struct sk_buff *skb, *skb_prev = NULL; 1282 struct sk_buff *skb, *skb_prev = NULL;
1277 unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; 1283 unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
1278 int exthdrlen = 0; 1284 int exthdrlen = 0;
1279 int dst_exthdrlen = 0; 1285 int dst_exthdrlen = 0;
1280 int hh_len; 1286 int hh_len;
@@ -1310,6 +1316,12 @@ static int __ip6_append_data(struct sock *sk,
1310 sizeof(struct frag_hdr) : 0) + 1316 sizeof(struct frag_hdr) : 0) +
1311 rt->rt6i_nfheader_len; 1317 rt->rt6i_nfheader_len;
1312 1318
1319 /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
1320 * the first fragment
1321 */
1322 if (headersize + transhdrlen > mtu)
1323 goto emsgsize;
1324
1313 if (cork->length + length > mtu - headersize && dontfrag && 1325 if (cork->length + length > mtu - headersize && dontfrag &&
1314 (sk->sk_protocol == IPPROTO_UDP || 1326 (sk->sk_protocol == IPPROTO_UDP ||
1315 sk->sk_protocol == IPPROTO_RAW)) { 1327 sk->sk_protocol == IPPROTO_RAW)) {
@@ -1325,9 +1337,8 @@ static int __ip6_append_data(struct sock *sk,
1325 1337
1326 if (cork->length + length > maxnonfragsize - headersize) { 1338 if (cork->length + length > maxnonfragsize - headersize) {
1327emsgsize: 1339emsgsize:
1328 ipv6_local_error(sk, EMSGSIZE, fl6, 1340 pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
1329 mtu - headersize + 1341 ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
1330 sizeof(struct ipv6hdr));
1331 return -EMSGSIZE; 1342 return -EMSGSIZE;
1332 } 1343 }
1333 1344
@@ -1520,7 +1531,8 @@ alloc_new_skb:
1520 if (copy > length) 1531 if (copy > length)
1521 copy = length; 1532 copy = length;
1522 1533
1523 if (!(rt->dst.dev->features&NETIF_F_SG)) { 1534 if (!(rt->dst.dev->features&NETIF_F_SG) &&
1535 skb_tailroom(skb) >= copy) {
1524 unsigned int off; 1536 unsigned int off;
1525 1537
1526 off = skb->len; 1538 off = skb->len;
@@ -1783,6 +1795,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
1783 cork.base.flags = 0; 1795 cork.base.flags = 0;
1784 cork.base.addr = 0; 1796 cork.base.addr = 0;
1785 cork.base.opt = NULL; 1797 cork.base.opt = NULL;
1798 cork.base.dst = NULL;
1786 v6_cork.opt = NULL; 1799 v6_cork.opt = NULL;
1787 err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6); 1800 err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
1788 if (err) { 1801 if (err) {
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a7170a23ab0b..e8f21dd520b2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -122,97 +122,6 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
122 return &dev->stats; 122 return &dev->stats;
123} 123}
124 124
125/*
126 * Locking : hash tables are protected by RCU and RTNL
127 */
128
129static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
130 struct dst_entry *dst)
131{
132 write_seqlock_bh(&idst->lock);
133 dst_release(rcu_dereference_protected(
134 idst->dst,
135 lockdep_is_held(&idst->lock.lock)));
136 if (dst) {
137 dst_hold(dst);
138 idst->cookie = rt6_get_cookie((struct rt6_info *)dst);
139 } else {
140 idst->cookie = 0;
141 }
142 rcu_assign_pointer(idst->dst, dst);
143 write_sequnlock_bh(&idst->lock);
144}
145
146struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
147{
148 struct ip6_tnl_dst *idst;
149 struct dst_entry *dst;
150 unsigned int seq;
151 u32 cookie;
152
153 idst = raw_cpu_ptr(t->dst_cache);
154
155 rcu_read_lock();
156 do {
157 seq = read_seqbegin(&idst->lock);
158 dst = rcu_dereference(idst->dst);
159 cookie = idst->cookie;
160 } while (read_seqretry(&idst->lock, seq));
161
162 if (dst && !atomic_inc_not_zero(&dst->__refcnt))
163 dst = NULL;
164 rcu_read_unlock();
165
166 if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
167 ip6_tnl_per_cpu_dst_set(idst, NULL);
168 dst_release(dst);
169 dst = NULL;
170 }
171 return dst;
172}
173EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
174
175void ip6_tnl_dst_reset(struct ip6_tnl *t)
176{
177 int i;
178
179 for_each_possible_cpu(i)
180 ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
181}
182EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
183
184void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst)
185{
186 ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), dst);
187
188}
189EXPORT_SYMBOL_GPL(ip6_tnl_dst_set);
190
191void ip6_tnl_dst_destroy(struct ip6_tnl *t)
192{
193 if (!t->dst_cache)
194 return;
195
196 ip6_tnl_dst_reset(t);
197 free_percpu(t->dst_cache);
198}
199EXPORT_SYMBOL_GPL(ip6_tnl_dst_destroy);
200
201int ip6_tnl_dst_init(struct ip6_tnl *t)
202{
203 int i;
204
205 t->dst_cache = alloc_percpu(struct ip6_tnl_dst);
206 if (!t->dst_cache)
207 return -ENOMEM;
208
209 for_each_possible_cpu(i)
210 seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
211
212 return 0;
213}
214EXPORT_SYMBOL_GPL(ip6_tnl_dst_init);
215
216/** 125/**
217 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses 126 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
218 * @remote: the address of the tunnel exit-point 127 * @remote: the address of the tunnel exit-point
@@ -331,7 +240,7 @@ static void ip6_dev_free(struct net_device *dev)
331{ 240{
332 struct ip6_tnl *t = netdev_priv(dev); 241 struct ip6_tnl *t = netdev_priv(dev);
333 242
334 ip6_tnl_dst_destroy(t); 243 dst_cache_destroy(&t->dst_cache);
335 free_percpu(dev->tstats); 244 free_percpu(dev->tstats);
336 free_netdev(dev); 245 free_netdev(dev);
337} 246}
@@ -377,13 +286,16 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
377 struct net_device *dev; 286 struct net_device *dev;
378 struct ip6_tnl *t; 287 struct ip6_tnl *t;
379 char name[IFNAMSIZ]; 288 char name[IFNAMSIZ];
380 int err = -ENOMEM; 289 int err = -E2BIG;
381 290
382 if (p->name[0]) 291 if (p->name[0]) {
292 if (!dev_valid_name(p->name))
293 goto failed;
383 strlcpy(name, p->name, IFNAMSIZ); 294 strlcpy(name, p->name, IFNAMSIZ);
384 else 295 } else {
385 sprintf(name, "ip6tnl%%d"); 296 sprintf(name, "ip6tnl%%d");
386 297 }
298 err = -ENOMEM;
387 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, 299 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
388 ip6_tnl_dev_setup); 300 ip6_tnl_dev_setup);
389 if (!dev) 301 if (!dev)
@@ -464,7 +376,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
464 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL); 376 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
465 else 377 else
466 ip6_tnl_unlink(ip6n, t); 378 ip6_tnl_unlink(ip6n, t);
467 ip6_tnl_dst_reset(t); 379 dst_cache_reset(&t->dst_cache);
468 dev_put(dev); 380 dev_put(dev);
469} 381}
470 382
@@ -1053,7 +965,6 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1053 struct ipv6_tel_txoption opt; 965 struct ipv6_tel_txoption opt;
1054 struct dst_entry *dst = NULL, *ndst = NULL; 966 struct dst_entry *dst = NULL, *ndst = NULL;
1055 struct net_device *tdev; 967 struct net_device *tdev;
1056 bool use_cache = false;
1057 int mtu; 968 int mtu;
1058 unsigned int max_headroom = sizeof(struct ipv6hdr); 969 unsigned int max_headroom = sizeof(struct ipv6hdr);
1059 u8 proto; 970 u8 proto;
@@ -1061,39 +972,28 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1061 972
1062 /* NBMA tunnel */ 973 /* NBMA tunnel */
1063 if (ipv6_addr_any(&t->parms.raddr)) { 974 if (ipv6_addr_any(&t->parms.raddr)) {
1064 if (skb->protocol == htons(ETH_P_IPV6)) { 975 struct in6_addr *addr6;
1065 struct in6_addr *addr6; 976 struct neighbour *neigh;
1066 struct neighbour *neigh; 977 int addr_type;
1067 int addr_type;
1068 978
1069 if (!skb_dst(skb)) 979 if (!skb_dst(skb))
1070 goto tx_err_link_failure; 980 goto tx_err_link_failure;
1071
1072 neigh = dst_neigh_lookup(skb_dst(skb),
1073 &ipv6_hdr(skb)->daddr);
1074 if (!neigh)
1075 goto tx_err_link_failure;
1076 981
1077 addr6 = (struct in6_addr *)&neigh->primary_key; 982 neigh = dst_neigh_lookup(skb_dst(skb),
1078 addr_type = ipv6_addr_type(addr6); 983 &ipv6_hdr(skb)->daddr);
984 if (!neigh)
985 goto tx_err_link_failure;
1079 986
1080 if (addr_type == IPV6_ADDR_ANY) 987 addr6 = (struct in6_addr *)&neigh->primary_key;
1081 addr6 = &ipv6_hdr(skb)->daddr; 988 addr_type = ipv6_addr_type(addr6);
1082 989
1083 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 990 if (addr_type == IPV6_ADDR_ANY)
1084 neigh_release(neigh); 991 addr6 = &ipv6_hdr(skb)->daddr;
1085 }
1086 } else if (t->parms.proto != 0 && !(t->parms.flags &
1087 (IP6_TNL_F_USE_ORIG_TCLASS |
1088 IP6_TNL_F_USE_ORIG_FWMARK))) {
1089 /* enable the cache only if neither the outer protocol nor the
1090 * routing decision depends on the current inner header value
1091 */
1092 use_cache = true;
1093 }
1094 992
1095 if (use_cache) 993 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1096 dst = ip6_tnl_dst_get(t); 994 neigh_release(neigh);
995 } else if (!fl6->flowi6_mark)
996 dst = dst_cache_get(&t->dst_cache);
1097 997
1098 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) 998 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1099 goto tx_err_link_failure; 999 goto tx_err_link_failure;
@@ -1156,8 +1056,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1156 skb = new_skb; 1056 skb = new_skb;
1157 } 1057 }
1158 1058
1159 if (use_cache && ndst) 1059 if (!fl6->flowi6_mark && ndst)
1160 ip6_tnl_dst_set(t, ndst); 1060 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1161 skb_dst_set(skb, dst); 1061 skb_dst_set(skb, dst);
1162 1062
1163 skb->transport_header = skb->network_header; 1063 skb->transport_header = skb->network_header;
@@ -1392,7 +1292,7 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1392 t->parms.flowinfo = p->flowinfo; 1292 t->parms.flowinfo = p->flowinfo;
1393 t->parms.link = p->link; 1293 t->parms.link = p->link;
1394 t->parms.proto = p->proto; 1294 t->parms.proto = p->proto;
1395 ip6_tnl_dst_reset(t); 1295 dst_cache_reset(&t->dst_cache);
1396 ip6_tnl_link_config(t); 1296 ip6_tnl_link_config(t);
1397 return 0; 1297 return 0;
1398} 1298}
@@ -1663,7 +1563,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
1663 if (!dev->tstats) 1563 if (!dev->tstats)
1664 return -ENOMEM; 1564 return -ENOMEM;
1665 1565
1666 ret = ip6_tnl_dst_init(t); 1566 ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1667 if (ret) { 1567 if (ret) {
1668 free_percpu(dev->tstats); 1568 free_percpu(dev->tstats);
1669 dev->tstats = NULL; 1569 dev->tstats = NULL;
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 24dfc2de0165..40bb7a5e6d47 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -212,10 +212,13 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
212 char name[IFNAMSIZ]; 212 char name[IFNAMSIZ];
213 int err; 213 int err;
214 214
215 if (p->name[0]) 215 if (p->name[0]) {
216 if (!dev_valid_name(p->name))
217 goto failed;
216 strlcpy(name, p->name, IFNAMSIZ); 218 strlcpy(name, p->name, IFNAMSIZ);
217 else 219 } else {
218 sprintf(name, "ip6_vti%%d"); 220 sprintf(name, "ip6_vti%%d");
221 }
219 222
220 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup); 223 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
221 if (!dev) 224 if (!dev)
@@ -645,7 +648,7 @@ vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
645 t->parms.i_key = p->i_key; 648 t->parms.i_key = p->i_key;
646 t->parms.o_key = p->o_key; 649 t->parms.o_key = p->o_key;
647 t->parms.proto = p->proto; 650 t->parms.proto = p->proto;
648 ip6_tnl_dst_reset(t); 651 dst_cache_reset(&t->dst_cache);
649 vti6_link_config(t); 652 vti6_link_config(t);
650 return 0; 653 return 0;
651} 654}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8361d73ab653..9b92960f024d 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -495,6 +495,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
495 return ERR_PTR(-ENOENT); 495 return ERR_PTR(-ENOENT);
496 496
497 it->mrt = mrt; 497 it->mrt = mrt;
498 it->cache = NULL;
498 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) 499 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
499 : SEQ_START_TOKEN; 500 : SEQ_START_TOKEN;
500} 501}
@@ -1786,7 +1787,8 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1786 ret = 0; 1787 ret = 0;
1787 if (!ip6mr_new_table(net, v)) 1788 if (!ip6mr_new_table(net, v))
1788 ret = -ENOMEM; 1789 ret = -ENOMEM;
1789 raw6_sk(sk)->ip6mr_table = v; 1790 else
1791 raw6_sk(sk)->ip6mr_table = v;
1790 rtnl_unlock(); 1792 rtnl_unlock();
1791 return ret; 1793 return ret;
1792 } 1794 }
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 435e26210587..8d11a034ca3f 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -905,12 +905,8 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
905#ifdef CONFIG_NETFILTER 905#ifdef CONFIG_NETFILTER
906 /* we need to exclude all possible ENOPROTOOPTs except default case */ 906 /* we need to exclude all possible ENOPROTOOPTs except default case */
907 if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && 907 if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
908 optname != IPV6_XFRM_POLICY) { 908 optname != IPV6_XFRM_POLICY)
909 lock_sock(sk); 909 err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen);
910 err = nf_setsockopt(sk, PF_INET6, optname, optval,
911 optlen);
912 release_sock(sk);
913 }
914#endif 910#endif
915 return err; 911 return err;
916} 912}
@@ -940,12 +936,9 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
940#ifdef CONFIG_NETFILTER 936#ifdef CONFIG_NETFILTER
941 /* we need to exclude all possible ENOPROTOOPTs except default case */ 937 /* we need to exclude all possible ENOPROTOOPTs except default case */
942 if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && 938 if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
943 optname != IPV6_XFRM_POLICY) { 939 optname != IPV6_XFRM_POLICY)
944 lock_sock(sk); 940 err = compat_nf_setsockopt(sk, PF_INET6, optname, optval,
945 err = compat_nf_setsockopt(sk, PF_INET6, optname, 941 optlen);
946 optval, optlen);
947 release_sock(sk);
948 }
949#endif 942#endif
950 return err; 943 return err;
951} 944}
@@ -1313,7 +1306,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1313 break; 1306 break;
1314 1307
1315 case IPV6_AUTOFLOWLABEL: 1308 case IPV6_AUTOFLOWLABEL:
1316 val = np->autoflowlabel; 1309 val = ip6_autoflowlabel(sock_net(sk), np);
1317 break; 1310 break;
1318 1311
1319 default: 1312 default:
@@ -1347,10 +1340,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
1347 if (get_user(len, optlen)) 1340 if (get_user(len, optlen))
1348 return -EFAULT; 1341 return -EFAULT;
1349 1342
1350 lock_sock(sk); 1343 err = nf_getsockopt(sk, PF_INET6, optname, optval, &len);
1351 err = nf_getsockopt(sk, PF_INET6, optname, optval,
1352 &len);
1353 release_sock(sk);
1354 if (err >= 0) 1344 if (err >= 0)
1355 err = put_user(len, optlen); 1345 err = put_user(len, optlen);
1356 } 1346 }
@@ -1389,10 +1379,7 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
1389 if (get_user(len, optlen)) 1379 if (get_user(len, optlen))
1390 return -EFAULT; 1380 return -EFAULT;
1391 1381
1392 lock_sock(sk); 1382 err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len);
1393 err = compat_nf_getsockopt(sk, PF_INET6,
1394 optname, optval, &len);
1395 release_sock(sk);
1396 if (err >= 0) 1383 if (err >= 0)
1397 err = put_user(len, optlen); 1384 err = put_user(len, optlen);
1398 } 1385 }
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 84afb9a77278..3db8d7d1a986 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1478,7 +1478,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
1478 *(opt++) = (rd_len >> 3); 1478 *(opt++) = (rd_len >> 3);
1479 opt += 6; 1479 opt += 6;
1480 1480
1481 memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8); 1481 skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
1482 rd_len - 8);
1482} 1483}
1483 1484
1484void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) 1485void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
@@ -1686,6 +1687,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1686 case NETDEV_CHANGEADDR: 1687 case NETDEV_CHANGEADDR:
1687 neigh_changeaddr(&nd_tbl, dev); 1688 neigh_changeaddr(&nd_tbl, dev);
1688 fib6_run_gc(0, net, false); 1689 fib6_run_gc(0, net, false);
1690 /* fallthrough */
1691 case NETDEV_UP:
1689 idev = in6_dev_get(dev); 1692 idev = in6_dev_get(dev);
1690 if (!idev) 1693 if (!idev)
1691 break; 1694 break;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 22f39e00bef3..6cb9e35d23ac 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -425,6 +425,10 @@ ip6t_do_table(struct sk_buff *skb,
425 } 425 }
426 if (table_base + v != ip6t_next_entry(e) && 426 if (table_base + v != ip6t_next_entry(e) &&
427 !(e->ipv6.flags & IP6T_F_GOTO)) { 427 !(e->ipv6.flags & IP6T_F_GOTO)) {
428 if (unlikely(stackidx >= private->stacksize)) {
429 verdict = NF_DROP;
430 break;
431 }
428 jumpstack[stackidx++] = e; 432 jumpstack[stackidx++] = e;
429 } 433 }
430 434
@@ -455,23 +459,12 @@ ip6t_do_table(struct sk_buff *skb,
455#endif 459#endif
456} 460}
457 461
458static bool find_jump_target(const struct xt_table_info *t,
459 const struct ip6t_entry *target)
460{
461 struct ip6t_entry *iter;
462
463 xt_entry_foreach(iter, t->entries, t->size) {
464 if (iter == target)
465 return true;
466 }
467 return false;
468}
469
470/* Figures out from what hook each rule can be called: returns 0 if 462/* Figures out from what hook each rule can be called: returns 0 if
471 there are loops. Puts hook bitmask in comefrom. */ 463 there are loops. Puts hook bitmask in comefrom. */
472static int 464static int
473mark_source_chains(const struct xt_table_info *newinfo, 465mark_source_chains(const struct xt_table_info *newinfo,
474 unsigned int valid_hooks, void *entry0) 466 unsigned int valid_hooks, void *entry0,
467 unsigned int *offsets)
475{ 468{
476 unsigned int hook; 469 unsigned int hook;
477 470
@@ -564,10 +557,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
564 /* This a jump; chase it. */ 557 /* This a jump; chase it. */
565 duprintf("Jump rule %u -> %u\n", 558 duprintf("Jump rule %u -> %u\n",
566 pos, newpos); 559 pos, newpos);
560 if (!xt_find_jump_offset(offsets, newpos,
561 newinfo->number))
562 return 0;
567 e = (struct ip6t_entry *) 563 e = (struct ip6t_entry *)
568 (entry0 + newpos); 564 (entry0 + newpos);
569 if (!find_jump_target(newinfo, e))
570 return 0;
571 } else { 565 } else {
572 /* ... this is a fallthru */ 566 /* ... this is a fallthru */
573 newpos = pos + e->next_offset; 567 newpos = pos + e->next_offset;
@@ -668,7 +662,8 @@ static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
668 662
669static int 663static int
670find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, 664find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
671 unsigned int size) 665 unsigned int size,
666 struct xt_percpu_counter_alloc_state *alloc_state)
672{ 667{
673 struct xt_entry_target *t; 668 struct xt_entry_target *t;
674 struct xt_target *target; 669 struct xt_target *target;
@@ -677,11 +672,11 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
677 struct xt_mtchk_param mtpar; 672 struct xt_mtchk_param mtpar;
678 struct xt_entry_match *ematch; 673 struct xt_entry_match *ematch;
679 674
680 e->counters.pcnt = xt_percpu_counter_alloc(); 675 if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
681 if (IS_ERR_VALUE(e->counters.pcnt))
682 return -ENOMEM; 676 return -ENOMEM;
683 677
684 j = 0; 678 j = 0;
679 memset(&mtpar, 0, sizeof(mtpar));
685 mtpar.net = net; 680 mtpar.net = net;
686 mtpar.table = name; 681 mtpar.table = name;
687 mtpar.entryinfo = &e->ipv6; 682 mtpar.entryinfo = &e->ipv6;
@@ -717,7 +712,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
717 cleanup_match(ematch, net); 712 cleanup_match(ematch, net);
718 } 713 }
719 714
720 xt_percpu_counter_free(e->counters.pcnt); 715 xt_percpu_counter_free(&e->counters);
721 716
722 return ret; 717 return ret;
723} 718}
@@ -812,8 +807,7 @@ static void cleanup_entry(struct ip6t_entry *e, struct net *net)
812 if (par.target->destroy != NULL) 807 if (par.target->destroy != NULL)
813 par.target->destroy(&par); 808 par.target->destroy(&par);
814 module_put(par.target->me); 809 module_put(par.target->me);
815 810 xt_percpu_counter_free(&e->counters);
816 xt_percpu_counter_free(e->counters.pcnt);
817} 811}
818 812
819/* Checks and translates the user-supplied table segment (held in 813/* Checks and translates the user-supplied table segment (held in
@@ -822,7 +816,9 @@ static int
822translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, 816translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
823 const struct ip6t_replace *repl) 817 const struct ip6t_replace *repl)
824{ 818{
819 struct xt_percpu_counter_alloc_state alloc_state = { 0 };
825 struct ip6t_entry *iter; 820 struct ip6t_entry *iter;
821 unsigned int *offsets;
826 unsigned int i; 822 unsigned int i;
827 int ret = 0; 823 int ret = 0;
828 824
@@ -836,6 +832,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
836 } 832 }
837 833
838 duprintf("translate_table: size %u\n", newinfo->size); 834 duprintf("translate_table: size %u\n", newinfo->size);
835 offsets = xt_alloc_entry_offsets(newinfo->number);
836 if (!offsets)
837 return -ENOMEM;
839 i = 0; 838 i = 0;
840 /* Walk through entries, checking offsets. */ 839 /* Walk through entries, checking offsets. */
841 xt_entry_foreach(iter, entry0, newinfo->size) { 840 xt_entry_foreach(iter, entry0, newinfo->size) {
@@ -845,17 +844,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
845 repl->underflow, 844 repl->underflow,
846 repl->valid_hooks); 845 repl->valid_hooks);
847 if (ret != 0) 846 if (ret != 0)
848 return ret; 847 goto out_free;
848 if (i < repl->num_entries)
849 offsets[i] = (void *)iter - entry0;
849 ++i; 850 ++i;
850 if (strcmp(ip6t_get_target(iter)->u.user.name, 851 if (strcmp(ip6t_get_target(iter)->u.user.name,
851 XT_ERROR_TARGET) == 0) 852 XT_ERROR_TARGET) == 0)
852 ++newinfo->stacksize; 853 ++newinfo->stacksize;
853 } 854 }
854 855
856 ret = -EINVAL;
855 if (i != repl->num_entries) { 857 if (i != repl->num_entries) {
856 duprintf("translate_table: %u not %u entries\n", 858 duprintf("translate_table: %u not %u entries\n",
857 i, repl->num_entries); 859 i, repl->num_entries);
858 return -EINVAL; 860 goto out_free;
859 } 861 }
860 862
861 /* Check hooks all assigned */ 863 /* Check hooks all assigned */
@@ -866,22 +868,26 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
866 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 868 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
867 duprintf("Invalid hook entry %u %u\n", 869 duprintf("Invalid hook entry %u %u\n",
868 i, repl->hook_entry[i]); 870 i, repl->hook_entry[i]);
869 return -EINVAL; 871 goto out_free;
870 } 872 }
871 if (newinfo->underflow[i] == 0xFFFFFFFF) { 873 if (newinfo->underflow[i] == 0xFFFFFFFF) {
872 duprintf("Invalid underflow %u %u\n", 874 duprintf("Invalid underflow %u %u\n",
873 i, repl->underflow[i]); 875 i, repl->underflow[i]);
874 return -EINVAL; 876 goto out_free;
875 } 877 }
876 } 878 }
877 879
878 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) 880 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
879 return -ELOOP; 881 ret = -ELOOP;
882 goto out_free;
883 }
884 kvfree(offsets);
880 885
881 /* Finally, each sanity check must pass */ 886 /* Finally, each sanity check must pass */
882 i = 0; 887 i = 0;
883 xt_entry_foreach(iter, entry0, newinfo->size) { 888 xt_entry_foreach(iter, entry0, newinfo->size) {
884 ret = find_check_entry(iter, net, repl->name, repl->size); 889 ret = find_check_entry(iter, net, repl->name, repl->size,
890 &alloc_state);
885 if (ret != 0) 891 if (ret != 0)
886 break; 892 break;
887 ++i; 893 ++i;
@@ -897,6 +903,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
897 } 903 }
898 904
899 return ret; 905 return ret;
906 out_free:
907 kvfree(offsets);
908 return ret;
900} 909}
901 910
902static void 911static void
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 1aa5848764a7..aa051d9d4a96 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -226,20 +226,27 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
226static int 226static int
227ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len) 227ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
228{ 228{
229 const struct inet_sock *inet = inet_sk(sk); 229 struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
230 const struct ipv6_pinfo *inet6 = inet6_sk(sk); 230 const struct ipv6_pinfo *inet6 = inet6_sk(sk);
231 const struct inet_sock *inet = inet_sk(sk);
231 const struct nf_conntrack_tuple_hash *h; 232 const struct nf_conntrack_tuple_hash *h;
232 struct sockaddr_in6 sin6; 233 struct sockaddr_in6 sin6;
233 struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
234 struct nf_conn *ct; 234 struct nf_conn *ct;
235 __be32 flow_label;
236 int bound_dev_if;
235 237
238 lock_sock(sk);
236 tuple.src.u3.in6 = sk->sk_v6_rcv_saddr; 239 tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
237 tuple.src.u.tcp.port = inet->inet_sport; 240 tuple.src.u.tcp.port = inet->inet_sport;
238 tuple.dst.u3.in6 = sk->sk_v6_daddr; 241 tuple.dst.u3.in6 = sk->sk_v6_daddr;
239 tuple.dst.u.tcp.port = inet->inet_dport; 242 tuple.dst.u.tcp.port = inet->inet_dport;
240 tuple.dst.protonum = sk->sk_protocol; 243 tuple.dst.protonum = sk->sk_protocol;
244 bound_dev_if = sk->sk_bound_dev_if;
245 flow_label = inet6->flow_label;
246 release_sock(sk);
241 247
242 if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) 248 if (tuple.dst.protonum != IPPROTO_TCP &&
249 tuple.dst.protonum != IPPROTO_SCTP)
243 return -ENOPROTOOPT; 250 return -ENOPROTOOPT;
244 251
245 if (*len < 0 || (unsigned int) *len < sizeof(sin6)) 252 if (*len < 0 || (unsigned int) *len < sizeof(sin6))
@@ -257,14 +264,13 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
257 264
258 sin6.sin6_family = AF_INET6; 265 sin6.sin6_family = AF_INET6;
259 sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port; 266 sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
260 sin6.sin6_flowinfo = inet6->flow_label & IPV6_FLOWINFO_MASK; 267 sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK;
261 memcpy(&sin6.sin6_addr, 268 memcpy(&sin6.sin6_addr,
262 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6, 269 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
263 sizeof(sin6.sin6_addr)); 270 sizeof(sin6.sin6_addr));
264 271
265 nf_ct_put(ct); 272 nf_ct_put(ct);
266 sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, 273 sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if);
267 sk->sk_bound_dev_if);
268 return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0; 274 return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
269} 275}
270 276
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
index 6989c70ae29f..4a84b5ad9ecb 100644
--- a/net/ipv6/netfilter/nf_dup_ipv6.c
+++ b/net/ipv6/netfilter/nf_dup_ipv6.c
@@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
33 fl6.daddr = *gw; 33 fl6.daddr = *gw;
34 fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) | 34 fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
35 (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]); 35 (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
36 fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
36 dst = ip6_route_output(net, NULL, &fl6); 37 dst = ip6_route_output(net, NULL, &fl6);
37 if (dst->error) { 38 if (dst->error) {
38 dst_release(dst); 39 dst_release(dst);
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index 238e70c3f7b7..7b9c2cabd495 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
99 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff, 99 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
100 target, maniptype)) 100 target, maniptype))
101 return false; 101 return false;
102
103 /* must reload, offset might have changed */
104 ipv6h = (void *)skb->data + iphdroff;
105
102manip_addr: 106manip_addr:
103 if (maniptype == NF_NAT_MANIP_SRC) 107 if (maniptype == NF_NAT_MANIP_SRC)
104 ipv6h->saddr = target->src.u3.in6; 108 ipv6h->saddr = target->src.u3.in6;
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index e0f922b777e3..7117e5bef412 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
157 fl6.daddr = oip6h->saddr; 157 fl6.daddr = oip6h->saddr;
158 fl6.fl6_sport = otcph->dest; 158 fl6.fl6_sport = otcph->dest;
159 fl6.fl6_dport = otcph->source; 159 fl6.fl6_dport = otcph->source;
160 fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
160 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); 161 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
161 dst = ip6_route_output(net, NULL, &fl6); 162 dst = ip6_route_output(net, NULL, &fl6);
162 if (dst == NULL || dst->error) { 163 if (dst == NULL || dst->error) {
@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
180 181
181 skb_dst_set(nskb, dst); 182 skb_dst_set(nskb, dst);
182 183
184 nskb->mark = fl6.flowi6_mark;
185
183 skb_reserve(nskb, hh_len + dst->header_len); 186 skb_reserve(nskb, hh_len + dst->header_len);
184 ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, 187 ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
185 ip6_dst_hoplimit(dst)); 188 ip6_dst_hoplimit(dst));
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7336a7311038..2f6d8f57fdd4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -851,6 +851,9 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net,
851 struct fib6_node *fn; 851 struct fib6_node *fn;
852 struct rt6_info *rt; 852 struct rt6_info *rt;
853 853
854 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
855 flags &= ~RT6_LOOKUP_F_IFACE;
856
854 read_lock_bh(&table->tb6_lock); 857 read_lock_bh(&table->tb6_lock);
855 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 858 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
856restart: 859restart:
@@ -1614,6 +1617,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1614 } 1617 }
1615 1618
1616 rt->dst.flags |= DST_HOST; 1619 rt->dst.flags |= DST_HOST;
1620 rt->dst.input = ip6_input;
1617 rt->dst.output = ip6_output; 1621 rt->dst.output = ip6_output;
1618 atomic_set(&rt->dst.__refcnt, 1); 1622 atomic_set(&rt->dst.__refcnt, 1);
1619 rt->rt6i_gateway = fl6->daddr; 1623 rt->rt6i_gateway = fl6->daddr;
@@ -2707,6 +2711,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2707 2711
2708static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { 2712static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2709 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) }, 2713 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2714 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
2710 [RTA_OIF] = { .type = NLA_U32 }, 2715 [RTA_OIF] = { .type = NLA_U32 },
2711 [RTA_IIF] = { .type = NLA_U32 }, 2716 [RTA_IIF] = { .type = NLA_U32 },
2712 [RTA_PRIORITY] = { .type = NLA_U32 }, 2717 [RTA_PRIORITY] = { .type = NLA_U32 },
@@ -2715,6 +2720,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2715 [RTA_PREF] = { .type = NLA_U8 }, 2720 [RTA_PREF] = { .type = NLA_U8 },
2716 [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, 2721 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
2717 [RTA_ENCAP] = { .type = NLA_NESTED }, 2722 [RTA_ENCAP] = { .type = NLA_NESTED },
2723 [RTA_TABLE] = { .type = NLA_U32 },
2718}; 2724};
2719 2725
2720static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, 2726static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index b7ea5eaa4fd1..11282ffca567 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -176,7 +176,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
176#ifdef CONFIG_IPV6_SIT_6RD 176#ifdef CONFIG_IPV6_SIT_6RD
177 struct ip_tunnel *t = netdev_priv(dev); 177 struct ip_tunnel *t = netdev_priv(dev);
178 178
179 if (t->dev == sitn->fb_tunnel_dev) { 179 if (dev == sitn->fb_tunnel_dev) {
180 ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0); 180 ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
181 t->ip6rd.relay_prefix = 0; 181 t->ip6rd.relay_prefix = 0;
182 t->ip6rd.prefixlen = 16; 182 t->ip6rd.prefixlen = 16;
@@ -244,11 +244,13 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
244 if (!create) 244 if (!create)
245 goto failed; 245 goto failed;
246 246
247 if (parms->name[0]) 247 if (parms->name[0]) {
248 if (!dev_valid_name(parms->name))
249 goto failed;
248 strlcpy(name, parms->name, IFNAMSIZ); 250 strlcpy(name, parms->name, IFNAMSIZ);
249 else 251 } else {
250 strcpy(name, "sit%d"); 252 strcpy(name, "sit%d");
251 253 }
252 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, 254 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
253 ipip6_tunnel_setup); 255 ipip6_tunnel_setup);
254 if (!dev) 256 if (!dev)
@@ -475,7 +477,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
475 ipip6_tunnel_unlink(sitn, tunnel); 477 ipip6_tunnel_unlink(sitn, tunnel);
476 ipip6_tunnel_del_prl(tunnel, NULL); 478 ipip6_tunnel_del_prl(tunnel, NULL);
477 } 479 }
478 ip_tunnel_dst_reset_all(tunnel); 480 dst_cache_reset(&tunnel->dst_cache);
479 dev_put(dev); 481 dev_put(dev);
480} 482}
481 483
@@ -1098,7 +1100,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
1098 t->parms.link = p->link; 1100 t->parms.link = p->link;
1099 ipip6_tunnel_bind_dev(t->dev); 1101 ipip6_tunnel_bind_dev(t->dev);
1100 } 1102 }
1101 ip_tunnel_dst_reset_all(t); 1103 dst_cache_reset(&t->dst_cache);
1102 netdev_state_change(t->dev); 1104 netdev_state_change(t->dev);
1103} 1105}
1104 1106
@@ -1129,7 +1131,7 @@ static int ipip6_tunnel_update_6rd(struct ip_tunnel *t,
1129 t->ip6rd.relay_prefix = relay_prefix; 1131 t->ip6rd.relay_prefix = relay_prefix;
1130 t->ip6rd.prefixlen = ip6rd->prefixlen; 1132 t->ip6rd.prefixlen = ip6rd->prefixlen;
1131 t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen; 1133 t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen;
1132 ip_tunnel_dst_reset_all(t); 1134 dst_cache_reset(&t->dst_cache);
1133 netdev_state_change(t->dev); 1135 netdev_state_change(t->dev);
1134 return 0; 1136 return 0;
1135} 1137}
@@ -1283,7 +1285,7 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1283 err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL); 1285 err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
1284 break; 1286 break;
1285 } 1287 }
1286 ip_tunnel_dst_reset_all(t); 1288 dst_cache_reset(&t->dst_cache);
1287 netdev_state_change(dev); 1289 netdev_state_change(dev);
1288 break; 1290 break;
1289 1291
@@ -1344,7 +1346,7 @@ static void ipip6_dev_free(struct net_device *dev)
1344{ 1346{
1345 struct ip_tunnel *tunnel = netdev_priv(dev); 1347 struct ip_tunnel *tunnel = netdev_priv(dev);
1346 1348
1347 free_percpu(tunnel->dst_cache); 1349 dst_cache_destroy(&tunnel->dst_cache);
1348 free_percpu(dev->tstats); 1350 free_percpu(dev->tstats);
1349 free_netdev(dev); 1351 free_netdev(dev);
1350} 1352}
@@ -1377,6 +1379,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
1377static int ipip6_tunnel_init(struct net_device *dev) 1379static int ipip6_tunnel_init(struct net_device *dev)
1378{ 1380{
1379 struct ip_tunnel *tunnel = netdev_priv(dev); 1381 struct ip_tunnel *tunnel = netdev_priv(dev);
1382 int err;
1380 1383
1381 tunnel->dev = dev; 1384 tunnel->dev = dev;
1382 tunnel->net = dev_net(dev); 1385 tunnel->net = dev_net(dev);
@@ -1387,11 +1390,11 @@ static int ipip6_tunnel_init(struct net_device *dev)
1387 if (!dev->tstats) 1390 if (!dev->tstats)
1388 return -ENOMEM; 1391 return -ENOMEM;
1389 1392
1390 tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst); 1393 err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1391 if (!tunnel->dst_cache) { 1394 if (err) {
1392 free_percpu(dev->tstats); 1395 free_percpu(dev->tstats);
1393 dev->tstats = NULL; 1396 dev->tstats = NULL;
1394 return -ENOMEM; 1397 return err;
1395 } 1398 }
1396 1399
1397 return 0; 1400 return 0;
@@ -1570,6 +1573,13 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
1570 if (err < 0) 1573 if (err < 0)
1571 return err; 1574 return err;
1572 1575
1576 if (tb[IFLA_MTU]) {
1577 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
1578
1579 if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
1580 dev->mtu = mtu;
1581 }
1582
1573#ifdef CONFIG_IPV6_SIT_6RD 1583#ifdef CONFIG_IPV6_SIT_6RD
1574 if (ipip6_netlink_6rd_parms(data, &ip6rd)) 1584 if (ipip6_netlink_6rd_parms(data, &ip6rd))
1575 err = ipip6_tunnel_update_6rd(nt, &ip6rd); 1585 err = ipip6_tunnel_update_6rd(nt, &ip6rd);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 74cbcc4b399c..90abe88e1b40 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1415,6 +1415,10 @@ process:
1415 reqsk_put(req); 1415 reqsk_put(req);
1416 goto discard_it; 1416 goto discard_it;
1417 } 1417 }
1418 if (tcp_checksum_complete(skb)) {
1419 reqsk_put(req);
1420 goto csum_error;
1421 }
1418 if (unlikely(sk->sk_state != TCP_LISTEN)) { 1422 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1419 inet_csk_reqsk_queue_drop_and_put(sk, req); 1423 inet_csk_reqsk_queue_drop_and_put(sk, req);
1420 goto lookup; 1424 goto lookup;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index c074771a10f7..1ca0c2f3d92b 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -121,7 +121,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
121 struct flowi6 *fl6 = &fl->u.ip6; 121 struct flowi6 *fl6 = &fl->u.ip6;
122 int onlyproto = 0; 122 int onlyproto = 0;
123 const struct ipv6hdr *hdr = ipv6_hdr(skb); 123 const struct ipv6hdr *hdr = ipv6_hdr(skb);
124 u16 offset = sizeof(*hdr); 124 u32 offset = sizeof(*hdr);
125 struct ipv6_opt_hdr *exthdr; 125 struct ipv6_opt_hdr *exthdr;
126 const unsigned char *nh = skb_network_header(skb); 126 const unsigned char *nh = skb_network_header(skb);
127 u16 nhoff = IP6CB(skb)->nhoff; 127 u16 nhoff = IP6CB(skb)->nhoff;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 20ab7b2ec463..aeffb65181f5 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -2381,9 +2381,11 @@ static int afiucv_iucv_init(void)
2381 af_iucv_dev->driver = &af_iucv_driver; 2381 af_iucv_dev->driver = &af_iucv_driver;
2382 err = device_register(af_iucv_dev); 2382 err = device_register(af_iucv_dev);
2383 if (err) 2383 if (err)
2384 goto out_driver; 2384 goto out_iucv_dev;
2385 return 0; 2385 return 0;
2386 2386
2387out_iucv_dev:
2388 put_device(af_iucv_dev);
2387out_driver: 2389out_driver:
2388 driver_unregister(&af_iucv_driver); 2390 driver_unregister(&af_iucv_driver);
2389out_iucv: 2391out_iucv:
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 6482b001f19a..3ba903ff2bb0 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -437,6 +437,24 @@ static int verify_address_len(const void *p)
437 return 0; 437 return 0;
438} 438}
439 439
440static inline int sadb_key_len(const struct sadb_key *key)
441{
442 int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);
443
444 return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes,
445 sizeof(uint64_t));
446}
447
448static int verify_key_len(const void *p)
449{
450 const struct sadb_key *key = p;
451
452 if (sadb_key_len(key) > key->sadb_key_len)
453 return -EINVAL;
454
455 return 0;
456}
457
440static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx) 458static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
441{ 459{
442 return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) + 460 return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
@@ -533,16 +551,25 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
533 return -EINVAL; 551 return -EINVAL;
534 if (ext_hdrs[ext_type-1] != NULL) 552 if (ext_hdrs[ext_type-1] != NULL)
535 return -EINVAL; 553 return -EINVAL;
536 if (ext_type == SADB_EXT_ADDRESS_SRC || 554 switch (ext_type) {
537 ext_type == SADB_EXT_ADDRESS_DST || 555 case SADB_EXT_ADDRESS_SRC:
538 ext_type == SADB_EXT_ADDRESS_PROXY || 556 case SADB_EXT_ADDRESS_DST:
539 ext_type == SADB_X_EXT_NAT_T_OA) { 557 case SADB_EXT_ADDRESS_PROXY:
558 case SADB_X_EXT_NAT_T_OA:
540 if (verify_address_len(p)) 559 if (verify_address_len(p))
541 return -EINVAL; 560 return -EINVAL;
542 } 561 break;
543 if (ext_type == SADB_X_EXT_SEC_CTX) { 562 case SADB_X_EXT_SEC_CTX:
544 if (verify_sec_ctx_len(p)) 563 if (verify_sec_ctx_len(p))
545 return -EINVAL; 564 return -EINVAL;
565 break;
566 case SADB_EXT_KEY_AUTH:
567 case SADB_EXT_KEY_ENCRYPT:
568 if (verify_key_len(p))
569 return -EINVAL;
570 break;
571 default:
572 break;
546 } 573 }
547 ext_hdrs[ext_type-1] = (void *) p; 574 ext_hdrs[ext_type-1] = (void *) p;
548 } 575 }
@@ -1111,14 +1138,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1111 key = ext_hdrs[SADB_EXT_KEY_AUTH - 1]; 1138 key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
1112 if (key != NULL && 1139 if (key != NULL &&
1113 sa->sadb_sa_auth != SADB_X_AALG_NULL && 1140 sa->sadb_sa_auth != SADB_X_AALG_NULL &&
1114 ((key->sadb_key_bits+7) / 8 == 0 || 1141 key->sadb_key_bits == 0)
1115 (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
1116 return ERR_PTR(-EINVAL); 1142 return ERR_PTR(-EINVAL);
1117 key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1]; 1143 key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
1118 if (key != NULL && 1144 if (key != NULL &&
1119 sa->sadb_sa_encrypt != SADB_EALG_NULL && 1145 sa->sadb_sa_encrypt != SADB_EALG_NULL &&
1120 ((key->sadb_key_bits+7) / 8 == 0 || 1146 key->sadb_key_bits == 0)
1121 (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
1122 return ERR_PTR(-EINVAL); 1147 return ERR_PTR(-EINVAL);
1123 1148
1124 x = xfrm_state_alloc(net); 1149 x = xfrm_state_alloc(net);
@@ -3305,7 +3330,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
3305 p += pol->sadb_x_policy_len*8; 3330 p += pol->sadb_x_policy_len*8;
3306 sec_ctx = (struct sadb_x_sec_ctx *)p; 3331 sec_ctx = (struct sadb_x_sec_ctx *)p;
3307 if (len < pol->sadb_x_policy_len*8 + 3332 if (len < pol->sadb_x_policy_len*8 +
3308 sec_ctx->sadb_x_sec_len) { 3333 sec_ctx->sadb_x_sec_len*8) {
3309 *dir = -EINVAL; 3334 *dir = -EINVAL;
3310 goto out; 3335 goto out;
3311 } 3336 }
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ec8f6a6485e3..92df832a1896 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1518,9 +1518,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1518 encap = cfg->encap; 1518 encap = cfg->encap;
1519 1519
1520 /* Quick sanity checks */ 1520 /* Quick sanity checks */
1521 err = -EPROTONOSUPPORT;
1522 if (sk->sk_type != SOCK_DGRAM) {
1523 pr_debug("tunl %hu: fd %d wrong socket type\n",
1524 tunnel_id, fd);
1525 goto err;
1526 }
1521 switch (encap) { 1527 switch (encap) {
1522 case L2TP_ENCAPTYPE_UDP: 1528 case L2TP_ENCAPTYPE_UDP:
1523 err = -EPROTONOSUPPORT;
1524 if (sk->sk_protocol != IPPROTO_UDP) { 1529 if (sk->sk_protocol != IPPROTO_UDP) {
1525 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", 1530 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1526 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); 1531 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
@@ -1528,7 +1533,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1528 } 1533 }
1529 break; 1534 break;
1530 case L2TP_ENCAPTYPE_IP: 1535 case L2TP_ENCAPTYPE_IP:
1531 err = -EPROTONOSUPPORT;
1532 if (sk->sk_protocol != IPPROTO_L2TP) { 1536 if (sk->sk_protocol != IPPROTO_L2TP) {
1533 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", 1537 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1534 tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); 1538 tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 67f2e72723b2..2764c4bd072c 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -606,6 +606,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
606 lock_sock(sk); 606 lock_sock(sk);
607 607
608 error = -EINVAL; 608 error = -EINVAL;
609
610 if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
611 sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
612 sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
613 sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
614 goto end;
615
609 if (sp->sa_protocol != PX_PROTO_OL2TP) 616 if (sp->sa_protocol != PX_PROTO_OL2TP)
610 goto end; 617 goto end;
611 618
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index bb8edb9ef506..83e8a295c806 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -197,9 +197,19 @@ static int llc_ui_release(struct socket *sock)
197 llc->laddr.lsap, llc->daddr.lsap); 197 llc->laddr.lsap, llc->daddr.lsap);
198 if (!llc_send_disc(sk)) 198 if (!llc_send_disc(sk))
199 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); 199 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
200 if (!sock_flag(sk, SOCK_ZAPPED)) 200 if (!sock_flag(sk, SOCK_ZAPPED)) {
201 struct llc_sap *sap = llc->sap;
202
203 /* Hold this for release_sock(), so that llc_backlog_rcv()
204 * could still use it.
205 */
206 llc_sap_hold(sap);
201 llc_sap_remove_socket(llc->sap, sk); 207 llc_sap_remove_socket(llc->sap, sk);
202 release_sock(sk); 208 release_sock(sk);
209 llc_sap_put(sap);
210 } else {
211 release_sock(sk);
212 }
203 if (llc->dev) 213 if (llc->dev)
204 dev_put(llc->dev); 214 dev_put(llc->dev);
205 sock_put(sk); 215 sock_put(sk);
@@ -309,6 +319,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
309 int rc = -EINVAL; 319 int rc = -EINVAL;
310 320
311 dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); 321 dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
322
323 lock_sock(sk);
312 if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) 324 if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
313 goto out; 325 goto out;
314 rc = -EAFNOSUPPORT; 326 rc = -EAFNOSUPPORT;
@@ -380,6 +392,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
380out_put: 392out_put:
381 llc_sap_put(sap); 393 llc_sap_put(sap);
382out: 394out:
395 release_sock(sk);
383 return rc; 396 return rc;
384} 397}
385 398
@@ -913,6 +926,9 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
913 if (size > llc->dev->mtu) 926 if (size > llc->dev->mtu)
914 size = llc->dev->mtu; 927 size = llc->dev->mtu;
915 copied = size - hdrlen; 928 copied = size - hdrlen;
929 rc = -EINVAL;
930 if (copied < 0)
931 goto release;
916 release_sock(sk); 932 release_sock(sk);
917 skb = sock_alloc_send_skb(sk, size, noblock, &rc); 933 skb = sock_alloc_send_skb(sk, size, noblock, &rc);
918 lock_sock(sk); 934 lock_sock(sk);
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index ea225bd2672c..4b60f68cb492 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -389,7 +389,7 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
389 llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); 389 llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
390 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); 390 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
391 if (likely(!rc)) { 391 if (likely(!rc)) {
392 llc_conn_send_pdu(sk, skb); 392 rc = llc_conn_send_pdu(sk, skb);
393 llc_conn_ac_inc_vs_by_1(sk, skb); 393 llc_conn_ac_inc_vs_by_1(sk, skb);
394 } 394 }
395 return rc; 395 return rc;
@@ -916,7 +916,7 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
916 llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); 916 llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
917 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); 917 rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
918 if (likely(!rc)) { 918 if (likely(!rc)) {
919 llc_conn_send_pdu(sk, skb); 919 rc = llc_conn_send_pdu(sk, skb);
920 llc_conn_ac_inc_vs_by_1(sk, skb); 920 llc_conn_ac_inc_vs_by_1(sk, skb);
921 } 921 }
922 return rc; 922 return rc;
@@ -935,14 +935,17 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
935int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb) 935int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb)
936{ 936{
937 struct llc_sock *llc = llc_sk(sk); 937 struct llc_sock *llc = llc_sk(sk);
938 int ret;
938 939
939 if (llc->ack_must_be_send) { 940 if (llc->ack_must_be_send) {
940 llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); 941 ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
941 llc->ack_must_be_send = 0 ; 942 llc->ack_must_be_send = 0 ;
942 llc->ack_pf = 0; 943 llc->ack_pf = 0;
943 } else 944 } else {
944 llc_conn_ac_send_i_cmd_p_set_0(sk, skb); 945 ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
945 return 0; 946 }
947
948 return ret;
946} 949}
947 950
948/** 951/**
@@ -1096,14 +1099,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
1096 1099
1097int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb) 1100int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
1098{ 1101{
1099 struct llc_sock *llc = llc_sk(sk); 1102 llc_sk_stop_all_timers(sk, false);
1100
1101 del_timer(&llc->pf_cycle_timer.timer);
1102 del_timer(&llc->ack_timer.timer);
1103 del_timer(&llc->rej_sent_timer.timer);
1104 del_timer(&llc->busy_state_timer.timer);
1105 llc->ack_must_be_send = 0;
1106 llc->ack_pf = 0;
1107 return 0; 1103 return 0;
1108} 1104}
1109 1105
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 8bc5a1bd2d45..79c346fd859b 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -30,7 +30,7 @@
30#endif 30#endif
31 31
32static int llc_find_offset(int state, int ev_type); 32static int llc_find_offset(int state, int ev_type);
33static void llc_conn_send_pdus(struct sock *sk); 33static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
34static int llc_conn_service(struct sock *sk, struct sk_buff *skb); 34static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
35static int llc_exec_conn_trans_actions(struct sock *sk, 35static int llc_exec_conn_trans_actions(struct sock *sk,
36 struct llc_conn_state_trans *trans, 36 struct llc_conn_state_trans *trans,
@@ -193,11 +193,11 @@ out_skb_put:
193 return rc; 193 return rc;
194} 194}
195 195
196void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) 196int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
197{ 197{
198 /* queue PDU to send to MAC layer */ 198 /* queue PDU to send to MAC layer */
199 skb_queue_tail(&sk->sk_write_queue, skb); 199 skb_queue_tail(&sk->sk_write_queue, skb);
200 llc_conn_send_pdus(sk); 200 return llc_conn_send_pdus(sk, skb);
201} 201}
202 202
203/** 203/**
@@ -255,7 +255,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
255 if (howmany_resend > 0) 255 if (howmany_resend > 0)
256 llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; 256 llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
257 /* any PDUs to re-send are queued up; start sending to MAC */ 257 /* any PDUs to re-send are queued up; start sending to MAC */
258 llc_conn_send_pdus(sk); 258 llc_conn_send_pdus(sk, NULL);
259out:; 259out:;
260} 260}
261 261
@@ -296,7 +296,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
296 if (howmany_resend > 0) 296 if (howmany_resend > 0)
297 llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; 297 llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
298 /* any PDUs to re-send are queued up; start sending to MAC */ 298 /* any PDUs to re-send are queued up; start sending to MAC */
299 llc_conn_send_pdus(sk); 299 llc_conn_send_pdus(sk, NULL);
300out:; 300out:;
301} 301}
302 302
@@ -340,12 +340,16 @@ out:
340/** 340/**
341 * llc_conn_send_pdus - Sends queued PDUs 341 * llc_conn_send_pdus - Sends queued PDUs
342 * @sk: active connection 342 * @sk: active connection
343 * @hold_skb: the skb held by caller, or NULL if does not care
343 * 344 *
344 * Sends queued pdus to MAC layer for transmission. 345 * Sends queued pdus to MAC layer for transmission. When @hold_skb is
346 * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
347 * successfully, or 1 for failure.
345 */ 348 */
346static void llc_conn_send_pdus(struct sock *sk) 349static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
347{ 350{
348 struct sk_buff *skb; 351 struct sk_buff *skb;
352 int ret = 0;
349 353
350 while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) { 354 while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
351 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 355 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
@@ -357,10 +361,20 @@ static void llc_conn_send_pdus(struct sock *sk)
357 skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb); 361 skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
358 if (!skb2) 362 if (!skb2)
359 break; 363 break;
360 skb = skb2; 364 dev_queue_xmit(skb2);
365 } else {
366 bool is_target = skb == hold_skb;
367 int rc;
368
369 if (is_target)
370 skb_get(skb);
371 rc = dev_queue_xmit(skb);
372 if (is_target)
373 ret = rc;
361 } 374 }
362 dev_queue_xmit(skb);
363 } 375 }
376
377 return ret;
364} 378}
365 379
366/** 380/**
@@ -951,6 +965,26 @@ out:
951 return sk; 965 return sk;
952} 966}
953 967
968void llc_sk_stop_all_timers(struct sock *sk, bool sync)
969{
970 struct llc_sock *llc = llc_sk(sk);
971
972 if (sync) {
973 del_timer_sync(&llc->pf_cycle_timer.timer);
974 del_timer_sync(&llc->ack_timer.timer);
975 del_timer_sync(&llc->rej_sent_timer.timer);
976 del_timer_sync(&llc->busy_state_timer.timer);
977 } else {
978 del_timer(&llc->pf_cycle_timer.timer);
979 del_timer(&llc->ack_timer.timer);
980 del_timer(&llc->rej_sent_timer.timer);
981 del_timer(&llc->busy_state_timer.timer);
982 }
983
984 llc->ack_must_be_send = 0;
985 llc->ack_pf = 0;
986}
987
954/** 988/**
955 * llc_sk_free - Frees a LLC socket 989 * llc_sk_free - Frees a LLC socket
956 * @sk - socket to free 990 * @sk - socket to free
@@ -963,7 +997,7 @@ void llc_sk_free(struct sock *sk)
963 997
964 llc->state = LLC_CONN_OUT_OF_SVC; 998 llc->state = LLC_CONN_OUT_OF_SVC;
965 /* Stop all (possibly) running timers */ 999 /* Stop all (possibly) running timers */
966 llc_conn_ac_stop_all_timers(sk, NULL); 1000 llc_sk_stop_all_timers(sk, true);
967#ifdef DEBUG_LLC_CONN_ALLOC 1001#ifdef DEBUG_LLC_CONN_ALLOC
968 printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__, 1002 printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
969 skb_queue_len(&llc->pdu_unack_q), 1003 skb_queue_len(&llc->pdu_unack_q),
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index cb6439fd62dd..b55ce0dfb742 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2943,7 +2943,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
2943 } 2943 }
2944 if (beacon->probe_resp_len) { 2944 if (beacon->probe_resp_len) {
2945 new_beacon->probe_resp_len = beacon->probe_resp_len; 2945 new_beacon->probe_resp_len = beacon->probe_resp_len;
2946 beacon->probe_resp = pos; 2946 new_beacon->probe_resp = pos;
2947 memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); 2947 memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
2948 pos += beacon->probe_resp_len; 2948 pos += beacon->probe_resp_len;
2949 } 2949 }
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index bcb0a1b64556..58588a610b05 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1441,7 +1441,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
1441 break; 1441 break;
1442 case NL80211_IFTYPE_UNSPECIFIED: 1442 case NL80211_IFTYPE_UNSPECIFIED:
1443 case NUM_NL80211_IFTYPES: 1443 case NUM_NL80211_IFTYPES:
1444 BUG(); 1444 WARN_ON(1);
1445 break; 1445 break;
1446 } 1446 }
1447 1447
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index c6be0b4f4058..e68a409fc351 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -776,7 +776,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
776 struct mesh_path *mpath; 776 struct mesh_path *mpath;
777 u8 ttl, flags, hopcount; 777 u8 ttl, flags, hopcount;
778 const u8 *orig_addr; 778 const u8 *orig_addr;
779 u32 orig_sn, metric, metric_txsta, interval; 779 u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
780 bool root_is_gate; 780 bool root_is_gate;
781 781
782 ttl = rann->rann_ttl; 782 ttl = rann->rann_ttl;
@@ -787,7 +787,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
787 interval = le32_to_cpu(rann->rann_interval); 787 interval = le32_to_cpu(rann->rann_interval);
788 hopcount = rann->rann_hopcount; 788 hopcount = rann->rann_hopcount;
789 hopcount++; 789 hopcount++;
790 metric = le32_to_cpu(rann->rann_metric); 790 orig_metric = le32_to_cpu(rann->rann_metric);
791 791
792 /* Ignore our own RANNs */ 792 /* Ignore our own RANNs */
793 if (ether_addr_equal(orig_addr, sdata->vif.addr)) 793 if (ether_addr_equal(orig_addr, sdata->vif.addr))
@@ -804,7 +804,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
804 return; 804 return;
805 } 805 }
806 806
807 metric_txsta = airtime_link_metric_get(local, sta); 807 last_hop_metric = airtime_link_metric_get(local, sta);
808 new_metric = orig_metric + last_hop_metric;
809 if (new_metric < orig_metric)
810 new_metric = MAX_METRIC;
808 811
809 mpath = mesh_path_lookup(sdata, orig_addr); 812 mpath = mesh_path_lookup(sdata, orig_addr);
810 if (!mpath) { 813 if (!mpath) {
@@ -817,7 +820,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
817 } 820 }
818 821
819 if (!(SN_LT(mpath->sn, orig_sn)) && 822 if (!(SN_LT(mpath->sn, orig_sn)) &&
820 !(mpath->sn == orig_sn && metric < mpath->rann_metric)) { 823 !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
821 rcu_read_unlock(); 824 rcu_read_unlock();
822 return; 825 return;
823 } 826 }
@@ -835,7 +838,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
835 } 838 }
836 839
837 mpath->sn = orig_sn; 840 mpath->sn = orig_sn;
838 mpath->rann_metric = metric + metric_txsta; 841 mpath->rann_metric = new_metric;
839 mpath->is_root = true; 842 mpath->is_root = true;
840 /* Recording RANNs sender address to send individually 843 /* Recording RANNs sender address to send individually
841 * addressed PREQs destined for root mesh STA */ 844 * addressed PREQs destined for root mesh STA */
@@ -855,7 +858,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
855 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, 858 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
856 orig_sn, 0, NULL, 0, broadcast_addr, 859 orig_sn, 0, NULL, 0, broadcast_addr,
857 hopcount, ttl, interval, 860 hopcount, ttl, interval,
858 metric + metric_txsta, 0, sdata); 861 new_metric, 0, sdata);
859 } 862 }
860 863
861 rcu_read_unlock(); 864 rcu_read_unlock();
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 23095d5e0199..005cd8796505 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4326,6 +4326,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
4326 if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data)) 4326 if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
4327 return -EINVAL; 4327 return -EINVAL;
4328 4328
4329 /* If a reconfig is happening, bail out */
4330 if (local->in_reconfig)
4331 return -EBUSY;
4332
4329 if (assoc) { 4333 if (assoc) {
4330 rcu_read_lock(); 4334 rcu_read_lock();
4331 have_sta = sta_info_get(sdata, cbss->bssid); 4335 have_sta = sta_info_get(sdata, cbss->bssid);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3bcabc2ba4a6..f8406c37fc1d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3367,6 +3367,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3367 } 3367 }
3368 return true; 3368 return true;
3369 case NL80211_IFTYPE_MESH_POINT: 3369 case NL80211_IFTYPE_MESH_POINT:
3370 if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
3371 return false;
3370 if (multicast) 3372 if (multicast)
3371 return true; 3373 return true;
3372 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3374 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 5bad05e9af90..45fb1abdb265 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -194,6 +194,7 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
194 } 194 }
195 195
196 if (ieee80211_is_action(mgmt->frame_control) && 196 if (ieee80211_is_action(mgmt->frame_control) &&
197 !ieee80211_has_protected(mgmt->frame_control) &&
197 mgmt->u.action.category == WLAN_CATEGORY_HT && 198 mgmt->u.action.category == WLAN_CATEGORY_HT &&
198 mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS && 199 mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS &&
199 ieee80211_sdata_running(sdata)) { 200 ieee80211_sdata_running(sdata)) {
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 33344f5a66a8..ec26a84b00e2 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2663,8 +2663,9 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
2663 2663
2664 rate = cfg80211_calculate_bitrate(&ri); 2664 rate = cfg80211_calculate_bitrate(&ri);
2665 if (WARN_ONCE(!rate, 2665 if (WARN_ONCE(!rate,
2666 "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n", 2666 "Invalid bitrate: flags=0x%llx, idx=%d, vht_nss=%d\n",
2667 status->flag, status->rate_idx, status->vht_nss)) 2667 (unsigned long long)status->flag, status->rate_idx,
2668 status->vht_nss))
2668 return 0; 2669 return 0;
2669 2670
2670 /* rewind from end of MPDU */ 2671 /* rewind from end of MPDU */
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index efa3f48f1ec5..73e8f347802e 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -293,7 +293,8 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
293 return RX_DROP_UNUSABLE; 293 return RX_DROP_UNUSABLE;
294 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); 294 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
295 /* remove ICV */ 295 /* remove ICV */
296 if (pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN)) 296 if (!(status->flag & RX_FLAG_ICV_STRIPPED) &&
297 pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
297 return RX_DROP_UNUSABLE; 298 return RX_DROP_UNUSABLE;
298 } 299 }
299 300
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index e19ea1c53afa..cb439e06919f 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -298,7 +298,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
298 return RX_DROP_UNUSABLE; 298 return RX_DROP_UNUSABLE;
299 299
300 /* Trim ICV */ 300 /* Trim ICV */
301 skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); 301 if (!(status->flag & RX_FLAG_ICV_STRIPPED))
302 skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
302 303
303 /* Remove IV */ 304 /* Remove IV */
304 memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen); 305 memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen);
@@ -508,25 +509,31 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
508 !ieee80211_is_robust_mgmt_frame(skb)) 509 !ieee80211_is_robust_mgmt_frame(skb))
509 return RX_CONTINUE; 510 return RX_CONTINUE;
510 511
511 data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
512 if (!rx->sta || data_len < 0)
513 return RX_DROP_UNUSABLE;
514
515 if (status->flag & RX_FLAG_DECRYPTED) { 512 if (status->flag & RX_FLAG_DECRYPTED) {
516 if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN)) 513 if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN))
517 return RX_DROP_UNUSABLE; 514 return RX_DROP_UNUSABLE;
515 if (status->flag & RX_FLAG_MIC_STRIPPED)
516 mic_len = 0;
518 } else { 517 } else {
519 if (skb_linearize(rx->skb)) 518 if (skb_linearize(rx->skb))
520 return RX_DROP_UNUSABLE; 519 return RX_DROP_UNUSABLE;
521 } 520 }
522 521
522 data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
523 if (!rx->sta || data_len < 0)
524 return RX_DROP_UNUSABLE;
525
523 if (!(status->flag & RX_FLAG_PN_VALIDATED)) { 526 if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
527 int res;
528
524 ccmp_hdr2pn(pn, skb->data + hdrlen); 529 ccmp_hdr2pn(pn, skb->data + hdrlen);
525 530
526 queue = rx->security_idx; 531 queue = rx->security_idx;
527 532
528 if (memcmp(pn, key->u.ccmp.rx_pn[queue], 533 res = memcmp(pn, key->u.ccmp.rx_pn[queue],
529 IEEE80211_CCMP_PN_LEN) <= 0) { 534 IEEE80211_CCMP_PN_LEN);
535 if (res < 0 ||
536 (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) {
530 key->u.ccmp.replays++; 537 key->u.ccmp.replays++;
531 return RX_DROP_UNUSABLE; 538 return RX_DROP_UNUSABLE;
532 } 539 }
@@ -724,8 +731,7 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
724 struct sk_buff *skb = rx->skb; 731 struct sk_buff *skb = rx->skb;
725 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 732 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
726 u8 pn[IEEE80211_GCMP_PN_LEN]; 733 u8 pn[IEEE80211_GCMP_PN_LEN];
727 int data_len; 734 int data_len, queue, mic_len = IEEE80211_GCMP_MIC_LEN;
728 int queue;
729 735
730 hdrlen = ieee80211_hdrlen(hdr->frame_control); 736 hdrlen = ieee80211_hdrlen(hdr->frame_control);
731 737
@@ -733,26 +739,31 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
733 !ieee80211_is_robust_mgmt_frame(skb)) 739 !ieee80211_is_robust_mgmt_frame(skb))
734 return RX_CONTINUE; 740 return RX_CONTINUE;
735 741
736 data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN -
737 IEEE80211_GCMP_MIC_LEN;
738 if (!rx->sta || data_len < 0)
739 return RX_DROP_UNUSABLE;
740
741 if (status->flag & RX_FLAG_DECRYPTED) { 742 if (status->flag & RX_FLAG_DECRYPTED) {
742 if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN)) 743 if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN))
743 return RX_DROP_UNUSABLE; 744 return RX_DROP_UNUSABLE;
745 if (status->flag & RX_FLAG_MIC_STRIPPED)
746 mic_len = 0;
744 } else { 747 } else {
745 if (skb_linearize(rx->skb)) 748 if (skb_linearize(rx->skb))
746 return RX_DROP_UNUSABLE; 749 return RX_DROP_UNUSABLE;
747 } 750 }
748 751
752 data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
753 if (!rx->sta || data_len < 0)
754 return RX_DROP_UNUSABLE;
755
749 if (!(status->flag & RX_FLAG_PN_VALIDATED)) { 756 if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
757 int res;
758
750 gcmp_hdr2pn(pn, skb->data + hdrlen); 759 gcmp_hdr2pn(pn, skb->data + hdrlen);
751 760
752 queue = rx->security_idx; 761 queue = rx->security_idx;
753 762
754 if (memcmp(pn, key->u.gcmp.rx_pn[queue], 763 res = memcmp(pn, key->u.gcmp.rx_pn[queue],
755 IEEE80211_GCMP_PN_LEN) <= 0) { 764 IEEE80211_GCMP_PN_LEN);
765 if (res < 0 ||
766 (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) {
756 key->u.gcmp.replays++; 767 key->u.gcmp.replays++;
757 return RX_DROP_UNUSABLE; 768 return RX_DROP_UNUSABLE;
758 } 769 }
@@ -776,7 +787,7 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
776 } 787 }
777 788
778 /* Remove GCMP header and MIC */ 789 /* Remove GCMP header and MIC */
779 if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN)) 790 if (pskb_trim(skb, skb->len - mic_len))
780 return RX_DROP_UNUSABLE; 791 return RX_DROP_UNUSABLE;
781 memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen); 792 memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen);
782 skb_pull(skb, IEEE80211_GCMP_HDR_LEN); 793 skb_pull(skb, IEEE80211_GCMP_HDR_LEN);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 52cfc4478511..c2ce7dec5198 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -7,6 +7,7 @@
7#include <linux/if_arp.h> 7#include <linux/if_arp.h>
8#include <linux/ipv6.h> 8#include <linux/ipv6.h>
9#include <linux/mpls.h> 9#include <linux/mpls.h>
10#include <linux/nospec.h>
10#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
11#include <net/ip.h> 12#include <net/ip.h>
12#include <net/dst.h> 13#include <net/dst.h>
@@ -714,6 +715,22 @@ errout:
714 return err; 715 return err;
715} 716}
716 717
718static bool mpls_label_ok(struct net *net, unsigned int *index)
719{
720 bool is_ok = true;
721
722 /* Reserved labels may not be set */
723 if (*index < MPLS_LABEL_FIRST_UNRESERVED)
724 is_ok = false;
725
726 /* The full 20 bit range may not be supported. */
727 if (is_ok && *index >= net->mpls.platform_labels)
728 is_ok = false;
729
730 *index = array_index_nospec(*index, net->mpls.platform_labels);
731 return is_ok;
732}
733
717static int mpls_route_add(struct mpls_route_config *cfg) 734static int mpls_route_add(struct mpls_route_config *cfg)
718{ 735{
719 struct mpls_route __rcu **platform_label; 736 struct mpls_route __rcu **platform_label;
@@ -732,12 +749,7 @@ static int mpls_route_add(struct mpls_route_config *cfg)
732 index = find_free_label(net); 749 index = find_free_label(net);
733 } 750 }
734 751
735 /* Reserved labels may not be set */ 752 if (!mpls_label_ok(net, &index))
736 if (index < MPLS_LABEL_FIRST_UNRESERVED)
737 goto errout;
738
739 /* The full 20 bit range may not be supported. */
740 if (index >= net->mpls.platform_labels)
741 goto errout; 753 goto errout;
742 754
743 /* Append makes no sense with mpls */ 755 /* Append makes no sense with mpls */
@@ -798,12 +810,7 @@ static int mpls_route_del(struct mpls_route_config *cfg)
798 810
799 index = cfg->rc_label; 811 index = cfg->rc_label;
800 812
801 /* Reserved labels may not be removed */ 813 if (!mpls_label_ok(net, &index))
802 if (index < MPLS_LABEL_FIRST_UNRESERVED)
803 goto errout;
804
805 /* The full 20 bit range may not be supported */
806 if (index >= net->mpls.platform_labels)
807 goto errout; 814 goto errout;
808 815
809 mpls_route_update(net, index, NULL, &cfg->rc_nlinfo); 816 mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
@@ -1162,10 +1169,9 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1162 &cfg->rc_label)) 1169 &cfg->rc_label))
1163 goto errout; 1170 goto errout;
1164 1171
1165 /* Reserved labels may not be set */ 1172 if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
1166 if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED) 1173 &cfg->rc_label))
1167 goto errout; 1174 goto errout;
1168
1169 break; 1175 break;
1170 } 1176 }
1171 case RTA_VIA: 1177 case RTA_VIA:
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 0328f7250693..299edc6add5a 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -605,17 +605,13 @@ static const struct file_operations ip_vs_app_fops = {
605 605
606int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs) 606int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs)
607{ 607{
608 struct net *net = ipvs->net;
609
610 INIT_LIST_HEAD(&ipvs->app_list); 608 INIT_LIST_HEAD(&ipvs->app_list);
611 proc_create("ip_vs_app", 0, net->proc_net, &ip_vs_app_fops); 609 proc_create("ip_vs_app", 0, ipvs->net->proc_net, &ip_vs_app_fops);
612 return 0; 610 return 0;
613} 611}
614 612
615void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs) 613void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs)
616{ 614{
617 struct net *net = ipvs->net;
618
619 unregister_ip_vs_app(ipvs, NULL /* all */); 615 unregister_ip_vs_app(ipvs, NULL /* all */);
620 remove_proc_entry("ip_vs_app", net->proc_net); 616 remove_proc_entry("ip_vs_app", ipvs->net->proc_net);
621} 617}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 2c937c16dc27..3167ec76903a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2349,14 +2349,12 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2349 struct ipvs_sync_daemon_cfg cfg; 2349 struct ipvs_sync_daemon_cfg cfg;
2350 2350
2351 memset(&cfg, 0, sizeof(cfg)); 2351 memset(&cfg, 0, sizeof(cfg));
2352 strlcpy(cfg.mcast_ifn, dm->mcast_ifn, 2352 ret = -EINVAL;
2353 sizeof(cfg.mcast_ifn)); 2353 if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
2354 sizeof(cfg.mcast_ifn)) <= 0)
2355 goto out_dec;
2354 cfg.syncid = dm->syncid; 2356 cfg.syncid = dm->syncid;
2355 rtnl_lock();
2356 mutex_lock(&ipvs->sync_mutex);
2357 ret = start_sync_thread(ipvs, &cfg, dm->state); 2357 ret = start_sync_thread(ipvs, &cfg, dm->state);
2358 mutex_unlock(&ipvs->sync_mutex);
2359 rtnl_unlock();
2360 } else { 2358 } else {
2361 mutex_lock(&ipvs->sync_mutex); 2359 mutex_lock(&ipvs->sync_mutex);
2362 ret = stop_sync_thread(ipvs, dm->state); 2360 ret = stop_sync_thread(ipvs, dm->state);
@@ -2392,12 +2390,19 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2392 } 2390 }
2393 } 2391 }
2394 2392
2393 if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) &&
2394 strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) ==
2395 IP_VS_SCHEDNAME_MAXLEN) {
2396 ret = -EINVAL;
2397 goto out_unlock;
2398 }
2399
2395 /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */ 2400 /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
2396 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP && 2401 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
2397 usvc.protocol != IPPROTO_SCTP) { 2402 usvc.protocol != IPPROTO_SCTP) {
2398 pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n", 2403 pr_err("set_ctl: invalid protocol: %d %pI4:%d\n",
2399 usvc.protocol, &usvc.addr.ip, 2404 usvc.protocol, &usvc.addr.ip,
2400 ntohs(usvc.port), usvc.sched_name); 2405 ntohs(usvc.port));
2401 ret = -EFAULT; 2406 ret = -EFAULT;
2402 goto out_unlock; 2407 goto out_unlock;
2403 } 2408 }
@@ -2826,7 +2831,7 @@ static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
2826static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = { 2831static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
2827 [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 }, 2832 [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
2828 [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING, 2833 [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
2829 .len = IP_VS_IFNAME_MAXLEN }, 2834 .len = IP_VS_IFNAME_MAXLEN - 1 },
2830 [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 }, 2835 [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
2831 [IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 }, 2836 [IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 },
2832 [IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 }, 2837 [IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 },
@@ -2844,7 +2849,7 @@ static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
2844 [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 }, 2849 [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
2845 [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 }, 2850 [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
2846 [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING, 2851 [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
2847 .len = IP_VS_SCHEDNAME_MAXLEN }, 2852 .len = IP_VS_SCHEDNAME_MAXLEN - 1 },
2848 [IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING, 2853 [IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING,
2849 .len = IP_VS_PENAME_MAXLEN }, 2854 .len = IP_VS_PENAME_MAXLEN },
2850 [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY, 2855 [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
@@ -3435,12 +3440,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
3435 if (ipvs->mixed_address_family_dests > 0) 3440 if (ipvs->mixed_address_family_dests > 0)
3436 return -EINVAL; 3441 return -EINVAL;
3437 3442
3438 rtnl_lock();
3439 mutex_lock(&ipvs->sync_mutex);
3440 ret = start_sync_thread(ipvs, &c, 3443 ret = start_sync_thread(ipvs, &c,
3441 nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); 3444 nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
3442 mutex_unlock(&ipvs->sync_mutex);
3443 rtnl_unlock();
3444 return ret; 3445 return ret;
3445} 3446}
3446 3447
@@ -3951,7 +3952,6 @@ static struct notifier_block ip_vs_dst_notifier = {
3951 3952
3952int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs) 3953int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
3953{ 3954{
3954 struct net *net = ipvs->net;
3955 int i, idx; 3955 int i, idx;
3956 3956
3957 /* Initialize rs_table */ 3957 /* Initialize rs_table */
@@ -3978,9 +3978,9 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
3978 3978
3979 spin_lock_init(&ipvs->tot_stats.lock); 3979 spin_lock_init(&ipvs->tot_stats.lock);
3980 3980
3981 proc_create("ip_vs", 0, net->proc_net, &ip_vs_info_fops); 3981 proc_create("ip_vs", 0, ipvs->net->proc_net, &ip_vs_info_fops);
3982 proc_create("ip_vs_stats", 0, net->proc_net, &ip_vs_stats_fops); 3982 proc_create("ip_vs_stats", 0, ipvs->net->proc_net, &ip_vs_stats_fops);
3983 proc_create("ip_vs_stats_percpu", 0, net->proc_net, 3983 proc_create("ip_vs_stats_percpu", 0, ipvs->net->proc_net,
3984 &ip_vs_stats_percpu_fops); 3984 &ip_vs_stats_percpu_fops);
3985 3985
3986 if (ip_vs_control_net_init_sysctl(ipvs)) 3986 if (ip_vs_control_net_init_sysctl(ipvs))
@@ -3995,13 +3995,11 @@ err:
3995 3995
3996void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs) 3996void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
3997{ 3997{
3998 struct net *net = ipvs->net;
3999
4000 ip_vs_trash_cleanup(ipvs); 3998 ip_vs_trash_cleanup(ipvs);
4001 ip_vs_control_net_cleanup_sysctl(ipvs); 3999 ip_vs_control_net_cleanup_sysctl(ipvs);
4002 remove_proc_entry("ip_vs_stats_percpu", net->proc_net); 4000 remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
4003 remove_proc_entry("ip_vs_stats", net->proc_net); 4001 remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
4004 remove_proc_entry("ip_vs", net->proc_net); 4002 remove_proc_entry("ip_vs", ipvs->net->proc_net);
4005 free_percpu(ipvs->tot_stats.cpustats); 4003 free_percpu(ipvs->tot_stats.cpustats);
4006} 4004}
4007 4005
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 1b07578bedf3..cec7234b7a1d 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -48,6 +48,7 @@
48#include <linux/kthread.h> 48#include <linux/kthread.h>
49#include <linux/wait.h> 49#include <linux/wait.h>
50#include <linux/kernel.h> 50#include <linux/kernel.h>
51#include <linux/sched.h>
51 52
52#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */ 53#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
53 54
@@ -1356,15 +1357,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
1356/* 1357/*
1357 * Specifiy default interface for outgoing multicasts 1358 * Specifiy default interface for outgoing multicasts
1358 */ 1359 */
1359static int set_mcast_if(struct sock *sk, char *ifname) 1360static int set_mcast_if(struct sock *sk, struct net_device *dev)
1360{ 1361{
1361 struct net_device *dev;
1362 struct inet_sock *inet = inet_sk(sk); 1362 struct inet_sock *inet = inet_sk(sk);
1363 struct net *net = sock_net(sk);
1364
1365 dev = __dev_get_by_name(net, ifname);
1366 if (!dev)
1367 return -ENODEV;
1368 1363
1369 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) 1364 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
1370 return -EINVAL; 1365 return -EINVAL;
@@ -1392,19 +1387,14 @@ static int set_mcast_if(struct sock *sk, char *ifname)
1392 * in the in_addr structure passed in as a parameter. 1387 * in the in_addr structure passed in as a parameter.
1393 */ 1388 */
1394static int 1389static int
1395join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) 1390join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
1396{ 1391{
1397 struct net *net = sock_net(sk);
1398 struct ip_mreqn mreq; 1392 struct ip_mreqn mreq;
1399 struct net_device *dev;
1400 int ret; 1393 int ret;
1401 1394
1402 memset(&mreq, 0, sizeof(mreq)); 1395 memset(&mreq, 0, sizeof(mreq));
1403 memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr)); 1396 memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
1404 1397
1405 dev = __dev_get_by_name(net, ifname);
1406 if (!dev)
1407 return -ENODEV;
1408 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) 1398 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
1409 return -EINVAL; 1399 return -EINVAL;
1410 1400
@@ -1419,15 +1409,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
1419 1409
1420#ifdef CONFIG_IP_VS_IPV6 1410#ifdef CONFIG_IP_VS_IPV6
1421static int join_mcast_group6(struct sock *sk, struct in6_addr *addr, 1411static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
1422 char *ifname) 1412 struct net_device *dev)
1423{ 1413{
1424 struct net *net = sock_net(sk);
1425 struct net_device *dev;
1426 int ret; 1414 int ret;
1427 1415
1428 dev = __dev_get_by_name(net, ifname);
1429 if (!dev)
1430 return -ENODEV;
1431 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) 1416 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
1432 return -EINVAL; 1417 return -EINVAL;
1433 1418
@@ -1439,24 +1424,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
1439} 1424}
1440#endif 1425#endif
1441 1426
1442static int bind_mcastif_addr(struct socket *sock, char *ifname) 1427static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
1443{ 1428{
1444 struct net *net = sock_net(sock->sk);
1445 struct net_device *dev;
1446 __be32 addr; 1429 __be32 addr;
1447 struct sockaddr_in sin; 1430 struct sockaddr_in sin;
1448 1431
1449 dev = __dev_get_by_name(net, ifname);
1450 if (!dev)
1451 return -ENODEV;
1452
1453 addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); 1432 addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
1454 if (!addr) 1433 if (!addr)
1455 pr_err("You probably need to specify IP address on " 1434 pr_err("You probably need to specify IP address on "
1456 "multicast interface.\n"); 1435 "multicast interface.\n");
1457 1436
1458 IP_VS_DBG(7, "binding socket with (%s) %pI4\n", 1437 IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
1459 ifname, &addr); 1438 dev->name, &addr);
1460 1439
1461 /* Now bind the socket with the address of multicast interface */ 1440 /* Now bind the socket with the address of multicast interface */
1462 sin.sin_family = AF_INET; 1441 sin.sin_family = AF_INET;
@@ -1489,7 +1468,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
1489/* 1468/*
1490 * Set up sending multicast socket over UDP 1469 * Set up sending multicast socket over UDP
1491 */ 1470 */
1492static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id) 1471static int make_send_sock(struct netns_ipvs *ipvs, int id,
1472 struct net_device *dev, struct socket **sock_ret)
1493{ 1473{
1494 /* multicast addr */ 1474 /* multicast addr */
1495 union ipvs_sockaddr mcast_addr; 1475 union ipvs_sockaddr mcast_addr;
@@ -1501,9 +1481,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
1501 IPPROTO_UDP, &sock); 1481 IPPROTO_UDP, &sock);
1502 if (result < 0) { 1482 if (result < 0) {
1503 pr_err("Error during creation of socket; terminating\n"); 1483 pr_err("Error during creation of socket; terminating\n");
1504 return ERR_PTR(result); 1484 goto error;
1505 } 1485 }
1506 result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn); 1486 *sock_ret = sock;
1487 result = set_mcast_if(sock->sk, dev);
1507 if (result < 0) { 1488 if (result < 0) {
1508 pr_err("Error setting outbound mcast interface\n"); 1489 pr_err("Error setting outbound mcast interface\n");
1509 goto error; 1490 goto error;
@@ -1518,7 +1499,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
1518 set_sock_size(sock->sk, 1, result); 1499 set_sock_size(sock->sk, 1, result);
1519 1500
1520 if (AF_INET == ipvs->mcfg.mcast_af) 1501 if (AF_INET == ipvs->mcfg.mcast_af)
1521 result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn); 1502 result = bind_mcastif_addr(sock, dev);
1522 else 1503 else
1523 result = 0; 1504 result = 0;
1524 if (result < 0) { 1505 if (result < 0) {
@@ -1534,19 +1515,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
1534 goto error; 1515 goto error;
1535 } 1516 }
1536 1517
1537 return sock; 1518 return 0;
1538 1519
1539error: 1520error:
1540 sock_release(sock); 1521 return result;
1541 return ERR_PTR(result);
1542} 1522}
1543 1523
1544 1524
1545/* 1525/*
1546 * Set up receiving multicast socket over UDP 1526 * Set up receiving multicast socket over UDP
1547 */ 1527 */
1548static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id, 1528static int make_receive_sock(struct netns_ipvs *ipvs, int id,
1549 int ifindex) 1529 struct net_device *dev, struct socket **sock_ret)
1550{ 1530{
1551 /* multicast addr */ 1531 /* multicast addr */
1552 union ipvs_sockaddr mcast_addr; 1532 union ipvs_sockaddr mcast_addr;
@@ -1558,8 +1538,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
1558 IPPROTO_UDP, &sock); 1538 IPPROTO_UDP, &sock);
1559 if (result < 0) { 1539 if (result < 0) {
1560 pr_err("Error during creation of socket; terminating\n"); 1540 pr_err("Error during creation of socket; terminating\n");
1561 return ERR_PTR(result); 1541 goto error;
1562 } 1542 }
1543 *sock_ret = sock;
1563 /* it is equivalent to the REUSEADDR option in user-space */ 1544 /* it is equivalent to the REUSEADDR option in user-space */
1564 sock->sk->sk_reuse = SK_CAN_REUSE; 1545 sock->sk->sk_reuse = SK_CAN_REUSE;
1565 result = sysctl_sync_sock_size(ipvs); 1546 result = sysctl_sync_sock_size(ipvs);
@@ -1567,7 +1548,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
1567 set_sock_size(sock->sk, 0, result); 1548 set_sock_size(sock->sk, 0, result);
1568 1549
1569 get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id); 1550 get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
1570 sock->sk->sk_bound_dev_if = ifindex; 1551 sock->sk->sk_bound_dev_if = dev->ifindex;
1571 result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen); 1552 result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
1572 if (result < 0) { 1553 if (result < 0) {
1573 pr_err("Error binding to the multicast addr\n"); 1554 pr_err("Error binding to the multicast addr\n");
@@ -1578,21 +1559,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
1578#ifdef CONFIG_IP_VS_IPV6 1559#ifdef CONFIG_IP_VS_IPV6
1579 if (ipvs->bcfg.mcast_af == AF_INET6) 1560 if (ipvs->bcfg.mcast_af == AF_INET6)
1580 result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr, 1561 result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
1581 ipvs->bcfg.mcast_ifn); 1562 dev);
1582 else 1563 else
1583#endif 1564#endif
1584 result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr, 1565 result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
1585 ipvs->bcfg.mcast_ifn); 1566 dev);
1586 if (result < 0) { 1567 if (result < 0) {
1587 pr_err("Error joining to the multicast group\n"); 1568 pr_err("Error joining to the multicast group\n");
1588 goto error; 1569 goto error;
1589 } 1570 }
1590 1571
1591 return sock; 1572 return 0;
1592 1573
1593error: 1574error:
1594 sock_release(sock); 1575 return result;
1595 return ERR_PTR(result);
1596} 1576}
1597 1577
1598 1578
@@ -1777,13 +1757,12 @@ static int sync_thread_backup(void *data)
1777int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, 1757int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1778 int state) 1758 int state)
1779{ 1759{
1780 struct ip_vs_sync_thread_data *tinfo; 1760 struct ip_vs_sync_thread_data *tinfo = NULL;
1781 struct task_struct **array = NULL, *task; 1761 struct task_struct **array = NULL, *task;
1782 struct socket *sock;
1783 struct net_device *dev; 1762 struct net_device *dev;
1784 char *name; 1763 char *name;
1785 int (*threadfn)(void *data); 1764 int (*threadfn)(void *data);
1786 int id, count, hlen; 1765 int id = 0, count, hlen;
1787 int result = -ENOMEM; 1766 int result = -ENOMEM;
1788 u16 mtu, min_mtu; 1767 u16 mtu, min_mtu;
1789 1768
@@ -1791,6 +1770,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1791 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", 1770 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
1792 sizeof(struct ip_vs_sync_conn_v0)); 1771 sizeof(struct ip_vs_sync_conn_v0));
1793 1772
1773 /* Do not hold one mutex and then to block on another */
1774 for (;;) {
1775 rtnl_lock();
1776 if (mutex_trylock(&ipvs->sync_mutex))
1777 break;
1778 rtnl_unlock();
1779 mutex_lock(&ipvs->sync_mutex);
1780 if (rtnl_trylock())
1781 break;
1782 mutex_unlock(&ipvs->sync_mutex);
1783 }
1784
1794 if (!ipvs->sync_state) { 1785 if (!ipvs->sync_state) {
1795 count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX); 1786 count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
1796 ipvs->threads_mask = count - 1; 1787 ipvs->threads_mask = count - 1;
@@ -1809,7 +1800,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1809 dev = __dev_get_by_name(ipvs->net, c->mcast_ifn); 1800 dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
1810 if (!dev) { 1801 if (!dev) {
1811 pr_err("Unknown mcast interface: %s\n", c->mcast_ifn); 1802 pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
1812 return -ENODEV; 1803 result = -ENODEV;
1804 goto out_early;
1813 } 1805 }
1814 hlen = (AF_INET6 == c->mcast_af) ? 1806 hlen = (AF_INET6 == c->mcast_af) ?
1815 sizeof(struct ipv6hdr) + sizeof(struct udphdr) : 1807 sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
@@ -1826,26 +1818,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1826 c->sync_maxlen = mtu - hlen; 1818 c->sync_maxlen = mtu - hlen;
1827 1819
1828 if (state == IP_VS_STATE_MASTER) { 1820 if (state == IP_VS_STATE_MASTER) {
1821 result = -EEXIST;
1829 if (ipvs->ms) 1822 if (ipvs->ms)
1830 return -EEXIST; 1823 goto out_early;
1831 1824
1832 ipvs->mcfg = *c; 1825 ipvs->mcfg = *c;
1833 name = "ipvs-m:%d:%d"; 1826 name = "ipvs-m:%d:%d";
1834 threadfn = sync_thread_master; 1827 threadfn = sync_thread_master;
1835 } else if (state == IP_VS_STATE_BACKUP) { 1828 } else if (state == IP_VS_STATE_BACKUP) {
1829 result = -EEXIST;
1836 if (ipvs->backup_threads) 1830 if (ipvs->backup_threads)
1837 return -EEXIST; 1831 goto out_early;
1838 1832
1839 ipvs->bcfg = *c; 1833 ipvs->bcfg = *c;
1840 name = "ipvs-b:%d:%d"; 1834 name = "ipvs-b:%d:%d";
1841 threadfn = sync_thread_backup; 1835 threadfn = sync_thread_backup;
1842 } else { 1836 } else {
1843 return -EINVAL; 1837 result = -EINVAL;
1838 goto out_early;
1844 } 1839 }
1845 1840
1846 if (state == IP_VS_STATE_MASTER) { 1841 if (state == IP_VS_STATE_MASTER) {
1847 struct ipvs_master_sync_state *ms; 1842 struct ipvs_master_sync_state *ms;
1848 1843
1844 result = -ENOMEM;
1849 ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL); 1845 ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL);
1850 if (!ipvs->ms) 1846 if (!ipvs->ms)
1851 goto out; 1847 goto out;
@@ -1861,39 +1857,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1861 } else { 1857 } else {
1862 array = kzalloc(count * sizeof(struct task_struct *), 1858 array = kzalloc(count * sizeof(struct task_struct *),
1863 GFP_KERNEL); 1859 GFP_KERNEL);
1860 result = -ENOMEM;
1864 if (!array) 1861 if (!array)
1865 goto out; 1862 goto out;
1866 } 1863 }
1867 1864
1868 tinfo = NULL;
1869 for (id = 0; id < count; id++) { 1865 for (id = 0; id < count; id++) {
1870 if (state == IP_VS_STATE_MASTER) 1866 result = -ENOMEM;
1871 sock = make_send_sock(ipvs, id);
1872 else
1873 sock = make_receive_sock(ipvs, id, dev->ifindex);
1874 if (IS_ERR(sock)) {
1875 result = PTR_ERR(sock);
1876 goto outtinfo;
1877 }
1878 tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); 1867 tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
1879 if (!tinfo) 1868 if (!tinfo)
1880 goto outsocket; 1869 goto out;
1881 tinfo->ipvs = ipvs; 1870 tinfo->ipvs = ipvs;
1882 tinfo->sock = sock; 1871 tinfo->sock = NULL;
1883 if (state == IP_VS_STATE_BACKUP) { 1872 if (state == IP_VS_STATE_BACKUP) {
1884 tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen, 1873 tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
1885 GFP_KERNEL); 1874 GFP_KERNEL);
1886 if (!tinfo->buf) 1875 if (!tinfo->buf)
1887 goto outtinfo; 1876 goto out;
1888 } else { 1877 } else {
1889 tinfo->buf = NULL; 1878 tinfo->buf = NULL;
1890 } 1879 }
1891 tinfo->id = id; 1880 tinfo->id = id;
1881 if (state == IP_VS_STATE_MASTER)
1882 result = make_send_sock(ipvs, id, dev, &tinfo->sock);
1883 else
1884 result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
1885 if (result < 0)
1886 goto out;
1892 1887
1893 task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); 1888 task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
1894 if (IS_ERR(task)) { 1889 if (IS_ERR(task)) {
1895 result = PTR_ERR(task); 1890 result = PTR_ERR(task);
1896 goto outtinfo; 1891 goto out;
1897 } 1892 }
1898 tinfo = NULL; 1893 tinfo = NULL;
1899 if (state == IP_VS_STATE_MASTER) 1894 if (state == IP_VS_STATE_MASTER)
@@ -1910,20 +1905,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1910 ipvs->sync_state |= state; 1905 ipvs->sync_state |= state;
1911 spin_unlock_bh(&ipvs->sync_buff_lock); 1906 spin_unlock_bh(&ipvs->sync_buff_lock);
1912 1907
1908 mutex_unlock(&ipvs->sync_mutex);
1909 rtnl_unlock();
1910
1913 /* increase the module use count */ 1911 /* increase the module use count */
1914 ip_vs_use_count_inc(); 1912 ip_vs_use_count_inc();
1915 1913
1916 return 0; 1914 return 0;
1917 1915
1918outsocket: 1916out:
1919 sock_release(sock); 1917 /* We do not need RTNL lock anymore, release it here so that
1920 1918 * sock_release below and in the kthreads can use rtnl_lock
1921outtinfo: 1919 * to leave the mcast group.
1922 if (tinfo) { 1920 */
1923 sock_release(tinfo->sock); 1921 rtnl_unlock();
1924 kfree(tinfo->buf);
1925 kfree(tinfo);
1926 }
1927 count = id; 1922 count = id;
1928 while (count-- > 0) { 1923 while (count-- > 0) {
1929 if (state == IP_VS_STATE_MASTER) 1924 if (state == IP_VS_STATE_MASTER)
@@ -1931,13 +1926,23 @@ outtinfo:
1931 else 1926 else
1932 kthread_stop(array[count]); 1927 kthread_stop(array[count]);
1933 } 1928 }
1934 kfree(array);
1935
1936out:
1937 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { 1929 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
1938 kfree(ipvs->ms); 1930 kfree(ipvs->ms);
1939 ipvs->ms = NULL; 1931 ipvs->ms = NULL;
1940 } 1932 }
1933 mutex_unlock(&ipvs->sync_mutex);
1934 if (tinfo) {
1935 if (tinfo->sock)
1936 sock_release(tinfo->sock);
1937 kfree(tinfo->buf);
1938 kfree(tinfo);
1939 }
1940 kfree(array);
1941 return result;
1942
1943out_early:
1944 mutex_unlock(&ipvs->sync_mutex);
1945 rtnl_unlock();
1941 return result; 1946 return result;
1942} 1947}
1943 1948
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 86a3c6f0c871..5f747089024f 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -719,6 +719,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
719 * least once for the stats anyway. 719 * least once for the stats anyway.
720 */ 720 */
721 rcu_read_lock_bh(); 721 rcu_read_lock_bh();
722 begin:
722 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { 723 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
723 ct = nf_ct_tuplehash_to_ctrack(h); 724 ct = nf_ct_tuplehash_to_ctrack(h);
724 if (ct != ignored_conntrack && 725 if (ct != ignored_conntrack &&
@@ -730,6 +731,12 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
730 } 731 }
731 NF_CT_STAT_INC(net, searched); 732 NF_CT_STAT_INC(net, searched);
732 } 733 }
734
735 if (get_nulls_value(n) != hash) {
736 NF_CT_STAT_INC(net, search_restart);
737 goto begin;
738 }
739
733 rcu_read_unlock_bh(); 740 rcu_read_unlock_bh();
734 741
735 return 0; 742 return 0;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 7f16d19d6198..a91f8bd51d05 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -560,7 +560,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
560 helper = rcu_dereference(nfct_help(expect->master)->helper); 560 helper = rcu_dereference(nfct_help(expect->master)->helper);
561 if (helper) { 561 if (helper) {
562 seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name); 562 seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
563 if (helper->expect_policy[expect->class].name) 563 if (helper->expect_policy[expect->class].name[0])
564 seq_printf(s, "/%s", 564 seq_printf(s, "/%s",
565 helper->expect_policy[expect->class].name); 565 helper->expect_policy[expect->class].name);
566 } 566 }
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 660939df7c94..c68e020427ab 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -887,8 +887,13 @@ restart:
887 } 887 }
888out: 888out:
889 local_bh_enable(); 889 local_bh_enable();
890 if (last) 890 if (last) {
891 /* nf ct hash resize happened, now clear the leftover. */
892 if ((struct nf_conn *)cb->args[1] == last)
893 cb->args[1] = 0;
894
891 nf_ct_put(last); 895 nf_ct_put(last);
896 }
892 897
893 return skb->len; 898 return skb->len;
894} 899}
@@ -999,9 +1004,8 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
999 1004
1000static int 1005static int
1001ctnetlink_parse_tuple(const struct nlattr * const cda[], 1006ctnetlink_parse_tuple(const struct nlattr * const cda[],
1002 struct nf_conntrack_tuple *tuple, 1007 struct nf_conntrack_tuple *tuple, u32 type,
1003 enum ctattr_type type, u_int8_t l3num, 1008 u_int8_t l3num, struct nf_conntrack_zone *zone)
1004 struct nf_conntrack_zone *zone)
1005{ 1009{
1006 struct nlattr *tb[CTA_TUPLE_MAX+1]; 1010 struct nlattr *tb[CTA_TUPLE_MAX+1];
1007 int err; 1011 int err;
@@ -2416,7 +2420,7 @@ static struct nfnl_ct_hook ctnetlink_glue_hook = {
2416static inline int 2420static inline int
2417ctnetlink_exp_dump_tuple(struct sk_buff *skb, 2421ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2418 const struct nf_conntrack_tuple *tuple, 2422 const struct nf_conntrack_tuple *tuple,
2419 enum ctattr_expect type) 2423 u32 type)
2420{ 2424{
2421 struct nlattr *nest_parms; 2425 struct nlattr *nest_parms;
2422 2426
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 885b4aba3695..1665c2159e4b 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1434,9 +1434,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
1434 handler = &sip_handlers[i]; 1434 handler = &sip_handlers[i];
1435 if (handler->request == NULL) 1435 if (handler->request == NULL)
1436 continue; 1436 continue;
1437 if (*datalen < handler->len || 1437 if (*datalen < handler->len + 2 ||
1438 strncasecmp(*dptr, handler->method, handler->len)) 1438 strncasecmp(*dptr, handler->method, handler->len))
1439 continue; 1439 continue;
1440 if ((*dptr)[handler->len] != ' ' ||
1441 !isalpha((*dptr)[handler->len+1]))
1442 continue;
1440 1443
1441 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, 1444 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
1442 &matchoff, &matchlen) <= 0) { 1445 &matchoff, &matchlen) <= 0) {
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 2c89f90cd7bc..f94a2e1172f0 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -422,14 +422,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
422 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); 422 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
423 mutex_unlock(&nf_log_mutex); 423 mutex_unlock(&nf_log_mutex);
424 } else { 424 } else {
425 struct ctl_table tmp = *table;
426
427 tmp.data = buf;
425 mutex_lock(&nf_log_mutex); 428 mutex_lock(&nf_log_mutex);
426 logger = nft_log_dereference(net->nf.nf_loggers[tindex]); 429 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
427 if (!logger) 430 if (!logger)
428 table->data = "NONE"; 431 strlcpy(buf, "NONE", sizeof(buf));
429 else 432 else
430 table->data = logger->name; 433 strlcpy(buf, logger->name, sizeof(buf));
431 r = proc_dostring(table, write, buffer, lenp, ppos);
432 mutex_unlock(&nf_log_mutex); 434 mutex_unlock(&nf_log_mutex);
435 r = proc_dostring(&tmp, write, buffer, lenp, ppos);
433 } 436 }
434 437
435 return r; 438 return r;
diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
index fbce552a796e..7d7466dbf663 100644
--- a/net/netfilter/nf_nat_proto_common.c
+++ b/net/netfilter/nf_nat_proto_common.c
@@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
41 const struct nf_conn *ct, 41 const struct nf_conn *ct,
42 u16 *rover) 42 u16 *rover)
43{ 43{
44 unsigned int range_size, min, i; 44 unsigned int range_size, min, max, i;
45 __be16 *portptr; 45 __be16 *portptr;
46 u_int16_t off; 46 u_int16_t off;
47 47
@@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
71 } 71 }
72 } else { 72 } else {
73 min = ntohs(range->min_proto.all); 73 min = ntohs(range->min_proto.all);
74 range_size = ntohs(range->max_proto.all) - min + 1; 74 max = ntohs(range->max_proto.all);
75 if (unlikely(max < min))
76 swap(max, min);
77 range_size = max - min + 1;
75 } 78 }
76 79
77 if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) { 80 if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5baa8e24e6ac..b19ad20a705c 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -26,23 +26,21 @@
26 * Once the queue is registered it must reinject all packets it 26 * Once the queue is registered it must reinject all packets it
27 * receives, no matter what. 27 * receives, no matter what.
28 */ 28 */
29static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
30 29
31/* return EBUSY when somebody else is registered, return EEXIST if the 30/* return EBUSY when somebody else is registered, return EEXIST if the
32 * same handler is registered, return 0 in case of success. */ 31 * same handler is registered, return 0 in case of success. */
33void nf_register_queue_handler(const struct nf_queue_handler *qh) 32void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
34{ 33{
35 /* should never happen, we only have one queueing backend in kernel */ 34 /* should never happen, we only have one queueing backend in kernel */
36 WARN_ON(rcu_access_pointer(queue_handler)); 35 WARN_ON(rcu_access_pointer(net->nf.queue_handler));
37 rcu_assign_pointer(queue_handler, qh); 36 rcu_assign_pointer(net->nf.queue_handler, qh);
38} 37}
39EXPORT_SYMBOL(nf_register_queue_handler); 38EXPORT_SYMBOL(nf_register_queue_handler);
40 39
41/* The caller must flush their queue before this */ 40/* The caller must flush their queue before this */
42void nf_unregister_queue_handler(void) 41void nf_unregister_queue_handler(struct net *net)
43{ 42{
44 RCU_INIT_POINTER(queue_handler, NULL); 43 RCU_INIT_POINTER(net->nf.queue_handler, NULL);
45 synchronize_rcu();
46} 44}
47EXPORT_SYMBOL(nf_unregister_queue_handler); 45EXPORT_SYMBOL(nf_unregister_queue_handler);
48 46
@@ -103,7 +101,7 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
103 const struct nf_queue_handler *qh; 101 const struct nf_queue_handler *qh;
104 102
105 rcu_read_lock(); 103 rcu_read_lock();
106 qh = rcu_dereference(queue_handler); 104 qh = rcu_dereference(net->nf.queue_handler);
107 if (qh) 105 if (qh)
108 qh->nf_hook_drop(net, ops); 106 qh->nf_hook_drop(net, ops);
109 rcu_read_unlock(); 107 rcu_read_unlock();
@@ -122,9 +120,10 @@ int nf_queue(struct sk_buff *skb,
122 struct nf_queue_entry *entry = NULL; 120 struct nf_queue_entry *entry = NULL;
123 const struct nf_afinfo *afinfo; 121 const struct nf_afinfo *afinfo;
124 const struct nf_queue_handler *qh; 122 const struct nf_queue_handler *qh;
123 struct net *net = state->net;
125 124
126 /* QUEUE == DROP if no one is waiting, to be safe. */ 125 /* QUEUE == DROP if no one is waiting, to be safe. */
127 qh = rcu_dereference(queue_handler); 126 qh = rcu_dereference(net->nf.queue_handler);
128 if (!qh) { 127 if (!qh) {
129 status = -ESRCH; 128 status = -ESRCH;
130 goto err; 129 goto err;
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index f3695a497408..99bc2f87a974 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -167,7 +167,8 @@ next_rule:
167 167
168 switch (regs.verdict.code) { 168 switch (regs.verdict.code) {
169 case NFT_JUMP: 169 case NFT_JUMP:
170 BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE); 170 if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
171 return NF_DROP;
171 jumpstack[stackptr].chain = chain; 172 jumpstack[stackptr].chain = chain;
172 jumpstack[stackptr].rule = rule; 173 jumpstack[stackptr].rule = rule;
173 jumpstack[stackptr].rulenum = rulenum; 174 jumpstack[stackptr].rulenum = rulenum;
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 8d34a488efc0..ac143ae4f7b6 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -17,6 +17,7 @@
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/capability.h>
20#include <net/netlink.h> 21#include <net/netlink.h>
21#include <net/sock.h> 22#include <net/sock.h>
22 23
@@ -392,6 +393,9 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
392 struct nfnl_cthelper *nlcth; 393 struct nfnl_cthelper *nlcth;
393 int ret = 0; 394 int ret = 0;
394 395
396 if (!capable(CAP_NET_ADMIN))
397 return -EPERM;
398
395 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) 399 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
396 return -EINVAL; 400 return -EINVAL;
397 401
@@ -595,6 +599,9 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
595 struct nfnl_cthelper *nlcth; 599 struct nfnl_cthelper *nlcth;
596 bool tuple_set = false; 600 bool tuple_set = false;
597 601
602 if (!capable(CAP_NET_ADMIN))
603 return -EPERM;
604
598 if (nlh->nlmsg_flags & NLM_F_DUMP) { 605 if (nlh->nlmsg_flags & NLM_F_DUMP) {
599 struct netlink_dump_control c = { 606 struct netlink_dump_control c = {
600 .dump = nfnl_cthelper_dump_table, 607 .dump = nfnl_cthelper_dump_table,
@@ -661,6 +668,9 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
661 struct nfnl_cthelper *nlcth, *n; 668 struct nfnl_cthelper *nlcth, *n;
662 int j = 0, ret; 669 int j = 0, ret;
663 670
671 if (!capable(CAP_NET_ADMIN))
672 return -EPERM;
673
664 if (tb[NFCTH_NAME]) 674 if (tb[NFCTH_NAME])
665 helper_name = nla_data(tb[NFCTH_NAME]); 675 helper_name = nla_data(tb[NFCTH_NAME]);
666 676
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index f6837f9b6d6c..54cde78c2718 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -501,7 +501,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
501 501
502 if (entskb->tstamp.tv64) { 502 if (entskb->tstamp.tv64) {
503 struct nfqnl_msg_packet_timestamp ts; 503 struct nfqnl_msg_packet_timestamp ts;
504 struct timespec64 kts = ktime_to_timespec64(skb->tstamp); 504 struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
505 505
506 ts.sec = cpu_to_be64(kts.tv_sec); 506 ts.sec = cpu_to_be64(kts.tv_sec);
507 ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC); 507 ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
@@ -1053,10 +1053,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
1053 struct net *net = sock_net(ctnl); 1053 struct net *net = sock_net(ctnl);
1054 struct nfnl_queue_net *q = nfnl_queue_pernet(net); 1054 struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1055 1055
1056 queue = instance_lookup(q, queue_num); 1056 queue = verdict_instance_lookup(q, queue_num,
1057 if (!queue) 1057 NETLINK_CB(skb).portid);
1058 queue = verdict_instance_lookup(q, queue_num,
1059 NETLINK_CB(skb).portid);
1060 if (IS_ERR(queue)) 1058 if (IS_ERR(queue))
1061 return PTR_ERR(queue); 1059 return PTR_ERR(queue);
1062 1060
@@ -1108,6 +1106,9 @@ nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
1108static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { 1106static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
1109 [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, 1107 [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) },
1110 [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, 1108 [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) },
1109 [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
1110 [NFQA_CFG_MASK] = { .type = NLA_U32 },
1111 [NFQA_CFG_FLAGS] = { .type = NLA_U32 },
1111}; 1112};
1112 1113
1113static const struct nf_queue_handler nfqh = { 1114static const struct nf_queue_handler nfqh = {
@@ -1384,21 +1385,29 @@ static int __net_init nfnl_queue_net_init(struct net *net)
1384 net->nf.proc_netfilter, &nfqnl_file_ops)) 1385 net->nf.proc_netfilter, &nfqnl_file_ops))
1385 return -ENOMEM; 1386 return -ENOMEM;
1386#endif 1387#endif
1388 nf_register_queue_handler(net, &nfqh);
1387 return 0; 1389 return 0;
1388} 1390}
1389 1391
1390static void __net_exit nfnl_queue_net_exit(struct net *net) 1392static void __net_exit nfnl_queue_net_exit(struct net *net)
1391{ 1393{
1394 nf_unregister_queue_handler(net);
1392#ifdef CONFIG_PROC_FS 1395#ifdef CONFIG_PROC_FS
1393 remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); 1396 remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
1394#endif 1397#endif
1395} 1398}
1396 1399
1400static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
1401{
1402 synchronize_rcu();
1403}
1404
1397static struct pernet_operations nfnl_queue_net_ops = { 1405static struct pernet_operations nfnl_queue_net_ops = {
1398 .init = nfnl_queue_net_init, 1406 .init = nfnl_queue_net_init,
1399 .exit = nfnl_queue_net_exit, 1407 .exit = nfnl_queue_net_exit,
1400 .id = &nfnl_queue_net_id, 1408 .exit_batch = nfnl_queue_net_exit_batch,
1401 .size = sizeof(struct nfnl_queue_net), 1409 .id = &nfnl_queue_net_id,
1410 .size = sizeof(struct nfnl_queue_net),
1402}; 1411};
1403 1412
1404static int __init nfnetlink_queue_init(void) 1413static int __init nfnetlink_queue_init(void)
@@ -1419,7 +1428,6 @@ static int __init nfnetlink_queue_init(void)
1419 } 1428 }
1420 1429
1421 register_netdevice_notifier(&nfqnl_dev_notifier); 1430 register_netdevice_notifier(&nfqnl_dev_notifier);
1422 nf_register_queue_handler(&nfqh);
1423 return status; 1431 return status;
1424 1432
1425cleanup_netlink_notifier: 1433cleanup_netlink_notifier:
@@ -1431,7 +1439,6 @@ out:
1431 1439
1432static void __exit nfnetlink_queue_fini(void) 1440static void __exit nfnetlink_queue_fini(void)
1433{ 1441{
1434 nf_unregister_queue_handler();
1435 unregister_netdevice_notifier(&nfqnl_dev_notifier); 1442 unregister_netdevice_notifier(&nfqnl_dev_notifier);
1436 nfnetlink_subsys_unregister(&nfqnl_subsys); 1443 nfnetlink_subsys_unregister(&nfqnl_subsys);
1437 netlink_unregister_notifier(&nfqnl_rtnl_notifier); 1444 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 2fc6ca9d1286..1f3c305df45d 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -38,7 +38,7 @@ MODULE_LICENSE("GPL");
38MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 38MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
39MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); 39MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
40 40
41#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) 41#define XT_PCPU_BLOCK_SIZE 4096
42 42
43struct compat_delta { 43struct compat_delta {
44 unsigned int offset; /* offset in kernel */ 44 unsigned int offset; /* offset in kernel */
@@ -208,6 +208,9 @@ xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
208{ 208{
209 struct xt_match *match; 209 struct xt_match *match;
210 210
211 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
212 return ERR_PTR(-EINVAL);
213
211 match = xt_find_match(nfproto, name, revision); 214 match = xt_find_match(nfproto, name, revision);
212 if (IS_ERR(match)) { 215 if (IS_ERR(match)) {
213 request_module("%st_%s", xt_prefix[nfproto], name); 216 request_module("%st_%s", xt_prefix[nfproto], name);
@@ -250,6 +253,9 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
250{ 253{
251 struct xt_target *target; 254 struct xt_target *target;
252 255
256 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
257 return ERR_PTR(-EINVAL);
258
253 target = xt_find_target(af, name, revision); 259 target = xt_find_target(af, name, revision);
254 if (IS_ERR(target)) { 260 if (IS_ERR(target)) {
255 request_module("%st_%s", xt_prefix[af], name); 261 request_module("%st_%s", xt_prefix[af], name);
@@ -360,6 +366,36 @@ textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
360 return buf; 366 return buf;
361} 367}
362 368
369/**
370 * xt_check_proc_name - check that name is suitable for /proc file creation
371 *
372 * @name: file name candidate
373 * @size: length of buffer
374 *
375 * some x_tables modules wish to create a file in /proc.
376 * This function makes sure that the name is suitable for this
377 * purpose, it checks that name is NUL terminated and isn't a 'special'
378 * name, like "..".
379 *
380 * returns negative number on error or 0 if name is useable.
381 */
382int xt_check_proc_name(const char *name, unsigned int size)
383{
384 if (name[0] == '\0')
385 return -EINVAL;
386
387 if (strnlen(name, size) == size)
388 return -ENAMETOOLONG;
389
390 if (strcmp(name, ".") == 0 ||
391 strcmp(name, "..") == 0 ||
392 strchr(name, '/'))
393 return -EINVAL;
394
395 return 0;
396}
397EXPORT_SYMBOL(xt_check_proc_name);
398
363int xt_check_match(struct xt_mtchk_param *par, 399int xt_check_match(struct xt_mtchk_param *par,
364 unsigned int size, u_int8_t proto, bool inv_proto) 400 unsigned int size, u_int8_t proto, bool inv_proto)
365{ 401{
@@ -701,6 +737,56 @@ int xt_check_entry_offsets(const void *base,
701} 737}
702EXPORT_SYMBOL(xt_check_entry_offsets); 738EXPORT_SYMBOL(xt_check_entry_offsets);
703 739
740/**
741 * xt_alloc_entry_offsets - allocate array to store rule head offsets
742 *
743 * @size: number of entries
744 *
745 * Return: NULL or kmalloc'd or vmalloc'd array
746 */
747unsigned int *xt_alloc_entry_offsets(unsigned int size)
748{
749 unsigned int *off;
750
751 off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);
752
753 if (off)
754 return off;
755
756 if (size < (SIZE_MAX / sizeof(unsigned int)))
757 off = vmalloc(size * sizeof(unsigned int));
758
759 return off;
760}
761EXPORT_SYMBOL(xt_alloc_entry_offsets);
762
763/**
764 * xt_find_jump_offset - check if target is a valid jump offset
765 *
766 * @offsets: array containing all valid rule start offsets of a rule blob
767 * @target: the jump target to search for
768 * @size: entries in @offset
769 */
770bool xt_find_jump_offset(const unsigned int *offsets,
771 unsigned int target, unsigned int size)
772{
773 int m, low = 0, hi = size;
774
775 while (hi > low) {
776 m = (low + hi) / 2u;
777
778 if (offsets[m] > target)
779 hi = m;
780 else if (offsets[m] < target)
781 low = m + 1;
782 else
783 return true;
784 }
785
786 return false;
787}
788EXPORT_SYMBOL(xt_find_jump_offset);
789
704int xt_check_target(struct xt_tgchk_param *par, 790int xt_check_target(struct xt_tgchk_param *par,
705 unsigned int size, u_int8_t proto, bool inv_proto) 791 unsigned int size, u_int8_t proto, bool inv_proto)
706{ 792{
@@ -904,7 +990,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
904 return NULL; 990 return NULL;
905 991
906 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ 992 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
907 if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages) 993 if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
908 return NULL; 994 return NULL;
909 995
910 if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) 996 if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
@@ -1538,6 +1624,59 @@ void xt_proto_fini(struct net *net, u_int8_t af)
1538} 1624}
1539EXPORT_SYMBOL_GPL(xt_proto_fini); 1625EXPORT_SYMBOL_GPL(xt_proto_fini);
1540 1626
1627/**
1628 * xt_percpu_counter_alloc - allocate x_tables rule counter
1629 *
1630 * @state: pointer to xt_percpu allocation state
1631 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
1632 *
1633 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
1634 * contain the address of the real (percpu) counter.
1635 *
1636 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
1637 * to fetch the real percpu counter.
1638 *
1639 * To speed up allocation and improve data locality, a 4kb block is
1640 * allocated.
1641 *
1642 * xt_percpu_counter_alloc_state contains the base address of the
1643 * allocated page and the current sub-offset.
1644 *
1645 * returns false on error.
1646 */
1647bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
1648 struct xt_counters *counter)
1649{
1650 BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));
1651
1652 if (nr_cpu_ids <= 1)
1653 return true;
1654
1655 if (!state->mem) {
1656 state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
1657 XT_PCPU_BLOCK_SIZE);
1658 if (!state->mem)
1659 return false;
1660 }
1661 counter->pcnt = (__force unsigned long)(state->mem + state->off);
1662 state->off += sizeof(*counter);
1663 if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
1664 state->mem = NULL;
1665 state->off = 0;
1666 }
1667 return true;
1668}
1669EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1670
1671void xt_percpu_counter_free(struct xt_counters *counters)
1672{
1673 unsigned long pcnt = counters->pcnt;
1674
1675 if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
1676 free_percpu((void __percpu *)pcnt);
1677}
1678EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
1679
1541static int __net_init xt_net_init(struct net *net) 1680static int __net_init xt_net_init(struct net *net)
1542{ 1681{
1543 int i; 1682 int i;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index e7ac07e53b59..febcfac7e3df 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -168,8 +168,10 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
168 goto err_put_timeout; 168 goto err_put_timeout;
169 } 169 }
170 timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC); 170 timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC);
171 if (timeout_ext == NULL) 171 if (!timeout_ext) {
172 ret = -ENOMEM; 172 ret = -ENOMEM;
173 goto err_put_timeout;
174 }
173 175
174 rcu_read_unlock(); 176 rcu_read_unlock();
175 return ret; 177 return ret;
@@ -201,6 +203,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
201 struct xt_ct_target_info_v1 *info) 203 struct xt_ct_target_info_v1 *info)
202{ 204{
203 struct nf_conntrack_zone zone; 205 struct nf_conntrack_zone zone;
206 struct nf_conn_help *help;
204 struct nf_conn *ct; 207 struct nf_conn *ct;
205 int ret = -EOPNOTSUPP; 208 int ret = -EOPNOTSUPP;
206 209
@@ -249,7 +252,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
249 if (info->timeout[0]) { 252 if (info->timeout[0]) {
250 ret = xt_ct_set_timeout(ct, par, info->timeout); 253 ret = xt_ct_set_timeout(ct, par, info->timeout);
251 if (ret < 0) 254 if (ret < 0)
252 goto err3; 255 goto err4;
253 } 256 }
254 __set_bit(IPS_CONFIRMED_BIT, &ct->status); 257 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
255 nf_conntrack_get(&ct->ct_general); 258 nf_conntrack_get(&ct->ct_general);
@@ -257,6 +260,10 @@ out:
257 info->ct = ct; 260 info->ct = ct;
258 return 0; 261 return 0;
259 262
263err4:
264 help = nfct_help(ct);
265 if (help)
266 module_put(help->helper->me);
260err3: 267err3:
261 nf_ct_tmpl_free(ct); 268 nf_ct_tmpl_free(ct);
262err2: 269err2:
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 29d2c31f406c..1718f536689f 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -147,11 +147,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
147 (unsigned long) info->timer); 147 (unsigned long) info->timer);
148 info->timer->refcnt = 1; 148 info->timer->refcnt = 1;
149 149
150 INIT_WORK(&info->timer->work, idletimer_tg_work);
151
150 mod_timer(&info->timer->timer, 152 mod_timer(&info->timer->timer,
151 msecs_to_jiffies(info->timeout * 1000) + jiffies); 153 msecs_to_jiffies(info->timeout * 1000) + jiffies);
152 154
153 INIT_WORK(&info->timer->work, idletimer_tg_work);
154
155 return 0; 155 return 0;
156 156
157out_free_attr: 157out_free_attr:
@@ -192,7 +192,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
192 pr_debug("timeout value is zero\n"); 192 pr_debug("timeout value is zero\n");
193 return -EINVAL; 193 return -EINVAL;
194 } 194 }
195 195 if (info->timeout >= INT_MAX / 1000) {
196 pr_debug("timeout value is too big\n");
197 return -EINVAL;
198 }
196 if (info->label[0] == '\0' || 199 if (info->label[0] == '\0' ||
197 strnlen(info->label, 200 strnlen(info->label,
198 MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) { 201 MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 3ba31c194cce..0858fe17e14a 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -141,10 +141,11 @@ static int led_tg_check(const struct xt_tgchk_param *par)
141 goto exit_alloc; 141 goto exit_alloc;
142 } 142 }
143 143
144 /* See if we need to set up a timer */ 144 /* Since the letinternal timer can be shared between multiple targets,
145 if (ledinfo->delay > 0) 145 * always set it up, even if the current target does not need it
146 setup_timer(&ledinternal->timer, led_timeout_callback, 146 */
147 (unsigned long)ledinternal); 147 setup_timer(&ledinternal->timer, led_timeout_callback,
148 (unsigned long)ledinternal);
148 149
149 list_add_tail(&ledinternal->list, &xt_led_triggers); 150 list_add_tail(&ledinternal->list, &xt_led_triggers);
150 151
@@ -181,8 +182,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
181 182
182 list_del(&ledinternal->list); 183 list_del(&ledinternal->list);
183 184
184 if (ledinfo->delay > 0) 185 del_timer_sync(&ledinternal->timer);
185 del_timer_sync(&ledinternal->timer);
186 186
187 led_trigger_unregister(&ledinternal->netfilter_led_trigger); 187 led_trigger_unregister(&ledinternal->netfilter_led_trigger);
188 188
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 604df6fae6fc..0be96f8475f7 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -40,23 +40,31 @@ static void xt_rateest_hash_insert(struct xt_rateest *est)
40 hlist_add_head(&est->list, &rateest_hash[h]); 40 hlist_add_head(&est->list, &rateest_hash[h]);
41} 41}
42 42
43struct xt_rateest *xt_rateest_lookup(const char *name) 43static struct xt_rateest *__xt_rateest_lookup(const char *name)
44{ 44{
45 struct xt_rateest *est; 45 struct xt_rateest *est;
46 unsigned int h; 46 unsigned int h;
47 47
48 h = xt_rateest_hash(name); 48 h = xt_rateest_hash(name);
49 mutex_lock(&xt_rateest_mutex);
50 hlist_for_each_entry(est, &rateest_hash[h], list) { 49 hlist_for_each_entry(est, &rateest_hash[h], list) {
51 if (strcmp(est->name, name) == 0) { 50 if (strcmp(est->name, name) == 0) {
52 est->refcnt++; 51 est->refcnt++;
53 mutex_unlock(&xt_rateest_mutex);
54 return est; 52 return est;
55 } 53 }
56 } 54 }
57 mutex_unlock(&xt_rateest_mutex); 55
58 return NULL; 56 return NULL;
59} 57}
58
59struct xt_rateest *xt_rateest_lookup(const char *name)
60{
61 struct xt_rateest *est;
62
63 mutex_lock(&xt_rateest_mutex);
64 est = __xt_rateest_lookup(name);
65 mutex_unlock(&xt_rateest_mutex);
66 return est;
67}
60EXPORT_SYMBOL_GPL(xt_rateest_lookup); 68EXPORT_SYMBOL_GPL(xt_rateest_lookup);
61 69
62void xt_rateest_put(struct xt_rateest *est) 70void xt_rateest_put(struct xt_rateest *est)
@@ -104,8 +112,10 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
104 rnd_inited = true; 112 rnd_inited = true;
105 } 113 }
106 114
107 est = xt_rateest_lookup(info->name); 115 mutex_lock(&xt_rateest_mutex);
116 est = __xt_rateest_lookup(info->name);
108 if (est) { 117 if (est) {
118 mutex_unlock(&xt_rateest_mutex);
109 /* 119 /*
110 * If estimator parameters are specified, they must match the 120 * If estimator parameters are specified, they must match the
111 * existing estimator. 121 * existing estimator.
@@ -143,11 +153,13 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
143 153
144 info->est = est; 154 info->est = est;
145 xt_rateest_hash_insert(est); 155 xt_rateest_hash_insert(est);
156 mutex_unlock(&xt_rateest_mutex);
146 return 0; 157 return 0;
147 158
148err2: 159err2:
149 kfree(est); 160 kfree(est);
150err1: 161err1:
162 mutex_unlock(&xt_rateest_mutex);
151 return ret; 163 return ret;
152} 164}
153 165
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 178696852bde..7381be0cdcdf 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -668,8 +668,9 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par)
668 668
669 if (info->cfg.gc_interval == 0 || info->cfg.expire == 0) 669 if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
670 return -EINVAL; 670 return -EINVAL;
671 if (info->name[sizeof(info->name)-1] != '\0') 671 ret = xt_check_proc_name(info->name, sizeof(info->name));
672 return -EINVAL; 672 if (ret)
673 return ret;
673 if (par->family == NFPROTO_IPV4) { 674 if (par->family == NFPROTO_IPV4) {
674 if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32) 675 if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32)
675 return -EINVAL; 676 return -EINVAL;
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index df8801e02a32..7eae0d0af89a 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21 21
22#include <linux/capability.h>
22#include <linux/if.h> 23#include <linux/if.h>
23#include <linux/inetdevice.h> 24#include <linux/inetdevice.h>
24#include <linux/ip.h> 25#include <linux/ip.h>
@@ -69,6 +70,9 @@ static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb,
69 struct xt_osf_finger *kf = NULL, *sf; 70 struct xt_osf_finger *kf = NULL, *sf;
70 int err = 0; 71 int err = 0;
71 72
73 if (!capable(CAP_NET_ADMIN))
74 return -EPERM;
75
72 if (!osf_attrs[OSF_ATTR_FINGER]) 76 if (!osf_attrs[OSF_ATTR_FINGER])
73 return -EINVAL; 77 return -EINVAL;
74 78
@@ -112,6 +116,9 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
112 struct xt_osf_finger *sf; 116 struct xt_osf_finger *sf;
113 int err = -ENOENT; 117 int err = -ENOENT;
114 118
119 if (!capable(CAP_NET_ADMIN))
120 return -EPERM;
121
115 if (!osf_attrs[OSF_ATTR_FINGER]) 122 if (!osf_attrs[OSF_ATTR_FINGER])
116 return -EINVAL; 123 return -EINVAL;
117 124
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index d725a27743a1..cd53b861a15c 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -364,9 +364,9 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
364 info->hit_count, XT_RECENT_MAX_NSTAMPS - 1); 364 info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
365 return -EINVAL; 365 return -EINVAL;
366 } 366 }
367 if (info->name[0] == '\0' || 367 ret = xt_check_proc_name(info->name, sizeof(info->name));
368 strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) 368 if (ret)
369 return -EINVAL; 369 return ret;
370 370
371 if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot) 371 if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
372 nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1; 372 nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index b0380927f05f..3f33ec44bd28 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1469,6 +1469,16 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
1469 iface = rcu_dereference(netlbl_unlhsh_def); 1469 iface = rcu_dereference(netlbl_unlhsh_def);
1470 if (iface == NULL || !iface->valid) 1470 if (iface == NULL || !iface->valid)
1471 goto unlabel_getattr_nolabel; 1471 goto unlabel_getattr_nolabel;
1472
1473#if IS_ENABLED(CONFIG_IPV6)
1474 /* When resolving a fallback label, check the sk_buff version as
1475 * it is possible (e.g. SCTP) to have family = PF_INET6 while
1476 * receiving ip_hdr(skb)->version = 4.
1477 */
1478 if (family == PF_INET6 && ip_hdr(skb)->version == 4)
1479 family = PF_INET;
1480#endif /* IPv6 */
1481
1472 switch (family) { 1482 switch (family) {
1473 case PF_INET: { 1483 case PF_INET: {
1474 struct iphdr *hdr4; 1484 struct iphdr *hdr4;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 48e1608414e6..bf292010760a 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -62,6 +62,7 @@
62#include <asm/cacheflush.h> 62#include <asm/cacheflush.h>
63#include <linux/hash.h> 63#include <linux/hash.h>
64#include <linux/genetlink.h> 64#include <linux/genetlink.h>
65#include <linux/nospec.h>
65 66
66#include <net/net_namespace.h> 67#include <net/net_namespace.h>
67#include <net/sock.h> 68#include <net/sock.h>
@@ -654,6 +655,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
654 655
655 if (protocol < 0 || protocol >= MAX_LINKS) 656 if (protocol < 0 || protocol >= MAX_LINKS)
656 return -EPROTONOSUPPORT; 657 return -EPROTONOSUPPORT;
658 protocol = array_index_nospec(protocol, MAX_LINKS);
657 659
658 netlink_lock_table(); 660 netlink_lock_table();
659#ifdef CONFIG_MODULES 661#ifdef CONFIG_MODULES
@@ -984,6 +986,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
984 return err; 986 return err;
985 } 987 }
986 988
989 if (nlk->ngroups == 0)
990 groups = 0;
991 else if (nlk->ngroups < 8*sizeof(groups))
992 groups &= (1UL << nlk->ngroups) - 1;
993
987 bound = nlk->bound; 994 bound = nlk->bound;
988 if (bound) { 995 if (bound) {
989 /* Ensure nlk->portid is up-to-date. */ 996 /* Ensure nlk->portid is up-to-date. */
@@ -1054,6 +1061,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1054 if (addr->sa_family != AF_NETLINK) 1061 if (addr->sa_family != AF_NETLINK)
1055 return -EINVAL; 1062 return -EINVAL;
1056 1063
1064 if (alen < sizeof(struct sockaddr_nl))
1065 return -EINVAL;
1066
1057 if ((nladdr->nl_groups || nladdr->nl_pid) && 1067 if ((nladdr->nl_groups || nladdr->nl_pid) &&
1058 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) 1068 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1059 return -EPERM; 1069 return -EPERM;
@@ -1792,6 +1802,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1792 1802
1793 if (msg->msg_namelen) { 1803 if (msg->msg_namelen) {
1794 err = -EINVAL; 1804 err = -EINVAL;
1805 if (msg->msg_namelen < sizeof(struct sockaddr_nl))
1806 goto out;
1795 if (addr->nl_family != AF_NETLINK) 1807 if (addr->nl_family != AF_NETLINK)
1796 goto out; 1808 goto out;
1797 dst_portid = addr->nl_pid; 1809 dst_portid = addr->nl_pid;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 8e63662c6fb0..d681dbaf00c1 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1118,6 +1118,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1118{ 1118{
1119 struct sk_buff *tmp; 1119 struct sk_buff *tmp;
1120 struct net *net, *prev = NULL; 1120 struct net *net, *prev = NULL;
1121 bool delivered = false;
1121 int err; 1122 int err;
1122 1123
1123 for_each_net_rcu(net) { 1124 for_each_net_rcu(net) {
@@ -1129,14 +1130,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1129 } 1130 }
1130 err = nlmsg_multicast(prev->genl_sock, tmp, 1131 err = nlmsg_multicast(prev->genl_sock, tmp,
1131 portid, group, flags); 1132 portid, group, flags);
1132 if (err) 1133 if (!err)
1134 delivered = true;
1135 else if (err != -ESRCH)
1133 goto error; 1136 goto error;
1134 } 1137 }
1135 1138
1136 prev = net; 1139 prev = net;
1137 } 1140 }
1138 1141
1139 return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags); 1142 err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
1143 if (!err)
1144 delivered = true;
1145 else if (err != -ESRCH)
1146 return err;
1147 return delivered ? 0 : -ESRCH;
1140 error: 1148 error:
1141 kfree_skb(skb); 1149 kfree_skb(skb);
1142 return err; 1150 return err;
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 3621a902cb6e..04f060488686 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
149 149
150 pr_debug("uri: %s, len: %zu\n", uri, uri_len); 150 pr_debug("uri: %s, len: %zu\n", uri, uri_len);
151 151
152 /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */
153 if (WARN_ON_ONCE(uri_len > U8_MAX - 4))
154 return NULL;
155
152 sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); 156 sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
153 if (sdreq == NULL) 157 if (sdreq == NULL)
154 return NULL; 158 return NULL;
@@ -750,11 +754,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
750 pr_debug("Fragment %zd bytes remaining %zd", 754 pr_debug("Fragment %zd bytes remaining %zd",
751 frag_len, remaining_len); 755 frag_len, remaining_len);
752 756
753 pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, 757 pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
754 frag_len + LLCP_HEADER_SIZE, &err); 758 frag_len + LLCP_HEADER_SIZE, &err);
755 if (pdu == NULL) { 759 if (pdu == NULL) {
756 pr_err("Could not allocate PDU\n"); 760 pr_err("Could not allocate PDU (error=%d)\n", err);
757 continue; 761 len -= remaining_len;
762 if (len == 0)
763 len = err;
764 break;
758 } 765 }
759 766
760 pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI); 767 pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 12dfb457275d..32cb0c87e852 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -68,7 +68,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
68}; 68};
69 69
70static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { 70static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
71 [NFC_SDP_ATTR_URI] = { .type = NLA_STRING }, 71 [NFC_SDP_ATTR_URI] = { .type = NLA_STRING,
72 .len = U8_MAX - 4 },
72 [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 }, 73 [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
73}; 74};
74 75
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 6a2507f24b0f..1829adb23505 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -361,10 +361,38 @@ ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
361 u16 proto, const struct sk_buff *skb) 361 u16 proto, const struct sk_buff *skb)
362{ 362{
363 struct nf_conntrack_tuple tuple; 363 struct nf_conntrack_tuple tuple;
364 struct nf_conntrack_expect *exp;
364 365
365 if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple)) 366 if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple))
366 return NULL; 367 return NULL;
367 return __nf_ct_expect_find(net, zone, &tuple); 368
369 exp = __nf_ct_expect_find(net, zone, &tuple);
370 if (exp) {
371 struct nf_conntrack_tuple_hash *h;
372
373 /* Delete existing conntrack entry, if it clashes with the
374 * expectation. This can happen since conntrack ALGs do not
375 * check for clashes between (new) expectations and existing
376 * conntrack entries. nf_conntrack_in() will check the
377 * expectations only if a conntrack entry can not be found,
378 * which can lead to OVS finding the expectation (here) in the
379 * init direction, but which will not be removed by the
380 * nf_conntrack_in() call, if a matching conntrack entry is
381 * found instead. In this case all init direction packets
382 * would be reported as new related packets, while reply
383 * direction packets would be reported as un-related
384 * established packets.
385 */
386 h = nf_conntrack_find_get(net, zone, &tuple);
387 if (h) {
388 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
389
390 nf_ct_delete(ct, 0, 0);
391 nf_conntrack_put(&ct->ct_general);
392 }
393 }
394
395 return exp;
368} 396}
369 397
370/* Determine whether skb->nfct is equal to the result of conntrack lookup. */ 398/* Determine whether skb->nfct is equal to the result of conntrack lookup. */
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index d26b28def310..624c4719e404 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1141,13 +1141,10 @@ static void nlattr_set(struct nlattr *attr, u8 val,
1141 1141
1142 /* The nlattr stream should already have been validated */ 1142 /* The nlattr stream should already have been validated */
1143 nla_for_each_nested(nla, attr, rem) { 1143 nla_for_each_nested(nla, attr, rem) {
1144 if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) { 1144 if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
1145 if (tbl[nla_type(nla)].next) 1145 nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
1146 tbl = tbl[nla_type(nla)].next; 1146 else
1147 nlattr_set(nla, val, tbl);
1148 } else {
1149 memset(nla_data(nla), val, nla_len(nla)); 1147 memset(nla_data(nla), val, nla_len(nla));
1150 }
1151 1148
1152 if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE) 1149 if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
1153 *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK; 1150 *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
@@ -1672,14 +1669,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
1672 1669
1673#define MAX_ACTIONS_BUFSIZE (32 * 1024) 1670#define MAX_ACTIONS_BUFSIZE (32 * 1024)
1674 1671
1675static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log) 1672static struct sw_flow_actions *nla_alloc_flow_actions(int size)
1676{ 1673{
1677 struct sw_flow_actions *sfa; 1674 struct sw_flow_actions *sfa;
1678 1675
1679 if (size > MAX_ACTIONS_BUFSIZE) { 1676 WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
1680 OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
1681 return ERR_PTR(-EINVAL);
1682 }
1683 1677
1684 sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); 1678 sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
1685 if (!sfa) 1679 if (!sfa)
@@ -1752,12 +1746,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
1752 new_acts_size = ksize(*sfa) * 2; 1746 new_acts_size = ksize(*sfa) * 2;
1753 1747
1754 if (new_acts_size > MAX_ACTIONS_BUFSIZE) { 1748 if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
1755 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) 1749 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
1750 OVS_NLERR(log, "Flow action size exceeds max %u",
1751 MAX_ACTIONS_BUFSIZE);
1756 return ERR_PTR(-EMSGSIZE); 1752 return ERR_PTR(-EMSGSIZE);
1753 }
1757 new_acts_size = MAX_ACTIONS_BUFSIZE; 1754 new_acts_size = MAX_ACTIONS_BUFSIZE;
1758 } 1755 }
1759 1756
1760 acts = nla_alloc_flow_actions(new_acts_size, log); 1757 acts = nla_alloc_flow_actions(new_acts_size);
1761 if (IS_ERR(acts)) 1758 if (IS_ERR(acts))
1762 return (void *)acts; 1759 return (void *)acts;
1763 1760
@@ -2369,7 +2366,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2369{ 2366{
2370 int err; 2367 int err;
2371 2368
2372 *sfa = nla_alloc_flow_actions(nla_len(attr), log); 2369 *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
2373 if (IS_ERR(*sfa)) 2370 if (IS_ERR(*sfa))
2374 return PTR_ERR(*sfa); 2371 return PTR_ERR(*sfa);
2375 2372
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 92ca3e106c2b..3a63f33698d3 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -332,11 +332,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
332 skb_set_queue_mapping(skb, queue_index); 332 skb_set_queue_mapping(skb, queue_index);
333} 333}
334 334
335/* register_prot_hook must be invoked with the po->bind_lock held, 335/* __register_prot_hook must be invoked through register_prot_hook
336 * or from a context in which asynchronous accesses to the packet 336 * or from a context in which asynchronous accesses to the packet
337 * socket is not possible (packet_create()). 337 * socket is not possible (packet_create()).
338 */ 338 */
339static void register_prot_hook(struct sock *sk) 339static void __register_prot_hook(struct sock *sk)
340{ 340{
341 struct packet_sock *po = pkt_sk(sk); 341 struct packet_sock *po = pkt_sk(sk);
342 342
@@ -351,8 +351,13 @@ static void register_prot_hook(struct sock *sk)
351 } 351 }
352} 352}
353 353
354/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock 354static void register_prot_hook(struct sock *sk)
355 * held. If the sync parameter is true, we will temporarily drop 355{
356 lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
357 __register_prot_hook(sk);
358}
359
360/* If the sync parameter is true, we will temporarily drop
356 * the po->bind_lock and do a synchronize_net to make sure no 361 * the po->bind_lock and do a synchronize_net to make sure no
357 * asynchronous packet processing paths still refer to the elements 362 * asynchronous packet processing paths still refer to the elements
358 * of po->prot_hook. If the sync parameter is false, it is the 363 * of po->prot_hook. If the sync parameter is false, it is the
@@ -362,6 +367,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
362{ 367{
363 struct packet_sock *po = pkt_sk(sk); 368 struct packet_sock *po = pkt_sk(sk);
364 369
370 lockdep_assert_held_once(&po->bind_lock);
371
365 po->running = 0; 372 po->running = 0;
366 373
367 if (po->fanout) 374 if (po->fanout)
@@ -2764,13 +2771,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2764 if (skb == NULL) 2771 if (skb == NULL)
2765 goto out_unlock; 2772 goto out_unlock;
2766 2773
2767 skb_set_network_header(skb, reserve); 2774 skb_reset_network_header(skb);
2768 2775
2769 err = -EINVAL; 2776 err = -EINVAL;
2770 if (sock->type == SOCK_DGRAM) { 2777 if (sock->type == SOCK_DGRAM) {
2771 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 2778 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2772 if (unlikely(offset < 0)) 2779 if (unlikely(offset < 0))
2773 goto out_free; 2780 goto out_free;
2781 } else if (reserve) {
2782 skb_reserve(skb, -reserve);
2774 } 2783 }
2775 2784
2776 /* Returns -EFAULT on error */ 2785 /* Returns -EFAULT on error */
@@ -2892,6 +2901,7 @@ static int packet_release(struct socket *sock)
2892 2901
2893 packet_flush_mclist(sk); 2902 packet_flush_mclist(sk);
2894 2903
2904 lock_sock(sk);
2895 if (po->rx_ring.pg_vec) { 2905 if (po->rx_ring.pg_vec) {
2896 memset(&req_u, 0, sizeof(req_u)); 2906 memset(&req_u, 0, sizeof(req_u));
2897 packet_set_ring(sk, &req_u, 1, 0); 2907 packet_set_ring(sk, &req_u, 1, 0);
@@ -2901,6 +2911,7 @@ static int packet_release(struct socket *sock)
2901 memset(&req_u, 0, sizeof(req_u)); 2911 memset(&req_u, 0, sizeof(req_u));
2902 packet_set_ring(sk, &req_u, 1, 1); 2912 packet_set_ring(sk, &req_u, 1, 1);
2903 } 2913 }
2914 release_sock(sk);
2904 2915
2905 f = fanout_release(sk); 2916 f = fanout_release(sk);
2906 2917
@@ -3134,7 +3145,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
3134 3145
3135 if (proto) { 3146 if (proto) {
3136 po->prot_hook.type = proto; 3147 po->prot_hook.type = proto;
3137 register_prot_hook(sk); 3148 __register_prot_hook(sk);
3138 } 3149 }
3139 3150
3140 mutex_lock(&net->packet.sklist_lock); 3151 mutex_lock(&net->packet.sklist_lock);
@@ -3570,6 +3581,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3570 union tpacket_req_u req_u; 3581 union tpacket_req_u req_u;
3571 int len; 3582 int len;
3572 3583
3584 lock_sock(sk);
3573 switch (po->tp_version) { 3585 switch (po->tp_version) {
3574 case TPACKET_V1: 3586 case TPACKET_V1:
3575 case TPACKET_V2: 3587 case TPACKET_V2:
@@ -3580,14 +3592,21 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3580 len = sizeof(req_u.req3); 3592 len = sizeof(req_u.req3);
3581 break; 3593 break;
3582 } 3594 }
3583 if (optlen < len) 3595 if (optlen < len) {
3584 return -EINVAL; 3596 ret = -EINVAL;
3585 if (pkt_sk(sk)->has_vnet_hdr) 3597 } else {
3586 return -EINVAL; 3598 if (pkt_sk(sk)->has_vnet_hdr) {
3587 if (copy_from_user(&req_u.req, optval, len)) 3599 ret = -EINVAL;
3588 return -EFAULT; 3600 } else {
3589 return packet_set_ring(sk, &req_u, 0, 3601 if (copy_from_user(&req_u.req, optval, len))
3590 optname == PACKET_TX_RING); 3602 ret = -EFAULT;
3603 else
3604 ret = packet_set_ring(sk, &req_u, 0,
3605 optname == PACKET_TX_RING);
3606 }
3607 }
3608 release_sock(sk);
3609 return ret;
3591 } 3610 }
3592 case PACKET_COPY_THRESH: 3611 case PACKET_COPY_THRESH:
3593 { 3612 {
@@ -3653,12 +3672,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3653 3672
3654 if (optlen != sizeof(val)) 3673 if (optlen != sizeof(val))
3655 return -EINVAL; 3674 return -EINVAL;
3656 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3657 return -EBUSY;
3658 if (copy_from_user(&val, optval, sizeof(val))) 3675 if (copy_from_user(&val, optval, sizeof(val)))
3659 return -EFAULT; 3676 return -EFAULT;
3660 po->tp_loss = !!val; 3677
3661 return 0; 3678 lock_sock(sk);
3679 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3680 ret = -EBUSY;
3681 } else {
3682 po->tp_loss = !!val;
3683 ret = 0;
3684 }
3685 release_sock(sk);
3686 return ret;
3662 } 3687 }
3663 case PACKET_AUXDATA: 3688 case PACKET_AUXDATA:
3664 { 3689 {
@@ -3669,7 +3694,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3669 if (copy_from_user(&val, optval, sizeof(val))) 3694 if (copy_from_user(&val, optval, sizeof(val)))
3670 return -EFAULT; 3695 return -EFAULT;
3671 3696
3697 lock_sock(sk);
3672 po->auxdata = !!val; 3698 po->auxdata = !!val;
3699 release_sock(sk);
3673 return 0; 3700 return 0;
3674 } 3701 }
3675 case PACKET_ORIGDEV: 3702 case PACKET_ORIGDEV:
@@ -3681,7 +3708,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3681 if (copy_from_user(&val, optval, sizeof(val))) 3708 if (copy_from_user(&val, optval, sizeof(val)))
3682 return -EFAULT; 3709 return -EFAULT;
3683 3710
3711 lock_sock(sk);
3684 po->origdev = !!val; 3712 po->origdev = !!val;
3713 release_sock(sk);
3685 return 0; 3714 return 0;
3686 } 3715 }
3687 case PACKET_VNET_HDR: 3716 case PACKET_VNET_HDR:
@@ -3690,15 +3719,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3690 3719
3691 if (sock->type != SOCK_RAW) 3720 if (sock->type != SOCK_RAW)
3692 return -EINVAL; 3721 return -EINVAL;
3693 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3694 return -EBUSY;
3695 if (optlen < sizeof(val)) 3722 if (optlen < sizeof(val))
3696 return -EINVAL; 3723 return -EINVAL;
3697 if (copy_from_user(&val, optval, sizeof(val))) 3724 if (copy_from_user(&val, optval, sizeof(val)))
3698 return -EFAULT; 3725 return -EFAULT;
3699 3726
3700 po->has_vnet_hdr = !!val; 3727 lock_sock(sk);
3701 return 0; 3728 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3729 ret = -EBUSY;
3730 } else {
3731 po->has_vnet_hdr = !!val;
3732 ret = 0;
3733 }
3734 release_sock(sk);
3735 return ret;
3702 } 3736 }
3703 case PACKET_TIMESTAMP: 3737 case PACKET_TIMESTAMP:
3704 { 3738 {
@@ -3736,11 +3770,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3736 3770
3737 if (optlen != sizeof(val)) 3771 if (optlen != sizeof(val))
3738 return -EINVAL; 3772 return -EINVAL;
3739 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3740 return -EBUSY;
3741 if (copy_from_user(&val, optval, sizeof(val))) 3773 if (copy_from_user(&val, optval, sizeof(val)))
3742 return -EFAULT; 3774 return -EFAULT;
3743 po->tp_tx_has_off = !!val; 3775
3776 lock_sock(sk);
3777 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3778 ret = -EBUSY;
3779 } else {
3780 po->tp_tx_has_off = !!val;
3781 ret = 0;
3782 }
3783 release_sock(sk);
3744 return 0; 3784 return 0;
3745 } 3785 }
3746 case PACKET_QDISC_BYPASS: 3786 case PACKET_QDISC_BYPASS:
@@ -4116,7 +4156,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4116 /* Added to avoid minimal code churn */ 4156 /* Added to avoid minimal code churn */
4117 struct tpacket_req *req = &req_u->req; 4157 struct tpacket_req *req = &req_u->req;
4118 4158
4119 lock_sock(sk);
4120 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ 4159 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
4121 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { 4160 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
4122 WARN(1, "Tx-ring is not supported.\n"); 4161 WARN(1, "Tx-ring is not supported.\n");
@@ -4159,7 +4198,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4159 goto out; 4198 goto out;
4160 if (po->tp_version >= TPACKET_V3 && 4199 if (po->tp_version >= TPACKET_V3 &&
4161 req->tp_block_size <= 4200 req->tp_block_size <=
4162 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv)) 4201 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
4163 goto out; 4202 goto out;
4164 if (unlikely(req->tp_frame_size < po->tp_hdrlen + 4203 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4165 po->tp_reserve)) 4204 po->tp_reserve))
@@ -4252,7 +4291,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4252 if (pg_vec) 4291 if (pg_vec)
4253 free_pg_vec(pg_vec, order, req->tp_block_nr); 4292 free_pg_vec(pg_vec, order, req->tp_block_nr);
4254out: 4293out:
4255 release_sock(sk);
4256 return err; 4294 return err;
4257} 4295}
4258 4296
diff --git a/net/packet/internal.h b/net/packet/internal.h
index d55bfc34d6b3..1309e2a7baad 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -109,10 +109,12 @@ struct packet_sock {
109 int copy_thresh; 109 int copy_thresh;
110 spinlock_t bind_lock; 110 spinlock_t bind_lock;
111 struct mutex pg_vec_lock; 111 struct mutex pg_vec_lock;
112 unsigned int running:1, /* prot_hook is attached*/ 112 unsigned int running; /* bind_lock must be held */
113 auxdata:1, 113 unsigned int auxdata:1, /* writer must hold sock lock */
114 origdev:1, 114 origdev:1,
115 has_vnet_hdr:1; 115 has_vnet_hdr:1,
116 tp_loss:1,
117 tp_tx_has_off:1;
116 int pressure; 118 int pressure;
117 int ifindex; /* bound device */ 119 int ifindex; /* bound device */
118 __be16 num; 120 __be16 num;
@@ -122,8 +124,6 @@ struct packet_sock {
122 enum tpacket_versions tp_version; 124 enum tpacket_versions tp_version;
123 unsigned int tp_hdrlen; 125 unsigned int tp_hdrlen;
124 unsigned int tp_reserve; 126 unsigned int tp_reserve;
125 unsigned int tp_loss:1;
126 unsigned int tp_tx_has_off:1;
127 unsigned int tp_tstamp; 127 unsigned int tp_tstamp;
128 struct net_device __rcu *cached_dev; 128 struct net_device __rcu *cached_dev;
129 int (*xmit)(struct sk_buff *skb); 129 int (*xmit)(struct sk_buff *skb);
diff --git a/net/rds/bind.c b/net/rds/bind.c
index b22ea956522b..e29b47193645 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -108,6 +108,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
108 rs, &addr, (int)ntohs(*port)); 108 rs, &addr, (int)ntohs(*port));
109 break; 109 break;
110 } else { 110 } else {
111 rs->rs_bound_addr = 0;
111 rds_sock_put(rs); 112 rds_sock_put(rs);
112 ret = -ENOMEM; 113 ret = -ENOMEM;
113 break; 114 break;
diff --git a/net/rds/ib.c b/net/rds/ib.c
index f222885ac0c7..ed51ccc84b3a 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -336,7 +336,8 @@ static int rds_ib_laddr_check(struct net *net, __be32 addr)
336 /* Create a CMA ID and try to bind it. This catches both 336 /* Create a CMA ID and try to bind it. This catches both
337 * IB and iWARP capable NICs. 337 * IB and iWARP capable NICs.
338 */ 338 */
339 cm_id = rdma_create_id(&init_net, NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); 339 cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,
340 NULL, RDMA_PS_TCP, IB_QPT_RC);
340 if (IS_ERR(cm_id)) 341 if (IS_ERR(cm_id))
341 return PTR_ERR(cm_id); 342 return PTR_ERR(cm_id);
342 343
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 6b12b68541ae..05cab8c5a379 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -191,4 +191,5 @@ struct rds_transport rds_loop_transport = {
191 .inc_copy_to_user = rds_message_inc_copy_to_user, 191 .inc_copy_to_user = rds_message_inc_copy_to_user,
192 .inc_free = rds_loop_inc_free, 192 .inc_free = rds_loop_inc_free,
193 .t_name = "loopback", 193 .t_name = "loopback",
194 .t_type = RDS_TRANS_LOOP,
194}; 195};
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 4588860f4c3b..254f1345cf7e 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -401,6 +401,11 @@ struct rds_notifier {
401 int n_status; 401 int n_status;
402}; 402};
403 403
404/* Available as part of RDS core, so doesn't need to participate
405 * in get_preferred transport etc
406 */
407#define RDS_TRANS_LOOP 3
408
404/** 409/**
405 * struct rds_transport - transport specific behavioural hooks 410 * struct rds_transport - transport specific behavioural hooks
406 * 411 *
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 0514af3ab378..6275de19689c 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -76,6 +76,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
76 return; 76 return;
77 77
78 rs->rs_rcv_bytes += delta; 78 rs->rs_rcv_bytes += delta;
79
80 /* loop transport doesn't send/recv congestion updates */
81 if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
82 return;
83
79 now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs); 84 now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
80 85
81 rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d " 86 rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 93127220cb54..e6e249cc651c 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -140,13 +140,18 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
140 140
141 ret = rfkill_register(rfkill->rfkill_dev); 141 ret = rfkill_register(rfkill->rfkill_dev);
142 if (ret < 0) 142 if (ret < 0)
143 return ret; 143 goto err_destroy;
144 144
145 platform_set_drvdata(pdev, rfkill); 145 platform_set_drvdata(pdev, rfkill);
146 146
147 dev_info(&pdev->dev, "%s device registered.\n", rfkill->name); 147 dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
148 148
149 return 0; 149 return 0;
150
151err_destroy:
152 rfkill_destroy(rfkill->rfkill_dev);
153
154 return ret;
150} 155}
151 156
152static int rfkill_gpio_remove(struct platform_device *pdev) 157static int rfkill_gpio_remove(struct platform_device *pdev)
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index d7a9ab5a9d9c..6c65fb229e50 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -209,7 +209,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
209 struct sk_buff *trailer; 209 struct sk_buff *trailer;
210 unsigned int len; 210 unsigned int len;
211 u16 check; 211 u16 check;
212 int nsg; 212 int nsg, err;
213 213
214 sp = rxrpc_skb(skb); 214 sp = rxrpc_skb(skb);
215 215
@@ -240,7 +240,9 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
240 len &= ~(call->conn->size_align - 1); 240 len &= ~(call->conn->size_align - 1);
241 241
242 sg_init_table(sg, nsg); 242 sg_init_table(sg, nsg);
243 skb_to_sgvec(skb, sg, 0, len); 243 err = skb_to_sgvec(skb, sg, 0, len);
244 if (unlikely(err < 0))
245 return err;
244 crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); 246 crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
245 247
246 _leave(" = 0"); 248 _leave(" = 0");
@@ -336,7 +338,7 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
336 struct sk_buff *trailer; 338 struct sk_buff *trailer;
337 u32 data_size, buf; 339 u32 data_size, buf;
338 u16 check; 340 u16 check;
339 int nsg; 341 int nsg, ret;
340 342
341 _enter(""); 343 _enter("");
342 344
@@ -348,7 +350,9 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
348 goto nomem; 350 goto nomem;
349 351
350 sg_init_table(sg, nsg); 352 sg_init_table(sg, nsg);
351 skb_to_sgvec(skb, sg, 0, 8); 353 ret = skb_to_sgvec(skb, sg, 0, 8);
354 if (unlikely(ret < 0))
355 return ret;
352 356
353 /* start the decryption afresh */ 357 /* start the decryption afresh */
354 memset(&iv, 0, sizeof(iv)); 358 memset(&iv, 0, sizeof(iv));
@@ -411,7 +415,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
411 struct sk_buff *trailer; 415 struct sk_buff *trailer;
412 u32 data_size, buf; 416 u32 data_size, buf;
413 u16 check; 417 u16 check;
414 int nsg; 418 int nsg, ret;
415 419
416 _enter(",{%d}", skb->len); 420 _enter(",{%d}", skb->len);
417 421
@@ -430,7 +434,12 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
430 } 434 }
431 435
432 sg_init_table(sg, nsg); 436 sg_init_table(sg, nsg);
433 skb_to_sgvec(skb, sg, 0, skb->len); 437 ret = skb_to_sgvec(skb, sg, 0, skb->len);
438 if (unlikely(ret < 0)) {
439 if (sg != _sg)
440 kfree(sg);
441 return ret;
442 }
434 443
435 /* decrypt from the session key */ 444 /* decrypt from the session key */
436 token = call->conn->key->payload.data[0]; 445 token = call->conn->key->payload.data[0];
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 694a06f1e0d5..f44fea22d69c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -101,8 +101,10 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
101 a->order = n_i; 101 a->order = n_i;
102 102
103 nest = nla_nest_start(skb, a->order); 103 nest = nla_nest_start(skb, a->order);
104 if (nest == NULL) 104 if (nest == NULL) {
105 index--;
105 goto nla_put_failure; 106 goto nla_put_failure;
107 }
106 err = tcf_action_dump_1(skb, a, 0, 0); 108 err = tcf_action_dump_1(skb, a, 0, 0);
107 if (err < 0) { 109 if (err < 0) {
108 index--; 110 index--;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 0bc6f912f870..bd155e59be1c 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -249,10 +249,14 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
249 249
250static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg) 250static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
251{ 251{
252 if (cfg->is_ebpf) 252 struct bpf_prog *filter = cfg->filter;
253 bpf_prog_put(cfg->filter); 253
254 else 254 if (filter) {
255 bpf_prog_destroy(cfg->filter); 255 if (cfg->is_ebpf)
256 bpf_prog_put(filter);
257 else
258 bpf_prog_destroy(filter);
259 }
256 260
257 kfree(cfg->bpf_ops); 261 kfree(cfg->bpf_ops);
258 kfree(cfg->bpf_name); 262 kfree(cfg->bpf_name);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index eeb3eb3ea9eb..024d6cf342c5 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -175,6 +175,9 @@ static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
175 struct tcphdr *tcph; 175 struct tcphdr *tcph;
176 const struct iphdr *iph; 176 const struct iphdr *iph;
177 177
178 if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
179 return 1;
180
178 tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); 181 tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
179 if (tcph == NULL) 182 if (tcph == NULL)
180 return 0; 183 return 0;
@@ -196,6 +199,9 @@ static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
196 struct tcphdr *tcph; 199 struct tcphdr *tcph;
197 const struct ipv6hdr *ip6h; 200 const struct ipv6hdr *ip6h;
198 201
202 if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
203 return 1;
204
199 tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); 205 tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
200 if (tcph == NULL) 206 if (tcph == NULL)
201 return 0; 207 return 0;
@@ -219,6 +225,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb,
219 const struct iphdr *iph; 225 const struct iphdr *iph;
220 u16 ul; 226 u16 ul;
221 227
228 if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
229 return 1;
230
222 /* 231 /*
223 * Support both UDP and UDPLITE checksum algorithms, Don't use 232 * Support both UDP and UDPLITE checksum algorithms, Don't use
224 * udph->len to get the real length without any protocol check, 233 * udph->len to get the real length without any protocol check,
@@ -272,6 +281,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb,
272 const struct ipv6hdr *ip6h; 281 const struct ipv6hdr *ip6h;
273 u16 ul; 282 u16 ul;
274 283
284 if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
285 return 1;
286
275 /* 287 /*
276 * Support both UDP and UDPLITE checksum algorithms, Don't use 288 * Support both UDP and UDPLITE checksum algorithms, Don't use
277 * udph->len to get the real length without any protocol check, 289 * udph->len to get the real length without any protocol check,
diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
index 3fee70d9814f..562edd50fa94 100644
--- a/net/sched/sch_blackhole.c
+++ b/net/sched/sch_blackhole.c
@@ -20,7 +20,7 @@
20static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch) 20static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch)
21{ 21{
22 qdisc_drop(skb, sch); 22 qdisc_drop(skb, sch);
23 return NET_XMIT_SUCCESS; 23 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
24} 24}
25 25
26static struct sk_buff *blackhole_dequeue(struct Qdisc *sch) 26static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 0a08c860eee4..e8dcf94a23c8 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -438,6 +438,9 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
438 438
439 ctl = nla_data(tb[TCA_CHOKE_PARMS]); 439 ctl = nla_data(tb[TCA_CHOKE_PARMS]);
440 440
441 if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
442 return -EINVAL;
443
441 if (ctl->limit > CHOKE_MAX_QUEUE) 444 if (ctl->limit > CHOKE_MAX_QUEUE)
442 return -EINVAL; 445 return -EINVAL;
443 446
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 3c6a47d66a04..117ed90c5f21 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -126,6 +126,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
126 return f->next == &detached; 126 return f->next == &detached;
127} 127}
128 128
129static bool fq_flow_is_throttled(const struct fq_flow *f)
130{
131 return f->next == &throttled;
132}
133
134static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
135{
136 if (head->first)
137 head->last->next = flow;
138 else
139 head->first = flow;
140 head->last = flow;
141 flow->next = NULL;
142}
143
144static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
145{
146 rb_erase(&f->rate_node, &q->delayed);
147 q->throttled_flows--;
148 fq_flow_add_tail(&q->old_flows, f);
149}
150
129static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) 151static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
130{ 152{
131 struct rb_node **p = &q->delayed.rb_node, *parent = NULL; 153 struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -153,15 +175,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
153 175
154static struct kmem_cache *fq_flow_cachep __read_mostly; 176static struct kmem_cache *fq_flow_cachep __read_mostly;
155 177
156static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
157{
158 if (head->first)
159 head->last->next = flow;
160 else
161 head->first = flow;
162 head->last = flow;
163 flow->next = NULL;
164}
165 178
166/* limit number of collected flows per round */ 179/* limit number of collected flows per round */
167#define FQ_GC_MAX 8 180#define FQ_GC_MAX 8
@@ -265,6 +278,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
265 f->socket_hash != sk->sk_hash)) { 278 f->socket_hash != sk->sk_hash)) {
266 f->credit = q->initial_quantum; 279 f->credit = q->initial_quantum;
267 f->socket_hash = sk->sk_hash; 280 f->socket_hash = sk->sk_hash;
281 if (fq_flow_is_throttled(f))
282 fq_flow_unset_throttled(q, f);
268 f->time_next_packet = 0ULL; 283 f->time_next_packet = 0ULL;
269 } 284 }
270 return f; 285 return f;
@@ -419,9 +434,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
419 q->time_next_delayed_flow = f->time_next_packet; 434 q->time_next_delayed_flow = f->time_next_packet;
420 break; 435 break;
421 } 436 }
422 rb_erase(p, &q->delayed); 437 fq_flow_unset_throttled(q, f);
423 q->throttled_flows--;
424 fq_flow_add_tail(&q->old_flows, f);
425 } 438 }
426} 439}
427 440
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 80105109f756..f9e8deeeac96 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -389,6 +389,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
389 struct gred_sched *table = qdisc_priv(sch); 389 struct gred_sched *table = qdisc_priv(sch);
390 struct gred_sched_data *q = table->tab[dp]; 390 struct gred_sched_data *q = table->tab[dp];
391 391
392 if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
393 return -EINVAL;
394
392 if (!q) { 395 if (!q) {
393 table->tab[dp] = q = *prealloc; 396 table->tab[dp] = q = *prealloc;
394 *prealloc = NULL; 397 *prealloc = NULL;
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 8c0508c0e287..0505b8408c8b 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -199,6 +199,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
199 max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0; 199 max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
200 200
201 ctl = nla_data(tb[TCA_RED_PARMS]); 201 ctl = nla_data(tb[TCA_RED_PARMS]);
202 if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
203 return -EINVAL;
202 204
203 if (ctl->limit > 0) { 205 if (ctl->limit > 0) {
204 child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit); 206 child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3f2c3eed04da..8b8c084b32cd 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -633,6 +633,9 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
633 if (ctl->divisor && 633 if (ctl->divisor &&
634 (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) 634 (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
635 return -EINVAL; 635 return -EINVAL;
636 if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
637 ctl_v1->Wlog))
638 return -EINVAL;
636 if (ctl_v1 && ctl_v1->qth_min) { 639 if (ctl_v1 && ctl_v1->qth_min) {
637 p = kmalloc(sizeof(*p), GFP_KERNEL); 640 p = kmalloc(sizeof(*p), GFP_KERNEL);
638 if (!p) 641 if (!p)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 559afd0ee7de..a40b8b0ef0d5 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1000,9 +1000,10 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1000 struct sctp_endpoint *ep; 1000 struct sctp_endpoint *ep;
1001 struct sctp_chunk *chunk; 1001 struct sctp_chunk *chunk;
1002 struct sctp_inq *inqueue; 1002 struct sctp_inq *inqueue;
1003 int state;
1004 sctp_subtype_t subtype; 1003 sctp_subtype_t subtype;
1004 int first_time = 1; /* is this the first time through the loop */
1005 int error = 0; 1005 int error = 0;
1006 int state;
1006 1007
1007 /* The association should be held so we should be safe. */ 1008 /* The association should be held so we should be safe. */
1008 ep = asoc->ep; 1009 ep = asoc->ep;
@@ -1013,6 +1014,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1013 state = asoc->state; 1014 state = asoc->state;
1014 subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); 1015 subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
1015 1016
1017 /* If the first chunk in the packet is AUTH, do special
1018 * processing specified in Section 6.3 of SCTP-AUTH spec
1019 */
1020 if (first_time && subtype.chunk == SCTP_CID_AUTH) {
1021 struct sctp_chunkhdr *next_hdr;
1022
1023 next_hdr = sctp_inq_peek(inqueue);
1024 if (!next_hdr)
1025 goto normal;
1026
1027 /* If the next chunk is COOKIE-ECHO, skip the AUTH
1028 * chunk while saving a pointer to it so we can do
1029 * Authentication later (during cookie-echo
1030 * processing).
1031 */
1032 if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
1033 chunk->auth_chunk = skb_clone(chunk->skb,
1034 GFP_ATOMIC);
1035 chunk->auth = 1;
1036 continue;
1037 }
1038 }
1039
1040normal:
1016 /* SCTP-AUTH, Section 6.3: 1041 /* SCTP-AUTH, Section 6.3:
1017 * The receiver has a list of chunk types which it expects 1042 * The receiver has a list of chunk types which it expects
1018 * to be received only after an AUTH-chunk. This list has 1043 * to be received only after an AUTH-chunk. This list has
@@ -1051,6 +1076,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1051 /* If there is an error on chunk, discard this packet. */ 1076 /* If there is an error on chunk, discard this packet. */
1052 if (error && chunk) 1077 if (error && chunk)
1053 chunk->pdiscard = 1; 1078 chunk->pdiscard = 1;
1079
1080 if (first_time)
1081 first_time = 0;
1054 } 1082 }
1055 sctp_association_put(asoc); 1083 sctp_association_put(asoc);
1056} 1084}
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 7e8a16c77039..8d9b7ad25b65 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -178,7 +178,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
178 skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); 178 skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
179 chunk->subh.v = NULL; /* Subheader is no longer valid. */ 179 chunk->subh.v = NULL; /* Subheader is no longer valid. */
180 180
181 if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) < 181 if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <=
182 skb_tail_pointer(chunk->skb)) { 182 skb_tail_pointer(chunk->skb)) {
183 /* This is not a singleton */ 183 /* This is not a singleton */
184 chunk->singleton = 0; 184 chunk->singleton = 0;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 8a61ccc37e12..5ca8309ea7b1 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -323,8 +323,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
323 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); 323 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
324 bdst = ip6_dst_lookup_flow(sk, fl6, final_p); 324 bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
325 325
326 if (!IS_ERR(bdst) && 326 if (IS_ERR(bdst))
327 ipv6_chk_addr(dev_net(bdst->dev), 327 continue;
328
329 if (ipv6_chk_addr(dev_net(bdst->dev),
328 &laddr->a.v6.sin6_addr, bdst->dev, 1)) { 330 &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
329 if (!IS_ERR_OR_NULL(dst)) 331 if (!IS_ERR_OR_NULL(dst))
330 dst_release(dst); 332 dst_release(dst);
@@ -333,8 +335,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
333 } 335 }
334 336
335 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); 337 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
336 if (matchlen > bmatchlen) 338 if (matchlen > bmatchlen) {
339 dst_release(bdst);
337 continue; 340 continue;
341 }
338 342
339 if (!IS_ERR_OR_NULL(dst)) 343 if (!IS_ERR_OR_NULL(dst))
340 dst_release(dst); 344 dst_release(dst);
@@ -515,46 +519,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
515 addr->v6.sin6_scope_id = 0; 519 addr->v6.sin6_scope_id = 0;
516} 520}
517 521
518/* Compare addresses exactly. 522static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
519 * v4-mapped-v6 is also in consideration. 523 const union sctp_addr *addr2)
520 */
521static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
522 const union sctp_addr *addr2)
523{ 524{
524 if (addr1->sa.sa_family != addr2->sa.sa_family) { 525 if (addr1->sa.sa_family != addr2->sa.sa_family) {
525 if (addr1->sa.sa_family == AF_INET && 526 if (addr1->sa.sa_family == AF_INET &&
526 addr2->sa.sa_family == AF_INET6 && 527 addr2->sa.sa_family == AF_INET6 &&
527 ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) { 528 ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
528 if (addr2->v6.sin6_port == addr1->v4.sin_port && 529 addr2->v6.sin6_addr.s6_addr32[3] ==
529 addr2->v6.sin6_addr.s6_addr32[3] == 530 addr1->v4.sin_addr.s_addr)
530 addr1->v4.sin_addr.s_addr) 531 return 1;
531 return 1; 532
532 }
533 if (addr2->sa.sa_family == AF_INET && 533 if (addr2->sa.sa_family == AF_INET &&
534 addr1->sa.sa_family == AF_INET6 && 534 addr1->sa.sa_family == AF_INET6 &&
535 ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) { 535 ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
536 if (addr1->v6.sin6_port == addr2->v4.sin_port && 536 addr1->v6.sin6_addr.s6_addr32[3] ==
537 addr1->v6.sin6_addr.s6_addr32[3] == 537 addr2->v4.sin_addr.s_addr)
538 addr2->v4.sin_addr.s_addr) 538 return 1;
539 return 1; 539
540 }
541 return 0; 540 return 0;
542 } 541 }
543 if (addr1->v6.sin6_port != addr2->v6.sin6_port) 542
544 return 0;
545 if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) 543 if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
546 return 0; 544 return 0;
545
547 /* If this is a linklocal address, compare the scope_id. */ 546 /* If this is a linklocal address, compare the scope_id. */
548 if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { 547 if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
549 if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && 548 addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
550 (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) { 549 addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
551 return 0; 550 return 0;
552 }
553 }
554 551
555 return 1; 552 return 1;
556} 553}
557 554
555/* Compare addresses exactly.
556 * v4-mapped-v6 is also in consideration.
557 */
558static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
559 const union sctp_addr *addr2)
560{
561 return __sctp_v6_cmp_addr(addr1, addr2) &&
562 addr1->v6.sin6_port == addr2->v6.sin6_port;
563}
564
558/* Initialize addr struct to INADDR_ANY. */ 565/* Initialize addr struct to INADDR_ANY. */
559static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port) 566static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
560{ 567{
@@ -719,8 +726,10 @@ static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
719 sctp_v6_map_v4(addr); 726 sctp_v6_map_v4(addr);
720 } 727 }
721 728
722 if (addr->sa.sa_family == AF_INET) 729 if (addr->sa.sa_family == AF_INET) {
730 memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
723 return sizeof(struct sockaddr_in); 731 return sizeof(struct sockaddr_in);
732 }
724 return sizeof(struct sockaddr_in6); 733 return sizeof(struct sockaddr_in6);
725} 734}
726 735
@@ -837,8 +846,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
837 const union sctp_addr *addr2, 846 const union sctp_addr *addr2,
838 struct sctp_sock *opt) 847 struct sctp_sock *opt)
839{ 848{
840 struct sctp_af *af1, *af2;
841 struct sock *sk = sctp_opt2sk(opt); 849 struct sock *sk = sctp_opt2sk(opt);
850 struct sctp_af *af1, *af2;
842 851
843 af1 = sctp_get_af_specific(addr1->sa.sa_family); 852 af1 = sctp_get_af_specific(addr1->sa.sa_family);
844 af2 = sctp_get_af_specific(addr2->sa.sa_family); 853 af2 = sctp_get_af_specific(addr2->sa.sa_family);
@@ -854,10 +863,10 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
854 if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) 863 if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
855 return 1; 864 return 1;
856 865
857 if (addr1->sa.sa_family != addr2->sa.sa_family) 866 if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
858 return 0; 867 return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
859 868
860 return af1->cmp_addr(addr1, addr2); 869 return __sctp_v6_cmp_addr(addr1, addr2);
861} 870}
862 871
863/* Verify that the provided sockaddr looks bindable. Common verification, 872/* Verify that the provided sockaddr looks bindable. Common verification,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 8b4ff315695e..dc030efa4447 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -508,22 +508,20 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
508 if (IS_ERR(rt)) 508 if (IS_ERR(rt))
509 continue; 509 continue;
510 510
511 if (!dst)
512 dst = &rt->dst;
513
514 /* Ensure the src address belongs to the output 511 /* Ensure the src address belongs to the output
515 * interface. 512 * interface.
516 */ 513 */
517 odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr, 514 odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
518 false); 515 false);
519 if (!odev || odev->ifindex != fl4->flowi4_oif) { 516 if (!odev || odev->ifindex != fl4->flowi4_oif) {
520 if (&rt->dst != dst) 517 if (!dst)
518 dst = &rt->dst;
519 else
521 dst_release(&rt->dst); 520 dst_release(&rt->dst);
522 continue; 521 continue;
523 } 522 }
524 523
525 if (dst != &rt->dst) 524 dst_release(dst);
526 dst_release(dst);
527 dst = &rt->dst; 525 dst = &rt->dst;
528 break; 526 break;
529 } 527 }
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 5d6a03fad378..509e9426a056 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1367,10 +1367,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
1367 sctp_chunkhdr_t *chunk_hdr; 1367 sctp_chunkhdr_t *chunk_hdr;
1368 struct sk_buff *skb; 1368 struct sk_buff *skb;
1369 struct sock *sk; 1369 struct sock *sk;
1370 int chunklen;
1371
1372 chunklen = WORD_ROUND(sizeof(*chunk_hdr) + paylen);
1373 if (chunklen > SCTP_MAX_CHUNK_LEN)
1374 goto nodata;
1370 1375
1371 /* No need to allocate LL here, as this is only a chunk. */ 1376 /* No need to allocate LL here, as this is only a chunk. */
1372 skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen), 1377 skb = alloc_skb(chunklen, GFP_ATOMIC);
1373 GFP_ATOMIC);
1374 if (!skb) 1378 if (!skb)
1375 goto nodata; 1379 goto nodata;
1376 1380
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 29c7c43de108..df9ac3746c1b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -144,10 +144,8 @@ static sctp_disposition_t sctp_sf_violation_chunk(
144 void *arg, 144 void *arg,
145 sctp_cmd_seq_t *commands); 145 sctp_cmd_seq_t *commands);
146 146
147static sctp_ierror_t sctp_sf_authenticate(struct net *net, 147static sctp_ierror_t sctp_sf_authenticate(
148 const struct sctp_endpoint *ep,
149 const struct sctp_association *asoc, 148 const struct sctp_association *asoc,
150 const sctp_subtype_t type,
151 struct sctp_chunk *chunk); 149 struct sctp_chunk *chunk);
152 150
153static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, 151static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
@@ -615,6 +613,38 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
615 return SCTP_DISPOSITION_CONSUME; 613 return SCTP_DISPOSITION_CONSUME;
616} 614}
617 615
616static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
617 const struct sctp_association *asoc)
618{
619 struct sctp_chunk auth;
620
621 if (!chunk->auth_chunk)
622 return true;
623
624 /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
625 * is supposed to be authenticated and we have to do delayed
626 * authentication. We've just recreated the association using
627 * the information in the cookie and now it's much easier to
628 * do the authentication.
629 */
630
631 /* Make sure that we and the peer are AUTH capable */
632 if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
633 return false;
634
635 /* set-up our fake chunk so that we can process it */
636 auth.skb = chunk->auth_chunk;
637 auth.asoc = chunk->asoc;
638 auth.sctp_hdr = chunk->sctp_hdr;
639 auth.chunk_hdr = (struct sctp_chunkhdr *)
640 skb_push(chunk->auth_chunk,
641 sizeof(struct sctp_chunkhdr));
642 skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
643 auth.transport = chunk->transport;
644
645 return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
646}
647
618/* 648/*
619 * Respond to a normal COOKIE ECHO chunk. 649 * Respond to a normal COOKIE ECHO chunk.
620 * We are the side that is being asked for an association. 650 * We are the side that is being asked for an association.
@@ -751,36 +781,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
751 if (error) 781 if (error)
752 goto nomem_init; 782 goto nomem_init;
753 783
754 /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo 784 if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
755 * is supposed to be authenticated and we have to do delayed 785 sctp_association_free(new_asoc);
756 * authentication. We've just recreated the association using 786 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
757 * the information in the cookie and now it's much easier to
758 * do the authentication.
759 */
760 if (chunk->auth_chunk) {
761 struct sctp_chunk auth;
762 sctp_ierror_t ret;
763
764 /* Make sure that we and the peer are AUTH capable */
765 if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
766 sctp_association_free(new_asoc);
767 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
768 }
769
770 /* set-up our fake chunk so that we can process it */
771 auth.skb = chunk->auth_chunk;
772 auth.asoc = chunk->asoc;
773 auth.sctp_hdr = chunk->sctp_hdr;
774 auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
775 sizeof(sctp_chunkhdr_t));
776 skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
777 auth.transport = chunk->transport;
778
779 ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
780 if (ret != SCTP_IERROR_NO_ERROR) {
781 sctp_association_free(new_asoc);
782 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
783 }
784 } 787 }
785 788
786 repl = sctp_make_cookie_ack(new_asoc, chunk); 789 repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1717,13 +1720,15 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
1717 GFP_ATOMIC)) 1720 GFP_ATOMIC))
1718 goto nomem; 1721 goto nomem;
1719 1722
1723 if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
1724 return SCTP_DISPOSITION_DISCARD;
1725
1720 /* Make sure no new addresses are being added during the 1726 /* Make sure no new addresses are being added during the
1721 * restart. Though this is a pretty complicated attack 1727 * restart. Though this is a pretty complicated attack
1722 * since you'd have to get inside the cookie. 1728 * since you'd have to get inside the cookie.
1723 */ 1729 */
1724 if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) { 1730 if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
1725 return SCTP_DISPOSITION_CONSUME; 1731 return SCTP_DISPOSITION_CONSUME;
1726 }
1727 1732
1728 /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes 1733 /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
1729 * the peer has restarted (Action A), it MUST NOT setup a new 1734 * the peer has restarted (Action A), it MUST NOT setup a new
@@ -1828,6 +1833,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net,
1828 GFP_ATOMIC)) 1833 GFP_ATOMIC))
1829 goto nomem; 1834 goto nomem;
1830 1835
1836 if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
1837 return SCTP_DISPOSITION_DISCARD;
1838
1831 /* Update the content of current association. */ 1839 /* Update the content of current association. */
1832 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); 1840 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
1833 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 1841 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -1920,6 +1928,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
1920 * a COOKIE ACK. 1928 * a COOKIE ACK.
1921 */ 1929 */
1922 1930
1931 if (!sctp_auth_chunk_verify(net, chunk, asoc))
1932 return SCTP_DISPOSITION_DISCARD;
1933
1923 /* Don't accidentally move back into established state. */ 1934 /* Don't accidentally move back into established state. */
1924 if (asoc->state < SCTP_STATE_ESTABLISHED) { 1935 if (asoc->state < SCTP_STATE_ESTABLISHED) {
1925 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 1936 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -1959,7 +1970,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
1959 } 1970 }
1960 } 1971 }
1961 1972
1962 repl = sctp_make_cookie_ack(new_asoc, chunk); 1973 repl = sctp_make_cookie_ack(asoc, chunk);
1963 if (!repl) 1974 if (!repl)
1964 goto nomem; 1975 goto nomem;
1965 1976
@@ -3985,10 +3996,8 @@ gen_shutdown:
3985 * 3996 *
3986 * The return value is the disposition of the chunk. 3997 * The return value is the disposition of the chunk.
3987 */ 3998 */
3988static sctp_ierror_t sctp_sf_authenticate(struct net *net, 3999static sctp_ierror_t sctp_sf_authenticate(
3989 const struct sctp_endpoint *ep,
3990 const struct sctp_association *asoc, 4000 const struct sctp_association *asoc,
3991 const sctp_subtype_t type,
3992 struct sctp_chunk *chunk) 4001 struct sctp_chunk *chunk)
3993{ 4002{
3994 struct sctp_authhdr *auth_hdr; 4003 struct sctp_authhdr *auth_hdr;
@@ -4087,7 +4096,7 @@ sctp_disposition_t sctp_sf_eat_auth(struct net *net,
4087 commands); 4096 commands);
4088 4097
4089 auth_hdr = (struct sctp_authhdr *)chunk->skb->data; 4098 auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
4090 error = sctp_sf_authenticate(net, ep, asoc, type, chunk); 4099 error = sctp_sf_authenticate(asoc, chunk);
4091 switch (error) { 4100 switch (error) {
4092 case SCTP_IERROR_AUTH_BAD_HMAC: 4101 case SCTP_IERROR_AUTH_BAD_HMAC:
4093 /* Generate the ERROR chunk and discard the rest 4102 /* Generate the ERROR chunk and discard the rest
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a870d27ca778..13c7f42b7040 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -83,7 +83,7 @@
83static int sctp_writeable(struct sock *sk); 83static int sctp_writeable(struct sock *sk);
84static void sctp_wfree(struct sk_buff *skb); 84static void sctp_wfree(struct sk_buff *skb);
85static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 85static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
86 size_t msg_len, struct sock **orig_sk); 86 size_t msg_len);
87static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); 87static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
88static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); 88static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
89static int sctp_wait_for_accept(struct sock *sk, long timeo); 89static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -332,14 +332,15 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
332 if (len < sizeof (struct sockaddr)) 332 if (len < sizeof (struct sockaddr))
333 return NULL; 333 return NULL;
334 334
335 /* V4 mapped address are really of AF_INET family */ 335 if (!opt->pf->af_supported(addr->sa.sa_family, opt))
336 if (addr->sa.sa_family == AF_INET6 && 336 return NULL;
337 ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { 337
338 if (!opt->pf->af_supported(AF_INET, opt)) 338 if (addr->sa.sa_family == AF_INET6) {
339 if (len < SIN6_LEN_RFC2133)
339 return NULL; 340 return NULL;
340 } else { 341 /* V4 mapped address are really of AF_INET family */
341 /* Does this PF support this AF? */ 342 if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
342 if (!opt->pf->af_supported(addr->sa.sa_family, opt)) 343 !opt->pf->af_supported(AF_INET, opt))
343 return NULL; 344 return NULL;
344 } 345 }
345 346
@@ -1520,7 +1521,7 @@ static void sctp_close(struct sock *sk, long timeout)
1520 1521
1521 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); 1522 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
1522 1523
1523 lock_sock(sk); 1524 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1524 sk->sk_shutdown = SHUTDOWN_MASK; 1525 sk->sk_shutdown = SHUTDOWN_MASK;
1525 sk->sk_state = SCTP_SS_CLOSING; 1526 sk->sk_state = SCTP_SS_CLOSING;
1526 1527
@@ -1571,7 +1572,7 @@ static void sctp_close(struct sock *sk, long timeout)
1571 * held and that should be grabbed before socket lock. 1572 * held and that should be grabbed before socket lock.
1572 */ 1573 */
1573 spin_lock_bh(&net->sctp.addr_wq_lock); 1574 spin_lock_bh(&net->sctp.addr_wq_lock);
1574 bh_lock_sock(sk); 1575 bh_lock_sock_nested(sk);
1575 1576
1576 /* Hold the sock, since sk_common_release() will put sock_put() 1577 /* Hold the sock, since sk_common_release() will put sock_put()
1577 * and we have just a little more cleanup. 1578 * and we have just a little more cleanup.
@@ -1954,7 +1955,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1954 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1955 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1955 if (!sctp_wspace(asoc)) { 1956 if (!sctp_wspace(asoc)) {
1956 /* sk can be changed by peel off when waiting for buf. */ 1957 /* sk can be changed by peel off when waiting for buf. */
1957 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk); 1958 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
1958 if (err) { 1959 if (err) {
1959 if (err == -ESRCH) { 1960 if (err == -ESRCH) {
1960 /* asoc is already dead. */ 1961 /* asoc is already dead. */
@@ -4447,7 +4448,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
4447 len = sizeof(int); 4448 len = sizeof(int);
4448 if (put_user(len, optlen)) 4449 if (put_user(len, optlen))
4449 return -EFAULT; 4450 return -EFAULT;
4450 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 4451 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
4451 return -EFAULT; 4452 return -EFAULT;
4452 return 0; 4453 return 0;
4453} 4454}
@@ -5024,6 +5025,9 @@ copy_getaddrs:
5024 err = -EFAULT; 5025 err = -EFAULT;
5025 goto out; 5026 goto out;
5026 } 5027 }
5028 /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
5029 * but we can't change it anymore.
5030 */
5027 if (put_user(bytes_copied, optlen)) 5031 if (put_user(bytes_copied, optlen))
5028 err = -EFAULT; 5032 err = -EFAULT;
5029out: 5033out:
@@ -5460,7 +5464,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
5460 params.assoc_id = 0; 5464 params.assoc_id = 0;
5461 } else if (len >= sizeof(struct sctp_assoc_value)) { 5465 } else if (len >= sizeof(struct sctp_assoc_value)) {
5462 len = sizeof(struct sctp_assoc_value); 5466 len = sizeof(struct sctp_assoc_value);
5463 if (copy_from_user(&params, optval, sizeof(params))) 5467 if (copy_from_user(&params, optval, len))
5464 return -EFAULT; 5468 return -EFAULT;
5465 } else 5469 } else
5466 return -EINVAL; 5470 return -EINVAL;
@@ -5629,7 +5633,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
5629 5633
5630 if (len < sizeof(struct sctp_authkeyid)) 5634 if (len < sizeof(struct sctp_authkeyid))
5631 return -EINVAL; 5635 return -EINVAL;
5632 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 5636
5637 len = sizeof(struct sctp_authkeyid);
5638 if (copy_from_user(&val, optval, len))
5633 return -EFAULT; 5639 return -EFAULT;
5634 5640
5635 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 5641 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
@@ -5641,7 +5647,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
5641 else 5647 else
5642 val.scact_keynumber = ep->active_key_id; 5648 val.scact_keynumber = ep->active_key_id;
5643 5649
5644 len = sizeof(struct sctp_authkeyid);
5645 if (put_user(len, optlen)) 5650 if (put_user(len, optlen))
5646 return -EFAULT; 5651 return -EFAULT;
5647 if (copy_to_user(optval, &val, len)) 5652 if (copy_to_user(optval, &val, len))
@@ -5667,7 +5672,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
5667 if (len < sizeof(struct sctp_authchunks)) 5672 if (len < sizeof(struct sctp_authchunks))
5668 return -EINVAL; 5673 return -EINVAL;
5669 5674
5670 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5675 if (copy_from_user(&val, optval, sizeof(val)))
5671 return -EFAULT; 5676 return -EFAULT;
5672 5677
5673 to = p->gauth_chunks; 5678 to = p->gauth_chunks;
@@ -5712,7 +5717,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
5712 if (len < sizeof(struct sctp_authchunks)) 5717 if (len < sizeof(struct sctp_authchunks))
5713 return -EINVAL; 5718 return -EINVAL;
5714 5719
5715 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5720 if (copy_from_user(&val, optval, sizeof(val)))
5716 return -EFAULT; 5721 return -EFAULT;
5717 5722
5718 to = p->gauth_chunks; 5723 to = p->gauth_chunks;
@@ -6976,12 +6981,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
6976 6981
6977/* Helper function to wait for space in the sndbuf. */ 6982/* Helper function to wait for space in the sndbuf. */
6978static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 6983static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
6979 size_t msg_len, struct sock **orig_sk) 6984 size_t msg_len)
6980{ 6985{
6981 struct sock *sk = asoc->base.sk; 6986 struct sock *sk = asoc->base.sk;
6982 int err = 0;
6983 long current_timeo = *timeo_p; 6987 long current_timeo = *timeo_p;
6984 DEFINE_WAIT(wait); 6988 DEFINE_WAIT(wait);
6989 int err = 0;
6985 6990
6986 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, 6991 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
6987 *timeo_p, msg_len); 6992 *timeo_p, msg_len);
@@ -7010,17 +7015,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
7010 release_sock(sk); 7015 release_sock(sk);
7011 current_timeo = schedule_timeout(current_timeo); 7016 current_timeo = schedule_timeout(current_timeo);
7012 lock_sock(sk); 7017 lock_sock(sk);
7013 if (sk != asoc->base.sk) { 7018 if (sk != asoc->base.sk)
7014 release_sock(sk); 7019 goto do_error;
7015 sk = asoc->base.sk;
7016 lock_sock(sk);
7017 }
7018 7020
7019 *timeo_p = current_timeo; 7021 *timeo_p = current_timeo;
7020 } 7022 }
7021 7023
7022out: 7024out:
7023 *orig_sk = sk;
7024 finish_wait(&asoc->wait, &wait); 7025 finish_wait(&asoc->wait, &wait);
7025 7026
7026 /* Release the association's refcnt. */ 7027 /* Release the association's refcnt. */
diff --git a/net/socket.c b/net/socket.c
index 2cf4f25f5c2b..0c544ae48eac 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -89,6 +89,7 @@
89#include <linux/magic.h> 89#include <linux/magic.h>
90#include <linux/slab.h> 90#include <linux/slab.h>
91#include <linux/xattr.h> 91#include <linux/xattr.h>
92#include <linux/nospec.h>
92 93
93#include <asm/uaccess.h> 94#include <asm/uaccess.h>
94#include <asm/unistd.h> 95#include <asm/unistd.h>
@@ -2324,6 +2325,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
2324 2325
2325 if (call < 1 || call > SYS_SENDMMSG) 2326 if (call < 1 || call > SYS_SENDMMSG)
2326 return -EINVAL; 2327 return -EINVAL;
2328 call = array_index_nospec(call, SYS_SENDMMSG + 1);
2327 2329
2328 len = nargs[call]; 2330 len = nargs[call];
2329 if (len > sizeof(a)) 2331 if (len > sizeof(a))
@@ -2534,6 +2536,15 @@ out_fs:
2534 2536
2535core_initcall(sock_init); /* early initcall */ 2537core_initcall(sock_init); /* early initcall */
2536 2538
2539static int __init jit_init(void)
2540{
2541#ifdef CONFIG_BPF_JIT_ALWAYS_ON
2542 bpf_jit_enable = 1;
2543#endif
2544 return 0;
2545}
2546pure_initcall(jit_init);
2547
2537#ifdef CONFIG_PROC_FS 2548#ifdef CONFIG_PROC_FS
2538void socket_seq_show(struct seq_file *seq) 2549void socket_seq_show(struct seq_file *seq)
2539{ 2550{
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index d81186d34558..9103dd15511c 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
1375 struct dentry *clnt_dir = pipe_dentry->d_parent; 1375 struct dentry *clnt_dir = pipe_dentry->d_parent;
1376 struct dentry *gssd_dir = clnt_dir->d_parent; 1376 struct dentry *gssd_dir = clnt_dir->d_parent;
1377 1377
1378 dget(pipe_dentry);
1378 __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry); 1379 __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
1379 __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1); 1380 __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
1380 __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1); 1381 __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 27b6f55fa43a..c9c0976d3bbb 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2360,9 +2360,15 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2360 case -ECONNREFUSED: 2360 case -ECONNREFUSED:
2361 case -ECONNRESET: 2361 case -ECONNRESET:
2362 case -ENETUNREACH: 2362 case -ENETUNREACH:
2363 case -EHOSTUNREACH:
2363 case -EADDRINUSE: 2364 case -EADDRINUSE:
2364 case -ENOBUFS: 2365 case -ENOBUFS:
2365 /* retry with existing socket, after a delay */ 2366 /*
2367 * xs_tcp_force_close() wakes tasks with -EIO.
2368 * We need to wake them first to ensure the
2369 * correct error code.
2370 */
2371 xprt_wake_pending_tasks(xprt, status);
2366 xs_tcp_force_close(xprt); 2372 xs_tcp_force_close(xprt);
2367 goto out; 2373 goto out;
2368 } 2374 }
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 77bf9113c7a7..2763bd369b79 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -44,7 +44,8 @@
44 44
45static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { 45static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
46 [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, 46 [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
47 [TIPC_NLA_NET_ID] = { .type = NLA_U32 } 47 [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
48 [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
48}; 49};
49 50
50/* 51/*
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 8f0bac7e03c4..a1e909ae0f78 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -94,6 +94,9 @@ static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev,
94 94
95 ASSERT_RTNL(); 95 ASSERT_RTNL();
96 96
97 if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN)
98 return -EINVAL;
99
97 /* prohibit calling the thing phy%d when %d is not its number */ 100 /* prohibit calling the thing phy%d when %d is not its number */
98 sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); 101 sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken);
99 if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) { 102 if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) {
@@ -390,6 +393,8 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
390 if (rv) 393 if (rv)
391 goto use_default_name; 394 goto use_default_name;
392 } else { 395 } else {
396 int rv;
397
393use_default_name: 398use_default_name:
394 /* NOTE: This is *probably* safe w/out holding rtnl because of 399 /* NOTE: This is *probably* safe w/out holding rtnl because of
395 * the restrictions on phy names. Probably this call could 400 * the restrictions on phy names. Probably this call could
@@ -397,7 +402,11 @@ use_default_name:
397 * phyX. But, might should add some locking and check return 402 * phyX. But, might should add some locking and check return
398 * value, and use a different name if this one exists? 403 * value, and use a different name if this one exists?
399 */ 404 */
400 dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); 405 rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
406 if (rv < 0) {
407 kfree(rdev);
408 return NULL;
409 }
401 } 410 }
402 411
403 INIT_LIST_HEAD(&rdev->wdev_list); 412 INIT_LIST_HEAD(&rdev->wdev_list);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7950506395a8..b0b58d1565c2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -16,6 +16,7 @@
16#include <linux/nl80211.h> 16#include <linux/nl80211.h>
17#include <linux/rtnetlink.h> 17#include <linux/rtnetlink.h>
18#include <linux/netlink.h> 18#include <linux/netlink.h>
19#include <linux/nospec.h>
19#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
20#include <net/net_namespace.h> 21#include <net/net_namespace.h>
21#include <net/genetlink.h> 22#include <net/genetlink.h>
@@ -1879,20 +1880,22 @@ static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = {
1879static int parse_txq_params(struct nlattr *tb[], 1880static int parse_txq_params(struct nlattr *tb[],
1880 struct ieee80211_txq_params *txq_params) 1881 struct ieee80211_txq_params *txq_params)
1881{ 1882{
1883 u8 ac;
1884
1882 if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] || 1885 if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] ||
1883 !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] || 1886 !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] ||
1884 !tb[NL80211_TXQ_ATTR_AIFS]) 1887 !tb[NL80211_TXQ_ATTR_AIFS])
1885 return -EINVAL; 1888 return -EINVAL;
1886 1889
1887 txq_params->ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]); 1890 ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]);
1888 txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]); 1891 txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]);
1889 txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]); 1892 txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]);
1890 txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]); 1893 txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]);
1891 txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]); 1894 txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]);
1892 1895
1893 if (txq_params->ac >= NL80211_NUM_ACS) 1896 if (ac >= NL80211_NUM_ACS)
1894 return -EINVAL; 1897 return -EINVAL;
1895 1898 txq_params->ac = array_index_nospec(ac, NL80211_NUM_ACS);
1896 return 0; 1899 return 0;
1897} 1900}
1898 1901
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index a750f330b8dd..c6ab4da4b8e2 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1794,32 +1794,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
1794 1794
1795static int __init x25_init(void) 1795static int __init x25_init(void)
1796{ 1796{
1797 int rc = proto_register(&x25_proto, 0); 1797 int rc;
1798 1798
1799 if (rc != 0) 1799 rc = proto_register(&x25_proto, 0);
1800 if (rc)
1800 goto out; 1801 goto out;
1801 1802
1802 rc = sock_register(&x25_family_ops); 1803 rc = sock_register(&x25_family_ops);
1803 if (rc != 0) 1804 if (rc)
1804 goto out_proto; 1805 goto out_proto;
1805 1806
1806 dev_add_pack(&x25_packet_type); 1807 dev_add_pack(&x25_packet_type);
1807 1808
1808 rc = register_netdevice_notifier(&x25_dev_notifier); 1809 rc = register_netdevice_notifier(&x25_dev_notifier);
1809 if (rc != 0) 1810 if (rc)
1810 goto out_sock; 1811 goto out_sock;
1811 1812
1812 pr_info("Linux Version 0.2\n"); 1813 rc = x25_register_sysctl();
1814 if (rc)
1815 goto out_dev;
1813 1816
1814 x25_register_sysctl();
1815 rc = x25_proc_init(); 1817 rc = x25_proc_init();
1816 if (rc != 0) 1818 if (rc)
1817 goto out_dev; 1819 goto out_sysctl;
1820
1821 pr_info("Linux Version 0.2\n");
1822
1818out: 1823out:
1819 return rc; 1824 return rc;
1825out_sysctl:
1826 x25_unregister_sysctl();
1820out_dev: 1827out_dev:
1821 unregister_netdevice_notifier(&x25_dev_notifier); 1828 unregister_netdevice_notifier(&x25_dev_notifier);
1822out_sock: 1829out_sock:
1830 dev_remove_pack(&x25_packet_type);
1823 sock_unregister(AF_X25); 1831 sock_unregister(AF_X25);
1824out_proto: 1832out_proto:
1825 proto_unregister(&x25_proto); 1833 proto_unregister(&x25_proto);
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index 43239527a205..703d46aae7a2 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = {
73 { 0, }, 73 { 0, },
74}; 74};
75 75
76void __init x25_register_sysctl(void) 76int __init x25_register_sysctl(void)
77{ 77{
78 x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table); 78 x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
79 if (!x25_table_header)
80 return -ENOMEM;
81 return 0;
79} 82}
80 83
81void x25_unregister_sysctl(void) 84void x25_unregister_sysctl(void)
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index ccfdc7115a83..a00ec715aa46 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -283,7 +283,7 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
283 struct crypto_comp *tfm; 283 struct crypto_comp *tfm;
284 284
285 /* This can be any valid CPU ID so we don't need locking. */ 285 /* This can be any valid CPU ID so we don't need locking. */
286 tfm = __this_cpu_read(*pos->tfms); 286 tfm = this_cpu_read(*pos->tfms);
287 287
288 if (!strcmp(crypto_comp_name(tfm), alg_name)) { 288 if (!strcmp(crypto_comp_name(tfm), alg_name)) {
289 pos->users++; 289 pos->users++;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 22df3b51e905..f9a13b67df5e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -626,6 +626,11 @@ static void xfrm_hash_rebuild(struct work_struct *work)
626 626
627 /* re-insert all policies by order of creation */ 627 /* re-insert all policies by order of creation */
628 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { 628 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
629 if (policy->walk.dead ||
630 xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
631 /* skip socket policies */
632 continue;
633 }
629 newpos = NULL; 634 newpos = NULL;
630 chain = policy_hash_bysel(net, &policy->selector, 635 chain = policy_hash_bysel(net, &policy->selector,
631 policy->family, 636 policy->family,
@@ -1225,9 +1230,15 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1225 read_lock_bh(&net->xfrm.xfrm_policy_lock); 1230 read_lock_bh(&net->xfrm.xfrm_policy_lock);
1226 pol = rcu_dereference(sk->sk_policy[dir]); 1231 pol = rcu_dereference(sk->sk_policy[dir]);
1227 if (pol != NULL) { 1232 if (pol != NULL) {
1228 bool match = xfrm_selector_match(&pol->selector, fl, family); 1233 bool match;
1229 int err = 0; 1234 int err = 0;
1230 1235
1236 if (pol->family != family) {
1237 pol = NULL;
1238 goto out;
1239 }
1240
1241 match = xfrm_selector_match(&pol->selector, fl, family);
1231 if (match) { 1242 if (match) {
1232 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) { 1243 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1233 pol = NULL; 1244 pol = NULL;
@@ -1307,7 +1318,7 @@ EXPORT_SYMBOL(xfrm_policy_delete);
1307 1318
1308int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) 1319int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1309{ 1320{
1310 struct net *net = xp_net(pol); 1321 struct net *net = sock_net(sk);
1311 struct xfrm_policy *old_pol; 1322 struct xfrm_policy *old_pol;
1312 1323
1313#ifdef CONFIG_XFRM_SUB_POLICY 1324#ifdef CONFIG_XFRM_SUB_POLICY
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 9895a8c56d8c..d6a11af0bab1 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1159,6 +1159,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
1159 1159
1160 if (orig->aead) { 1160 if (orig->aead) {
1161 x->aead = xfrm_algo_aead_clone(orig->aead); 1161 x->aead = xfrm_algo_aead_clone(orig->aead);
1162 x->geniv = orig->geniv;
1162 if (!x->aead) 1163 if (!x->aead)
1163 goto error; 1164 goto error;
1164 } 1165 }
@@ -1208,6 +1209,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
1208 x->curlft.add_time = orig->curlft.add_time; 1209 x->curlft.add_time = orig->curlft.add_time;
1209 x->km.state = orig->km.state; 1210 x->km.state = orig->km.state;
1210 x->km.seq = orig->km.seq; 1211 x->km.seq = orig->km.seq;
1212 x->replay = orig->replay;
1213 x->preplay = orig->preplay;
1211 1214
1212 return x; 1215 return x;
1213 1216
@@ -1845,6 +1848,18 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
1845 struct xfrm_mgr *km; 1848 struct xfrm_mgr *km;
1846 struct xfrm_policy *pol = NULL; 1849 struct xfrm_policy *pol = NULL;
1847 1850
1851#ifdef CONFIG_COMPAT
1852 if (is_compat_task())
1853 return -EOPNOTSUPP;
1854#endif
1855
1856 if (!optval && !optlen) {
1857 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
1858 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
1859 __sk_dst_reset(sk);
1860 return 0;
1861 }
1862
1848 if (optlen <= 0 || optlen > PAGE_SIZE) 1863 if (optlen <= 0 || optlen > PAGE_SIZE)
1849 return -EMSGSIZE; 1864 return -EMSGSIZE;
1850 1865
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 76944a4839a5..90270d7110a3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -121,22 +121,17 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
121 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; 121 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
122 struct xfrm_replay_state_esn *rs; 122 struct xfrm_replay_state_esn *rs;
123 123
124 if (p->flags & XFRM_STATE_ESN) { 124 if (!rt)
125 if (!rt) 125 return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
126 return -EINVAL;
127
128 rs = nla_data(rt);
129 126
130 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) 127 rs = nla_data(rt);
131 return -EINVAL;
132 128
133 if (nla_len(rt) < xfrm_replay_state_esn_len(rs) && 129 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
134 nla_len(rt) != sizeof(*rs)) 130 return -EINVAL;
135 return -EINVAL;
136 }
137 131
138 if (!rt) 132 if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
139 return 0; 133 nla_len(rt) != sizeof(*rs))
134 return -EINVAL;
140 135
141 /* As only ESP and AH support ESN feature. */ 136 /* As only ESP and AH support ESN feature. */
142 if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) 137 if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
@@ -1376,11 +1371,14 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1376 1371
1377static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) 1372static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1378{ 1373{
1374 u16 prev_family;
1379 int i; 1375 int i;
1380 1376
1381 if (nr > XFRM_MAX_DEPTH) 1377 if (nr > XFRM_MAX_DEPTH)
1382 return -EINVAL; 1378 return -EINVAL;
1383 1379
1380 prev_family = family;
1381
1384 for (i = 0; i < nr; i++) { 1382 for (i = 0; i < nr; i++) {
1385 /* We never validated the ut->family value, so many 1383 /* We never validated the ut->family value, so many
1386 * applications simply leave it at zero. The check was 1384 * applications simply leave it at zero. The check was
@@ -1392,6 +1390,12 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1392 if (!ut[i].family) 1390 if (!ut[i].family)
1393 ut[i].family = family; 1391 ut[i].family = family;
1394 1392
1393 if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
1394 (ut[i].family != prev_family))
1395 return -EINVAL;
1396
1397 prev_family = ut[i].family;
1398
1395 switch (ut[i].family) { 1399 switch (ut[i].family) {
1396 case AF_INET: 1400 case AF_INET:
1397 break; 1401 break;
@@ -1402,6 +1406,21 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1402 default: 1406 default:
1403 return -EINVAL; 1407 return -EINVAL;
1404 } 1408 }
1409
1410 switch (ut[i].id.proto) {
1411 case IPPROTO_AH:
1412 case IPPROTO_ESP:
1413 case IPPROTO_COMP:
1414#if IS_ENABLED(CONFIG_IPV6)
1415 case IPPROTO_ROUTING:
1416 case IPPROTO_DSTOPTS:
1417#endif
1418 case IPSEC_PROTO_ANY:
1419 break;
1420 default:
1421 return -EINVAL;
1422 }
1423
1405 } 1424 }
1406 1425
1407 return 0; 1426 return 0;
@@ -2461,7 +2480,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2461 2480
2462#ifdef CONFIG_COMPAT 2481#ifdef CONFIG_COMPAT
2463 if (is_compat_task()) 2482 if (is_compat_task())
2464 return -ENOTSUPP; 2483 return -EOPNOTSUPP;
2465#endif 2484#endif
2466 2485
2467 type = nlh->nlmsg_type; 2486 type = nlh->nlmsg_type;
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 1db6d73c8dd2..31a981d6229d 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -7,6 +7,7 @@ quote := "
7squote := ' 7squote := '
8empty := 8empty :=
9space := $(empty) $(empty) 9space := $(empty) $(empty)
10pound := \#
10 11
11### 12###
12# Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o 13# Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
@@ -236,11 +237,11 @@ endif
236 237
237# Replace >$< with >$$< to preserve $ when reloading the .cmd file 238# Replace >$< with >$$< to preserve $ when reloading the .cmd file
238# (needed for make) 239# (needed for make)
239# Replace >#< with >\#< to avoid starting a comment in the .cmd file 240# Replace >#< with >$(pound)< to avoid starting a comment in the .cmd file
240# (needed for make) 241# (needed for make)
241# Replace >'< with >'\''< to be able to enclose the whole string in '...' 242# Replace >'< with >'\''< to be able to enclose the whole string in '...'
242# (needed for the shell) 243# (needed for the shell)
243make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1))))) 244make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1)))))
244 245
245# Find any prerequisites that is newer than target or that does not exist. 246# Find any prerequisites that is newer than target or that does not exist.
246# PHONY targets skipped in both cases. 247# PHONY targets skipped in both cases.
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 37323b0df374..2624d4bf9a45 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -28,4 +28,7 @@ else
28 CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL) 28 CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL)
29 endif 29 endif
30endif 30endif
31
32CFLAGS_KASAN_NOSANITIZE := -fno-builtin
33
31endif 34endif
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 79e86613712f..a2d0e6d32659 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -126,7 +126,7 @@ endif
126ifeq ($(CONFIG_KASAN),y) 126ifeq ($(CONFIG_KASAN),y)
127_c_flags += $(if $(patsubst n%,, \ 127_c_flags += $(if $(patsubst n%,, \
128 $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \ 128 $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
129 $(CFLAGS_KASAN)) 129 $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
130endif 130endif
131 131
132# If building the kernel in a separate objtree expand all occurrences 132# If building the kernel in a separate objtree expand all occurrences
@@ -270,11 +270,11 @@ cmd_dt_S_dtb= \
270 echo '\#include <asm-generic/vmlinux.lds.h>'; \ 270 echo '\#include <asm-generic/vmlinux.lds.h>'; \
271 echo '.section .dtb.init.rodata,"a"'; \ 271 echo '.section .dtb.init.rodata,"a"'; \
272 echo '.balign STRUCT_ALIGNMENT'; \ 272 echo '.balign STRUCT_ALIGNMENT'; \
273 echo '.global __dtb_$(*F)_begin'; \ 273 echo '.global __dtb_$(subst -,_,$(*F))_begin'; \
274 echo '__dtb_$(*F)_begin:'; \ 274 echo '__dtb_$(subst -,_,$(*F))_begin:'; \
275 echo '.incbin "$<" '; \ 275 echo '.incbin "$<" '; \
276 echo '__dtb_$(*F)_end:'; \ 276 echo '__dtb_$(subst -,_,$(*F))_end:'; \
277 echo '.global __dtb_$(*F)_end'; \ 277 echo '.global __dtb_$(subst -,_,$(*F))_end'; \
278 echo '.balign STRUCT_ALIGNMENT'; \ 278 echo '.balign STRUCT_ALIGNMENT'; \
279) > $@ 279) > $@
280 280
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index 122599b1c13b..ea1e96921e3b 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -10,10 +10,16 @@ DEPMOD=$1
10KERNELRELEASE=$2 10KERNELRELEASE=$2
11SYMBOL_PREFIX=$3 11SYMBOL_PREFIX=$3
12 12
13if ! test -r System.map -a -x "$DEPMOD"; then 13if ! test -r System.map ; then
14 exit 0 14 exit 0
15fi 15fi
16 16
17if [ -z $(command -v $DEPMOD) ]; then
18 echo "'make modules_install' requires $DEPMOD. Please install it." >&2
19 echo "This is probably in the kmod package." >&2
20 exit 1
21fi
22
17# older versions of depmod don't support -P <symbol-prefix> 23# older versions of depmod don't support -P <symbol-prefix>
18# support was added in module-init-tools 3.13 24# support was added in module-init-tools 3.13
19if test -n "$SYMBOL_PREFIX"; then 25if test -n "$SYMBOL_PREFIX"; then
diff --git a/scripts/genksyms/parse.tab.c_shipped b/scripts/genksyms/parse.tab.c_shipped
index 99950b5afb0d..632f6d66982d 100644
--- a/scripts/genksyms/parse.tab.c_shipped
+++ b/scripts/genksyms/parse.tab.c_shipped
@@ -1,19 +1,19 @@
1/* A Bison parser, made by GNU Bison 2.7. */ 1/* A Bison parser, made by GNU Bison 3.0.4. */
2 2
3/* Bison implementation for Yacc-like parsers in C 3/* Bison implementation for Yacc-like parsers in C
4 4
5 Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc. 5 Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
6 6
7 This program is free software: you can redistribute it and/or modify 7 This program is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by 8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 3 of the License, or 9 the Free Software Foundation, either version 3 of the License, or
10 (at your option) any later version. 10 (at your option) any later version.
11 11
12 This program is distributed in the hope that it will be useful, 12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details. 15 GNU General Public License for more details.
16 16
17 You should have received a copy of the GNU General Public License 17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */ 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 19
@@ -26,7 +26,7 @@
26 special exception, which will cause the skeleton and the resulting 26 special exception, which will cause the skeleton and the resulting
27 Bison output files to be licensed under the GNU General Public 27 Bison output files to be licensed under the GNU General Public
28 License without this special exception. 28 License without this special exception.
29 29
30 This special exception was added by the Free Software Foundation in 30 This special exception was added by the Free Software Foundation in
31 version 2.2 of Bison. */ 31 version 2.2 of Bison. */
32 32
@@ -44,7 +44,7 @@
44#define YYBISON 1 44#define YYBISON 1
45 45
46/* Bison version. */ 46/* Bison version. */
47#define YYBISON_VERSION "2.7" 47#define YYBISON_VERSION "3.0.4"
48 48
49/* Skeleton name. */ 49/* Skeleton name. */
50#define YYSKELETON_NAME "yacc.c" 50#define YYSKELETON_NAME "yacc.c"
@@ -62,7 +62,7 @@
62 62
63 63
64/* Copy the first part of user declarations. */ 64/* Copy the first part of user declarations. */
65 65#line 24 "parse.y" /* yacc.c:339 */
66 66
67 67
68#include <assert.h> 68#include <assert.h>
@@ -113,13 +113,13 @@ static void record_compound(struct string_list **keyw,
113} 113}
114 114
115 115
116#line 117 "parse.tab.c" /* yacc.c:339 */
116 117
117 118# ifndef YY_NULLPTR
118# ifndef YY_NULL
119# if defined __cplusplus && 201103L <= __cplusplus 119# if defined __cplusplus && 201103L <= __cplusplus
120# define YY_NULL nullptr 120# define YY_NULLPTR nullptr
121# else 121# else
122# define YY_NULL 0 122# define YY_NULLPTR 0
123# endif 123# endif
124# endif 124# endif
125 125
@@ -131,8 +131,11 @@ static void record_compound(struct string_list **keyw,
131# define YYERROR_VERBOSE 0 131# define YYERROR_VERBOSE 0
132#endif 132#endif
133 133
134 134/* In a future release of Bison, this section will be replaced
135/* Enabling traces. */ 135 by #include "parse.tab.h". */
136#ifndef YY_YY_PARSE_TAB_H_INCLUDED
137# define YY_YY_PARSE_TAB_H_INCLUDED
138/* Debug traces. */
136#ifndef YYDEBUG 139#ifndef YYDEBUG
137# define YYDEBUG 1 140# define YYDEBUG 1
138#endif 141#endif
@@ -140,86 +143,73 @@ static void record_compound(struct string_list **keyw,
140extern int yydebug; 143extern int yydebug;
141#endif 144#endif
142 145
143/* Tokens. */ 146/* Token type. */
144#ifndef YYTOKENTYPE 147#ifndef YYTOKENTYPE
145# define YYTOKENTYPE 148# define YYTOKENTYPE
146 /* Put the tokens into the symbol table, so that GDB and other debuggers 149 enum yytokentype
147 know about them. */ 150 {
148 enum yytokentype { 151 ASM_KEYW = 258,
149 ASM_KEYW = 258, 152 ATTRIBUTE_KEYW = 259,
150 ATTRIBUTE_KEYW = 259, 153 AUTO_KEYW = 260,
151 AUTO_KEYW = 260, 154 BOOL_KEYW = 261,
152 BOOL_KEYW = 261, 155 CHAR_KEYW = 262,
153 CHAR_KEYW = 262, 156 CONST_KEYW = 263,
154 CONST_KEYW = 263, 157 DOUBLE_KEYW = 264,
155 DOUBLE_KEYW = 264, 158 ENUM_KEYW = 265,
156 ENUM_KEYW = 265, 159 EXTERN_KEYW = 266,
157 EXTERN_KEYW = 266, 160 EXTENSION_KEYW = 267,
158 EXTENSION_KEYW = 267, 161 FLOAT_KEYW = 268,
159 FLOAT_KEYW = 268, 162 INLINE_KEYW = 269,
160 INLINE_KEYW = 269, 163 INT_KEYW = 270,
161 INT_KEYW = 270, 164 LONG_KEYW = 271,
162 LONG_KEYW = 271, 165 REGISTER_KEYW = 272,
163 REGISTER_KEYW = 272, 166 RESTRICT_KEYW = 273,
164 RESTRICT_KEYW = 273, 167 SHORT_KEYW = 274,
165 SHORT_KEYW = 274, 168 SIGNED_KEYW = 275,
166 SIGNED_KEYW = 275, 169 STATIC_KEYW = 276,
167 STATIC_KEYW = 276, 170 STRUCT_KEYW = 277,
168 STRUCT_KEYW = 277, 171 TYPEDEF_KEYW = 278,
169 TYPEDEF_KEYW = 278, 172 UNION_KEYW = 279,
170 UNION_KEYW = 279, 173 UNSIGNED_KEYW = 280,
171 UNSIGNED_KEYW = 280, 174 VOID_KEYW = 281,
172 VOID_KEYW = 281, 175 VOLATILE_KEYW = 282,
173 VOLATILE_KEYW = 282, 176 TYPEOF_KEYW = 283,
174 TYPEOF_KEYW = 283, 177 EXPORT_SYMBOL_KEYW = 284,
175 EXPORT_SYMBOL_KEYW = 284, 178 ASM_PHRASE = 285,
176 ASM_PHRASE = 285, 179 ATTRIBUTE_PHRASE = 286,
177 ATTRIBUTE_PHRASE = 286, 180 TYPEOF_PHRASE = 287,
178 TYPEOF_PHRASE = 287, 181 BRACE_PHRASE = 288,
179 BRACE_PHRASE = 288, 182 BRACKET_PHRASE = 289,
180 BRACKET_PHRASE = 289, 183 EXPRESSION_PHRASE = 290,
181 EXPRESSION_PHRASE = 290, 184 CHAR = 291,
182 CHAR = 291, 185 DOTS = 292,
183 DOTS = 292, 186 IDENT = 293,
184 IDENT = 293, 187 INT = 294,
185 INT = 294, 188 REAL = 295,
186 REAL = 295, 189 STRING = 296,
187 STRING = 296, 190 TYPE = 297,
188 TYPE = 297, 191 OTHER = 298,
189 OTHER = 298, 192 FILENAME = 299
190 FILENAME = 299 193 };
191 };
192#endif 194#endif
193 195
194 196/* Value type. */
195#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED 197#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
196typedef int YYSTYPE; 198typedef int YYSTYPE;
197# define YYSTYPE_IS_TRIVIAL 1 199# define YYSTYPE_IS_TRIVIAL 1
198# define yystype YYSTYPE /* obsolescent; will be withdrawn */
199# define YYSTYPE_IS_DECLARED 1 200# define YYSTYPE_IS_DECLARED 1
200#endif 201#endif
201 202
203
202extern YYSTYPE yylval; 204extern YYSTYPE yylval;
203 205
204#ifdef YYPARSE_PARAM
205#if defined __STDC__ || defined __cplusplus
206int yyparse (void *YYPARSE_PARAM);
207#else
208int yyparse ();
209#endif
210#else /* ! YYPARSE_PARAM */
211#if defined __STDC__ || defined __cplusplus
212int yyparse (void); 206int yyparse (void);
213#else
214int yyparse ();
215#endif
216#endif /* ! YYPARSE_PARAM */
217
218 207
208#endif /* !YY_YY_PARSE_TAB_H_INCLUDED */
219 209
220/* Copy the second part of user declarations. */ 210/* Copy the second part of user declarations. */
221 211
222 212#line 213 "parse.tab.c" /* yacc.c:358 */
223 213
224#ifdef short 214#ifdef short
225# undef short 215# undef short
@@ -233,11 +223,8 @@ typedef unsigned char yytype_uint8;
233 223
234#ifdef YYTYPE_INT8 224#ifdef YYTYPE_INT8
235typedef YYTYPE_INT8 yytype_int8; 225typedef YYTYPE_INT8 yytype_int8;
236#elif (defined __STDC__ || defined __C99__FUNC__ \
237 || defined __cplusplus || defined _MSC_VER)
238typedef signed char yytype_int8;
239#else 226#else
240typedef short int yytype_int8; 227typedef signed char yytype_int8;
241#endif 228#endif
242 229
243#ifdef YYTYPE_UINT16 230#ifdef YYTYPE_UINT16
@@ -257,8 +244,7 @@ typedef short int yytype_int16;
257# define YYSIZE_T __SIZE_TYPE__ 244# define YYSIZE_T __SIZE_TYPE__
258# elif defined size_t 245# elif defined size_t
259# define YYSIZE_T size_t 246# define YYSIZE_T size_t
260# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \ 247# elif ! defined YYSIZE_T
261 || defined __cplusplus || defined _MSC_VER)
262# include <stddef.h> /* INFRINGES ON USER NAME SPACE */ 248# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
263# define YYSIZE_T size_t 249# define YYSIZE_T size_t
264# else 250# else
@@ -280,6 +266,33 @@ typedef short int yytype_int16;
280# endif 266# endif
281#endif 267#endif
282 268
269#ifndef YY_ATTRIBUTE
270# if (defined __GNUC__ \
271 && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \
272 || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C
273# define YY_ATTRIBUTE(Spec) __attribute__(Spec)
274# else
275# define YY_ATTRIBUTE(Spec) /* empty */
276# endif
277#endif
278
279#ifndef YY_ATTRIBUTE_PURE
280# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__))
281#endif
282
283#ifndef YY_ATTRIBUTE_UNUSED
284# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__))
285#endif
286
287#if !defined _Noreturn \
288 && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112)
289# if defined _MSC_VER && 1200 <= _MSC_VER
290# define _Noreturn __declspec (noreturn)
291# else
292# define _Noreturn YY_ATTRIBUTE ((__noreturn__))
293# endif
294#endif
295
283/* Suppress unused-variable warnings by "using" E. */ 296/* Suppress unused-variable warnings by "using" E. */
284#if ! defined lint || defined __GNUC__ 297#if ! defined lint || defined __GNUC__
285# define YYUSE(E) ((void) (E)) 298# define YYUSE(E) ((void) (E))
@@ -287,24 +300,26 @@ typedef short int yytype_int16;
287# define YYUSE(E) /* empty */ 300# define YYUSE(E) /* empty */
288#endif 301#endif
289 302
290/* Identity function, used to suppress warnings about constant conditions. */ 303#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__
291#ifndef lint 304/* Suppress an incorrect diagnostic about yylval being uninitialized. */
292# define YYID(N) (N) 305# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \
293#else 306 _Pragma ("GCC diagnostic push") \
294#if (defined __STDC__ || defined __C99__FUNC__ \ 307 _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\
295 || defined __cplusplus || defined _MSC_VER) 308 _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
296static int 309# define YY_IGNORE_MAYBE_UNINITIALIZED_END \
297YYID (int yyi) 310 _Pragma ("GCC diagnostic pop")
298#else 311#else
299static int 312# define YY_INITIAL_VALUE(Value) Value
300YYID (yyi)
301 int yyi;
302#endif 313#endif
303{ 314#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
304 return yyi; 315# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
305} 316# define YY_IGNORE_MAYBE_UNINITIALIZED_END
317#endif
318#ifndef YY_INITIAL_VALUE
319# define YY_INITIAL_VALUE(Value) /* Nothing. */
306#endif 320#endif
307 321
322
308#if ! defined yyoverflow || YYERROR_VERBOSE 323#if ! defined yyoverflow || YYERROR_VERBOSE
309 324
310/* The parser invokes alloca or malloc; define the necessary symbols. */ 325/* The parser invokes alloca or malloc; define the necessary symbols. */
@@ -322,8 +337,7 @@ YYID (yyi)
322# define alloca _alloca 337# define alloca _alloca
323# else 338# else
324# define YYSTACK_ALLOC alloca 339# define YYSTACK_ALLOC alloca
325# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ 340# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS
326 || defined __cplusplus || defined _MSC_VER)
327# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ 341# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
328 /* Use EXIT_SUCCESS as a witness for stdlib.h. */ 342 /* Use EXIT_SUCCESS as a witness for stdlib.h. */
329# ifndef EXIT_SUCCESS 343# ifndef EXIT_SUCCESS
@@ -335,8 +349,8 @@ YYID (yyi)
335# endif 349# endif
336 350
337# ifdef YYSTACK_ALLOC 351# ifdef YYSTACK_ALLOC
338 /* Pacify GCC's `empty if-body' warning. */ 352 /* Pacify GCC's 'empty if-body' warning. */
339# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0)) 353# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
340# ifndef YYSTACK_ALLOC_MAXIMUM 354# ifndef YYSTACK_ALLOC_MAXIMUM
341 /* The OS might guarantee only one guard page at the bottom of the stack, 355 /* The OS might guarantee only one guard page at the bottom of the stack,
342 and a page size can be as small as 4096 bytes. So we cannot safely 356 and a page size can be as small as 4096 bytes. So we cannot safely
@@ -352,7 +366,7 @@ YYID (yyi)
352# endif 366# endif
353# if (defined __cplusplus && ! defined EXIT_SUCCESS \ 367# if (defined __cplusplus && ! defined EXIT_SUCCESS \
354 && ! ((defined YYMALLOC || defined malloc) \ 368 && ! ((defined YYMALLOC || defined malloc) \
355 && (defined YYFREE || defined free))) 369 && (defined YYFREE || defined free)))
356# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ 370# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
357# ifndef EXIT_SUCCESS 371# ifndef EXIT_SUCCESS
358# define EXIT_SUCCESS 0 372# define EXIT_SUCCESS 0
@@ -360,15 +374,13 @@ YYID (yyi)
360# endif 374# endif
361# ifndef YYMALLOC 375# ifndef YYMALLOC
362# define YYMALLOC malloc 376# define YYMALLOC malloc
363# if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ 377# if ! defined malloc && ! defined EXIT_SUCCESS
364 || defined __cplusplus || defined _MSC_VER)
365void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ 378void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
366# endif 379# endif
367# endif 380# endif
368# ifndef YYFREE 381# ifndef YYFREE
369# define YYFREE free 382# define YYFREE free
370# if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ 383# if ! defined free && ! defined EXIT_SUCCESS
371 || defined __cplusplus || defined _MSC_VER)
372void free (void *); /* INFRINGES ON USER NAME SPACE */ 384void free (void *); /* INFRINGES ON USER NAME SPACE */
373# endif 385# endif
374# endif 386# endif
@@ -378,7 +390,7 @@ void free (void *); /* INFRINGES ON USER NAME SPACE */
378 390
379#if (! defined yyoverflow \ 391#if (! defined yyoverflow \
380 && (! defined __cplusplus \ 392 && (! defined __cplusplus \
381 || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) 393 || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
382 394
383/* A type that is properly aligned for any stack member. */ 395/* A type that is properly aligned for any stack member. */
384union yyalloc 396union yyalloc
@@ -403,16 +415,16 @@ union yyalloc
403 elements in the stack, and YYPTR gives the new location of the 415 elements in the stack, and YYPTR gives the new location of the
404 stack. Advance YYPTR to a properly aligned location for the next 416 stack. Advance YYPTR to a properly aligned location for the next
405 stack. */ 417 stack. */
406# define YYSTACK_RELOCATE(Stack_alloc, Stack) \ 418# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
407 do \ 419 do \
408 { \ 420 { \
409 YYSIZE_T yynewbytes; \ 421 YYSIZE_T yynewbytes; \
410 YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ 422 YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
411 Stack = &yyptr->Stack_alloc; \ 423 Stack = &yyptr->Stack_alloc; \
412 yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ 424 yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
413 yyptr += yynewbytes / sizeof (*yyptr); \ 425 yyptr += yynewbytes / sizeof (*yyptr); \
414 } \ 426 } \
415 while (YYID (0)) 427 while (0)
416 428
417#endif 429#endif
418 430
@@ -431,7 +443,7 @@ union yyalloc
431 for (yyi = 0; yyi < (Count); yyi++) \ 443 for (yyi = 0; yyi < (Count); yyi++) \
432 (Dst)[yyi] = (Src)[yyi]; \ 444 (Dst)[yyi] = (Src)[yyi]; \
433 } \ 445 } \
434 while (YYID (0)) 446 while (0)
435# endif 447# endif
436# endif 448# endif
437#endif /* !YYCOPY_NEEDED */ 449#endif /* !YYCOPY_NEEDED */
@@ -439,25 +451,27 @@ union yyalloc
439/* YYFINAL -- State number of the termination state. */ 451/* YYFINAL -- State number of the termination state. */
440#define YYFINAL 4 452#define YYFINAL 4
441/* YYLAST -- Last index in YYTABLE. */ 453/* YYLAST -- Last index in YYTABLE. */
442#define YYLAST 515 454#define YYLAST 513
443 455
444/* YYNTOKENS -- Number of terminals. */ 456/* YYNTOKENS -- Number of terminals. */
445#define YYNTOKENS 54 457#define YYNTOKENS 54
446/* YYNNTS -- Number of nonterminals. */ 458/* YYNNTS -- Number of nonterminals. */
447#define YYNNTS 49 459#define YYNNTS 49
448/* YYNRULES -- Number of rules. */ 460/* YYNRULES -- Number of rules. */
449#define YYNRULES 133 461#define YYNRULES 132
450/* YYNRULES -- Number of states. */ 462/* YYNSTATES -- Number of states. */
451#define YYNSTATES 188 463#define YYNSTATES 186
452 464
453/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */ 465/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned
466 by yylex, with out-of-bounds checking. */
454#define YYUNDEFTOK 2 467#define YYUNDEFTOK 2
455#define YYMAXUTOK 299 468#define YYMAXUTOK 299
456 469
457#define YYTRANSLATE(YYX) \ 470#define YYTRANSLATE(YYX) \
458 ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) 471 ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
459 472
460/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */ 473/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM
474 as returned by yylex, without out-of-bounds checking. */
461static const yytype_uint8 yytranslate[] = 475static const yytype_uint8 yytranslate[] =
462{ 476{
463 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 477 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
@@ -493,69 +507,7 @@ static const yytype_uint8 yytranslate[] =
493}; 507};
494 508
495#if YYDEBUG 509#if YYDEBUG
496/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in 510 /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */
497 YYRHS. */
498static const yytype_uint16 yyprhs[] =
499{
500 0, 0, 3, 5, 8, 9, 12, 13, 18, 19,
501 23, 25, 27, 29, 31, 34, 37, 41, 42, 44,
502 46, 50, 55, 56, 58, 60, 63, 65, 67, 69,
503 71, 73, 75, 77, 79, 81, 86, 88, 91, 94,
504 97, 101, 105, 109, 112, 115, 118, 120, 122, 124,
505 126, 128, 130, 132, 134, 136, 138, 140, 143, 144,
506 146, 148, 151, 153, 155, 157, 159, 162, 164, 166,
507 168, 173, 178, 181, 185, 189, 192, 194, 196, 198,
508 203, 208, 211, 215, 219, 222, 224, 228, 229, 231,
509 233, 237, 240, 243, 245, 246, 248, 250, 255, 260,
510 263, 267, 271, 275, 276, 278, 281, 285, 289, 290,
511 292, 294, 297, 301, 304, 305, 307, 309, 313, 316,
512 319, 321, 324, 325, 328, 332, 337, 339, 343, 345,
513 349, 352, 353, 355
514};
515
516/* YYRHS -- A `-1'-separated list of the rules' RHS. */
517static const yytype_int8 yyrhs[] =
518{
519 55, 0, -1, 56, -1, 55, 56, -1, -1, 57,
520 58, -1, -1, 12, 23, 59, 61, -1, -1, 23,
521 60, 61, -1, 61, -1, 85, -1, 100, -1, 102,
522 -1, 1, 45, -1, 1, 46, -1, 65, 62, 45,
523 -1, -1, 63, -1, 64, -1, 63, 47, 64, -1,
524 75, 101, 96, 86, -1, -1, 66, -1, 67, -1,
525 66, 67, -1, 68, -1, 69, -1, 5, -1, 17,
526 -1, 21, -1, 11, -1, 14, -1, 70, -1, 74,
527 -1, 28, 48, 82, 49, -1, 32, -1, 22, 38,
528 -1, 24, 38, -1, 10, 38, -1, 22, 38, 88,
529 -1, 24, 38, 88, -1, 10, 38, 97, -1, 10,
530 97, -1, 22, 88, -1, 24, 88, -1, 7, -1,
531 19, -1, 15, -1, 16, -1, 20, -1, 25, -1,
532 13, -1, 9, -1, 26, -1, 6, -1, 42, -1,
533 50, 72, -1, -1, 73, -1, 74, -1, 73, 74,
534 -1, 8, -1, 27, -1, 31, -1, 18, -1, 71,
535 75, -1, 76, -1, 38, -1, 42, -1, 76, 48,
536 79, 49, -1, 76, 48, 1, 49, -1, 76, 34,
537 -1, 48, 75, 49, -1, 48, 1, 49, -1, 71,
538 77, -1, 78, -1, 38, -1, 42, -1, 78, 48,
539 79, 49, -1, 78, 48, 1, 49, -1, 78, 34,
540 -1, 48, 77, 49, -1, 48, 1, 49, -1, 80,
541 37, -1, 80, -1, 81, 47, 37, -1, -1, 81,
542 -1, 82, -1, 81, 47, 82, -1, 66, 83, -1,
543 71, 83, -1, 84, -1, -1, 38, -1, 42, -1,
544 84, 48, 79, 49, -1, 84, 48, 1, 49, -1,
545 84, 34, -1, 48, 83, 49, -1, 48, 1, 49,
546 -1, 65, 75, 33, -1, -1, 87, -1, 51, 35,
547 -1, 52, 89, 46, -1, 52, 1, 46, -1, -1,
548 90, -1, 91, -1, 90, 91, -1, 65, 92, 45,
549 -1, 1, 45, -1, -1, 93, -1, 94, -1, 93,
550 47, 94, -1, 77, 96, -1, 38, 95, -1, 95,
551 -1, 53, 35, -1, -1, 96, 31, -1, 52, 98,
552 46, -1, 52, 98, 47, 46, -1, 99, -1, 98,
553 47, 99, -1, 38, -1, 38, 51, 35, -1, 30,
554 45, -1, -1, 30, -1, 29, 48, 38, 49, 45,
555 -1
556};
557
558/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
559static const yytype_uint16 yyrline[] = 511static const yytype_uint16 yyrline[] =
560{ 512{
561 0, 124, 124, 125, 129, 129, 135, 135, 137, 137, 513 0, 124, 124, 125, 129, 129, 135, 135, 137, 137,
@@ -565,13 +517,13 @@ static const yytype_uint16 yyrline[] =
565 237, 239, 241, 246, 249, 250, 254, 255, 256, 257, 517 237, 239, 241, 246, 249, 250, 254, 255, 256, 257,
566 258, 259, 260, 261, 262, 263, 264, 268, 273, 274, 518 258, 259, 260, 261, 262, 263, 264, 268, 273, 274,
567 278, 279, 283, 283, 283, 284, 292, 293, 297, 306, 519 278, 279, 283, 283, 283, 284, 292, 293, 297, 306,
568 315, 317, 319, 321, 323, 330, 331, 335, 336, 337, 520 315, 317, 319, 321, 328, 329, 333, 334, 335, 337,
569 339, 341, 343, 345, 350, 351, 352, 356, 357, 361, 521 339, 341, 343, 348, 349, 350, 354, 355, 359, 360,
570 362, 367, 372, 374, 378, 379, 387, 391, 393, 395, 522 365, 370, 372, 376, 377, 385, 389, 391, 393, 395,
571 397, 399, 404, 413, 414, 419, 424, 425, 429, 430, 523 397, 402, 411, 412, 417, 422, 423, 427, 428, 432,
572 434, 435, 439, 441, 446, 447, 451, 452, 456, 457, 524 433, 437, 439, 444, 445, 449, 450, 454, 455, 456,
573 458, 462, 466, 467, 471, 472, 476, 477, 480, 485, 525 460, 464, 465, 469, 470, 474, 475, 478, 483, 491,
574 493, 497, 498, 502 526 495, 496, 500
575}; 527};
576#endif 528#endif
577 529
@@ -606,13 +558,13 @@ static const char *const yytname[] =
606 "member_declarator_list_opt", "member_declarator_list", 558 "member_declarator_list_opt", "member_declarator_list",
607 "member_declarator", "member_bitfield_declarator", "attribute_opt", 559 "member_declarator", "member_bitfield_declarator", "attribute_opt",
608 "enum_body", "enumerator_list", "enumerator", "asm_definition", 560 "enum_body", "enumerator_list", "enumerator", "asm_definition",
609 "asm_phrase_opt", "export_definition", YY_NULL 561 "asm_phrase_opt", "export_definition", YY_NULLPTR
610}; 562};
611#endif 563#endif
612 564
613# ifdef YYPRINT 565# ifdef YYPRINT
614/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to 566/* YYTOKNUM[NUM] -- (External) token number corresponding to the
615 token YYLEX-NUM. */ 567 (internal) symbol number NUM (which must be that of a token). */
616static const yytype_uint16 yytoknum[] = 568static const yytype_uint16 yytoknum[] =
617{ 569{
618 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, 570 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
@@ -624,47 +576,44 @@ static const yytype_uint16 yytoknum[] =
624}; 576};
625# endif 577# endif
626 578
627/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ 579#define YYPACT_NINF -135
628static const yytype_uint8 yyr1[] =
629{
630 0, 54, 55, 55, 57, 56, 59, 58, 60, 58,
631 58, 58, 58, 58, 58, 58, 61, 62, 62, 63,
632 63, 64, 65, 65, 66, 66, 67, 67, 68, 68,
633 68, 68, 68, 69, 69, 69, 69, 69, 69, 69,
634 69, 69, 69, 69, 69, 69, 70, 70, 70, 70,
635 70, 70, 70, 70, 70, 70, 70, 71, 72, 72,
636 73, 73, 74, 74, 74, 74, 75, 75, 76, 76,
637 76, 76, 76, 76, 76, 77, 77, 78, 78, 78,
638 78, 78, 78, 78, 79, 79, 79, 80, 80, 81,
639 81, 82, 83, 83, 84, 84, 84, 84, 84, 84,
640 84, 84, 85, 86, 86, 87, 88, 88, 89, 89,
641 90, 90, 91, 91, 92, 92, 93, 93, 94, 94,
642 94, 95, 96, 96, 97, 97, 98, 98, 99, 99,
643 100, 101, 101, 102
644};
645 580
646/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */ 581#define yypact_value_is_default(Yystate) \
647static const yytype_uint8 yyr2[] = 582 (!!((Yystate) == (-135)))
583
584#define YYTABLE_NINF -109
585
586#define yytable_value_is_error(Yytable_value) \
587 0
588
589 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
590 STATE-NUM. */
591static const yytype_int16 yypact[] =
648{ 592{
649 0, 2, 1, 2, 0, 2, 0, 4, 0, 3, 593 -135, 38, -135, 206, -135, -135, 22, -135, -135, -135,
650 1, 1, 1, 1, 2, 2, 3, 0, 1, 1, 594 -135, -135, -24, -135, 20, -135, -135, -135, -135, -135,
651 3, 4, 0, 1, 1, 2, 1, 1, 1, 1, 595 -135, -135, -135, -135, -23, -135, 6, -135, -135, -135,
652 1, 1, 1, 1, 1, 4, 1, 2, 2, 2, 596 -2, 15, 24, -135, -135, -135, -135, -135, 41, 471,
653 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 597 -135, -135, -135, -135, -135, -135, -135, -135, -135, -135,
654 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 598 13, 36, -135, -135, 35, 106, -135, 471, 35, -135,
655 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 599 471, 44, -135, -135, -135, 41, 39, 45, 48, -135,
656 4, 4, 2, 3, 3, 2, 1, 1, 1, 4, 600 41, -10, 25, -135, -135, 47, 34, -135, 471, -135,
657 4, 2, 3, 3, 2, 1, 3, 0, 1, 1, 601 26, -26, 53, 156, -135, -135, 41, -135, 387, 52,
658 3, 2, 2, 1, 0, 1, 1, 4, 4, 2, 602 57, 59, -135, 39, -135, -135, 41, -135, -135, -135,
659 3, 3, 3, 0, 1, 2, 3, 3, 0, 1, 603 -135, -135, 252, 67, -135, -21, -135, -135, -135, 51,
660 1, 2, 3, 2, 0, 1, 1, 3, 2, 2, 604 -135, 12, 83, 46, -135, 27, 84, 88, -135, -135,
661 1, 2, 0, 2, 3, 4, 1, 3, 1, 3, 605 -135, 91, -135, 109, -135, -135, 3, 55, -135, 30,
662 2, 0, 1, 5 606 -135, 95, -135, -135, -135, -20, 92, 93, 108, 96,
607 -135, -135, -135, -135, -135, 97, -135, 98, -135, -135,
608 118, -135, 297, -135, -26, 101, -135, 104, -135, -135,
609 342, -135, -135, 120, -135, -135, -135, -135, -135, 433,
610 -135, -135, 111, 119, -135, -135, -135, 130, 136, -135,
611 -135, -135, -135, -135, -135, -135
663}; 612};
664 613
665/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM. 614 /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM.
666 Performed when YYTABLE doesn't specify something else to do. Zero 615 Performed when YYTABLE does not specify something else to do. Zero
667 means the default is an error. */ 616 means the default is an error. */
668static const yytype_uint8 yydefact[] = 617static const yytype_uint8 yydefact[] =
669{ 618{
670 4, 4, 2, 0, 1, 3, 0, 28, 55, 46, 619 4, 4, 2, 0, 1, 3, 0, 28, 55, 46,
@@ -673,191 +622,158 @@ static const yytype_uint8 yydefact[] =
673 0, 0, 0, 64, 36, 56, 5, 10, 17, 23, 622 0, 0, 0, 64, 36, 56, 5, 10, 17, 23,
674 24, 26, 27, 33, 34, 11, 12, 13, 14, 15, 623 24, 26, 27, 33, 34, 11, 12, 13, 14, 15,
675 39, 0, 43, 6, 37, 0, 44, 22, 38, 45, 624 39, 0, 43, 6, 37, 0, 44, 22, 38, 45,
676 0, 0, 130, 68, 69, 0, 58, 0, 18, 19, 625 0, 0, 129, 68, 69, 0, 58, 0, 18, 19,
677 0, 131, 67, 25, 42, 128, 0, 126, 22, 40, 626 0, 130, 67, 25, 42, 127, 0, 125, 22, 40,
678 0, 114, 0, 0, 110, 9, 17, 41, 94, 0, 627 0, 113, 0, 0, 109, 9, 17, 41, 93, 0,
679 0, 0, 0, 57, 59, 60, 16, 0, 66, 132, 628 0, 0, 57, 59, 60, 16, 0, 66, 131, 101,
680 102, 122, 72, 0, 0, 124, 0, 7, 113, 107, 629 121, 72, 0, 0, 123, 0, 7, 112, 106, 76,
681 77, 78, 0, 0, 0, 122, 76, 0, 115, 116, 630 77, 0, 0, 0, 121, 75, 0, 114, 115, 119,
682 120, 106, 0, 111, 131, 95, 56, 0, 94, 91, 631 105, 0, 110, 130, 94, 56, 0, 93, 90, 92,
683 93, 35, 0, 74, 73, 61, 20, 103, 0, 0, 632 35, 0, 73, 61, 20, 102, 0, 0, 84, 87,
684 85, 88, 89, 129, 125, 127, 119, 0, 77, 0, 633 88, 128, 124, 126, 118, 0, 76, 0, 120, 74,
685 121, 75, 118, 81, 0, 112, 0, 0, 96, 0, 634 117, 80, 0, 111, 0, 0, 95, 0, 91, 98,
686 92, 99, 0, 133, 123, 0, 21, 104, 71, 70, 635 0, 132, 122, 0, 21, 103, 71, 70, 83, 0,
687 84, 0, 83, 82, 0, 0, 117, 101, 100, 0, 636 82, 81, 0, 0, 116, 100, 99, 0, 0, 104,
688 0, 105, 86, 90, 80, 79, 98, 97 637 85, 89, 79, 78, 97, 96
689};
690
691/* YYDEFGOTO[NTERM-NUM]. */
692static const yytype_int16 yydefgoto[] =
693{
694 -1, 1, 2, 3, 36, 78, 57, 37, 67, 68,
695 69, 81, 39, 40, 41, 42, 43, 70, 93, 94,
696 44, 124, 72, 115, 116, 139, 140, 141, 142, 129,
697 130, 45, 166, 167, 56, 82, 83, 84, 117, 118,
698 119, 120, 137, 52, 76, 77, 46, 101, 47
699}; 638};
700 639
701/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing 640 /* YYPGOTO[NTERM-NUM]. */
702 STATE-NUM. */ 641static const yytype_int16 yypgoto[] =
703#define YYPACT_NINF -92
704static const yytype_int16 yypact[] =
705{ 642{
706 -92, 19, -92, 208, -92, -92, 39, -92, -92, -92, 643 -135, -135, 157, -135, -135, -135, -135, -48, -135, -135,
707 -92, -92, -27, -92, 23, -92, -92, -92, -92, -92, 644 90, -1, -60, -33, -135, -135, -135, -78, -135, -135,
708 -92, -92, -92, -92, -22, -92, 9, -92, -92, -92, 645 -61, -31, -135, -92, -135, -134, -135, -135, -59, -41,
709 -6, 16, 25, -92, -92, -92, -92, -92, 31, 473, 646 -135, -135, -135, -135, -18, -135, -135, 107, -135, -135,
710 -92, -92, -92, -92, -92, -92, -92, -92, -92, -92, 647 37, 80, 78, 143, -135, 94, -135, -135, -135
711 49, 37, -92, -92, 51, 108, -92, 473, 51, -92,
712 473, 59, -92, -92, -92, 12, -3, 60, 57, -92,
713 31, -7, 24, -92, -92, 55, 42, -92, 473, -92,
714 46, -21, 61, 158, -92, -92, 31, -92, 389, 71,
715 82, 88, 89, -92, -3, -92, -92, 31, -92, -92,
716 -92, -92, -92, 254, 73, -92, -24, -92, -92, -92,
717 90, -92, 17, 75, 45, -92, 32, 96, 95, -92,
718 -92, -92, 99, -92, 115, -92, -92, 3, 48, -92,
719 34, -92, 102, -92, -92, -92, -92, -11, 100, 103,
720 111, 104, -92, -92, -92, -92, -92, 106, -92, 113,
721 -92, -92, 126, -92, 299, -92, -21, 121, -92, 132,
722 -92, -92, 344, -92, -92, 125, -92, -92, -92, -92,
723 -92, 435, -92, -92, 138, 139, -92, -92, -92, 142,
724 143, -92, -92, -92, -92, -92, -92, -92
725}; 648};
726 649
727/* YYPGOTO[NTERM-NUM]. */ 650 /* YYDEFGOTO[NTERM-NUM]. */
728static const yytype_int16 yypgoto[] = 651static const yytype_int16 yydefgoto[] =
729{ 652{
730 -92, -92, 192, -92, -92, -92, -92, -47, -92, -92, 653 -1, 1, 2, 3, 36, 78, 57, 37, 67, 68,
731 97, 0, -60, -32, -92, -92, -92, -79, -92, -92, 654 69, 81, 39, 40, 41, 42, 43, 70, 92, 93,
732 -58, -26, -92, -38, -92, -91, -92, -92, -59, -28, 655 44, 123, 72, 114, 115, 137, 138, 139, 140, 128,
733 -92, -92, -92, -92, -20, -92, -92, 112, -92, -92, 656 129, 45, 164, 165, 56, 82, 83, 84, 116, 117,
734 41, 91, 83, 149, -92, 101, -92, -92, -92 657 118, 119, 135, 52, 76, 77, 46, 100, 47
735}; 658};
736 659
737/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If 660 /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If
738 positive, shift that token. If negative, reduce the rule which 661 positive, shift that token. If negative, reduce the rule whose
739 number is the opposite. If YYTABLE_NINF, syntax error. */ 662 number is the opposite. If YYTABLE_NINF, syntax error. */
740#define YYTABLE_NINF -110
741static const yytype_int16 yytable[] = 663static const yytype_int16 yytable[] =
742{ 664{
743 88, 89, 114, 38, 157, 10, 59, 73, 95, 128, 665 88, 89, 38, 113, 155, 94, 73, 71, 59, 85,
744 85, 50, 71, 91, 75, 20, 54, 110, 147, 4, 666 127, 162, 109, 145, 50, 54, 110, 75, 173, 147,
745 164, 111, 144, 99, 29, 51, 100, 112, 33, 66, 667 98, 149, 111, 99, 66, 142, 178, 112, 51, 55,
746 55, 107, 113, 114, 79, 114, 135, -94, 87, 92, 668 106, 163, 133, 113, 91, 113, 79, -93, 4, 97,
747 165, 125, 60, 88, 98, 158, 53, 58, 128, 128, 669 87, 124, 88, 53, 58, 156, 60, 10, 127, 127,
748 63, 127, -94, 66, 64, 148, 73, 86, 102, 111, 670 146, 126, -93, 66, 110, 73, 86, 20, 55, 101,
749 65, 55, 66, 175, 61, 112, 153, 66, 161, 63, 671 111, 151, 66, 61, 159, 51, 29, 48, 49, 62,
750 62, 180, 103, 64, 149, 75, 151, 114, 86, 65, 672 33, 107, 108, 102, 75, 152, 113, 86, 160, 63,
751 154, 66, 162, 148, 48, 49, 125, 111, 105, 106, 673 104, 105, 90, 64, 146, 157, 158, 55, 110, 65,
752 158, 108, 109, 112, 88, 66, 127, 90, 66, 159, 674 95, 66, 88, 124, 111, 96, 66, 156, 103, 120,
753 160, 51, 88, 55, 97, 96, 104, 121, 143, 80, 675 88, 130, 141, 126, 112, 66, 131, 80, 132, 88,
754 150, 88, 183, 7, 8, 9, 10, 11, 12, 13, 676 181, 7, 8, 9, 10, 11, 12, 13, 148, 15,
755 131, 15, 16, 17, 18, 19, 20, 21, 22, 23, 677 16, 17, 18, 19, 20, 21, 22, 23, 24, 153,
756 24, 132, 26, 27, 28, 29, 30, 133, 134, 33, 678 26, 27, 28, 29, 30, 154, 107, 33, 34, 98,
757 34, 155, 156, 113, 108, 99, -22, 163, 170, 168, 679 161, 166, 167, 169, -22, 168, 170, 171, 35, 162,
758 35, 171, 169, -22, -108, 172, -22, 164, -22, 122, 680 175, -22, -107, 176, -22, 179, -22, 121, 5, -22,
759 181, -22, 173, 7, 8, 9, 10, 11, 12, 13, 681 182, 7, 8, 9, 10, 11, 12, 13, 183, 15,
760 177, 15, 16, 17, 18, 19, 20, 21, 22, 23, 682 16, 17, 18, 19, 20, 21, 22, 23, 24, 184,
761 24, 178, 26, 27, 28, 29, 30, 184, 185, 33, 683 26, 27, 28, 29, 30, 185, 134, 33, 34, 144,
762 34, 186, 187, 5, 136, 123, -22, 176, 152, 74, 684 122, 174, 150, 74, -22, 0, 0, 0, 35, 143,
763 35, 146, 0, -22, -109, 0, -22, 145, -22, 6, 685 0, -22, -108, 0, -22, 0, -22, 6, 0, -22,
764 0, -22, 0, 7, 8, 9, 10, 11, 12, 13, 686 0, 7, 8, 9, 10, 11, 12, 13, 14, 15,
765 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 687 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
766 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 688 26, 27, 28, 29, 30, 31, 32, 33, 34, 0,
767 34, 0, 0, 0, 0, 0, -22, 0, 0, 0, 689 0, 0, 0, 0, -22, 0, 0, 0, 35, 0,
768 35, 0, 0, -22, 0, 138, -22, 0, -22, 7, 690 0, -22, 0, 136, -22, 0, -22, 7, 8, 9,
769 8, 9, 10, 11, 12, 13, 0, 15, 16, 17, 691 10, 11, 12, 13, 0, 15, 16, 17, 18, 19,
770 18, 19, 20, 21, 22, 23, 24, 0, 26, 27, 692 20, 21, 22, 23, 24, 0, 26, 27, 28, 29,
771 28, 29, 30, 0, 0, 33, 34, 0, 0, 0, 693 30, 0, 0, 33, 34, 0, 0, 0, 0, -86,
772 0, -87, 0, 0, 0, 0, 35, 0, 0, 0, 694 0, 0, 0, 0, 35, 0, 0, 0, 172, 0,
773 174, 0, 0, -87, 7, 8, 9, 10, 11, 12, 695 0, -86, 7, 8, 9, 10, 11, 12, 13, 0,
774 13, 0, 15, 16, 17, 18, 19, 20, 21, 22, 696 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
775 23, 24, 0, 26, 27, 28, 29, 30, 0, 0, 697 0, 26, 27, 28, 29, 30, 0, 0, 33, 34,
776 33, 34, 0, 0, 0, 0, -87, 0, 0, 0, 698 0, 0, 0, 0, -86, 0, 0, 0, 0, 35,
777 0, 35, 0, 0, 0, 179, 0, 0, -87, 7, 699 0, 0, 0, 177, 0, 0, -86, 7, 8, 9,
778 8, 9, 10, 11, 12, 13, 0, 15, 16, 17, 700 10, 11, 12, 13, 0, 15, 16, 17, 18, 19,
779 18, 19, 20, 21, 22, 23, 24, 0, 26, 27, 701 20, 21, 22, 23, 24, 0, 26, 27, 28, 29,
780 28, 29, 30, 0, 0, 33, 34, 0, 0, 0, 702 30, 0, 0, 33, 34, 0, 0, 0, 0, -86,
781 0, -87, 0, 0, 0, 0, 35, 0, 0, 0, 703 0, 0, 0, 0, 35, 0, 0, 0, 0, 0,
782 0, 0, 0, -87, 7, 8, 9, 10, 11, 12, 704 0, -86, 7, 8, 9, 10, 11, 12, 13, 0,
783 13, 0, 15, 16, 17, 18, 19, 20, 21, 22, 705 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
784 23, 24, 0, 26, 27, 28, 29, 30, 0, 0, 706 0, 26, 27, 28, 29, 30, 0, 0, 33, 34,
785 33, 34, 0, 0, 0, 0, 0, 125, 0, 0, 707 0, 0, 0, 0, 0, 124, 0, 0, 0, 125,
786 0, 126, 0, 0, 0, 0, 0, 127, 0, 66, 708 0, 0, 0, 0, 0, 126, 0, 66, 7, 8,
787 7, 8, 9, 10, 11, 12, 13, 0, 15, 16,
788 17, 18, 19, 20, 21, 22, 23, 24, 0, 26,
789 27, 28, 29, 30, 0, 0, 33, 34, 0, 0,
790 0, 0, 182, 0, 0, 0, 0, 35, 7, 8,
791 9, 10, 11, 12, 13, 0, 15, 16, 17, 18, 709 9, 10, 11, 12, 13, 0, 15, 16, 17, 18,
792 19, 20, 21, 22, 23, 24, 0, 26, 27, 28, 710 19, 20, 21, 22, 23, 24, 0, 26, 27, 28,
793 29, 30, 0, 0, 33, 34, 0, 0, 0, 0, 711 29, 30, 0, 0, 33, 34, 0, 0, 0, 0,
794 0, 0, 0, 0, 0, 35 712 180, 0, 0, 0, 0, 35, 7, 8, 9, 10,
713 11, 12, 13, 0, 15, 16, 17, 18, 19, 20,
714 21, 22, 23, 24, 0, 26, 27, 28, 29, 30,
715 0, 0, 33, 34, 0, 0, 0, 0, 0, 0,
716 0, 0, 0, 35
795}; 717};
796 718
797#define yypact_value_is_default(Yystate) \
798 (!!((Yystate) == (-92)))
799
800#define yytable_value_is_error(Yytable_value) \
801 YYID (0)
802
803static const yytype_int16 yycheck[] = 719static const yytype_int16 yycheck[] =
804{ 720{
805 60, 60, 81, 3, 1, 8, 26, 39, 66, 88, 721 60, 60, 3, 81, 1, 66, 39, 38, 26, 57,
806 57, 38, 38, 1, 38, 18, 38, 38, 1, 0, 722 88, 31, 38, 1, 38, 38, 42, 38, 152, 111,
807 31, 42, 46, 30, 27, 52, 33, 48, 31, 50, 723 30, 113, 48, 33, 50, 46, 160, 53, 52, 52,
808 52, 78, 53, 112, 54, 114, 94, 34, 58, 65, 724 78, 51, 93, 111, 65, 113, 54, 34, 0, 70,
809 51, 38, 48, 103, 70, 42, 23, 38, 127, 128, 725 58, 38, 102, 23, 38, 42, 48, 8, 126, 127,
810 38, 48, 49, 50, 42, 38, 88, 57, 34, 42, 726 38, 48, 49, 50, 42, 88, 57, 18, 52, 34,
811 48, 52, 50, 154, 48, 48, 34, 50, 34, 38, 727 48, 34, 50, 48, 34, 52, 27, 45, 46, 45,
812 45, 162, 48, 42, 112, 38, 114, 156, 78, 48, 728 31, 45, 46, 48, 38, 48, 154, 78, 48, 38,
813 48, 50, 48, 38, 45, 46, 38, 42, 46, 47, 729 46, 47, 38, 42, 38, 126, 127, 52, 42, 48,
814 42, 45, 46, 48, 154, 50, 48, 38, 50, 127, 730 45, 50, 152, 38, 48, 47, 50, 42, 51, 46,
815 128, 52, 162, 52, 47, 45, 51, 46, 35, 1, 731 160, 49, 35, 48, 53, 50, 49, 1, 49, 169,
816 35, 171, 171, 5, 6, 7, 8, 9, 10, 11, 732 169, 5, 6, 7, 8, 9, 10, 11, 35, 13,
817 49, 13, 14, 15, 16, 17, 18, 19, 20, 21, 733 14, 15, 16, 17, 18, 19, 20, 21, 22, 45,
818 22, 49, 24, 25, 26, 27, 28, 49, 49, 31, 734 24, 25, 26, 27, 28, 47, 45, 31, 32, 30,
819 32, 45, 47, 53, 45, 30, 38, 45, 37, 49, 735 45, 49, 49, 47, 38, 37, 49, 49, 42, 31,
820 42, 47, 49, 45, 46, 49, 48, 31, 50, 1, 736 49, 45, 46, 49, 48, 35, 50, 1, 1, 53,
821 35, 53, 49, 5, 6, 7, 8, 9, 10, 11, 737 49, 5, 6, 7, 8, 9, 10, 11, 49, 13,
822 49, 13, 14, 15, 16, 17, 18, 19, 20, 21, 738 14, 15, 16, 17, 18, 19, 20, 21, 22, 49,
823 22, 49, 24, 25, 26, 27, 28, 49, 49, 31, 739 24, 25, 26, 27, 28, 49, 96, 31, 32, 109,
824 32, 49, 49, 1, 97, 83, 38, 156, 115, 50, 740 83, 154, 114, 50, 38, -1, -1, -1, 42, 105,
825 42, 110, -1, 45, 46, -1, 48, 106, 50, 1, 741 -1, 45, 46, -1, 48, -1, 50, 1, -1, 53,
826 -1, 53, -1, 5, 6, 7, 8, 9, 10, 11, 742 -1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
827 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 743 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
828 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 744 24, 25, 26, 27, 28, 29, 30, 31, 32, -1,
829 32, -1, -1, -1, -1, -1, 38, -1, -1, -1, 745 -1, -1, -1, -1, 38, -1, -1, -1, 42, -1,
830 42, -1, -1, 45, -1, 1, 48, -1, 50, 5, 746 -1, 45, -1, 1, 48, -1, 50, 5, 6, 7,
831 6, 7, 8, 9, 10, 11, -1, 13, 14, 15, 747 8, 9, 10, 11, -1, 13, 14, 15, 16, 17,
832 16, 17, 18, 19, 20, 21, 22, -1, 24, 25, 748 18, 19, 20, 21, 22, -1, 24, 25, 26, 27,
833 26, 27, 28, -1, -1, 31, 32, -1, -1, -1, 749 28, -1, -1, 31, 32, -1, -1, -1, -1, 37,
834 -1, 37, -1, -1, -1, -1, 42, -1, -1, -1, 750 -1, -1, -1, -1, 42, -1, -1, -1, 1, -1,
835 1, -1, -1, 49, 5, 6, 7, 8, 9, 10, 751 -1, 49, 5, 6, 7, 8, 9, 10, 11, -1,
836 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 752 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
837 21, 22, -1, 24, 25, 26, 27, 28, -1, -1, 753 -1, 24, 25, 26, 27, 28, -1, -1, 31, 32,
838 31, 32, -1, -1, -1, -1, 37, -1, -1, -1, 754 -1, -1, -1, -1, 37, -1, -1, -1, -1, 42,
839 -1, 42, -1, -1, -1, 1, -1, -1, 49, 5, 755 -1, -1, -1, 1, -1, -1, 49, 5, 6, 7,
840 6, 7, 8, 9, 10, 11, -1, 13, 14, 15, 756 8, 9, 10, 11, -1, 13, 14, 15, 16, 17,
841 16, 17, 18, 19, 20, 21, 22, -1, 24, 25, 757 18, 19, 20, 21, 22, -1, 24, 25, 26, 27,
842 26, 27, 28, -1, -1, 31, 32, -1, -1, -1, 758 28, -1, -1, 31, 32, -1, -1, -1, -1, 37,
843 -1, 37, -1, -1, -1, -1, 42, -1, -1, -1, 759 -1, -1, -1, -1, 42, -1, -1, -1, -1, -1,
844 -1, -1, -1, 49, 5, 6, 7, 8, 9, 10, 760 -1, 49, 5, 6, 7, 8, 9, 10, 11, -1,
845 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 761 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
846 21, 22, -1, 24, 25, 26, 27, 28, -1, -1, 762 -1, 24, 25, 26, 27, 28, -1, -1, 31, 32,
847 31, 32, -1, -1, -1, -1, -1, 38, -1, -1, 763 -1, -1, -1, -1, -1, 38, -1, -1, -1, 42,
848 -1, 42, -1, -1, -1, -1, -1, 48, -1, 50, 764 -1, -1, -1, -1, -1, 48, -1, 50, 5, 6,
849 5, 6, 7, 8, 9, 10, 11, -1, 13, 14,
850 15, 16, 17, 18, 19, 20, 21, 22, -1, 24,
851 25, 26, 27, 28, -1, -1, 31, 32, -1, -1,
852 -1, -1, 37, -1, -1, -1, -1, 42, 5, 6,
853 7, 8, 9, 10, 11, -1, 13, 14, 15, 16, 765 7, 8, 9, 10, 11, -1, 13, 14, 15, 16,
854 17, 18, 19, 20, 21, 22, -1, 24, 25, 26, 766 17, 18, 19, 20, 21, 22, -1, 24, 25, 26,
855 27, 28, -1, -1, 31, 32, -1, -1, -1, -1, 767 27, 28, -1, -1, 31, 32, -1, -1, -1, -1,
856 -1, -1, -1, -1, -1, 42 768 37, -1, -1, -1, -1, 42, 5, 6, 7, 8,
769 9, 10, 11, -1, 13, 14, 15, 16, 17, 18,
770 19, 20, 21, 22, -1, 24, 25, 26, 27, 28,
771 -1, -1, 31, 32, -1, -1, -1, -1, -1, -1,
772 -1, -1, -1, 42
857}; 773};
858 774
859/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing 775 /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
860 symbol of state STATE-NUM. */ 776 symbol of state STATE-NUM. */
861static const yytype_uint8 yystos[] = 777static const yytype_uint8 yystos[] =
862{ 778{
863 0, 55, 56, 57, 0, 56, 1, 5, 6, 7, 779 0, 55, 56, 57, 0, 56, 1, 5, 6, 7,
@@ -869,42 +785,66 @@ static const yytype_uint8 yystos[] =
869 48, 48, 45, 38, 42, 48, 50, 62, 63, 64, 785 48, 48, 45, 38, 42, 48, 50, 62, 63, 64,
870 71, 75, 76, 67, 97, 38, 98, 99, 59, 88, 786 71, 75, 76, 67, 97, 38, 98, 99, 59, 88,
871 1, 65, 89, 90, 91, 61, 65, 88, 66, 82, 787 1, 65, 89, 90, 91, 61, 65, 88, 66, 82,
872 38, 1, 75, 72, 73, 74, 45, 47, 75, 30, 788 38, 75, 72, 73, 74, 45, 47, 75, 30, 33,
873 33, 101, 34, 48, 51, 46, 47, 61, 45, 46, 789 101, 34, 48, 51, 46, 47, 61, 45, 46, 38,
874 38, 42, 48, 53, 71, 77, 78, 92, 93, 94, 790 42, 48, 53, 71, 77, 78, 92, 93, 94, 95,
875 95, 46, 1, 91, 75, 38, 42, 48, 71, 83, 791 46, 1, 91, 75, 38, 42, 48, 71, 83, 84,
876 84, 49, 49, 49, 49, 74, 64, 96, 1, 79, 792 49, 49, 49, 74, 64, 96, 1, 79, 80, 81,
877 80, 81, 82, 35, 46, 99, 95, 1, 38, 77, 793 82, 35, 46, 99, 95, 1, 38, 77, 35, 77,
878 35, 77, 96, 34, 48, 45, 47, 1, 42, 83, 794 96, 34, 48, 45, 47, 1, 42, 83, 83, 34,
879 83, 34, 48, 45, 31, 51, 86, 87, 49, 49, 795 48, 45, 31, 51, 86, 87, 49, 49, 37, 47,
880 37, 47, 49, 49, 1, 79, 94, 49, 49, 1, 796 49, 49, 1, 79, 94, 49, 49, 1, 79, 35,
881 79, 35, 37, 82, 49, 49, 49, 49 797 37, 82, 49, 49, 49, 49
882}; 798};
883 799
884#define yyerrok (yyerrstatus = 0) 800 /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
885#define yyclearin (yychar = YYEMPTY) 801static const yytype_uint8 yyr1[] =
886#define YYEMPTY (-2) 802{
887#define YYEOF 0 803 0, 54, 55, 55, 57, 56, 59, 58, 60, 58,
888 804 58, 58, 58, 58, 58, 58, 61, 62, 62, 63,
889#define YYACCEPT goto yyacceptlab 805 63, 64, 65, 65, 66, 66, 67, 67, 68, 68,
890#define YYABORT goto yyabortlab 806 68, 68, 68, 69, 69, 69, 69, 69, 69, 69,
891#define YYERROR goto yyerrorlab 807 69, 69, 69, 69, 69, 69, 70, 70, 70, 70,
892 808 70, 70, 70, 70, 70, 70, 70, 71, 72, 72,
893 809 73, 73, 74, 74, 74, 74, 75, 75, 76, 76,
894/* Like YYERROR except do call yyerror. This remains here temporarily 810 76, 76, 76, 76, 77, 77, 78, 78, 78, 78,
895 to ease the transition to the new meaning of YYERROR, for GCC. 811 78, 78, 78, 79, 79, 79, 80, 80, 81, 81,
896 Once GCC version 2 has supplanted version 1, this can go. However, 812 82, 83, 83, 84, 84, 84, 84, 84, 84, 84,
897 YYFAIL appears to be in use. Nevertheless, it is formally deprecated 813 84, 85, 86, 86, 87, 88, 88, 89, 89, 90,
898 in Bison 2.4.2's NEWS entry, where a plan to phase it out is 814 90, 91, 91, 92, 92, 93, 93, 94, 94, 94,
899 discussed. */ 815 95, 96, 96, 97, 97, 98, 98, 99, 99, 100,
900 816 101, 101, 102
901#define YYFAIL goto yyerrlab 817};
902#if defined YYFAIL 818
903 /* This is here to suppress warnings from the GCC cpp's 819 /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */
904 -Wunused-macros. Normally we don't worry about that warning, but 820static const yytype_uint8 yyr2[] =
905 some users do, and we want to make it easy for users to remove 821{
906 YYFAIL uses, which will produce warnings from Bison 2.5. */ 822 0, 2, 1, 2, 0, 2, 0, 4, 0, 3,
907#endif 823 1, 1, 1, 1, 2, 2, 3, 0, 1, 1,
824 3, 4, 0, 1, 1, 2, 1, 1, 1, 1,
825 1, 1, 1, 1, 1, 4, 1, 2, 2, 2,
826 3, 3, 3, 2, 2, 2, 1, 1, 1, 1,
827 1, 1, 1, 1, 1, 1, 1, 2, 0, 1,
828 1, 2, 1, 1, 1, 1, 2, 1, 1, 1,
829 4, 4, 2, 3, 2, 1, 1, 1, 4, 4,
830 2, 3, 3, 2, 1, 3, 0, 1, 1, 3,
831 2, 2, 1, 0, 1, 1, 4, 4, 2, 3,
832 3, 3, 0, 1, 2, 3, 3, 0, 1, 1,
833 2, 3, 2, 0, 1, 1, 3, 2, 2, 1,
834 2, 0, 2, 3, 4, 1, 3, 1, 3, 2,
835 0, 1, 5
836};
837
838
839#define yyerrok (yyerrstatus = 0)
840#define yyclearin (yychar = YYEMPTY)
841#define YYEMPTY (-2)
842#define YYEOF 0
843
844#define YYACCEPT goto yyacceptlab
845#define YYABORT goto yyabortlab
846#define YYERROR goto yyerrorlab
847
908 848
909#define YYRECOVERING() (!!yyerrstatus) 849#define YYRECOVERING() (!!yyerrstatus)
910 850
@@ -921,27 +861,15 @@ do \
921 else \ 861 else \
922 { \ 862 { \
923 yyerror (YY_("syntax error: cannot back up")); \ 863 yyerror (YY_("syntax error: cannot back up")); \
924 YYERROR; \ 864 YYERROR; \
925 } \ 865 } \
926while (YYID (0)) 866while (0)
927 867
928/* Error token number */ 868/* Error token number */
929#define YYTERROR 1 869#define YYTERROR 1
930#define YYERRCODE 256 870#define YYERRCODE 256
931 871
932 872
933/* This macro is provided for backward compatibility. */
934#ifndef YY_LOCATION_PRINT
935# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
936#endif
937
938
939/* YYLEX -- calling `yylex' with the right arguments. */
940#ifdef YYLEX_PARAM
941# define YYLEX yylex (YYLEX_PARAM)
942#else
943# define YYLEX yylex ()
944#endif
945 873
946/* Enable debugging if requested. */ 874/* Enable debugging if requested. */
947#if YYDEBUG 875#if YYDEBUG
@@ -951,40 +879,36 @@ while (YYID (0))
951# define YYFPRINTF fprintf 879# define YYFPRINTF fprintf
952# endif 880# endif
953 881
954# define YYDPRINTF(Args) \ 882# define YYDPRINTF(Args) \
955do { \ 883do { \
956 if (yydebug) \ 884 if (yydebug) \
957 YYFPRINTF Args; \ 885 YYFPRINTF Args; \
958} while (YYID (0)) 886} while (0)
887
888/* This macro is provided for backward compatibility. */
889#ifndef YY_LOCATION_PRINT
890# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
891#endif
892
959 893
960# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ 894# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
961do { \ 895do { \
962 if (yydebug) \ 896 if (yydebug) \
963 { \ 897 { \
964 YYFPRINTF (stderr, "%s ", Title); \ 898 YYFPRINTF (stderr, "%s ", Title); \
965 yy_symbol_print (stderr, \ 899 yy_symbol_print (stderr, \
966 Type, Value); \ 900 Type, Value); \
967 YYFPRINTF (stderr, "\n"); \ 901 YYFPRINTF (stderr, "\n"); \
968 } \ 902 } \
969} while (YYID (0)) 903} while (0)
970 904
971 905
972/*--------------------------------. 906/*----------------------------------------.
973| Print this symbol on YYOUTPUT. | 907| Print this symbol's value on YYOUTPUT. |
974`--------------------------------*/ 908`----------------------------------------*/
975 909
976/*ARGSUSED*/
977#if (defined __STDC__ || defined __C99__FUNC__ \
978 || defined __cplusplus || defined _MSC_VER)
979static void 910static void
980yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) 911yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
981#else
982static void
983yy_symbol_value_print (yyoutput, yytype, yyvaluep)
984 FILE *yyoutput;
985 int yytype;
986 YYSTYPE const * const yyvaluep;
987#endif
988{ 912{
989 FILE *yyo = yyoutput; 913 FILE *yyo = yyoutput;
990 YYUSE (yyo); 914 YYUSE (yyo);
@@ -993,14 +917,8 @@ yy_symbol_value_print (yyoutput, yytype, yyvaluep)
993# ifdef YYPRINT 917# ifdef YYPRINT
994 if (yytype < YYNTOKENS) 918 if (yytype < YYNTOKENS)
995 YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); 919 YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
996# else
997 YYUSE (yyoutput);
998# endif 920# endif
999 switch (yytype) 921 YYUSE (yytype);
1000 {
1001 default:
1002 break;
1003 }
1004} 922}
1005 923
1006 924
@@ -1008,22 +926,11 @@ yy_symbol_value_print (yyoutput, yytype, yyvaluep)
1008| Print this symbol on YYOUTPUT. | 926| Print this symbol on YYOUTPUT. |
1009`--------------------------------*/ 927`--------------------------------*/
1010 928
1011#if (defined __STDC__ || defined __C99__FUNC__ \
1012 || defined __cplusplus || defined _MSC_VER)
1013static void 929static void
1014yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) 930yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
1015#else
1016static void
1017yy_symbol_print (yyoutput, yytype, yyvaluep)
1018 FILE *yyoutput;
1019 int yytype;
1020 YYSTYPE const * const yyvaluep;
1021#endif
1022{ 931{
1023 if (yytype < YYNTOKENS) 932 YYFPRINTF (yyoutput, "%s %s (",
1024 YYFPRINTF (yyoutput, "token %s (", yytname[yytype]); 933 yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);
1025 else
1026 YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
1027 934
1028 yy_symbol_value_print (yyoutput, yytype, yyvaluep); 935 yy_symbol_value_print (yyoutput, yytype, yyvaluep);
1029 YYFPRINTF (yyoutput, ")"); 936 YYFPRINTF (yyoutput, ")");
@@ -1034,16 +941,8 @@ yy_symbol_print (yyoutput, yytype, yyvaluep)
1034| TOP (included). | 941| TOP (included). |
1035`------------------------------------------------------------------*/ 942`------------------------------------------------------------------*/
1036 943
1037#if (defined __STDC__ || defined __C99__FUNC__ \
1038 || defined __cplusplus || defined _MSC_VER)
1039static void 944static void
1040yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop) 945yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
1041#else
1042static void
1043yy_stack_print (yybottom, yytop)
1044 yytype_int16 *yybottom;
1045 yytype_int16 *yytop;
1046#endif
1047{ 946{
1048 YYFPRINTF (stderr, "Stack now"); 947 YYFPRINTF (stderr, "Stack now");
1049 for (; yybottom <= yytop; yybottom++) 948 for (; yybottom <= yytop; yybottom++)
@@ -1054,49 +953,42 @@ yy_stack_print (yybottom, yytop)
1054 YYFPRINTF (stderr, "\n"); 953 YYFPRINTF (stderr, "\n");
1055} 954}
1056 955
1057# define YY_STACK_PRINT(Bottom, Top) \ 956# define YY_STACK_PRINT(Bottom, Top) \
1058do { \ 957do { \
1059 if (yydebug) \ 958 if (yydebug) \
1060 yy_stack_print ((Bottom), (Top)); \ 959 yy_stack_print ((Bottom), (Top)); \
1061} while (YYID (0)) 960} while (0)
1062 961
1063 962
1064/*------------------------------------------------. 963/*------------------------------------------------.
1065| Report that the YYRULE is going to be reduced. | 964| Report that the YYRULE is going to be reduced. |
1066`------------------------------------------------*/ 965`------------------------------------------------*/
1067 966
1068#if (defined __STDC__ || defined __C99__FUNC__ \
1069 || defined __cplusplus || defined _MSC_VER)
1070static void 967static void
1071yy_reduce_print (YYSTYPE *yyvsp, int yyrule) 968yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule)
1072#else
1073static void
1074yy_reduce_print (yyvsp, yyrule)
1075 YYSTYPE *yyvsp;
1076 int yyrule;
1077#endif
1078{ 969{
970 unsigned long int yylno = yyrline[yyrule];
1079 int yynrhs = yyr2[yyrule]; 971 int yynrhs = yyr2[yyrule];
1080 int yyi; 972 int yyi;
1081 unsigned long int yylno = yyrline[yyrule];
1082 YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n", 973 YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
1083 yyrule - 1, yylno); 974 yyrule - 1, yylno);
1084 /* The symbols being reduced. */ 975 /* The symbols being reduced. */
1085 for (yyi = 0; yyi < yynrhs; yyi++) 976 for (yyi = 0; yyi < yynrhs; yyi++)
1086 { 977 {
1087 YYFPRINTF (stderr, " $%d = ", yyi + 1); 978 YYFPRINTF (stderr, " $%d = ", yyi + 1);
1088 yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi], 979 yy_symbol_print (stderr,
1089 &(yyvsp[(yyi + 1) - (yynrhs)]) 980 yystos[yyssp[yyi + 1 - yynrhs]],
1090 ); 981 &(yyvsp[(yyi + 1) - (yynrhs)])
982 );
1091 YYFPRINTF (stderr, "\n"); 983 YYFPRINTF (stderr, "\n");
1092 } 984 }
1093} 985}
1094 986
1095# define YY_REDUCE_PRINT(Rule) \ 987# define YY_REDUCE_PRINT(Rule) \
1096do { \ 988do { \
1097 if (yydebug) \ 989 if (yydebug) \
1098 yy_reduce_print (yyvsp, Rule); \ 990 yy_reduce_print (yyssp, yyvsp, Rule); \
1099} while (YYID (0)) 991} while (0)
1100 992
1101/* Nonzero means print parse trace. It is left uninitialized so that 993/* Nonzero means print parse trace. It is left uninitialized so that
1102 multiple parsers can coexist. */ 994 multiple parsers can coexist. */
@@ -1110,7 +1002,7 @@ int yydebug;
1110 1002
1111 1003
1112/* YYINITDEPTH -- initial size of the parser's stacks. */ 1004/* YYINITDEPTH -- initial size of the parser's stacks. */
1113#ifndef YYINITDEPTH 1005#ifndef YYINITDEPTH
1114# define YYINITDEPTH 200 1006# define YYINITDEPTH 200
1115#endif 1007#endif
1116 1008
@@ -1133,15 +1025,8 @@ int yydebug;
1133# define yystrlen strlen 1025# define yystrlen strlen
1134# else 1026# else
1135/* Return the length of YYSTR. */ 1027/* Return the length of YYSTR. */
1136#if (defined __STDC__ || defined __C99__FUNC__ \
1137 || defined __cplusplus || defined _MSC_VER)
1138static YYSIZE_T 1028static YYSIZE_T
1139yystrlen (const char *yystr) 1029yystrlen (const char *yystr)
1140#else
1141static YYSIZE_T
1142yystrlen (yystr)
1143 const char *yystr;
1144#endif
1145{ 1030{
1146 YYSIZE_T yylen; 1031 YYSIZE_T yylen;
1147 for (yylen = 0; yystr[yylen]; yylen++) 1032 for (yylen = 0; yystr[yylen]; yylen++)
@@ -1157,16 +1042,8 @@ yystrlen (yystr)
1157# else 1042# else
1158/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in 1043/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
1159 YYDEST. */ 1044 YYDEST. */
1160#if (defined __STDC__ || defined __C99__FUNC__ \
1161 || defined __cplusplus || defined _MSC_VER)
1162static char * 1045static char *
1163yystpcpy (char *yydest, const char *yysrc) 1046yystpcpy (char *yydest, const char *yysrc)
1164#else
1165static char *
1166yystpcpy (yydest, yysrc)
1167 char *yydest;
1168 const char *yysrc;
1169#endif
1170{ 1047{
1171 char *yyd = yydest; 1048 char *yyd = yydest;
1172 const char *yys = yysrc; 1049 const char *yys = yysrc;
@@ -1196,27 +1073,27 @@ yytnamerr (char *yyres, const char *yystr)
1196 char const *yyp = yystr; 1073 char const *yyp = yystr;
1197 1074
1198 for (;;) 1075 for (;;)
1199 switch (*++yyp) 1076 switch (*++yyp)
1200 { 1077 {
1201 case '\'': 1078 case '\'':
1202 case ',': 1079 case ',':
1203 goto do_not_strip_quotes; 1080 goto do_not_strip_quotes;
1204 1081
1205 case '\\': 1082 case '\\':
1206 if (*++yyp != '\\') 1083 if (*++yyp != '\\')
1207 goto do_not_strip_quotes; 1084 goto do_not_strip_quotes;
1208 /* Fall through. */ 1085 /* Fall through. */
1209 default: 1086 default:
1210 if (yyres) 1087 if (yyres)
1211 yyres[yyn] = *yyp; 1088 yyres[yyn] = *yyp;
1212 yyn++; 1089 yyn++;
1213 break; 1090 break;
1214 1091
1215 case '"': 1092 case '"':
1216 if (yyres) 1093 if (yyres)
1217 yyres[yyn] = '\0'; 1094 yyres[yyn] = '\0';
1218 return yyn; 1095 return yyn;
1219 } 1096 }
1220 do_not_strip_quotes: ; 1097 do_not_strip_quotes: ;
1221 } 1098 }
1222 1099
@@ -1239,11 +1116,11 @@ static int
1239yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, 1116yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
1240 yytype_int16 *yyssp, int yytoken) 1117 yytype_int16 *yyssp, int yytoken)
1241{ 1118{
1242 YYSIZE_T yysize0 = yytnamerr (YY_NULL, yytname[yytoken]); 1119 YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
1243 YYSIZE_T yysize = yysize0; 1120 YYSIZE_T yysize = yysize0;
1244 enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; 1121 enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
1245 /* Internationalized format string. */ 1122 /* Internationalized format string. */
1246 const char *yyformat = YY_NULL; 1123 const char *yyformat = YY_NULLPTR;
1247 /* Arguments of yyformat. */ 1124 /* Arguments of yyformat. */
1248 char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; 1125 char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
1249 /* Number of reported tokens (one for the "unexpected", one per 1126 /* Number of reported tokens (one for the "unexpected", one per
@@ -1251,10 +1128,6 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
1251 int yycount = 0; 1128 int yycount = 0;
1252 1129
1253 /* There are many possibilities here to consider: 1130 /* There are many possibilities here to consider:
1254 - Assume YYFAIL is not used. It's too flawed to consider. See
1255 <http://lists.gnu.org/archive/html/bison-patches/2009-12/msg00024.html>
1256 for details. YYERROR is fine as it does not invoke this
1257 function.
1258 - If this state is a consistent state with a default action, then 1131 - If this state is a consistent state with a default action, then
1259 the only way this function was invoked is if the default action 1132 the only way this function was invoked is if the default action
1260 is an error action. In that case, don't check for expected 1133 is an error action. In that case, don't check for expected
@@ -1304,7 +1177,7 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
1304 } 1177 }
1305 yyarg[yycount++] = yytname[yyx]; 1178 yyarg[yycount++] = yytname[yyx];
1306 { 1179 {
1307 YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULL, yytname[yyx]); 1180 YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
1308 if (! (yysize <= yysize1 1181 if (! (yysize <= yysize1
1309 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) 1182 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
1310 return 2; 1183 return 2;
@@ -1371,31 +1244,17 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
1371| Release the memory associated to this symbol. | 1244| Release the memory associated to this symbol. |
1372`-----------------------------------------------*/ 1245`-----------------------------------------------*/
1373 1246
1374/*ARGSUSED*/
1375#if (defined __STDC__ || defined __C99__FUNC__ \
1376 || defined __cplusplus || defined _MSC_VER)
1377static void 1247static void
1378yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep) 1248yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
1379#else
1380static void
1381yydestruct (yymsg, yytype, yyvaluep)
1382 const char *yymsg;
1383 int yytype;
1384 YYSTYPE *yyvaluep;
1385#endif
1386{ 1249{
1387 YYUSE (yyvaluep); 1250 YYUSE (yyvaluep);
1388
1389 if (!yymsg) 1251 if (!yymsg)
1390 yymsg = "Deleting"; 1252 yymsg = "Deleting";
1391 YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); 1253 YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
1392 1254
1393 switch (yytype) 1255 YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
1394 { 1256 YYUSE (yytype);
1395 1257 YY_IGNORE_MAYBE_UNINITIALIZED_END
1396 default:
1397 break;
1398 }
1399} 1258}
1400 1259
1401 1260
@@ -1404,18 +1263,8 @@ yydestruct (yymsg, yytype, yyvaluep)
1404/* The lookahead symbol. */ 1263/* The lookahead symbol. */
1405int yychar; 1264int yychar;
1406 1265
1407
1408#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
1409# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
1410# define YY_IGNORE_MAYBE_UNINITIALIZED_END
1411#endif
1412#ifndef YY_INITIAL_VALUE
1413# define YY_INITIAL_VALUE(Value) /* Nothing. */
1414#endif
1415
1416/* The semantic value of the lookahead symbol. */ 1266/* The semantic value of the lookahead symbol. */
1417YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); 1267YYSTYPE yylval;
1418
1419/* Number of syntax errors so far. */ 1268/* Number of syntax errors so far. */
1420int yynerrs; 1269int yynerrs;
1421 1270
@@ -1424,35 +1273,16 @@ int yynerrs;
1424| yyparse. | 1273| yyparse. |
1425`----------*/ 1274`----------*/
1426 1275
1427#ifdef YYPARSE_PARAM
1428#if (defined __STDC__ || defined __C99__FUNC__ \
1429 || defined __cplusplus || defined _MSC_VER)
1430int
1431yyparse (void *YYPARSE_PARAM)
1432#else
1433int
1434yyparse (YYPARSE_PARAM)
1435 void *YYPARSE_PARAM;
1436#endif
1437#else /* ! YYPARSE_PARAM */
1438#if (defined __STDC__ || defined __C99__FUNC__ \
1439 || defined __cplusplus || defined _MSC_VER)
1440int 1276int
1441yyparse (void) 1277yyparse (void)
1442#else
1443int
1444yyparse ()
1445
1446#endif
1447#endif
1448{ 1278{
1449 int yystate; 1279 int yystate;
1450 /* Number of tokens to shift before error messages enabled. */ 1280 /* Number of tokens to shift before error messages enabled. */
1451 int yyerrstatus; 1281 int yyerrstatus;
1452 1282
1453 /* The stacks and their tools: 1283 /* The stacks and their tools:
1454 `yyss': related to states. 1284 'yyss': related to states.
1455 `yyvs': related to semantic values. 1285 'yyvs': related to semantic values.
1456 1286
1457 Refer to the stacks through separate pointers, to allow yyoverflow 1287 Refer to the stacks through separate pointers, to allow yyoverflow
1458 to reallocate them elsewhere. */ 1288 to reallocate them elsewhere. */
@@ -1520,23 +1350,23 @@ yyparse ()
1520 1350
1521#ifdef yyoverflow 1351#ifdef yyoverflow
1522 { 1352 {
1523 /* Give user a chance to reallocate the stack. Use copies of 1353 /* Give user a chance to reallocate the stack. Use copies of
1524 these so that the &'s don't force the real ones into 1354 these so that the &'s don't force the real ones into
1525 memory. */ 1355 memory. */
1526 YYSTYPE *yyvs1 = yyvs; 1356 YYSTYPE *yyvs1 = yyvs;
1527 yytype_int16 *yyss1 = yyss; 1357 yytype_int16 *yyss1 = yyss;
1528 1358
1529 /* Each stack pointer address is followed by the size of the 1359 /* Each stack pointer address is followed by the size of the
1530 data in use in that stack, in bytes. This used to be a 1360 data in use in that stack, in bytes. This used to be a
1531 conditional around just the two extra args, but that might 1361 conditional around just the two extra args, but that might
1532 be undefined if yyoverflow is a macro. */ 1362 be undefined if yyoverflow is a macro. */
1533 yyoverflow (YY_("memory exhausted"), 1363 yyoverflow (YY_("memory exhausted"),
1534 &yyss1, yysize * sizeof (*yyssp), 1364 &yyss1, yysize * sizeof (*yyssp),
1535 &yyvs1, yysize * sizeof (*yyvsp), 1365 &yyvs1, yysize * sizeof (*yyvsp),
1536 &yystacksize); 1366 &yystacksize);
1537 1367
1538 yyss = yyss1; 1368 yyss = yyss1;
1539 yyvs = yyvs1; 1369 yyvs = yyvs1;
1540 } 1370 }
1541#else /* no yyoverflow */ 1371#else /* no yyoverflow */
1542# ifndef YYSTACK_RELOCATE 1372# ifndef YYSTACK_RELOCATE
@@ -1544,22 +1374,22 @@ yyparse ()
1544# else 1374# else
1545 /* Extend the stack our own way. */ 1375 /* Extend the stack our own way. */
1546 if (YYMAXDEPTH <= yystacksize) 1376 if (YYMAXDEPTH <= yystacksize)
1547 goto yyexhaustedlab; 1377 goto yyexhaustedlab;
1548 yystacksize *= 2; 1378 yystacksize *= 2;
1549 if (YYMAXDEPTH < yystacksize) 1379 if (YYMAXDEPTH < yystacksize)
1550 yystacksize = YYMAXDEPTH; 1380 yystacksize = YYMAXDEPTH;
1551 1381
1552 { 1382 {
1553 yytype_int16 *yyss1 = yyss; 1383 yytype_int16 *yyss1 = yyss;
1554 union yyalloc *yyptr = 1384 union yyalloc *yyptr =
1555 (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); 1385 (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
1556 if (! yyptr) 1386 if (! yyptr)
1557 goto yyexhaustedlab; 1387 goto yyexhaustedlab;
1558 YYSTACK_RELOCATE (yyss_alloc, yyss); 1388 YYSTACK_RELOCATE (yyss_alloc, yyss);
1559 YYSTACK_RELOCATE (yyvs_alloc, yyvs); 1389 YYSTACK_RELOCATE (yyvs_alloc, yyvs);
1560# undef YYSTACK_RELOCATE 1390# undef YYSTACK_RELOCATE
1561 if (yyss1 != yyssa) 1391 if (yyss1 != yyssa)
1562 YYSTACK_FREE (yyss1); 1392 YYSTACK_FREE (yyss1);
1563 } 1393 }
1564# endif 1394# endif
1565#endif /* no yyoverflow */ 1395#endif /* no yyoverflow */
@@ -1568,10 +1398,10 @@ yyparse ()
1568 yyvsp = yyvs + yysize - 1; 1398 yyvsp = yyvs + yysize - 1;
1569 1399
1570 YYDPRINTF ((stderr, "Stack size increased to %lu\n", 1400 YYDPRINTF ((stderr, "Stack size increased to %lu\n",
1571 (unsigned long int) yystacksize)); 1401 (unsigned long int) yystacksize));
1572 1402
1573 if (yyss + yystacksize - 1 <= yyssp) 1403 if (yyss + yystacksize - 1 <= yyssp)
1574 YYABORT; 1404 YYABORT;
1575 } 1405 }
1576 1406
1577 YYDPRINTF ((stderr, "Entering state %d\n", yystate)); 1407 YYDPRINTF ((stderr, "Entering state %d\n", yystate));
@@ -1600,7 +1430,7 @@ yybackup:
1600 if (yychar == YYEMPTY) 1430 if (yychar == YYEMPTY)
1601 { 1431 {
1602 YYDPRINTF ((stderr, "Reading a token: ")); 1432 YYDPRINTF ((stderr, "Reading a token: "));
1603 yychar = YYLEX; 1433 yychar = yylex ();
1604 } 1434 }
1605 1435
1606 if (yychar <= YYEOF) 1436 if (yychar <= YYEOF)
@@ -1665,7 +1495,7 @@ yyreduce:
1665 yylen = yyr2[yyn]; 1495 yylen = yyr2[yyn];
1666 1496
1667 /* If YYLEN is nonzero, implement the default value of the action: 1497 /* If YYLEN is nonzero, implement the default value of the action:
1668 `$$ = $1'. 1498 '$$ = $1'.
1669 1499
1670 Otherwise, the following line sets YYVAL to garbage. 1500 Otherwise, the following line sets YYVAL to garbage.
1671 This behavior is undocumented and Bison 1501 This behavior is undocumented and Bison
@@ -1679,483 +1509,560 @@ yyreduce:
1679 switch (yyn) 1509 switch (yyn)
1680 { 1510 {
1681 case 4: 1511 case 4:
1682 1512#line 129 "parse.y" /* yacc.c:1646 */
1683 { is_typedef = 0; is_extern = 0; current_name = NULL; decl_spec = NULL; } 1513 { is_typedef = 0; is_extern = 0; current_name = NULL; decl_spec = NULL; }
1514#line 1515 "parse.tab.c" /* yacc.c:1646 */
1684 break; 1515 break;
1685 1516
1686 case 5: 1517 case 5:
1687 1518#line 131 "parse.y" /* yacc.c:1646 */
1688 { free_list(*(yyvsp[(2) - (2)]), NULL); *(yyvsp[(2) - (2)]) = NULL; } 1519 { free_list(*(yyvsp[0]), NULL); *(yyvsp[0]) = NULL; }
1520#line 1521 "parse.tab.c" /* yacc.c:1646 */
1689 break; 1521 break;
1690 1522
1691 case 6: 1523 case 6:
1692 1524#line 135 "parse.y" /* yacc.c:1646 */
1693 { is_typedef = 1; } 1525 { is_typedef = 1; }
1526#line 1527 "parse.tab.c" /* yacc.c:1646 */
1694 break; 1527 break;
1695 1528
1696 case 7: 1529 case 7:
1697 1530#line 136 "parse.y" /* yacc.c:1646 */
1698 { (yyval) = (yyvsp[(4) - (4)]); } 1531 { (yyval) = (yyvsp[0]); }
1532#line 1533 "parse.tab.c" /* yacc.c:1646 */
1699 break; 1533 break;
1700 1534
1701 case 8: 1535 case 8:
1702 1536#line 137 "parse.y" /* yacc.c:1646 */
1703 { is_typedef = 1; } 1537 { is_typedef = 1; }
1538#line 1539 "parse.tab.c" /* yacc.c:1646 */
1704 break; 1539 break;
1705 1540
1706 case 9: 1541 case 9:
1707 1542#line 138 "parse.y" /* yacc.c:1646 */
1708 { (yyval) = (yyvsp[(3) - (3)]); } 1543 { (yyval) = (yyvsp[0]); }
1544#line 1545 "parse.tab.c" /* yacc.c:1646 */
1709 break; 1545 break;
1710 1546
1711 case 14: 1547 case 14:
1712 1548#line 143 "parse.y" /* yacc.c:1646 */
1713 { (yyval) = (yyvsp[(2) - (2)]); } 1549 { (yyval) = (yyvsp[0]); }
1550#line 1551 "parse.tab.c" /* yacc.c:1646 */
1714 break; 1551 break;
1715 1552
1716 case 15: 1553 case 15:
1717 1554#line 144 "parse.y" /* yacc.c:1646 */
1718 { (yyval) = (yyvsp[(2) - (2)]); } 1555 { (yyval) = (yyvsp[0]); }
1556#line 1557 "parse.tab.c" /* yacc.c:1646 */
1719 break; 1557 break;
1720 1558
1721 case 16: 1559 case 16:
1722 1560#line 149 "parse.y" /* yacc.c:1646 */
1723 { if (current_name) { 1561 { if (current_name) {
1724 struct string_list *decl = (*(yyvsp[(3) - (3)]))->next; 1562 struct string_list *decl = (*(yyvsp[0]))->next;
1725 (*(yyvsp[(3) - (3)]))->next = NULL; 1563 (*(yyvsp[0]))->next = NULL;
1726 add_symbol(current_name, 1564 add_symbol(current_name,
1727 is_typedef ? SYM_TYPEDEF : SYM_NORMAL, 1565 is_typedef ? SYM_TYPEDEF : SYM_NORMAL,
1728 decl, is_extern); 1566 decl, is_extern);
1729 current_name = NULL; 1567 current_name = NULL;
1730 } 1568 }
1731 (yyval) = (yyvsp[(3) - (3)]); 1569 (yyval) = (yyvsp[0]);
1732 } 1570 }
1571#line 1572 "parse.tab.c" /* yacc.c:1646 */
1733 break; 1572 break;
1734 1573
1735 case 17: 1574 case 17:
1736 1575#line 162 "parse.y" /* yacc.c:1646 */
1737 { (yyval) = NULL; } 1576 { (yyval) = NULL; }
1577#line 1578 "parse.tab.c" /* yacc.c:1646 */
1738 break; 1578 break;
1739 1579
1740 case 19: 1580 case 19:
1741 1581#line 168 "parse.y" /* yacc.c:1646 */
1742 { struct string_list *decl = *(yyvsp[(1) - (1)]); 1582 { struct string_list *decl = *(yyvsp[0]);
1743 *(yyvsp[(1) - (1)]) = NULL; 1583 *(yyvsp[0]) = NULL;
1744 add_symbol(current_name, 1584 add_symbol(current_name,
1745 is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern); 1585 is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern);
1746 current_name = NULL; 1586 current_name = NULL;
1747 (yyval) = (yyvsp[(1) - (1)]); 1587 (yyval) = (yyvsp[0]);
1748 } 1588 }
1589#line 1590 "parse.tab.c" /* yacc.c:1646 */
1749 break; 1590 break;
1750 1591
1751 case 20: 1592 case 20:
1752 1593#line 176 "parse.y" /* yacc.c:1646 */
1753 { struct string_list *decl = *(yyvsp[(3) - (3)]); 1594 { struct string_list *decl = *(yyvsp[0]);
1754 *(yyvsp[(3) - (3)]) = NULL; 1595 *(yyvsp[0]) = NULL;
1755 free_list(*(yyvsp[(2) - (3)]), NULL); 1596 free_list(*(yyvsp[-1]), NULL);
1756 *(yyvsp[(2) - (3)]) = decl_spec; 1597 *(yyvsp[-1]) = decl_spec;
1757 add_symbol(current_name, 1598 add_symbol(current_name,
1758 is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern); 1599 is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern);
1759 current_name = NULL; 1600 current_name = NULL;
1760 (yyval) = (yyvsp[(3) - (3)]); 1601 (yyval) = (yyvsp[0]);
1761 } 1602 }
1603#line 1604 "parse.tab.c" /* yacc.c:1646 */
1762 break; 1604 break;
1763 1605
1764 case 21: 1606 case 21:
1765 1607#line 189 "parse.y" /* yacc.c:1646 */
1766 { (yyval) = (yyvsp[(4) - (4)]) ? (yyvsp[(4) - (4)]) : (yyvsp[(3) - (4)]) ? (yyvsp[(3) - (4)]) : (yyvsp[(2) - (4)]) ? (yyvsp[(2) - (4)]) : (yyvsp[(1) - (4)]); } 1608 { (yyval) = (yyvsp[0]) ? (yyvsp[0]) : (yyvsp[-1]) ? (yyvsp[-1]) : (yyvsp[-2]) ? (yyvsp[-2]) : (yyvsp[-3]); }
1609#line 1610 "parse.tab.c" /* yacc.c:1646 */
1767 break; 1610 break;
1768 1611
1769 case 22: 1612 case 22:
1770 1613#line 194 "parse.y" /* yacc.c:1646 */
1771 { decl_spec = NULL; } 1614 { decl_spec = NULL; }
1615#line 1616 "parse.tab.c" /* yacc.c:1646 */
1772 break; 1616 break;
1773 1617
1774 case 24: 1618 case 24:
1775 1619#line 199 "parse.y" /* yacc.c:1646 */
1776 { decl_spec = *(yyvsp[(1) - (1)]); } 1620 { decl_spec = *(yyvsp[0]); }
1621#line 1622 "parse.tab.c" /* yacc.c:1646 */
1777 break; 1622 break;
1778 1623
1779 case 25: 1624 case 25:
1780 1625#line 200 "parse.y" /* yacc.c:1646 */
1781 { decl_spec = *(yyvsp[(2) - (2)]); } 1626 { decl_spec = *(yyvsp[0]); }
1627#line 1628 "parse.tab.c" /* yacc.c:1646 */
1782 break; 1628 break;
1783 1629
1784 case 26: 1630 case 26:
1785 1631#line 205 "parse.y" /* yacc.c:1646 */
1786 { /* Version 2 checksumming ignores storage class, as that 1632 { /* Version 2 checksumming ignores storage class, as that
1787 is really irrelevant to the linkage. */ 1633 is really irrelevant to the linkage. */
1788 remove_node((yyvsp[(1) - (1)])); 1634 remove_node((yyvsp[0]));
1789 (yyval) = (yyvsp[(1) - (1)]); 1635 (yyval) = (yyvsp[0]);
1790 } 1636 }
1637#line 1638 "parse.tab.c" /* yacc.c:1646 */
1791 break; 1638 break;
1792 1639
1793 case 31: 1640 case 31:
1794 1641#line 217 "parse.y" /* yacc.c:1646 */
1795 { is_extern = 1; (yyval) = (yyvsp[(1) - (1)]); } 1642 { is_extern = 1; (yyval) = (yyvsp[0]); }
1643#line 1644 "parse.tab.c" /* yacc.c:1646 */
1796 break; 1644 break;
1797 1645
1798 case 32: 1646 case 32:
1799 1647#line 218 "parse.y" /* yacc.c:1646 */
1800 { is_extern = 0; (yyval) = (yyvsp[(1) - (1)]); } 1648 { is_extern = 0; (yyval) = (yyvsp[0]); }
1649#line 1650 "parse.tab.c" /* yacc.c:1646 */
1801 break; 1650 break;
1802 1651
1803 case 37: 1652 case 37:
1804 1653#line 230 "parse.y" /* yacc.c:1646 */
1805 { remove_node((yyvsp[(1) - (2)])); (*(yyvsp[(2) - (2)]))->tag = SYM_STRUCT; (yyval) = (yyvsp[(2) - (2)]); } 1654 { remove_node((yyvsp[-1])); (*(yyvsp[0]))->tag = SYM_STRUCT; (yyval) = (yyvsp[0]); }
1655#line 1656 "parse.tab.c" /* yacc.c:1646 */
1806 break; 1656 break;
1807 1657
1808 case 38: 1658 case 38:
1809 1659#line 232 "parse.y" /* yacc.c:1646 */
1810 { remove_node((yyvsp[(1) - (2)])); (*(yyvsp[(2) - (2)]))->tag = SYM_UNION; (yyval) = (yyvsp[(2) - (2)]); } 1660 { remove_node((yyvsp[-1])); (*(yyvsp[0]))->tag = SYM_UNION; (yyval) = (yyvsp[0]); }
1661#line 1662 "parse.tab.c" /* yacc.c:1646 */
1811 break; 1662 break;
1812 1663
1813 case 39: 1664 case 39:
1814 1665#line 234 "parse.y" /* yacc.c:1646 */
1815 { remove_node((yyvsp[(1) - (2)])); (*(yyvsp[(2) - (2)]))->tag = SYM_ENUM; (yyval) = (yyvsp[(2) - (2)]); } 1666 { remove_node((yyvsp[-1])); (*(yyvsp[0]))->tag = SYM_ENUM; (yyval) = (yyvsp[0]); }
1667#line 1668 "parse.tab.c" /* yacc.c:1646 */
1816 break; 1668 break;
1817 1669
1818 case 40: 1670 case 40:
1819 1671#line 238 "parse.y" /* yacc.c:1646 */
1820 { record_compound((yyvsp[(1) - (3)]), (yyvsp[(2) - (3)]), (yyvsp[(3) - (3)]), SYM_STRUCT); (yyval) = (yyvsp[(3) - (3)]); } 1672 { record_compound((yyvsp[-2]), (yyvsp[-1]), (yyvsp[0]), SYM_STRUCT); (yyval) = (yyvsp[0]); }
1673#line 1674 "parse.tab.c" /* yacc.c:1646 */
1821 break; 1674 break;
1822 1675
1823 case 41: 1676 case 41:
1824 1677#line 240 "parse.y" /* yacc.c:1646 */
1825 { record_compound((yyvsp[(1) - (3)]), (yyvsp[(2) - (3)]), (yyvsp[(3) - (3)]), SYM_UNION); (yyval) = (yyvsp[(3) - (3)]); } 1678 { record_compound((yyvsp[-2]), (yyvsp[-1]), (yyvsp[0]), SYM_UNION); (yyval) = (yyvsp[0]); }
1679#line 1680 "parse.tab.c" /* yacc.c:1646 */
1826 break; 1680 break;
1827 1681
1828 case 42: 1682 case 42:
1829 1683#line 242 "parse.y" /* yacc.c:1646 */
1830 { record_compound((yyvsp[(1) - (3)]), (yyvsp[(2) - (3)]), (yyvsp[(3) - (3)]), SYM_ENUM); (yyval) = (yyvsp[(3) - (3)]); } 1684 { record_compound((yyvsp[-2]), (yyvsp[-1]), (yyvsp[0]), SYM_ENUM); (yyval) = (yyvsp[0]); }
1685#line 1686 "parse.tab.c" /* yacc.c:1646 */
1831 break; 1686 break;
1832 1687
1833 case 43: 1688 case 43:
1834 1689#line 247 "parse.y" /* yacc.c:1646 */
1835 { add_symbol(NULL, SYM_ENUM, NULL, 0); (yyval) = (yyvsp[(2) - (2)]); } 1690 { add_symbol(NULL, SYM_ENUM, NULL, 0); (yyval) = (yyvsp[0]); }
1691#line 1692 "parse.tab.c" /* yacc.c:1646 */
1836 break; 1692 break;
1837 1693
1838 case 44: 1694 case 44:
1839 1695#line 249 "parse.y" /* yacc.c:1646 */
1840 { (yyval) = (yyvsp[(2) - (2)]); } 1696 { (yyval) = (yyvsp[0]); }
1697#line 1698 "parse.tab.c" /* yacc.c:1646 */
1841 break; 1698 break;
1842 1699
1843 case 45: 1700 case 45:
1844 1701#line 250 "parse.y" /* yacc.c:1646 */
1845 { (yyval) = (yyvsp[(2) - (2)]); } 1702 { (yyval) = (yyvsp[0]); }
1703#line 1704 "parse.tab.c" /* yacc.c:1646 */
1846 break; 1704 break;
1847 1705
1848 case 56: 1706 case 56:
1849 1707#line 264 "parse.y" /* yacc.c:1646 */
1850 { (*(yyvsp[(1) - (1)]))->tag = SYM_TYPEDEF; (yyval) = (yyvsp[(1) - (1)]); } 1708 { (*(yyvsp[0]))->tag = SYM_TYPEDEF; (yyval) = (yyvsp[0]); }
1709#line 1710 "parse.tab.c" /* yacc.c:1646 */
1851 break; 1710 break;
1852 1711
1853 case 57: 1712 case 57:
1854 1713#line 269 "parse.y" /* yacc.c:1646 */
1855 { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); } 1714 { (yyval) = (yyvsp[0]) ? (yyvsp[0]) : (yyvsp[-1]); }
1715#line 1716 "parse.tab.c" /* yacc.c:1646 */
1856 break; 1716 break;
1857 1717
1858 case 58: 1718 case 58:
1859 1719#line 273 "parse.y" /* yacc.c:1646 */
1860 { (yyval) = NULL; } 1720 { (yyval) = NULL; }
1721#line 1722 "parse.tab.c" /* yacc.c:1646 */
1861 break; 1722 break;
1862 1723
1863 case 61: 1724 case 61:
1864 1725#line 279 "parse.y" /* yacc.c:1646 */
1865 { (yyval) = (yyvsp[(2) - (2)]); } 1726 { (yyval) = (yyvsp[0]); }
1727#line 1728 "parse.tab.c" /* yacc.c:1646 */
1866 break; 1728 break;
1867 1729
1868 case 65: 1730 case 65:
1869 1731#line 285 "parse.y" /* yacc.c:1646 */
1870 { /* restrict has no effect in prototypes so ignore it */ 1732 { /* restrict has no effect in prototypes so ignore it */
1871 remove_node((yyvsp[(1) - (1)])); 1733 remove_node((yyvsp[0]));
1872 (yyval) = (yyvsp[(1) - (1)]); 1734 (yyval) = (yyvsp[0]);
1873 } 1735 }
1736#line 1737 "parse.tab.c" /* yacc.c:1646 */
1874 break; 1737 break;
1875 1738
1876 case 66: 1739 case 66:
1877 1740#line 292 "parse.y" /* yacc.c:1646 */
1878 { (yyval) = (yyvsp[(2) - (2)]); } 1741 { (yyval) = (yyvsp[0]); }
1742#line 1743 "parse.tab.c" /* yacc.c:1646 */
1879 break; 1743 break;
1880 1744
1881 case 68: 1745 case 68:
1882 1746#line 298 "parse.y" /* yacc.c:1646 */
1883 { if (current_name != NULL) { 1747 { if (current_name != NULL) {
1884 error_with_pos("unexpected second declaration name"); 1748 error_with_pos("unexpected second declaration name");
1885 YYERROR; 1749 YYERROR;
1886 } else { 1750 } else {
1887 current_name = (*(yyvsp[(1) - (1)]))->string; 1751 current_name = (*(yyvsp[0]))->string;
1888 (yyval) = (yyvsp[(1) - (1)]); 1752 (yyval) = (yyvsp[0]);
1889 } 1753 }
1890 } 1754 }
1755#line 1756 "parse.tab.c" /* yacc.c:1646 */
1891 break; 1756 break;
1892 1757
1893 case 69: 1758 case 69:
1894 1759#line 307 "parse.y" /* yacc.c:1646 */
1895 { if (current_name != NULL) { 1760 { if (current_name != NULL) {
1896 error_with_pos("unexpected second declaration name"); 1761 error_with_pos("unexpected second declaration name");
1897 YYERROR; 1762 YYERROR;
1898 } else { 1763 } else {
1899 current_name = (*(yyvsp[(1) - (1)]))->string; 1764 current_name = (*(yyvsp[0]))->string;
1900 (yyval) = (yyvsp[(1) - (1)]); 1765 (yyval) = (yyvsp[0]);
1901 } 1766 }
1902 } 1767 }
1768#line 1769 "parse.tab.c" /* yacc.c:1646 */
1903 break; 1769 break;
1904 1770
1905 case 70: 1771 case 70:
1906 1772#line 316 "parse.y" /* yacc.c:1646 */
1907 { (yyval) = (yyvsp[(4) - (4)]); } 1773 { (yyval) = (yyvsp[0]); }
1774#line 1775 "parse.tab.c" /* yacc.c:1646 */
1908 break; 1775 break;
1909 1776
1910 case 71: 1777 case 71:
1911 1778#line 318 "parse.y" /* yacc.c:1646 */
1912 { (yyval) = (yyvsp[(4) - (4)]); } 1779 { (yyval) = (yyvsp[0]); }
1780#line 1781 "parse.tab.c" /* yacc.c:1646 */
1913 break; 1781 break;
1914 1782
1915 case 72: 1783 case 72:
1916 1784#line 320 "parse.y" /* yacc.c:1646 */
1917 { (yyval) = (yyvsp[(2) - (2)]); } 1785 { (yyval) = (yyvsp[0]); }
1786#line 1787 "parse.tab.c" /* yacc.c:1646 */
1918 break; 1787 break;
1919 1788
1920 case 73: 1789 case 73:
1921 1790#line 322 "parse.y" /* yacc.c:1646 */
1922 { (yyval) = (yyvsp[(3) - (3)]); } 1791 { (yyval) = (yyvsp[0]); }
1792#line 1793 "parse.tab.c" /* yacc.c:1646 */
1923 break; 1793 break;
1924 1794
1925 case 74: 1795 case 74:
1926 1796#line 328 "parse.y" /* yacc.c:1646 */
1927 { (yyval) = (yyvsp[(3) - (3)]); } 1797 { (yyval) = (yyvsp[0]); }
1798#line 1799 "parse.tab.c" /* yacc.c:1646 */
1928 break; 1799 break;
1929 1800
1930 case 75: 1801 case 78:
1931 1802#line 336 "parse.y" /* yacc.c:1646 */
1932 { (yyval) = (yyvsp[(2) - (2)]); } 1803 { (yyval) = (yyvsp[0]); }
1804#line 1805 "parse.tab.c" /* yacc.c:1646 */
1933 break; 1805 break;
1934 1806
1935 case 79: 1807 case 79:
1936 1808#line 338 "parse.y" /* yacc.c:1646 */
1937 { (yyval) = (yyvsp[(4) - (4)]); } 1809 { (yyval) = (yyvsp[0]); }
1810#line 1811 "parse.tab.c" /* yacc.c:1646 */
1938 break; 1811 break;
1939 1812
1940 case 80: 1813 case 80:
1941 1814#line 340 "parse.y" /* yacc.c:1646 */
1942 { (yyval) = (yyvsp[(4) - (4)]); } 1815 { (yyval) = (yyvsp[0]); }
1816#line 1817 "parse.tab.c" /* yacc.c:1646 */
1943 break; 1817 break;
1944 1818
1945 case 81: 1819 case 81:
1946 1820#line 342 "parse.y" /* yacc.c:1646 */
1947 { (yyval) = (yyvsp[(2) - (2)]); } 1821 { (yyval) = (yyvsp[0]); }
1822#line 1823 "parse.tab.c" /* yacc.c:1646 */
1948 break; 1823 break;
1949 1824
1950 case 82: 1825 case 82:
1951 1826#line 344 "parse.y" /* yacc.c:1646 */
1952 { (yyval) = (yyvsp[(3) - (3)]); } 1827 { (yyval) = (yyvsp[0]); }
1828#line 1829 "parse.tab.c" /* yacc.c:1646 */
1953 break; 1829 break;
1954 1830
1955 case 83: 1831 case 83:
1956 1832#line 348 "parse.y" /* yacc.c:1646 */
1957 { (yyval) = (yyvsp[(3) - (3)]); } 1833 { (yyval) = (yyvsp[0]); }
1834#line 1835 "parse.tab.c" /* yacc.c:1646 */
1958 break; 1835 break;
1959 1836
1960 case 84: 1837 case 85:
1961 1838#line 350 "parse.y" /* yacc.c:1646 */
1962 { (yyval) = (yyvsp[(2) - (2)]); } 1839 { (yyval) = (yyvsp[0]); }
1840#line 1841 "parse.tab.c" /* yacc.c:1646 */
1963 break; 1841 break;
1964 1842
1965 case 86: 1843 case 86:
1966 1844#line 354 "parse.y" /* yacc.c:1646 */
1967 { (yyval) = (yyvsp[(3) - (3)]); } 1845 { (yyval) = NULL; }
1846#line 1847 "parse.tab.c" /* yacc.c:1646 */
1968 break; 1847 break;
1969 1848
1970 case 87: 1849 case 89:
1971 1850#line 361 "parse.y" /* yacc.c:1646 */
1972 { (yyval) = NULL; } 1851 { (yyval) = (yyvsp[0]); }
1852#line 1853 "parse.tab.c" /* yacc.c:1646 */
1973 break; 1853 break;
1974 1854
1975 case 90: 1855 case 90:
1976 1856#line 366 "parse.y" /* yacc.c:1646 */
1977 { (yyval) = (yyvsp[(3) - (3)]); } 1857 { (yyval) = (yyvsp[0]) ? (yyvsp[0]) : (yyvsp[-1]); }
1858#line 1859 "parse.tab.c" /* yacc.c:1646 */
1978 break; 1859 break;
1979 1860
1980 case 91: 1861 case 91:
1981 1862#line 371 "parse.y" /* yacc.c:1646 */
1982 { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); } 1863 { (yyval) = (yyvsp[0]) ? (yyvsp[0]) : (yyvsp[-1]); }
1864#line 1865 "parse.tab.c" /* yacc.c:1646 */
1983 break; 1865 break;
1984 1866
1985 case 92: 1867 case 93:
1986 1868#line 376 "parse.y" /* yacc.c:1646 */
1987 { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); } 1869 { (yyval) = NULL; }
1870#line 1871 "parse.tab.c" /* yacc.c:1646 */
1988 break; 1871 break;
1989 1872
1990 case 94: 1873 case 94:
1991 1874#line 378 "parse.y" /* yacc.c:1646 */
1992 { (yyval) = NULL; } 1875 { /* For version 2 checksums, we don't want to remember
1876 private parameter names. */
1877 remove_node((yyvsp[0]));
1878 (yyval) = (yyvsp[0]);
1879 }
1880#line 1881 "parse.tab.c" /* yacc.c:1646 */
1993 break; 1881 break;
1994 1882
1995 case 95: 1883 case 95:
1996 1884#line 386 "parse.y" /* yacc.c:1646 */
1997 { /* For version 2 checksums, we don't want to remember 1885 { remove_node((yyvsp[0]));
1998 private parameter names. */ 1886 (yyval) = (yyvsp[0]);
1999 remove_node((yyvsp[(1) - (1)]));
2000 (yyval) = (yyvsp[(1) - (1)]);
2001 } 1887 }
1888#line 1889 "parse.tab.c" /* yacc.c:1646 */
2002 break; 1889 break;
2003 1890
2004 case 96: 1891 case 96:
2005 1892#line 390 "parse.y" /* yacc.c:1646 */
2006 { remove_node((yyvsp[(1) - (1)])); 1893 { (yyval) = (yyvsp[0]); }
2007 (yyval) = (yyvsp[(1) - (1)]); 1894#line 1895 "parse.tab.c" /* yacc.c:1646 */
2008 }
2009 break; 1895 break;
2010 1896
2011 case 97: 1897 case 97:
2012 1898#line 392 "parse.y" /* yacc.c:1646 */
2013 { (yyval) = (yyvsp[(4) - (4)]); } 1899 { (yyval) = (yyvsp[0]); }
1900#line 1901 "parse.tab.c" /* yacc.c:1646 */
2014 break; 1901 break;
2015 1902
2016 case 98: 1903 case 98:
2017 1904#line 394 "parse.y" /* yacc.c:1646 */
2018 { (yyval) = (yyvsp[(4) - (4)]); } 1905 { (yyval) = (yyvsp[0]); }
1906#line 1907 "parse.tab.c" /* yacc.c:1646 */
2019 break; 1907 break;
2020 1908
2021 case 99: 1909 case 99:
2022 1910#line 396 "parse.y" /* yacc.c:1646 */
2023 { (yyval) = (yyvsp[(2) - (2)]); } 1911 { (yyval) = (yyvsp[0]); }
1912#line 1913 "parse.tab.c" /* yacc.c:1646 */
2024 break; 1913 break;
2025 1914
2026 case 100: 1915 case 100:
2027 1916#line 398 "parse.y" /* yacc.c:1646 */
2028 { (yyval) = (yyvsp[(3) - (3)]); } 1917 { (yyval) = (yyvsp[0]); }
1918#line 1919 "parse.tab.c" /* yacc.c:1646 */
2029 break; 1919 break;
2030 1920
2031 case 101: 1921 case 101:
2032 1922#line 403 "parse.y" /* yacc.c:1646 */
2033 { (yyval) = (yyvsp[(3) - (3)]); } 1923 { struct string_list *decl = *(yyvsp[-1]);
2034 break; 1924 *(yyvsp[-1]) = NULL;
2035
2036 case 102:
2037
2038 { struct string_list *decl = *(yyvsp[(2) - (3)]);
2039 *(yyvsp[(2) - (3)]) = NULL;
2040 add_symbol(current_name, SYM_NORMAL, decl, is_extern); 1925 add_symbol(current_name, SYM_NORMAL, decl, is_extern);
2041 (yyval) = (yyvsp[(3) - (3)]); 1926 (yyval) = (yyvsp[0]);
2042 } 1927 }
1928#line 1929 "parse.tab.c" /* yacc.c:1646 */
2043 break; 1929 break;
2044 1930
2045 case 103: 1931 case 102:
2046 1932#line 411 "parse.y" /* yacc.c:1646 */
2047 { (yyval) = NULL; } 1933 { (yyval) = NULL; }
1934#line 1935 "parse.tab.c" /* yacc.c:1646 */
2048 break; 1935 break;
2049 1936
2050 case 105: 1937 case 104:
1938#line 418 "parse.y" /* yacc.c:1646 */
1939 { remove_list((yyvsp[0]), &(*(yyvsp[-1]))->next); (yyval) = (yyvsp[0]); }
1940#line 1941 "parse.tab.c" /* yacc.c:1646 */
1941 break;
2051 1942
2052 { remove_list((yyvsp[(2) - (2)]), &(*(yyvsp[(1) - (2)]))->next); (yyval) = (yyvsp[(2) - (2)]); } 1943 case 105:
1944#line 422 "parse.y" /* yacc.c:1646 */
1945 { (yyval) = (yyvsp[0]); }
1946#line 1947 "parse.tab.c" /* yacc.c:1646 */
2053 break; 1947 break;
2054 1948
2055 case 106: 1949 case 106:
2056 1950#line 423 "parse.y" /* yacc.c:1646 */
2057 { (yyval) = (yyvsp[(3) - (3)]); } 1951 { (yyval) = (yyvsp[0]); }
1952#line 1953 "parse.tab.c" /* yacc.c:1646 */
2058 break; 1953 break;
2059 1954
2060 case 107: 1955 case 107:
2061 1956#line 427 "parse.y" /* yacc.c:1646 */
2062 { (yyval) = (yyvsp[(3) - (3)]); } 1957 { (yyval) = NULL; }
1958#line 1959 "parse.tab.c" /* yacc.c:1646 */
2063 break; 1959 break;
2064 1960
2065 case 108: 1961 case 110:
2066 1962#line 433 "parse.y" /* yacc.c:1646 */
2067 { (yyval) = NULL; } 1963 { (yyval) = (yyvsp[0]); }
1964#line 1965 "parse.tab.c" /* yacc.c:1646 */
2068 break; 1965 break;
2069 1966
2070 case 111: 1967 case 111:
2071 1968#line 438 "parse.y" /* yacc.c:1646 */
2072 { (yyval) = (yyvsp[(2) - (2)]); } 1969 { (yyval) = (yyvsp[0]); }
1970#line 1971 "parse.tab.c" /* yacc.c:1646 */
2073 break; 1971 break;
2074 1972
2075 case 112: 1973 case 112:
2076 1974#line 440 "parse.y" /* yacc.c:1646 */
2077 { (yyval) = (yyvsp[(3) - (3)]); } 1975 { (yyval) = (yyvsp[0]); }
1976#line 1977 "parse.tab.c" /* yacc.c:1646 */
2078 break; 1977 break;
2079 1978
2080 case 113: 1979 case 113:
2081 1980#line 444 "parse.y" /* yacc.c:1646 */
2082 { (yyval) = (yyvsp[(2) - (2)]); } 1981 { (yyval) = NULL; }
1982#line 1983 "parse.tab.c" /* yacc.c:1646 */
2083 break; 1983 break;
2084 1984
2085 case 114: 1985 case 116:
2086 1986#line 450 "parse.y" /* yacc.c:1646 */
2087 { (yyval) = NULL; } 1987 { (yyval) = (yyvsp[0]); }
1988#line 1989 "parse.tab.c" /* yacc.c:1646 */
2088 break; 1989 break;
2089 1990
2090 case 117: 1991 case 117:
2091 1992#line 454 "parse.y" /* yacc.c:1646 */
2092 { (yyval) = (yyvsp[(3) - (3)]); } 1993 { (yyval) = (yyvsp[0]) ? (yyvsp[0]) : (yyvsp[-1]); }
1994#line 1995 "parse.tab.c" /* yacc.c:1646 */
2093 break; 1995 break;
2094 1996
2095 case 118: 1997 case 118:
2096 1998#line 455 "parse.y" /* yacc.c:1646 */
2097 { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); } 1999 { (yyval) = (yyvsp[0]); }
2000#line 2001 "parse.tab.c" /* yacc.c:1646 */
2098 break; 2001 break;
2099 2002
2100 case 119: 2003 case 120:
2101 2004#line 460 "parse.y" /* yacc.c:1646 */
2102 { (yyval) = (yyvsp[(2) - (2)]); } 2005 { (yyval) = (yyvsp[0]); }
2006#line 2007 "parse.tab.c" /* yacc.c:1646 */
2103 break; 2007 break;
2104 2008
2105 case 121: 2009 case 121:
2106 2010#line 464 "parse.y" /* yacc.c:1646 */
2107 { (yyval) = (yyvsp[(2) - (2)]); }
2108 break;
2109
2110 case 122:
2111
2112 { (yyval) = NULL; } 2011 { (yyval) = NULL; }
2012#line 2013 "parse.tab.c" /* yacc.c:1646 */
2113 break; 2013 break;
2114 2014
2115 case 124: 2015 case 123:
2116 2016#line 469 "parse.y" /* yacc.c:1646 */
2117 { (yyval) = (yyvsp[(3) - (3)]); } 2017 { (yyval) = (yyvsp[0]); }
2018#line 2019 "parse.tab.c" /* yacc.c:1646 */
2118 break; 2019 break;
2119 2020
2120 case 125: 2021 case 124:
2121 2022#line 470 "parse.y" /* yacc.c:1646 */
2122 { (yyval) = (yyvsp[(4) - (4)]); } 2023 { (yyval) = (yyvsp[0]); }
2024#line 2025 "parse.tab.c" /* yacc.c:1646 */
2123 break; 2025 break;
2124 2026
2125 case 128: 2027 case 127:
2126 2028#line 479 "parse.y" /* yacc.c:1646 */
2127 { 2029 {
2128 const char *name = strdup((*(yyvsp[(1) - (1)]))->string); 2030 const char *name = strdup((*(yyvsp[0]))->string);
2129 add_symbol(name, SYM_ENUM_CONST, NULL, 0); 2031 add_symbol(name, SYM_ENUM_CONST, NULL, 0);
2130 } 2032 }
2033#line 2034 "parse.tab.c" /* yacc.c:1646 */
2131 break; 2034 break;
2132 2035
2133 case 129: 2036 case 128:
2134 2037#line 484 "parse.y" /* yacc.c:1646 */
2135 { 2038 {
2136 const char *name = strdup((*(yyvsp[(1) - (3)]))->string); 2039 const char *name = strdup((*(yyvsp[-2]))->string);
2137 struct string_list *expr = copy_list_range(*(yyvsp[(3) - (3)]), *(yyvsp[(2) - (3)])); 2040 struct string_list *expr = copy_list_range(*(yyvsp[0]), *(yyvsp[-1]));
2138 add_symbol(name, SYM_ENUM_CONST, expr, 0); 2041 add_symbol(name, SYM_ENUM_CONST, expr, 0);
2139 } 2042 }
2043#line 2044 "parse.tab.c" /* yacc.c:1646 */
2140 break; 2044 break;
2141 2045
2142 case 130: 2046 case 129:
2143 2047#line 491 "parse.y" /* yacc.c:1646 */
2144 { (yyval) = (yyvsp[(2) - (2)]); } 2048 { (yyval) = (yyvsp[0]); }
2049#line 2050 "parse.tab.c" /* yacc.c:1646 */
2145 break; 2050 break;
2146 2051
2147 case 131: 2052 case 130:
2148 2053#line 495 "parse.y" /* yacc.c:1646 */
2149 { (yyval) = NULL; } 2054 { (yyval) = NULL; }
2055#line 2056 "parse.tab.c" /* yacc.c:1646 */
2150 break; 2056 break;
2151 2057
2152 case 133: 2058 case 132:
2153 2059#line 501 "parse.y" /* yacc.c:1646 */
2154 { export_symbol((*(yyvsp[(3) - (5)]))->string); (yyval) = (yyvsp[(5) - (5)]); } 2060 { export_symbol((*(yyvsp[-2]))->string); (yyval) = (yyvsp[0]); }
2061#line 2062 "parse.tab.c" /* yacc.c:1646 */
2155 break; 2062 break;
2156 2063
2157 2064
2158 2065#line 2066 "parse.tab.c" /* yacc.c:1646 */
2159 default: break; 2066 default: break;
2160 } 2067 }
2161 /* User semantic actions sometimes alter yychar, and that requires 2068 /* User semantic actions sometimes alter yychar, and that requires
@@ -2177,7 +2084,7 @@ yyreduce:
2177 2084
2178 *++yyvsp = yyval; 2085 *++yyvsp = yyval;
2179 2086
2180 /* Now `shift' the result of the reduction. Determine what state 2087 /* Now 'shift' the result of the reduction. Determine what state
2181 that goes to, based on the state we popped back to and the rule 2088 that goes to, based on the state we popped back to and the rule
2182 number reduced by. */ 2089 number reduced by. */
2183 2090
@@ -2192,9 +2099,9 @@ yyreduce:
2192 goto yynewstate; 2099 goto yynewstate;
2193 2100
2194 2101
2195/*------------------------------------. 2102/*--------------------------------------.
2196| yyerrlab -- here on detecting error | 2103| yyerrlab -- here on detecting error. |
2197`------------------------------------*/ 2104`--------------------------------------*/
2198yyerrlab: 2105yyerrlab:
2199 /* Make sure we have latest lookahead translation. See comments at 2106 /* Make sure we have latest lookahead translation. See comments at
2200 user semantic actions for why this is necessary. */ 2107 user semantic actions for why this is necessary. */
@@ -2245,20 +2152,20 @@ yyerrlab:
2245 if (yyerrstatus == 3) 2152 if (yyerrstatus == 3)
2246 { 2153 {
2247 /* If just tried and failed to reuse lookahead token after an 2154 /* If just tried and failed to reuse lookahead token after an
2248 error, discard it. */ 2155 error, discard it. */
2249 2156
2250 if (yychar <= YYEOF) 2157 if (yychar <= YYEOF)
2251 { 2158 {
2252 /* Return failure if at end of input. */ 2159 /* Return failure if at end of input. */
2253 if (yychar == YYEOF) 2160 if (yychar == YYEOF)
2254 YYABORT; 2161 YYABORT;
2255 } 2162 }
2256 else 2163 else
2257 { 2164 {
2258 yydestruct ("Error: discarding", 2165 yydestruct ("Error: discarding",
2259 yytoken, &yylval); 2166 yytoken, &yylval);
2260 yychar = YYEMPTY; 2167 yychar = YYEMPTY;
2261 } 2168 }
2262 } 2169 }
2263 2170
2264 /* Else will try to reuse lookahead token after shifting the error 2171 /* Else will try to reuse lookahead token after shifting the error
@@ -2277,7 +2184,7 @@ yyerrorlab:
2277 if (/*CONSTCOND*/ 0) 2184 if (/*CONSTCOND*/ 0)
2278 goto yyerrorlab; 2185 goto yyerrorlab;
2279 2186
2280 /* Do not reclaim the symbols of the rule which action triggered 2187 /* Do not reclaim the symbols of the rule whose action triggered
2281 this YYERROR. */ 2188 this YYERROR. */
2282 YYPOPSTACK (yylen); 2189 YYPOPSTACK (yylen);
2283 yylen = 0; 2190 yylen = 0;
@@ -2290,29 +2197,29 @@ yyerrorlab:
2290| yyerrlab1 -- common code for both syntax error and YYERROR. | 2197| yyerrlab1 -- common code for both syntax error and YYERROR. |
2291`-------------------------------------------------------------*/ 2198`-------------------------------------------------------------*/
2292yyerrlab1: 2199yyerrlab1:
2293 yyerrstatus = 3; /* Each real token shifted decrements this. */ 2200 yyerrstatus = 3; /* Each real token shifted decrements this. */
2294 2201
2295 for (;;) 2202 for (;;)
2296 { 2203 {
2297 yyn = yypact[yystate]; 2204 yyn = yypact[yystate];
2298 if (!yypact_value_is_default (yyn)) 2205 if (!yypact_value_is_default (yyn))
2299 { 2206 {
2300 yyn += YYTERROR; 2207 yyn += YYTERROR;
2301 if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) 2208 if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
2302 { 2209 {
2303 yyn = yytable[yyn]; 2210 yyn = yytable[yyn];
2304 if (0 < yyn) 2211 if (0 < yyn)
2305 break; 2212 break;
2306 } 2213 }
2307 } 2214 }
2308 2215
2309 /* Pop the current state because it cannot handle the error token. */ 2216 /* Pop the current state because it cannot handle the error token. */
2310 if (yyssp == yyss) 2217 if (yyssp == yyss)
2311 YYABORT; 2218 YYABORT;
2312 2219
2313 2220
2314 yydestruct ("Error: popping", 2221 yydestruct ("Error: popping",
2315 yystos[yystate], yyvsp); 2222 yystos[yystate], yyvsp);
2316 YYPOPSTACK (1); 2223 YYPOPSTACK (1);
2317 yystate = *yyssp; 2224 yystate = *yyssp;
2318 YY_STACK_PRINT (yyss, yyssp); 2225 YY_STACK_PRINT (yyss, yyssp);
@@ -2363,14 +2270,14 @@ yyreturn:
2363 yydestruct ("Cleanup: discarding lookahead", 2270 yydestruct ("Cleanup: discarding lookahead",
2364 yytoken, &yylval); 2271 yytoken, &yylval);
2365 } 2272 }
2366 /* Do not reclaim the symbols of the rule which action triggered 2273 /* Do not reclaim the symbols of the rule whose action triggered
2367 this YYABORT or YYACCEPT. */ 2274 this YYABORT or YYACCEPT. */
2368 YYPOPSTACK (yylen); 2275 YYPOPSTACK (yylen);
2369 YY_STACK_PRINT (yyss, yyssp); 2276 YY_STACK_PRINT (yyss, yyssp);
2370 while (yyssp != yyss) 2277 while (yyssp != yyss)
2371 { 2278 {
2372 yydestruct ("Cleanup: popping", 2279 yydestruct ("Cleanup: popping",
2373 yystos[*yyssp], yyvsp); 2280 yystos[*yyssp], yyvsp);
2374 YYPOPSTACK (1); 2281 YYPOPSTACK (1);
2375 } 2282 }
2376#ifndef yyoverflow 2283#ifndef yyoverflow
@@ -2381,12 +2288,9 @@ yyreturn:
2381 if (yymsg != yymsgbuf) 2288 if (yymsg != yymsgbuf)
2382 YYSTACK_FREE (yymsg); 2289 YYSTACK_FREE (yymsg);
2383#endif 2290#endif
2384 /* Make sure YYID is used. */ 2291 return yyresult;
2385 return YYID (yyresult);
2386} 2292}
2387 2293#line 505 "parse.y" /* yacc.c:1906 */
2388
2389
2390 2294
2391 2295
2392static void 2296static void
diff --git a/scripts/genksyms/parse.tab.h_shipped b/scripts/genksyms/parse.tab.h_shipped
index 4c00cef6d71d..1751bd03ad26 100644
--- a/scripts/genksyms/parse.tab.h_shipped
+++ b/scripts/genksyms/parse.tab.h_shipped
@@ -1,19 +1,19 @@
1/* A Bison parser, made by GNU Bison 2.7. */ 1/* A Bison parser, made by GNU Bison 3.0.4. */
2 2
3/* Bison interface for Yacc-like parsers in C 3/* Bison interface for Yacc-like parsers in C
4 4
5 Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc. 5 Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
6 6
7 This program is free software: you can redistribute it and/or modify 7 This program is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by 8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 3 of the License, or 9 the Free Software Foundation, either version 3 of the License, or
10 (at your option) any later version. 10 (at your option) any later version.
11 11
12 This program is distributed in the hope that it will be useful, 12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details. 15 GNU General Public License for more details.
16 16
17 You should have received a copy of the GNU General Public License 17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */ 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 19
@@ -26,93 +26,80 @@
26 special exception, which will cause the skeleton and the resulting 26 special exception, which will cause the skeleton and the resulting
27 Bison output files to be licensed under the GNU General Public 27 Bison output files to be licensed under the GNU General Public
28 License without this special exception. 28 License without this special exception.
29 29
30 This special exception was added by the Free Software Foundation in 30 This special exception was added by the Free Software Foundation in
31 version 2.2 of Bison. */ 31 version 2.2 of Bison. */
32 32
33#ifndef YY_YY_SCRIPTS_GENKSYMS_PARSE_TAB_H_SHIPPED_INCLUDED 33#ifndef YY_YY_PARSE_TAB_H_INCLUDED
34# define YY_YY_SCRIPTS_GENKSYMS_PARSE_TAB_H_SHIPPED_INCLUDED 34# define YY_YY_PARSE_TAB_H_INCLUDED
35/* Enabling traces. */ 35/* Debug traces. */
36#ifndef YYDEBUG 36#ifndef YYDEBUG
37# define YYDEBUG 1 37# define YYDEBUG 0
38#endif 38#endif
39#if YYDEBUG 39#if YYDEBUG
40extern int yydebug; 40extern int yydebug;
41#endif 41#endif
42 42
43/* Tokens. */ 43/* Token type. */
44#ifndef YYTOKENTYPE 44#ifndef YYTOKENTYPE
45# define YYTOKENTYPE 45# define YYTOKENTYPE
46 /* Put the tokens into the symbol table, so that GDB and other debuggers 46 enum yytokentype
47 know about them. */ 47 {
48 enum yytokentype { 48 ASM_KEYW = 258,
49 ASM_KEYW = 258, 49 ATTRIBUTE_KEYW = 259,
50 ATTRIBUTE_KEYW = 259, 50 AUTO_KEYW = 260,
51 AUTO_KEYW = 260, 51 BOOL_KEYW = 261,
52 BOOL_KEYW = 261, 52 CHAR_KEYW = 262,
53 CHAR_KEYW = 262, 53 CONST_KEYW = 263,
54 CONST_KEYW = 263, 54 DOUBLE_KEYW = 264,
55 DOUBLE_KEYW = 264, 55 ENUM_KEYW = 265,
56 ENUM_KEYW = 265, 56 EXTERN_KEYW = 266,
57 EXTERN_KEYW = 266, 57 EXTENSION_KEYW = 267,
58 EXTENSION_KEYW = 267, 58 FLOAT_KEYW = 268,
59 FLOAT_KEYW = 268, 59 INLINE_KEYW = 269,
60 INLINE_KEYW = 269, 60 INT_KEYW = 270,
61 INT_KEYW = 270, 61 LONG_KEYW = 271,
62 LONG_KEYW = 271, 62 REGISTER_KEYW = 272,
63 REGISTER_KEYW = 272, 63 RESTRICT_KEYW = 273,
64 RESTRICT_KEYW = 273, 64 SHORT_KEYW = 274,
65 SHORT_KEYW = 274, 65 SIGNED_KEYW = 275,
66 SIGNED_KEYW = 275, 66 STATIC_KEYW = 276,
67 STATIC_KEYW = 276, 67 STRUCT_KEYW = 277,
68 STRUCT_KEYW = 277, 68 TYPEDEF_KEYW = 278,
69 TYPEDEF_KEYW = 278, 69 UNION_KEYW = 279,
70 UNION_KEYW = 279, 70 UNSIGNED_KEYW = 280,
71 UNSIGNED_KEYW = 280, 71 VOID_KEYW = 281,
72 VOID_KEYW = 281, 72 VOLATILE_KEYW = 282,
73 VOLATILE_KEYW = 282, 73 TYPEOF_KEYW = 283,
74 TYPEOF_KEYW = 283, 74 EXPORT_SYMBOL_KEYW = 284,
75 EXPORT_SYMBOL_KEYW = 284, 75 ASM_PHRASE = 285,
76 ASM_PHRASE = 285, 76 ATTRIBUTE_PHRASE = 286,
77 ATTRIBUTE_PHRASE = 286, 77 TYPEOF_PHRASE = 287,
78 TYPEOF_PHRASE = 287, 78 BRACE_PHRASE = 288,
79 BRACE_PHRASE = 288, 79 BRACKET_PHRASE = 289,
80 BRACKET_PHRASE = 289, 80 EXPRESSION_PHRASE = 290,
81 EXPRESSION_PHRASE = 290, 81 CHAR = 291,
82 CHAR = 291, 82 DOTS = 292,
83 DOTS = 292, 83 IDENT = 293,
84 IDENT = 293, 84 INT = 294,
85 INT = 294, 85 REAL = 295,
86 REAL = 295, 86 STRING = 296,
87 STRING = 296, 87 TYPE = 297,
88 TYPE = 297, 88 OTHER = 298,
89 OTHER = 298, 89 FILENAME = 299
90 FILENAME = 299 90 };
91 };
92#endif 91#endif
93 92
94 93/* Value type. */
95#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED 94#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
96typedef int YYSTYPE; 95typedef int YYSTYPE;
97# define YYSTYPE_IS_TRIVIAL 1 96# define YYSTYPE_IS_TRIVIAL 1
98# define yystype YYSTYPE /* obsolescent; will be withdrawn */
99# define YYSTYPE_IS_DECLARED 1 97# define YYSTYPE_IS_DECLARED 1
100#endif 98#endif
101 99
100
102extern YYSTYPE yylval; 101extern YYSTYPE yylval;
103 102
104#ifdef YYPARSE_PARAM
105#if defined __STDC__ || defined __cplusplus
106int yyparse (void *YYPARSE_PARAM);
107#else
108int yyparse ();
109#endif
110#else /* ! YYPARSE_PARAM */
111#if defined __STDC__ || defined __cplusplus
112int yyparse (void); 103int yyparse (void);
113#else
114int yyparse ();
115#endif
116#endif /* ! YYPARSE_PARAM */
117 104
118#endif /* !YY_YY_SCRIPTS_GENKSYMS_PARSE_TAB_H_SHIPPED_INCLUDED */ 105#endif /* !YY_YY_PARSE_TAB_H_INCLUDED */
diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y
index 723ab30fe9d4..268efe37688a 100644
--- a/scripts/genksyms/parse.y
+++ b/scripts/genksyms/parse.y
@@ -320,8 +320,6 @@ direct_declarator:
320 { $$ = $2; } 320 { $$ = $2; }
321 | '(' declarator ')' 321 | '(' declarator ')'
322 { $$ = $3; } 322 { $$ = $3; }
323 | '(' error ')'
324 { $$ = $3; }
325 ; 323 ;
326 324
327/* Nested declarators differ from regular declarators in that they do 325/* Nested declarators differ from regular declarators in that they do
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index dd243d2abd87..138d7f100f7e 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -743,7 +743,7 @@ int conf_write(const char *name)
743 struct menu *menu; 743 struct menu *menu;
744 const char *basename; 744 const char *basename;
745 const char *str; 745 const char *str;
746 char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1]; 746 char dirname[PATH_MAX+1], tmpname[PATH_MAX+22], newname[PATH_MAX+8];
747 char *env; 747 char *env;
748 748
749 dirname[0] = 0; 749 dirname[0] = 0;
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index cbf4996dd9c1..ed29bad1f03a 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -113,7 +113,7 @@ void expr_free(struct expr *e)
113 break; 113 break;
114 case E_NOT: 114 case E_NOT:
115 expr_free(e->left.expr); 115 expr_free(e->left.expr);
116 return; 116 break;
117 case E_EQUAL: 117 case E_EQUAL:
118 case E_GEQ: 118 case E_GEQ:
119 case E_GTH: 119 case E_GTH:
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index b05cc3d4a9be..8360feaf51ce 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -364,6 +364,7 @@ void menu_finalize(struct menu *parent)
364 menu->parent = parent; 364 menu->parent = parent;
365 last_menu = menu; 365 last_menu = menu;
366 } 366 }
367 expr_free(basedep);
367 if (last_menu) { 368 if (last_menu) {
368 parent->list = parent->next; 369 parent->list = parent->next;
369 parent->next = last_menu->next; 370 parent->next = last_menu->next;
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 71bf8bff696a..5122ed2d839a 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -107,7 +107,27 @@ static struct menu *current_menu, *current_entry;
107%% 107%%
108input: nl start | start; 108input: nl start | start;
109 109
110start: mainmenu_stmt stmt_list | stmt_list; 110start: mainmenu_stmt stmt_list | no_mainmenu_stmt stmt_list;
111
112/* mainmenu entry */
113
114mainmenu_stmt: T_MAINMENU prompt nl
115{
116 menu_add_prompt(P_MENU, $2, NULL);
117};
118
119/* Default main menu, if there's no mainmenu entry */
120
121no_mainmenu_stmt: /* empty */
122{
123 /*
124 * Hack: Keep the main menu title on the heap so we can safely free it
125 * later regardless of whether it comes from the 'prompt' in
126 * mainmenu_stmt or here
127 */
128 menu_add_prompt(P_MENU, strdup("Linux Kernel Configuration"), NULL);
129};
130
111 131
112stmt_list: 132stmt_list:
113 /* empty */ 133 /* empty */
@@ -344,13 +364,6 @@ if_block:
344 | if_block choice_stmt 364 | if_block choice_stmt
345; 365;
346 366
347/* mainmenu entry */
348
349mainmenu_stmt: T_MAINMENU prompt nl
350{
351 menu_add_prompt(P_MENU, $2, NULL);
352};
353
354/* menu entry */ 367/* menu entry */
355 368
356menu: T_MENU prompt T_EOL 369menu: T_MENU prompt T_EOL
@@ -495,6 +508,7 @@ word_opt: /* empty */ { $$ = NULL; }
495 508
496void conf_parse(const char *name) 509void conf_parse(const char *name)
497{ 510{
511 const char *tmp;
498 struct symbol *sym; 512 struct symbol *sym;
499 int i; 513 int i;
500 514
@@ -502,7 +516,6 @@ void conf_parse(const char *name)
502 516
503 sym_init(); 517 sym_init();
504 _menu_init(); 518 _menu_init();
505 rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
506 519
507 if (getenv("ZCONF_DEBUG")) 520 if (getenv("ZCONF_DEBUG"))
508 zconfdebug = 1; 521 zconfdebug = 1;
@@ -512,8 +525,10 @@ void conf_parse(const char *name)
512 if (!modules_sym) 525 if (!modules_sym)
513 modules_sym = sym_find( "n" ); 526 modules_sym = sym_find( "n" );
514 527
528 tmp = rootmenu.prompt->text;
515 rootmenu.prompt->text = _(rootmenu.prompt->text); 529 rootmenu.prompt->text = _(rootmenu.prompt->text);
516 rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text); 530 rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text);
531 free((char*)tmp);
517 532
518 menu_finalize(&rootmenu); 533 menu_finalize(&rootmenu);
519 for_all_symbols(i, sym) { 534 for_all_symbols(i, sym) {
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 638a38e1b419..bba8ad9c4f2c 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -2742,4 +2742,4 @@ if ($verbose && $warnings) {
2742 print STDERR "$warnings warnings\n"; 2742 print STDERR "$warnings warnings\n";
2743} 2743}
2744 2744
2745exit($errors); 2745exit($output_mode eq "none" ? 0 : $errors);
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index e080746e1a6b..bd5151915e5a 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -594,7 +594,8 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
594 if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 || 594 if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
595 strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 || 595 strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
596 strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 || 596 strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
597 strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0) 597 strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0 ||
598 strcmp(symname, ".TOC.") == 0)
598 return 1; 599 return 1;
599 /* Do not ignore this symbol */ 600 /* Do not ignore this symbol */
600 return 0; 601 return 0;
@@ -2128,6 +2129,14 @@ static void add_intree_flag(struct buffer *b, int is_intree)
2128 buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n"); 2129 buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n");
2129} 2130}
2130 2131
2132/* Cannot check for assembler */
2133static void add_retpoline(struct buffer *b)
2134{
2135 buf_printf(b, "\n#ifdef RETPOLINE\n");
2136 buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
2137 buf_printf(b, "#endif\n");
2138}
2139
2131static void add_staging_flag(struct buffer *b, const char *name) 2140static void add_staging_flag(struct buffer *b, const char *name)
2132{ 2141{
2133 static const char *staging_dir = "drivers/staging"; 2142 static const char *staging_dir = "drivers/staging";
@@ -2472,6 +2481,7 @@ int main(int argc, char **argv)
2472 2481
2473 add_header(&buf, mod); 2482 add_header(&buf, mod);
2474 add_intree_flag(&buf, !external_module); 2483 add_intree_flag(&buf, !external_module);
2484 add_retpoline(&buf);
2475 add_staging_flag(&buf, mod->name); 2485 add_staging_flag(&buf, mod->name);
2476 err |= add_versions(&buf, mod); 2486 err |= add_versions(&buf, mod);
2477 add_depends(&buf, mod, modules); 2487 add_depends(&buf, mod, modules);
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 262889046703..45e246595d10 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -106,6 +106,7 @@ all_compiled_sources()
106 case "$i" in 106 case "$i" in
107 *.[cS]) 107 *.[cS])
108 j=${i/\.[cS]/\.o} 108 j=${i/\.[cS]/\.o}
109 j="${j#$tree}"
109 if [ -e $j ]; then 110 if [ -e $j ]; then
110 echo $i 111 echo $i
111 fi 112 fi
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index dec607c17b64..6dc4ce47580f 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -722,7 +722,7 @@ module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR);
722 722
723/* Maximum pathname length before accesses will start getting rejected */ 723/* Maximum pathname length before accesses will start getting rejected */
724unsigned int aa_g_path_max = 2 * PATH_MAX; 724unsigned int aa_g_path_max = 2 * PATH_MAX;
725module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR); 725module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
726 726
727/* Determines how paranoid loading of policy is and how much verification 727/* Determines how paranoid loading of policy is and how much verification
728 * on the loaded policy is done. 728 * on the loaded policy is done.
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index df303346029b..648a0461f8ed 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -10,6 +10,7 @@ config IMA
10 select CRYPTO_HASH_INFO 10 select CRYPTO_HASH_INFO
11 select TCG_TPM if HAS_IOMEM && !UML 11 select TCG_TPM if HAS_IOMEM && !UML
12 select TCG_TIS if TCG_TPM && X86 12 select TCG_TIS if TCG_TPM && X86
13 select TCG_CRB if TCG_TPM && ACPI
13 select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES 14 select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
14 help 15 help
15 The Trusted Computing Group(TCG) runtime Integrity 16 The Trusted Computing Group(TCG) runtime Integrity
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 19014293f927..c36b98b07d6b 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -206,7 +206,8 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
206 if (opened & FILE_CREATED) 206 if (opened & FILE_CREATED)
207 iint->flags |= IMA_NEW_FILE; 207 iint->flags |= IMA_NEW_FILE;
208 if ((iint->flags & IMA_NEW_FILE) && 208 if ((iint->flags & IMA_NEW_FILE) &&
209 !(iint->flags & IMA_DIGSIG_REQUIRED)) 209 (!(iint->flags & IMA_DIGSIG_REQUIRED) ||
210 (inode->i_size == 0)))
210 status = INTEGRITY_PASS; 211 status = INTEGRITY_PASS;
211 goto out; 212 goto out;
212 } 213 }
@@ -382,14 +383,10 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
382 result = ima_protect_xattr(dentry, xattr_name, xattr_value, 383 result = ima_protect_xattr(dentry, xattr_name, xattr_value,
383 xattr_value_len); 384 xattr_value_len);
384 if (result == 1) { 385 if (result == 1) {
385 bool digsig;
386
387 if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST)) 386 if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
388 return -EINVAL; 387 return -EINVAL;
389 digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG); 388 ima_reset_appraise_flags(d_backing_inode(dentry),
390 if (!digsig && (ima_appraise & IMA_APPRAISE_ENFORCE)) 389 (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
391 return -EPERM;
392 ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
393 result = 0; 390 result = 0;
394 } 391 }
395 return result; 392 return result;
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 6eb62936c672..a29209fa5674 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -78,6 +78,8 @@ int __init ima_init_crypto(void)
78 hash_algo_name[ima_hash_algo], rc); 78 hash_algo_name[ima_hash_algo], rc);
79 return rc; 79 return rc;
80 } 80 }
81 pr_info("Allocated hash algorithm: %s\n",
82 hash_algo_name[ima_hash_algo]);
81 return 0; 83 return 0;
82} 84}
83 85
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 98289ba2a2e6..236dce30e517 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -16,6 +16,9 @@
16 * implements the IMA hooks: ima_bprm_check, ima_file_mmap, 16 * implements the IMA hooks: ima_bprm_check, ima_file_mmap,
17 * and ima_file_check. 17 * and ima_file_check.
18 */ 18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
19#include <linux/module.h> 22#include <linux/module.h>
20#include <linux/file.h> 23#include <linux/file.h>
21#include <linux/binfmts.h> 24#include <linux/binfmts.h>
@@ -353,6 +356,16 @@ static int __init init_ima(void)
353 356
354 hash_setup(CONFIG_IMA_DEFAULT_HASH); 357 hash_setup(CONFIG_IMA_DEFAULT_HASH);
355 error = ima_init(); 358 error = ima_init();
359
360 if (error && strcmp(hash_algo_name[ima_hash_algo],
361 CONFIG_IMA_DEFAULT_HASH) != 0) {
362 pr_info("Allocating %s failed, going to use default hash algorithm %s\n",
363 hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH);
364 hash_setup_done = 0;
365 hash_setup(CONFIG_IMA_DEFAULT_HASH);
366 error = ima_init();
367 }
368
356 if (!error) { 369 if (!error) {
357 ima_initialized = 1; 370 ima_initialized = 1;
358 ima_update_policy_flag(); 371 ima_update_policy_flag();
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index ce295c0c1da0..e44e844c8ec4 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -141,23 +141,22 @@ static int valid_ecryptfs_desc(const char *ecryptfs_desc)
141 */ 141 */
142static int valid_master_desc(const char *new_desc, const char *orig_desc) 142static int valid_master_desc(const char *new_desc, const char *orig_desc)
143{ 143{
144 if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) { 144 int prefix_len;
145 if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN) 145
146 goto out; 146 if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN))
147 if (orig_desc) 147 prefix_len = KEY_TRUSTED_PREFIX_LEN;
148 if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN)) 148 else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN))
149 goto out; 149 prefix_len = KEY_USER_PREFIX_LEN;
150 } else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) { 150 else
151 if (strlen(new_desc) == KEY_USER_PREFIX_LEN) 151 return -EINVAL;
152 goto out; 152
153 if (orig_desc) 153 if (!new_desc[prefix_len])
154 if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN)) 154 return -EINVAL;
155 goto out; 155
156 } else 156 if (orig_desc && strncmp(new_desc, orig_desc, prefix_len))
157 goto out; 157 return -EINVAL;
158
158 return 0; 159 return 0;
159out:
160 return -EINVAL;
161} 160}
162 161
163/* 162/*
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4b56c3b6c25f..99212ff6a568 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -333,18 +333,6 @@ static void superblock_free_security(struct super_block *sb)
333 kfree(sbsec); 333 kfree(sbsec);
334} 334}
335 335
336/* The file system's label must be initialized prior to use. */
337
338static const char *labeling_behaviors[7] = {
339 "uses xattr",
340 "uses transition SIDs",
341 "uses task SIDs",
342 "uses genfs_contexts",
343 "not configured for labeling",
344 "uses mountpoint labeling",
345 "uses native labeling",
346};
347
348static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry); 336static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry);
349 337
350static inline int inode_doinit(struct inode *inode) 338static inline int inode_doinit(struct inode *inode)
@@ -456,10 +444,6 @@ static int sb_finish_set_opts(struct super_block *sb)
456 } 444 }
457 } 445 }
458 446
459 if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
460 printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
461 sb->s_id, sb->s_type->name);
462
463 sbsec->flags |= SE_SBINITIALIZED; 447 sbsec->flags |= SE_SBINITIALIZED;
464 if (selinux_is_sblabel_mnt(sb)) 448 if (selinux_is_sblabel_mnt(sb))
465 sbsec->flags |= SBLABEL_MNT; 449 sbsec->flags |= SBLABEL_MNT;
@@ -1958,8 +1942,9 @@ static inline u32 file_to_av(struct file *file)
1958static inline u32 open_file_to_av(struct file *file) 1942static inline u32 open_file_to_av(struct file *file)
1959{ 1943{
1960 u32 av = file_to_av(file); 1944 u32 av = file_to_av(file);
1945 struct inode *inode = file_inode(file);
1961 1946
1962 if (selinux_policycap_openperm) 1947 if (selinux_policycap_openperm && inode->i_sb->s_magic != SOCKFS_MAGIC)
1963 av |= FILE__OPEN; 1948 av |= FILE__OPEN;
1964 1949
1965 return av; 1950 return av;
@@ -2928,6 +2913,7 @@ static int selinux_inode_permission(struct inode *inode, int mask)
2928static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) 2913static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
2929{ 2914{
2930 const struct cred *cred = current_cred(); 2915 const struct cred *cred = current_cred();
2916 struct inode *inode = d_backing_inode(dentry);
2931 unsigned int ia_valid = iattr->ia_valid; 2917 unsigned int ia_valid = iattr->ia_valid;
2932 __u32 av = FILE__WRITE; 2918 __u32 av = FILE__WRITE;
2933 2919
@@ -2943,8 +2929,10 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
2943 ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET)) 2929 ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
2944 return dentry_has_perm(cred, dentry, FILE__SETATTR); 2930 return dentry_has_perm(cred, dentry, FILE__SETATTR);
2945 2931
2946 if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE) 2932 if (selinux_policycap_openperm &&
2947 && !(ia_valid & ATTR_FILE)) 2933 inode->i_sb->s_magic != SOCKFS_MAGIC &&
2934 (ia_valid & ATTR_SIZE) &&
2935 !(ia_valid & ATTR_FILE))
2948 av |= FILE__OPEN; 2936 av |= FILE__OPEN;
2949 2937
2950 return dentry_has_perm(cred, dentry, av); 2938 return dentry_has_perm(cred, dentry, av);
@@ -4032,6 +4020,8 @@ static int sock_has_perm(struct task_struct *task, struct sock *sk, u32 perms)
4032 struct lsm_network_audit net = {0,}; 4020 struct lsm_network_audit net = {0,};
4033 u32 tsid = task_sid(task); 4021 u32 tsid = task_sid(task);
4034 4022
4023 if (!sksec)
4024 return -EFAULT;
4035 if (sksec->sid == SECINITSID_KERNEL) 4025 if (sksec->sid == SECINITSID_KERNEL)
4036 return 0; 4026 return 0;
4037 4027
@@ -4122,10 +4112,18 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
4122 u32 sid, node_perm; 4112 u32 sid, node_perm;
4123 4113
4124 if (family == PF_INET) { 4114 if (family == PF_INET) {
4115 if (addrlen < sizeof(struct sockaddr_in)) {
4116 err = -EINVAL;
4117 goto out;
4118 }
4125 addr4 = (struct sockaddr_in *)address; 4119 addr4 = (struct sockaddr_in *)address;
4126 snum = ntohs(addr4->sin_port); 4120 snum = ntohs(addr4->sin_port);
4127 addrp = (char *)&addr4->sin_addr.s_addr; 4121 addrp = (char *)&addr4->sin_addr.s_addr;
4128 } else { 4122 } else {
4123 if (addrlen < SIN6_LEN_RFC2133) {
4124 err = -EINVAL;
4125 goto out;
4126 }
4129 addr6 = (struct sockaddr_in6 *)address; 4127 addr6 = (struct sockaddr_in6 *)address;
4130 snum = ntohs(addr6->sin6_port); 4128 snum = ntohs(addr6->sin6_port);
4131 addrp = (char *)&addr6->sin6_addr.s6_addr; 4129 addrp = (char *)&addr6->sin6_addr.s6_addr;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index ebb5eb3c318c..0a258c0602d1 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -155,7 +155,7 @@ static int selinux_set_mapping(struct policydb *pol,
155 } 155 }
156 156
157 k = 0; 157 k = 0;
158 while (p_in->perms && p_in->perms[k]) { 158 while (p_in->perms[k]) {
159 /* An empty permission string skips ahead */ 159 /* An empty permission string skips ahead */
160 if (!*p_in->perms[k]) { 160 if (!*p_in->perms[k]) {
161 k++; 161 k++;
@@ -860,6 +860,9 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
860 int index; 860 int index;
861 int rc; 861 int rc;
862 862
863 if (!ss_initialized)
864 return 0;
865
863 read_lock(&policy_rwlock); 866 read_lock(&policy_rwlock);
864 867
865 rc = -EINVAL; 868 rc = -EINVAL;
@@ -1406,27 +1409,25 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
1406 if (!scontext_len) 1409 if (!scontext_len)
1407 return -EINVAL; 1410 return -EINVAL;
1408 1411
1412 /* Copy the string to allow changes and ensure a NUL terminator */
1413 scontext2 = kmemdup_nul(scontext, scontext_len, gfp_flags);
1414 if (!scontext2)
1415 return -ENOMEM;
1416
1409 if (!ss_initialized) { 1417 if (!ss_initialized) {
1410 int i; 1418 int i;
1411 1419
1412 for (i = 1; i < SECINITSID_NUM; i++) { 1420 for (i = 1; i < SECINITSID_NUM; i++) {
1413 if (!strcmp(initial_sid_to_string[i], scontext)) { 1421 if (!strcmp(initial_sid_to_string[i], scontext2)) {
1414 *sid = i; 1422 *sid = i;
1415 return 0; 1423 goto out;
1416 } 1424 }
1417 } 1425 }
1418 *sid = SECINITSID_KERNEL; 1426 *sid = SECINITSID_KERNEL;
1419 return 0; 1427 goto out;
1420 } 1428 }
1421 *sid = SECSID_NULL; 1429 *sid = SECSID_NULL;
1422 1430
1423 /* Copy the string so that we can modify the copy as we parse it. */
1424 scontext2 = kmalloc(scontext_len + 1, gfp_flags);
1425 if (!scontext2)
1426 return -ENOMEM;
1427 memcpy(scontext2, scontext, scontext_len);
1428 scontext2[scontext_len] = 0;
1429
1430 if (force) { 1431 if (force) {
1431 /* Save another copy for storing in uninterpreted form */ 1432 /* Save another copy for storing in uninterpreted form */
1432 rc = -ENOMEM; 1433 rc = -ENOMEM;
@@ -1440,7 +1441,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
1440 scontext_len, &context, def_sid); 1441 scontext_len, &context, def_sid);
1441 if (rc == -EINVAL && force) { 1442 if (rc == -EINVAL && force) {
1442 context.str = str; 1443 context.str = str;
1443 context.len = scontext_len; 1444 context.len = strlen(str) + 1;
1444 str = NULL; 1445 str = NULL;
1445 } else if (rc) 1446 } else if (rc)
1446 goto out_unlock; 1447 goto out_unlock;
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index 0608f216f359..ac0a40b9ba1e 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -400,8 +400,7 @@ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
400 if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) || 400 if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) ||
401 copy_from_user(&data->type, &data32->type, 3 * sizeof(u32))) 401 copy_from_user(&data->type, &data32->type, 3 * sizeof(u32)))
402 goto error; 402 goto error;
403 if (get_user(data->owner, &data32->owner) || 403 if (get_user(data->owner, &data32->owner))
404 get_user(data->type, &data32->type))
405 goto error; 404 goto error;
406 switch (data->type) { 405 switch (data->type) {
407 case SNDRV_CTL_ELEM_TYPE_BOOLEAN: 406 case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 494b7b533366..07feb35f1935 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -833,8 +833,25 @@ static int choose_rate(struct snd_pcm_substream *substream,
833 return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL); 833 return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
834} 834}
835 835
836static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream, 836/* parameter locking: returns immediately if tried during streaming */
837 bool trylock) 837static int lock_params(struct snd_pcm_runtime *runtime)
838{
839 if (mutex_lock_interruptible(&runtime->oss.params_lock))
840 return -ERESTARTSYS;
841 if (atomic_read(&runtime->oss.rw_ref)) {
842 mutex_unlock(&runtime->oss.params_lock);
843 return -EBUSY;
844 }
845 return 0;
846}
847
848static void unlock_params(struct snd_pcm_runtime *runtime)
849{
850 mutex_unlock(&runtime->oss.params_lock);
851}
852
853/* call with params_lock held */
854static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
838{ 855{
839 struct snd_pcm_runtime *runtime = substream->runtime; 856 struct snd_pcm_runtime *runtime = substream->runtime;
840 struct snd_pcm_hw_params *params, *sparams; 857 struct snd_pcm_hw_params *params, *sparams;
@@ -848,12 +865,9 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
848 struct snd_mask sformat_mask; 865 struct snd_mask sformat_mask;
849 struct snd_mask mask; 866 struct snd_mask mask;
850 867
851 if (trylock) { 868 if (!runtime->oss.params)
852 if (!(mutex_trylock(&runtime->oss.params_lock))) 869 return 0;
853 return -EAGAIN; 870 sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
854 } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
855 return -EINTR;
856 sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
857 params = kmalloc(sizeof(*params), GFP_KERNEL); 871 params = kmalloc(sizeof(*params), GFP_KERNEL);
858 sparams = kmalloc(sizeof(*sparams), GFP_KERNEL); 872 sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
859 if (!sw_params || !params || !sparams) { 873 if (!sw_params || !params || !sparams) {
@@ -991,7 +1005,6 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
991 goto failure; 1005 goto failure;
992 } 1006 }
993 1007
994 memset(sw_params, 0, sizeof(*sw_params));
995 if (runtime->oss.trigger) { 1008 if (runtime->oss.trigger) {
996 sw_params->start_threshold = 1; 1009 sw_params->start_threshold = 1;
997 } else { 1010 } else {
@@ -1079,6 +1092,23 @@ failure:
1079 kfree(sw_params); 1092 kfree(sw_params);
1080 kfree(params); 1093 kfree(params);
1081 kfree(sparams); 1094 kfree(sparams);
1095 return err;
1096}
1097
1098/* this one takes the lock by itself */
1099static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
1100 bool trylock)
1101{
1102 struct snd_pcm_runtime *runtime = substream->runtime;
1103 int err;
1104
1105 if (trylock) {
1106 if (!(mutex_trylock(&runtime->oss.params_lock)))
1107 return -EAGAIN;
1108 } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
1109 return -ERESTARTSYS;
1110
1111 err = snd_pcm_oss_change_params_locked(substream);
1082 mutex_unlock(&runtime->oss.params_lock); 1112 mutex_unlock(&runtime->oss.params_lock);
1083 return err; 1113 return err;
1084} 1114}
@@ -1107,6 +1137,10 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
1107 return 0; 1137 return 0;
1108} 1138}
1109 1139
1140/* call with params_lock held */
1141/* NOTE: this always call PREPARE unconditionally no matter whether
1142 * runtime->oss.prepare is set or not
1143 */
1110static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream) 1144static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
1111{ 1145{
1112 int err; 1146 int err;
@@ -1131,8 +1165,6 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
1131 struct snd_pcm_runtime *runtime; 1165 struct snd_pcm_runtime *runtime;
1132 int err; 1166 int err;
1133 1167
1134 if (substream == NULL)
1135 return 0;
1136 runtime = substream->runtime; 1168 runtime = substream->runtime;
1137 if (runtime->oss.params) { 1169 if (runtime->oss.params) {
1138 err = snd_pcm_oss_change_params(substream, false); 1170 err = snd_pcm_oss_change_params(substream, false);
@@ -1140,6 +1172,29 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
1140 return err; 1172 return err;
1141 } 1173 }
1142 if (runtime->oss.prepare) { 1174 if (runtime->oss.prepare) {
1175 if (mutex_lock_interruptible(&runtime->oss.params_lock))
1176 return -ERESTARTSYS;
1177 err = snd_pcm_oss_prepare(substream);
1178 mutex_unlock(&runtime->oss.params_lock);
1179 if (err < 0)
1180 return err;
1181 }
1182 return 0;
1183}
1184
1185/* call with params_lock held */
1186static int snd_pcm_oss_make_ready_locked(struct snd_pcm_substream *substream)
1187{
1188 struct snd_pcm_runtime *runtime;
1189 int err;
1190
1191 runtime = substream->runtime;
1192 if (runtime->oss.params) {
1193 err = snd_pcm_oss_change_params_locked(substream);
1194 if (err < 0)
1195 return err;
1196 }
1197 if (runtime->oss.prepare) {
1143 err = snd_pcm_oss_prepare(substream); 1198 err = snd_pcm_oss_prepare(substream);
1144 if (err < 0) 1199 if (err < 0)
1145 return err; 1200 return err;
@@ -1361,19 +1416,21 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
1361static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const char __user *buf, size_t bytes) 1416static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const char __user *buf, size_t bytes)
1362{ 1417{
1363 size_t xfer = 0; 1418 size_t xfer = 0;
1364 ssize_t tmp; 1419 ssize_t tmp = 0;
1365 struct snd_pcm_runtime *runtime = substream->runtime; 1420 struct snd_pcm_runtime *runtime = substream->runtime;
1366 1421
1367 if (atomic_read(&substream->mmap_count)) 1422 if (atomic_read(&substream->mmap_count))
1368 return -ENXIO; 1423 return -ENXIO;
1369 1424
1370 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) 1425 atomic_inc(&runtime->oss.rw_ref);
1371 return tmp;
1372 while (bytes > 0) { 1426 while (bytes > 0) {
1373 if (mutex_lock_interruptible(&runtime->oss.params_lock)) { 1427 if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
1374 tmp = -ERESTARTSYS; 1428 tmp = -ERESTARTSYS;
1375 break; 1429 break;
1376 } 1430 }
1431 tmp = snd_pcm_oss_make_ready_locked(substream);
1432 if (tmp < 0)
1433 goto err;
1377 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { 1434 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
1378 tmp = bytes; 1435 tmp = bytes;
1379 if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes) 1436 if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
@@ -1429,6 +1486,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
1429 } 1486 }
1430 tmp = 0; 1487 tmp = 0;
1431 } 1488 }
1489 atomic_dec(&runtime->oss.rw_ref);
1432 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; 1490 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
1433} 1491}
1434 1492
@@ -1468,19 +1526,21 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
1468static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __user *buf, size_t bytes) 1526static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __user *buf, size_t bytes)
1469{ 1527{
1470 size_t xfer = 0; 1528 size_t xfer = 0;
1471 ssize_t tmp; 1529 ssize_t tmp = 0;
1472 struct snd_pcm_runtime *runtime = substream->runtime; 1530 struct snd_pcm_runtime *runtime = substream->runtime;
1473 1531
1474 if (atomic_read(&substream->mmap_count)) 1532 if (atomic_read(&substream->mmap_count))
1475 return -ENXIO; 1533 return -ENXIO;
1476 1534
1477 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) 1535 atomic_inc(&runtime->oss.rw_ref);
1478 return tmp;
1479 while (bytes > 0) { 1536 while (bytes > 0) {
1480 if (mutex_lock_interruptible(&runtime->oss.params_lock)) { 1537 if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
1481 tmp = -ERESTARTSYS; 1538 tmp = -ERESTARTSYS;
1482 break; 1539 break;
1483 } 1540 }
1541 tmp = snd_pcm_oss_make_ready_locked(substream);
1542 if (tmp < 0)
1543 goto err;
1484 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { 1544 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
1485 if (runtime->oss.buffer_used == 0) { 1545 if (runtime->oss.buffer_used == 0) {
1486 tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1); 1546 tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
@@ -1521,6 +1581,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
1521 } 1581 }
1522 tmp = 0; 1582 tmp = 0;
1523 } 1583 }
1584 atomic_dec(&runtime->oss.rw_ref);
1524 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; 1585 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
1525} 1586}
1526 1587
@@ -1536,10 +1597,12 @@ static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
1536 continue; 1597 continue;
1537 runtime = substream->runtime; 1598 runtime = substream->runtime;
1538 snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); 1599 snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
1600 mutex_lock(&runtime->oss.params_lock);
1539 runtime->oss.prepare = 1; 1601 runtime->oss.prepare = 1;
1540 runtime->oss.buffer_used = 0; 1602 runtime->oss.buffer_used = 0;
1541 runtime->oss.prev_hw_ptr_period = 0; 1603 runtime->oss.prev_hw_ptr_period = 0;
1542 runtime->oss.period_ptr = 0; 1604 runtime->oss.period_ptr = 0;
1605 mutex_unlock(&runtime->oss.params_lock);
1543 } 1606 }
1544 return 0; 1607 return 0;
1545} 1608}
@@ -1625,9 +1688,13 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
1625 goto __direct; 1688 goto __direct;
1626 if ((err = snd_pcm_oss_make_ready(substream)) < 0) 1689 if ((err = snd_pcm_oss_make_ready(substream)) < 0)
1627 return err; 1690 return err;
1691 atomic_inc(&runtime->oss.rw_ref);
1692 if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
1693 atomic_dec(&runtime->oss.rw_ref);
1694 return -ERESTARTSYS;
1695 }
1628 format = snd_pcm_oss_format_from(runtime->oss.format); 1696 format = snd_pcm_oss_format_from(runtime->oss.format);
1629 width = snd_pcm_format_physical_width(format); 1697 width = snd_pcm_format_physical_width(format);
1630 mutex_lock(&runtime->oss.params_lock);
1631 if (runtime->oss.buffer_used > 0) { 1698 if (runtime->oss.buffer_used > 0) {
1632#ifdef OSS_DEBUG 1699#ifdef OSS_DEBUG
1633 pcm_dbg(substream->pcm, "sync: buffer_used\n"); 1700 pcm_dbg(substream->pcm, "sync: buffer_used\n");
@@ -1637,10 +1704,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
1637 runtime->oss.buffer + runtime->oss.buffer_used, 1704 runtime->oss.buffer + runtime->oss.buffer_used,
1638 size); 1705 size);
1639 err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes); 1706 err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes);
1640 if (err < 0) { 1707 if (err < 0)
1641 mutex_unlock(&runtime->oss.params_lock); 1708 goto unlock;
1642 return err;
1643 }
1644 } else if (runtime->oss.period_ptr > 0) { 1709 } else if (runtime->oss.period_ptr > 0) {
1645#ifdef OSS_DEBUG 1710#ifdef OSS_DEBUG
1646 pcm_dbg(substream->pcm, "sync: period_ptr\n"); 1711 pcm_dbg(substream->pcm, "sync: period_ptr\n");
@@ -1650,10 +1715,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
1650 runtime->oss.buffer, 1715 runtime->oss.buffer,
1651 size * 8 / width); 1716 size * 8 / width);
1652 err = snd_pcm_oss_sync1(substream, size); 1717 err = snd_pcm_oss_sync1(substream, size);
1653 if (err < 0) { 1718 if (err < 0)
1654 mutex_unlock(&runtime->oss.params_lock); 1719 goto unlock;
1655 return err;
1656 }
1657 } 1720 }
1658 /* 1721 /*
1659 * The ALSA's period might be a bit large than OSS one. 1722 * The ALSA's period might be a bit large than OSS one.
@@ -1684,7 +1747,11 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
1684 snd_pcm_lib_writev(substream, buffers, size); 1747 snd_pcm_lib_writev(substream, buffers, size);
1685 } 1748 }
1686 } 1749 }
1750unlock:
1687 mutex_unlock(&runtime->oss.params_lock); 1751 mutex_unlock(&runtime->oss.params_lock);
1752 atomic_dec(&runtime->oss.rw_ref);
1753 if (err < 0)
1754 return err;
1688 /* 1755 /*
1689 * finish sync: drain the buffer 1756 * finish sync: drain the buffer
1690 */ 1757 */
@@ -1695,7 +1762,9 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
1695 substream->f_flags = saved_f_flags; 1762 substream->f_flags = saved_f_flags;
1696 if (err < 0) 1763 if (err < 0)
1697 return err; 1764 return err;
1765 mutex_lock(&runtime->oss.params_lock);
1698 runtime->oss.prepare = 1; 1766 runtime->oss.prepare = 1;
1767 mutex_unlock(&runtime->oss.params_lock);
1699 } 1768 }
1700 1769
1701 substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; 1770 substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
@@ -1706,8 +1775,10 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
1706 err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); 1775 err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
1707 if (err < 0) 1776 if (err < 0)
1708 return err; 1777 return err;
1778 mutex_lock(&runtime->oss.params_lock);
1709 runtime->oss.buffer_used = 0; 1779 runtime->oss.buffer_used = 0;
1710 runtime->oss.prepare = 1; 1780 runtime->oss.prepare = 1;
1781 mutex_unlock(&runtime->oss.params_lock);
1711 } 1782 }
1712 return 0; 1783 return 0;
1713} 1784}
@@ -1719,6 +1790,8 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
1719 for (idx = 1; idx >= 0; --idx) { 1790 for (idx = 1; idx >= 0; --idx) {
1720 struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; 1791 struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
1721 struct snd_pcm_runtime *runtime; 1792 struct snd_pcm_runtime *runtime;
1793 int err;
1794
1722 if (substream == NULL) 1795 if (substream == NULL)
1723 continue; 1796 continue;
1724 runtime = substream->runtime; 1797 runtime = substream->runtime;
@@ -1726,10 +1799,14 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
1726 rate = 1000; 1799 rate = 1000;
1727 else if (rate > 192000) 1800 else if (rate > 192000)
1728 rate = 192000; 1801 rate = 192000;
1802 err = lock_params(runtime);
1803 if (err < 0)
1804 return err;
1729 if (runtime->oss.rate != rate) { 1805 if (runtime->oss.rate != rate) {
1730 runtime->oss.params = 1; 1806 runtime->oss.params = 1;
1731 runtime->oss.rate = rate; 1807 runtime->oss.rate = rate;
1732 } 1808 }
1809 unlock_params(runtime);
1733 } 1810 }
1734 return snd_pcm_oss_get_rate(pcm_oss_file); 1811 return snd_pcm_oss_get_rate(pcm_oss_file);
1735} 1812}
@@ -1754,13 +1831,19 @@ static int snd_pcm_oss_set_channels(struct snd_pcm_oss_file *pcm_oss_file, unsig
1754 for (idx = 1; idx >= 0; --idx) { 1831 for (idx = 1; idx >= 0; --idx) {
1755 struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; 1832 struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
1756 struct snd_pcm_runtime *runtime; 1833 struct snd_pcm_runtime *runtime;
1834 int err;
1835
1757 if (substream == NULL) 1836 if (substream == NULL)
1758 continue; 1837 continue;
1759 runtime = substream->runtime; 1838 runtime = substream->runtime;
1839 err = lock_params(runtime);
1840 if (err < 0)
1841 return err;
1760 if (runtime->oss.channels != channels) { 1842 if (runtime->oss.channels != channels) {
1761 runtime->oss.params = 1; 1843 runtime->oss.params = 1;
1762 runtime->oss.channels = channels; 1844 runtime->oss.channels = channels;
1763 } 1845 }
1846 unlock_params(runtime);
1764 } 1847 }
1765 return snd_pcm_oss_get_channels(pcm_oss_file); 1848 return snd_pcm_oss_get_channels(pcm_oss_file);
1766} 1849}
@@ -1814,10 +1897,9 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
1814 return -ENOMEM; 1897 return -ENOMEM;
1815 _snd_pcm_hw_params_any(params); 1898 _snd_pcm_hw_params_any(params);
1816 err = snd_pcm_hw_refine(substream, params); 1899 err = snd_pcm_hw_refine(substream, params);
1817 format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
1818 kfree(params);
1819 if (err < 0) 1900 if (err < 0)
1820 return err; 1901 goto error;
1902 format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
1821 for (fmt = 0; fmt < 32; ++fmt) { 1903 for (fmt = 0; fmt < 32; ++fmt) {
1822 if (snd_mask_test(&format_mask, fmt)) { 1904 if (snd_mask_test(&format_mask, fmt)) {
1823 int f = snd_pcm_oss_format_to(fmt); 1905 int f = snd_pcm_oss_format_to(fmt);
@@ -1825,12 +1907,16 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
1825 formats |= f; 1907 formats |= f;
1826 } 1908 }
1827 } 1909 }
1828 return formats; 1910
1911 error:
1912 kfree(params);
1913 return err < 0 ? err : formats;
1829} 1914}
1830 1915
1831static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format) 1916static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format)
1832{ 1917{
1833 int formats, idx; 1918 int formats, idx;
1919 int err;
1834 1920
1835 if (format != AFMT_QUERY) { 1921 if (format != AFMT_QUERY) {
1836 formats = snd_pcm_oss_get_formats(pcm_oss_file); 1922 formats = snd_pcm_oss_get_formats(pcm_oss_file);
@@ -1844,10 +1930,14 @@ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int for
1844 if (substream == NULL) 1930 if (substream == NULL)
1845 continue; 1931 continue;
1846 runtime = substream->runtime; 1932 runtime = substream->runtime;
1933 err = lock_params(runtime);
1934 if (err < 0)
1935 return err;
1847 if (runtime->oss.format != format) { 1936 if (runtime->oss.format != format) {
1848 runtime->oss.params = 1; 1937 runtime->oss.params = 1;
1849 runtime->oss.format = format; 1938 runtime->oss.format = format;
1850 } 1939 }
1940 unlock_params(runtime);
1851 } 1941 }
1852 } 1942 }
1853 return snd_pcm_oss_get_format(pcm_oss_file); 1943 return snd_pcm_oss_get_format(pcm_oss_file);
@@ -1867,8 +1957,6 @@ static int snd_pcm_oss_set_subdivide1(struct snd_pcm_substream *substream, int s
1867{ 1957{
1868 struct snd_pcm_runtime *runtime; 1958 struct snd_pcm_runtime *runtime;
1869 1959
1870 if (substream == NULL)
1871 return 0;
1872 runtime = substream->runtime; 1960 runtime = substream->runtime;
1873 if (subdivide == 0) { 1961 if (subdivide == 0) {
1874 subdivide = runtime->oss.subdivision; 1962 subdivide = runtime->oss.subdivision;
@@ -1892,9 +1980,17 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int
1892 1980
1893 for (idx = 1; idx >= 0; --idx) { 1981 for (idx = 1; idx >= 0; --idx) {
1894 struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; 1982 struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
1983 struct snd_pcm_runtime *runtime;
1984
1895 if (substream == NULL) 1985 if (substream == NULL)
1896 continue; 1986 continue;
1897 if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0) 1987 runtime = substream->runtime;
1988 err = lock_params(runtime);
1989 if (err < 0)
1990 return err;
1991 err = snd_pcm_oss_set_subdivide1(substream, subdivide);
1992 unlock_params(runtime);
1993 if (err < 0)
1898 return err; 1994 return err;
1899 } 1995 }
1900 return err; 1996 return err;
@@ -1904,8 +2000,6 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
1904{ 2000{
1905 struct snd_pcm_runtime *runtime; 2001 struct snd_pcm_runtime *runtime;
1906 2002
1907 if (substream == NULL)
1908 return 0;
1909 runtime = substream->runtime; 2003 runtime = substream->runtime;
1910 if (runtime->oss.subdivision || runtime->oss.fragshift) 2004 if (runtime->oss.subdivision || runtime->oss.fragshift)
1911 return -EINVAL; 2005 return -EINVAL;
@@ -1925,9 +2019,17 @@ static int snd_pcm_oss_set_fragment(struct snd_pcm_oss_file *pcm_oss_file, unsig
1925 2019
1926 for (idx = 1; idx >= 0; --idx) { 2020 for (idx = 1; idx >= 0; --idx) {
1927 struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; 2021 struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
2022 struct snd_pcm_runtime *runtime;
2023
1928 if (substream == NULL) 2024 if (substream == NULL)
1929 continue; 2025 continue;
1930 if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0) 2026 runtime = substream->runtime;
2027 err = lock_params(runtime);
2028 if (err < 0)
2029 return err;
2030 err = snd_pcm_oss_set_fragment1(substream, val);
2031 unlock_params(runtime);
2032 if (err < 0)
1931 return err; 2033 return err;
1932 } 2034 }
1933 return err; 2035 return err;
@@ -2011,6 +2113,9 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
2011 } 2113 }
2012 if (psubstream) { 2114 if (psubstream) {
2013 runtime = psubstream->runtime; 2115 runtime = psubstream->runtime;
2116 cmd = 0;
2117 if (mutex_lock_interruptible(&runtime->oss.params_lock))
2118 return -ERESTARTSYS;
2014 if (trigger & PCM_ENABLE_OUTPUT) { 2119 if (trigger & PCM_ENABLE_OUTPUT) {
2015 if (runtime->oss.trigger) 2120 if (runtime->oss.trigger)
2016 goto _skip1; 2121 goto _skip1;
@@ -2028,13 +2133,19 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
2028 cmd = SNDRV_PCM_IOCTL_DROP; 2133 cmd = SNDRV_PCM_IOCTL_DROP;
2029 runtime->oss.prepare = 1; 2134 runtime->oss.prepare = 1;
2030 } 2135 }
2031 err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
2032 if (err < 0)
2033 return err;
2034 }
2035 _skip1: 2136 _skip1:
2137 mutex_unlock(&runtime->oss.params_lock);
2138 if (cmd) {
2139 err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
2140 if (err < 0)
2141 return err;
2142 }
2143 }
2036 if (csubstream) { 2144 if (csubstream) {
2037 runtime = csubstream->runtime; 2145 runtime = csubstream->runtime;
2146 cmd = 0;
2147 if (mutex_lock_interruptible(&runtime->oss.params_lock))
2148 return -ERESTARTSYS;
2038 if (trigger & PCM_ENABLE_INPUT) { 2149 if (trigger & PCM_ENABLE_INPUT) {
2039 if (runtime->oss.trigger) 2150 if (runtime->oss.trigger)
2040 goto _skip2; 2151 goto _skip2;
@@ -2049,11 +2160,14 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
2049 cmd = SNDRV_PCM_IOCTL_DROP; 2160 cmd = SNDRV_PCM_IOCTL_DROP;
2050 runtime->oss.prepare = 1; 2161 runtime->oss.prepare = 1;
2051 } 2162 }
2052 err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
2053 if (err < 0)
2054 return err;
2055 }
2056 _skip2: 2163 _skip2:
2164 mutex_unlock(&runtime->oss.params_lock);
2165 if (cmd) {
2166 err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
2167 if (err < 0)
2168 return err;
2169 }
2170 }
2057 return 0; 2171 return 0;
2058} 2172}
2059 2173
@@ -2305,6 +2419,7 @@ static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream,
2305 runtime->oss.maxfrags = 0; 2419 runtime->oss.maxfrags = 0;
2306 runtime->oss.subdivision = 0; 2420 runtime->oss.subdivision = 0;
2307 substream->pcm_release = snd_pcm_oss_release_substream; 2421 substream->pcm_release = snd_pcm_oss_release_substream;
2422 atomic_set(&runtime->oss.rw_ref, 0);
2308} 2423}
2309 2424
2310static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file) 2425static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file)
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 074363b63cc4..6bda8f6c5f84 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -28,6 +28,7 @@
28#include <sound/core.h> 28#include <sound/core.h>
29#include <sound/minors.h> 29#include <sound/minors.h>
30#include <sound/pcm.h> 30#include <sound/pcm.h>
31#include <sound/timer.h>
31#include <sound/control.h> 32#include <sound/control.h>
32#include <sound/info.h> 33#include <sound/info.h>
33 34
@@ -1025,8 +1026,13 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
1025 snd_free_pages((void*)runtime->control, 1026 snd_free_pages((void*)runtime->control,
1026 PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control))); 1027 PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
1027 kfree(runtime->hw_constraints.rules); 1028 kfree(runtime->hw_constraints.rules);
1028 kfree(runtime); 1029 /* Avoid concurrent access to runtime via PCM timer interface */
1030 if (substream->timer)
1031 spin_lock_irq(&substream->timer->lock);
1029 substream->runtime = NULL; 1032 substream->runtime = NULL;
1033 if (substream->timer)
1034 spin_unlock_irq(&substream->timer->lock);
1035 kfree(runtime);
1030 put_pid(substream->pid); 1036 put_pid(substream->pid);
1031 substream->pid = NULL; 1037 substream->pid = NULL;
1032 substream->pstr->substream_opened--; 1038 substream->pstr->substream_opened--;
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 1f64ab0c2a95..7ae080bae15c 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -426,6 +426,8 @@ static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
426 return -ENOTTY; 426 return -ENOTTY;
427 if (substream->stream != dir) 427 if (substream->stream != dir)
428 return -EINVAL; 428 return -EINVAL;
429 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
430 return -EBADFD;
429 431
430 if ((ch = substream->runtime->channels) > 128) 432 if ((ch = substream->runtime->channels) > 128)
431 return -EINVAL; 433 return -EINVAL;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 4ba64fd49759..3de88974eeb6 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2727,6 +2727,7 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2727 sync_ptr.s.status.hw_ptr = status->hw_ptr; 2727 sync_ptr.s.status.hw_ptr = status->hw_ptr;
2728 sync_ptr.s.status.tstamp = status->tstamp; 2728 sync_ptr.s.status.tstamp = status->tstamp;
2729 sync_ptr.s.status.suspended_state = status->suspended_state; 2729 sync_ptr.s.status.suspended_state = status->suspended_state;
2730 sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
2730 snd_pcm_stream_unlock_irq(substream); 2731 snd_pcm_stream_unlock_irq(substream);
2731 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr))) 2732 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
2732 return -EFAULT; 2733 return -EFAULT;
@@ -3408,7 +3409,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
3408 area, 3409 area,
3409 substream->runtime->dma_area, 3410 substream->runtime->dma_area,
3410 substream->runtime->dma_addr, 3411 substream->runtime->dma_addr,
3411 area->vm_end - area->vm_start); 3412 substream->runtime->dma_bytes);
3412#endif /* CONFIG_X86 */ 3413#endif /* CONFIG_X86 */
3413 /* mmap with fault handler */ 3414 /* mmap with fault handler */
3414 area->vm_ops = &snd_pcm_vm_ops_data_fault; 3415 area->vm_ops = &snd_pcm_vm_ops_data_fault;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 16f8124b1150..59111cadaec2 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -635,7 +635,7 @@ static int snd_rawmidi_info_select_user(struct snd_card *card,
635int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream, 635int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
636 struct snd_rawmidi_params * params) 636 struct snd_rawmidi_params * params)
637{ 637{
638 char *newbuf; 638 char *newbuf, *oldbuf;
639 struct snd_rawmidi_runtime *runtime = substream->runtime; 639 struct snd_rawmidi_runtime *runtime = substream->runtime;
640 640
641 if (substream->append && substream->use_count > 1) 641 if (substream->append && substream->use_count > 1)
@@ -648,13 +648,17 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
648 return -EINVAL; 648 return -EINVAL;
649 } 649 }
650 if (params->buffer_size != runtime->buffer_size) { 650 if (params->buffer_size != runtime->buffer_size) {
651 newbuf = krealloc(runtime->buffer, params->buffer_size, 651 newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
652 GFP_KERNEL);
653 if (!newbuf) 652 if (!newbuf)
654 return -ENOMEM; 653 return -ENOMEM;
654 spin_lock_irq(&runtime->lock);
655 oldbuf = runtime->buffer;
655 runtime->buffer = newbuf; 656 runtime->buffer = newbuf;
656 runtime->buffer_size = params->buffer_size; 657 runtime->buffer_size = params->buffer_size;
657 runtime->avail = runtime->buffer_size; 658 runtime->avail = runtime->buffer_size;
659 runtime->appl_ptr = runtime->hw_ptr = 0;
660 spin_unlock_irq(&runtime->lock);
661 kfree(oldbuf);
658 } 662 }
659 runtime->avail_min = params->avail_min; 663 runtime->avail_min = params->avail_min;
660 substream->active_sensing = !params->no_active_sensing; 664 substream->active_sensing = !params->no_active_sensing;
@@ -665,7 +669,7 @@ EXPORT_SYMBOL(snd_rawmidi_output_params);
665int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream, 669int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
666 struct snd_rawmidi_params * params) 670 struct snd_rawmidi_params * params)
667{ 671{
668 char *newbuf; 672 char *newbuf, *oldbuf;
669 struct snd_rawmidi_runtime *runtime = substream->runtime; 673 struct snd_rawmidi_runtime *runtime = substream->runtime;
670 674
671 snd_rawmidi_drain_input(substream); 675 snd_rawmidi_drain_input(substream);
@@ -676,12 +680,16 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
676 return -EINVAL; 680 return -EINVAL;
677 } 681 }
678 if (params->buffer_size != runtime->buffer_size) { 682 if (params->buffer_size != runtime->buffer_size) {
679 newbuf = krealloc(runtime->buffer, params->buffer_size, 683 newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
680 GFP_KERNEL);
681 if (!newbuf) 684 if (!newbuf)
682 return -ENOMEM; 685 return -ENOMEM;
686 spin_lock_irq(&runtime->lock);
687 oldbuf = runtime->buffer;
683 runtime->buffer = newbuf; 688 runtime->buffer = newbuf;
684 runtime->buffer_size = params->buffer_size; 689 runtime->buffer_size = params->buffer_size;
690 runtime->appl_ptr = runtime->hw_ptr = 0;
691 spin_unlock_irq(&runtime->lock);
692 kfree(oldbuf);
685 } 693 }
686 runtime->avail_min = params->avail_min; 694 runtime->avail_min = params->avail_min;
687 return 0; 695 return 0;
diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
index 09a89094dcf7..4e304a24924a 100644
--- a/sound/core/rawmidi_compat.c
+++ b/sound/core/rawmidi_compat.c
@@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
36 struct snd_rawmidi_params params; 36 struct snd_rawmidi_params params;
37 unsigned int val; 37 unsigned int val;
38 38
39 if (rfile->output == NULL)
40 return -EINVAL;
41 if (get_user(params.stream, &src->stream) || 39 if (get_user(params.stream, &src->stream) ||
42 get_user(params.buffer_size, &src->buffer_size) || 40 get_user(params.buffer_size, &src->buffer_size) ||
43 get_user(params.avail_min, &src->avail_min) || 41 get_user(params.avail_min, &src->avail_min) ||
@@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
46 params.no_active_sensing = val; 44 params.no_active_sensing = val;
47 switch (params.stream) { 45 switch (params.stream) {
48 case SNDRV_RAWMIDI_STREAM_OUTPUT: 46 case SNDRV_RAWMIDI_STREAM_OUTPUT:
47 if (!rfile->output)
48 return -EINVAL;
49 return snd_rawmidi_output_params(rfile->output, &params); 49 return snd_rawmidi_output_params(rfile->output, &params);
50 case SNDRV_RAWMIDI_STREAM_INPUT: 50 case SNDRV_RAWMIDI_STREAM_INPUT:
51 if (!rfile->input)
52 return -EINVAL;
51 return snd_rawmidi_input_params(rfile->input, &params); 53 return snd_rawmidi_input_params(rfile->input, &params);
52 } 54 }
53 return -EINVAL; 55 return -EINVAL;
@@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
67 int err; 69 int err;
68 struct snd_rawmidi_status status; 70 struct snd_rawmidi_status status;
69 71
70 if (rfile->output == NULL)
71 return -EINVAL;
72 if (get_user(status.stream, &src->stream)) 72 if (get_user(status.stream, &src->stream))
73 return -EFAULT; 73 return -EFAULT;
74 74
75 switch (status.stream) { 75 switch (status.stream) {
76 case SNDRV_RAWMIDI_STREAM_OUTPUT: 76 case SNDRV_RAWMIDI_STREAM_OUTPUT:
77 if (!rfile->output)
78 return -EINVAL;
77 err = snd_rawmidi_output_status(rfile->output, &status); 79 err = snd_rawmidi_output_status(rfile->output, &status);
78 break; 80 break;
79 case SNDRV_RAWMIDI_STREAM_INPUT: 81 case SNDRV_RAWMIDI_STREAM_INPUT:
82 if (!rfile->input)
83 return -EINVAL;
80 err = snd_rawmidi_input_status(rfile->input, &status); 84 err = snd_rawmidi_input_status(rfile->input, &status);
81 break; 85 break;
82 default: 86 default:
@@ -113,16 +117,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
113 int err; 117 int err;
114 struct snd_rawmidi_status status; 118 struct snd_rawmidi_status status;
115 119
116 if (rfile->output == NULL)
117 return -EINVAL;
118 if (get_user(status.stream, &src->stream)) 120 if (get_user(status.stream, &src->stream))
119 return -EFAULT; 121 return -EFAULT;
120 122
121 switch (status.stream) { 123 switch (status.stream) {
122 case SNDRV_RAWMIDI_STREAM_OUTPUT: 124 case SNDRV_RAWMIDI_STREAM_OUTPUT:
125 if (!rfile->output)
126 return -EINVAL;
123 err = snd_rawmidi_output_status(rfile->output, &status); 127 err = snd_rawmidi_output_status(rfile->output, &status);
124 break; 128 break;
125 case SNDRV_RAWMIDI_STREAM_INPUT: 129 case SNDRV_RAWMIDI_STREAM_INPUT:
130 if (!rfile->input)
131 return -EINVAL;
126 err = snd_rawmidi_input_status(rfile->input, &status); 132 err = snd_rawmidi_input_status(rfile->input, &status);
127 break; 133 break;
128 default: 134 default:
diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
index c3908862bc8b..86ca584c27b2 100644
--- a/sound/core/seq/oss/seq_oss_event.c
+++ b/sound/core/seq/oss/seq_oss_event.c
@@ -26,6 +26,7 @@
26#include <sound/seq_oss_legacy.h> 26#include <sound/seq_oss_legacy.h>
27#include "seq_oss_readq.h" 27#include "seq_oss_readq.h"
28#include "seq_oss_writeq.h" 28#include "seq_oss_writeq.h"
29#include <linux/nospec.h>
29 30
30 31
31/* 32/*
@@ -287,10 +288,10 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
287{ 288{
288 struct seq_oss_synthinfo *info; 289 struct seq_oss_synthinfo *info;
289 290
290 if (!snd_seq_oss_synth_is_valid(dp, dev)) 291 info = snd_seq_oss_synth_info(dp, dev);
292 if (!info)
291 return -ENXIO; 293 return -ENXIO;
292 294
293 info = &dp->synths[dev];
294 switch (info->arg.event_passing) { 295 switch (info->arg.event_passing) {
295 case SNDRV_SEQ_OSS_PROCESS_EVENTS: 296 case SNDRV_SEQ_OSS_PROCESS_EVENTS:
296 if (! info->ch || ch < 0 || ch >= info->nr_voices) { 297 if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -298,6 +299,7 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
298 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); 299 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
299 } 300 }
300 301
302 ch = array_index_nospec(ch, info->nr_voices);
301 if (note == 255 && info->ch[ch].note >= 0) { 303 if (note == 255 && info->ch[ch].note >= 0) {
302 /* volume control */ 304 /* volume control */
303 int type; 305 int type;
@@ -347,10 +349,10 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
347{ 349{
348 struct seq_oss_synthinfo *info; 350 struct seq_oss_synthinfo *info;
349 351
350 if (!snd_seq_oss_synth_is_valid(dp, dev)) 352 info = snd_seq_oss_synth_info(dp, dev);
353 if (!info)
351 return -ENXIO; 354 return -ENXIO;
352 355
353 info = &dp->synths[dev];
354 switch (info->arg.event_passing) { 356 switch (info->arg.event_passing) {
355 case SNDRV_SEQ_OSS_PROCESS_EVENTS: 357 case SNDRV_SEQ_OSS_PROCESS_EVENTS:
356 if (! info->ch || ch < 0 || ch >= info->nr_voices) { 358 if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -358,6 +360,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
358 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); 360 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
359 } 361 }
360 362
363 ch = array_index_nospec(ch, info->nr_voices);
361 if (info->ch[ch].note >= 0) { 364 if (info->ch[ch].note >= 0) {
362 note = info->ch[ch].note; 365 note = info->ch[ch].note;
363 info->ch[ch].vel = 0; 366 info->ch[ch].vel = 0;
@@ -381,7 +384,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
381static int 384static int
382set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev) 385set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev)
383{ 386{
384 if (! snd_seq_oss_synth_is_valid(dp, dev)) 387 if (!snd_seq_oss_synth_info(dp, dev))
385 return -ENXIO; 388 return -ENXIO;
386 389
387 ev->type = type; 390 ev->type = type;
@@ -399,7 +402,7 @@ set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note,
399static int 402static int
400set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev) 403set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev)
401{ 404{
402 if (! snd_seq_oss_synth_is_valid(dp, dev)) 405 if (!snd_seq_oss_synth_info(dp, dev))
403 return -ENXIO; 406 return -ENXIO;
404 407
405 ev->type = type; 408 ev->type = type;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index b30b2139e3f0..9debd1b8fd28 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -29,6 +29,7 @@
29#include "../seq_lock.h" 29#include "../seq_lock.h"
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/nospec.h>
32 33
33 34
34/* 35/*
@@ -315,6 +316,7 @@ get_mididev(struct seq_oss_devinfo *dp, int dev)
315{ 316{
316 if (dev < 0 || dev >= dp->max_mididev) 317 if (dev < 0 || dev >= dp->max_mididev)
317 return NULL; 318 return NULL;
319 dev = array_index_nospec(dev, dp->max_mididev);
318 return get_mdev(dev); 320 return get_mdev(dev);
319} 321}
320 322
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index b16dbef04174..ea545f9291b4 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -26,6 +26,7 @@
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/nospec.h>
29 30
30/* 31/*
31 * constants 32 * constants
@@ -339,17 +340,13 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
339 dp->max_synthdev = 0; 340 dp->max_synthdev = 0;
340} 341}
341 342
342/* 343static struct seq_oss_synthinfo *
343 * check if the specified device is MIDI mapped device 344get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev)
344 */
345static int
346is_midi_dev(struct seq_oss_devinfo *dp, int dev)
347{ 345{
348 if (dev < 0 || dev >= dp->max_synthdev) 346 if (dev < 0 || dev >= dp->max_synthdev)
349 return 0; 347 return NULL;
350 if (dp->synths[dev].is_midi) 348 dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS);
351 return 1; 349 return &dp->synths[dev];
352 return 0;
353} 350}
354 351
355/* 352/*
@@ -359,14 +356,20 @@ static struct seq_oss_synth *
359get_synthdev(struct seq_oss_devinfo *dp, int dev) 356get_synthdev(struct seq_oss_devinfo *dp, int dev)
360{ 357{
361 struct seq_oss_synth *rec; 358 struct seq_oss_synth *rec;
362 if (dev < 0 || dev >= dp->max_synthdev) 359 struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
363 return NULL; 360
364 if (! dp->synths[dev].opened) 361 if (!info)
365 return NULL; 362 return NULL;
366 if (dp->synths[dev].is_midi) 363 if (!info->opened)
367 return &midi_synth_dev;
368 if ((rec = get_sdev(dev)) == NULL)
369 return NULL; 364 return NULL;
365 if (info->is_midi) {
366 rec = &midi_synth_dev;
367 snd_use_lock_use(&rec->use_lock);
368 } else {
369 rec = get_sdev(dev);
370 if (!rec)
371 return NULL;
372 }
370 if (! rec->opened) { 373 if (! rec->opened) {
371 snd_use_lock_free(&rec->use_lock); 374 snd_use_lock_free(&rec->use_lock);
372 return NULL; 375 return NULL;
@@ -402,10 +405,8 @@ snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev)
402 struct seq_oss_synth *rec; 405 struct seq_oss_synth *rec;
403 struct seq_oss_synthinfo *info; 406 struct seq_oss_synthinfo *info;
404 407
405 if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev)) 408 info = get_synthinfo_nospec(dp, dev);
406 return; 409 if (!info || !info->opened)
407 info = &dp->synths[dev];
408 if (! info->opened)
409 return; 410 return;
410 if (info->sysex) 411 if (info->sysex)
411 info->sysex->len = 0; /* reset sysex */ 412 info->sysex->len = 0; /* reset sysex */
@@ -454,12 +455,14 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
454 const char __user *buf, int p, int c) 455 const char __user *buf, int p, int c)
455{ 456{
456 struct seq_oss_synth *rec; 457 struct seq_oss_synth *rec;
458 struct seq_oss_synthinfo *info;
457 int rc; 459 int rc;
458 460
459 if (dev < 0 || dev >= dp->max_synthdev) 461 info = get_synthinfo_nospec(dp, dev);
462 if (!info)
460 return -ENXIO; 463 return -ENXIO;
461 464
462 if (is_midi_dev(dp, dev)) 465 if (info->is_midi)
463 return 0; 466 return 0;
464 if ((rec = get_synthdev(dp, dev)) == NULL) 467 if ((rec = get_synthdev(dp, dev)) == NULL)
465 return -ENXIO; 468 return -ENXIO;
@@ -467,24 +470,25 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
467 if (rec->oper.load_patch == NULL) 470 if (rec->oper.load_patch == NULL)
468 rc = -ENXIO; 471 rc = -ENXIO;
469 else 472 else
470 rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c); 473 rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
471 snd_use_lock_free(&rec->use_lock); 474 snd_use_lock_free(&rec->use_lock);
472 return rc; 475 return rc;
473} 476}
474 477
475/* 478/*
476 * check if the device is valid synth device 479 * check if the device is valid synth device and return the synth info
477 */ 480 */
478int 481struct seq_oss_synthinfo *
479snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev) 482snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
480{ 483{
481 struct seq_oss_synth *rec; 484 struct seq_oss_synth *rec;
485
482 rec = get_synthdev(dp, dev); 486 rec = get_synthdev(dp, dev);
483 if (rec) { 487 if (rec) {
484 snd_use_lock_free(&rec->use_lock); 488 snd_use_lock_free(&rec->use_lock);
485 return 1; 489 return get_synthinfo_nospec(dp, dev);
486 } 490 }
487 return 0; 491 return NULL;
488} 492}
489 493
490 494
@@ -499,16 +503,18 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
499 int i, send; 503 int i, send;
500 unsigned char *dest; 504 unsigned char *dest;
501 struct seq_oss_synth_sysex *sysex; 505 struct seq_oss_synth_sysex *sysex;
506 struct seq_oss_synthinfo *info;
502 507
503 if (! snd_seq_oss_synth_is_valid(dp, dev)) 508 info = snd_seq_oss_synth_info(dp, dev);
509 if (!info)
504 return -ENXIO; 510 return -ENXIO;
505 511
506 sysex = dp->synths[dev].sysex; 512 sysex = info->sysex;
507 if (sysex == NULL) { 513 if (sysex == NULL) {
508 sysex = kzalloc(sizeof(*sysex), GFP_KERNEL); 514 sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
509 if (sysex == NULL) 515 if (sysex == NULL)
510 return -ENOMEM; 516 return -ENOMEM;
511 dp->synths[dev].sysex = sysex; 517 info->sysex = sysex;
512 } 518 }
513 519
514 send = 0; 520 send = 0;
@@ -553,10 +559,12 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
553int 559int
554snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev) 560snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
555{ 561{
556 if (! snd_seq_oss_synth_is_valid(dp, dev)) 562 struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev);
563
564 if (!info)
557 return -EINVAL; 565 return -EINVAL;
558 snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client, 566 snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client,
559 dp->synths[dev].arg.addr.port); 567 info->arg.addr.port);
560 return 0; 568 return 0;
561} 569}
562 570
@@ -568,16 +576,18 @@ int
568snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr) 576snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr)
569{ 577{
570 struct seq_oss_synth *rec; 578 struct seq_oss_synth *rec;
579 struct seq_oss_synthinfo *info;
571 int rc; 580 int rc;
572 581
573 if (is_midi_dev(dp, dev)) 582 info = get_synthinfo_nospec(dp, dev);
583 if (!info || info->is_midi)
574 return -ENXIO; 584 return -ENXIO;
575 if ((rec = get_synthdev(dp, dev)) == NULL) 585 if ((rec = get_synthdev(dp, dev)) == NULL)
576 return -ENXIO; 586 return -ENXIO;
577 if (rec->oper.ioctl == NULL) 587 if (rec->oper.ioctl == NULL)
578 rc = -ENXIO; 588 rc = -ENXIO;
579 else 589 else
580 rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr); 590 rc = rec->oper.ioctl(&info->arg, cmd, addr);
581 snd_use_lock_free(&rec->use_lock); 591 snd_use_lock_free(&rec->use_lock);
582 return rc; 592 return rc;
583} 593}
@@ -589,7 +599,10 @@ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, u
589int 599int
590snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev) 600snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev)
591{ 601{
592 if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev)) 602 struct seq_oss_synthinfo *info;
603
604 info = snd_seq_oss_synth_info(dp, dev);
605 if (!info || info->is_midi)
593 return -ENXIO; 606 return -ENXIO;
594 ev->type = SNDRV_SEQ_EVENT_OSS; 607 ev->type = SNDRV_SEQ_EVENT_OSS;
595 memcpy(ev->data.raw8.d, data, 8); 608 memcpy(ev->data.raw8.d, data, 8);
diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
index 74ac55f166b6..a63f9e22974d 100644
--- a/sound/core/seq/oss/seq_oss_synth.h
+++ b/sound/core/seq/oss/seq_oss_synth.h
@@ -37,7 +37,8 @@ void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp);
37void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev); 37void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev);
38int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt, 38int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
39 const char __user *buf, int p, int c); 39 const char __user *buf, int p, int c);
40int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev); 40struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp,
41 int dev);
41int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf, 42int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
42 struct snd_seq_event *ev); 43 struct snd_seq_event *ev);
43int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev); 44int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index b36de76f24e2..73ee8476584d 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -236,6 +236,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
236 rwlock_init(&client->ports_lock); 236 rwlock_init(&client->ports_lock);
237 mutex_init(&client->ports_mutex); 237 mutex_init(&client->ports_mutex);
238 INIT_LIST_HEAD(&client->ports_list_head); 238 INIT_LIST_HEAD(&client->ports_list_head);
239 mutex_init(&client->ioctl_mutex);
239 240
240 /* find free slot in the client table */ 241 /* find free slot in the client table */
241 spin_lock_irqsave(&clients_lock, flags); 242 spin_lock_irqsave(&clients_lock, flags);
@@ -269,12 +270,12 @@ static int seq_free_client1(struct snd_seq_client *client)
269 270
270 if (!client) 271 if (!client)
271 return 0; 272 return 0;
272 snd_seq_delete_all_ports(client);
273 snd_seq_queue_client_leave(client->number);
274 spin_lock_irqsave(&clients_lock, flags); 273 spin_lock_irqsave(&clients_lock, flags);
275 clienttablock[client->number] = 1; 274 clienttablock[client->number] = 1;
276 clienttab[client->number] = NULL; 275 clienttab[client->number] = NULL;
277 spin_unlock_irqrestore(&clients_lock, flags); 276 spin_unlock_irqrestore(&clients_lock, flags);
277 snd_seq_delete_all_ports(client);
278 snd_seq_queue_client_leave(client->number);
278 snd_use_lock_sync(&client->use_lock); 279 snd_use_lock_sync(&client->use_lock);
279 snd_seq_queue_client_termination(client->number); 280 snd_seq_queue_client_termination(client->number);
280 if (client->pool) 281 if (client->pool)
@@ -918,7 +919,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
918static int snd_seq_client_enqueue_event(struct snd_seq_client *client, 919static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
919 struct snd_seq_event *event, 920 struct snd_seq_event *event,
920 struct file *file, int blocking, 921 struct file *file, int blocking,
921 int atomic, int hop) 922 int atomic, int hop,
923 struct mutex *mutexp)
922{ 924{
923 struct snd_seq_event_cell *cell; 925 struct snd_seq_event_cell *cell;
924 int err; 926 int err;
@@ -956,7 +958,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
956 return -ENXIO; /* queue is not allocated */ 958 return -ENXIO; /* queue is not allocated */
957 959
958 /* allocate an event cell */ 960 /* allocate an event cell */
959 err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file); 961 err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
962 file, mutexp);
960 if (err < 0) 963 if (err < 0)
961 return err; 964 return err;
962 965
@@ -1011,7 +1014,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
1011{ 1014{
1012 struct snd_seq_client *client = file->private_data; 1015 struct snd_seq_client *client = file->private_data;
1013 int written = 0, len; 1016 int written = 0, len;
1014 int err = -EINVAL; 1017 int err;
1015 struct snd_seq_event event; 1018 struct snd_seq_event event;
1016 1019
1017 if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT)) 1020 if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
@@ -1025,12 +1028,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
1025 return -ENXIO; 1028 return -ENXIO;
1026 1029
1027 /* allocate the pool now if the pool is not allocated yet */ 1030 /* allocate the pool now if the pool is not allocated yet */
1031 mutex_lock(&client->ioctl_mutex);
1028 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { 1032 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
1029 if (snd_seq_pool_init(client->pool) < 0) 1033 err = snd_seq_pool_init(client->pool);
1030 return -ENOMEM; 1034 if (err < 0)
1035 goto out;
1031 } 1036 }
1032 1037
1033 /* only process whole events */ 1038 /* only process whole events */
1039 err = -EINVAL;
1034 while (count >= sizeof(struct snd_seq_event)) { 1040 while (count >= sizeof(struct snd_seq_event)) {
1035 /* Read in the event header from the user */ 1041 /* Read in the event header from the user */
1036 len = sizeof(event); 1042 len = sizeof(event);
@@ -1077,7 +1083,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
1077 /* ok, enqueue it */ 1083 /* ok, enqueue it */
1078 err = snd_seq_client_enqueue_event(client, &event, file, 1084 err = snd_seq_client_enqueue_event(client, &event, file,
1079 !(file->f_flags & O_NONBLOCK), 1085 !(file->f_flags & O_NONBLOCK),
1080 0, 0); 1086 0, 0, &client->ioctl_mutex);
1081 if (err < 0) 1087 if (err < 0)
1082 break; 1088 break;
1083 1089
@@ -1088,6 +1094,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
1088 written += len; 1094 written += len;
1089 } 1095 }
1090 1096
1097 out:
1098 mutex_unlock(&client->ioctl_mutex);
1091 return written ? written : err; 1099 return written ? written : err;
1092} 1100}
1093 1101
@@ -1919,6 +1927,9 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
1919 (! snd_seq_write_pool_allocated(client) || 1927 (! snd_seq_write_pool_allocated(client) ||
1920 info.output_pool != client->pool->size)) { 1928 info.output_pool != client->pool->size)) {
1921 if (snd_seq_write_pool_allocated(client)) { 1929 if (snd_seq_write_pool_allocated(client)) {
1930 /* is the pool in use? */
1931 if (atomic_read(&client->pool->counter))
1932 return -EBUSY;
1922 /* remove all existing cells */ 1933 /* remove all existing cells */
1923 snd_seq_pool_mark_closing(client->pool); 1934 snd_seq_pool_mark_closing(client->pool);
1924 snd_seq_queue_client_leave_cells(client->number); 1935 snd_seq_queue_client_leave_cells(client->number);
@@ -2220,11 +2231,15 @@ static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
2220static long snd_seq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2231static long snd_seq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2221{ 2232{
2222 struct snd_seq_client *client = file->private_data; 2233 struct snd_seq_client *client = file->private_data;
2234 long ret;
2223 2235
2224 if (snd_BUG_ON(!client)) 2236 if (snd_BUG_ON(!client))
2225 return -ENXIO; 2237 return -ENXIO;
2226 2238
2227 return snd_seq_do_ioctl(client, cmd, (void __user *) arg); 2239 mutex_lock(&client->ioctl_mutex);
2240 ret = snd_seq_do_ioctl(client, cmd, (void __user *) arg);
2241 mutex_unlock(&client->ioctl_mutex);
2242 return ret;
2228} 2243}
2229 2244
2230#ifdef CONFIG_COMPAT 2245#ifdef CONFIG_COMPAT
@@ -2338,7 +2353,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
2338 if (! cptr->accept_output) 2353 if (! cptr->accept_output)
2339 result = -EPERM; 2354 result = -EPERM;
2340 else /* send it */ 2355 else /* send it */
2341 result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop); 2356 result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
2357 atomic, hop, NULL);
2342 2358
2343 snd_seq_client_unlock(cptr); 2359 snd_seq_client_unlock(cptr);
2344 return result; 2360 return result;
diff --git a/sound/core/seq/seq_clientmgr.h b/sound/core/seq/seq_clientmgr.h
index 20f0a725ec7d..91f8f165bfdc 100644
--- a/sound/core/seq/seq_clientmgr.h
+++ b/sound/core/seq/seq_clientmgr.h
@@ -59,6 +59,7 @@ struct snd_seq_client {
59 struct list_head ports_list_head; 59 struct list_head ports_list_head;
60 rwlock_t ports_lock; 60 rwlock_t ports_lock;
61 struct mutex ports_mutex; 61 struct mutex ports_mutex;
62 struct mutex ioctl_mutex;
62 int convert32; /* convert 32->64bit */ 63 int convert32; /* convert 32->64bit */
63 64
64 /* output pool */ 65 /* output pool */
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 3490d21ab9e7..9acbed1ac982 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -123,7 +123,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
123 return -EINVAL; 123 return -EINVAL;
124 124
125 snd_use_lock_use(&f->use_lock); 125 snd_use_lock_use(&f->use_lock);
126 err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */ 126 err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
127 if (err < 0) { 127 if (err < 0) {
128 if ((err == -ENOMEM) || (err == -EAGAIN)) 128 if ((err == -ENOMEM) || (err == -EAGAIN))
129 atomic_inc(&f->overflow); 129 atomic_inc(&f->overflow);
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 5847c4475bf3..4c8cbcd89887 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -221,7 +221,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
221 */ 221 */
222static int snd_seq_cell_alloc(struct snd_seq_pool *pool, 222static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
223 struct snd_seq_event_cell **cellp, 223 struct snd_seq_event_cell **cellp,
224 int nonblock, struct file *file) 224 int nonblock, struct file *file,
225 struct mutex *mutexp)
225{ 226{
226 struct snd_seq_event_cell *cell; 227 struct snd_seq_event_cell *cell;
227 unsigned long flags; 228 unsigned long flags;
@@ -245,7 +246,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
245 set_current_state(TASK_INTERRUPTIBLE); 246 set_current_state(TASK_INTERRUPTIBLE);
246 add_wait_queue(&pool->output_sleep, &wait); 247 add_wait_queue(&pool->output_sleep, &wait);
247 spin_unlock_irq(&pool->lock); 248 spin_unlock_irq(&pool->lock);
249 if (mutexp)
250 mutex_unlock(mutexp);
248 schedule(); 251 schedule();
252 if (mutexp)
253 mutex_lock(mutexp);
249 spin_lock_irq(&pool->lock); 254 spin_lock_irq(&pool->lock);
250 remove_wait_queue(&pool->output_sleep, &wait); 255 remove_wait_queue(&pool->output_sleep, &wait);
251 /* interrupted? */ 256 /* interrupted? */
@@ -288,7 +293,7 @@ __error:
288 */ 293 */
289int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, 294int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
290 struct snd_seq_event_cell **cellp, int nonblock, 295 struct snd_seq_event_cell **cellp, int nonblock,
291 struct file *file) 296 struct file *file, struct mutex *mutexp)
292{ 297{
293 int ncells, err; 298 int ncells, err;
294 unsigned int extlen; 299 unsigned int extlen;
@@ -305,7 +310,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
305 if (ncells >= pool->total_elements) 310 if (ncells >= pool->total_elements)
306 return -ENOMEM; 311 return -ENOMEM;
307 312
308 err = snd_seq_cell_alloc(pool, &cell, nonblock, file); 313 err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
309 if (err < 0) 314 if (err < 0)
310 return err; 315 return err;
311 316
@@ -331,7 +336,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
331 int size = sizeof(struct snd_seq_event); 336 int size = sizeof(struct snd_seq_event);
332 if (len < size) 337 if (len < size)
333 size = len; 338 size = len;
334 err = snd_seq_cell_alloc(pool, &tmp, nonblock, file); 339 err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
340 mutexp);
335 if (err < 0) 341 if (err < 0)
336 goto __error; 342 goto __error;
337 if (cell->event.data.ext.ptr == NULL) 343 if (cell->event.data.ext.ptr == NULL)
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 32f959c17786..3abe306c394a 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -66,7 +66,8 @@ struct snd_seq_pool {
66void snd_seq_cell_free(struct snd_seq_event_cell *cell); 66void snd_seq_cell_free(struct snd_seq_event_cell *cell);
67 67
68int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, 68int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
69 struct snd_seq_event_cell **cellp, int nonblock, struct file *file); 69 struct snd_seq_event_cell **cellp, int nonblock,
70 struct file *file, struct mutex *mutexp);
70 71
71/* return number of unused (free) cells */ 72/* return number of unused (free) cells */
72static inline int snd_seq_unused_cells(struct snd_seq_pool *pool) 73static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
diff --git a/sound/core/seq/seq_prioq.c b/sound/core/seq/seq_prioq.c
index bc1c8488fc2a..2bc6759e4adc 100644
--- a/sound/core/seq/seq_prioq.c
+++ b/sound/core/seq/seq_prioq.c
@@ -87,7 +87,7 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo)
87 if (f->cells > 0) { 87 if (f->cells > 0) {
88 /* drain prioQ */ 88 /* drain prioQ */
89 while (f->cells > 0) 89 while (f->cells > 0)
90 snd_seq_cell_free(snd_seq_prioq_cell_out(f)); 90 snd_seq_cell_free(snd_seq_prioq_cell_out(f, NULL));
91 } 91 }
92 92
93 kfree(f); 93 kfree(f);
@@ -214,8 +214,18 @@ int snd_seq_prioq_cell_in(struct snd_seq_prioq * f,
214 return 0; 214 return 0;
215} 215}
216 216
217/* return 1 if the current time >= event timestamp */
218static int event_is_ready(struct snd_seq_event *ev, void *current_time)
219{
220 if ((ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) == SNDRV_SEQ_TIME_STAMP_TICK)
221 return snd_seq_compare_tick_time(current_time, &ev->time.tick);
222 else
223 return snd_seq_compare_real_time(current_time, &ev->time.time);
224}
225
217/* dequeue cell from prioq */ 226/* dequeue cell from prioq */
218struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f) 227struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f,
228 void *current_time)
219{ 229{
220 struct snd_seq_event_cell *cell; 230 struct snd_seq_event_cell *cell;
221 unsigned long flags; 231 unsigned long flags;
@@ -227,6 +237,8 @@ struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f)
227 spin_lock_irqsave(&f->lock, flags); 237 spin_lock_irqsave(&f->lock, flags);
228 238
229 cell = f->head; 239 cell = f->head;
240 if (cell && current_time && !event_is_ready(&cell->event, current_time))
241 cell = NULL;
230 if (cell) { 242 if (cell) {
231 f->head = cell->next; 243 f->head = cell->next;
232 244
@@ -252,18 +264,6 @@ int snd_seq_prioq_avail(struct snd_seq_prioq * f)
252 return f->cells; 264 return f->cells;
253} 265}
254 266
255
256/* peek at cell at the head of the prioq */
257struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq * f)
258{
259 if (f == NULL) {
260 pr_debug("ALSA: seq: snd_seq_prioq_cell_in() called with NULL prioq\n");
261 return NULL;
262 }
263 return f->head;
264}
265
266
267static inline int prioq_match(struct snd_seq_event_cell *cell, 267static inline int prioq_match(struct snd_seq_event_cell *cell,
268 int client, int timestamp) 268 int client, int timestamp)
269{ 269{
diff --git a/sound/core/seq/seq_prioq.h b/sound/core/seq/seq_prioq.h
index d38bb78d9345..2c315ca10fc4 100644
--- a/sound/core/seq/seq_prioq.h
+++ b/sound/core/seq/seq_prioq.h
@@ -44,14 +44,12 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo);
44int snd_seq_prioq_cell_in(struct snd_seq_prioq *f, struct snd_seq_event_cell *cell); 44int snd_seq_prioq_cell_in(struct snd_seq_prioq *f, struct snd_seq_event_cell *cell);
45 45
46/* dequeue cell from prioq */ 46/* dequeue cell from prioq */
47struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f); 47struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f,
48 void *current_time);
48 49
49/* return number of events available in prioq */ 50/* return number of events available in prioq */
50int snd_seq_prioq_avail(struct snd_seq_prioq *f); 51int snd_seq_prioq_avail(struct snd_seq_prioq *f);
51 52
52/* peek at cell at the head of the prioq */
53struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq *f);
54
55/* client left queue */ 53/* client left queue */
56void snd_seq_prioq_leave(struct snd_seq_prioq *f, int client, int timestamp); 54void snd_seq_prioq_leave(struct snd_seq_prioq *f, int client, int timestamp);
57 55
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 79e0c5604ef8..1a6dc4ff44a6 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -277,30 +277,20 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
277 277
278 __again: 278 __again:
279 /* Process tick queue... */ 279 /* Process tick queue... */
280 while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) { 280 for (;;) {
281 if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick, 281 cell = snd_seq_prioq_cell_out(q->tickq,
282 &cell->event.time.tick)) { 282 &q->timer->tick.cur_tick);
283 cell = snd_seq_prioq_cell_out(q->tickq); 283 if (!cell)
284 if (cell)
285 snd_seq_dispatch_event(cell, atomic, hop);
286 } else {
287 /* event remains in the queue */
288 break; 284 break;
289 } 285 snd_seq_dispatch_event(cell, atomic, hop);
290 } 286 }
291 287
292
293 /* Process time queue... */ 288 /* Process time queue... */
294 while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) { 289 for (;;) {
295 if (snd_seq_compare_real_time(&q->timer->cur_time, 290 cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
296 &cell->event.time.time)) { 291 if (!cell)
297 cell = snd_seq_prioq_cell_out(q->timeq);
298 if (cell)
299 snd_seq_dispatch_event(cell, atomic, hop);
300 } else {
301 /* event remains in the queue */
302 break; 292 break;
303 } 293 snd_seq_dispatch_event(cell, atomic, hop);
304 } 294 }
305 295
306 /* free lock */ 296 /* free lock */
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 3b126af4a026..ef494ffc1369 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -174,12 +174,12 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
174 } 174 }
175 return; 175 return;
176 } 176 }
177 spin_lock_irqsave(&substream->runtime->lock, flags);
177 if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) { 178 if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
178 if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0) 179 if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
179 return; 180 goto out;
180 vmidi->event.type = SNDRV_SEQ_EVENT_NONE; 181 vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
181 } 182 }
182 spin_lock_irqsave(&substream->runtime->lock, flags);
183 while (1) { 183 while (1) {
184 count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf)); 184 count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
185 if (count <= 0) 185 if (count <= 0)
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 48eaccba82a3..ef850a99d64a 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -318,8 +318,6 @@ int snd_timer_open(struct snd_timer_instance **ti,
318 return 0; 318 return 0;
319} 319}
320 320
321static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
322
323/* 321/*
324 * close a timer instance 322 * close a timer instance
325 */ 323 */
@@ -408,7 +406,6 @@ unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
408static void snd_timer_notify1(struct snd_timer_instance *ti, int event) 406static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
409{ 407{
410 struct snd_timer *timer; 408 struct snd_timer *timer;
411 unsigned long flags;
412 unsigned long resolution = 0; 409 unsigned long resolution = 0;
413 struct snd_timer_instance *ts; 410 struct snd_timer_instance *ts;
414 struct timespec tstamp; 411 struct timespec tstamp;
@@ -432,34 +429,66 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
432 return; 429 return;
433 if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) 430 if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
434 return; 431 return;
435 spin_lock_irqsave(&timer->lock, flags);
436 list_for_each_entry(ts, &ti->slave_active_head, active_list) 432 list_for_each_entry(ts, &ti->slave_active_head, active_list)
437 if (ts->ccallback) 433 if (ts->ccallback)
438 ts->ccallback(ts, event + 100, &tstamp, resolution); 434 ts->ccallback(ts, event + 100, &tstamp, resolution);
439 spin_unlock_irqrestore(&timer->lock, flags);
440} 435}
441 436
442static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri, 437/* start/continue a master timer */
443 unsigned long sticks) 438static int snd_timer_start1(struct snd_timer_instance *timeri,
439 bool start, unsigned long ticks)
444{ 440{
441 struct snd_timer *timer;
442 int result;
443 unsigned long flags;
444
445 timer = timeri->timer;
446 if (!timer)
447 return -EINVAL;
448
449 spin_lock_irqsave(&timer->lock, flags);
450 if (timer->card && timer->card->shutdown) {
451 result = -ENODEV;
452 goto unlock;
453 }
454 if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
455 SNDRV_TIMER_IFLG_START)) {
456 result = -EBUSY;
457 goto unlock;
458 }
459
460 if (start)
461 timeri->ticks = timeri->cticks = ticks;
462 else if (!timeri->cticks)
463 timeri->cticks = 1;
464 timeri->pticks = 0;
465
445 list_move_tail(&timeri->active_list, &timer->active_list_head); 466 list_move_tail(&timeri->active_list, &timer->active_list_head);
446 if (timer->running) { 467 if (timer->running) {
447 if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) 468 if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
448 goto __start_now; 469 goto __start_now;
449 timer->flags |= SNDRV_TIMER_FLG_RESCHED; 470 timer->flags |= SNDRV_TIMER_FLG_RESCHED;
450 timeri->flags |= SNDRV_TIMER_IFLG_START; 471 timeri->flags |= SNDRV_TIMER_IFLG_START;
451 return 1; /* delayed start */ 472 result = 1; /* delayed start */
452 } else { 473 } else {
453 timer->sticks = sticks; 474 if (start)
475 timer->sticks = ticks;
454 timer->hw.start(timer); 476 timer->hw.start(timer);
455 __start_now: 477 __start_now:
456 timer->running++; 478 timer->running++;
457 timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; 479 timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
458 return 0; 480 result = 0;
459 } 481 }
482 snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
483 SNDRV_TIMER_EVENT_CONTINUE);
484 unlock:
485 spin_unlock_irqrestore(&timer->lock, flags);
486 return result;
460} 487}
461 488
462static int snd_timer_start_slave(struct snd_timer_instance *timeri) 489/* start/continue a slave timer */
490static int snd_timer_start_slave(struct snd_timer_instance *timeri,
491 bool start)
463{ 492{
464 unsigned long flags; 493 unsigned long flags;
465 494
@@ -473,88 +502,37 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
473 spin_lock(&timeri->timer->lock); 502 spin_lock(&timeri->timer->lock);
474 list_add_tail(&timeri->active_list, 503 list_add_tail(&timeri->active_list,
475 &timeri->master->slave_active_head); 504 &timeri->master->slave_active_head);
505 snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
506 SNDRV_TIMER_EVENT_CONTINUE);
476 spin_unlock(&timeri->timer->lock); 507 spin_unlock(&timeri->timer->lock);
477 } 508 }
478 spin_unlock_irqrestore(&slave_active_lock, flags); 509 spin_unlock_irqrestore(&slave_active_lock, flags);
479 return 1; /* delayed start */ 510 return 1; /* delayed start */
480} 511}
481 512
482/* 513/* stop/pause a master timer */
483 * start the timer instance 514static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
484 */
485int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
486{ 515{
487 struct snd_timer *timer; 516 struct snd_timer *timer;
488 int result = -EINVAL; 517 int result = 0;
489 unsigned long flags; 518 unsigned long flags;
490 519
491 if (timeri == NULL || ticks < 1)
492 return -EINVAL;
493 if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
494 result = snd_timer_start_slave(timeri);
495 if (result >= 0)
496 snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
497 return result;
498 }
499 timer = timeri->timer;
500 if (timer == NULL)
501 return -EINVAL;
502 if (timer->card && timer->card->shutdown)
503 return -ENODEV;
504 spin_lock_irqsave(&timer->lock, flags);
505 if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
506 SNDRV_TIMER_IFLG_START)) {
507 result = -EBUSY;
508 goto unlock;
509 }
510 timeri->ticks = timeri->cticks = ticks;
511 timeri->pticks = 0;
512 result = snd_timer_start1(timer, timeri, ticks);
513 unlock:
514 spin_unlock_irqrestore(&timer->lock, flags);
515 if (result >= 0)
516 snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
517 return result;
518}
519
520static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
521{
522 struct snd_timer *timer;
523 unsigned long flags;
524
525 if (snd_BUG_ON(!timeri))
526 return -ENXIO;
527
528 if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
529 spin_lock_irqsave(&slave_active_lock, flags);
530 if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
531 spin_unlock_irqrestore(&slave_active_lock, flags);
532 return -EBUSY;
533 }
534 if (timeri->timer)
535 spin_lock(&timeri->timer->lock);
536 timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
537 list_del_init(&timeri->ack_list);
538 list_del_init(&timeri->active_list);
539 if (timeri->timer)
540 spin_unlock(&timeri->timer->lock);
541 spin_unlock_irqrestore(&slave_active_lock, flags);
542 goto __end;
543 }
544 timer = timeri->timer; 520 timer = timeri->timer;
545 if (!timer) 521 if (!timer)
546 return -EINVAL; 522 return -EINVAL;
547 spin_lock_irqsave(&timer->lock, flags); 523 spin_lock_irqsave(&timer->lock, flags);
548 if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING | 524 if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
549 SNDRV_TIMER_IFLG_START))) { 525 SNDRV_TIMER_IFLG_START))) {
550 spin_unlock_irqrestore(&timer->lock, flags); 526 result = -EBUSY;
551 return -EBUSY; 527 goto unlock;
552 } 528 }
553 list_del_init(&timeri->ack_list); 529 list_del_init(&timeri->ack_list);
554 list_del_init(&timeri->active_list); 530 list_del_init(&timeri->active_list);
555 if (timer->card && timer->card->shutdown) { 531 if (timer->card && timer->card->shutdown)
556 spin_unlock_irqrestore(&timer->lock, flags); 532 goto unlock;
557 return 0; 533 if (stop) {
534 timeri->cticks = timeri->ticks;
535 timeri->pticks = 0;
558 } 536 }
559 if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) && 537 if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
560 !(--timer->running)) { 538 !(--timer->running)) {
@@ -569,35 +547,60 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
569 } 547 }
570 } 548 }
571 timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START); 549 timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
550 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
551 SNDRV_TIMER_EVENT_PAUSE);
552 unlock:
572 spin_unlock_irqrestore(&timer->lock, flags); 553 spin_unlock_irqrestore(&timer->lock, flags);
573 __end: 554 return result;
574 if (event != SNDRV_TIMER_EVENT_RESOLUTION) 555}
575 snd_timer_notify1(timeri, event); 556
557/* stop/pause a slave timer */
558static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
559{
560 unsigned long flags;
561
562 spin_lock_irqsave(&slave_active_lock, flags);
563 if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
564 spin_unlock_irqrestore(&slave_active_lock, flags);
565 return -EBUSY;
566 }
567 timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
568 if (timeri->timer) {
569 spin_lock(&timeri->timer->lock);
570 list_del_init(&timeri->ack_list);
571 list_del_init(&timeri->active_list);
572 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
573 SNDRV_TIMER_EVENT_PAUSE);
574 spin_unlock(&timeri->timer->lock);
575 }
576 spin_unlock_irqrestore(&slave_active_lock, flags);
576 return 0; 577 return 0;
577} 578}
578 579
579/* 580/*
581 * start the timer instance
582 */
583int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
584{
585 if (timeri == NULL || ticks < 1)
586 return -EINVAL;
587 if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
588 return snd_timer_start_slave(timeri, true);
589 else
590 return snd_timer_start1(timeri, true, ticks);
591}
592
593/*
580 * stop the timer instance. 594 * stop the timer instance.
581 * 595 *
582 * do not call this from the timer callback! 596 * do not call this from the timer callback!
583 */ 597 */
584int snd_timer_stop(struct snd_timer_instance *timeri) 598int snd_timer_stop(struct snd_timer_instance *timeri)
585{ 599{
586 struct snd_timer *timer; 600 if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
587 unsigned long flags; 601 return snd_timer_stop_slave(timeri, true);
588 int err; 602 else
589 603 return snd_timer_stop1(timeri, true);
590 err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
591 if (err < 0)
592 return err;
593 timer = timeri->timer;
594 if (!timer)
595 return -EINVAL;
596 spin_lock_irqsave(&timer->lock, flags);
597 timeri->cticks = timeri->ticks;
598 timeri->pticks = 0;
599 spin_unlock_irqrestore(&timer->lock, flags);
600 return 0;
601} 604}
602 605
603/* 606/*
@@ -605,32 +608,10 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
605 */ 608 */
606int snd_timer_continue(struct snd_timer_instance *timeri) 609int snd_timer_continue(struct snd_timer_instance *timeri)
607{ 610{
608 struct snd_timer *timer;
609 int result = -EINVAL;
610 unsigned long flags;
611
612 if (timeri == NULL)
613 return result;
614 if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) 611 if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
615 return snd_timer_start_slave(timeri); 612 return snd_timer_start_slave(timeri, false);
616 timer = timeri->timer; 613 else
617 if (! timer) 614 return snd_timer_start1(timeri, false, 0);
618 return -EINVAL;
619 if (timer->card && timer->card->shutdown)
620 return -ENODEV;
621 spin_lock_irqsave(&timer->lock, flags);
622 if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
623 result = -EBUSY;
624 goto unlock;
625 }
626 if (!timeri->cticks)
627 timeri->cticks = 1;
628 timeri->pticks = 0;
629 result = snd_timer_start1(timer, timeri, timer->sticks);
630 unlock:
631 spin_unlock_irqrestore(&timer->lock, flags);
632 snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
633 return result;
634} 615}
635 616
636/* 617/*
@@ -638,7 +619,10 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
638 */ 619 */
639int snd_timer_pause(struct snd_timer_instance * timeri) 620int snd_timer_pause(struct snd_timer_instance * timeri)
640{ 621{
641 return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE); 622 if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
623 return snd_timer_stop_slave(timeri, false);
624 else
625 return snd_timer_stop1(timeri, false);
642} 626}
643 627
644/* 628/*
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 6c58e6f73a01..7c6ef879c520 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -68,10 +68,13 @@ static int slave_update(struct link_slave *slave)
68 return -ENOMEM; 68 return -ENOMEM;
69 uctl->id = slave->slave.id; 69 uctl->id = slave->slave.id;
70 err = slave->slave.get(&slave->slave, uctl); 70 err = slave->slave.get(&slave->slave, uctl);
71 if (err < 0)
72 goto error;
71 for (ch = 0; ch < slave->info.count; ch++) 73 for (ch = 0; ch < slave->info.count; ch++)
72 slave->vals[ch] = uctl->value.integer.value[ch]; 74 slave->vals[ch] = uctl->value.integer.value[ch];
75 error:
73 kfree(uctl); 76 kfree(uctl);
74 return 0; 77 return err < 0 ? err : 0;
75} 78}
76 79
77/* get the slave ctl info and save the initial values */ 80/* get the slave ctl info and save the initial values */
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index cbd20cb8ca11..847f70348d4d 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -192,6 +192,11 @@ static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
192 dpcm->timer.expires = 0; 192 dpcm->timer.expires = 0;
193} 193}
194 194
195static inline void loopback_timer_stop_sync(struct loopback_pcm *dpcm)
196{
197 del_timer_sync(&dpcm->timer);
198}
199
195#define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK) 200#define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
196#define CABLE_VALID_CAPTURE (1 << SNDRV_PCM_STREAM_CAPTURE) 201#define CABLE_VALID_CAPTURE (1 << SNDRV_PCM_STREAM_CAPTURE)
197#define CABLE_VALID_BOTH (CABLE_VALID_PLAYBACK|CABLE_VALID_CAPTURE) 202#define CABLE_VALID_BOTH (CABLE_VALID_PLAYBACK|CABLE_VALID_CAPTURE)
@@ -291,6 +296,8 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
291 cable->pause |= stream; 296 cable->pause |= stream;
292 loopback_timer_stop(dpcm); 297 loopback_timer_stop(dpcm);
293 spin_unlock(&cable->lock); 298 spin_unlock(&cable->lock);
299 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
300 loopback_active_notify(dpcm);
294 break; 301 break;
295 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 302 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
296 case SNDRV_PCM_TRIGGER_RESUME: 303 case SNDRV_PCM_TRIGGER_RESUME:
@@ -299,6 +306,8 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
299 cable->pause &= ~stream; 306 cable->pause &= ~stream;
300 loopback_timer_start(dpcm); 307 loopback_timer_start(dpcm);
301 spin_unlock(&cable->lock); 308 spin_unlock(&cable->lock);
309 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
310 loopback_active_notify(dpcm);
302 break; 311 break;
303 default: 312 default:
304 return -EINVAL; 313 return -EINVAL;
@@ -326,6 +335,8 @@ static int loopback_prepare(struct snd_pcm_substream *substream)
326 struct loopback_cable *cable = dpcm->cable; 335 struct loopback_cable *cable = dpcm->cable;
327 int bps, salign; 336 int bps, salign;
328 337
338 loopback_timer_stop_sync(dpcm);
339
329 salign = (snd_pcm_format_width(runtime->format) * 340 salign = (snd_pcm_format_width(runtime->format) *
330 runtime->channels) / 8; 341 runtime->channels) / 8;
331 bps = salign * runtime->rate; 342 bps = salign * runtime->rate;
@@ -659,7 +670,9 @@ static void free_cable(struct snd_pcm_substream *substream)
659 return; 670 return;
660 if (cable->streams[!substream->stream]) { 671 if (cable->streams[!substream->stream]) {
661 /* other stream is still alive */ 672 /* other stream is still alive */
673 spin_lock_irq(&cable->lock);
662 cable->streams[substream->stream] = NULL; 674 cable->streams[substream->stream] = NULL;
675 spin_unlock_irq(&cable->lock);
663 } else { 676 } else {
664 /* free the cable */ 677 /* free the cable */
665 loopback->cables[substream->number][dev] = NULL; 678 loopback->cables[substream->number][dev] = NULL;
@@ -699,7 +712,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
699 loopback->cables[substream->number][dev] = cable; 712 loopback->cables[substream->number][dev] = cable;
700 } 713 }
701 dpcm->cable = cable; 714 dpcm->cable = cable;
702 cable->streams[substream->stream] = dpcm;
703 715
704 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); 716 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
705 717
@@ -731,6 +743,11 @@ static int loopback_open(struct snd_pcm_substream *substream)
731 runtime->hw = loopback_pcm_hardware; 743 runtime->hw = loopback_pcm_hardware;
732 else 744 else
733 runtime->hw = cable->hw; 745 runtime->hw = cable->hw;
746
747 spin_lock_irq(&cable->lock);
748 cable->streams[substream->stream] = dpcm;
749 spin_unlock_irq(&cable->lock);
750
734 unlock: 751 unlock:
735 if (err < 0) { 752 if (err < 0) {
736 free_cable(substream); 753 free_cable(substream);
@@ -745,7 +762,7 @@ static int loopback_close(struct snd_pcm_substream *substream)
745 struct loopback *loopback = substream->private_data; 762 struct loopback *loopback = substream->private_data;
746 struct loopback_pcm *dpcm = substream->runtime->private_data; 763 struct loopback_pcm *dpcm = substream->runtime->private_data;
747 764
748 loopback_timer_stop(dpcm); 765 loopback_timer_stop_sync(dpcm);
749 mutex_lock(&loopback->cable_lock); 766 mutex_lock(&loopback->cable_lock);
750 free_cable(substream); 767 free_cable(substream);
751 mutex_unlock(&loopback->cable_lock); 768 mutex_unlock(&loopback->cable_lock);
@@ -815,9 +832,11 @@ static int loopback_rate_shift_get(struct snd_kcontrol *kcontrol,
815{ 832{
816 struct loopback *loopback = snd_kcontrol_chip(kcontrol); 833 struct loopback *loopback = snd_kcontrol_chip(kcontrol);
817 834
835 mutex_lock(&loopback->cable_lock);
818 ucontrol->value.integer.value[0] = 836 ucontrol->value.integer.value[0] =
819 loopback->setup[kcontrol->id.subdevice] 837 loopback->setup[kcontrol->id.subdevice]
820 [kcontrol->id.device].rate_shift; 838 [kcontrol->id.device].rate_shift;
839 mutex_unlock(&loopback->cable_lock);
821 return 0; 840 return 0;
822} 841}
823 842
@@ -849,9 +868,11 @@ static int loopback_notify_get(struct snd_kcontrol *kcontrol,
849{ 868{
850 struct loopback *loopback = snd_kcontrol_chip(kcontrol); 869 struct loopback *loopback = snd_kcontrol_chip(kcontrol);
851 870
871 mutex_lock(&loopback->cable_lock);
852 ucontrol->value.integer.value[0] = 872 ucontrol->value.integer.value[0] =
853 loopback->setup[kcontrol->id.subdevice] 873 loopback->setup[kcontrol->id.subdevice]
854 [kcontrol->id.device].notify; 874 [kcontrol->id.device].notify;
875 mutex_unlock(&loopback->cable_lock);
855 return 0; 876 return 0;
856} 877}
857 878
@@ -863,12 +884,14 @@ static int loopback_notify_put(struct snd_kcontrol *kcontrol,
863 int change = 0; 884 int change = 0;
864 885
865 val = ucontrol->value.integer.value[0] ? 1 : 0; 886 val = ucontrol->value.integer.value[0] ? 1 : 0;
887 mutex_lock(&loopback->cable_lock);
866 if (val != loopback->setup[kcontrol->id.subdevice] 888 if (val != loopback->setup[kcontrol->id.subdevice]
867 [kcontrol->id.device].notify) { 889 [kcontrol->id.device].notify) {
868 loopback->setup[kcontrol->id.subdevice] 890 loopback->setup[kcontrol->id.subdevice]
869 [kcontrol->id.device].notify = val; 891 [kcontrol->id.device].notify = val;
870 change = 1; 892 change = 1;
871 } 893 }
894 mutex_unlock(&loopback->cable_lock);
872 return change; 895 return change;
873} 896}
874 897
@@ -876,13 +899,18 @@ static int loopback_active_get(struct snd_kcontrol *kcontrol,
876 struct snd_ctl_elem_value *ucontrol) 899 struct snd_ctl_elem_value *ucontrol)
877{ 900{
878 struct loopback *loopback = snd_kcontrol_chip(kcontrol); 901 struct loopback *loopback = snd_kcontrol_chip(kcontrol);
879 struct loopback_cable *cable = loopback->cables 902 struct loopback_cable *cable;
880 [kcontrol->id.subdevice][kcontrol->id.device ^ 1]; 903
881 unsigned int val = 0; 904 unsigned int val = 0;
882 905
883 if (cable != NULL) 906 mutex_lock(&loopback->cable_lock);
884 val = (cable->running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ? 907 cable = loopback->cables[kcontrol->id.subdevice][kcontrol->id.device ^ 1];
885 1 : 0; 908 if (cable != NULL) {
909 unsigned int running = cable->running ^ cable->pause;
910
911 val = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ? 1 : 0;
912 }
913 mutex_unlock(&loopback->cable_lock);
886 ucontrol->value.integer.value[0] = val; 914 ucontrol->value.integer.value[0] = val;
887 return 0; 915 return 0;
888} 916}
@@ -925,9 +953,11 @@ static int loopback_rate_get(struct snd_kcontrol *kcontrol,
925{ 953{
926 struct loopback *loopback = snd_kcontrol_chip(kcontrol); 954 struct loopback *loopback = snd_kcontrol_chip(kcontrol);
927 955
956 mutex_lock(&loopback->cable_lock);
928 ucontrol->value.integer.value[0] = 957 ucontrol->value.integer.value[0] =
929 loopback->setup[kcontrol->id.subdevice] 958 loopback->setup[kcontrol->id.subdevice]
930 [kcontrol->id.device].rate; 959 [kcontrol->id.device].rate;
960 mutex_unlock(&loopback->cable_lock);
931 return 0; 961 return 0;
932} 962}
933 963
@@ -947,9 +977,11 @@ static int loopback_channels_get(struct snd_kcontrol *kcontrol,
947{ 977{
948 struct loopback *loopback = snd_kcontrol_chip(kcontrol); 978 struct loopback *loopback = snd_kcontrol_chip(kcontrol);
949 979
980 mutex_lock(&loopback->cable_lock);
950 ucontrol->value.integer.value[0] = 981 ucontrol->value.integer.value[0] =
951 loopback->setup[kcontrol->id.subdevice] 982 loopback->setup[kcontrol->id.subdevice]
952 [kcontrol->id.device].channels; 983 [kcontrol->id.device].channels;
984 mutex_unlock(&loopback->cable_lock);
953 return 0; 985 return 0;
954} 986}
955 987
diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
index ddcc1a325a61..42920a243328 100644
--- a/sound/drivers/opl3/opl3_synth.c
+++ b/sound/drivers/opl3/opl3_synth.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/export.h> 23#include <linux/export.h>
24#include <linux/nospec.h>
24#include <sound/opl3.h> 25#include <sound/opl3.h>
25#include <sound/asound_fm.h> 26#include <sound/asound_fm.h>
26 27
@@ -448,7 +449,7 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
448{ 449{
449 unsigned short reg_side; 450 unsigned short reg_side;
450 unsigned char op_offset; 451 unsigned char op_offset;
451 unsigned char voice_offset; 452 unsigned char voice_offset, voice_op;
452 453
453 unsigned short opl3_reg; 454 unsigned short opl3_reg;
454 unsigned char reg_val; 455 unsigned char reg_val;
@@ -473,7 +474,9 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
473 voice_offset = voice->voice - MAX_OPL2_VOICES; 474 voice_offset = voice->voice - MAX_OPL2_VOICES;
474 } 475 }
475 /* Get register offset of operator */ 476 /* Get register offset of operator */
476 op_offset = snd_opl3_regmap[voice_offset][voice->op]; 477 voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES);
478 voice_op = array_index_nospec(voice->op, 4);
479 op_offset = snd_opl3_regmap[voice_offset][voice_op];
477 480
478 reg_val = 0x00; 481 reg_val = 0x00;
479 /* Set amplitude modulation (tremolo) effect */ 482 /* Set amplitude modulation (tremolo) effect */
diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c
index b02a5e8cad44..30e4925bf6b0 100644
--- a/sound/firewire/digi00x/amdtp-dot.c
+++ b/sound/firewire/digi00x/amdtp-dot.c
@@ -28,6 +28,9 @@
28 */ 28 */
29#define MAX_MIDI_RX_BLOCKS 8 29#define MAX_MIDI_RX_BLOCKS 8
30 30
31/* 3 = MAX(DOT_MIDI_IN_PORTS, DOT_MIDI_OUT_PORTS) + 1. */
32#define MAX_MIDI_PORTS 3
33
31/* 34/*
32 * The double-oh-three algorithm was discovered by Robin Gareus and Damien 35 * The double-oh-three algorithm was discovered by Robin Gareus and Damien
33 * Zammit in 2012, with reverse-engineering for Digi 003 Rack. 36 * Zammit in 2012, with reverse-engineering for Digi 003 Rack.
@@ -42,10 +45,8 @@ struct amdtp_dot {
42 unsigned int pcm_channels; 45 unsigned int pcm_channels;
43 struct dot_state state; 46 struct dot_state state;
44 47
45 unsigned int midi_ports; 48 struct snd_rawmidi_substream *midi[MAX_MIDI_PORTS];
46 /* 2 = MAX(DOT_MIDI_IN_PORTS, DOT_MIDI_OUT_PORTS) */ 49 int midi_fifo_used[MAX_MIDI_PORTS];
47 struct snd_rawmidi_substream *midi[2];
48 int midi_fifo_used[2];
49 int midi_fifo_limit; 50 int midi_fifo_limit;
50 51
51 void (*transfer_samples)(struct amdtp_stream *s, 52 void (*transfer_samples)(struct amdtp_stream *s,
@@ -124,8 +125,8 @@ int amdtp_dot_set_parameters(struct amdtp_stream *s, unsigned int rate,
124 return -EBUSY; 125 return -EBUSY;
125 126
126 /* 127 /*
127 * A first data channel is for MIDI conformant data channel, the rest is 128 * A first data channel is for MIDI messages, the rest is Multi Bit
128 * Multi Bit Linear Audio data channel. 129 * Linear Audio data channel.
129 */ 130 */
130 err = amdtp_stream_set_parameters(s, rate, pcm_channels + 1); 131 err = amdtp_stream_set_parameters(s, rate, pcm_channels + 1);
131 if (err < 0) 132 if (err < 0)
@@ -135,11 +136,6 @@ int amdtp_dot_set_parameters(struct amdtp_stream *s, unsigned int rate,
135 136
136 p->pcm_channels = pcm_channels; 137 p->pcm_channels = pcm_channels;
137 138
138 if (s->direction == AMDTP_IN_STREAM)
139 p->midi_ports = DOT_MIDI_IN_PORTS;
140 else
141 p->midi_ports = DOT_MIDI_OUT_PORTS;
142
143 /* 139 /*
144 * We do not know the actual MIDI FIFO size of most devices. Just 140 * We do not know the actual MIDI FIFO size of most devices. Just
145 * assume two bytes, i.e., one byte can be received over the bus while 141 * assume two bytes, i.e., one byte can be received over the bus while
@@ -281,13 +277,25 @@ static void write_midi_messages(struct amdtp_stream *s, __be32 *buffer,
281 b = (u8 *)&buffer[0]; 277 b = (u8 *)&buffer[0];
282 278
283 len = 0; 279 len = 0;
284 if (port < p->midi_ports && 280 if (port < MAX_MIDI_PORTS &&
285 midi_ratelimit_per_packet(s, port) && 281 midi_ratelimit_per_packet(s, port) &&
286 p->midi[port] != NULL) 282 p->midi[port] != NULL)
287 len = snd_rawmidi_transmit(p->midi[port], b + 1, 2); 283 len = snd_rawmidi_transmit(p->midi[port], b + 1, 2);
288 284
289 if (len > 0) { 285 if (len > 0) {
290 b[3] = (0x10 << port) | len; 286 /*
287 * Upper 4 bits of LSB represent port number.
288 * - 0000b: physical MIDI port 1.
289 * - 0010b: physical MIDI port 2.
290 * - 1110b: console MIDI port.
291 */
292 if (port == 2)
293 b[3] = 0xe0;
294 else if (port == 1)
295 b[3] = 0x20;
296 else
297 b[3] = 0x00;
298 b[3] |= len;
291 midi_use_bytes(s, port, len); 299 midi_use_bytes(s, port, len);
292 } else { 300 } else {
293 b[1] = 0; 301 b[1] = 0;
@@ -309,11 +317,22 @@ static void read_midi_messages(struct amdtp_stream *s, __be32 *buffer,
309 317
310 for (f = 0; f < data_blocks; f++) { 318 for (f = 0; f < data_blocks; f++) {
311 b = (u8 *)&buffer[0]; 319 b = (u8 *)&buffer[0];
312 port = b[3] >> 4;
313 len = b[3] & 0x0f;
314 320
315 if (port < p->midi_ports && p->midi[port] && len > 0) 321 len = b[3] & 0x0f;
316 snd_rawmidi_receive(p->midi[port], b + 1, len); 322 if (len > 0) {
323 /*
324 * Upper 4 bits of LSB represent port number.
325 * - 0000b: physical MIDI port 1. Use port 0.
326 * - 1110b: console MIDI port. Use port 2.
327 */
328 if (b[3] >> 4 > 0)
329 port = 2;
330 else
331 port = 0;
332
333 if (port < MAX_MIDI_PORTS && p->midi[port])
334 snd_rawmidi_receive(p->midi[port], b + 1, len);
335 }
317 336
318 buffer += s->data_block_quadlets; 337 buffer += s->data_block_quadlets;
319 } 338 }
@@ -364,7 +383,7 @@ void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port,
364{ 383{
365 struct amdtp_dot *p = s->protocol; 384 struct amdtp_dot *p = s->protocol;
366 385
367 if (port < p->midi_ports) 386 if (port < MAX_MIDI_PORTS)
368 ACCESS_ONCE(p->midi[port]) = midi; 387 ACCESS_ONCE(p->midi[port]) = midi;
369} 388}
370 389
diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
index 7eb617175fde..a31a70dccecf 100644
--- a/sound/pci/asihpi/hpimsginit.c
+++ b/sound/pci/asihpi/hpimsginit.c
@@ -23,6 +23,7 @@
23 23
24#include "hpi_internal.h" 24#include "hpi_internal.h"
25#include "hpimsginit.h" 25#include "hpimsginit.h"
26#include <linux/nospec.h>
26 27
27/* The actual message size for each object type */ 28/* The actual message size for each object type */
28static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT; 29static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;
@@ -39,10 +40,12 @@ static void hpi_init_message(struct hpi_message *phm, u16 object,
39{ 40{
40 u16 size; 41 u16 size;
41 42
42 if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) 43 if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
44 object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
43 size = msg_size[object]; 45 size = msg_size[object];
44 else 46 } else {
45 size = sizeof(*phm); 47 size = sizeof(*phm);
48 }
46 49
47 memset(phm, 0, size); 50 memset(phm, 0, size);
48 phm->size = size; 51 phm->size = size;
@@ -66,10 +69,12 @@ void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
66{ 69{
67 u16 size; 70 u16 size;
68 71
69 if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) 72 if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
73 object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
70 size = res_size[object]; 74 size = res_size[object];
71 else 75 } else {
72 size = sizeof(*phr); 76 size = sizeof(*phr);
77 }
73 78
74 memset(phr, 0, sizeof(*phr)); 79 memset(phr, 0, sizeof(*phr));
75 phr->size = size; 80 phr->size = size;
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index d17937b92331..7a32abbe0cef 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -33,6 +33,7 @@
33#include <linux/stringify.h> 33#include <linux/stringify.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36#include <linux/nospec.h>
36 37
37#ifdef MODULE_FIRMWARE 38#ifdef MODULE_FIRMWARE
38MODULE_FIRMWARE("asihpi/dsp5000.bin"); 39MODULE_FIRMWARE("asihpi/dsp5000.bin");
@@ -182,7 +183,8 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
182 struct hpi_adapter *pa = NULL; 183 struct hpi_adapter *pa = NULL;
183 184
184 if (hm->h.adapter_index < ARRAY_SIZE(adapters)) 185 if (hm->h.adapter_index < ARRAY_SIZE(adapters))
185 pa = &adapters[hm->h.adapter_index]; 186 pa = &adapters[array_index_nospec(hm->h.adapter_index,
187 ARRAY_SIZE(adapters))];
186 188
187 if (!pa || !pa->adapter || !pa->adapter->type) { 189 if (!pa || !pa->adapter || !pa->adapter->type) {
188 hpi_init_response(&hr->r0, hm->h.object, 190 hpi_init_response(&hr->r0, hm->h.object,
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 14a305bd8a98..72e442d86bb1 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1850,7 +1850,9 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device)
1850 if (!kctl) 1850 if (!kctl)
1851 return -ENOMEM; 1851 return -ENOMEM;
1852 kctl->id.device = device; 1852 kctl->id.device = device;
1853 snd_ctl_add(emu->card, kctl); 1853 err = snd_ctl_add(emu->card, kctl);
1854 if (err < 0)
1855 return err;
1854 1856
1855 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024); 1857 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024);
1856 1858
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index 4f1f69be1865..8c778fa33031 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -237,13 +237,13 @@ __found_pages:
237static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr) 237static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
238{ 238{
239 if (addr & ~emu->dma_mask) { 239 if (addr & ~emu->dma_mask) {
240 dev_err(emu->card->dev, 240 dev_err_ratelimited(emu->card->dev,
241 "max memory size is 0x%lx (addr = 0x%lx)!!\n", 241 "max memory size is 0x%lx (addr = 0x%lx)!!\n",
242 emu->dma_mask, (unsigned long)addr); 242 emu->dma_mask, (unsigned long)addr);
243 return 0; 243 return 0;
244 } 244 }
245 if (addr & (EMUPAGESIZE-1)) { 245 if (addr & (EMUPAGESIZE-1)) {
246 dev_err(emu->card->dev, "page is not aligned\n"); 246 dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
247 return 0; 247 return 0;
248 } 248 }
249 return 1; 249 return 1;
@@ -334,7 +334,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
334 else 334 else
335 addr = snd_pcm_sgbuf_get_addr(substream, ofs); 335 addr = snd_pcm_sgbuf_get_addr(substream, ofs);
336 if (! is_valid_page(emu, addr)) { 336 if (! is_valid_page(emu, addr)) {
337 dev_err(emu->card->dev, 337 dev_err_ratelimited(emu->card->dev,
338 "emu: failure page = %d\n", idx); 338 "emu: failure page = %d\n", idx);
339 mutex_unlock(&hdr->block_mutex); 339 mutex_unlock(&hdr->block_mutex);
340 return NULL; 340 return NULL;
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index 1fdd92b6f18f..d6e89a6d0bb9 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1050,11 +1050,19 @@ static int snd_fm801_mixer(struct fm801 *chip)
1050 if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_sec)) < 0) 1050 if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_sec)) < 0)
1051 return err; 1051 return err;
1052 } 1052 }
1053 for (i = 0; i < FM801_CONTROLS; i++) 1053 for (i = 0; i < FM801_CONTROLS; i++) {
1054 snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls[i], chip)); 1054 err = snd_ctl_add(chip->card,
1055 snd_ctl_new1(&snd_fm801_controls[i], chip));
1056 if (err < 0)
1057 return err;
1058 }
1055 if (chip->multichannel) { 1059 if (chip->multichannel) {
1056 for (i = 0; i < FM801_CONTROLS_MULTI; i++) 1060 for (i = 0; i < FM801_CONTROLS_MULTI; i++) {
1057 snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls_multi[i], chip)); 1061 err = snd_ctl_add(chip->card,
1062 snd_ctl_new1(&snd_fm801_controls_multi[i], chip));
1063 if (err < 0)
1064 return err;
1065 }
1058 } 1066 }
1059 return 0; 1067 return 0;
1060} 1068}
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index e94cfd5c69f7..ebec1a1ae543 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -84,7 +84,6 @@ config SND_HDA_PATCH_LOADER
84config SND_HDA_CODEC_REALTEK 84config SND_HDA_CODEC_REALTEK
85 tristate "Build Realtek HD-audio codec support" 85 tristate "Build Realtek HD-audio codec support"
86 select SND_HDA_GENERIC 86 select SND_HDA_GENERIC
87 select INPUT
88 help 87 help
89 Say Y or M here to include Realtek HD-audio codec support in 88 Say Y or M here to include Realtek HD-audio codec support in
90 snd-hda-intel driver, such as ALC880. 89 snd-hda-intel driver, such as ALC880.
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 9c6e10fb479f..273364c39171 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -547,8 +547,10 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
547 return err; 547 return err;
548 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name)); 548 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
549 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL); 549 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
550 if (apcm == NULL) 550 if (apcm == NULL) {
551 snd_device_free(chip->card, pcm);
551 return -ENOMEM; 552 return -ENOMEM;
553 }
552 apcm->chip = chip; 554 apcm->chip = chip;
553 apcm->pcm = pcm; 555 apcm->pcm = pcm;
554 apcm->codec = codec; 556 apcm->codec = codec;
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 57df06e76968..cc009a4a3d1d 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/compat.h> 23#include <linux/compat.h>
24#include <linux/nospec.h>
24#include <sound/core.h> 25#include <sound/core.h>
25#include "hda_codec.h" 26#include "hda_codec.h"
26#include "hda_local.h" 27#include "hda_local.h"
@@ -51,7 +52,16 @@ static int get_wcap_ioctl(struct hda_codec *codec,
51 52
52 if (get_user(verb, &arg->verb)) 53 if (get_user(verb, &arg->verb))
53 return -EFAULT; 54 return -EFAULT;
54 res = get_wcaps(codec, verb >> 24); 55 /* open-code get_wcaps(verb>>24) with nospec */
56 verb >>= 24;
57 if (verb < codec->core.start_nid ||
58 verb >= codec->core.start_nid + codec->core.num_nodes) {
59 res = 0;
60 } else {
61 verb -= codec->core.start_nid;
62 verb = array_index_nospec(verb, codec->core.num_nodes);
63 res = codec->wcaps[verb];
64 }
55 if (put_user(res, &arg->res)) 65 if (put_user(res, &arg->res))
56 return -EFAULT; 66 return -EFAULT;
57 return 0; 67 return 0;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 20512fe32a97..d0b55c866370 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -184,6 +184,10 @@ module_param(power_save, xint, 0644);
184MODULE_PARM_DESC(power_save, "Automatic power-saving timeout " 184MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
185 "(in second, 0 = disable)."); 185 "(in second, 0 = disable).");
186 186
187static bool pm_blacklist = true;
188module_param(pm_blacklist, bool, 0644);
189MODULE_PARM_DESC(pm_blacklist, "Enable power-management blacklist");
190
187/* reset the HD-audio controller in power save mode. 191/* reset the HD-audio controller in power save mode.
188 * this may give more power-saving, but will take longer time to 192 * this may give more power-saving, but will take longer time to
189 * wake up. 193 * wake up.
@@ -1545,7 +1549,8 @@ static void azx_check_snoop_available(struct azx *chip)
1545 */ 1549 */
1546 u8 val; 1550 u8 val;
1547 pci_read_config_byte(chip->pci, 0x42, &val); 1551 pci_read_config_byte(chip->pci, 0x42, &val);
1548 if (!(val & 0x80) && chip->pci->revision == 0x30) 1552 if (!(val & 0x80) && (chip->pci->revision == 0x30 ||
1553 chip->pci->revision == 0x20))
1549 snoop = false; 1554 snoop = false;
1550 } 1555 }
1551 1556
@@ -2055,6 +2060,26 @@ out_free:
2055 return err; 2060 return err;
2056} 2061}
2057 2062
2063#ifdef CONFIG_PM
2064/* On some boards setting power_save to a non 0 value leads to clicking /
2065 * popping sounds when ever we enter/leave powersaving mode. Ideally we would
2066 * figure out how to avoid these sounds, but that is not always feasible.
2067 * So we keep a list of devices where we disable powersaving as its known
2068 * to causes problems on these devices.
2069 */
2070static struct snd_pci_quirk power_save_blacklist[] = {
2071 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2072 SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
2073 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2074 SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
2075 /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
2076 SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
2077 /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
2078 SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
2079 {}
2080};
2081#endif /* CONFIG_PM */
2082
2058/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ 2083/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
2059static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { 2084static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
2060 [AZX_DRIVER_NVIDIA] = 8, 2085 [AZX_DRIVER_NVIDIA] = 8,
@@ -2067,6 +2092,7 @@ static int azx_probe_continue(struct azx *chip)
2067 struct hdac_bus *bus = azx_bus(chip); 2092 struct hdac_bus *bus = azx_bus(chip);
2068 struct pci_dev *pci = chip->pci; 2093 struct pci_dev *pci = chip->pci;
2069 int dev = chip->dev_index; 2094 int dev = chip->dev_index;
2095 int val;
2070 int err; 2096 int err;
2071 2097
2072 hda->probe_continued = 1; 2098 hda->probe_continued = 1;
@@ -2142,7 +2168,21 @@ static int azx_probe_continue(struct azx *chip)
2142 2168
2143 chip->running = 1; 2169 chip->running = 1;
2144 azx_add_card_list(chip); 2170 azx_add_card_list(chip);
2145 snd_hda_set_power_save(&chip->bus, power_save * 1000); 2171
2172 val = power_save;
2173#ifdef CONFIG_PM
2174 if (pm_blacklist) {
2175 const struct snd_pci_quirk *q;
2176
2177 q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
2178 if (q && val) {
2179 dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
2180 q->subvendor, q->subdevice);
2181 val = 0;
2182 }
2183 }
2184#endif /* CONFIG_PM */
2185 snd_hda_set_power_save(&chip->bus, val * 1000);
2146 if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo) 2186 if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
2147 pm_runtime_put_noidle(&pci->dev); 2187 pm_runtime_put_noidle(&pci->dev);
2148 2188
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index c146d0de53d8..c55c0131be0a 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -38,6 +38,10 @@
38/* Enable this to see controls for tuning purpose. */ 38/* Enable this to see controls for tuning purpose. */
39/*#define ENABLE_TUNING_CONTROLS*/ 39/*#define ENABLE_TUNING_CONTROLS*/
40 40
41#ifdef ENABLE_TUNING_CONTROLS
42#include <sound/tlv.h>
43#endif
44
41#define FLOAT_ZERO 0x00000000 45#define FLOAT_ZERO 0x00000000
42#define FLOAT_ONE 0x3f800000 46#define FLOAT_ONE 0x3f800000
43#define FLOAT_TWO 0x40000000 47#define FLOAT_TWO 0x40000000
@@ -1482,6 +1486,9 @@ static int dspio_scp(struct hda_codec *codec,
1482 } else if (ret_size != reply_data_size) { 1486 } else if (ret_size != reply_data_size) {
1483 codec_dbg(codec, "RetLen and HdrLen .NE.\n"); 1487 codec_dbg(codec, "RetLen and HdrLen .NE.\n");
1484 return -EINVAL; 1488 return -EINVAL;
1489 } else if (!reply) {
1490 codec_dbg(codec, "NULL reply\n");
1491 return -EINVAL;
1485 } else { 1492 } else {
1486 *reply_len = ret_size*sizeof(unsigned int); 1493 *reply_len = ret_size*sizeof(unsigned int);
1487 memcpy(reply, scp_reply.data, *reply_len); 1494 memcpy(reply, scp_reply.data, *reply_len);
@@ -3064,8 +3071,8 @@ static int equalizer_ctl_put(struct snd_kcontrol *kcontrol,
3064 return 1; 3071 return 1;
3065} 3072}
3066 3073
3067static const DECLARE_TLV_DB_SCALE(voice_focus_db_scale, 2000, 100, 0); 3074static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(voice_focus_db_scale, 2000, 100, 0);
3068static const DECLARE_TLV_DB_SCALE(eq_db_scale, -2400, 100, 0); 3075static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(eq_db_scale, -2400, 100, 0);
3069 3076
3070static int add_tuning_control(struct hda_codec *codec, 3077static int add_tuning_control(struct hda_codec *codec,
3071 hda_nid_t pnid, hda_nid_t nid, 3078 hda_nid_t pnid, hda_nid_t nid,
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c92b7ba344ef..cb19af145f46 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -849,6 +849,10 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
849 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), 849 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
850 SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), 850 SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
851 SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK), 851 SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
852 SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
853 SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
854 SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
855 SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
852 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), 856 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
853 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), 857 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
854 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), 858 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f14c1f288443..d706a416b587 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
329 break; 329 break;
330 case 0x10ec0225: 330 case 0x10ec0225:
331 case 0x10ec0233: 331 case 0x10ec0233:
332 case 0x10ec0235:
332 case 0x10ec0236: 333 case 0x10ec0236:
333 case 0x10ec0255: 334 case 0x10ec0255:
334 case 0x10ec0256: 335 case 0x10ec0256:
@@ -2446,6 +2447,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
2446 SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110), 2447 SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
2447 SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ), 2448 SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
2448 SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN), 2449 SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
2450 SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270),
2449 SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270), 2451 SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
2450 SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000), 2452 SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
2451 SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ), 2453 SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
@@ -3130,6 +3132,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
3130 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; 3132 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
3131} 3133}
3132 3134
3135static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec,
3136 const struct hda_fixup *fix,
3137 int action)
3138{
3139 unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21);
3140 unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19);
3141
3142 if (cfg_headphone && cfg_headset_mic == 0x411111f0)
3143 snd_hda_codec_set_pincfg(codec, 0x19,
3144 (cfg_headphone & ~AC_DEFCFG_DEVICE) |
3145 (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT));
3146}
3147
3133static void alc269_fixup_hweq(struct hda_codec *codec, 3148static void alc269_fixup_hweq(struct hda_codec *codec,
3134 const struct hda_fixup *fix, int action) 3149 const struct hda_fixup *fix, int action)
3135{ 3150{
@@ -3248,8 +3263,12 @@ static void alc269_fixup_mic_mute_hook(void *private_data, int enabled)
3248 pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid); 3263 pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid);
3249 pinval &= ~AC_PINCTL_VREFEN; 3264 pinval &= ~AC_PINCTL_VREFEN;
3250 pinval |= enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80; 3265 pinval |= enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80;
3251 if (spec->mute_led_nid) 3266 if (spec->mute_led_nid) {
3267 /* temporarily power up/down for setting VREF */
3268 snd_hda_power_up_pm(codec);
3252 snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval); 3269 snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval);
3270 snd_hda_power_down_pm(codec);
3271 }
3253} 3272}
3254 3273
3255/* Make sure the led works even in runtime suspend */ 3274/* Make sure the led works even in runtime suspend */
@@ -3477,6 +3496,7 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
3477 } 3496 }
3478} 3497}
3479 3498
3499#if IS_REACHABLE(INPUT)
3480static void gpio2_mic_hotkey_event(struct hda_codec *codec, 3500static void gpio2_mic_hotkey_event(struct hda_codec *codec,
3481 struct hda_jack_callback *event) 3501 struct hda_jack_callback *event)
3482{ 3502{
@@ -3609,6 +3629,10 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
3609 spec->kb_dev = NULL; 3629 spec->kb_dev = NULL;
3610 } 3630 }
3611} 3631}
3632#else /* INPUT */
3633#define alc280_fixup_hp_gpio2_mic_hotkey NULL
3634#define alc233_fixup_lenovo_line2_mic_hotkey NULL
3635#endif /* INPUT */
3612 3636
3613static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, 3637static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
3614 const struct hda_fixup *fix, int action) 3638 const struct hda_fixup *fix, int action)
@@ -4709,6 +4733,16 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec,
4709 } 4733 }
4710} 4734}
4711 4735
4736/* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */
4737static void alc295_fixup_disable_dac3(struct hda_codec *codec,
4738 const struct hda_fixup *fix, int action)
4739{
4740 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4741 hda_nid_t conn[2] = { 0x02, 0x03 };
4742 snd_hda_override_conn_list(codec, 0x17, 2, conn);
4743 }
4744}
4745
4712/* Hook to update amp GPIO4 for automute */ 4746/* Hook to update amp GPIO4 for automute */
4713static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, 4747static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
4714 struct hda_jack_callback *jack) 4748 struct hda_jack_callback *jack)
@@ -4782,6 +4816,7 @@ enum {
4782 ALC269_FIXUP_LIFEBOOK_EXTMIC, 4816 ALC269_FIXUP_LIFEBOOK_EXTMIC,
4783 ALC269_FIXUP_LIFEBOOK_HP_PIN, 4817 ALC269_FIXUP_LIFEBOOK_HP_PIN,
4784 ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT, 4818 ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
4819 ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC,
4785 ALC269_FIXUP_AMIC, 4820 ALC269_FIXUP_AMIC,
4786 ALC269_FIXUP_DMIC, 4821 ALC269_FIXUP_DMIC,
4787 ALC269VB_FIXUP_AMIC, 4822 ALC269VB_FIXUP_AMIC,
@@ -4857,6 +4892,7 @@ enum {
4857 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, 4892 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
4858 ALC255_FIXUP_DELL_SPK_NOISE, 4893 ALC255_FIXUP_DELL_SPK_NOISE,
4859 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, 4894 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
4895 ALC295_FIXUP_DISABLE_DAC3,
4860 ALC280_FIXUP_HP_HEADSET_MIC, 4896 ALC280_FIXUP_HP_HEADSET_MIC,
4861 ALC221_FIXUP_HP_FRONT_MIC, 4897 ALC221_FIXUP_HP_FRONT_MIC,
4862 ALC292_FIXUP_TPT460, 4898 ALC292_FIXUP_TPT460,
@@ -4972,6 +5008,10 @@ static const struct hda_fixup alc269_fixups[] = {
4972 .type = HDA_FIXUP_FUNC, 5008 .type = HDA_FIXUP_FUNC,
4973 .v.func = alc269_fixup_pincfg_no_hp_to_lineout, 5009 .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
4974 }, 5010 },
5011 [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = {
5012 .type = HDA_FIXUP_FUNC,
5013 .v.func = alc269_fixup_pincfg_U7x7_headset_mic,
5014 },
4975 [ALC269_FIXUP_AMIC] = { 5015 [ALC269_FIXUP_AMIC] = {
4976 .type = HDA_FIXUP_PINS, 5016 .type = HDA_FIXUP_PINS,
4977 .v.pins = (const struct hda_pintbl[]) { 5017 .v.pins = (const struct hda_pintbl[]) {
@@ -5542,6 +5582,10 @@ static const struct hda_fixup alc269_fixups[] = {
5542 .chained = true, 5582 .chained = true,
5543 .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, 5583 .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
5544 }, 5584 },
5585 [ALC295_FIXUP_DISABLE_DAC3] = {
5586 .type = HDA_FIXUP_FUNC,
5587 .v.func = alc295_fixup_disable_dac3,
5588 },
5545 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { 5589 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
5546 .type = HDA_FIXUP_PINS, 5590 .type = HDA_FIXUP_PINS,
5547 .v.pins = (const struct hda_pintbl[]) { 5591 .v.pins = (const struct hda_pintbl[]) {
@@ -5599,6 +5643,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5599 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), 5643 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
5600 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 5644 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
5601 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), 5645 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
5646 SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
5602 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), 5647 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
5603 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 5648 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
5604 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5649 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5687,6 +5732,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5687 SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT), 5732 SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
5688 SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), 5733 SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
5689 SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), 5734 SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
5735 SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
5690 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), 5736 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
5691 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), 5737 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
5692 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC), 5738 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
@@ -5976,6 +6022,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5976 {0x14, 0x90170110}, 6022 {0x14, 0x90170110},
5977 {0x21, 0x02211020}), 6023 {0x21, 0x02211020}),
5978 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 6024 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
6025 {0x12, 0x90a60130},
6026 {0x14, 0x90170110},
6027 {0x14, 0x01011020},
6028 {0x21, 0x0221101f}),
6029 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5979 ALC256_STANDARD_PINS), 6030 ALC256_STANDARD_PINS),
5980 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, 6031 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
5981 {0x12, 0x90a60130}, 6032 {0x12, 0x90a60130},
@@ -6031,6 +6082,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6031 {0x12, 0x90a60120}, 6082 {0x12, 0x90a60120},
6032 {0x14, 0x90170110}, 6083 {0x14, 0x90170110},
6033 {0x21, 0x0321101f}), 6084 {0x21, 0x0321101f}),
6085 SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
6086 {0x12, 0xb7a60130},
6087 {0x14, 0x90170110},
6088 {0x21, 0x04211020}),
6034 SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, 6089 SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
6035 ALC290_STANDARD_PINS, 6090 ALC290_STANDARD_PINS,
6036 {0x15, 0x04211040}, 6091 {0x15, 0x04211040},
@@ -6248,6 +6303,7 @@ static int patch_alc269(struct hda_codec *codec)
6248 case 0x10ec0298: 6303 case 0x10ec0298:
6249 spec->codec_variant = ALC269_TYPE_ALC298; 6304 spec->codec_variant = ALC269_TYPE_ALC298;
6250 break; 6305 break;
6306 case 0x10ec0235:
6251 case 0x10ec0255: 6307 case 0x10ec0255:
6252 spec->codec_variant = ALC269_TYPE_ALC255; 6308 spec->codec_variant = ALC269_TYPE_ALC255;
6253 break; 6309 break;
@@ -6673,6 +6729,7 @@ enum {
6673 ALC668_FIXUP_DELL_DISABLE_AAMIX, 6729 ALC668_FIXUP_DELL_DISABLE_AAMIX,
6674 ALC668_FIXUP_DELL_XPS13, 6730 ALC668_FIXUP_DELL_XPS13,
6675 ALC662_FIXUP_ASUS_Nx50, 6731 ALC662_FIXUP_ASUS_Nx50,
6732 ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
6676 ALC668_FIXUP_ASUS_Nx51, 6733 ALC668_FIXUP_ASUS_Nx51,
6677}; 6734};
6678 6735
@@ -6920,14 +6977,21 @@ static const struct hda_fixup alc662_fixups[] = {
6920 .chained = true, 6977 .chained = true,
6921 .chain_id = ALC662_FIXUP_BASS_1A 6978 .chain_id = ALC662_FIXUP_BASS_1A
6922 }, 6979 },
6980 [ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE] = {
6981 .type = HDA_FIXUP_FUNC,
6982 .v.func = alc_fixup_headset_mode_alc668,
6983 .chain_id = ALC662_FIXUP_BASS_CHMAP
6984 },
6923 [ALC668_FIXUP_ASUS_Nx51] = { 6985 [ALC668_FIXUP_ASUS_Nx51] = {
6924 .type = HDA_FIXUP_PINS, 6986 .type = HDA_FIXUP_PINS,
6925 .v.pins = (const struct hda_pintbl[]) { 6987 .v.pins = (const struct hda_pintbl[]) {
6926 {0x1a, 0x90170151}, /* bass speaker */ 6988 { 0x19, 0x03a1913d }, /* use as headphone mic, without its own jack detect */
6989 { 0x1a, 0x90170151 }, /* bass speaker */
6990 { 0x1b, 0x03a1113c }, /* use as headset mic, without its own jack detect */
6927 {} 6991 {}
6928 }, 6992 },
6929 .chained = true, 6993 .chained = true,
6930 .chain_id = ALC662_FIXUP_BASS_CHMAP, 6994 .chain_id = ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
6931 }, 6995 },
6932}; 6996};
6933 6997
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index a4a999a0317e..1a0c0d16a279 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -137,6 +137,7 @@
137#include <linux/pci.h> 137#include <linux/pci.h>
138#include <linux/math64.h> 138#include <linux/math64.h>
139#include <linux/io.h> 139#include <linux/io.h>
140#include <linux/nospec.h>
140 141
141#include <sound/core.h> 142#include <sound/core.h>
142#include <sound/control.h> 143#include <sound/control.h>
@@ -5692,40 +5693,43 @@ static int snd_hdspm_channel_info(struct snd_pcm_substream *substream,
5692 struct snd_pcm_channel_info *info) 5693 struct snd_pcm_channel_info *info)
5693{ 5694{
5694 struct hdspm *hdspm = snd_pcm_substream_chip(substream); 5695 struct hdspm *hdspm = snd_pcm_substream_chip(substream);
5696 unsigned int channel = info->channel;
5695 5697
5696 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 5698 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
5697 if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) { 5699 if (snd_BUG_ON(channel >= hdspm->max_channels_out)) {
5698 dev_info(hdspm->card->dev, 5700 dev_info(hdspm->card->dev,
5699 "snd_hdspm_channel_info: output channel out of range (%d)\n", 5701 "snd_hdspm_channel_info: output channel out of range (%d)\n",
5700 info->channel); 5702 channel);
5701 return -EINVAL; 5703 return -EINVAL;
5702 } 5704 }
5703 5705
5704 if (hdspm->channel_map_out[info->channel] < 0) { 5706 channel = array_index_nospec(channel, hdspm->max_channels_out);
5707 if (hdspm->channel_map_out[channel] < 0) {
5705 dev_info(hdspm->card->dev, 5708 dev_info(hdspm->card->dev,
5706 "snd_hdspm_channel_info: output channel %d mapped out\n", 5709 "snd_hdspm_channel_info: output channel %d mapped out\n",
5707 info->channel); 5710 channel);
5708 return -EINVAL; 5711 return -EINVAL;
5709 } 5712 }
5710 5713
5711 info->offset = hdspm->channel_map_out[info->channel] * 5714 info->offset = hdspm->channel_map_out[channel] *
5712 HDSPM_CHANNEL_BUFFER_BYTES; 5715 HDSPM_CHANNEL_BUFFER_BYTES;
5713 } else { 5716 } else {
5714 if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) { 5717 if (snd_BUG_ON(channel >= hdspm->max_channels_in)) {
5715 dev_info(hdspm->card->dev, 5718 dev_info(hdspm->card->dev,
5716 "snd_hdspm_channel_info: input channel out of range (%d)\n", 5719 "snd_hdspm_channel_info: input channel out of range (%d)\n",
5717 info->channel); 5720 channel);
5718 return -EINVAL; 5721 return -EINVAL;
5719 } 5722 }
5720 5723
5721 if (hdspm->channel_map_in[info->channel] < 0) { 5724 channel = array_index_nospec(channel, hdspm->max_channels_in);
5725 if (hdspm->channel_map_in[channel] < 0) {
5722 dev_info(hdspm->card->dev, 5726 dev_info(hdspm->card->dev,
5723 "snd_hdspm_channel_info: input channel %d mapped out\n", 5727 "snd_hdspm_channel_info: input channel %d mapped out\n",
5724 info->channel); 5728 channel);
5725 return -EINVAL; 5729 return -EINVAL;
5726 } 5730 }
5727 5731
5728 info->offset = hdspm->channel_map_in[info->channel] * 5732 info->offset = hdspm->channel_map_in[channel] *
5729 HDSPM_CHANNEL_BUFFER_BYTES; 5733 HDSPM_CHANNEL_BUFFER_BYTES;
5730 } 5734 }
5731 5735
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index fdbc0aa2776a..c253bdf92e36 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -26,6 +26,7 @@
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/nospec.h>
29 30
30#include <sound/core.h> 31#include <sound/core.h>
31#include <sound/control.h> 32#include <sound/control.h>
@@ -2036,9 +2037,10 @@ static int snd_rme9652_channel_info(struct snd_pcm_substream *substream,
2036 if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS)) 2037 if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
2037 return -EINVAL; 2038 return -EINVAL;
2038 2039
2039 if ((chn = rme9652->channel_map[info->channel]) < 0) { 2040 chn = rme9652->channel_map[array_index_nospec(info->channel,
2041 RME9652_NCHANNELS)];
2042 if (chn < 0)
2040 return -EINVAL; 2043 return -EINVAL;
2041 }
2042 2044
2043 info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES; 2045 info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
2044 info->first = 0; 2046 info->first = 0;
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index 29a97d52e8ad..66d6c52e7761 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -91,8 +91,8 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
91 do { 91 do {
92 mutex_lock(&ctx->lock); 92 mutex_lock(&ctx->lock);
93 93
94 tmo = 5; 94 tmo = 6;
95 while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--) 95 while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
96 udelay(21); /* wait an ac97 frame time */ 96 udelay(21); /* wait an ac97 frame time */
97 if (!tmo) { 97 if (!tmo) {
98 pr_debug("ac97rd timeout #1\n"); 98 pr_debug("ac97rd timeout #1\n");
@@ -105,7 +105,7 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
105 * poll, Forrest, poll... 105 * poll, Forrest, poll...
106 */ 106 */
107 tmo = 0x10000; 107 tmo = 0x10000;
108 while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--) 108 while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
109 asm volatile ("nop"); 109 asm volatile ("nop");
110 data = RD(ctx, AC97_CMDRESP); 110 data = RD(ctx, AC97_CMDRESP);
111 111
diff --git a/sound/soc/cirrus/edb93xx.c b/sound/soc/cirrus/edb93xx.c
index 85962657aabe..517963ef4847 100644
--- a/sound/soc/cirrus/edb93xx.c
+++ b/sound/soc/cirrus/edb93xx.c
@@ -67,7 +67,7 @@ static struct snd_soc_dai_link edb93xx_dai = {
67 .cpu_dai_name = "ep93xx-i2s", 67 .cpu_dai_name = "ep93xx-i2s",
68 .codec_name = "spi0.0", 68 .codec_name = "spi0.0",
69 .codec_dai_name = "cs4271-hifi", 69 .codec_dai_name = "cs4271-hifi",
70 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF | 70 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
71 SND_SOC_DAIFMT_CBS_CFS, 71 SND_SOC_DAIFMT_CBS_CFS,
72 .ops = &edb93xx_ops, 72 .ops = &edb93xx_ops,
73}; 73};
diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
index 934f8aefdd90..0dc3852c4621 100644
--- a/sound/soc/cirrus/ep93xx-i2s.c
+++ b/sound/soc/cirrus/ep93xx-i2s.c
@@ -51,7 +51,9 @@
51#define EP93XX_I2S_WRDLEN_24 (1 << 0) 51#define EP93XX_I2S_WRDLEN_24 (1 << 0)
52#define EP93XX_I2S_WRDLEN_32 (2 << 0) 52#define EP93XX_I2S_WRDLEN_32 (2 << 0)
53 53
54#define EP93XX_I2S_LINCTRLDATA_R_JUST (1 << 2) /* Right justify */ 54#define EP93XX_I2S_RXLINCTRLDATA_R_JUST BIT(1) /* Right justify */
55
56#define EP93XX_I2S_TXLINCTRLDATA_R_JUST BIT(2) /* Right justify */
55 57
56#define EP93XX_I2S_CLKCFG_LRS (1 << 0) /* lrclk polarity */ 58#define EP93XX_I2S_CLKCFG_LRS (1 << 0) /* lrclk polarity */
57#define EP93XX_I2S_CLKCFG_CKP (1 << 1) /* Bit clock polarity */ 59#define EP93XX_I2S_CLKCFG_CKP (1 << 1) /* Bit clock polarity */
@@ -170,25 +172,25 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
170 unsigned int fmt) 172 unsigned int fmt)
171{ 173{
172 struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(cpu_dai); 174 struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(cpu_dai);
173 unsigned int clk_cfg, lin_ctrl; 175 unsigned int clk_cfg;
176 unsigned int txlin_ctrl = 0;
177 unsigned int rxlin_ctrl = 0;
174 178
175 clk_cfg = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXCLKCFG); 179 clk_cfg = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXCLKCFG);
176 lin_ctrl = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXLINCTRLDATA);
177 180
178 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 181 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
179 case SND_SOC_DAIFMT_I2S: 182 case SND_SOC_DAIFMT_I2S:
180 clk_cfg |= EP93XX_I2S_CLKCFG_REL; 183 clk_cfg |= EP93XX_I2S_CLKCFG_REL;
181 lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
182 break; 184 break;
183 185
184 case SND_SOC_DAIFMT_LEFT_J: 186 case SND_SOC_DAIFMT_LEFT_J:
185 clk_cfg &= ~EP93XX_I2S_CLKCFG_REL; 187 clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
186 lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
187 break; 188 break;
188 189
189 case SND_SOC_DAIFMT_RIGHT_J: 190 case SND_SOC_DAIFMT_RIGHT_J:
190 clk_cfg &= ~EP93XX_I2S_CLKCFG_REL; 191 clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
191 lin_ctrl |= EP93XX_I2S_LINCTRLDATA_R_JUST; 192 rxlin_ctrl |= EP93XX_I2S_RXLINCTRLDATA_R_JUST;
193 txlin_ctrl |= EP93XX_I2S_TXLINCTRLDATA_R_JUST;
192 break; 194 break;
193 195
194 default: 196 default:
@@ -213,32 +215,32 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
213 switch (fmt & SND_SOC_DAIFMT_INV_MASK) { 215 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
214 case SND_SOC_DAIFMT_NB_NF: 216 case SND_SOC_DAIFMT_NB_NF:
215 /* Negative bit clock, lrclk low on left word */ 217 /* Negative bit clock, lrclk low on left word */
216 clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL); 218 clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS);
217 break; 219 break;
218 220
219 case SND_SOC_DAIFMT_NB_IF: 221 case SND_SOC_DAIFMT_NB_IF:
220 /* Negative bit clock, lrclk low on right word */ 222 /* Negative bit clock, lrclk low on right word */
221 clk_cfg &= ~EP93XX_I2S_CLKCFG_CKP; 223 clk_cfg &= ~EP93XX_I2S_CLKCFG_CKP;
222 clk_cfg |= EP93XX_I2S_CLKCFG_REL; 224 clk_cfg |= EP93XX_I2S_CLKCFG_LRS;
223 break; 225 break;
224 226
225 case SND_SOC_DAIFMT_IB_NF: 227 case SND_SOC_DAIFMT_IB_NF:
226 /* Positive bit clock, lrclk low on left word */ 228 /* Positive bit clock, lrclk low on left word */
227 clk_cfg |= EP93XX_I2S_CLKCFG_CKP; 229 clk_cfg |= EP93XX_I2S_CLKCFG_CKP;
228 clk_cfg &= ~EP93XX_I2S_CLKCFG_REL; 230 clk_cfg &= ~EP93XX_I2S_CLKCFG_LRS;
229 break; 231 break;
230 232
231 case SND_SOC_DAIFMT_IB_IF: 233 case SND_SOC_DAIFMT_IB_IF:
232 /* Positive bit clock, lrclk low on right word */ 234 /* Positive bit clock, lrclk low on right word */
233 clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL; 235 clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS;
234 break; 236 break;
235 } 237 }
236 238
237 /* Write new register values */ 239 /* Write new register values */
238 ep93xx_i2s_write_reg(info, EP93XX_I2S_RXCLKCFG, clk_cfg); 240 ep93xx_i2s_write_reg(info, EP93XX_I2S_RXCLKCFG, clk_cfg);
239 ep93xx_i2s_write_reg(info, EP93XX_I2S_TXCLKCFG, clk_cfg); 241 ep93xx_i2s_write_reg(info, EP93XX_I2S_TXCLKCFG, clk_cfg);
240 ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, lin_ctrl); 242 ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, rxlin_ctrl);
241 ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, lin_ctrl); 243 ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, txlin_ctrl);
242 return 0; 244 return 0;
243} 245}
244 246
diff --git a/sound/soc/cirrus/snappercl15.c b/sound/soc/cirrus/snappercl15.c
index 98089df08df6..c6737a573bc0 100644
--- a/sound/soc/cirrus/snappercl15.c
+++ b/sound/soc/cirrus/snappercl15.c
@@ -72,7 +72,7 @@ static struct snd_soc_dai_link snappercl15_dai = {
72 .codec_dai_name = "tlv320aic23-hifi", 72 .codec_dai_name = "tlv320aic23-hifi",
73 .codec_name = "tlv320aic23-codec.0-001a", 73 .codec_name = "tlv320aic23-codec.0-001a",
74 .platform_name = "ep93xx-i2s", 74 .platform_name = "ep93xx-i2s",
75 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF | 75 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
76 SND_SOC_DAIFMT_CBS_CFS, 76 SND_SOC_DAIFMT_CBS_CFS,
77 .ops = &snappercl15_ops, 77 .ops = &snappercl15_ops,
78}; 78};
diff --git a/sound/soc/codecs/pcm512x-spi.c b/sound/soc/codecs/pcm512x-spi.c
index 712ed6598c48..ebdf9bd5a64c 100644
--- a/sound/soc/codecs/pcm512x-spi.c
+++ b/sound/soc/codecs/pcm512x-spi.c
@@ -70,3 +70,7 @@ static struct spi_driver pcm512x_spi_driver = {
70}; 70};
71 71
72module_spi_driver(pcm512x_spi_driver); 72module_spi_driver(pcm512x_spi_driver);
73
74MODULE_DESCRIPTION("ASoC PCM512x codec driver - SPI");
75MODULE_AUTHOR("Mark Brown <broonie@kernel.org>");
76MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 4452fea0b118..bd4998f577a0 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -54,10 +54,17 @@ struct ssm2602_priv {
54 * using 2 wire for device control, so we cache them instead. 54 * using 2 wire for device control, so we cache them instead.
55 * There is no point in caching the reset register 55 * There is no point in caching the reset register
56 */ 56 */
57static const u16 ssm2602_reg[SSM2602_CACHEREGNUM] = { 57static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
58 0x0097, 0x0097, 0x0079, 0x0079, 58 { .reg = 0x00, .def = 0x0097 },
59 0x000a, 0x0008, 0x009f, 0x000a, 59 { .reg = 0x01, .def = 0x0097 },
60 0x0000, 0x0000 60 { .reg = 0x02, .def = 0x0079 },
61 { .reg = 0x03, .def = 0x0079 },
62 { .reg = 0x04, .def = 0x000a },
63 { .reg = 0x05, .def = 0x0008 },
64 { .reg = 0x06, .def = 0x009f },
65 { .reg = 0x07, .def = 0x000a },
66 { .reg = 0x08, .def = 0x0000 },
67 { .reg = 0x09, .def = 0x0000 }
61}; 68};
62 69
63 70
@@ -618,8 +625,8 @@ const struct regmap_config ssm2602_regmap_config = {
618 .volatile_reg = ssm2602_register_volatile, 625 .volatile_reg = ssm2602_register_volatile,
619 626
620 .cache_type = REGCACHE_RBTREE, 627 .cache_type = REGCACHE_RBTREE,
621 .reg_defaults_raw = ssm2602_reg, 628 .reg_defaults = ssm2602_reg,
622 .num_reg_defaults_raw = ARRAY_SIZE(ssm2602_reg), 629 .num_reg_defaults = ARRAY_SIZE(ssm2602_reg),
623}; 630};
624EXPORT_SYMBOL_GPL(ssm2602_regmap_config); 631EXPORT_SYMBOL_GPL(ssm2602_regmap_config);
625 632
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 59f234e51971..e8adead8be00 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -143,6 +143,13 @@ static int fsl_esai_divisor_cal(struct snd_soc_dai *dai, bool tx, u32 ratio,
143 143
144 psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8; 144 psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8;
145 145
146 /* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */
147 if (ratio <= 256) {
148 pm = ratio;
149 fp = 1;
150 goto out;
151 }
152
146 /* Set the max fluctuation -- 0.1% of the max devisor */ 153 /* Set the max fluctuation -- 0.1% of the max devisor */
147 savesub = (psr ? 1 : 8) * 256 * maxfp / 1000; 154 savesub = (psr ? 1 : 8) * 256 * maxfp / 1000;
148 155
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index ba384dee277b..d62695d696c4 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -358,13 +358,19 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
358 snprintf(prop, sizeof(prop), "%scpu", prefix); 358 snprintf(prop, sizeof(prop), "%scpu", prefix);
359 cpu = of_get_child_by_name(node, prop); 359 cpu = of_get_child_by_name(node, prop);
360 360
361 if (!cpu) {
362 ret = -EINVAL;
363 dev_err(dev, "%s: Can't find %s DT node\n", __func__, prop);
364 goto dai_link_of_err;
365 }
366
361 snprintf(prop, sizeof(prop), "%splat", prefix); 367 snprintf(prop, sizeof(prop), "%splat", prefix);
362 plat = of_get_child_by_name(node, prop); 368 plat = of_get_child_by_name(node, prop);
363 369
364 snprintf(prop, sizeof(prop), "%scodec", prefix); 370 snprintf(prop, sizeof(prop), "%scodec", prefix);
365 codec = of_get_child_by_name(node, prop); 371 codec = of_get_child_by_name(node, prop);
366 372
367 if (!cpu || !codec) { 373 if (!codec) {
368 ret = -EINVAL; 374 ret = -EINVAL;
369 dev_err(dev, "%s: Can't find %s DT node\n", __func__, prop); 375 dev_err(dev, "%s: Can't find %s DT node\n", __func__, prop);
370 goto dai_link_of_err; 376 goto dai_link_of_err;
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index d430ef5a4f38..79c29330c56a 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -24,7 +24,6 @@ config SND_SST_IPC_PCI
24config SND_SST_IPC_ACPI 24config SND_SST_IPC_ACPI
25 tristate 25 tristate
26 select SND_SST_IPC 26 select SND_SST_IPC
27 depends on ACPI
28 27
29config SND_SOC_INTEL_SST 28config SND_SOC_INTEL_SST
30 tristate 29 tristate
@@ -91,7 +90,7 @@ config SND_SOC_INTEL_BROADWELL_MACH
91 90
92config SND_SOC_INTEL_BYTCR_RT5640_MACH 91config SND_SOC_INTEL_BYTCR_RT5640_MACH
93 tristate "ASoC Audio DSP Support for MID BYT Platform" 92 tristate "ASoC Audio DSP Support for MID BYT Platform"
94 depends on X86 && I2C 93 depends on X86 && I2C && ACPI
95 select SND_SOC_RT5640 94 select SND_SOC_RT5640
96 select SND_SST_MFLD_PLATFORM 95 select SND_SST_MFLD_PLATFORM
97 select SND_SST_IPC_ACPI 96 select SND_SST_IPC_ACPI
@@ -103,7 +102,7 @@ config SND_SOC_INTEL_BYTCR_RT5640_MACH
103 102
104config SND_SOC_INTEL_CHT_BSW_RT5672_MACH 103config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
105 tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec" 104 tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
106 depends on X86_INTEL_LPSS && I2C 105 depends on X86_INTEL_LPSS && I2C && ACPI
107 select SND_SOC_RT5670 106 select SND_SOC_RT5670
108 select SND_SST_MFLD_PLATFORM 107 select SND_SST_MFLD_PLATFORM
109 select SND_SST_IPC_ACPI 108 select SND_SST_IPC_ACPI
@@ -115,7 +114,7 @@ config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
115 114
116config SND_SOC_INTEL_CHT_BSW_RT5645_MACH 115config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
117 tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5645/5650 codec" 116 tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5645/5650 codec"
118 depends on X86_INTEL_LPSS && I2C 117 depends on X86_INTEL_LPSS && I2C && ACPI
119 select SND_SOC_RT5645 118 select SND_SOC_RT5645
120 select SND_SST_MFLD_PLATFORM 119 select SND_SST_MFLD_PLATFORM
121 select SND_SST_IPC_ACPI 120 select SND_SST_IPC_ACPI
diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
index a74c64c7053c..e83da42a8c03 100644
--- a/sound/soc/intel/atom/sst/sst_stream.c
+++ b/sound/soc/intel/atom/sst/sst_stream.c
@@ -221,7 +221,7 @@ int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx,
221 sst_free_block(sst_drv_ctx, block); 221 sst_free_block(sst_drv_ctx, block);
222out: 222out:
223 test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id); 223 test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id);
224 return 0; 224 return ret;
225} 225}
226 226
227/* 227/*
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 4e2fcf188dd1..01a573a063d1 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -131,23 +131,19 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
131 struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card); 131 struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
132 struct snd_soc_jack *jack = &ctx->jack; 132 struct snd_soc_jack *jack = &ctx->jack;
133 133
134 /** 134 if (ctx->ts3a227e_present) {
135 * TI supports 4 butons headset detection 135 /*
136 * KEY_MEDIA 136 * The jack has already been created in the
137 * KEY_VOICECOMMAND 137 * cht_max98090_headset_init() function.
138 * KEY_VOLUMEUP 138 */
139 * KEY_VOLUMEDOWN 139 snd_soc_jack_notifier_register(jack, &cht_jack_nb);
140 */ 140 return 0;
141 if (ctx->ts3a227e_present) 141 }
142 jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE | 142
143 SND_JACK_BTN_0 | SND_JACK_BTN_1 | 143 jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
144 SND_JACK_BTN_2 | SND_JACK_BTN_3;
145 else
146 jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
147 144
148 ret = snd_soc_card_jack_new(runtime->card, "Headset Jack", 145 ret = snd_soc_card_jack_new(runtime->card, "Headset Jack",
149 jack_type, jack, NULL, 0); 146 jack_type, jack, NULL, 0);
150
151 if (ret) { 147 if (ret) {
152 dev_err(runtime->dev, "Headset Jack creation failed %d\n", ret); 148 dev_err(runtime->dev, "Headset Jack creation failed %d\n", ret);
153 return ret; 149 return ret;
@@ -203,6 +199,27 @@ static int cht_max98090_headset_init(struct snd_soc_component *component)
203{ 199{
204 struct snd_soc_card *card = component->card; 200 struct snd_soc_card *card = component->card;
205 struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card); 201 struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
202 struct snd_soc_jack *jack = &ctx->jack;
203 int jack_type;
204 int ret;
205
206 /*
207 * TI supports 4 butons headset detection
208 * KEY_MEDIA
209 * KEY_VOICECOMMAND
210 * KEY_VOLUMEUP
211 * KEY_VOLUMEDOWN
212 */
213 jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
214 SND_JACK_BTN_0 | SND_JACK_BTN_1 |
215 SND_JACK_BTN_2 | SND_JACK_BTN_3;
216
217 ret = snd_soc_card_jack_new(card, "Headset Jack", jack_type,
218 jack, NULL, 0);
219 if (ret) {
220 dev_err(card->dev, "Headset Jack creation failed %d\n", ret);
221 return ret;
222 }
206 223
207 return ts3a227e_enable_jack_detect(component, &ctx->jack); 224 return ts3a227e_enable_jack_detect(component, &ctx->jack);
208} 225}
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 38d65a3529c4..44d560966e9c 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -96,6 +96,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
96 SND_SOC_DAPM_HP("Headphone", NULL), 96 SND_SOC_DAPM_HP("Headphone", NULL),
97 SND_SOC_DAPM_MIC("Headset Mic", NULL), 97 SND_SOC_DAPM_MIC("Headset Mic", NULL),
98 SND_SOC_DAPM_MIC("Int Mic", NULL), 98 SND_SOC_DAPM_MIC("Int Mic", NULL),
99 SND_SOC_DAPM_MIC("Int Analog Mic", NULL),
99 SND_SOC_DAPM_SPK("Ext Spk", NULL), 100 SND_SOC_DAPM_SPK("Ext Spk", NULL),
100 SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, 101 SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
101 platform_clock_control, SND_SOC_DAPM_POST_PMD), 102 platform_clock_control, SND_SOC_DAPM_POST_PMD),
@@ -106,6 +107,8 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
106 {"IN1N", NULL, "Headset Mic"}, 107 {"IN1N", NULL, "Headset Mic"},
107 {"DMIC L1", NULL, "Int Mic"}, 108 {"DMIC L1", NULL, "Int Mic"},
108 {"DMIC R1", NULL, "Int Mic"}, 109 {"DMIC R1", NULL, "Int Mic"},
110 {"IN2P", NULL, "Int Analog Mic"},
111 {"IN2N", NULL, "Int Analog Mic"},
109 {"Headphone", NULL, "HPOL"}, 112 {"Headphone", NULL, "HPOL"},
110 {"Headphone", NULL, "HPOR"}, 113 {"Headphone", NULL, "HPOR"},
111 {"Ext Spk", NULL, "SPOL"}, 114 {"Ext Spk", NULL, "SPOL"},
@@ -119,6 +122,9 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
119 {"Headphone", NULL, "Platform Clock"}, 122 {"Headphone", NULL, "Platform Clock"},
120 {"Headset Mic", NULL, "Platform Clock"}, 123 {"Headset Mic", NULL, "Platform Clock"},
121 {"Int Mic", NULL, "Platform Clock"}, 124 {"Int Mic", NULL, "Platform Clock"},
125 {"Int Analog Mic", NULL, "Platform Clock"},
126 {"Int Analog Mic", NULL, "micbias1"},
127 {"Int Analog Mic", NULL, "micbias2"},
122 {"Ext Spk", NULL, "Platform Clock"}, 128 {"Ext Spk", NULL, "Platform Clock"},
123}; 129};
124 130
@@ -147,6 +153,7 @@ static const struct snd_kcontrol_new cht_mc_controls[] = {
147 SOC_DAPM_PIN_SWITCH("Headphone"), 153 SOC_DAPM_PIN_SWITCH("Headphone"),
148 SOC_DAPM_PIN_SWITCH("Headset Mic"), 154 SOC_DAPM_PIN_SWITCH("Headset Mic"),
149 SOC_DAPM_PIN_SWITCH("Int Mic"), 155 SOC_DAPM_PIN_SWITCH("Int Mic"),
156 SOC_DAPM_PIN_SWITCH("Int Analog Mic"),
150 SOC_DAPM_PIN_SWITCH("Ext Spk"), 157 SOC_DAPM_PIN_SWITCH("Ext Spk"),
151}; 158};
152 159
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index 1636a1eeb002..be1b69c63bdf 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -260,7 +260,6 @@ int sst_dma_new(struct sst_dsp *sst)
260 struct sst_pdata *sst_pdata = sst->pdata; 260 struct sst_pdata *sst_pdata = sst->pdata;
261 struct sst_dma *dma; 261 struct sst_dma *dma;
262 struct resource mem; 262 struct resource mem;
263 const char *dma_dev_name;
264 int ret = 0; 263 int ret = 0;
265 264
266 if (sst->pdata->resindex_dma_base == -1) 265 if (sst->pdata->resindex_dma_base == -1)
@@ -271,7 +270,6 @@ int sst_dma_new(struct sst_dsp *sst)
271 * is attached to the ADSP IP. */ 270 * is attached to the ADSP IP. */
272 switch (sst->pdata->dma_engine) { 271 switch (sst->pdata->dma_engine) {
273 case SST_DMA_TYPE_DW: 272 case SST_DMA_TYPE_DW:
274 dma_dev_name = "dw_dmac";
275 break; 273 break;
276 default: 274 default:
277 dev_err(sst->dev, "error: invalid DMA engine %d\n", 275 dev_err(sst->dev, "error: invalid DMA engine %d\n",
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index b4844f78266f..f6c3be192cc9 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -280,7 +280,7 @@ static int probe_codec(struct hdac_ext_bus *ebus, int addr)
280 struct hdac_bus *bus = ebus_to_hbus(ebus); 280 struct hdac_bus *bus = ebus_to_hbus(ebus);
281 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) | 281 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
282 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID; 282 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
283 unsigned int res; 283 unsigned int res = -1;
284 284
285 mutex_lock(&bus->cmd_mutex); 285 mutex_lock(&bus->cmd_mutex);
286 snd_hdac_bus_send_cmd(bus, cmd); 286 snd_hdac_bus_send_cmd(bus, cmd);
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 15c04e2eae34..976967675387 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -9,7 +9,7 @@ config SND_SOC_MEDIATEK
9 9
10config SND_SOC_MT8173_MAX98090 10config SND_SOC_MT8173_MAX98090
11 tristate "ASoC Audio driver for MT8173 with MAX98090 codec" 11 tristate "ASoC Audio driver for MT8173 with MAX98090 codec"
12 depends on SND_SOC_MEDIATEK 12 depends on SND_SOC_MEDIATEK && I2C
13 select SND_SOC_MAX98090 13 select SND_SOC_MAX98090
14 help 14 help
15 This adds ASoC driver for Mediatek MT8173 boards 15 This adds ASoC driver for Mediatek MT8173 boards
@@ -19,7 +19,7 @@ config SND_SOC_MT8173_MAX98090
19 19
20config SND_SOC_MT8173_RT5650_RT5676 20config SND_SOC_MT8173_RT5650_RT5676
21 tristate "ASoC Audio driver for MT8173 with RT5650 RT5676 codecs" 21 tristate "ASoC Audio driver for MT8173 with RT5650 RT5676 codecs"
22 depends on SND_SOC_MEDIATEK 22 depends on SND_SOC_MEDIATEK && I2C
23 select SND_SOC_RT5645 23 select SND_SOC_RT5645
24 select SND_SOC_RT5677 24 select SND_SOC_RT5677
25 help 25 help
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index b6615affe571..fde974d52bb2 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -67,7 +67,7 @@ static unsigned short nuc900_ac97_read(struct snd_ac97 *ac97,
67 67
68 /* polling the AC_R_FINISH */ 68 /* polling the AC_R_FINISH */
69 while (!(AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_R_FINISH) 69 while (!(AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_R_FINISH)
70 && timeout--) 70 && --timeout)
71 mdelay(1); 71 mdelay(1);
72 72
73 if (!timeout) { 73 if (!timeout) {
@@ -121,7 +121,7 @@ static void nuc900_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
121 121
122 /* polling the AC_W_FINISH */ 122 /* polling the AC_W_FINISH */
123 while ((AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_W_FINISH) 123 while ((AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_W_FINISH)
124 && timeout--) 124 && --timeout)
125 mdelay(1); 125 mdelay(1);
126 126
127 if (!timeout) 127 if (!timeout)
diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c
index 6147e86e9b0f..55ca9c9364b8 100644
--- a/sound/soc/pxa/brownstone.c
+++ b/sound/soc/pxa/brownstone.c
@@ -136,3 +136,4 @@ module_platform_driver(mmp_driver);
136MODULE_AUTHOR("Leo Yan <leoy@marvell.com>"); 136MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
137MODULE_DESCRIPTION("ALSA SoC Brownstone"); 137MODULE_DESCRIPTION("ALSA SoC Brownstone");
138MODULE_LICENSE("GPL"); 138MODULE_LICENSE("GPL");
139MODULE_ALIAS("platform:brownstone-audio");
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 29bc60e85e92..6cd28f95d548 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -203,3 +203,4 @@ module_platform_driver(mioa701_wm9713_driver);
203MODULE_AUTHOR("Robert Jarzmik (rjarzmik@free.fr)"); 203MODULE_AUTHOR("Robert Jarzmik (rjarzmik@free.fr)");
204MODULE_DESCRIPTION("ALSA SoC WM9713 MIO A701"); 204MODULE_DESCRIPTION("ALSA SoC WM9713 MIO A701");
205MODULE_LICENSE("GPL"); 205MODULE_LICENSE("GPL");
206MODULE_ALIAS("platform:mioa701-wm9713");
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 51e790d006f5..96df9b2d8fc4 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -248,3 +248,4 @@ module_platform_driver(mmp_pcm_driver);
248MODULE_AUTHOR("Leo Yan <leoy@marvell.com>"); 248MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
249MODULE_DESCRIPTION("MMP Soc Audio DMA module"); 249MODULE_DESCRIPTION("MMP Soc Audio DMA module");
250MODULE_LICENSE("GPL"); 250MODULE_LICENSE("GPL");
251MODULE_ALIAS("platform:mmp-pcm-audio");
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index eca60c29791a..ca8b23f8c525 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -482,3 +482,4 @@ module_platform_driver(asoc_mmp_sspa_driver);
482MODULE_AUTHOR("Leo Yan <leoy@marvell.com>"); 482MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
483MODULE_DESCRIPTION("MMP SSPA SoC Interface"); 483MODULE_DESCRIPTION("MMP SSPA SoC Interface");
484MODULE_LICENSE("GPL"); 484MODULE_LICENSE("GPL");
485MODULE_ALIAS("platform:mmp-sspa-dai");
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 4e74d9573f03..bcc81e920a67 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -161,3 +161,4 @@ module_platform_driver(palm27x_wm9712_driver);
161MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); 161MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
162MODULE_DESCRIPTION("ALSA SoC Palm T|X, T5 and LifeDrive"); 162MODULE_DESCRIPTION("ALSA SoC Palm T|X, T5 and LifeDrive");
163MODULE_LICENSE("GPL"); 163MODULE_LICENSE("GPL");
164MODULE_ALIAS("platform:palm27x-asoc");
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index da03fad1b9cd..3cad990dad2c 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -833,3 +833,4 @@ module_platform_driver(asoc_ssp_driver);
833MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); 833MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
834MODULE_DESCRIPTION("PXA SSP/PCM SoC Interface"); 834MODULE_DESCRIPTION("PXA SSP/PCM SoC Interface");
835MODULE_LICENSE("GPL"); 835MODULE_LICENSE("GPL");
836MODULE_ALIAS("platform:pxa-ssp-dai");
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index f3de615aacd7..9615e6de1306 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -287,3 +287,4 @@ module_platform_driver(pxa2xx_ac97_driver);
287MODULE_AUTHOR("Nicolas Pitre"); 287MODULE_AUTHOR("Nicolas Pitre");
288MODULE_DESCRIPTION("AC97 driver for the Intel PXA2xx chip"); 288MODULE_DESCRIPTION("AC97 driver for the Intel PXA2xx chip");
289MODULE_LICENSE("GPL"); 289MODULE_LICENSE("GPL");
290MODULE_ALIAS("platform:pxa2xx-ac97");
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 9f390398d518..410d48b93031 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -117,3 +117,4 @@ module_platform_driver(pxa_pcm_driver);
117MODULE_AUTHOR("Nicolas Pitre"); 117MODULE_AUTHOR("Nicolas Pitre");
118MODULE_DESCRIPTION("Intel PXA2xx PCM DMA module"); 118MODULE_DESCRIPTION("Intel PXA2xx PCM DMA module");
119MODULE_LICENSE("GPL"); 119MODULE_LICENSE("GPL");
120MODULE_ALIAS("platform:pxa-pcm-audio");
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index 5a806da89f42..5e2eb4cc5cf1 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -54,7 +54,7 @@ static const struct of_device_id rk_spdif_match[] = {
54}; 54};
55MODULE_DEVICE_TABLE(of, rk_spdif_match); 55MODULE_DEVICE_TABLE(of, rk_spdif_match);
56 56
57static int rk_spdif_runtime_suspend(struct device *dev) 57static int __maybe_unused rk_spdif_runtime_suspend(struct device *dev)
58{ 58{
59 struct rk_spdif_dev *spdif = dev_get_drvdata(dev); 59 struct rk_spdif_dev *spdif = dev_get_drvdata(dev);
60 60
@@ -64,7 +64,7 @@ static int rk_spdif_runtime_suspend(struct device *dev)
64 return 0; 64 return 0;
65} 65}
66 66
67static int rk_spdif_runtime_resume(struct device *dev) 67static int __maybe_unused rk_spdif_runtime_resume(struct device *dev)
68{ 68{
69 struct rk_spdif_dev *spdif = dev_get_drvdata(dev); 69 struct rk_spdif_dev *spdif = dev_get_drvdata(dev);
70 int ret; 70 int ret;
@@ -316,26 +316,30 @@ static int rk_spdif_probe(struct platform_device *pdev)
316 spdif->mclk = devm_clk_get(&pdev->dev, "mclk"); 316 spdif->mclk = devm_clk_get(&pdev->dev, "mclk");
317 if (IS_ERR(spdif->mclk)) { 317 if (IS_ERR(spdif->mclk)) {
318 dev_err(&pdev->dev, "Can't retrieve rk_spdif master clock\n"); 318 dev_err(&pdev->dev, "Can't retrieve rk_spdif master clock\n");
319 return PTR_ERR(spdif->mclk); 319 ret = PTR_ERR(spdif->mclk);
320 goto err_disable_hclk;
320 } 321 }
321 322
322 ret = clk_prepare_enable(spdif->mclk); 323 ret = clk_prepare_enable(spdif->mclk);
323 if (ret) { 324 if (ret) {
324 dev_err(spdif->dev, "clock enable failed %d\n", ret); 325 dev_err(spdif->dev, "clock enable failed %d\n", ret);
325 return ret; 326 goto err_disable_clocks;
326 } 327 }
327 328
328 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 329 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
329 regs = devm_ioremap_resource(&pdev->dev, res); 330 regs = devm_ioremap_resource(&pdev->dev, res);
330 if (IS_ERR(regs)) 331 if (IS_ERR(regs)) {
331 return PTR_ERR(regs); 332 ret = PTR_ERR(regs);
333 goto err_disable_clocks;
334 }
332 335
333 spdif->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "hclk", regs, 336 spdif->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "hclk", regs,
334 &rk_spdif_regmap_config); 337 &rk_spdif_regmap_config);
335 if (IS_ERR(spdif->regmap)) { 338 if (IS_ERR(spdif->regmap)) {
336 dev_err(&pdev->dev, 339 dev_err(&pdev->dev,
337 "Failed to initialise managed register map\n"); 340 "Failed to initialise managed register map\n");
338 return PTR_ERR(spdif->regmap); 341 ret = PTR_ERR(spdif->regmap);
342 goto err_disable_clocks;
339 } 343 }
340 344
341 spdif->playback_dma_data.addr = res->start + SPDIF_SMPDR; 345 spdif->playback_dma_data.addr = res->start + SPDIF_SMPDR;
@@ -367,6 +371,10 @@ static int rk_spdif_probe(struct platform_device *pdev)
367 371
368err_pm_runtime: 372err_pm_runtime:
369 pm_runtime_disable(&pdev->dev); 373 pm_runtime_disable(&pdev->dev);
374err_disable_clocks:
375 clk_disable_unprepare(spdif->mclk);
376err_disable_hclk:
377 clk_disable_unprepare(spdif->hclk);
370 378
371 return ret; 379 return ret;
372} 380}
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index fd6e247d9fd8..91bad6731c9d 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -640,8 +640,12 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
640 tmp |= mod_slave; 640 tmp |= mod_slave;
641 break; 641 break;
642 case SND_SOC_DAIFMT_CBS_CFS: 642 case SND_SOC_DAIFMT_CBS_CFS:
643 /* Set default source clock in Master mode */ 643 /*
644 if (i2s->rclk_srcrate == 0) 644 * Set default source clock in Master mode, only when the
645 * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
646 * clock configuration assigned in DT is not overwritten.
647 */
648 if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL)
645 i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0, 649 i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
646 0, SND_SOC_CLOCK_IN); 650 0, SND_SOC_CLOCK_IN);
647 break; 651 break;
@@ -856,6 +860,11 @@ static int config_setup(struct i2s_dai *i2s)
856 return 0; 860 return 0;
857 861
858 if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { 862 if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
863 struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
864
865 if (i2s->rclk_srcrate == 0 && rclksrc && !IS_ERR(rclksrc))
866 i2s->rclk_srcrate = clk_get_rate(rclksrc);
867
859 psr = i2s->rclk_srcrate / i2s->frmclk / rfs; 868 psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
860 writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); 869 writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
861 dev_dbg(&i2s->pdev->dev, 870 dev_dbg(&i2s->pdev->dev,
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 085329878525..5976e3992dd1 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -235,6 +235,7 @@ enum rsnd_mod_type {
235 RSND_MOD_MIX, 235 RSND_MOD_MIX,
236 RSND_MOD_CTU, 236 RSND_MOD_CTU,
237 RSND_MOD_SRC, 237 RSND_MOD_SRC,
238 RSND_MOD_SSIP, /* SSI parent */
238 RSND_MOD_SSI, 239 RSND_MOD_SSI,
239 RSND_MOD_MAX, 240 RSND_MOD_MAX,
240}; 241};
@@ -365,6 +366,7 @@ struct rsnd_dai_stream {
365}; 366};
366#define rsnd_io_to_mod(io, i) ((i) < RSND_MOD_MAX ? (io)->mod[(i)] : NULL) 367#define rsnd_io_to_mod(io, i) ((i) < RSND_MOD_MAX ? (io)->mod[(i)] : NULL)
367#define rsnd_io_to_mod_ssi(io) rsnd_io_to_mod((io), RSND_MOD_SSI) 368#define rsnd_io_to_mod_ssi(io) rsnd_io_to_mod((io), RSND_MOD_SSI)
369#define rsnd_io_to_mod_ssip(io) rsnd_io_to_mod((io), RSND_MOD_SSIP)
368#define rsnd_io_to_mod_src(io) rsnd_io_to_mod((io), RSND_MOD_SRC) 370#define rsnd_io_to_mod_src(io) rsnd_io_to_mod((io), RSND_MOD_SRC)
369#define rsnd_io_to_mod_ctu(io) rsnd_io_to_mod((io), RSND_MOD_CTU) 371#define rsnd_io_to_mod_ctu(io) rsnd_io_to_mod((io), RSND_MOD_CTU)
370#define rsnd_io_to_mod_mix(io) rsnd_io_to_mod((io), RSND_MOD_MIX) 372#define rsnd_io_to_mod_mix(io) rsnd_io_to_mod((io), RSND_MOD_MIX)
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index c62a2947ac14..df79d7c846ea 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -143,6 +143,15 @@ static int rsnd_ssi_master_clk_start(struct rsnd_ssi *ssi,
143 for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) { 143 for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) {
144 144
145 /* 145 /*
146 * It will set SSIWSR.CONT here, but SSICR.CKDV = 000
147 * with it is not allowed. (SSIWSR.WS_MODE with
148 * SSICR.CKDV = 000 is not allowed either).
149 * Skip it. See SSICR.CKDV
150 */
151 if (j == 0)
152 continue;
153
154 /*
146 * this driver is assuming that 155 * this driver is assuming that
147 * system word is 64fs (= 2 x 32bit) 156 * system word is 64fs (= 2 x 32bit)
148 * see rsnd_ssi_init() 157 * see rsnd_ssi_init()
@@ -444,6 +453,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
444 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 453 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
445 u32 *buf = (u32 *)(runtime->dma_area + 454 u32 *buf = (u32 *)(runtime->dma_area +
446 rsnd_dai_pointer_offset(io, 0)); 455 rsnd_dai_pointer_offset(io, 0));
456 int shift = 0;
457
458 switch (runtime->sample_bits) {
459 case 32:
460 shift = 8;
461 break;
462 }
447 463
448 /* 464 /*
449 * 8/16/32 data can be assesse to TDR/RDR register 465 * 8/16/32 data can be assesse to TDR/RDR register
@@ -451,9 +467,9 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
451 * see rsnd_ssi_init() 467 * see rsnd_ssi_init()
452 */ 468 */
453 if (rsnd_io_is_play(io)) 469 if (rsnd_io_is_play(io))
454 rsnd_mod_write(mod, SSITDR, *buf); 470 rsnd_mod_write(mod, SSITDR, (*buf) << shift);
455 else 471 else
456 *buf = rsnd_mod_read(mod, SSIRDR); 472 *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
457 473
458 elapsed = rsnd_dai_pointer_update(io, sizeof(*buf)); 474 elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
459 } 475 }
@@ -550,11 +566,16 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
550 struct rsnd_priv *priv) 566 struct rsnd_priv *priv)
551{ 567{
552 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); 568 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
569 struct rsnd_mod *pure_ssi_mod = rsnd_io_to_mod_ssi(io);
553 struct device *dev = rsnd_priv_to_dev(priv); 570 struct device *dev = rsnd_priv_to_dev(priv);
554 int irq = ssi->info->irq; 571 int irq = ssi->info->irq;
555 572
556 rsnd_dma_quit(io, rsnd_mod_to_dma(mod)); 573 rsnd_dma_quit(io, rsnd_mod_to_dma(mod));
557 574
575 /* Do nothing if non SSI (= SSI parent, multi SSI) mod */
576 if (pure_ssi_mod != mod)
577 return 0;
578
558 /* PIO will request IRQ again */ 579 /* PIO will request IRQ again */
559 devm_free_irq(dev, irq, mod); 580 devm_free_irq(dev, irq, mod);
560 581
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 6a438a361592..9e784cc3e5d2 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -425,6 +425,8 @@ err_data:
425static void dapm_kcontrol_free(struct snd_kcontrol *kctl) 425static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
426{ 426{
427 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl); 427 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
428
429 list_del(&data->paths);
428 kfree(data->wlist); 430 kfree(data->wlist);
429 kfree(data); 431 kfree(data);
430} 432}
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 977066ba1769..43b80db952d1 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1682,8 +1682,10 @@ int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
1682 continue; 1682 continue;
1683 1683
1684 if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) && 1684 if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
1685 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN)) 1685 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN)) {
1686 continue; 1686 soc_pcm_hw_free(be_substream);
1687 be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
1688 }
1687 1689
1688 dev_dbg(be->dev, "ASoC: close BE %s\n", 1690 dev_dbg(be->dev, "ASoC: close BE %s\n",
1689 dpcm->fe->dai_link->name); 1691 dpcm->fe->dai_link->name);
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index e3f34a86413c..c1e76feb3529 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1188,6 +1188,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
1188 kfree(sm); 1188 kfree(sm);
1189 continue; 1189 continue;
1190 } 1190 }
1191
1192 /* create any TLV data */
1193 soc_tplg_create_tlv(tplg, &kc[i], &mc->hdr);
1191 } 1194 }
1192 return kc; 1195 return kc;
1193 1196
diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
index ba9fc099cf67..503aef8fcde2 100644
--- a/sound/soc/ux500/mop500.c
+++ b/sound/soc/ux500/mop500.c
@@ -164,3 +164,7 @@ static struct platform_driver snd_soc_mop500_driver = {
164}; 164};
165 165
166module_platform_driver(snd_soc_mop500_driver); 166module_platform_driver(snd_soc_mop500_driver);
167
168MODULE_LICENSE("GPL v2");
169MODULE_DESCRIPTION("ASoC MOP500 board driver");
170MODULE_AUTHOR("Ola Lilja");
diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c
index f12c01dddc8d..d35ba7700f46 100644
--- a/sound/soc/ux500/ux500_pcm.c
+++ b/sound/soc/ux500/ux500_pcm.c
@@ -165,3 +165,8 @@ int ux500_pcm_unregister_platform(struct platform_device *pdev)
165 return 0; 165 return 0;
166} 166}
167EXPORT_SYMBOL_GPL(ux500_pcm_unregister_platform); 167EXPORT_SYMBOL_GPL(ux500_pcm_unregister_platform);
168
169MODULE_AUTHOR("Ola Lilja");
170MODULE_AUTHOR("Roger Nilsson");
171MODULE_DESCRIPTION("ASoC UX500 driver");
172MODULE_LICENSE("GPL v2");
diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
index cebea9b7f769..6a9be1df7851 100644
--- a/sound/usb/line6/midi.c
+++ b/sound/usb/line6/midi.c
@@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
125 } 125 }
126 126
127 usb_fill_int_urb(urb, line6->usbdev, 127 usb_fill_int_urb(urb, line6->usbdev,
128 usb_sndbulkpipe(line6->usbdev, 128 usb_sndintpipe(line6->usbdev,
129 line6->properties->ep_ctrl_w), 129 line6->properties->ep_ctrl_w),
130 transfer_buffer, length, midi_sent, line6, 130 transfer_buffer, length, midi_sent, line6,
131 line6->interval); 131 line6->interval);
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index c9ae29068c7c..97d6a18e6956 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -343,17 +343,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
343 int validx, int *value_ret) 343 int validx, int *value_ret)
344{ 344{
345 struct snd_usb_audio *chip = cval->head.mixer->chip; 345 struct snd_usb_audio *chip = cval->head.mixer->chip;
346 unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */ 346 /* enough space for one range */
347 unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)];
347 unsigned char *val; 348 unsigned char *val;
348 int idx = 0, ret, size; 349 int idx = 0, ret, val_size, size;
349 __u8 bRequest; 350 __u8 bRequest;
350 351
352 val_size = uac2_ctl_value_size(cval->val_type);
353
351 if (request == UAC_GET_CUR) { 354 if (request == UAC_GET_CUR) {
352 bRequest = UAC2_CS_CUR; 355 bRequest = UAC2_CS_CUR;
353 size = uac2_ctl_value_size(cval->val_type); 356 size = val_size;
354 } else { 357 } else {
355 bRequest = UAC2_CS_RANGE; 358 bRequest = UAC2_CS_RANGE;
356 size = sizeof(buf); 359 size = sizeof(__u16) + 3 * val_size;
357 } 360 }
358 361
359 memset(buf, 0, sizeof(buf)); 362 memset(buf, 0, sizeof(buf));
@@ -386,16 +389,17 @@ error:
386 val = buf + sizeof(__u16); 389 val = buf + sizeof(__u16);
387 break; 390 break;
388 case UAC_GET_MAX: 391 case UAC_GET_MAX:
389 val = buf + sizeof(__u16) * 2; 392 val = buf + sizeof(__u16) + val_size;
390 break; 393 break;
391 case UAC_GET_RES: 394 case UAC_GET_RES:
392 val = buf + sizeof(__u16) * 3; 395 val = buf + sizeof(__u16) + val_size * 2;
393 break; 396 break;
394 default: 397 default:
395 return -EINVAL; 398 return -EINVAL;
396 } 399 }
397 400
398 *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16))); 401 *value_ret = convert_signed_value(cval,
402 snd_usb_combine_bytes(val, val_size));
399 403
400 return 0; 404 return 0;
401} 405}
@@ -900,6 +904,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
900 } 904 }
901 break; 905 break;
902 906
907 case USB_ID(0x0d8c, 0x0103):
908 if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
909 usb_audio_info(chip,
910 "set volume quirk for CM102-A+/102S+\n");
911 cval->min = -256;
912 }
913 break;
914
903 case USB_ID(0x0471, 0x0101): 915 case USB_ID(0x0471, 0x0101):
904 case USB_ID(0x0471, 0x0104): 916 case USB_ID(0x0471, 0x0104):
905 case USB_ID(0x0471, 0x0105): 917 case USB_ID(0x0471, 0x0105):
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 1f8fb0d904e0..f5cf23ffb35b 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -351,8 +351,11 @@ static struct usbmix_name_map bose_companion5_map[] = {
351/* 351/*
352 * Dell usb dock with ALC4020 codec had a firmware problem where it got 352 * Dell usb dock with ALC4020 codec had a firmware problem where it got
353 * screwed up when zero volume is passed; just skip it as a workaround 353 * screwed up when zero volume is passed; just skip it as a workaround
354 *
355 * Also the extension unit gives an access error, so skip it as well.
354 */ 356 */
355static const struct usbmix_name_map dell_alc4020_map[] = { 357static const struct usbmix_name_map dell_alc4020_map[] = {
358 { 4, NULL }, /* extension unit */
356 { 16, NULL }, 359 { 16, NULL },
357 { 19, NULL }, 360 { 19, NULL },
358 { 0 } 361 { 0 }
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 48afae053c56..a9079654107c 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -348,6 +348,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
348 348
349 alts = &iface->altsetting[1]; 349 alts = &iface->altsetting[1];
350 goto add_sync_ep; 350 goto add_sync_ep;
351 case USB_ID(0x1397, 0x0002):
352 ep = 0x81;
353 iface = usb_ifnum_to_if(dev, 1);
354
355 if (!iface || iface->num_altsetting == 0)
356 return -EINVAL;
357
358 alts = &iface->altsetting[1];
359 goto add_sync_ep;
351 } 360 }
352 if (attr == USB_ENDPOINT_SYNC_ASYNC && 361 if (attr == USB_ENDPOINT_SYNC_ASYNC &&
353 altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC && 362 altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
@@ -1291,7 +1300,7 @@ static void retire_capture_urb(struct snd_usb_substream *subs,
1291 if (bytes % (runtime->sample_bits >> 3) != 0) { 1300 if (bytes % (runtime->sample_bits >> 3) != 0) {
1292 int oldbytes = bytes; 1301 int oldbytes = bytes;
1293 bytes = frames * stride; 1302 bytes = frames * stride;
1294 dev_warn(&subs->dev->dev, 1303 dev_warn_ratelimited(&subs->dev->dev,
1295 "Corrected urb data len. %d->%d\n", 1304 "Corrected urb data len. %d->%d\n",
1296 oldbytes, bytes); 1305 oldbytes, bytes);
1297 } 1306 }
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 8a59d4782a0f..69bf5cf1e91e 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3277,4 +3277,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
3277 } 3277 }
3278}, 3278},
3279 3279
3280{
3281 /*
3282 * Bower's & Wilkins PX headphones only support the 48 kHz sample rate
3283 * even though it advertises more. The capture interface doesn't work
3284 * even on windows.
3285 */
3286 USB_DEVICE(0x19b5, 0x0021),
3287 .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
3288 .ifnum = QUIRK_ANY_INTERFACE,
3289 .type = QUIRK_COMPOSITE,
3290 .data = (const struct snd_usb_audio_quirk[]) {
3291 {
3292 .ifnum = 0,
3293 .type = QUIRK_AUDIO_STANDARD_MIXER,
3294 },
3295 /* Capture */
3296 {
3297 .ifnum = 1,
3298 .type = QUIRK_IGNORE_INTERFACE,
3299 },
3300 /* Playback */
3301 {
3302 .ifnum = 2,
3303 .type = QUIRK_AUDIO_FIXED_ENDPOINT,
3304 .data = &(const struct audioformat) {
3305 .formats = SNDRV_PCM_FMTBIT_S16_LE,
3306 .channels = 2,
3307 .iface = 2,
3308 .altsetting = 1,
3309 .altset_idx = 1,
3310 .attributes = UAC_EP_CS_ATTR_FILL_MAX |
3311 UAC_EP_CS_ATTR_SAMPLE_RATE,
3312 .endpoint = 0x03,
3313 .ep_attr = USB_ENDPOINT_XFER_ISOC,
3314 .rates = SNDRV_PCM_RATE_48000,
3315 .rate_min = 48000,
3316 .rate_max = 48000,
3317 .nr_rates = 1,
3318 .rate_table = (unsigned int[]) {
3319 48000
3320 }
3321 }
3322 },
3323 }
3324 }
3325},
3326
3280#undef USB_DEVICE_VENDOR_SPEC 3327#undef USB_DEVICE_VENDOR_SPEC
diff --git a/tools/arch/x86/include/asm/unistd_32.h b/tools/arch/x86/include/asm/unistd_32.h
new file mode 100644
index 000000000000..cf33ab09273d
--- /dev/null
+++ b/tools/arch/x86/include/asm/unistd_32.h
@@ -0,0 +1,9 @@
1#ifndef __NR_perf_event_open
2# define __NR_perf_event_open 336
3#endif
4#ifndef __NR_futex
5# define __NR_futex 240
6#endif
7#ifndef __NR_gettid
8# define __NR_gettid 224
9#endif
diff --git a/tools/arch/x86/include/asm/unistd_64.h b/tools/arch/x86/include/asm/unistd_64.h
new file mode 100644
index 000000000000..2c9835695b56
--- /dev/null
+++ b/tools/arch/x86/include/asm/unistd_64.h
@@ -0,0 +1,9 @@
1#ifndef __NR_perf_event_open
2# define __NR_perf_event_open 298
3#endif
4#ifndef __NR_futex
5# define __NR_futex 202
6#endif
7#ifndef __NR_gettid
8# define __NR_gettid 186
9#endif
diff --git a/tools/build/Build.include b/tools/build/Build.include
index 4d000bc959b4..0340d8a51dab 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -12,6 +12,7 @@
12# Convenient variables 12# Convenient variables
13comma := , 13comma := ,
14squote := ' 14squote := '
15pound := \#
15 16
16### 17###
17# Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o 18# Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
@@ -43,11 +44,11 @@ echo-cmd = $(if $($(quiet)cmd_$(1)),\
43### 44###
44# Replace >$< with >$$< to preserve $ when reloading the .cmd file 45# Replace >$< with >$$< to preserve $ when reloading the .cmd file
45# (needed for make) 46# (needed for make)
46# Replace >#< with >\#< to avoid starting a comment in the .cmd file 47# Replace >#< with >$(pound)< to avoid starting a comment in the .cmd file
47# (needed for make) 48# (needed for make)
48# Replace >'< with >'\''< to be able to enclose the whole string in '...' 49# Replace >'< with >'\''< to be able to enclose the whole string in '...'
49# (needed for the shell) 50# (needed for the shell)
50make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1))))) 51make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1)))))
51 52
52### 53###
53# Find any prerequisites that is newer than target or that does not exist. 54# Find any prerequisites that is newer than target or that does not exist.
@@ -62,8 +63,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
62 $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \ 63 $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \
63 rm -f $(depfile); \ 64 rm -f $(depfile); \
64 mv -f $(dot-target).tmp $(dot-target).cmd, \ 65 mv -f $(dot-target).tmp $(dot-target).cmd, \
65 printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \ 66 printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
66 printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \ 67 printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd; \
67 cat $(depfile) >> $(dot-target).cmd; \ 68 cat $(depfile) >> $(dot-target).cmd; \
68 printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd) 69 printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
69 70
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index 4a96473b180f..4ffc096eaf5d 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -19,6 +19,16 @@ else
19 Q=@ 19 Q=@
20endif 20endif
21 21
22ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
23ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
24 quiet=silent_
25endif
26else # make-3.8x
27ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
28 quiet=silent_
29endif
30endif
31
22build-dir := $(srctree)/tools/build 32build-dir := $(srctree)/tools/build
23 33
24# Define $(fixdep) for dep-cmd function 34# Define $(fixdep) for dep-cmd function
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e176bad19bcb..ca080a129b33 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -487,6 +487,24 @@ bpf_object__init_maps(struct bpf_object *obj, void *data,
487 return 0; 487 return 0;
488} 488}
489 489
490static bool section_have_execinstr(struct bpf_object *obj, int idx)
491{
492 Elf_Scn *scn;
493 GElf_Shdr sh;
494
495 scn = elf_getscn(obj->efile.elf, idx);
496 if (!scn)
497 return false;
498
499 if (gelf_getshdr(scn, &sh) != &sh)
500 return false;
501
502 if (sh.sh_flags & SHF_EXECINSTR)
503 return true;
504
505 return false;
506}
507
490static int bpf_object__elf_collect(struct bpf_object *obj) 508static int bpf_object__elf_collect(struct bpf_object *obj)
491{ 509{
492 Elf *elf = obj->efile.elf; 510 Elf *elf = obj->efile.elf;
@@ -567,6 +585,14 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
567 } else if (sh.sh_type == SHT_REL) { 585 } else if (sh.sh_type == SHT_REL) {
568 void *reloc = obj->efile.reloc; 586 void *reloc = obj->efile.reloc;
569 int nr_reloc = obj->efile.nr_reloc + 1; 587 int nr_reloc = obj->efile.nr_reloc + 1;
588 int sec = sh.sh_info; /* points to other section */
589
590 /* Only do relo for section with exec instructions */
591 if (!section_have_execinstr(obj, sec)) {
592 pr_debug("skip relo %s(%d) for section(%d)\n",
593 name, idx, sec);
594 continue;
595 }
570 596
571 reloc = realloc(reloc, 597 reloc = realloc(reloc,
572 sizeof(*obj->efile.reloc) * nr_reloc); 598 sizeof(*obj->efile.reloc) * nr_reloc);
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 68276f35e323..6e4a10fe9dd0 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -4905,21 +4905,22 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
4905 else 4905 else
4906 ls = 2; 4906 ls = 2;
4907 4907
4908 if (*(ptr+1) == 'F' || *(ptr+1) == 'f' || 4908 if (isalnum(ptr[1]))
4909 *(ptr+1) == 'S' || *(ptr+1) == 's') {
4910 ptr++; 4909 ptr++;
4910
4911 if (*ptr == 'F' || *ptr == 'f' ||
4912 *ptr == 'S' || *ptr == 's') {
4911 show_func = *ptr; 4913 show_func = *ptr;
4912 } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') { 4914 } else if (*ptr == 'M' || *ptr == 'm') {
4913 print_mac_arg(s, *(ptr+1), data, size, event, arg); 4915 print_mac_arg(s, *ptr, data, size, event, arg);
4914 ptr++;
4915 arg = arg->next; 4916 arg = arg->next;
4916 break; 4917 break;
4917 } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') { 4918 } else if (*ptr == 'I' || *ptr == 'i') {
4918 int n; 4919 int n;
4919 4920
4920 n = print_ip_arg(s, ptr+1, data, size, event, arg); 4921 n = print_ip_arg(s, ptr, data, size, event, arg);
4921 if (n > 0) { 4922 if (n > 0) {
4922 ptr += n; 4923 ptr += n - 1;
4923 arg = arg->next; 4924 arg = arg->next;
4924 break; 4925 break;
4925 } 4926 }
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 88cccea3ca99..64309d73921b 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -1867,17 +1867,25 @@ static const char *get_field_str(struct filter_arg *arg, struct pevent_record *r
1867 struct pevent *pevent; 1867 struct pevent *pevent;
1868 unsigned long long addr; 1868 unsigned long long addr;
1869 const char *val = NULL; 1869 const char *val = NULL;
1870 unsigned int size;
1870 char hex[64]; 1871 char hex[64];
1871 1872
1872 /* If the field is not a string convert it */ 1873 /* If the field is not a string convert it */
1873 if (arg->str.field->flags & FIELD_IS_STRING) { 1874 if (arg->str.field->flags & FIELD_IS_STRING) {
1874 val = record->data + arg->str.field->offset; 1875 val = record->data + arg->str.field->offset;
1876 size = arg->str.field->size;
1877
1878 if (arg->str.field->flags & FIELD_IS_DYNAMIC) {
1879 addr = *(unsigned int *)val;
1880 val = record->data + (addr & 0xffff);
1881 size = addr >> 16;
1882 }
1875 1883
1876 /* 1884 /*
1877 * We need to copy the data since we can't be sure the field 1885 * We need to copy the data since we can't be sure the field
1878 * is null terminated. 1886 * is null terminated.
1879 */ 1887 */
1880 if (*(val + arg->str.field->size - 1)) { 1888 if (*(val + size - 1)) {
1881 /* copy it */ 1889 /* copy it */
1882 memcpy(arg->str.buffer, val, arg->str.field->size); 1890 memcpy(arg->str.buffer, val, arg->str.field->size);
1883 /* the buffer is already NULL terminated */ 1891 /* the buffer is already NULL terminated */
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index b4eb5b679081..73d192f57dc3 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -208,6 +208,47 @@ static const char * const numa_usage[] = {
208 NULL 208 NULL
209}; 209};
210 210
211/*
212 * To get number of numa nodes present.
213 */
214static int nr_numa_nodes(void)
215{
216 int i, nr_nodes = 0;
217
218 for (i = 0; i < g->p.nr_nodes; i++) {
219 if (numa_bitmask_isbitset(numa_nodes_ptr, i))
220 nr_nodes++;
221 }
222
223 return nr_nodes;
224}
225
226/*
227 * To check if given numa node is present.
228 */
229static int is_node_present(int node)
230{
231 return numa_bitmask_isbitset(numa_nodes_ptr, node);
232}
233
234/*
235 * To check given numa node has cpus.
236 */
237static bool node_has_cpus(int node)
238{
239 struct bitmask *cpu = numa_allocate_cpumask();
240 unsigned int i;
241
242 if (cpu && !numa_node_to_cpus(node, cpu)) {
243 for (i = 0; i < cpu->size; i++) {
244 if (numa_bitmask_isbitset(cpu, i))
245 return true;
246 }
247 }
248
249 return false; /* lets fall back to nocpus safely */
250}
251
211static cpu_set_t bind_to_cpu(int target_cpu) 252static cpu_set_t bind_to_cpu(int target_cpu)
212{ 253{
213 cpu_set_t orig_mask, mask; 254 cpu_set_t orig_mask, mask;
@@ -236,12 +277,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)
236 277
237static cpu_set_t bind_to_node(int target_node) 278static cpu_set_t bind_to_node(int target_node)
238{ 279{
239 int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes; 280 int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
240 cpu_set_t orig_mask, mask; 281 cpu_set_t orig_mask, mask;
241 int cpu; 282 int cpu;
242 int ret; 283 int ret;
243 284
244 BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus); 285 BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
245 BUG_ON(!cpus_per_node); 286 BUG_ON(!cpus_per_node);
246 287
247 ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask); 288 ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
@@ -641,7 +682,7 @@ static int parse_setup_node_list(void)
641 int i; 682 int i;
642 683
643 for (i = 0; i < mul; i++) { 684 for (i = 0; i < mul; i++) {
644 if (t >= g->p.nr_tasks) { 685 if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
645 printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node); 686 printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
646 goto out; 687 goto out;
647 } 688 }
@@ -956,6 +997,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
956 sum = 0; 997 sum = 0;
957 998
958 for (node = 0; node < g->p.nr_nodes; node++) { 999 for (node = 0; node < g->p.nr_nodes; node++) {
1000 if (!is_node_present(node))
1001 continue;
959 nr = nodes[node]; 1002 nr = nodes[node];
960 nr_min = min(nr, nr_min); 1003 nr_min = min(nr, nr_min);
961 nr_max = max(nr, nr_max); 1004 nr_max = max(nr, nr_max);
@@ -976,8 +1019,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
976 process_groups = 0; 1019 process_groups = 0;
977 1020
978 for (node = 0; node < g->p.nr_nodes; node++) { 1021 for (node = 0; node < g->p.nr_nodes; node++) {
979 int processes = count_node_processes(node); 1022 int processes;
980 1023
1024 if (!is_node_present(node))
1025 continue;
1026 processes = count_node_processes(node);
981 nr = nodes[node]; 1027 nr = nodes[node];
982 tprintf(" %2d/%-2d", nr, processes); 1028 tprintf(" %2d/%-2d", nr, processes);
983 1029
@@ -1283,7 +1329,7 @@ static void print_summary(void)
1283 1329
1284 printf("\n ###\n"); 1330 printf("\n ###\n");
1285 printf(" # %d %s will execute (on %d nodes, %d CPUs):\n", 1331 printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
1286 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus); 1332 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
1287 printf(" # %5dx %5ldMB global shared mem operations\n", 1333 printf(" # %5dx %5ldMB global shared mem operations\n",
1288 g->p.nr_loops, g->p.bytes_global/1024/1024); 1334 g->p.nr_loops, g->p.bytes_global/1024/1024);
1289 printf(" # %5dx %5ldMB process shared mem operations\n", 1335 printf(" # %5dx %5ldMB process shared mem operations\n",
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 132afc97676c..9d4ac90ca87e 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -405,9 +405,9 @@ static int perf_del_probe_events(struct strfilter *filter)
405 } 405 }
406 406
407 if (ret == -ENOENT && ret2 == -ENOENT) 407 if (ret == -ENOENT && ret2 == -ENOENT)
408 pr_debug("\"%s\" does not hit any event.\n", str); 408 pr_warning("\"%s\" does not hit any event.\n", str);
409 /* Note that this is silently ignored */ 409 else
410 ret = 0; 410 ret = 0;
411 411
412error: 412error:
413 if (kfd >= 0) 413 if (kfd >= 0)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 4a8a02c302d2..47719bde34c6 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -70,6 +70,7 @@
70#include <linux/types.h> 70#include <linux/types.h>
71 71
72static volatile int done; 72static volatile int done;
73static volatile int resize;
73 74
74#define HEADER_LINE_NR 5 75#define HEADER_LINE_NR 5
75 76
@@ -79,10 +80,13 @@ static void perf_top__update_print_entries(struct perf_top *top)
79} 80}
80 81
81static void perf_top__sig_winch(int sig __maybe_unused, 82static void perf_top__sig_winch(int sig __maybe_unused,
82 siginfo_t *info __maybe_unused, void *arg) 83 siginfo_t *info __maybe_unused, void *arg __maybe_unused)
83{ 84{
84 struct perf_top *top = arg; 85 resize = 1;
86}
85 87
88static void perf_top__resize(struct perf_top *top)
89{
86 get_term_dimensions(&top->winsize); 90 get_term_dimensions(&top->winsize);
87 perf_top__update_print_entries(top); 91 perf_top__update_print_entries(top);
88} 92}
@@ -466,7 +470,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
466 .sa_sigaction = perf_top__sig_winch, 470 .sa_sigaction = perf_top__sig_winch,
467 .sa_flags = SA_SIGINFO, 471 .sa_flags = SA_SIGINFO,
468 }; 472 };
469 perf_top__sig_winch(SIGWINCH, NULL, top); 473 perf_top__resize(top);
470 sigaction(SIGWINCH, &act, NULL); 474 sigaction(SIGWINCH, &act, NULL);
471 } else { 475 } else {
472 signal(SIGWINCH, SIG_DFL); 476 signal(SIGWINCH, SIG_DFL);
@@ -1023,6 +1027,11 @@ static int __cmd_top(struct perf_top *top)
1023 1027
1024 if (hits == top->samples) 1028 if (hits == top->samples)
1025 ret = perf_evlist__poll(top->evlist, 100); 1029 ret = perf_evlist__poll(top->evlist, 100);
1030
1031 if (resize) {
1032 perf_top__resize(top);
1033 resize = 0;
1034 }
1026 } 1035 }
1027 1036
1028 ret = 0; 1037 ret = 0;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index ebe7115c751a..da8afc121118 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1152,6 +1152,10 @@ static struct syscall_fmt {
1152 { .name = "mlockall", .errmsg = true, 1152 { .name = "mlockall", .errmsg = true,
1153 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, 1153 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
1154 { .name = "mmap", .hexret = true, 1154 { .name = "mmap", .hexret = true,
1155/* The standard mmap maps to old_mmap on s390x */
1156#if defined(__s390x__)
1157 .alias = "old_mmap",
1158#endif
1155 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ 1159 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
1156 [2] = SCA_MMAP_PROT, /* prot */ 1160 [2] = SCA_MMAP_PROT, /* prot */
1157 [3] = SCA_MMAP_FLAGS, /* flags */ 1161 [3] = SCA_MMAP_FLAGS, /* flags */
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 405c1c1e2975..9a4988cf7b38 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -200,6 +200,7 @@ CFLAGS += -I$(src-perf)/arch/$(ARCH)/include
200CFLAGS += -I$(srctree)/tools/include/ 200CFLAGS += -I$(srctree)/tools/include/
201CFLAGS += -I$(srctree)/arch/$(ARCH)/include/uapi 201CFLAGS += -I$(srctree)/arch/$(ARCH)/include/uapi
202CFLAGS += -I$(srctree)/arch/$(ARCH)/include 202CFLAGS += -I$(srctree)/arch/$(ARCH)/include
203CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/include
203CFLAGS += -I$(srctree)/include/uapi 204CFLAGS += -I$(srctree)/include/uapi
204CFLAGS += -I$(srctree)/include 205CFLAGS += -I$(srctree)/include
205 206
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 83a25cef82fd..5cee8a3d0455 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -11,29 +11,11 @@
11#if defined(__i386__) 11#if defined(__i386__)
12#define cpu_relax() asm volatile("rep; nop" ::: "memory"); 12#define cpu_relax() asm volatile("rep; nop" ::: "memory");
13#define CPUINFO_PROC {"model name"} 13#define CPUINFO_PROC {"model name"}
14#ifndef __NR_perf_event_open
15# define __NR_perf_event_open 336
16#endif
17#ifndef __NR_futex
18# define __NR_futex 240
19#endif
20#ifndef __NR_gettid
21# define __NR_gettid 224
22#endif
23#endif 14#endif
24 15
25#if defined(__x86_64__) 16#if defined(__x86_64__)
26#define cpu_relax() asm volatile("rep; nop" ::: "memory"); 17#define cpu_relax() asm volatile("rep; nop" ::: "memory");
27#define CPUINFO_PROC {"model name"} 18#define CPUINFO_PROC {"model name"}
28#ifndef __NR_perf_event_open
29# define __NR_perf_event_open 298
30#endif
31#ifndef __NR_futex
32# define __NR_futex 202
33#endif
34#ifndef __NR_gettid
35# define __NR_gettid 186
36#endif
37#endif 19#endif
38 20
39#ifdef __powerpc__ 21#ifdef __powerpc__
diff --git a/tools/perf/tests/kmod-path.c b/tools/perf/tests/kmod-path.c
index 08c433b4bf4f..25e80c02230b 100644
--- a/tools/perf/tests/kmod-path.c
+++ b/tools/perf/tests/kmod-path.c
@@ -60,6 +60,7 @@ int test__kmod_path__parse(void)
60 M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_KERNEL, true); 60 M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_KERNEL, true);
61 M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_USER, false); 61 M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_USER, false);
62 62
63#ifdef HAVE_ZLIB_SUPPORT
63 /* path alloc_name alloc_ext kmod comp name ext */ 64 /* path alloc_name alloc_ext kmod comp name ext */
64 T("/xxxx/xxxx/x.ko.gz", true , true , true, true, "[x]", "gz"); 65 T("/xxxx/xxxx/x.ko.gz", true , true , true, true, "[x]", "gz");
65 T("/xxxx/xxxx/x.ko.gz", false , true , true, true, NULL , "gz"); 66 T("/xxxx/xxxx/x.ko.gz", false , true , true, true, NULL , "gz");
@@ -95,6 +96,7 @@ int test__kmod_path__parse(void)
95 M("x.ko.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true); 96 M("x.ko.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
96 M("x.ko.gz", PERF_RECORD_MISC_KERNEL, true); 97 M("x.ko.gz", PERF_RECORD_MISC_KERNEL, true);
97 M("x.ko.gz", PERF_RECORD_MISC_USER, false); 98 M("x.ko.gz", PERF_RECORD_MISC_USER, false);
99#endif
98 100
99 /* path alloc_name alloc_ext kmod comp name ext */ 101 /* path alloc_name alloc_ext kmod comp name ext */
100 T("[test_module]", true , true , true, false, "[test_module]", NULL); 102 T("[test_module]", true , true , true, false, "[test_module]", NULL);
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index d677e018e504..bf907c50fcae 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -126,7 +126,7 @@ int test__vmlinux_matches_kallsyms(void)
126 126
127 if (pair && UM(pair->start) == mem_start) { 127 if (pair && UM(pair->start) == mem_start) {
128next_pair: 128next_pair:
129 if (strcmp(sym->name, pair->name) == 0) { 129 if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
130 /* 130 /*
131 * kallsyms don't have the symbol end, so we 131 * kallsyms don't have the symbol end, so we
132 * set that by using the next symbol start - 1, 132 * set that by using the next symbol start - 1,
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 425df5c86c9c..425597186677 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -249,6 +249,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
249 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) || 249 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
250 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) || 250 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
251 (strncmp(name, "[vdso]", 6) == 0) || 251 (strncmp(name, "[vdso]", 6) == 0) ||
252 (strncmp(name, "[vdso32]", 8) == 0) ||
253 (strncmp(name, "[vdsox32]", 9) == 0) ||
252 (strncmp(name, "[vsyscall]", 10) == 0)) { 254 (strncmp(name, "[vsyscall]", 10) == 0)) {
253 m->kmod = false; 255 m->kmod = false;
254 256
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 26cba64345e3..46af9dde11e2 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -234,8 +234,8 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
234 if (machine__is_default_guest(machine)) 234 if (machine__is_default_guest(machine))
235 return 0; 235 return 0;
236 236
237 snprintf(filename, sizeof(filename), "%s/proc/%d/maps", 237 snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
238 machine->root_dir, pid); 238 machine->root_dir, pid, pid);
239 239
240 fp = fopen(filename, "r"); 240 fp = fopen(filename, "r");
241 if (fp == NULL) { 241 if (fp == NULL) {
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 39a8bd842d0d..4f2dc807471e 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -625,13 +625,13 @@ static void apply_config_terms(struct perf_evsel *evsel,
625 struct perf_evsel_config_term *term; 625 struct perf_evsel_config_term *term;
626 struct list_head *config_terms = &evsel->config_terms; 626 struct list_head *config_terms = &evsel->config_terms;
627 struct perf_event_attr *attr = &evsel->attr; 627 struct perf_event_attr *attr = &evsel->attr;
628 struct callchain_param param; 628 /* callgraph default */
629 struct callchain_param param = {
630 .record_mode = callchain_param.record_mode,
631 };
629 u32 dump_size = 0; 632 u32 dump_size = 0;
630 char *callgraph_buf = NULL; 633 char *callgraph_buf = NULL;
631 634
632 /* callgraph default */
633 param.record_mode = callchain_param.record_mode;
634
635 list_for_each_entry(term, config_terms, list) { 635 list_for_each_entry(term, config_terms, list) {
636 switch (term->type) { 636 switch (term->type) {
637 case PERF_EVSEL__CONFIG_TERM_PERIOD: 637 case PERF_EVSEL__CONFIG_TERM_PERIOD:
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 43838003c1a1..304f5d710143 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1258,8 +1258,16 @@ static int __event_process_build_id(struct build_id_event *bev,
1258 1258
1259 dso__set_build_id(dso, &bev->build_id); 1259 dso__set_build_id(dso, &bev->build_id);
1260 1260
1261 if (!is_kernel_module(filename, cpumode)) 1261 if (dso_type != DSO_TYPE_USER) {
1262 dso->kernel = dso_type; 1262 struct kmod_path m = { .name = NULL, };
1263
1264 if (!kmod_path__parse_name(&m, filename) && m.kmod)
1265 dso__set_short_name(dso, strdup(m.name), true);
1266 else
1267 dso->kernel = dso_type;
1268
1269 free(m.name);
1270 }
1263 1271
1264 build_id__sprintf(dso->build_id, sizeof(dso->build_id), 1272 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1265 sbuild_id); 1273 sbuild_id);
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 4fd37d6708cb..f6720afa9f34 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -720,7 +720,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
720 * cumulated only one time to prevent entries more than 100% 720 * cumulated only one time to prevent entries more than 100%
721 * overhead. 721 * overhead.
722 */ 722 */
723 he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1)); 723 he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
724 if (he_cache == NULL) 724 if (he_cache == NULL)
725 return -ENOMEM; 725 return -ENOMEM;
726 726
@@ -881,8 +881,6 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
881 if (err) 881 if (err)
882 return err; 882 return err;
883 883
884 iter->max_stack = max_stack_depth;
885
886 err = iter->ops->prepare_entry(iter, al); 884 err = iter->ops->prepare_entry(iter, al);
887 if (err) 885 if (err)
888 goto out; 886 goto out;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index a48a2078d288..46b7591acd9c 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -91,7 +91,6 @@ struct hist_entry_iter {
91 int curr; 91 int curr;
92 92
93 bool hide_unresolved; 93 bool hide_unresolved;
94 int max_stack;
95 94
96 struct perf_evsel *evsel; 95 struct perf_evsel *evsel;
97 struct perf_sample *sample; 96 struct perf_sample *sample;
diff --git a/tools/perf/util/include/asm/unistd_32.h b/tools/perf/util/include/asm/unistd_32.h
deleted file mode 100644
index 8b137891791f..000000000000
--- a/tools/perf/util/include/asm/unistd_32.h
+++ /dev/null
@@ -1 +0,0 @@
1
diff --git a/tools/perf/util/include/asm/unistd_64.h b/tools/perf/util/include/asm/unistd_64.h
deleted file mode 100644
index 8b137891791f..000000000000
--- a/tools/perf/util/include/asm/unistd_64.h
+++ /dev/null
@@ -1 +0,0 @@
1
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index eeeae0629ad3..dc17c881275d 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -111,6 +111,7 @@ struct intel_pt_decoder {
111 bool have_cyc; 111 bool have_cyc;
112 bool fixup_last_mtc; 112 bool fixup_last_mtc;
113 bool have_last_ip; 113 bool have_last_ip;
114 enum intel_pt_param_flags flags;
114 uint64_t pos; 115 uint64_t pos;
115 uint64_t last_ip; 116 uint64_t last_ip;
116 uint64_t ip; 117 uint64_t ip;
@@ -213,6 +214,8 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
213 decoder->data = params->data; 214 decoder->data = params->data;
214 decoder->return_compression = params->return_compression; 215 decoder->return_compression = params->return_compression;
215 216
217 decoder->flags = params->flags;
218
216 decoder->period = params->period; 219 decoder->period = params->period;
217 decoder->period_type = params->period_type; 220 decoder->period_type = params->period_type;
218 221
@@ -1010,6 +1013,15 @@ out_no_progress:
1010 return err; 1013 return err;
1011} 1014}
1012 1015
1016static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder,
1017 struct intel_pt_insn *intel_pt_insn,
1018 uint64_t ip, int err)
1019{
1020 return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err &&
1021 intel_pt_insn->branch == INTEL_PT_BR_INDIRECT &&
1022 ip == decoder->ip + intel_pt_insn->length;
1023}
1024
1013static int intel_pt_walk_fup(struct intel_pt_decoder *decoder) 1025static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
1014{ 1026{
1015 struct intel_pt_insn intel_pt_insn; 1027 struct intel_pt_insn intel_pt_insn;
@@ -1022,7 +1034,8 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
1022 err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip); 1034 err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
1023 if (err == INTEL_PT_RETURN) 1035 if (err == INTEL_PT_RETURN)
1024 return 0; 1036 return 0;
1025 if (err == -EAGAIN) { 1037 if (err == -EAGAIN ||
1038 intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
1026 if (decoder->set_fup_tx_flags) { 1039 if (decoder->set_fup_tx_flags) {
1027 decoder->set_fup_tx_flags = false; 1040 decoder->set_fup_tx_flags = false;
1028 decoder->tx_flags = decoder->fup_tx_flags; 1041 decoder->tx_flags = decoder->fup_tx_flags;
@@ -1032,7 +1045,7 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
1032 decoder->state.flags = decoder->fup_tx_flags; 1045 decoder->state.flags = decoder->fup_tx_flags;
1033 return 0; 1046 return 0;
1034 } 1047 }
1035 return err; 1048 return -EAGAIN;
1036 } 1049 }
1037 decoder->set_fup_tx_flags = false; 1050 decoder->set_fup_tx_flags = false;
1038 if (err) 1051 if (err)
@@ -1268,8 +1281,8 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
1268{ 1281{
1269 intel_pt_log("ERROR: Buffer overflow\n"); 1282 intel_pt_log("ERROR: Buffer overflow\n");
1270 intel_pt_clear_tx_flags(decoder); 1283 intel_pt_clear_tx_flags(decoder);
1271 decoder->have_tma = false;
1272 decoder->cbr = 0; 1284 decoder->cbr = 0;
1285 decoder->timestamp_insn_cnt = 0;
1273 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; 1286 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1274 decoder->overflow = true; 1287 decoder->overflow = true;
1275 return -EOVERFLOW; 1288 return -EOVERFLOW;
@@ -1486,14 +1499,18 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
1486 case INTEL_PT_PSB: 1499 case INTEL_PT_PSB:
1487 case INTEL_PT_TSC: 1500 case INTEL_PT_TSC:
1488 case INTEL_PT_TMA: 1501 case INTEL_PT_TMA:
1489 case INTEL_PT_CBR:
1490 case INTEL_PT_MODE_TSX: 1502 case INTEL_PT_MODE_TSX:
1491 case INTEL_PT_BAD: 1503 case INTEL_PT_BAD:
1492 case INTEL_PT_PSBEND: 1504 case INTEL_PT_PSBEND:
1493 intel_pt_log("ERROR: Missing TIP after FUP\n"); 1505 intel_pt_log("ERROR: Missing TIP after FUP\n");
1494 decoder->pkt_state = INTEL_PT_STATE_ERR3; 1506 decoder->pkt_state = INTEL_PT_STATE_ERR3;
1507 decoder->pkt_step = 0;
1495 return -ENOENT; 1508 return -ENOENT;
1496 1509
1510 case INTEL_PT_CBR:
1511 intel_pt_calc_cbr(decoder);
1512 break;
1513
1497 case INTEL_PT_OVF: 1514 case INTEL_PT_OVF:
1498 return intel_pt_overflow(decoder); 1515 return intel_pt_overflow(decoder);
1499 1516
@@ -2152,14 +2169,6 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
2152 return &decoder->state; 2169 return &decoder->state;
2153} 2170}
2154 2171
2155static bool intel_pt_at_psb(unsigned char *buf, size_t len)
2156{
2157 if (len < INTEL_PT_PSB_LEN)
2158 return false;
2159 return memmem(buf, INTEL_PT_PSB_LEN, INTEL_PT_PSB_STR,
2160 INTEL_PT_PSB_LEN);
2161}
2162
2163/** 2172/**
2164 * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet. 2173 * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
2165 * @buf: pointer to buffer pointer 2174 * @buf: pointer to buffer pointer
@@ -2248,6 +2257,7 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
2248 * @buf: buffer 2257 * @buf: buffer
2249 * @len: size of buffer 2258 * @len: size of buffer
2250 * @tsc: TSC value returned 2259 * @tsc: TSC value returned
2260 * @rem: returns remaining size when TSC is found
2251 * 2261 *
2252 * Find a TSC packet in @buf and return the TSC value. This function assumes 2262 * Find a TSC packet in @buf and return the TSC value. This function assumes
2253 * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a 2263 * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
@@ -2255,7 +2265,8 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
2255 * 2265 *
2256 * Return: %true if TSC is found, false otherwise. 2266 * Return: %true if TSC is found, false otherwise.
2257 */ 2267 */
2258static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc) 2268static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
2269 size_t *rem)
2259{ 2270{
2260 struct intel_pt_pkt packet; 2271 struct intel_pt_pkt packet;
2261 int ret; 2272 int ret;
@@ -2266,6 +2277,7 @@ static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
2266 return false; 2277 return false;
2267 if (packet.type == INTEL_PT_TSC) { 2278 if (packet.type == INTEL_PT_TSC) {
2268 *tsc = packet.payload; 2279 *tsc = packet.payload;
2280 *rem = len;
2269 return true; 2281 return true;
2270 } 2282 }
2271 if (packet.type == INTEL_PT_PSBEND) 2283 if (packet.type == INTEL_PT_PSBEND)
@@ -2316,6 +2328,8 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
2316 * @len_a: size of first buffer 2328 * @len_a: size of first buffer
2317 * @buf_b: second buffer 2329 * @buf_b: second buffer
2318 * @len_b: size of second buffer 2330 * @len_b: size of second buffer
2331 * @consecutive: returns true if there is data in buf_b that is consecutive
2332 * to buf_a
2319 * 2333 *
2320 * If the trace contains TSC we can look at the last TSC of @buf_a and the 2334 * If the trace contains TSC we can look at the last TSC of @buf_a and the
2321 * first TSC of @buf_b in order to determine if the buffers overlap, and then 2335 * first TSC of @buf_b in order to determine if the buffers overlap, and then
@@ -2328,33 +2342,41 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
2328static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a, 2342static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
2329 size_t len_a, 2343 size_t len_a,
2330 unsigned char *buf_b, 2344 unsigned char *buf_b,
2331 size_t len_b) 2345 size_t len_b, bool *consecutive)
2332{ 2346{
2333 uint64_t tsc_a, tsc_b; 2347 uint64_t tsc_a, tsc_b;
2334 unsigned char *p; 2348 unsigned char *p;
2335 size_t len; 2349 size_t len, rem_a, rem_b;
2336 2350
2337 p = intel_pt_last_psb(buf_a, len_a); 2351 p = intel_pt_last_psb(buf_a, len_a);
2338 if (!p) 2352 if (!p)
2339 return buf_b; /* No PSB in buf_a => no overlap */ 2353 return buf_b; /* No PSB in buf_a => no overlap */
2340 2354
2341 len = len_a - (p - buf_a); 2355 len = len_a - (p - buf_a);
2342 if (!intel_pt_next_tsc(p, len, &tsc_a)) { 2356 if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
2343 /* The last PSB+ in buf_a is incomplete, so go back one more */ 2357 /* The last PSB+ in buf_a is incomplete, so go back one more */
2344 len_a -= len; 2358 len_a -= len;
2345 p = intel_pt_last_psb(buf_a, len_a); 2359 p = intel_pt_last_psb(buf_a, len_a);
2346 if (!p) 2360 if (!p)
2347 return buf_b; /* No full PSB+ => assume no overlap */ 2361 return buf_b; /* No full PSB+ => assume no overlap */
2348 len = len_a - (p - buf_a); 2362 len = len_a - (p - buf_a);
2349 if (!intel_pt_next_tsc(p, len, &tsc_a)) 2363 if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
2350 return buf_b; /* No TSC in buf_a => assume no overlap */ 2364 return buf_b; /* No TSC in buf_a => assume no overlap */
2351 } 2365 }
2352 2366
2353 while (1) { 2367 while (1) {
2354 /* Ignore PSB+ with no TSC */ 2368 /* Ignore PSB+ with no TSC */
2355 if (intel_pt_next_tsc(buf_b, len_b, &tsc_b) && 2369 if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
2356 intel_pt_tsc_cmp(tsc_a, tsc_b) < 0) 2370 int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
2357 return buf_b; /* tsc_a < tsc_b => no overlap */ 2371
2372 /* Same TSC, so buffers are consecutive */
2373 if (!cmp && rem_b >= rem_a) {
2374 *consecutive = true;
2375 return buf_b + len_b - (rem_b - rem_a);
2376 }
2377 if (cmp < 0)
2378 return buf_b; /* tsc_a < tsc_b => no overlap */
2379 }
2358 2380
2359 if (!intel_pt_step_psb(&buf_b, &len_b)) 2381 if (!intel_pt_step_psb(&buf_b, &len_b))
2360 return buf_b + len_b; /* No PSB in buf_b => no data */ 2382 return buf_b + len_b; /* No PSB in buf_b => no data */
@@ -2368,6 +2390,8 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
2368 * @buf_b: second buffer 2390 * @buf_b: second buffer
2369 * @len_b: size of second buffer 2391 * @len_b: size of second buffer
2370 * @have_tsc: can use TSC packets to detect overlap 2392 * @have_tsc: can use TSC packets to detect overlap
2393 * @consecutive: returns true if there is data in buf_b that is consecutive
2394 * to buf_a
2371 * 2395 *
2372 * When trace samples or snapshots are recorded there is the possibility that 2396 * When trace samples or snapshots are recorded there is the possibility that
2373 * the data overlaps. Note that, for the purposes of decoding, data is only 2397 * the data overlaps. Note that, for the purposes of decoding, data is only
@@ -2378,7 +2402,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
2378 */ 2402 */
2379unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a, 2403unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
2380 unsigned char *buf_b, size_t len_b, 2404 unsigned char *buf_b, size_t len_b,
2381 bool have_tsc) 2405 bool have_tsc, bool *consecutive)
2382{ 2406{
2383 unsigned char *found; 2407 unsigned char *found;
2384 2408
@@ -2390,7 +2414,8 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
2390 return buf_b; /* No overlap */ 2414 return buf_b; /* No overlap */
2391 2415
2392 if (have_tsc) { 2416 if (have_tsc) {
2393 found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b); 2417 found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
2418 consecutive);
2394 if (found) 2419 if (found)
2395 return found; 2420 return found;
2396 } 2421 }
@@ -2405,28 +2430,16 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
2405 } 2430 }
2406 2431
2407 /* Now len_b >= len_a */ 2432 /* Now len_b >= len_a */
2408 if (len_b > len_a) {
2409 /* The leftover buffer 'b' must start at a PSB */
2410 while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
2411 if (!intel_pt_step_psb(&buf_a, &len_a))
2412 return buf_b; /* No overlap */
2413 }
2414 }
2415
2416 while (1) { 2433 while (1) {
2417 /* Potential overlap so check the bytes */ 2434 /* Potential overlap so check the bytes */
2418 found = memmem(buf_a, len_a, buf_b, len_a); 2435 found = memmem(buf_a, len_a, buf_b, len_a);
2419 if (found) 2436 if (found) {
2437 *consecutive = true;
2420 return buf_b + len_a; 2438 return buf_b + len_a;
2439 }
2421 2440
2422 /* Try again at next PSB in buffer 'a' */ 2441 /* Try again at next PSB in buffer 'a' */
2423 if (!intel_pt_step_psb(&buf_a, &len_a)) 2442 if (!intel_pt_step_psb(&buf_a, &len_a))
2424 return buf_b; /* No overlap */ 2443 return buf_b; /* No overlap */
2425
2426 /* The leftover buffer 'b' must start at a PSB */
2427 while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
2428 if (!intel_pt_step_psb(&buf_a, &len_a))
2429 return buf_b; /* No overlap */
2430 }
2431 } 2444 }
2432} 2445}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index 02c38fec1c37..e420bd3be159 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -53,6 +53,14 @@ enum {
53 INTEL_PT_ERR_MAX, 53 INTEL_PT_ERR_MAX,
54}; 54};
55 55
56enum intel_pt_param_flags {
57 /*
58 * FUP packet can contain next linear instruction pointer instead of
59 * current linear instruction pointer.
60 */
61 INTEL_PT_FUP_WITH_NLIP = 1 << 0,
62};
63
56struct intel_pt_state { 64struct intel_pt_state {
57 enum intel_pt_sample_type type; 65 enum intel_pt_sample_type type;
58 int err; 66 int err;
@@ -91,6 +99,7 @@ struct intel_pt_params {
91 unsigned int mtc_period; 99 unsigned int mtc_period;
92 uint32_t tsc_ctc_ratio_n; 100 uint32_t tsc_ctc_ratio_n;
93 uint32_t tsc_ctc_ratio_d; 101 uint32_t tsc_ctc_ratio_d;
102 enum intel_pt_param_flags flags;
94}; 103};
95 104
96struct intel_pt_decoder; 105struct intel_pt_decoder;
@@ -102,7 +111,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder);
102 111
103unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a, 112unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
104 unsigned char *buf_b, size_t len_b, 113 unsigned char *buf_b, size_t len_b,
105 bool have_tsc); 114 bool have_tsc, bool *consecutive);
106 115
107int intel_pt__strerror(int code, char *buf, size_t buflen); 116int intel_pt__strerror(int code, char *buf, size_t buflen);
108 117
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index 7528ae4f7e28..e5c6caf913f3 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -281,7 +281,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
281 if (len < offs) 281 if (len < offs)
282 return INTEL_PT_NEED_MORE_BYTES; 282 return INTEL_PT_NEED_MORE_BYTES;
283 byte = buf[offs++]; 283 byte = buf[offs++];
284 payload |= (byte >> 1) << shift; 284 payload |= ((uint64_t)byte >> 1) << shift;
285 } 285 }
286 286
287 packet->type = INTEL_PT_CYC; 287 packet->type = INTEL_PT_CYC;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 89927b5beebf..c8f2d084a8ce 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -125,6 +125,7 @@ struct intel_pt_queue {
125 bool stop; 125 bool stop;
126 bool step_through_buffers; 126 bool step_through_buffers;
127 bool use_buffer_pid_tid; 127 bool use_buffer_pid_tid;
128 bool sync_switch;
128 pid_t pid, tid; 129 pid_t pid, tid;
129 int cpu; 130 int cpu;
130 int switch_state; 131 int switch_state;
@@ -188,14 +189,17 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
188static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a, 189static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
189 struct auxtrace_buffer *b) 190 struct auxtrace_buffer *b)
190{ 191{
192 bool consecutive = false;
191 void *start; 193 void *start;
192 194
193 start = intel_pt_find_overlap(a->data, a->size, b->data, b->size, 195 start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
194 pt->have_tsc); 196 pt->have_tsc, &consecutive);
195 if (!start) 197 if (!start)
196 return -EINVAL; 198 return -EINVAL;
197 b->use_size = b->data + b->size - start; 199 b->use_size = b->data + b->size - start;
198 b->use_data = start; 200 b->use_data = start;
201 if (b->use_size && consecutive)
202 b->consecutive = true;
199 return 0; 203 return 0;
200} 204}
201 205
@@ -672,6 +676,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
672 unsigned int queue_nr) 676 unsigned int queue_nr)
673{ 677{
674 struct intel_pt_params params = { .get_trace = 0, }; 678 struct intel_pt_params params = { .get_trace = 0, };
679 struct perf_env *env = pt->machine->env;
675 struct intel_pt_queue *ptq; 680 struct intel_pt_queue *ptq;
676 681
677 ptq = zalloc(sizeof(struct intel_pt_queue)); 682 ptq = zalloc(sizeof(struct intel_pt_queue));
@@ -749,6 +754,9 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
749 } 754 }
750 } 755 }
751 756
757 if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
758 params.flags |= INTEL_PT_FUP_WITH_NLIP;
759
752 ptq->decoder = intel_pt_decoder_new(&params); 760 ptq->decoder = intel_pt_decoder_new(&params);
753 if (!ptq->decoder) 761 if (!ptq->decoder)
754 goto out_free; 762 goto out_free;
@@ -849,10 +857,12 @@ static int intel_pt_setup_queue(struct intel_pt *pt,
849 if (pt->timeless_decoding || !pt->have_sched_switch) 857 if (pt->timeless_decoding || !pt->have_sched_switch)
850 ptq->use_buffer_pid_tid = true; 858 ptq->use_buffer_pid_tid = true;
851 } 859 }
860
861 ptq->sync_switch = pt->sync_switch;
852 } 862 }
853 863
854 if (!ptq->on_heap && 864 if (!ptq->on_heap &&
855 (!pt->sync_switch || 865 (!ptq->sync_switch ||
856 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) { 866 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
857 const struct intel_pt_state *state; 867 const struct intel_pt_state *state;
858 int ret; 868 int ret;
@@ -1235,11 +1245,12 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
1235 if (pt->synth_opts.last_branch) 1245 if (pt->synth_opts.last_branch)
1236 intel_pt_update_last_branch_rb(ptq); 1246 intel_pt_update_last_branch_rb(ptq);
1237 1247
1238 if (!pt->sync_switch) 1248 if (!ptq->sync_switch)
1239 return 0; 1249 return 0;
1240 1250
1241 if (intel_pt_is_switch_ip(ptq, state->to_ip)) { 1251 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
1242 switch (ptq->switch_state) { 1252 switch (ptq->switch_state) {
1253 case INTEL_PT_SS_NOT_TRACING:
1243 case INTEL_PT_SS_UNKNOWN: 1254 case INTEL_PT_SS_UNKNOWN:
1244 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 1255 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
1245 err = intel_pt_next_tid(pt, ptq); 1256 err = intel_pt_next_tid(pt, ptq);
@@ -1316,6 +1327,21 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
1316 return switch_ip; 1327 return switch_ip;
1317} 1328}
1318 1329
1330static void intel_pt_enable_sync_switch(struct intel_pt *pt)
1331{
1332 unsigned int i;
1333
1334 pt->sync_switch = true;
1335
1336 for (i = 0; i < pt->queues.nr_queues; i++) {
1337 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
1338 struct intel_pt_queue *ptq = queue->priv;
1339
1340 if (ptq)
1341 ptq->sync_switch = true;
1342 }
1343}
1344
1319static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) 1345static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
1320{ 1346{
1321 const struct intel_pt_state *state = ptq->state; 1347 const struct intel_pt_state *state = ptq->state;
@@ -1332,7 +1358,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
1332 if (pt->switch_ip) { 1358 if (pt->switch_ip) {
1333 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n", 1359 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
1334 pt->switch_ip, pt->ptss_ip); 1360 pt->switch_ip, pt->ptss_ip);
1335 pt->sync_switch = true; 1361 intel_pt_enable_sync_switch(pt);
1336 } 1362 }
1337 } 1363 }
1338 } 1364 }
@@ -1348,9 +1374,9 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
1348 if (state->err) { 1374 if (state->err) {
1349 if (state->err == INTEL_PT_ERR_NODATA) 1375 if (state->err == INTEL_PT_ERR_NODATA)
1350 return 1; 1376 return 1;
1351 if (pt->sync_switch && 1377 if (ptq->sync_switch &&
1352 state->from_ip >= pt->kernel_start) { 1378 state->from_ip >= pt->kernel_start) {
1353 pt->sync_switch = false; 1379 ptq->sync_switch = false;
1354 intel_pt_next_tid(pt, ptq); 1380 intel_pt_next_tid(pt, ptq);
1355 } 1381 }
1356 if (pt->synth_opts.errors) { 1382 if (pt->synth_opts.errors) {
@@ -1376,7 +1402,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
1376 state->timestamp, state->est_timestamp); 1402 state->timestamp, state->est_timestamp);
1377 ptq->timestamp = state->est_timestamp; 1403 ptq->timestamp = state->est_timestamp;
1378 /* Use estimated TSC in unknown switch state */ 1404 /* Use estimated TSC in unknown switch state */
1379 } else if (pt->sync_switch && 1405 } else if (ptq->sync_switch &&
1380 ptq->switch_state == INTEL_PT_SS_UNKNOWN && 1406 ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
1381 intel_pt_is_switch_ip(ptq, state->to_ip) && 1407 intel_pt_is_switch_ip(ptq, state->to_ip) &&
1382 ptq->next_tid == -1) { 1408 ptq->next_tid == -1) {
@@ -1523,7 +1549,7 @@ static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
1523 return 1; 1549 return 1;
1524 1550
1525 ptq = intel_pt_cpu_to_ptq(pt, cpu); 1551 ptq = intel_pt_cpu_to_ptq(pt, cpu);
1526 if (!ptq) 1552 if (!ptq || !ptq->sync_switch)
1527 return 1; 1553 return 1;
1528 1554
1529 switch (ptq->switch_state) { 1555 switch (ptq->switch_state) {
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index b1b9e2385f4b..5e58149c4df2 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -79,7 +79,7 @@ static union perf_event *dup_event(struct ordered_events *oe,
79 79
80static void free_dup_event(struct ordered_events *oe, union perf_event *event) 80static void free_dup_event(struct ordered_events *oe, union perf_event *event)
81{ 81{
82 if (oe->copy_on_queue) { 82 if (event && oe->copy_on_queue) {
83 oe->cur_alloc_size -= event->header.size; 83 oe->cur_alloc_size -= event->header.size;
84 free(event); 84 free(event);
85 } 85 }
@@ -150,6 +150,7 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve
150 list_move(&event->list, &oe->cache); 150 list_move(&event->list, &oe->cache);
151 oe->nr_events--; 151 oe->nr_events--;
152 free_dup_event(oe, event->event); 152 free_dup_event(oe, event->event);
153 event->event = NULL;
153} 154}
154 155
155int ordered_events__queue(struct ordered_events *oe, union perf_event *event, 156int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 03875f9154e7..0195b7e8c54a 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2349,6 +2349,14 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
2349 2349
2350out: 2350out:
2351 free(nbase); 2351 free(nbase);
2352
2353 /* Final validation */
2354 if (ret >= 0 && !is_c_func_name(buf)) {
2355 pr_warning("Internal error: \"%s\" is an invalid event name.\n",
2356 buf);
2357 ret = -EINVAL;
2358 }
2359
2352 return ret; 2360 return ret;
2353} 2361}
2354 2362
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 010ff659b82f..4596496f6c0f 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -135,8 +135,14 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
135 if (perf_session__open(session) < 0) 135 if (perf_session__open(session) < 0)
136 goto out_close; 136 goto out_close;
137 137
138 perf_session__set_id_hdr_size(session); 138 /*
139 perf_session__set_comm_exec(session); 139 * set session attributes that are present in perf.data
140 * but not in pipe-mode.
141 */
142 if (!file->is_pipe) {
143 perf_session__set_id_hdr_size(session);
144 perf_session__set_comm_exec(session);
145 }
140 } 146 }
141 } else { 147 } else {
142 session->machines.host.env = &perf_env; 148 session->machines.host.env = &perf_env;
@@ -151,7 +157,11 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
151 pr_warning("Cannot read kernel map\n"); 157 pr_warning("Cannot read kernel map\n");
152 } 158 }
153 159
154 if (tool && tool->ordering_requires_timestamps && 160 /*
161 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
162 * processed, so perf_evlist__sample_id_all is not meaningful here.
163 */
164 if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps &&
155 tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) { 165 tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
156 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 166 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
157 tool->ordered_events = false; 167 tool->ordered_events = false;
@@ -1411,6 +1421,7 @@ static int __perf_session__process_pipe_events(struct perf_session *session)
1411 buf = malloc(cur_size); 1421 buf = malloc(cur_size);
1412 if (!buf) 1422 if (!buf)
1413 return -errno; 1423 return -errno;
1424 ordered_events__set_copy_on_queue(oe, true);
1414more: 1425more:
1415 event = buf; 1426 event = buf;
1416 err = readn(fd, event, sizeof(struct perf_event_header)); 1427 err = readn(fd, event, sizeof(struct perf_event_header));
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 2d8ccd4d9e1b..87312056f75d 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -604,6 +604,9 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
604static int64_t 604static int64_t
605sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 605sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
606{ 606{
607 if (!left->branch_info || !right->branch_info)
608 return cmp_null(left->branch_info, right->branch_info);
609
607 return left->branch_info->flags.cycles - 610 return left->branch_info->flags.cycles -
608 right->branch_info->flags.cycles; 611 right->branch_info->flags.cycles;
609} 612}
@@ -611,6 +614,8 @@ sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
611static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 614static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
612 size_t size, unsigned int width) 615 size_t size, unsigned int width)
613{ 616{
617 if (!he->branch_info)
618 return scnprintf(bf, size, "%-.*s", width, "N/A");
614 if (he->branch_info->flags.cycles == 0) 619 if (he->branch_info->flags.cycles == 0)
615 return repsep_snprintf(bf, size, "%-*s", width, "-"); 620 return repsep_snprintf(bf, size, "%-*s", width, "-");
616 return repsep_snprintf(bf, size, "%-*hd", width, 621 return repsep_snprintf(bf, size, "%-*hd", width,
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 2dcfe9a7c8d0..60edec383281 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -37,6 +37,14 @@ static int __report_module(struct addr_location *al, u64 ip,
37 return 0; 37 return 0;
38 38
39 mod = dwfl_addrmodule(ui->dwfl, ip); 39 mod = dwfl_addrmodule(ui->dwfl, ip);
40 if (mod) {
41 Dwarf_Addr s;
42
43 dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
44 if (s != al->map->start)
45 mod = 0;
46 }
47
40 if (!mod) 48 if (!mod)
41 mod = dwfl_report_elf(ui->dwfl, dso->short_name, 49 mod = dwfl_report_elf(ui->dwfl, dso->short_name,
42 dso->long_name, -1, al->map->start, 50 dso->long_name, -1, al->map->start,
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 47b1e36c7ea0..9adc9af8b048 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -162,7 +162,7 @@ int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
162 162
163 size -= ret; 163 size -= ret;
164 off_in += ret; 164 off_in += ret;
165 off_out -= ret; 165 off_out += ret;
166 } 166 }
167 munmap(ptr, off_in + size); 167 munmap(ptr, off_in + size);
168 168
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 8abbef164b4e..7ea4438b801d 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -46,6 +46,16 @@ else
46NO_SUBDIR = : 46NO_SUBDIR = :
47endif 47endif
48 48
49ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
50ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
51 silent=1
52endif
53else # make-3.8x
54ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
55 silent=1
56endif
57endif
58
49# 59#
50# Define a callable command for descending to a new directory 60# Define a callable command for descending to a new directory
51# 61#
@@ -58,7 +68,7 @@ descend = \
58QUIET_SUBDIR0 = +$(MAKE) $(COMMAND_O) -C # space to separate -C and subdir 68QUIET_SUBDIR0 = +$(MAKE) $(COMMAND_O) -C # space to separate -C and subdir
59QUIET_SUBDIR1 = 69QUIET_SUBDIR1 =
60 70
61ifneq ($(findstring $(MAKEFLAGS),s),s) 71ifneq ($(silent),1)
62 ifneq ($(V),1) 72 ifneq ($(V),1)
63 QUIET_CC = @echo ' CC '$@; 73 QUIET_CC = @echo ' CC '$@;
64 QUIET_CC_FPIC = @echo ' CC FPIC '$@; 74 QUIET_CC_FPIC = @echo ' CC FPIC '$@;
@@ -82,3 +92,5 @@ ifneq ($(findstring $(MAKEFLAGS),s),s)
82 QUIET_INSTALL = @printf ' INSTALL %s\n' $1; 92 QUIET_INSTALL = @printf ' INSTALL %s\n' $1;
83 endif 93 endif
84endif 94endif
95
96pound := \#
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 24ebd3e3eb7d..5d2e479430d1 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -90,6 +90,7 @@ ifdef INSTALL_PATH
90 for TARGET in $(TARGETS); do \ 90 for TARGET in $(TARGETS); do \
91 echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \ 91 echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \
92 echo "echo ========================================" >> $(ALL_SCRIPT); \ 92 echo "echo ========================================" >> $(ALL_SCRIPT); \
93 echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
93 echo "cd $$TARGET" >> $(ALL_SCRIPT); \ 94 echo "cd $$TARGET" >> $(ALL_SCRIPT); \
94 make -s --no-print-directory -C $$TARGET emit_tests >> $(ALL_SCRIPT); \ 95 make -s --no-print-directory -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
95 echo "cd \$$ROOT" >> $(ALL_SCRIPT); \ 96 echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 856a1f327b3f..63c310cdac09 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -28,7 +28,12 @@ test_finish()
28 if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then 28 if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
29 echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout 29 echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
30 fi 30 fi
31 echo -n "$OLD_PATH" >/sys/module/firmware_class/parameters/path 31 if [ "$OLD_FWPATH" = "" ]; then
32 # A zero-length write won't work; write a null byte
33 printf '\000' >/sys/module/firmware_class/parameters/path
34 else
35 echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
36 fi
32 rm -f "$FW" 37 rm -f "$FW"
33 rmdir "$FWPATH" 38 rmdir "$FWPATH"
34} 39}
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
new file mode 100644
index 000000000000..5ba73035e1d9
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
@@ -0,0 +1,46 @@
1#!/bin/sh
2# SPDX-License-Identifier: GPL-2.0
3# description: Kprobe event string type argument
4
5[ -f kprobe_events ] || exit_unsupported # this is configurable
6
7echo 0 > events/enable
8echo > kprobe_events
9
10case `uname -m` in
11x86_64)
12 ARG2=%si
13 OFFS=8
14;;
15i[3456]86)
16 ARG2=%cx
17 OFFS=4
18;;
19aarch64)
20 ARG2=%x1
21 OFFS=8
22;;
23arm*)
24 ARG2=%r1
25 OFFS=4
26;;
27*)
28 echo "Please implement other architecture here"
29 exit_untested
30esac
31
32: "Test get argument (1)"
33echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string" > kprobe_events
34echo 1 > events/kprobes/testprobe/enable
35! echo test >> kprobe_events
36tail -n 1 trace | grep -qe "testprobe.* arg1=\"test\""
37
38echo 0 > events/kprobes/testprobe/enable
39: "Test get argument (2)"
40echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string arg2=+0(+${OFFS}(${ARG2})):string" > kprobe_events
41echo 1 > events/kprobes/testprobe/enable
42! echo test1 test2 >> kprobe_events
43tail -n 1 trace | grep -qe "testprobe.* arg1=\"test1\" arg2=\"test2\""
44
45echo 0 > events/enable
46echo > kprobe_events
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
new file mode 100644
index 000000000000..231bcd2c4eb5
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
@@ -0,0 +1,97 @@
1#!/bin/sh
2# SPDX-License-Identifier: GPL-2.0
3# description: Kprobe event argument syntax
4
5[ -f kprobe_events ] || exit_unsupported # this is configurable
6
7grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue
8
9echo 0 > events/enable
10echo > kprobe_events
11
12PROBEFUNC="vfs_read"
13GOODREG=
14BADREG=
15GOODSYM="_sdata"
16if ! grep -qw ${GOODSYM} /proc/kallsyms ; then
17 GOODSYM=$PROBEFUNC
18fi
19BADSYM="deaqswdefr"
20SYMADDR=0x`grep -w ${GOODSYM} /proc/kallsyms | cut -f 1 -d " "`
21GOODTYPE="x16"
22BADTYPE="y16"
23
24case `uname -m` in
25x86_64|i[3456]86)
26 GOODREG=%ax
27 BADREG=%ex
28;;
29aarch64)
30 GOODREG=%x0
31 BADREG=%ax
32;;
33arm*)
34 GOODREG=%r0
35 BADREG=%ax
36;;
37esac
38
39test_goodarg() # Good-args
40{
41 while [ "$1" ]; do
42 echo "p ${PROBEFUNC} $1" > kprobe_events
43 shift 1
44 done;
45}
46
47test_badarg() # Bad-args
48{
49 while [ "$1" ]; do
50 ! echo "p ${PROBEFUNC} $1" > kprobe_events
51 shift 1
52 done;
53}
54
55echo > kprobe_events
56
57: "Register access"
58test_goodarg ${GOODREG}
59test_badarg ${BADREG}
60
61: "Symbol access"
62test_goodarg "@${GOODSYM}" "@${SYMADDR}" "@${GOODSYM}+10" "@${GOODSYM}-10"
63test_badarg "@" "@${BADSYM}" "@${GOODSYM}*10" "@${GOODSYM}/10" \
64 "@${GOODSYM}%10" "@${GOODSYM}&10" "@${GOODSYM}|10"
65
66: "Stack access"
67test_goodarg "\$stack" "\$stack0" "\$stack1"
68test_badarg "\$stackp" "\$stack0+10" "\$stack1-10"
69
70: "Retval access"
71echo "r ${PROBEFUNC} \$retval" > kprobe_events
72! echo "p ${PROBEFUNC} \$retval" > kprobe_events
73
74: "Comm access"
75test_goodarg "\$comm"
76
77: "Indirect memory access"
78test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \
79 "+0(\$stack1)" "+10(@${GOODSYM}-10)" "+0(+10(+20(\$stack)))"
80test_badarg "+(${GOODREG})" "(${GOODREG}+10)" "-(${GOODREG})" "(${GOODREG})" \
81 "+10(\$comm)" "+0(${GOODREG})+10"
82
83: "Name assignment"
84test_goodarg "varname=${GOODREG}"
85test_badarg "varname=varname2=${GOODREG}"
86
87: "Type syntax"
88test_goodarg "${GOODREG}:${GOODTYPE}"
89test_badarg "${GOODREG}::${GOODTYPE}" "${GOODREG}:${BADTYPE}" \
90 "${GOODTYPE}:${GOODREG}"
91
92: "Combination check"
93
94test_goodarg "\$comm:string" "+0(\$stack):string"
95test_badarg "\$comm:x64" "\$stack:string" "${GOODREG}:string"
96
97echo > kprobe_events
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
new file mode 100644
index 000000000000..4fda01a08da4
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
@@ -0,0 +1,43 @@
1#!/bin/sh
2# SPDX-License-Identifier: GPL-2.0
3# description: Kprobe events - probe points
4
5[ -f kprobe_events ] || exit_unsupported # this is configurable
6
7TARGET_FUNC=create_trace_kprobe
8
9dec_addr() { # hexaddr
10 printf "%d" "0x"`echo $1 | tail -c 8`
11}
12
13set_offs() { # prev target next
14 A1=`dec_addr $1`
15 A2=`dec_addr $2`
16 A3=`dec_addr $3`
17 TARGET="0x$2" # an address
18 PREV=`expr $A1 - $A2` # offset to previous symbol
19 NEXT=+`expr $A3 - $A2` # offset to next symbol
20 OVERFLOW=+`printf "0x%x" ${PREV}` # overflow offset to previous symbol
21}
22
23# We have to decode symbol addresses to get correct offsets.
24# If the offset is not an instruction boundary, it cause -EILSEQ.
25set_offs `grep -A1 -B1 ${TARGET_FUNC} /proc/kallsyms | cut -f 1 -d " " | xargs`
26
27UINT_TEST=no
28# printf "%x" -1 returns (unsigned long)-1.
29if [ `printf "%x" -1 | wc -c` != 9 ]; then
30 UINT_TEST=yes
31fi
32
33echo 0 > events/enable
34echo > kprobe_events
35echo "p:testprobe ${TARGET_FUNC}" > kprobe_events
36echo "p:testprobe ${TARGET}" > kprobe_events
37echo "p:testprobe ${TARGET_FUNC}${NEXT}" > kprobe_events
38! echo "p:testprobe ${TARGET_FUNC}${PREV}" > kprobe_events
39if [ "${UINT_TEST}" = yes ]; then
40! echo "p:testprobe ${TARGET_FUNC}${OVERFLOW}" > kprobe_events
41fi
42echo > kprobe_events
43clear_trace
diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config
new file mode 100644
index 000000000000..835c7f4dadcd
--- /dev/null
+++ b/tools/testing/selftests/memfd/config
@@ -0,0 +1 @@
CONFIG_FUSE_FS=m
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 412459369686..9b654a070e7d 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -97,6 +97,8 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
97 97
98static void sock_fanout_set_ebpf(int fd) 98static void sock_fanout_set_ebpf(int fd)
99{ 99{
100 static char log_buf[65536];
101
100 const int len_off = __builtin_offsetof(struct __sk_buff, len); 102 const int len_off = __builtin_offsetof(struct __sk_buff, len);
101 struct bpf_insn prog[] = { 103 struct bpf_insn prog[] = {
102 { BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0 }, 104 { BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0 },
@@ -109,7 +111,6 @@ static void sock_fanout_set_ebpf(int fd)
109 { BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0 }, 111 { BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0 },
110 { BPF_JMP | BPF_EXIT, 0, 0, 0, 0 } 112 { BPF_JMP | BPF_EXIT, 0, 0, 0, 0 }
111 }; 113 };
112 char log_buf[512];
113 union bpf_attr attr; 114 union bpf_attr attr;
114 int pfd; 115 int pfd;
115 116
diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c
index 440180ff8089..ca29f5872817 100644
--- a/tools/testing/selftests/powerpc/mm/subpage_prot.c
+++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c
@@ -135,6 +135,16 @@ static int run_test(void *addr, unsigned long size)
135 return 0; 135 return 0;
136} 136}
137 137
138static int syscall_available(void)
139{
140 int rc;
141
142 errno = 0;
143 rc = syscall(__NR_subpage_prot, 0, 0, 0);
144
145 return rc == 0 || (errno != ENOENT && errno != ENOSYS);
146}
147
138int test_anon(void) 148int test_anon(void)
139{ 149{
140 unsigned long align; 150 unsigned long align;
@@ -145,6 +155,8 @@ int test_anon(void)
145 void *mallocblock; 155 void *mallocblock;
146 unsigned long mallocsize; 156 unsigned long mallocsize;
147 157
158 SKIP_IF(!syscall_available());
159
148 if (getpagesize() != 0x10000) { 160 if (getpagesize() != 0x10000) {
149 fprintf(stderr, "Kernel page size must be 64K!\n"); 161 fprintf(stderr, "Kernel page size must be 64K!\n");
150 return 1; 162 return 1;
@@ -180,6 +192,8 @@ int test_file(void)
180 off_t filesize; 192 off_t filesize;
181 int fd; 193 int fd;
182 194
195 SKIP_IF(!syscall_available());
196
183 fd = open(file_name, O_RDWR); 197 fd = open(file_name, O_RDWR);
184 if (fd == -1) { 198 if (fd == -1) {
185 perror("failed to open file"); 199 perror("failed to open file");
diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
index 42d4c8caad81..de8dc82e2567 100644
--- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
@@ -45,12 +45,12 @@ int test_body(void)
45 printf("Check DSCR TM context switch: "); 45 printf("Check DSCR TM context switch: ");
46 fflush(stdout); 46 fflush(stdout);
47 for (;;) { 47 for (;;) {
48 rv = 1;
49 asm __volatile__ ( 48 asm __volatile__ (
50 /* set a known value into the DSCR */ 49 /* set a known value into the DSCR */
51 "ld 3, %[dscr1];" 50 "ld 3, %[dscr1];"
52 "mtspr %[sprn_dscr], 3;" 51 "mtspr %[sprn_dscr], 3;"
53 52
53 "li %[rv], 1;"
54 /* start and suspend a transaction */ 54 /* start and suspend a transaction */
55 TBEGIN 55 TBEGIN
56 "beq 1f;" 56 "beq 1f;"
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index 3f81a1095206..50a6371b2b2e 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -51,7 +51,7 @@ then
51 mkdir $builddir 51 mkdir $builddir
52 fi 52 fi
53 else 53 else
54 echo Bad build directory: \"$builddir\" 54 echo Bad build directory: \"$buildloc\"
55 exit 2 55 exit 2
56 fi 56 fi
57fi 57fi
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 882fe83a3554..b3f345433ec7 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1476,15 +1476,19 @@ TEST_F(TRACE_syscall, syscall_dropped)
1476#define SECCOMP_SET_MODE_FILTER 1 1476#define SECCOMP_SET_MODE_FILTER 1
1477#endif 1477#endif
1478 1478
1479#ifndef SECCOMP_FLAG_FILTER_TSYNC 1479#ifndef SECCOMP_FILTER_FLAG_TSYNC
1480#define SECCOMP_FLAG_FILTER_TSYNC 1 1480#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
1481#endif
1482
1483#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
1484#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
1481#endif 1485#endif
1482 1486
1483#ifndef seccomp 1487#ifndef seccomp
1484int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter) 1488int seccomp(unsigned int op, unsigned int flags, void *args)
1485{ 1489{
1486 errno = 0; 1490 errno = 0;
1487 return syscall(__NR_seccomp, op, flags, filter); 1491 return syscall(__NR_seccomp, op, flags, args);
1488} 1492}
1489#endif 1493#endif
1490 1494
@@ -1576,6 +1580,78 @@ TEST(seccomp_syscall_mode_lock)
1576 } 1580 }
1577} 1581}
1578 1582
1583/*
1584 * Test detection of known and unknown filter flags. Userspace needs to be able
1585 * to check if a filter flag is supported by the current kernel and a good way
1586 * of doing that is by attempting to enter filter mode, with the flag bit in
1587 * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates
1588 * that the flag is valid and EINVAL indicates that the flag is invalid.
1589 */
1590TEST(detect_seccomp_filter_flags)
1591{
1592 unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
1593 SECCOMP_FILTER_FLAG_SPEC_ALLOW };
1594 unsigned int flag, all_flags;
1595 int i;
1596 long ret;
1597
1598 /* Test detection of known-good filter flags */
1599 for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
1600 int bits = 0;
1601
1602 flag = flags[i];
1603 /* Make sure the flag is a single bit! */
1604 while (flag) {
1605 if (flag & 0x1)
1606 bits ++;
1607 flag >>= 1;
1608 }
1609 ASSERT_EQ(1, bits);
1610 flag = flags[i];
1611
1612 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
1613 ASSERT_NE(ENOSYS, errno) {
1614 TH_LOG("Kernel does not support seccomp syscall!");
1615 }
1616 EXPECT_EQ(-1, ret);
1617 EXPECT_EQ(EFAULT, errno) {
1618 TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!",
1619 flag);
1620 }
1621
1622 all_flags |= flag;
1623 }
1624
1625 /* Test detection of all known-good filter flags */
1626 ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL);
1627 EXPECT_EQ(-1, ret);
1628 EXPECT_EQ(EFAULT, errno) {
1629 TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
1630 all_flags);
1631 }
1632
1633 /* Test detection of an unknown filter flag */
1634 flag = -1;
1635 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
1636 EXPECT_EQ(-1, ret);
1637 EXPECT_EQ(EINVAL, errno) {
1638 TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
1639 flag);
1640 }
1641
1642 /*
1643 * Test detection of an unknown filter flag that may simply need to be
1644 * added to this test
1645 */
1646 flag = flags[ARRAY_SIZE(flags) - 1] << 1;
1647 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
1648 EXPECT_EQ(-1, ret);
1649 EXPECT_EQ(EINVAL, errno) {
1650 TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
1651 flag);
1652 }
1653}
1654
1579TEST(TSYNC_first) 1655TEST(TSYNC_first)
1580{ 1656{
1581 struct sock_filter filter[] = { 1657 struct sock_filter filter[] = {
@@ -1592,7 +1668,7 @@ TEST(TSYNC_first)
1592 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 1668 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1593 } 1669 }
1594 1670
1595 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 1671 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
1596 &prog); 1672 &prog);
1597 ASSERT_NE(ENOSYS, errno) { 1673 ASSERT_NE(ENOSYS, errno) {
1598 TH_LOG("Kernel does not support seccomp syscall!"); 1674 TH_LOG("Kernel does not support seccomp syscall!");
@@ -1810,7 +1886,7 @@ TEST_F(TSYNC, two_siblings_with_ancestor)
1810 self->sibling_count++; 1886 self->sibling_count++;
1811 } 1887 }
1812 1888
1813 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 1889 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
1814 &self->apply_prog); 1890 &self->apply_prog);
1815 ASSERT_EQ(0, ret) { 1891 ASSERT_EQ(0, ret) {
1816 TH_LOG("Could install filter on all threads!"); 1892 TH_LOG("Could install filter on all threads!");
@@ -1871,7 +1947,7 @@ TEST_F(TSYNC, two_siblings_with_no_filter)
1871 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 1947 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
1872 } 1948 }
1873 1949
1874 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 1950 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
1875 &self->apply_prog); 1951 &self->apply_prog);
1876 ASSERT_NE(ENOSYS, errno) { 1952 ASSERT_NE(ENOSYS, errno) {
1877 TH_LOG("Kernel does not support seccomp syscall!"); 1953 TH_LOG("Kernel does not support seccomp syscall!");
@@ -1919,7 +1995,7 @@ TEST_F(TSYNC, two_siblings_with_one_divergence)
1919 self->sibling_count++; 1995 self->sibling_count++;
1920 } 1996 }
1921 1997
1922 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 1998 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
1923 &self->apply_prog); 1999 &self->apply_prog);
1924 ASSERT_EQ(self->sibling[0].system_tid, ret) { 2000 ASSERT_EQ(self->sibling[0].system_tid, ret) {
1925 TH_LOG("Did not fail on diverged sibling."); 2001 TH_LOG("Did not fail on diverged sibling.");
@@ -1971,7 +2047,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
1971 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2047 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
1972 } 2048 }
1973 2049
1974 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 2050 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
1975 &self->apply_prog); 2051 &self->apply_prog);
1976 ASSERT_EQ(ret, self->sibling[0].system_tid) { 2052 ASSERT_EQ(ret, self->sibling[0].system_tid) {
1977 TH_LOG("Did not fail on diverged sibling."); 2053 TH_LOG("Did not fail on diverged sibling.");
@@ -2000,7 +2076,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
2000 /* Switch to the remaining sibling */ 2076 /* Switch to the remaining sibling */
2001 sib = !sib; 2077 sib = !sib;
2002 2078
2003 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 2079 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2004 &self->apply_prog); 2080 &self->apply_prog);
2005 ASSERT_EQ(0, ret) { 2081 ASSERT_EQ(0, ret) {
2006 TH_LOG("Expected the remaining sibling to sync"); 2082 TH_LOG("Expected the remaining sibling to sync");
@@ -2023,7 +2099,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
2023 while (!kill(self->sibling[sib].system_tid, 0)) 2099 while (!kill(self->sibling[sib].system_tid, 0))
2024 sleep(0.1); 2100 sleep(0.1);
2025 2101
2026 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, 2102 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2027 &self->apply_prog); 2103 &self->apply_prog);
2028 ASSERT_EQ(0, ret); /* just us chickens */ 2104 ASSERT_EQ(0, ret); /* just us chickens */
2029} 2105}
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
index d075ea0e5ca1..ade443a88421 100644
--- a/tools/testing/selftests/x86/entry_from_vm86.c
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -95,6 +95,31 @@ asm (
95 "int3\n\t" 95 "int3\n\t"
96 "vmcode_int80:\n\t" 96 "vmcode_int80:\n\t"
97 "int $0x80\n\t" 97 "int $0x80\n\t"
98 "vmcode_popf_hlt:\n\t"
99 "push %ax\n\t"
100 "popf\n\t"
101 "hlt\n\t"
102 "vmcode_umip:\n\t"
103 /* addressing via displacements */
104 "smsw (2052)\n\t"
105 "sidt (2054)\n\t"
106 "sgdt (2060)\n\t"
107 /* addressing via registers */
108 "mov $2066, %bx\n\t"
109 "smsw (%bx)\n\t"
110 "mov $2068, %bx\n\t"
111 "sidt (%bx)\n\t"
112 "mov $2074, %bx\n\t"
113 "sgdt (%bx)\n\t"
114 /* register operands, only for smsw */
115 "smsw %ax\n\t"
116 "mov %ax, (2080)\n\t"
117 "int3\n\t"
118 "vmcode_umip_str:\n\t"
119 "str %eax\n\t"
120 "vmcode_umip_sldt:\n\t"
121 "sldt %eax\n\t"
122 "int3\n\t"
98 ".size vmcode, . - vmcode\n\t" 123 ".size vmcode, . - vmcode\n\t"
99 "end_vmcode:\n\t" 124 "end_vmcode:\n\t"
100 ".code32\n\t" 125 ".code32\n\t"
@@ -103,7 +128,8 @@ asm (
103 128
104extern unsigned char vmcode[], end_vmcode[]; 129extern unsigned char vmcode[], end_vmcode[];
105extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[], 130extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[],
106 vmcode_sti[], vmcode_int3[], vmcode_int80[]; 131 vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_popf_hlt[],
132 vmcode_umip[], vmcode_umip_str[], vmcode_umip_sldt[];
107 133
108/* Returns false if the test was skipped. */ 134/* Returns false if the test was skipped. */
109static bool do_test(struct vm86plus_struct *v86, unsigned long eip, 135static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
@@ -153,13 +179,75 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
153 (VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) { 179 (VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) {
154 printf("[OK]\tReturned correctly\n"); 180 printf("[OK]\tReturned correctly\n");
155 } else { 181 } else {
156 printf("[FAIL]\tIncorrect return reason\n"); 182 printf("[FAIL]\tIncorrect return reason (started at eip = 0x%lx, ended at eip = 0x%lx)\n", eip, v86->regs.eip);
157 nerrs++; 183 nerrs++;
158 } 184 }
159 185
160 return true; 186 return true;
161} 187}
162 188
189void do_umip_tests(struct vm86plus_struct *vm86, unsigned char *test_mem)
190{
191 struct table_desc {
192 unsigned short limit;
193 unsigned long base;
194 } __attribute__((packed));
195
196 /* Initialize variables with arbitrary values */
197 struct table_desc gdt1 = { .base = 0x3c3c3c3c, .limit = 0x9999 };
198 struct table_desc gdt2 = { .base = 0x1a1a1a1a, .limit = 0xaeae };
199 struct table_desc idt1 = { .base = 0x7b7b7b7b, .limit = 0xf1f1 };
200 struct table_desc idt2 = { .base = 0x89898989, .limit = 0x1313 };
201 unsigned short msw1 = 0x1414, msw2 = 0x2525, msw3 = 3737;
202
203 /* UMIP -- exit with INT3 unless kernel emulation did not trap #GP */
204 do_test(vm86, vmcode_umip - vmcode, VM86_TRAP, 3, "UMIP tests");
205
206 /* Results from displacement-only addressing */
207 msw1 = *(unsigned short *)(test_mem + 2052);
208 memcpy(&idt1, test_mem + 2054, sizeof(idt1));
209 memcpy(&gdt1, test_mem + 2060, sizeof(gdt1));
210
211 /* Results from register-indirect addressing */
212 msw2 = *(unsigned short *)(test_mem + 2066);
213 memcpy(&idt2, test_mem + 2068, sizeof(idt2));
214 memcpy(&gdt2, test_mem + 2074, sizeof(gdt2));
215
216 /* Results when using register operands */
217 msw3 = *(unsigned short *)(test_mem + 2080);
218
219 printf("[INFO]\tResult from SMSW:[0x%04x]\n", msw1);
220 printf("[INFO]\tResult from SIDT: limit[0x%04x]base[0x%08lx]\n",
221 idt1.limit, idt1.base);
222 printf("[INFO]\tResult from SGDT: limit[0x%04x]base[0x%08lx]\n",
223 gdt1.limit, gdt1.base);
224
225 if (msw1 != msw2 || msw1 != msw3)
226 printf("[FAIL]\tAll the results of SMSW should be the same.\n");
227 else
228 printf("[PASS]\tAll the results from SMSW are identical.\n");
229
230 if (memcmp(&gdt1, &gdt2, sizeof(gdt1)))
231 printf("[FAIL]\tAll the results of SGDT should be the same.\n");
232 else
233 printf("[PASS]\tAll the results from SGDT are identical.\n");
234
235 if (memcmp(&idt1, &idt2, sizeof(idt1)))
236 printf("[FAIL]\tAll the results of SIDT should be the same.\n");
237 else
238 printf("[PASS]\tAll the results from SIDT are identical.\n");
239
240 sethandler(SIGILL, sighandler, 0);
241 do_test(vm86, vmcode_umip_str - vmcode, VM86_SIGNAL, 0,
242 "STR instruction");
243 clearhandler(SIGILL);
244
245 sethandler(SIGILL, sighandler, 0);
246 do_test(vm86, vmcode_umip_sldt - vmcode, VM86_SIGNAL, 0,
247 "SLDT instruction");
248 clearhandler(SIGILL);
249}
250
163int main(void) 251int main(void)
164{ 252{
165 struct vm86plus_struct v86; 253 struct vm86plus_struct v86;
@@ -180,6 +268,9 @@ int main(void)
180 v86.regs.ds = load_addr / 16; 268 v86.regs.ds = load_addr / 16;
181 v86.regs.es = load_addr / 16; 269 v86.regs.es = load_addr / 16;
182 270
271 /* Use the end of the page as our stack. */
272 v86.regs.esp = 4096;
273
183 assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */ 274 assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */
184 275
185 /* #BR -- should deliver SIG??? */ 276 /* #BR -- should deliver SIG??? */
@@ -211,6 +302,23 @@ int main(void)
211 v86.regs.eflags &= ~X86_EFLAGS_IF; 302 v86.regs.eflags &= ~X86_EFLAGS_IF;
212 do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set"); 303 do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set");
213 304
305 /* POPF with VIP set but IF clear: should not trap */
306 v86.regs.eflags = X86_EFLAGS_VIP;
307 v86.regs.eax = 0;
308 do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP set and IF clear");
309
310 /* POPF with VIP set and IF set: should trap */
311 v86.regs.eflags = X86_EFLAGS_VIP;
312 v86.regs.eax = X86_EFLAGS_IF;
313 do_test(&v86, vmcode_popf_hlt - vmcode, VM86_STI, 0, "POPF with VIP and IF set");
314
315 /* POPF with VIP clear and IF set: should not trap */
316 v86.regs.eflags = 0;
317 v86.regs.eax = X86_EFLAGS_IF;
318 do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP clear and IF set");
319
320 v86.regs.eflags = 0;
321
214 /* INT3 -- should cause #BP */ 322 /* INT3 -- should cause #BP */
215 do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3"); 323 do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3");
216 324
@@ -218,6 +326,9 @@ int main(void)
218 v86.regs.eax = (unsigned int)-1; 326 v86.regs.eax = (unsigned int)-1;
219 do_test(&v86, vmcode_int80 - vmcode, VM86_INTx, 0x80, "int80"); 327 do_test(&v86, vmcode_int80 - vmcode, VM86_INTx, 0x80, "int80");
220 328
329 /* UMIP -- should exit with INTx 0x80 unless UMIP was not disabled */
330 do_umip_tests(&v86, addr);
331
221 /* Execute a null pointer */ 332 /* Execute a null pointer */
222 v86.regs.cs = 0; 333 v86.regs.cs = 0;
223 v86.regs.ss = 0; 334 v86.regs.ss = 0;
@@ -231,7 +342,7 @@ int main(void)
231 clearhandler(SIGSEGV); 342 clearhandler(SIGSEGV);
232 343
233 /* Make sure nothing explodes if we fork. */ 344 /* Make sure nothing explodes if we fork. */
234 if (fork() > 0) 345 if (fork() == 0)
235 return 0; 346 return 0;
236 347
237 return (nerrs == 0 ? 0 : 1); 348 return (nerrs == 0 ? 0 : 1);
diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
index 1c12536f2081..18f523557983 100644
--- a/tools/thermal/tmon/sysfs.c
+++ b/tools/thermal/tmon/sysfs.c
@@ -486,6 +486,7 @@ int zone_instance_to_index(int zone_inst)
486int update_thermal_data() 486int update_thermal_data()
487{ 487{
488 int i; 488 int i;
489 int next_thermal_record = cur_thermal_record + 1;
489 char tz_name[256]; 490 char tz_name[256];
490 static unsigned long samples; 491 static unsigned long samples;
491 492
@@ -495,9 +496,9 @@ int update_thermal_data()
495 } 496 }
496 497
497 /* circular buffer for keeping historic data */ 498 /* circular buffer for keeping historic data */
498 if (cur_thermal_record >= NR_THERMAL_RECORDS) 499 if (next_thermal_record >= NR_THERMAL_RECORDS)
499 cur_thermal_record = 0; 500 next_thermal_record = 0;
500 gettimeofday(&trec[cur_thermal_record].tv, NULL); 501 gettimeofday(&trec[next_thermal_record].tv, NULL);
501 if (tmon_log) { 502 if (tmon_log) {
502 fprintf(tmon_log, "%lu ", ++samples); 503 fprintf(tmon_log, "%lu ", ++samples);
503 fprintf(tmon_log, "%3.1f ", p_param.t_target); 504 fprintf(tmon_log, "%3.1f ", p_param.t_target);
@@ -507,11 +508,12 @@ int update_thermal_data()
507 snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE, 508 snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE,
508 ptdata.tzi[i].instance); 509 ptdata.tzi[i].instance);
509 sysfs_get_ulong(tz_name, "temp", 510 sysfs_get_ulong(tz_name, "temp",
510 &trec[cur_thermal_record].temp[i]); 511 &trec[next_thermal_record].temp[i]);
511 if (tmon_log) 512 if (tmon_log)
512 fprintf(tmon_log, "%lu ", 513 fprintf(tmon_log, "%lu ",
513 trec[cur_thermal_record].temp[i]/1000); 514 trec[next_thermal_record].temp[i] / 1000);
514 } 515 }
516 cur_thermal_record = next_thermal_record;
515 for (i = 0; i < ptdata.nr_cooling_dev; i++) { 517 for (i = 0; i < ptdata.nr_cooling_dev; i++) {
516 char cdev_name[256]; 518 char cdev_name[256];
517 unsigned long val; 519 unsigned long val;
diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
index 9aa19652e8e8..b43138f8b862 100644
--- a/tools/thermal/tmon/tmon.c
+++ b/tools/thermal/tmon/tmon.c
@@ -336,7 +336,6 @@ int main(int argc, char **argv)
336 show_data_w(); 336 show_data_w();
337 show_cooling_device(); 337 show_cooling_device();
338 } 338 }
339 cur_thermal_record++;
340 time_elapsed += ticktime; 339 time_elapsed += ticktime;
341 controller_handler(trec[0].temp[target_tz_index] / 1000, 340 controller_handler(trec[0].temp[target_tz_index] / 1000,
342 &yk); 341 &yk);
diff --git a/tools/usb/usbip/libsrc/usbip_common.c b/tools/usb/usbip/libsrc/usbip_common.c
index ac73710473de..8000445ff884 100644
--- a/tools/usb/usbip/libsrc/usbip_common.c
+++ b/tools/usb/usbip/libsrc/usbip_common.c
@@ -215,9 +215,16 @@ int read_usb_interface(struct usbip_usb_device *udev, int i,
215 struct usbip_usb_interface *uinf) 215 struct usbip_usb_interface *uinf)
216{ 216{
217 char busid[SYSFS_BUS_ID_SIZE]; 217 char busid[SYSFS_BUS_ID_SIZE];
218 int size;
218 struct udev_device *sif; 219 struct udev_device *sif;
219 220
220 sprintf(busid, "%s:%d.%d", udev->busid, udev->bConfigurationValue, i); 221 size = snprintf(busid, sizeof(busid), "%s:%d.%d",
222 udev->busid, udev->bConfigurationValue, i);
223 if (size < 0 || (unsigned int)size >= sizeof(busid)) {
224 err("busid length %i >= %lu or < 0", size,
225 (unsigned long)sizeof(busid));
226 return -1;
227 }
221 228
222 sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid); 229 sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid);
223 if (!sif) { 230 if (!sif) {
diff --git a/tools/usb/usbip/libsrc/usbip_host_driver.c b/tools/usb/usbip/libsrc/usbip_host_driver.c
index bef08d5c44e8..071b9ce99420 100644
--- a/tools/usb/usbip/libsrc/usbip_host_driver.c
+++ b/tools/usb/usbip/libsrc/usbip_host_driver.c
@@ -39,13 +39,19 @@ struct udev *udev_context;
39static int32_t read_attr_usbip_status(struct usbip_usb_device *udev) 39static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
40{ 40{
41 char status_attr_path[SYSFS_PATH_MAX]; 41 char status_attr_path[SYSFS_PATH_MAX];
42 int size;
42 int fd; 43 int fd;
43 int length; 44 int length;
44 char status; 45 char status;
45 int value = 0; 46 int value = 0;
46 47
47 snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status", 48 size = snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
48 udev->path); 49 udev->path);
50 if (size < 0 || (unsigned int)size >= sizeof(status_attr_path)) {
51 err("usbip_status path length %i >= %lu or < 0", size,
52 (unsigned long)sizeof(status_attr_path));
53 return -1;
54 }
49 55
50 fd = open(status_attr_path, O_RDONLY); 56 fd = open(status_attr_path, O_RDONLY);
51 if (fd < 0) { 57 if (fd < 0) {
@@ -225,6 +231,7 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
225{ 231{
226 char attr_name[] = "usbip_sockfd"; 232 char attr_name[] = "usbip_sockfd";
227 char sockfd_attr_path[SYSFS_PATH_MAX]; 233 char sockfd_attr_path[SYSFS_PATH_MAX];
234 int size;
228 char sockfd_buff[30]; 235 char sockfd_buff[30];
229 int ret; 236 int ret;
230 237
@@ -244,10 +251,20 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
244 } 251 }
245 252
246 /* only the first interface is true */ 253 /* only the first interface is true */
247 snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s", 254 size = snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
248 edev->udev.path, attr_name); 255 edev->udev.path, attr_name);
256 if (size < 0 || (unsigned int)size >= sizeof(sockfd_attr_path)) {
257 err("exported device path length %i >= %lu or < 0", size,
258 (unsigned long)sizeof(sockfd_attr_path));
259 return -1;
260 }
249 261
250 snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd); 262 size = snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
263 if (size < 0 || (unsigned int)size >= sizeof(sockfd_buff)) {
264 err("socket length %i >= %lu or < 0", size,
265 (unsigned long)sizeof(sockfd_buff));
266 return -1;
267 }
251 268
252 ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff, 269 ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff,
253 strlen(sockfd_buff)); 270 strlen(sockfd_buff));
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
index ad9204773533..1274f326242c 100644
--- a/tools/usb/usbip/libsrc/vhci_driver.c
+++ b/tools/usb/usbip/libsrc/vhci_driver.c
@@ -55,12 +55,12 @@ static int parse_status(const char *value)
55 55
56 while (*c != '\0') { 56 while (*c != '\0') {
57 int port, status, speed, devid; 57 int port, status, speed, devid;
58 unsigned long socket; 58 int sockfd;
59 char lbusid[SYSFS_BUS_ID_SIZE]; 59 char lbusid[SYSFS_BUS_ID_SIZE];
60 60
61 ret = sscanf(c, "%d %d %d %x %lx %31s\n", 61 ret = sscanf(c, "%d %d %d %x %u %31s\n",
62 &port, &status, &speed, 62 &port, &status, &speed,
63 &devid, &socket, lbusid); 63 &devid, &sockfd, lbusid);
64 64
65 if (ret < 5) { 65 if (ret < 5) {
66 dbg("sscanf failed: %d", ret); 66 dbg("sscanf failed: %d", ret);
@@ -69,7 +69,7 @@ static int parse_status(const char *value)
69 69
70 dbg("port %d status %d speed %d devid %x", 70 dbg("port %d status %d speed %d devid %x",
71 port, status, speed, devid); 71 port, status, speed, devid);
72 dbg("socket %lx lbusid %s", socket, lbusid); 72 dbg("sockfd %u lbusid %s", sockfd, lbusid);
73 73
74 74
75 /* if a device is connected, look at it */ 75 /* if a device is connected, look at it */
diff --git a/tools/usb/usbip/src/usbip.c b/tools/usb/usbip/src/usbip.c
index d7599d943529..73d8eee8130b 100644
--- a/tools/usb/usbip/src/usbip.c
+++ b/tools/usb/usbip/src/usbip.c
@@ -176,6 +176,8 @@ int main(int argc, char *argv[])
176 break; 176 break;
177 case '?': 177 case '?':
178 printf("usbip: invalid option\n"); 178 printf("usbip: invalid option\n");
179 /* Terminate after printing error */
180 /* FALLTHRU */
179 default: 181 default:
180 usbip_usage(); 182 usbip_usage();
181 goto out; 183 goto out;
diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c
index fa46141ae68b..e121cfb1746a 100644
--- a/tools/usb/usbip/src/usbip_bind.c
+++ b/tools/usb/usbip/src/usbip_bind.c
@@ -144,6 +144,7 @@ static int bind_device(char *busid)
144 int rc; 144 int rc;
145 struct udev *udev; 145 struct udev *udev;
146 struct udev_device *dev; 146 struct udev_device *dev;
147 const char *devpath;
147 148
148 /* Check whether the device with this bus ID exists. */ 149 /* Check whether the device with this bus ID exists. */
149 udev = udev_new(); 150 udev = udev_new();
@@ -152,8 +153,16 @@ static int bind_device(char *busid)
152 err("device with the specified bus ID does not exist"); 153 err("device with the specified bus ID does not exist");
153 return -1; 154 return -1;
154 } 155 }
156 devpath = udev_device_get_devpath(dev);
155 udev_unref(udev); 157 udev_unref(udev);
156 158
159 /* If the device is already attached to vhci_hcd - bail out */
160 if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
161 err("bind loop detected: device: %s is attached to %s\n",
162 devpath, USBIP_VHCI_DRV_NAME);
163 return -1;
164 }
165
157 rc = unbind_other(busid); 166 rc = unbind_other(busid);
158 if (rc == UNBIND_ST_FAILED) { 167 if (rc == UNBIND_ST_FAILED) {
159 err("could not unbind driver from device on busid %s", busid); 168 err("could not unbind driver from device on busid %s", busid);
diff --git a/tools/usb/usbip/src/usbip_detach.c b/tools/usb/usbip/src/usbip_detach.c
index 9db9d21bb2ec..6a8db858caa5 100644
--- a/tools/usb/usbip/src/usbip_detach.c
+++ b/tools/usb/usbip/src/usbip_detach.c
@@ -43,7 +43,7 @@ void usbip_detach_usage(void)
43 43
44static int detach_port(char *port) 44static int detach_port(char *port)
45{ 45{
46 int ret; 46 int ret = 0;
47 uint8_t portnum; 47 uint8_t portnum;
48 char path[PATH_MAX+1]; 48 char path[PATH_MAX+1];
49 49
@@ -73,9 +73,12 @@ static int detach_port(char *port)
73 } 73 }
74 74
75 ret = usbip_vhci_detach_device(portnum); 75 ret = usbip_vhci_detach_device(portnum);
76 if (ret < 0) 76 if (ret < 0) {
77 return -1; 77 ret = -1;
78 goto call_driver_close;
79 }
78 80
81call_driver_close:
79 usbip_vhci_driver_close(); 82 usbip_vhci_driver_close();
80 83
81 return ret; 84 return ret;
diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c
index d5ce34a410e7..ac6081c3db82 100644
--- a/tools/usb/usbip/src/usbip_list.c
+++ b/tools/usb/usbip/src/usbip_list.c
@@ -180,6 +180,7 @@ static int list_devices(bool parsable)
180 const char *busid; 180 const char *busid;
181 char product_name[128]; 181 char product_name[128];
182 int ret = -1; 182 int ret = -1;
183 const char *devpath;
183 184
184 /* Create libudev context. */ 185 /* Create libudev context. */
185 udev = udev_new(); 186 udev = udev_new();
@@ -202,6 +203,14 @@ static int list_devices(bool parsable)
202 path = udev_list_entry_get_name(dev_list_entry); 203 path = udev_list_entry_get_name(dev_list_entry);
203 dev = udev_device_new_from_syspath(udev, path); 204 dev = udev_device_new_from_syspath(udev, path);
204 205
206 /* Ignore devices attached to vhci_hcd */
207 devpath = udev_device_get_devpath(dev);
208 if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
209 dbg("Skip the device %s already attached to %s\n",
210 devpath, USBIP_VHCI_DRV_NAME);
211 continue;
212 }
213
205 /* Get device information. */ 214 /* Get device information. */
206 idVendor = udev_device_get_sysattr_value(dev, "idVendor"); 215 idVendor = udev_device_get_sysattr_value(dev, "idVendor");
207 idProduct = udev_device_get_sysattr_value(dev, "idProduct"); 216 idProduct = udev_device_get_sysattr_value(dev, "idProduct");
diff --git a/tools/usb/usbip/src/usbipd.c b/tools/usb/usbip/src/usbipd.c
index 2a7cd2b8d966..8c5b0faba229 100644
--- a/tools/usb/usbip/src/usbipd.c
+++ b/tools/usb/usbip/src/usbipd.c
@@ -451,7 +451,7 @@ static void set_signal(void)
451 sigaction(SIGTERM, &act, NULL); 451 sigaction(SIGTERM, &act, NULL);
452 sigaction(SIGINT, &act, NULL); 452 sigaction(SIGINT, &act, NULL);
453 act.sa_handler = SIG_IGN; 453 act.sa_handler = SIG_IGN;
454 sigaction(SIGCLD, &act, NULL); 454 sigaction(SIGCHLD, &act, NULL);
455} 455}
456 456
457static const char *pid_file; 457static const char *pid_file;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 49001fa84ead..1203829316b2 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work)
119{ 119{
120 struct kvm_kernel_irqfd *irqfd = 120 struct kvm_kernel_irqfd *irqfd =
121 container_of(work, struct kvm_kernel_irqfd, shutdown); 121 container_of(work, struct kvm_kernel_irqfd, shutdown);
122 struct kvm *kvm = irqfd->kvm;
122 u64 cnt; 123 u64 cnt;
123 124
125 /* Make sure irqfd has been initalized in assign path. */
126 synchronize_srcu(&kvm->irq_srcu);
127
124 /* 128 /*
125 * Synchronize with the wait-queue and unhook ourselves to prevent 129 * Synchronize with the wait-queue and unhook ourselves to prevent
126 * further events. 130 * further events.
@@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
387 391
388 idx = srcu_read_lock(&kvm->irq_srcu); 392 idx = srcu_read_lock(&kvm->irq_srcu);
389 irqfd_update(kvm, irqfd); 393 irqfd_update(kvm, irqfd);
390 srcu_read_unlock(&kvm->irq_srcu, idx);
391 394
392 list_add_tail(&irqfd->list, &kvm->irqfds.items); 395 list_add_tail(&irqfd->list, &kvm->irqfds.items);
393 396
@@ -419,6 +422,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
419 irqfd->consumer.token, ret); 422 irqfd->consumer.token, ret);
420#endif 423#endif
421 424
425 srcu_read_unlock(&kvm->irq_srcu, idx);
422 return 0; 426 return 0;
423 427
424fail: 428fail:
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d080f06fd8d9..b814ae6822b6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -902,8 +902,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
902 /* Check for overlaps */ 902 /* Check for overlaps */
903 r = -EEXIST; 903 r = -EEXIST;
904 kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { 904 kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
905 if ((slot->id >= KVM_USER_MEM_SLOTS) || 905 if (slot->id == id)
906 (slot->id == id))
907 continue; 906 continue;
908 if (!((base_gfn + npages <= slot->base_gfn) || 907 if (!((base_gfn + npages <= slot->base_gfn) ||
909 (base_gfn >= slot->base_gfn + slot->npages))) 908 (base_gfn >= slot->base_gfn + slot->npages)))