author	Praneeth Bajjuri	2017-08-28 18:03:19 -0500
committer	Praneeth Bajjuri	2017-08-28 18:03:19 -0500
commit	dda8acaf6f2d9bddce62c305edf6b3071f04c136 (patch)
tree	612f8b7b18ff3a84184ddc0ce456b9b0287cace6 /arch
parent	b22bbe0c5fa4fd5eb4609108a750b28a744a643e (diff)
parent	eabbcea7629d5f2ec91568f7bd104536614107db (diff)
Merge branch 'p-ti-lsk-android-linux-4.4.y' of git://git.omapzoom.org/kernel/omap into 6AM.1.3-rvc-video
* 'p-ti-lsk-android-linux-4.4.y' of git://git.omapzoom.org/kernel/omap: (2048 commits)
  ARM: dts: dra7: Remove deprecated PCI compatible string
  ARM: dts: dra76-evm: Enable x2 PCIe lanes
  ARM: dts: DRA72x: Use PCIe compatible specific to dra72
  ARM: dts: DRA74x: Use PCIe compatible specific to dra74
  ARM: dts: dra7: Add properties to enable PCIe x2 lane mode
  PCI: dwc: pci-dra7xx: Enable x2 mode support
  PCI: dwc: dra7xx: Add support for SoC specific compatible strings
  dt-bindings: PCI: dra7xx: Add properties to enable x2 lane in dra7
  dt-bindings: PCI: dra7xx: Add SoC specific compatible strings
  ARM: dts: dra7-evm: Move pcie RC node to common file
  ARM: dts: dra76-evm: add higher speed MMC/SD modes
  Linux 4.4.84
  usb: qmi_wwan: add D-Link DWM-222 device ID
  usb: optimize acpi companion search for usb port devices
  perf/x86: Fix LBR related crashes on Intel Atom
  pids: make task_tgid_nr_ns() safe
  Sanitize 'move_pages()' permission checks
  irqchip/atmel-aic: Fix unbalanced refcount in aic_common_rtc_irq_fixup()
  irqchip/atmel-aic: Fix unbalanced of_node_put() in aic_common_irq_fixup()
  x86/asm/64: Clear AC on NMI entries
  ...

Signed-off-by: Praneeth Bajjuri <praneeth@ti.com>

Conflicts:
	arch/arm/boot/dts/Makefile
	drivers/gpu/drm/omapdrm/dss/dispc.c
Diffstat (limited to 'arch')
-rw-r--r-- arch/alpha/kernel/osf_sys.c | 6
-rw-r--r-- arch/arc/include/asm/delay.h | 4
-rw-r--r-- arch/arc/include/asm/entry-arcv2.h | 2
-rw-r--r-- arch/arc/include/asm/ptrace.h | 2
-rw-r--r-- arch/arc/kernel/unaligned.c | 3
-rw-r--r-- arch/arc/mm/mmap.c | 2
-rw-r--r-- arch/arm/boot/dts/Makefile | 7
-rw-r--r-- arch/arm/boot/dts/armada-388-gp.dts | 4
-rw-r--r-- arch/arm/boot/dts/at91-sama5d2_xplained.dts | 2
-rw-r--r-- arch/arm/boot/dts/at91-sama5d3_xplained.dts | 5
-rw-r--r-- arch/arm/boot/dts/at91-sama5d4_xplained.dts | 2
-rw-r--r-- arch/arm/boot/dts/bcm5301x.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/dra7-evm-common.dtsi | 393
-rw-r--r-- arch/arm/boot/dts/dra7-evm-fpd-auo-g101evn01.0.dts | 37
-rw-r--r-- arch/arm/boot/dts/dra7-evm-fpd-lg.dts | 38
-rw-r--r-- arch/arm/boot/dts/dra7-evm-lcd-lg.dts | 8
-rw-r--r-- arch/arm/boot/dts/dra7-evm.dts | 441
-rw-r--r-- arch/arm/boot/dts/dra7.dtsi | 7
-rw-r--r-- arch/arm/boot/dts/dra72-evm-common.dtsi | 27
-rw-r--r-- arch/arm/boot/dts/dra72-evm-fpd-lg.dts | 39
-rw-r--r-- arch/arm/boot/dts/dra72-evm.dts | 34
-rw-r--r-- arch/arm/boot/dts/dra72x.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/dra74x.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/dra76-evm-fpd-auo-g101evn01.0.dts | 67
-rw-r--r-- arch/arm/boot/dts/dra76-evm.dts | 498
-rw-r--r-- arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi | 435
-rw-r--r-- arch/arm/boot/dts/dra76x.dtsi | 81
-rw-r--r-- arch/arm/boot/dts/dra7x-evm-fpd-auo-g101evn01.0.dtsi | 107
-rw-r--r-- arch/arm/boot/dts/dra7x-evm-fpd-lg.dtsi | 59
-rw-r--r-- arch/arm/boot/dts/dra7x-evm-lcd-lg.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/dra7x-evm-lcd-osd.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/dra7xx-jamr3.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/imx6dl.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/omap3-n900.dts | 2
-rw-r--r-- arch/arm/boot/dts/omap5.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/sama5d2.dtsi | 35
-rw-r--r-- arch/arm/boot/dts/tegra20-paz00.dts | 1
-rw-r--r-- arch/arm/configs/s3c2410_defconfig | 6
-rw-r--r-- arch/arm/crypto/aes-ce-glue.c | 4
-rw-r--r-- arch/arm/include/asm/elf.h | 8
-rw-r--r-- arch/arm/include/asm/ftrace.h | 18
-rw-r--r-- arch/arm/include/asm/kvm_mmu.h | 9
-rw-r--r-- arch/arm/kernel/Makefile | 1
-rw-r--r-- arch/arm/kernel/pj4-cp0.c | 4
-rw-r--r-- arch/arm/kernel/ptrace.c | 2
-rw-r--r-- arch/arm/kernel/vdso.c | 13
-rw-r--r-- arch/arm/kvm/init.S | 5
-rw-r--r-- arch/arm/kvm/mmu.c | 32
-rw-r--r-- arch/arm/kvm/psci.c | 8
-rw-r--r-- arch/arm/lib/getuser.S | 2
-rw-r--r-- arch/arm/mach-at91/pm.c | 18
-rw-r--r-- arch/arm/mach-omap2/board-generic.c | 1
-rw-r--r-- arch/arm/mach-omap2/id.c | 14
-rw-r--r-- arch/arm/mach-omap2/omap-headsmp.S | 3
-rw-r--r-- arch/arm/mach-omap2/omap-smp.c | 2
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 70
-rw-r--r-- arch/arm/mach-omap2/powerdomains7xx_data.c | 33
-rw-r--r-- arch/arm/mach-omap2/soc.h | 6
-rw-r--r-- arch/arm/mach-omap2/timer.c | 6
-rw-r--r-- arch/arm/mm/fault.c | 4
-rw-r--r-- arch/arm/mm/fault.h | 4
-rw-r--r-- arch/arm/mm/mmap.c | 4
-rw-r--r-- arch/arm/mm/mmu.c | 8
-rw-r--r-- arch/arm/vdso/Makefile | 2
-rw-r--r-- arch/arm64/Kconfig | 56
-rw-r--r-- arch/arm64/Makefile | 16
-rw-r--r-- arch/arm64/boot/dts/arm/juno-r1.dts | 28
-rw-r--r-- arch/arm64/boot/dts/arm/juno-sched-energy.dtsi | 147
-rw-r--r-- arch/arm64/boot/dts/arm/juno.dts | 36
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts | 2
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 6
-rw-r--r-- arch/arm64/configs/defconfig | 2
-rw-r--r-- arch/arm64/crypto/Kconfig | 5
-rw-r--r-- arch/arm64/crypto/Makefile | 3
-rw-r--r-- arch/arm64/crypto/aes-glue.c | 4
-rw-r--r-- arch/arm64/crypto/aes-modes.S | 88
-rw-r--r-- arch/arm64/crypto/poly-hash-ce-core.S | 163
-rw-r--r-- arch/arm64/crypto/poly-hash-ce-glue.c | 166
-rw-r--r-- arch/arm64/include/asm/acpi.h | 6
-rw-r--r-- arch/arm64/include/asm/alternative.h | 70
-rw-r--r-- arch/arm64/include/asm/asm-uaccess.h | 13
-rw-r--r-- arch/arm64/include/asm/assembler.h | 17
-rw-r--r-- arch/arm64/include/asm/barrier.h | 21
-rw-r--r-- arch/arm64/include/asm/cacheflush.h | 1
-rw-r--r-- arch/arm64/include/asm/cmpxchg.h | 2
-rw-r--r-- arch/arm64/include/asm/elf.h | 12
-rw-r--r-- arch/arm64/include/asm/futex.h | 3
-rw-r--r-- arch/arm64/include/asm/hardirq.h | 2
-rw-r--r-- arch/arm64/include/asm/hw_breakpoint.h | 6
-rw-r--r-- arch/arm64/include/asm/kexec.h | 98
-rw-r--r-- arch/arm64/include/asm/kvm_mmu.h | 3
-rw-r--r-- arch/arm64/include/asm/mmu.h | 2
-rw-r--r-- arch/arm64/include/asm/mmu_context.h | 6
-rw-r--r-- arch/arm64/include/asm/page.h | 12
-rw-r--r-- arch/arm64/include/asm/ptrace.h | 2
-rw-r--r-- arch/arm64/include/asm/smp.h | 52
-rw-r--r-- arch/arm64/include/asm/spinlock.h | 7
-rw-r--r-- arch/arm64/include/asm/thread_info.h | 2
-rw-r--r-- arch/arm64/include/asm/uaccess.h | 119
-rw-r--r-- arch/arm64/include/asm/vdso_datapage.h | 8
-rw-r--r-- arch/arm64/include/asm/virt.h | 5
-rw-r--r-- arch/arm64/kernel/Makefile | 8
-rw-r--r-- arch/arm64/kernel/armv8_deprecated.c | 6
-rw-r--r-- arch/arm64/kernel/asm-offsets.c | 14
-rw-r--r-- arch/arm64/kernel/cpu-reset.S | 54
-rw-r--r-- arch/arm64/kernel/cpu-reset.h | 34
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 36
-rw-r--r-- arch/arm64/kernel/crash_dump.c | 71
-rw-r--r-- arch/arm64/kernel/entry.S | 24
-rw-r--r-- arch/arm64/kernel/head.S | 34
-rw-r--r-- arch/arm64/kernel/hibernate.c | 92
-rw-r--r-- arch/arm64/kernel/hw_breakpoint.c | 154
-rw-r--r-- arch/arm64/kernel/hyp-stub.S | 10
-rw-r--r-- arch/arm64/kernel/machine_kexec.c | 364
-rw-r--r-- arch/arm64/kernel/ptrace.c | 7
-rw-r--r-- arch/arm64/kernel/relocate_kernel.S | 130
-rw-r--r-- arch/arm64/kernel/setup.c | 7
-rw-r--r-- arch/arm64/kernel/smp.c | 153
-rw-r--r-- arch/arm64/kernel/stacktrace.c | 5
-rw-r--r-- arch/arm64/kernel/traps.c | 10
-rw-r--r-- arch/arm64/kernel/vdso.c | 12
-rw-r--r-- arch/arm64/kernel/vdso/Makefile | 7
-rw-r--r-- arch/arm64/kernel/vdso/gettimeofday.S | 331
-rw-r--r-- arch/arm64/kvm/hyp/Makefile | 4
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 6
-rw-r--r-- arch/arm64/lib/clear_user.S | 3
-rw-r--r-- arch/arm64/lib/copy_from_user.S | 3
-rw-r--r-- arch/arm64/lib/copy_in_user.S | 3
-rw-r--r-- arch/arm64/lib/copy_to_user.S | 3
-rw-r--r-- arch/arm64/mm/cache.S | 6
-rw-r--r-- arch/arm64/mm/fault.c | 17
-rw-r--r-- arch/arm64/mm/hugetlbpage.c | 22
-rw-r--r-- arch/arm64/mm/init.c | 181
-rw-r--r-- arch/arm64/mm/mmu.c | 140
-rw-r--r-- arch/arm64/mm/pageattr.c | 13
-rw-r--r-- arch/arm64/mm/proc.S | 9
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 8
-rw-r--r-- arch/arm64/xen/hypercall.S | 8
-rw-r--r-- arch/c6x/kernel/ptrace.c | 41
-rw-r--r-- arch/frv/mm/elf-fdpic.c | 2
-rw-r--r-- arch/h8300/kernel/ptrace.c | 8
-rw-r--r-- arch/ia64/Makefile | 4
-rw-r--r-- arch/metag/include/asm/uaccess.h | 64
-rw-r--r-- arch/metag/kernel/ptrace.c | 19
-rw-r--r-- arch/metag/lib/usercopy.c | 312
-rw-r--r-- arch/mips/Kconfig | 3
-rw-r--r-- arch/mips/ath79/common.c | 16
-rw-r--r-- arch/mips/bcm47xx/buttons.c | 10
-rw-r--r-- arch/mips/cavium-octeon/octeon-memcpy.S | 20
-rw-r--r-- arch/mips/configs/ip22_defconfig | 4
-rw-r--r-- arch/mips/configs/ip27_defconfig | 3
-rw-r--r-- arch/mips/configs/lemote2f_defconfig | 2
-rw-r--r-- arch/mips/configs/malta_defconfig | 4
-rw-r--r-- arch/mips/configs/malta_kvm_defconfig | 4
-rw-r--r-- arch/mips/configs/malta_kvm_guest_defconfig | 4
-rw-r--r-- arch/mips/configs/maltaup_xpa_defconfig | 4
-rw-r--r-- arch/mips/configs/nlm_xlp_defconfig | 2
-rw-r--r-- arch/mips/configs/nlm_xlr_defconfig | 2
-rw-r--r-- arch/mips/dec/int-handler.S | 40
-rw-r--r-- arch/mips/include/asm/branch.h | 5
-rw-r--r-- arch/mips/include/asm/checksum.h | 2
-rw-r--r-- arch/mips/include/asm/irq.h | 12
-rw-r--r-- arch/mips/include/asm/spinlock.h | 8
-rw-r--r-- arch/mips/include/asm/stackframe.h | 7
-rw-r--r-- arch/mips/kernel/asm-offsets.c | 1
-rw-r--r-- arch/mips/kernel/branch.c | 42
-rw-r--r-- arch/mips/kernel/crash.c | 16
-rw-r--r-- arch/mips/kernel/elf.c | 2
-rw-r--r-- arch/mips/kernel/entry.S | 3
-rw-r--r-- arch/mips/kernel/genex.S | 81
-rw-r--r-- arch/mips/kernel/irq.c | 11
-rw-r--r-- arch/mips/kernel/kgdb.c | 48
-rw-r--r-- arch/mips/kernel/mips-r2-to-r6-emul.c | 12
-rw-r--r-- arch/mips/kernel/pm-cps.c | 9
-rw-r--r-- arch/mips/kernel/proc.c | 2
-rw-r--r-- arch/mips/kernel/process.c | 166
-rw-r--r-- arch/mips/kernel/ptrace.c | 5
-rw-r--r-- arch/mips/kernel/scall32-o32.S | 2
-rw-r--r-- arch/mips/kernel/scall64-64.S | 2
-rw-r--r-- arch/mips/kernel/scall64-n32.S | 2
-rw-r--r-- arch/mips/kernel/scall64-o32.S | 2
-rw-r--r-- arch/mips/kernel/syscall.c | 15
-rw-r--r-- arch/mips/kernel/traps.c | 2
-rw-r--r-- arch/mips/lantiq/xway/sysctrl.c | 10
-rw-r--r-- arch/mips/math-emu/cp1emu.c | 38
-rw-r--r-- arch/mips/mm/mmap.c | 2
-rw-r--r-- arch/mips/mm/sc-ip22.c | 54
-rw-r--r-- arch/mips/mm/tlbex.c | 25
-rw-r--r-- arch/mips/netlogic/common/reset.S | 11
-rw-r--r-- arch/mips/netlogic/common/smpboot.S | 4
-rw-r--r-- arch/mips/ralink/mt7620.c | 84
-rw-r--r-- arch/mips/ralink/prom.c | 9
-rw-r--r-- arch/mips/ralink/rt288x.c | 12
-rw-r--r-- arch/mips/ralink/rt305x.c | 11
-rw-r--r-- arch/mips/ralink/rt3883.c | 14
-rw-r--r-- arch/mips/sgi-ip22/Platform | 2
-rw-r--r-- arch/nios2/kernel/prom.c | 7
-rw-r--r-- arch/nios2/kernel/setup.c | 3
-rw-r--r-- arch/openrisc/kernel/vmlinux.lds.S | 2
-rw-r--r-- arch/parisc/include/asm/bitops.h | 8
-rw-r--r-- arch/parisc/include/asm/dma-mapping.h | 11
-rw-r--r-- arch/parisc/include/asm/mmu_context.h | 15
-rw-r--r-- arch/parisc/include/uapi/asm/bitsperlong.h | 2
-rw-r--r-- arch/parisc/include/uapi/asm/swab.h | 5
-rw-r--r-- arch/parisc/kernel/sys_parisc.c | 15
-rw-r--r-- arch/parisc/kernel/syscall_table.S | 2
-rw-r--r-- arch/parisc/mm/fault.c | 2
-rw-r--r-- arch/powerpc/boot/zImage.lds.S | 1
-rw-r--r-- arch/powerpc/include/asm/atomic.h | 4
-rw-r--r-- arch/powerpc/include/asm/elf.h | 13
-rw-r--r-- arch/powerpc/include/asm/reg.h | 2
-rw-r--r-- arch/powerpc/kernel/align.c | 27
-rw-r--r-- arch/powerpc/kernel/eeh.c | 10
-rw-r--r-- arch/powerpc/kernel/eeh_driver.c | 21
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 6
-rw-r--r-- arch/powerpc/kernel/exceptions-64e.S | 12
-rw-r--r-- arch/powerpc/kernel/hw_breakpoint.c | 4
-rw-r--r-- arch/powerpc/kernel/kprobes.c | 11
-rw-r--r-- arch/powerpc/kernel/mce.c | 2
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 3
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 9
-rw-r--r-- arch/powerpc/kernel/traps.c | 4
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 60
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 41
-rw-r--r-- arch/powerpc/kvm/emulate.c | 1
-rw-r--r-- arch/powerpc/lib/sstep.c | 39
-rw-r--r-- arch/powerpc/mm/hash_native_64.c | 7
-rw-r--r-- arch/powerpc/mm/slb_low.S | 10
-rw-r--r-- arch/powerpc/mm/slice.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/opal-wrappers.S | 2
-rw-r--r-- arch/powerpc/platforms/pseries/dlpar.c | 1
-rw-r--r-- arch/powerpc/platforms/pseries/hotplug-memory.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/reconfig.c | 1
-rw-r--r-- arch/s390/Kconfig | 3
-rw-r--r-- arch/s390/boot/compressed/misc.c | 35
-rw-r--r-- arch/s390/include/asm/ctl_reg.h | 4
-rw-r--r-- arch/s390/include/asm/elf.h | 15
-rw-r--r-- arch/s390/include/asm/pgtable.h | 2
-rw-r--r-- arch/s390/include/asm/processor.h | 3
-rw-r--r-- arch/s390/include/asm/syscall.h | 6
-rw-r--r-- arch/s390/include/asm/uaccess.h | 2
-rw-r--r-- arch/s390/kernel/crash_dump.c | 33
-rw-r--r-- arch/s390/kernel/entry.S | 40
-rw-r--r-- arch/s390/kernel/ptrace.c | 8
-rw-r--r-- arch/s390/kernel/setup.c | 8
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 3
-rw-r--r-- arch/s390/mm/init.c | 10
-rw-r--r-- arch/s390/mm/mmap.c | 4
-rw-r--r-- arch/s390/mm/pgtable.c | 19
-rw-r--r-- arch/s390/mm/vmem.c | 2
-rw-r--r-- arch/s390/net/bpf_jit_comp.c | 3
-rw-r--r-- arch/s390/pci/pci_dma.c | 16
-rw-r--r-- arch/sh/mm/mmap.c | 4
-rw-r--r-- arch/sparc/Kconfig | 4
-rw-r--r-- arch/sparc/include/asm/mmu_64.h | 2
-rw-r--r-- arch/sparc/include/asm/mmu_context_64.h | 46
-rw-r--r-- arch/sparc/include/asm/pgtable_32.h | 4
-rw-r--r-- arch/sparc/include/asm/pgtable_64.h | 15
-rw-r--r-- arch/sparc/include/asm/pil.h | 1
-rw-r--r-- arch/sparc/include/asm/setup.h | 2
-rw-r--r-- arch/sparc/include/asm/trap_block.h | 1
-rw-r--r-- arch/sparc/include/asm/vio.h | 1
-rw-r--r-- arch/sparc/kernel/irq_64.c | 17
-rw-r--r-- arch/sparc/kernel/kernel.h | 1
-rw-r--r-- arch/sparc/kernel/ptrace_64.c | 2
-rw-r--r-- arch/sparc/kernel/smp_64.c | 216
-rw-r--r-- arch/sparc/kernel/sun4v_ivec.S | 15
-rw-r--r-- arch/sparc/kernel/sys_sparc_64.c | 4
-rw-r--r-- arch/sparc/kernel/traps_64.c | 5
-rw-r--r-- arch/sparc/kernel/tsb.S | 23
-rw-r--r-- arch/sparc/kernel/ttable_64.S | 2
-rw-r--r-- arch/sparc/kernel/vio.c | 68
-rw-r--r-- arch/sparc/mm/hugetlbpage.c | 2
-rw-r--r-- arch/sparc/mm/init_32.c | 2
-rw-r--r-- arch/sparc/mm/init_64.c | 88
-rw-r--r-- arch/sparc/mm/tsb.c | 7
-rw-r--r-- arch/sparc/mm/ultra.S | 5
-rw-r--r-- arch/sparc/power/hibernate.c | 3
-rw-r--r-- arch/tile/kernel/ptrace.c | 2
-rw-r--r-- arch/tile/mm/hugetlbpage.c | 2
-rw-r--r-- arch/x86/boot/boot.h | 2
-rw-r--r-- arch/x86/boot/string.c | 1
-rw-r--r-- arch/x86/boot/string.h | 9
-rw-r--r-- arch/x86/crypto/ghash-clmulni-intel_glue.c | 26
-rw-r--r-- arch/x86/crypto/sha1_avx2_x86_64_asm.S | 67
-rw-r--r-- arch/x86/entry/entry_64.S | 2
-rw-r--r-- arch/x86/entry/vdso/Makefile | 4
-rw-r--r-- arch/x86/entry/vdso/vdso32-setup.c | 11
-rw-r--r-- arch/x86/include/asm/elf.h | 15
-rw-r--r-- arch/x86/include/asm/kvm_emulate.h | 4
-rw-r--r-- arch/x86/include/asm/msr-index.h | 2
-rw-r--r-- arch/x86/include/asm/pat.h | 1
-rw-r--r-- arch/x86/include/asm/pmem.h | 45
-rw-r--r-- arch/x86/include/asm/xen/hypercall.h | 3
-rw-r--r-- arch/x86/kernel/acpi/boot.c | 8
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 4
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce_amd.c | 5
-rw-r--r-- arch/x86/kernel/cpu/mshyperv.c | 24
-rw-r--r-- arch/x86/kernel/cpu/perf_event.c | 4
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_lbr.c | 11
-rw-r--r-- arch/x86/kernel/fpu/init.c | 1
-rw-r--r-- arch/x86/kernel/ftrace.c | 12
-rw-r--r-- arch/x86/kernel/head64.c | 1
-rw-r--r-- arch/x86/kernel/hpet.c | 1
-rw-r--r-- arch/x86/kernel/kprobes/common.h | 2
-rw-r--r-- arch/x86/kernel/kprobes/core.c | 6
-rw-r--r-- arch/x86/kernel/kprobes/opt.c | 2
-rw-r--r-- arch/x86/kernel/kvm.c | 6
-rw-r--r-- arch/x86/kernel/pci-calgary_64.c | 2
-rw-r--r-- arch/x86/kernel/setup.c | 7
-rw-r--r-- arch/x86/kernel/sys_x86_64.c | 4
-rw-r--r-- arch/x86/kvm/cpuid.c | 39
-rw-r--r-- arch/x86/kvm/cpuid.h | 9
-rw-r--r-- arch/x86/kvm/emulate.c | 16
-rw-r--r-- arch/x86/kvm/mmu.c | 7
-rw-r--r-- arch/x86/kvm/mmu.h | 1
-rw-r--r-- arch/x86/kvm/pmu_intel.c | 2
-rw-r--r-- arch/x86/kvm/vmx.c | 108
-rw-r--r-- arch/x86/kvm/x86.c | 69
-rw-r--r-- arch/x86/lib/copy_user_64.S | 7
-rw-r--r-- arch/x86/mm/hugetlbpage.c | 2
-rw-r--r-- arch/x86/mm/init.c | 41
-rw-r--r-- arch/x86/mm/kasan_init_64.c | 1
-rw-r--r-- arch/x86/mm/mpx.c | 12
-rw-r--r-- arch/x86/mm/numa_32.c | 1
-rw-r--r-- arch/x86/mm/pat.c | 28
-rw-r--r-- arch/x86/mm/tlb.c | 4
-rw-r--r-- arch/x86/pci/xen.c | 23
-rw-r--r-- arch/x86/platform/goldfish/goldfish.c | 14
-rw-r--r-- arch/x86/platform/intel-mid/device_libs/platform_wdt.c | 2
-rw-r--r-- arch/x86/tools/relocs.c | 3
-rw-r--r-- arch/x86/um/ptrace_64.c | 2
-rw-r--r-- arch/x86/xen/mmu.c | 7
-rw-r--r-- arch/x86/xen/setup.c | 6
-rw-r--r-- arch/x86/xen/spinlock.c | 6
-rw-r--r-- arch/x86/xen/time.c | 6
-rw-r--r-- arch/xtensa/include/asm/irq.h | 3
-rw-r--r-- arch/xtensa/kernel/irq.c | 5
-rw-r--r-- arch/xtensa/kernel/setup.c | 4
-rw-r--r-- arch/xtensa/kernel/syscall.c | 2
-rw-r--r-- arch/xtensa/platforms/xtfpga/include/platform/hardware.h | 6
-rw-r--r-- arch/xtensa/platforms/xtfpga/setup.c | 10
342 files changed, 6762 insertions, 2266 deletions
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 6cc08166ff00..63f06a2b1f7f 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1188,8 +1188,10 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
 	if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
 		return -EFAULT;
 
-	err = 0;
-	err |= put_user(status, ustatus);
+	err = put_user(status, ustatus);
+	if (ret < 0)
+		return err ? err : ret;
+
 	err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
 	err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
 	err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index a36e8601114d..d5da2115d78a 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
26 " lp 1f \n" 26 " lp 1f \n"
27 " nop \n" 27 " nop \n"
28 "1: \n" 28 "1: \n"
29 : : "r"(loops)); 29 :
30 : "r"(loops)
31 : "lp_count");
30} 32}
31 33
32extern void __bad_udelay(void); 34extern void __bad_udelay(void);
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index b5ff87e6f4b7..aee1a77934cf 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -16,6 +16,7 @@
 	;
 	; Now manually save: r12, sp, fp, gp, r25
 
+	PUSH	r30
 	PUSH	r12
 
 	; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
 	POPAX	AUX_USER_SP
 1:
 	POP	r12
+	POP	r30
 
 .endm
 
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 69095da1fcfd..47111d565a95 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -84,7 +84,7 @@ struct pt_regs {
 	unsigned long fp;
 	unsigned long sp;	/* user/kernel sp depending on where we came from  */
 
-	unsigned long r12;
+	unsigned long r12, r30;
 
 	/*------- Below list auto saved by h/w -----------*/
 	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index abd961f3e763..5f69c3bd59bb 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 	if (state.fault)
 		goto fault;
 
+	/* clear any remanants of delay slot */
 	if (delay_mode(regs)) {
-		regs->ret = regs->bta;
+		regs->ret = regs->bta & ~1U;
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 2e06d56e987b..cf4ae6958240 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index c1bff8a87485..4e037366a62f 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -494,11 +494,13 @@ dtb-$(CONFIG_SOC_OMAP5) += \
 dtb-$(CONFIG_SOC_DRA7XX) += \
 	dra7-evm.dtb \
 	dra7-evm-lcd-lg.dtb \
+	dra7-evm-fpd-lg.dtb \
 	dra7-evm-lcd-lg-late-attach.dtb \
 	dra7-evm-lcd-lg-late-attach-no-map.dtb \
 	dra7-evm-lcd-osd.dtb \
 	dra7-evm-lcd-osd101t2587.dtb \
 	dra7-evm-lcd-osd-late-attach.dtb \
+	dra7-evm-fpd-auo-g101evn01.0.dtb \
 	dra7-evm-vision.dtb \
 	dra7-evm-robust-rvc.dtb \
 	dra7-evm-early-video.dtb \
@@ -516,6 +518,7 @@ dtb-$(CONFIG_SOC_DRA7XX) += \
 	dra72-evm-lcd-lg.dtb \
 	dra72-evm-lcd-osd.dtb \
 	dra72-evm-lcd-osd101t2587.dtb \
+	dra72-evm-fpd-lg.dtb \
 	dra72-evm-revc.dtb \
 	dra72-evm-revc-lcd-osd101t2045.dtb \
 	dra72-evm-revc-lcd-osd101t2587.dtb \
@@ -524,7 +527,9 @@ dtb-$(CONFIG_SOC_DRA7XX) += \
 	dra71-evm.dtb \
 	dra71-evm-lcd-auo-g101evn01.0.dtb \
 	dra71-evm-robust-rvc.dtb \
-	dra71-evm-early-video.dtb
+	dra71-evm-early-video.dtb \
+	dra76-evm.dtb \
+	dra76-evm-fpd-auo-g101evn01.0.dtb
 dtb-$(CONFIG_ARCH_ORION5X) += \
 	orion5x-lacie-d2-network.dtb \
 	orion5x-lacie-ethernet-disk-mini-v2.dtb \
diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
index cd316021d6ce..6c1b45c1af66 100644
--- a/arch/arm/boot/dts/armada-388-gp.dts
+++ b/arch/arm/boot/dts/armada-388-gp.dts
@@ -89,7 +89,7 @@
 			pinctrl-names = "default";
 			pinctrl-0 = <&pca0_pins>;
 			interrupt-parent = <&gpio0>;
-			interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+			interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
 			gpio-controller;
 			#gpio-cells = <2>;
 			interrupt-controller;
@@ -101,7 +101,7 @@
 			compatible = "nxp,pca9555";
 			pinctrl-names = "default";
 			interrupt-parent = <&gpio0>;
-			interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+			interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
 			gpio-controller;
 			#gpio-cells = <2>;
 			interrupt-controller;
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index e74df327cdd3..20618a897c99 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -122,6 +122,8 @@
 			uart1: serial@f8020000 {
 				pinctrl-names = "default";
 				pinctrl-0 = <&pinctrl_uart1_default>;
+				atmel,use-dma-rx;
+				atmel,use-dma-tx;
 				status = "okay";
 			};
 
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index f3e2b96c06a3..0bd325c314e1 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -162,9 +162,10 @@
 			};
 
 			adc0: adc@f8018000 {
+				atmel,adc-vref = <3300>;
+				atmel,adc-channels-used = <0xfe>;
 				pinctrl-0 = <
 					&pinctrl_adc0_adtrg
-					&pinctrl_adc0_ad0
 					&pinctrl_adc0_ad1
 					&pinctrl_adc0_ad2
 					&pinctrl_adc0_ad3
@@ -172,8 +173,6 @@
 					&pinctrl_adc0_ad5
 					&pinctrl_adc0_ad6
 					&pinctrl_adc0_ad7
-					&pinctrl_adc0_ad8
-					&pinctrl_adc0_ad9
 					>;
 				status = "okay";
 			};
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index da84e65b56ef..e27024cdf48b 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -110,6 +110,8 @@
 			};
 
 			usart3: serial@fc00c000 {
+				atmel,use-dma-rx;
+				atmel,use-dma-tx;
 				status = "okay";
 			};
 
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 6f50f672efbd..de8ac998604d 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -54,14 +54,14 @@
 		timer@0200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x0200 0x100>;
-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&clk_periph>;
 		};
 
 		local-timer@0600 {
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0x0600 0x100>;
-			interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&clk_periph>;
 		};
 
diff --git a/arch/arm/boot/dts/dra7-evm-common.dtsi b/arch/arm/boot/dts/dra7-evm-common.dtsi
new file mode 100644
index 000000000000..2bbc04791628
--- /dev/null
+++ b/arch/arm/boot/dts/dra7-evm-common.dtsi
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clk/ti-dra7-atl.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+	aliases {
+		display0 = &hdmi0;
+		sound0 = &sound0;
+		sound1 = &hdmi;
+	};
+
+	chosen {
+		stdout-path = &uart1;
+	};
+
+	sound0: sound0 {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "DRA7xx-EVM";
+		simple-audio-card,widgets =
+			"Headphone", "Headphone Jack",
+			"Line", "Line Out",
+			"Microphone", "Mic Jack",
+			"Line", "Line In";
+		simple-audio-card,routing =
+			"Headphone Jack", "HPLOUT",
+			"Headphone Jack", "HPROUT",
+			"Line Out", "LLOUT",
+			"Line Out", "RLOUT",
+			"MIC3L", "Mic Jack",
+			"MIC3R", "Mic Jack",
+			"Mic Jack", "Mic Bias",
+			"LINE1L", "Line In",
+			"LINE1R", "Line In";
+		simple-audio-card,format = "dsp_b";
+		simple-audio-card,bitclock-master = <&sound0_master>;
+		simple-audio-card,frame-master = <&sound0_master>;
+		simple-audio-card,bitclock-inversion;
+
+		sound0_master: simple-audio-card,cpu {
+			sound-dai = <&mcasp3>;
+			system-clock-frequency = <11289600>;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&tlv320aic3106>;
+			clocks = <&atl_clkin2_ck>;
+		};
+	};
+
+	extcon_usb1: extcon_usb1 {
+		compatible = "linux,extcon-usb-gpio";
+		id-gpio = <&pcf_gpio_21 1 GPIO_ACTIVE_HIGH>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		led0 {
+			label = "dra7:usr1";
+			gpios = <&pcf_lcd 4 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+		};
+
+		led1 {
+			label = "dra7:usr2";
+			gpios = <&pcf_lcd 5 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+		};
+
+		led2 {
+			label = "dra7:usr3";
+			gpios = <&pcf_lcd 6 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+		};
+
+		led3 {
+			label = "dra7:usr4";
+			gpios = <&pcf_lcd 7 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		autorepeat;
+
+		USER1 {
+			label = "btnUser1";
+			linux,code = <BTN_0>;
+			gpios = <&pcf_lcd 2 GPIO_ACTIVE_LOW>;
+		};
+
+		USER2 {
+			label = "btnUser2";
+			linux,code = <BTN_1>;
+			gpios = <&pcf_lcd 3 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	hdmi0: connector@1 {
+		compatible = "hdmi-connector";
+		label = "hdmi";
+
+		type = "a";
+
+		port {
+			hdmi_connector_in: endpoint {
+				remote-endpoint = <&tpd12s015_out>;
+			};
+		};
+	};
+
+	tpd12s015: encoder@1 {
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+
+				tpd12s015_in: endpoint {
+					remote-endpoint = <&hdmi_out>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+
+				tpd12s015_out: endpoint {
+					remote-endpoint = <&hdmi_connector_in>;
+				};
+			};
+		};
+	};
+
+	clk_ov10633_fixed: clk_ov10633_fixed {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <24000000>;
+	};
+};
+
+&dra7_pmx_core {
+	dcan1_pins_default: dcan1_pins_default {
+		pinctrl-single,pins = <
+			0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
+			0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */
+		>;
+	};
+
+	dcan1_pins_sleep: dcan1_pins_sleep {
+		pinctrl-single,pins = <
+			0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */
+			0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */
+		>;
+	};
+};
+
+&i2c3 {
+	status = "okay";
+	clock-frequency = <400000>;
+};
+
+&mcspi1 {
+	status = "okay";
+};
+
+&mcspi2 {
+	status = "okay";
+};
+
+&uart1 {
+	status = "okay";
+	interrupts-extended = <&crossbar_mpu GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+			      <&dra7_pmx_core 0x3e0>;
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&uart3 {
+	status = "okay";
+};
+
+&qspi {
+	status = "okay";
+
+	spi-max-frequency = <76800000>;
+	m25p80@0 {
+		compatible = "s25fl256s1";
+		spi-max-frequency = <76800000>;
+		reg = <0>;
+		spi-tx-bus-width = <1>;
+		spi-rx-bus-width = <4>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		/* MTD partition table.
+		 * The ROM checks the first four physical blocks
+		 * for a valid file to boot and the flash here is
+		 * 64KiB block size.
+		 */
+		partition@0 {
+			label = "QSPI.SPL";
+			reg = <0x00000000 0x000040000>;
+		};
+		partition@1 {
+			label = "QSPI.u-boot";
+			reg = <0x00040000 0x00100000>;
+		};
+		partition@2 {
+			label = "QSPI.u-boot-spl-os";
+			reg = <0x00140000 0x00080000>;
+		};
+		partition@3 {
+			label = "QSPI.u-boot-env";
+			reg = <0x001c0000 0x00010000>;
+		};
+		partition@4 {
+			label = "QSPI.u-boot-env.backup1";
+			reg = <0x001d0000 0x0010000>;
+		};
+		partition@5 {
+			label = "QSPI.kernel";
+			reg = <0x001e0000 0x0800000>;
+		};
+		partition@6 {
+			label = "QSPI.file-system";
+			reg = <0x009e0000 0x01620000>;
+		};
+	};
+};
+
+&omap_dwc3_1 {
+	extcon = <&extcon_usb1>;
+};
+
+&usb1 {
+	dr_mode = "otg";
+};
+
+&usb2 {
+	dr_mode = "host";
+};
+
+&dcan1 {
+	status = "ok";
+	pinctrl-names = "default", "sleep", "active";
+	pinctrl-0 = <&dcan1_pins_sleep>;
+	pinctrl-1 = <&dcan1_pins_sleep>;
+	pinctrl-2 = <&dcan1_pins_default>;
+};
+
+&atl {
+	assigned-clocks = <&abe_dpll_sys_clk_mux>,
+			  <&atl_gfclk_mux>,
+			  <&dpll_abe_ck>,
+			  <&dpll_abe_m2x2_ck>,
+			  <&atl_clkin1_ck>,
+			  <&atl_clkin2_ck>;
+	assigned-clock-parents = <&sys_clkin2>, <&dpll_abe_m2_ck>;
+	assigned-clock-rates = <0>, <0>, <180633600>, <361267200>,
+			       <11289600>, <11289600>;
+
+	status = "okay";
+
+	atl2 {
+		bws = <DRA7_ATL_WS_MCASP2_FSX>;
+		aws = <DRA7_ATL_WS_MCASP3_FSX>;
+	};
+};
+
+&mcasp3 {
+	#sound-dai-cells = <0>;
+
+	assigned-clocks = <&mcasp3_ahclkx_mux>;
+	assigned-clock-parents = <&atl_clkin2_ck>;
+
+	status = "okay";
+
+	op-mode = <0>; /* MCASP_IIS_MODE */
+	tdm-slots = <2>;
+	/* 4 serializer */
+	serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
+		1 2 0 0
+	>;
+	tx-num-evt = <32>;
+	rx-num-evt = <32>;
+};
+
+&mailbox5 {
+	status = "okay";
+	mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
+		status = "okay";
+	};
+	mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
+		status = "okay";
+	};
+};
+
+&mailbox6 {
+	status = "okay";
+	mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
+		status = "okay";
+	};
+	mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
+		status = "okay";
+	};
+};
+
+&mmu0_dsp1 {
+	status = "okay";
+};
+
+&mmu1_dsp1 {
+	status = "okay";
+};
+
+&mmu0_dsp2 {
+	status = "okay";
+};
+
+&mmu1_dsp2 {
+	status = "okay";
+};
+
+&mmu_ipu1 {
+	status = "okay";
+};
+
+&mmu_ipu2 {
+	status = "okay";
+};
+
+&ipu2 {
+	status = "okay";
+	memory-region = <&ipu2_cma_pool>;
+	mboxes = <&mailbox6 &mbox_ipu2_ipc3x>;
+	timers = <&timer3>;
+	watchdog-timers = <&timer4>, <&timer9>;
+};
+
+&ipu1 {
+	status = "okay";
+	memory-region = <&ipu1_cma_pool>;
+	mboxes = <&mailbox5 &mbox_ipu1_ipc3x>;
+	timers = <&timer11>;
+	watchdog-timers = <&timer7>, <&timer8>;
+};
+
+&dsp1 {
+	status = "okay";
+	memory-region = <&dsp1_cma_pool>;
+	mboxes = <&mailbox5 &mbox_dsp1_ipc3x>;
+	timers = <&timer5>;
+	watchdog-timers = <&timer10>;
+};
+
+&dsp2 {
+	status = "okay";
+	memory-region = <&dsp2_cma_pool>;
+	mboxes = <&mailbox6 &mbox_dsp2_ipc3x>;
+	timers = <&timer6>;
+	watchdog-timers = <&timer13>;
+};
+
+&vip1 {
+	status = "okay";
+};
+
+&hdmi {
+	status = "okay";
+
+	port {
+		hdmi_out: endpoint {
+			remote-endpoint = <&tpd12s015_in>;
+		};
+	};
+};
+
+&pcie1_rc {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/dra7-evm-fpd-auo-g101evn01.0.dts b/arch/arm/boot/dts/dra7-evm-fpd-auo-g101evn01.0.dts
new file mode 100644
index 000000000000..64e79754bb22
--- /dev/null
+++ b/arch/arm/boot/dts/dra7-evm-fpd-auo-g101evn01.0.dts
@@ -0,0 +1,37 @@
1#include "dra7-evm.dts"
2#include "dra7x-evm-fpd-auo-g101evn01.0.dtsi"
3
4/ {
5 aliases {
6 display0 = &fpd_disp;
7 display1 = &hdmi0;
8 };
9};
10
11&dss {
12 ports {
13 status = "ok";
14 };
15};
16
17&disp_ser {
18 status = "ok";
19 ranges = <0x0 0x2d>;
20};
21
22/* Tie the end points of DSS and FPDLink together */
23
24&fpd_in {
25 remote-endpoint = <&dpi_out3>;
26};
27
28&dpi_out3 {
29 remote-endpoint = <&fpd_in>;
30};
31
32&lcd_fpd {
33 enable-gpios = <&pcf_gpio_21 0 GPIO_ACTIVE_LOW>;
34 /* P0, SEL_GPMC_AD_VID_S0 */
35
36 status = "ok";
37};
diff --git a/arch/arm/boot/dts/dra7-evm-fpd-lg.dts b/arch/arm/boot/dts/dra7-evm-fpd-lg.dts
new file mode 100644
index 000000000000..b8df0766ecff
--- /dev/null
+++ b/arch/arm/boot/dts/dra7-evm-fpd-lg.dts
@@ -0,0 +1,38 @@
1#include "dra7-evm.dts"
2#include "dra7x-evm-fpd-lg.dtsi"
3
4/ {
5 aliases {
6 display0 = &fpd_disp;
7 display1 = &hdmi0;
8 };
9};
10
11&dss {
12 ports {
13 status = "ok";
14 };
15};
16
17&disp_ser {
18 status = "ok";
19 ranges = <0x2c 0x2c>,
20 <0x1c 0x1c>;
21};
22
23/* Tie the end points of DSS and FPDLink together */
24
25&fpd_in {
26 remote-endpoint = <&dpi_out3>;
27};
28
29&dpi_out3 {
30 remote-endpoint = <&fpd_in>;
31};
32
33&lcd_fpd {
34 enable-gpios = <&pcf_gpio_21 0 GPIO_ACTIVE_LOW>;
35 /* P0, SEL_GPMC_AD_VID_S0 */
36
37 status = "ok";
38};
diff --git a/arch/arm/boot/dts/dra7-evm-lcd-lg.dts b/arch/arm/boot/dts/dra7-evm-lcd-lg.dts
index f67beaa8ff01..2b09d3a8839f 100644
--- a/arch/arm/boot/dts/dra7-evm-lcd-lg.dts
+++ b/arch/arm/boot/dts/dra7-evm-lcd-lg.dts
@@ -14,3 +14,11 @@
 	interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
 };
 
+
+/* Uncomment the below lines to enable the FPDLink display */
+
+/*
+&lcd_fpd {
+	status = "okay";
+};
+*/
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index c2af973a17c2..4a8697306e7a 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -8,9 +8,7 @@
 /dts-v1/;
 
 #include "dra74x.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/ti-dra7-atl.h>
-#include <dt-bindings/input/input.h>
+#include "dra7-evm-common.dtsi"
 
 / {
 	model = "TI DRA742";
@@ -56,13 +54,17 @@
 	};
 
 	aliases {
-		display0 = &hdmi0;
-		display1 = &fpd_disp;
-		sound0 = &snd0;
-		sound1 = &hdmi;
 		i2c7 = &disp_ser;
 	};
 
+	evm_1v8_sw: fixedregulator-evm_1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "evm_1v8";
+		vin-supply = <&smps9_reg>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
 	evm_3v3_sd: fixedregulator-sd {
 		compatible = "regulator-fixed";
 		regulator-name = "evm_3v3_sd";
@@ -99,16 +101,6 @@
 		enable-active-high;
 	};
 
-	extcon_usb1: extcon_usb1 {
-		compatible = "linux,extcon-usb-gpio";
-		id-gpio = <&pcf_gpio_21 1 GPIO_ACTIVE_HIGH>;
-	};
-
-	extcon_usb2: extcon_usb2 {
-		compatible = "linux,extcon-usb-gpio";
-		id-gpio = <&pcf_gpio_21 2 GPIO_ACTIVE_HIGH>;
-	};
-
 	vtt_fixed: fixedregulator-vtt {
 		compatible = "regulator-fixed";
 		regulator-name = "vtt_fixed";
@@ -121,151 +113,28 @@
 		gpio = <&gpio7 11 GPIO_ACTIVE_HIGH>;
 	};
 
-	snd0: sound@0 {
-		compatible = "simple-audio-card";
-		simple-audio-card,name = "DRA7xx-EVM";
-		simple-audio-card,widgets =
-			"Headphone", "Headphone Jack",
-			"Line", "Line Out",
-			"Microphone", "Mic Jack",
-			"Line", "Line In";
-		simple-audio-card,routing =
-			"Headphone Jack", "HPLOUT",
-			"Headphone Jack", "HPROUT",
-			"Line Out", "LLOUT",
-			"Line Out", "RLOUT",
-			"MIC3L", "Mic Jack",
-			"MIC3R", "Mic Jack",
-			"Mic Jack", "Mic Bias",
-			"LINE1L", "Line In",
-			"LINE1R", "Line In";
-		simple-audio-card,format = "dsp_b";
-		simple-audio-card,bitclock-master = <&sound0_master>;
-		simple-audio-card,frame-master = <&sound0_master>;
-		simple-audio-card,bitclock-inversion;
-
-		sound0_master: simple-audio-card,cpu {
-			sound-dai = <&mcasp3>;
-			system-clock-frequency = <11289600>;
-		};
-
-		simple-audio-card,codec {
-			sound-dai = <&tlv320aic3106>;
-			clocks = <&atl_clkin2_ck>;
-		};
-	};
-
-	leds {
-		compatible = "gpio-leds";
-		led@0 {
-			label = "dra7:usr1";
-			gpios = <&pcf_lcd 4 GPIO_ACTIVE_LOW>;
-			default-state = "off";
-		};
-
-		led@1 {
-			label = "dra7:usr2";
-			gpios = <&pcf_lcd 5 GPIO_ACTIVE_LOW>;
-			default-state = "off";
-		};
-
-		led@2 {
-			label = "dra7:usr3";
-			gpios = <&pcf_lcd 6 GPIO_ACTIVE_LOW>;
-			default-state = "off";
-		};
-
-		led@3 {
-			label = "dra7:usr4";
-			gpios = <&pcf_lcd 7 GPIO_ACTIVE_LOW>;
-			default-state = "off";
-		};
-	};
-
-	gpio_keys {
-		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
-		autorepeat;
-
-		USER1 {
-			label = "btnUser1";
-			linux,code = <BTN_0>;
-			gpios = <&pcf_lcd 2 GPIO_ACTIVE_LOW>;
-		};
-
-		USER2 {
-			label = "btnUser2";
-			linux,code = <BTN_1>;
-			gpios = <&pcf_lcd 3 GPIO_ACTIVE_LOW>;
-		};
-	};
-
-	hdmi0: connector@1 {
-		compatible = "hdmi-connector";
-		label = "hdmi";
-
-		type = "a";
-
-		port {
-			hdmi_connector_in: endpoint {
-				remote-endpoint = <&tpd12s015_out>;
-			};
-		};
+	extcon_usb2: extcon_usb2 {
+		compatible = "linux,extcon-usb-gpio";
+		id-gpio = <&pcf_gpio_21 2 GPIO_ACTIVE_HIGH>;
 	};
 
-	tpd12s015: encoder@1 {
-		compatible = "ti,dra7evm-tpd12s015";
-
-		pinctrl-names = "i2c", "ddc";
-		pinctrl-0 = <&hdmi_i2c_sel_pin &hdmi_i2c_pins_i2c>;
-		pinctrl-1 = <&hdmi_i2c_sel_pin &hdmi_i2c_pins_ddc>;
-
-		ddc-i2c-bus = <&i2c2>;
-		mcasp-gpio = <&mcasp8>;
-
-		gpios = <&pcf_hdmi 4 0>, /* P4, CT CP HPD */
-			<&pcf_hdmi 5 0>, /* P5, LS OE */
-			<&gpio7 12 0>; /* gpio7_12/sp1_cs2, HPD */
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				reg = <0>;
-
-				tpd12s015_in: endpoint@0 {
-					remote-endpoint = <&hdmi_out>;
-				};
-			};
-
-			port@1 {
-				reg = <1>;
-
-				tpd12s015_out: endpoint@0 {
-					remote-endpoint = <&hdmi_connector_in>;
-				};
-			};
-		};
-	};
+};
+
+&tpd12s015 {
+	compatible = "ti,dra7evm-tpd12s015";
+	pinctrl-names = "i2c", "ddc";
+	pinctrl-0 = <&hdmi_i2c_sel_pin &hdmi_i2c_pins_i2c>;
+	pinctrl-1 = <&hdmi_i2c_sel_pin &hdmi_i2c_pins_ddc>;
+
+	gpios = <&pcf_hdmi 4 GPIO_ACTIVE_HIGH>, /* P4, CT CP HPD */
+		<&pcf_hdmi 5 GPIO_ACTIVE_HIGH>, /* P5, LS OE */
+		<&gpio7 12 GPIO_ACTIVE_HIGH>; /* gpio7_12/sp1_cs2, HPD */
+
+	ddc-i2c-bus = <&i2c2>;
+	mcasp-gpio = <&mcasp8>;
 };
 
 &dra7_pmx_core {
-	dcan1_pins_default: dcan1_pins_default {
-		pinctrl-single,pins = <
-			0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
-			0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */
-		>;
-	};
-
-	dcan1_pins_sleep: dcan1_pins_sleep {
-		pinctrl-single,pins = <
-			0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */
-			0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */
-		>;
-	};
-
 	hdmi_i2c_sel_pin: pinmux_hdmi_i2c_sel_pin {
 		pinctrl-single,pins = <
 			/* this pin is used as a GPIO via mcasp */
@@ -758,6 +627,8 @@
 	tps659038: tps659038@58 {
 		compatible = "ti,tps659038";
 		reg = <0x58>;
+		ti,palmas-override-powerhold;
+		ti,system-power-controller;
 
 		tps659038_pmic {
 			compatible = "ti,tps659038-pmic";
@@ -978,59 +849,10 @@ i2c_p3_exp: &i2c2 {
 
 		#address-cells = <1>;
 		#size-cells = <0>;
-		ranges = <0x2c 0x2c>,
-			 <0x1c 0x1c>;
-
-		disp_des: deserializer@2c {
-			compatible = "ti,ds90uh928q";
-			reg = <0x2c>;
-			slave-mode;
-		};
-
-		/* TLC chip for LCD panel power and backlight */
-		fpd_disp: tlc59108@1c {
-			status = "disabled";
-			reg = <0x1c>;
-			compatible = "ti,tlc59108-fpddisp";
-			enable-gpios = <&pcf_gpio_21 0 GPIO_ACTIVE_LOW>;
-					/* P0, SEL_GPMC_AD_VID_S0 */
-
-			port@lcd3 {
-				fpd_in: endpoint {
-					remote-endpoint = <&dpi_out3>;
-				};
-			};
-		};
+		status = "disabled";
 	};
 };
 
-&i2c3 {
-	status = "okay";
-	clock-frequency = <400000>;
-};
-
-&mcspi1 {
-	status = "okay";
-};
-
-&mcspi2 {
-	status = "okay";
-};
-
-&uart1 {
-	status = "okay";
-	interrupts-extended = <&crossbar_mpu GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
-			      <&dra7_pmx_core 0x3e0>;
-};
-
-&uart2 {
-	status = "okay";
-};
-
-&uart3 {
-	status = "okay";
-};
-
 &mmc1 {
 	status = "okay";
 	vmmc-supply = <&evm_3v3_sd>;
@@ -1088,7 +910,7 @@ i2c_p3_exp: &i2c2 {
 
 	#address-cells = <1>;
 	#size-cells = <0>;
-	wlcore: wlcore@0 {
+	wlcore: wlcore@2 {
 		compatible = "ti,wl1835";
 		reg = <2>;
 		interrupt-parent = <&gpio5>;
@@ -1116,75 +938,10 @@ i2c_p3_exp: &i2c2 {
 	vdd-supply = <&smps7_reg>;
 };
 
-&pcie1_rc {
-	status = "okay";
-};
-
-&qspi {
-	status = "okay";
-
-	spi-max-frequency = <76800000>;
-	m25p80@0 {
-		compatible = "s25fl256s1";
-		spi-max-frequency = <76800000>;
-		reg = <0>;
-		spi-tx-bus-width = <1>;
-		spi-rx-bus-width = <4>;
-		#address-cells = <1>;
-		#size-cells = <1>;
-
-		/* MTD partition table.
-		 * The ROM checks the first four physical blocks
-		 * for a valid file to boot and the flash here is
-		 * 64KiB block size.
-		 */
-		partition@0 {
-			label = "QSPI.SPL";
-			reg = <0x00000000 0x000040000>;
-		};
-		partition@1 {
-			label = "QSPI.u-boot";
-			reg = <0x00040000 0x00100000>;
-		};
-		partition@2 {
-			label = "QSPI.u-boot-spl-os";
-			reg = <0x00140000 0x00080000>;
-		};
-		partition@3 {
-			label = "QSPI.u-boot-env";
-			reg = <0x001c0000 0x00010000>;
-		};
-		partition@4 {
-			label = "QSPI.u-boot-env.backup1";
-			reg = <0x001d0000 0x0010000>;
-		};
-		partition@5 {
-			label = "QSPI.kernel";
-			reg = <0x001e0000 0x0800000>;
-		};
-		partition@6 {
-			label = "QSPI.file-system";
-			reg = <0x009e0000 0x01620000>;
-		};
-	};
-};
-
-&omap_dwc3_1 {
-	extcon = <&extcon_usb1>;
-};
-
 &omap_dwc3_2 {
 	extcon = <&extcon_usb2>;
 };
 
-&usb1 {
-	dr_mode = "otg";
-};
-
-&usb2 {
-	dr_mode = "host";
-};
-
 &elm {
 	status = "okay";
 };
@@ -1301,131 +1058,6 @@ i2c_p3_exp: &i2c2 {
 	dual_emac_res_vlan = <2>;
 };
 
-&dcan1 {
-	status = "ok";
-	pinctrl-names = "default", "sleep", "active";
-	pinctrl-0 = <&dcan1_pins_sleep>;
-	pinctrl-1 = <&dcan1_pins_sleep>;
-	pinctrl-2 = <&dcan1_pins_default>;
-};
-
-&atl {
-	assigned-clocks = <&abe_dpll_sys_clk_mux>,
-			  <&atl_gfclk_mux>,
-			  <&dpll_abe_ck>,
-			  <&dpll_abe_m2x2_ck>,
-			  <&atl_clkin1_ck>,
-			  <&atl_clkin2_ck>;
-	assigned-clock-parents = <&sys_clkin2>, <&dpll_abe_m2_ck>;
-	assigned-clock-rates = <0>, <0>, <180633600>, <361267200>,
-			       <11289600>, <11289600>;
-
-	status = "okay";
-
-	atl2 {
-		bws = <DRA7_ATL_WS_MCASP2_FSX>;
-		aws = <DRA7_ATL_WS_MCASP3_FSX>;
-	};
-};
-
-&mcasp3 {
-	#sound-dai-cells = <0>;
-
-	assigned-clocks = <&mcasp3_ahclkx_mux>;
-	assigned-clock-parents = <&atl_clkin2_ck>;
-
-	status = "okay";
-
-	op-mode = <0>; /* MCASP_IIS_MODE */
-	tdm-slots = <2>;
-	/* 4 serializer */
-	serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
-		1 2 0 0
-	>;
-	tx-num-evt = <32>;
-	rx-num-evt = <32>;
-};
-
-&mcasp8 {
-	/* not used for audio. only the AXR2 pin is used as GPIO */
-	status = "okay";
-};
-
-&mailbox5 {
-	status = "okay";
-	mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
-		status = "okay";
-	};
-	mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
-		status = "okay";
-	};
-};
-
-&mailbox6 {
-	status = "okay";
-	mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
-		status = "okay";
-	};
-	mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
-		status = "okay";
-	};
-};
-
-&mmu0_dsp1 {
-	status = "okay";
-};
-
-&mmu1_dsp1 {
-	status = "okay";
-};
-
-&mmu0_dsp2 {
-	status = "okay";
-};
-
-&mmu1_dsp2 {
-	status = "okay";
-};
-
-&mmu_ipu1 {
-	status = "okay";
-};
-
-&mmu_ipu2 {
-	status = "okay";
-};
-
-&ipu2 {
-	status = "okay";
-	memory-region = <&ipu2_cma_pool>;
-	mboxes = <&mailbox6 &mbox_ipu2_ipc3x>;
-	timers = <&timer3>;
-	watchdog-timers = <&timer4>, <&timer9>;
-};
-
-&ipu1 {
-	status = "okay";
-	memory-region = <&ipu1_cma_pool>;
-	mboxes = <&mailbox5 &mbox_ipu1_ipc3x>;
-	timers = <&timer11>;
-	watchdog-timers = <&timer7>, <&timer8>;
-};
-
-&dsp1 {
-	status = "okay";
-	memory-region = <&dsp1_cma_pool>;
-	mboxes = <&mailbox5 &mbox_dsp1_ipc3x>;
-	timers = <&timer5>;
-	watchdog-timers = <&timer10>;
-};
-
-&dsp2 {
-	status = "okay";
-	memory-region = <&dsp2_cma_pool>;
-	mboxes = <&mailbox6 &mbox_dsp2_ipc3x>;
-	timers = <&timer6>;
-};
-
 &dss {
 	status = "okay";
 
@@ -1440,29 +1072,18 @@ i2c_p3_exp: &i2c2 {
 			reg = <2>;
 
 			dpi_out3: endpoint {
-				remote-endpoint = <&fpd_in>;
 				data-lines = <24>;
 			};
 		};
 	};
 };
 
-&bb2d {
-	status = "okay";
-};
-
 &hdmi {
-	status = "ok";
 	vdda-supply = <&ldo3_reg>;
-
-	port {
-		hdmi_out: endpoint {
-			remote-endpoint = <&tpd12s015_in>;
-		};
-	};
 };
 
-&vip1 {
+&mcasp8 {
+	/* not used for audio. only the AXR2 pin is used as GPIO */
 	status = "okay";
 };
 
@@ -1474,6 +1095,10 @@ video_in: &vin1a {
 	};
 };
 
+&bb2d {
+	status = "okay";
+};
+
 #include "dra7xx-jamr3.dtsi"
 &tvp_5158{
 	mux-gpios = <&pcf_hdmi 3 GPIO_ACTIVE_HIGH>, /*CAM_FPD_MUX_S0*/
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 4a2b643bd60d..43316d454d41 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -300,7 +300,6 @@
 	 * node and enable pcie1_ep mode.
 	 */
 	pcie1_rc: pcie_rc@51000000 {
-		compatible = "ti,dra7-pcie";
 		reg = <0x51000000 0x2000>, <0x51002000 0x14c>, <0x1000 0x2000>;
 		reg-names = "rc_dbics", "ti_conf", "config";
 		interrupts = <0 232 0x4>, <0 233 0x4>;
@@ -315,6 +314,8 @@
 		ti,hwmods = "pcie1";
 		phys = <&pcie1_phy>;
 		phy-names = "pcie-phy0";
+		syscon-lane-conf = <&scm_conf 0x558>;
+		syscon-lane-sel = <&scm_conf_pcie 0x18>;
 		interrupt-map-mask = <0 0 0 7>;
 		interrupt-map = <0 0 0 1 &pcie1_intc 1>,
 				<0 0 0 2 &pcie1_intc 2>,
@@ -329,7 +330,6 @@
 	};
 
 	pcie1_ep: pcie_ep@51000000 {
-		compatible = "ti,dra7-pcie-ep";
 		reg = <0x51000000 0x28>, <0x51002000 0x14c>, <0x51001000 0x28>, <0x1000 0x10000000>;
 		reg-names = "ep_dbics", "ti_conf", "ep_dbics2", "addr_space";
 		interrupts = <0 232 0x4>;
@@ -351,8 +351,7 @@
 		ranges = <0x51800000 0x51800000 0x3000
 			  0x0 0x30000000 0x10000000>;
 		status = "disabled";
-		pcie@51800000 {
-			compatible = "ti,dra7-pcie";
+		pcie2_rc: pcie@51800000 {
 			reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>;
 			reg-names = "rc_dbics", "ti_conf", "config";
 			interrupts = <0 355 0x4>, <0 356 0x4>;
diff --git a/arch/arm/boot/dts/dra72-evm-common.dtsi b/arch/arm/boot/dts/dra72-evm-common.dtsi
index 4ff5187da184..aa3b8abde63d 100644
--- a/arch/arm/boot/dts/dra72-evm-common.dtsi
+++ b/arch/arm/boot/dts/dra72-evm-common.dtsi
@@ -16,7 +16,6 @@
 
 	aliases {
 		display0 = &hdmi0;
-		display1 = &fpd_disp;
 		sound0 = &snd0;
 		sound1 = &hdmi;
 		i2c7 = &disp_ser;
@@ -589,35 +588,14 @@ i2c_p3_exp: &i2c5 {
 	disp_ser: serializer@1b {
 		compatible = "ti,ds90uh925q";
 		reg = <0x1b>;
+		status = "disabled";
 
 		#address-cells = <1>;
 		#size-cells = <0>;
-		ranges = <0x2c 0x2c>,
-			 <0x1c 0x1c>;
-
-		disp_des: deserializer@2c {
-			compatible = "ti,ds90uh928q";
-			reg = <0x2c>;
-			slave-mode;
-		};
-
-		/* TLC chip for LCD panel power and backlight */
-		fpd_disp: tlc59108@1c {
-			status = "disabled";
-			reg = <0x1c>;
-			compatible = "ti,tlc59108-fpddisp";
-			enable-gpios = <&pcf_gpio_21 0 GPIO_ACTIVE_LOW>;
-				/* P0, SEL_GPMC_AD_VID_S0 */
-
-			port@lcd3 {
-				fpd_in: endpoint {
-					remote-endpoint = <&dpi_out3>;
-				};
-			};
-		};
 	};
 };
 
+
 &dss {
 	status = "ok";
 	vdda_video-supply = <&ldo5_reg>;
@@ -630,7 +608,6 @@ i2c_p3_exp: &i2c5 {
 			reg = <2>;
 
 			dpi_out3: endpoint {
-				remote-endpoint = <&fpd_in>;
 				data-lines = <24>;
 			};
 		};
diff --git a/arch/arm/boot/dts/dra72-evm-fpd-lg.dts b/arch/arm/boot/dts/dra72-evm-fpd-lg.dts
new file mode 100644
index 000000000000..9ed0f66fdb76
--- /dev/null
+++ b/arch/arm/boot/dts/dra72-evm-fpd-lg.dts
@@ -0,0 +1,39 @@
1#include "dra72-evm.dts"
2#include "dra7x-evm-fpd-lg.dtsi"
3
4/* Set display aliases for use by Android */
5/ {
6 aliases {
7 display0 = &fpd_disp;
8 display1 = &hdmi0;
9 };
10};
11
12&dss {
13 ports {
14 status = "ok";
15 };
16};
17
18&disp_ser {
19 status = "ok";
20 ranges = <0x2c 0x2c>,
21 <0x1c 0x1c>;
22};
23
24/* Tie the end points of DSS and FPDLink together */
25
26&fpd_in {
27 remote-endpoint = <&dpi_out3>;
28};
29
30&dpi_out3 {
31 remote-endpoint = <&fpd_in>;
32};
33
34&lcd_fpd {
35 enable-gpios = <&pcf_gpio_21 0 GPIO_ACTIVE_LOW>;
36 /* P0, SEL_GPMC_AD_VID_S0 */
37
38 status = "ok";
39};
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index deef9b8520b2..cc108d2f9b09 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -11,7 +11,6 @@
 
 	aliases {
 		display0 = &hdmi0;
-		display1 = &fpd_disp;
 		sound1 = &hdmi;
 		i2c7 = &disp_ser;
 	};
@@ -215,39 +214,6 @@
 	};
 };
 
-&i2c5 {
-	disp_ser: serializer@1b {
-		compatible = "ti,ds90uh925q";
-		reg = <0x1b>;
-
-		#address-cells = <1>;
-		#size-cells = <0>;
-		ranges = <0x2c 0x2c>,
-			 <0x1c 0x1c>;
-
-		disp_des: deserializer@2c {
-			compatible = "ti,ds90uh928q";
-			reg = <0x2c>;
-			slave-mode;
-		};
-
-		/* TLC chip for LCD panel power and backlight */
-		fpd_disp: tlc59108@1c {
-			status = "disabled";
-			reg = <0x1c>;
-			compatible = "ti,tlc59108-fpddisp";
-			enable-gpios = <&pcf_gpio_21 0 GPIO_ACTIVE_LOW>;
-				/* P0, SEL_GPMC_AD_VID_S0 */
-
-			port@lcd3 {
-				fpd_in: endpoint {
-					remote-endpoint = <&dpi_out3>;
-				};
-			};
-		};
-	};
-};
-
 &hdmi {
 	vdda_video-supply = <&ldo5_reg>;
 };
diff --git a/arch/arm/boot/dts/dra72x.dtsi b/arch/arm/boot/dts/dra72x.dtsi
index 68341c30beb1..29705754460c 100644
--- a/arch/arm/boot/dts/dra72x.dtsi
+++ b/arch/arm/boot/dts/dra72x.dtsi
@@ -140,3 +140,15 @@
140 status = "disabled"; 140 status = "disabled";
141 }; 141 };
142}; 142};
143
144&pcie1_rc {
145 compatible = "ti,dra726-pcie-rc";
146};
147
148&pcie1_ep {
149 compatible = "ti,dra726-pcie-ep";
150};
151
152&pcie2_rc {
153 compatible = "ti,dra726-pcie-rc";
154};
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index b96f6c7f77d0..a95a1d17ad2d 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -259,3 +259,15 @@
259 status = "disabled"; 259 status = "disabled";
260 }; 260 };
261}; 261};
262
263&pcie1_rc {
264 compatible = "ti,dra746-pcie-rc";
265};
266
267&pcie1_ep {
268 compatible = "ti,dra746-pcie-ep";
269};
270
271&pcie2_rc {
272 compatible = "ti,dra746-pcie-rc";
273};
diff --git a/arch/arm/boot/dts/dra76-evm-fpd-auo-g101evn01.0.dts b/arch/arm/boot/dts/dra76-evm-fpd-auo-g101evn01.0.dts
new file mode 100644
index 000000000000..69983d70d1f4
--- /dev/null
+++ b/arch/arm/boot/dts/dra76-evm-fpd-auo-g101evn01.0.dts
@@ -0,0 +1,67 @@
1#include "dra76-evm.dts"
2#include "dra7x-evm-fpd-auo-g101evn01.0.dtsi"
3
4/ {
5 aliases {
6 display0 = &fpd_disp;
7 display1 = &hdmi0;
8 };
9};
10
11&dss {
12 ports {
13 status = "ok";
14 };
15};
16
17&disp_ser {
18 status = "ok";
19 ranges = <0x0 0x2d>;
20};
21
 22/* Tie the endpoints of DSS and FPDLink together */
23&fpd_in {
24 remote-endpoint = <&dpi_out3>;
25};
26
27&dpi_out3 {
28 remote-endpoint = <&fpd_in>;
29};
30
31&lcd_fpd {
32 status = "ok";
33};
34
35/* U21 on the EVM */
36/* gpmc_ad_vid_s0: high: GPMC , low: VOUT3 */
37&pcf_gpio_21 {
38 p0 {
39 gpio-hog;
40 gpios = <0 GPIO_ACTIVE_HIGH>;
41 output-low;
42 line-name = "gpmc_ad_vid_s0";
43 };
44};
45
46
47/* U110 on the EVM. For Rev A boards */
48/*
49&pcf_hdmi {
50 p11 {
51 gpio-hog;
52 gpios = <11 GPIO_ACTIVE_HIGH>;
53 output-low;
54 line-name = "disp1_vpoc_onn";
55 };
56};
57*/
58
59/* For supplying power to the display via FPDLink */
60&gpio2 {
61 p2 {
62 gpio-hog;
63 gpios = <2 GPIO_ACTIVE_HIGH>;
64 output-low;
65 line-name = "disp1_vpoc_onn";
66 };
67};
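
The p0 and p2 entries above use gpio-hog, which makes the GPIO controller itself claim and drive a line at probe time, with no consumer driver involved. A minimal sketch of the property set (the controller label and line offset are illustrative):

	&gpio_expander {
		some_mux_pin {
			gpio-hog;			/* claimed when the controller probes */
			gpios = <4 GPIO_ACTIVE_HIGH>;	/* offset 4 on this controller */
			output-low;			/* or output-high / input, as needed */
			line-name = "board_mux_sel";
		};
	};

Here, driving gpmc_ad_vid_s0 low steers the U21 mux to VOUT3, as the comment above the hog indicates, so the FPD-Link path receives the display output.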
diff --git a/arch/arm/boot/dts/dra76-evm.dts b/arch/arm/boot/dts/dra76-evm.dts
new file mode 100644
index 000000000000..213f4b53f965
--- /dev/null
+++ b/arch/arm/boot/dts/dra76-evm.dts
@@ -0,0 +1,498 @@
1/*
2 * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8/dts-v1/;
9
10#include "dra76x.dtsi"
11#include "dra7-evm-common.dtsi"
12#include "dra76x-mmc-iodelay.dtsi"
13#include <dt-bindings/net/ti-dp83867.h>
14
15/ {
16 model = "TI DRA762 EVM";
17 compatible = "ti,dra76-evm", "ti,dra76", "ti,dra7";
18
19 memory {
20 device_type = "memory";
21 reg = <0x0 0x80000000 0x0 0x80000000>;
22 };
23
24 reserved-memory {
25 #address-cells = <2>;
26 #size-cells = <2>;
27 ranges;
28
29 ipu2_cma_pool: ipu2_cma@95800000 {
30 compatible = "shared-dma-pool";
31 reg = <0x0 0x95800000 0x0 0x3800000>;
32 reusable;
33 status = "okay";
34 };
35
36 dsp1_cma_pool: dsp1_cma@99000000 {
37 compatible = "shared-dma-pool";
38 reg = <0x0 0x99000000 0x0 0x4000000>;
39 reusable;
40 status = "okay";
41 };
42
43 ipu1_cma_pool: ipu1_cma@9d000000 {
44 compatible = "shared-dma-pool";
45 reg = <0x0 0x9d000000 0x0 0x2000000>;
46 reusable;
47 status = "okay";
48 };
49
50 dsp2_cma_pool: dsp2_cma@9f000000 {
51 compatible = "shared-dma-pool";
52 reg = <0x0 0x9f000000 0x0 0x800000>;
53 reusable;
54 status = "okay";
55 };
56 };
57
58 aliases {
59 i2c7 = &disp_ser;
60 };
61
62 vsys_12v0: fixedregulator-vsys12v0 {
63 /* main supply */
64 compatible = "regulator-fixed";
65 regulator-name = "vsys_12v0";
66 regulator-min-microvolt = <12000000>;
67 regulator-max-microvolt = <12000000>;
68 regulator-always-on;
69 regulator-boot-on;
70 };
71
72 vsys_5v0: fixedregulator-vsys5v0 {
73 /* Output of Cntlr B of TPS43351-Q1 on dra76-evm */
74 compatible = "regulator-fixed";
75 regulator-name = "vsys_5v0";
76 regulator-min-microvolt = <5000000>;
77 regulator-max-microvolt = <5000000>;
78 vin-supply = <&vsys_12v0>;
79 regulator-always-on;
80 regulator-boot-on;
81 };
82
83 vsys_3v3: fixedregulator-vsys3v3 {
84 /* Output of Cntlr A of TPS43351-Q1 on dra76-evm */
85 compatible = "regulator-fixed";
86 regulator-name = "vsys_3v3";
87 regulator-min-microvolt = <3300000>;
88 regulator-max-microvolt = <3300000>;
89 vin-supply = <&vsys_12v0>;
90 regulator-always-on;
91 regulator-boot-on;
92 };
93
94 vio_3v3: fixedregulator-vio_3v3 {
95 compatible = "regulator-fixed";
96 regulator-name = "vio_3v3";
97 regulator-min-microvolt = <3300000>;
98 regulator-max-microvolt = <3300000>;
99 vin-supply = <&vsys_3v3>;
100 regulator-always-on;
101 regulator-boot-on;
102 };
103
104 vio_3v3_sd: fixedregulator-sd {
105 compatible = "regulator-fixed";
106 regulator-name = "vio_3v3_sd";
107 regulator-min-microvolt = <3300000>;
108 regulator-max-microvolt = <3300000>;
109 vin-supply = <&vio_3v3>;
110 enable-active-high;
111 gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
112 };
113
114 vio_1v8: fixedregulator-vio_1v8 {
115 compatible = "regulator-fixed";
116 regulator-name = "vio_1v8";
117 regulator-min-microvolt = <1800000>;
118 regulator-max-microvolt = <1800000>;
119 vin-supply = <&smps5_reg>;
120 };
121
122 vtt_fixed: fixedregulator-vtt {
123 compatible = "regulator-fixed";
124 regulator-name = "vtt_fixed";
125 regulator-min-microvolt = <1350000>;
126 regulator-max-microvolt = <1350000>;
127 vin-supply = <&vsys_3v3>;
128 regulator-always-on;
129 regulator-boot-on;
130 };
131
132 aic_dvdd: fixedregulator-aic_dvdd {
133 /* TPS77018DBVT */
134 compatible = "regulator-fixed";
135 regulator-name = "aic_dvdd";
136 vin-supply = <&vio_3v3>;
137 regulator-min-microvolt = <1800000>;
138 regulator-max-microvolt = <1800000>;
139 };
140};
141
142&i2c1 {
143 status = "okay";
144 clock-frequency = <400000>;
145
146 tps65917: tps65917@58 {
147 compatible = "ti,tps65917";
148 reg = <0x58>;
149 ti,system-power-controller;
150 interrupt-controller;
151 #interrupt-cells = <2>;
152
153 tps65917_pmic {
154 compatible = "ti,tps65917-pmic";
155
156 smps12-in-supply = <&vsys_3v3>;
157 smps3-in-supply = <&vsys_3v3>;
158 smps4-in-supply = <&vsys_3v3>;
159 smps5-in-supply = <&vsys_3v3>;
160 ldo1-in-supply = <&vsys_3v3>;
161 ldo2-in-supply = <&vsys_3v3>;
162 ldo3-in-supply = <&vsys_5v0>;
163 ldo4-in-supply = <&vsys_5v0>;
164 ldo5-in-supply = <&vsys_3v3>;
165
166 tps65917_regulators: regulators {
167 smps12_reg: smps12 {
168 /* VDD_DSPEVE */
169 regulator-name = "smps12";
170 regulator-min-microvolt = <850000>;
171 regulator-max-microvolt = <1250000>;
172 regulator-always-on;
173 regulator-boot-on;
174 };
175
176 smps3_reg: smps3 {
177 /* VDD_CORE */
178 regulator-name = "smps3";
179 regulator-min-microvolt = <850000>;
180 regulator-max-microvolt = <1250000>;
181 regulator-boot-on;
182 regulator-always-on;
183 };
184
185 smps4_reg: smps4 {
186 /* VDD_IVA */
187 regulator-name = "smps4";
188 regulator-min-microvolt = <850000>;
189 regulator-max-microvolt = <1250000>;
190 regulator-always-on;
191 regulator-boot-on;
192 };
193
194 smps5_reg: smps5 {
195 /* VDDS1V8 */
196 regulator-name = "smps5";
197 regulator-min-microvolt = <1800000>;
198 regulator-max-microvolt = <1800000>;
199 regulator-boot-on;
200 regulator-always-on;
201 };
202
203 ldo1_reg: ldo1 {
204 /* LDO1_OUT --> VDA_PHY1_1V8 */
205 regulator-name = "ldo1";
206 regulator-min-microvolt = <1800000>;
207 regulator-max-microvolt = <1800000>;
208 regulator-always-on;
209 regulator-boot-on;
210 regulator-allow-bypass;
211 };
212
213 ldo2_reg: ldo2 {
214 /* LDO2_OUT --> VDA_PHY2_1V8 */
215 regulator-name = "ldo2";
216 regulator-min-microvolt = <1800000>;
217 regulator-max-microvolt = <1800000>;
218 regulator-allow-bypass;
219 regulator-always-on;
220 };
221
222 ldo3_reg: ldo3 {
223 /* VDA_USB_3V3 */
224 regulator-name = "ldo3";
225 regulator-min-microvolt = <3300000>;
226 regulator-max-microvolt = <3300000>;
227 regulator-boot-on;
228 regulator-always-on;
229 };
230
231 ldo5_reg: ldo5 {
232 /* VDDA_1V8_PLL */
233 regulator-name = "ldo5";
234 regulator-min-microvolt = <1800000>;
235 regulator-max-microvolt = <1800000>;
236 regulator-always-on;
237 regulator-boot-on;
238 };
239
240 ldo4_reg: ldo4 {
241 /* VDD_SDIO_DV */
242 regulator-name = "ldo4";
243 regulator-min-microvolt = <1800000>;
244 regulator-max-microvolt = <3300000>;
245 regulator-boot-on;
246 regulator-always-on;
247 };
248 };
249 };
250
251 tps65917_power_button {
252 compatible = "ti,palmas-pwrbutton";
253 interrupt-parent = <&tps65917>;
254 interrupts = <1 IRQ_TYPE_NONE>;
255 wakeup-source;
256 ti,palmas-long-press-seconds = <6>;
257 };
258 };
259
260 lp87565: lp87565@60 {
261 compatible = "ti,lp87565-q1";
262 reg = <0x60>;
263
264		buck10-in-supply = <&vsys_3v3>;
265		buck23-in-supply = <&vsys_3v3>;
266
267 regulators: regulators {
268 buck10_reg: buck10 {
269				/* VDD_MPU */
270 regulator-name = "buck10";
271 regulator-min-microvolt = <850000>;
272 regulator-max-microvolt = <1250000>;
273 regulator-always-on;
274 regulator-boot-on;
275 };
276
277 buck23_reg: buck23 {
278				/* VDD_GPU */
279 regulator-name = "buck23";
280 regulator-min-microvolt = <850000>;
281 regulator-max-microvolt = <1250000>;
282 regulator-boot-on;
283 regulator-always-on;
284 };
285 };
286 };
287
288	pcf_lcd: pcf8575@20 {
289 compatible = "ti,pcf8575", "nxp,pcf8575";
290 reg = <0x20>;
291 gpio-controller;
292 #gpio-cells = <2>;
293 interrupt-controller;
294 #interrupt-cells = <2>;
295 interrupt-parent = <&gpio1>;
296 interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
297 };
298
299	pcf_gpio_21: pcf8575@21 {
300 compatible = "ti,pcf8575", "nxp,pcf8575";
301 reg = <0x21>;
302 gpio-controller;
303 #gpio-cells = <2>;
304 interrupt-parent = <&gpio1>;
305 interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
306 interrupt-controller;
307 #interrupt-cells = <2>;
308 };
309
310 pcf_hdmi: pcf8575@26 {
311 compatible = "ti,pcf8575", "nxp,pcf8575";
312 reg = <0x26>;
313 gpio-controller;
314 #gpio-cells = <2>;
315 p1 {
316 /* vin6_sel_s0: high: VIN6, low: audio */
317 gpio-hog;
318 gpios = <1 GPIO_ACTIVE_HIGH>;
319 output-low;
320 line-name = "vin6_sel_s0";
321 };
322 };
323
324 tlv320aic3106: tlv320aic3106@19 {
325 #sound-dai-cells = <0>;
326 compatible = "ti,tlv320aic3106";
327 reg = <0x19>;
328 adc-settle-ms = <40>;
329 ai3x-micbias-vg = <1>; /* 2.0V */
330 status = "okay";
331
332 /* Regulators */
333 AVDD-supply = <&vio_3v3>;
334 IOVDD-supply = <&vio_3v3>;
335 DRVDD-supply = <&vio_3v3>;
336 DVDD-supply = <&aic_dvdd>;
337 };
338};
339
340&tpd12s015 {
341 compatible = "ti,tpd12s015";
342
343 gpios = <&gpio7 30 GPIO_ACTIVE_HIGH>, /* gpio7_30, CT CP HPD */
344 <&gpio7 31 GPIO_ACTIVE_HIGH>, /* gpio7_31, LS OE */
345 <&gpio7 12 GPIO_ACTIVE_HIGH>; /* gpio7_12/sp1_cs2, HPD */
346
347};
348
349&mmc1 {
350 status = "okay";
351 vmmc-supply = <&vio_3v3_sd>;
352 vmmc_aux-supply = <&ldo4_reg>;
353 bus-width = <4>;
354 /*
355	 * The SDCD signal is not used here; card detect instead relies on
356	 * the line being hardwired to a GPIO.
357 */
358 cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>;
359 max-frequency = <192000000>;
360 pinctrl-names = "default", "hs";
361 pinctrl-0 = <&mmc1_pins_default>;
362 pinctrl-1 = <&mmc1_pins_hs>;
363};
364
365&mmc2 {
366 status = "okay";
367 vmmc-supply = <&vio_1v8>;
368 bus-width = <8>;
369 max-frequency = <192000000>;
370 pinctrl-names = "default", "hs", "ddr_1_8v", "hs200_1_8v";
371 pinctrl-0 = <&mmc2_pins_default>;
372 pinctrl-1 = <&mmc2_pins_hs>;
373 pinctrl-2 = <&mmc2_pins_ddr>;
374 pinctrl-3 = <&mmc2_pins_hs200 &mmc2_iodelay_hs200_conf>;
375};
376
377&oppdm_mpu {
378 vdd-supply = <&buck10_reg>;
379};
380
381&oppdm_dspeve {
382 vdd-supply = <&smps12_reg>;
383};
384
385&oppdm_gpu {
386 vdd-supply = <&buck23_reg>;
387};
388
389&oppdm_ivahd {
390 vdd-supply = <&smps4_reg>;
391};
392
393&oppdm_core {
394 vdd-supply = <&smps3_reg>;
395};
396
397/* No RTC on this device */
398&rtc {
399 status = "disabled";
400};
401
402&mac {
403 status = "okay";
404
405 dual_emac;
406};
407
408&cpsw_emac0 {
409 phy_id = <&davinci_mdio>, <2>;
410 phy-mode = "rgmii-id";
411 dual_emac_res_vlan = <1>;
412};
413
414&cpsw_emac1 {
415 phy_id = <&davinci_mdio>, <3>;
416 phy-mode = "rgmii-id";
417 dual_emac_res_vlan = <2>;
418};
419
420&davinci_mdio {
421 dp83867_0: ethernet-phy@2 {
422 reg = <2>;
423 ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
424 ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
425 ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
426 ti,min-output-impedance;
427 ti,dp83867-rxctrl-strap-quirk;
428 };
429
430 dp83867_1: ethernet-phy@3 {
431 reg = <3>;
432 ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
433 ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
434 ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
435 ti,min-output-impedance;
436 ti,dp83867-rxctrl-strap-quirk;
437 };
438};
439
440&usb2_phy1 {
441 phy-supply = <&ldo3_reg>;
442};
443
444&usb2_phy2 {
445 phy-supply = <&ldo3_reg>;
446};
447
448&i2c3 {
449 disp_ser: serializer@0c {
450 compatible = "ti,ds90ub921q";
451 reg = <0x0c>;
452
453 #address-cells = <1>;
454 #size-cells = <0>;
455 status = "disabled";
456 };
457};
458
459&dss {
460 status = "ok";
461 vdda_video-supply = <&ldo5_reg>;
462
463 ports {
464 #address-cells = <1>;
465 #size-cells = <0>;
466
467 status = "disabled";
468
469 port@lcd3 {
470 reg = <2>;
471
472 dpi_out3: endpoint {
473 data-lines = <24>;
474 };
475 };
476 };
477};
478
479&hdmi {
480 vdda-supply = <&ldo1_reg>;
481};
482
483&qspi {
484 spi-max-frequency = <96000000>;
485 m25p80@0 {
486 spi-max-frequency = <96000000>;
487 };
488};
489
490&pcie2_phy {
491 status = "okay";
492};
493
494&pcie1_rc {
495 num-lanes = <2>;
496 phys = <&pcie1_phy>, <&pcie2_phy>;
497 phy-names = "pcie-phy0", "pcie-phy1";
498};
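
Note how the fixed regulators at the top of this new board file encode the supply tree: vsys_12v0 feeds vsys_5v0 and vsys_3v3, which in turn feed the I/O rails, with each link expressed through vin-supply. A stripped-down sketch of the pattern (the names here are illustrative, not from this patch):

	vmain: fixedregulator-vmain {
		compatible = "regulator-fixed";
		regulator-name = "vmain";
		regulator-min-microvolt = <12000000>;
		regulator-max-microvolt = <12000000>;
		regulator-always-on;
	};

	vrail_3v3: fixedregulator-vrail3v3 {
		compatible = "regulator-fixed";
		regulator-name = "vrail_3v3";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		vin-supply = <&vmain>;	/* parent in the supply tree */
	};

The regulator core uses these vin-supply links to propagate enable requests up the chain, so a consumer holding a leaf rail implicitly keeps its parents powered.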
diff --git a/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi
new file mode 100644
index 000000000000..c95a8a1091ab
--- /dev/null
+++ b/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi
@@ -0,0 +1,435 @@
1/*
2 * MMC IOdelay values for TI's DRA76x and AM576x SoCs.
3 *
4 * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
 16/*
 17 * Rules for modifying this file:
 18 * a) An update to this file should typically correspond to a datamanual
 19 *    revision, and the datamanual revision used should be recorded in the
 20 *    comment below. If the datamanual has not changed, do not change the
 21 *    values. If your design needs values different from those the
 22 *    datamanual recommends, consider adding them directly to the
 23 *    device-tree file for your board.
 24 * b) We keep the mode names as close to the datamanual as possible, so
 25 *    if the manual calls a mode DDR50, DDR, DDR 1.8v, or DDR 3.3v,
 26 *    we follow that naming in code too.
 27 * c) If the values change between multiple revisions of silicon, we add
 28 *    a revision tag to both the new and the old entry. Use 'rev11' for
 29 *    PG 1.1, 'rev20' for PG 2.0, and so on.
 30 * d) The node name and node label should be the exact same string. This
 31 *    is to curb naming creativity and achieve consistency.
 32 *
 33 * Datamanual Revisions:
 34 *
 35 * DRA76x Silicon Revision 1.0: SPRS993A, Revised July 2017
 36 *
 37 */
38
39&dra7_pmx_core {
40 mmc1_pins_default: mmc1_pins_default {
41 pinctrl-single,pins = <
42 DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */
43 DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */
44 DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */
45 DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */
46 DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */
47 DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */
48 >;
49 };
50
51 mmc1_pins_sdr12: mmc1_pins_sdr12 {
52 pinctrl-single,pins = <
53 DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */
54 DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */
55 DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */
56 DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */
57 DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */
58 DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */
59 >;
60 };
61
62 mmc1_pins_hs: mmc1_pins_hs {
63 pinctrl-single,pins = <
64 DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_clk.clk */
65 DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_cmd.cmd */
66 DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat0.dat0 */
67 DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat1.dat1 */
68 DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat2.dat2 */
69 DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat3.dat3 */
70 >;
71 };
72
73 mmc1_pins_sdr25: mmc1_pins_sdr25 {
74 pinctrl-single,pins = <
75 DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_clk.clk */
76 DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_cmd.cmd */
77 DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat0.dat0 */
78 DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat1.dat1 */
79 DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat2.dat2 */
80 DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0) /* mmc1_dat3.dat3 */
81 >;
82 };
83
84 mmc1_pins_sdr50: mmc1_pins_sdr50 {
85 pinctrl-single,pins = <
86 DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_clk.clk */
87 DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_cmd.cmd */
88 DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_dat0.dat0 */
89 DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_dat1.dat1 */
90 DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_dat2.dat2 */
91 DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE10 | MUX_MODE0) /* mmc1_dat3.dat3 */
92 >;
93 };
94
95 mmc1_pins_ddr50: mmc1_pins_ddr50 {
96 pinctrl-single,pins = <
97 DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_clk.clk */
98 DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_cmd.cmd */
99 DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat0.dat0 */
100 DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat1.dat1 */
101 DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat2.dat2 */
102 DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat3.dat3 */
103 >;
104 };
105
106 mmc1_pins_sdr104: mmc1_pins_sdr104 {
107 pinctrl-single,pins = <
108 DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_clk.clk */
109 DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_cmd.cmd */
110 DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat0.dat0 */
111 DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat1.dat1 */
112 DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat2.dat2 */
113 DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0) /* mmc1_dat3.dat3 */
114 >;
115 };
116
117 mmc2_pins_default: mmc2_pins_default {
118 pinctrl-single,pins = <
119 DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */
120 DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */
121 DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */
122 DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */
123 DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */
124 DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */
125 DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */
126 DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */
127 DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */
128 DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */
129 >;
130 };
131
132 mmc2_pins_hs: mmc2_pins_hs {
133 pinctrl-single,pins = <
134 DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */
135 DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */
136 DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */
137 DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */
138 DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */
139 DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */
140 DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */
141 DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */
142 DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */
143 DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */
144 >;
145 };
146
147 mmc2_pins_ddr: mmc2_pins_ddr {
148 pinctrl-single,pins = <
149 DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */
150 DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */
151 DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */
152 DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */
153 DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */
154 DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */
155 DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */
156 DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */
157 DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */
158 DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */
159 >;
160 };
161
162 mmc2_pins_hs200: mmc2_pins_hs200 {
163 pinctrl-single,pins = <
164 DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a23.mmc2_clk */
165 DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */
166 DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */
167 DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */
168 DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */
169 DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */
170 DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */
171 DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */
172 DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */
173 DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */
174 >;
175 };
176
177 mmc3_pins_default: mmc3_pins_default {
178 pinctrl-single,pins = <
179 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
180 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
181 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
182 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
183 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
184 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
185 >;
186 };
187
188 mmc3_pins_hs: mmc3_pins_hs {
189 pinctrl-single,pins = <
190 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
191 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
192 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
193 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
194 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
195 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
196 >;
197 };
198
199 mmc3_pins_sdr12: mmc3_pins_sdr12 {
200 pinctrl-single,pins = <
201 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
202 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
203 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
204 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
205 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
206 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
207 >;
208 };
209
210 mmc3_pins_sdr25: mmc3_pins_sdr25 {
211 pinctrl-single,pins = <
212 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
213 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
214 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
215 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
216 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
217 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
218 >;
219 };
220
221 mmc3_pins_sdr50: mmc3_pins_sdr50 {
222 pinctrl-single,pins = <
223 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
224 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
225 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
226 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
227 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
228 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
229 >;
230 };
231
232 mmc4_pins_default: mmc4_pins_default {
233 pinctrl-single,pins = <
234 DRA7XX_CORE_IOPAD(0x37e8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_ctsn.mmc4_clk */
235 DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_rtsn.mmc4_cmd */
236 DRA7XX_CORE_IOPAD(0x37f0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rxd.mmc4_dat0 */
237 DRA7XX_CORE_IOPAD(0x37f4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_txd.mmc4_dat1 */
238 DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_ctsn.mmc4_dat2 */
239 DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rtsn.mmc4_dat3 */
240 >;
241 };
242
243 mmc4_pins_sdr12: mmc4_pins_sdr12 {
244 pinctrl-single,pins = <
245 DRA7XX_CORE_IOPAD(0x37e8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_ctsn.mmc4_clk */
246 DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_rtsn.mmc4_cmd */
247 DRA7XX_CORE_IOPAD(0x37f0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rxd.mmc4_dat0 */
248 DRA7XX_CORE_IOPAD(0x37f4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_txd.mmc4_dat1 */
249 DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_ctsn.mmc4_dat2 */
250 DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rtsn.mmc4_dat3 */
251 >;
252 };
253
254 mmc4_pins_hs: mmc4_pins_hs {
255 pinctrl-single,pins = <
256 DRA7XX_CORE_IOPAD(0x37e8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_ctsn.mmc4_clk */
257 DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_rtsn.mmc4_cmd */
258 DRA7XX_CORE_IOPAD(0x37f0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rxd.mmc4_dat0 */
259 DRA7XX_CORE_IOPAD(0x37f4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_txd.mmc4_dat1 */
260 DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_ctsn.mmc4_dat2 */
261 DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rtsn.mmc4_dat3 */
262 >;
263 };
264
265 mmc4_pins_sdr25: mmc4_pins_sdr25 {
266 pinctrl-single,pins = <
267 DRA7XX_CORE_IOPAD(0x37e8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_ctsn.mmc4_clk */
268 DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart1_rtsn.mmc4_cmd */
269 DRA7XX_CORE_IOPAD(0x37f0, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rxd.mmc4_dat0 */
270 DRA7XX_CORE_IOPAD(0x37f4, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_txd.mmc4_dat1 */
271 DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_ctsn.mmc4_dat2 */
272 DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE3) /* uart2_rtsn.mmc4_dat3 */
273 >;
274 };
275};
276
277&dra7_iodelay_core {
278
279 /* Corresponds to MMC1_DDR_MANUAL1 in datamanual */
280 mmc1_iodelay_ddr_conf: mmc1_iodelay_ddr_conf {
281 pinctrl-single,pins = <
282 0x618 (A_DELAY(489) | G_DELAY(0)) /* CFG_MMC1_CLK_IN */
283 0x624 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_CMD_IN */
284 0x630 (A_DELAY(374) | G_DELAY(0)) /* CFG_MMC1_DAT0_IN */
285 0x63c (A_DELAY(31) | G_DELAY(0)) /* CFG_MMC1_DAT1_IN */
286 0x648 (A_DELAY(56) | G_DELAY(0)) /* CFG_MMC1_DAT2_IN */
287 0x654 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT3_IN */
288 0x620 (A_DELAY(1355) | G_DELAY(0)) /* CFG_MMC1_CLK_OUT */
289 0x628 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_CMD_OEN */
290 0x62c (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_CMD_OUT */
291 0x634 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT0_OEN */
292 0x638 (A_DELAY(0) | G_DELAY(4)) /* CFG_MMC1_DAT0_OUT */
293 0x640 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT1_OEN */
294 0x644 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT1_OUT */
295 0x64c (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT2_OEN */
296 0x650 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT2_OUT */
297 0x658 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT3_OEN */
298 0x65c (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT3_OUT */
299 >;
300 };
301
302 /* Corresponds to MMC1_SDR104_MANUAL1 in datamanual */
303 mmc1_iodelay_sdr104_conf: mmc1_iodelay_sdr104_conf {
304 pinctrl-single,pins = <
305 0x620 (A_DELAY(892) | G_DELAY(0)) /* CFG_MMC1_CLK_OUT */
306 0x628 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_CMD_OEN */
307 0x62c (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_CMD_OUT */
308 0x634 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT0_OEN */
309 0x638 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT0_OUT */
310 0x640 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT1_OEN */
311 0x644 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT1_OUT */
312 0x64c (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT2_OEN */
313 0x650 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT2_OUT */
314 0x658 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT3_OEN */
315 0x65c (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC1_DAT3_OUT */
316 >;
317 };
318
319 /* Corresponds to MMC2_HS200_MANUAL1 in datamanual */
320 mmc2_iodelay_hs200_conf: mmc2_iodelay_hs200_conf {
321 pinctrl-single,pins = <
322 0x190 (A_DELAY(384) | G_DELAY(0)) /* CFG_GPMC_A19_OEN */
323 0x194 (A_DELAY(0) | G_DELAY(174)) /* CFG_GPMC_A19_OUT */
324 0x1a8 (A_DELAY(410) | G_DELAY(0)) /* CFG_GPMC_A20_OEN */
325 0x1ac (A_DELAY(85) | G_DELAY(0)) /* CFG_GPMC_A20_OUT */
326 0x1b4 (A_DELAY(468) | G_DELAY(0)) /* CFG_GPMC_A21_OEN */
327 0x1b8 (A_DELAY(139) | G_DELAY(0)) /* CFG_GPMC_A21_OUT */
328 0x1c0 (A_DELAY(676) | G_DELAY(0)) /* CFG_GPMC_A22_OEN */
329 0x1c4 (A_DELAY(69) | G_DELAY(0)) /* CFG_GPMC_A22_OUT */
330 0x1d0 (A_DELAY(1062) | G_DELAY(154)) /* CFG_GPMC_A23_OUT */
331 0x1d8 (A_DELAY(640) | G_DELAY(0)) /* CFG_GPMC_A24_OEN */
332 0x1dc (A_DELAY(0) | G_DELAY(0)) /* CFG_GPMC_A24_OUT */
333 0x1e4 (A_DELAY(356) | G_DELAY(0)) /* CFG_GPMC_A25_OEN */
334 0x1e8 (A_DELAY(0) | G_DELAY(0)) /* CFG_GPMC_A25_OUT */
335 0x1f0 (A_DELAY(579) | G_DELAY(0)) /* CFG_GPMC_A26_OEN */
336 0x1f4 (A_DELAY(0) | G_DELAY(0)) /* CFG_GPMC_A26_OUT */
337 0x1fc (A_DELAY(435) | G_DELAY(0)) /* CFG_GPMC_A27_OEN */
338 0x200 (A_DELAY(36) | G_DELAY(0)) /* CFG_GPMC_A27_OUT */
339 0x364 (A_DELAY(759) | G_DELAY(0)) /* CFG_GPMC_CS1_OEN */
340 0x368 (A_DELAY(72) | G_DELAY(0)) /* CFG_GPMC_CS1_OUT */
341 >;
342 };
343
344 /* Corresponds to MMC3_MANUAL1 in datamanual */
345 mmc3_iodelay_manual1_conf: mmc3_iodelay_manual1_conf {
346 pinctrl-single,pins = <
347 0x678 (A_DELAY(0) | G_DELAY(386)) /* CFG_MMC3_CLK_IN */
348 0x680 (A_DELAY(605) | G_DELAY(0)) /* CFG_MMC3_CLK_OUT */
349 0x684 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_CMD_IN */
350 0x688 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_CMD_OEN */
351 0x68c (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_CMD_OUT */
352 0x690 (A_DELAY(171) | G_DELAY(0)) /* CFG_MMC3_DAT0_IN */
353 0x694 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT0_OEN */
354 0x698 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT0_OUT */
355 0x69c (A_DELAY(221) | G_DELAY(0)) /* CFG_MMC3_DAT1_IN */
356 0x6a0 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT1_OEN */
357 0x6a4 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT1_OUT */
358 0x6a8 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT2_IN */
359 0x6ac (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT2_OEN */
360 0x6b0 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT2_OUT */
361 0x6b4 (A_DELAY(474) | G_DELAY(0)) /* CFG_MMC3_DAT3_IN */
362 0x6b8 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT3_OEN */
363 0x6bc (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT3_OUT */
364 >;
365 };
366
367 /* Corresponds to MMC3_MANUAL2 in datamanual */
368 mmc3_iodelay_sdr50_conf: mmc3_iodelay_sdr50_conf {
369 pinctrl-single,pins = <
370 0x678 (A_DELAY(852) | G_DELAY(0)) /* CFG_MMC3_CLK_IN */
371 0x680 (A_DELAY(94) | G_DELAY(0)) /* CFG_MMC3_CLK_OUT */
372 0x684 (A_DELAY(122) | G_DELAY(0)) /* CFG_MMC3_CMD_IN */
373 0x688 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_CMD_OEN */
374 0x68c (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_CMD_OUT */
375 0x690 (A_DELAY(91) | G_DELAY(0)) /* CFG_MMC3_DAT0_IN */
376 0x694 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT0_OEN */
377 0x698 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT0_OUT */
378 0x69c (A_DELAY(57) | G_DELAY(0)) /* CFG_MMC3_DAT1_IN */
379 0x6a0 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT1_OEN */
380 0x6a4 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT1_OUT */
381 0x6a8 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT2_IN */
382 0x6ac (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT2_OEN */
383 0x6b0 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT2_OUT */
384 0x6b4 (A_DELAY(375) | G_DELAY(0)) /* CFG_MMC3_DAT3_IN */
385 0x6b8 (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT3_OEN */
386 0x6bc (A_DELAY(0) | G_DELAY(0)) /* CFG_MMC3_DAT3_OUT */
387 >;
388 };
389
390 /* Corresponds to MMC4_MANUAL1 in datamanual */
391 mmc4_iodelay_manual1_conf: mmc4_iodelay_manual1_conf {
392 pinctrl-single,pins = <
393 0x840 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART1_CTSN_IN */
394 0x848 (A_DELAY(1147) | G_DELAY(0)) /* CFG_UART1_CTSN_OUT */
395 0x84c (A_DELAY(1834) | G_DELAY(0)) /* CFG_UART1_RTSN_IN */
396 0x850 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART1_RTSN_OEN */
397 0x854 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART1_RTSN_OUT */
398 0x870 (A_DELAY(2165) | G_DELAY(0)) /* CFG_UART2_CTSN_IN */
399 0x874 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_CTSN_OEN */
400 0x878 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_CTSN_OUT */
401 0x87c (A_DELAY(1929) | G_DELAY(64)) /* CFG_UART2_RTSN_IN */
402 0x880 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_RTSN_OEN */
403 0x884 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_RTSN_OUT */
404 0x888 (A_DELAY(1935) | G_DELAY(128)) /* CFG_UART2_RXD_IN */
405 0x88c (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_RXD_OEN */
406 0x890 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_RXD_OUT */
407 0x894 (A_DELAY(2172) | G_DELAY(44)) /* CFG_UART2_TXD_IN */
408 0x898 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_TXD_OEN */
409 0x89c (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_TXD_OUT */
410 >;
411 };
412
413 /* Corresponds to MMC4_DS_MANUAL1 in datamanual */
414 mmc4_iodelay_default_conf: mmc4_iodelay_default_conf {
415 pinctrl-single,pins = <
416 0x840 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART1_CTSN_IN */
417 0x848 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART1_CTSN_OUT */
418 0x84c (A_DELAY(307) | G_DELAY(0)) /* CFG_UART1_RTSN_IN */
419 0x850 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART1_RTSN_OEN */
420 0x854 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART1_RTSN_OUT */
421 0x870 (A_DELAY(785) | G_DELAY(0)) /* CFG_UART2_CTSN_IN */
422 0x874 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_CTSN_OEN */
423 0x878 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_CTSN_OUT */
424 0x87c (A_DELAY(613) | G_DELAY(0)) /* CFG_UART2_RTSN_IN */
425 0x880 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_RTSN_OEN */
426 0x884 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_RTSN_OUT */
427 0x888 (A_DELAY(683) | G_DELAY(0)) /* CFG_UART2_RXD_IN */
428 0x88c (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_RXD_OEN */
429 0x890 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_RXD_OUT */
430 0x894 (A_DELAY(835) | G_DELAY(0)) /* CFG_UART2_TXD_IN */
431 0x898 (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_TXD_OEN */
432 0x89c (A_DELAY(0) | G_DELAY(0)) /* CFG_UART2_TXD_OUT */
433 >;
434 };
435};
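
A board pulls these entries in by declaring one pinctrl state per MMC timing mode; the MMC driver switches state as it retunes. The &mmc2 node in dra76-evm.dts above already shows the intended consumption pattern, repeated here as a sketch:

	&mmc2 {
		pinctrl-names = "default", "hs", "ddr_1_8v", "hs200_1_8v";
		pinctrl-0 = <&mmc2_pins_default>;
		pinctrl-1 = <&mmc2_pins_hs>;
		pinctrl-2 = <&mmc2_pins_ddr>;
		/* faster modes pair a padconf entry with a manual iodelay entry */
		pinctrl-3 = <&mmc2_pins_hs200 &mmc2_iodelay_hs200_conf>;
	};

A pinctrl-N entry may list several phandles: the HS200 state combines the pad-mux settings with the MMC2_HS200_MANUAL1 delay block defined in this file.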
diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
new file mode 100644
index 000000000000..6edc1d40fe19
--- /dev/null
+++ b/arch/arm/boot/dts/dra76x.dtsi
@@ -0,0 +1,81 @@
1/*
2 * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include "dra74x.dtsi"
10
11/ {
12 compatible = "ti,dra762", "ti,dra7";
13
14 ocp {
15 cal: cal@4845b000 {
16 compatible = "ti,dra72-cal";
17 ti,hwmods = "cal";
18 reg = <0x489B0000 0x400>,
19 <0x489B0800 0x40>,
20 <0x489B0900 0x40>,
21 <0x4A0026DC 0x4>;
22 reg-names = "cal_top",
23 "cal_rx_core0",
24 "cal_rx_core1",
25 "camerrx_control";
26 interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
27 #address-cells = <1>;
28 #size-cells = <0>;
29 status = "disabled";
30
31 ports {
32 #address-cells = <1>;
33 #size-cells = <0>;
34
35 csi2_0: port@0 {
36 reg = <0>;
37 };
38 csi2_1: port@1 {
39 reg = <1>;
40 };
41 };
42 };
43 };
44};
45
46/* MCAN interrupts are hard-wired to irqs 67, 68 */
47&crossbar_mpu {
48 ti,irqs-skip = <10 67 68 133 139 140>;
49};
50
51&cpu0_opp_table {
52 opp_plus@1800000000 {
53 opp-hz = /bits/ 64 <1800000000>;
54 opp-microvolt = <1250000 950000 1250000>;
55 opp-supported-hw = <0xFF 0x08>;
56 };
57};
58
59&oppdm_mpu {
60 ti,efuse-settings = <
61 /* uV offset */
62 1060000 0x0
63 1160000 0x4
64 1210000 0x8
65 1250000 0xC
66 >;
67};
68
69&abb_mpu {
70 ti,abb_info = <
71 /*uV ABB efuse rbb_m fbb_m vset_m*/
72 1060000 0 0x0 0 0x02000000 0x01F00000
73 1160000 0 0x4 0 0x02000000 0x01F00000
74 1210000 0 0x8 0 0x02000000 0x01F00000
75 1250000 0 0xC 0 0x02000000 0x01F00000
76 >;
77};
78
79&mmc3 {
80 max-frequency = <96000000>;
81};
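
The opp_plus entry is gated by opp-supported-hw, which the operating-points-v2 binding defines as per-cell bitmasks matched against values the platform code reads from efuse; the OPP is enabled only when every cell's AND is non-zero. A hedged sketch of the shape (the masks and frequency here are illustrative, not silicon data):

	opp_example@1500000000 {
		opp-hz = /bits/ 64 <1500000000>;
		/* One bitmask per cell; platform code supplies matching
		 * values read from efuse, and the OPP is enabled only if
		 * (mask & efuse-value) is non-zero for every cell.
		 */
		opp-supported-hw = <0xFF 0x04>;
	};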
diff --git a/arch/arm/boot/dts/dra7x-evm-fpd-auo-g101evn01.0.dtsi b/arch/arm/boot/dts/dra7x-evm-fpd-auo-g101evn01.0.dtsi
new file mode 100644
index 000000000000..c928be92c1ac
--- /dev/null
+++ b/arch/arm/boot/dts/dra7x-evm-fpd-auo-g101evn01.0.dtsi
@@ -0,0 +1,107 @@
1/*
2 * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/ {
10 backlight {
11 compatible = "led-backlight";
12 leds = <&backlight_led>;
13 brightness-levels = <0 2 38 74 110 146 182 218 255>;
14 default-brightness-level = <8>;
15
16 enable-gpios = <&pcf_display_board 0 GPIO_ACTIVE_LOW>;
17 };
18
19 lcd_fpd: display {
20 compatible = "auo,g101evn01.0", "panel-dpi";
21
22 label = "lcd_fpd";
23 status = "disabled";
24
25 panel-timing {
26 clock-frequency = <68930000>;
27 hactive = <1280>;
28 vactive = <800>;
29
30 hfront-porch = <48>;
31 hsync-len = <32>;
32 hback-porch = <48>;
33
34 vfront-porch = <4>;
35 vsync-len = <4>;
36 vback-porch = <8>;
37
38 hsync-active = <0>;
39 vsync-active = <0>;
40 de-active = <1>;
41 pixelclk-active = <1>;
42 };
43
44 port {
45 fpd_in: endpoint {
46 };
47 };
48 };
49};
50
51&disp_ser {
52
53 /*
54 * 0x2c - deserializer
55 * 0x40 - TLC59108
56 * 0x27 - PCF8575
57 * 0x57 - EEPROM
58 * 0x14 - Goodix Touch Controller
59 * 0x28 - RF430CL330H
60 */
61
62 disp_des: deserializer@2c {
63 #address-cells = <1>;
64 #size-cells = <0>;
65 compatible = "ti,ds90ub924q";
66 reg = <0x2c>;
67 slave-mode;
68 };
69
70 /* TLC chip for LCD panel power and backlight */
71 fpd_disp: tlc59108@40 {
72 #address-cells = <1>;
73 #size-cells = <0>;
74 compatible = "ti,tlc59108";
75 reg = <0x40>;
76
77 backlight_led: bl@2 {
78 label = "backlight";
79 reg = <0x2>;
80 };
81 };
82
83 pcf_display_board: gpio@27 {
84 compatible = "nxp,pcf8575";
85 reg = <0x27>;
86 gpio-controller;
87 #gpio-cells = <2>;
88 };
89
90 touchscreen: goodix-gt9271@14 {
91 status = "okay";
92 compatible = "goodix,gt9271";
93 reg = <0x14>;
94
95 touchscreen-size-x = <1280>;
96 touchscreen-size-y = <800>;
97 touchscreen-inverted-y;
98
 99		/* The reset GPIO line is inverted before it reaches the touch panel */
100 reset-gpios = <&pcf_display_board 5 GPIO_ACTIVE_LOW>;
101 irq-gpios = <&pcf_display_board 6 GPIO_ACTIVE_HIGH>;
102 };
103
104	/* The two slaves below on this I2C bus are not yet defined */
105 /* ID EEPROM 0x57 */
106 /* RF430CL330H 0x28 */
107};
diff --git a/arch/arm/boot/dts/dra7x-evm-fpd-lg.dtsi b/arch/arm/boot/dts/dra7x-evm-fpd-lg.dtsi
new file mode 100644
index 000000000000..b4760b4efdad
--- /dev/null
+++ b/arch/arm/boot/dts/dra7x-evm-fpd-lg.dtsi
@@ -0,0 +1,59 @@
1/*
2 * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <dt-bindings/gpio/gpio.h>
10
11
12/ {
13 lcd_fpd: display_fpd {
14 /*
 15		 * This is not really a DPI panel, but the panel-dpi driver
 16		 * works here as a dummy panel driver.
17 */
18 compatible = "lg,lp101wx2", "panel-dpi";
19
20 label = "lcd_fpd";
21 status = "disabled";
22
23 panel-timing {
24 clock-frequency = <69300404>;
25 hactive = <1280>;
26 vactive = <800>;
27
28 hfront-porch = <48>;
29 hback-porch = <44>;
30 hsync-len = <32>;
31
32 vfront-porch = <4>;
33 vback-porch = <7>;
34 vsync-len = <12>;
35
36 hsync-active = <0>;
37 vsync-active = <0>;
38 de-active = <1>;
39 pixelclk-active = <0>;
40 };
41
42 port {
43 fpd_in: endpoint {
44 };
45 };
46 };
47};
48
49&disp_ser {
50 disp_des: deserializer@2c {
51 compatible = "ti,ds90uh928q";
52 reg = <0x2c>;
53 slave-mode;
54 };
55 fpd_disp: tlc59108@1c {
56 compatible = "ti,tlc59108";
57 reg = <0x1c>;
58 };
59};
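
As the in-file comment says, panel-dpi serves as a generic stand-in: a fixed-mode panel can be fully described by a specific compatible with a panel-dpi fallback plus a panel-timing node. A trimmed sketch with illustrative values (not taken from a real panel datasheet):

	lcd: display {
		/* specific compatible first, generic fallback second */
		compatible = "vendor,model", "panel-dpi";
		label = "lcd";

		panel-timing {
			clock-frequency = <33300000>;
			hactive = <800>;
			vactive = <480>;
			hfront-porch = <40>;
			hback-porch = <88>;
			hsync-len = <128>;
			vfront-porch = <13>;
			vback-porch = <32>;
			vsync-len = <3>;
		};

		port {
			lcd_in: endpoint {
			};
		};
	};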
diff --git a/arch/arm/boot/dts/dra7x-evm-lcd-lg.dtsi b/arch/arm/boot/dts/dra7x-evm-lcd-lg.dtsi
index 97939eef7b94..23268b7a474f 100644
--- a/arch/arm/boot/dts/dra7x-evm-lcd-lg.dtsi
+++ b/arch/arm/boot/dts/dra7x-evm-lcd-lg.dtsi
@@ -12,7 +12,6 @@
12 aliases { 12 aliases {
13 display0 = &tlc59108; 13 display0 = &tlc59108;
14 display1 = &hdmi0; 14 display1 = &hdmi0;
15 display2 = &fpd_disp;
16 }; 15 };
17 16
18 backlight { 17 backlight {
@@ -51,7 +50,7 @@
51 hsync-active = <0>; 50 hsync-active = <0>;
52 vsync-active = <0>; 51 vsync-active = <0>;
53 de-active = <1>; 52 de-active = <1>;
54 pixelclk-active = <1>; 53 pixelclk-active = <0>;
55 }; 54 };
56 55
57 port { 56 port {
diff --git a/arch/arm/boot/dts/dra7x-evm-lcd-osd.dtsi b/arch/arm/boot/dts/dra7x-evm-lcd-osd.dtsi
index 219e4f953797..7dc9c986a73a 100644
--- a/arch/arm/boot/dts/dra7x-evm-lcd-osd.dtsi
+++ b/arch/arm/boot/dts/dra7x-evm-lcd-osd.dtsi
@@ -10,7 +10,6 @@
10 aliases { 10 aliases {
11 display0 = &lcd; 11 display0 = &lcd;
12 display1 = &hdmi0; 12 display1 = &hdmi0;
13 display2 = &fpd_disp;
14 }; 13 };
15 14
16 lcd_bl: backlight { 15 lcd_bl: backlight {
diff --git a/arch/arm/boot/dts/dra7xx-jamr3.dtsi b/arch/arm/boot/dts/dra7xx-jamr3.dtsi
index 994c6e735789..894e97f93775 100644
--- a/arch/arm/boot/dts/dra7xx-jamr3.dtsi
+++ b/arch/arm/boot/dts/dra7xx-jamr3.dtsi
@@ -47,7 +47,17 @@
47 <&hwspinlock 6>, 47 <&hwspinlock 6>,
48 <&hwspinlock 7>, 48 <&hwspinlock 7>,
49 <&hwspinlock 8>, 49 <&hwspinlock 8>,
50 <&hwspinlock 9>; 50 <&hwspinlock 9>,
51 <&hwspinlock 10>,
52 <&hwspinlock 11>,
53 <&hwspinlock 12>,
54 <&hwspinlock 13>,
55 <&hwspinlock 14>,
56 <&hwspinlock 15>,
57 <&hwspinlock 16>,
58 <&hwspinlock 17>,
59 <&hwspinlock 18>,
60 <&hwspinlock 19>;
51 }; 61 };
52 62
53 sr0 { 63 sr0 {
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index 4b0ec0703825..8ca9217204a0 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -30,7 +30,7 @@
30 /* kHz uV */ 30 /* kHz uV */
31 996000 1250000 31 996000 1250000
32 792000 1175000 32 792000 1175000
33 396000 1075000 33 396000 1150000
34 >; 34 >;
35 fsl,soc-operating-points = < 35 fsl,soc-operating-points = <
36 /* ARM kHz SOC-PU uV */ 36 /* ARM kHz SOC-PU uV */
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 5f5e0f3d5b64..27cd4abfc74d 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -697,6 +697,8 @@
697 vmmc_aux-supply = <&vsim>; 697 vmmc_aux-supply = <&vsim>;
698 bus-width = <8>; 698 bus-width = <8>;
699 non-removable; 699 non-removable;
700 no-sdio;
701 no-sd;
700}; 702};
701 703
702&mmc3 { 704&mmc3 {
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index f648ec2dd5a6..304bcf9b630e 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -1022,6 +1022,7 @@
1022 phy-names = "sata-phy"; 1022 phy-names = "sata-phy";
1023 clocks = <&sata_ref_clk>; 1023 clocks = <&sata_ref_clk>;
1024 ti,hwmods = "sata"; 1024 ti,hwmods = "sata";
1025 ports-implemented = <0x1>;
1025 }; 1026 };
1026 1027
1027 dss: dss@58000000 { 1028 dss: dss@58000000 {
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 4dfca8fc49b3..1bc61ece2589 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -856,6 +856,13 @@
856 compatible = "atmel,at91sam9260-usart"; 856 compatible = "atmel,at91sam9260-usart";
857 reg = <0xf801c000 0x100>; 857 reg = <0xf801c000 0x100>;
858 interrupts = <24 IRQ_TYPE_LEVEL_HIGH 7>; 858 interrupts = <24 IRQ_TYPE_LEVEL_HIGH 7>;
859 dmas = <&dma0
860 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
861 AT91_XDMAC_DT_PERID(35))>,
862 <&dma0
863 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
864 AT91_XDMAC_DT_PERID(36))>;
865 dma-names = "tx", "rx";
859 clocks = <&uart0_clk>; 866 clocks = <&uart0_clk>;
860 clock-names = "usart"; 867 clock-names = "usart";
861 status = "disabled"; 868 status = "disabled";
@@ -865,6 +872,13 @@
865 compatible = "atmel,at91sam9260-usart"; 872 compatible = "atmel,at91sam9260-usart";
866 reg = <0xf8020000 0x100>; 873 reg = <0xf8020000 0x100>;
867 interrupts = <25 IRQ_TYPE_LEVEL_HIGH 7>; 874 interrupts = <25 IRQ_TYPE_LEVEL_HIGH 7>;
875 dmas = <&dma0
876 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
877 AT91_XDMAC_DT_PERID(37))>,
878 <&dma0
879 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
880 AT91_XDMAC_DT_PERID(38))>;
881 dma-names = "tx", "rx";
868 clocks = <&uart1_clk>; 882 clocks = <&uart1_clk>;
869 clock-names = "usart"; 883 clock-names = "usart";
870 status = "disabled"; 884 status = "disabled";
@@ -874,6 +888,13 @@
874 compatible = "atmel,at91sam9260-usart"; 888 compatible = "atmel,at91sam9260-usart";
875 reg = <0xf8024000 0x100>; 889 reg = <0xf8024000 0x100>;
876 interrupts = <26 IRQ_TYPE_LEVEL_HIGH 7>; 890 interrupts = <26 IRQ_TYPE_LEVEL_HIGH 7>;
891 dmas = <&dma0
892 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
893 AT91_XDMAC_DT_PERID(39))>,
894 <&dma0
895 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
896 AT91_XDMAC_DT_PERID(40))>;
897 dma-names = "tx", "rx";
877 clocks = <&uart2_clk>; 898 clocks = <&uart2_clk>;
878 clock-names = "usart"; 899 clock-names = "usart";
879 status = "disabled"; 900 status = "disabled";
@@ -985,6 +1006,13 @@
985 compatible = "atmel,at91sam9260-usart"; 1006 compatible = "atmel,at91sam9260-usart";
986 reg = <0xfc008000 0x100>; 1007 reg = <0xfc008000 0x100>;
987 interrupts = <27 IRQ_TYPE_LEVEL_HIGH 7>; 1008 interrupts = <27 IRQ_TYPE_LEVEL_HIGH 7>;
1009 dmas = <&dma0
1010 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
1011 AT91_XDMAC_DT_PERID(41))>,
1012 <&dma0
1013 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
1014 AT91_XDMAC_DT_PERID(42))>;
1015 dma-names = "tx", "rx";
988 clocks = <&uart3_clk>; 1016 clocks = <&uart3_clk>;
989 clock-names = "usart"; 1017 clock-names = "usart";
990 status = "disabled"; 1018 status = "disabled";
@@ -993,6 +1021,13 @@
993 uart4: serial@fc00c000 { 1021 uart4: serial@fc00c000 {
994 compatible = "atmel,at91sam9260-usart"; 1022 compatible = "atmel,at91sam9260-usart";
995 reg = <0xfc00c000 0x100>; 1023 reg = <0xfc00c000 0x100>;
1024 dmas = <&dma0
1025 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
1026 AT91_XDMAC_DT_PERID(43))>,
1027 <&dma0
1028 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
1029 AT91_XDMAC_DT_PERID(44))>;
1030 dma-names = "tx", "rx";
996 interrupts = <28 IRQ_TYPE_LEVEL_HIGH 7>; 1031 interrupts = <28 IRQ_TYPE_LEVEL_HIGH 7>;
997 clocks = <&uart4_clk>; 1032 clocks = <&uart4_clk>;
998 clock-names = "usart"; 1033 clock-names = "usart";
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index ed7e1009326c..d9ee0fd817e9 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -565,6 +565,7 @@
565 regulator-name = "+3VS,vdd_pnl"; 565 regulator-name = "+3VS,vdd_pnl";
566 regulator-min-microvolt = <3300000>; 566 regulator-min-microvolt = <3300000>;
567 regulator-max-microvolt = <3300000>; 567 regulator-max-microvolt = <3300000>;
568 regulator-boot-on;
568 gpio = <&gpio TEGRA_GPIO(A, 4) GPIO_ACTIVE_HIGH>; 569 gpio = <&gpio TEGRA_GPIO(A, 4) GPIO_ACTIVE_HIGH>;
569 enable-active-high; 570 enable-active-high;
570 }; 571 };
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index f3142369f594..01116ee1284b 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -87,9 +87,9 @@ CONFIG_IPV6_TUNNEL=m
87CONFIG_NETFILTER=y 87CONFIG_NETFILTER=y
88CONFIG_NF_CONNTRACK=m 88CONFIG_NF_CONNTRACK=m
89CONFIG_NF_CONNTRACK_EVENTS=y 89CONFIG_NF_CONNTRACK_EVENTS=y
90CONFIG_NF_CT_PROTO_DCCP=m 90CONFIG_NF_CT_PROTO_DCCP=y
91CONFIG_NF_CT_PROTO_SCTP=m 91CONFIG_NF_CT_PROTO_SCTP=y
92CONFIG_NF_CT_PROTO_UDPLITE=m 92CONFIG_NF_CT_PROTO_UDPLITE=y
93CONFIG_NF_CONNTRACK_AMANDA=m 93CONFIG_NF_CONNTRACK_AMANDA=m
94CONFIG_NF_CONNTRACK_FTP=m 94CONFIG_NF_CONNTRACK_FTP=m
95CONFIG_NF_CONNTRACK_H323=m 95CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index 679c589c4828..1f7b98e1a00d 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -369,7 +369,7 @@ static struct crypto_alg aes_algs[] = { {
369 .cra_blkcipher = { 369 .cra_blkcipher = {
370 .min_keysize = AES_MIN_KEY_SIZE, 370 .min_keysize = AES_MIN_KEY_SIZE,
371 .max_keysize = AES_MAX_KEY_SIZE, 371 .max_keysize = AES_MAX_KEY_SIZE,
372 .ivsize = AES_BLOCK_SIZE, 372 .ivsize = 0,
373 .setkey = ce_aes_setkey, 373 .setkey = ce_aes_setkey,
374 .encrypt = ecb_encrypt, 374 .encrypt = ecb_encrypt,
375 .decrypt = ecb_decrypt, 375 .decrypt = ecb_decrypt,
@@ -446,7 +446,7 @@ static struct crypto_alg aes_algs[] = { {
446 .cra_ablkcipher = { 446 .cra_ablkcipher = {
447 .min_keysize = AES_MIN_KEY_SIZE, 447 .min_keysize = AES_MIN_KEY_SIZE,
448 .max_keysize = AES_MAX_KEY_SIZE, 448 .max_keysize = AES_MAX_KEY_SIZE,
449 .ivsize = AES_BLOCK_SIZE, 449 .ivsize = 0,
450 .setkey = ablk_set_key, 450 .setkey = ablk_set_key,
451 .encrypt = ablk_encrypt, 451 .encrypt = ablk_encrypt,
452 .decrypt = ablk_decrypt, 452 .decrypt = ablk_decrypt,
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index d2315ffd8f12..f13ae153fb24 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
112#define CORE_DUMP_USE_REGSET 112#define CORE_DUMP_USE_REGSET
113#define ELF_EXEC_PAGESIZE 4096 113#define ELF_EXEC_PAGESIZE 4096
114 114
115/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 115/* This is the base location for PIE (ET_DYN with INTERP) loads. */
116 use of this is to invoke "./ld.so someprog" to test out a new version of 116#define ELF_ET_DYN_BASE 0x400000UL
117 the loader. We need to make sure that it is out of the way of the program
118 that it will "exec", and that there is sufficient room for the brk. */
119
120#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
121 117
122/* When the program starts, a1 contains a pointer to a function to be 118/* When the program starts, a1 contains a pointer to a function to be
123 registered with atexit, as per the SVR4 ABI. A value of 0 means we 119 registered with atexit, as per the SVR4 ABI. A value of 0 means we
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index bfe2a2f5a644..22b73112b75f 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
54 54
55#define ftrace_return_address(n) return_address(n) 55#define ftrace_return_address(n) return_address(n)
56 56
57#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
58
59static inline bool arch_syscall_match_sym_name(const char *sym,
60 const char *name)
61{
62 if (!strcmp(sym, "sys_mmap2"))
63 sym = "sys_mmap_pgoff";
64 else if (!strcmp(sym, "sys_statfs64_wrapper"))
65 sym = "sys_statfs64";
66 else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
67 sym = "sys_fstatfs64";
68 else if (!strcmp(sym, "sys_arm_fadvise64_64"))
69 sym = "sys_fadvise64_64";
70
71 /* Ignore case since sym may start with "SyS" instead of "sys" */
72 return !strcasecmp(sym, name);
73}
74
57#endif /* ifndef __ASSEMBLY__ */ 75#endif /* ifndef __ASSEMBLY__ */
58 76
59#endif /* _ASM_ARM_FTRACE */ 77#endif /* _ASM_ARM_FTRACE */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index c7ba9a42e857..ebf866a3a8c8 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -205,18 +205,12 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
205 * and iterate over the range. 205 * and iterate over the range.
206 */ 206 */
207 207
208 bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
209
210 VM_BUG_ON(size & ~PAGE_MASK); 208 VM_BUG_ON(size & ~PAGE_MASK);
211 209
212 if (!need_flush && !icache_is_pipt())
213 goto vipt_cache;
214
215 while (size) { 210 while (size) {
216 void *va = kmap_atomic_pfn(pfn); 211 void *va = kmap_atomic_pfn(pfn);
217 212
218 if (need_flush) 213 kvm_flush_dcache_to_poc(va, PAGE_SIZE);
219 kvm_flush_dcache_to_poc(va, PAGE_SIZE);
220 214
221 if (icache_is_pipt()) 215 if (icache_is_pipt())
222 __cpuc_coherent_user_range((unsigned long)va, 216 __cpuc_coherent_user_range((unsigned long)va,
@@ -228,7 +222,6 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
228 kunmap_atomic(va); 222 kunmap_atomic(va);
229 } 223 }
230 224
231vipt_cache:
232 if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) { 225 if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
233 /* any kind of VIPT cache */ 226 /* any kind of VIPT cache */
234 __flush_icache_all(); 227 __flush_icache_all();
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 80856def2465..82bdac0f2804 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -73,7 +73,6 @@ obj-$(CONFIG_IWMMXT) += iwmmxt.o
73obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o 73obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
74obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \ 74obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
75 perf_event_v7.o 75 perf_event_v7.o
76CFLAGS_pj4-cp0.o := -marm
77AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt 76AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
78obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o 77obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
79obj-$(CONFIG_VDSO) += vdso.o 78obj-$(CONFIG_VDSO) += vdso.o
diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
index 8153e36b2491..7c9248b74d3f 100644
--- a/arch/arm/kernel/pj4-cp0.c
+++ b/arch/arm/kernel/pj4-cp0.c
@@ -66,9 +66,13 @@ static void __init pj4_cp_access_write(u32 value)
 
 	__asm__ __volatile__ (
 		"mcr	p15, 0, %1, c1, c0, 2\n\t"
+#ifdef CONFIG_THUMB2_KERNEL
+		"isb\n\t"
+#else
 		"mrc	p15, 0, %0, c1, c0, 2\n\t"
 		"mov	%0, %0\n\t"
 		"sub	pc, pc, #4\n\t"
+#endif
 		: "=r" (temp) : "r" (value));
 }
 
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 4d9375814b53..d54c53b7ab63 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
 	       const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	struct pt_regs newregs;
+	struct pt_regs newregs = *task_pt_regs(target);
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				 &newregs,
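
The gpr_set() change is a hardening fix: user_regset_copyin() may legitimately copy fewer bytes than sizeof(struct pt_regs), and the uncopied tail of the on-stack newregs was previously undefined before being written back to the task. Seeding it from task_pt_regs(target) turns a partial write into a read-modify-write. A schematic model (hypothetical types, user-space sketch, not the kernel code):

	#include <string.h>

	struct regs { unsigned long r[18]; };

	/* Partial register-set write of 'count' bytes into a task. */
	static void gpr_set_model(struct regs *task, const void *buf, size_t count)
	{
		struct regs newregs = *task;	/* seed with current values */

		memcpy(&newregs, buf, count);	/* caller-supplied prefix   */
		*task = newregs;		/* tail keeps old contents  */
	}
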
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 54a5aeab988d..bbbffe946122 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -17,6 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/cache.h>
 #include <linux/elf.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
@@ -39,7 +40,7 @@
 static struct page **vdso_text_pagelist;
 
 /* Total number of pages needed for the data and text portions of the VDSO. */
-unsigned int vdso_total_pages __read_mostly;
+unsigned int vdso_total_pages __ro_after_init;
 
 /*
  * The VDSO data page.
@@ -47,13 +48,13 @@ unsigned int vdso_total_pages __read_mostly;
 static union vdso_data_store vdso_data_store __page_aligned_data;
 static struct vdso_data *vdso_data = &vdso_data_store.data;
 
-static struct page *vdso_data_page;
-static struct vm_special_mapping vdso_data_mapping = {
+static struct page *vdso_data_page __ro_after_init;
+static const struct vm_special_mapping vdso_data_mapping = {
 	.name = "[vvar]",
 	.pages = &vdso_data_page,
 };
 
-static struct vm_special_mapping vdso_text_mapping = {
+static struct vm_special_mapping vdso_text_mapping __ro_after_init = {
 	.name = "[vdso]",
 };
 
@@ -67,7 +68,7 @@ struct elfinfo {
 /* Cached result of boot-time check for whether the arch timer exists,
  * and if so, whether the virtual counter is useable.
  */
-static bool cntvct_ok __read_mostly;
+static bool cntvct_ok __ro_after_init;
 
 static bool __init cntvct_functional(void)
 {
@@ -224,7 +225,7 @@ static int install_vvar(struct mm_struct *mm, unsigned long addr)
 			       VM_READ | VM_MAYREAD,
 			       &vdso_data_mapping);
 
-	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
+	return PTR_ERR_OR_ZERO(vma);
 }
 
 /* assumes mmap_sem is write-locked */
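
The vdso.c hunks convert boot-time-constant data to __ro_after_init (pulled in through <linux/cache.h>): such variables stay writable during init and are mapped read-only afterwards, so a later stray write faults instead of silently corrupting them. The usage pattern, reduced to a sketch (illustrative only, assuming the annotation is available as it is in this tree):

	#include <linux/cache.h>
	#include <linux/init.h>

	static unsigned int nr_pages __ro_after_init;

	static int __init my_setup(void)
	{
		nr_pages = 2;	/* last legal write: still in the init phase */
		return 0;
	}
	early_initcall(my_setup);
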
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 3988e72d16ff..bfc5aae0c280 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -110,7 +110,6 @@ __do_hyp_init:
 	@  - Write permission implies XN: disabled
 	@  - Instruction cache: enabled
 	@  - Data/Unified cache: enabled
-	@  - Memory alignment checks: enabled
 	@  - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK
@@ -118,8 +117,8 @@ __do_hyp_init:
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
-ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)			)
-THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		)
+ARM(	ldr	r2, =(HSCTLR_M)					)
+THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)			)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	isb
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 767872411d97..360cea172b06 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -301,6 +301,14 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 		next = kvm_pgd_addr_end(addr, end);
 		if (!pgd_none(*pgd))
 			unmap_puds(kvm, pgd, addr, next);
+		/*
+		 * If we are dealing with a large range in
+		 * stage2 table, release the kvm->mmu_lock
+		 * to prevent starvation and lockup detector
+		 * warnings.
+		 */
+		if (kvm && (next != end))
+			cond_resched_lock(&kvm->mmu_lock);
 	} while (pgd++, addr = next, addr != end);
 }
 
@@ -745,6 +753,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
  */
 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 {
+	assert_spin_locked(&kvm->mmu_lock);
 	unmap_range(kvm, kvm->arch.pgd, start, size);
 }
 
@@ -803,6 +812,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
+	down_read(&current->mm->mmap_sem);
 	spin_lock(&kvm->mmu_lock);
 
 	slots = kvm_memslots(kvm);
@@ -810,6 +820,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 		stage2_unmap_memslot(kvm, memslot);
 
 	spin_unlock(&kvm->mmu_lock);
+	up_read(&current->mm->mmap_sem);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -829,7 +840,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 	if (kvm->arch.pgd == NULL)
 		return;
 
+	spin_lock(&kvm->mmu_lock);
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	spin_unlock(&kvm->mmu_lock);
+
 	kvm_free_hwpgd(kvm_get_hwpgd(kvm));
 	if (KVM_PREALLOC_LEVEL > 0)
 		kfree(kvm->arch.pgd);
@@ -862,6 +876,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pmd_t *pmd;
 
 	pud = stage2_get_pud(kvm, cache, addr);
+	if (!pud)
+		return NULL;
+
 	if (pud_none(*pud)) {
 		if (!cache)
 			return NULL;
@@ -1619,12 +1636,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_age_hva(start, end);
 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_test_age_hva(hva);
 	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
 }
@@ -1771,6 +1792,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
 		return -EFAULT;
 
+	down_read(&current->mm->mmap_sem);
 	/*
 	 * A memory region could potentially cover multiple VMAs, and any holes
 	 * between them, so iterate over all of them to find out if we can map
@@ -1814,8 +1836,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			pa += vm_start - vma->vm_start;
 
 			/* IO region dirty page logging not allowed */
-			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
-				return -EINVAL;
+			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+				ret = -EINVAL;
+				goto out;
+			}
 
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
@@ -1827,7 +1851,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	} while (hva < reg_end);
 
 	if (change == KVM_MR_FLAGS_ONLY)
-		return ret;
+		goto out;
 
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
@@ -1835,6 +1859,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	else
 		stage2_flush_memslot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
+out:
+	up_read(&current->mm->mmap_sem);
 	return ret;
 }
 
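
Several of the mmu.c hunks revolve around one idiom: long stage-2 teardown loops now run fully under kvm->mmu_lock but break the lock periodically via cond_resched_lock(), which drops and retakes the spinlock when a reschedule is due. The shape of the idiom, stripped to essentials (chunk_size() and unmap_chunk() are hypothetical helpers, not the kernel loop):

	static void unmap_all(struct kvm *kvm, u64 addr, u64 end)
	{
		u64 next;

		spin_lock(&kvm->mmu_lock);
		do {
			next = min(end, addr + chunk_size());
			unmap_chunk(kvm, addr, next);
			if (next != end)	/* lock-break point */
				cond_resched_lock(&kvm->mmu_lock);
		} while (addr = next, addr != end);
		spin_unlock(&kvm->mmu_lock);
	}
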
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index a9b3b905e661..443db0c43d7c 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -208,9 +208,10 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
-	int ret = 1;
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
+	int ret = 1;
 
 	switch (psci_fn) {
 	case PSCI_0_2_FN_PSCI_VERSION:
@@ -230,7 +231,9 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	case PSCI_0_2_FN_CPU_ON:
 	case PSCI_0_2_FN64_CPU_ON:
+		mutex_lock(&kvm->lock);
 		val = kvm_psci_vcpu_on(vcpu);
+		mutex_unlock(&kvm->lock);
 		break;
 	case PSCI_0_2_FN_AFFINITY_INFO:
 	case PSCI_0_2_FN64_AFFINITY_INFO:
@@ -279,6 +282,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
@@ -288,7 +292,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		val = PSCI_RET_SUCCESS;
 		break;
 	case KVM_PSCI_FN_CPU_ON:
+		mutex_lock(&kvm->lock);
 		val = kvm_psci_vcpu_on(vcpu);
+		mutex_unlock(&kvm->lock);
 		break;
 	default:
 		val = PSCI_RET_NOT_SUPPORTED;
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 8ecfd15c3a02..df73914e81c8 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -67,7 +67,7 @@ ENTRY(__get_user_4)
 ENDPROC(__get_user_4)
 
 ENTRY(__get_user_8)
-	check_uaccess r0, 8, r1, r2, __get_user_bad
+	check_uaccess r0, 8, r1, r2, __get_user_bad8
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(ldr)	r2, [r0]
 6: TUSER(ldr)	r3, [r0, #4]
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 23726fb31741..d687f860a2da 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -286,6 +286,22 @@ static void at91_ddr_standby(void)
 	at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 }
 
+static void sama5d3_ddr_standby(void)
+{
+	u32 lpr0;
+	u32 saved_lpr0;
+
+	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+
+	cpu_do_idle();
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+}
+
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
@@ -320,7 +336,7 @@ static const struct of_device_id const ramc_ids[] __initconst = {
 	{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
 	{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
-	{ .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
+	{ .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
 	{ /*sentinel*/ }
 };
 
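
sama5d3_ddr_standby() is a plain save/modify/idle/restore sequence on the DDR controller's low-power register: the LPCB command field is cleared and reprogrammed to power-down before idling, and the saved value is restored on wakeup. The field update in isolation (constant values assumed to match at91sam9_ddrsdr.h; shown only for illustration):

	#define AT91_DDRSDRC_LPCB		(3 << 0)  /* low-power command field */
	#define AT91_DDRSDRC_LPCB_POWER_DOWN	2

	static unsigned int lpr_power_down(unsigned int lpr)
	{
		lpr &= ~AT91_DDRSDRC_LPCB;		   /* clear the field   */
		return lpr | AT91_DDRSDRC_LPCB_POWER_DOWN; /* select power-down */
	}
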
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index e71fcd7b5acb..0cef41dc85e2 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -284,6 +284,7 @@ MACHINE_END
 
 #ifdef CONFIG_SOC_DRA7XX
 static const char *const dra74x_boards_compat[] __initconst = {
+	"ti,dra76",
 	"ti,am5728",
 	"ti,am5726",
 	"ti,dra742",
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 84ee6027e20c..5a357f5a1b6c 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -647,6 +647,15 @@ void __init dra7xxx_check_revision(void)
 	hawkeye = (idcode >> 12) & 0xffff;
 	rev = (idcode >> 28) & 0xff;
 	switch (hawkeye) {
+	case 0xbb50:
+		switch (rev) {
+		case 0:
+		default:
+			omap_revision = DRA762_REV_ES1_0;
+			break;
+		}
+		break;
+
 	case 0xb990:
 		switch (rev) {
 		case 0:
@@ -668,9 +677,12 @@ void __init dra7xxx_check_revision(void)
 			omap_revision = DRA722_REV_ES1_0;
 			break;
 		case 1:
-		default:
 			omap_revision = DRA722_REV_ES2_0;
 			break;
+		case 2:
+		default:
+			omap_revision = DRA722_REV_ES2_1;
+			break;
 		}
 		break;
 
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 6d1dffca6c7b..748dde9fa4a5 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 
 #include "omap44xx.h"
 
@@ -56,7 +57,7 @@ wait_2:	ldr	r2, =AUX_CORE_BOOT0_PA	@ read from AuxCoreBoot0
 	cmp	r0, r4
 	bne	wait_2
 	ldr	r12, =API_HYP_ENTRY
-	adr	r0, hyp_boot
+	badr	r0, hyp_boot
 	smc	#0
 hyp_boot:
 	b	secondary_startup
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index acc38a3213e7..6c3d89694b14 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -254,7 +254,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 
 	if (cpu_is_omap446x())
 		startup_addr = omap4460_secondary_startup;
-	if (soc_is_dra74x() || soc_is_omap54xx())
+	if (soc_is_dra74x() || soc_is_omap54xx() || soc_is_dra76x())
 		omap5_erratum_workaround_801819();
 
 	/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index adcfe4642a04..2028a81e13e6 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -375,6 +375,21 @@ static struct omap_hwmod dra7xx_cal_hwmod = {
 	},
 };
 
+static struct omap_hwmod dra76x_cal_hwmod = {
+	.name		= "cal",
+	.class		= &dra7xx_cal_hwmod_class,
+	.clkdm_name	= "cam_clkdm",
+	.main_clk	= "vip3_gclk_mux",
+	.flags		= (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_CAM_VIP3_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_CAM_VIP3_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+};
+
 /*
  * 'counter' class
  *
@@ -3276,6 +3291,28 @@ static struct omap_hwmod dra7xx_wd_timer2_hwmod = {
 	},
 };
 
+/*
+ * 'mcan' class
+ *
+ */
+static struct omap_hwmod_class dra76x_mcan_hwmod_class = {
+	.name	= "mcan",
+};
+
+/* mcan */
+static struct omap_hwmod dra76x_mcan_hwmod = {
+	.name		= "mcan",
+	.class		= &dra76x_mcan_hwmod_class,
+	.clkdm_name	= "wkupaon_clkdm",
+	.main_clk	= "mcan_clk",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs	= DRA7XX_CM_WKUPAON_ADC_CLKCTRL_OFFSET,
+			.context_offs	= DRA7XX_RM_WKUPAON_ADC_CONTEXT_OFFSET,
+			.modulemode	= MODULEMODE_SWCTRL,
+		},
+	},
+};
 
 /*
  * Interfaces
@@ -4542,6 +4579,14 @@ static struct omap_hwmod_ocp_if dra7xx_l4_per3__cal = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l4_per3 -> dra76x_cal */
+static struct omap_hwmod_ocp_if dra76x_l4_per3__cal = {
+	.master		= &dra7xx_l4_per3_hwmod,
+	.slave		= &dra76x_cal_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l4_wkup -> wd_timer2 */
 static struct omap_hwmod_ocp_if dra7xx_l4_wkup__wd_timer2 = {
 	.master		= &dra7xx_l4_wkup_hwmod,
@@ -4550,6 +4595,14 @@ static struct omap_hwmod_ocp_if dra7xx_l4_wkup__wd_timer2 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l3_main_1 -> mcan */
+static struct omap_hwmod_ocp_if dra76x_l3_main_1__mcan = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra76x_mcan_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l3_main_1__dmm,
 	&dra7xx_l3_main_2__l3_instr,
@@ -4700,6 +4753,17 @@ static struct omap_hwmod_ocp_if *dra7xx_gp_hwmod_ocp_ifs[] __initdata = {
 };
 
 /* SoC variant specific hwmod links */
+static struct omap_hwmod_ocp_if *dra76x_hwmod_ocp_ifs[] __initdata = {
+	&dra7xx_l4_per3__usb_otg_ss4,
+	&dra7xx_l4_per3__vip2,
+	&dra7xx_l3_main_1__mmu0_dsp2,
+	&dra7xx_l3_main_1__mmu1_dsp2,
+	&dra7xx_dsp2__l3_main_1,
+	&dra76x_l3_main_1__mcan,
+	&dra76x_l4_per3__cal,
+	NULL,
+};
+
 static struct omap_hwmod_ocp_if *dra74x_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l4_per3__usb_otg_ss4,
 	&dra7xx_l4_per3__vip2,
@@ -4731,12 +4795,14 @@ int __init dra7xx_hwmod_init(void)
 		ret = omap_hwmod_register_links(dra74x_hwmod_ocp_ifs);
 	else if (!ret && soc_is_dra72x())
 		ret = omap_hwmod_register_links(dra72x_hwmod_ocp_ifs);
+	else if (!ret && soc_is_dra76x())
+		ret = omap_hwmod_register_links(dra76x_hwmod_ocp_ifs);
 
 	if (!ret && omap_type() == OMAP2_DEVICE_TYPE_GP)
 		ret = omap_hwmod_register_links(dra7xx_gp_hwmod_ocp_ifs);
 
-	/* now for the IPs *NOT* in dra71 */
-	if (!ret && !of_machine_is_compatible("ti,dra718"))
+	/* now for the IPs available only in dra74 and dra72 */
+	if (!ret && !of_machine_is_compatible("ti,dra718") && !soc_is_dra76x())
 		ret = omap_hwmod_register_links(dra74x_dra72x_hwmod_ocp_ifs);
 
 	return ret;
diff --git a/arch/arm/mach-omap2/powerdomains7xx_data.c b/arch/arm/mach-omap2/powerdomains7xx_data.c
index 0a6caa6e14e5..2497e6f04ea0 100644
--- a/arch/arm/mach-omap2/powerdomains7xx_data.c
+++ b/arch/arm/mach-omap2/powerdomains7xx_data.c
@@ -29,6 +29,7 @@
 #include "prcm44xx.h"
 #include "prm7xx.h"
 #include "prcm_mpu7xx.h"
+#include "soc.h"
 
 /* iva_7xx_pwrdm: IVA-HD power domain */
 static struct powerdomain iva_7xx_pwrdm = {
@@ -63,6 +64,14 @@ static struct powerdomain custefuse_7xx_pwrdm = {
 	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
+/* custefuse_aon_7xx_pwrdm: Customer efuse controller power domain */
+static struct powerdomain custefuse_aon_7xx_pwrdm = {
+	.name		= "custefuse_pwrdm",
+	.prcm_offs	= DRA7XX_PRM_CUSTEFUSE_INST,
+	.prcm_partition	= DRA7XX_PRM_PARTITION,
+	.pwrsts		= PWRSTS_ON,
+};
+
 /* ipu_7xx_pwrdm: Audio back end power domain */
 static struct powerdomain ipu_7xx_pwrdm = {
 	.name		= "ipu_pwrdm",
@@ -350,7 +359,6 @@ static struct powerdomain eve1_7xx_pwrdm = {
 static struct powerdomain *powerdomains_dra7xx[] __initdata = {
 	&iva_7xx_pwrdm,
 	&rtc_7xx_pwrdm,
-	&custefuse_7xx_pwrdm,
 	&ipu_7xx_pwrdm,
 	&dss_7xx_pwrdm,
 	&l4per_7xx_pwrdm,
@@ -374,9 +382,32 @@ static struct powerdomain *powerdomains_dra7xx[] __initdata = {
 	NULL
 };
 
+static struct powerdomain *powerdomains_dra76x[] __initdata = {
+	&custefuse_aon_7xx_pwrdm,
+	NULL
+};
+
+static struct powerdomain *powerdomains_dra74x[] __initdata = {
+	&custefuse_7xx_pwrdm,
+	NULL
+};
+
+static struct powerdomain *powerdomains_dra72x[] __initdata = {
+	&custefuse_aon_7xx_pwrdm,
+	NULL
+};
+
 void __init dra7xx_powerdomains_init(void)
 {
 	pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
 	pwrdm_register_pwrdms(powerdomains_dra7xx);
+
+	if (soc_is_dra76x())
+		pwrdm_register_pwrdms(powerdomains_dra76x);
+	else if (soc_is_dra74x())
+		pwrdm_register_pwrdms(powerdomains_dra74x);
+	else if (soc_is_dra72x())
+		pwrdm_register_pwrdms(powerdomains_dra72x);
+
 	pwrdm_complete_init();
 }
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index 1e11307fd766..1ae1b6fe0370 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -239,6 +239,7 @@ IS_TI_SUBCLASS(816x, 0x816)
 IS_TI_SUBCLASS(814x, 0x814)
 IS_AM_SUBCLASS(335x, 0x335)
 IS_AM_SUBCLASS(437x, 0x437)
+IS_DRA_SUBCLASS(76x, 0x76)
 IS_DRA_SUBCLASS(75x, 0x75)
 IS_DRA_SUBCLASS(72x, 0x72)
 
@@ -262,6 +263,7 @@ IS_DRA_SUBCLASS(72x, 0x72)
 #define soc_is_omap54xx()		0
 #define soc_is_omap543x()		0
 #define soc_is_dra7xx()			0
+#define soc_is_dra76x()			0
 #define soc_is_dra74x()			0
 #define soc_is_dra72x()			0
 
@@ -412,9 +414,11 @@ IS_OMAP_TYPE(3430, 0x3430)
 
 #if defined(CONFIG_SOC_DRA7XX)
 #undef soc_is_dra7xx
+#undef soc_is_dra76x
 #undef soc_is_dra74x
 #undef soc_is_dra72x
 #define soc_is_dra7xx() is_dra7xx()
+#define soc_is_dra76x() is_dra76x()
 #define soc_is_dra74x() is_dra75x()
 #define soc_is_dra72x() is_dra72x()
 #endif
@@ -484,11 +488,13 @@ IS_OMAP_TYPE(3430, 0x3430)
 #define OMAP5432_REV_ES2_0	(OMAP54XX_CLASS | (0x32 << 16) | (0x20 << 8))
 
 #define DRA7XX_CLASS		0x07000000
+#define DRA762_REV_ES1_0	(DRA7XX_CLASS | (0x62 << 16) | (0x10 << 8))
 #define DRA752_REV_ES1_0	(DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8))
 #define DRA752_REV_ES1_1	(DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8))
 #define DRA752_REV_ES2_0	(DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8))
 #define DRA722_REV_ES1_0	(DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
 #define DRA722_REV_ES2_0	(DRA7XX_CLASS | (0x22 << 16) | (0x20 << 8))
+#define DRA722_REV_ES2_1	(DRA7XX_CLASS | (0x22 << 16) | (0x21 << 8))
 
 void omap2xxx_check_revision(void);
 void omap3xxx_check_revision(void);
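
The DRA7xx revision macros pack three byte-wide fields: class in bits 31..24, variant in 23..16, and silicon revision in 15..8, so DRA762_REV_ES1_0 evaluates to 0x07621000. A quick decode (plain C, for illustration):

	#include <stdio.h>

	#define DRA7XX_CLASS		0x07000000
	#define DRA762_REV_ES1_0	(DRA7XX_CLASS | (0x62 << 16) | (0x10 << 8))

	int main(void)
	{
		unsigned int rev = DRA762_REV_ES1_0;	/* 0x07621000 */
		unsigned int es  = (rev >> 8) & 0xff;

		printf("class=%#x variant=%#x ES%x.%x\n",
		       rev >> 24, (rev >> 16) & 0xff, es >> 4, es & 0xf);
		return 0;
	}
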
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 25f0eee33869..a67af98e532b 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -561,8 +561,7 @@ void __init omap_init_time(void)
 	__omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
 			2, "timer_sys_ck", NULL, false);
 
-	if (of_have_populated_dt())
-		clocksource_probe();
+	clocksource_probe();
 }
 
 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
@@ -570,6 +569,8 @@ void __init omap3_secure_sync32k_timer_init(void)
 {
 	__omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
 			2, "timer_sys_ck", NULL, false);
+
+	clocksource_probe();
 }
 #endif /* CONFIG_ARCH_OMAP3 */
 
@@ -579,6 +580,7 @@ void __init omap3_gptimer_timer_init(void)
 {
 	__omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
 			1, "timer_sys_ck", "ti,timer-alwon", true);
+
 	if (of_have_populated_dt())
 		clocksource_probe();
 }
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 506c225c66cc..c73f10c1984f 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
 
 void __init early_abt_enable(void)
 {
-	fsr_info[22].fn = early_abort_handler;
+	fsr_info[FSR_FS_AEA].fn = early_abort_handler;
 	local_abt_enable();
-	fsr_info[22].fn = do_bad;
+	fsr_info[FSR_FS_AEA].fn = do_bad;
 }
 
 #ifndef CONFIG_ARM_LPAE
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 05ec5e0df32d..78830657cab3 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -11,11 +11,15 @@
 #define FSR_FS5_0		(0x3f)
 
 #ifdef CONFIG_ARM_LPAE
+#define FSR_FS_AEA		17
+
 static inline int fsr_fs(unsigned int fsr)
 {
 	return fsr & FSR_FS5_0;
 }
 #else
+#define FSR_FS_AEA		22
+
 static inline int fsr_fs(unsigned int fsr)
 {
 	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 66353caa35b9..641334ebf46d 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
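
Both hunks in mmap.c are the arm side of the stack guard-gap fix: the free-space check now compares against vm_start_gap(vma), which for a downward-growing stack VMA is vm_start minus the configurable guard gap, so a new mapping can no longer be placed flush against the bottom of a stack. A simplified model of the helper (ignoring the underflow clamp in the real implementation):

	/* Roughly what vm_start_gap() computes for a VM_GROWSDOWN vma. */
	static unsigned long vm_start_gap_model(unsigned long vm_start,
						int grows_down,
						unsigned long guard_gap)
	{
		return grows_down ? vm_start - guard_gap : vm_start;
	}
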
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index de9f8921e407..aead23f15213 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1184,15 +1184,15 @@ void __init sanity_check_meminfo(void)
 
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
+	if (!memblock_limit)
+		memblock_limit = arm_lowmem_limit;
+
 	/*
 	 * Round the memblock limit down to a pmd size.  This
 	 * helps to ensure that we will allocate memory from the
 	 * last full pmd, which should be mapped.
 	 */
-	if (memblock_limit)
-		memblock_limit = round_down(memblock_limit, PMD_SIZE);
-	if (!memblock_limit)
-		memblock_limit = arm_lowmem_limit;
+	memblock_limit = round_down(memblock_limit, PMD_SIZE);
 
 	memblock_set_current_limit(memblock_limit);
 }
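
The reordering in sanity_check_meminfo() is subtle: previously a zero memblock_limit skipped the rounding branch and was then set to arm_lowmem_limit as-is, which need not be PMD-aligned; defaulting first means round_down() is now applied unconditionally. A numeric illustration (example address, 2 MiB PMDs):

	#include <stdio.h>

	#define PMD_SIZE		(2UL << 20)
	#define round_down(x, y)	((x) & ~((y) - 1))

	int main(void)
	{
		unsigned long arm_lowmem_limit = 0x2f700000; /* example value */
		unsigned long memblock_limit = 0;            /* "not set yet" */

		if (!memblock_limit)
			memblock_limit = arm_lowmem_limit;
		memblock_limit = round_down(memblock_limit, PMD_SIZE);

		printf("limit = %#lx\n", memblock_limit);    /* 0x2f600000 */
		return 0;
	}
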
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 1160434eece0..59a8fa7b8a3b 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -74,5 +74,5 @@ $(MODLIB)/vdso: FORCE
 	@mkdir -p $(MODLIB)/vdso
 
 PHONY += vdso_install
-vdso_install: $(obj)/vdso.so.dbg $(MODLIB)/vdso FORCE
+vdso_install: $(obj)/vdso.so.dbg $(MODLIB)/vdso
 	$(call cmd,vdso_install)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2543791ce8c2..049335584e0c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -141,6 +141,18 @@ config ARCH_MMAP_RND_COMPAT_BITS_MIN
 config ARCH_MMAP_RND_COMPAT_BITS_MAX
 	default 16
 
+config ARM64_PAGE_SHIFT
+	int
+	default 16 if ARM64_64K_PAGES
+	default 14 if ARM64_16K_PAGES
+	default 12
+
+config ARM64_CONT_SHIFT
+	int
+	default 5 if ARM64_64K_PAGES
+	default 7 if ARM64_16K_PAGES
+	default 4
+
 config NO_IOPORT_MAP
 	def_bool y if !PCI
 
@@ -569,6 +581,7 @@ source kernel/Kconfig.preempt
 source kernel/Kconfig.hz
 
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+	depends on !HIBERNATION
 	def_bool y
 
 config ARCH_HAS_HOLES_MEMORYMODEL
@@ -618,6 +631,27 @@ config SECCOMP
 	  and the task is only allowed to execute a few safe syscalls
 	  defined by each seccomp mode.
 
+config KEXEC
+	depends on PM_SLEEP_SMP
+	select KEXEC_CORE
+	bool "kexec system call"
+	---help---
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel.  It is like a reboot
+	  but it is independent of the system firmware.  And like a reboot
+	  you can start any kernel with it, not just Linux.
+
+config CRASH_DUMP
+	bool "Build kdump crash kernel"
+	help
+	  Generate crash dump after being started by kexec. This should
+	  be normally only set in special crash dump kernels which are
+	  loaded in the main kernel with kexec-tools into a specially
+	  reserved region and then later executed after a crash by
+	  kdump/kexec.
+
+	  For more details see Documentation/kdump/kdump.txt
+
 config XEN_DOM0
 	def_bool y
 	depends on XEN
@@ -720,7 +754,7 @@ config SETEND_EMULATION
 endif
 
 config ARM64_SW_TTBR0_PAN
-	bool "Emulate Priviledged Access Never using TTBR0_EL1 switching"
+	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
 	help
 	  Enabling this option prevents the kernel from accessing
 	  user-space memory directly by pointing TTBR0_EL1 to a reserved
@@ -934,6 +968,26 @@ config BUILD_ARM64_APPENDED_DTB_IMAGE
 	  DTBs to be built by default (instead of a standalone Image.gz.)
 	  The image will built in arch/arm64/boot/Image.gz-dtb
 
+choice
+	prompt "Appended DTB Kernel Image name"
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	help
+	  Enabling this option will cause a specific kernel image Image or
+	  Image.gz to be used for final image creation.
+	  The image will built in arch/arm64/boot/IMAGE-NAME-dtb
+
+	config IMG_GZ_DTB
+		bool "Image.gz-dtb"
+	config IMG_DTB
+		bool "Image-dtb"
+endchoice
+
+config BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME
+	string
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	default "Image.gz-dtb" if IMG_GZ_DTB
+	default "Image-dtb" if IMG_DTB
+
 config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES
 	string "Default dtb names"
 	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 101632379b8b..f1d8a05727cf 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -61,7 +61,9 @@ head-y := arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
+		 int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
+		 rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
 else
 TEXT_OFFSET := 0x00080000
 endif
@@ -85,7 +87,7 @@ core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
 
 # Default target when executing plain make
 ifeq ($(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),y)
-KBUILD_IMAGE	:= Image.gz-dtb
+KBUILD_IMAGE	:= $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME))
 else
 KBUILD_IMAGE	:= Image.gz
 endif
@@ -128,6 +130,16 @@ archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
 	$(Q)$(MAKE) $(clean)=$(boot)/dts
 
+# We need to generate vdso-offsets.h before compiling certain files in kernel/.
+# In order to do that, we should use the archprepare target, but we can't since
+# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
+# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
+# Therefore we need to generate the header after prepare0 has been made, hence
+# this hack.
+prepare: vdso_prepare
+vdso_prepare: prepare0
+	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
+
 define archhelp
   echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
   echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
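
The new TEXT_OFFSET expression keeps the randomized offset page-aligned for every supported page size: it draws a random whole number of pages below 2 MiB and scales back up by the page size (the old form hard-coded 4 KiB granules). The same computation in C (illustrative; 4 KiB pages assumed):

	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	int main(void)
	{
		const unsigned long page = 1UL << 12; /* CONFIG_ARM64_PAGE_SHIFT=12 */
		const unsigned long span = 2UL << 20; /* offsets stay below 2 MiB   */
		unsigned long off;

		srand(time(NULL));
		off = (unsigned long)(rand() % (span / page)) * page;
		printf("TEXT_OFFSET := %#08lx\n", off);
		return 0;
	}
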
diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
index 93bc3d7d51c0..29315af22147 100644
--- a/arch/arm64/boot/dts/arm/juno-r1.dts
+++ b/arch/arm64/boot/dts/arm/juno-r1.dts
@@ -60,6 +60,28 @@
 			};
 		};
 
+		idle-states {
+			entry-method = "arm,psci";
+
+			CPU_SLEEP_0: cpu-sleep-0 {
+				compatible = "arm,idle-state";
+				arm,psci-suspend-param = <0x0010000>;
+				local-timer-stop;
+				entry-latency-us = <300>;
+				exit-latency-us = <1200>;
+				min-residency-us = <2000>;
+			};
+
+			CLUSTER_SLEEP_0: cluster-sleep-0 {
+				compatible = "arm,idle-state";
+				arm,psci-suspend-param = <0x1010000>;
+				local-timer-stop;
+				entry-latency-us = <400>;
+				exit-latency-us = <1200>;
+				min-residency-us = <2500>;
+			};
+		};
+
 		A57_0: cpu@0 {
 			compatible = "arm,cortex-a57","arm,armv8";
 			reg = <0x0 0x0>;
@@ -67,6 +89,7 @@
 			enable-method = "psci";
 			next-level-cache = <&A57_L2>;
 			clocks = <&scpi_dvfs 0>;
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 		};
 
 		A57_1: cpu@1 {
@@ -76,6 +99,7 @@
 			enable-method = "psci";
 			next-level-cache = <&A57_L2>;
 			clocks = <&scpi_dvfs 0>;
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 		};
 
 		A53_0: cpu@100 {
@@ -85,6 +109,7 @@
 			enable-method = "psci";
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 		};
 
 		A53_1: cpu@101 {
@@ -94,6 +119,7 @@
 			enable-method = "psci";
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 		};
 
 		A53_2: cpu@102 {
@@ -103,6 +129,7 @@
 			enable-method = "psci";
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 		};
 
 		A53_3: cpu@103 {
@@ -112,6 +139,7 @@
 			enable-method = "psci";
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
+			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
 		};
 
 		A57_L2: l2-cache0 {
diff --git a/arch/arm64/boot/dts/arm/juno-sched-energy.dtsi b/arch/arm64/boot/dts/arm/juno-sched-energy.dtsi
new file mode 100644
index 000000000000..38207e4391ab
--- /dev/null
+++ b/arch/arm64/boot/dts/arm/juno-sched-energy.dtsi
@@ -0,0 +1,147 @@
+/*
+ * ARM JUNO specific energy cost model data. There are no unit requirements for
+ * the data. Data can be normalized to any reference point, but the
+ * normalization must be consistent. That is, one bogo-joule/watt must be the
+ * same quantity for all data, but we don't care what it is.
+ */
+
+/* static struct idle_state idle_states_cluster_a53[] = { */
+/*	{ .power = 56 }, /\* arch_cpu_idle() (active idle) = WFI *\/ */
+/*	{ .power = 56 }, /\* WFI *\/ */
+/*	{ .power = 56 }, /\* cpu-sleep-0 *\/ */
+/*	{ .power = 17 }, /\* cluster-sleep-0 *\/ */
+/* }; */
+
+/* static struct idle_state idle_states_cluster_a57[] = { */
+/*	{ .power = 65 }, /\* arch_cpu_idle() (active idle) = WFI *\/ */
+/*	{ .power = 65 }, /\* WFI *\/ */
+/*	{ .power = 65 }, /\* cpu-sleep-0 *\/ */
+/*	{ .power = 24 }, /\* cluster-sleep-0 *\/ */
+/* }; */
+
+/* static struct capacity_state cap_states_cluster_a53[] = { */
+/*	/\* Power per cluster *\/ */
+/*	{ .cap =  235, .power = 26, }, /\*  450 MHz *\/ */
+/*	{ .cap =  303, .power = 30, }, /\*  575 MHz *\/ */
+/*	{ .cap =  368, .power = 39, }, /\*  700 MHz *\/ */
+/*	{ .cap =  406, .power = 47, }, /\*  775 MHz *\/ */
+/*	{ .cap =  447, .power = 57, }, /\*  850 Mhz *\/ */
+/* }; */
+
+/* static struct capacity_state cap_states_cluster_a57[] = { */
+/*	/\* Power per cluster *\/ */
+/*	{ .cap =  417, .power = 24, }, /\*  450 MHz *\/ */
+/*	{ .cap =  579, .power = 32, }, /\*  625 MHz *\/ */
+/*	{ .cap =  744, .power = 43, }, /\*  800 MHz *\/ */
+/*	{ .cap =  883, .power = 49, }, /\*  950 MHz *\/ */
+/*	{ .cap = 1024, .power = 64, }, /\* 1100 MHz *\/ */
+/* }; */
+
+/* static struct sched_group_energy energy_cluster_a53 = { */
+/*	.nr_idle_states = ARRAY_SIZE(idle_states_cluster_a53), */
+/*	.idle_states    = idle_states_cluster_a53, */
+/*	.nr_cap_states  = ARRAY_SIZE(cap_states_cluster_a53), */
+/*	.cap_states     = cap_states_cluster_a53, */
+/* }; */
+
+/* static struct sched_group_energy energy_cluster_a57 = { */
+/*	.nr_idle_states = ARRAY_SIZE(idle_states_cluster_a57), */
+/*	.idle_states    = idle_states_cluster_a57, */
+/*	.nr_cap_states  = ARRAY_SIZE(cap_states_cluster_a57), */
+/*	.cap_states     = cap_states_cluster_a57, */
+/* }; */
+
+/* static struct idle_state idle_states_core_a53[] = { */
+/*	{ .power = 6 }, /\* arch_cpu_idle() (active idle) = WFI *\/ */
+/*	{ .power = 6 }, /\* WFI *\/ */
+/*	{ .power = 0 }, /\* cpu-sleep-0 *\/ */
+/*	{ .power = 0 }, /\* cluster-sleep-0 *\/ */
+/* }; */
+
+/* static struct idle_state idle_states_core_a57[] = { */
+/*	{ .power = 15 }, /\* arch_cpu_idle() (active idle) = WFI *\/ */
+/*	{ .power = 15 }, /\* WFI *\/ */
+/*	{ .power = 0 }, /\* cpu-sleep-0 *\/ */
+/*	{ .power = 0 }, /\* cluster-sleep-0 *\/ */
+/* }; */
+
+/* static struct capacity_state cap_states_core_a53[] = { */
+/*	/\* Power per cpu *\/ */
+/*	{ .cap =  235, .power = 33, }, /\*  450 MHz *\/ */
+/*	{ .cap =  302, .power = 46, }, /\*  575 MHz *\/ */
+/*	{ .cap =  368, .power = 61, }, /\*  700 MHz *\/ */
+/*	{ .cap =  406, .power = 76, }, /\*  775 MHz *\/ */
+/*	{ .cap =  447, .power = 93, }, /\*  850 Mhz *\/ */
+/* }; */
+
+/* static struct capacity_state cap_states_core_a57[] = { */
+/*	/\* Power per cpu *\/ */
+/*	{ .cap =  417, .power = 168, }, /\*  450 MHz *\/ */
+/*	{ .cap =  579, .power = 251, }, /\*  625 MHz *\/ */
+/*	{ .cap =  744, .power = 359, }, /\*  800 MHz *\/ */
+/*	{ .cap =  883, .power = 479, }, /\*  950 MHz *\/ */
+/*	{ .cap = 1024, .power = 616, }, /\* 1100 MHz *\/ */
+/* }; */
+
+energy-costs {
+	CPU_COST_A57: core-cost0 {
+		busy-cost-data = <
+			 417   168
+			 579   251
+			 744   359
+			 883   479
+			1023   616
+		>;
+		idle-cost-data = <
+			15
+			15
+			 0
+			 0
+		>;
+	};
+	CPU_COST_A53: core-cost1 {
+		busy-cost-data = <
+			235    33
+			302    46
+			368    61
+			406    76
+			447    93
+		>;
+		idle-cost-data = <
+			6
+			6
+			0
+			0
+		>;
+	};
+	CLUSTER_COST_A57: cluster-cost0 {
+		busy-cost-data = <
+			 417    24
+			 579    32
+			 744    43
+			 883    49
+			1024    64
+		>;
+		idle-cost-data = <
+			65
+			65
+			65
+			24
+		>;
+	};
+	CLUSTER_COST_A53: cluster-cost1 {
+		busy-cost-data = <
+			235    26
+			303    30
+			368    39
+			406    47
+			447    57
+		>;
+		idle-cost-data = <
+			56
+			56
+			56
+			17
+		>;
+	};
+};
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index 3e1a84b01b50..68816f71fa51 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -60,6 +60,28 @@
60 }; 60 };
61 }; 61 };
62 62
63 idle-states {
64 entry-method = "arm,psci";
65
66 CPU_SLEEP_0: cpu-sleep-0 {
67 compatible = "arm,idle-state";
68 arm,psci-suspend-param = <0x0010000>;
69 local-timer-stop;
70 entry-latency-us = <300>;
71 exit-latency-us = <1200>;
72 min-residency-us = <2000>;
73 };
74
75 CLUSTER_SLEEP_0: cluster-sleep-0 {
76 compatible = "arm,idle-state";
77 arm,psci-suspend-param = <0x1010000>;
78 local-timer-stop;
79 entry-latency-us = <400>;
80 exit-latency-us = <1200>;
81 min-residency-us = <2500>;
82 };
83 };
84
63 A57_0: cpu@0 { 85 A57_0: cpu@0 {
64 compatible = "arm,cortex-a57","arm,armv8"; 86 compatible = "arm,cortex-a57","arm,armv8";
65 reg = <0x0 0x0>; 87 reg = <0x0 0x0>;
@@ -67,6 +89,8 @@
67 enable-method = "psci"; 89 enable-method = "psci";
68 next-level-cache = <&A57_L2>; 90 next-level-cache = <&A57_L2>;
69 clocks = <&scpi_dvfs 0>; 91 clocks = <&scpi_dvfs 0>;
92 cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
93 sched-energy-costs = <&CPU_COST_A57 &CLUSTER_COST_A57>;
70 }; 94 };
71 95
72 A57_1: cpu@1 { 96 A57_1: cpu@1 {
@@ -76,6 +100,8 @@
76 enable-method = "psci"; 100 enable-method = "psci";
77 next-level-cache = <&A57_L2>; 101 next-level-cache = <&A57_L2>;
78 clocks = <&scpi_dvfs 0>; 102 clocks = <&scpi_dvfs 0>;
103 cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
104 sched-energy-costs = <&CPU_COST_A57 &CLUSTER_COST_A57>;
79 }; 105 };
80 106
81 A53_0: cpu@100 { 107 A53_0: cpu@100 {
@@ -85,6 +111,8 @@
85 enable-method = "psci"; 111 enable-method = "psci";
86 next-level-cache = <&A53_L2>; 112 next-level-cache = <&A53_L2>;
87 clocks = <&scpi_dvfs 1>; 113 clocks = <&scpi_dvfs 1>;
114 cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
115 sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>;
88 }; 116 };
89 117
90 A53_1: cpu@101 { 118 A53_1: cpu@101 {
@@ -94,6 +122,8 @@
94 enable-method = "psci"; 122 enable-method = "psci";
95 next-level-cache = <&A53_L2>; 123 next-level-cache = <&A53_L2>;
96 clocks = <&scpi_dvfs 1>; 124 clocks = <&scpi_dvfs 1>;
125 cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
126 sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>;
97 }; 127 };
98 128
99 A53_2: cpu@102 { 129 A53_2: cpu@102 {
@@ -103,6 +133,8 @@
103 enable-method = "psci"; 133 enable-method = "psci";
104 next-level-cache = <&A53_L2>; 134 next-level-cache = <&A53_L2>;
105 clocks = <&scpi_dvfs 1>; 135 clocks = <&scpi_dvfs 1>;
136 cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
137 sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>;
106 }; 138 };
107 139
108 A53_3: cpu@103 { 140 A53_3: cpu@103 {
@@ -112,6 +144,8 @@
112 enable-method = "psci"; 144 enable-method = "psci";
113 next-level-cache = <&A53_L2>; 145 next-level-cache = <&A53_L2>;
114 clocks = <&scpi_dvfs 1>; 146 clocks = <&scpi_dvfs 1>;
147 cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
148 sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>;
115 }; 149 };
116 150
117 A57_L2: l2-cache0 { 151 A57_L2: l2-cache0 {
@@ -121,6 +155,8 @@
121 A53_L2: l2-cache1 { 155 A53_L2: l2-cache1 {
122 compatible = "cache"; 156 compatible = "cache";
123 }; 157 };
158
159 /include/ "juno-sched-energy.dtsi"
124 }; 160 };
125 161
126 pmu_a57 { 162 pmu_a57 {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
index ce5d848251fa..7b34822d61e9 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -26,7 +26,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x0 0x40000000>;
 	};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 857eda5c7217..172402cc1a0f 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -71,7 +71,7 @@
 			     <1 10 0xf01>;
 	};
 
-	amba_apu {
+	amba_apu: amba_apu@0 {
 		compatible = "simple-bus";
 		#address-cells = <2>;
 		#size-cells = <1>;
@@ -191,7 +191,7 @@
 		};
 
 		i2c0: i2c@ff020000 {
-			compatible = "cdns,i2c-r1p10";
+			compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
 			status = "disabled";
 			interrupt-parent = <&gic>;
 			interrupts = <0 17 4>;
@@ -202,7 +202,7 @@
 		};
 
 		i2c1: i2c@ff030000 {
-			compatible = "cdns,i2c-r1p10";
+			compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
 			status = "disabled";
 			interrupt-parent = <&gic>;
 			interrupts = <0 18 4>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 79717faf2161..7dc5e58f7b7a 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -58,6 +58,8 @@ CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 2cf32e9887e1..de1aab4b5da8 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -23,6 +23,11 @@ config CRYPTO_GHASH_ARM64_CE
 	depends on ARM64 && KERNEL_MODE_NEON
 	select CRYPTO_HASH
 
+config CRYPTO_POLY_HASH_ARM64_CE
+	tristate "poly_hash (for HEH encryption mode) using ARMv8 Crypto Extensions"
+	depends on ARM64 && KERNEL_MODE_NEON
+	select CRYPTO_HASH
+
 config CRYPTO_AES_ARM64_CE
 	tristate "AES core cipher using ARMv8 Crypto Extensions"
 	depends on ARM64 && KERNEL_MODE_NEON
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index abb79b3cfcfe..f0a8f2475ea3 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -17,6 +17,9 @@ sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
 obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
 ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
 
+obj-$(CONFIG_CRYPTO_POLY_HASH_ARM64_CE) += poly-hash-ce.o
+poly-hash-ce-y := poly-hash-ce-glue.o poly-hash-ce-core.o
+
 obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
 CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
 
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 6a51dfccfe71..448b874a4826 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -294,7 +294,7 @@ static struct crypto_alg aes_algs[] = { {
 	.cra_blkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
+		.ivsize		= 0,
 		.setkey		= aes_setkey,
 		.encrypt	= ecb_encrypt,
 		.decrypt	= ecb_decrypt,
@@ -371,7 +371,7 @@ static struct crypto_alg aes_algs[] = { {
 	.cra_ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
+		.ivsize		= 0,
 		.setkey		= ablk_set_key,
 		.encrypt	= ablk_encrypt,
 		.decrypt	= ablk_decrypt,
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index c53dbeae79f2..838dad5c209f 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
193 cbz w6, .Lcbcencloop 193 cbz w6, .Lcbcencloop
194 194
195 ld1 {v0.16b}, [x5] /* get iv */ 195 ld1 {v0.16b}, [x5] /* get iv */
196 enc_prepare w3, x2, x5 196 enc_prepare w3, x2, x6
197 197
198.Lcbcencloop: 198.Lcbcencloop:
199 ld1 {v1.16b}, [x1], #16 /* get next pt block */ 199 ld1 {v1.16b}, [x1], #16 /* get next pt block */
200 eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */ 200 eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
201 encrypt_block v0, w3, x2, x5, w6 201 encrypt_block v0, w3, x2, x6, w7
202 st1 {v0.16b}, [x0], #16 202 st1 {v0.16b}, [x0], #16
203 subs w4, w4, #1 203 subs w4, w4, #1
204 bne .Lcbcencloop 204 bne .Lcbcencloop
205 st1 {v0.16b}, [x5] /* return iv */
205 ret 206 ret
206AES_ENDPROC(aes_cbc_encrypt) 207AES_ENDPROC(aes_cbc_encrypt)
207 208
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
211 cbz w6, .LcbcdecloopNx 212 cbz w6, .LcbcdecloopNx
212 213
213 ld1 {v7.16b}, [x5] /* get iv */ 214 ld1 {v7.16b}, [x5] /* get iv */
214 dec_prepare w3, x2, x5 215 dec_prepare w3, x2, x6
215 216
216.LcbcdecloopNx: 217.LcbcdecloopNx:
217#if INTERLEAVE >= 2 218#if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
248.Lcbcdecloop: 249.Lcbcdecloop:
249 ld1 {v1.16b}, [x1], #16 /* get next ct block */ 250 ld1 {v1.16b}, [x1], #16 /* get next ct block */
250 mov v0.16b, v1.16b /* ...and copy to v0 */ 251 mov v0.16b, v1.16b /* ...and copy to v0 */
251 decrypt_block v0, w3, x2, x5, w6 252 decrypt_block v0, w3, x2, x6, w7
252 eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */ 253 eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
253 mov v7.16b, v1.16b /* ct is next iv */ 254 mov v7.16b, v1.16b /* ct is next iv */
254 st1 {v0.16b}, [x0], #16 255 st1 {v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
256 bne .Lcbcdecloop 257 bne .Lcbcdecloop
257.Lcbcdecout: 258.Lcbcdecout:
258 FRAME_POP 259 FRAME_POP
260 st1 {v7.16b}, [x5] /* return iv */
259 ret 261 ret
260AES_ENDPROC(aes_cbc_decrypt) 262AES_ENDPROC(aes_cbc_decrypt)
261 263
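
The two CBC hunks above fix a register clash: the round-key pointer now lives in x6 so that x5 keeps pointing at the IV buffer, and a new st1 store writes the running IV back on exit. That write-back is what lets a request split across several calls chain correctly. A hedged C reference of the encrypt path (aes_encrypt_block() is again a hypothetical stand-in):

    #include <stdint.h>
    #include <string.h>

    /* CBC-encrypt reference mirroring the fixed aes_cbc_encrypt: iv[] holds
     * the running IV and receives the last ciphertext block on return, the
     * counterpart of the new "st1 {v0.16b}, [x5]" store. */
    static void cbc_encrypt_ref(void (*aes_encrypt_block)(uint8_t *out, const uint8_t *in),
                                uint8_t *dst, const uint8_t *src,
                                unsigned int blocks, uint8_t iv[16])
    {
        uint8_t buf[16];
        unsigned int i;

        while (blocks--) {
            for (i = 0; i < 16; i++)
                buf[i] = iv[i] ^ src[i];    /* xor plaintext with IV */
            aes_encrypt_block(iv, buf);     /* ciphertext becomes next IV */
            memcpy(dst, iv, 16);
            src += 16;
            dst += 16;
        }
        /* iv[] now holds the IV for the next part of the request */
    }
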
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
267 269
268AES_ENTRY(aes_ctr_encrypt) 270AES_ENTRY(aes_ctr_encrypt)
269 FRAME_PUSH 271 FRAME_PUSH
270 cbnz w6, .Lctrfirst /* 1st time around? */ 272 cbz w6, .Lctrnotfirst /* 1st time around? */
271 umov x5, v4.d[1] /* keep swabbed ctr in reg */
272 rev x5, x5
273#if INTERLEAVE >= 2
274 cmn w5, w4 /* 32 bit overflow? */
275 bcs .Lctrinc
276 add x5, x5, #1 /* increment BE ctr */
277 b .LctrincNx
278#else
279 b .Lctrinc
280#endif
281.Lctrfirst:
282 enc_prepare w3, x2, x6 273 enc_prepare w3, x2, x6
283 ld1 {v4.16b}, [x5] 274 ld1 {v4.16b}, [x5]
284 umov x5, v4.d[1] /* keep swabbed ctr in reg */ 275
285 rev x5, x5 276.Lctrnotfirst:
277 umov x8, v4.d[1] /* keep swabbed ctr in reg */
278 rev x8, x8
286#if INTERLEAVE >= 2 279#if INTERLEAVE >= 2
287 cmn w5, w4 /* 32 bit overflow? */ 280 cmn w8, w4 /* 32 bit overflow? */
288 bcs .Lctrloop 281 bcs .Lctrloop
289.LctrloopNx: 282.LctrloopNx:
290 subs w4, w4, #INTERLEAVE 283 subs w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
292#if INTERLEAVE == 2 285#if INTERLEAVE == 2
293 mov v0.8b, v4.8b 286 mov v0.8b, v4.8b
294 mov v1.8b, v4.8b 287 mov v1.8b, v4.8b
295 rev x7, x5 288 rev x7, x8
296 add x5, x5, #1 289 add x8, x8, #1
297 ins v0.d[1], x7 290 ins v0.d[1], x7
298 rev x7, x5 291 rev x7, x8
299 add x5, x5, #1 292 add x8, x8, #1
300 ins v1.d[1], x7 293 ins v1.d[1], x7
301 ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */ 294 ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
302 do_encrypt_block2x 295 do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
305 st1 {v0.16b-v1.16b}, [x0], #32 298 st1 {v0.16b-v1.16b}, [x0], #32
306#else 299#else
307 ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */ 300 ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
308 dup v7.4s, w5 301 dup v7.4s, w8
309 mov v0.16b, v4.16b 302 mov v0.16b, v4.16b
310 add v7.4s, v7.4s, v8.4s 303 add v7.4s, v7.4s, v8.4s
311 mov v1.16b, v4.16b 304 mov v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
323 eor v2.16b, v7.16b, v2.16b 316 eor v2.16b, v7.16b, v2.16b
324 eor v3.16b, v5.16b, v3.16b 317 eor v3.16b, v5.16b, v3.16b
325 st1 {v0.16b-v3.16b}, [x0], #64 318 st1 {v0.16b-v3.16b}, [x0], #64
326 add x5, x5, #INTERLEAVE 319 add x8, x8, #INTERLEAVE
327#endif 320#endif
328 cbz w4, .LctroutNx 321 rev x7, x8
329.LctrincNx:
330 rev x7, x5
331 ins v4.d[1], x7 322 ins v4.d[1], x7
323 cbz w4, .Lctrout
332 b .LctrloopNx 324 b .LctrloopNx
333.LctroutNx:
334 sub x5, x5, #1
335 rev x7, x5
336 ins v4.d[1], x7
337 b .Lctrout
338.Lctr1x: 325.Lctr1x:
339 adds w4, w4, #INTERLEAVE 326 adds w4, w4, #INTERLEAVE
340 beq .Lctrout 327 beq .Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
342.Lctrloop: 329.Lctrloop:
343 mov v0.16b, v4.16b 330 mov v0.16b, v4.16b
344 encrypt_block v0, w3, x2, x6, w7 331 encrypt_block v0, w3, x2, x6, w7
332
333 adds x8, x8, #1 /* increment BE ctr */
334 rev x7, x8
335 ins v4.d[1], x7
336 bcs .Lctrcarry /* overflow? */
337
338.Lctrcarrydone:
345 subs w4, w4, #1 339 subs w4, w4, #1
346 bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */ 340 bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
347 ld1 {v3.16b}, [x1], #16 341 ld1 {v3.16b}, [x1], #16
348 eor v3.16b, v0.16b, v3.16b 342 eor v3.16b, v0.16b, v3.16b
349 st1 {v3.16b}, [x0], #16 343 st1 {v3.16b}, [x0], #16
350 beq .Lctrout 344 bne .Lctrloop
351.Lctrinc: 345
352 adds x5, x5, #1 /* increment BE ctr */ 346.Lctrout:
353 rev x7, x5 347 st1 {v4.16b}, [x5] /* return next CTR value */
354 ins v4.d[1], x7 348 FRAME_POP
355 bcc .Lctrloop /* no overflow? */ 349 ret
356 umov x7, v4.d[0] /* load upper word of ctr */ 350
357 rev x7, x7 /* ... to handle the carry */
358 add x7, x7, #1
359 rev x7, x7
360 ins v4.d[0], x7
361 b .Lctrloop
362.Lctrhalfblock: 351.Lctrhalfblock:
363 ld1 {v3.8b}, [x1] 352 ld1 {v3.8b}, [x1]
364 eor v3.8b, v0.8b, v3.8b 353 eor v3.8b, v0.8b, v3.8b
365 st1 {v3.8b}, [x0] 354 st1 {v3.8b}, [x0]
366.Lctrout:
367 FRAME_POP 355 FRAME_POP
368 ret 356 ret
357
358.Lctrcarry:
359 umov x7, v4.d[0] /* load upper word of ctr */
360 rev x7, x7 /* ... to handle the carry */
361 add x7, x7, #1
362 rev x7, x7
363 ins v4.d[0], x7
364 b .Lctrcarrydone
369AES_ENDPROC(aes_ctr_encrypt) 365AES_ENDPROC(aes_ctr_encrypt)
370 .ltorg 366 .ltorg
371 367
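
The CTR rewrite above keeps the byte-swapped low counter word in x8 (instead of overloading x5, the IV pointer), stores the next counter value back through x5 at .Lctrout, and routes 64-bit overflow through the new .Lctrcarry path so the carry propagates into the high word of the counter. A C model of that increment, assuming a little-endian host as on arm64:

    #include <stdint.h>

    /* 128-bit big-endian counter increment, as .Lctrloop/.Lctrcarry do it:
     * bump the low 64 bits and, if they wrap to zero, carry into the high
     * 64 bits. ctr[0]/ctr[1] are the big-endian high/low words of v4. */
    static void ctr128_increment(uint64_t ctr[2])
    {
        uint64_t lo = __builtin_bswap64(ctr[1]) + 1;

        ctr[1] = __builtin_bswap64(lo);
        if (lo == 0)    /* low word overflowed: the .Lctrcarry case */
            ctr[0] = __builtin_bswap64(__builtin_bswap64(ctr[0]) + 1);
    }
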
diff --git a/arch/arm64/crypto/poly-hash-ce-core.S b/arch/arm64/crypto/poly-hash-ce-core.S
new file mode 100644
index 000000000000..8ccb544c5526
--- /dev/null
+++ b/arch/arm64/crypto/poly-hash-ce-core.S
@@ -0,0 +1,163 @@
1/*
2 * Accelerated poly_hash implementation with ARMv8 PMULL instructions.
3 *
4 * Based on ghash-ce-core.S.
5 *
6 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
7 * Copyright (C) 2017 Google, Inc. <ebiggers@google.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 */
13
14#include <linux/linkage.h>
15#include <asm/assembler.h>
16
17 KEY .req v0
18 KEY2 .req v1
19 T1 .req v2
20 T2 .req v3
21 GSTAR .req v4
22 XL .req v5
23 XM .req v6
24 XH .req v7
25
26 .text
27 .arch armv8-a+crypto
28
29 /* 16-byte aligned (2**4 = 16); not required, but might as well */
30 .align 4
31.Lgstar:
32 .quad 0x87, 0x87
33
34/*
35 * void pmull_poly_hash_update(le128 *digest, const le128 *key,
36 * const u8 *src, unsigned int blocks,
37 * unsigned int partial);
38 */
39ENTRY(pmull_poly_hash_update)
40
41 /* Load digest into XL */
42 ld1 {XL.16b}, [x0]
43
44 /* Load key into KEY */
45 ld1 {KEY.16b}, [x1]
46
47 /* Load g*(x) = g(x) + x^128 = x^7 + x^2 + x + 1 into both halves of
48 * GSTAR */
49 adr x1, .Lgstar
50 ld1 {GSTAR.2d}, [x1]
51
52 /* Set KEY2 to (KEY[1]+KEY[0]):(KEY[1]+KEY[0]). This is needed for
53 * Karatsuba multiplication. */
54 ext KEY2.16b, KEY.16b, KEY.16b, #8
55 eor KEY2.16b, KEY2.16b, KEY.16b
56
57 /* If 'partial' is nonzero, then we're finishing a pending block and
58 * should go right to the multiplication. */
59 cbnz w4, 1f
60
610:
62 /* Add the next block from 'src' to the digest */
63 ld1 {T1.16b}, [x2], #16
64 eor XL.16b, XL.16b, T1.16b
65 sub w3, w3, #1
66
671:
68 /*
69 * Multiply the current 128-bit digest (a1:a0, in XL) by the 128-bit key
70 * (b1:b0, in KEY) using Karatsuba multiplication.
71 */
72
73 /* T1 = (a1+a0):(a1+a0) */
74 ext T1.16b, XL.16b, XL.16b, #8
75 eor T1.16b, T1.16b, XL.16b
76
77 /* XH = a1 * b1 */
78 pmull2 XH.1q, XL.2d, KEY.2d
79
80 /* XL = a0 * b0 */
81 pmull XL.1q, XL.1d, KEY.1d
82
83 /* XM = (a1+a0) * (b1+b0) */
84 pmull XM.1q, T1.1d, KEY2.1d
85
86 /* XM += (XH[0]:XL[1]) + XL + XH */
87 ext T1.16b, XL.16b, XH.16b, #8
88 eor T2.16b, XL.16b, XH.16b
89 eor XM.16b, XM.16b, T1.16b
90 eor XM.16b, XM.16b, T2.16b
91
92 /*
93 * Now the 256-bit product is in XH[1]:XM:XL[0]. It represents a
94 * polynomial over GF(2) with degree as large as 255. We need to
95 * compute its remainder modulo g(x) = x^128+x^7+x^2+x+1. For this it
96 * is sufficient to compute the remainder of the high half 'c(x)x^128'
97 * and add it to the low half. To reduce the high half we use the Barrett
98 * reduction method. The basic idea is that we can express the
99 * remainder p(x) as g(x)q(x) mod x^128, where q(x) = (c(x)x^128)/g(x).
100 * As detailed in [1], to avoid having to divide by g(x) at runtime the
101 * following equivalent expression can be derived:
102 *
103 * p(x) = [ g*(x)((c(x)q+(x))/x^128) ] mod x^128
104 *
105 * where g*(x) = x^128+g(x) = x^7+x^2+x+1, and q+(x) = x^256/g(x) = g(x)
106 * in this case. This is also equivalent to:
107 *
108 * p(x) = [ g*(x)((c(x)(x^128 + g*(x)))/x^128) ] mod x^128
109 * = [ g*(x)(c(x) + (c(x)g*(x))/x^128) ] mod x^128
110 *
111 * Since deg g*(x) < 64:
112 *
113 * p(x) = [ g*(x)(c(x) + ((c(x)/x^64)g*(x))/x^64) ] mod x^128
114 * = [ g*(x)((c(x)/x^64)x^64 + (c(x) mod x^64) +
115 * ((c(x)/x^64)g*(x))/x^64) ] mod x^128
116 *
117 * Letting t(x) = g*(x)(c(x)/x^64):
118 *
119 * p(x) = [ t(x)x^64 + g*(x)((c(x) mod x^64) + t(x)/x^64) ] mod x^128
120 *
121 * Therefore, to do the reduction we only need to issue two 64-bit =>
122 * 128-bit carryless multiplications: g*(x) times c(x)/x^64, and g*(x)
123 * times ((c(x) mod x^64) + t(x)/x^64). (Multiplication by x^64 doesn't
124 * count since it is simply a shift or move.)
125 *
126 * An alternate reduction method, also based on Barrett reduction and
127 * described in [1], uses only shifts and XORs --- no multiplications.
128 * However, the method with multiplications requires fewer instructions
129 * and is faster on processors with fast carryless multiplication.
130 *
131 * [1] "Intel Carry-Less Multiplication Instruction and its Usage for
132 * Computing the GCM Mode",
133 * https://software.intel.com/sites/default/files/managed/72/cc/clmul-wp-rev-2.02-2014-04-20.pdf
134 */
135
136 /* 256-bit product is XH[1]:XM:XL[0], so c(x) is XH[1]:XM[1] */
137
138 /* T1 = t(x) = g*(x)(c(x)/x^64) */
139 pmull2 T1.1q, GSTAR.2d, XH.2d
140
141 /* T2 = g*(x)((c(x) mod x^64) + t(x)/x^64) */
142 eor T2.16b, XM.16b, T1.16b
143 pmull2 T2.1q, GSTAR.2d, T2.2d
144
145 /* Make XL[0] be the low half of the 128-bit result by adding the low 64
146 * bits of the T2 term to what was already there. The 't(x)x^64' term
147 * makes no difference, so skip it. */
148 eor XL.16b, XL.16b, T2.16b
149
150 /* Make XL[1] be the high half of the 128-bit result by adding the high
151 * 64 bits of the 't(x)x^64' and T2 terms to what was already in XM[0],
152 * then moving XM[0] to XL[1]. */
153 eor XM.16b, XM.16b, T1.16b
154 ext T2.16b, T2.16b, T2.16b, #8
155 eor XM.16b, XM.16b, T2.16b
156 mov XL.d[1], XM.d[0]
157
158 /* If more blocks remain, then loop back to process the next block;
159 * else, store the digest and return. */
160 cbnz w3, 0b
161 st1 {XL.16b}, [x0]
162 ret
163ENDPROC(pmull_poly_hash_update)
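
For readers who prefer C, here is a hedged bit-serial reference of what pmull_poly_hash_update() computes (reference only — the assembly reaches the same result with PMULL, Karatsuba, and the Barrett reduction derived in the comment above; le128_t and gf128mul_ble_ref() are local illustration names):

    #include <stdint.h>
    #include <string.h>

    typedef struct { uint64_t lo, hi; } le128_t;  /* bit i of hi:lo = coeff of x^i */

    /* Bit-serial multiply in GF(2^128) modulo g(x) = x^128 + x^7 + x^2 + x + 1.
     * 0x87 is g*(x) = x^7 + x^2 + x + 1, folded in when x^127 shifts out. */
    static void gf128mul_ble_ref(le128_t *r, const le128_t *a, const le128_t *b)
    {
        le128_t p = { 0, 0 };
        le128_t t = *a;             /* t = a * x^i at step i */
        uint64_t carry;
        int i;

        for (i = 0; i < 128; i++) {
            if ((i < 64 ? b->lo >> i : b->hi >> (i - 64)) & 1) {
                p.lo ^= t.lo;       /* p += a * x^i */
                p.hi ^= t.hi;
            }
            carry = t.hi >> 63;     /* multiply t by x, reduce by g(x) */
            t.hi = (t.hi << 1) | (t.lo >> 63);
            t.lo = (t.lo << 1) ^ (carry ? 0x87 : 0);
        }
        *r = p;
    }

    /* Horner evaluation, as the assembly loop does it:
     * digest = (digest + block) * key for each 16-byte block.
     * Assumes a little-endian host, as on arm64. */
    static void poly_hash_blocks_ref(le128_t *digest, const le128_t *key,
                                     const uint8_t *src, unsigned int blocks)
    {
        le128_t blk;

        while (blocks--) {
            memcpy(&blk, src, 16);
            src += 16;
            digest->lo ^= blk.lo;   /* addition in GF(2^128) is XOR */
            digest->hi ^= blk.hi;
            gf128mul_ble_ref(digest, digest, key);
        }
    }
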
diff --git a/arch/arm64/crypto/poly-hash-ce-glue.c b/arch/arm64/crypto/poly-hash-ce-glue.c
new file mode 100644
index 000000000000..e195740c9ecf
--- /dev/null
+++ b/arch/arm64/crypto/poly-hash-ce-glue.c
@@ -0,0 +1,166 @@
1/*
2 * Accelerated poly_hash implementation with ARMv8 PMULL instructions.
3 *
4 * Based on ghash-ce-glue.c.
5 *
6 * poly_hash is part of the HEH (Hash-Encrypt-Hash) encryption mode, proposed in
7 * Internet Draft https://tools.ietf.org/html/draft-cope-heh-01.
8 *
9 * poly_hash is very similar to GHASH: both algorithms are keyed hashes which
10 * interpret their input data as coefficients of a polynomial over GF(2^128),
11 * then calculate a hash value by evaluating that polynomial at the point given
12 * by the key, e.g. using Horner's rule. The difference is that poly_hash uses
13 * the more natural "ble" convention to represent GF(2^128) elements, whereas
14 * GHASH uses the less natural "lle" convention (see include/crypto/gf128mul.h).
15 * The ble convention makes it simpler to implement GF(2^128) multiplication.
16 *
17 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
18 * Copyright (C) 2017 Google Inc. <ebiggers@google.com>
19 *
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License version 2 as published
22 * by the Free Software Foundation.
23 */
24
25#include <asm/neon.h>
26#include <crypto/b128ops.h>
27#include <crypto/internal/hash.h>
28#include <linux/cpufeature.h>
29#include <linux/crypto.h>
30#include <linux/module.h>
31
32/*
33 * Note: in this algorithm we currently use 'le128' to represent GF(2^128)
34 * elements, even though poly_hash-generic uses 'be128'. Both types are
35 * technically "wrong" because the elements are actually in 'ble' format, and there
36 * should be a ble type to represent this --- as well as lle, bbe, and lbe types
37 * for the other conventions for representing GF(2^128) elements. But
38 * practically it doesn't matter which type we choose here, so we just use le128
39 * since it's arguably more accurate, while poly_hash-generic still has to use
40 * be128 because the generic GF(2^128) multiplication functions all take be128.
41 */
42
43struct poly_hash_desc_ctx {
44 le128 digest;
45 unsigned int count;
46};
47
48asmlinkage void pmull_poly_hash_update(le128 *digest, const le128 *key,
49 const u8 *src, unsigned int blocks,
50 unsigned int partial);
51
52static int poly_hash_setkey(struct crypto_shash *tfm,
53 const u8 *key, unsigned int keylen)
54{
55 if (keylen != sizeof(le128)) {
56 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
57 return -EINVAL;
58 }
59
60 memcpy(crypto_shash_ctx(tfm), key, sizeof(le128));
61 return 0;
62}
63
64static int poly_hash_init(struct shash_desc *desc)
65{
66 struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc);
67
68 ctx->digest = (le128) { 0 };
69 ctx->count = 0;
70 return 0;
71}
72
73static int poly_hash_update(struct shash_desc *desc, const u8 *src,
74 unsigned int len)
75{
76 struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc);
77 unsigned int partial = ctx->count % sizeof(le128);
78 u8 *dst = (u8 *)&ctx->digest + partial;
79
80 ctx->count += len;
81
82 /* Finishing at least one block? */
83 if (partial + len >= sizeof(le128)) {
84 const le128 *key = crypto_shash_ctx(desc->tfm);
85
86 if (partial) {
87 /* Finish the pending block. */
88 unsigned int n = sizeof(le128) - partial;
89
90 len -= n;
91 do {
92 *dst++ ^= *src++;
93 } while (--n);
94 }
95
96 /*
97 * Do the real work. If 'partial' is nonzero, this starts by
98 * multiplying 'digest' by 'key'. Then for each additional full
99 * block it adds the block to 'digest' and multiplies by 'key'.
100 */
101 kernel_neon_begin_partial(8);
102 pmull_poly_hash_update(&ctx->digest, key, src,
103 len / sizeof(le128), partial);
104 kernel_neon_end();
105
106 src += len - (len % sizeof(le128));
107 len %= sizeof(le128);
108 dst = (u8 *)&ctx->digest;
109 }
110
111 /* Continue adding the next block to 'digest'. */
112 while (len--)
113 *dst++ ^= *src++;
114 return 0;
115}
116
117static int poly_hash_final(struct shash_desc *desc, u8 *out)
118{
119 struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc);
120 unsigned int partial = ctx->count % sizeof(le128);
121
122 /* Finish the last block if needed. */
123 if (partial) {
124 const le128 *key = crypto_shash_ctx(desc->tfm);
125
126 kernel_neon_begin_partial(8);
127 pmull_poly_hash_update(&ctx->digest, key, NULL, 0, partial);
128 kernel_neon_end();
129 }
130
131 memcpy(out, &ctx->digest, sizeof(le128));
132 return 0;
133}
134
135static struct shash_alg poly_hash_alg = {
136 .digestsize = sizeof(le128),
137 .init = poly_hash_init,
138 .update = poly_hash_update,
139 .final = poly_hash_final,
140 .setkey = poly_hash_setkey,
141 .descsize = sizeof(struct poly_hash_desc_ctx),
142 .base = {
143 .cra_name = "poly_hash",
144 .cra_driver_name = "poly_hash-ce",
145 .cra_priority = 300,
146 .cra_ctxsize = sizeof(le128),
147 .cra_module = THIS_MODULE,
148 },
149};
150
151static int __init poly_hash_ce_mod_init(void)
152{
153 return crypto_register_shash(&poly_hash_alg);
154}
155
156static void __exit poly_hash_ce_mod_exit(void)
157{
158 crypto_unregister_shash(&poly_hash_alg);
159}
160
161MODULE_DESCRIPTION("Polynomial evaluation hash using ARMv8 Crypto Extensions");
162MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
163MODULE_LICENSE("GPL v2");
164
165module_cpu_feature_match(PMULL, poly_hash_ce_mod_init);
166module_exit(poly_hash_ce_mod_exit);
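
A hedged sketch of how other kernel code could drive this driver once registered — the standard shash API, with error paths trimmed (the names below are the stock 4.4 crypto API; nothing here is from the patch itself):

    #include <linux/err.h>
    #include <crypto/hash.h>

    /* Compute the poly_hash digest of 'len' bytes; the 16-byte key is the
     * GF(2^128) evaluation point. */
    static int poly_hash_digest_example(const u8 *key, const u8 *data,
                                        unsigned int len, u8 *out)
    {
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("poly_hash", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        err = crypto_shash_setkey(tfm, key, 16);
        if (!err) {
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            desc->flags = 0;
            err = crypto_shash_digest(desc, data, len, out);
        }
        crypto_free_shash(tfm);
        return err;
    }
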
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index aee323b13802..0a11cd502dbc 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -22,9 +22,9 @@
22#define ACPI_MADT_GICC_LENGTH \ 22#define ACPI_MADT_GICC_LENGTH \
23 (acpi_gbl_FADT.header.revision < 6 ? 76 : 80) 23 (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
24 24
25#define BAD_MADT_GICC_ENTRY(entry, end) \ 25#define BAD_MADT_GICC_ENTRY(entry, end) \
26 (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \ 26 (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
27 (entry)->header.length != ACPI_MADT_GICC_LENGTH) 27 (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
28 28
29/* Basic configuration for ACPI */ 29/* Basic configuration for ACPI */
30#ifdef CONFIG_ACPI 30#ifdef CONFIG_ACPI
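
The reordered macro validates the revision-dependent entry length before using it as the bounds term, so sizeof(*entry) — which can be larger than what older firmware actually provides — never enters the comparison. Expanded as plain C for readability (same logic, illustrative function name):

    #include <linux/acpi.h>

    static bool bad_madt_gicc_entry(struct acpi_madt_generic_interrupt *entry,
                                    unsigned long end)
    {
        if (!entry)
            return true;
        /* check the advertised length first ... */
        if (entry->header.length != ACPI_MADT_GICC_LENGTH)
            return true;
        /* ... then use that same length for the bounds check */
        return (unsigned long)entry + ACPI_MADT_GICC_LENGTH > end;
    }
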
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 8746ff6abd77..55101bd86b98 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -2,6 +2,7 @@
2#define __ASM_ALTERNATIVE_H 2#define __ASM_ALTERNATIVE_H
3 3
4#include <asm/cpufeature.h> 4#include <asm/cpufeature.h>
5#include <asm/insn.h>
5 6
6#ifndef __ASSEMBLY__ 7#ifndef __ASSEMBLY__
7 8
@@ -90,34 +91,55 @@ void apply_alternatives(void *start, size_t length);
90.endm 91.endm
91 92
92/* 93/*
93 * Begin an alternative code sequence. 94 * Alternative sequences
95 *
96 * The code for the case where the capability is not present will be
97 * assembled and linked as normal. There are no restrictions on this
98 * code.
99 *
100 * The code for the case where the capability is present will be
101 * assembled into a special section to be used for dynamic patching.
102 * Code for that case must:
103 *
104 * 1. Be exactly the same length (in bytes) as the default code
105 * sequence.
94 * 106 *
95 * The code that follows this macro will be assembled and linked as 107 * 2. Not contain a branch target that is used outside of the
96 * normal. There are no restrictions on this code. 108 * alternative sequence it is defined in (branches into an
109 * alternative sequence are not fixed up).
110 */
111
112/*
113 * Begin an alternative code sequence.
97 */ 114 */
98.macro alternative_if_not cap 115.macro alternative_if_not cap
116 .set .Lasm_alt_mode, 0
99 .pushsection .altinstructions, "a" 117 .pushsection .altinstructions, "a"
100 altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f 118 altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
101 .popsection 119 .popsection
102661: 120661:
103.endm 121.endm
104 122
123.macro alternative_if cap
124 .set .Lasm_alt_mode, 1
125 .pushsection .altinstructions, "a"
126 altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
127 .popsection
128 .pushsection .altinstr_replacement, "ax"
129 .align 2 /* So GAS knows label 661 is suitably aligned */
130661:
131.endm
132
105/* 133/*
106 * Provide the alternative code sequence. 134 * Provide the other half of the alternative code sequence.
107 *
108 * The code that follows this macro is assembled into a special
109 * section to be used for dynamic patching. Code that follows this
110 * macro must:
111 *
112 * 1. Be exactly the same length (in bytes) as the default code
113 * sequence.
114 *
115 * 2. Not contain a branch target that is used outside of the
116 * alternative sequence it is defined in (branches into an
117 * alternative sequence are not fixed up).
118 */ 135 */
119.macro alternative_else 136.macro alternative_else
120662: .pushsection .altinstr_replacement, "ax" 137662:
138 .if .Lasm_alt_mode==0
139 .pushsection .altinstr_replacement, "ax"
140 .else
141 .popsection
142 .endif
121663: 143663:
122.endm 144.endm
123 145
@@ -125,11 +147,25 @@ void apply_alternatives(void *start, size_t length);
125 * Complete an alternative code sequence. 147 * Complete an alternative code sequence.
126 */ 148 */
127.macro alternative_endif 149.macro alternative_endif
128664: .popsection 150664:
151 .if .Lasm_alt_mode==0
152 .popsection
153 .endif
129 .org . - (664b-663b) + (662b-661b) 154 .org . - (664b-663b) + (662b-661b)
130 .org . - (662b-661b) + (664b-663b) 155 .org . - (662b-661b) + (664b-663b)
131.endm 156.endm
132 157
158/*
159 * Provides a trivial alternative or default sequence consisting solely
160 * of NOPs. The number of NOPs is chosen automatically to match the
161 * previous case.
162 */
163.macro alternative_else_nop_endif
164alternative_else
165 nops (662b-661b) / AARCH64_INSN_SIZE
166alternative_endif
167.endm
168
133#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \ 169#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
134 alternative_insn insn1, insn2, cap, IS_ENABLED(cfg) 170 alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
135 171
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
new file mode 100644
index 000000000000..be2d2347d995
--- /dev/null
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -0,0 +1,13 @@
1#ifndef __ASM_ASM_UACCESS_H
2#define __ASM_ASM_UACCESS_H
3
4/*
5 * Remove the address tag from a virtual address, if present.
6 */
7 .macro clear_address_tag, dst, addr
8 tst \addr, #(1 << 55)
9 bic \dst, \addr, #(0xff << 56)
10 csel \dst, \dst, \addr, eq
11 .endm
12
13#endif
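
The csel in clear_address_tag is easy to misread, so here is a C equivalent: the tag byte is stripped only when bit 55 is clear (a TTBR0/user address); kernel addresses pass through untouched (illustrative helper, not part of the patch):

    #include <stdint.h>

    static inline uint64_t clear_address_tag_ref(uint64_t addr)
    {
        if (addr & (1ULL << 55))
            return addr;                    /* TTBR1/kernel address: as-is */
        return addr & ~(0xffULL << 56);     /* user address: drop the tag */
    }
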
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index aeb4554b3af3..d8855ca6068a 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -108,6 +108,15 @@
108 .endm 108 .endm
109 109
110/* 110/*
111 * NOP sequence
112 */
113 .macro nops, num
114 .rept \num
115 nop
116 .endr
117 .endm
118
119/*
111 * Emit an entry into the exception table 120 * Emit an entry into the exception table
112 */ 121 */
113 .macro _asm_extable, from, to 122 .macro _asm_extable, from, to
@@ -383,15 +392,11 @@ alternative_endif
383 */ 392 */
384 .macro post_ttbr0_update_workaround 393 .macro post_ttbr0_update_workaround
385#ifdef CONFIG_CAVIUM_ERRATUM_27456 394#ifdef CONFIG_CAVIUM_ERRATUM_27456
386alternative_if_not ARM64_WORKAROUND_CAVIUM_27456 395alternative_if ARM64_WORKAROUND_CAVIUM_27456
387 nop
388 nop
389 nop
390alternative_else
391 ic iallu 396 ic iallu
392 dsb nsh 397 dsb nsh
393 isb 398 isb
394alternative_endif 399alternative_else_nop_endif
395#endif 400#endif
396 .endm 401 .endm
397 402
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 9622eb48f894..0671711b46ab 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -20,6 +20,9 @@
20 20
21#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
22 22
23#define __nops(n) ".rept " #n "\nnop\n.endr\n"
24#define nops(n) asm volatile(__nops(n))
25
23#define sev() asm volatile("sev" : : : "memory") 26#define sev() asm volatile("sev" : : : "memory")
24#define wfe() asm volatile("wfe" : : : "memory") 27#define wfe() asm volatile("wfe" : : : "memory")
25#define wfi() asm volatile("wfi" : : : "memory") 28#define wfi() asm volatile("wfi" : : : "memory")
@@ -41,23 +44,33 @@
41 44
42#define smp_store_release(p, v) \ 45#define smp_store_release(p, v) \
43do { \ 46do { \
47 union { typeof(*p) __val; char __c[1]; } __u = \
48 { .__val = (__force typeof(*p)) (v) }; \
44 compiletime_assert_atomic_type(*p); \ 49 compiletime_assert_atomic_type(*p); \
45 switch (sizeof(*p)) { \ 50 switch (sizeof(*p)) { \
46 case 1: \ 51 case 1: \
47 asm volatile ("stlrb %w1, %0" \ 52 asm volatile ("stlrb %w1, %0" \
48 : "=Q" (*p) : "r" (v) : "memory"); \ 53 : "=Q" (*p) \
54 : "r" (*(__u8 *)__u.__c) \
55 : "memory"); \
49 break; \ 56 break; \
50 case 2: \ 57 case 2: \
51 asm volatile ("stlrh %w1, %0" \ 58 asm volatile ("stlrh %w1, %0" \
52 : "=Q" (*p) : "r" (v) : "memory"); \ 59 : "=Q" (*p) \
60 : "r" (*(__u16 *)__u.__c) \
61 : "memory"); \
53 break; \ 62 break; \
54 case 4: \ 63 case 4: \
55 asm volatile ("stlr %w1, %0" \ 64 asm volatile ("stlr %w1, %0" \
56 : "=Q" (*p) : "r" (v) : "memory"); \ 65 : "=Q" (*p) \
66 : "r" (*(__u32 *)__u.__c) \
67 : "memory"); \
57 break; \ 68 break; \
58 case 8: \ 69 case 8: \
59 asm volatile ("stlr %1, %0" \ 70 asm volatile ("stlr %1, %0" \
60 : "=Q" (*p) : "r" (v) : "memory"); \ 71 : "=Q" (*p) \
72 : "r" (*(__u64 *)__u.__c) \
73 : "memory"); \
61 break; \ 74 break; \
62 } \ 75 } \
63} while (0) 76} while (0)
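
The union in the reworked smp_store_release() exists so the macro accepts any 1-, 2-, 4- or 8-byte type — pointers and small structs included — by feeding the store operand through a byte view rather than an integer cast. Its release semantics pair with an acquire load on the reader side; a minimal C11 model of that pairing (a sketch, not kernel code):

    #include <stdatomic.h>

    static _Atomic(int *) shared;

    void publish(int *data)
    {
        *data = 42;                             /* plain write, then ... */
        atomic_store_explicit(&shared, data,
                              memory_order_release);  /* ... publish it */
    }

    int consume(void)
    {
        int *p = atomic_load_explicit(&shared, memory_order_acquire);

        return p ? *p : -1;                     /* sees 42 whenever p is set */
    }
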
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 22dda613f9c9..22aabdeacc24 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -155,5 +155,6 @@ int set_memory_ro(unsigned long addr, int numpages);
155int set_memory_rw(unsigned long addr, int numpages); 155int set_memory_rw(unsigned long addr, int numpages);
156int set_memory_x(unsigned long addr, int numpages); 156int set_memory_x(unsigned long addr, int numpages);
157int set_memory_nx(unsigned long addr, int numpages); 157int set_memory_nx(unsigned long addr, int numpages);
158int set_memory_valid(unsigned long addr, unsigned long size, int enable);
158 159
159#endif 160#endif
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 510c7b404454..270c6b7b0a61 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -48,7 +48,7 @@ static inline unsigned long __xchg_case_##name(unsigned long x, \
48 " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \ 48 " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
49 " nop\n" \ 49 " nop\n" \
50 " " #nop_lse) \ 50 " " #nop_lse) \
51 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) \ 51 : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
52 : "r" (x) \ 52 : "r" (x) \
53 : cl); \ 53 : cl); \
54 \ 54 \
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 7875c886ad24..cd3dfa346649 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -113,12 +113,11 @@
113#define ELF_EXEC_PAGESIZE PAGE_SIZE 113#define ELF_EXEC_PAGESIZE PAGE_SIZE
114 114
115/* 115/*
116 * This is the location that an ET_DYN program is loaded if exec'ed. Typical 116 * This is the base location for PIE (ET_DYN with INTERP) loads. On
117 * use of this is to invoke "./ld.so someprog" to test out a new version of 117 * 64-bit, this is above 4GB to leave the entire 32-bit address
118 * the loader. We need to make sure that it is out of the way of the program 118 * space open for things that want to use the area for 32-bit pointers.
119 * that it will "exec", and that there is sufficient room for the brk.
120 */ 119 */
121#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) 120#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
122 121
123#ifndef __ASSEMBLY__ 122#ifndef __ASSEMBLY__
124 123
@@ -169,7 +168,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
169 168
170#ifdef CONFIG_COMPAT 169#ifdef CONFIG_COMPAT
171 170
172#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) 171/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
172#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
173 173
174/* AArch32 registers. */ 174/* AArch32 registers. */
175#define COMPAT_ELF_NGREG 18 175#define COMPAT_ELF_NGREG 18
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index c93023dc42cd..2cd3b3251d3c 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -21,10 +21,7 @@
21#include <linux/futex.h> 21#include <linux/futex.h>
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23 23
24#include <asm/alternative.h>
25#include <asm/cpufeature.h>
26#include <asm/errno.h> 24#include <asm/errno.h>
27#include <asm/sysreg.h>
28 25
29#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ 26#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
30do { \ 27do { \
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 8740297dac77..1473fc2f7ab7 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
20#include <linux/threads.h> 20#include <linux/threads.h>
21#include <asm/irq.h> 21#include <asm/irq.h>
22 22
23#define NR_IPI 6 23#define NR_IPI 7
24 24
25typedef struct { 25typedef struct {
26 unsigned int __softirq_pending; 26 unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 9732908bfc8a..c72b8e201ab4 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -68,7 +68,11 @@ static inline void decode_ctrl_reg(u32 reg,
68/* Lengths */ 68/* Lengths */
69#define ARM_BREAKPOINT_LEN_1 0x1 69#define ARM_BREAKPOINT_LEN_1 0x1
70#define ARM_BREAKPOINT_LEN_2 0x3 70#define ARM_BREAKPOINT_LEN_2 0x3
71#define ARM_BREAKPOINT_LEN_3 0x7
71#define ARM_BREAKPOINT_LEN_4 0xf 72#define ARM_BREAKPOINT_LEN_4 0xf
73#define ARM_BREAKPOINT_LEN_5 0x1f
74#define ARM_BREAKPOINT_LEN_6 0x3f
75#define ARM_BREAKPOINT_LEN_7 0x7f
72#define ARM_BREAKPOINT_LEN_8 0xff 76#define ARM_BREAKPOINT_LEN_8 0xff
73 77
74/* Kernel stepping */ 78/* Kernel stepping */
@@ -110,7 +114,7 @@ struct perf_event;
110struct pmu; 114struct pmu;
111 115
112extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, 116extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
113 int *gen_len, int *gen_type); 117 int *gen_len, int *gen_type, int *offset);
114extern int arch_check_bp_in_kernelspace(struct perf_event *bp); 118extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
115extern int arch_validate_hwbkpt_settings(struct perf_event *bp); 119extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
116extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, 120extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
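
The new ARM_BREAKPOINT_LEN_{3,5,6,7} values follow the existing encoding: each is a byte-address select mask of n consecutive set bits, which together with the new 'offset' out-parameter lets a watchpoint cover any 1-8 byte span within the watched doubleword. Illustrative helper (not in the patch):

    #include <stdint.h>

    /* n watched bytes => mask (1 << n) - 1: 3 -> 0x7, 5 -> 0x1f, 7 -> 0x7f */
    static inline uint8_t arm_bp_len_mask(unsigned int nbytes)
    {
        return (uint8_t)((1U << nbytes) - 1);
    }
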
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
new file mode 100644
index 000000000000..e17f0529a882
--- /dev/null
+++ b/arch/arm64/include/asm/kexec.h
@@ -0,0 +1,98 @@
1/*
2 * kexec for arm64
3 *
4 * Copyright (C) Linaro.
5 * Copyright (C) Huawei Futurewei Technologies.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ARM64_KEXEC_H
13#define _ARM64_KEXEC_H
14
15/* Maximum physical address we can use pages from */
16
17#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
18
19/* Maximum address we can reach in physical address mode */
20
21#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
22
23/* Maximum address we can use for the control code buffer */
24
25#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
26
27#define KEXEC_CONTROL_PAGE_SIZE 4096
28
29#define KEXEC_ARCH KEXEC_ARCH_AARCH64
30
31#ifndef __ASSEMBLY__
32
33/**
34 * crash_setup_regs() - save registers for the panic kernel
35 *
36 * @newregs: registers are saved here
37 * @oldregs: registers to be saved (may be %NULL)
38 */
39
40static inline void crash_setup_regs(struct pt_regs *newregs,
41 struct pt_regs *oldregs)
42{
43 if (oldregs) {
44 memcpy(newregs, oldregs, sizeof(*newregs));
45 } else {
46 u64 tmp1, tmp2;
47
48 __asm__ __volatile__ (
49 "stp x0, x1, [%2, #16 * 0]\n"
50 "stp x2, x3, [%2, #16 * 1]\n"
51 "stp x4, x5, [%2, #16 * 2]\n"
52 "stp x6, x7, [%2, #16 * 3]\n"
53 "stp x8, x9, [%2, #16 * 4]\n"
54 "stp x10, x11, [%2, #16 * 5]\n"
55 "stp x12, x13, [%2, #16 * 6]\n"
56 "stp x14, x15, [%2, #16 * 7]\n"
57 "stp x16, x17, [%2, #16 * 8]\n"
58 "stp x18, x19, [%2, #16 * 9]\n"
59 "stp x20, x21, [%2, #16 * 10]\n"
60 "stp x22, x23, [%2, #16 * 11]\n"
61 "stp x24, x25, [%2, #16 * 12]\n"
62 "stp x26, x27, [%2, #16 * 13]\n"
63 "stp x28, x29, [%2, #16 * 14]\n"
64 "mov %0, sp\n"
65 "stp x30, %0, [%2, #16 * 15]\n"
66
67 "/* faked current PSTATE */\n"
68 "mrs %0, CurrentEL\n"
69 "mrs %1, SPSEL\n"
70 "orr %0, %0, %1\n"
71 "mrs %1, DAIF\n"
72 "orr %0, %0, %1\n"
73 "mrs %1, NZCV\n"
74 "orr %0, %0, %1\n"
75 /* pc */
76 "adr %1, 1f\n"
77 "1:\n"
78 "stp %1, %0, [%2, #16 * 16]\n"
79 : "=&r" (tmp1), "=&r" (tmp2)
80 : "r" (newregs)
81 : "memory"
82 );
83 }
84}
85
86#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_HIBERNATION)
87extern bool crash_is_nosave(unsigned long pfn);
88extern void crash_prepare_suspend(void);
89extern void crash_post_resume(void);
90#else
91static inline bool crash_is_nosave(unsigned long pfn) {return false; }
92static inline void crash_prepare_suspend(void) {}
93static inline void crash_post_resume(void) {}
94#endif
95
96#endif /* __ASSEMBLY__ */
97
98#endif
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 342a5ac2f3da..320dc9c7e4f4 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -237,8 +237,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
237{ 237{
238 void *va = page_address(pfn_to_page(pfn)); 238 void *va = page_address(pfn_to_page(pfn));
239 239
240 if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) 240 kvm_flush_dcache_to_poc(va, size);
241 kvm_flush_dcache_to_poc(va, size);
242 241
243 if (!icache_is_aliasing()) { /* PIPT */ 242 if (!icache_is_aliasing()) { /* PIPT */
244 flush_icache_range((unsigned long)va, 243 flush_icache_range((unsigned long)va,
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 990124a67eeb..5472251c8e6c 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -33,7 +33,7 @@ extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
33extern void init_mem_pgprot(void); 33extern void init_mem_pgprot(void);
34extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, 34extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
35 unsigned long virt, phys_addr_t size, 35 unsigned long virt, phys_addr_t size,
36 pgprot_t prot); 36 pgprot_t prot, bool allow_block_mappings);
37extern void *fixmap_remap_fdt(phys_addr_t dt_phys); 37extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
38 38
39#endif 39#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 4a32fd5f101d..e53d30c6f779 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -218,9 +218,11 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
218 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous 218 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
219 * value may have not been initialised yet (activate_mm caller) or the 219 * value may have not been initialised yet (activate_mm caller) or the
220 * ASID has changed since the last run (following the context switch 220 * ASID has changed since the last run (following the context switch
221 * of another thread of the same process). 221 * of another thread of the same process). Avoid setting the reserved
222 * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
222 */ 223 */
223 update_saved_ttbr0(tsk, next); 224 if (next != &init_mm)
225 update_saved_ttbr0(tsk, next);
224} 226}
225 227
226#define deactivate_mm(tsk,mm) do { } while (0) 228#define deactivate_mm(tsk,mm) do { } while (0)
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index fbafd0ad16df..fb2617df2de9 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -23,16 +23,8 @@
23 23
24/* PAGE_SHIFT determines the page size */ 24/* PAGE_SHIFT determines the page size */
25/* CONT_SHIFT determines the number of pages which can be tracked together */ 25/* CONT_SHIFT determines the number of pages which can be tracked together */
26#ifdef CONFIG_ARM64_64K_PAGES 26#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT
27#define PAGE_SHIFT 16 27#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT
28#define CONT_SHIFT 5
29#elif defined(CONFIG_ARM64_16K_PAGES)
30#define PAGE_SHIFT 14
31#define CONT_SHIFT 7
32#else
33#define PAGE_SHIFT 12
34#define CONT_SHIFT 4
35#endif
36#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) 28#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
37#define PAGE_MASK (~(PAGE_SIZE-1)) 29#define PAGE_MASK (~(PAGE_SIZE-1))
38 30
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 5eedfd83acc7..1528d52eb8c0 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -21,8 +21,6 @@
21 21
22#include <uapi/asm/ptrace.h> 22#include <uapi/asm/ptrace.h>
23 23
24#define _PSR_PAN_BIT 22
25
26/* Current Exception Level values, as contained in CurrentEL */ 24/* Current Exception Level values, as contained in CurrentEL */
27#define CurrentEL_EL1 (1 << 2) 25#define CurrentEL_EL1 (1 << 2)
28#define CurrentEL_EL2 (2 << 2) 26#define CurrentEL_EL2 (2 << 2)
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 2013a4dc5124..1d3ff7e4a6c2 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -16,6 +16,19 @@
16#ifndef __ASM_SMP_H 16#ifndef __ASM_SMP_H
17#define __ASM_SMP_H 17#define __ASM_SMP_H
18 18
19/* Values for secondary_data.status */
20
21#define CPU_MMU_OFF (-1)
22#define CPU_BOOT_SUCCESS (0)
23/* The cpu invoked ops->cpu_die, synchronise it with cpu_kill */
24#define CPU_KILL_ME (1)
25/* The cpu couldn't die gracefully and is looping in the kernel */
26#define CPU_STUCK_IN_KERNEL (2)
27/* Fatal system error detected by secondary CPU, crash the system */
28#define CPU_PANIC_KERNEL (3)
29
30#ifndef __ASSEMBLY__
31
19#include <linux/threads.h> 32#include <linux/threads.h>
20#include <linux/cpumask.h> 33#include <linux/cpumask.h>
21#include <linux/thread_info.h> 34#include <linux/thread_info.h>
@@ -54,11 +67,17 @@ asmlinkage void secondary_start_kernel(void);
54 67
55/* 68/*
56 * Initial data for bringing up a secondary CPU. 69 * Initial data for bringing up a secondary CPU.
70 * @stack - sp for the secondary CPU
71 * @status - Result passed back from the secondary CPU to
72 * indicate failure.
57 */ 73 */
58struct secondary_data { 74struct secondary_data {
59 void *stack; 75 void *stack;
76 long status;
60}; 77};
78
61extern struct secondary_data secondary_data; 79extern struct secondary_data secondary_data;
80extern long __early_cpu_boot_status;
62extern void secondary_entry(void); 81extern void secondary_entry(void);
63 82
64extern void arch_send_call_function_single_ipi(int cpu); 83extern void arch_send_call_function_single_ipi(int cpu);
@@ -77,5 +96,38 @@ extern int __cpu_disable(void);
77 96
78extern void __cpu_die(unsigned int cpu); 97extern void __cpu_die(unsigned int cpu);
79extern void cpu_die(void); 98extern void cpu_die(void);
99extern void cpu_die_early(void);
100
101static inline void cpu_park_loop(void)
102{
103 for (;;) {
104 wfe();
105 wfi();
106 }
107}
108
109static inline void update_cpu_boot_status(int val)
110{
111 WRITE_ONCE(secondary_data.status, val);
112 /* Ensure the visibility of the status update */
113 dsb(ishst);
114}
115
116/*
117 * If a secondary CPU enters the kernel but fails to come online
118 * (e.g. due to mismatched features), and cannot exit the kernel,
119 * we increment cpus_stuck_in_kernel and leave the CPU in a
120 * quiescent loop within the kernel text. The memory containing
121 * this loop must not be re-used for anything else as the 'stuck'
122 * core is executing it.
123 *
124 * This function is used to inhibit features like kexec and hibernate.
125 */
126bool cpus_are_stuck_in_kernel(void);
127
128extern void smp_send_crash_stop(void);
129extern bool smp_crash_stop_failed(void);
130
131#endif /* ifndef __ASSEMBLY__ */
80 132
81#endif /* ifndef __ASM_SMP_H */ 133#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 43a66881fd57..73f5d548bba1 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -31,6 +31,12 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
31 unsigned int tmp; 31 unsigned int tmp;
32 arch_spinlock_t lockval; 32 arch_spinlock_t lockval;
33 33
34 /*
35 * Ensure prior spin_lock operations to other locks have completed
36 * on this CPU before we test whether "lock" is locked.
37 */
38 smp_mb();
39
34 asm volatile( 40 asm volatile(
35" sevl\n" 41" sevl\n"
36"1: wfe\n" 42"1: wfe\n"
@@ -152,6 +158,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
152 158
153static inline int arch_spin_is_locked(arch_spinlock_t *lock) 159static inline int arch_spin_is_locked(arch_spinlock_t *lock)
154{ 160{
161 smp_mb(); /* See arch_spin_unlock_wait */
155 return !arch_spin_value_unlocked(READ_ONCE(*lock)); 162 return !arch_spin_value_unlocked(READ_ONCE(*lock));
156} 163}
157 164
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index b3325a9cb90f..794d22603f04 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -47,10 +47,10 @@ typedef unsigned long mm_segment_t;
47struct thread_info { 47struct thread_info {
48 unsigned long flags; /* low level flags */ 48 unsigned long flags; /* low level flags */
49 mm_segment_t addr_limit; /* address limit */ 49 mm_segment_t addr_limit; /* address limit */
50 struct task_struct *task; /* main task structure */
50#ifdef CONFIG_ARM64_SW_TTBR0_PAN 51#ifdef CONFIG_ARM64_SW_TTBR0_PAN
51 u64 ttbr0; /* saved TTBR0_EL1 */ 52 u64 ttbr0; /* saved TTBR0_EL1 */
52#endif 53#endif
53 struct task_struct *task; /* main task structure */
54 int preempt_count; /* 0 => preemptable, <0 => bug */ 54 int preempt_count; /* 0 => preemptable, <0 => bug */
55 int cpu; /* cpu */ 55 int cpu; /* cpu */
56}; 56};
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index c37c064d7cdd..064cef9ae2d1 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -18,19 +18,21 @@
18#ifndef __ASM_UACCESS_H 18#ifndef __ASM_UACCESS_H
19#define __ASM_UACCESS_H 19#define __ASM_UACCESS_H
20 20
21#include <asm/alternative.h>
22#include <asm/kernel-pgtable.h>
23#include <asm/sysreg.h>
24
21#ifndef __ASSEMBLY__ 25#ifndef __ASSEMBLY__
22 26
23/* 27/*
24 * User space memory access functions 28 * User space memory access functions
25 */ 29 */
30#include <linux/bitops.h>
26#include <linux/string.h> 31#include <linux/string.h>
27#include <linux/thread_info.h> 32#include <linux/thread_info.h>
28 33
29#include <asm/alternative.h>
30#include <asm/cpufeature.h> 34#include <asm/cpufeature.h>
31#include <asm/kernel-pgtable.h>
32#include <asm/ptrace.h> 35#include <asm/ptrace.h>
33#include <asm/sysreg.h>
34#include <asm/errno.h> 36#include <asm/errno.h>
35#include <asm/memory.h> 37#include <asm/memory.h>
36#include <asm/compiler.h> 38#include <asm/compiler.h>
@@ -107,16 +109,24 @@ static inline void set_fs(mm_segment_t fs)
107 */ 109 */
108#define __range_ok(addr, size) \ 110#define __range_ok(addr, size) \
109({ \ 111({ \
112 unsigned long __addr = (unsigned long __force)(addr); \
110 unsigned long flag, roksum; \ 113 unsigned long flag, roksum; \
111 __chk_user_ptr(addr); \ 114 __chk_user_ptr(addr); \
112 asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \ 115 asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
113 : "=&r" (flag), "=&r" (roksum) \ 116 : "=&r" (flag), "=&r" (roksum) \
114 : "1" (addr), "Ir" (size), \ 117 : "1" (__addr), "Ir" (size), \
115 "r" (current_thread_info()->addr_limit) \ 118 "r" (current_thread_info()->addr_limit) \
116 : "cc"); \ 119 : "cc"); \
117 flag; \ 120 flag; \
118}) 121})
119 122
123/*
124 * When dealing with data aborts, watchpoints, or instruction traps we may end
125 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
126 * pass on to access_ok(), for instance.
127 */
128#define untagged_addr(addr) sign_extend64(addr, 55)
129
120#define access_ok(type, addr, size) __range_ok(addr, size) 130#define access_ok(type, addr, size) __range_ok(addr, size)
121#define user_addr_max get_fs 131#define user_addr_max get_fs
122 132
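
untagged_addr() is defined as a sign extension from bit 55 so a single expression handles both halves of the address space: for user addresses (bit 55 clear) the tag bits 63:56 are zeroed, while kernel addresses get them set — which they already are. A C expansion of sign_extend64(addr, 55) (illustrative name):

    #include <stdint.h>

    static inline uint64_t untagged_addr_ref(uint64_t addr)
    {
        /* shift the tag byte out, arithmetic-shift bit 55 back across 63:56 */
        return (uint64_t)((int64_t)(addr << 8) >> 8);
    }
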
@@ -130,7 +140,7 @@ static inline void set_fs(mm_segment_t fs)
130 * User access enabling/disabling. 140 * User access enabling/disabling.
131 */ 141 */
132#ifdef CONFIG_ARM64_SW_TTBR0_PAN 142#ifdef CONFIG_ARM64_SW_TTBR0_PAN
133static inline void uaccess_ttbr0_disable(void) 143static inline void __uaccess_ttbr0_disable(void)
134{ 144{
135 unsigned long ttbr; 145 unsigned long ttbr;
136 146
@@ -140,7 +150,7 @@ static inline void uaccess_ttbr0_disable(void)
140 isb(); 150 isb();
141} 151}
142 152
143static inline void uaccess_ttbr0_enable(void) 153static inline void __uaccess_ttbr0_enable(void)
144{ 154{
145 unsigned long flags; 155 unsigned long flags;
146 156
@@ -154,30 +164,44 @@ static inline void uaccess_ttbr0_enable(void)
154 isb(); 164 isb();
155 local_irq_restore(flags); 165 local_irq_restore(flags);
156} 166}
167
168static inline bool uaccess_ttbr0_disable(void)
169{
170 if (!system_uses_ttbr0_pan())
171 return false;
172 __uaccess_ttbr0_disable();
173 return true;
174}
175
176static inline bool uaccess_ttbr0_enable(void)
177{
178 if (!system_uses_ttbr0_pan())
179 return false;
180 __uaccess_ttbr0_enable();
181 return true;
182}
157#else 183#else
158static inline void uaccess_ttbr0_disable(void) 184static inline bool uaccess_ttbr0_disable(void)
159{ 185{
186 return false;
160} 187}
161 188
162static inline void uaccess_ttbr0_enable(void) 189static inline bool uaccess_ttbr0_enable(void)
163{ 190{
191 return false;
164} 192}
165#endif 193#endif
166 194
167#define __uaccess_disable(alt) \ 195#define __uaccess_disable(alt) \
168do { \ 196do { \
169 if (system_uses_ttbr0_pan()) \ 197 if (!uaccess_ttbr0_disable()) \
170 uaccess_ttbr0_disable(); \
171 else \
172 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \ 198 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
173 CONFIG_ARM64_PAN)); \ 199 CONFIG_ARM64_PAN)); \
174} while (0) 200} while (0)
175 201
176#define __uaccess_enable(alt) \ 202#define __uaccess_enable(alt) \
177do { \ 203do { \
178 if (system_uses_ttbr0_pan()) \ 204 if (!uaccess_ttbr0_enable()) \
179 uaccess_ttbr0_enable(); \
180 else \
181 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \ 205 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
182 CONFIG_ARM64_PAN)); \ 206 CONFIG_ARM64_PAN)); \
183} while (0) 207} while (0)
@@ -407,69 +431,62 @@ extern __must_check long strnlen_user(const char __user *str, long n);
407 431
408#else /* __ASSEMBLY__ */ 432#else /* __ASSEMBLY__ */
409 433
410#include <asm/alternative.h>
411#include <asm/assembler.h> 434#include <asm/assembler.h>
412#include <asm/kernel-pgtable.h>
413 435
414/* 436/*
415 * User access enabling/disabling macros. 437 * User access enabling/disabling macros.
416 */ 438 */
417 .macro uaccess_ttbr0_disable, tmp1 439#ifdef CONFIG_ARM64_SW_TTBR0_PAN
440 .macro __uaccess_ttbr0_disable, tmp1
418 mrs \tmp1, ttbr1_el1 // swapper_pg_dir 441 mrs \tmp1, ttbr1_el1 // swapper_pg_dir
419 add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir 442 add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
420 msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 443 msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
421 isb 444 isb
422 .endm 445 .endm
423 446
424 .macro uaccess_ttbr0_enable, tmp1 447 .macro __uaccess_ttbr0_enable, tmp1
425 get_thread_info \tmp1 448 get_thread_info \tmp1
426 ldr \tmp1, [\tmp1, #TI_TTBR0] // load saved TTBR0_EL1 449 ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
427 msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 450 msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
428 isb 451 isb
429 .endm 452 .endm
430 453
454 .macro uaccess_ttbr0_disable, tmp1
455alternative_if_not ARM64_HAS_PAN
456 __uaccess_ttbr0_disable \tmp1
457alternative_else_nop_endif
458 .endm
459
460 .macro uaccess_ttbr0_enable, tmp1, tmp2
461alternative_if_not ARM64_HAS_PAN
462 save_and_disable_irq \tmp2 // avoid preemption
463 __uaccess_ttbr0_enable \tmp1
464 restore_irq \tmp2
465alternative_else_nop_endif
466 .endm
467#else
468 .macro uaccess_ttbr0_disable, tmp1
469 .endm
470
471 .macro uaccess_ttbr0_enable, tmp1, tmp2
472 .endm
473#endif
474
431/* 475/*
432 * These macros are no-ops when UAO is present. 476 * These macros are no-ops when UAO is present.
433 */ 477 */
434 .macro uaccess_disable_not_uao, tmp1 478 .macro uaccess_disable_not_uao, tmp1
435#ifdef CONFIG_ARM64_SW_TTBR0_PAN
436alternative_if_not ARM64_HAS_PAN
437 uaccess_ttbr0_disable \tmp1 479 uaccess_ttbr0_disable \tmp1
438alternative_else 480alternative_if ARM64_ALT_PAN_NOT_UAO
439 nop
440 nop
441 nop
442 nop
443alternative_endif
444#endif
445alternative_if_not ARM64_ALT_PAN_NOT_UAO
446 nop
447alternative_else
448 SET_PSTATE_PAN(1) 481 SET_PSTATE_PAN(1)
449alternative_endif 482alternative_else_nop_endif
450 .endm 483 .endm
451 484
452 .macro uaccess_enable_not_uao, tmp1, tmp2 485 .macro uaccess_enable_not_uao, tmp1, tmp2
453#ifdef CONFIG_ARM64_SW_TTBR0_PAN 486 uaccess_ttbr0_enable \tmp1, \tmp2
454alternative_if_not ARM64_HAS_PAN 487alternative_if ARM64_ALT_PAN_NOT_UAO
455 save_and_disable_irq \tmp2 // avoid preemption
456 uaccess_ttbr0_enable \tmp1
457 restore_irq \tmp2
458alternative_else
459 nop
460 nop
461 nop
462 nop
463 nop
464 nop
465 nop
466alternative_endif
467#endif
468alternative_if_not ARM64_ALT_PAN_NOT_UAO
469 nop
470alternative_else
471 SET_PSTATE_PAN(0) 488 SET_PSTATE_PAN(0)
472alternative_endif 489alternative_else_nop_endif
473 .endm 490 .endm
474 491
475#endif /* __ASSEMBLY__ */ 492#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index de66199673d7..2b9a63771eda 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -22,6 +22,8 @@
22 22
23struct vdso_data { 23struct vdso_data {
24 __u64 cs_cycle_last; /* Timebase at clocksource init */ 24 __u64 cs_cycle_last; /* Timebase at clocksource init */
25 __u64 raw_time_sec; /* Raw time */
26 __u64 raw_time_nsec;
25 __u64 xtime_clock_sec; /* Kernel time */ 27 __u64 xtime_clock_sec; /* Kernel time */
26 __u64 xtime_clock_nsec; 28 __u64 xtime_clock_nsec;
27 __u64 xtime_coarse_sec; /* Coarse time */ 29 __u64 xtime_coarse_sec; /* Coarse time */
@@ -29,8 +31,10 @@ struct vdso_data {
29 __u64 wtm_clock_sec; /* Wall to monotonic time */ 31 __u64 wtm_clock_sec; /* Wall to monotonic time */
30 __u64 wtm_clock_nsec; 32 __u64 wtm_clock_nsec;
31 __u32 tb_seq_count; /* Timebase sequence counter */ 33 __u32 tb_seq_count; /* Timebase sequence counter */
32 __u32 cs_mult; /* Clocksource multiplier */ 34 /* cs_* members must be adjacent and in this order (ldp accesses) */
33 __u32 cs_shift; /* Clocksource shift */ 35 __u32 cs_mono_mult; /* NTP-adjusted clocksource multiplier */
36 __u32 cs_shift; /* Clocksource shift (mono = raw) */
37 __u32 cs_raw_mult; /* Raw clocksource multiplier */
34 __u32 tz_minuteswest; /* Whacky timezone stuff */ 38 __u32 tz_minuteswest; /* Whacky timezone stuff */
35 __u32 tz_dsttime; 39 __u32 tz_dsttime;
36 __u32 use_syscall; 40 __u32 use_syscall;
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 06e6a5238c4c..e6c27b8ce311 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -34,6 +34,11 @@
34 */ 34 */
35#define HVC_SET_VECTORS 1 35#define HVC_SET_VECTORS 1
36 36
37/*
38 * HVC_SOFT_RESTART - CPU soft reset, used by the cpu_soft_restart routine.
39 */
40#define HVC_SOFT_RESTART 2
41
37#define BOOT_CPU_MODE_EL1 (0xe11) 42#define BOOT_CPU_MODE_EL1 (0xe11)
38#define BOOT_CPU_MODE_EL2 (0xe12) 43#define BOOT_CPU_MODE_EL2 (0xe12)
39 44
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 20bcc2db06bf..01c0b3881f88 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -44,13 +44,11 @@ arm64-obj-$(CONFIG_ACPI) += acpi.o
44arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o 44arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
45arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o 45arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
46arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o 46arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
47arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o 47arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
48 cpu-reset.o
49arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
48 50
49obj-y += $(arm64-obj-y) vdso/ probes/ 51obj-y += $(arm64-obj-y) vdso/ probes/
50obj-m += $(arm64-obj-m) 52obj-m += $(arm64-obj-m)
51head-y := head.o 53head-y := head.o
52extra-y += $(head-y) vmlinux.lds 54extra-y += $(head-y) vmlinux.lds
53
54# vDSO - this must be built first to generate the symbol offsets
55$(call objectify,$(arm64-obj-y)): $(obj)/vdso/vdso-offsets.h
56$(obj)/vdso/vdso-offsets.h: $(obj)/vdso
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index a0a0f2b20608..57e7d6e44c8b 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -14,7 +14,6 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/sysctl.h> 15#include <linux/sysctl.h>
16 16
17#include <asm/alternative.h>
18#include <asm/cpufeature.h> 17#include <asm/cpufeature.h>
19#include <asm/insn.h> 18#include <asm/insn.h>
20#include <asm/opcodes.h> 19#include <asm/opcodes.h>
@@ -300,8 +299,9 @@ do { \
300 _ASM_EXTABLE(0b, 4b) \ 299 _ASM_EXTABLE(0b, 4b) \
301 _ASM_EXTABLE(1b, 4b) \ 300 _ASM_EXTABLE(1b, 4b) \
302 : "=&r" (res), "+r" (data), "=&r" (temp) \ 301 : "=&r" (res), "+r" (data), "=&r" (temp) \
303 : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ 302 : "r" ((unsigned long)addr), "i" (-EAGAIN), \
304 : "memory"); \ 303 "i" (-EFAULT) \
304 : "memory"); \
305 uaccess_disable(); \ 305 uaccess_disable(); \
306} while (0) 306} while (0)
307 307
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index dac70c160289..7a3f34b15fb0 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -38,11 +38,11 @@ int main(void)
38 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 38 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
39 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 39 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
40 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); 40 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
41#ifdef CONFIG_ARM64_SW_TTBR0_PAN
42 DEFINE(TI_TTBR0, offsetof(struct thread_info, ttbr0));
43#endif
44 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 41 DEFINE(TI_TASK, offsetof(struct thread_info, task));
45 DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); 42 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
43#ifdef CONFIG_ARM64_SW_TTBR0_PAN
44 DEFINE(TSK_TI_TTBR0, offsetof(struct thread_info, ttbr0));
45#endif
46 BLANK(); 46 BLANK();
47 DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); 47 DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
48 BLANK(); 48 BLANK();
@@ -92,6 +92,7 @@ int main(void)
92 BLANK(); 92 BLANK();
93 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); 93 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
94 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); 94 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
95 DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
95 DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); 96 DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
96 DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); 97 DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
97 DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE); 98 DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
@@ -99,6 +100,8 @@ int main(void)
99 DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); 100 DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
100 BLANK(); 101 BLANK();
101 DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last)); 102 DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last));
103 DEFINE(VDSO_RAW_TIME_SEC, offsetof(struct vdso_data, raw_time_sec));
104 DEFINE(VDSO_RAW_TIME_NSEC, offsetof(struct vdso_data, raw_time_nsec));
102 DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec)); 105 DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec));
103 DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); 106 DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
104 DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec)); 107 DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
@@ -106,7 +109,8 @@ int main(void)
106 DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec)); 109 DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec));
107 DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec)); 110 DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec));
108 DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count)); 111 DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count));
109 DEFINE(VDSO_CS_MULT, offsetof(struct vdso_data, cs_mult)); 112 DEFINE(VDSO_CS_MONO_MULT, offsetof(struct vdso_data, cs_mono_mult));
113 DEFINE(VDSO_CS_RAW_MULT, offsetof(struct vdso_data, cs_raw_mult));
110 DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift)); 114 DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift));
111 DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest)); 115 DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest));
112 DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); 116 DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
@@ -120,6 +124,8 @@ int main(void)
120 DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); 124 DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
121 DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); 125 DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
122 BLANK(); 126 BLANK();
127 DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
128 BLANK();
123#ifdef CONFIG_KVM_ARM_HOST 129#ifdef CONFIG_KVM_ARM_HOST
124 DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); 130 DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
125 DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); 131 DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
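The TSK_TI_TTBR0 and CPU_BOOT_STACK definitions added above come from the kernel's asm-offsets mechanism: this C file is compiled to assembly and scraped into a generated header so that hand-written assembly can refer to C structure offsets by name. A minimal userspace sketch of the idea follows; the structure layout and field position are illustrative assumptions, not the kernel's actual thread_info.

/* Sketch of the asm-offsets idea: print an offset the way the generated
 * header would define it. Layout below is illustrative only. */
#include <stddef.h>
#include <stdio.h>

struct thread_info_demo {
	unsigned long flags;
	int preempt_count;
	unsigned long ttbr0;		/* only exists with SW TTBR0 PAN */
};

int main(void)
{
	/* Assembly can then use the constant: ldr x0, [tsk, #TSK_TI_TTBR0] */
	printf("#define TSK_TI_TTBR0 %zu\n",
	       offsetof(struct thread_info_demo, ttbr0));
	return 0;
}

Because the constant is derived from offsetof() at build time, moving a field (as this hunk does for the TTBR0 slot) never requires touching the assembly that consumes it.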
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
new file mode 100644
index 000000000000..65f42d257414
--- /dev/null
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -0,0 +1,54 @@
1/*
2 * CPU reset routines
3 *
4 * Copyright (C) 2001 Deep Blue Solutions Ltd.
5 * Copyright (C) 2012 ARM Ltd.
6 * Copyright (C) 2015 Huawei Futurewei Technologies.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/linkage.h>
14#include <asm/assembler.h>
15#include <asm/sysreg.h>
16#include <asm/virt.h>
17
18.text
19.pushsection .idmap.text, "ax"
20
21/*
22 * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
23 * cpu_soft_restart.
24 *
 25 * @el2_switch: Flag to indicate a switch to EL2 is needed.
 26 * @entry: Location to jump to for soft reset.
 27 * @arg0: First argument passed to @entry.
 28 * @arg1: Second argument passed to @entry.
 29 * @arg2: Third argument passed to @entry.
30 *
31 * Put the CPU into the same state as it would be if it had been reset, and
32 * branch to what would be the reset vector. It must be executed with the
33 * flat identity mapping.
34 */
35ENTRY(__cpu_soft_restart)
36 /* Clear sctlr_el1 flags. */
37 mrs x12, sctlr_el1
38 ldr x13, =SCTLR_ELx_FLAGS
39 bic x12, x12, x13
40 msr sctlr_el1, x12
41 isb
42
43 cbz x0, 1f // el2_switch?
44 mov x0, #HVC_SOFT_RESTART
45 hvc #0 // no return
46
471: mov x18, x1 // entry
48 mov x0, x2 // arg0
49 mov x1, x3 // arg1
50 mov x2, x4 // arg2
51 br x18
52ENDPROC(__cpu_soft_restart)
53
54.popsection
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
new file mode 100644
index 000000000000..d4e9ecb264f0
--- /dev/null
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -0,0 +1,34 @@
1/*
2 * CPU reset routines
3 *
4 * Copyright (C) 2015 Huawei Futurewei Technologies.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef _ARM64_CPU_RESET_H
12#define _ARM64_CPU_RESET_H
13
14#include <asm/virt.h>
15
16void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
17 unsigned long arg0, unsigned long arg1, unsigned long arg2);
18
19static inline void __noreturn cpu_soft_restart(unsigned long el2_switch,
20 unsigned long entry, unsigned long arg0, unsigned long arg1,
21 unsigned long arg2)
22{
23 typeof(__cpu_soft_restart) *restart;
24
25 el2_switch = el2_switch && !is_kernel_in_hyp_mode() &&
26 is_hyp_mode_available();
27 restart = (void *)virt_to_phys(__cpu_soft_restart);
28
29 cpu_install_idmap();
30 restart(el2_switch, entry, arg0, arg1, arg2);
31 unreachable();
32}
33
34#endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index cdf1dca64133..53fab76d3c39 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -895,28 +895,6 @@ static u64 __raw_read_system_reg(u32 sys_id)
895} 895}
896 896
897/* 897/*
898 * Park the CPU which doesn't have the capability as advertised
899 * by the system.
900 */
901static void fail_incapable_cpu(char *cap_type,
902 const struct arm64_cpu_capabilities *cap)
903{
904 int cpu = smp_processor_id();
905
906 pr_crit("CPU%d: missing %s : %s\n", cpu, cap_type, cap->desc);
907 /* Mark this CPU absent */
908 set_cpu_present(cpu, 0);
909
910 /* Check if we can park ourselves */
911 if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
912 cpu_ops[cpu]->cpu_die(cpu);
913 asm(
914 "1: wfe\n"
915 " wfi\n"
916 " b 1b");
917}
918
919/*
920 * Run through the enabled system capabilities and enable() it on this CPU. 898 * Run through the enabled system capabilities and enable() it on this CPU.
921 * The capabilities were decided based on the available CPUs at the boot time. 899 * The capabilities were decided based on the available CPUs at the boot time.
922 * Any new CPU should match the system wide status of the capability. If the 900 * Any new CPU should match the system wide status of the capability. If the
@@ -944,8 +922,11 @@ void verify_local_cpu_capabilities(void)
944 * If the new CPU misses an advertised feature, we cannot proceed 922 * If the new CPU misses an advertised feature, we cannot proceed
945 * further, park the cpu. 923 * further, park the cpu.
946 */ 924 */
947 if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i])) 925 if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i])) {
948 fail_incapable_cpu("arm64_features", &caps[i]); 926 pr_crit("CPU%d: missing feature: %s\n",
927 smp_processor_id(), caps[i].desc);
928 cpu_die_early();
929 }
949 if (caps[i].enable) 930 if (caps[i].enable)
950 caps[i].enable(NULL); 931 caps[i].enable(NULL);
951 } 932 }
@@ -953,8 +934,11 @@ void verify_local_cpu_capabilities(void)
953 for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) { 934 for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) {
954 if (!cpus_have_hwcap(&caps[i])) 935 if (!cpus_have_hwcap(&caps[i]))
955 continue; 936 continue;
956 if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i])) 937 if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i])) {
957 fail_incapable_cpu("arm64_hwcaps", &caps[i]); 938 pr_crit("CPU%d: missing HWCAP: %s\n",
939 smp_processor_id(), caps[i].desc);
940 cpu_die_early();
941 }
958 } 942 }
959} 943}
960 944
diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
new file mode 100644
index 000000000000..f46d57c31443
--- /dev/null
+++ b/arch/arm64/kernel/crash_dump.c
@@ -0,0 +1,71 @@
1/*
2 * Routines for doing kexec-based kdump
3 *
4 * Copyright (C) 2017 Linaro Limited
5 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/crash_dump.h>
13#include <linux/errno.h>
14#include <linux/io.h>
15#include <linux/memblock.h>
16#include <linux/uaccess.h>
17#include <asm/memory.h>
18
19/**
20 * copy_oldmem_page() - copy one page from old kernel memory
21 * @pfn: page frame number to be copied
22 * @buf: buffer where the copied page is placed
23 * @csize: number of bytes to copy
24 * @offset: offset in bytes into the page
25 * @userbuf: if set, @buf is in a user address space
26 *
 27 * This function copies one page from old kernel memory into the buffer
 28 * pointed to by @buf. If @buf is in user space, set @userbuf to %1. Returns
 29 * the number of bytes copied or a negative error on failure.
30 */
31ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
32 size_t csize, unsigned long offset,
33 int userbuf)
34{
35 void *vaddr;
36
37 if (!csize)
38 return 0;
39
40 vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
41 if (!vaddr)
42 return -ENOMEM;
43
44 if (userbuf) {
45 if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
46 memunmap(vaddr);
47 return -EFAULT;
48 }
49 } else {
50 memcpy(buf, vaddr + offset, csize);
51 }
52
53 memunmap(vaddr);
54
55 return csize;
56}
57
58/**
59 * elfcorehdr_read - read from ELF core header
60 * @buf: buffer where the data is placed
 61 * @count: number of bytes to read
 62 * @ppos: physical address to read from
 63 *
 64 * This function reads @count bytes from the ELF core header that resides
 65 * in the crash dump kernel's memory.
66 */
67ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
68{
69 memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
70 return count;
71}
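Both helpers back reads of /proc/vmcore in a booted kdump kernel: elfcorehdr_read() serves the ELF header that describes the old kernel's memory, and copy_oldmem_page() serves the page contents. As a rough usage illustration, assuming a kdump environment where /proc/vmcore exists, a capture tool is essentially a plain reader of that file.

/* Hedged sketch: dump /proc/vmcore to stdout. Error handling is
 * abbreviated; the file only exists in a crash (kdump) kernel. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/proc/vmcore", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/vmcore");
		return 1;
	}
	/* The leading ELF header comes from elfcorehdr_read(); the rest of
	 * the stream is old-kernel memory served by copy_oldmem_page(). */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}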
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index db8a09331f2b..5472cedfe750 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -32,6 +32,7 @@
32#include <asm/ptrace.h> 32#include <asm/ptrace.h>
33#include <asm/thread_info.h> 33#include <asm/thread_info.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35#include <asm/asm-uaccess.h>
35#include <asm/unistd.h> 36#include <asm/unistd.h>
36 37
37/* 38/*
@@ -120,11 +121,9 @@
120 * feature as all TTBR0_EL1 accesses are disabled, not just those to 121 * feature as all TTBR0_EL1 accesses are disabled, not just those to
121 * user mappings. 122 * user mappings.
122 */ 123 */
123alternative_if_not ARM64_HAS_PAN 124alternative_if ARM64_HAS_PAN
124 nop
125alternative_else
126 b 1f // skip TTBR0 PAN 125 b 1f // skip TTBR0 PAN
127alternative_endif 126alternative_else_nop_endif
128 127
129 .if \el != 0 128 .if \el != 0
130 mrs x21, ttbr0_el1 129 mrs x21, ttbr0_el1
@@ -134,7 +133,7 @@ alternative_endif
134 and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR 133 and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
135 .endif 134 .endif
136 135
137 uaccess_ttbr0_disable x21 136 __uaccess_ttbr0_disable x21
1381: 1371:
139#endif 138#endif
140 139
@@ -181,17 +180,15 @@ alternative_endif
181 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR 180 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
182 * PAN bit checking. 181 * PAN bit checking.
183 */ 182 */
184alternative_if_not ARM64_HAS_PAN 183alternative_if ARM64_HAS_PAN
185 nop
186alternative_else
187 b 2f // skip TTBR0 PAN 184 b 2f // skip TTBR0 PAN
188alternative_endif 185alternative_else_nop_endif
189 186
190 .if \el != 0 187 .if \el != 0
191 tbnz x22, #_PSR_PAN_BIT, 1f // Skip re-enabling TTBR0 access if previously disabled 188 tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
192 .endif 189 .endif
193 190
194 uaccess_ttbr0_enable x0 191 __uaccess_ttbr0_enable x0
195 192
196 .if \el == 0 193 .if \el == 0
197 /* 194 /*
@@ -435,12 +432,13 @@ el1_da:
435 /* 432 /*
436 * Data abort handling 433 * Data abort handling
437 */ 434 */
438 mrs x0, far_el1 435 mrs x3, far_el1
439 enable_dbg 436 enable_dbg
440 // re-enable interrupts if they were enabled in the aborted context 437 // re-enable interrupts if they were enabled in the aborted context
441 tbnz x23, #7, 1f // PSR_I_BIT 438 tbnz x23, #7, 1f // PSR_I_BIT
442 enable_irq 439 enable_irq
4431: 4401:
441 clear_address_tag x0, x3
444 mov x2, sp // struct pt_regs 442 mov x2, sp // struct pt_regs
445 bl do_mem_abort 443 bl do_mem_abort
446 444
@@ -602,7 +600,7 @@ el0_da:
602 // enable interrupts before calling the main handler 600 // enable interrupts before calling the main handler
603 enable_dbg_and_irq 601 enable_dbg_and_irq
604 ct_user_exit 602 ct_user_exit
605 bic x0, x26, #(0xff << 56) 603 clear_address_tag x0, x26
606 mov x1, x25 604 mov x1, x25
607 mov x2, sp 605 mov x2, sp
608 bl do_mem_abort 606 bl do_mem_abort
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 8cfd5ab37743..9c2e1564e9a7 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -36,6 +36,7 @@
36#include <asm/pgtable-hwdef.h> 36#include <asm/pgtable-hwdef.h>
37#include <asm/pgtable.h> 37#include <asm/pgtable.h>
38#include <asm/page.h> 38#include <asm/page.h>
39#include <asm/smp.h>
39#include <asm/sysreg.h> 40#include <asm/sysreg.h>
40#include <asm/thread_info.h> 41#include <asm/thread_info.h>
41#include <asm/virt.h> 42#include <asm/virt.h>
@@ -643,7 +644,8 @@ __secondary_switched:
643 msr vbar_el1, x5 644 msr vbar_el1, x5
644 isb 645 isb
645 646
646 ldr_l x0, secondary_data // get secondary_data.stack 647 adr_l x0, secondary_data
648 ldr x0, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
647 mov sp, x0 649 mov sp, x0
648 and x0, x0, #~(THREAD_SIZE - 1) 650 and x0, x0, #~(THREAD_SIZE - 1)
649 msr sp_el0, x0 // save thread_info 651 msr sp_el0, x0 // save thread_info
@@ -652,6 +654,29 @@ __secondary_switched:
652ENDPROC(__secondary_switched) 654ENDPROC(__secondary_switched)
653 655
654/* 656/*
 657 * The booting CPU updates the failed status in @__early_cpu_boot_status
 658 * with the MMU turned off.
659 *
 660 * update_early_cpu_boot_status status, tmp1, tmp2
 661 * - Corrupts tmp1, tmp2
662 * - Writes 'status' to __early_cpu_boot_status and makes sure
663 * it is committed to memory.
664 */
665
666 .macro update_early_cpu_boot_status status, tmp1, tmp2
667 mov \tmp2, #\status
668 str_l \tmp2, __early_cpu_boot_status, \tmp1
669 dmb sy
670 dc ivac, \tmp1 // Invalidate potentially stale cache line
671 .endm
672
673 .pushsection .data..cacheline_aligned
674 .align L1_CACHE_SHIFT
675ENTRY(__early_cpu_boot_status)
676 .long 0
677 .popsection
678
679/*
655 * Enable the MMU. 680 * Enable the MMU.
656 * 681 *
657 * x0 = SCTLR_EL1 value for turning on the MMU. 682 * x0 = SCTLR_EL1 value for turning on the MMU.
@@ -669,6 +694,7 @@ ENTRY(__enable_mmu)
669 ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4 694 ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
670 cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED 695 cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
671 b.ne __no_granule_support 696 b.ne __no_granule_support
697 update_early_cpu_boot_status 0, x1, x2
672 msr ttbr0_el1, x25 // load TTBR0 698 msr ttbr0_el1, x25 // load TTBR0
673 msr ttbr1_el1, x26 // load TTBR1 699 msr ttbr1_el1, x26 // load TTBR1
674 isb 700 isb
@@ -708,8 +734,12 @@ ENTRY(__enable_mmu)
708ENDPROC(__enable_mmu) 734ENDPROC(__enable_mmu)
709 735
710__no_granule_support: 736__no_granule_support:
737 /* Indicate that this CPU can't boot and is stuck in the kernel */
738 update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
7391:
711 wfe 740 wfe
712 b __no_granule_support 741 wfi
742 b 1b
713ENDPROC(__no_granule_support) 743ENDPROC(__no_granule_support)
714 744
715__primary_switch: 745__primary_switch:
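The __early_cpu_boot_status word introduced here is written by a secondary with its MMU (and possibly caches) off, and read back by __cpu_up() — see the arch/arm64/kernel/smp.c hunks below — to tell a CPU that never started apart from one parked in the kernel. A userspace model of that handshake, with state names borrowed for illustration and none of the kernel's cache maintenance:

/* Sketch of the boot-status handshake; compile with -lpthread. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

enum { MMU_OFF, BOOT_SUCCESS, STUCK_IN_KERNEL };

static atomic_int boot_status = MMU_OFF;

static void *secondary(void *unused)
{
	/* A CPU failing verify_local_cpu_capabilities() would publish
	 * STUCK_IN_KERNEL here and park instead. */
	atomic_store(&boot_status, BOOT_SUCCESS);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, NULL);
	usleep(1000);		/* stand-in for wait_for_completion_timeout() */

	switch (atomic_load(&boot_status)) {
	case BOOT_SUCCESS:
		puts("secondary came up");
		break;
	case MMU_OFF:
		puts("secondary never got past the MMU-off path");
		break;
	case STUCK_IN_KERNEL:
		puts("secondary is parked in the kernel");
		break;
	}
	pthread_join(t, NULL);
	return 0;
}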
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index f8df75d740f4..35a33d705536 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -27,6 +27,7 @@
27#include <asm/barrier.h> 27#include <asm/barrier.h>
28#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
29#include <asm/irqflags.h> 29#include <asm/irqflags.h>
30#include <asm/kexec.h>
30#include <asm/memory.h> 31#include <asm/memory.h>
31#include <asm/mmu_context.h> 32#include <asm/mmu_context.h>
32#include <asm/pgalloc.h> 33#include <asm/pgalloc.h>
@@ -34,6 +35,7 @@
34#include <asm/pgtable-hwdef.h> 35#include <asm/pgtable-hwdef.h>
35#include <asm/sections.h> 36#include <asm/sections.h>
36#include <asm/suspend.h> 37#include <asm/suspend.h>
38#include <asm/sysreg.h>
37#include <asm/virt.h> 39#include <asm/virt.h>
38 40
39/* 41/*
@@ -99,7 +101,8 @@ int pfn_is_nosave(unsigned long pfn)
99 unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin); 101 unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
100 unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1); 102 unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
101 103
102 return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn); 104 return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
105 crash_is_nosave(pfn);
103} 106}
104 107
105void notrace save_processor_state(void) 108void notrace save_processor_state(void)
@@ -216,12 +219,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
216 set_pte(pte, __pte(virt_to_phys((void *)dst) | 219 set_pte(pte, __pte(virt_to_phys((void *)dst) |
217 pgprot_val(PAGE_KERNEL_EXEC))); 220 pgprot_val(PAGE_KERNEL_EXEC)));
218 221
219 /* Load our new page tables */ 222 /*
220 asm volatile("msr ttbr0_el1, %0;" 223 * Load our new page tables. A strict BBM approach requires that we
221 "isb;" 224 * ensure that TLBs are free of any entries that may overlap with the
222 "tlbi vmalle1is;" 225 * global mappings we are about to install.
223 "dsb ish;" 226 *
224 "isb" : : "r"(virt_to_phys(pgd))); 227 * For a real hibernate/resume cycle TTBR0 currently points to a zero
228 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
229 * runtime services), while for a userspace-driven test_resume cycle it
230 * points to userspace page tables (and we must point it at a zero page
231 * ourselves). Elsewhere we only (un)install the idmap with preemption
232 * disabled, so T0SZ should be as required regardless.
233 */
234 cpu_set_reserved_ttbr0();
235 local_flush_tlb_all();
236 write_sysreg(virt_to_phys(pgd), ttbr0_el1);
237 isb();
225 238
226 *phys_dst_addr = virt_to_phys((void *)dst); 239 *phys_dst_addr = virt_to_phys((void *)dst);
227 240
@@ -239,11 +252,17 @@ int swsusp_arch_suspend(void)
239 local_dbg_save(flags); 252 local_dbg_save(flags);
240 253
241 if (__cpu_suspend_enter(&state)) { 254 if (__cpu_suspend_enter(&state)) {
255 /* make the crash dump kernel image visible/saveable */
256 crash_prepare_suspend();
257
242 ret = swsusp_save(); 258 ret = swsusp_save();
243 } else { 259 } else {
244 /* Clean kernel to PoC for secondary core startup */ 260 /* Clean kernel to PoC for secondary core startup */
245 __flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START); 261 __flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START);
246 262
263 /* make the crash dump kernel image protected again */
264 crash_post_resume();
265
247 /* 266 /*
248 * Tell the hibernation core that we've just restored 267 * Tell the hibernation core that we've just restored
249 * the memory 268 * the memory
@@ -388,6 +407,38 @@ int swsusp_arch_resume(void)
388 void *, phys_addr_t, phys_addr_t); 407 void *, phys_addr_t, phys_addr_t);
389 408
390 /* 409 /*
410 * Restoring the memory image will overwrite the ttbr1 page tables.
411 * Create a second copy of just the linear map, and use this when
412 * restoring.
413 */
414 tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
415 if (!tmp_pg_dir) {
416 pr_err("Failed to allocate memory for temporary page tables.");
417 rc = -ENOMEM;
418 goto out;
419 }
420 rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
421 if (rc)
422 goto out;
423
424 /*
425 * Since we only copied the linear map, we need to find restore_pblist's
426 * linear map address.
427 */
428 lm_restore_pblist = LMADDR(restore_pblist);
429
430 /*
 431 * We need a zero page that is zero before & after resume in order
 432 * to break before make on the ttbr1 page tables.
433 */
434 zero_page = (void *)get_safe_page(GFP_ATOMIC);
435 if (!zero_page) {
436 pr_err("Failed to allocate zero page.");
437 rc = -ENOMEM;
438 goto out;
439 }
440
441 /*
391 * Locate the exit code in the bottom-but-one page, so that *NULL 442 * Locate the exit code in the bottom-but-one page, so that *NULL
 392 * still has disastrous effects. 443
393 */ 444 */
@@ -413,27 +464,6 @@ int swsusp_arch_resume(void)
413 __flush_dcache_area(hibernate_exit, exit_size); 464 __flush_dcache_area(hibernate_exit, exit_size);
414 465
415 /* 466 /*
416 * Restoring the memory image will overwrite the ttbr1 page tables.
417 * Create a second copy of just the linear map, and use this when
418 * restoring.
419 */
420 tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
421 if (!tmp_pg_dir) {
422 pr_err("Failed to allocate memory for temporary page tables.");
423 rc = -ENOMEM;
424 goto out;
425 }
426 rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
427 if (rc)
428 goto out;
429
430 /*
431 * Since we only copied the linear map, we need to find restore_pblist's
432 * linear map address.
433 */
434 lm_restore_pblist = LMADDR(restore_pblist);
435
436 /*
437 * KASLR will cause the el2 vectors to be in a different location in 467 * KASLR will cause the el2 vectors to be in a different location in
438 * the resumed kernel. Load hibernate's temporary copy into el2. 468 * the resumed kernel. Load hibernate's temporary copy into el2.
439 * 469 *
@@ -447,12 +477,6 @@ int swsusp_arch_resume(void)
447 __hyp_set_vectors(el2_vectors); 477 __hyp_set_vectors(el2_vectors);
448 } 478 }
449 479
450 /*
 451 * We need a zero page that is zero before & after resume in order
 452 * to break before make on the ttbr1 page tables.
453 */
454 zero_page = (void *)get_safe_page(GFP_ATOMIC);
455
456 hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1, 480 hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
457 resume_hdr.reenter_kernel, lm_restore_pblist, 481 resume_hdr.reenter_kernel, lm_restore_pblist,
458 resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page)); 482 resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
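The pfn_is_nosave() change at the top of this file widens the "don't snapshot" test from the kernel's nosave section to the crash kernel's reserved-but-unused memory. The logic reduces to inclusive pfn-interval checks; a sketch with made-up ranges:

/* Sketch of the widened nosave test; ranges are invented. */
#include <stdbool.h>
#include <stdio.h>

struct pfn_range { unsigned long start, end; };	/* inclusive */

static const struct pfn_range nosave = { 0x1000, 0x10ff };
static const struct pfn_range crashk = { 0x8000, 0x9fff };

static bool in_range(unsigned long pfn, const struct pfn_range *r)
{
	return pfn >= r->start && pfn <= r->end;
}

static bool pfn_is_nosave_demo(unsigned long pfn)
{
	return in_range(pfn, &nosave) || in_range(pfn, &crashk);
}

int main(void)
{
	printf("%d %d %d\n", pfn_is_nosave_demo(0x1001),
	       pfn_is_nosave_demo(0x8000), pfn_is_nosave_demo(0x2000));
	return 0;	/* prints: 1 1 0 */
}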
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 367a954f9937..1c694f3c643c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -36,6 +36,7 @@
36#include <asm/traps.h> 36#include <asm/traps.h>
37#include <asm/cputype.h> 37#include <asm/cputype.h>
38#include <asm/system_misc.h> 38#include <asm/system_misc.h>
39#include <asm/uaccess.h>
39 40
40/* Breakpoint currently in use for each BRP. */ 41/* Breakpoint currently in use for each BRP. */
41static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); 42static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -317,9 +318,21 @@ static int get_hbp_len(u8 hbp_len)
317 case ARM_BREAKPOINT_LEN_2: 318 case ARM_BREAKPOINT_LEN_2:
318 len_in_bytes = 2; 319 len_in_bytes = 2;
319 break; 320 break;
321 case ARM_BREAKPOINT_LEN_3:
322 len_in_bytes = 3;
323 break;
320 case ARM_BREAKPOINT_LEN_4: 324 case ARM_BREAKPOINT_LEN_4:
321 len_in_bytes = 4; 325 len_in_bytes = 4;
322 break; 326 break;
327 case ARM_BREAKPOINT_LEN_5:
328 len_in_bytes = 5;
329 break;
330 case ARM_BREAKPOINT_LEN_6:
331 len_in_bytes = 6;
332 break;
333 case ARM_BREAKPOINT_LEN_7:
334 len_in_bytes = 7;
335 break;
323 case ARM_BREAKPOINT_LEN_8: 336 case ARM_BREAKPOINT_LEN_8:
324 len_in_bytes = 8; 337 len_in_bytes = 8;
325 break; 338 break;
@@ -349,7 +362,7 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
349 * to generic breakpoint descriptions. 362 * to generic breakpoint descriptions.
350 */ 363 */
351int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, 364int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
352 int *gen_len, int *gen_type) 365 int *gen_len, int *gen_type, int *offset)
353{ 366{
354 /* Type */ 367 /* Type */
355 switch (ctrl.type) { 368 switch (ctrl.type) {
@@ -369,17 +382,33 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
369 return -EINVAL; 382 return -EINVAL;
370 } 383 }
371 384
385 if (!ctrl.len)
386 return -EINVAL;
387 *offset = __ffs(ctrl.len);
388
372 /* Len */ 389 /* Len */
373 switch (ctrl.len) { 390 switch (ctrl.len >> *offset) {
374 case ARM_BREAKPOINT_LEN_1: 391 case ARM_BREAKPOINT_LEN_1:
375 *gen_len = HW_BREAKPOINT_LEN_1; 392 *gen_len = HW_BREAKPOINT_LEN_1;
376 break; 393 break;
377 case ARM_BREAKPOINT_LEN_2: 394 case ARM_BREAKPOINT_LEN_2:
378 *gen_len = HW_BREAKPOINT_LEN_2; 395 *gen_len = HW_BREAKPOINT_LEN_2;
379 break; 396 break;
397 case ARM_BREAKPOINT_LEN_3:
398 *gen_len = HW_BREAKPOINT_LEN_3;
399 break;
380 case ARM_BREAKPOINT_LEN_4: 400 case ARM_BREAKPOINT_LEN_4:
381 *gen_len = HW_BREAKPOINT_LEN_4; 401 *gen_len = HW_BREAKPOINT_LEN_4;
382 break; 402 break;
403 case ARM_BREAKPOINT_LEN_5:
404 *gen_len = HW_BREAKPOINT_LEN_5;
405 break;
406 case ARM_BREAKPOINT_LEN_6:
407 *gen_len = HW_BREAKPOINT_LEN_6;
408 break;
409 case ARM_BREAKPOINT_LEN_7:
410 *gen_len = HW_BREAKPOINT_LEN_7;
411 break;
383 case ARM_BREAKPOINT_LEN_8: 412 case ARM_BREAKPOINT_LEN_8:
384 *gen_len = HW_BREAKPOINT_LEN_8; 413 *gen_len = HW_BREAKPOINT_LEN_8;
385 break; 414 break;
@@ -423,9 +452,21 @@ static int arch_build_bp_info(struct perf_event *bp)
423 case HW_BREAKPOINT_LEN_2: 452 case HW_BREAKPOINT_LEN_2:
424 info->ctrl.len = ARM_BREAKPOINT_LEN_2; 453 info->ctrl.len = ARM_BREAKPOINT_LEN_2;
425 break; 454 break;
455 case HW_BREAKPOINT_LEN_3:
456 info->ctrl.len = ARM_BREAKPOINT_LEN_3;
457 break;
426 case HW_BREAKPOINT_LEN_4: 458 case HW_BREAKPOINT_LEN_4:
427 info->ctrl.len = ARM_BREAKPOINT_LEN_4; 459 info->ctrl.len = ARM_BREAKPOINT_LEN_4;
428 break; 460 break;
461 case HW_BREAKPOINT_LEN_5:
462 info->ctrl.len = ARM_BREAKPOINT_LEN_5;
463 break;
464 case HW_BREAKPOINT_LEN_6:
465 info->ctrl.len = ARM_BREAKPOINT_LEN_6;
466 break;
467 case HW_BREAKPOINT_LEN_7:
468 info->ctrl.len = ARM_BREAKPOINT_LEN_7;
469 break;
429 case HW_BREAKPOINT_LEN_8: 470 case HW_BREAKPOINT_LEN_8:
430 info->ctrl.len = ARM_BREAKPOINT_LEN_8; 471 info->ctrl.len = ARM_BREAKPOINT_LEN_8;
431 break; 472 break;
@@ -517,18 +558,17 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
517 default: 558 default:
518 return -EINVAL; 559 return -EINVAL;
519 } 560 }
520
521 info->address &= ~alignment_mask;
522 info->ctrl.len <<= offset;
523 } else { 561 } else {
524 if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) 562 if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
525 alignment_mask = 0x3; 563 alignment_mask = 0x3;
526 else 564 else
527 alignment_mask = 0x7; 565 alignment_mask = 0x7;
528 if (info->address & alignment_mask) 566 offset = info->address & alignment_mask;
529 return -EINVAL;
530 } 567 }
531 568
569 info->address &= ~alignment_mask;
570 info->ctrl.len <<= offset;
571
532 /* 572 /*
533 * Disallow per-task kernel breakpoints since these would 573 * Disallow per-task kernel breakpoints since these would
534 * complicate the stepping code. 574 * complicate the stepping code.
@@ -661,12 +701,47 @@ unlock:
661} 701}
662NOKPROBE_SYMBOL(breakpoint_handler); 702NOKPROBE_SYMBOL(breakpoint_handler);
663 703
704/*
705 * Arm64 hardware does not always report a watchpoint hit address that matches
706 * one of the watchpoints set. It can also report an address "near" the
 707 * watchpoint if a single instruction accesses both watched and unwatched
 708 * addresses. There is no straightforward way, short of disassembling the
 709 * offending instruction, to map that address back to the watchpoint. This
 710 * function computes the distance of the memory access from the watchpoint as a
 711 * heuristic for the likelihood that a given access triggered the watchpoint.
 712 *
 713 * See Section D2.10.5 "Determining the memory location that caused a Watchpoint
 714 * exception" of the ARMv8 Architecture Reference Manual for details.
715 *
716 * The function returns the distance of the address from the bytes watched by
717 * the watchpoint. In case of an exact match, it returns 0.
718 */
719static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
720 struct arch_hw_breakpoint_ctrl *ctrl)
721{
722 u64 wp_low, wp_high;
723 u32 lens, lene;
724
725 lens = __ffs(ctrl->len);
726 lene = __fls(ctrl->len);
727
728 wp_low = val + lens;
729 wp_high = val + lene;
730 if (addr < wp_low)
731 return wp_low - addr;
732 else if (addr > wp_high)
733 return addr - wp_high;
734 else
735 return 0;
736}
737
664static int watchpoint_handler(unsigned long addr, unsigned int esr, 738static int watchpoint_handler(unsigned long addr, unsigned int esr,
665 struct pt_regs *regs) 739 struct pt_regs *regs)
666{ 740{
667 int i, step = 0, *kernel_step, access; 741 int i, step = 0, *kernel_step, access, closest_match = 0;
742 u64 min_dist = -1, dist;
668 u32 ctrl_reg; 743 u32 ctrl_reg;
669 u64 val, alignment_mask; 744 u64 val;
670 struct perf_event *wp, **slots; 745 struct perf_event *wp, **slots;
671 struct debug_info *debug_info; 746 struct debug_info *debug_info;
672 struct arch_hw_breakpoint *info; 747 struct arch_hw_breakpoint *info;
@@ -675,35 +750,15 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
675 slots = this_cpu_ptr(wp_on_reg); 750 slots = this_cpu_ptr(wp_on_reg);
676 debug_info = &current->thread.debug; 751 debug_info = &current->thread.debug;
677 752
753 /*
 754 * Find all watchpoints that match the reported address. If no exact
 755 * match is found, attribute the hit to the closest watchpoint.
756 */
757 rcu_read_lock();
678 for (i = 0; i < core_num_wrps; ++i) { 758 for (i = 0; i < core_num_wrps; ++i) {
679 rcu_read_lock();
680
681 wp = slots[i]; 759 wp = slots[i];
682
683 if (wp == NULL) 760 if (wp == NULL)
684 goto unlock; 761 continue;
685
686 info = counter_arch_bp(wp);
687 /* AArch32 watchpoints are either 4 or 8 bytes aligned. */
688 if (is_compat_task()) {
689 if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
690 alignment_mask = 0x7;
691 else
692 alignment_mask = 0x3;
693 } else {
694 alignment_mask = 0x7;
695 }
696
697 /* Check if the watchpoint value matches. */
698 val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
699 if (val != (addr & ~alignment_mask))
700 goto unlock;
701
702 /* Possible match, check the byte address select to confirm. */
703 ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
704 decode_ctrl_reg(ctrl_reg, &ctrl);
705 if (!((1 << (addr & alignment_mask)) & ctrl.len))
706 goto unlock;
707 762
708 /* 763 /*
709 * Check that the access type matches. 764 * Check that the access type matches.
@@ -712,18 +767,41 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
712 access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W : 767 access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
713 HW_BREAKPOINT_R; 768 HW_BREAKPOINT_R;
714 if (!(access & hw_breakpoint_type(wp))) 769 if (!(access & hw_breakpoint_type(wp)))
715 goto unlock; 770 continue;
716 771
772 /* Check if the watchpoint value and byte select match. */
773 val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
774 ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
775 decode_ctrl_reg(ctrl_reg, &ctrl);
776 dist = get_distance_from_watchpoint(addr, val, &ctrl);
777 if (dist < min_dist) {
778 min_dist = dist;
779 closest_match = i;
780 }
781 /* Is this an exact match? */
782 if (dist != 0)
783 continue;
784
785 info = counter_arch_bp(wp);
717 info->trigger = addr; 786 info->trigger = addr;
718 perf_bp_event(wp, regs); 787 perf_bp_event(wp, regs);
719 788
720 /* Do we need to handle the stepping? */ 789 /* Do we need to handle the stepping? */
721 if (!wp->overflow_handler) 790 if (!wp->overflow_handler)
722 step = 1; 791 step = 1;
792 }
793 if (min_dist > 0 && min_dist != -1) {
794 /* No exact match found. */
795 wp = slots[closest_match];
796 info = counter_arch_bp(wp);
797 info->trigger = addr;
798 perf_bp_event(wp, regs);
723 799
724unlock: 800 /* Do we need to handle the stepping? */
725 rcu_read_unlock(); 801 if (!wp->overflow_handler)
802 step = 1;
726 } 803 }
804 rcu_read_unlock();
727 805
728 if (!step) 806 if (!step)
729 return 0; 807 return 0;
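Two conventions drive the new watchpoint code: ctrl.len is a byte-address-select bitmask relative to the doubleword-aligned WVR value, so an unaligned watchpoint is encoded by shifting the length mask by the byte offset, and get_distance_from_watchpoint() turns that mask into a closest-match metric. A small userspace model under those assumptions; the mask values mirror the ARM_BREAKPOINT_LEN_* encoding (LEN_3 == 0x7, i.e. three consecutive byte-select bits):

/* Model of the byte-select encoding and the distance heuristic. */
#include <stdio.h>

static unsigned long distance(unsigned long addr, unsigned long val,
			      unsigned int len_mask)
{
	unsigned int lens = __builtin_ctz(len_mask);		/* first watched byte */
	unsigned int lene = 31 - __builtin_clz(len_mask);	/* last watched byte */
	unsigned long wp_low = val + lens;
	unsigned long wp_high = val + lene;

	if (addr < wp_low)
		return wp_low - addr;
	if (addr > wp_high)
		return addr - wp_high;
	return 0;
}

int main(void)
{
	/* 3-byte watchpoint on bytes 5..7 of the doubleword at 0x1000:
	 * ctrl.len = ARM_BREAKPOINT_LEN_3 << 5 = 0xe0. */
	unsigned int len_mask = 0x7 << 5;

	printf("%lu\n", distance(0x1000, 0x1000, len_mask));	/* 5: below */
	printf("%lu\n", distance(0x1006, 0x1000, len_mask));	/* 0: exact */
	printf("%lu\n", distance(0x100a, 0x1000, len_mask));	/* 3: above */
	return 0;
}

A 3-byte watchpoint at 0x1005 thus watches bytes 5-7 of the doubleword at 0x1000, and an access reported at 0x1000 scores a distance of 5.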
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 8727f4490772..d3b5f75e652e 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -71,8 +71,16 @@ el1_sync:
71 msr vbar_el2, x1 71 msr vbar_el2, x1
72 b 9f 72 b 9f
73 73
742: cmp x0, #HVC_SOFT_RESTART
75 b.ne 3f
76 mov x0, x2
77 mov x2, x4
78 mov x4, x1
79 mov x1, x3
80 br x4 // no return
81
74 /* Someone called kvm_call_hyp() against the hyp-stub... */ 82 /* Someone called kvm_call_hyp() against the hyp-stub... */
752: mov x0, #ARM_EXCEPTION_HYP_GONE 833: mov x0, #ARM_EXCEPTION_HYP_GONE
76 84
779: eret 859: eret
78ENDPROC(el1_sync) 86ENDPROC(el1_sync)
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
new file mode 100644
index 000000000000..481f54a866c5
--- /dev/null
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -0,0 +1,364 @@
1/*
2 * kexec for arm64
3 *
4 * Copyright (C) Linaro.
5 * Copyright (C) Huawei Futurewei Technologies.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/interrupt.h>
13#include <linux/irq.h>
14#include <linux/kernel.h>
15#include <linux/kexec.h>
16#include <linux/page-flags.h>
17#include <linux/smp.h>
18
19#include <asm/cacheflush.h>
20#include <asm/cpu_ops.h>
21#include <asm/memory.h>
22#include <asm/mmu.h>
23#include <asm/mmu_context.h>
24#include <asm/page.h>
25
26#include "cpu-reset.h"
27
28/* Global variables for the arm64_relocate_new_kernel routine. */
29extern const unsigned char arm64_relocate_new_kernel[];
30extern const unsigned long arm64_relocate_new_kernel_size;
31
32/**
33 * kexec_image_info - For debugging output.
34 */
35#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
36static void _kexec_image_info(const char *func, int line,
37 const struct kimage *kimage)
38{
39 unsigned long i;
40
41 pr_debug("%s:%d:\n", func, line);
42 pr_debug(" kexec kimage info:\n");
43 pr_debug(" type: %d\n", kimage->type);
44 pr_debug(" start: %lx\n", kimage->start);
45 pr_debug(" head: %lx\n", kimage->head);
46 pr_debug(" nr_segments: %lu\n", kimage->nr_segments);
47
48 for (i = 0; i < kimage->nr_segments; i++) {
49 pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
50 i,
51 kimage->segment[i].mem,
52 kimage->segment[i].mem + kimage->segment[i].memsz,
53 kimage->segment[i].memsz,
54 kimage->segment[i].memsz / PAGE_SIZE);
55 }
56}
57
58void machine_kexec_cleanup(struct kimage *kimage)
59{
60 /* Empty routine needed to avoid build errors. */
61}
62
63/**
64 * machine_kexec_prepare - Prepare for a kexec reboot.
65 *
66 * Called from the core kexec code when a kernel image is loaded.
67 * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
68 * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
69 */
70int machine_kexec_prepare(struct kimage *kimage)
71{
72 kexec_image_info(kimage);
73
74 if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
75 pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
76 return -EBUSY;
77 }
78
79 return 0;
80}
81
82/**
83 * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
84 */
85static void kexec_list_flush(struct kimage *kimage)
86{
87 kimage_entry_t *entry;
88
89 for (entry = &kimage->head; ; entry++) {
90 unsigned int flag;
91 void *addr;
92
93 /* flush the list entries. */
94 __flush_dcache_area(entry, sizeof(kimage_entry_t));
95
96 flag = *entry & IND_FLAGS;
97 if (flag == IND_DONE)
98 break;
99
100 addr = phys_to_virt(*entry & PAGE_MASK);
101
102 switch (flag) {
103 case IND_INDIRECTION:
104 /* Set entry point just before the new list page. */
105 entry = (kimage_entry_t *)addr - 1;
106 break;
107 case IND_SOURCE:
108 /* flush the source pages. */
109 __flush_dcache_area(addr, PAGE_SIZE);
110 break;
111 case IND_DESTINATION:
112 break;
113 default:
114 BUG();
115 }
116 }
117}
118
119/**
120 * kexec_segment_flush - Helper to flush the kimage segments to PoC.
121 */
122static void kexec_segment_flush(const struct kimage *kimage)
123{
124 unsigned long i;
125
126 pr_debug("%s:\n", __func__);
127
128 for (i = 0; i < kimage->nr_segments; i++) {
129 pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
130 i,
131 kimage->segment[i].mem,
132 kimage->segment[i].mem + kimage->segment[i].memsz,
133 kimage->segment[i].memsz,
134 kimage->segment[i].memsz / PAGE_SIZE);
135
136 __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
137 kimage->segment[i].memsz);
138 }
139}
140
141/**
142 * machine_kexec - Do the kexec reboot.
143 *
144 * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
145 */
146void machine_kexec(struct kimage *kimage)
147{
148 phys_addr_t reboot_code_buffer_phys;
149 void *reboot_code_buffer;
150 bool in_kexec_crash = (kimage == kexec_crash_image);
151 bool stuck_cpus = cpus_are_stuck_in_kernel();
152
153 /*
154 * New cpus may have become stuck_in_kernel after we loaded the image.
155 */
156 BUG_ON(!in_kexec_crash && (stuck_cpus || (num_online_cpus() > 1)));
157 WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
158 "Some CPUs may be stale, kdump will be unreliable.\n");
159
160 reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
161 reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
162
163 kexec_image_info(kimage);
164
165 pr_debug("%s:%d: control_code_page: %p\n", __func__, __LINE__,
166 kimage->control_code_page);
167 pr_debug("%s:%d: reboot_code_buffer_phys: %pa\n", __func__, __LINE__,
168 &reboot_code_buffer_phys);
169 pr_debug("%s:%d: reboot_code_buffer: %p\n", __func__, __LINE__,
170 reboot_code_buffer);
171 pr_debug("%s:%d: relocate_new_kernel: %p\n", __func__, __LINE__,
172 arm64_relocate_new_kernel);
173 pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
174 __func__, __LINE__, arm64_relocate_new_kernel_size,
175 arm64_relocate_new_kernel_size);
176
177 /*
178 * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
179 * after the kernel is shut down.
180 */
181 memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
182 arm64_relocate_new_kernel_size);
183
184 /* Flush the reboot_code_buffer in preparation for its execution. */
185 __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
186 flush_icache_range((uintptr_t)reboot_code_buffer,
187 arm64_relocate_new_kernel_size);
188
189 /* Flush the kimage list and its buffers. */
190 kexec_list_flush(kimage);
191
192 /* Flush the new image if already in place. */
193 if ((kimage != kexec_crash_image) && (kimage->head & IND_DONE))
194 kexec_segment_flush(kimage);
195
196 pr_info("Bye!\n");
197
198 /* Disable all DAIF exceptions. */
199 asm volatile ("msr daifset, #0xf" : : : "memory");
200
201 /*
202 * cpu_soft_restart will shutdown the MMU, disable data caches, then
203 * transfer control to the reboot_code_buffer which contains a copy of
204 * the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel
205 * uses physical addressing to relocate the new image to its final
206 * position and transfers control to the image entry point when the
207 * relocation is complete.
208 */
209
210 cpu_soft_restart(kimage != kexec_crash_image,
211 reboot_code_buffer_phys, kimage->head, kimage->start, 0);
212
213 BUG(); /* Should never get here. */
214}
215
216static void machine_kexec_mask_interrupts(void)
217{
218 unsigned int i;
219 struct irq_desc *desc;
220
221 for_each_irq_desc(i, desc) {
222 struct irq_chip *chip;
223 int ret;
224
225 chip = irq_desc_get_chip(desc);
226 if (!chip)
227 continue;
228
229 /*
230 * First try to remove the active state. If this
231 * fails, try to EOI the interrupt.
232 */
233 ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
234
235 if (ret && irqd_irq_inprogress(&desc->irq_data) &&
236 chip->irq_eoi)
237 chip->irq_eoi(&desc->irq_data);
238
239 if (chip->irq_mask)
240 chip->irq_mask(&desc->irq_data);
241
242 if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
243 chip->irq_disable(&desc->irq_data);
244 }
245}
246
247/**
248 * machine_crash_shutdown - shutdown non-crashing cpus and save registers
249 */
250void machine_crash_shutdown(struct pt_regs *regs)
251{
252 local_irq_disable();
253
254 /* shutdown non-crashing cpus */
255 smp_send_crash_stop();
256
257 /* for crashing cpu */
258 crash_save_cpu(regs, smp_processor_id());
259 machine_kexec_mask_interrupts();
260
261 pr_info("Starting crashdump kernel...\n");
262}
263
264void arch_kexec_protect_crashkres(void)
265{
266 int i;
267
268 kexec_segment_flush(kexec_crash_image);
269
270 for (i = 0; i < kexec_crash_image->nr_segments; i++)
271 set_memory_valid(
272 __phys_to_virt(kexec_crash_image->segment[i].mem),
273 kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
274}
275
276void arch_kexec_unprotect_crashkres(void)
277{
278 int i;
279
280 for (i = 0; i < kexec_crash_image->nr_segments; i++)
281 set_memory_valid(
282 __phys_to_virt(kexec_crash_image->segment[i].mem),
283 kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
284}
285
286#ifdef CONFIG_HIBERNATION
287/*
288 * To preserve the crash dump kernel image, the relevant memory segments
 289 * should be remapped around the hibernate/resume cycle.
290 */
291void crash_prepare_suspend(void)
292{
293 if (kexec_crash_image)
294 arch_kexec_unprotect_crashkres();
295}
296
297void crash_post_resume(void)
298{
299 if (kexec_crash_image)
300 arch_kexec_protect_crashkres();
301}
302
303/*
304 * crash_is_nosave
305 *
 306 * Return true only if a page is part of the memory reserved for the crash
 307 * dump kernel but does not hold any data of the loaded kernel image.
308 *
309 * Note that all the pages in crash dump kernel memory have been initially
310 * marked as Reserved in kexec_reserve_crashkres_pages().
311 *
312 * In hibernation, the pages which are Reserved and yet "nosave" are excluded
 313 * from the hibernation image. crash_is_nosave() does this check for the crash
 314 * dump kernel memory and thereby reduces the total size of the hibernation image.
315 */
316
317bool crash_is_nosave(unsigned long pfn)
318{
319 int i;
320 phys_addr_t addr;
321
322 if (!crashk_res.end)
323 return false;
324
325 /* in reserved memory? */
326 addr = __pfn_to_phys(pfn);
327 if ((addr < crashk_res.start) || (crashk_res.end < addr))
328 return false;
329
330 if (!kexec_crash_image)
331 return true;
332
333 /* not part of loaded kernel image? */
334 for (i = 0; i < kexec_crash_image->nr_segments; i++)
335 if (addr >= kexec_crash_image->segment[i].mem &&
336 addr < (kexec_crash_image->segment[i].mem +
337 kexec_crash_image->segment[i].memsz))
338 return false;
339
340 return true;
341}
342
343void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
344{
345 unsigned long addr;
346 struct page *page;
347
348 for (addr = begin; addr < end; addr += PAGE_SIZE) {
349 page = phys_to_page(addr);
350 ClearPageReserved(page);
351 free_reserved_page(page);
352 }
353}
354#endif /* CONFIG_HIBERNATION */
355
356void arch_crash_save_vmcoreinfo(void)
357{
358 VMCOREINFO_NUMBER(VA_BITS);
359 /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
360 vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
361 kimage_voffset);
362 vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
363 PHYS_OFFSET);
364}
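kexec_list_flush() above walks the kimage entry list: every entry is a page-aligned physical address with IND_* flags in its low bits, and an IND_INDIRECTION entry redirects the walk to the next page of entries. A userspace model of just the walk (the flag values mirror include/linux/kexec.h; the cache flushing is the part the kernel adds at each step):

/* Walk a toy kimage entry list the way kexec_list_flush() does. */
#include <stdio.h>

#define IND_DESTINATION	0x1
#define IND_INDIRECTION	0x2
#define IND_DONE	0x4
#define IND_SOURCE	0x8
#define FLAG_MASK	0xfUL

typedef unsigned long kentry_t;

/* Two "pages" of entries; 16-byte alignment keeps the flag bits of
 * their addresses clear (the kernel uses real page alignment). */
static _Alignas(16) kentry_t first[2];
static _Alignas(16) kentry_t second[2];

int main(void)
{
	first[0]  = 0x80000000UL | IND_DESTINATION;
	first[1]  = (kentry_t)second | IND_INDIRECTION;
	second[0] = 0x80001000UL | IND_SOURCE;
	second[1] = IND_DONE;

	for (kentry_t *entry = first; ; entry++) {
		unsigned long flag = *entry & FLAG_MASK;

		if (flag == IND_DONE)
			break;
		if (flag == IND_INDIRECTION) {
			/* Point one slot before the new page so the loop's
			 * entry++ lands on its first entry. */
			entry = (kentry_t *)(*entry & ~FLAG_MASK) - 1;
			continue;
		}
		printf("%s page at %#lx\n",
		       flag == IND_SOURCE ? "source" : "destination",
		       *entry & ~FLAG_MASK);
	}
	return 0;
}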
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index c5ef05959813..6204b7600d1b 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -327,13 +327,13 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
327 struct arch_hw_breakpoint_ctrl ctrl, 327 struct arch_hw_breakpoint_ctrl ctrl,
328 struct perf_event_attr *attr) 328 struct perf_event_attr *attr)
329{ 329{
330 int err, len, type, disabled = !ctrl.enabled; 330 int err, len, type, offset, disabled = !ctrl.enabled;
331 331
332 attr->disabled = disabled; 332 attr->disabled = disabled;
333 if (disabled) 333 if (disabled)
334 return 0; 334 return 0;
335 335
336 err = arch_bp_generic_fields(ctrl, &len, &type); 336 err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
337 if (err) 337 if (err)
338 return err; 338 return err;
339 339
@@ -352,6 +352,7 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
352 352
353 attr->bp_len = len; 353 attr->bp_len = len;
354 attr->bp_type = type; 354 attr->bp_type = type;
355 attr->bp_addr += offset;
355 356
356 return 0; 357 return 0;
357} 358}
@@ -404,7 +405,7 @@ static int ptrace_hbp_get_addr(unsigned int note_type,
404 if (IS_ERR(bp)) 405 if (IS_ERR(bp))
405 return PTR_ERR(bp); 406 return PTR_ERR(bp);
406 407
407 *addr = bp ? bp->attr.bp_addr : 0; 408 *addr = bp ? counter_arch_bp(bp)->address : 0;
408 return 0; 409 return 0;
409} 410}
410 411
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
new file mode 100644
index 000000000000..51b73cdde287
--- /dev/null
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -0,0 +1,130 @@
1/*
2 * kexec for arm64
3 *
4 * Copyright (C) Linaro.
5 * Copyright (C) Huawei Futurewei Technologies.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kexec.h>
13#include <linux/linkage.h>
14
15#include <asm/assembler.h>
16#include <asm/kexec.h>
17#include <asm/page.h>
18#include <asm/sysreg.h>
19
20/*
21 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
22 *
 23 * The memory that the old kernel occupies may be overwritten when copying the
 24 * new image to its final location. To ensure that the
25 * arm64_relocate_new_kernel routine which does that copy is not overwritten,
26 * all code and data needed by arm64_relocate_new_kernel must be between the
27 * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The
28 * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
29 * control_code_page, a special page which has been set up to be preserved
30 * during the copy operation.
31 */
32ENTRY(arm64_relocate_new_kernel)
33
34 /* Setup the list loop variables. */
35 mov x17, x1 /* x17 = kimage_start */
36 mov x16, x0 /* x16 = kimage_head */
37 dcache_line_size x15, x0 /* x15 = dcache line size */
38 mov x14, xzr /* x14 = entry ptr */
39 mov x13, xzr /* x13 = copy dest */
40
41 /* Clear the sctlr_el2 flags. */
42 mrs x0, CurrentEL
43 cmp x0, #CurrentEL_EL2
44 b.ne 1f
45 mrs x0, sctlr_el2
46 ldr x1, =SCTLR_ELx_FLAGS
47 bic x0, x0, x1
48 msr sctlr_el2, x0
49 isb
501:
51
52 /* Check if the new image needs relocation. */
53 tbnz x16, IND_DONE_BIT, .Ldone
54
55.Lloop:
56 and x12, x16, PAGE_MASK /* x12 = addr */
57
58 /* Test the entry flags. */
59.Ltest_source:
60 tbz x16, IND_SOURCE_BIT, .Ltest_indirection
61
62 /* Invalidate dest page to PoC. */
63 mov x0, x13
64 add x20, x0, #PAGE_SIZE
65 sub x1, x15, #1
66 bic x0, x0, x1
672: dc ivac, x0
68 add x0, x0, x15
69 cmp x0, x20
70 b.lo 2b
71 dsb sy
72
73 mov x20, x13
74 mov x21, x12
75 copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7
76
77 /* dest += PAGE_SIZE */
78 add x13, x13, PAGE_SIZE
79 b .Lnext
80
81.Ltest_indirection:
82 tbz x16, IND_INDIRECTION_BIT, .Ltest_destination
83
84 /* ptr = addr */
85 mov x14, x12
86 b .Lnext
87
88.Ltest_destination:
89 tbz x16, IND_DESTINATION_BIT, .Lnext
90
91 /* dest = addr */
92 mov x13, x12
93
94.Lnext:
95 /* entry = *ptr++ */
96 ldr x16, [x14], #8
97
98 /* while (!(entry & DONE)) */
99 tbz x16, IND_DONE_BIT, .Lloop
100
101.Ldone:
102 /* wait for writes from copy_page to finish */
103 dsb nsh
104 ic iallu
105 dsb nsh
106 isb
107
108 /* Start new image. */
109 mov x0, xzr
110 mov x1, xzr
111 mov x2, xzr
112 mov x3, xzr
113 br x17
114
115ENDPROC(arm64_relocate_new_kernel)
116
117.ltorg
118
119.align 3 /* To keep the 64-bit values below naturally aligned. */
120
121.Lcopy_end:
122.org KEXEC_CONTROL_PAGE_SIZE
123
124/*
125 * arm64_relocate_new_kernel_size - Number of bytes to copy to the
126 * control_code_page.
127 */
128.globl arm64_relocate_new_kernel_size
129arm64_relocate_new_kernel_size:
130 .quad .Lcopy_end - arm64_relocate_new_kernel
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 0153c0d8ddb1..19749870c757 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -31,7 +31,6 @@
31#include <linux/screen_info.h> 31#include <linux/screen_info.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/kexec.h> 33#include <linux/kexec.h>
34#include <linux/crash_dump.h>
35#include <linux/root_dev.h> 34#include <linux/root_dev.h>
36#include <linux/cpu.h> 35#include <linux/cpu.h>
37#include <linux/interrupt.h> 36#include <linux/interrupt.h>
@@ -220,6 +219,12 @@ static void __init request_standard_resources(void)
220 if (kernel_data.start >= res->start && 219 if (kernel_data.start >= res->start &&
221 kernel_data.end <= res->end) 220 kernel_data.end <= res->end)
222 request_resource(res, &kernel_data); 221 request_resource(res, &kernel_data);
222#ifdef CONFIG_KEXEC_CORE
223 /* Userspace will find "Crash kernel" region in /proc/iomem. */
224 if (crashk_res.end && crashk_res.start >= res->start &&
225 crashk_res.end <= res->end)
226 request_resource(res, &crashk_res);
227#endif
223 } 228 }
224} 229}
225 230
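With this hunk applied and a crashkernel= reservation in place, the reserved region is published as a "Crash kernel" resource, which is how kexec tooling locates it. A minimal reader, assuming such a reservation exists on the running system:

/* Print the "Crash kernel" line(s) from /proc/iomem; run as root to
 * see real addresses (they read back as zeroes otherwise). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/iomem", "r");

	if (!f) {
		perror("fopen /proc/iomem");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "Crash kernel"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}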
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a84623d91410..a1d06fc42048 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -37,6 +37,7 @@
37#include <linux/completion.h> 37#include <linux/completion.h>
38#include <linux/of.h> 38#include <linux/of.h>
39#include <linux/irq_work.h> 39#include <linux/irq_work.h>
40#include <linux/kexec.h>
40 41
41#include <asm/alternative.h> 42#include <asm/alternative.h>
42#include <asm/atomic.h> 43#include <asm/atomic.h>
@@ -63,16 +64,29 @@
63 * where to place its SVC stack 64 * where to place its SVC stack
64 */ 65 */
65struct secondary_data secondary_data; 66struct secondary_data secondary_data;
67/* Number of CPUs which aren't online, but looping in kernel text. */
68int cpus_stuck_in_kernel;
66 69
67enum ipi_msg_type { 70enum ipi_msg_type {
68 IPI_RESCHEDULE, 71 IPI_RESCHEDULE,
69 IPI_CALL_FUNC, 72 IPI_CALL_FUNC,
70 IPI_CPU_STOP, 73 IPI_CPU_STOP,
74 IPI_CPU_CRASH_STOP,
71 IPI_TIMER, 75 IPI_TIMER,
72 IPI_IRQ_WORK, 76 IPI_IRQ_WORK,
73 IPI_WAKEUP 77 IPI_WAKEUP
74}; 78};
75 79
80#ifdef CONFIG_HOTPLUG_CPU
81static int op_cpu_kill(unsigned int cpu);
82#else
83static inline int op_cpu_kill(unsigned int cpu)
84{
85 return -ENOSYS;
86}
87#endif
88
89
76/* 90/*
77 * Boot a secondary CPU, and assign it the specified idle task. 91 * Boot a secondary CPU, and assign it the specified idle task.
78 * This also gives us the initial stack to use for this CPU. 92 * This also gives us the initial stack to use for this CPU.
@@ -90,12 +104,14 @@ static DECLARE_COMPLETION(cpu_running);
90int __cpu_up(unsigned int cpu, struct task_struct *idle) 104int __cpu_up(unsigned int cpu, struct task_struct *idle)
91{ 105{
92 int ret; 106 int ret;
107 long status;
93 108
94 /* 109 /*
95 * We need to tell the secondary core where to find its stack and the 110 * We need to tell the secondary core where to find its stack and the
96 * page tables. 111 * page tables.
97 */ 112 */
98 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; 113 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
114 update_cpu_boot_status(CPU_MMU_OFF);
99 __flush_dcache_area(&secondary_data, sizeof(secondary_data)); 115 __flush_dcache_area(&secondary_data, sizeof(secondary_data));
100 116
101 /* 117 /*
@@ -119,6 +135,32 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
119 } 135 }
120 136
121 secondary_data.stack = NULL; 137 secondary_data.stack = NULL;
138 status = READ_ONCE(secondary_data.status);
139 if (ret && status) {
140
141 if (status == CPU_MMU_OFF)
142 status = READ_ONCE(__early_cpu_boot_status);
143
144 switch (status) {
145 default:
146 pr_err("CPU%u: failed in unknown state : 0x%lx\n",
147 cpu, status);
148 break;
149 case CPU_KILL_ME:
150 if (!op_cpu_kill(cpu)) {
151 pr_crit("CPU%u: died during early boot\n", cpu);
152 break;
153 }
154 /* Fall through */
155 pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
156 case CPU_STUCK_IN_KERNEL:
157 pr_crit("CPU%u: is stuck in kernel\n", cpu);
158 cpus_stuck_in_kernel++;
159 break;
160 case CPU_PANIC_KERNEL:
161 panic("CPU%u detected unsupported configuration\n", cpu);
162 }
163 }
122 164
123 return ret; 165 return ret;
124} 166}
@@ -184,6 +226,9 @@ asmlinkage void secondary_start_kernel(void)
184 */ 226 */
185 pr_info("CPU%u: Booted secondary processor [%08x]\n", 227 pr_info("CPU%u: Booted secondary processor [%08x]\n",
186 cpu, read_cpuid_id()); 228 cpu, read_cpuid_id());
229 update_cpu_boot_status(CPU_BOOT_SUCCESS);
230 /* Make sure the status update is visible before we complete */
231 smp_wmb();
187 set_cpu_online(cpu, true); 232 set_cpu_online(cpu, true);
188 complete(&cpu_running); 233 complete(&cpu_running);
189 234
@@ -311,6 +356,30 @@ void cpu_die(void)
311} 356}
312#endif 357#endif
313 358
359/*
360 * Kill the calling secondary CPU, early in bringup before it is turned
361 * online.
362 */
363void cpu_die_early(void)
364{
365 int cpu = smp_processor_id();
366
367 pr_crit("CPU%d: will not boot\n", cpu);
368
369 /* Mark this CPU absent */
370 set_cpu_present(cpu, 0);
371
372#ifdef CONFIG_HOTPLUG_CPU
373 update_cpu_boot_status(CPU_KILL_ME);
374 /* Check if we can park ourselves */
375 if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
376 cpu_ops[cpu]->cpu_die(cpu);
377#endif
378 update_cpu_boot_status(CPU_STUCK_IN_KERNEL);
379
380 cpu_park_loop();
381}
382
314static void __init hyp_mode_check(void) 383static void __init hyp_mode_check(void)
315{ 384{
316 if (is_hyp_mode_available()) 385 if (is_hyp_mode_available())
@@ -634,6 +703,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
634 S(IPI_RESCHEDULE, "Rescheduling interrupts"), 703 S(IPI_RESCHEDULE, "Rescheduling interrupts"),
635 S(IPI_CALL_FUNC, "Function call interrupts"), 704 S(IPI_CALL_FUNC, "Function call interrupts"),
636 S(IPI_CPU_STOP, "CPU stop interrupts"), 705 S(IPI_CPU_STOP, "CPU stop interrupts"),
706 S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
637 S(IPI_TIMER, "Timer broadcast interrupts"), 707 S(IPI_TIMER, "Timer broadcast interrupts"),
638 S(IPI_IRQ_WORK, "IRQ work interrupts"), 708 S(IPI_IRQ_WORK, "IRQ work interrupts"),
639 S(IPI_WAKEUP, "CPU wake-up interrupts"), 709 S(IPI_WAKEUP, "CPU wake-up interrupts"),
@@ -718,6 +788,29 @@ static void ipi_cpu_stop(unsigned int cpu)
718 cpu_relax(); 788 cpu_relax();
719} 789}
720 790
791#ifdef CONFIG_KEXEC_CORE
792static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
793#endif
794
795static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
796{
797#ifdef CONFIG_KEXEC_CORE
798 crash_save_cpu(regs, cpu);
799
800 atomic_dec(&waiting_for_crash_ipi);
801
802 local_irq_disable();
803
804#ifdef CONFIG_HOTPLUG_CPU
805 if (cpu_ops[cpu]->cpu_die)
806 cpu_ops[cpu]->cpu_die(cpu);
807#endif
808
809 /* just in case */
810 cpu_park_loop();
811#endif
812}
813
721/* 814/*
722 * Main handler for inter-processor interrupts 815 * Main handler for inter-processor interrupts
723 */ 816 */
@@ -748,6 +841,15 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
748 irq_exit(); 841 irq_exit();
749 break; 842 break;
750 843
844 case IPI_CPU_CRASH_STOP:
845 if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
846 irq_enter();
847 ipi_cpu_crash_stop(cpu, regs);
848
849 unreachable();
850 }
851 break;
852
751#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 853#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
752 case IPI_TIMER: 854 case IPI_TIMER:
753 irq_enter(); 855 irq_enter();
@@ -816,6 +918,39 @@ void smp_send_stop(void)
816 pr_warning("SMP: failed to stop secondary CPUs\n"); 918 pr_warning("SMP: failed to stop secondary CPUs\n");
817} 919}
818 920
921#ifdef CONFIG_KEXEC_CORE
922void smp_send_crash_stop(void)
923{
924 cpumask_t mask;
925 unsigned long timeout;
926
927 if (num_online_cpus() == 1)
928 return;
929
930 cpumask_copy(&mask, cpu_online_mask);
931 cpumask_clear_cpu(smp_processor_id(), &mask);
932
933 atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
934
935 pr_crit("SMP: stopping secondary CPUs\n");
936 smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
937
938 /* Wait up to one second for other CPUs to stop */
939 timeout = USEC_PER_SEC;
940 while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
941 udelay(1);
942
943 if (atomic_read(&waiting_for_crash_ipi) > 0)
944 pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
945 cpumask_pr_args(&mask));
946}
947
948bool smp_crash_stop_failed(void)
949{
950 return (atomic_read(&waiting_for_crash_ipi) > 0);
951}
952#endif
953
819/* 954/*
820 * not supported here 955 * not supported here
821 */ 956 */
@@ -823,3 +958,21 @@ int setup_profiling_timer(unsigned int multiplier)
823{ 958{
824 return -EINVAL; 959 return -EINVAL;
825} 960}
961
962static bool have_cpu_die(void)
963{
964#ifdef CONFIG_HOTPLUG_CPU
965 int any_cpu = raw_smp_processor_id();
966
967 if (cpu_ops[any_cpu]->cpu_die)
968 return true;
969#endif
970 return false;
971}
972
973bool cpus_are_stuck_in_kernel(void)
974{
975 bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
976
977 return !!cpus_stuck_in_kernel || smp_spin_tables;
978}
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index cfd46c227c8c..a99eff9afc1f 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -43,6 +43,9 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
43 unsigned long fp = frame->fp; 43 unsigned long fp = frame->fp;
44 unsigned long irq_stack_ptr; 44 unsigned long irq_stack_ptr;
45 45
46 if (!tsk)
47 tsk = current;
48
46 /* 49 /*
47 * Switching between stacks is valid when tracing current and in 50 * Switching between stacks is valid when tracing current and in
48 * non-preemptible context. 51 * non-preemptible context.
@@ -67,7 +70,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
67 frame->pc = *(unsigned long *)(fp + 8); 70 frame->pc = *(unsigned long *)(fp + 8);
68 71
69#ifdef CONFIG_FUNCTION_GRAPH_TRACER 72#ifdef CONFIG_FUNCTION_GRAPH_TRACER
70 if (tsk && tsk->ret_stack && 73 if (tsk->ret_stack &&
71 (frame->pc == (unsigned long)return_to_handler)) { 74 (frame->pc == (unsigned long)return_to_handler)) {
72 /* 75 /*
73 * This is a case where function graph tracer has 76 * This is a case where function graph tracer has
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index f5c82c76cf7c..a1cfcaa562a2 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -149,6 +149,11 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
149 unsigned long irq_stack_ptr; 149 unsigned long irq_stack_ptr;
150 int skip; 150 int skip;
151 151
152 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
153
154 if (!tsk)
155 tsk = current;
156
152 /* 157 /*
153 * Switching between stacks is valid when tracing current and in 158 * Switching between stacks is valid when tracing current and in
154 * non-preemptible context. 159 * non-preemptible context.
@@ -158,11 +163,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
158 else 163 else
159 irq_stack_ptr = 0; 164 irq_stack_ptr = 0;
160 165
161 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
162
163 if (!tsk)
164 tsk = current;
165
166 if (tsk == current) { 166 if (tsk == current) {
167 frame.fp = (unsigned long)__builtin_frame_address(0); 167 frame.fp = (unsigned long)__builtin_frame_address(0);
168 frame.sp = current_stack_pointer; 168 frame.sp = current_stack_pointer;
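Note: stacktrace.c and traps.c get the same fix. The NULL check on tsk must run before tsk is first used; previously dump_backtrace() consulted tsk-dependent state before defaulting it to current. A tiny C illustration of the hazard, with a toy type standing in for task_struct:

    #include <stdio.h>

    struct task { int pid; };
    static struct task current_task = { 1 };
    #define current (&current_task)

    static void dump(struct task *tsk)
    {
            /* Wrong order: reading tsk->pid here would fault for NULL. */
            if (!tsk)
                    tsk = current;      /* default first, as the patch does */
            printf("pid=%d\n", tsk->pid);
    }

    int main(void)
    {
            dump(NULL);                 /* now safe: falls back to current */
            return 0;
    }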
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 97bc68f4c689..3b8acfae7797 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -55,7 +55,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
55 */ 55 */
56static struct page *vectors_page[1]; 56static struct page *vectors_page[1];
57 57
58static int alloc_vectors_page(void) 58static int __init alloc_vectors_page(void)
59{ 59{
60 extern char __kuser_helper_start[], __kuser_helper_end[]; 60 extern char __kuser_helper_start[], __kuser_helper_end[];
61 extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[]; 61 extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
@@ -88,7 +88,7 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
88{ 88{
89 struct mm_struct *mm = current->mm; 89 struct mm_struct *mm = current->mm;
90 unsigned long addr = AARCH32_VECTORS_BASE; 90 unsigned long addr = AARCH32_VECTORS_BASE;
91 static struct vm_special_mapping spec = { 91 static const struct vm_special_mapping spec = {
92 .name = "[vectors]", 92 .name = "[vectors]",
93 .pages = vectors_page, 93 .pages = vectors_page,
94 94
@@ -212,10 +212,16 @@ void update_vsyscall(struct timekeeper *tk)
212 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; 212 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
213 213
214 if (!use_syscall) { 214 if (!use_syscall) {
215 /* tkr_mono.cycle_last == tkr_raw.cycle_last */
215 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; 216 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
217 vdso_data->raw_time_sec = tk->raw_time.tv_sec;
218 vdso_data->raw_time_nsec = tk->raw_time.tv_nsec;
216 vdso_data->xtime_clock_sec = tk->xtime_sec; 219 vdso_data->xtime_clock_sec = tk->xtime_sec;
217 vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; 220 vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
218 vdso_data->cs_mult = tk->tkr_mono.mult; 221 /* tkr_raw.xtime_nsec == 0 */
222 vdso_data->cs_mono_mult = tk->tkr_mono.mult;
223 vdso_data->cs_raw_mult = tk->tkr_raw.mult;
224 /* tkr_mono.shift == tkr_raw.shift */
219 vdso_data->cs_shift = tk->tkr_mono.shift; 225 vdso_data->cs_shift = tk->tkr_mono.shift;
220 } 226 }
221 227
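Note: with raw_time_{sec,nsec} and cs_raw_mult exported to the vDSO data page, clock_gettime(CLOCK_MONOTONIC_RAW) can be served entirely in user space (see the gettimeofday.S changes below). One way to check from user space is to time a batch of calls; on kernels with this change the per-call cost should match CLOCK_MONOTONIC rather than a syscall:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec a, b, t;
            const int iters = 1000000;

            clock_gettime(CLOCK_MONOTONIC, &a);
            for (int i = 0; i < iters; i++)
                    clock_gettime(CLOCK_MONOTONIC_RAW, &t);
            clock_gettime(CLOCK_MONOTONIC, &b);

            double ns = (b.tv_sec - a.tv_sec) * 1e9 + (b.tv_nsec - a.tv_nsec);
            printf("%.1f ns per CLOCK_MONOTONIC_RAW call\n", ns / iters);
            return 0;
    }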
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index b467fd0a384b..62c84f7cb01b 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -23,7 +23,7 @@ GCOV_PROFILE := n
23ccflags-y += -Wl,-shared 23ccflags-y += -Wl,-shared
24 24
25obj-y += vdso.o 25obj-y += vdso.o
26extra-y += vdso.lds vdso-offsets.h 26extra-y += vdso.lds
27CPPFLAGS_vdso.lds += -P -C -U$(ARCH) 27CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
28 28
29# Force dependency (incbin is bad) 29# Force dependency (incbin is bad)
@@ -42,11 +42,10 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
42gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh 42gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
43quiet_cmd_vdsosym = VDSOSYM $@ 43quiet_cmd_vdsosym = VDSOSYM $@
44define cmd_vdsosym 44define cmd_vdsosym
45 $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ && \ 45 $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
46 cp $@ include/generated/
47endef 46endef
48 47
49$(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE 48include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
50 $(call if_changed,vdsosym) 49 $(call if_changed,vdsosym)
51 50
52# Assembly rules for the .S files 51# Assembly rules for the .S files
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index efa79e8d4196..e00b4671bd7c 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -26,24 +26,109 @@
26#define NSEC_PER_SEC_HI16 0x3b9a 26#define NSEC_PER_SEC_HI16 0x3b9a
27 27
28vdso_data .req x6 28vdso_data .req x6
29use_syscall .req w7 29seqcnt .req w7
30seqcnt .req w8 30w_tmp .req w8
31x_tmp .req x8
32
33/*
34 * Conventions for macro arguments:
35 * - An argument is write-only if its name starts with "res".
36 * - All other arguments are read-only, unless otherwise specified.
37 */
31 38
32 .macro seqcnt_acquire 39 .macro seqcnt_acquire
339999: ldr seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT] 409999: ldr seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
34 tbnz seqcnt, #0, 9999b 41 tbnz seqcnt, #0, 9999b
35 dmb ishld 42 dmb ishld
36 ldr use_syscall, [vdso_data, #VDSO_USE_SYSCALL]
37 .endm 43 .endm
38 44
39 .macro seqcnt_read, cnt 45 .macro seqcnt_check fail
40 dmb ishld 46 dmb ishld
41 ldr \cnt, [vdso_data, #VDSO_TB_SEQ_COUNT] 47 ldr w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT]
48 cmp w_tmp, seqcnt
49 b.ne \fail
42 .endm 50 .endm
43 51
44 .macro seqcnt_check, cnt, fail 52 .macro syscall_check fail
45 cmp \cnt, seqcnt 53 ldr w_tmp, [vdso_data, #VDSO_USE_SYSCALL]
46 b.ne \fail 54 cbnz w_tmp, \fail
55 .endm
56
57 .macro get_nsec_per_sec res
58 mov \res, #NSEC_PER_SEC_LO16
59 movk \res, #NSEC_PER_SEC_HI16, lsl #16
60 .endm
61
62 /*
63 * Returns the clock delta, in nanoseconds left-shifted by the clock
64 * shift.
65 */
66 .macro get_clock_shifted_nsec res, cycle_last, mult
67 /* Read the virtual counter. */
68 isb
69 mrs x_tmp, cntvct_el0
70 /* Calculate cycle delta and convert to ns. */
71 sub \res, x_tmp, \cycle_last
72 /* We can only guarantee 56 bits of precision. */
73 movn x_tmp, #0xff00, lsl #48
74 and \res, x_tmp, \res
75 mul \res, \res, \mult
76 .endm
77
78 /*
79 * Returns in res_{sec,nsec} the REALTIME timespec, based on the
80 * "wall time" (xtime) and the clock_mono delta.
81 */
82 .macro get_ts_realtime res_sec, res_nsec, \
83 clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec
84 add \res_nsec, \clock_nsec, \xtime_nsec
85 udiv x_tmp, \res_nsec, \nsec_to_sec
86 add \res_sec, \xtime_sec, x_tmp
87 msub \res_nsec, x_tmp, \nsec_to_sec, \res_nsec
88 .endm
89
90 /*
91 * Returns in res_{sec,nsec} the timespec based on the clock_raw delta,
92 * used for CLOCK_MONOTONIC_RAW.
93 */
94 .macro get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec
95 udiv \res_sec, \clock_nsec, \nsec_to_sec
96 msub \res_nsec, \res_sec, \nsec_to_sec, \clock_nsec
97 .endm
98
99 /* sec and nsec are modified in place. */
100 .macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec
101 /* Add timespec. */
102 add \sec, \sec, \ts_sec
103 add \nsec, \nsec, \ts_nsec
104
105 /* Normalise the new timespec. */
106 cmp \nsec, \nsec_to_sec
107 b.lt 9999f
108 sub \nsec, \nsec, \nsec_to_sec
109 add \sec, \sec, #1
1109999:
111 cmp \nsec, #0
112 b.ge 9998f
113 add \nsec, \nsec, \nsec_to_sec
114 sub \sec, \sec, #1
1159998:
116 .endm
117
118 .macro clock_gettime_return, shift=0
119 .if \shift == 1
120 lsr x11, x11, x12
121 .endif
122 stp x10, x11, [x1, #TSPEC_TV_SEC]
123 mov x0, xzr
124 ret
125 .endm
126
127 .macro jump_slot jumptable, index, label
128 .if (. - \jumptable) != 4 * (\index)
129 .error "Jump slot index mismatch"
130 .endif
131 b \label
47 .endm 132 .endm
48 133
49 .text 134 .text
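Note: all the computation macros work on nanoseconds left-shifted by cs_shift, so the multiply result from get_clock_shifted_nsec combines directly with tkr_mono.xtime_nsec, which the kernel already stores pre-shifted. A C model of the realtime path under those assumptions, with made-up sample values in place of a real vdso_data page:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
            /* Sample vdso_data fields (made up, not read from a kernel). */
            uint64_t cycle_last = 1000, cnt = 5000;   /* cntvct_el0 values */
            uint64_t mult = 87;                       /* cs_mono_mult */
            unsigned shift = 24;                      /* cs_shift */
            uint64_t xtime_sec = 1503950000;
            uint64_t xtime_nsec = 12345ULL << shift;  /* stored pre-shifted */

            /* get_clock_shifted_nsec: 56-bit delta, scaled by mult. */
            uint64_t mask = (1ULL << 56) - 1;
            uint64_t snsec = ((cnt - cycle_last) & mask) * mult;

            /* get_ts_realtime: split shifted nanoseconds into sec/nsec. */
            uint64_t ns_to_sec = NSEC_PER_SEC << shift;
            uint64_t total = snsec + xtime_nsec;
            uint64_t sec = xtime_sec + total / ns_to_sec;
            uint64_t nsec = (total % ns_to_sec) >> shift; /* final lsr by shift */

            printf("%llu.%09llu\n", (unsigned long long)sec,
                   (unsigned long long)nsec);
            return 0;
    }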
@@ -51,18 +136,25 @@ seqcnt .req w8
51/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */ 136/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
52ENTRY(__kernel_gettimeofday) 137ENTRY(__kernel_gettimeofday)
53 .cfi_startproc 138 .cfi_startproc
54 mov x2, x30
55 .cfi_register x30, x2
56
57 /* Acquire the sequence counter and get the timespec. */
58 adr vdso_data, _vdso_data 139 adr vdso_data, _vdso_data
591: seqcnt_acquire
60 cbnz use_syscall, 4f
61
62 /* If tv is NULL, skip to the timezone code. */ 140 /* If tv is NULL, skip to the timezone code. */
63 cbz x0, 2f 141 cbz x0, 2f
64 bl __do_get_tspec 142
65 seqcnt_check w9, 1b 143 /* Compute the time of day. */
1441: seqcnt_acquire
145 syscall_check fail=4f
146 ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
147 /* w11 = cs_mono_mult, w12 = cs_shift */
148 ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
149 ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
150 seqcnt_check fail=1b
151
152 get_nsec_per_sec res=x9
153 lsl x9, x9, x12
154
155 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
156 get_ts_realtime res_sec=x10, res_nsec=x11, \
157 clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
66 158
67 /* Convert ns to us. */ 159 /* Convert ns to us. */
68 mov x13, #1000 160 mov x13, #1000
@@ -76,95 +168,126 @@ ENTRY(__kernel_gettimeofday)
76 stp w4, w5, [x1, #TZ_MINWEST] 168 stp w4, w5, [x1, #TZ_MINWEST]
773: 1693:
78 mov x0, xzr 170 mov x0, xzr
79 ret x2 171 ret
804: 1724:
81 /* Syscall fallback. */ 173 /* Syscall fallback. */
82 mov x8, #__NR_gettimeofday 174 mov x8, #__NR_gettimeofday
83 svc #0 175 svc #0
84 ret x2 176 ret
85 .cfi_endproc 177 .cfi_endproc
86ENDPROC(__kernel_gettimeofday) 178ENDPROC(__kernel_gettimeofday)
87 179
180#define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE
181
88/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */ 182/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
89ENTRY(__kernel_clock_gettime) 183ENTRY(__kernel_clock_gettime)
90 .cfi_startproc 184 .cfi_startproc
91 cmp w0, #CLOCK_REALTIME 185 cmp w0, #JUMPSLOT_MAX
92 ccmp w0, #CLOCK_MONOTONIC, #0x4, ne 186 b.hi syscall
93 b.ne 2f 187 adr vdso_data, _vdso_data
188 adr x_tmp, jumptable
189 add x_tmp, x_tmp, w0, uxtw #2
190 br x_tmp
191
192 ALIGN
193jumptable:
194 jump_slot jumptable, CLOCK_REALTIME, realtime
195 jump_slot jumptable, CLOCK_MONOTONIC, monotonic
196 b syscall
197 b syscall
198 jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw
199 jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse
200 jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse
201
202 .if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1)
203 .error "Wrong jumptable size"
204 .endif
205
206 ALIGN
207realtime:
208 seqcnt_acquire
209 syscall_check fail=syscall
210 ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
211 /* w11 = cs_mono_mult, w12 = cs_shift */
212 ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
213 ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
214 seqcnt_check fail=realtime
94 215
95 mov x2, x30 216 /* All computations are done with left-shifted nsecs. */
96 .cfi_register x30, x2 217 get_nsec_per_sec res=x9
218 lsl x9, x9, x12
97 219
98 /* Get kernel timespec. */ 220 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
99 adr vdso_data, _vdso_data 221 get_ts_realtime res_sec=x10, res_nsec=x11, \
1001: seqcnt_acquire 222 clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
101 cbnz use_syscall, 7f 223 clock_gettime_return, shift=1
102 224
103 bl __do_get_tspec 225 ALIGN
104 seqcnt_check w9, 1b 226monotonic:
227 seqcnt_acquire
228 syscall_check fail=syscall
229 ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
230 /* w11 = cs_mono_mult, w12 = cs_shift */
231 ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
232 ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
233 ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
234 seqcnt_check fail=monotonic
105 235
106 mov x30, x2 236 /* All computations are done with left-shifted nsecs. */
237 lsl x4, x4, x12
238 get_nsec_per_sec res=x9
239 lsl x9, x9, x12
107 240
108 cmp w0, #CLOCK_MONOTONIC 241 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
109 b.ne 6f 242 get_ts_realtime res_sec=x10, res_nsec=x11, \
243 clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
110 244
111 /* Get wtm timespec. */ 245 add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9
112 ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC] 246 clock_gettime_return, shift=1
113 247
114 /* Check the sequence counter. */ 248 ALIGN
115 seqcnt_read w9 249monotonic_raw:
116 seqcnt_check w9, 1b 250 seqcnt_acquire
117 b 4f 251 syscall_check fail=syscall
1182: 252 ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
119 cmp w0, #CLOCK_REALTIME_COARSE 253 /* w11 = cs_raw_mult, w12 = cs_shift */
120 ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne 254 ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT]
121 b.ne 8f 255 ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
256 seqcnt_check fail=monotonic_raw
122 257
123 /* xtime_coarse_nsec is already right-shifted */ 258 /* All computations are done with left-shifted nsecs. */
124 mov x12, #0 259 lsl x14, x14, x12
260 get_nsec_per_sec res=x9
261 lsl x9, x9, x12
125 262
126 /* Get coarse timespec. */ 263 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
127 adr vdso_data, _vdso_data 264 get_ts_clock_raw res_sec=x10, res_nsec=x11, \
1283: seqcnt_acquire 265 clock_nsec=x15, nsec_to_sec=x9
266
267 add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
268 clock_gettime_return, shift=1
269
270 ALIGN
271realtime_coarse:
272 seqcnt_acquire
129 ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC] 273 ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
274 seqcnt_check fail=realtime_coarse
275 clock_gettime_return
130 276
131 /* Get wtm timespec. */ 277 ALIGN
278monotonic_coarse:
279 seqcnt_acquire
280 ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
132 ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC] 281 ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
282 seqcnt_check fail=monotonic_coarse
133 283
134 /* Check the sequence counter. */ 284 /* Computations are done in (non-shifted) nsecs. */
135 seqcnt_read w9 285 get_nsec_per_sec res=x9
136 seqcnt_check w9, 3b 286 add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
287 clock_gettime_return
137 288
138 cmp w0, #CLOCK_MONOTONIC_COARSE 289 ALIGN
139 b.ne 6f 290syscall: /* Syscall fallback. */
1404:
141 /* Add on wtm timespec. */
142 add x10, x10, x13
143 lsl x14, x14, x12
144 add x11, x11, x14
145
146 /* Normalise the new timespec. */
147 mov x15, #NSEC_PER_SEC_LO16
148 movk x15, #NSEC_PER_SEC_HI16, lsl #16
149 lsl x15, x15, x12
150 cmp x11, x15
151 b.lt 5f
152 sub x11, x11, x15
153 add x10, x10, #1
1545:
155 cmp x11, #0
156 b.ge 6f
157 add x11, x11, x15
158 sub x10, x10, #1
159
1606: /* Store to the user timespec. */
161 lsr x11, x11, x12
162 stp x10, x11, [x1, #TSPEC_TV_SEC]
163 mov x0, xzr
164 ret
1657:
166 mov x30, x2
1678: /* Syscall fallback. */
168 mov x8, #__NR_clock_gettime 291 mov x8, #__NR_clock_gettime
169 svc #0 292 svc #0
170 ret 293 ret
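Note: replacing the cmp/ccmp chains with a branch table makes dispatch O(1) and lets the build fail if a slot drifts: jump_slot asserts each entry sits at 4*index from the table base, and the trailing .if checks the total size. The process/thread CPU-clock slots branch straight to the syscall fallback. The same idea in C, with a static_assert standing in for the .error directives (toy code, not the kernel's):

    #include <assert.h>
    #include <stdio.h>

    enum { REALTIME, MONOTONIC, PROC_CPU, THREAD_CPU, MONO_RAW,
           REALTIME_COARSE, MONO_COARSE, JUMPSLOT_MAX = MONO_COARSE };

    static int do_realtime(void)  { puts("realtime");  return 0; }
    static int do_monotonic(void) { puts("monotonic"); return 0; }
    static int do_syscall(void)   { puts("syscall fallback"); return 0; }

    /* One slot per clock id; unhandled ids fall back to the syscall. */
    static int (*const jumptable[])(void) = {
            [REALTIME]        = do_realtime,
            [MONOTONIC]       = do_monotonic,
            [PROC_CPU]        = do_syscall,
            [THREAD_CPU]      = do_syscall,
            [MONO_RAW]        = do_monotonic,   /* stand-in handler */
            [REALTIME_COARSE] = do_realtime,
            [MONO_COARSE]     = do_monotonic,
    };
    static_assert(sizeof(jumptable) / sizeof(jumptable[0]) == JUMPSLOT_MAX + 1,
                  "Wrong jumptable size");

    static int clock_gettime_stub(unsigned id)
    {
            if (id > JUMPSLOT_MAX)          /* b.hi syscall */
                    return do_syscall();
            return jumptable[id]();
    }

    int main(void) { return clock_gettime_stub(MONOTONIC); }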
@@ -176,6 +299,7 @@ ENTRY(__kernel_clock_getres)
176 .cfi_startproc 299 .cfi_startproc
177 cmp w0, #CLOCK_REALTIME 300 cmp w0, #CLOCK_REALTIME
178 ccmp w0, #CLOCK_MONOTONIC, #0x4, ne 301 ccmp w0, #CLOCK_MONOTONIC, #0x4, ne
302 ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
179 b.ne 1f 303 b.ne 1f
180 304
181 ldr x2, 5f 305 ldr x2, 5f
@@ -203,46 +327,3 @@ ENTRY(__kernel_clock_getres)
203 .quad CLOCK_COARSE_RES 327 .quad CLOCK_COARSE_RES
204 .cfi_endproc 328 .cfi_endproc
205ENDPROC(__kernel_clock_getres) 329ENDPROC(__kernel_clock_getres)
206
207/*
208 * Read the current time from the architected counter.
209 * Expects vdso_data to be initialised.
210 * Clobbers the temporary registers (x9 - x15).
211 * Returns:
212 * - w9 = vDSO sequence counter
213 * - (x10, x11) = (ts->tv_sec, shifted ts->tv_nsec)
214 * - w12 = cs_shift
215 */
216ENTRY(__do_get_tspec)
217 .cfi_startproc
218
219 /* Read from the vDSO data page. */
220 ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
221 ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
222 ldp w11, w12, [vdso_data, #VDSO_CS_MULT]
223 seqcnt_read w9
224
225 /* Read the virtual counter. */
226 isb
227 mrs x15, cntvct_el0
228
229 /* Calculate cycle delta and convert to ns. */
230 sub x10, x15, x10
231 /* We can only guarantee 56 bits of precision. */
232 movn x15, #0xff00, lsl #48
233 and x10, x15, x10
234 mul x10, x10, x11
235
236 /* Use the kernel time to calculate the new timespec. */
237 mov x11, #NSEC_PER_SEC_LO16
238 movk x11, #NSEC_PER_SEC_HI16, lsl #16
239 lsl x11, x11, x12
240 add x15, x10, x14
241 udiv x14, x15, x11
242 add x10, x13, x14
243 mul x13, x14, x11
244 sub x11, x15, x13
245
246 ret
247 .cfi_endproc
248ENDPROC(__do_get_tspec)
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 826032bc3945..acde4782621a 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -12,3 +12,7 @@ obj-$(CONFIG_KVM_ARM_HOST) += switch.o
12obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o 12obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
13obj-$(CONFIG_KVM_ARM_HOST) += tlb.o 13obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
14obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o 14obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
15
16GCOV_PROFILE := n
17KASAN_SANITIZE := n
18UBSAN_SANITIZE := n
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index eec3598b4184..3ff507c177a5 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1055,8 +1055,8 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1055{ 1055{
1056 struct sys_reg_params params; 1056 struct sys_reg_params params;
1057 u32 hsr = kvm_vcpu_get_hsr(vcpu); 1057 u32 hsr = kvm_vcpu_get_hsr(vcpu);
1058 int Rt = (hsr >> 5) & 0xf; 1058 int Rt = (hsr >> 5) & 0x1f;
1059 int Rt2 = (hsr >> 10) & 0xf; 1059 int Rt2 = (hsr >> 10) & 0x1f;
1060 1060
1061 params.is_aarch32 = true; 1061 params.is_aarch32 = true;
1062 params.is_32bit = false; 1062 params.is_32bit = false;
@@ -1107,7 +1107,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
1107{ 1107{
1108 struct sys_reg_params params; 1108 struct sys_reg_params params;
1109 u32 hsr = kvm_vcpu_get_hsr(vcpu); 1109 u32 hsr = kvm_vcpu_get_hsr(vcpu);
1110 int Rt = (hsr >> 5) & 0xf; 1110 int Rt = (hsr >> 5) & 0x1f;
1111 1111
1112 params.is_aarch32 = true; 1112 params.is_aarch32 = true;
1113 params.is_32bit = true; 1113 params.is_32bit = true;
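Note: the Rt/Rt2 fields in the HSR ISS encoding for trapped AArch32 accesses are five bits wide; masking with 0xf dropped the top bit, so traps naming registers 16 and above were decoded as registers 0-15 and the wrong vcpu register was read or written. A two-line C check of the difference:

    #include <stdio.h>

    int main(void)
    {
            unsigned hsr = 18u << 5;        /* trap naming register 18 */
            printf("old: Rt=%u  new: Rt=%u\n",
                   (hsr >> 5) & 0xf,        /* decodes as 2: wrong */
                   (hsr >> 5) & 0x1f);      /* decodes as 18 */
            return 0;
    }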
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 08b5f18ba604..d7150e30438a 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -17,9 +17,6 @@
17 */ 17 */
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19 19
20#include <asm/assembler.h>
21#include <asm/cpufeature.h>
22#include <asm/sysreg.h>
23#include <asm/uaccess.h> 20#include <asm/uaccess.h>
24 21
25 .text 22 .text
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 6505ec81f1da..90154f3f7f2a 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -16,10 +16,7 @@
16 16
17#include <linux/linkage.h> 17#include <linux/linkage.h>
18 18
19#include <asm/assembler.h>
20#include <asm/cache.h> 19#include <asm/cache.h>
21#include <asm/cpufeature.h>
22#include <asm/sysreg.h>
23#include <asm/uaccess.h> 20#include <asm/uaccess.h>
24 21
25/* 22/*
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 9b04ff3ab610..718b1c4e2f85 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -18,10 +18,7 @@
18 18
19#include <linux/linkage.h> 19#include <linux/linkage.h>
20 20
21#include <asm/assembler.h>
22#include <asm/cache.h> 21#include <asm/cache.h>
23#include <asm/cpufeature.h>
24#include <asm/sysreg.h>
25#include <asm/uaccess.h> 22#include <asm/uaccess.h>
26 23
27/* 24/*
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 8077e4f34d56..e99e31c9acac 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -16,10 +16,7 @@
16 16
17#include <linux/linkage.h> 17#include <linux/linkage.h>
18 18
19#include <asm/assembler.h>
20#include <asm/cache.h> 19#include <asm/cache.h>
21#include <asm/cpufeature.h>
22#include <asm/sysreg.h>
23#include <asm/uaccess.h> 20#include <asm/uaccess.h>
24 21
25/* 22/*
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 07d7352d7c38..3be2cda5dbda 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -23,6 +23,7 @@
23#include <asm/assembler.h> 23#include <asm/assembler.h>
24#include <asm/cpufeature.h> 24#include <asm/cpufeature.h>
25#include <asm/alternative.h> 25#include <asm/alternative.h>
26#include <asm/uaccess.h>
26 27
27/* 28/*
28 * flush_icache_range(start,end) 29 * flush_icache_range(start,end)
@@ -48,6 +49,7 @@ ENTRY(flush_icache_range)
48 * - end - virtual end address of region 49 * - end - virtual end address of region
49 */ 50 */
50ENTRY(__flush_cache_user_range) 51ENTRY(__flush_cache_user_range)
52 uaccess_ttbr0_enable x2, x3
51 dcache_line_size x2, x3 53 dcache_line_size x2, x3
52 sub x3, x2, #1 54 sub x3, x2, #1
53 bic x4, x0, x3 55 bic x4, x0, x3
@@ -69,10 +71,12 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
69 dsb ish 71 dsb ish
70 isb 72 isb
71 mov x0, #0 73 mov x0, #0
741:
75 uaccess_ttbr0_disable x1
72 ret 76 ret
739: 779:
74 mov x0, #-EFAULT 78 mov x0, #-EFAULT
75 ret 79 b 1b
76ENDPROC(flush_icache_range) 80ENDPROC(flush_icache_range)
77ENDPROC(__flush_cache_user_range) 81ENDPROC(__flush_cache_user_range)
78 82
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 380e02941691..c7809f41d9d1 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -88,21 +88,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
88 break; 88 break;
89 89
90 pud = pud_offset(pgd, addr); 90 pud = pud_offset(pgd, addr);
91 printk(", *pud=%016llx", pud_val(*pud)); 91 pr_cont(", *pud=%016llx", pud_val(*pud));
92 if (pud_none(*pud) || pud_bad(*pud)) 92 if (pud_none(*pud) || pud_bad(*pud))
93 break; 93 break;
94 94
95 pmd = pmd_offset(pud, addr); 95 pmd = pmd_offset(pud, addr);
96 printk(", *pmd=%016llx", pmd_val(*pmd)); 96 pr_cont(", *pmd=%016llx", pmd_val(*pmd));
97 if (pmd_none(*pmd) || pmd_bad(*pmd)) 97 if (pmd_none(*pmd) || pmd_bad(*pmd))
98 break; 98 break;
99 99
100 pte = pte_offset_map(pmd, addr); 100 pte = pte_offset_map(pmd, addr);
101 printk(", *pte=%016llx", pte_val(*pte)); 101 pr_cont(", *pte=%016llx", pte_val(*pte));
102 pte_unmap(pte); 102 pte_unmap(pte);
103 } while(0); 103 } while(0);
104 104
105 printk("\n"); 105 pr_cont("\n");
106} 106}
107 107
108#ifdef CONFIG_ARM64_HW_AFDBM 108#ifdef CONFIG_ARM64_HW_AFDBM
@@ -510,10 +510,10 @@ static const struct fault_info {
510 { do_bad, SIGBUS, 0, "unknown 17" }, 510 { do_bad, SIGBUS, 0, "unknown 17" },
511 { do_bad, SIGBUS, 0, "unknown 18" }, 511 { do_bad, SIGBUS, 0, "unknown 18" },
512 { do_bad, SIGBUS, 0, "unknown 19" }, 512 { do_bad, SIGBUS, 0, "unknown 19" },
513 { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, 513 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
514 { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, 514 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
515 { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, 515 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
516 { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" }, 516 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
517 { do_bad, SIGBUS, 0, "synchronous parity error" }, 517 { do_bad, SIGBUS, 0, "synchronous parity error" },
518 { do_bad, SIGBUS, 0, "unknown 25" }, 518 { do_bad, SIGBUS, 0, "unknown 25" },
519 { do_bad, SIGBUS, 0, "unknown 26" }, 519 { do_bad, SIGBUS, 0, "unknown 26" },
@@ -686,5 +686,6 @@ int cpu_enable_pan(void *__unused)
686int cpu_enable_uao(void *__unused) 686int cpu_enable_uao(void *__unused)
687{ 687{
688 asm(SET_PSTATE_UAO(1)); 688 asm(SET_PSTATE_UAO(1));
689 return 0;
689} 690}
690#endif /* CONFIG_ARM64_UAO */ 691#endif /* CONFIG_ARM64_UAO */
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index da30529bb1f6..019f13637fae 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -51,20 +51,8 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
51 *pgsize = PAGE_SIZE; 51 *pgsize = PAGE_SIZE;
52 if (!pte_cont(pte)) 52 if (!pte_cont(pte))
53 return 1; 53 return 1;
54 if (!pgd_present(*pgd)) {
55 VM_BUG_ON(!pgd_present(*pgd));
56 return 1;
57 }
58 pud = pud_offset(pgd, addr); 54 pud = pud_offset(pgd, addr);
59 if (!pud_present(*pud)) {
60 VM_BUG_ON(!pud_present(*pud));
61 return 1;
62 }
63 pmd = pmd_offset(pud, addr); 55 pmd = pmd_offset(pud, addr);
64 if (!pmd_present(*pmd)) {
65 VM_BUG_ON(!pmd_present(*pmd));
66 return 1;
67 }
68 if ((pte_t *)pmd == ptep) { 56 if ((pte_t *)pmd == ptep) {
69 *pgsize = PMD_SIZE; 57 *pgsize = PMD_SIZE;
70 return CONT_PMDS; 58 return CONT_PMDS;
@@ -212,7 +200,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
212 ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize); 200 ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
213 /* save the 1st pte to return */ 201 /* save the 1st pte to return */
214 pte = ptep_get_and_clear(mm, addr, cpte); 202 pte = ptep_get_and_clear(mm, addr, cpte);
215 for (i = 1; i < ncontig; ++i) { 203 for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
216 /* 204 /*
217 * If HW_AFDBM is enabled, then the HW could 205 * If HW_AFDBM is enabled, then the HW could
218 * turn on the dirty bit for any of the page 206 * turn on the dirty bit for any of the page
@@ -250,8 +238,8 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
250 pfn = pte_pfn(*cpte); 238 pfn = pte_pfn(*cpte);
251 ncontig = find_num_contig(vma->vm_mm, addr, cpte, 239 ncontig = find_num_contig(vma->vm_mm, addr, cpte,
252 *cpte, &pgsize); 240 *cpte, &pgsize);
253 for (i = 0; i < ncontig; ++i, ++cpte) { 241 for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
254 changed = ptep_set_access_flags(vma, addr, cpte, 242 changed |= ptep_set_access_flags(vma, addr, cpte,
255 pfn_pte(pfn, 243 pfn_pte(pfn,
256 hugeprot), 244 hugeprot),
257 dirty); 245 dirty);
@@ -273,7 +261,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
273 261
274 cpte = huge_pte_offset(mm, addr); 262 cpte = huge_pte_offset(mm, addr);
275 ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize); 263 ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
276 for (i = 0; i < ncontig; ++i, ++cpte) 264 for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
277 ptep_set_wrprotect(mm, addr, cpte); 265 ptep_set_wrprotect(mm, addr, cpte);
278 } else { 266 } else {
279 ptep_set_wrprotect(mm, addr, ptep); 267 ptep_set_wrprotect(mm, addr, ptep);
@@ -291,7 +279,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
291 cpte = huge_pte_offset(vma->vm_mm, addr); 279 cpte = huge_pte_offset(vma->vm_mm, addr);
292 ncontig = find_num_contig(vma->vm_mm, addr, cpte, 280 ncontig = find_num_contig(vma->vm_mm, addr, cpte,
293 *cpte, &pgsize); 281 *cpte, &pgsize);
294 for (i = 0; i < ncontig; ++i, ++cpte) 282 for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
295 ptep_clear_flush(vma, addr, cpte); 283 ptep_clear_flush(vma, addr, cpte);
296 } else { 284 } else {
297 ptep_clear_flush(vma, addr, ptep); 285 ptep_clear_flush(vma, addr, ptep);
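Note: the hugetlbpage.c hunks fix the same bug several times over. The loops walking a contiguous-PTE range stepped the pte pointer but never advanced addr, so every iteration operated on the first page's address; huge_ptep_set_access_flags additionally overwrote changed each pass instead of accumulating it. The corrected loop shape, as a runnable toy:

    #include <stdio.h>

    #define NCONTIG 4
    #define PGSIZE  0x1000UL

    /* Toy stand-in: report which address each "pte op" touches. */
    static int pte_op(unsigned long addr, int i)
    {
            printf("pte %d -> addr %#lx\n", i, addr);
            return i == 2;                  /* pretend one entry changed */
    }

    int main(void)
    {
            unsigned long addr = 0x400000;
            int changed = 0;

            /* Fixed loop: addr advances with the pte; results accumulate. */
            for (int i = 0; i < NCONTIG; ++i, addr += PGSIZE)
                    changed |= pte_op(addr, i);

            printf("changed=%d\n", changed);
            return 0;
    }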
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 678878077996..74ac8a90ba3f 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -29,11 +29,14 @@
29#include <linux/gfp.h> 29#include <linux/gfp.h>
30#include <linux/memblock.h> 30#include <linux/memblock.h>
31#include <linux/sort.h> 31#include <linux/sort.h>
32#include <linux/of.h>
32#include <linux/of_fdt.h> 33#include <linux/of_fdt.h>
33#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
34#include <linux/dma-contiguous.h> 35#include <linux/dma-contiguous.h>
35#include <linux/efi.h> 36#include <linux/efi.h>
36#include <linux/swiotlb.h> 37#include <linux/swiotlb.h>
38#include <linux/kexec.h>
39#include <linux/crash_dump.h>
37 40
38#include <asm/boot.h> 41#include <asm/boot.h>
39#include <asm/fixmap.h> 42#include <asm/fixmap.h>
@@ -75,6 +78,142 @@ static int __init early_initrd(char *p)
75early_param("initrd", early_initrd); 78early_param("initrd", early_initrd);
76#endif 79#endif
77 80
81#ifdef CONFIG_KEXEC_CORE
82/*
83 * reserve_crashkernel() - reserves memory for crash kernel
84 *
 85 * This function reserves the memory area given by the "crashkernel=" kernel
 86 * command line parameter. The reserved memory is used by the dump-capture
 87 * kernel when the primary kernel crashes.
88 */
89static void __init reserve_crashkernel(void)
90{
91 unsigned long long crash_base, crash_size;
92 int ret;
93
94 ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
95 &crash_size, &crash_base);
96 /* no crashkernel= or invalid value specified */
97 if (ret || !crash_size)
98 return;
99
100 crash_size = PAGE_ALIGN(crash_size);
101
102 if (crash_base == 0) {
103 /* Current arm64 boot protocol requires 2MB alignment */
104 crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
105 crash_size, SZ_2M);
106 if (crash_base == 0) {
107 pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
108 crash_size);
109 return;
110 }
111 } else {
112 /* User specifies base address explicitly. */
113 if (!memblock_is_region_memory(crash_base, crash_size)) {
114 pr_warn("cannot reserve crashkernel: region is not memory\n");
115 return;
116 }
117
118 if (memblock_is_region_reserved(crash_base, crash_size)) {
119 pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
120 return;
121 }
122
123 if (!IS_ALIGNED(crash_base, SZ_2M)) {
124 pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
125 return;
126 }
127 }
128 memblock_reserve(crash_base, crash_size);
129
130 pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
131 crash_base, crash_base + crash_size, crash_size >> 20);
132
133 crashk_res.start = crash_base;
134 crashk_res.end = crash_base + crash_size - 1;
135}
136
137static void __init kexec_reserve_crashkres_pages(void)
138{
139#ifdef CONFIG_HIBERNATION
140 phys_addr_t addr;
141 struct page *page;
142
143 if (!crashk_res.end)
144 return;
145
146 /*
 147 * To reduce the size of the hibernation image, all these pages
 148 * are initially marked as Reserved.
149 */
150 for (addr = crashk_res.start; addr < (crashk_res.end + 1);
151 addr += PAGE_SIZE) {
152 page = phys_to_page(addr);
153 SetPageReserved(page);
154 }
155#endif
156}
157#else
158static void __init reserve_crashkernel(void)
159{
160}
161
162static void __init kexec_reserve_crashkres_pages(void)
163{
164}
165#endif /* CONFIG_KEXEC_CORE */
166
167#ifdef CONFIG_CRASH_DUMP
168static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
169 const char *uname, int depth, void *data)
170{
171 const __be32 *reg;
172 int len;
173
174 if (depth != 1 || strcmp(uname, "chosen") != 0)
175 return 0;
176
177 reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
178 if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
179 return 1;
180
181 elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
182 elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);
183
184 return 1;
185}
186
187/*
188 * reserve_elfcorehdr() - reserves memory for elf core header
189 *
190 * This function reserves the memory occupied by an elf core header
191 * described in the device tree. This region contains all the
 192 * information about the primary kernel's core image and is used by a
 193 * dump-capture kernel to access the primary kernel's system memory.
194 */
195static void __init reserve_elfcorehdr(void)
196{
197 of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);
198
199 if (!elfcorehdr_size)
200 return;
201
202 if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
203 pr_warn("elfcorehdr is overlapped\n");
204 return;
205 }
206
207 memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
208
209 pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
210 elfcorehdr_size >> 10, elfcorehdr_addr);
211}
212#else
213static void __init reserve_elfcorehdr(void)
214{
215}
216#endif /* CONFIG_CRASH_DUMP */
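Note: reserve_crashkernel() implements the usual crashkernel= placement policy. With no base the kernel picks a 2MB-aligned range below ARCH_LOW_ADDRESS_LIMIT (2MB because the arm64 boot protocol requires it); with an explicit base it insists the range is real memory, unreserved, and 2MB-aligned. A user-space model of just the validation half, with stub predicates in place of memblock (all names here are mine):

    #include <stdbool.h>
    #include <stdio.h>

    #define SZ_2M 0x200000ULL

    /* Stubs standing in for memblock_is_region_memory()/_reserved(). */
    static bool is_memory(unsigned long long base, unsigned long long size)
    { return base >= 0x80000000ULL; }       /* toy: DRAM starts at 2GB */
    static bool is_reserved(unsigned long long base, unsigned long long size)
    { return false; }

    static bool crash_base_ok(unsigned long long base, unsigned long long size)
    {
            if (!is_memory(base, size)) {
                    puts("region is not memory");
                    return false;
            }
            if (is_reserved(base, size)) {
                    puts("region overlaps reserved memory");
                    return false;
            }
            if (base % SZ_2M) {
                    puts("base address is not 2MB aligned");
                    return false;
            }
            return true;
    }

    int main(void)
    {
            printf("%d\n", crash_base_ok(0x80200000ULL, 0x8000000ULL)); /* 1 */
            printf("%d\n", crash_base_ok(0x80100000ULL, 0x8000000ULL)); /* 0 */
            return 0;
    }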
78/* 217/*
79 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It 218 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
80 * currently assumes that for memory starting above 4G, 32-bit devices will 219 * currently assumes that for memory starting above 4G, 32-bit devices will
@@ -168,10 +307,45 @@ static int __init early_mem(char *p)
168} 307}
169early_param("mem", early_mem); 308early_param("mem", early_mem);
170 309
310static int __init early_init_dt_scan_usablemem(unsigned long node,
311 const char *uname, int depth, void *data)
312{
313 struct memblock_region *usablemem = data;
314 const __be32 *reg;
315 int len;
316
317 if (depth != 1 || strcmp(uname, "chosen") != 0)
318 return 0;
319
320 reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
321 if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
322 return 1;
323
324 usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
325 usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);
326
327 return 1;
328}
329
330static void __init fdt_enforce_memory_region(void)
331{
332 struct memblock_region reg = {
333 .size = 0,
334 };
335
336 of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);
337
338 if (reg.size)
339 memblock_cap_memory_range(reg.base, reg.size);
340}
341
171void __init arm64_memblock_init(void) 342void __init arm64_memblock_init(void)
172{ 343{
173 const s64 linear_region_size = -(s64)PAGE_OFFSET; 344 const s64 linear_region_size = -(s64)PAGE_OFFSET;
174 345
346 /* Handle linux,usable-memory-range property */
347 fdt_enforce_memory_region();
348
175 /* 349 /*
176 * Ensure that the linear region takes up exactly half of the kernel 350 * Ensure that the linear region takes up exactly half of the kernel
177 * virtual address space. This way, we can distinguish a linear address 351 * virtual address space. This way, we can distinguish a linear address
@@ -248,6 +422,11 @@ void __init arm64_memblock_init(void)
248 arm64_dma_phys_limit = max_zone_dma_phys(); 422 arm64_dma_phys_limit = max_zone_dma_phys();
249 else 423 else
250 arm64_dma_phys_limit = PHYS_MASK + 1; 424 arm64_dma_phys_limit = PHYS_MASK + 1;
425
426 reserve_crashkernel();
427
428 reserve_elfcorehdr();
429
251 dma_contiguous_reserve(arm64_dma_phys_limit); 430 dma_contiguous_reserve(arm64_dma_phys_limit);
252 431
253 memblock_allow_resize(); 432 memblock_allow_resize();
@@ -361,6 +540,8 @@ void __init mem_init(void)
361 /* this will put all unused low memory onto the freelists */ 540 /* this will put all unused low memory onto the freelists */
362 free_all_bootmem(); 541 free_all_bootmem();
363 542
543 kexec_reserve_crashkres_pages();
544
364 mem_init_print_info(NULL); 545 mem_init_print_info(NULL);
365 546
366#define MLK(b, t) b, t, ((t) - (b)) >> 10 547#define MLK(b, t) b, t, ((t) - (b)) >> 10
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 1cab2703f5a8..aeadc2c0a6c8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -21,6 +21,8 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/ioport.h>
25#include <linux/kexec.h>
24#include <linux/libfdt.h> 26#include <linux/libfdt.h>
25#include <linux/mman.h> 27#include <linux/mman.h>
26#include <linux/nodemask.h> 28#include <linux/nodemask.h>
@@ -156,29 +158,10 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
156 } while (pmd++, i++, i < PTRS_PER_PMD); 158 } while (pmd++, i++, i < PTRS_PER_PMD);
157} 159}
158 160
159#ifdef CONFIG_DEBUG_PAGEALLOC
160static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
161{
162
163 /*
164 * If debug_page_alloc is enabled we must map the linear map
165 * using pages. However, other mappings created by
166 * create_mapping_noalloc must use sections in some cases. Allow
167 * sections to be used in those cases, where no pgtable_alloc
168 * function is provided.
169 */
170 return !pgtable_alloc || !debug_pagealloc_enabled();
171}
172#else
173static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
174{
175 return true;
176}
177#endif
178
179static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end, 161static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
180 phys_addr_t phys, pgprot_t prot, 162 phys_addr_t phys, pgprot_t prot,
181 phys_addr_t (*pgtable_alloc)(void)) 163 phys_addr_t (*pgtable_alloc)(void),
164 bool allow_block_mappings)
182{ 165{
183 pmd_t *pmd; 166 pmd_t *pmd;
184 unsigned long next; 167 unsigned long next;
@@ -209,7 +192,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
209 next = pmd_addr_end(addr, end); 192 next = pmd_addr_end(addr, end);
210 /* try section mapping first */ 193 /* try section mapping first */
211 if (((addr | next | phys) & ~SECTION_MASK) == 0 && 194 if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
212 block_mappings_allowed(pgtable_alloc)) { 195 (!pgtable_alloc || allow_block_mappings)) {
213 pmd_t old_pmd =*pmd; 196 pmd_t old_pmd =*pmd;
214 pmd_set_huge(pmd, phys, prot); 197 pmd_set_huge(pmd, phys, prot);
215 /* 198 /*
@@ -248,7 +231,8 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
248 231
249static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, 232static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
250 phys_addr_t phys, pgprot_t prot, 233 phys_addr_t phys, pgprot_t prot,
251 phys_addr_t (*pgtable_alloc)(void)) 234 phys_addr_t (*pgtable_alloc)(void),
235 bool allow_block_mappings)
252{ 236{
253 pud_t *pud; 237 pud_t *pud;
254 unsigned long next; 238 unsigned long next;
@@ -269,7 +253,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
269 * For 4K granule only, attempt to put down a 1GB block 253 * For 4K granule only, attempt to put down a 1GB block
270 */ 254 */
271 if (use_1G_block(addr, next, phys) && 255 if (use_1G_block(addr, next, phys) &&
272 block_mappings_allowed(pgtable_alloc)) { 256 (!pgtable_alloc || allow_block_mappings)) {
273 pud_t old_pud = *pud; 257 pud_t old_pud = *pud;
274 pud_set_huge(pud, phys, prot); 258 pud_set_huge(pud, phys, prot);
275 259
@@ -290,7 +274,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
290 } 274 }
291 } else { 275 } else {
292 alloc_init_pmd(pud, addr, next, phys, prot, 276 alloc_init_pmd(pud, addr, next, phys, prot,
293 pgtable_alloc); 277 pgtable_alloc, allow_block_mappings);
294 } 278 }
295 phys += next - addr; 279 phys += next - addr;
296 } while (pud++, addr = next, addr != end); 280 } while (pud++, addr = next, addr != end);
@@ -304,7 +288,8 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
304 */ 288 */
305static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt, 289static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
306 phys_addr_t size, pgprot_t prot, 290 phys_addr_t size, pgprot_t prot,
307 phys_addr_t (*pgtable_alloc)(void)) 291 phys_addr_t (*pgtable_alloc)(void),
292 bool allow_block_mappings)
308{ 293{
309 unsigned long addr, length, end, next; 294 unsigned long addr, length, end, next;
310 295
@@ -322,7 +307,8 @@ static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
322 end = addr + length; 307 end = addr + length;
323 do { 308 do {
324 next = pgd_addr_end(addr, end); 309 next = pgd_addr_end(addr, end);
325 alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc); 310 alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
311 (!pgtable_alloc || allow_block_mappings));
326 phys += next - addr; 312 phys += next - addr;
327 } while (pgd++, addr = next, addr != end); 313 } while (pgd++, addr = next, addr != end);
328} 314}
@@ -340,9 +326,11 @@ static phys_addr_t late_pgtable_alloc(void)
340static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, 326static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
341 unsigned long virt, phys_addr_t size, 327 unsigned long virt, phys_addr_t size,
342 pgprot_t prot, 328 pgprot_t prot,
343 phys_addr_t (*alloc)(void)) 329 phys_addr_t (*alloc)(void),
330 bool allow_block_mappings)
344{ 331{
345 init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc); 332 init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc,
333 allow_block_mappings);
346} 334}
347 335
348/* 336/*
@@ -358,16 +346,15 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
358 &phys, virt); 346 &phys, virt);
359 return; 347 return;
360 } 348 }
361 __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, 349 __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true);
362 NULL);
363} 350}
364 351
365void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, 352void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
366 unsigned long virt, phys_addr_t size, 353 unsigned long virt, phys_addr_t size,
367 pgprot_t prot) 354 pgprot_t prot, bool allow_block_mappings)
368{ 355{
369 __create_pgd_mapping(mm->pgd, phys, virt, size, prot, 356 __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
370 late_pgtable_alloc); 357 late_pgtable_alloc, allow_block_mappings);
371} 358}
372 359
373static void create_mapping_late(phys_addr_t phys, unsigned long virt, 360static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -380,10 +367,18 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
380 } 367 }
381 368
382 __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, 369 __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
383 late_pgtable_alloc); 370 late_pgtable_alloc, !debug_pagealloc_enabled());
371}
372
373static void __init __map_memblock(pgd_t *pgd, phys_addr_t start,
374 phys_addr_t end, pgprot_t prot,
375 bool allow_block_mappings)
376{
377 __create_pgd_mapping(pgd, start, __phys_to_virt(start), end - start,
378 prot, early_pgtable_alloc, allow_block_mappings);
384} 379}
385 380
386static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end) 381static void __init map_mem(pgd_t *pgd)
387{ 382{
388 unsigned long kernel_start = __pa(_text); 383 unsigned long kernel_start = __pa(_text);
389 unsigned long kernel_end = __pa(__init_begin); 384 unsigned long kernel_end = __pa(__init_begin);
@@ -391,45 +386,15 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
391 /* 386 /*
392 * Take care not to create a writable alias for the 387 * Take care not to create a writable alias for the
393 * read-only text and rodata sections of the kernel image. 388 * read-only text and rodata sections of the kernel image.
389 * So temporarily mark them as NOMAP to skip mappings in
390 * the following for-loop
394 */ 391 */
395 392 memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
396 /* No overlap with the kernel text/rodata */ 393#ifdef CONFIG_KEXEC_CORE
397 if (end < kernel_start || start >= kernel_end) { 394 if (crashk_res.end)
398 __create_pgd_mapping(pgd, start, __phys_to_virt(start), 395 memblock_mark_nomap(crashk_res.start,
399 end - start, PAGE_KERNEL, 396 resource_size(&crashk_res));
400 early_pgtable_alloc); 397#endif
401 return;
402 }
403
404 /*
405 * This block overlaps the kernel text/rodata mappings.
406 * Map the portion(s) which don't overlap.
407 */
408 if (start < kernel_start)
409 __create_pgd_mapping(pgd, start,
410 __phys_to_virt(start),
411 kernel_start - start, PAGE_KERNEL,
412 early_pgtable_alloc);
413 if (kernel_end < end)
414 __create_pgd_mapping(pgd, kernel_end,
415 __phys_to_virt(kernel_end),
416 end - kernel_end, PAGE_KERNEL,
417 early_pgtable_alloc);
418
419 /*
420 * Map the linear alias of the [_text, __init_begin) interval as
421 * read-only/non-executable. This makes the contents of the
422 * region accessible to subsystems such as hibernate, but
423 * protects it from inadvertent modification or execution.
424 */
425 __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
426 kernel_end - kernel_start, PAGE_KERNEL_RO,
427 early_pgtable_alloc);
428}
429
430static void __init map_mem(pgd_t *pgd)
431{
432 struct memblock_region *reg;
433 398
434 /* map all the memory banks */ 399 /* map all the memory banks */
435 for_each_memblock(memory, reg) { 400 for_each_memblock(memory, reg) {
@@ -441,8 +406,33 @@ static void __init map_mem(pgd_t *pgd)
441 if (memblock_is_nomap(reg)) 406 if (memblock_is_nomap(reg))
442 continue; 407 continue;
443 408
444 __map_memblock(pgd, start, end); 409 __map_memblock(pgd, start, end,
410 PAGE_KERNEL, !debug_pagealloc_enabled());
445 } 411 }
412
413 /*
414 * Map the linear alias of the [_text, __init_begin) interval as
415 * read-only/non-executable. This makes the contents of the
416 * region accessible to subsystems such as hibernate, but
417 * protects it from inadvertent modification or execution.
418 */
419 __map_memblock(pgd, kernel_start, kernel_end,
420 PAGE_KERNEL_RO, !debug_pagealloc_enabled());
421 memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
422
423#ifdef CONFIG_KEXEC_CORE
424 /*
425 * Use page-level mappings here so that we can shrink the region
426 * in page granularity and put back unused memory to buddy system
427 * through /sys/kernel/kexec_crash_size interface.
428 */
429 if (crashk_res.end) {
430 __map_memblock(pgd, crashk_res.start, crashk_res.end + 1,
431 PAGE_KERNEL, false);
432 memblock_clear_nomap(crashk_res.start,
433 resource_size(&crashk_res));
434 }
435#endif
446} 436}
447 437
448void mark_rodata_ro(void) 438void mark_rodata_ro(void)
@@ -481,7 +471,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
481 BUG_ON(!PAGE_ALIGNED(size)); 471 BUG_ON(!PAGE_ALIGNED(size));
482 472
483 __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot, 473 __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
484 early_pgtable_alloc); 474 early_pgtable_alloc, !debug_pagealloc_enabled());
485 475
486 vma->addr = va_start; 476 vma->addr = va_start;
487 vma->phys_addr = pa_start; 477 vma->phys_addr = pa_start;
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index ca6d268e3313..f4e39dbdd36b 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -125,6 +125,19 @@ int set_memory_x(unsigned long addr, int numpages)
125} 125}
126EXPORT_SYMBOL_GPL(set_memory_x); 126EXPORT_SYMBOL_GPL(set_memory_x);
127 127
128
129int set_memory_valid(unsigned long addr, int numpages, int enable)
130{
131 if (enable)
132 return __change_memory_common(addr, PAGE_SIZE * numpages,
133 __pgprot(PTE_VALID),
134 __pgprot(0));
135 else
136 return __change_memory_common(addr, PAGE_SIZE * numpages,
137 __pgprot(0),
138 __pgprot(PTE_VALID));
139}
140
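Note: set_memory_valid() toggles only the PTE_VALID bit over a range, so DEBUG_PAGEALLOC can make freed pages fault without disturbing their other attributes. A user-space analogue of the same idea using mprotect(), where PROT_NONE plays the role of a cleared valid bit:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long pagesz = sysconf(_SC_PAGESIZE);
            void *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            mprotect(p, pagesz, PROT_NONE);  /* ~ set_memory_valid(.., 0) */
            puts("page now faults on access, like a cleared PTE_VALID");
            mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* ~ enable=1 */
            *(int *)p = 42;
            printf("%d\n", *(int *)p);
            munmap(p, pagesz);
            return 0;
    }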
128#ifdef CONFIG_DEBUG_PAGEALLOC 141#ifdef CONFIG_DEBUG_PAGEALLOC
129void __kernel_map_pages(struct page *page, int numpages, int enable) 142void __kernel_map_pages(struct page *page, int numpages, int enable)
130{ 143{
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 8292784d44c9..d88a2a80ada8 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -100,7 +100,16 @@ ENTRY(cpu_do_resume)
100 100
101 msr tcr_el1, x8 101 msr tcr_el1, x8
102 msr vbar_el1, x9 102 msr vbar_el1, x9
103
104 /*
105 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
106 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
107 * exception. Mask them until local_dbg_restore() in cpu_suspend()
108 * resets them.
109 */
110 disable_dbg
103 msr mdscr_el1, x10 111 msr mdscr_el1, x10
112
104 msr sctlr_el1, x12 113 msr sctlr_el1, x12
105 /* 114 /*
106 * Restore oslsr_el1 by writing oslar_el1 115 * Restore oslsr_el1 by writing oslar_el1
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index b162ad70effc..6297140dd84f 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -728,14 +728,14 @@ static int build_body(struct jit_ctx *ctx)
728 int ret; 728 int ret;
729 729
730 ret = build_insn(insn, ctx); 730 ret = build_insn(insn, ctx);
731
732 if (ctx->image == NULL)
733 ctx->offset[i] = ctx->idx;
734
735 if (ret > 0) { 731 if (ret > 0) {
736 i++; 732 i++;
733 if (ctx->image == NULL)
734 ctx->offset[i] = ctx->idx;
737 continue; 735 continue;
738 } 736 }
737 if (ctx->image == NULL)
738 ctx->offset[i] = ctx->idx;
739 if (ret) 739 if (ret)
740 return ret; 740 return ret;
741 } 741 }
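Note: the JIT fills ctx->offset[] on a dry run (image == NULL) so branches can be resolved later. When build_insn() consumes two BPF slots (ret > 0, e.g. a 64-bit immediate load), the bookkeeping must run after the i++ so the skipped second slot gets a valid entry; before this change that slot kept a stale value and branches computed across the wide instruction were skewed. A toy layout pass showing the pattern:

    #include <stdio.h>

    /* Toy ISA: op 2 occupies two slots, like BPF_LD | BPF_IMM | BPF_DW. */
    static const int prog[] = { 1, 2, 0 /* 2nd half */, 1 };
    #define NINSN (int)(sizeof(prog) / sizeof(prog[0]))

    int main(void)
    {
            int offset[NINSN] = { 0 };   /* offset[i]: code idx after insn i */
            int idx = 0;

            for (int i = 0; i < NINSN; i++) {
                    idx += 2;            /* pretend every op emits 2 words */
                    if (prog[i] == 2) {  /* double-slot instruction */
                            i++;
                            offset[i] = idx;  /* the fix: fill the 2nd slot */
                            continue;
                    }
                    offset[i] = idx;
            }
            for (int i = 0; i < NINSN; i++)
                    printf("offset[%d] = %d\n", i, offset[i]);
            return 0;
    }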
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 6d6e4af1a4bf..b96db5dafec4 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -90,7 +90,6 @@ ENTRY(privcmd_call)
90 mov x2, x3 90 mov x2, x3
91 mov x3, x4 91 mov x3, x4
92 mov x4, x5 92 mov x4, x5
93#ifdef CONFIG_ARM64_SW_TTBR0_PAN
94 /* 93 /*
95 * Privcmd calls are issued by the userspace. The kernel needs to 94 * Privcmd calls are issued by the userspace. The kernel needs to
96 * enable access to TTBR0_EL1 as the hypervisor would issue stage 1 95 * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
@@ -99,15 +98,12 @@ ENTRY(privcmd_call)
99 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation 98 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
100 * is enabled (it implies that hardware UAO and PAN disabled). 99 * is enabled (it implies that hardware UAO and PAN disabled).
101 */ 100 */
102 uaccess_enable_not_uao x6, x7 101 uaccess_ttbr0_enable x6, x7
103#endif
104 hvc XEN_IMM 102 hvc XEN_IMM
105 103
106#ifdef CONFIG_ARM64_SW_TTBR0_PAN
107 /* 104 /*
108 * Disable userspace access from kernel once the hyp call completed. 105 * Disable userspace access from kernel once the hyp call completed.
109 */ 106 */
110 uaccess_disable_not_uao x6 107 uaccess_ttbr0_disable x6
111#endif
112 ret 108 ret
113ENDPROC(privcmd_call); 109ENDPROC(privcmd_call);
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
index 3c494e84444d..a511ac16a8e3 100644
--- a/arch/c6x/kernel/ptrace.c
+++ b/arch/c6x/kernel/ptrace.c
@@ -69,46 +69,6 @@ static int gpr_get(struct task_struct *target,
69 0, sizeof(*regs)); 69 0, sizeof(*regs));
70} 70}
71 71
72static int gpr_set(struct task_struct *target,
73 const struct user_regset *regset,
74 unsigned int pos, unsigned int count,
75 const void *kbuf, const void __user *ubuf)
76{
77 int ret;
78 struct pt_regs *regs = task_pt_regs(target);
79
80 /* Don't copyin TSR or CSR */
81 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
82 &regs,
83 0, PT_TSR * sizeof(long));
84 if (ret)
85 return ret;
86
87 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
88 PT_TSR * sizeof(long),
89 (PT_TSR + 1) * sizeof(long));
90 if (ret)
91 return ret;
92
93 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
94 &regs,
95 (PT_TSR + 1) * sizeof(long),
96 PT_CSR * sizeof(long));
97 if (ret)
98 return ret;
99
100 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
101 PT_CSR * sizeof(long),
102 (PT_CSR + 1) * sizeof(long));
103 if (ret)
104 return ret;
105
106 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
107 &regs,
108 (PT_CSR + 1) * sizeof(long), -1);
109 return ret;
110}
111
112enum c6x_regset { 72enum c6x_regset {
113 REGSET_GPR, 73 REGSET_GPR,
114}; 74};
@@ -120,7 +80,6 @@ static const struct user_regset c6x_regsets[] = {
120 .size = sizeof(u32), 80 .size = sizeof(u32),
121 .align = sizeof(u32), 81 .align = sizeof(u32),
122 .get = gpr_get, 82 .get = gpr_get,
123 .set = gpr_set
124 }, 83 },
125}; 84};
126 85
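Note: the deleted c6x gpr_set() was never usable. It passed &regs, the address of the local struct pt_regs pointer, to user_regset_copyin(), so a PTRACE_SETREGSET request scribbled over the pointer and the surrounding stack frame instead of the register frame. Removing the .set hook makes the regset read-only rather than shipping a corrupting writer. The bug class, in a few lines of C:

    #include <stdio.h>
    #include <string.h>

    struct pt_regs { long r[4]; };

    int main(void)
    {
            struct pt_regs frame = { { 0 } };
            struct pt_regs *regs = &frame;
            long payload[4] = { 1, 2, 3, 4 };

            /* Bug: &regs overwrites the *pointer*, not the register frame. */
            memcpy(&regs, payload, sizeof(regs));
            /* Fix: copy into the frame the pointer designates. */
            regs = &frame;
            memcpy(regs, payload, sizeof(*regs));

            printf("r0=%ld\n", frame.r[0]);
            return 0;
    }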
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 836f14707a62..efa59f1f8022 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
74 addr = PAGE_ALIGN(addr); 74 addr = PAGE_ALIGN(addr);
75 vma = find_vma(current->mm, addr); 75 vma = find_vma(current->mm, addr);
76 if (TASK_SIZE - len >= addr && 76 if (TASK_SIZE - len >= addr &&
77 (!vma || addr + len <= vma->vm_start)) 77 (!vma || addr + len <= vm_start_gap(vma)))
78 goto success; 78 goto success;
79 } 79 }
80 80
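Note: the frv hunk is this tree's copy of the stack-guard-gap series. Free-space checks must compare against vm_start_gap(vma), which for a downward-growing stack is vm_start minus the guard gap, so a fixed-address mmap can no longer butt up against the stack. Roughly, under the definitions that series introduces:

    #include <stdio.h>

    #define VM_GROWSDOWN 0x1
    static unsigned long stack_guard_gap = 256UL << 12;   /* 256 pages */

    struct vm_area { unsigned long vm_start, vm_flags; };

    /* Sketch of vm_start_gap() as introduced by the guard-gap series. */
    static unsigned long vm_start_gap(const struct vm_area *vma)
    {
            unsigned long start = vma->vm_start;

            if (vma->vm_flags & VM_GROWSDOWN) {
                    if (start > stack_guard_gap)
                            start -= stack_guard_gap;
                    else
                            start = 0;
            }
            return start;
    }

    int main(void)
    {
            struct vm_area stack = { 0x7f0000000000UL, VM_GROWSDOWN };
            printf("effective start: %#lx\n", vm_start_gap(&stack));
            return 0;
    }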
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 92075544a19a..0dc1c8f622bc 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -95,7 +95,8 @@ static int regs_get(struct task_struct *target,
95 long *reg = (long *)&regs; 95 long *reg = (long *)&regs;
96 96
97 /* build user regs in buffer */ 97 /* build user regs in buffer */
98 for (r = 0; r < ARRAY_SIZE(register_offset); r++) 98 BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
99 for (r = 0; r < sizeof(regs) / sizeof(long); r++)
99 *reg++ = h8300_get_reg(target, r); 100 *reg++ = h8300_get_reg(target, r);
100 101
101 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 102 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -113,7 +114,8 @@ static int regs_set(struct task_struct *target,
113 long *reg; 114 long *reg;
114 115
115 /* build user regs in buffer */ 116 /* build user regs in buffer */
116 for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++) 117 BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
118 for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
117 *reg++ = h8300_get_reg(target, r); 119 *reg++ = h8300_get_reg(target, r);
118 120
119 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 121 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -122,7 +124,7 @@ static int regs_set(struct task_struct *target,
122 return ret; 124 return ret;
123 125
124 /* write back to pt_regs */ 126 /* write back to pt_regs */
125 for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++) 127 for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
126 h8300_put_reg(target, r, *reg++); 128 h8300_put_reg(target, r, *reg++);
127 return 0; 129 return 0;
128} 130}
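Note: the h8300 regset loops iterated ARRAY_SIZE(register_offset) times while filling a struct pt_regs-sized buffer; if the offset table is longer than the frame, the loop writes past regs on the stack. Bounding by sizeof(regs)/sizeof(long), with a BUILD_BUG_ON for divisibility, ties the loop to the buffer it fills. The same guard in portable C:

    #include <assert.h>
    #include <stdio.h>

    struct pt_regs { long er[8]; long pc; };

    int main(void)
    {
            struct pt_regs regs = { { 0 } };
            long *reg = (long *)&regs;

            /* Loop bound derived from the buffer, not a parallel table. */
            static_assert(sizeof(regs) % sizeof(long) == 0,
                          "pt_regs must be a whole number of longs");
            for (size_t r = 0; r < sizeof(regs) / sizeof(long); r++)
                    *reg++ = (long)r;       /* h8300_get_reg(target, r) */

            printf("pc slot = %ld\n", regs.pc);
            return 0;
    }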
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 970d0bd99621..648f1cef33fa 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -95,8 +95,8 @@ define archhelp
95 echo '* unwcheck - Check vmlinux for invalid unwind info' 95 echo '* unwcheck - Check vmlinux for invalid unwind info'
96endef 96endef
97 97
98archprepare: make_nr_irqs_h FORCE 98archprepare: make_nr_irqs_h
99PHONY += make_nr_irqs_h FORCE 99PHONY += make_nr_irqs_h FORCE
100 100
101make_nr_irqs_h: FORCE 101make_nr_irqs_h:
102 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h 102 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 273e61225c27..3db381205928 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -28,24 +28,32 @@
28 28
29#define segment_eq(a, b) ((a).seg == (b).seg) 29#define segment_eq(a, b) ((a).seg == (b).seg)
30 30
31#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
32/*
33 * Explicitly allow NULL pointers here. Parts of the kernel such
34 * as readv/writev use access_ok to validate pointers, but want
35 * to allow NULL pointers for various reasons. NULL pointers are
36 * safe to allow through because the first page is not mappable on
37 * Meta.
38 *
39 * We also wish to avoid letting user code access the system area
40 * and the kernel half of the address space.
41 */
42#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
43 ((addr) > PAGE_OFFSET && \
44 (addr) < LINCORE_BASE))
45
46static inline int __access_ok(unsigned long addr, unsigned long size) 31static inline int __access_ok(unsigned long addr, unsigned long size)
47{ 32{
48 return __kernel_ok || !__user_bad(addr, size); 33 /*
34 * Allow access to the user mapped memory area, but not the system area
35 * before it. The check extends to the top of the address space when
36 * kernel access is allowed (there's no real reason to user copy to the
37 * system area in any case).
38 */
39 if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
40 size <= get_fs().seg - addr))
41 return true;
42 /*
43 * Explicitly allow NULL pointers here. Parts of the kernel such
44 * as readv/writev use access_ok to validate pointers, but want
45 * to allow NULL pointers for various reasons. NULL pointers are
46 * safe to allow through because the first page is not mappable on
47 * Meta.
48 */
49 if (!addr)
50 return true;
51 /* Allow access to core code memory area... */
52 if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
53 size <= LINCORE_CODE_LIMIT + 1 - addr)
54 return true;
55 /* ... but no other areas. */
56 return false;
49} 57}
50 58
51#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \ 59#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
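
[editor's note] The rewritten __access_ok() folds the old __kernel_ok/__user_bad macros into one overflow-safe range check against the current segment limit. A hedged standalone rendering of the core comparison (the constant is a placeholder, not the real Meta memory map):

    #include <stdbool.h>

    #define META_MEMORY_BASE 0x08000000UL  /* illustrative value only */

    static bool range_ok(unsigned long addr, unsigned long size, unsigned long seg)
    {
        /* Written as "size <= seg - addr" rather than "addr + size <= seg":
         * once addr < seg is known the subtraction cannot wrap, while the
         * addition could overflow and falsely accept a huge size. */
        return addr >= META_MEMORY_BASE && addr < seg && size <= seg - addr;
    }
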
@@ -186,8 +194,13 @@ do { \
186extern long __must_check __strncpy_from_user(char *dst, const char __user *src, 194extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
187 long count); 195 long count);
188 196
189#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count) 197static inline long
190 198strncpy_from_user(char *dst, const char __user *src, long count)
199{
200 if (!access_ok(VERIFY_READ, src, 1))
201 return -EFAULT;
202 return __strncpy_from_user(dst, src, count);
203}
191/* 204/*
192 * Return the size of a string (including the ending 0) 205 * Return the size of a string (including the ending 0)
193 * 206 *
@@ -197,20 +210,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
197 210
198#define strlen_user(str) strnlen_user(str, 32767) 211#define strlen_user(str) strnlen_user(str, 32767)
199 212
200extern unsigned long __must_check __copy_user_zeroing(void *to, 213extern unsigned long raw_copy_from_user(void *to, const void __user *from,
201 const void __user *from, 214 unsigned long n);
202 unsigned long n);
203 215
204static inline unsigned long 216static inline unsigned long
205copy_from_user(void *to, const void __user *from, unsigned long n) 217copy_from_user(void *to, const void __user *from, unsigned long n)
206{ 218{
219 unsigned long res = n;
207 if (likely(access_ok(VERIFY_READ, from, n))) 220 if (likely(access_ok(VERIFY_READ, from, n)))
208 return __copy_user_zeroing(to, from, n); 221 res = raw_copy_from_user(to, from, n);
209 memset(to, 0, n); 222 if (unlikely(res))
210 return n; 223 memset(to + (n - res), 0, res);
224 return res;
211} 225}
212 226
213#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n) 227#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
214#define __copy_from_user_inatomic __copy_from_user 228#define __copy_from_user_inatomic __copy_from_user
215 229
216extern unsigned long __must_check __copy_user(void __user *to, 230extern unsigned long __must_check __copy_user(void __user *to,
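
[editor's note] The copy_from_user() rework moves the zeroing duty out of the assembly copier: raw_copy_from_user() only reports how many bytes remain uncopied, and the inline wrapper clears exactly that tail so callers never see uninitialized kernel memory. A userspace model of the contract — the stub name and its no-fault behaviour are assumptions:

    #include <string.h>

    /* Pretend copier that never faults; the real raw_copy_from_user()
     * returns the number of bytes it could NOT copy. */
    static unsigned long raw_copy_stub(void *to, const void *from, unsigned long n)
    {
        memcpy(to, from, n);
        return 0;
    }

    static unsigned long copy_from_sketch(void *to, const void *from, unsigned long n)
    {
        unsigned long res = raw_copy_stub(to, from, n);

        if (res)  /* zero exactly the tail that was left unwritten */
            memset((char *)to + (n - res), 0, res);
        return res;
    }
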
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
index 7563628822bd..5e2dc7defd2c 100644
--- a/arch/metag/kernel/ptrace.c
+++ b/arch/metag/kernel/ptrace.c
@@ -24,6 +24,16 @@
24 * user_regset definitions. 24 * user_regset definitions.
25 */ 25 */
26 26
27static unsigned long user_txstatus(const struct pt_regs *regs)
28{
29 unsigned long data = (unsigned long)regs->ctx.Flags;
30
31 if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
32 data |= USER_GP_REGS_STATUS_CATCH_BIT;
33
34 return data;
35}
36
27int metag_gp_regs_copyout(const struct pt_regs *regs, 37int metag_gp_regs_copyout(const struct pt_regs *regs,
28 unsigned int pos, unsigned int count, 38 unsigned int pos, unsigned int count,
29 void *kbuf, void __user *ubuf) 39 void *kbuf, void __user *ubuf)
@@ -62,9 +72,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs,
62 if (ret) 72 if (ret)
63 goto out; 73 goto out;
64 /* TXSTATUS */ 74 /* TXSTATUS */
65 data = (unsigned long)regs->ctx.Flags; 75 data = user_txstatus(regs);
66 if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
67 data |= USER_GP_REGS_STATUS_CATCH_BIT;
68 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 76 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
69 &data, 4*25, 4*26); 77 &data, 4*25, 4*26);
70 if (ret) 78 if (ret)
@@ -119,6 +127,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs,
119 if (ret) 127 if (ret)
120 goto out; 128 goto out;
121 /* TXSTATUS */ 129 /* TXSTATUS */
130 data = user_txstatus(regs);
122 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 131 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
123 &data, 4*25, 4*26); 132 &data, 4*25, 4*26);
124 if (ret) 133 if (ret)
@@ -244,6 +253,8 @@ int metag_rp_state_copyin(struct pt_regs *regs,
244 unsigned long long *ptr; 253 unsigned long long *ptr;
245 int ret, i; 254 int ret, i;
246 255
256 if (count < 4*13)
257 return -EINVAL;
247 /* Read the entire pipeline before making any changes */ 258 /* Read the entire pipeline before making any changes */
248 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 259 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
249 &rp, 0, 4*13); 260 &rp, 0, 4*13);
@@ -303,7 +314,7 @@ static int metag_tls_set(struct task_struct *target,
303 const void *kbuf, const void __user *ubuf) 314 const void *kbuf, const void __user *ubuf)
304{ 315{
305 int ret; 316 int ret;
306 void __user *tls; 317 void __user *tls = target->thread.tls_ptr;
307 318
308 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 319 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
309 if (ret) 320 if (ret)
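
[editor's note] Both metag ptrace fixes apply the same rule: seed the temporary with the live value (user_txstatus(regs), target->thread.tls_ptr) before user_regset_copyin() runs, so a short or partial copyin can never install uninitialized stack bytes. A sketch of the pattern with assumed helper names:

    #include <string.h>

    /* Models a regset copyin that may update only part of the destination. */
    static int copyin_stub(void *dst, size_t dst_size, const void *src, size_t src_len)
    {
        memcpy(dst, src, src_len < dst_size ? src_len : dst_size);
        return 0;
    }

    static void set_tls_sketch(void **thread_tls, const void *ubuf, size_t len)
    {
        void *tls = *thread_tls;  /* the fix: seed with the current value */

        if (copyin_stub(&tls, sizeof(tls), ubuf, len) == 0)
            *thread_tls = tls;    /* bytes the user didn't supply stay valid */
    }
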
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index b3ebfe9c8e88..2792fc621088 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
29 COPY \ 29 COPY \
30 "1:\n" \ 30 "1:\n" \
31 " .section .fixup,\"ax\"\n" \ 31 " .section .fixup,\"ax\"\n" \
32 " MOV D1Ar1,#0\n" \
33 FIXUP \ 32 FIXUP \
34 " MOVT D1Ar1,#HI(1b)\n" \ 33 " MOVT D1Ar1,#HI(1b)\n" \
35 " JUMP D1Ar1,#LO(1b)\n" \ 34 " JUMP D1Ar1,#LO(1b)\n" \
@@ -260,27 +259,31 @@
260 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 259 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
261 "22:\n" \ 260 "22:\n" \
262 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 261 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
263 "SUB %3, %3, #32\n" \
264 "23:\n" \ 262 "23:\n" \
265 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 263 "SUB %3, %3, #32\n" \
266 "24:\n" \ 264 "24:\n" \
265 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
266 "25:\n" \
267 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 267 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
268 "26:\n" \
268 "SUB %3, %3, #32\n" \ 269 "SUB %3, %3, #32\n" \
269 "DCACHE [%1+#-64], D0Ar6\n" \ 270 "DCACHE [%1+#-64], D0Ar6\n" \
270 "BR $Lloop"id"\n" \ 271 "BR $Lloop"id"\n" \
271 \ 272 \
272 "MOV RAPF, %1\n" \ 273 "MOV RAPF, %1\n" \
273 "25:\n" \ 274 "27:\n" \
274 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 275 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
275 "26:\n" \ 276 "28:\n" \
276 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 277 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
278 "29:\n" \
277 "SUB %3, %3, #32\n" \ 279 "SUB %3, %3, #32\n" \
278 "27:\n" \ 280 "30:\n" \
279 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 281 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
280 "28:\n" \ 282 "31:\n" \
281 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 283 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
284 "32:\n" \
282 "SUB %0, %0, #8\n" \ 285 "SUB %0, %0, #8\n" \
283 "29:\n" \ 286 "33:\n" \
284 "SETL [%0++], D0.7, D1.7\n" \ 287 "SETL [%0++], D0.7, D1.7\n" \
285 "SUB %3, %3, #32\n" \ 288 "SUB %3, %3, #32\n" \
286 "1:" \ 289 "1:" \
@@ -312,11 +315,15 @@
312 " .long 26b,3b\n" \ 315 " .long 26b,3b\n" \
313 " .long 27b,3b\n" \ 316 " .long 27b,3b\n" \
314 " .long 28b,3b\n" \ 317 " .long 28b,3b\n" \
315 " .long 29b,4b\n" \ 318 " .long 29b,3b\n" \
319 " .long 30b,3b\n" \
320 " .long 31b,3b\n" \
321 " .long 32b,3b\n" \
322 " .long 33b,4b\n" \
316 " .previous\n" \ 323 " .previous\n" \
317 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ 324 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
318 : "0" (to), "1" (from), "2" (ret), "3" (n) \ 325 : "0" (to), "1" (from), "2" (ret), "3" (n) \
319 : "D1Ar1", "D0Ar2", "memory") 326 : "D1Ar1", "D0Ar2", "cc", "memory")
320 327
321/* rewind 'to' and 'from' pointers when a fault occurs 328/* rewind 'to' and 'from' pointers when a fault occurs
322 * 329 *
@@ -342,7 +349,7 @@
342#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\ 349#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
343 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ 350 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
344 "LSR D0Ar2, D0Ar2, #8\n" \ 351 "LSR D0Ar2, D0Ar2, #8\n" \
345 "AND D0Ar2, D0Ar2, #0x7\n" \ 352 "ANDS D0Ar2, D0Ar2, #0x7\n" \
346 "ADDZ D0Ar2, D0Ar2, #4\n" \ 353 "ADDZ D0Ar2, D0Ar2, #4\n" \
347 "SUB D0Ar2, D0Ar2, #1\n" \ 354 "SUB D0Ar2, D0Ar2, #1\n" \
348 "MOV D1Ar1, #4\n" \ 355 "MOV D1Ar1, #4\n" \
@@ -403,47 +410,55 @@
403 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 410 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
404 "22:\n" \ 411 "22:\n" \
405 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 412 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
406 "SUB %3, %3, #16\n" \
407 "23:\n" \ 413 "23:\n" \
408 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
409 "24:\n" \
410 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
411 "SUB %3, %3, #16\n" \ 414 "SUB %3, %3, #16\n" \
412 "25:\n" \ 415 "24:\n" \
413 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 416 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
414 "26:\n" \ 417 "25:\n" \
415 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 418 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
419 "26:\n" \
416 "SUB %3, %3, #16\n" \ 420 "SUB %3, %3, #16\n" \
417 "27:\n" \ 421 "27:\n" \
418 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 422 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
419 "28:\n" \ 423 "28:\n" \
420 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 424 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
425 "29:\n" \
426 "SUB %3, %3, #16\n" \
427 "30:\n" \
428 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
429 "31:\n" \
430 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
431 "32:\n" \
421 "SUB %3, %3, #16\n" \ 432 "SUB %3, %3, #16\n" \
422 "DCACHE [%1+#-64], D0Ar6\n" \ 433 "DCACHE [%1+#-64], D0Ar6\n" \
423 "BR $Lloop"id"\n" \ 434 "BR $Lloop"id"\n" \
424 \ 435 \
425 "MOV RAPF, %1\n" \ 436 "MOV RAPF, %1\n" \
426 "29:\n" \ 437 "33:\n" \
427 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 438 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
428 "30:\n" \ 439 "34:\n" \
429 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 440 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
441 "35:\n" \
430 "SUB %3, %3, #16\n" \ 442 "SUB %3, %3, #16\n" \
431 "31:\n" \ 443 "36:\n" \
432 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 444 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
433 "32:\n" \ 445 "37:\n" \
434 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 446 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
447 "38:\n" \
435 "SUB %3, %3, #16\n" \ 448 "SUB %3, %3, #16\n" \
436 "33:\n" \ 449 "39:\n" \
437 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 450 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
438 "34:\n" \ 451 "40:\n" \
439 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 452 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
453 "41:\n" \
440 "SUB %3, %3, #16\n" \ 454 "SUB %3, %3, #16\n" \
441 "35:\n" \ 455 "42:\n" \
442 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 456 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
443 "36:\n" \ 457 "43:\n" \
444 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 458 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
459 "44:\n" \
445 "SUB %0, %0, #4\n" \ 460 "SUB %0, %0, #4\n" \
446 "37:\n" \ 461 "45:\n" \
447 "SETD [%0++], D0.7\n" \ 462 "SETD [%0++], D0.7\n" \
448 "SUB %3, %3, #16\n" \ 463 "SUB %3, %3, #16\n" \
449 "1:" \ 464 "1:" \
@@ -483,11 +498,19 @@
483 " .long 34b,3b\n" \ 498 " .long 34b,3b\n" \
484 " .long 35b,3b\n" \ 499 " .long 35b,3b\n" \
485 " .long 36b,3b\n" \ 500 " .long 36b,3b\n" \
486 " .long 37b,4b\n" \ 501 " .long 37b,3b\n" \
502 " .long 38b,3b\n" \
503 " .long 39b,3b\n" \
504 " .long 40b,3b\n" \
505 " .long 41b,3b\n" \
506 " .long 42b,3b\n" \
507 " .long 43b,3b\n" \
508 " .long 44b,3b\n" \
509 " .long 45b,4b\n" \
487 " .previous\n" \ 510 " .previous\n" \
488 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ 511 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
489 : "0" (to), "1" (from), "2" (ret), "3" (n) \ 512 : "0" (to), "1" (from), "2" (ret), "3" (n) \
490 : "D1Ar1", "D0Ar2", "memory") 513 : "D1Ar1", "D0Ar2", "cc", "memory")
491 514
492/* rewind 'to' and 'from' pointers when a fault occurs 515/* rewind 'to' and 'from' pointers when a fault occurs
493 * 516 *
@@ -513,7 +536,7 @@
513#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\ 536#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
514 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ 537 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
515 "LSR D0Ar2, D0Ar2, #8\n" \ 538 "LSR D0Ar2, D0Ar2, #8\n" \
516 "AND D0Ar2, D0Ar2, #0x7\n" \ 539 "ANDS D0Ar2, D0Ar2, #0x7\n" \
517 "ADDZ D0Ar2, D0Ar2, #4\n" \ 540 "ADDZ D0Ar2, D0Ar2, #4\n" \
518 "SUB D0Ar2, D0Ar2, #1\n" \ 541 "SUB D0Ar2, D0Ar2, #1\n" \
519 "MOV D1Ar1, #4\n" \ 542 "MOV D1Ar1, #4\n" \
@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
538 if ((unsigned long) src & 1) { 561 if ((unsigned long) src & 1) {
539 __asm_copy_to_user_1(dst, src, retn); 562 __asm_copy_to_user_1(dst, src, retn);
540 n--; 563 n--;
564 if (retn)
565 return retn + n;
541 } 566 }
542 if ((unsigned long) dst & 1) { 567 if ((unsigned long) dst & 1) {
543 /* Worst case - byte copy */ 568 /* Worst case - byte copy */
544 while (n > 0) { 569 while (n > 0) {
545 __asm_copy_to_user_1(dst, src, retn); 570 __asm_copy_to_user_1(dst, src, retn);
546 n--; 571 n--;
572 if (retn)
573 return retn + n;
547 } 574 }
548 } 575 }
549 if (((unsigned long) src & 2) && n >= 2) { 576 if (((unsigned long) src & 2) && n >= 2) {
550 __asm_copy_to_user_2(dst, src, retn); 577 __asm_copy_to_user_2(dst, src, retn);
551 n -= 2; 578 n -= 2;
579 if (retn)
580 return retn + n;
552 } 581 }
553 if ((unsigned long) dst & 2) { 582 if ((unsigned long) dst & 2) {
554 /* Second worst case - word copy */ 583 /* Second worst case - word copy */
555 while (n >= 2) { 584 while (n >= 2) {
556 __asm_copy_to_user_2(dst, src, retn); 585 __asm_copy_to_user_2(dst, src, retn);
557 n -= 2; 586 n -= 2;
587 if (retn)
588 return retn + n;
558 } 589 }
559 } 590 }
560 591
@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
569 while (n >= 8) { 600 while (n >= 8) {
570 __asm_copy_to_user_8x64(dst, src, retn); 601 __asm_copy_to_user_8x64(dst, src, retn);
571 n -= 8; 602 n -= 8;
603 if (retn)
604 return retn + n;
572 } 605 }
573 } 606 }
574 if (n >= RAPF_MIN_BUF_SIZE) { 607 if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
581 while (n >= 8) { 614 while (n >= 8) {
582 __asm_copy_to_user_8x64(dst, src, retn); 615 __asm_copy_to_user_8x64(dst, src, retn);
583 n -= 8; 616 n -= 8;
617 if (retn)
618 return retn + n;
584 } 619 }
585 } 620 }
586#endif 621#endif
@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
588 while (n >= 16) { 623 while (n >= 16) {
589 __asm_copy_to_user_16(dst, src, retn); 624 __asm_copy_to_user_16(dst, src, retn);
590 n -= 16; 625 n -= 16;
626 if (retn)
627 return retn + n;
591 } 628 }
592 629
593 while (n >= 4) { 630 while (n >= 4) {
594 __asm_copy_to_user_4(dst, src, retn); 631 __asm_copy_to_user_4(dst, src, retn);
595 n -= 4; 632 n -= 4;
633 if (retn)
634 return retn + n;
596 } 635 }
597 636
598 switch (n) { 637 switch (n) {
@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
609 break; 648 break;
610 } 649 }
611 650
651 /*
652 * If we get here, retn correctly reflects the number of failing
653 * bytes.
654 */
612 return retn; 655 return retn;
613} 656}
614EXPORT_SYMBOL(__copy_user); 657EXPORT_SYMBOL(__copy_user);
@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
617 __asm_copy_user_cont(to, from, ret, \ 660 __asm_copy_user_cont(to, from, ret, \
618 " GETB D1Ar1,[%1++]\n" \ 661 " GETB D1Ar1,[%1++]\n" \
619 "2: SETB [%0++],D1Ar1\n", \ 662 "2: SETB [%0++],D1Ar1\n", \
620 "3: ADD %2,%2,#1\n" \ 663 "3: ADD %2,%2,#1\n", \
621 " SETB [%0++],D1Ar1\n", \
622 " .long 2b,3b\n") 664 " .long 2b,3b\n")
623 665
624#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ 666#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
625 __asm_copy_user_cont(to, from, ret, \ 667 __asm_copy_user_cont(to, from, ret, \
626 " GETW D1Ar1,[%1++]\n" \ 668 " GETW D1Ar1,[%1++]\n" \
627 "2: SETW [%0++],D1Ar1\n" COPY, \ 669 "2: SETW [%0++],D1Ar1\n" COPY, \
628 "3: ADD %2,%2,#2\n" \ 670 "3: ADD %2,%2,#2\n" FIXUP, \
629 " SETW [%0++],D1Ar1\n" FIXUP, \
630 " .long 2b,3b\n" TENTRY) 671 " .long 2b,3b\n" TENTRY)
631 672
632#define __asm_copy_from_user_2(to, from, ret) \ 673#define __asm_copy_from_user_2(to, from, ret) \
@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
636 __asm_copy_from_user_2x_cont(to, from, ret, \ 677 __asm_copy_from_user_2x_cont(to, from, ret, \
637 " GETB D1Ar1,[%1++]\n" \ 678 " GETB D1Ar1,[%1++]\n" \
638 "4: SETB [%0++],D1Ar1\n", \ 679 "4: SETB [%0++],D1Ar1\n", \
639 "5: ADD %2,%2,#1\n" \ 680 "5: ADD %2,%2,#1\n", \
640 " SETB [%0++],D1Ar1\n", \
641 " .long 4b,5b\n") 681 " .long 4b,5b\n")
642 682
643#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ 683#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
644 __asm_copy_user_cont(to, from, ret, \ 684 __asm_copy_user_cont(to, from, ret, \
645 " GETD D1Ar1,[%1++]\n" \ 685 " GETD D1Ar1,[%1++]\n" \
646 "2: SETD [%0++],D1Ar1\n" COPY, \ 686 "2: SETD [%0++],D1Ar1\n" COPY, \
647 "3: ADD %2,%2,#4\n" \ 687 "3: ADD %2,%2,#4\n" FIXUP, \
648 " SETD [%0++],D1Ar1\n" FIXUP, \
649 " .long 2b,3b\n" TENTRY) 688 " .long 2b,3b\n" TENTRY)
650 689
651#define __asm_copy_from_user_4(to, from, ret) \ 690#define __asm_copy_from_user_4(to, from, ret) \
652 __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") 691 __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
653 692
654#define __asm_copy_from_user_5(to, from, ret) \
655 __asm_copy_from_user_4x_cont(to, from, ret, \
656 " GETB D1Ar1,[%1++]\n" \
657 "4: SETB [%0++],D1Ar1\n", \
658 "5: ADD %2,%2,#1\n" \
659 " SETB [%0++],D1Ar1\n", \
660 " .long 4b,5b\n")
661
662#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
663 __asm_copy_from_user_4x_cont(to, from, ret, \
664 " GETW D1Ar1,[%1++]\n" \
665 "4: SETW [%0++],D1Ar1\n" COPY, \
666 "5: ADD %2,%2,#2\n" \
667 " SETW [%0++],D1Ar1\n" FIXUP, \
668 " .long 4b,5b\n" TENTRY)
669
670#define __asm_copy_from_user_6(to, from, ret) \
671 __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
672
673#define __asm_copy_from_user_7(to, from, ret) \
674 __asm_copy_from_user_6x_cont(to, from, ret, \
675 " GETB D1Ar1,[%1++]\n" \
676 "6: SETB [%0++],D1Ar1\n", \
677 "7: ADD %2,%2,#1\n" \
678 " SETB [%0++],D1Ar1\n", \
679 " .long 6b,7b\n")
680
681#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
682 __asm_copy_from_user_4x_cont(to, from, ret, \
683 " GETD D1Ar1,[%1++]\n" \
684 "4: SETD [%0++],D1Ar1\n" COPY, \
685 "5: ADD %2,%2,#4\n" \
686 " SETD [%0++],D1Ar1\n" FIXUP, \
687 " .long 4b,5b\n" TENTRY)
688
689#define __asm_copy_from_user_8(to, from, ret) \
690 __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
691
692#define __asm_copy_from_user_9(to, from, ret) \
693 __asm_copy_from_user_8x_cont(to, from, ret, \
694 " GETB D1Ar1,[%1++]\n" \
695 "6: SETB [%0++],D1Ar1\n", \
696 "7: ADD %2,%2,#1\n" \
697 " SETB [%0++],D1Ar1\n", \
698 " .long 6b,7b\n")
699
700#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
701 __asm_copy_from_user_8x_cont(to, from, ret, \
702 " GETW D1Ar1,[%1++]\n" \
703 "6: SETW [%0++],D1Ar1\n" COPY, \
704 "7: ADD %2,%2,#2\n" \
705 " SETW [%0++],D1Ar1\n" FIXUP, \
706 " .long 6b,7b\n" TENTRY)
707
708#define __asm_copy_from_user_10(to, from, ret) \
709 __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
710
711#define __asm_copy_from_user_11(to, from, ret) \
712 __asm_copy_from_user_10x_cont(to, from, ret, \
713 " GETB D1Ar1,[%1++]\n" \
714 "8: SETB [%0++],D1Ar1\n", \
715 "9: ADD %2,%2,#1\n" \
716 " SETB [%0++],D1Ar1\n", \
717 " .long 8b,9b\n")
718
719#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
720 __asm_copy_from_user_8x_cont(to, from, ret, \
721 " GETD D1Ar1,[%1++]\n" \
722 "6: SETD [%0++],D1Ar1\n" COPY, \
723 "7: ADD %2,%2,#4\n" \
724 " SETD [%0++],D1Ar1\n" FIXUP, \
725 " .long 6b,7b\n" TENTRY)
726
727#define __asm_copy_from_user_12(to, from, ret) \
728 __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
729
730#define __asm_copy_from_user_13(to, from, ret) \
731 __asm_copy_from_user_12x_cont(to, from, ret, \
732 " GETB D1Ar1,[%1++]\n" \
733 "8: SETB [%0++],D1Ar1\n", \
734 "9: ADD %2,%2,#1\n" \
735 " SETB [%0++],D1Ar1\n", \
736 " .long 8b,9b\n")
737
738#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
739 __asm_copy_from_user_12x_cont(to, from, ret, \
740 " GETW D1Ar1,[%1++]\n" \
741 "8: SETW [%0++],D1Ar1\n" COPY, \
742 "9: ADD %2,%2,#2\n" \
743 " SETW [%0++],D1Ar1\n" FIXUP, \
744 " .long 8b,9b\n" TENTRY)
745
746#define __asm_copy_from_user_14(to, from, ret) \
747 __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
748
749#define __asm_copy_from_user_15(to, from, ret) \
750 __asm_copy_from_user_14x_cont(to, from, ret, \
751 " GETB D1Ar1,[%1++]\n" \
752 "10: SETB [%0++],D1Ar1\n", \
753 "11: ADD %2,%2,#1\n" \
754 " SETB [%0++],D1Ar1\n", \
755 " .long 10b,11b\n")
756
757#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
758 __asm_copy_from_user_12x_cont(to, from, ret, \
759 " GETD D1Ar1,[%1++]\n" \
760 "8: SETD [%0++],D1Ar1\n" COPY, \
761 "9: ADD %2,%2,#4\n" \
762 " SETD [%0++],D1Ar1\n" FIXUP, \
763 " .long 8b,9b\n" TENTRY)
764
765#define __asm_copy_from_user_16(to, from, ret) \
766 __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
767
768#define __asm_copy_from_user_8x64(to, from, ret) \ 693#define __asm_copy_from_user_8x64(to, from, ret) \
769 asm volatile ( \ 694 asm volatile ( \
770 " GETL D0Ar2,D1Ar1,[%1++]\n" \ 695 " GETL D0Ar2,D1Ar1,[%1++]\n" \
771 "2: SETL [%0++],D0Ar2,D1Ar1\n" \ 696 "2: SETL [%0++],D0Ar2,D1Ar1\n" \
772 "1:\n" \ 697 "1:\n" \
773 " .section .fixup,\"ax\"\n" \ 698 " .section .fixup,\"ax\"\n" \
774 " MOV D1Ar1,#0\n" \
775 " MOV D0Ar2,#0\n" \
776 "3: ADD %2,%2,#8\n" \ 699 "3: ADD %2,%2,#8\n" \
777 " SETL [%0++],D0Ar2,D1Ar1\n" \
778 " MOVT D0Ar2,#HI(1b)\n" \ 700 " MOVT D0Ar2,#HI(1b)\n" \
779 " JUMP D0Ar2,#LO(1b)\n" \ 701 " JUMP D0Ar2,#LO(1b)\n" \
780 " .previous\n" \ 702 " .previous\n" \
@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
789 * 711 *
790 * Rationale: 712 * Rationale:
791 * A fault occurs while reading from user buffer, which is the 713 * A fault occurs while reading from user buffer, which is the
792 * source. Since the fault is at a single address, we only 714 * source.
793 * need to rewind by 8 bytes.
794 * Since we don't write to kernel buffer until we read first, 715 * Since we don't write to kernel buffer until we read first,
795 * the kernel buffer is at the right state and needn't be 716 * the kernel buffer is at the right state and needn't be
796 * corrected. 717 * corrected, but the source must be rewound to the beginning of
718 * the block, which is LSM_STEP*8 bytes.
719 * LSM_STEP is bits 10:8 in TXSTATUS which is already read
720 * and stored in D0Ar2
721 *
722 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
723 * LSM_STEP will be 0. ie: we do 4 writes in our case, if
724 * a fault happens at the 4th write, LSM_STEP will be 0
725 * instead of 4. The code copes with that.
797 */ 726 */
798#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ 727#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
799 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ 728 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
800 "SUB %1, %1, #8\n") 729 "LSR D0Ar2, D0Ar2, #5\n" \
730 "ANDS D0Ar2, D0Ar2, #0x38\n" \
731 "ADDZ D0Ar2, D0Ar2, #32\n" \
732 "SUB %1, %1, D0Ar2\n")
801 733
802/* rewind 'from' pointer when a fault occurs 734/* rewind 'from' pointer when a fault occurs
803 * 735 *
804 * Rationale: 736 * Rationale:
805 * A fault occurs while reading from user buffer, which is the 737 * A fault occurs while reading from user buffer, which is the
806 * source. Since the fault is at a single address, we only 738 * source.
807 * need to rewind by 4 bytes.
808 * Since we don't write to kernel buffer until we read first, 739 * Since we don't write to kernel buffer until we read first,
809 * the kernel buffer is at the right state and needn't be 740 * the kernel buffer is at the right state and needn't be
810 * corrected. 741 * corrected, but the source must be rewound to the beginning of
742 * the block, which is LSM_STEP*4 bytes.
743 * LSM_STEP is bits 10:8 in TXSTATUS which is already read
744 * and stored in D0Ar2
745 *
746 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
747 * LSM_STEP will be 0. ie: we do 4 writes in our case, if
748 * a fault happens at the 4th write, LSM_STEP will be 0
749 * instead of 4. The code copes with that.
811 */ 750 */
812#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ 751#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
813 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ 752 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
814 "SUB %1, %1, #4\n") 753 "LSR D0Ar2, D0Ar2, #6\n" \
754 "ANDS D0Ar2, D0Ar2, #0x1c\n" \
755 "ADDZ D0Ar2, D0Ar2, #16\n" \
756 "SUB %1, %1, D0Ar2\n")
815 757
816 758
817/* Copy from user to kernel, zeroing the bytes that were inaccessible in 759/*
818 userland. The return-value is the number of bytes that were 760 * Copy from user to kernel. The return-value is the number of bytes that were
819 inaccessible. */ 761 * inaccessible.
820unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, 762 */
821 unsigned long n) 763unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
764 unsigned long n)
822{ 765{
823 register char *dst asm ("A0.2") = pdst; 766 register char *dst asm ("A0.2") = pdst;
824 register const char __user *src asm ("A1.2") = psrc; 767 register const char __user *src asm ("A1.2") = psrc;
@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
830 if ((unsigned long) src & 1) { 773 if ((unsigned long) src & 1) {
831 __asm_copy_from_user_1(dst, src, retn); 774 __asm_copy_from_user_1(dst, src, retn);
832 n--; 775 n--;
776 if (retn)
777 return retn + n;
833 } 778 }
834 if ((unsigned long) dst & 1) { 779 if ((unsigned long) dst & 1) {
835 /* Worst case - byte copy */ 780 /* Worst case - byte copy */
@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
837 __asm_copy_from_user_1(dst, src, retn); 782 __asm_copy_from_user_1(dst, src, retn);
838 n--; 783 n--;
839 if (retn) 784 if (retn)
840 goto copy_exception_bytes; 785 return retn + n;
841 } 786 }
842 } 787 }
843 if (((unsigned long) src & 2) && n >= 2) { 788 if (((unsigned long) src & 2) && n >= 2) {
844 __asm_copy_from_user_2(dst, src, retn); 789 __asm_copy_from_user_2(dst, src, retn);
845 n -= 2; 790 n -= 2;
791 if (retn)
792 return retn + n;
846 } 793 }
847 if ((unsigned long) dst & 2) { 794 if ((unsigned long) dst & 2) {
848 /* Second worst case - word copy */ 795 /* Second worst case - word copy */
@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
850 __asm_copy_from_user_2(dst, src, retn); 797 __asm_copy_from_user_2(dst, src, retn);
851 n -= 2; 798 n -= 2;
852 if (retn) 799 if (retn)
853 goto copy_exception_bytes; 800 return retn + n;
854 } 801 }
855 } 802 }
856 803
857 /* We only need one check after the unalignment-adjustments,
858 because if both adjustments were done, either both or
859 neither reference had an exception. */
860 if (retn != 0)
861 goto copy_exception_bytes;
862
863#ifdef USE_RAPF 804#ifdef USE_RAPF
864 /* 64 bit copy loop */ 805 /* 64 bit copy loop */
865 if (!(((unsigned long) src | (unsigned long) dst) & 7)) { 806 if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
872 __asm_copy_from_user_8x64(dst, src, retn); 813 __asm_copy_from_user_8x64(dst, src, retn);
873 n -= 8; 814 n -= 8;
874 if (retn) 815 if (retn)
875 goto copy_exception_bytes; 816 return retn + n;
876 } 817 }
877 } 818 }
878 819
@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
888 __asm_copy_from_user_8x64(dst, src, retn); 829 __asm_copy_from_user_8x64(dst, src, retn);
889 n -= 8; 830 n -= 8;
890 if (retn) 831 if (retn)
891 goto copy_exception_bytes; 832 return retn + n;
892 } 833 }
893 } 834 }
894#endif 835#endif
@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
898 n -= 4; 839 n -= 4;
899 840
900 if (retn) 841 if (retn)
901 goto copy_exception_bytes; 842 return retn + n;
902 } 843 }
903 844
904 /* If we get here, there were no memory read faults. */ 845 /* If we get here, there were no memory read faults. */
@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
924 /* If we get here, retn correctly reflects the number of failing 865 /* If we get here, retn correctly reflects the number of failing
925 bytes. */ 866 bytes. */
926 return retn; 867 return retn;
927
928 copy_exception_bytes:
929 /* We already have "retn" bytes cleared, and need to clear the
930 remaining "n" bytes. A non-optimized simple byte-for-byte in-line
931 memset is preferred here, since this isn't speed-critical code and
932 we'd rather have this a leaf-function than calling memset. */
933 {
934 char *endp;
935 for (endp = dst + n; dst < endp; dst++)
936 *dst = 0;
937 }
938
939 return retn + n;
940} 868}
941EXPORT_SYMBOL(__copy_user_zeroing); 869EXPORT_SYMBOL(raw_copy_from_user);
942 870
943#define __asm_clear_8x64(to, ret) \ 871#define __asm_clear_8x64(to, ret) \
944 asm volatile ( \ 872 asm volatile ( \
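
[editor's note] The usercopy rework above replaces the old fixed "rewind by 8/4 bytes" fixups with a rewind to the start of the faulting burst, derived from LSM_STEP in TXSTATUS (already held in D0Ar2 when the fixup runs). A hedged C rendering of the arithmetic the LSR/ANDS/ADDZ/SUB sequence encodes:

    /* TXSTATUS bits 10:8 (LSM_STEP) identify which of the four M{G,S}ETL
     * operations faulted, except that the last one reports 0 rather than 4. */
    static unsigned long rapf_rewind_bytes(unsigned long txstatus, unsigned int unit)
    {
        unsigned long step = (txstatus >> 8) & 0x7;

        if (step == 0)
            step = 4;        /* ADDZ in the asm: 0 really means "4th op" */
        return step * unit;  /* unit is 8 for the 64-bit loop, 4 for 32-bit */
    }
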
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index db459612de44..8b0424abc84c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -9,6 +9,7 @@ config MIPS
9 select HAVE_CONTEXT_TRACKING 9 select HAVE_CONTEXT_TRACKING
10 select HAVE_GENERIC_DMA_COHERENT 10 select HAVE_GENERIC_DMA_COHERENT
11 select HAVE_IDE 11 select HAVE_IDE
12 select HAVE_IRQ_EXIT_ON_IRQ_STACK
12 select HAVE_OPROFILE 13 select HAVE_OPROFILE
13 select HAVE_PERF_EVENTS 14 select HAVE_PERF_EVENTS
14 select PERF_USE_VMALLOC 15 select PERF_USE_VMALLOC
@@ -1463,7 +1464,7 @@ config CPU_MIPS64_R6
1463 select CPU_SUPPORTS_HIGHMEM 1464 select CPU_SUPPORTS_HIGHMEM
1464 select CPU_SUPPORTS_MSA 1465 select CPU_SUPPORTS_MSA
1465 select GENERIC_CSUM 1466 select GENERIC_CSUM
1466 select MIPS_O32_FP64_SUPPORT if MIPS32_O32 1467 select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
1467 help 1468 help
1468 Choose this option to build a kernel for release 6 or later of the 1469 Choose this option to build a kernel for release 6 or later of the
1469 MIPS64 architecture. New MIPS processors, starting with the Warrior 1470 MIPS64 architecture. New MIPS processors, starting with the Warrior
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 3cedd1f95e0f..8ae4067a5eda 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -76,14 +76,14 @@ void ath79_ddr_set_pci_windows(void)
76{ 76{
77 BUG_ON(!ath79_ddr_pci_win_base); 77 BUG_ON(!ath79_ddr_pci_win_base);
78 78
79 __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0); 79 __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0x0);
80 __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 1); 80 __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 0x4);
81 __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 2); 81 __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 0x8);
82 __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 3); 82 __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 0xc);
83 __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 4); 83 __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 0x10);
84 __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 5); 84 __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 0x14);
85 __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 6); 85 __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 0x18);
86 __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 7); 86 __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 0x1c);
87} 87}
88EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows); 88EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows);
89 89
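
[editor's note] The ath79 fix converts window indices into byte offsets: __raw_writel() takes a byte address, so 32-bit register N of a bank sits at base + N * 4, while the old "+ 1 ... + 7" offsets all landed inside the first two words. A minimal sketch of the corrected addressing (a stub, not the kernel helper):

    #include <stdint.h>

    static void set_windows(volatile void *base, const uint32_t *offs, int n)
    {
        for (int i = 0; i < n; i++)
            *(volatile uint32_t *)((volatile char *)base + i * 0x4) = offs[i];
    }
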
diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c
index 52caa75bfe4e..e2f50d690624 100644
--- a/arch/mips/bcm47xx/buttons.c
+++ b/arch/mips/bcm47xx/buttons.c
@@ -17,6 +17,12 @@
17 .active_low = 1, \ 17 .active_low = 1, \
18 } 18 }
19 19
20#define BCM47XX_GPIO_KEY_H(_gpio, _code) \
21 { \
22 .code = _code, \
23 .gpio = _gpio, \
24 }
25
20/* Asus */ 26/* Asus */
21 27
22static const struct gpio_keys_button 28static const struct gpio_keys_button
@@ -79,8 +85,8 @@ bcm47xx_buttons_asus_wl500gpv2[] __initconst = {
79 85
80static const struct gpio_keys_button 86static const struct gpio_keys_button
81bcm47xx_buttons_asus_wl500w[] __initconst = { 87bcm47xx_buttons_asus_wl500w[] __initconst = {
82 BCM47XX_GPIO_KEY(6, KEY_RESTART), 88 BCM47XX_GPIO_KEY_H(6, KEY_RESTART),
83 BCM47XX_GPIO_KEY(7, KEY_WPS_BUTTON), 89 BCM47XX_GPIO_KEY_H(7, KEY_WPS_BUTTON),
84}; 90};
85 91
86static const struct gpio_keys_button 92static const struct gpio_keys_button
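
[editor's note] The only difference between the two button macros is the .active_low flag; the WL-500W keys are wired active-high, hence the new _H variant. A minimal model — struct and key-code values are assumptions for illustration:

    struct gpio_key_sketch { int code; int gpio; int active_low; };

    #define GPIO_KEY(g, c)   { .code = (c), .gpio = (g), .active_low = 1 }
    #define GPIO_KEY_H(g, c) { .code = (c), .gpio = (g) }  /* active_low = 0 */

    static const struct gpio_key_sketch wl500w_keys_sketch[] = {
        GPIO_KEY_H(6, 0x198),  /* assumed KEY_RESTART value */
        GPIO_KEY_H(7, 0x211),  /* assumed KEY_WPS_BUTTON value */
    };
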
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index 64e08df51d65..8b7004132491 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -208,18 +208,18 @@ EXC( STORE t2, UNIT(6)(dst), s_exc_p10u)
208 ADD src, src, 16*NBYTES 208 ADD src, src, 16*NBYTES
209EXC( STORE t3, UNIT(7)(dst), s_exc_p9u) 209EXC( STORE t3, UNIT(7)(dst), s_exc_p9u)
210 ADD dst, dst, 16*NBYTES 210 ADD dst, dst, 16*NBYTES
211EXC( LOAD t0, UNIT(-8)(src), l_exc_copy) 211EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16)
212EXC( LOAD t1, UNIT(-7)(src), l_exc_copy) 212EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16)
213EXC( LOAD t2, UNIT(-6)(src), l_exc_copy) 213EXC( LOAD t2, UNIT(-6)(src), l_exc_copy_rewind16)
214EXC( LOAD t3, UNIT(-5)(src), l_exc_copy) 214EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16)
215EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u) 215EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u)
216EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u) 216EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u)
217EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u) 217EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u)
218EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u) 218EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u)
219EXC( LOAD t0, UNIT(-4)(src), l_exc_copy) 219EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16)
220EXC( LOAD t1, UNIT(-3)(src), l_exc_copy) 220EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16)
221EXC( LOAD t2, UNIT(-2)(src), l_exc_copy) 221EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16)
222EXC( LOAD t3, UNIT(-1)(src), l_exc_copy) 222EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16)
223EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u) 223EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u)
224EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u) 224EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u)
225EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u) 225EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u)
@@ -383,6 +383,10 @@ done:
383 nop 383 nop
384 END(memcpy) 384 END(memcpy)
385 385
386l_exc_copy_rewind16:
387 /* Rewind src and dst by 16*NBYTES for l_exc_copy */
388 SUB src, src, 16*NBYTES
389 SUB dst, dst, 16*NBYTES
386l_exc_copy: 390l_exc_copy:
387 /* 391 /*
388 * Copy bytes from src until faulting load address (or until a 392 * Copy bytes from src until faulting load address (or until a
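
[editor's note] Conceptually, the new l_exc_copy_rewind16 label exists because the unrolled loop advances src/dst by 16*NBYTES and then issues loads at negative offsets; a fault in that half must undo the advance before the byte-by-byte l_exc_copy recovery walks forward from src. A hedged C rendering, assuming NBYTES = 8:

    enum { NBYTES = 8 };  /* assumed unit size */

    static void rewind16_sketch(const unsigned char **src, unsigned char **dst)
    {
        *src -= 16 * NBYTES;  /* undo the prefetch half's pointer advance */
        *dst -= 16 * NBYTES;
        /* ...then fall through to the byte-by-byte recovery copy */
    }
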
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 57ed466e00db..2f140d75d01c 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -68,8 +68,8 @@ CONFIG_NETFILTER_NETLINK_QUEUE=m
68CONFIG_NF_CONNTRACK=m 68CONFIG_NF_CONNTRACK=m
69CONFIG_NF_CONNTRACK_SECMARK=y 69CONFIG_NF_CONNTRACK_SECMARK=y
70CONFIG_NF_CONNTRACK_EVENTS=y 70CONFIG_NF_CONNTRACK_EVENTS=y
71CONFIG_NF_CT_PROTO_DCCP=m 71CONFIG_NF_CT_PROTO_DCCP=y
72CONFIG_NF_CT_PROTO_UDPLITE=m 72CONFIG_NF_CT_PROTO_UDPLITE=y
73CONFIG_NF_CONNTRACK_AMANDA=m 73CONFIG_NF_CONNTRACK_AMANDA=m
74CONFIG_NF_CONNTRACK_FTP=m 74CONFIG_NF_CONNTRACK_FTP=m
75CONFIG_NF_CONNTRACK_H323=m 75CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 48e16d98b2cc..b15508447366 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -134,7 +134,7 @@ CONFIG_LIBFC=m
134CONFIG_SCSI_QLOGIC_1280=y 134CONFIG_SCSI_QLOGIC_1280=y
135CONFIG_SCSI_PMCRAID=m 135CONFIG_SCSI_PMCRAID=m
136CONFIG_SCSI_BFA_FC=m 136CONFIG_SCSI_BFA_FC=m
137CONFIG_SCSI_DH=m 137CONFIG_SCSI_DH=y
138CONFIG_SCSI_DH_RDAC=m 138CONFIG_SCSI_DH_RDAC=m
139CONFIG_SCSI_DH_HP_SW=m 139CONFIG_SCSI_DH_HP_SW=m
140CONFIG_SCSI_DH_EMC=m 140CONFIG_SCSI_DH_EMC=m
@@ -206,7 +206,6 @@ CONFIG_MLX4_EN=m
206# CONFIG_MLX4_DEBUG is not set 206# CONFIG_MLX4_DEBUG is not set
207CONFIG_TEHUTI=m 207CONFIG_TEHUTI=m
208CONFIG_BNX2X=m 208CONFIG_BNX2X=m
209CONFIG_QLGE=m
210CONFIG_SFC=m 209CONFIG_SFC=m
211CONFIG_BE2NET=m 210CONFIG_BE2NET=m
212CONFIG_LIBERTAS_THINFIRM=m 211CONFIG_LIBERTAS_THINFIRM=m
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 004cf52d1b7d..c24b87819ccb 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -39,7 +39,7 @@ CONFIG_HIBERNATION=y
39CONFIG_PM_STD_PARTITION="/dev/hda3" 39CONFIG_PM_STD_PARTITION="/dev/hda3"
40CONFIG_CPU_FREQ=y 40CONFIG_CPU_FREQ=y
41CONFIG_CPU_FREQ_DEBUG=y 41CONFIG_CPU_FREQ_DEBUG=y
42CONFIG_CPU_FREQ_STAT=m 42CONFIG_CPU_FREQ_STAT=y
43CONFIG_CPU_FREQ_STAT_DETAILS=y 43CONFIG_CPU_FREQ_STAT_DETAILS=y
44CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 44CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
45CONFIG_CPU_FREQ_GOV_POWERSAVE=m 45CONFIG_CPU_FREQ_GOV_POWERSAVE=m
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 5afb4840aec7..739ccd0dca64 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -59,8 +59,8 @@ CONFIG_NETFILTER=y
59CONFIG_NF_CONNTRACK=m 59CONFIG_NF_CONNTRACK=m
60CONFIG_NF_CONNTRACK_SECMARK=y 60CONFIG_NF_CONNTRACK_SECMARK=y
61CONFIG_NF_CONNTRACK_EVENTS=y 61CONFIG_NF_CONNTRACK_EVENTS=y
62CONFIG_NF_CT_PROTO_DCCP=m 62CONFIG_NF_CT_PROTO_DCCP=y
63CONFIG_NF_CT_PROTO_UDPLITE=m 63CONFIG_NF_CT_PROTO_UDPLITE=y
64CONFIG_NF_CONNTRACK_AMANDA=m 64CONFIG_NF_CONNTRACK_AMANDA=m
65CONFIG_NF_CONNTRACK_FTP=m 65CONFIG_NF_CONNTRACK_FTP=m
66CONFIG_NF_CONNTRACK_H323=m 66CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index 98f13879bb8f..47f4ecf125ba 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -60,8 +60,8 @@ CONFIG_NETFILTER=y
60CONFIG_NF_CONNTRACK=m 60CONFIG_NF_CONNTRACK=m
61CONFIG_NF_CONNTRACK_SECMARK=y 61CONFIG_NF_CONNTRACK_SECMARK=y
62CONFIG_NF_CONNTRACK_EVENTS=y 62CONFIG_NF_CONNTRACK_EVENTS=y
63CONFIG_NF_CT_PROTO_DCCP=m 63CONFIG_NF_CT_PROTO_DCCP=y
64CONFIG_NF_CT_PROTO_UDPLITE=m 64CONFIG_NF_CT_PROTO_UDPLITE=y
65CONFIG_NF_CONNTRACK_AMANDA=m 65CONFIG_NF_CONNTRACK_AMANDA=m
66CONFIG_NF_CONNTRACK_FTP=m 66CONFIG_NF_CONNTRACK_FTP=m
67CONFIG_NF_CONNTRACK_H323=m 67CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index 3b5d5913f548..e79d325aa085 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -59,8 +59,8 @@ CONFIG_NETFILTER=y
59CONFIG_NF_CONNTRACK=m 59CONFIG_NF_CONNTRACK=m
60CONFIG_NF_CONNTRACK_SECMARK=y 60CONFIG_NF_CONNTRACK_SECMARK=y
61CONFIG_NF_CONNTRACK_EVENTS=y 61CONFIG_NF_CONNTRACK_EVENTS=y
62CONFIG_NF_CT_PROTO_DCCP=m 62CONFIG_NF_CT_PROTO_DCCP=y
63CONFIG_NF_CT_PROTO_UDPLITE=m 63CONFIG_NF_CT_PROTO_UDPLITE=y
64CONFIG_NF_CONNTRACK_AMANDA=m 64CONFIG_NF_CONNTRACK_AMANDA=m
65CONFIG_NF_CONNTRACK_FTP=m 65CONFIG_NF_CONNTRACK_FTP=m
66CONFIG_NF_CONNTRACK_H323=m 66CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 732215732751..ae87ad86243b 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -61,8 +61,8 @@ CONFIG_NETFILTER=y
61CONFIG_NF_CONNTRACK=m 61CONFIG_NF_CONNTRACK=m
62CONFIG_NF_CONNTRACK_SECMARK=y 62CONFIG_NF_CONNTRACK_SECMARK=y
63CONFIG_NF_CONNTRACK_EVENTS=y 63CONFIG_NF_CONNTRACK_EVENTS=y
64CONFIG_NF_CT_PROTO_DCCP=m 64CONFIG_NF_CT_PROTO_DCCP=y
65CONFIG_NF_CT_PROTO_UDPLITE=m 65CONFIG_NF_CT_PROTO_UDPLITE=y
66CONFIG_NF_CONNTRACK_AMANDA=m 66CONFIG_NF_CONNTRACK_AMANDA=m
67CONFIG_NF_CONNTRACK_FTP=m 67CONFIG_NF_CONNTRACK_FTP=m
68CONFIG_NF_CONNTRACK_H323=m 68CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index b3d1d37f85ea..47492fee2952 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -111,7 +111,7 @@ CONFIG_NETFILTER=y
111CONFIG_NF_CONNTRACK=m 111CONFIG_NF_CONNTRACK=m
112CONFIG_NF_CONNTRACK_SECMARK=y 112CONFIG_NF_CONNTRACK_SECMARK=y
113CONFIG_NF_CONNTRACK_EVENTS=y 113CONFIG_NF_CONNTRACK_EVENTS=y
114CONFIG_NF_CT_PROTO_UDPLITE=m 114CONFIG_NF_CT_PROTO_UDPLITE=y
115CONFIG_NF_CONNTRACK_AMANDA=m 115CONFIG_NF_CONNTRACK_AMANDA=m
116CONFIG_NF_CONNTRACK_FTP=m 116CONFIG_NF_CONNTRACK_FTP=m
117CONFIG_NF_CONNTRACK_H323=m 117CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index 3d8016d6cf3e..472a818f1eb8 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -91,7 +91,7 @@ CONFIG_NETFILTER=y
91CONFIG_NF_CONNTRACK=m 91CONFIG_NF_CONNTRACK=m
92CONFIG_NF_CONNTRACK_SECMARK=y 92CONFIG_NF_CONNTRACK_SECMARK=y
93CONFIG_NF_CONNTRACK_EVENTS=y 93CONFIG_NF_CONNTRACK_EVENTS=y
94CONFIG_NF_CT_PROTO_UDPLITE=m 94CONFIG_NF_CT_PROTO_UDPLITE=y
95CONFIG_NF_CONNTRACK_AMANDA=m 95CONFIG_NF_CONNTRACK_AMANDA=m
96CONFIG_NF_CONNTRACK_FTP=m 96CONFIG_NF_CONNTRACK_FTP=m
97CONFIG_NF_CONNTRACK_H323=m 97CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 8c6f508e59de..554d1da97743 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -146,7 +146,25 @@
146 /* 146 /*
147 * Find irq with highest priority 147 * Find irq with highest priority
148 */ 148 */
149 PTR_LA t1,cpu_mask_nr_tbl 149 # open coded PTR_LA t1, cpu_mask_nr_tbl
150#if (_MIPS_SZPTR == 32)
151 # open coded la t1, cpu_mask_nr_tbl
152 lui t1, %hi(cpu_mask_nr_tbl)
153 addiu t1, %lo(cpu_mask_nr_tbl)
154
155#endif
156#if (_MIPS_SZPTR == 64)
157 # open coded dla t1, cpu_mask_nr_tbl
158 .set push
159 .set noat
160 lui t1, %highest(cpu_mask_nr_tbl)
161 lui AT, %hi(cpu_mask_nr_tbl)
162 daddiu t1, t1, %higher(cpu_mask_nr_tbl)
163 daddiu AT, AT, %lo(cpu_mask_nr_tbl)
164 dsll t1, 32
165 daddu t1, t1, AT
166 .set pop
167#endif
1501: lw t2,(t1) 1681: lw t2,(t1)
151 nop 169 nop
152 and t2,t0 170 and t2,t0
@@ -195,7 +213,25 @@
195 /* 213 /*
196 * Find irq with highest priority 214 * Find irq with highest priority
197 */ 215 */
198 PTR_LA t1,asic_mask_nr_tbl 216 # open coded PTR_LA t1,asic_mask_nr_tbl
217#if (_MIPS_SZPTR == 32)
218 # open coded la t1, asic_mask_nr_tbl
219 lui t1, %hi(asic_mask_nr_tbl)
220 addiu t1, %lo(asic_mask_nr_tbl)
221
222#endif
223#if (_MIPS_SZPTR == 64)
224 # open coded dla t1, asic_mask_nr_tbl
225 .set push
226 .set noat
227 lui t1, %highest(asic_mask_nr_tbl)
228 lui AT, %hi(asic_mask_nr_tbl)
229 daddiu t1, t1, %higher(asic_mask_nr_tbl)
230 daddiu AT, AT, %lo(asic_mask_nr_tbl)
231 dsll t1, 32
232 daddu t1, t1, AT
233 .set pop
234#endif
1992: lw t2,(t1) 2352: lw t2,(t1)
200 nop 236 nop
201 and t2,t0 237 and t2,t0
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index de781cf54bc7..da80878f2c0d 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -74,10 +74,7 @@ static inline int compute_return_epc(struct pt_regs *regs)
74 return __microMIPS_compute_return_epc(regs); 74 return __microMIPS_compute_return_epc(regs);
75 if (cpu_has_mips16) 75 if (cpu_has_mips16)
76 return __MIPS16e_compute_return_epc(regs); 76 return __MIPS16e_compute_return_epc(regs);
77 return regs->cp0_epc; 77 } else if (!delay_slot(regs)) {
78 }
79
80 if (!delay_slot(regs)) {
81 regs->cp0_epc += 4; 78 regs->cp0_epc += 4;
82 return 0; 79 return 0;
83 } 80 }
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 3ceacde5eb6e..17f89f9670b2 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -186,7 +186,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr,
186 " daddu %0, %4 \n" 186 " daddu %0, %4 \n"
187 " dsll32 $1, %0, 0 \n" 187 " dsll32 $1, %0, 0 \n"
188 " daddu %0, $1 \n" 188 " daddu %0, $1 \n"
189 " sltu $1, %0, $1 \n"
189 " dsra32 %0, %0, 0 \n" 190 " dsra32 %0, %0, 0 \n"
191 " addu %0, $1 \n"
190#endif 192#endif
191 " .set pop" 193 " .set pop"
192 : "=r" (sum) 194 : "=r" (sum)
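
[editor's note] The checksum fix matters when folding the 64-bit accumulator down to 32 bits: adding the low half into the high half can itself carry, and the old sequence silently dropped that carry; the new sltu/addu pair recovers it. A portable sketch of the corrected fold (not the kernel's exact helper):

    #include <stdint.h>

    static uint32_t csum_fold64(uint64_t sum)
    {
        /* Two rounds: the first fold can produce a 33-bit value, and the
         * second absorbs its carry — the same carry the asm fix adds. */
        sum = (sum & 0xffffffffULL) + (sum >> 32);
        sum = (sum & 0xffffffffULL) + (sum >> 32);
        return (uint32_t)sum;
    }
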
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 15e0fecbc300..ebb9efb02502 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -17,6 +17,18 @@
17 17
18#include <irq.h> 18#include <irq.h>
19 19
20#define IRQ_STACK_SIZE THREAD_SIZE
21
22extern void *irq_stack[NR_CPUS];
23
24static inline bool on_irq_stack(int cpu, unsigned long sp)
25{
26 unsigned long low = (unsigned long)irq_stack[cpu];
27 unsigned long high = low + IRQ_STACK_SIZE;
28
29 return (low <= sp && sp <= high);
30}
31
20#ifdef CONFIG_I8259 32#ifdef CONFIG_I8259
21static inline int irq_canonicalize(int irq) 33static inline int irq_canonicalize(int irq)
22{ 34{
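
[editor's note] on_irq_stack() is a plain bounds test over the per-CPU IRQ stack added for HAVE_IRQ_EXIT_ON_IRQ_STACK. A runnable userspace replica showing how a stack walker would classify a stack pointer (sizes are stand-ins for THREAD_SIZE):

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 2
    #define IRQ_STACK_SIZE 16384UL  /* stand-in for THREAD_SIZE */

    static void *irq_stack[NR_CPUS];

    static bool on_irq_stack(int cpu, unsigned long sp)
    {
        unsigned long low = (unsigned long)irq_stack[cpu];
        unsigned long high = low + IRQ_STACK_SIZE;

        return low <= sp && sp <= high;
    }

    int main(void)
    {
        static unsigned char stack0[IRQ_STACK_SIZE];

        irq_stack[0] = stack0;
        printf("%d\n", on_irq_stack(0, (unsigned long)stack0 + 100)); /* 1 */
        printf("%d\n", on_irq_stack(0, (unsigned long)stack0 + IRQ_STACK_SIZE + 8)); /* 0 */
        return 0;
    }
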
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 40196bebe849..2365ce0ad8f2 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -112,7 +112,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
112 " andi %[ticket], %[ticket], 0xffff \n" 112 " andi %[ticket], %[ticket], 0xffff \n"
113 " bne %[ticket], %[my_ticket], 4f \n" 113 " bne %[ticket], %[my_ticket], 4f \n"
114 " subu %[ticket], %[my_ticket], %[ticket] \n" 114 " subu %[ticket], %[my_ticket], %[ticket] \n"
115 "2: \n" 115 "2: .insn \n"
116 " .subsection 2 \n" 116 " .subsection 2 \n"
117 "4: andi %[ticket], %[ticket], 0xffff \n" 117 "4: andi %[ticket], %[ticket], 0xffff \n"
118 " sll %[ticket], 5 \n" 118 " sll %[ticket], 5 \n"
@@ -187,7 +187,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
187 " sc %[ticket], %[ticket_ptr] \n" 187 " sc %[ticket], %[ticket_ptr] \n"
188 " beqz %[ticket], 1b \n" 188 " beqz %[ticket], 1b \n"
189 " li %[ticket], 1 \n" 189 " li %[ticket], 1 \n"
190 "2: \n" 190 "2: .insn \n"
191 " .subsection 2 \n" 191 " .subsection 2 \n"
192 "3: b 2b \n" 192 "3: b 2b \n"
193 " li %[ticket], 0 \n" 193 " li %[ticket], 0 \n"
@@ -367,7 +367,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
367 " .set reorder \n" 367 " .set reorder \n"
368 __WEAK_LLSC_MB 368 __WEAK_LLSC_MB
369 " li %2, 1 \n" 369 " li %2, 1 \n"
370 "2: \n" 370 "2: .insn \n"
371 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 371 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
372 : GCC_OFF_SMALL_ASM() (rw->lock) 372 : GCC_OFF_SMALL_ASM() (rw->lock)
373 : "memory"); 373 : "memory");
@@ -407,7 +407,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
407 " lui %1, 0x8000 \n" 407 " lui %1, 0x8000 \n"
408 " sc %1, %0 \n" 408 " sc %1, %0 \n"
409 " li %2, 1 \n" 409 " li %2, 1 \n"
410 "2: \n" 410 "2: .insn \n"
411 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), 411 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
412 "=&r" (ret) 412 "=&r" (ret)
413 : GCC_OFF_SMALL_ASM() (rw->lock) 413 : GCC_OFF_SMALL_ASM() (rw->lock)
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index a71da576883c..5347f130f536 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -216,12 +216,19 @@
216 LONG_S $25, PT_R25(sp) 216 LONG_S $25, PT_R25(sp)
217 LONG_S $28, PT_R28(sp) 217 LONG_S $28, PT_R28(sp)
218 LONG_S $31, PT_R31(sp) 218 LONG_S $31, PT_R31(sp)
219
220 /* Set thread_info if we're coming from user mode */
221 mfc0 k0, CP0_STATUS
222 sll k0, 3 /* extract cu0 bit */
223 bltz k0, 9f
224
219 ori $28, sp, _THREAD_MASK 225 ori $28, sp, _THREAD_MASK
220 xori $28, _THREAD_MASK 226 xori $28, _THREAD_MASK
221#ifdef CONFIG_CPU_CAVIUM_OCTEON 227#ifdef CONFIG_CPU_CAVIUM_OCTEON
222 .set mips64 228 .set mips64
223 pref 0, 0($28) /* Prefetch the current pointer */ 229 pref 0, 0($28) /* Prefetch the current pointer */
224#endif 230#endif
2319:
225 .set pop 232 .set pop
226 .endm 233 .endm
227 234
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 154e2039ea5e..ec053ce7bb38 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -101,6 +101,7 @@ void output_thread_info_defines(void)
101 OFFSET(TI_REGS, thread_info, regs); 101 OFFSET(TI_REGS, thread_info, regs);
102 DEFINE(_THREAD_SIZE, THREAD_SIZE); 102 DEFINE(_THREAD_SIZE, THREAD_SIZE);
103 DEFINE(_THREAD_MASK, THREAD_MASK); 103 DEFINE(_THREAD_MASK, THREAD_MASK);
104 DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
104 BLANK(); 105 BLANK();
105} 106}
106 107
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index d8f9b357b222..71e8f4c0b8da 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -399,7 +399,7 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
399 * 399 *
400 * @regs: Pointer to pt_regs 400 * @regs: Pointer to pt_regs
401 * @insn: branch instruction to decode 401 * @insn: branch instruction to decode
402 * @returns: -EFAULT on error and forces SIGBUS, and on success 402 * @returns: -EFAULT on error and forces SIGILL, and on success
403 * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after 403 * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
404 * evaluating the branch. 404 * evaluating the branch.
405 * 405 *
@@ -431,7 +431,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
431 /* Fall through */ 431 /* Fall through */
432 case jr_op: 432 case jr_op:
433 if (NO_R6EMU && insn.r_format.func == jr_op) 433 if (NO_R6EMU && insn.r_format.func == jr_op)
434 goto sigill_r6; 434 goto sigill_r2r6;
435 regs->cp0_epc = regs->regs[insn.r_format.rs]; 435 regs->cp0_epc = regs->regs[insn.r_format.rs];
436 break; 436 break;
437 } 437 }
@@ -446,7 +446,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
446 switch (insn.i_format.rt) { 446 switch (insn.i_format.rt) {
447 case bltzl_op: 447 case bltzl_op:
448 if (NO_R6EMU) 448 if (NO_R6EMU)
449 goto sigill_r6; 449 goto sigill_r2r6;
450 case bltz_op: 450 case bltz_op:
451 if ((long)regs->regs[insn.i_format.rs] < 0) { 451 if ((long)regs->regs[insn.i_format.rs] < 0) {
452 epc = epc + 4 + (insn.i_format.simmediate << 2); 452 epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -459,7 +459,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
459 459
460 case bgezl_op: 460 case bgezl_op:
461 if (NO_R6EMU) 461 if (NO_R6EMU)
462 goto sigill_r6; 462 goto sigill_r2r6;
463 case bgez_op: 463 case bgez_op:
464 if ((long)regs->regs[insn.i_format.rs] >= 0) { 464 if ((long)regs->regs[insn.i_format.rs] >= 0) {
465 epc = epc + 4 + (insn.i_format.simmediate << 2); 465 epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -473,10 +473,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
473 case bltzal_op: 473 case bltzal_op:
474 case bltzall_op: 474 case bltzall_op:
475 if (NO_R6EMU && (insn.i_format.rs || 475 if (NO_R6EMU && (insn.i_format.rs ||
476 insn.i_format.rt == bltzall_op)) { 476 insn.i_format.rt == bltzall_op))
-				ret = -SIGILL;
-				break;
-			}
+				goto sigill_r2r6;
 			regs->regs[31] = epc + 8;
 			/*
 			 * OK we are here either because we hit a NAL
@@ -507,10 +505,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		case bgezal_op:
 		case bgezall_op:
 			if (NO_R6EMU && (insn.i_format.rs ||
-			    insn.i_format.rt == bgezall_op)) {
-				ret = -SIGILL;
-				break;
-			}
+			    insn.i_format.rt == bgezall_op))
+				goto sigill_r2r6;
 			regs->regs[31] = epc + 8;
 			/*
 			 * OK we are here either because we hit a BAL
@@ -556,6 +552,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 	/*
 	 * These are unconditional and in j_format.
 	 */
+	case jalx_op:
 	case jal_op:
 		regs->regs[31] = regs->cp0_epc + 8;
 	case j_op:
@@ -573,7 +570,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 	 */
 	case beql_op:
 		if (NO_R6EMU)
-			goto sigill_r6;
+			goto sigill_r2r6;
 	case beq_op:
 		if (regs->regs[insn.i_format.rs] ==
 		    regs->regs[insn.i_format.rt]) {
@@ -587,7 +584,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
 	case bnel_op:
 		if (NO_R6EMU)
-			goto sigill_r6;
+			goto sigill_r2r6;
 	case bne_op:
 		if (regs->regs[insn.i_format.rs] !=
 		    regs->regs[insn.i_format.rt]) {
@@ -601,7 +598,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
 	case blezl_op: /* not really i_format */
 		if (!insn.i_format.rt && NO_R6EMU)
-			goto sigill_r6;
+			goto sigill_r2r6;
 	case blez_op:
 		/*
 		 * Compact branches for R6 for the
@@ -636,7 +633,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
 	case bgtzl_op:
 		if (!insn.i_format.rt && NO_R6EMU)
-			goto sigill_r6;
+			goto sigill_r2r6;
 	case bgtz_op:
 		/*
 		 * Compact branches for R6 for the
@@ -816,8 +813,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 			break;
 		}
 		/* Compact branch: BNEZC || JIALC */
-		if (insn.i_format.rs)
+		if (!insn.i_format.rs) {
+			/* JIALC: set $31/ra */
 			regs->regs[31] = epc + 4;
+		}
 		regs->cp0_epc += 8;
 		break;
 #endif
@@ -841,11 +840,12 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 	return ret;
 
 sigill_dsp:
-	printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
-	force_sig(SIGBUS, current);
+	pr_info("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
+		current->comm);
+	force_sig(SIGILL, current);
 	return -EFAULT;
-sigill_r6:
-	pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n",
+sigill_r2r6:
+	pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
 		current->comm);
 	force_sig(SIGILL, current);
 	return -EFAULT;
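The BNEZC/JIALC hunk above hinges on the rs field: rs == 0 encodes JIALC, the only variant that links. A standalone C sketch of that decision (illustrative only; the struct layout and names here are assumptions, not the kernel's union mips_instruction, and bitfield ordering is compiler/endianness dependent):

	#include <stdint.h>

	/* Hypothetical i-format view, for illustration only. */
	struct insn_i_format {
		uint32_t simmediate : 16;
		uint32_t rt : 5;
		uint32_t rs : 5;
		uint32_t opcode : 6;
	};

	static void emulate_bnezc_jialc(const struct insn_i_format *insn,
					unsigned long *regs, unsigned long epc)
	{
		if (!insn->rs)			/* rs == 0: JIALC, which links */
			regs[31] = epc + 4;	/* set $31 to the return address */
		/* rs != 0: BNEZC, which never writes $31 */
	}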
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index d434d5d5ae6e..610f0f3bdb34 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -14,12 +14,22 @@ static int crashing_cpu = -1;
 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 
 #ifdef CONFIG_SMP
-static void crash_shutdown_secondary(void *ignore)
+static void crash_shutdown_secondary(void *passed_regs)
 {
-	struct pt_regs *regs;
+	struct pt_regs *regs = passed_regs;
 	int cpu = smp_processor_id();
 
-	regs = task_pt_regs(current);
+	/*
+	 * If we are passed registers, use those. Otherwise get the
+	 * regs from the last interrupt, which should be correct, as
+	 * we are in an interrupt. But if the regs are not there,
+	 * pull them from the top of the stack. They are probably
+	 * wrong, but we need something to keep from crashing again.
+	 */
+	if (!regs)
+		regs = get_irq_regs();
+	if (!regs)
+		regs = task_pt_regs(current);
 
 	if (!cpu_online(cpu))
 		return;
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index 4a4d9e067c89..3afffc30ee12 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -206,7 +206,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
 	else if ((prog_req.fr1 && prog_req.frdefault) ||
 		 (prog_req.single && !prog_req.frdefault))
 		/* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
-		state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+		state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
 					  cpu_has_mips_r2_r6) ?
 					  FP_FR1 : FP_FR0;
 	else if (prog_req.fr1)
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 7791840cf22c..db07793f7b43 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -11,6 +11,7 @@
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/compiler.h>
+#include <asm/irqflags.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
@@ -137,6 +138,7 @@ work_pending:
 	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
 	beqz	t0, work_notifysig
 work_resched:
+	TRACE_IRQS_OFF
 	jal	schedule
 
 	local_irq_disable		# make sure need_resched and
@@ -173,6 +175,7 @@ syscall_exit_work:
 	beqz	t0, work_pending	# trace bit set?
 	local_irq_enable		# could let syscall_trace_leave()
 					# call schedule() instead
+	TRACE_IRQS_ON
 	move	a0, sp
 	jal	syscall_trace_leave
 	b	resume_userspace
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index baa7b6fc0a60..619e30e2c4f0 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -188,9 +188,44 @@ NESTED(handle_int, PT_SIZE, sp)
 
 	LONG_L	s0, TI_REGS($28)
 	LONG_S	sp, TI_REGS($28)
-	PTR_LA	ra, ret_from_irq
-	PTR_LA	v0, plat_irq_dispatch
-	jr	v0
+
+	/*
+	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+	 * Check if we are already using the IRQ stack.
+	 */
+	move	s1, sp # Preserve the sp
+
+	/* Get IRQ stack for this CPU */
+	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+	lui	k1, %hi(irq_stack)
+#else
+	lui	k1, %highest(irq_stack)
+	daddiu	k1, %higher(irq_stack)
+	dsll	k1, 16
+	daddiu	k1, %hi(irq_stack)
+	dsll	k1, 16
+#endif
+	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
+	LONG_ADDU	k1, k0
+	LONG_L	t0, %lo(irq_stack)(k1)
+
+	# Check if already on IRQ stack
+	PTR_LI	t1, ~(_THREAD_SIZE-1)
+	and	t1, t1, sp
+	beq	t0, t1, 2f
+
+	/* Switch to IRQ stack */
+	li	t1, _IRQ_STACK_SIZE
+	PTR_ADD	sp, t0, t1
+
+2:
+	jal	plat_irq_dispatch
+
+	/* Restore sp */
+	move	sp, s1
+
+	j	ret_from_irq
 #ifdef CONFIG_CPU_MICROMIPS
 	nop
 #endif
@@ -263,8 +298,44 @@ NESTED(except_vec_vi_handler, 0, sp)
 
 	LONG_L	s0, TI_REGS($28)
 	LONG_S	sp, TI_REGS($28)
-	PTR_LA	ra, ret_from_irq
-	jr	v0
+
+	/*
+	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+	 * Check if we are already using the IRQ stack.
+	 */
+	move	s1, sp # Preserve the sp
+
+	/* Get IRQ stack for this CPU */
+	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+	lui	k1, %hi(irq_stack)
+#else
+	lui	k1, %highest(irq_stack)
+	daddiu	k1, %higher(irq_stack)
+	dsll	k1, 16
+	daddiu	k1, %hi(irq_stack)
+	dsll	k1, 16
+#endif
+	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
+	LONG_ADDU	k1, k0
+	LONG_L	t0, %lo(irq_stack)(k1)
+
+	# Check if already on IRQ stack
+	PTR_LI	t1, ~(_THREAD_SIZE-1)
+	and	t1, t1, sp
+	beq	t0, t1, 2f
+
+	/* Switch to IRQ stack */
+	li	t1, _IRQ_STACK_SIZE
+	PTR_ADD	sp, t0, t1
+
+2:
+	jalr	v0
+
+	/* Restore sp */
+	move	sp, s1
+
+	j	ret_from_irq
 	END(except_vec_vi_handler)
 
 /*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 8eb5af805964..dc1180a8bfa1 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -25,6 +25,8 @@
 #include <linux/atomic.h>
 #include <asm/uaccess.h>
 
+void *irq_stack[NR_CPUS];
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves.
@@ -55,6 +57,15 @@ void __init init_IRQ(void)
 		irq_set_noprobe(i);
 
 	arch_init_irq();
+
+	for_each_possible_cpu(i) {
+		int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
+		void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
+
+		irq_stack[i] = s;
+		pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
+			 irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
+	}
 }
 
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index de63d36af895..732d6171ac6a 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -244,9 +244,6 @@ static int compute_signal(int tt)
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 {
 	int reg;
-	struct thread_info *ti = task_thread_info(p);
-	unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
-	struct pt_regs *regs = (struct pt_regs *)ksp - 1;
 #if (KGDB_GDB_REG_SIZE == 32)
 	u32 *ptr = (u32 *)gdb_regs;
 #else
@@ -254,25 +251,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 #endif
 
 	for (reg = 0; reg < 16; reg++)
-		*(ptr++) = regs->regs[reg];
+		*(ptr++) = 0;
 
 	/* S0 - S7 */
-	for (reg = 16; reg < 24; reg++)
-		*(ptr++) = regs->regs[reg];
+	*(ptr++) = p->thread.reg16;
+	*(ptr++) = p->thread.reg17;
+	*(ptr++) = p->thread.reg18;
+	*(ptr++) = p->thread.reg19;
+	*(ptr++) = p->thread.reg20;
+	*(ptr++) = p->thread.reg21;
+	*(ptr++) = p->thread.reg22;
+	*(ptr++) = p->thread.reg23;
 
 	for (reg = 24; reg < 28; reg++)
 		*(ptr++) = 0;
 
 	/* GP, SP, FP, RA */
-	for (reg = 28; reg < 32; reg++)
-		*(ptr++) = regs->regs[reg];
-
-	*(ptr++) = regs->cp0_status;
-	*(ptr++) = regs->lo;
-	*(ptr++) = regs->hi;
-	*(ptr++) = regs->cp0_badvaddr;
-	*(ptr++) = regs->cp0_cause;
-	*(ptr++) = regs->cp0_epc;
+	*(ptr++) = (long)p;
+	*(ptr++) = p->thread.reg29;
+	*(ptr++) = p->thread.reg30;
+	*(ptr++) = p->thread.reg31;
+
+	*(ptr++) = p->thread.cp0_status;
+
+	/* lo, hi */
+	*(ptr++) = 0;
+	*(ptr++) = 0;
+
+	/*
+	 * BadVAddr, Cause
+	 * Ideally these would come from the last exception frame up the stack
+	 * but that requires unwinding, otherwise we can't know much for sure.
+	 */
+	*(ptr++) = 0;
+	*(ptr++) = 0;
+
+	/*
+	 * PC
+	 * use return address (RA), i.e. the moment after return from resume()
+	 */
+	*(ptr++) = p->thread.reg31;
 }
 
 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index af27334d6809..e3384065f5e7 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -434,8 +434,8 @@ static int multu_func(struct pt_regs *regs, u32 ir)
 	rs = regs->regs[MIPSInst_RS(ir)];
 	res = (u64)rt * (u64)rs;
 	rt = res;
-	regs->lo = (s64)rt;
-	regs->hi = (s64)(res >> 32);
+	regs->lo = (s64)(s32)rt;
+	regs->hi = (s64)(s32)(res >> 32);
 
 	MIPS_R2_STATS(muls);
 
@@ -671,9 +671,9 @@ static int maddu_func(struct pt_regs *regs, u32 ir)
 	res += ((((s64)rt) << 32) | (u32)rs);
 
 	rt = res;
-	regs->lo = (s64)rt;
+	regs->lo = (s64)(s32)rt;
 	rs = res >> 32;
-	regs->hi = (s64)rs;
+	regs->hi = (s64)(s32)rs;
 
 	MIPS_R2_STATS(dsps);
 
@@ -729,9 +729,9 @@ static int msubu_func(struct pt_regs *regs, u32 ir)
 	res = ((((s64)rt) << 32) | (u32)rs) - res;
 
 	rt = res;
-	regs->lo = (s64)rt;
+	regs->lo = (s64)(s32)rt;
 	rs = res >> 32;
-	regs->hi = (s64)rs;
+	regs->hi = (s64)(s32)rs;
 
 	MIPS_R2_STATS(dsps);
 
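All three hunks above make the same correction: on a 64-bit kernel, lo and hi must each hold the architecturally sign-extended low and high 32-bit halves of the product; casting the u32 intermediate straight to s64 zero-extends instead. A minimal sketch of the fixed behaviour, using plain C types rather than struct pt_regs:

	#include <stdint.h>

	/* Store a 64-bit multiply result into MIPS-style lo/hi registers. */
	static void split_hilo(uint64_t res, int64_t *lo, int64_t *hi)
	{
		*lo = (int64_t)(int32_t)res;		/* sign-extend bits 31..0 */
		*hi = (int64_t)(int32_t)(res >> 32);	/* sign-extend bits 63..32 */
	}

With res = 0x0000000180000000, this yields hi = 1 and lo = 0xffffffff80000000; the old casts left lo zero-extended, which userland could observe via MFLO.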
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index f63a289977cc..0b3e58a3189f 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -55,7 +55,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
  * state. Actually per-core rather than per-CPU.
  */
 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
 
 /* Indicates online CPUs coupled with the current CPU */
 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -625,7 +624,6 @@ static int __init cps_gen_core_entries(unsigned cpu)
 {
 	enum cps_pm_state state;
 	unsigned core = cpu_data[cpu].core;
-	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
 	void *entry_fn, *core_rc;
 
 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -645,16 +643,11 @@ static int __init cps_gen_core_entries(unsigned cpu)
 	}
 
 	if (!per_cpu(ready_count, core)) {
-		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
 		if (!core_rc) {
 			pr_err("Failed allocate core %u ready_count\n", core);
 			return -ENOMEM;
 		}
-		per_cpu(ready_count_alloc, core) = core_rc;
-
-		/* Ensure ready_count is aligned to a cacheline boundary */
-		core_rc += dlinesz - 1;
-		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
 		per_cpu(ready_count, core) = core_rc;
 	}
 
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 298b2b773d12..f1fab6ff53e6 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -83,7 +83,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	}
 
 	seq_printf(m, "isa\t\t\t:");
-	if (cpu_has_mips_r1)
+	if (cpu_has_mips_1)
 		seq_printf(m, " mips1");
 	if (cpu_has_mips_2)
 		seq_printf(m, "%s", " mips2");
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 44a6f25e902e..8c26ecac930d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -32,6 +32,7 @@
 #include <asm/cpu.h>
 #include <asm/dsp.h>
 #include <asm/fpu.h>
+#include <asm/irq.h>
 #include <asm/msa.h>
 #include <asm/pgtable.h>
 #include <asm/mipsregs.h>
@@ -191,11 +192,9 @@ struct mips_frame_info {
 #define J_TARGET(pc,target) \
 		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
 
-static inline int is_ra_save_ins(union mips_instruction *ip)
+static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
 {
 #ifdef CONFIG_CPU_MICROMIPS
-	union mips_instruction mmi;
-
 	/*
 	 * swsp ra,offset
 	 * swm16 reglist,offset(sp)
@@ -205,29 +204,71 @@ static inline int is_ra_save_ins(union mips_instruction *ip)
 	 *
 	 * microMIPS is way more fun...
 	 */
-	if (mm_insn_16bit(ip->halfword[0])) {
-		mmi.word = (ip->halfword[0] << 16);
-		return (mmi.mm16_r5_format.opcode == mm_swsp16_op &&
-			mmi.mm16_r5_format.rt == 31) ||
-		       (mmi.mm16_m_format.opcode == mm_pool16c_op &&
-			mmi.mm16_m_format.func == mm_swm16_op);
+	if (mm_insn_16bit(ip->halfword[1])) {
+		switch (ip->mm16_r5_format.opcode) {
+		case mm_swsp16_op:
+			if (ip->mm16_r5_format.rt != 31)
+				return 0;
+
+			*poff = ip->mm16_r5_format.simmediate;
+			*poff = (*poff << 2) / sizeof(ulong);
+			return 1;
+
+		case mm_pool16c_op:
+			switch (ip->mm16_m_format.func) {
+			case mm_swm16_op:
+				*poff = ip->mm16_m_format.imm;
+				*poff += 1 + ip->mm16_m_format.rlist;
+				*poff = (*poff << 2) / sizeof(ulong);
+				return 1;
+
+			default:
+				return 0;
+			}
+
+		default:
+			return 0;
+		}
 	}
-	else {
-		mmi.halfword[0] = ip->halfword[1];
-		mmi.halfword[1] = ip->halfword[0];
-		return (mmi.mm_m_format.opcode == mm_pool32b_op &&
-			mmi.mm_m_format.rd > 9 &&
-			mmi.mm_m_format.base == 29 &&
-			mmi.mm_m_format.func == mm_swm32_func) ||
-		       (mmi.i_format.opcode == mm_sw32_op &&
-			mmi.i_format.rs == 29 &&
-			mmi.i_format.rt == 31);
+
+	switch (ip->i_format.opcode) {
+	case mm_sw32_op:
+		if (ip->i_format.rs != 29)
+			return 0;
+		if (ip->i_format.rt != 31)
+			return 0;
+
+		*poff = ip->i_format.simmediate / sizeof(ulong);
+		return 1;
+
+	case mm_pool32b_op:
+		switch (ip->mm_m_format.func) {
+		case mm_swm32_func:
+			if (ip->mm_m_format.rd < 0x10)
+				return 0;
+			if (ip->mm_m_format.base != 29)
+				return 0;
+
+			*poff = ip->mm_m_format.simmediate;
+			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
+			*poff /= sizeof(ulong);
+			return 1;
+		default:
+			return 0;
+		}
+
+	default:
+		return 0;
 	}
 #else
 	/* sw / sd $ra, offset($sp) */
-	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
-		ip->i_format.rs == 29 &&
-		ip->i_format.rt == 31;
+	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
+	    ip->i_format.rs == 29 && ip->i_format.rt == 31) {
+		*poff = ip->i_format.simmediate / sizeof(ulong);
+		return 1;
+	}
+
+	return 0;
 #endif
 }
 
@@ -242,13 +283,16 @@ static inline int is_jump_ins(union mips_instruction *ip)
 	 *
 	 * microMIPS is kind of more fun...
 	 */
-	union mips_instruction mmi;
-
-	mmi.word = (ip->halfword[0] << 16);
+	if (mm_insn_16bit(ip->halfword[1])) {
+		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
+		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
+			return 1;
+		return 0;
+	}
 
-	if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
-	    (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
-	    ip->j_format.opcode == mm_jal32_op)
+	if (ip->j_format.opcode == mm_j32_op)
+		return 1;
+	if (ip->j_format.opcode == mm_jal32_op)
 		return 1;
 	if (ip->r_format.opcode != mm_pool32a_op ||
 	    ip->r_format.func != mm_pool32axf_op)
@@ -276,15 +320,13 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
 	 *
 	 * microMIPS is not more fun...
 	 */
-	if (mm_insn_16bit(ip->halfword[0])) {
-		union mips_instruction mmi;
-
-		mmi.word = (ip->halfword[0] << 16);
-		return (mmi.mm16_r3_format.opcode == mm_pool16d_op &&
-			mmi.mm16_r3_format.simmediate && mm_addiusp_func) ||
-		       (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
-			mmi.mm16_r5_format.rt == 29);
+	if (mm_insn_16bit(ip->halfword[1])) {
+		return (ip->mm16_r3_format.opcode == mm_pool16d_op &&
+			ip->mm16_r3_format.simmediate && mm_addiusp_func) ||
+		       (ip->mm16_r5_format.opcode == mm_pool16d_op &&
+			ip->mm16_r5_format.rt == 29);
 	}
+
 	return ip->mm_i_format.opcode == mm_addiu32_op &&
 	       ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
 #else
@@ -299,30 +341,36 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
 
 static int get_frame_info(struct mips_frame_info *info)
 {
-#ifdef CONFIG_CPU_MICROMIPS
-	union mips_instruction *ip = (void *) (((char *) info->func) - 1);
-#else
-	union mips_instruction *ip = info->func;
-#endif
-	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
-	unsigned i;
+	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
+	union mips_instruction insn, *ip, *ip_end;
+	const unsigned int max_insns = 128;
+	unsigned int i;
 
 	info->pc_offset = -1;
 	info->frame_size = 0;
 
+	ip = (void *)msk_isa16_mode((ulong)info->func);
 	if (!ip)
 		goto err;
 
-	if (max_insns == 0)
-		max_insns = 128U;	/* unknown function size */
-	max_insns = min(128U, max_insns);
+	ip_end = (void *)ip + info->func_size;
 
-	for (i = 0; i < max_insns; i++, ip++) {
+	for (i = 0; i < max_insns && ip < ip_end; i++, ip++) {
+		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
+			insn.halfword[0] = 0;
+			insn.halfword[1] = ip->halfword[0];
+		} else if (is_mmips) {
+			insn.halfword[0] = ip->halfword[1];
+			insn.halfword[1] = ip->halfword[0];
+		} else {
+			insn.word = ip->word;
+		}
 
-		if (is_jump_ins(ip))
+		if (is_jump_ins(&insn))
 			break;
+
 		if (!info->frame_size) {
-			if (is_sp_move_ins(ip))
+			if (is_sp_move_ins(&insn))
 			{
 #ifdef CONFIG_CPU_MICROMIPS
 				if (mm_insn_16bit(ip->halfword[0]))
@@ -345,11 +393,9 @@ static int get_frame_info(struct mips_frame_info *info)
 			}
 			continue;
 		}
-		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
-			info->pc_offset =
-				ip->i_format.simmediate / sizeof(long);
+		if (info->pc_offset == -1 &&
+		    is_ra_save_ins(&insn, &info->pc_offset))
 			break;
-		}
 	}
 	if (info->frame_size && info->pc_offset >= 0) /* nested */
 		return 0;
@@ -507,7 +553,19 @@ EXPORT_SYMBOL(unwind_stack_by_address);
 unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
 			   unsigned long pc, unsigned long *ra)
 {
-	unsigned long stack_page = (unsigned long)task_stack_page(task);
+	unsigned long stack_page = 0;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (on_irq_stack(cpu, *sp)) {
+			stack_page = (unsigned long)irq_stack[cpu];
+			break;
+		}
+	}
+
+	if (!stack_page)
+		stack_page = (unsigned long)task_stack_page(task);
+
 	return unwind_stack_by_address(stack_page, sp, pc, ra);
 }
 #endif
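The unwind change above first asks whether the stack pointer lies on some CPU's IRQ stack before falling back to the task stack; the on_irq_stack() test reduces to a range check. A hedged sketch (the size constant and names here are assumed for illustration, not the kernel's definitions):

	#include <stdbool.h>

	#define IRQ_STACK_SIZE 0x2000UL		/* assumed size for this sketch */

	/* True if sp points into the stack region starting at base. */
	static bool on_stack_region(unsigned long sp, unsigned long base)
	{
		return base != 0 && sp >= base && sp < base + IRQ_STACK_SIZE;
	}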
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 74d581569778..24c115a0721a 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -485,7 +485,8 @@ static int fpr_set(struct task_struct *target,
 						  &target->thread.fpu,
 						  0, sizeof(elf_fpregset_t));
 
-	for (i = 0; i < NUM_FPU_REGS; i++) {
+	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+	for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
 		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 					 &fpr_val, i * sizeof(elf_fpreg_t),
 					 (i + 1) * sizeof(elf_fpreg_t));
@@ -926,7 +927,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 	audit_syscall_exit(regs);
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
-		trace_sys_exit(regs, regs->regs[2]);
+		trace_sys_exit(regs, regs_return_value(regs));
 
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 2d23c834ba96..29b0c5f978e4 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -372,7 +372,7 @@ EXPORT(sys_call_table)
 	PTR	sys_writev
 	PTR	sys_cacheflush
 	PTR	sys_cachectl
-	PTR	sys_sysmips
+	PTR	__sys_sysmips
 	PTR	sys_ni_syscall			/* 4150 */
 	PTR	sys_getsid
 	PTR	sys_fdatasync
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index deac63315d0e..a6323a969919 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -312,7 +312,7 @@ EXPORT(sys_call_table)
 	PTR	sys_sched_getaffinity
 	PTR	sys_cacheflush
 	PTR	sys_cachectl
-	PTR	sys_sysmips
+	PTR	__sys_sysmips
 	PTR	sys_io_setup			/* 5200 */
 	PTR	sys_io_destroy
 	PTR	sys_io_getevents
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index ee93d5fe61d7..e0fdca8d3abe 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -298,7 +298,7 @@ EXPORT(sysn32_call_table)
 	PTR	compat_sys_sched_getaffinity
 	PTR	sys_cacheflush
 	PTR	sys_cachectl
-	PTR	sys_sysmips
+	PTR	__sys_sysmips
 	PTR	compat_sys_io_setup		/* 6200 */
 	PTR	sys_io_destroy
 	PTR	compat_sys_io_getevents
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b77052ec6fb2..87c697181d25 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -367,7 +367,7 @@ EXPORT(sys32_call_table)
 	PTR	compat_sys_writev
 	PTR	sys_cacheflush
 	PTR	sys_cachectl
-	PTR	sys_sysmips
+	PTR	__sys_sysmips
 	PTR	sys_ni_syscall			/* 4150 */
 	PTR	sys_getsid
 	PTR	sys_fdatasync
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 53a7ef9a8f32..4234b2d726c5 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -28,6 +28,7 @@
 #include <linux/elf.h>
 
 #include <asm/asm.h>
+#include <asm/asm-eva.h>
 #include <asm/branch.h>
 #include <asm/cachectl.h>
 #include <asm/cacheflush.h>
@@ -138,10 +139,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 		__asm__ __volatile__ (
 		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"	li	%[err], 0				\n"
-		"1:	ll	%[old], (%[addr])			\n"
+		"1:							\n"
+		user_ll("%[old]", "(%[addr])")
 		"	move	%[tmp], %[new]				\n"
-		"2:	sc	%[tmp], (%[addr])			\n"
-		"	bnez	%[tmp], 4f				\n"
+		"2:							\n"
+		user_sc("%[tmp]", "(%[addr])")
+		"	beqz	%[tmp], 4f				\n"
 		"3:							\n"
 		"	.insn						\n"
 		"	.subsection 2					\n"
@@ -199,6 +202,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 	unreachable();
 }
 
+/*
+ * mips_atomic_set() normally returns directly via syscall_exit potentially
+ * clobbering static registers, so be sure to preserve them.
+ */
+save_static_function(sys_sysmips);
+
 SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
 {
 	switch (cmd) {
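Two separate fixes meet in the hunk above: the load-linked/store-conditional pair is routed through user_ll/user_sc so EVA kernels touch the user address space, and the retry branch becomes beqz, since sc writes 1 on success and 0 on failure. The retry logic, modelled with GCC atomic builtins rather than MIPS assembly (a sketch of the pattern, not the kernel's implementation):

	/* Atomically swap *addr with new, retrying while the "sc" analogue fails. */
	static long atomic_set_value(long *addr, long new)
	{
		long old = __atomic_load_n(addr, __ATOMIC_RELAXED);

		/* On failure, old is refreshed and the loop retries - the
		 * equivalent of branching back when sc leaves 0 in %[tmp]. */
		while (!__atomic_compare_exchange_n(addr, &old, new, /*weak=*/1,
						    __ATOMIC_SEQ_CST,
						    __ATOMIC_SEQ_CST))
			;
		return old;
	}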
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 99a402231f4d..31ca2edd7218 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -194,6 +194,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 {
 	struct pt_regs regs;
 	mm_segment_t old_fs = get_fs();
+
+	regs.cp0_status = KSU_KERNEL;
 	if (sp) {
 		regs.regs[29] = (unsigned long)sp;
 		regs.regs[31] = 0;
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 80554e8f6037..daf580ce5ca2 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
 
 	if (!np_xbar)
 		panic("Failed to load xbar nodes from devicetree");
-	if (of_address_to_resource(np_pmu, 0, &res_xbar))
+	if (of_address_to_resource(np_xbar, 0, &res_xbar))
 		panic("Failed to get xbar resources");
 	if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
 		res_xbar.name) < 0)
@@ -545,7 +545,7 @@ void __init ltq_soc_init(void)
 		clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI);
 		clkdev_add_pmu("1a800000.pcie", "pdi", 1, 1, PMU1_PCIE2_PDI);
 		clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL);
-		clkdev_add_pmu("1e108000.eth", NULL, 1, 0, PMU_SWITCH | PMU_PPE_DP);
+		clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP);
 		clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
 		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
 	} else if (of_machine_is_compatible("lantiq,ar10")) {
@@ -553,7 +553,7 @@ void __init ltq_soc_init(void)
 			ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz());
 		clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0);
 		clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1);
-		clkdev_add_pmu("1e108000.eth", NULL, 1, 0, PMU_SWITCH |
+		clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH |
 			       PMU_PPE_DP | PMU_PPE_TC);
 		clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
 		clkdev_add_pmu("1f203000.rcu", "gphy", 1, 0, PMU_GPHY);
@@ -575,11 +575,11 @@ void __init ltq_soc_init(void)
 		clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS);
 
 		clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
-		clkdev_add_pmu("1e108000.eth", NULL, 1, 0,
+		clkdev_add_pmu("1e108000.eth", NULL, 0, 0,
 				PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
 				PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
 				PMU_PPE_QSB | PMU_PPE_TOP);
-		clkdev_add_pmu("1f203000.rcu", "gphy", 1, 0, PMU_GPHY);
+		clkdev_add_pmu("1f203000.rcu", "gphy", 0, 0, PMU_GPHY);
 		clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
 		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
 		clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 734a2c7665ec..6da2e4a6ba39 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2496,6 +2496,35 @@ dcopuop:
 		return 0;
 }
 
+/*
+ * Emulate FPU instructions.
+ *
+ * If we use FPU hardware, then we have been typically called to handle
+ * an unimplemented operation, such as where an operand is a NaN or
+ * denormalized.  In that case exit the emulation loop after a single
+ * iteration so as to let hardware execute any subsequent instructions.
+ *
+ * If we have no FPU hardware or it has been disabled, then continue
+ * emulating floating-point instructions until one of these conditions
+ * has occurred:
+ *
+ * - a non-FPU instruction has been encountered,
+ *
+ * - an attempt to emulate has ended with a signal,
+ *
+ * - the ISA mode has been switched.
+ *
+ * We need to terminate the emulation loop if we got switched to the
+ * MIPS16 mode, whether supported or not, so that we do not attempt
+ * to emulate a MIPS16 instruction as a regular MIPS FPU instruction.
+ * Similarly if we got switched to the microMIPS mode and only the
+ * regular MIPS mode is supported, so that we do not attempt to emulate
+ * a microMIPS instruction as a regular MIPS FPU instruction.  Or if
+ * we got switched to the regular MIPS mode and only the microMIPS mode
+ * is supported, so that we do not attempt to emulate a regular MIPS
+ * instruction that should cause an Address Error exception instead.
+ * For simplicity we always terminate upon an ISA mode switch.
+ */
 int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			     int has_fpu, void *__user *fault_addr)
 {
@@ -2581,6 +2610,15 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 				break;
 		if (sig)
 			break;
+		/*
+		 * We have to check for the ISA bit explicitly here,
+		 * because `get_isa16_mode' may return 0 if support
+		 * for code compression has been globally disabled,
+		 * or otherwise we may produce the wrong signal or
+		 * even proceed successfully where we must not.
+		 */
+		if ((xcp->cp0_epc ^ prevepc) & 0x1)
+			break;
 
 		cond_resched();
 	} while (xcp->cp0_epc > prevepc);
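The added check keys off the architectural convention that bit 0 of the PC carries the ISA mode (set for MIPS16/microMIPS, clear for classic MIPS). In isolation (an illustrative helper, not the kernel function):

	#include <stdbool.h>

	/* True if an emulated step flipped the ISA-mode bit of the PC. */
	static bool isa_mode_switched(unsigned long epc, unsigned long prevepc)
	{
		return ((epc ^ prevepc) & 0x1UL) != 0;
	}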
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 353037699512..c5fdea5debe5 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index dc7c5a5214a9..efaf364fe581 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -31,26 +31,40 @@ static inline void indy_sc_wipe(unsigned long first, unsigned long last)
 	unsigned long tmp;
 
 	__asm__ __volatile__(
-	".set\tpush\t\t\t# indy_sc_wipe\n\t"
-	".set\tnoreorder\n\t"
-	".set\tmips3\n\t"
-	".set\tnoat\n\t"
-	"mfc0\t%2, $12\n\t"
-	"li\t$1, 0x80\t\t\t# Go 64 bit\n\t"
-	"mtc0\t$1, $12\n\t"
-
-	"dli\t$1, 0x9000000080000000\n\t"
-	"or\t%0, $1\t\t\t# first line to flush\n\t"
-	"or\t%1, $1\t\t\t# last line to flush\n\t"
-	".set\tat\n\t"
-
-	"1:\tsw\t$0, 0(%0)\n\t"
-	"bne\t%0, %1, 1b\n\t"
-	" daddu\t%0, 32\n\t"
-
-	"mtc0\t%2, $12\t\t\t# Back to 32 bit\n\t"
-	"nop; nop; nop; nop;\n\t"
-	".set\tpop"
+	"	.set	push			# indy_sc_wipe		\n"
+	"	.set	noreorder					\n"
+	"	.set	mips3						\n"
+	"	.set	noat						\n"
+	"	mfc0	%2, $12						\n"
+	"	li	$1, 0x80		# Go 64 bit		\n"
+	"	mtc0	$1, $12						\n"
+	"								\n"
+	"	#							\n"
+	"	# Open code a dli $1, 0x9000000080000000		\n"
+	"	#							\n"
+	"	# Required because binutils 2.25 will happily accept	\n"
+	"	# 64 bit instructions in .set mips3 mode but puke on	\n"
+	"	# 64 bit constants when generating 32 bit ELF		\n"
+	"	#							\n"
+	"	lui	$1,0x9000					\n"
+	"	dsll	$1,$1,0x10					\n"
+	"	ori	$1,$1,0x8000					\n"
+	"	dsll	$1,$1,0x10					\n"
+	"								\n"
+	"	or	%0, $1			# first line to flush	\n"
+	"	or	%1, $1			# last line to flush	\n"
+	"	.set	at						\n"
+	"								\n"
+	"1:	sw	$0, 0(%0)					\n"
+	"	bne	%0, %1, 1b					\n"
+	"	daddu	%0, 32						\n"
+	"								\n"
+	"	mtc0	%2, $12			# Back to 32 bit	\n"
+	"	nop				# pipeline hazard	\n"
+	"	nop							\n"
+	"	nop							\n"
+	"	nop							\n"
+	"	.set	pop						\n"
 	: "=r" (first), "=r" (last), "=&r" (tmp)
 	: "0" (first), "1" (last));
 }
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 29f73e00253d..63b7d6f82d24 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -757,7 +757,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
 static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
 				    struct uasm_label **l,
 				    unsigned int pte,
-				    unsigned int ptr)
+				    unsigned int ptr,
+				    unsigned int flush)
 {
 #ifdef CONFIG_SMP
 	UASM_i_SC(p, pte, 0, ptr);
@@ -766,6 +767,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
 #else
 	UASM_i_SW(p, pte, 0, ptr);
 #endif
+	if (cpu_has_ftlb && flush) {
+		BUG_ON(!cpu_has_tlbinv);
+
+		UASM_i_MFC0(p, ptr, C0_ENTRYHI);
+		uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+		build_tlb_write_entry(p, l, r, tlb_indexed);
+
+		uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+		build_huge_update_entries(p, pte, ptr);
+		build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
+
+		return;
+	}
+
 	build_huge_update_entries(p, pte, ptr);
 	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 }
@@ -2082,7 +2099,7 @@ static void build_r4000_tlb_load_handler(void)
 		uasm_l_tlbl_goaround2(&l, p);
 	}
 	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
-	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
 #endif
 
 	uasm_l_nopage_tlbl(&l, p);
@@ -2137,7 +2154,7 @@ static void build_r4000_tlb_store_handler(void)
 	build_tlb_probe_entry(&p);
 	uasm_i_ori(&p, wr.r1, wr.r1,
 		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
-	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
 #endif
 
 	uasm_l_nopage_tlbs(&l, p);
@@ -2193,7 +2210,7 @@ static void build_r4000_tlb_modify_handler(void)
 	build_tlb_probe_entry(&p);
 	uasm_i_ori(&p, wr.r1, wr.r1,
 		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
-	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
 #endif
 
 	uasm_l_nopage_tlbm(&l, p);
diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S
index edbab9b8691f..c474981a6c0d 100644
--- a/arch/mips/netlogic/common/reset.S
+++ b/arch/mips/netlogic/common/reset.S
@@ -50,7 +50,6 @@
 #include <asm/netlogic/xlp-hal/sys.h>
 #include <asm/netlogic/xlp-hal/cpucontrol.h>
 
-#define CP0_EBASE	$15
 #define SYS_CPU_COHERENT_BASE	CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
 			XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
 			SYS_CPU_NONCOHERENT_MODE * 4
@@ -92,7 +91,7 @@
  * registers. On XLPII CPUs, usual cache instructions work.
  */
 .macro	xlp_flush_l1_dcache
-	mfc0	t0, CP0_EBASE, 0
+	mfc0	t0, CP0_PRID
 	andi	t0, t0, PRID_IMP_MASK
 	slt	t1, t0, 0x1200
 	beqz	t1, 15f
@@ -171,7 +170,7 @@ FEXPORT(nlm_reset_entry)
 	nop
 
 1:	/* Entry point on core wakeup */
-	mfc0	t0, CP0_EBASE, 0	/* processor ID */
+	mfc0	t0, CP0_PRID		/* processor ID */
 	andi	t0, PRID_IMP_MASK
 	li	t1, 0x1500		/* XLP 9xx */
 	beq	t0, t1, 2f		/* does not need to set coherent */
@@ -182,8 +181,8 @@ FEXPORT(nlm_reset_entry)
 	nop
 
 	/* set bit in SYS coherent register for the core */
-	mfc0	t0, CP0_EBASE, 1
-	mfc0	t1, CP0_EBASE, 1
+	mfc0	t0, CP0_EBASE
+	mfc0	t1, CP0_EBASE
 	srl	t1, 5
 	andi	t1, 0x3			/* t1 <- node */
 	li	t2, 0x40000
@@ -232,7 +231,7 @@ EXPORT(nlm_boot_siblings)
 
 	 * NOTE: All GPR contents are lost after the mtcr above!
 	 */
-	mfc0	v0, CP0_EBASE, 1
+	mfc0	v0, CP0_EBASE
 	andi	v0, 0x3ff		/* v0 <- node/core */
 
 	/*
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index 805355b0bd05..f0cc4c9de2bb 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -48,8 +48,6 @@
 #include <asm/netlogic/xlp-hal/sys.h>
 #include <asm/netlogic/xlp-hal/cpucontrol.h>
 
-#define CP0_EBASE	$15
-
 	.set	noreorder
 	.set	noat
 	.set	arch=xlr	/* for mfcr/mtcr, XLR is sufficient */
@@ -86,7 +84,7 @@ NESTED(nlm_boot_secondary_cpus, 16, sp)
 	PTR_L	gp, 0(t1)
 
 	/* a0 has the processor id */
-	mfc0	a0, CP0_EBASE, 1
+	mfc0	a0, CP0_EBASE
 	andi	a0, 0x3ff		/* a0 <- node/core */
 	PTR_LA	t0, nlm_early_init_secondary
 	jalr	t0
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index dfb04fcedb04..48d6349fd9d7 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -107,31 +107,31 @@ static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
 };
 
 static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
-	FUNC("sdcx", 3, 19, 1),
+	FUNC("sdxc d6", 3, 19, 1),
 	FUNC("utif", 2, 19, 1),
 	FUNC("gpio", 1, 19, 1),
-	FUNC("pwm", 0, 19, 1),
+	FUNC("pwm1", 0, 19, 1),
 };
 
 static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
-	FUNC("sdcx", 3, 18, 1),
+	FUNC("sdxc d7", 3, 18, 1),
 	FUNC("utif", 2, 18, 1),
 	FUNC("gpio", 1, 18, 1),
-	FUNC("pwm", 0, 18, 1),
+	FUNC("pwm0", 0, 18, 1),
 };
 
 static struct rt2880_pmx_func uart2_grp_mt7628[] = {
-	FUNC("sdcx", 3, 20, 2),
+	FUNC("sdxc d5 d4", 3, 20, 2),
 	FUNC("pwm", 2, 20, 2),
 	FUNC("gpio", 1, 20, 2),
-	FUNC("uart", 0, 20, 2),
+	FUNC("uart2", 0, 20, 2),
 };
 
 static struct rt2880_pmx_func uart1_grp_mt7628[] = {
-	FUNC("sdcx", 3, 45, 2),
+	FUNC("sw_r", 3, 45, 2),
 	FUNC("pwm", 2, 45, 2),
 	FUNC("gpio", 1, 45, 2),
-	FUNC("uart", 0, 45, 2),
+	FUNC("uart1", 0, 45, 2),
 };
 
 static struct rt2880_pmx_func i2c_grp_mt7628[] = {
@@ -143,21 +143,21 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
 
 static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
 static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
-static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) };
+static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
 static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
 
 static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
 	FUNC("jtag", 3, 22, 8),
 	FUNC("utif", 2, 22, 8),
 	FUNC("gpio", 1, 22, 8),
-	FUNC("sdcx", 0, 22, 8),
+	FUNC("sdxc", 0, 22, 8),
 };
 
 static struct rt2880_pmx_func uart0_grp_mt7628[] = {
 	FUNC("-", 3, 12, 2),
 	FUNC("-", 2, 12, 2),
 	FUNC("gpio", 1, 12, 2),
-	FUNC("uart", 0, 12, 2),
+	FUNC("uart0", 0, 12, 2),
 };
 
 static struct rt2880_pmx_func i2s_grp_mt7628[] = {
@@ -171,7 +171,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
 	FUNC("-", 3, 6, 1),
 	FUNC("refclk", 2, 6, 1),
 	FUNC("gpio", 1, 6, 1),
-	FUNC("spi", 0, 6, 1),
+	FUNC("spi cs1", 0, 6, 1),
 };
 
 static struct rt2880_pmx_func spis_grp_mt7628[] = {
@@ -188,28 +188,44 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = {
188 FUNC("gpio", 0, 11, 1), 188 FUNC("gpio", 0, 11, 1),
189}; 189};
190 190
191#define MT7628_GPIO_MODE_MASK 0x3 191static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
192 192 FUNC("rsvd", 3, 35, 1),
193#define MT7628_GPIO_MODE_PWM1 30 193 FUNC("rsvd", 2, 35, 1),
194#define MT7628_GPIO_MODE_PWM0 28 194 FUNC("gpio", 1, 35, 1),
195#define MT7628_GPIO_MODE_UART2 26 195 FUNC("wled_kn", 0, 35, 1),
196#define MT7628_GPIO_MODE_UART1 24 196};
197#define MT7628_GPIO_MODE_I2C 20 197
198#define MT7628_GPIO_MODE_REFCLK 18 198static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
199#define MT7628_GPIO_MODE_PERST 16 199 FUNC("rsvd", 3, 44, 1),
200#define MT7628_GPIO_MODE_WDT 14 200 FUNC("rsvd", 2, 44, 1),
201#define MT7628_GPIO_MODE_SPI 12 201 FUNC("gpio", 1, 44, 1),
202#define MT7628_GPIO_MODE_SDMODE 10 202 FUNC("wled_an", 0, 44, 1),
203#define MT7628_GPIO_MODE_UART0 8 203};
204#define MT7628_GPIO_MODE_I2S 6 204
205#define MT7628_GPIO_MODE_CS1 4 205#define MT7628_GPIO_MODE_MASK 0x3
206#define MT7628_GPIO_MODE_SPIS 2 206
207#define MT7628_GPIO_MODE_GPIO 0 207#define MT7628_GPIO_MODE_WLED_KN 48
208#define MT7628_GPIO_MODE_WLED_AN 32
209#define MT7628_GPIO_MODE_PWM1 30
210#define MT7628_GPIO_MODE_PWM0 28
211#define MT7628_GPIO_MODE_UART2 26
212#define MT7628_GPIO_MODE_UART1 24
213#define MT7628_GPIO_MODE_I2C 20
214#define MT7628_GPIO_MODE_REFCLK 18
215#define MT7628_GPIO_MODE_PERST 16
216#define MT7628_GPIO_MODE_WDT 14
217#define MT7628_GPIO_MODE_SPI 12
218#define MT7628_GPIO_MODE_SDMODE 10
219#define MT7628_GPIO_MODE_UART0 8
220#define MT7628_GPIO_MODE_I2S 6
221#define MT7628_GPIO_MODE_CS1 4
222#define MT7628_GPIO_MODE_SPIS 2
223#define MT7628_GPIO_MODE_GPIO 0
208 224
209static struct rt2880_pmx_group mt7628an_pinmux_data[] = { 225static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
210 GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK, 226 GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
211 1, MT7628_GPIO_MODE_PWM1), 227 1, MT7628_GPIO_MODE_PWM1),
212 GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK, 228 GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
213 1, MT7628_GPIO_MODE_PWM0), 229 1, MT7628_GPIO_MODE_PWM0),
214 GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK, 230 GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
215 1, MT7628_GPIO_MODE_UART2), 231 1, MT7628_GPIO_MODE_UART2),
@@ -233,6 +249,10 @@ static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
233 1, MT7628_GPIO_MODE_SPIS), 249 1, MT7628_GPIO_MODE_SPIS),
234 GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK, 250 GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
235 1, MT7628_GPIO_MODE_GPIO), 251 1, MT7628_GPIO_MODE_GPIO),
252 GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
253 1, MT7628_GPIO_MODE_WLED_AN),
254 GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
255 1, MT7628_GPIO_MODE_WLED_KN),
236 { 0 } 256 { 0 }
237}; 257};
238 258
@@ -439,7 +459,7 @@ void __init ralink_clk_init(void)
 	ralink_clk_add("10000c00.uartlite", periph_rate);
 	ralink_clk_add("10180000.wmac", xtal_rate);
 
-	if (IS_ENABLED(CONFIG_USB) && is_mt76x8()) {
+	if (IS_ENABLED(CONFIG_USB) && !is_mt76x8()) {
 		/*
 		 * When the CPU goes into sleep mode, the BUS clock will be
 		 * too low for USB to function properly. Adjust the busses
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
index 39a9142f71be..7ecb4af79b7b 100644
--- a/arch/mips/ralink/prom.c
+++ b/arch/mips/ralink/prom.c
@@ -30,8 +30,10 @@ const char *get_system_type(void)
 	return soc_info.sys_type;
 }
 
-static __init void prom_init_cmdline(int argc, char **argv)
+static __init void prom_init_cmdline(void)
 {
+	int argc;
+	char **argv;
 	int i;
 
 	pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n",
@@ -60,14 +62,11 @@ static __init void prom_init_cmdline(int argc, char **argv)
 
 void __init prom_init(void)
 {
-	int argc;
-	char **argv;
-
 	prom_soc_init(&soc_info);
 
 	pr_info("SoC Type: %s\n", get_system_type());
 
-	prom_init_cmdline(argc, argv);
+	prom_init_cmdline();
 }
 
 void __init prom_free_prom_memory(void)
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
index 844f5cd55c8f..9dd67749c592 100644
--- a/arch/mips/ralink/rt288x.c
+++ b/arch/mips/ralink/rt288x.c
@@ -40,16 +40,6 @@ static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
 	{ 0 }
 };
 
-static void rt288x_wdt_reset(void)
-{
-	u32 t;
-
-	/* enable WDT reset output on pin SRAM_CS_N */
-	t = rt_sysc_r32(SYSC_REG_CLKCFG);
-	t |= CLKCFG_SRAM_CS_N_WDT;
-	rt_sysc_w32(t, SYSC_REG_CLKCFG);
-}
-
 void __init ralink_clk_init(void)
 {
 	unsigned long cpu_rate, wmac_rate = 40000000;
@@ -119,5 +109,5 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
 	soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
 
 	rt2880_pinmux_data = rt2880_pinmux_data_act;
-	ralink_soc == RT2880_SOC;
+	ralink_soc = RT2880_SOC;
 }
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index 9e4572592065..15b32cd01906 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -89,17 +89,6 @@ static struct rt2880_pmx_group rt5350_pinmux_data[] = {
 	{ 0 }
 };
 
-static void rt305x_wdt_reset(void)
-{
-	u32 t;
-
-	/* enable WDT reset output on pin SRAM_CS_N */
-	t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
-	t |= RT305X_SYSCFG_SRAM_CS0_MODE_WDT <<
-		RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT;
-	rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
-}
-
 static unsigned long rt5350_get_mem_size(void)
 {
 	void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index 582995aaaf4e..3c575093f8f1 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -36,7 +36,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
 static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
 static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
 static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
+static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
 static struct rt2880_pmx_func pci_func[] = {
 	FUNC("pci-dev", 0, 40, 32),
 	FUNC("pci-host2", 1, 40, 32),
@@ -44,7 +44,7 @@ static struct rt2880_pmx_func pci_func[] = {
 	FUNC("pci-fnc", 3, 40, 32)
 };
 static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
+static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
 
 static struct rt2880_pmx_group rt3883_pinmux_data[] = {
 	GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
@@ -63,16 +63,6 @@ static struct rt2880_pmx_group rt3883_pinmux_data[] = {
 	{ 0 }
 };
 
-static void rt3883_wdt_reset(void)
-{
-	u32 t;
-
-	/* enable WDT reset output on GPIO 2 */
-	t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
-	t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
-	rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
-}
-
 void __init ralink_clk_init(void)
 {
 	unsigned long cpu_rate, sys_rate;
diff --git a/arch/mips/sgi-ip22/Platform b/arch/mips/sgi-ip22/Platform
index b7a4b7e04c38..e8f6b3a42a48 100644
--- a/arch/mips/sgi-ip22/Platform
+++ b/arch/mips/sgi-ip22/Platform
@@ -25,7 +25,7 @@ endif
 # Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys
 #
 ifdef CONFIG_SGI_IP28
-  ifeq ($(call cc-option-yn,-mr10k-cache-barrier=store), n)
+  ifeq ($(call cc-option-yn,-march=r10000 -mr10k-cache-barrier=store), n)
       $(error gcc doesn't support needed option -mr10k-cache-barrier=store)
   endif
 endif
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 718dd197909f..de73beb36910 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 	return alloc_bootmem_align(size, align);
 }
 
+int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
+					     bool nomap)
+{
+	reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+	return 0;
+}
+
 void __init early_init_devtree(void *params)
 {
 	__be32 *dtb = (u32 *)__dtb_start;
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index a4ff86d58d5c..6c4e351a7930 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -195,6 +195,9 @@ void __init setup_arch(char **cmdline_p)
 	}
 #endif /* CONFIG_BLK_DEV_INITRD */
 
+	early_init_fdt_reserve_self();
+	early_init_fdt_scan_reserved_mem();
+
 	unflatten_and_copy_device_tree();
 
 	setup_cpuinfo();
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index 2d69a853b742..3a08b55609b6 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -38,6 +38,8 @@ SECTIONS
 	/* Read-only sections, merged into text segment: */
 	. = LOAD_BASE ;
 
+	_text = .;
+
 	/* _s_kernel_ro must be page aligned */
 	. = ALIGN(PAGE_SIZE);
 	_s_kernel_ro = .;
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 3f9406d9b9d6..da87943328a5 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -6,7 +6,7 @@
 #endif
 
 #include <linux/compiler.h>
-#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
+#include <asm/types.h>
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>
@@ -17,6 +17,12 @@
  * to include/asm-i386/bitops.h or kerneldoc
  */
 
+#if __BITS_PER_LONG == 64
+#define SHIFT_PER_LONG 6
+#else
+#define SHIFT_PER_LONG 5
+#endif
+
 #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
 
 
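The SHIFT_PER_LONG values above are simply log2(BITS_PER_LONG): bitmap code uses them to split a bit number into a word index and an in-word offset with a shift and a mask. A minimal user-space sketch of that split (the names below are illustrative, not kernel API):

#include <stdio.h>

#define XBITS_PER_LONG	(8 * (unsigned int)sizeof(long))
#define XSHIFT_PER_LONG	(XBITS_PER_LONG == 64 ? 6 : 5)

int main(void)
{
	unsigned long nr = 71;	/* an arbitrary bit number */

	/* word index: nr / BITS_PER_LONG, done as a shift */
	unsigned long word = nr >> XSHIFT_PER_LONG;
	/* offset inside the word: nr % BITS_PER_LONG, done as a mask */
	unsigned long off = nr & (XBITS_PER_LONG - 1);

	printf("bit %lu lives in word %lu at offset %lu\n", nr, word, off);
	return 0;
}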
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index d8d60a57183f..f53725202955 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -39,6 +39,8 @@ struct hppa_dma_ops {
 ** flush/purge and allocate "regular" cacheable pages for everything.
 */
 
+#define DMA_ERROR_CODE	(~(dma_addr_t)0)
+
 #ifdef CONFIG_PA11
 extern struct hppa_dma_ops pcxl_dma_ops;
 extern struct hppa_dma_ops pcx_dma_ops;
@@ -209,12 +211,13 @@ parisc_walk_tree(struct device *dev)
 			break;
 		}
 	}
-	BUG_ON(!dev->platform_data);
 	return dev->platform_data;
 }
 
-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
-
+#define GET_IOC(dev) ({					\
+	void *__pdata = parisc_walk_tree(dev);		\
+	__pdata ? HBA_DATA(__pdata)->iommu : NULL;	\
+})
 
 #ifdef CONFIG_IOMMU_CCIO
 struct parisc_device;
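The reworked GET_IOC uses a GNU statement expression so that a missing platform_data becomes a NULL result the caller can test, rather than a BUG_ON or a NULL dereference inside the macro. A stand-alone sketch of the same pattern, with made-up types and a stubbed lookup standing in for parisc_walk_tree() (compiles with GCC/Clang, which support statement expressions):

#include <stdio.h>
#include <stddef.h>

struct hba_data { void *iommu; };

static struct hba_data good = { .iommu = (void *)0x1000 };

/* stand-in for parisc_walk_tree(): a lookup that may fail */
static struct hba_data *walk_tree(int dev)
{
	return dev == 0 ? &good : NULL;
}

/* statement expression: evaluates to its last expression */
#define GET_IOMMU(dev) ({				\
	struct hba_data *__d = walk_tree(dev);		\
	__d ? __d->iommu : NULL;			\
})

int main(void)
{
	printf("dev0 iommu=%p, dev1 iommu=%p\n", GET_IOMMU(0), GET_IOMMU(1));
	return 0;
}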
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 59be25764433..a81226257878 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context)
 	mtctl(__space_to_prot(context), 8);
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+		struct mm_struct *next, struct task_struct *tsk)
 {
-
 	if (prev != next) {
 		mtctl(__pa(next->pgd), 25);
 		load_context(next->context);
 	}
 }
 
+static inline void switch_mm(struct mm_struct *prev,
+		struct mm_struct *next, struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
 static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
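The shape introduced here, a *_irqs_off worker plus a wrapper that brackets it with local_irq_save()/local_irq_restore(), lets callers that already run with interrupts disabled (such as the scheduler) use the cheap variant directly. A user-space sketch of the pattern, with the irq helpers reduced to stubs purely for illustration:

#include <stdio.h>

/* stubs standing in for local_irq_save()/local_irq_restore() */
static unsigned long irq_save(void) { puts("irqs off"); return 0; }
static void irq_restore(unsigned long f) { (void)f; puts("irqs on"); }

/* worker: assumes the caller already disabled interrupts */
static void switch_ctx_irqs_off(int prev, int next)
{
	if (prev != next)
		printf("switching %d -> %d\n", prev, next);
}

/* safe entry point for callers that may still have interrupts enabled */
static void switch_ctx(int prev, int next)
{
	unsigned long flags = irq_save();

	switch_ctx_irqs_off(prev, next);
	irq_restore(flags);
}

int main(void) { switch_ctx(1, 2); return 0; }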
diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
index e0a23c7bdd43..07fa7e50bdc0 100644
--- a/arch/parisc/include/uapi/asm/bitsperlong.h
+++ b/arch/parisc/include/uapi/asm/bitsperlong.h
@@ -3,10 +3,8 @@
 
 #if defined(__LP64__)
 #define __BITS_PER_LONG 64
-#define SHIFT_PER_LONG 6
 #else
 #define __BITS_PER_LONG 32
-#define SHIFT_PER_LONG 5
 #endif
 
 #include <asm-generic/bitsperlong.h>
diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h
index e78403b129ef..928e1bbac98f 100644
--- a/arch/parisc/include/uapi/asm/swab.h
+++ b/arch/parisc/include/uapi/asm/swab.h
@@ -1,6 +1,7 @@
 #ifndef _PARISC_SWAB_H
 #define _PARISC_SWAB_H
 
+#include <asm/bitsperlong.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 
@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 }
 #define __arch_swab32 __arch_swab32
 
-#if BITS_PER_LONG > 32
+#if __BITS_PER_LONG > 32
 /*
 ** From "PA-RISC 2.0 Architecture", HP Professional Books.
 ** See Appendix I page 8 , "Endian Byte Swapping".
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 	return x;
 }
 #define __arch_swab64 __arch_swab64
-#endif /* BITS_PER_LONG > 32 */
+#endif /* __BITS_PER_LONG > 32 */
 
 #endif /* _PARISC_SWAB_H */
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 5aba01ac457f..4dda73c44fee 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	unsigned long task_size = TASK_SIZE;
 	int do_color_align, last_mmap;
 	struct vm_unmapped_area_info info;
@@ -115,9 +115,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		else
 			addr = PAGE_ALIGN(addr);
 
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
 
@@ -141,7 +142,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	const unsigned long len, const unsigned long pgoff,
 	const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_color_align, last_mmap;
@@ -175,9 +176,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
 
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index d4ffcfbc9885..041e1f9ec129 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -361,7 +361,7 @@
 	ENTRY_SAME(ni_syscall)		/* 263: reserved for vserver */
 	ENTRY_SAME(add_key)
 	ENTRY_SAME(request_key)		/* 265 */
-	ENTRY_SAME(keyctl)
+	ENTRY_COMP(keyctl)
 	ENTRY_SAME(ioprio_set)
 	ENTRY_SAME(ioprio_get)
 	ENTRY_SAME(inotify_init)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 16dbe81c97c9..2f33a67bc531 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -298,7 +298,7 @@ bad_area:
 	case 15:	/* Data TLB miss fault/Data page fault */
 		/* send SIGSEGV when outside of vma */
 		if (!vma ||
-		    address < vma->vm_start || address > vma->vm_end) {
+		    address < vma->vm_start || address >= vma->vm_end) {
 			si.si_signo = SIGSEGV;
 			si.si_code = SEGV_MAPERR;
 			break;
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 861e72109df2..f080abfc2f83 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -68,6 +68,7 @@ SECTIONS
 	}
 
 #ifdef CONFIG_PPC64_BOOT_WRAPPER
+	. = ALIGN(256);
 	.got :
 	{
 		__toc_start = .;
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 55f106ed12bf..039c4b910615 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -460,7 +460,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * Atomically increments @v by 1, so long as @v is non-zero.
  * Returns non-zero if @v was non-zero, and zero otherwise.
  */
-static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
+static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
 {
 	long t1, t2;
 
@@ -479,7 +479,7 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
 
-	return t1;
+	return t1 != 0;
 }
 
 #endif /* __powerpc64__ */
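The change matters because callers treat the result as a boolean int: returning the raw 64-bit counter could truncate to zero (for example an old value of 0x100000000) even though the increment happened, so the fix returns the explicit t1 != 0. A portable C11 sketch of the same semantics, offered only to show the contract, not the PowerPC implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* increment *v unless it is zero; report whether we incremented */
static bool inc_not_zero(_Atomic long *v)
{
	long old = atomic_load(v);

	while (old != 0) {
		/* on failure, 'old' is reloaded with the current value */
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	_Atomic long v = 41;

	printf("%d %ld\n", inc_not_zero(&v), (long)v);	/* prints: 1 42 */
	return 0;
}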
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index ee46ffef608e..743ad7a400d6 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -23,12 +23,13 @@
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader. We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE	0x20000000
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE	(is_32bit_task() ? 0x000400000UL : \
+				   0x100000000UL)
 
 #define ELF_CORE_EFLAGS	(is_elf2_task() ? 2 : 0)
 
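For scale: 0x000400000 is 4 MiB and 0x100000000 is 4 GiB, so 64-bit PIE binaries now map above the entire 32-bit range. A trivial stand-alone check of those constants:

#include <stdio.h>

int main(void)
{
	unsigned long long base32 = 0x000400000ULL;	/* 32-bit tasks */
	unsigned long long base64 = 0x100000000ULL;	/* 64-bit tasks */

	printf("%llu MiB, %llu GiB\n", base32 >> 20, base64 >> 30);
	return 0;
}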
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 627d129d7fcb..ca372bbc0ffe 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1236,7 +1236,7 @@ static inline unsigned long mfvtb (void)
1236 " .llong 0\n" \ 1236 " .llong 0\n" \
1237 ".previous" \ 1237 ".previous" \
1238 : "=r" (rval) \ 1238 : "=r" (rval) \
1239 : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \ 1239 : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
1240 rval;}) 1240 rval;})
1241#else 1241#else
1242#define mftb() ({unsigned long rval; \ 1242#define mftb() ({unsigned long rval; \
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 86150fbb42c3..91e5c1758b5c 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -808,14 +808,25 @@ int fix_alignment(struct pt_regs *regs)
 	nb = aligninfo[instr].len;
 	flags = aligninfo[instr].flags;
 
-	/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
-	if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
-		nb = 8;
-		flags = LD+SW;
-	} else if (IS_XFORM(instruction) &&
-		   ((instruction >> 1) & 0x3ff) == 660) {
-		nb = 8;
-		flags = ST+SW;
+	/*
+	 * Handle some cases which give overlaps in the DSISR values.
+	 */
+	if (IS_XFORM(instruction)) {
+		switch (get_xop(instruction)) {
+		case 532:	/* ldbrx */
+			nb = 8;
+			flags = LD+SW;
+			break;
+		case 660:	/* stdbrx */
+			nb = 8;
+			flags = ST+SW;
+			break;
+		case 20:	/* lwarx */
+		case 84:	/* ldarx */
+		case 116:	/* lharx */
+		case 276:	/* lqarx */
+			return 0;	/* not emulated ever */
+		}
 	}
 
 	/* Byteswap little endian loads and stores */
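The get_xop() accessor used by the new switch is, for X-form instructions, the same expression the old code open-coded: the extended opcode lives in bits 21..30 of the instruction word. A minimal user-space sketch:

#include <stdio.h>

/* extended opcode of a PowerPC X-form instruction */
static unsigned int xop(unsigned int instr)
{
	return (instr >> 1) & 0x3ff;
}

int main(void)
{
	unsigned int ldbrx = 0x7c000428;	/* an ldbrx encoding */

	printf("xop = %u\n", xop(ldbrx));	/* prints 532 */
	return 0;
}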
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 98949b0df00a..6696c1986844 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -304,9 +304,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
 	 *
 	 * For pHyp, we have to enable IO for log retrieval. Otherwise,
 	 * 0xFF's is always returned from PCI config space.
+	 *
+	 * When the @severity is EEH_LOG_PERM, the PE is going to be
+	 * removed. Prior to that, the drivers for devices included in
+	 * the PE will be closed. The drivers rely on working IO path
+	 * to bring the devices to quiet state. Otherwise, PCI traffic
+	 * from those devices after they are removed is like to cause
+	 * another unexpected EEH error.
 	 */
 	if (!(pe->type & EEH_PE_PHB)) {
-		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
+		    severity == EEH_LOG_PERM)
 			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
 
 		/*
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 300382e5a2cc..9837c98caabe 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -485,7 +485,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
 	struct eeh_pe *pe = (struct eeh_pe *)data;
-	bool *clear_sw_state = flag;
+	bool clear_sw_state = *(bool *)flag;
 	int i, rc = 1;
 
 	for (i = 0; rc && i < 3; i++)
@@ -655,7 +655,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
  */
 #define MAX_WAIT_FOR_RECOVERY 300
 
-static void eeh_handle_normal_event(struct eeh_pe *pe)
+static bool eeh_handle_normal_event(struct eeh_pe *pe)
 {
 	struct pci_bus *frozen_bus;
 	int rc = 0;
@@ -665,7 +665,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
 	if (!frozen_bus) {
 		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
 		       __func__, pe->phb->global_number, pe->addr);
-		return;
+		return false;
 	}
 
 	eeh_pe_update_time_stamp(pe);
@@ -790,7 +790,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
 	pr_info("EEH: Notify device driver to resume\n");
 	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
 
-	return;
+	return false;
 
 excess_failures:
 	/*
@@ -831,7 +831,11 @@ perm_error:
 		pci_lock_rescan_remove();
 		pcibios_remove_pci_devices(frozen_bus);
 		pci_unlock_rescan_remove();
+
+		/* The passed PE should no longer be used */
+		return true;
 	}
+	return false;
 }
 
 static void eeh_handle_special_event(void)
@@ -897,7 +901,14 @@ static void eeh_handle_special_event(void)
 	 */
 	if (rc == EEH_NEXT_ERR_FROZEN_PE ||
 	    rc == EEH_NEXT_ERR_FENCED_PHB) {
-		eeh_handle_normal_event(pe);
+		/*
+		 * eeh_handle_normal_event() can make the PE stale if it
+		 * determines that the PE cannot possibly be recovered.
+		 * Don't modify the PE state if that's the case.
+		 */
+		if (eeh_handle_normal_event(pe))
+			continue;
+
 		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 	} else {
 		pci_lock_rescan_remove();
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index edba294620db..f6fd0332c3a2 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -716,7 +716,7 @@ resume_kernel:
 
 	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
 
-	lwz	r3,GPR1(r1)
+	ld	r3,GPR1(r1)
 	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
 	mr	r4,r1			/* src: current exception frame */
 	mr	r1,r3			/* Reroute the trampoline frame to r1 */
@@ -730,8 +730,8 @@ resume_kernel:
 	addi	r6,r6,8
 	bdnz	2b
 
-	/* Do real store operation to complete stwu */
-	lwz	r5,GPR1(r1)
+	/* Do real store operation to complete stdu */
+	ld	r5,GPR1(r1)
 	std	r8,0(r5)
 
 	/* Clear _TIF_EMULATE_STACK_STORE flag */
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 488e6314f993..5cc93f0b52ca 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -735,8 +735,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 	beq+	1f
 
+#ifdef CONFIG_RELOCATABLE
+	ld	r15,PACATOC(r13)
+	ld	r14,interrupt_base_book3e@got(r15)
+	ld	r15,__end_interrupts@got(r15)
+#else
 	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
 	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
 	cmpld	cr0,r10,r14
 	cmpld	cr1,r10,r15
 	blt+	cr0,1f
@@ -799,8 +805,14 @@ kernel_dbg_exc:
 	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 	beq+	1f
 
+#ifdef CONFIG_RELOCATABLE
+	ld	r15,PACATOC(r13)
+	ld	r14,interrupt_base_book3e@got(r15)
+	ld	r15,__end_interrupts@got(r15)
+#else
 	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
 	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
 	cmpld	cr0,r10,r14
 	cmpld	cr1,r10,r15
 	blt+	cr0,1f
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 05e804cdecaa..fdf48785d3e9 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -227,8 +227,10 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
 	rcu_read_lock();
 
 	bp = __this_cpu_read(bp_per_reg);
-	if (!bp)
+	if (!bp) {
+		rc = NOTIFY_DONE;
 		goto out;
+	}
 	info = counter_arch_bp(bp);
 
 	/*
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7c053f281406..1138fec3dd65 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -514,6 +514,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 #endif
 #endif
 
+	/*
+	 * jprobes use jprobe_return() which skips the normal return
+	 * path of the function, and this messes up the accounting of the
+	 * function graph tracer.
+	 *
+	 * Pause function graph tracing while performing the jprobe function.
+	 */
+	pause_graph_tracing();
+
 	return 1;
 }
 
@@ -536,6 +545,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	 * saved regs...
 	 */
 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* It's OK to start function graph tracing again */
+	unpause_graph_tracing();
 	preempt_enable_no_resched();
 	return 1;
 }
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index b2eb4686bd8f..da3c4c3f4ec8 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -204,6 +204,8 @@ static void machine_check_process_queued_event(struct irq_work *work)
 {
 	int index;
 
+	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
 	/*
 	 * For now just print it to console.
 	 * TODO: log this error event to FSP or nvram.
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 7b89e7b305e6..3139533640fc 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2664,6 +2664,9 @@ static void __init prom_find_boot_cpu(void)
 
 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
 
+	if (!PHANDLE_VALID(cpu_pkg))
+		return;
+
 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
 	prom.cpu = be32_to_cpu(rval);
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 5c03a6a9b054..a20823210ac0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -220,6 +220,15 @@ static void cpu_ready_for_interrupts(void)
 		unsigned long lpcr = mfspr(SPRN_LPCR);
 		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
 	}
+
+	/*
+	 * Fixup HFSCR:TM based on CPU features. The bit is set by our
+	 * early asm init because at that point we haven't updated our
+	 * CPU features from firmware and device-tree. Here we have,
+	 * so let's do it.
+	 */
+	if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
+		mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
 }
 
 /*
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 37de90f8a845..e4dcb0a43e3f 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -297,8 +297,6 @@ long machine_check_early(struct pt_regs *regs)
 
 	__this_cpu_inc(irq_stat.mce_exceptions);
 
-	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
 	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
 		handled = cur_cpu_spec->machine_check_early(regs);
 	return handled;
@@ -704,6 +702,8 @@ void machine_check_exception(struct pt_regs *regs)
 
 	__this_cpu_inc(irq_stat.mce_exceptions);
 
+	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
 	/* See if any machine dependent calls. In theory, we would want
 	 * to call the CPU first, and call the ppc_md. one if the CPU
 	 * one returns a positive number. However there is existing code
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3c3a367b6e59..428563b195c3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2687,12 +2687,38 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int r;
 	int srcu_idx;
+	unsigned long ebb_regs[3] = {};	/* shut up GCC */
+	unsigned long user_tar = 0;
+	unsigned long proc_fscr = 0;
+	unsigned int user_vrsave;
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
+	/*
+	 * Don't allow entry with a suspended transaction, because
+	 * the guest entry/exit code will lose it.
+	 * If the guest has TM enabled, save away their TM-related SPRs
+	 * (they will get restored by the TM unavailable interrupt).
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+	    (current->thread.regs->msr & MSR_TM)) {
+		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+			run->fail_entry.hardware_entry_failure_reason = 0;
+			return -EINVAL;
+		}
+		/* Enable TM so we can read the TM SPRs */
+		mtmsr(mfmsr() | MSR_TM);
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+	}
+#endif
+
 	kvmppc_core_prepare_to_enter(vcpu);
 
 	/* No need to go into the guest when all we'll do is come back out */
@@ -2715,6 +2741,17 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	flush_fp_to_thread(current);
 	flush_altivec_to_thread(current);
 	flush_vsx_to_thread(current);
+
+	/* Save userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		ebb_regs[0] = mfspr(SPRN_EBBHR);
+		ebb_regs[1] = mfspr(SPRN_EBBRR);
+		ebb_regs[2] = mfspr(SPRN_BESCR);
+		user_tar = mfspr(SPRN_TAR);
+		proc_fscr = mfspr(SPRN_FSCR);
+	}
+	user_vrsave = mfspr(SPRN_VRSAVE);
+
 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
 	vcpu->arch.pgdir = current->mm->pgd;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2736,6 +2773,29 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		}
 	} while (is_kvmppc_resume_guest(r));
 
+	/* Restore userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		mtspr(SPRN_EBBHR, ebb_regs[0]);
+		mtspr(SPRN_EBBRR, ebb_regs[1]);
+		mtspr(SPRN_BESCR, ebb_regs[2]);
+		mtspr(SPRN_TAR, user_tar);
+		mtspr(SPRN_FSCR, proc_fscr);
+	}
+	mtspr(SPRN_VRSAVE, user_vrsave);
+
+	/*
+	 * Since we don't do lazy TM reload, we need to reload
+	 * the TM registers here.
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+	    (current->thread.regs->msr & MSR_TM)) {
+		mtspr(SPRN_TFHAR, current->thread.tm_tfhar);
+		mtspr(SPRN_TFIAR, current->thread.tm_tfiar);
+		mtspr(SPRN_TEXASR, current->thread.tm_texasr);
+	}
+#endif
+
 out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 1a743f87b37d..ffab9269bfe4 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -36,6 +36,13 @@
 #define NAPPING_CEDE	1
 #define NAPPING_NOVCPU	2
 
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS			112
+#define STACK_SLOT_TRAP		(SFS-4)
+#define STACK_SLOT_CIABR	(SFS-16)
+#define STACK_SLOT_DAWR		(SFS-24)
+#define STACK_SLOT_DAWRX	(SFS-32)
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -274,10 +281,10 @@ kvm_novcpu_exit:
 	bl	kvmhv_accumulate_time
 #endif
 13:	mr	r3, r12
-	stw	r12, 112-4(r1)
+	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	nop
-	lwz	r12, 112-4(r1)
+	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 
 /*
@@ -489,7 +496,7 @@ kvmppc_hv_entry:
 	 */
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -112(r1)
+	stdu	r1, -SFS(r1)
 
 	/* Save R1 in the PACA */
 	std	r1, HSTATE_HOST_R1(r13)
@@ -643,6 +650,16 @@ kvmppc_got_guest:
 	mtspr	SPRN_PURR,r7
 	mtspr	SPRN_SPURR,r8
 
+	/* Save host values of some registers */
+BEGIN_FTR_SECTION
+	mfspr	r5, SPRN_CIABR
+	mfspr	r6, SPRN_DAWR
+	mfspr	r7, SPRN_DAWRX
+	std	r5, STACK_SLOT_CIABR(r1)
+	std	r6, STACK_SLOT_DAWR(r1)
+	std	r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
 BEGIN_FTR_SECTION
 	/* Set partition DABR */
 	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -1266,8 +1283,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	 */
 	li	r0, 0
 	mtspr	SPRN_IAMR, r0
-	mtspr	SPRN_CIABR, r0
-	mtspr	SPRN_DAWRX, r0
+	mtspr	SPRN_PSPB, r0
 	mtspr	SPRN_TCSCR, r0
 	mtspr	SPRN_WORT, r0
 	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
@@ -1283,6 +1299,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	std	r6,VCPU_UAMOR(r9)
 	li	r6,0
 	mtspr	SPRN_AMR,r6
+	mtspr	SPRN_UAMOR, r6
 
 	/* Switch DSCR back to host value */
 	mfspr	r8, SPRN_DSCR
@@ -1424,6 +1441,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	slbia
 	ptesync
 
+	/* Restore host values of some registers */
+BEGIN_FTR_SECTION
+	ld	r5, STACK_SLOT_CIABR(r1)
+	ld	r6, STACK_SLOT_DAWR(r1)
+	ld	r7, STACK_SLOT_DAWRX(r1)
+	mtspr	SPRN_CIABR, r5
+	mtspr	SPRN_DAWR, r6
+	mtspr	SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
 	/*
 	 * POWER7/POWER8 guest -> host partition switch code.
 	 * We don't have to lock against tlbies but we do
@@ -1533,8 +1560,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
-	ld	r0, 112+PPC_LR_STKOFF(r1)
-	addi	r1, r1, 112
+	ld	r0, SFS+PPC_LR_STKOFF(r1)
+	addi	r1, r1, SFS
 	mtlr	r0
 	blr
 
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 5cc2e7af3a7b..b379146de55b 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -302,7 +302,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			advance = 0;
 			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
 			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
-			kvmppc_core_queue_program(vcpu, 0);
 		}
 	}
 
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index dc885b30f7a6..e37162d356d8 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -687,8 +687,10 @@ int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
 	case 19:
 		switch ((instr >> 1) & 0x3ff) {
 		case 0:		/* mcrf */
-			rd = (instr >> 21) & 0x1c;
-			ra = (instr >> 16) & 0x1c;
+			rd = 7 - ((instr >> 23) & 0x7);
+			ra = 7 - ((instr >> 18) & 0x7);
+			rd *= 4;
+			ra *= 4;
 			val = (regs->ccr >> ra) & 0xf;
 			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
 			goto instr_done;
@@ -967,6 +969,19 @@ int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
 #endif
 
 	case 19:	/* mfcr */
+		if ((instr >> 20) & 1) {
+			imm = 0xf0000000UL;
+			for (sh = 0; sh < 8; ++sh) {
+				if (instr & (0x80000 >> sh)) {
+					regs->gpr[rd] = regs->ccr & imm;
+					break;
+				}
+				imm >>= 4;
+			}
+
+			goto instr_done;
+		}
+
 		regs->gpr[rd] = regs->ccr;
 		regs->gpr[rd] &= 0xffffffffUL;
 		goto instr_done;
@@ -1806,8 +1821,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		goto instr_done;
 
 	case LARX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (op.ea & (size - 1))
 			break;		/* can't handle misaligned */
 		err = -EFAULT;
@@ -1829,8 +1842,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		goto ldst_done;
 
 	case STCX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (op.ea & (size - 1))
 			break;		/* can't handle misaligned */
 		err = -EFAULT;
@@ -1854,8 +1865,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		goto ldst_done;
 
 	case LOAD:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
 		if (!err) {
 			if (op.type & SIGNEXT)
@@ -1867,8 +1876,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef CONFIG_PPC_FPU
 	case LOAD_FP:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (size == 4)
 			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
 		else
@@ -1877,15 +1884,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 #endif
 #ifdef CONFIG_ALTIVEC
 	case LOAD_VMX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
 	case LOAD_VSX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
 		goto ldst_done;
 #endif
@@ -1908,8 +1911,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		goto instr_done;
 
 	case STORE:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if ((op.type & UPDATE) && size == sizeof(long) &&
 		    op.reg == 1 && op.update_reg == 1 &&
 		    !(regs->msr & MSR_PR) &&
@@ -1922,8 +1923,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef CONFIG_PPC_FPU
 	case STORE_FP:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (size == 4)
 			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
 		else
@@ -1932,15 +1931,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 #endif
 #ifdef CONFIG_ALTIVEC
 	case STORE_VMX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
 	case STORE_VSX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
 		goto ldst_done;
 #endif
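On the mcrf fix at the top of this file: the destination and source CR field numbers sit in bits 23-25 and 18-20 of the instruction, and CR field 0 occupies the most significant nibble of the CR register, hence the 7 - field inversion before multiplying by 4. A small self-contained sketch of the corrected arithmetic (illustrative only):

#include <stdio.h>

/* copy CR field 'bfa' into CR field 'bf'; CR0 is the top nibble */
static unsigned long mcrf(unsigned long ccr, unsigned int bf, unsigned int bfa)
{
	unsigned int rd = (7 - bf) * 4;		/* destination shift */
	unsigned int ra = (7 - bfa) * 4;	/* source shift */
	unsigned long val = (ccr >> ra) & 0xf;

	return (ccr & ~(0xfUL << rd)) | (val << rd);
}

int main(void)
{
	/* move CR7 (low nibble) into CR0 (high nibble) */
	printf("%08lx\n", mcrf(0x0000000f, 0, 7));	/* prints f000000f */
	return 0;
}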
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index c8822af10a58..19d9b2d2d212 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -645,6 +645,10 @@ static void native_flush_hash_range(unsigned long number, int local)
 	unsigned long psize = batch->psize;
 	int ssize = batch->ssize;
 	int i;
+	unsigned int use_local;
+
+	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
+		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
 
 	local_irq_save(flags);
 
@@ -671,8 +675,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		} pte_iterate_hashed_end();
 	}
 
-	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
-	    mmu_psize_defs[psize].tlbiel && local) {
+	if (use_local) {
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
 			vpn = batch->vpn[i];
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 4c48b487698c..0b48ce40d351 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -179,6 +179,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
+	/*
+	 * It's possible the bad EA is too large to fit in the SLB cache, which
+	 * would mean we'd fail to invalidate it on context switch. So mark the
+	 * SLB cache as full so we force a full flush. We also set cr7+eq to
+	 * mark the address as a kernel address, so slb_finish_load() skips
+	 * trying to insert it into the SLB cache.
+	 */
+	li	r9,SLB_CACHE_ENTRIES + 1
+	sth	r9,PACASLBCACHEPTR(r13)
+	crset	4*cr7+eq
 	li	r10,0			/* BAD_VSID */
 	li	r9,0			/* BAD_VSID */
 	li	r11,SLB_VSID_USER	/* flags don't much matter */
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 0f432a702870..6ad12b244770 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 	if ((mm->task_size - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
-	return (!vma || (addr + len) <= vma->vm_start);
+	return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
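vm_start_gap() here, like vm_end_gap() in the parisc hunks above, comes from the stack guard-gap hardening: for a stack VMA the usable boundary is pushed away by a configurable gap so other mappings cannot abut the stack. A rough user-space sketch of the idea, assuming the kernel's default gap of 256 pages (types and names below are illustrative, not the kernel's):

#include <stdio.h>

#define PAGE_SHIFT	12
#define GUARD_GAP	(256UL << PAGE_SHIFT)	/* default stack_guard_gap */
#define GROWSDOWN	0x1UL

struct vma { unsigned long start, end, flags; };

/* effective start of a VMA once the guard gap is accounted for */
static unsigned long vma_start_gap(const struct vma *v)
{
	unsigned long start = v->start;

	if ((v->flags & GROWSDOWN) && start > GUARD_GAP)
		start -= GUARD_GAP;
	return start;
}

int main(void)
{
	struct vma stack = { 0x7f0000100000UL, 0x7f0000200000UL, GROWSDOWN };

	printf("mappings must end below %#lx, not %#lx\n",
	       vma_start_gap(&stack), stack.start);
	return 0;
}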
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index e45b88a5d7e0..ae877c7b3905 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -148,7 +148,7 @@ opal_tracepoint_entry:
 opal_tracepoint_return:
 	std	r3,STK_REG(R31)(r1)
 	mr	r4,r3
-	ld	r0,STK_REG(R23)(r1)
+	ld	r3,STK_REG(R23)(r1)
 	bl	__trace_opal_exit
 	ld	r3,STK_REG(R31)(r1)
 	addi	r1,r1,STACKFRAMESIZE
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index f244dcb4f2cf..96536c969c9c 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -280,7 +280,6 @@ int dlpar_detach_node(struct device_node *dn)
 	if (rc)
 		return rc;
 
-	of_node_put(dn); /* Must decrement the refcount */
 	return 0;
 }
 
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index e9ff44cd5d86..e8b1027e1b5b 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -110,6 +110,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
 		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
 		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
 	}
 
@@ -553,6 +554,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
 		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
 		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
 	}
 
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 7c7fcc042549..fb695f142563 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -82,7 +82,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
 
 	of_detach_node(np);
 	of_node_put(parent);
-	of_node_put(np); /* Must decrement the refcount */
 	return 0;
 }
 
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 60530fd93d6d..06176328f83d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -62,6 +62,9 @@ config PCI_QUIRKS
 config ARCH_SUPPORTS_UPROBES
 	def_bool y
 
+config DEBUG_RODATA
+	def_bool y
+
 config S390
 	def_bool y
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 4da604ebf6fd..ca15613eaaa4 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
 
 unsigned long decompress_kernel(void)
 {
-	unsigned long output_addr;
-	unsigned char *output;
+	void *output, *kernel_end;
 
-	output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
-	check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
-	memset(&_bss, 0, &_ebss - &_bss);
-	free_mem_ptr = (unsigned long)&_end;
-	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-	output = (unsigned char *) output_addr;
+	output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
+	kernel_end = output + SZ__bss_start;
+	check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	/*
 	 * Move the initrd right behind the end of the decompressed
-	 * kernel image.
+	 * kernel image. This also prevents initrd corruption caused by
+	 * bss clearing since kernel_end will always be located behind the
+	 * current bss section..
 	 */
-	if (INITRD_START && INITRD_SIZE &&
-	    INITRD_START < (unsigned long) output + SZ__bss_start) {
-		check_ipl_parmblock(output + SZ__bss_start,
-				    INITRD_START + INITRD_SIZE);
-		memmove(output + SZ__bss_start,
-			(void *) INITRD_START, INITRD_SIZE);
-		INITRD_START = (unsigned long) output + SZ__bss_start;
+	if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
+		check_ipl_parmblock(kernel_end, INITRD_SIZE);
+		memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
+		INITRD_START = (unsigned long) kernel_end;
 	}
 #endif
 
+	/*
+	 * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
+	 * initialized afterwards since they reside in bss.
+	 */
+	memset(&_bss, 0, &_ebss - &_bss);
+	free_mem_ptr = (unsigned long) &_end;
+	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+
 	puts("Uncompressing Linux... ");
 	__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
 	puts("Ok, booting the kernel.\n");
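The ALIGN() call above replaces the open-coded round-up ((x + 4095) & -4096); the kernel macro rounds up to any power-of-two boundary. A one-liner sketch of the arithmetic in stand-alone C (macro name is illustrative):

#include <stdio.h>

#define XALIGN(x, a)	(((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long x = 0x12345;

	/* both expressions round x up to the next 4 KiB boundary */
	printf("%#lx %#lx\n", XALIGN(x, 4096), (x + 4095UL) & -4096UL);
	return 0;
}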
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab802f6..8e136b88cdf4 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
 	BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
 	asm volatile(						\
 		"	lctlg	%1,%2,%0\n"			\
-		: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+		:						\
+		: "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
+		: "memory");					\
 }
 
 #define __ctl_store(array, low, high) {				\
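The added "memory" clobber tells GCC the asm may consume or invalidate memory contents, so it must flush values cached in registers and may not reorder memory accesses across the statement. The classic minimal example of the same device (a GCC/Clang extension):

/* a pure compiler barrier: emits no instructions, but the "memory"
 * clobber forbids caching or reordering memory accesses across it */
static inline void compiler_barrier(void)
{
	asm volatile("" ::: "memory");
}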
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index bab6739a1154..b9eb7b1a49d2 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -154,14 +154,13 @@ extern unsigned int vdso_enabled;
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
 
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader. We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. 64-bit
-   tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_32bit_task() ? \
-				(STACK_TOP / 3 * 2) : \
-				(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE		(is_compat_task() ? 0x000400000UL : \
+					    0x100000000UL)
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 024f85f947ae..e2c0e4eab037 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -829,6 +829,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
 	pgste_t pgste;
 
+	if (pte_present(entry))
+		pte_val(entry) &= ~_PAGE_UNUSED;
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c1ea67db8404..c61ed7890cef 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -74,7 +74,8 @@ extern void execve_tail(void);
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
  */
 
-#define TASK_SIZE_OF(tsk)	((tsk)->mm->context.asce_limit)
+#define TASK_SIZE_OF(tsk)	((tsk)->mm ? \
+				 (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
 					(1UL << 30) : (1UL << 41))
 #define TASK_SIZE		TASK_SIZE_OF(current)
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 6ba0bf928909..6bc941be6921 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -64,6 +64,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
 {
 	unsigned long mask = -1UL;
 
+	/*
+	 * No arguments for this syscall, there's nothing to do.
+	 */
+	if (!n)
+		return;
+
 	BUG_ON(i + n > 6);
 #ifdef CONFIG_COMPAT
 	if (test_tsk_thread_flag(task, TIF_31BIT))
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 5c7381c5ad7f..c8d837f0fbbc 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -150,7 +150,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
150 " jg 2b\n" \ 150 " jg 2b\n" \
151 ".popsection\n" \ 151 ".popsection\n" \
152 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ 152 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
153 : "=d" (__rc), "=Q" (*(to)) \ 153 : "=d" (__rc), "+Q" (*(to)) \
154 : "d" (size), "Q" (*(from)), \ 154 : "d" (size), "Q" (*(from)), \
155 "d" (__reg0), "K" (-EFAULT) \ 155 "d" (__reg0), "K" (-EFAULT) \
156 : "cc"); \ 156 : "cc"); \
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 171e09bb8ea2..df4685905015 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -23,6 +23,8 @@
23#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) 23#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
24#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y)))) 24#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
25 25
26#define LINUX_NOTE_NAME "LINUX"
27
26static struct memblock_region oldmem_region; 28static struct memblock_region oldmem_region;
27 29
28static struct memblock_type oldmem_type = { 30static struct memblock_type oldmem_type = {
@@ -312,7 +314,7 @@ static void *nt_fpregset(void *ptr, struct save_area *sa)
312static void *nt_s390_timer(void *ptr, struct save_area *sa) 314static void *nt_s390_timer(void *ptr, struct save_area *sa)
313{ 315{
314 return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer), 316 return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer),
315 KEXEC_CORE_NOTE_NAME); 317 LINUX_NOTE_NAME);
316} 318}
317 319
318/* 320/*
@@ -321,7 +323,7 @@ static void *nt_s390_timer(void *ptr, struct save_area *sa)
321static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa) 323static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa)
322{ 324{
323 return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp, 325 return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp,
324 sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME); 326 sizeof(sa->clk_cmp), LINUX_NOTE_NAME);
325} 327}
326 328
327/* 329/*
@@ -330,7 +332,7 @@ static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa)
330static void *nt_s390_tod_preg(void *ptr, struct save_area *sa) 332static void *nt_s390_tod_preg(void *ptr, struct save_area *sa)
331{ 333{
332 return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg, 334 return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg,
333 sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME); 335 sizeof(sa->tod_reg), LINUX_NOTE_NAME);
334} 336}
335 337
336/* 338/*
@@ -339,7 +341,7 @@ static void *nt_s390_tod_preg(void *ptr, struct save_area *sa)
339static void *nt_s390_ctrs(void *ptr, struct save_area *sa) 341static void *nt_s390_ctrs(void *ptr, struct save_area *sa)
340{ 342{
341 return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs, 343 return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs,
342 sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME); 344 sizeof(sa->ctrl_regs), LINUX_NOTE_NAME);
343} 345}
344 346
345/* 347/*
@@ -348,7 +350,7 @@ static void *nt_s390_ctrs(void *ptr, struct save_area *sa)
348static void *nt_s390_prefix(void *ptr, struct save_area *sa) 350static void *nt_s390_prefix(void *ptr, struct save_area *sa)
349{ 351{
350 return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg, 352 return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg,
351 sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME); 353 sizeof(sa->pref_reg), LINUX_NOTE_NAME);
352} 354}
353 355
354/* 356/*
@@ -357,7 +359,7 @@ static void *nt_s390_prefix(void *ptr, struct save_area *sa)
357static void *nt_s390_vx_high(void *ptr, __vector128 *vx_regs) 359static void *nt_s390_vx_high(void *ptr, __vector128 *vx_regs)
358{ 360{
359 return nt_init(ptr, NT_S390_VXRS_HIGH, &vx_regs[16], 361 return nt_init(ptr, NT_S390_VXRS_HIGH, &vx_regs[16],
360 16 * sizeof(__vector128), KEXEC_CORE_NOTE_NAME); 362 16 * sizeof(__vector128), LINUX_NOTE_NAME);
361} 363}
362 364
363/* 365/*
@@ -370,12 +372,12 @@ static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
370 int i; 372 int i;
371 373
372 note = (Elf64_Nhdr *)ptr; 374 note = (Elf64_Nhdr *)ptr;
373 note->n_namesz = strlen(KEXEC_CORE_NOTE_NAME) + 1; 375 note->n_namesz = strlen(LINUX_NOTE_NAME) + 1;
374 note->n_descsz = 16 * 8; 376 note->n_descsz = 16 * 8;
375 note->n_type = NT_S390_VXRS_LOW; 377 note->n_type = NT_S390_VXRS_LOW;
376 len = sizeof(Elf64_Nhdr); 378 len = sizeof(Elf64_Nhdr);
377 379
378 memcpy(ptr + len, KEXEC_CORE_NOTE_NAME, note->n_namesz); 380 memcpy(ptr + len, LINUX_NOTE_NAME, note->n_namesz);
379 len = roundup(len + note->n_namesz, 4); 381 len = roundup(len + note->n_namesz, 4);
380 382
381 ptr += len; 383 ptr += len;
@@ -462,6 +464,20 @@ static void *nt_vmcoreinfo(void *ptr)
462} 464}
463 465
464/* 466/*
467 * Initialize final note (needed for /proc/vmcore code)
468 */
469static void *nt_final(void *ptr)
470{
471 Elf64_Nhdr *note;
472
473 note = (Elf64_Nhdr *) ptr;
474 note->n_namesz = 0;
475 note->n_descsz = 0;
476 note->n_type = 0;
477 return PTR_ADD(ptr, sizeof(Elf64_Nhdr));
478}
479
480/*
465 * Initialize ELF header (new kernel) 481 * Initialize ELF header (new kernel)
466 */ 482 */
467static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt) 483static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
@@ -551,6 +567,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
551 ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs); 567 ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs);
552 } 568 }
553 ptr = nt_vmcoreinfo(ptr); 569 ptr = nt_vmcoreinfo(ptr);
570 ptr = nt_final(ptr);
554 memset(phdr, 0, sizeof(*phdr)); 571 memset(phdr, 0, sizeof(*phdr));
555 phdr->p_type = PT_NOTE; 572 phdr->p_type = PT_NOTE;
556 phdr->p_offset = notes_offset; 573 phdr->p_offset = notes_offset;
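The new nt_final() emits an all-zero note header so consumers of the PT_NOTE segment (the /proc/vmcore code, per the comment above) can detect where the notes end. A hedged sketch of that terminator, compilable in Linux userspace with <elf.h> (buffer handling simplified):

#include <elf.h>
#include <stdio.h>
#include <string.h>

/* Append an all-zero Elf64_Nhdr: n_namesz = n_descsz = n_type = 0
 * marks the end of the note list for readers walking the segment. */
static void *append_final_note(void *ptr)
{
        Elf64_Nhdr note;

        memset(&note, 0, sizeof(note));
        memcpy(ptr, &note, sizeof(note));
        return (char *)ptr + sizeof(note);
}

int main(void)
{
        unsigned char buf[64] = { 0xff };
        void *end = append_final_note(buf);

        printf("terminator occupies %zu bytes\n",
               (size_t)((unsigned char *)end - buf));
        return 0;
}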
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 424e6809ad07..4612ed7ec2e5 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -229,12 +229,17 @@ ENTRY(sie64a)
229 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 229 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
230.Lsie_done: 230.Lsie_done:
231# some program checks are suppressing. C code (e.g. do_protection_exception) 231# some program checks are suppressing. C code (e.g. do_protection_exception)
232# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other 232# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
233# instructions between sie64a and .Lsie_done should not cause program 233# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
234# interrupts. So let's use a nop (47 00 00 00) as a landing pad. 234# Other instructions between sie64a and .Lsie_done should not cause program
235# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
235# See also .Lcleanup_sie 236# See also .Lcleanup_sie
236.Lrewind_pad: 237.Lrewind_pad6:
237 nop 0 238 nopr 7
239.Lrewind_pad4:
240 nopr 7
241.Lrewind_pad2:
242 nopr 7
238 .globl sie_exit 243 .globl sie_exit
239sie_exit: 244sie_exit:
240 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area 245 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
@@ -247,7 +252,9 @@ sie_exit:
247 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code 252 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
248 j sie_exit 253 j sie_exit
249 254
250 EX_TABLE(.Lrewind_pad,.Lsie_fault) 255 EX_TABLE(.Lrewind_pad6,.Lsie_fault)
256 EX_TABLE(.Lrewind_pad4,.Lsie_fault)
257 EX_TABLE(.Lrewind_pad2,.Lsie_fault)
251 EX_TABLE(sie_exit,.Lsie_fault) 258 EX_TABLE(sie_exit,.Lsie_fault)
252#endif 259#endif
253 260
@@ -308,6 +315,7 @@ ENTRY(system_call)
308 lg %r14,__LC_VDSO_PER_CPU 315 lg %r14,__LC_VDSO_PER_CPU
309 lmg %r0,%r10,__PT_R0(%r11) 316 lmg %r0,%r10,__PT_R0(%r11)
310 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 317 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
318.Lsysc_exit_timer:
311 stpt __LC_EXIT_TIMER 319 stpt __LC_EXIT_TIMER
312 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 320 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
313 lmg %r11,%r15,__PT_R11(%r11) 321 lmg %r11,%r15,__PT_R11(%r11)
@@ -593,6 +601,7 @@ ENTRY(io_int_handler)
593 lg %r14,__LC_VDSO_PER_CPU 601 lg %r14,__LC_VDSO_PER_CPU
594 lmg %r0,%r10,__PT_R0(%r11) 602 lmg %r0,%r10,__PT_R0(%r11)
595 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 603 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
604.Lio_exit_timer:
596 stpt __LC_EXIT_TIMER 605 stpt __LC_EXIT_TIMER
597 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 606 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
598 lmg %r11,%r15,__PT_R11(%r11) 607 lmg %r11,%r15,__PT_R11(%r11)
@@ -1118,15 +1127,23 @@ cleanup_critical:
1118 br %r14 1127 br %r14
1119 1128
1120.Lcleanup_sysc_restore: 1129.Lcleanup_sysc_restore:
1130 # check if stpt has been executed
1121 clg %r9,BASED(.Lcleanup_sysc_restore_insn) 1131 clg %r9,BASED(.Lcleanup_sysc_restore_insn)
1132 jh 0f
1133 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
1134 cghi %r11,__LC_SAVE_AREA_ASYNC
1122 je 0f 1135 je 0f
1136 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
11370: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8)
1138 je 1f
1123 lg %r9,24(%r11) # get saved pointer to pt_regs 1139 lg %r9,24(%r11) # get saved pointer to pt_regs
1124 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1140 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
1125 mvc 0(64,%r11),__PT_R8(%r9) 1141 mvc 0(64,%r11),__PT_R8(%r9)
1126 lmg %r0,%r7,__PT_R0(%r9) 1142 lmg %r0,%r7,__PT_R0(%r9)
11270: lmg %r8,%r9,__LC_RETURN_PSW 11431: lmg %r8,%r9,__LC_RETURN_PSW
1128 br %r14 1144 br %r14
1129.Lcleanup_sysc_restore_insn: 1145.Lcleanup_sysc_restore_insn:
1146 .quad .Lsysc_exit_timer
1130 .quad .Lsysc_done - 4 1147 .quad .Lsysc_done - 4
1131 1148
1132.Lcleanup_io_tif: 1149.Lcleanup_io_tif:
@@ -1134,15 +1151,20 @@ cleanup_critical:
1134 br %r14 1151 br %r14
1135 1152
1136.Lcleanup_io_restore: 1153.Lcleanup_io_restore:
1154 # check if stpt has been executed
1137 clg %r9,BASED(.Lcleanup_io_restore_insn) 1155 clg %r9,BASED(.Lcleanup_io_restore_insn)
1138 je 0f 1156 jh 0f
1157 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
11580: clg %r9,BASED(.Lcleanup_io_restore_insn+8)
1159 je 1f
1139 lg %r9,24(%r11) # get saved r11 pointer to pt_regs 1160 lg %r9,24(%r11) # get saved r11 pointer to pt_regs
1140 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1161 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
1141 mvc 0(64,%r11),__PT_R8(%r9) 1162 mvc 0(64,%r11),__PT_R8(%r9)
1142 lmg %r0,%r7,__PT_R0(%r9) 1163 lmg %r0,%r7,__PT_R0(%r9)
11430: lmg %r8,%r9,__LC_RETURN_PSW 11641: lmg %r8,%r9,__LC_RETURN_PSW
1144 br %r14 1165 br %r14
1145.Lcleanup_io_restore_insn: 1166.Lcleanup_io_restore_insn:
1167 .quad .Lio_exit_timer
1146 .quad .Lio_done - 4 1168 .quad .Lio_done - 4
1147 1169
1148.Lcleanup_idle: 1170.Lcleanup_idle:
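The cleanup changes above handle an interrupt landing between the mvc of the return PSW and the stpt that stores the exit timer. Roughly: if the interrupted address is at or before the stpt, the exit timer was never stored, so it is reconstructed from the appropriate enter timer. A loose C rendering of that decision (field and function names are illustrative, not the kernel's):

#include <stdio.h>

struct lowcore_sketch {
        unsigned long exit_timer;
        unsigned long async_enter_timer;
        unsigned long mcck_enter_timer;
};

static void fixup_exit_timer(struct lowcore_sketch *lc,
                             unsigned long interrupted_pc,
                             unsigned long stpt_pc,
                             int from_machine_check)
{
        if (interrupted_pc > stpt_pc)
                return;         /* stpt already ran; nothing to fix */
        lc->exit_timer = from_machine_check ? lc->mcck_enter_timer
                                            : lc->async_enter_timer;
}

int main(void)
{
        struct lowcore_sketch lc = { 0, 100, 200 };

        fixup_exit_timer(&lc, 0x40, 0x50, 0);
        printf("exit_timer = %lu\n", lc.exit_timer);    /* 100 */
        return 0;
}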
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 01c37b36caf9..02bd587b610b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
963 if (target == current) 963 if (target == current)
964 save_fpu_regs(); 964 save_fpu_regs();
965 965
966 if (MACHINE_HAS_VX)
967 convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
968 else
969 memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
970
966 /* If setting FPC, must validate it first. */ 971 /* If setting FPC, must validate it first. */
967 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) { 972 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
968 u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; 973 u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
1067 if (target == current) 1072 if (target == current)
1068 save_fpu_regs(); 1073 save_fpu_regs();
1069 1074
1075 for (i = 0; i < __NUM_VXRS_LOW; i++)
1076 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1077
1070 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); 1078 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1071 if (rc == 0) 1079 if (rc == 0)
1072 for (i = 0; i < __NUM_VXRS_LOW; i++) 1080 for (i = 0; i < __NUM_VXRS_LOW; i++)
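Both ptrace fixes enforce the same read-modify-write rule: seed the local buffer with the task's current register values before applying a possibly partial user write, so untouched slots keep their old contents rather than stack garbage. A compact sketch, where copyin_partial() stands in for user_regset_copyin():

#include <stdio.h>
#include <string.h>

static void copyin_partial(unsigned long *dst, const unsigned long *src,
                           size_t pos, size_t count)
{
        memcpy(dst + pos, src + pos, count * sizeof(*dst));
}

int main(void)
{
        unsigned long current_fprs[4] = { 1, 2, 3, 4 }; /* task state */
        unsigned long user_write[4]  = { 0, 99, 0, 0 }; /* new value  */
        unsigned long fprs[4];

        memcpy(fprs, current_fprs, sizeof(fprs));   /* the added pre-fill */
        copyin_partial(fprs, user_write, 1, 1);     /* user sets reg 1 only */

        printf("%lu %lu %lu %lu\n", fprs[0], fprs[1], fprs[2], fprs[3]);
        /* -> 1 99 3 4: registers the user did not touch are preserved */
        return 0;
}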
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 1f581eb61bc2..d097d71685df 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -805,10 +805,10 @@ static void __init setup_randomness(void)
805{ 805{
806 struct sysinfo_3_2_2 *vmms; 806 struct sysinfo_3_2_2 *vmms;
807 807
808 vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL); 808 vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
809 if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count) 809 if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
810 add_device_randomness(&vmms, vmms->count); 810 add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
811 free_page((unsigned long) vmms); 811 memblock_free((unsigned long) vmms, PAGE_SIZE);
812} 812}
813 813
814/* 814/*
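The setup_randomness() fix corrects two bugs at once: the old code passed &vmms, the address of a local pointer, instead of the sampled data, and used an element count where a byte count was expected. A userspace sketch of the buggy versus fixed call shapes (structure layout invented for illustration):

#include <stdio.h>

struct vm_entry { char name[8]; };
struct sysinfo_sketch { int count; struct vm_entry vm[4]; };

/* stand-in for add_device_randomness(buf, len) */
static void add_entropy(const void *buf, size_t len)
{
        printf("mixing %zu bytes starting with %02x\n",
               len, *(const unsigned char *)buf);
}

int main(void)
{
        struct sysinfo_sketch s = { 2, { { "guest0" }, { "guest1" } } };
        struct sysinfo_sketch *vmms = &s;

        /* Buggy shape: &vmms is the address of the local pointer, and
         * vmms->count is an element count, not a byte count. */
        add_entropy(&vmms, vmms->count);

        /* Fixed shape: the actual data, with a real byte length. */
        add_entropy(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
        return 0;
}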
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 575dc123bda2..23e3f5d77a24 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -295,6 +295,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
295 struct kvm_memory_slot *memslot; 295 struct kvm_memory_slot *memslot;
296 int is_dirty = 0; 296 int is_dirty = 0;
297 297
298 if (kvm_is_ucontrol(kvm))
299 return -EINVAL;
300
298 mutex_lock(&kvm->slots_lock); 301 mutex_lock(&kvm->slots_lock);
299 302
300 r = -EINVAL; 303 r = -EINVAL;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index feff9caf89b5..91376d9e4286 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -109,6 +109,13 @@ void __init paging_init(void)
109 free_area_init_nodes(max_zone_pfns); 109 free_area_init_nodes(max_zone_pfns);
110} 110}
111 111
112void mark_rodata_ro(void)
113{
114 /* Text and rodata are already protected. Nothing to do here. */
115 pr_info("Write protecting the kernel read-only data: %luk\n",
116 ((unsigned long)&_eshared - (unsigned long)&_stext) >> 10);
117}
118
112void __init mem_init(void) 119void __init mem_init(void)
113{ 120{
114 if (MACHINE_HAS_TLB_LC) 121 if (MACHINE_HAS_TLB_LC)
@@ -127,9 +134,6 @@ void __init mem_init(void)
127 setup_zero_pages(); /* Setup zeroed pages. */ 134 setup_zero_pages(); /* Setup zeroed pages. */
128 135
129 mem_init_print_info(NULL); 136 mem_init_print_info(NULL);
130 printk("Write protected kernel read-only data: %#lx - %#lx\n",
131 (unsigned long)&_stext,
132 PFN_ALIGN((unsigned long)&_eshared) - 1);
133} 137}
134 138
135void free_initmem(void) 139void free_initmem(void)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index f2b6b1d9c804..126c4a9b9bf9 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -97,7 +97,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97 addr = PAGE_ALIGN(addr); 97 addr = PAGE_ALIGN(addr);
98 vma = find_vma(mm, addr); 98 vma = find_vma(mm, addr);
99 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 99 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
100 (!vma || addr + len <= vma->vm_start)) 100 (!vma || addr + len <= vm_start_gap(vma)))
101 return addr; 101 return addr;
102 } 102 }
103 103
@@ -135,7 +135,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
135 addr = PAGE_ALIGN(addr); 135 addr = PAGE_ALIGN(addr);
136 vma = find_vma(mm, addr); 136 vma = find_vma(mm, addr);
137 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 137 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
138 (!vma || addr + len <= vma->vm_start)) 138 (!vma || addr + len <= vm_start_gap(vma)))
139 return addr; 139 return addr;
140 } 140 }
141 141
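vm_start_gap() is the stack-guard-gap helper this series switches to: for a downward-growing stack VMA, the usable start is pulled back by the guard gap, so "addr + len <= vma->vm_start" alone is no longer a sufficient collision check. A sketch modeled on the generic helper (constants and struct fields simplified):

#include <stdio.h>

#define VM_GROWSDOWN 0x1UL
static unsigned long stack_guard_gap = 256UL << 12;     /* 256 pages */

struct vma_sketch { unsigned long vm_start; unsigned long vm_flags; };

/* For a stack vma, report a start address that already accounts for
 * the guard gap below it; plain vmas report vm_start unchanged. */
static unsigned long vm_start_gap_sketch(const struct vma_sketch *vma)
{
        unsigned long start = vma->vm_start;

        if (vma->vm_flags & VM_GROWSDOWN) {
                if (start > stack_guard_gap)
                        start -= stack_guard_gap;
                else
                        start = 0;
        }
        return start;
}

int main(void)
{
        struct vma_sketch stack = { 0x7f0000000000UL, VM_GROWSDOWN };

        printf("vm_start     = %#lx\n", stack.vm_start);
        printf("vm_start_gap = %#lx\n", vm_start_gap_sketch(&stack));
        return 0;
}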
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 8345ae1f117d..05ae254f84cf 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1237,11 +1237,28 @@ EXPORT_SYMBOL_GPL(s390_reset_cmma);
1237 */ 1237 */
1238bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap) 1238bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
1239{ 1239{
1240 pgd_t *pgd;
1241 pud_t *pud;
1242 pmd_t *pmd;
1240 pte_t *pte; 1243 pte_t *pte;
1241 spinlock_t *ptl; 1244 spinlock_t *ptl;
1242 bool dirty = false; 1245 bool dirty = false;
1243 1246
1244 pte = get_locked_pte(gmap->mm, address, &ptl); 1247 pgd = pgd_offset(gmap->mm, address);
1248 pud = pud_alloc(gmap->mm, pgd, address);
1249 if (!pud)
1250 return false;
1251 pmd = pmd_alloc(gmap->mm, pud, address);
1252 if (!pmd)
1253 return false;
1254 /* We can't run guests backed by huge pages, but userspace can
1255 * still set them up and then try to migrate them without any
1256 * migration support.
1257 */
1258 if (pmd_large(*pmd))
1259 return true;
1260
1261 pte = pte_alloc_map_lock(gmap->mm, pmd, address, &ptl);
1245 if (unlikely(!pte)) 1262 if (unlikely(!pte))
1246 return false; 1263 return false;
1247 1264
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ef7d6c8fea66..f354fd84adeb 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -372,7 +372,7 @@ void __init vmem_map_init(void)
372 ro_end = (unsigned long)&_eshared & PAGE_MASK; 372 ro_end = (unsigned long)&_eshared & PAGE_MASK;
373 for_each_memblock(memory, reg) { 373 for_each_memblock(memory, reg) {
374 start = reg->base; 374 start = reg->base;
375 end = reg->base + reg->size - 1; 375 end = reg->base + reg->size;
376 if (start >= ro_end || end <= ro_start) 376 if (start >= ro_end || end <= ro_start)
377 vmem_add_mem(start, end - start, 0); 377 vmem_add_mem(start, end - start, 0);
378 else if (start >= ro_start && end <= ro_end) 378 else if (start >= ro_start && end <= ro_end)
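The vmem fix switches end from an inclusive to an exclusive bound: with base + size - 1, the later "end - start" length is one byte short, and comparisons against other exclusive bounds misfire. A two-line demonstration:

#include <stdio.h>

int main(void)
{
        unsigned long base = 0x1000, size = 0x1000;
        unsigned long end_incl = base + size - 1;       /* old: inclusive */
        unsigned long end_excl = base + size;           /* new: exclusive */

        printf("inclusive length: %#lx\n", end_incl - base);    /* 0xfff  */
        printf("exclusive length: %#lx\n", end_excl - base);    /* 0x1000 */
        return 0;
}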
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0e2919dd8df3..1395eeb6005f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1250,7 +1250,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
1250 insn_count = bpf_jit_insn(jit, fp, i); 1250 insn_count = bpf_jit_insn(jit, fp, i);
1251 if (insn_count < 0) 1251 if (insn_count < 0)
1252 return -1; 1252 return -1;
1253 jit->addrs[i + 1] = jit->prg; /* Next instruction address */ 1253 /* Next instruction address */
1254 jit->addrs[i + insn_count] = jit->prg;
1254 } 1255 }
1255 bpf_jit_epilogue(jit); 1256 bpf_jit_epilogue(jit);
1256 1257
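The BPF JIT fix accounts for eBPF instructions that occupy more than one 8-byte slot (notably the 64-bit immediate load, which takes two), so the index-to-address map must advance by the number of slots consumed, not always by one. A toy model of the corrected bookkeeping:

#include <stdio.h>

int main(void)
{
        /* slots consumed per pseudo-instruction; the 2 models ld_imm64 */
        int slots[] = { 1, 2, 1 };
        int addrs[8] = { 0 };
        int prg = 0, i = 0, n = 3;

        while (i < n) {
                prg += 4 * slots[i];            /* pretend 4 bytes per slot */
                addrs[i + slots[i]] = prg;      /* fixed: i + insn_count    */
                i += slots[i];
        }
        for (i = 0; i <= 4; i++)
                printf("addrs[%d] = %d\n", i, addrs[i]);
        return 0;
}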
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 3a40f718baef..4004e03267cd 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -455,7 +455,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
455 zdev->dma_table = dma_alloc_cpu_table(); 455 zdev->dma_table = dma_alloc_cpu_table();
456 if (!zdev->dma_table) { 456 if (!zdev->dma_table) {
457 rc = -ENOMEM; 457 rc = -ENOMEM;
458 goto out_clean; 458 goto out;
459 } 459 }
460 460
461 /* 461 /*
@@ -475,18 +475,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
475 zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8); 475 zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
476 if (!zdev->iommu_bitmap) { 476 if (!zdev->iommu_bitmap) {
477 rc = -ENOMEM; 477 rc = -ENOMEM;
478 goto out_reg; 478 goto free_dma_table;
479 } 479 }
480 480
481 rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, 481 rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
482 (u64) zdev->dma_table); 482 (u64) zdev->dma_table);
483 if (rc) 483 if (rc)
484 goto out_reg; 484 goto free_bitmap;
485 return 0;
486 485
487out_reg: 486 return 0;
487free_bitmap:
488 vfree(zdev->iommu_bitmap);
489 zdev->iommu_bitmap = NULL;
490free_dma_table:
488 dma_free_cpu_table(zdev->dma_table); 491 dma_free_cpu_table(zdev->dma_table);
489out_clean: 492 zdev->dma_table = NULL;
493out:
490 return rc; 494 return rc;
491} 495}
492 496
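The reworked error path is the classic goto-unwind ladder: one label per acquired resource, in reverse acquisition order, with freed pointers reset to NULL so a later teardown cannot double-free them. A self-contained sketch of the shape:

#include <stdio.h>
#include <stdlib.h>

struct dev_sketch { void *table; void *bitmap; };

static int init_dev(struct dev_sketch *d)
{
        int rc;

        d->table = malloc(64);
        if (!d->table) { rc = -1; goto out; }

        d->bitmap = calloc(1, 32);
        if (!d->bitmap) { rc = -1; goto free_table; }

        if (0 /* register_with_hw(d) failed */) { rc = -1; goto free_bitmap; }
        return 0;

free_bitmap:
        free(d->bitmap);
        d->bitmap = NULL;       /* no stale pointer for later teardown */
free_table:
        free(d->table);
        d->table = NULL;
out:
        return rc;
}

int main(void)
{
        struct dev_sketch d = { 0 };

        printf("init_dev -> %d\n", init_dev(&d));
        return 0;
}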
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6777177807c2..7df7d5944188 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
63 63
64 vma = find_vma(mm, addr); 64 vma = find_vma(mm, addr);
65 if (TASK_SIZE - len >= addr && 65 if (TASK_SIZE - len >= addr &&
66 (!vma || addr + len <= vma->vm_start)) 66 (!vma || addr + len <= vm_start_gap(vma)))
67 return addr; 67 return addr;
68 } 68 }
69 69
@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
113 113
114 vma = find_vma(mm, addr); 114 vma = find_vma(mm, addr);
115 if (TASK_SIZE - len >= addr && 115 if (TASK_SIZE - len >= addr &&
116 (!vma || addr + len <= vma->vm_start)) 116 (!vma || addr + len <= vm_start_gap(vma)))
117 return addr; 117 return addr;
118 } 118 }
119 119
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3736be630113..894bcaed002e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -183,9 +183,9 @@ config NR_CPUS
183 int "Maximum number of CPUs" 183 int "Maximum number of CPUs"
184 depends on SMP 184 depends on SMP
185 range 2 32 if SPARC32 185 range 2 32 if SPARC32
186 range 2 1024 if SPARC64 186 range 2 4096 if SPARC64
187 default 32 if SPARC32 187 default 32 if SPARC32
188 default 64 if SPARC64 188 default 4096 if SPARC64
189 189
190source kernel/Kconfig.hz 190source kernel/Kconfig.hz
191 191
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0dbc38af..83b36a5371ff 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
52#define CTX_NR_MASK TAG_CONTEXT_BITS 52#define CTX_NR_MASK TAG_CONTEXT_BITS
53#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK) 53#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
54 54
55#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL)) 55#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT)
56#define CTX_VALID(__ctx) \ 56#define CTX_VALID(__ctx) \
57 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK)) 57 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
58#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK) 58#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index b84be675e507..0cdeb2b483a0 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -17,22 +17,19 @@ extern spinlock_t ctx_alloc_lock;
17extern unsigned long tlb_context_cache; 17extern unsigned long tlb_context_cache;
18extern unsigned long mmu_context_bmap[]; 18extern unsigned long mmu_context_bmap[];
19 19
20DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
20void get_new_mmu_context(struct mm_struct *mm); 21void get_new_mmu_context(struct mm_struct *mm);
21#ifdef CONFIG_SMP
22void smp_new_mmu_context_version(void);
23#else
24#define smp_new_mmu_context_version() do { } while (0)
25#endif
26
27int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 22int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
28void destroy_context(struct mm_struct *mm); 23void destroy_context(struct mm_struct *mm);
29 24
30void __tsb_context_switch(unsigned long pgd_pa, 25void __tsb_context_switch(unsigned long pgd_pa,
31 struct tsb_config *tsb_base, 26 struct tsb_config *tsb_base,
32 struct tsb_config *tsb_huge, 27 struct tsb_config *tsb_huge,
33 unsigned long tsb_descr_pa); 28 unsigned long tsb_descr_pa,
29 unsigned long secondary_ctx);
34 30
35static inline void tsb_context_switch(struct mm_struct *mm) 31static inline void tsb_context_switch_ctx(struct mm_struct *mm,
32 unsigned long ctx)
36{ 33{
37 __tsb_context_switch(__pa(mm->pgd), 34 __tsb_context_switch(__pa(mm->pgd),
38 &mm->context.tsb_block[0], 35 &mm->context.tsb_block[0],
@@ -43,9 +40,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
43#else 40#else
44 NULL 41 NULL
45#endif 42#endif
46 , __pa(&mm->context.tsb_descr[0])); 43 , __pa(&mm->context.tsb_descr[0]),
44 ctx);
47} 45}
48 46
47#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
48
49void tsb_grow(struct mm_struct *mm, 49void tsb_grow(struct mm_struct *mm,
50 unsigned long tsb_index, 50 unsigned long tsb_index,
51 unsigned long mm_rss); 51 unsigned long mm_rss);
@@ -74,8 +74,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
74static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) 74static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
75{ 75{
76 unsigned long ctx_valid, flags; 76 unsigned long ctx_valid, flags;
77 int cpu; 77 int cpu = smp_processor_id();
78 78
79 per_cpu(per_cpu_secondary_mm, cpu) = mm;
79 if (unlikely(mm == &init_mm)) 80 if (unlikely(mm == &init_mm))
80 return; 81 return;
81 82
@@ -114,14 +115,12 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 114 * cpu0 to update its TSB because at that point the cpu_vm_mask 115 * cpu0 to update its TSB because at that point the cpu_vm_mask
115 * only had cpu1 set in it. 116 * only had cpu1 set in it.
116 */ 117 */
117 load_secondary_context(mm); 118 tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
118 tsb_context_switch(mm);
119 119
120 /* Any time a processor runs a context on an address space 120 /* Any time a processor runs a context on an address space
121 * for the first time, we must flush that context out of the 121 * for the first time, we must flush that context out of the
122 * local TLB. 122 * local TLB.
123 */ 123 */
124 cpu = smp_processor_id();
125 if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) { 124 if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
126 cpumask_set_cpu(cpu, mm_cpumask(mm)); 125 cpumask_set_cpu(cpu, mm_cpumask(mm));
127 __flush_tlb_mm(CTX_HWBITS(mm->context), 126 __flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -131,26 +130,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
131} 130}
132 131
133#define deactivate_mm(tsk,mm) do { } while (0) 132#define deactivate_mm(tsk,mm) do { } while (0)
134 133#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
135/* Activate a new MM instance for the current task. */
136static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
137{
138 unsigned long flags;
139 int cpu;
140
141 spin_lock_irqsave(&mm->context.lock, flags);
142 if (!CTX_VALID(mm->context))
143 get_new_mmu_context(mm);
144 cpu = smp_processor_id();
145 if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
146 cpumask_set_cpu(cpu, mm_cpumask(mm));
147
148 load_secondary_context(mm);
149 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
150 tsb_context_switch(mm);
151 spin_unlock_irqrestore(&mm->context.lock, flags);
152}
153
154#endif /* !(__ASSEMBLY__) */ 134#endif /* !(__ASSEMBLY__) */
155 135
156#endif /* !(__SPARC64_MMU_CONTEXT_H) */ 136#endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 91b963a887b7..29c3b400f949 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -91,9 +91,9 @@ extern unsigned long pfn_base;
91 * ZERO_PAGE is a global shared page that is always zero: used 91 * ZERO_PAGE is a global shared page that is always zero: used
92 * for zero-mapped memory areas etc.. 92 * for zero-mapped memory areas etc..
93 */ 93 */
94extern unsigned long empty_zero_page; 94extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
95 95
96#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page)) 96#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
97 97
98/* 98/*
99 * In general all page table modifications should use the V8 atomic 99 * In general all page table modifications should use the V8 atomic
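Declaring empty_zero_page as an array matters: its definition is a page-sized array, so an "unsigned long" extern is a mismatched declaration, and "&empty_zero_page" then carries the wrong type for virt_to_page(). With the array declaration the symbol decays to a pointer to its first element naturally. A sketch of the array form (PAGE_SIZE hard-coded for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

int main(void)
{
        /* With the array declaration, the name itself is the address. */
        printf("page at %p, %zu longs\n",
               (void *)empty_zero_page,
               sizeof(empty_zero_page) / sizeof(empty_zero_page[0]));
        return 0;
}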
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 408b715c95a5..9d81579f3d54 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -668,26 +668,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
668 return pte_pfn(pte); 668 return pte_pfn(pte);
669} 669}
670 670
671#ifdef CONFIG_TRANSPARENT_HUGEPAGE 671#define __HAVE_ARCH_PMD_WRITE
672static inline unsigned long pmd_dirty(pmd_t pmd) 672static inline unsigned long pmd_write(pmd_t pmd)
673{ 673{
674 pte_t pte = __pte(pmd_val(pmd)); 674 pte_t pte = __pte(pmd_val(pmd));
675 675
676 return pte_dirty(pte); 676 return pte_write(pte);
677} 677}
678 678
679static inline unsigned long pmd_young(pmd_t pmd) 679#ifdef CONFIG_TRANSPARENT_HUGEPAGE
680static inline unsigned long pmd_dirty(pmd_t pmd)
680{ 681{
681 pte_t pte = __pte(pmd_val(pmd)); 682 pte_t pte = __pte(pmd_val(pmd));
682 683
683 return pte_young(pte); 684 return pte_dirty(pte);
684} 685}
685 686
686static inline unsigned long pmd_write(pmd_t pmd) 687static inline unsigned long pmd_young(pmd_t pmd)
687{ 688{
688 pte_t pte = __pte(pmd_val(pmd)); 689 pte_t pte = __pte(pmd_val(pmd));
689 690
690 return pte_write(pte); 691 return pte_young(pte);
691} 692}
692 693
693static inline unsigned long pmd_trans_huge(pmd_t pmd) 694static inline unsigned long pmd_trans_huge(pmd_t pmd)
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 266937030546..522b43db2ed3 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
20#define PIL_SMP_CALL_FUNC 1 20#define PIL_SMP_CALL_FUNC 1
21#define PIL_SMP_RECEIVE_SIGNAL 2 21#define PIL_SMP_RECEIVE_SIGNAL 2
22#define PIL_SMP_CAPTURE 3 22#define PIL_SMP_CAPTURE 3
23#define PIL_SMP_CTX_NEW_VERSION 4
24#define PIL_DEVICE_IRQ 5 23#define PIL_DEVICE_IRQ 5
25#define PIL_SMP_CALL_FUNC_SNGL 6 24#define PIL_SMP_CALL_FUNC_SNGL 6
26#define PIL_DEFERRED_PCR_WORK 7 25#define PIL_DEFERRED_PCR_WORK 7
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 29d64b1758ed..be0cc1beed41 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -16,7 +16,7 @@ extern char reboot_command[];
16 */ 16 */
17extern unsigned char boot_cpu_id; 17extern unsigned char boot_cpu_id;
18 18
19extern unsigned long empty_zero_page; 19extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
20 20
21extern int serial_console; 21extern int serial_console;
22static inline int con_is_present(void) 22static inline int con_is_present(void)
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index ec9c04de3664..ff05992dae7a 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
54void init_cur_cpu_trap(struct thread_info *); 54void init_cur_cpu_trap(struct thread_info *);
55void setup_tba(void); 55void setup_tba(void);
56extern int ncpus_probed; 56extern int ncpus_probed;
57extern u64 cpu_mondo_counter[NR_CPUS];
57 58
58unsigned long real_hard_smp_processor_id(void); 59unsigned long real_hard_smp_processor_id(void);
59 60
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6cdbbbb..9dca7a892978 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -327,6 +327,7 @@ struct vio_dev {
327 int compat_len; 327 int compat_len;
328 328
329 u64 dev_no; 329 u64 dev_no;
330 u64 id;
330 331
331 unsigned long channel_id; 332 unsigned long channel_id;
332 333
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e22416ce56ea..bfbde8c4ffb2 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
1034{ 1034{
1035#ifdef CONFIG_SMP 1035#ifdef CONFIG_SMP
1036 unsigned long page; 1036 unsigned long page;
1037 void *mondo, *p;
1037 1038
1038 BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); 1039 BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
1040
1041 /* Make sure mondo block is 64byte aligned */
1042 p = kzalloc(127, GFP_KERNEL);
1043 if (!p) {
1044 prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
1045 prom_halt();
1046 }
1047 mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
1048 tb->cpu_mondo_block_pa = __pa(mondo);
1039 1049
1040 page = get_zeroed_page(GFP_KERNEL); 1050 page = get_zeroed_page(GFP_KERNEL);
1041 if (!page) { 1051 if (!page) {
1042 prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); 1052 prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
1043 prom_halt(); 1053 prom_halt();
1044 } 1054 }
1045 1055
1046 tb->cpu_mondo_block_pa = __pa(page); 1056 tb->cpu_list_pa = __pa(page);
1047 tb->cpu_list_pa = __pa(page + 64);
1048#endif 1057#endif
1049} 1058}
1050 1059
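The mondo block allocation uses the standard over-allocate-and-round-up trick to get 64-byte alignment from an allocator that does not guarantee it: allocate size + 63 bytes (hence the 127 for a 64-byte block above) and round the pointer up to the next boundary. A userspace equivalent:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t want = 64, align = 64;
        void *p = calloc(1, want + align - 1);  /* 127 bytes */
        if (!p)
                return 1;

        void *aligned = (void *)(((uintptr_t)p + align - 1) &
                                 ~(uintptr_t)(align - 1));
        printf("raw %p -> aligned %p\n", p, aligned);

        free(p);        /* free the original pointer, not the aligned one */
        return 0;
}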
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index e7f652be9e61..44f32dd4477f 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
37/* smp_64.c */ 37/* smp_64.c */
38void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs); 38void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
39void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs); 39void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
40void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
41void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs); 40void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
42void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs); 41void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
43 42
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 9ddc4928a089..c1566170964f 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -311,7 +311,7 @@ static int genregs64_set(struct task_struct *target,
311 } 311 }
312 312
313 if (!ret) { 313 if (!ret) {
314 unsigned long y; 314 unsigned long y = regs->y;
315 315
316 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 316 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
317 &y, 317 &y,
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 19cd08d18672..4511caa3b7e9 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -617,22 +617,48 @@ retry:
617 } 617 }
618} 618}
619 619
620/* Multi-cpu list version. */ 620#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
621#define MONDO_USEC_WAIT_MIN 2
622#define MONDO_USEC_WAIT_MAX 100
623#define MONDO_RETRY_LIMIT 500000
624
625/* Multi-cpu list version.
626 *
627 * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
 628 * Sometimes not all cpus receive the mondo, requiring us to re-send
 629 * it until all cpus have received it, or until some cpus are truly
 630 * stuck, unable to receive the mondo, and we time out.
 631 * Occasionally a target cpu strand is borrowed briefly by the hypervisor
 632 * to perform guest service, such as PCIe error handling. Considering the
 633 * service time, a 1 second overall wait is reasonable for 1 cpu.
 634 * Two in-between mondo check wait times are defined here: 2 usec for a
 635 * single-cpu quick turnaround and up to 100 usec for a large cpu count.
 636 * Delivering the mondo to a large number of cpus can take longer, so we
 637 * adjust the retry count as long as target cpus are making forward progress.
638 */
621static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt) 639static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
622{ 640{
623 int retries, this_cpu, prev_sent, i, saw_cpu_error; 641 int this_cpu, tot_cpus, prev_sent, i, rem;
642 int usec_wait, retries, tot_retries;
643 u16 first_cpu = 0xffff;
644 unsigned long xc_rcvd = 0;
624 unsigned long status; 645 unsigned long status;
646 int ecpuerror_id = 0;
647 int enocpu_id = 0;
625 u16 *cpu_list; 648 u16 *cpu_list;
649 u16 cpu;
626 650
627 this_cpu = smp_processor_id(); 651 this_cpu = smp_processor_id();
628
629 cpu_list = __va(tb->cpu_list_pa); 652 cpu_list = __va(tb->cpu_list_pa);
630 653 usec_wait = cnt * MONDO_USEC_WAIT_MIN;
631 saw_cpu_error = 0; 654 if (usec_wait > MONDO_USEC_WAIT_MAX)
632 retries = 0; 655 usec_wait = MONDO_USEC_WAIT_MAX;
656 retries = tot_retries = 0;
657 tot_cpus = cnt;
633 prev_sent = 0; 658 prev_sent = 0;
659
634 do { 660 do {
635 int forward_progress, n_sent; 661 int n_sent, mondo_delivered, target_cpu_busy;
636 662
637 status = sun4v_cpu_mondo_send(cnt, 663 status = sun4v_cpu_mondo_send(cnt,
638 tb->cpu_list_pa, 664 tb->cpu_list_pa,
@@ -640,94 +666,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
640 666
641 /* HV_EOK means all cpus received the xcall, we're done. */ 667 /* HV_EOK means all cpus received the xcall, we're done. */
642 if (likely(status == HV_EOK)) 668 if (likely(status == HV_EOK))
643 break; 669 goto xcall_done;
670
671 /* If not these non-fatal errors, panic */
672 if (unlikely((status != HV_EWOULDBLOCK) &&
673 (status != HV_ECPUERROR) &&
674 (status != HV_ENOCPU)))
675 goto fatal_errors;
644 676
645 /* First, see if we made any forward progress. 677 /* First, see if we made any forward progress.
646 * 678 *
679 * Go through the cpu_list, count the target cpus that have
680 * received our mondo (n_sent), and those that did not (rem).
 681 * Re-pack cpu_list with the cpus that remain to be retried in the
682 * front - this simplifies tracking the truly stalled cpus.
683 *
647 * The hypervisor indicates successful sends by setting 684 * The hypervisor indicates successful sends by setting
648 * cpu list entries to the value 0xffff. 685 * cpu list entries to the value 0xffff.
686 *
687 * EWOULDBLOCK means some target cpus did not receive the
688 * mondo and retry usually helps.
689 *
690 * ECPUERROR means at least one target cpu is in error state,
691 * it's usually safe to skip the faulty cpu and retry.
692 *
 693 * ENOCPU means one of the target cpus doesn't belong to the
 694 * domain, perhaps offlined, which is unexpected but not
 695 * fatal, and it's okay to skip the offlined cpu.
649 */ 696 */
697 rem = 0;
650 n_sent = 0; 698 n_sent = 0;
651 for (i = 0; i < cnt; i++) { 699 for (i = 0; i < cnt; i++) {
652 if (likely(cpu_list[i] == 0xffff)) 700 cpu = cpu_list[i];
701 if (likely(cpu == 0xffff)) {
653 n_sent++; 702 n_sent++;
703 } else if ((status == HV_ECPUERROR) &&
704 (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
705 ecpuerror_id = cpu + 1;
706 } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
707 enocpu_id = cpu + 1;
708 } else {
709 cpu_list[rem++] = cpu;
710 }
654 } 711 }
655 712
656 forward_progress = 0; 713 /* No cpu remained, we're done. */
657 if (n_sent > prev_sent) 714 if (rem == 0)
658 forward_progress = 1; 715 break;
659 716
660 prev_sent = n_sent; 717 /* Otherwise, update the cpu count for retry. */
718 cnt = rem;
661 719
662 /* If we get a HV_ECPUERROR, then one or more of the cpus 720 /* Record the overall number of mondos received by the
663 * in the list are in error state. Use the cpu_state() 721 * first of the remaining cpus.
664 * hypervisor call to find out which cpus are in error state.
665 */ 722 */
666 if (unlikely(status == HV_ECPUERROR)) { 723 if (first_cpu != cpu_list[0]) {
667 for (i = 0; i < cnt; i++) { 724 first_cpu = cpu_list[0];
668 long err; 725 xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
669 u16 cpu; 726 }
670 727
671 cpu = cpu_list[i]; 728 /* Was any mondo delivered successfully? */
672 if (cpu == 0xffff) 729 mondo_delivered = (n_sent > prev_sent);
673 continue; 730 prev_sent = n_sent;
674 731
675 err = sun4v_cpu_state(cpu); 732 /* or, was any target cpu busy processing other mondos? */
676 if (err == HV_CPU_STATE_ERROR) { 733 target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
677 saw_cpu_error = (cpu + 1); 734 xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
678 cpu_list[i] = 0xffff;
679 }
680 }
681 } else if (unlikely(status != HV_EWOULDBLOCK))
682 goto fatal_mondo_error;
683 735
684 /* Don't bother rewriting the CPU list, just leave the 736 /* Retry count is for no progress. If we're making progress,
685 * 0xffff and non-0xffff entries in there and the 737 * reset the retry count.
686 * hypervisor will do the right thing.
687 *
688 * Only advance timeout state if we didn't make any
689 * forward progress.
690 */ 738 */
691 if (unlikely(!forward_progress)) { 739 if (likely(mondo_delivered || target_cpu_busy)) {
692 if (unlikely(++retries > 10000)) 740 tot_retries += retries;
693 goto fatal_mondo_timeout; 741 retries = 0;
694 742 } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
695 /* Delay a little bit to let other cpus catch up 743 goto fatal_mondo_timeout;
696 * on their cpu mondo queue work.
697 */
698 udelay(2 * cnt);
699 } 744 }
700 } while (1);
701 745
702 if (unlikely(saw_cpu_error)) 746 /* Delay a little bit to let other cpus catch up on
703 goto fatal_mondo_cpu_error; 747 * their cpu mondo queue work.
748 */
749 if (!mondo_delivered)
750 udelay(usec_wait);
704 751
705 return; 752 retries++;
753 } while (1);
706 754
707fatal_mondo_cpu_error: 755xcall_done:
708 printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus " 756 if (unlikely(ecpuerror_id > 0)) {
709 "(including %d) were in error state\n", 757 pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
710 this_cpu, saw_cpu_error - 1); 758 this_cpu, ecpuerror_id - 1);
759 } else if (unlikely(enocpu_id > 0)) {
760 pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
761 this_cpu, enocpu_id - 1);
762 }
711 return; 763 return;
712 764
765fatal_errors:
766 /* fatal errors include bad alignment, etc */
767 pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
768 this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
769 panic("Unexpected SUN4V mondo error %lu\n", status);
770
713fatal_mondo_timeout: 771fatal_mondo_timeout:
714 printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward " 772 /* some cpus being non-responsive to the cpu mondo */
715 " progress after %d retries.\n", 773 pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
716 this_cpu, retries); 774 this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
717 goto dump_cpu_list_and_out; 775 panic("SUN4V mondo timeout panic\n");
718
719fatal_mondo_error:
720 printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
721 this_cpu, status);
722 printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
723 "mondo_block_pa(%lx)\n",
724 this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
725
726dump_cpu_list_and_out:
727 printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
728 for (i = 0; i < cnt; i++)
729 printk("%u ", cpu_list[i]);
730 printk("]\n");
731} 776}
732 777
733static void (*xcall_deliver_impl)(struct trap_per_cpu *, int); 778static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
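The retry loop above only charges the retry budget for rounds with no progress, where progress means either a newly delivered mondo or the first pending cpu visibly draining its queue. A skeleton of that policy (the progress signals are faked):

#include <stdio.h>

#define RETRY_LIMIT 500000

int main(void)
{
        int retries = 0, tot_retries = 0;

        for (int round = 0; round < 8; round++) {       /* "cpus pending" */
                int delivered = (round % 3 == 0);       /* faked signal   */
                int target_busy = 0;                    /* faked counter  */

                if (delivered || target_busy) {
                        tot_retries += retries;
                        retries = 0;    /* progress: reset the budget */
                } else if (++retries > RETRY_LIMIT) {
                        puts("timeout: no forward progress");
                        return 1;
                }
                /* the real code also waits usec_wait here when nothing
                 * was delivered, to let target cpus drain their queues */
        }
        printf("done, absorbed %d no-progress rounds\n",
               tot_retries + retries);
        return 0;
}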
@@ -959,37 +1004,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
959 preempt_enable(); 1004 preempt_enable();
960} 1005}
961 1006
962void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
963{
964 struct mm_struct *mm;
965 unsigned long flags;
966
967 clear_softint(1 << irq);
968
969 /* See if we need to allocate a new TLB context because
970 * the version of the one we are using is now out of date.
971 */
972 mm = current->active_mm;
973 if (unlikely(!mm || (mm == &init_mm)))
974 return;
975
976 spin_lock_irqsave(&mm->context.lock, flags);
977
978 if (unlikely(!CTX_VALID(mm->context)))
979 get_new_mmu_context(mm);
980
981 spin_unlock_irqrestore(&mm->context.lock, flags);
982
983 load_secondary_context(mm);
984 __flush_tlb_mm(CTX_HWBITS(mm->context),
985 SECONDARY_CONTEXT);
986}
987
988void smp_new_mmu_context_version(void)
989{
990 smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
991}
992
993#ifdef CONFIG_KGDB 1007#ifdef CONFIG_KGDB
994void kgdb_roundup_cpus(unsigned long flags) 1008void kgdb_roundup_cpus(unsigned long flags)
995{ 1009{
diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
index 559bc5e9c199..34631995859a 100644
--- a/arch/sparc/kernel/sun4v_ivec.S
+++ b/arch/sparc/kernel/sun4v_ivec.S
@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
26 ldxa [%g0] ASI_SCRATCHPAD, %g4 26 ldxa [%g0] ASI_SCRATCHPAD, %g4
27 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4 27 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
28 28
29 /* Get smp_processor_id() into %g3 */
30 sethi %hi(trap_block), %g5
31 or %g5, %lo(trap_block), %g5
32 sub %g4, %g5, %g3
33 srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
34
35 /* Increment cpu_mondo_counter[smp_processor_id()] */
36 sethi %hi(cpu_mondo_counter), %g5
37 or %g5, %lo(cpu_mondo_counter), %g5
38 sllx %g3, 3, %g3
39 add %g5, %g3, %g5
40 ldx [%g5], %g3
41 add %g3, 1, %g3
42 stx %g3, [%g5]
43
29 /* Get CPU mondo queue base phys address into %g7. */ 44 /* Get CPU mondo queue base phys address into %g7. */
30 ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 45 ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
31 46
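The assembly added above amounts to one line of C, an increment of this cpu's slot in cpu_mondo_counter[] (the cpu id is derived from the trap block address). A trivial equivalent:

#include <stdio.h>

unsigned long cpu_mondo_counter[64];            /* NR_CPUS stand-in */

static void count_mondo(int cpu)
{
        cpu_mondo_counter[cpu]++;               /* ldx / add 1 / stx */
}

int main(void)
{
        count_mondo(3);
        count_mondo(3);
        printf("cpu 3 received %lu mondos\n", cpu_mondo_counter[3]);
        return 0;
}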
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index b489e9759518..98a5cf313d39 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -118,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
118 118
119 vma = find_vma(mm, addr); 119 vma = find_vma(mm, addr);
120 if (task_size - len >= addr && 120 if (task_size - len >= addr &&
121 (!vma || addr + len <= vma->vm_start)) 121 (!vma || addr + len <= vm_start_gap(vma)))
122 return addr; 122 return addr;
123 } 123 }
124 124
@@ -181,7 +181,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
181 181
182 vma = find_vma(mm, addr); 182 vma = find_vma(mm, addr);
183 if (task_size - len >= addr && 183 if (task_size - len >= addr &&
184 (!vma || addr + len <= vma->vm_start)) 184 (!vma || addr + len <= vm_start_gap(vma)))
185 return addr; 185 return addr;
186 } 186 }
187 187
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index d21cd625c0de..d883c5951e8b 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -85,7 +85,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
85 85
86void bad_trap(struct pt_regs *regs, long lvl) 86void bad_trap(struct pt_regs *regs, long lvl)
87{ 87{
88 char buffer[32]; 88 char buffer[36];
89 siginfo_t info; 89 siginfo_t info;
90 90
91 if (notify_die(DIE_TRAP, "bad trap", regs, 91 if (notify_die(DIE_TRAP, "bad trap", regs,
@@ -116,7 +116,7 @@ void bad_trap(struct pt_regs *regs, long lvl)
116 116
117void bad_trap_tl1(struct pt_regs *regs, long lvl) 117void bad_trap_tl1(struct pt_regs *regs, long lvl)
118{ 118{
119 char buffer[32]; 119 char buffer[36];
120 120
121 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, 121 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
122 0, lvl, SIGTRAP) == NOTIFY_STOP) 122 0, lvl, SIGTRAP) == NOTIFY_STOP)
@@ -2659,6 +2659,7 @@ void do_getpsr(struct pt_regs *regs)
2659 } 2659 }
2660} 2660}
2661 2661
2662u64 cpu_mondo_counter[NR_CPUS] = {0};
2662struct trap_per_cpu trap_block[NR_CPUS]; 2663struct trap_per_cpu trap_block[NR_CPUS];
2663EXPORT_SYMBOL(trap_block); 2664EXPORT_SYMBOL(trap_block);
2664 2665
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index d568c8207af7..7d961f6e3907 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -375,6 +375,7 @@ tsb_flush:
375 * %o1: TSB base config pointer 375 * %o1: TSB base config pointer
376 * %o2: TSB huge config pointer, or NULL if none 376 * %o2: TSB huge config pointer, or NULL if none
377 * %o3: Hypervisor TSB descriptor physical address 377 * %o3: Hypervisor TSB descriptor physical address
378 * %o4: Secondary context to load, if non-zero
378 * 379 *
379 * We have to run this whole thing with interrupts 380 * We have to run this whole thing with interrupts
380 * disabled so that the current cpu doesn't change 381 * disabled so that the current cpu doesn't change
@@ -387,6 +388,17 @@ __tsb_context_switch:
387 rdpr %pstate, %g1 388 rdpr %pstate, %g1
388 wrpr %g1, PSTATE_IE, %pstate 389 wrpr %g1, PSTATE_IE, %pstate
389 390
391 brz,pn %o4, 1f
392 mov SECONDARY_CONTEXT, %o5
393
394661: stxa %o4, [%o5] ASI_DMMU
395 .section .sun4v_1insn_patch, "ax"
396 .word 661b
397 stxa %o4, [%o5] ASI_MMU
398 .previous
399 flush %g6
400
4011:
390 TRAP_LOAD_TRAP_BLOCK(%g2, %g3) 402 TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
391 403
392 stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] 404 stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
@@ -470,13 +482,16 @@ __tsb_context_switch:
470 .type copy_tsb,#function 482 .type copy_tsb,#function
471copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size 483copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
472 * %o2=new_tsb_base, %o3=new_tsb_size 484 * %o2=new_tsb_base, %o3=new_tsb_size
485 * %o4=page_size_shift
473 */ 486 */
474 sethi %uhi(TSB_PASS_BITS), %g7 487 sethi %uhi(TSB_PASS_BITS), %g7
475 srlx %o3, 4, %o3 488 srlx %o3, 4, %o3
476 add %o0, %o1, %g1 /* end of old tsb */ 489 add %o0, %o1, %o1 /* end of old tsb */
477 sllx %g7, 32, %g7 490 sllx %g7, 32, %g7
478 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ 491 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
479 492
493 mov %o4, %g1 /* page_size_shift */
494
480661: prefetcha [%o0] ASI_N, #one_read 495661: prefetcha [%o0] ASI_N, #one_read
481 .section .tsb_phys_patch, "ax" 496 .section .tsb_phys_patch, "ax"
482 .word 661b 497 .word 661b
@@ -501,9 +516,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
501 /* This can definitely be computed faster... */ 516 /* This can definitely be computed faster... */
502 srlx %o0, 4, %o5 /* Build index */ 517 srlx %o0, 4, %o5 /* Build index */
503 and %o5, 511, %o5 /* Mask index */ 518 and %o5, 511, %o5 /* Mask index */
504 sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ 519 sllx %o5, %g1, %o5 /* Put into vaddr position */
505 or %o4, %o5, %o4 /* Full VADDR. */ 520 or %o4, %o5, %o4 /* Full VADDR. */
506 srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ 521 srlx %o4, %g1, %o4 /* Shift down to create index */
507 and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ 522 and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
508 sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ 523 sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
509 TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ 524 TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
@@ -511,7 +526,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
511 TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ 526 TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
512 527
51380: add %o0, 16, %o0 52880: add %o0, 16, %o0
514 cmp %o0, %g1 529 cmp %o0, %o1
515 bne,pt %xcc, 90b 530 bne,pt %xcc, 90b
516 nop 531 nop
517 532
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index c6dfdaa29e20..170ead662f2a 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
50tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) 50tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
51tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) 51tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
52tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) 52tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
53tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4) 53tl0_irq4: BTRAP(0x44)
54#else 54#else
55tl0_irq1: BTRAP(0x41) 55tl0_irq1: BTRAP(0x41)
56tl0_irq2: BTRAP(0x42) 56tl0_irq2: BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index cb5789c9f961..34824ca396f0 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -284,13 +284,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
284 if (!id) { 284 if (!id) {
285 dev_set_name(&vdev->dev, "%s", bus_id_name); 285 dev_set_name(&vdev->dev, "%s", bus_id_name);
286 vdev->dev_no = ~(u64)0; 286 vdev->dev_no = ~(u64)0;
287 vdev->id = ~(u64)0;
287 } else if (!cfg_handle) { 288 } else if (!cfg_handle) {
288 dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id); 289 dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
289 vdev->dev_no = *id; 290 vdev->dev_no = *id;
291 vdev->id = ~(u64)0;
290 } else { 292 } else {
291 dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name, 293 dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
292 *cfg_handle, *id); 294 *cfg_handle, *id);
293 vdev->dev_no = *cfg_handle; 295 vdev->dev_no = *cfg_handle;
296 vdev->id = *id;
294 } 297 }
295 298
296 vdev->dev.parent = parent; 299 vdev->dev.parent = parent;
@@ -333,27 +336,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
333 (void) vio_create_one(hp, node, &root_vdev->dev); 336 (void) vio_create_one(hp, node, &root_vdev->dev);
334} 337}
335 338
339struct vio_md_node_query {
340 const char *type;
341 u64 dev_no;
342 u64 id;
343};
344
336static int vio_md_node_match(struct device *dev, void *arg) 345static int vio_md_node_match(struct device *dev, void *arg)
337{ 346{
347 struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
338 struct vio_dev *vdev = to_vio_dev(dev); 348 struct vio_dev *vdev = to_vio_dev(dev);
339 349
340 if (vdev->mp == (u64) arg) 350 if (vdev->dev_no != query->dev_no)
341 return 1; 351 return 0;
352 if (vdev->id != query->id)
353 return 0;
354 if (strcmp(vdev->type, query->type))
355 return 0;
342 356
343 return 0; 357 return 1;
344} 358}
345 359
346static void vio_remove(struct mdesc_handle *hp, u64 node) 360static void vio_remove(struct mdesc_handle *hp, u64 node)
347{ 361{
362 const char *type;
363 const u64 *id, *cfg_handle;
364 u64 a;
365 struct vio_md_node_query query;
348 struct device *dev; 366 struct device *dev;
349 367
350 dev = device_find_child(&root_vdev->dev, (void *) node, 368 type = mdesc_get_property(hp, node, "device-type", NULL);
369 if (!type) {
370 type = mdesc_get_property(hp, node, "name", NULL);
371 if (!type)
372 type = mdesc_node_name(hp, node);
373 }
374
375 query.type = type;
376
377 id = mdesc_get_property(hp, node, "id", NULL);
378 cfg_handle = NULL;
379 mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
380 u64 target;
381
382 target = mdesc_arc_target(hp, a);
383 cfg_handle = mdesc_get_property(hp, target,
384 "cfg-handle", NULL);
385 if (cfg_handle)
386 break;
387 }
388
389 if (!id) {
390 query.dev_no = ~(u64)0;
391 query.id = ~(u64)0;
392 } else if (!cfg_handle) {
393 query.dev_no = *id;
394 query.id = ~(u64)0;
395 } else {
396 query.dev_no = *cfg_handle;
397 query.id = *id;
398 }
399
400 dev = device_find_child(&root_vdev->dev, &query,
351 vio_md_node_match); 401 vio_md_node_match);
352 if (dev) { 402 if (dev) {
353 printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); 403 printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
354 404
355 device_unregister(dev); 405 device_unregister(dev);
356 put_device(dev); 406 put_device(dev);
407 } else {
408 if (!id)
409 printk(KERN_ERR "VIO: Removed unknown %s node.\n",
410 type);
411 else if (!cfg_handle)
412 printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
413 type, *id);
414 else
415 printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
416 type, *cfg_handle, *id);
357 } 417 }
358} 418}
359 419
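The rework matches devices on the stable triple (type, dev_no, id) instead of a machine-description node pointer, which can change across MD updates. A sketch of the new match predicate (struct layouts reduced to the matched fields):

#include <stdio.h>
#include <string.h>

struct vio_query { const char *type; unsigned long dev_no, id; };
struct vio_dev_sketch { const char *type; unsigned long dev_no, id; };

/* Cheapest comparisons first, string compare last, as in the diff. */
static int node_match(const struct vio_dev_sketch *vdev,
                      const struct vio_query *q)
{
        if (vdev->dev_no != q->dev_no)
                return 0;
        if (vdev->id != q->id)
                return 0;
        return strcmp(vdev->type, q->type) == 0;
}

int main(void)
{
        struct vio_dev_sketch disk = { "vdc-port", 0, 5 };
        struct vio_query q = { "vdc-port", 0, 5 };

        printf("match: %d\n", node_match(&disk, &q));
        return 0;
}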
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index da1142401bf4..ffa842b4d7d4 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -115,7 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
115 addr = ALIGN(addr, HPAGE_SIZE); 115 addr = ALIGN(addr, HPAGE_SIZE);
116 vma = find_vma(mm, addr); 116 vma = find_vma(mm, addr);
117 if (task_size - len >= addr && 117 if (task_size - len >= addr &&
118 (!vma || addr + len <= vma->vm_start)) 118 (!vma || addr + len <= vm_start_gap(vma)))
119 return addr; 119 return addr;
120 } 120 }
121 if (mm->get_unmapped_area == arch_get_unmapped_area) 121 if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index eb8287155279..3b7092d9ea8f 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -301,7 +301,7 @@ void __init mem_init(void)
301 301
302 302
303 /* Saves us work later. */ 303 /* Saves us work later. */
304 memset((void *)&empty_zero_page, 0, PAGE_SIZE); 304 memset((void *)empty_zero_page, 0, PAGE_SIZE);
305 305
306 i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); 306 i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
307 i += 1; 307 i += 1;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 3d3414c14792..384aba109d7c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -656,10 +656,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
656 656
657/* get_new_mmu_context() uses "cache + 1". */ 657/* get_new_mmu_context() uses "cache + 1". */
658DEFINE_SPINLOCK(ctx_alloc_lock); 658DEFINE_SPINLOCK(ctx_alloc_lock);
659unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; 659unsigned long tlb_context_cache = CTX_FIRST_VERSION;
660#define MAX_CTX_NR (1UL << CTX_NR_BITS) 660#define MAX_CTX_NR (1UL << CTX_NR_BITS)
661#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) 661#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
662DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); 662DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
663DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
664
665static void mmu_context_wrap(void)
666{
667 unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
668 unsigned long new_ver, new_ctx, old_ctx;
669 struct mm_struct *mm;
670 int cpu;
671
672 bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
673
674 /* Reserve kernel context */
675 set_bit(0, mmu_context_bmap);
676
677 new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
678 if (unlikely(new_ver == 0))
679 new_ver = CTX_FIRST_VERSION;
680 tlb_context_cache = new_ver;
681
682 /*
683 * Make sure that any new mm that are added into per_cpu_secondary_mm,
684 * are going to go through get_new_mmu_context() path.
685 */
686 mb();
687
688 /*
689 * Updated versions to current on those CPUs that had valid secondary
690 * contexts
691 */
692 for_each_online_cpu(cpu) {
693 /*
694 * If a new mm is stored after we took this mm from the array,
695 * it will go into get_new_mmu_context() path, because we
696 * already bumped the version in tlb_context_cache.
697 */
698 mm = per_cpu(per_cpu_secondary_mm, cpu);
699
700 if (unlikely(!mm || mm == &init_mm))
701 continue;
702
703 old_ctx = mm->context.sparc64_ctx_val;
704 if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
705 new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
706 set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
707 mm->context.sparc64_ctx_val = new_ctx;
708 }
709 }
710}
663 711
664/* Caller does TLB context flushing on local CPU if necessary. 712/* Caller does TLB context flushing on local CPU if necessary.
665 * The caller also ensures that CTX_VALID(mm->context) is false. 713 * The caller also ensures that CTX_VALID(mm->context) is false.
@@ -675,48 +723,30 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
-	int new_version;
 
 	spin_lock(&ctx_alloc_lock);
+retry:
+	/* wrap might have happened, test again if our context became valid */
+	if (unlikely(CTX_VALID(mm->context)))
+		goto out;
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-	new_version = 0;
 	if (new_ctx >= (1 << CTX_NR_BITS)) {
 		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 		if (new_ctx >= ctx) {
-			int i;
-			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-				CTX_FIRST_VERSION;
-			if (new_ctx == 1)
-				new_ctx = CTX_FIRST_VERSION;
-
-			/* Don't call memset, for 16 entries that's just
-			 * plain silly...
-			 */
-			mmu_context_bmap[0] = 3;
-			mmu_context_bmap[1] = 0;
-			mmu_context_bmap[2] = 0;
-			mmu_context_bmap[3] = 0;
-			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-				mmu_context_bmap[i + 0] = 0;
-				mmu_context_bmap[i + 1] = 0;
-				mmu_context_bmap[i + 2] = 0;
-				mmu_context_bmap[i + 3] = 0;
-			}
-			new_version = 1;
-			goto out;
+			mmu_context_wrap();
+			goto retry;
 		}
 	}
+	if (mm->context.sparc64_ctx_val)
+		cpumask_clear(mm_cpumask(mm));
 	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
 	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
 	spin_unlock(&ctx_alloc_lock);
-
-	if (unlikely(new_version))
-		smp_new_mmu_context_version();
 }
 
 static int numa_enabled = 1;
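
Aside (illustration, not part of the patch): the wrap-and-retry allocation scheme above can be modeled by the standalone C sketch below. All names and sizes here are hypothetical simplifications; the point is that each context carries a generation tag in the bits above the context number, and exhausting the bitmap starts a new generation and retries instead of broadcasting an IPI to every CPU.

    #include <stdint.h>
    #include <string.h>

    #define NR_BITS   8                        /* toy size; sparc64 uses CTX_NR_BITS */
    #define NR_CTX    (1u << NR_BITS)
    #define CTX_MASK  (NR_CTX - 1)
    #define VER_STEP  (1u << NR_BITS)          /* generation lives above the ctx bits */

    static uint32_t cache = VER_STEP;          /* last handed-out version|ctx */
    static unsigned char bmap[NR_CTX] = { 1 }; /* slot 0 reserved (kernel context) */

    static uint32_t get_new_ctx(void)
    {
        for (;;) {
            uint32_t ctx = (cache + 1) & CTX_MASK;

            while (ctx < NR_CTX && bmap[ctx])  /* find_next_zero_bit, simplified */
                ctx++;
            if (ctx < NR_CTX) {
                bmap[ctx] = 1;
                cache = (cache & ~(uint32_t)CTX_MASK) | ctx;
                return cache;                  /* version|ctx, like sparc64_ctx_val */
            }
            /* wrap: new generation, reset the map, keep ctx 0 reserved
             * (the kernel also handles generation overflow; omitted here) */
            memset(bmap, 0, sizeof(bmap));
            bmap[0] = 1;
            cache = (cache & ~(uint32_t)CTX_MASK) + VER_STEP;
        }
    }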
@@ -1493,7 +1523,7 @@ bool kern_addr_valid(unsigned long addr)
 	if ((long)addr < 0L) {
 		unsigned long pa = __pa(addr);
 
-		if ((addr >> max_phys_bits) != 0UL)
+		if ((pa >> max_phys_bits) != 0UL)
 			return false;
 
 		return pfn_valid(pa >> PAGE_SHIFT);
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 9cdeca0fa955..266411291634 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -451,7 +451,8 @@ retry_tsb_alloc:
 		extern void copy_tsb(unsigned long old_tsb_base,
 				     unsigned long old_tsb_size,
 				     unsigned long new_tsb_base,
-				     unsigned long new_tsb_size);
+				     unsigned long new_tsb_size,
+				     unsigned long page_size_shift);
 		unsigned long old_tsb_base = (unsigned long) old_tsb;
 		unsigned long new_tsb_base = (unsigned long) new_tsb;
 
@@ -459,7 +460,9 @@ retry_tsb_alloc:
 			old_tsb_base = __pa(old_tsb_base);
 			new_tsb_base = __pa(new_tsb_base);
 		}
-		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+			 tsb_index == MM_TSB_BASE ?
+			 PAGE_SHIFT : REAL_HPAGE_SHIFT);
 	}
 
 	mm->context.tsb_block[tsb_index].tsb = new_tsb;
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6cd3189..fcf4d27a38fb 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@ xcall_capture:
 	wr	%g0, (1 << PIL_SMP_CAPTURE), %set_softint
 	retry
 
-	.globl		xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
-	wr	%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
-	retry
-
 #ifdef CONFIG_KGDB
 	.globl		xcall_kgdb_capture
 xcall_kgdb_capture:
diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
index 17bd2e167e07..df707a8ad311 100644
--- a/arch/sparc/power/hibernate.c
+++ b/arch/sparc/power/hibernate.c
@@ -35,6 +35,5 @@ void restore_processor_state(void)
 {
 	struct mm_struct *mm = current->active_mm;
 
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 }
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index bdc126faf741..6239aa155f6d 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
 			const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	struct pt_regs regs;
+	struct pt_regs regs = *task_pt_regs(target);
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
 				 sizeof(regs));
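
Aside (illustration, not part of the patch): the one-line change above fixes an infoleak pattern — a partially written struct copied back from an uninitialized stack variable. A minimal model, with hypothetical names:

    #include <string.h>

    struct regs { long gr[56]; };

    /* 'src_len' bytes come from the caller; the rest must not be stale stack. */
    void gpr_set(struct regs *task_regs, const void *src, size_t src_len)
    {
        struct regs regs = *task_regs;   /* was: uninitialized local */

        if (src_len > sizeof(regs))
            src_len = sizeof(regs);
        memcpy(&regs, src, src_len);     /* a short write leaves defined bytes */
        *task_regs = regs;               /* nothing stale leaks back out */
    }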
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index c034dc3fe2d4..c97ee6c7f949 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -232,7 +232,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 9011a88353de..ed1e9206f830 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -16,7 +16,7 @@
 #ifndef BOOT_BOOT_H
 #define BOOT_BOOT_H
 
-#define STACK_SIZE	512	/* Minimum number of bytes for stack */
+#define STACK_SIZE	1024	/* Minimum number of bytes for stack */
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 318b8465d302..06ceddb3a22e 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -14,6 +14,7 @@
 
 #include <linux/types.h>
 #include "ctype.h"
+#include "string.h"
 
 int memcmp(const void *s1, const void *s2, size_t len)
 {
diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
index 725e820602b1..113588ddb43f 100644
--- a/arch/x86/boot/string.h
+++ b/arch/x86/boot/string.h
@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
 #define memset(d,c,l) __builtin_memset(d,c,l)
 #define memcmp	__builtin_memcmp
 
+extern int strcmp(const char *str1, const char *str2);
+extern int strncmp(const char *cs, const char *ct, size_t count);
+extern size_t strlen(const char *s);
+extern char *strstr(const char *s1, const char *s2);
+extern size_t strnlen(const char *s, size_t maxlen);
+extern unsigned int atou(const char *s);
+extern unsigned long long simple_strtoull(const char *cp, char **endp,
+					  unsigned int base);
+
 #endif /* BOOT_STRING_H */
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 440df0c7a2ee..a69321a77783 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -219,6 +219,29 @@ static int ghash_async_final(struct ahash_request *req)
 	}
 }
 
+static int ghash_async_import(struct ahash_request *req, const void *in)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+	ghash_async_init(req);
+	memcpy(dctx, in, sizeof(*dctx));
+	return 0;
+
+}
+
+static int ghash_async_export(struct ahash_request *req, void *out)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+	memcpy(out, dctx, sizeof(*dctx));
+	return 0;
+
+}
+
 static int ghash_async_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -288,8 +311,11 @@ static struct ahash_alg ghash_async_alg = {
 	.final		= ghash_async_final,
 	.setkey		= ghash_async_setkey,
 	.digest		= ghash_async_digest,
+	.export		= ghash_async_export,
+	.import		= ghash_async_import,
 	.halg = {
 		.digestsize	= GHASH_DIGEST_SIZE,
+		.statesize = sizeof(struct ghash_desc_ctx),
 		.base = {
 			.cra_name		= "ghash",
 			.cra_driver_name	= "ghash-clmulni",
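
Aside (illustration, not part of the patch): with .export/.import and .statesize wired up, users of the ahash API can snapshot and resume a partial digest. A hedged usage sketch (kernel crypto API helper names, error handling elided):

    /* state[] must hold at least crypto_ahash_statesize(tfm) bytes. */
    void checkpoint_and_resume(struct crypto_ahash *tfm, struct ahash_request *req)
    {
        char state[512];

        BUG_ON(crypto_ahash_statesize(tfm) > sizeof(state));
        crypto_ahash_export(req, state);  /* snapshot the partial digest */
        /* ... the request may be reused for other work here ... */
        crypto_ahash_import(req, state);  /* continue where we left off */
    }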
diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
index 1cd792db15ef..1eab79c9ac48 100644
--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
@@ -117,11 +117,10 @@
 	.set T1, REG_T1
 .endm
 
-#define K_BASE		%r8
 #define HASH_PTR	%r9
+#define BLOCKS_CTR	%r8
 #define BUFFER_PTR	%r10
 #define BUFFER_PTR2	%r13
-#define BUFFER_END	%r11
 
 #define PRECALC_BUF	%r14
 #define WK_BUF		%r15
@@ -205,14 +204,14 @@
 	 * blended AVX2 and ALU instruction scheduling
 	 * 1 vector iteration per 8 rounds
 	 */
-	vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
+	vmovdqu (i * 2)(BUFFER_PTR), W_TMP
 	.elseif ((i & 7) == 1)
-	vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
+	vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
 		 WY_TMP, WY_TMP
 	.elseif ((i & 7) == 2)
 	vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
 	.elseif ((i & 7) == 4)
-	vpaddd  K_XMM(K_BASE), WY, WY_TMP
+	vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 	.elseif ((i & 7) == 7)
 	vmovdqu  WY_TMP, PRECALC_WK(i&~7)
 
@@ -255,7 +254,7 @@
 	vpxor	WY, WY_TMP, WY_TMP
 	.elseif ((i & 7) == 7)
 	vpxor	WY_TMP2, WY_TMP, WY
-	vpaddd	K_XMM(K_BASE), WY, WY_TMP
+	vpaddd	K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 	vmovdqu	WY_TMP, PRECALC_WK(i&~7)
 
 	PRECALC_ROTATE_WY
@@ -291,7 +290,7 @@
 	vpsrld	$30, WY, WY
 	vpor	WY, WY_TMP, WY
 	.elseif ((i & 7) == 7)
-	vpaddd	K_XMM(K_BASE), WY, WY_TMP
+	vpaddd	K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 	vmovdqu	WY_TMP, PRECALC_WK(i&~7)
 
 	PRECALC_ROTATE_WY
@@ -446,6 +445,16 @@
 
 .endm
 
+/* Add constant only if (%2 > %3) condition met (uses RTA as temp)
+ * %1 + %2 >= %3 ? %4 : 0
+ */
+.macro ADD_IF_GE a, b, c, d
+	mov     \a, RTA
+	add     $\d, RTA
+	cmp     $\c, \b
+	cmovge  RTA, \a
+.endm
+
 /*
  * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
  */
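
Aside (illustration, not part of the patch): ADD_IF_GE implements a branch-free conditional pointer advance. In C terms (a model, not the actual code):

    /* a = (b >= c) ? a + d : a  -- mov/add compute a+d into a temp (RTA);
     * cmp/cmovge commit it only while the block counter is big enough. */
    static inline const unsigned char *
    add_if_ge(const unsigned char *a, unsigned long b,
              unsigned long c, unsigned long d)
    {
        const unsigned char *tmp = a + d;   /* mov \a, RTA ; add $\d, RTA  */
        return (b >= c) ? tmp : a;          /* cmp $\c, \b ; cmovge RTA, \a */
    }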
@@ -463,13 +472,16 @@
 	lea	(2*4*80+32)(%rsp), WK_BUF
 
 	# Precalc WK for first 2 blocks
-	PRECALC_OFFSET = 0
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
 	.set i, 0
 	.rept    160
 		PRECALC i
 		.set i, i + 1
 	.endr
-	PRECALC_OFFSET = 128
+
+	/* Go to next block if needed */
+	ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
 	xchg	WK_BUF, PRECALC_BUF
 
 	.align 32
@@ -479,8 +491,8 @@ _loop:
 	 * we use K_BASE value as a signal of a last block,
 	 * it is set below by: cmovae BUFFER_PTR, K_BASE
 	 */
-	cmp	K_BASE, BUFFER_PTR
-	jne	_begin
+	test BLOCKS_CTR, BLOCKS_CTR
+	jnz _begin
 	.align 32
 	jmp	_end
 	.align 32
@@ -512,10 +524,10 @@ _loop0:
 		.set j, j+2
 	.endr
 
-	add	$(2*64), BUFFER_PTR       /* move to next odd-64-byte block */
-	cmp	BUFFER_END, BUFFER_PTR    /* is current block the last one? */
-	cmovae	K_BASE, BUFFER_PTR	/* signal the last iteration smartly */
-
+	/* Update Counter */
+	sub $1, BLOCKS_CTR
+	/* Move to the next block only if needed*/
+	ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
 	/*
 	 * rounds
 	 * 60,62,64,66,68
@@ -532,8 +544,8 @@ _loop0:
 	UPDATE_HASH	12(HASH_PTR), D
 	UPDATE_HASH	16(HASH_PTR), E
 
-	cmp	K_BASE, BUFFER_PTR /* is current block the last one? */
-	je	_loop
+	test	BLOCKS_CTR, BLOCKS_CTR
+	jz	_loop
 
 	mov	TB, B
 
@@ -575,10 +587,10 @@ _loop2:
 		.set j, j+2
 	.endr
 
-	add	$(2*64), BUFFER_PTR2      /* move to next even-64-byte block */
-
-	cmp	BUFFER_END, BUFFER_PTR2   /* is current block the last one */
-	cmovae	K_BASE, BUFFER_PTR       /* signal the last iteration smartly */
+	/* update counter */
+	sub     $1, BLOCKS_CTR
+	/* Move to the next block only if needed*/
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
 
 	jmp	_loop3
 _loop3:
@@ -641,19 +653,12 @@ _loop3:
 
 	avx2_zeroupper
 
-	lea	K_XMM_AR(%rip), K_BASE
-
+	/* Setup initial values */
 	mov	CTX, HASH_PTR
 	mov	BUF, BUFFER_PTR
-	lea	64(BUF), BUFFER_PTR2
-
-	shl	$6, CNT			/* mul by 64 */
-	add	BUF, CNT
-	add	$64, CNT
-	mov	CNT, BUFFER_END
 
-	cmp	BUFFER_END, BUFFER_PTR2
-	cmovae	K_BASE, BUFFER_PTR2
+	mov	BUF, BUFFER_PTR2
+	mov	CNT, BLOCKS_CTR
 
 	xmm_mov	BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
 
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index a55697d19824..cc0f2f5da19b 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1190,6 +1190,8 @@ ENTRY(nmi)
 	 * other IST entries.
 	 */
 
+	ASM_CLAC
+
 	/* Use %rdx as our temp variable throughout */
 	pushq	%rdx
 
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 265c0ed68118..7af017a8958f 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -187,10 +187,10 @@ vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
 $(MODLIB)/vdso: FORCE
 	@mkdir -p $(MODLIB)/vdso
 
-$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
+$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso
 	$(call cmd,vdso_install)
 
 PHONY += vdso_install $(vdso_img_insttargets)
-vdso_install: $(vdso_img_insttargets) FORCE
+vdso_install: $(vdso_img_insttargets)
 
 clean-files := vdso32.so vdso32.so.dbg vdso64* vdso-image-*.c vdsox32.so*
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index 08a317a9ae4b..a7508d7e20b7 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -31,8 +31,10 @@ static int __init vdso32_setup(char *s)
 {
 	vdso32_enabled = simple_strtoul(s, NULL, 0);
 
-	if (vdso32_enabled > 1)
+	if (vdso32_enabled > 1) {
 		pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+		vdso32_enabled = 0;
+	}
 
 	return 1;
 }
@@ -63,13 +65,18 @@ subsys_initcall(sysenter_setup);
 /* Register vsyscall32 into the ABI table */
 #include <linux/sysctl.h>
 
+static const int zero;
+static const int one = 1;
+
 static struct ctl_table abi_table2[] = {
 	{
 		.procname	= "vsyscall32",
 		.data		= &vdso32_enabled,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= (int *)&zero,
+		.extra2		= (int *)&one,
 	},
 	{}
 };
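
Aside (illustration, not part of the patch): the same proc_dointvec_minmax + extra1/extra2 pattern clamps any boolean-style sysctl to [0,1]. A sketch for a hypothetical flag:

    #include <linux/sysctl.h>

    static int my_flag;                  /* hypothetical knob */
    static const int range_min;          /* 0 */
    static const int range_max = 1;

    static struct ctl_table my_table[] = {
        {
            .procname     = "my_flag",
            .data         = &my_flag,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = proc_dointvec_minmax,  /* rejects values outside */
            .extra1       = (int *)&range_min,     /* [extra1, extra2]       */
            .extra2       = (int *)&range_max,
        },
        {}
    };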
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 1514753fd435..bcd3d6199464 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -245,12 +245,13 @@ extern int force_personality32;
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
 
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
+						  (TASK_SIZE / 3 * 2))
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. This could be done in user space,
@@ -278,7 +279,7 @@ struct task_struct;
 
 #define ARCH_DLINFO_IA32					\
 do {								\
-	if (vdso32_enabled) {					\
+	if (VDSO_CURRENT_BASE) {				\
 		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY);		\
 		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);	\
 	}							\
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index e9cd7befcb76..19d14ac23ef9 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -221,6 +221,9 @@ struct x86_emulate_ops {
 	void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
 			  u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
 	void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
+
+	unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
+	void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -290,7 +293,6 @@ struct x86_emulate_ctxt {
 
 	/* interruptibility state, as a result of execution of STI or MOV SS */
 	int interruptibility;
-	int emul_flags;
 
 	bool perm_ok; /* do not check permissions if true */
 	bool ud;	/* inject an #UD if host doesn't support insn */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 690b4027e17c..37db36fddc88 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -405,6 +405,8 @@
 #define MSR_IA32_TSC_ADJUST	0x0000003b
 #define MSR_IA32_BNDCFGS	0x00000d90
 
+#define MSR_IA32_BNDCFGS_RSVD	0x00000ffc
+
 #define MSR_IA32_XSS		0x00000da0
 
 #define FEATURE_CONTROL_LOCKED	(1<<0)
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 0b1ff4c1c14e..fffb2794dd89 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -7,6 +7,7 @@
 bool pat_enabled(void);
 void pat_disable(const char *reason);
 extern void pat_init(void);
+extern void init_cache_modes(void);
 
 extern int reserve_memtype(u64 start, u64 end,
 		enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index d8ce3ec816ab..6503526d7b24 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -72,8 +72,8 @@ static inline void arch_wmb_pmem(void)
  * @size:	number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction. This function requires explicit ordering with an
- * arch_wmb_pmem() call. This API is internal to the x86 PMEM implementation.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
  */
 static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
 {
@@ -87,15 +87,6 @@ static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
 		clwb(p);
 }
 
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
-	return iter_is_iovec(i) == false;
-}
-
 /**
  * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
  * @addr:	PMEM destination address
@@ -114,8 +105,36 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
 	/* TODO: skip the write-back by always using non-temporal stores */
 	len = copy_from_iter_nocache(vaddr, bytes, i);
 
-	if (__iter_needs_pmem_wb(i))
-		__arch_wb_cache_pmem(vaddr, bytes);
+	/*
+	 * In the iovec case on x86_64 copy_from_iter_nocache() uses
+	 * non-temporal stores for the bulk of the transfer, but we need
+	 * to manually flush if the transfer is unaligned. A cached
+	 * memory copy is used when destination or size is not naturally
+	 * aligned. That is:
+	 *   - Require 8-byte alignment when size is 8 bytes or larger.
+	 *   - Require 4-byte alignment when size is 4 bytes.
+	 *
+	 * In the non-iovec case the entire destination needs to be
+	 * flushed.
+	 */
+	if (iter_is_iovec(i)) {
+		unsigned long flushed, dest = (unsigned long) addr;
+
+		if (bytes < 8) {
+			if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+				__arch_wb_cache_pmem(addr, bytes);
+		} else {
+			if (!IS_ALIGNED(dest, 8)) {
+				dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+				__arch_wb_cache_pmem(addr, 1);
+			}
+
+			flushed = dest - (unsigned long) addr;
+			if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+				__arch_wb_cache_pmem(addr + bytes - 1, 1);
+		}
+	} else
+		__arch_wb_cache_pmem(addr, bytes);
 
 	return len;
 }
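
Aside (illustration, not part of the patch): the flush rules above reduce to a small predicate. A standalone model, under the assumption stated in the patch that 4.4-era copy_from_iter_nocache() uses cached stores for unaligned heads/tails and for small non-4-byte copies:

    #include <stddef.h>
    #include <stdint.h>

    /* Returns nonzero when an explicit cache write-back is still needed. */
    static int needs_manual_flush(uintptr_t dest, size_t bytes)
    {
        if (bytes < 8)                   /* only an aligned 4-byte store   */
            return !(bytes == 4 && dest % 4 == 0);  /* is non-temporal     */
        if (dest % 8)                    /* unaligned head went through    */
            return 1;                    /* the cache                      */
        return (bytes % 8) != 0;         /* ...and likewise a ragged tail  */
    }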
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 4c20dd333412..85133b2b8e99 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -43,6 +43,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/smap.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/sched.h>
@@ -213,10 +214,12 @@ privcmd_call(unsigned call,
 	__HYPERCALL_DECLS;
 	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
 
+	stac();
 	asm volatile("call *%[call]"
 		     : __HYPERCALL_5PARAM
 		     : [call] "a" (&hypercall_page[call])
 		     : __HYPERCALL_CLOBBER5);
+	clac();
 
 	return (long)__res;
 }
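
Aside (illustration, not part of the patch): stac()/clac() bracket code that must touch user-accessible memory while SMAP is enforced. The general shape (stac()/clac() are the real <asm/smap.h> helpers; fn is hypothetical):

    static inline long call_with_user_access(long (*fn)(void *), void *arg)
    {
        long ret;

        stac();        /* allow supervisor access to user pages */
        ret = fn(arg);
        clac();        /* close the window again */
        return ret;
    }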
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index e75907601a41..1e5eb9f2ff5f 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -329,6 +329,14 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
 	struct mpc_intsrc mp_irq;
 
 	/*
+	 * Check bus_irq boundary.
+	 */
+	if (bus_irq >= NR_IRQS_LEGACY) {
+		pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
+		return;
+	}
+
+	/*
 	 * Convert 'gsi' to 'ioapic.pin'.
 	 */
 	ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index aaacbd667212..fc91c98bee01 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2115,8 +2115,9 @@ static inline void __init check_timer(void)
 		int idx;
 		idx = find_irq_entry(apic1, pin1, mp_INT);
 		if (idx != -1 && irq_trigger(idx))
-			unmask_ioapic_irq(irq_get_chip_data(0));
+			unmask_ioapic_irq(irq_get_irq_data(0));
 	}
+	irq_domain_deactivate_irq(irq_data);
 	irq_domain_activate_irq(irq_data);
 	if (timer_irq_works()) {
 		if (disable_timer_pin_1 > 0)
@@ -2138,6 +2139,7 @@ static inline void __init check_timer(void)
 	 * legacy devices should be connected to IO APIC #0
 	 */
 	replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+	irq_domain_deactivate_irq(irq_data);
 	irq_domain_activate_irq(irq_data);
 	legacy_pic->unmask(0);
 	if (timer_irq_works()) {
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index e99b15077e94..2116176c1721 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -53,7 +53,7 @@ static const char * const th_names[] = {
53 "load_store", 53 "load_store",
54 "insn_fetch", 54 "insn_fetch",
55 "combined_unit", 55 "combined_unit",
56 "", 56 "decode_unit",
57 "northbridge", 57 "northbridge",
58 "execution_unit", 58 "execution_unit",
59}; 59};
@@ -682,6 +682,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	const char *name = th_names[bank];
 	int err = 0;
 
+	if (!dev)
+		return -ENODEV;
+
 	if (is_shared_bank(bank)) {
 		nb = node_to_amd_nb(amd_get_nb_id(cpu));
 
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index cfc4a966e2b9..83b5f7a323a9 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -30,6 +30,7 @@
 #include <asm/apic.h>
 #include <asm/timer.h>
 #include <asm/reboot.h>
+#include <asm/nmi.h>
 
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -157,6 +158,26 @@ static unsigned char hv_get_nmi_reason(void)
 	return 0;
 }
 
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes
+ * it dificult to process CHANNELMSG_UNLOAD in case of crash. Handle
+ * unknown NMI on the first CPU which gets it.
+ */
+static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
+{
+	static atomic_t nmi_cpu = ATOMIC_INIT(-1);
+
+	if (!unknown_nmi_panic)
+		return NMI_DONE;
+
+	if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
+		return NMI_HANDLED;
+
+	return NMI_DONE;
+}
+#endif
+
 static void __init ms_hyperv_init_platform(void)
 {
 	/*
@@ -182,6 +203,9 @@ static void __init ms_hyperv_init_platform(void)
 		printk(KERN_INFO "HyperV: LAPIC Timer Frequency: %#x\n",
 		       lapic_timer_frequency);
 	}
+
+	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
+			     "hv_nmi_unknown");
 #endif
 
 	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
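
Aside (illustration, not part of the patch): hv_nmi_unknown() is an instance of the "first claimant wins" pattern. A portable model with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int claimant = ATOMIC_VAR_INIT(-1);

    /* Only the first caller swaps -1 -> its id and returns true;
     * every other CPU sees the slot taken and backs off. */
    bool claim_event(int cpu)
    {
        int expected = -1;

        return atomic_compare_exchange_strong(&claimant, &expected, cpu);
    }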
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 1a8256dd6729..5b2f2306fbcc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1996,8 +1996,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 659f01e165d5..2cdae69d7e0b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -153,7 +153,7 @@ static void __intel_pmu_lbr_enable(bool pmi)
 	 */
 	if (cpuc->lbr_sel)
 		lbr_select = cpuc->lbr_sel->config;
-	if (!pmi)
+	if (!pmi && cpuc->lbr_sel)
 		wrmsrl(MSR_LBR_SELECT, lbr_select);
 
 	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
@@ -410,6 +410,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
 		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
 		cpuc->lbr_entries[i].mispred	= 0;
 		cpuc->lbr_entries[i].predicted	= 0;
+		cpuc->lbr_entries[i].in_tx	= 0;
+		cpuc->lbr_entries[i].abort	= 0;
+		cpuc->lbr_entries[i].cycles	= 0;
 		cpuc->lbr_entries[i].reserved	= 0;
 	}
 	cpuc->lbr_stack.nr = i;
@@ -429,8 +432,10 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 	int out = 0;
 	int num = x86_pmu.lbr_nr;
 
-	if (cpuc->lbr_sel->config & LBR_CALL_STACK)
-		num = tos;
+	if (cpuc->lbr_sel) {
+		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
+			num = tos;
+	}
 
 	for (i = 0; i < num; i++) {
 		unsigned long lbr_idx = (tos - i) & mask;
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index be39b5fde4b9..1011c05b1bd5 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -96,6 +96,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
  * Boot time FPU feature detection code:
  */
 unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
 
 static void __init fpu__init_system_mxcsr(void)
 {
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index eb6bd34582c6..1b96bfe09d42 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -977,6 +977,18 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
+	/*
+	 * When resuming from suspend-to-ram, this function can be indirectly
+	 * called from early CPU startup code while the CPU is in real mode,
+	 * which would fail miserably.  Make sure the stack pointer is a
+	 * virtual address.
+	 *
+	 * This check isn't as accurate as virt_addr_valid(), but it should be
+	 * good enough for this purpose, and it's fast.
+	 */
+	if (unlikely((long)__builtin_frame_address(0) >= 0))
+		return;
+
 	if (unlikely(ftrace_graph_is_dead()))
 		return;
 
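
Aside (illustration, not part of the patch): the sign test works because x86_64 kernel virtual addresses occupy the upper half of the address space, so they are negative when viewed as signed longs. A model:

    /* Assumption: x86_64 kernel-virtual layout (top-bit-set addresses). */
    static inline int on_kernel_virtual_stack(void)
    {
        return (long)__builtin_frame_address(0) < 0;
    }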
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index f129a9af6357..b6b0077da1af 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  */
 
+#define DISABLE_BRANCH_PROFILING
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/types.h>
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index b8e6ff5cd5d0..acc9b8f19ca8 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -351,6 +351,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
 	} else {
 		struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
 
+		irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
 		irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
 		disable_irq(hdev->irq);
 		irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index c6ee63f927ab..d688826e5736 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -67,7 +67,7 @@
 #endif
 
 /* Ensure if the instruction can be boostable */
-extern int can_boost(kprobe_opcode_t *instruction);
+extern int can_boost(kprobe_opcode_t *instruction, void *addr);
 /* Recover instruction if given address is probed */
 extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
 					 unsigned long addr);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 023c442c33bb..99d293ea2b49 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -163,12 +163,12 @@ NOKPROBE_SYMBOL(skip_prefixes);
  * Returns non-zero if opcode is boostable.
  * RIP relative instructions are adjusted at copying time in 64 bits mode
  */
-int can_boost(kprobe_opcode_t *opcodes)
+int can_boost(kprobe_opcode_t *opcodes, void *addr)
 {
 	kprobe_opcode_t opcode;
 	kprobe_opcode_t *orig_opcodes = opcodes;
 
-	if (search_exception_tables((unsigned long)opcodes))
+	if (search_exception_tables((unsigned long)addr))
 		return 0;	/* Page fault may occur on this address. */
 
 retry:
@@ -413,7 +413,7 @@ static int arch_copy_kprobe(struct kprobe *p)
 	 * __copy_instruction can modify the displacement of the instruction,
 	 * but it doesn't affect boostable check.
 	 */
-	if (can_boost(p->ainsn.insn))
+	if (can_boost(p->ainsn.insn, p->addr))
 		p->ainsn.boostable = 0;
 	else
 		p->ainsn.boostable = -1;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 7b3b9d15c47a..c9d488f3e4cd 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -177,7 +177,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
 
 	while (len < RELATIVEJUMP_SIZE) {
 		ret = __copy_instruction(dest + len, src + len);
-		if (!ret || !can_boost(dest + len))
+		if (!ret || !can_boost(dest + len, src + len))
 			return -EINVAL;
 		len += ret;
 	}
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 47190bd399e7..32187f8a49b4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token)
 		if (hlist_unhashed(&n.link))
 			break;
 
+		rcu_irq_exit();
+
 		if (!n.halted) {
 			local_irq_enable();
 			schedule();
@@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token)
 			/*
 			 * We cannot reschedule. So halt.
 			 */
-			rcu_irq_exit();
 			native_safe_halt();
-			rcu_irq_enter();
 			local_irq_disable();
 		}
+
+		rcu_irq_enter();
 	}
 	if (!n.halted)
 		finish_wait(&n.wq, &wait);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 0497f719977d..c055e9a4e547 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -296,7 +296,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 
 	/* were we called with bad_dma_address? */
 	badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
-	if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
+	if (unlikely(dma_addr < badend)) {
 		WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
 		       "address 0x%Lx\n", dma_addr);
 		return;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d2bbe343fda7..e67b834279b2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1048,6 +1048,13 @@ void __init setup_arch(char **cmdline_p)
 	if (mtrr_trim_uncached_memory(max_pfn))
 		max_pfn = e820_end_of_ram_pfn();
 
+	/*
+	 * This call is required when the CPU does not support PAT. If
+	 * mtrr_bp_init() invoked it already via pat_init() the call has no
+	 * effect.
+	 */
+	init_cache_modes();
+
 #ifdef CONFIG_X86_32
 	/* max_low_pfn get updated here */
 	find_low_pfn_range();
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 10e0272d789a..136ad7c1ce7b 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -143,7 +143,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (end - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -186,7 +186,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 2e1fd586b895..83d6369c45f5 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -46,11 +46,18 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
 	return ret;
 }
 
+bool kvm_mpx_supported(void)
+{
+	return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
+		 && kvm_x86_ops->mpx_supported());
+}
+EXPORT_SYMBOL_GPL(kvm_mpx_supported);
+
 u64 kvm_supported_xcr0(void)
 {
 	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
 
-	if (!kvm_x86_ops->mpx_supported())
+	if (!kvm_mpx_supported())
 		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
 
 	return xcr0;
@@ -97,7 +104,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
-	vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
+	vcpu->arch.eager_fpu = use_eager_fpu();
 	if (vcpu->arch.eager_fpu)
 		kvm_x86_ops->fpu_activate(vcpu);
 
@@ -295,7 +302,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 #endif
 	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
 	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
-	unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
+	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
 	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
 
 	/* cpuid 1.edx */
@@ -737,18 +744,20 @@ out:
 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
 {
 	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
-	int j, nent = vcpu->arch.cpuid_nent;
+	struct kvm_cpuid_entry2 *ej;
+	int j = i;
+	int nent = vcpu->arch.cpuid_nent;
 
 	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
 	/* when no next entry is found, the current entry[i] is reselected */
-	for (j = i + 1; ; j = (j + 1) % nent) {
-		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
-		if (ej->function == e->function) {
-			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-			return j;
-		}
-	}
-	return 0; /* silence gcc, even though control never reaches here */
+	do {
+		j = (j + 1) % nent;
+		ej = &vcpu->arch.cpuid_entries[j];
+	} while (ej->function != e->function);
+
+	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+	return j;
 }
 
 /* find an entry with matching function, matching index (if needed), and that
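
Aside (illustration, not part of the patch): the do/while rewrite is a plain circular scan. A model — note it assumes entry[i] itself matches its own function id, which guarantees termination:

    /* Find the next index after i whose function id matches funcs[i],
     * wrapping modulo nent; returns i itself when no other entry matches. */
    static int next_matching(const unsigned int *funcs, int nent, int i)
    {
        int j = i;

        do {
            j = (j + 1) % nent;
        } while (funcs[j] != funcs[i]);

        return j;
    }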
@@ -818,12 +827,6 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
 	if (!best)
 		best = check_cpuid_limit(vcpu, function, index);
 
-	/*
-	 * Perfmon not yet supported for L2 guest.
-	 */
-	if (is_guest_mode(vcpu) && function == 0xa)
-		best = NULL;
-
 	if (best) {
 		*eax = best->eax;
 		*ebx = best->ebx;
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 3f5c48ddba45..d1534feefcfe 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -4,6 +4,7 @@
 #include "x86.h"
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
+bool kvm_mpx_supported(void);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function, u32 index);
 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
@@ -134,20 +135,20 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
 	return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
 
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_MPX));
+	return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
 }
 
-static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
+	return best && (best->ebx & bit(X86_FEATURE_MPX));
 }
 
 static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1dcea225977d..04b2f3cad7ba 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2531,7 +2531,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	u64 smbase;
 	int ret;
 
-	if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
+	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
 		return emulate_ud(ctxt);
 
 	/*
@@ -2580,11 +2580,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 		return X86EMUL_UNHANDLEABLE;
 	}
 
-	if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
 		ctxt->ops->set_nmi_mask(ctxt, false);
 
-	ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
-	ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
+	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
 	return X86EMUL_CONTINUE;
 }
 
@@ -5296,6 +5296,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	const struct x86_emulate_ops *ops = ctxt->ops;
 	int rc = X86EMUL_CONTINUE;
 	int saved_dst_type = ctxt->dst.type;
+	unsigned emul_flags;
 
 	ctxt->mem_read.pos = 0;
 
@@ -5310,6 +5311,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 		goto done;
 	}
 
+	emul_flags = ctxt->ops->get_hflags(ctxt);
 	if (unlikely(ctxt->d &
 		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
 		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
@@ -5343,7 +5345,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 			fetch_possible_mmx_operand(ctxt, &ctxt->dst);
 	}
 
-	if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
+	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
 					      X86_ICPT_PRE_EXCEPT);
 		if (rc != X86EMUL_CONTINUE)
@@ -5372,7 +5374,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 			goto done;
 		}
 
-		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
+		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
 			rc = emulator_check_intercept(ctxt, ctxt->intercept,
 						      X86_ICPT_POST_EXCEPT);
 			if (rc != X86EMUL_CONTINUE)
@@ -5426,7 +5428,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 
 special_insn:
 
-	if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
+	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
 					      X86_ICPT_POST_MEMACCESS);
 		if (rc != X86EMUL_CONTINUE)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8eb8a934b531..1049c3c9b877 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3433,12 +3433,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 {
 	if (unlikely(!lapic_in_kernel(vcpu) ||
 		     kvm_event_needs_reinjection(vcpu)))
 		return false;
 
+	if (is_guest_mode(vcpu))
+		return false;
+
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
@@ -3454,7 +3457,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	if (!async)
 		return false; /* *pfn has correct page already */
 
-	if (!prefault && can_do_async_pf(vcpu)) {
+	if (!prefault && kvm_can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 			trace_kvm_async_pf_doublefault(gva, gfn);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 55ffb7b0f95e..e60fc80f8a9c 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -74,6 +74,7 @@ enum {
 int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index ab38af4f4947..23a7c7ba377a 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 			((u64)1 << edx.split.bit_width_fixed) - 1;
 	}
 
-	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
+	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
 		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
 	pmu->global_ctrl_mask = ~pmu->global_ctrl;
 
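
Aside (illustration, not part of the patch): the 1 -> 1ull change avoids a 32-bit shift that is undefined (and truncating) once the counter count reaches 32:

    #include <stdint.h>

    uint64_t counter_mask(unsigned int n)   /* n may legitimately reach 32+ */
    {
        /* (1 << n) shifts an int: undefined for n >= 32, and the result
         * could not hold bits 32..63 anyway. Widen before shifting: */
        return ((uint64_t)1 << n) - 1;
    }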
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bb620df05d0d..b12391119ce8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -863,7 +863,6 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
-static bool vmx_mpx_supported(void);
 static bool vmx_xsaves_supported(void);
 static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
@@ -2264,7 +2263,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
2264 if (!(vmcs12->exception_bitmap & (1u << nr))) 2263 if (!(vmcs12->exception_bitmap & (1u << nr)))
2265 return 0; 2264 return 0;
2266 2265
2267 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, 2266 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
2268 vmcs_read32(VM_EXIT_INTR_INFO), 2267 vmcs_read32(VM_EXIT_INTR_INFO),
2269 vmcs_readl(EXIT_QUALIFICATION)); 2268 vmcs_readl(EXIT_QUALIFICATION));
2270 return 1; 2269 return 1;
@@ -2541,7 +2540,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2541 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | 2540 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
2542 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; 2541 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
2543 2542
2544 if (vmx_mpx_supported()) 2543 if (kvm_mpx_supported())
2545 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; 2544 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
2546 2545
2547 /* We support free control of debug control saving. */ 2546 /* We support free control of debug control saving. */
@@ -2562,7 +2561,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2562 VM_ENTRY_LOAD_IA32_PAT; 2561 VM_ENTRY_LOAD_IA32_PAT;
2563 vmx->nested.nested_vmx_entry_ctls_high |= 2562 vmx->nested.nested_vmx_entry_ctls_high |=
2564 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); 2563 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
2565 if (vmx_mpx_supported()) 2564 if (kvm_mpx_supported())
2566 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; 2565 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
2567 2566
2568 /* We support free control of debug control loading. */ 2567 /* We support free control of debug control loading. */
@@ -2813,7 +2812,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2813 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); 2812 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
2814 break; 2813 break;
2815 case MSR_IA32_BNDCFGS: 2814 case MSR_IA32_BNDCFGS:
2816 if (!vmx_mpx_supported()) 2815 if (!kvm_mpx_supported() ||
2816 (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
2817 return 1; 2817 return 1;
2818 msr_info->data = vmcs_read64(GUEST_BNDCFGS); 2818 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
2819 break; 2819 break;
@@ -2890,7 +2890,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!vmx_mpx_supported())
+		if (!kvm_mpx_supported() ||
+		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+			return 1;
+		if (is_noncanonical_address(data & PAGE_MASK) ||
+		    (data & MSR_IA32_BNDCFGS_RSVD))
 			return 1;
 		vmcs_write64(GUEST_BNDCFGS, data);
 		break;
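A rough userspace model of the two checks the write path now performs, a canonical base address plus clear reserved bits. The constant and helper names below are assumptions for illustration, not the kernel's definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BNDCFGS_RSVD_BITS 0xffcull      /* assumed: bits 2-11 reserved */

/* 48-bit canonical check: bits 63..47 must all equal bit 47. */
static bool is_canonical(uint64_t addr)
{
	uint64_t high = addr >> 47;

	return high == 0 || high == 0x1ffff;
}

static bool bndcfgs_write_ok(uint64_t data)
{
	uint64_t base = data & ~0xfffull;   /* page-aligned base field */

	return is_canonical(base) && !(data & BNDCFGS_RSVD_BITS);
}

int main(void)
{
	printf("%d %d\n", bndcfgs_write_ok(0x1001),
	       bndcfgs_write_ok(0xdead000000001000ull));
	return 0;
}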
@@ -3363,7 +3367,7 @@ static void init_vmcs_shadow_fields(void)
 	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
 		switch (shadow_read_write_fields[i]) {
 		case GUEST_BNDCFGS:
-			if (!vmx_mpx_supported())
+			if (!kvm_mpx_supported())
 				continue;
 			break;
 		default:
@@ -3499,7 +3503,7 @@ static void fix_rmode_seg(int seg, struct kvm_segment *save)
 	}
 
 	vmcs_write16(sf->selector, var.selector);
-	vmcs_write32(sf->base, var.base);
+	vmcs_writel(sf->base, var.base);
 	vmcs_write32(sf->limit, var.limit);
 	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
 }
@@ -4867,6 +4871,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	if (vmx_xsaves_supported())
 		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
 
+	if (enable_pml) {
+		ASSERT(vmx->pml_pg);
+		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+	}
+
 	return 0;
 }
 
@@ -6247,7 +6257,6 @@ static __init int hardware_setup(void)
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-	vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
 
 	memcpy(vmx_msr_bitmap_legacy_x2apic,
 			vmx_msr_bitmap_legacy, PAGE_SIZE);
@@ -6672,14 +6681,20 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 		}
 
 		page = nested_get_page(vcpu, vmptr);
-		if (page == NULL ||
-		    *(u32 *)kmap(page) != VMCS12_REVISION) {
+		if (page == NULL) {
 			nested_vmx_failInvalid(vcpu);
+			skip_emulated_instruction(vcpu);
+			return 1;
+		}
+		if (*(u32 *)kmap(page) != VMCS12_REVISION) {
 			kunmap(page);
+			nested_release_page_clean(page);
+			nested_vmx_failInvalid(vcpu);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		}
 		kunmap(page);
+		nested_release_page_clean(page);
 		vmx->nested.vmxon_ptr = vmptr;
 		break;
 	case EXIT_REASON_VMCLEAR:
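The restructured error handling above releases the page on every exit path rather than leaking it when the revision check fails. A minimal userspace sketch of the same acquire/check/release discipline (malloc/memcpy stand in for the page lookup and kmap; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REVISION 0x12345678u

static int check_region(const void *src, size_t len)
{
	unsigned char *map;
	unsigned int rev;
	int ret = -1;

	if (len < sizeof(rev))
		return -1;

	map = malloc(len);              /* stands in for "get page + kmap" */
	if (!map)
		return -1;
	memcpy(map, src, len);

	memcpy(&rev, map, sizeof(rev));
	if (rev != REVISION)
		goto out;               /* bad revision: still release below */

	ret = 0;
out:
	free(map);                      /* single release point for all paths */
	return ret;
}

int main(void)
{
	unsigned int good = REVISION, bad = 0;

	printf("%d %d\n", check_region(&good, sizeof(good)),
	       check_region(&bad, sizeof(bad)));
	return 0;
}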
@@ -7742,8 +7757,6 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 	case EXIT_REASON_TASK_SWITCH:
 		return true;
 	case EXIT_REASON_CPUID:
-		if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
-			return false;
 		return true;
 	case EXIT_REASON_HLT:
 		return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
@@ -7828,6 +7841,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
 	case EXIT_REASON_PCOMMIT:
 		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_PCOMMIT);
+	case EXIT_REASON_PML_FULL:
+		/* We don't expose PML support to L1. */
+		return false;
 	default:
 		return true;
 	}
@@ -7839,22 +7855,6 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
 	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
 }
 
-static int vmx_create_pml_buffer(struct vcpu_vmx *vmx)
-{
-	struct page *pml_pg;
-
-	pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!pml_pg)
-		return -ENOMEM;
-
-	vmx->pml_pg = pml_pg;
-
-	vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
-	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
-
-	return 0;
-}
-
 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
 {
 	if (vmx->pml_pg) {
@@ -7915,7 +7915,7 @@ static void kvm_flush_pml_buffers(struct kvm *kvm)
 static void vmx_dump_sel(char *name, uint32_t sel)
 {
 	pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
-	       name, vmcs_read32(sel),
+	       name, vmcs_read16(sel),
 	       vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
 	       vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
 	       vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
@@ -8789,14 +8789,26 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_vcpu;
 
+	err = -ENOMEM;
+
+	/*
+	 * If PML is turned on, failure on enabling PML just results in failure
+	 * of creating the vcpu, therefore we can simplify PML logic (by
+	 * avoiding dealing with cases, such as enabling PML partially on vcpus
+	 * for the guest, etc.
+	 */
+	if (enable_pml) {
+		vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!vmx->pml_pg)
+			goto uninit_vcpu;
+	}
+
 	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
 		     > PAGE_SIZE);
 
-	err = -ENOMEM;
-	if (!vmx->guest_msrs) {
-		goto uninit_vcpu;
-	}
+	if (!vmx->guest_msrs)
+		goto free_pml;
 
 	vmx->loaded_vmcs = &vmx->vmcs01;
 	vmx->loaded_vmcs->vmcs = alloc_vmcs();
@@ -8840,18 +8852,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	vmx->nested.current_vmptr = -1ull;
 	vmx->nested.current_vmcs12 = NULL;
 
-	/*
-	 * If PML is turned on, failure on enabling PML just results in failure
-	 * of creating the vcpu, therefore we can simplify PML logic (by
-	 * avoiding dealing with cases, such as enabling PML partially on vcpus
-	 * for the guest, etc.
-	 */
-	if (enable_pml) {
-		err = vmx_create_pml_buffer(vmx);
-		if (err)
-			goto free_vmcs;
-	}
-
 	return &vmx->vcpu;
 
 free_vmcs:
@@ -8859,6 +8859,8 @@ free_vmcs:
 	free_loaded_vmcs(vmx->loaded_vmcs);
 free_msrs:
 	kfree(vmx->guest_msrs);
+free_pml:
+	vmx_destroy_pml_buffer(vmx);
 uninit_vcpu:
 	kvm_vcpu_uninit(&vmx->vcpu);
 free_vcpu:
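The new free_pml label extends the function's unwind ladder: each successful allocation adds one label, and error paths jump to the label that frees everything acquired so far, in reverse order. A compact illustration of that idiom with placeholder allocations:

#include <stdlib.h>

struct ctx { void *a, *b, *c; };

static struct ctx *ctx_create(void)
{
	struct ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;
	ctx->a = malloc(64);
	if (!ctx->a)
		goto free_ctx;
	ctx->b = malloc(64);
	if (!ctx->b)
		goto free_a;
	ctx->c = malloc(64);
	if (!ctx->c)
		goto free_b;
	return ctx;

free_b:
	free(ctx->b);
free_a:
	free(ctx->a);
free_ctx:
	free(ctx);
	return NULL;
}

int main(void)
{
	struct ctx *ctx = ctx_create();

	if (!ctx)
		return 1;
	free(ctx->c);
	free(ctx->b);
	free(ctx->a);
	free(ctx);
	return 0;
}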
@@ -9761,6 +9763,18 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
 	}
 
+	if (enable_pml) {
+		/*
+		 * Conceptually we want to copy the PML address and index from
+		 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+		 * since we always flush the log on each vmexit, this happens
+		 * to be equivalent to simply resetting the fields in vmcs02.
+		 */
+		ASSERT(vmx->pml_pg);
+		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+	}
+
 	if (nested_cpu_has_ept(vmcs12)) {
 		kvm_mmu_unload(vcpu);
 		nested_ept_init_mmu_context(vcpu);
@@ -10254,7 +10268,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
 	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
 	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
-	if (vmx_mpx_supported())
+	if (kvm_mpx_supported())
 		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
 	if (nested_cpu_has_xsaves(vmcs12))
 		vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 25a6efcfdf7f..8e526c6fd784 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2960,6 +2960,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 			      | KVM_VCPUEVENT_VALID_SMM))
 		return -EINVAL;
 
+	/* INITs are latched while in SMM */
+	if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
+	    (events->smi.smm || events->smi.pending) &&
+	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
+		return -EINVAL;
+
 	process_nmi(vcpu);
 	vcpu->arch.exception.pending = events->exception.injected;
 	vcpu->arch.exception.nr = events->exception.nr;
@@ -3057,6 +3063,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 	memcpy(dest, xsave, XSAVE_HDR_OFFSET);
 
 	/* Set XSTATE_BV */
+	xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
 	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
 
 	/*
@@ -3133,11 +3140,14 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 	}
 }
 
+#define XSAVE_MXCSR_OFFSET 24
+
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 					struct kvm_xsave *guest_xsave)
 {
 	u64 xstate_bv =
 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
+	u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
 
 	if (cpu_has_xsave) {
 		/*
@@ -3145,11 +3155,13 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 		 * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
 		 * with old userspace.
 		 */
-		if (xstate_bv & ~kvm_supported_xcr0())
+		if (xstate_bv & ~kvm_supported_xcr0() ||
+		    mxcsr & ~mxcsr_feature_mask)
 			return -EINVAL;
 		load_xsave(vcpu, (u8 *)guest_xsave->region);
 	} else {
-		if (xstate_bv & ~XFEATURE_MASK_FPSSE)
+		if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
+		    mxcsr & ~mxcsr_feature_mask)
 			return -EINVAL;
 		memcpy(&vcpu->arch.guest_fpu.state.fxsave,
 			guest_xsave->region, sizeof(struct fxregs_state));
@@ -4596,16 +4608,20 @@ emul_write:
 
 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
 {
-	/* TODO: String I/O for in kernel device */
-	int r;
+	int r = 0, i;
 
-	if (vcpu->arch.pio.in)
-		r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
-				    vcpu->arch.pio.size, pd);
-	else
-		r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
-				     vcpu->arch.pio.port, vcpu->arch.pio.size,
-				     pd);
+	for (i = 0; i < vcpu->arch.pio.count; i++) {
+		if (vcpu->arch.pio.in)
+			r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
+					    vcpu->arch.pio.size, pd);
+		else
+			r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
+					     vcpu->arch.pio.port, vcpu->arch.pio.size,
+					     pd);
+		if (r)
+			break;
+		pd += vcpu->arch.pio.size;
+	}
 	return r;
 }
 
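kernel_pio now walks every element of a string I/O operation instead of handling only the first. A small standalone model of the loop shape (handle_one is a stand-in for the bus accessors; all names are illustrative):

#include <stdio.h>

static int handle_one(const unsigned char *elem, int size)
{
	(void)elem;
	return size > 0 ? 0 : -1;	/* stand-in for a bus read/write */
}

static int handle_all(unsigned char *buf, int size, int count)
{
	int r = 0, i;

	for (i = 0; i < count; i++) {
		r = handle_one(buf, size);
		if (r)
			break;		/* leave remaining elements untouched */
		buf += size;		/* advance to the next element */
	}
	return r;
}

int main(void)
{
	unsigned char buf[16] = { 0 };

	printf("%d\n", handle_all(buf, 4, 4));
	return 0;
}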
@@ -4643,6 +4659,8 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
 	if (vcpu->arch.pio.count)
 		goto data_avail;
 
+	memset(vcpu->arch.pio_data, 0, size * count);
+
 	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
 	if (ret) {
 data_avail:
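The memset above ensures the guest never reads stale bytes when an in-kernel device fills less than size * count. A tiny illustration of the leak pattern being closed (produce is a hypothetical producer that may return short):

#include <stdio.h>
#include <string.h>

static int produce(unsigned char *buf, size_t want)
{
	size_t got = want / 2;		/* device answered with less data */

	memset(buf, 0xab, got);
	return (int)got;
}

int main(void)
{
	unsigned char buf[8];
	size_t i;

	memset(buf, 0, sizeof(buf));	/* the fix: no stale bytes remain */
	produce(buf, sizeof(buf));

	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}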
@@ -4826,6 +4844,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
 
 	if (var.unusable) {
 		memset(desc, 0, sizeof(*desc));
+		if (base3)
+			*base3 = 0;
 		return false;
 	}
 
@@ -4981,6 +5001,16 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
 	kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
 }
 
+static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
+{
+	return emul_to_vcpu(ctxt)->arch.hflags;
+}
+
+static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
+{
+	kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+}
+
 static const struct x86_emulate_ops emulate_ops = {
 	.read_gpr = emulator_read_gpr,
 	.write_gpr = emulator_write_gpr,
@@ -5020,6 +5050,8 @@ static const struct x86_emulate_ops emulate_ops = {
 	.intercept = emulator_intercept,
 	.get_cpuid = emulator_get_cpuid,
 	.set_nmi_mask = emulator_set_nmi_mask,
+	.get_hflags = emulator_get_hflags,
+	.set_hflags = emulator_set_hflags,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
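The two new callbacks let the emulator read and write hflags live instead of carrying a cached copy in its context that must be synced back later. A minimal sketch of that ops-table pattern, with invented types:

#include <stdio.h>

struct cpu { unsigned hflags; };

struct emu_ops {
	unsigned (*get_hflags)(struct cpu *);
	void (*set_hflags)(struct cpu *, unsigned);
};

static unsigned cpu_get_hflags(struct cpu *c) { return c->hflags; }
static void cpu_set_hflags(struct cpu *c, unsigned f) { c->hflags = f; }

static const struct emu_ops ops = {
	.get_hflags = cpu_get_hflags,
	.set_hflags = cpu_set_hflags,
};

int main(void)
{
	struct cpu c = { .hflags = 0x1 };

	/* An emulated instruction toggles a flag: the change is visible
	 * immediately, with no cached copy to fall out of sync. */
	ops.set_hflags(&c, ops.get_hflags(&c) | 0x2);
	printf("%#x\n", c.hflags);
	return 0;
}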
@@ -5072,7 +5104,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 	BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
 	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
 	BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
-	ctxt->emul_flags = vcpu->arch.hflags;
 
 	init_decode_cache(ctxt);
 	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
@@ -5468,8 +5499,6 @@ restart:
 		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
-		if (vcpu->arch.hflags != ctxt->emul_flags)
-			kvm_set_hflags(vcpu, ctxt->emul_flags);
 		kvm_rip_write(vcpu, ctxt->eip);
 		if (r == EMULATE_DONE)
 			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
@@ -5956,7 +5985,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 
 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
 
-	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+	return emulator_write_emulated(ctxt, rip, instruction, 3,
+		&ctxt->exception);
 }
 
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
@@ -6992,6 +7022,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
 		return -EINVAL;
 
+	/* INITs are latched while in SMM */
+	if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
+	    (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
+	     mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
+		return -EINVAL;
+
 	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
 		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
 		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
@@ -8221,8 +8257,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
 		return true;
 	else
-		return !kvm_event_needs_reinjection(vcpu) &&
-			kvm_x86_ops->interrupt_allowed(vcpu);
+		return kvm_can_do_async_pf(vcpu);
 }
 
 void kvm_arch_start_assignment(struct kvm *kvm)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 27f89c79a44b..423644c230e7 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -80,7 +80,7 @@ ENTRY(copy_user_generic_unrolled)
 	movl %edx,%ecx
 	andl $63,%edx
 	shrl $6,%ecx
-	jz 17f
+	jz .L_copy_short_string
 1:	movq (%rsi),%r8
 2:	movq 1*8(%rsi),%r9
 3:	movq 2*8(%rsi),%r10
@@ -101,7 +101,8 @@ ENTRY(copy_user_generic_unrolled)
 	leaq 64(%rdi),%rdi
 	decl %ecx
 	jnz 1b
-17:	movl %edx,%ecx
+.L_copy_short_string:
+	movl %edx,%ecx
 	andl $7,%edx
 	shrl $3,%ecx
 	jz 20f
@@ -215,6 +216,8 @@ ENDPROC(copy_user_generic_string)
  */
 ENTRY(copy_user_enhanced_fast_string)
 	ASM_STAC
+	cmpl $64,%edx
+	jb .L_copy_short_string	/* less then 64 bytes, avoid the costly 'rep' */
 	movl %edx,%ecx
 1:	rep
 	movsb
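The two added instructions route copies under 64 bytes away from 'rep movsb', whose startup cost dominates for tiny lengths. A plausible C model of the dispatch (the threshold mirrors the patch; everything else is an illustrative assumption):

#include <stddef.h>
#include <string.h>

#define REP_MOVSB_THRESHOLD 64

static void copy_bytes(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (len < REP_MOVSB_THRESHOLD) {
		while (len--)		/* short-string path: no setup cost */
			*d++ = *s++;
		return;
	}
	memcpy(d, s, len);		/* stands in for the 'rep movsb' path */
}

int main(void)
{
	char a[128] = "hello", b[128];

	copy_bytes(b, a, 6);		/* takes the short path */
	copy_bytes(b, a, sizeof(a));	/* takes the bulk path */
	return 0;
}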
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 42982b26e32b..39bdaf3ac44a 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -144,7 +144,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
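vm_start_gap() makes the placement test account for a guard gap below a downward-growing mapping rather than only the mapping's start. A simplified userspace model (field names and the gap size are assumptions based on the fix's defaults):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define GUARD_GAP (256UL << 12)         /* assumed: 256 pages of 4K */

struct vma {
	uintptr_t start, end;
	bool grows_down;
};

/* Lowest address a new mapping may reach when placed below this vma. */
static uintptr_t vma_start_gap(const struct vma *v)
{
	if (v->grows_down && v->start > GUARD_GAP)
		return v->start - GUARD_GAP;
	return v->start;
}

static bool fits_before(const struct vma *next, uintptr_t addr, size_t len)
{
	return !next || addr + len <= vma_start_gap(next);
}

int main(void)
{
	struct vma stack = { 0x7f0000000000, 0x7f0000100000, true };

	/* A request that ends right at the stack's start is now refused:
	 * it would eat into the guard gap. */
	return fits_before(&stack, 0x7effffff0000, 0x10000) ? 1 : 0;
}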
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 493f54172b4a..3aebbd6c6f5f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -628,21 +628,40 @@ void __init init_mem_mapping(void)
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
  *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
  */
 int devmem_is_allowed(unsigned long pagenr)
 {
-	if (pagenr < 256)
-		return 1;
-	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
-		return 0;
-	if (!page_is_ram(pagenr))
-		return 1;
-	return 0;
+	if (page_is_ram(pagenr)) {
+		/*
+		 * For disallowed memory regions in the low 1MB range,
+		 * request that the page be shown as all zeros.
+		 */
+		if (pagenr < 256)
+			return 2;
+
+		return 0;
+	}
+
+	/*
+	 * This must follow RAM test, since System RAM is considered a
+	 * restricted resource under CONFIG_STRICT_IOMEM.
+	 */
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+		/* Low 1MB bypasses iomem restrictions. */
+		if (pagenr < 256)
+			return 1;
+
+		return 0;
+	}
+
+	return 1;
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
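The rewritten helper now distinguishes three outcomes instead of two. A sketch of how a /dev/mem-style caller might consume them, with 0 = refuse, 1 = real access, 2 = fake a zero-filled page (the helpers and precomputed flags are illustrative):

#include <stdio.h>
#include <string.h>

/* Mirrors the post-patch decision table; inputs are precomputed flags. */
static int devmem_policy(unsigned long pagenr, int is_ram, int is_exclusive)
{
	if (is_ram)
		return pagenr < 256 ? 2 : 0;
	if (is_exclusive)
		return pagenr < 256 ? 1 : 0;
	return 1;
}

static size_t read_page(int policy, unsigned char *buf, size_t len)
{
	switch (policy) {
	case 0:
		return 0;               /* reject the access outright */
	case 2:
		memset(buf, 0, len);    /* present the page as all zeros */
		return len;
	default:
		return len;             /* a real read would happen here */
	}
}

int main(void)
{
	unsigned char buf[16];

	printf("%zu\n", read_page(devmem_policy(10, 1, 0), buf, sizeof(buf)));
	return 0;
}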
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index d470cf219a2d..4e5ac46adc9d 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define DISABLE_BRANCH_PROFILING
 #define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index ef05755a1900..7ed47b1e6f42 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
 	 * We were not able to extract an address from the instruction,
 	 * probably because there was something invalid in it.
 	 */
-	if (info->si_addr == (void *)-1) {
+	if (info->si_addr == (void __user *)-1) {
 		err = -EINVAL;
 		goto err_out;
 	}
@@ -525,15 +525,7 @@ int mpx_handle_bd_fault(void)
 	if (!kernel_managing_mpx_tables(current->mm))
 		return -EINVAL;
 
-	if (do_mpx_bt_fault()) {
-		force_sig(SIGSEGV, current);
-		/*
-		 * The force_sig() is essentially "handling" this
-		 * exception, so we do not pass up the error
-		 * from do_mpx_bt_fault().
-		 */
-	}
-	return 0;
+	return do_mpx_bt_fault();
 }
 
 /*
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 47b6436e41c2..3686a1db25b2 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -100,5 +100,6 @@ void __init initmem_init(void)
 	printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
 			(ulong) pfn_to_kaddr(highstart_pfn));
 
+	__vmalloc_start_set = true;
 	setup_bootmem_allocator();
 }
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 6ad687d104ca..3f1bb4f93a5a 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -36,14 +36,14 @@
 #undef pr_fmt
 #define pr_fmt(fmt) "" fmt
 
-static bool boot_cpu_done;
-
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
-static void init_cache_modes(void);
+static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __read_mostly pat_initialized;
+static bool __read_mostly init_cm_done;
 
 void pat_disable(const char *reason)
 {
-	if (!__pat_enabled)
+	if (pat_disabled)
 		return;
 
 	if (boot_cpu_done) {
@@ -51,10 +51,8 @@ void pat_disable(const char *reason)
 		return;
 	}
 
-	__pat_enabled = 0;
+	pat_disabled = true;
 	pr_info("x86/PAT: %s\n", reason);
-
-	init_cache_modes();
 }
 
 static int __init nopat(char *str)
@@ -66,7 +64,7 @@ early_param("nopat", nopat);
 
 bool pat_enabled(void)
 {
-	return !!__pat_enabled;
+	return pat_initialized;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
 
@@ -204,6 +202,8 @@ static void __init_cache_modes(u64 pat)
 		update_cache_mode_entry(i, cache);
 	}
 	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
+
+	init_cm_done = true;
 }
 
 #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
@@ -224,6 +224,7 @@ static void pat_bsp_init(u64 pat)
 	}
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
+	pat_initialized = true;
 
 	__init_cache_modes(pat);
 }
@@ -241,10 +242,9 @@ static void pat_ap_init(u64 pat)
 	wrmsrl(MSR_IA32_CR_PAT, pat);
 }
 
-static void init_cache_modes(void)
+void init_cache_modes(void)
 {
 	u64 pat = 0;
-	static int init_cm_done;
 
 	if (init_cm_done)
 		return;
@@ -286,8 +286,6 @@ static void init_cache_modes(void)
 	}
 
 	__init_cache_modes(pat);
-
-	init_cm_done = 1;
 }
 
 /**
@@ -305,10 +303,8 @@ void pat_init(void)
 	u64 pat;
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
-	if (!pat_enabled()) {
-		init_cache_modes();
+	if (pat_disabled)
 		return;
-	}
 
 	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
 	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5fb6adaaa796..5a760fd66bec 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -134,8 +134,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 {
 	struct flush_tlb_info info;
 
-	if (end == 0)
-		end = start + PAGE_SIZE;
 	info.flush_mm = mm;
 	info.flush_start = start;
 	info.flush_end = end;
@@ -264,7 +262,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 	}
 
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
+		flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
 
 	preempt_enable();
 }
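With the end == 0 sentinel gone, flush_tlb_page() passes an explicit one-page range and every caller uses the same [start, end) convention. A trivial sketch of that convention change:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static void flush_range(unsigned long start, unsigned long end)
{
	printf("flush [%#lx, %#lx)\n", start, end);
}

int main(void)
{
	unsigned long addr = 0x7f0000001000UL;

	/* Old style was flush_range(addr, 0) with a magic-zero rewrite
	 * inside the callee; the explicit range removes the special case. */
	flush_range(addr, addr + PAGE_SIZE);
	return 0;
}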
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index c6d6efed392a..7575f0798194 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -231,23 +231,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 		return 1;
 
 	for_each_pci_msi_entry(msidesc, dev) {
-		__pci_read_msi_msg(msidesc, &msg);
-		pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
-			((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
-		if (msg.data != XEN_PIRQ_MSI_DATA ||
-		    xen_irq_from_pirq(pirq) < 0) {
-			pirq = xen_allocate_pirq_msi(dev, msidesc);
-			if (pirq < 0) {
-				irq = -ENODEV;
-				goto error;
-			}
-			xen_msi_compose_msg(dev, pirq, &msg);
-			__pci_write_msi_msg(msidesc, &msg);
-			dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
-		} else {
-			dev_dbg(&dev->dev,
-				"xen: msi already bound to pirq=%d\n", pirq);
+		pirq = xen_allocate_pirq_msi(dev, msidesc);
+		if (pirq < 0) {
+			irq = -ENODEV;
+			goto error;
 		}
+		xen_msi_compose_msg(dev, pirq, &msg);
+		__pci_write_msi_msg(msidesc, &msg);
+		dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
 		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
 					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
 					       (type == PCI_CAP_ID_MSIX) ?
diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c
index 1693107a518e..0d17c0aafeb1 100644
--- a/arch/x86/platform/goldfish/goldfish.c
+++ b/arch/x86/platform/goldfish/goldfish.c
@@ -42,10 +42,22 @@ static struct resource goldfish_pdev_bus_resources[] = {
 	}
 };
 
+static bool goldfish_enable __initdata;
+
+static int __init goldfish_setup(char *str)
+{
+	goldfish_enable = true;
+	return 0;
+}
+__setup("goldfish", goldfish_setup);
+
 static int __init goldfish_init(void)
 {
+	if (!goldfish_enable)
+		return -ENODEV;
+
 	platform_device_register_simple("goldfish_pdev_bus", -1,
 					goldfish_pdev_bus_resources, 2);
 	return 0;
 }
 device_initcall(goldfish_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
index de734134bc8d..40c616495da7 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
@@ -17,7 +17,7 @@
 #include <asm/intel-mid.h>
 #include <asm/io_apic.h>
 
-#define TANGIER_EXT_TIMER0_MSI 15
+#define TANGIER_EXT_TIMER0_MSI 12
 
 static struct platform_device wdt_dev = {
 	.name = "intel_mid_wdt",
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 0c2fae8d929d..73eb7fd4aec4 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -992,11 +992,12 @@ static void emit_relocs(int as_text, int use_real_mode)
992 die("Segment relocations found but --realmode not specified\n"); 992 die("Segment relocations found but --realmode not specified\n");
993 993
994 /* Order the relocations for more efficient processing */ 994 /* Order the relocations for more efficient processing */
995 sort_relocs(&relocs16);
996 sort_relocs(&relocs32); 995 sort_relocs(&relocs32);
997#if ELF_BITS == 64 996#if ELF_BITS == 64
998 sort_relocs(&relocs32neg); 997 sort_relocs(&relocs32neg);
999 sort_relocs(&relocs64); 998 sort_relocs(&relocs64);
999#else
1000 sort_relocs(&relocs16);
1000#endif 1001#endif
1001 1002
1002 /* Print the relocations */ 1003 /* Print the relocations */
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index a629694ee750..e14c43a2d187 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -121,7 +121,7 @@ int poke_user(struct task_struct *child, long addr, long data)
 	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
 		(addr <= offsetof(struct user, u_debugreg[7]))) {
 		addr -= offsetof(struct user, u_debugreg[0]);
-		addr = addr >> 2;
+		addr = addr >> 3;
 		if ((addr == 4) || (addr == 5))
 			return -EIO;
 		child->thread.arch.debugregs[addr] = data;
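The shift fix converts a byte offset into a debug-register index by dividing by sizeof(long), which is 8 on x86-64, not 4. A standalone check of the arithmetic:

#include <assert.h>
#include <stddef.h>

int main(void)
{
	unsigned long debugregs[8] = { 0 };
	size_t byte_off = 3 * sizeof(unsigned long);	/* u_debugreg[3] */

	size_t idx = byte_off >> 3;	/* correct: offset / sizeof(long) */
	assert(idx == 3);
	debugregs[idx] = 0xdeadbeef;

	/* byte_off >> 2 would have produced index 6 here, touching the
	 * wrong slot and letting high registers index out of range. */
	return 0;
}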
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 1e56ff583459..63146c378f1e 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2038,7 +2038,8 @@ static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
 
 /*
  * Translate a virtual address to a physical one without relying on mapped
- * page tables.
+ * page tables. Don't rely on big pages being aligned in (guest) physical
+ * space!
  */
 static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
 {
@@ -2059,7 +2060,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
 				       sizeof(pud)));
 	if (!pud_present(pud))
 		return 0;
-	pa = pud_pfn(pud) << PAGE_SHIFT;
+	pa = pud_val(pud) & PTE_PFN_MASK;
 	if (pud_large(pud))
 		return pa + (vaddr & ~PUD_MASK);
 
@@ -2067,7 +2068,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
 				       sizeof(pmd)));
 	if (!pmd_present(pmd))
 		return 0;
-	pa = pmd_pfn(pmd) << PAGE_SHIFT;
+	pa = pmd_val(pmd) & PTE_PFN_MASK;
 	if (pmd_large(pmd))
 		return pa + (vaddr & ~PMD_MASK);
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index e345891450c3..df8844a1853a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -713,10 +713,9 @@ static void __init xen_reserve_xen_mfnlist(void)
 		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
 	}
 
-	if (!xen_is_e820_reserved(start, size)) {
-		memblock_reserve(start, size);
+	memblock_reserve(start, size);
+	if (!xen_is_e820_reserved(start, size))
 		return;
-	}
 
 #ifdef CONFIG_X86_32
 	/*
@@ -727,6 +726,7 @@ static void __init xen_reserve_xen_mfnlist(void)
 	BUG();
 #else
 	xen_relocate_p2m();
+	memblock_free(start, size);
 #endif
 }
 
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 9e2ba5c6e1dd..f42e78de1e10 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -27,6 +27,12 @@ static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
 {
+	int irq = per_cpu(lock_kicker_irq, cpu);
+
+	/* Don't kick if the target's kicker interrupt is not initialized. */
+	if (irq == -1)
+		return;
+
 	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
 }
 
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index f1ba6a092854..8846257d8792 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -343,11 +343,11 @@ static int xen_vcpuop_set_next_event(unsigned long delta,
 	WARN_ON(!clockevent_state_oneshot(evt));
 
 	single.timeout_abs_ns = get_abs_timeout(delta);
-	single.flags = VCPU_SSHOTTMR_future;
+	/* Get an event anyway, even if the timeout is already expired */
+	single.flags = 0;
 
 	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
-
-	BUG_ON(ret != 0 && ret != -ETIME);
+	BUG_ON(ret != 0);
 
 	return ret;
 }
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index f71f88ea7646..19707db966f1 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
 # define PLATFORM_NR_IRQS 0
 #endif
 #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
 
 #if VARIANT_NR_IRQS == 0
 static inline void variant_init_irq(void) { }
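The new macro offsets every hardware IRQ by one so that Linux IRQ 0 can keep meaning "no interrupt", which is also why NR_IRQS grows by one. A standalone check of the mapping:

#include <assert.h>

#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)

int main(void)
{
	/* hwirq 0 is a valid hardware line, but Linux irq 0 means "none";
	 * the offset keeps the two namespaces from colliding. */
	assert(XTENSA_PIC_LINUX_IRQ(0) == 1);
	assert(XTENSA_PIC_LINUX_IRQ(5) == 6);
	return 0;
}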
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 4ac3d23161cf..441694464b1e 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
 	int irq = irq_find_mapping(NULL, hwirq);
 
-	if (hwirq >= NR_IRQS) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-				__func__, hwirq);
-	}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 1KB free? */
 	{
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 9735691f37f1..49ccbd9022f6 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -133,6 +133,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
133 133
134__tagtable(BP_TAG_INITRD, parse_tag_initrd); 134__tagtable(BP_TAG_INITRD, parse_tag_initrd);
135 135
136#endif /* CONFIG_BLK_DEV_INITRD */
137
136#ifdef CONFIG_OF 138#ifdef CONFIG_OF
137 139
138static int __init parse_tag_fdt(const bp_tag_t *tag) 140static int __init parse_tag_fdt(const bp_tag_t *tag)
@@ -145,8 +147,6 @@ __tagtable(BP_TAG_FDT, parse_tag_fdt);
145 147
146#endif /* CONFIG_OF */ 148#endif /* CONFIG_OF */
147 149
148#endif /* CONFIG_BLK_DEV_INITRD */
149
150static int __init parse_tag_cmdline(const bp_tag_t* tag) 150static int __init parse_tag_cmdline(const bp_tag_t* tag)
151{ 151{
152 strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE); 152 strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 83cf49685373..3aaaae18417c 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -87,7 +87,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	/* At this point: (!vmm || addr < vmm->vm_end). */
 	if (TASK_SIZE - len < addr)
 		return -ENOMEM;
-	if (!vmm || addr + len <= vmm->vm_start)
+	if (!vmm || addr + len <= vm_start_gap(vmm))
 		return addr;
 	addr = vmm->vm_end;
 	if (flags & MAP_SHARED)
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index dbeea2b440a1..1fda7e20dfcb 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -24,16 +24,18 @@
 
 /* Interrupt configuration. */
 
-#define PLATFORM_NR_IRQS	10
+#define PLATFORM_NR_IRQS	0
 
 /* Default assignment of LX60 devices to external interrupts. */
 
 #ifdef CONFIG_XTENSA_MX
 #define DUART16552_INTNUM	XCHAL_EXTINT3_NUM
 #define OETH_IRQ		XCHAL_EXTINT4_NUM
+#define C67X00_IRQ		XCHAL_EXTINT8_NUM
 #else
 #define DUART16552_INTNUM	XCHAL_EXTINT0_NUM
 #define OETH_IRQ		XCHAL_EXTINT1_NUM
+#define C67X00_IRQ		XCHAL_EXTINT5_NUM
 #endif
 
 /*
@@ -63,5 +65,5 @@
 
 #define C67X00_PADDR	(XCHAL_KIO_PADDR + 0x0D0D0000)
 #define C67X00_SIZE	0x10
-#define C67X00_IRQ	5
+
 #endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index e9f65f79cf2e..d1e9439fad45 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -209,8 +209,8 @@ static struct resource ethoc_res[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[2] = { /* IRQ number */
-		.start = OETH_IRQ,
-		.end = OETH_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+		.end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -246,8 +246,8 @@ static struct resource c67x00_res[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = { /* IRQ number */
-		.start = C67X00_IRQ,
-		.end = C67X00_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+		.end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -280,7 +280,7 @@ static struct resource serial_resource = {
 static struct plat_serial8250_port serial_platform_data[] = {
 	[0] = {
 		.mapbase	= DUART16552_PADDR,
-		.irq		= DUART16552_INTNUM,
+		.irq		= XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
 		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
 				  UPF_IOREMAP,
 		.iotype		= UPIO_MEM32,