Gitweb @ Texas Instruments - Open Source Git Repositories - git.TI.com/gitweb - ti-linux-kernel/ti-linux-kernel-next.git/commitdiff
Merged TI feature vigneshr_next into ti-linux-4.19.y-next ti-linux-4.19.y-next-20191029
authorLCPD Auto Merger <lcpd_integration@list.ti.com>
Tue, 29 Oct 2019 07:24:16 +0000 (02:24 -0500)
committerLCPD Auto Merger <lcpd_integration@list.ti.com>
Tue, 29 Oct 2019 07:24:16 +0000 (02:24 -0500)
TI-Feature: vigneshr_next
TI-Tree: git://git.ti.com/~vigneshr/ti-linux-kernel/vigneshr-ti-linux-kernel.git
TI-Branch: ti-linux-4.19.y-for-next

* 'ti-linux-4.19.y-for-next' of git://git.ti.com/~vigneshr/ti-linux-kernel/vigneshr-ti-linux-kernel:
  serial: 8250: 8250_omap: Remove redundant call to omap_8250_rx_dma_flush
  serial: 8250: 8250_omap: Fix DMA teardown sequence during RX timeout

Signed-off-by: LCPD Auto Merger <lcpd_integration@list.ti.com>
2000 files changed:
Documentation/PCI/endpoint/pci-test-ntb.txt [new file with mode: 0644]
Documentation/admin-guide/hw-vuln/spectre.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/arm64/elf_hwcaps.txt
Documentation/atomic_t.txt
Documentation/devicetree/bindings/display/bridge/cdns,mhdp.txt
Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt [new file with mode: 0644]
Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
Documentation/devicetree/bindings/mmc/mmc.txt
Documentation/devicetree/bindings/mmc/sdhci-am654.txt
Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
Documentation/devicetree/bindings/pci/endpoint/pci-epf-bus.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pci/endpoint/pci-epf-ntb.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pci/endpoint/pci-epf.txt [new file with mode: 0644]
Documentation/devicetree/bindings/phy/phy-cadence-torrent.txt [moved from Documentation/devicetree/bindings/phy/phy-cadence-dp.txt with 73% similarity]
Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.txt
Documentation/devicetree/bindings/watchdog/ti,rti-wdt.txt [new file with mode: 0644]
Documentation/filesystems/overlayfs.txt
Documentation/scheduler/sched-pelt.c
Documentation/usb/rio.txt [deleted file]
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/configs/nps_defconfig
arch/arc/configs/vdk_hs38_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/kernel/head.S
arch/arc/kernel/setup.c
arch/arc/kernel/troubleshoot.c
arch/arc/kernel/unwind.c
arch/arc/mm/fault.c
arch/arm/Kconfig
arch/arm/boot/dts/am571x-idk.dts
arch/arm/boot/dts/am572x-idk-common.dtsi
arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
arch/arm/boot/dts/bcm47094-linksys-panamera.dts
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/dra7-ipu-common-early-boot.dtsi [new file with mode: 0644]
arch/arm/boot/dts/dra71-evm.dts
arch/arm/boot/dts/dra72-evm-revc.dts
arch/arm/boot/dts/dra72-evm.dts
arch/arm/boot/dts/dra76-evm.dts
arch/arm/boot/dts/exynos5420-peach-pit.dts
arch/arm/boot/dts/exynos5800-peach-pi.dts
arch/arm/boot/dts/gemini-dlink-dir-685.dts
arch/arm/boot/dts/gemini-dlink-dns-313.dts
arch/arm/boot/dts/imx6ul.dtsi
arch/arm/boot/dts/imx7-colibri.dtsi
arch/arm/boot/dts/imx7d-cl-som-imx7.dts
arch/arm/boot/dts/qcom-ipq4019.dtsi
arch/arm/boot/dts/rk3288-veyron-mickey.dts
arch/arm/boot/dts/rk3288-veyron-minnie.dts
arch/arm/boot/dts/rk3288.dtsi
arch/arm/configs/badge4_defconfig
arch/arm/configs/corgi_defconfig
arch/arm/configs/pxa_defconfig
arch/arm/configs/s3c2410_defconfig
arch/arm/configs/spitz_defconfig
arch/arm/include/asm/dma-mapping.h
arch/arm/kvm/coproc.c
arch/arm/mach-davinci/devices-da8xx.c
arch/arm/mach-davinci/dm355.c
arch/arm/mach-davinci/dm365.c
arch/arm/mach-davinci/dm644x.c
arch/arm/mach-davinci/dm646x.c
arch/arm/mach-davinci/sleep.S
arch/arm/mach-omap1/ams-delta-fiq-handler.S
arch/arm/mach-omap1/ams-delta-fiq.c
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_7xx_data.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/prm3xxx.c
arch/arm/mach-rpc/dma.c
arch/arm/mach-zynq/platsmp.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/fault.c
arch/arm/mm/fault.h
arch/arm/mm/init.c
arch/arm/mm/mmap.c
arch/arm/mm/mmu.c
arch/arm/plat-samsung/watchdog-reset.c
arch/arm/xen/efi.c
arch/arm64/Kconfig
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
arch/arm64/boot/dts/nvidia/tegra210.dtsi
arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/boot/dts/ti/Makefile
arch/arm64/boot/dts/ti/k3-am654-base-board.dts
arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
arch/arm64/boot/dts/ti/k3-j721e-pcie-backplane.dtso [new file with mode: 0644]
arch/arm64/boot/dts/ti/k3-j721e-proc-board-tps65917.dts
arch/arm64/crypto/sha1-ce-glue.c
arch/arm64/crypto/sha2-ce-glue.c
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cmpxchg.h
arch/arm64/include/asm/compat.h
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/ptrace.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/tlbflush.h
arch/arm64/include/uapi/asm/hwcap.h
arch/arm64/include/uapi/asm/ptrace.h
arch/arm64/kernel/acpi.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/hw_breakpoint.c
arch/arm64/kernel/image.h
arch/arm64/kernel/process.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/return_address.c
arch/arm64/kernel/ssbd.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/topology.c
arch/arm64/kvm/hyp/sysreg-sr.c
arch/arm64/kvm/regmap.c
arch/arm64/kvm/sys_regs.c
arch/arm64/mm/init.c
arch/arm64/mm/mmap.c
arch/arm64/mm/proc.S
arch/ia64/kernel/module.c
arch/m68k/include/asm/atarihw.h
arch/m68k/include/asm/io_mm.h
arch/m68k/include/asm/macintosh.h
arch/mips/boot/compressed/Makefile
arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
arch/mips/boot/dts/qca/ar9331.dtsi
arch/mips/configs/mtx1_defconfig
arch/mips/configs/rm200_defconfig
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/mach-ath79/ar933x_uart.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/uapi/asm/hwcap.h
arch/mips/jz4740/board-qi_lb60.c
arch/mips/kernel/cacheinfo.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/i8253.c
arch/mips/kernel/proc.c
arch/mips/lantiq/irq.c
arch/mips/loongson64/Platform
arch/mips/loongson64/common/serial.c
arch/mips/mm/mmap.c
arch/mips/mm/tlbex.c
arch/mips/vdso/Makefile
arch/parisc/boot/compressed/vmlinux.lds.S
arch/parisc/kernel/ptrace.c
arch/parisc/mm/ioremap.c
arch/powerpc/boot/xz_config.h
arch/powerpc/include/asm/cacheflush.h
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/futex.h
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/include/asm/kvm_booke.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/mmu_context.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/mce.c
arch/powerpc/kernel/mce_power.c
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/pci_of_scan.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/swsusp_32.S
arch/powerpc/kernel/traps.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_64_vio_hv.c
arch/powerpc/kvm/book3s_emulate.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_hv_tm.c
arch/powerpc/kvm/book3s_hv_tm_builtin.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/book3s_xive.c
arch/powerpc/kvm/bookehv_interrupts.S
arch/powerpc/kvm/emulate_loadstore.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/pkeys.c
arch/powerpc/mm/tlb-radix.c
arch/powerpc/platforms/4xx/uic.c
arch/powerpc/platforms/powermac/sleep.S
arch/powerpc/platforms/powernv/npu-dma.c
arch/powerpc/platforms/powernv/opal-imc.c
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda-tce.c
arch/powerpc/platforms/powernv/pci.h
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/mobility.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/xive/common.c
arch/powerpc/sysdev/xive/native.c
arch/powerpc/xmon/xmon.c
arch/riscv/include/asm/switch_to.h
arch/riscv/kernel/entry.S
arch/riscv/kernel/ftrace.c
arch/s390/crypto/aes_s390.c
arch/s390/hypfs/inode.c
arch/s390/include/asm/facility.h
arch/s390/include/asm/page.h
arch/s390/kernel/process.c
arch/s390/kernel/topology.c
arch/s390/kernel/vmlinux.lds.S
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/net/bpf_jit_comp.c
arch/sh/boards/Kconfig
arch/sh/include/asm/io.h
arch/sh/kernel/hw_breakpoint.c
arch/um/include/asm/mmu_context.h
arch/x86/Makefile
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/misc.h
arch/x86/boot/compressed/pgtable_64.c
arch/x86/boot/string.c
arch/x86/entry/calling.h
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/events/amd/ibs.c
arch/x86/events/amd/uncore.c
arch/x86/events/intel/core.c
arch/x86/events/intel/uncore.h
arch/x86/events/intel/uncore_snbep.c
arch/x86/hyperv/mmu.c
arch/x86/include/asm/apic.h
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_64.h
arch/x86/include/asm/barrier.h
arch/x86/include/asm/bootparam_utils.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/mwait.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/traps.h
arch/x86/include/asm/uaccess.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/bigsmp_32.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/cacheinfo.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpuid-deps.c
arch/x86/kernel/cpu/mkcapflags.sh
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/head64.c
arch/x86/kernel/idt.c
arch/x86/kernel/irq.c
arch/x86/kernel/kvm.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/setup.c
arch/x86/kernel/smp.c
arch/x86/kernel/sysfb_efi.c
arch/x86/kernel/uprobes.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/emulate.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/irq.c
arch/x86/kvm/irq.h
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/mtrr.c
arch/x86/kvm/pmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/lib/cpu.c
arch/x86/lib/delay.c
arch/x86/math-emu/fpu_emu.h
arch/x86/math-emu/reg_constant.c
arch/x86/mm/fault.c
arch/x86/mm/pti.c
arch/x86/power/cpu.c
arch/x86/purgatory/Makefile
arch/x86/purgatory/purgatory.c
arch/x86/purgatory/string.c [deleted file]
arch/x86/xen/efi.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/xen-asm_64.S
arch/xtensa/kernel/setup.c
arch/xtensa/kernel/xtensa_ksyms.c
block/bfq-iosched.c
block/bio-integrity.c
block/blk-cgroup.c
block/blk-core.c
block/blk-flush.c
block/blk-iolatency.c
block/blk-mq-debugfs.c
block/blk-mq-sysfs.c
block/blk-mq.c
block/blk-mq.h
block/blk-rq-qos.c
block/blk-rq-qos.h
block/blk-sysfs.c
block/blk-throttle.c
block/blk-wbt.c
block/blk.h
block/mq-deadline.c
crypto/asymmetric_keys/Kconfig
crypto/chacha20poly1305.c
crypto/ghash-generic.c
crypto/serpent_generic.c
crypto/skcipher.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpi_video.c
drivers/acpi/acpica/acevents.h
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/evgpeblk.c
drivers/acpi/acpica/evxface.c
drivers/acpi/acpica/evxfgpe.c
drivers/acpi/arm64/iort.c
drivers/acpi/blacklist.c
drivers/acpi/cppc_acpi.c
drivers/acpi/custom_method.c
drivers/acpi/pci_irq.c
drivers/acpi/pptt.c
drivers/android/binder.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/libahci_platform.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata-sff.c
drivers/ata/libata-zpodd.c
drivers/atm/Kconfig
drivers/atm/iphase.c
drivers/auxdisplay/panel.c
drivers/base/base.h
drivers/base/cacheinfo.c
drivers/base/core.c
drivers/base/dd.c
drivers/base/firmware_loader/fallback.c
drivers/base/memory.c
drivers/base/regmap/Kconfig
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap-irq.c
drivers/base/regmap/regmap.c
drivers/base/soc.c
drivers/block/drbd/drbd_receiver.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/null_blk_main.c
drivers/block/pktcdvd.c
drivers/block/xen-blkback/xenbus.c
drivers/bluetooth/btqca.c
drivers/bluetooth/btrtl.c
drivers/bluetooth/btrtl.h
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_ath.c
drivers/bluetooth/hci_bcm.c
drivers/bluetooth/hci_bcsp.c
drivers/bluetooth/hci_intel.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_mrvl.c
drivers/bluetooth/hci_qca.c
drivers/bluetooth/hci_uart.h
drivers/bus/hisi_lpc.c
drivers/bus/ti-sysc.c
drivers/char/hpet.c
drivers/char/hw_random/core.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/mem.c
drivers/char/tpm/st33zp24/i2c.c
drivers/char/tpm/st33zp24/spi.c
drivers/char/tpm/st33zp24/st33zp24.h
drivers/char/tpm/tpm-chip.c
drivers/char/tpm/tpm-sysfs.c
drivers/char/tpm/tpm_i2c_infineon.c
drivers/char/tpm/tpm_i2c_nuvoton.c
drivers/clk/actions/owl-common.c
drivers/clk/at91/clk-generated.c
drivers/clk/at91/clk-main.c
drivers/clk/clk-qoriq.c
drivers/clk/clk-s2mps11.c
drivers/clk/qcom/gcc-sdm845.c
drivers/clk/renesas/clk-mstp.c
drivers/clk/renesas/renesas-cpg-mssr.c
drivers/clk/rockchip/clk-mmc-phase.c
drivers/clk/sirf/clk-common.c
drivers/clk/socfpga/clk-periph-s10.c
drivers/clk/sprd/Kconfig
drivers/clk/sprd/common.c
drivers/clk/sprd/pll.c
drivers/clk/sprd/sc9860-clk.c
drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
drivers/clk/tegra/clk-audio-sync.c
drivers/clk/tegra/clk-tegra-audio.c
drivers/clk/tegra/clk-tegra114.c
drivers/clk/tegra/clk-tegra124.c
drivers/clk/tegra/clk-tegra210.c
drivers/clk/tegra/clk-tegra30.c
drivers/clk/tegra/clk.h
drivers/clk/ti/autoidle.c
drivers/clk/ti/clk-dra7-atl.c
drivers/clk/ti/clkctrl.c
drivers/clk/ti/clockdomain.c
drivers/clk/zte/clk-zx296718.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/timer-ti-dm.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/pasemi-cpufreq.c
drivers/crypto/Kconfig
drivers/crypto/amcc/crypto4xx_alg.c
drivers/crypto/amcc/crypto4xx_core.c
drivers/crypto/amcc/crypto4xx_core.h
drivers/crypto/amcc/crypto4xx_trng.c
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamalg_desc.c
drivers/crypto/caam/caamalg_desc.h
drivers/crypto/cavium/zip/zip_main.c
drivers/crypto/ccp/ccp-crypto-aes-galois.c
drivers/crypto/ccp/ccp-dev.c
drivers/crypto/ccp/ccp-dev.h
drivers/crypto/ccp/ccp-ops.c
drivers/crypto/ccree/cc_aead.c
drivers/crypto/ccree/cc_driver.c
drivers/crypto/ccree/cc_fips.c
drivers/crypto/ccree/cc_pm.c
drivers/crypto/ccree/cc_pm.h
drivers/crypto/hisilicon/sec/sec_algs.c
drivers/crypto/inside-secure/safexcel_cipher.c
drivers/crypto/nx/nx-842-powernv.c
drivers/crypto/qat/qat_common/adf_common_drv.h
drivers/crypto/sa2ul.c
drivers/crypto/sa2ul.h
drivers/crypto/talitos.c
drivers/crypto/talitos.h
drivers/devfreq/exynos-bus.c
drivers/devfreq/governor_passive.c
drivers/devfreq/tegra-devfreq.c
drivers/dma-buf/dma-buf.c
drivers/dma-buf/reservation.c
drivers/dma-buf/sw_sync.c
drivers/dma/bcm2835-dma.c
drivers/dma/imx-sdma.c
drivers/dma/iop-adma.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/ste_dma40.c
drivers/dma/stm32-mdma.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/ti/dma-crossbar.c
drivers/dma/ti/edma.c
drivers/dma/ti/k3-navss-udma.c
drivers/dma/ti/k3-udma.c
drivers/dma/ti/omap-dma.c
drivers/edac/altera_edac.c
drivers/edac/amd64_edac.c
drivers/edac/edac_mc.c
drivers/edac/edac_mc_sysfs.c
drivers/edac/edac_module.h
drivers/edac/ghes_edac.c
drivers/edac/pnd2_edac.c
drivers/firmware/Kconfig
drivers/firmware/arm_scmi/driver.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/efi-bgrt.c
drivers/firmware/efi/efi.c
drivers/firmware/google/vpd.c
drivers/firmware/google/vpd_decode.c
drivers/firmware/google/vpd_decode.h
drivers/firmware/iscsi_ibft.c
drivers/firmware/psci_checker.c
drivers/firmware/qcom_scm.c
drivers/fpga/Kconfig
drivers/fpga/altera-ps-spi.c
drivers/fsi/fsi-scom.c
drivers/gpio/gpio-eic-sprd.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/si.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/calcs/Makefile
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/ast/ast_post.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/bridge/Makefile
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/bridge/cdns-mhdp-common.c [deleted file]
drivers/gpu/drm/bridge/cdns-mhdp-j721e.h
drivers/gpu/drm/bridge/cdns-mhdp.c
drivers/gpu/drm/bridge/cdns-mhdp.h
drivers/gpu/drm/bridge/sii902x.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_debugfs_crc.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_edid_load.c
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/drm_vblank.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_vgpu.c
drivers/gpu/drm/i915/intel_cdclk.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/vlv_dsi_pll.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_drv.h
drivers/gpu/drm/meson/meson_plane.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/head.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
drivers/gpu/drm/omapdrm/dss/dss.c
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
drivers/gpu/drm/omapdrm/omap_plane.c
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/rockchip/Kconfig
drivers/gpu/drm/rockchip/Makefile
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/cdn-dp-core.h
drivers/gpu/drm/rockchip/cdn-dp-reg.c [new file with mode: 0644]
drivers/gpu/drm/rockchip/cdn-dp-reg.h [moved from include/drm/bridge/cdns-mhdp-common.h with 77% similarity]
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/stm/ltdc.c
drivers/gpu/drm/tidss/tidss_dispc6.c
drivers/gpu/drm/tidss/tidss_dispc7.c
drivers/gpu/drm/tidss/tidss_dispc7.h
drivers/gpu/drm/tidss/tidss_drv.c
drivers/gpu/drm/tidss/tidss_kms.c
drivers/gpu/drm/tidss/tidss_kms.h
drivers/gpu/drm/tidss/tidss_plane.c
drivers/gpu/drm/tidss/tidss_scale_coefs.c
drivers/gpu/drm/tidss/tidss_v_crtc.c
drivers/gpu/drm/tidss/tidss_v_plane.c
drivers/gpu/drm/tidss/tidss_wb.c
drivers/gpu/drm/tidss/tidss_wb.h
drivers/gpu/drm/tidss/tidss_wb_m2m.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
drivers/gpu/host1x/bus.c
drivers/gpu/ipu-v3/ipu-ic.c
drivers/hid/hid-a4tech.c
drivers/hid/hid-apple.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-holtek-kbd.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg.c
drivers/hid/hid-lg4ff.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-prodikeys.c
drivers/hid/hid-quirks.c
drivers/hid/hid-sony.c
drivers/hid/hid-tmff.c
drivers/hid/hidraw.c
drivers/hid/usbhid/hiddev.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hid/wacom_wac.h
drivers/hv/channel.c
drivers/hv/hv_kvp.c
drivers/hwmon/acpi_power_meter.c
drivers/hwmon/nct6775.c
drivers/hwmon/nct7802.c
drivers/hwtracing/coresight/coresight-etm4x.c
drivers/hwtracing/intel_th/msu.c
drivers/hwtracing/intel_th/pci.c
drivers/hwtracing/stm/core.c
drivers/i2c/busses/i2c-at91.c
drivers/i2c/busses/i2c-cht-wc.c
drivers/i2c/busses/i2c-designware-slave.c
drivers/i2c/busses/i2c-emev2.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-riic.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/i2c/i2c-core-base.c
drivers/iio/accel/cros_ec_accel_legacy.c
drivers/iio/adc/ad799x.c
drivers/iio/adc/axp288_adc.c
drivers/iio/adc/exynos_adc.c
drivers/iio/adc/hx711.c
drivers/iio/adc/max9611.c
drivers/iio/adc/rcar-gyroadc.c
drivers/iio/adc/stm32-adc-core.c
drivers/iio/adc/stm32-adc-core.h
drivers/iio/adc/stm32-adc.c
drivers/iio/adc/stm32-dfsdm-adc.c
drivers/iio/adc/stm32-dfsdm-core.c
drivers/iio/light/opt3001.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/cq.c
drivers/infiniband/core/device.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/restrack.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/fault.c
drivers/infiniband/hw/hfi1/mad.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/infiniband/sw/rxe/rxe_verbs.h
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/joystick/iforce/iforce-usb.c
drivers/input/misc/da9063_onkey.c
drivers/input/mouse/alps.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/synaptics.c
drivers/input/mouse/trackpoint.h
drivers/input/rmi4/rmi_driver.c
drivers/input/serio/hyperv-keyboard.c
drivers/input/tablet/gtco.c
drivers/input/tablet/kbtab.c
drivers/input/touchscreen/usbtouchscreen.c
drivers/iommu/Makefile
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu.h [new file with mode: 0644]
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_quirks.c [new file with mode: 0644]
drivers/iommu/dma-iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/iommu/omap-iommu-debug.c
drivers/iommu/omap-iommu.c
drivers/iommu/omap-iommu.h
drivers/iommu/omap-iopgtable.h
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-imx-gpcv2.c
drivers/irqchip/irq-meson-gpio.c
drivers/isdn/capi/capi.c
drivers/isdn/hardware/mISDN/hfcsusb.c
drivers/isdn/mISDN/socket.c
drivers/leds/led-triggers.c
drivers/leds/leds-lp5562.c
drivers/lightnvm/pblk-core.c
drivers/mailbox/mailbox.c
drivers/mailbox/qcom-apcs-ipc-mailbox.c
drivers/md/bcache/alloc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/closure.c
drivers/md/bcache/extents.c
drivers/md/bcache/io.c
drivers/md/bcache/journal.c
drivers/md/bcache/super.c
drivers/md/bcache/sysfs.c
drivers/md/bcache/util.h
drivers/md/bcache/writeback.c
drivers/md/dm-cache-target.c
drivers/md/dm-core.h
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-kcopyd.c
drivers/md/dm-mpath.c
drivers/md/dm-raid.c
drivers/md/dm-rq.c
drivers/md/dm-table.c
drivers/md/dm-target.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-verity-target.c
drivers/md/dm-zoned-metadata.c
drivers/md/dm-zoned-reclaim.c
drivers/md/dm-zoned-target.c
drivers/md/dm-zoned.h
drivers/md/dm.c
drivers/md/md.c
drivers/md/md.h
drivers/md/persistent-data/dm-btree.c
drivers/md/persistent-data/dm-space-map-metadata.c
drivers/md/raid0.c
drivers/md/raid0.h
drivers/md/raid1.c
drivers/md/raid5.c
drivers/media/cec/Makefile
drivers/media/cec/cec-adap.c
drivers/media/cec/cec-edid.c [deleted file]
drivers/media/cec/cec-notifier.c
drivers/media/common/videobuf2/videobuf2-core.c
drivers/media/common/videobuf2/videobuf2-dma-sg.c
drivers/media/dvb-core/dvb_frontend.c
drivers/media/dvb-core/dvbdev.c
drivers/media/dvb-frontends/dvb-pll.c
drivers/media/dvb-frontends/tua6100.c
drivers/media/i2c/Kconfig
drivers/media/i2c/Makefile
drivers/media/i2c/adv7511-v4l2.c [moved from drivers/media/i2c/adv7511.c with 99% similarity]
drivers/media/i2c/adv7604.c
drivers/media/i2c/adv7842.c
drivers/media/i2c/ov5640.c
drivers/media/i2c/ov5645.c
drivers/media/i2c/ov7740.c
drivers/media/i2c/ov9650.c
drivers/media/i2c/tc358743.c
drivers/media/i2c/tvp5150.c
drivers/media/media-device.c
drivers/media/pci/saa7134/saa7134-i2c.c
drivers/media/pci/saa7146/hexium_gemini.c
drivers/media/pci/saa7164/saa7164-core.c
drivers/media/platform/coda/coda-bit.c
drivers/media/platform/coda/coda-common.c
drivers/media/platform/davinci/vpss.c
drivers/media/platform/exynos4-is/fimc-is.c
drivers/media/platform/exynos4-is/media-dev.c
drivers/media/platform/fsl-viu.c
drivers/media/platform/marvell-ccic/mcam-core.c
drivers/media/platform/mtk-mdp/mtk_mdp_core.c
drivers/media/platform/omap/omap_vout_vrfb.c
drivers/media/platform/omap3isp/isp.c
drivers/media/platform/omap3isp/ispccdc.c
drivers/media/platform/omap3isp/ispccp2.c
drivers/media/platform/omap3isp/ispcsi2.c
drivers/media/platform/omap3isp/isppreview.c
drivers/media/platform/omap3isp/ispresizer.c
drivers/media/platform/omap3isp/ispstat.c
drivers/media/platform/rcar_fdp1.c
drivers/media/platform/s5p-mfc/s5p_mfc.c
drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
drivers/media/platform/stm32/stm32-dcmi.c
drivers/media/platform/vim2m.c
drivers/media/platform/vimc/vimc-capture.c
drivers/media/platform/vivid/vivid-vid-cap.c
drivers/media/platform/vivid/vivid-vid-common.c
drivers/media/platform/vsp1/vsp1_dl.c
drivers/media/radio/radio-raremono.c
drivers/media/radio/si470x/radio-si470x-usb.c
drivers/media/radio/wl128x/fmdrv_v4l2.c
drivers/media/rc/iguanair.c
drivers/media/rc/imon.c
drivers/media/rc/ir-spi.c
drivers/media/rc/mceusb.c
drivers/media/rc/mtk-cir.c
drivers/media/usb/au0828/au0828-core.c
drivers/media/usb/cpia2/cpia2_usb.c
drivers/media/usb/dvb-usb/dib0700_devices.c
drivers/media/usb/dvb-usb/dvb-usb-init.c
drivers/media/usb/dvb-usb/pctv452e.c
drivers/media/usb/dvb-usb/technisat-usb2.c
drivers/media/usb/em28xx/em28xx-cards.c
drivers/media/usb/gspca/konica.c
drivers/media/usb/gspca/nw80x.c
drivers/media/usb/gspca/ov519.c
drivers/media/usb/gspca/ov534.c
drivers/media/usb/gspca/ov534_9.c
drivers/media/usb/gspca/se401.c
drivers/media/usb/gspca/sn9c20x.c
drivers/media/usb/gspca/sonixb.c
drivers/media/usb/gspca/sonixj.c
drivers/media/usb/gspca/spca1528.c
drivers/media/usb/gspca/sq930x.c
drivers/media/usb/gspca/sunplus.c
drivers/media/usb/gspca/vc032x.c
drivers/media/usb/gspca/w996Xcf.c
drivers/media/usb/hdpvr/hdpvr-core.c
drivers/media/usb/hdpvr/hdpvr-video.c
drivers/media/usb/pvrusb2/pvrusb2-hdw.c
drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
drivers/media/usb/pvrusb2/pvrusb2-std.c
drivers/media/usb/stkwebcam/stk-webcam.c
drivers/media/usb/tm6000/tm6000-dvb.c
drivers/media/usb/ttusb-dec/ttusb_dec.c
drivers/media/usb/uvc/uvc_ctrl.c
drivers/media/v4l2-core/v4l2-ctrls.c
drivers/media/v4l2-core/v4l2-dv-timings.c
drivers/memstick/core/memstick.c
drivers/memstick/host/jmb38x_ms.c
drivers/mfd/Kconfig
drivers/mfd/arizona-core.c
drivers/mfd/hi655x-pmic.c
drivers/mfd/intel-lpss-pci.c
drivers/mfd/madera-core.c
drivers/mfd/mfd-core.c
drivers/misc/eeprom/at24.c
drivers/misc/mei/bus-fixup.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/hw-me.c
drivers/misc/mei/hw-me.h
drivers/misc/mei/mei_dev.h
drivers/misc/mei/pci-me.c
drivers/misc/vmw_vmci/vmci_doorbell.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio_irq.c
drivers/mmc/host/cavium.c
drivers/mmc/host/cqhci.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/meson-mx-sdio.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-of-arasan.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sdhci-omap.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci-pci-o2micro.c
drivers/mmc/host/sdhci-pci.h
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci_am654.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/nand/raw/mtk_nand.c
drivers/mtd/nand/raw/nand_micron.c
drivers/mtd/nand/spi/core.c
drivers/net/arcnet/arcnet.c
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_hsi.c
drivers/net/can/dev.c
drivers/net/can/rcar/rcar_canfd.c
drivers/net/can/sja1000/peak_pcmcia.c
drivers/net/can/spi/mcp251x.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/qca8k.c
drivers/net/dsa/rtl8366.c
drivers/net/dsa/rtl8366rb.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cavium/common/cavium_ptp.c
drivers/net/ethernet/cavium/liquidio/request_manager.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hns3/hnae3.c
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns_mdio.c
drivers/net/ethernet/i825xx/lasi_82596.c
drivers/net/ethernet/i825xx/lib82596.c
drivers/net/ethernet/i825xx/sni_82596.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/seeq/sgiseeq.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ethernet/ti/am65-cpts.c
drivers/net/ethernet/ti/am65-cpts.h
drivers/net/ethernet/ti/icssg_prueth.c
drivers/net/ethernet/ti/icssg_prueth.h
drivers/net/ethernet/ti/prueth.c
drivers/net/ethernet/toshiba/tc35815.c
drivers/net/ethernet/tundra/tsi108_eth.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/gtp.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ieee802154/atusb.c
drivers/net/ieee802154/ca8210.c
drivers/net/ieee802154/mac802154_hwsim.c
drivers/net/macsec.c
drivers/net/phy/dp83867.c
drivers/net/phy/national.c
drivers/net/phy/phy_device.c
drivers/net/phy/phy_led_triggers.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp.c
drivers/net/ppp/ppp_generic.c
drivers/net/ppp/ppp_mppe.c
drivers/net/ppp/pppoe.c
drivers/net/ppp/pppox.c
drivers/net/ppp/pptp.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/asix_devices.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/cx82310_eth.c
drivers/net/usb/hso.c
drivers/net/usb/kalmia.c
drivers/net/usb/lan78xx.c
drivers/net/usb/pegasus.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/usbnet.c
drivers/net/vrf.c
drivers/net/wimax/i2400m/fw.c
drivers/net/wireless/ath/ath10k/hw.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/sdio.c
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/usb.c
drivers/net/wireless/ath/ath6kl/wmi.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/dfs_pattern_detector.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/fw/smem.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.h
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/libertas/if_usb.c
drivers/net/wireless/marvell/mwifiex/ie.c
drivers/net/wireless/marvell/mwifiex/main.h
drivers/net/wireless/marvell/mwifiex/scan.c
drivers/net/wireless/marvell/mwifiex/uap_cmd.c
drivers/net/wireless/mediatek/mt76/mt76x0/init.c
drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
drivers/net/wireless/mediatek/mt7601u/dma.c
drivers/net/wireless/mediatek/mt7601u/tx.c
drivers/net/wireless/realtek/rtlwifi/usb.c
drivers/net/wireless/rsi/rsi_91x_usb.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/nfc/nfcmrvl/main.c
drivers/nfc/nfcmrvl/uart.c
drivers/nfc/nfcmrvl/usb.c
drivers/nfc/st-nci/se.c
drivers/nfc/st21nfca/se.c
drivers/ntb/hw/Kconfig
drivers/ntb/hw/Makefile
drivers/ntb/hw/epf/Kconfig [new file with mode: 0644]
drivers/ntb/hw/epf/Makefile [new file with mode: 0644]
drivers/ntb/hw/epf/ntb_hw_epf.c [new file with mode: 0644]
drivers/ntb/test/ntb_perf.c
drivers/ntb/test/ntb_tool.c
drivers/nvdimm/bus.c
drivers/nvdimm/dax_devs.c
drivers/nvdimm/nd-core.h
drivers/nvdimm/pfn.h
drivers/nvdimm/pfn_devs.c
drivers/nvdimm/region.c
drivers/nvdimm/region_devs.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/loop.c
drivers/nvmem/core.c
drivers/parisc/dino.c
drivers/pci/controller/dwc/pci-dra7xx.c
drivers/pci/controller/dwc/pci-exynos.c
drivers/pci/controller/dwc/pci-imx6.c
drivers/pci/controller/dwc/pci-keystone.c
drivers/pci/controller/dwc/pcie-designware-host.c
drivers/pci/controller/dwc/pcie-histb.c
drivers/pci/controller/dwc/pcie-kirin.c
drivers/pci/controller/dwc/pcie-qcom.c
drivers/pci/controller/pci-hyperv.c
drivers/pci/controller/pci-tegra.c
drivers/pci/controller/pcie-cadence-ep.c
drivers/pci/controller/pcie-mobiveil.c
drivers/pci/controller/pcie-rockchip-host.c
drivers/pci/controller/pcie-xilinx-nwl.c
drivers/pci/controller/vmd.c
drivers/pci/endpoint/Makefile
drivers/pci/endpoint/functions/Kconfig
drivers/pci/endpoint/functions/Makefile
drivers/pci/endpoint/functions/pci-epf-ntb.c [new file with mode: 0644]
drivers/pci/endpoint/functions/pci-epf-test.c
drivers/pci/endpoint/pci-ep-cfs.c
drivers/pci/endpoint/pci-epc-core.c
drivers/pci/endpoint/pci-epf-bus.c [new file with mode: 0644]
drivers/pci/endpoint/pci-epf-core.c
drivers/pci/hotplug/rpaphp_core.c
drivers/pci/pci-driver.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.c
drivers/pci/quirks.c
drivers/perf/arm_pmu.c
drivers/phy/cadence/Kconfig
drivers/phy/cadence/Makefile
drivers/phy/cadence/phy-cadence-torrent.c [moved from drivers/phy/cadence/phy-cadence-dp.c with 80% similarity]
drivers/phy/renesas/phy-rcar-gen2.c
drivers/phy/renesas/phy-rcar-gen3-usb2.c
drivers/phy/ti/phy-j721e-wiz.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/mediatek/mtk-eint.c
drivers/pinctrl/meson/pinctrl-meson-gxbb.c
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-mcp23s08.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/sprd/pinctrl-sprd.c
drivers/pinctrl/tegra/pinctrl-tegra.c
drivers/platform/x86/intel_pmc_core.c
drivers/platform/x86/pmc_atom.c
drivers/power/supply/power_supply_sysfs.c
drivers/power/supply/sbs-battery.c
drivers/pps/pps.c
drivers/pwm/pwm-pru.c
drivers/pwm/pwm-stm32-lp.c
drivers/rapidio/devices/rio_mport_cdev.c
drivers/ras/cec.c
drivers/regulator/core.c
drivers/regulator/lm363x-regulator.c
drivers/regulator/s2mps11.c
drivers/remoteproc/omap_remoteproc.c
drivers/remoteproc/qcom_q6v5.c
drivers/remoteproc/qcom_q6v5_pil.c
drivers/remoteproc/remoteproc_core.c
drivers/remoteproc/ti_k3_dsp_remoteproc.c
drivers/remoteproc/ti_k3_r5_remoteproc.c
drivers/rpmsg-kdrv/rpmsg_kdrv_switch.c
drivers/rtc/rtc-pcf85363.c
drivers/rtc/rtc-snvs.c
drivers/s390/block/dasd_alias.c
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/css.c
drivers/s390/cio/qdio_main.c
drivers/s390/cio/qdio_setup.c
drivers/s390/cio/qdio_thinint.c
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/zcrypt_cex2a.c
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/crypto/zcrypt_pcixcc.c
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/virtio/virtio_ccw.c
drivers/scsi/NCR5380.c
drivers/scsi/NCR5380.h
drivers/scsi/ch.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/mac_scsi.c
drivers/scsi/megaraid.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_logging.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/ufs/ufshcd.c
drivers/soc/ti/pruss.c
drivers/soundwire/Kconfig
drivers/soundwire/Makefile
drivers/soundwire/cadence_master.c
drivers/soundwire/intel.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-bcm2835aux.c
drivers/spi/spi-gpio.c
drivers/staging/android/ion/ion_page_pool.c
drivers/staging/comedi/drivers/dt3000.c
drivers/staging/erofs/dir.c
drivers/staging/erofs/unzip_vle.c
drivers/staging/fbtft/fbtft-core.c
drivers/staging/gasket/apex_driver.c
drivers/staging/media/davinci_vpfe/vpfe_video.c
drivers/staging/media/imx/imx6-mipi-csi2.c
drivers/staging/vt6655/device_main.c
drivers/staging/vt6656/main_usb.c
drivers/staging/wilc1000/linux_wlan.c
drivers/staging/wlan-ng/cfg80211.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/target_core_iblock.c
drivers/target/target_core_iblock.h
drivers/target/target_core_user.c
drivers/thermal/thermal_core.c
drivers/thermal/thermal_hwmon.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/cpm_uart/cpm_uart_core.c
drivers/tty/serial/digicolor-usart.c
drivers/tty/serial/imx.c
drivers/tty/serial/max310x.c
drivers/tty/serial/msm_serial.c
drivers/tty/serial/pru_suart.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/serial_mctrl_gpio.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/sprd_serial.c
drivers/tty/serial/uartlite.c
drivers/tty/tty_ldsem.c
drivers/usb/cdns3/gadget.c
drivers/usb/chipidea/udc.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-wdm.c
drivers/usb/class/usblp.c
drivers/usb/core/config.c
drivers/usb/core/devio.c
drivers/usb/core/file.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/hub.c
drivers/usb/core/message.c
drivers/usb/dwc2/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_mass_storage.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/lpc32xx_udc.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/fotg210-hcd.c
drivers/usb/host/hwa-hc.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/xhci-rcar.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci.c
drivers/usb/image/microtek.c
drivers/usb/misc/Kconfig
drivers/usb/misc/Makefile
drivers/usb/misc/adutux.c
drivers/usb/misc/chaoskey.c
drivers/usb/misc/iowarrior.c
drivers/usb/misc/ldusb.c
drivers/usb/misc/legousbtower.c
drivers/usb/misc/rio500.c [deleted file]
drivers/usb/misc/rio500_usb.h [deleted file]
drivers/usb/misc/usblcd.c
drivers/usb/misc/yurex.c
drivers/usb/renesas_usbhs/common.h
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/fifo.h
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/renesas_usbhs/pipe.c
drivers/usb/renesas_usbhs/pipe.h
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/keyspan.c
drivers/usb/serial/option.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/usb-serial.c
drivers/usb/storage/realtek_cr.c
drivers/usb/storage/unusual_devs.h
drivers/usb/typec/tcpm.c
drivers/usb/usb-skeleton.c
drivers/vfio/pci/vfio_pci.c
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/vhost/test.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/vhost/vsock.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/ssd1307fb.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/aspeed_wdt.c
drivers/watchdog/bcm2835_wdt.c
drivers/watchdog/imx2_wdt.c
drivers/watchdog/rti_wdt.c [new file with mode: 0644]
drivers/watchdog/watchdog_dev.c
drivers/xen/balloon.c
drivers/xen/events/events_base.c
drivers/xen/evtchn.c
drivers/xen/pci.c
drivers/xen/swiotlb-xen.c
drivers/xen/xen-pciback/conf_space_capability.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/9p/cache.c
fs/9p/vfs_addr.c
fs/9p/vfs_file.c
fs/adfs/super.c
fs/afs/callback.c
fs/afs/cell.c
fs/afs/cmservice.c
fs/afs/dir.c
fs/afs/file.c
fs/afs/internal.h
fs/afs/vlclient.c
fs/afs/volume.c
fs/binfmt_elf.c
fs/btrfs/backref.c
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/props.c
fs/btrfs/qgroup.c
fs/btrfs/ref-verify.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/inode.c
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/ceph/snap.c
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/cifs_fs_sb.h
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/smb1ops.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smbdirect.c
fs/cifs/smbdirect.h
fs/cifs/transport.c
fs/cifs/xattr.c
fs/coda/file.c
fs/coda/psdev.c
fs/compat_ioctl.c
fs/crypto/crypto.c
fs/dax.c
fs/dlm/lowcomms.c
fs/ecryptfs/crypto.c
fs/exec.c
fs/ext4/block_validity.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/ext4_jbd2.h
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/move_extent.c
fs/ext4/namei.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/gc.c
fs/f2fs/inline.c
fs/f2fs/inode.c
fs/f2fs/node.c
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/xattr.c
fs/fat/dir.c
fs/fat/fatent.c
fs/fs-writeback.c
fs/fuse/cuse.c
fs/fuse/dev.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/bmap.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/libfs.c
fs/nfs/client.c
fs/nfs/delegation.c
fs/nfs/delegation.h
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/fscache.c
fs/nfs/fscache.h
fs/nfs/inode.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4client.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/proc.c
fs/nfs/read.c
fs/nfs/super.c
fs/nfs/write.c
fs/notify/fanotify/fanotify.c
fs/notify/inotify/inotify_fsnotify.c
fs/ocfs2/dlm/dlmunlock.c
fs/ocfs2/journal.c
fs/ocfs2/localalloc.c
fs/ocfs2/xattr.c
fs/open.c
fs/overlayfs/export.c
fs/overlayfs/inode.c
fs/overlayfs/ovl_entry.h
fs/overlayfs/super.c
fs/proc/base.c
fs/proc/page.c
fs/proc/proc_sysctl.c
fs/proc/task_mmu.c
fs/proc/task_nommu.c
fs/pstore/inode.c
fs/pstore/ram.c
fs/read_write.c
fs/seq_file.c
fs/statfs.c
fs/ubifs/tnc.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_attr.h [moved from fs/xfs/xfs_attr.h with 98% similarity]
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_defer.c
fs/xfs/libxfs/xfs_ialloc_btree.c
fs/xfs/xfs_attr_list.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_bmap_util.h
fs/xfs/xfs_dquot.c
fs/xfs/xfs_file.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_reflink.c
fs/xfs/xfs_super.c
fs/xfs/xfs_xattr.c
include/acpi/actbl2.h
include/asm-generic/bug.h
include/asm-generic/getorder.h
include/clocksource/timer-ti-dm.h
include/drm/drm_device.h
include/drm/drm_displayid.h
include/drm/drm_vblank.h
include/drm/i915_pciids.h
include/dt-bindings/phy/phy.h
include/kvm/arm_vgic.h
include/linux/acpi.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/bug.h
include/linux/ccp.h
include/linux/ceph/buffer.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/coda.h
include/linux/coda_psdev.h
include/linux/cpuhotplug.h
include/linux/cred.h
include/linux/device-mapper.h
include/linux/device.h
include/linux/dma-mapping.h
include/linux/fs.h
include/linux/gpio.h
include/linux/gpio/consumer.h
include/linux/host1x.h
include/linux/hwmon.h
include/linux/ieee80211.h
include/linux/if_pppox.h
include/linux/iova.h
include/linux/jbd2.h
include/linux/kernel.h
include/linux/kvm_host.h
include/linux/logic_pio.h
include/linux/mlx5/fs.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mmc/host.h
include/linux/nfs_page.h
include/linux/nfs_xdr.h
include/linux/pci-epc.h
include/linux/pci-epf.h
include/linux/pci_ids.h
include/linux/perf_event.h
include/linux/platform_data/dmtimer-omap.h
include/linux/platform_data/iommu-omap.h
include/linux/platform_data/remoteproc-omap.h
include/linux/pruss.h
include/linux/pruss_driver.h
include/linux/quotaops.h
include/linux/rcupdate.h
include/linux/remoteproc.h
include/linux/rpmsg-remotedev/rpmsg-remotedev.h
include/linux/sched.h
include/linux/sched/mm.h
include/linux/sched/numa_balancing.h
include/linux/watchdog.h
include/media/cec.h
include/media/v4l2-dv-timings.h
include/net/act_api.h
include/net/cfg80211.h
include/net/dst.h
include/net/ip_vs.h
include/net/netfilter/nf_tables.h
include/net/psample.h
include/net/sock_reuseport.h
include/net/tcp.h
include/net/tls.h
include/rdma/ib_verbs.h
include/scsi/libfcoe.h
include/scsi/scsi_dbg.h
include/scsi/scsi_eh.h
include/sound/compress_driver.h
include/sound/soc-dapm.h
include/trace/events/btrfs.h
include/trace/events/rxrpc.h
include/uapi/linux/bpf.h
include/uapi/linux/coda_psdev.h
include/uapi/linux/isdn/capicmd.h
include/uapi/linux/keyctl.h
include/uapi/linux/netfilter/xt_nfacct.h
include/uapi/linux/nilfs2_ondisk.h
include/uapi/linux/nl80211.h
include/xen/events.h
init/initramfs.c
ipc/mqueue.c
kernel/bpf/Makefile
kernel/bpf/syscall.c
kernel/cgroup/cgroup.c
kernel/cpu.c
kernel/cred.c
kernel/dma/coherent.c
kernel/elfcore.c
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/exit.c
kernel/fork.c
kernel/irq/autoprobe.c
kernel/irq/chip.c
kernel/irq/cpuhotplug.c
kernel/irq/internals.h
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/irq/resend.c
kernel/kallsyms.c
kernel/kexec_core.c
kernel/kprobes.c
kernel/livepatch/core.c
kernel/locking/lockdep.c
kernel/locking/lockdep_proc.c
kernel/locking/qspinlock_paravirt.h
kernel/module.c
kernel/padata.c
kernel/panic.c
kernel/pid_namespace.c
kernel/printk/printk.c
kernel/resource.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/membarrier.c
kernel/sched/sched-pelt.h
kernel/time/alarmtimer.c
kernel/time/ntp.c
kernel/time/posix-cpu-timers.c
kernel/time/tick-broadcast-hrtimer.c
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/time/timer_list.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_hwlat.c
lib/Kconfig.debug
lib/logic_pio.c
lib/reed_solomon/decode_rs.c
lib/scatterlist.c
lib/test_firmware.c
lib/test_overflow.c
lib/test_string.c
lib/textsearch.c
mm/cma.c
mm/compaction.c
mm/filemap.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/kmemleak.c
mm/memcontrol.c
mm/memfd.c
mm/memory-failure.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/mmu_notifier.c
mm/nommu.c
mm/oom_kill.c
mm/page_owner.c
mm/rmap.c
mm/slub.c
mm/swap.c
mm/usercopy.c
mm/vmalloc.c
mm/vmpressure.c
mm/vmscan.c
mm/zsmalloc.c
net/9p/client.c
net/9p/trans_virtio.c
net/9p/trans_xen.c
net/appletalk/ddp.c
net/ax25/af_ax25.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v_ogm.c
net/batman-adv/hard-interface.c
net/batman-adv/netlink.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/l2cap_core.c
net/bluetooth/smp.c
net/bridge/br_input.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_stp_bpdu.c
net/bridge/br_vlan.c
net/bridge/netfilter/ebtables.c
net/can/gw.c
net/ceph/osd_client.c
net/core/dev.c
net/core/filter.c
net/core/neighbour.c
net/core/netpoll.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_reuseport.c
net/core/stream.c
net/dsa/switch.c
net/ieee802154/socket.c
net/ipv4/datagram.c
net/ipv4/devinet.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ipip.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/raw_diag.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/ip6_tunnel.c
net/ipv6/mcast.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/ipv6/ping.c
net/ipv6/route.c
net/ipv6/udp.c
net/key/af_key.c
net/l2tp/l2tp_ppp.c
net/mac80211/cfg.c
net/mac80211/debugfs_netdev.c
net/mac80211/driver-ops.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/util.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipset/ip_set_hash_ipmac.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_queue.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink.c
net/netfilter/nft_connlimit.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_hash.c
net/netfilter/nft_lookup.c
net/netfilter/nft_socket.c
net/netfilter/xt_nfacct.c
net/netfilter/xt_physdev.c
net/netrom/af_netrom.c
net/nfc/llcp_sock.c
net/nfc/nci/data.c
net/nfc/netlink.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/packet/af_packet.c
net/psample/psample.c
net/qrtr/qrtr.c
net/rds/bind.c
net/rds/ib.c
net/rds/recv.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/input.c
net/rxrpc/local_object.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/rxrpc/sendmsg.c
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_sample.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_skbmod.c
net/sched/act_tunnel_key.c
net/sched/act_vlan.c
net/sched/cls_api.c
net/sched/sch_api.c
net/sched/sch_cbq.c
net/sched/sch_codel.c
net/sched/sch_dsmark.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sched/sch_hhf.c
net/sched/sch_netem.c
net/sched/sch_sfq.c
net/sctp/protocol.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/stream.c
net/smc/af_smc.c
net/smc/smc_tx.c
net/tipc/addr.c
net/tipc/link.c
net/tipc/msg.c
net/tipc/name_distr.c
net/tipc/netlink_compat.c
net/tls/tls_device.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/hyperv_transport.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/core.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/scan.c
net/wireless/util.c
net/wireless/wext-compat.c
net/wireless/wext-sme.c
net/xdp/xdp_umem.c
net/xdp/xsk_queue.h
net/xfrm/Kconfig
net/xfrm/xfrm_user.c
scripts/Kconfig.include
scripts/Makefile.modpost
scripts/decode_stacktrace.sh
scripts/gcc-plugins/randomize_layout_plugin.c
scripts/genksyms/keywords.c
scripts/genksyms/parse.y
scripts/kallsyms.c
scripts/kconfig/confdata.c
scripts/kconfig/expr.h
scripts/namespace.pl
scripts/recordmcount.h
scripts/sphinx-pre-install
security/apparmor/policy_unpack.c
security/integrity/ima/ima_crypto.c
security/keys/request_key_auth.c
security/selinux/hooks.c
security/selinux/ss/policydb.c
security/smack/smack_access.c
security/smack/smack_lsm.c
sound/ac97/bus.c
sound/core/compress_offload.c
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_fifo.c
sound/core/seq/seq_fifo.h
sound/firewire/dice/dice-alesis.c
sound/firewire/motu/motu.c
sound/firewire/packets-buffer.c
sound/firewire/tascam/tascam-pcm.c
sound/firewire/tascam/tascam-stream.c
sound/hda/hdac_controller.c
sound/hda/hdac_i915.c
sound/i2c/other/ak4xxx-adda.c
sound/pci/hda/hda_auto_parser.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_controller.h
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_generic.h
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/es8316.c
sound/soc/codecs/hdac_hdmi.c
sound/soc/codecs/pcm3168a.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/tlv320aic31xx.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/boards/cht_bsw_max98090_ti.c
sound/soc/intel/common/sst-ipc.c
sound/soc/intel/skylake/skl-debug.c
sound/soc/intel/skylake/skl-nhlt.c
sound/soc/meson/axg-tdm.h
sound/soc/rockchip/rockchip_i2s.c
sound/soc/sh/rcar/adg.c
sound/soc/sh/rcar/core.c
sound/soc/soc-dapm.c
sound/soc/soc-generic-dmaengine-pcm.c
sound/soc/soc-pcm.c
sound/soc/sunxi/sun4i-i2s.c
sound/soc/ti/j721e-evm.c
sound/soc/uniphier/aio-cpu.c
sound/soc/uniphier/aio.h
sound/sound_core.c
sound/usb/hiface/pcm.c
sound/usb/line6/pcm.c
sound/usb/line6/podhd.c
sound/usb/mixer.c
sound/usb/mixer_quirks.c
sound/usb/pcm.c
sound/usb/quirks.c
sound/usb/stream.c
ti_config_fragments/audio_display.cfg
ti_config_fragments/connectivity.cfg
ti_config_fragments/v8_baseport.cfg
tools/bpf/bpftool/common.c
tools/bpf/bpftool/jit_disasm.c
tools/bpf/bpftool/prog.c
tools/hv/hv_kvp_daemon.c
tools/hv/hv_vss_daemon.c
tools/hv/lsvmbus
tools/iio/iio_utils.c
tools/include/uapi/asm/bitsperlong.h
tools/include/uapi/linux/bpf.h
tools/lib/bpf/bpf.c
tools/lib/bpf/libbpf.c
tools/lib/traceevent/Makefile
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-plugin.c
tools/objtool/Makefile
tools/objtool/elf.c
tools/perf/Makefile.config
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/s390/util/machine.c
tools/perf/arch/x86/util/unwind-libunwind.c
tools/perf/bench/numa.c
tools/perf/builtin-ftrace.c
tools/perf/builtin-probe.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-version.c
tools/perf/jvmti/libjvmti.c
tools/perf/perf.c
tools/perf/perf.h
tools/perf/pmu-events/jevents.c
tools/perf/tests/mmap-thread-lookup.c
tools/perf/tests/parse-events.c
tools/perf/tests/shell/record+probe_libc_inet_pton.sh
tools/perf/tests/shell/trace+probe_vfs_getname.sh
tools/perf/trace/beauty/ioctl.c
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/util/annotate.c
tools/perf/util/cpumap.c
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/jitdump.c
tools/perf/util/llvm-utils.c
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/metricgroup.c
tools/perf/util/session.c
tools/perf/util/stat-shadow.c
tools/perf/util/stat.c
tools/perf/util/stat.h
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/thread.c
tools/perf/util/xyarray.h
tools/power/cpupower/utils/cpufreq-set.c
tools/power/x86/turbostat/turbostat.c
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
tools/testing/nvdimm/test/nfit_test.h
tools/testing/selftests/bpf/sendmsg6_prog.c
tools/testing/selftests/bpf/test_lwt_seg6local.c
tools/testing/selftests/bpf/test_sock.c
tools/testing/selftests/cgroup/cgroup_util.c
tools/testing/selftests/kvm/config [new file with mode: 0644]
tools/testing/selftests/kvm/lib/x86.c
tools/testing/selftests/kvm/platform_info_test.c
tools/testing/selftests/net/fib_rule_tests.sh
tools/testing/selftests/net/forwarding/gre_multipath.sh
tools/testing/selftests/net/udpgso.c
usr/Makefile
virt/kvm/arm/arm.c
virt/kvm/arm/mmio.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-mmio.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic.c
virt/kvm/arm/vgic/vgic.h
virt/kvm/coalesced_mmio.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

diff --git a/Documentation/PCI/endpoint/pci-test-ntb.txt b/Documentation/PCI/endpoint/pci-test-ntb.txt
new file mode 100644 (file)
index 0000000..c8bfe9d
--- /dev/null
@@ -0,0 +1,315 @@
+                              PCI NTB FUNCTION
+                   Kishon Vijay Abraham I <kishon@ti.com>
+
+PCI NTB Function allows two different systems (or hosts) to communicate
+with each other by configuring the endpoint instances in such a way that
+transactions from one system are routed to the other system.
+
+In the below diagram, PCI NTB function configures the SoC with multiple
+PCIe Endpoint (EP) instances in such a way that transactions from one EP
+controller are routed to the other EP controller. Once PCI NTB function
+configures the SoC with multiple EP instances, HOST1 and HOST2 can
+communicate with each other using SoC as a bridge.
+
+   +-------------+                                   +-------------+
+   |             |                                   |             |
+   |    HOST1    |                                   |    HOST2    |
+   |             |                                   |             |
+   +------^------+                                   +------^------+
+          |                                                 |
+          |                                                 |
++---------|-------------------------------------------------|---------+
+|  +------v------+                                   +------v------+  |
+|  |             |                                   |             |  |
+|  |     EP      |                                   |     EP      |  |
+|  | CONTROLLER1 |                                   | CONTROLLER2 |  |
+|  |             <----------------------------------->             |  |
+|  |             |                                   |             |  |
+|  |             |                                   |             |  |
+|  |             |  SoC With Multiple EP Instances   |             |  |
+|  |             |  (Configured using NTB Function)  |             |  |
+|  +-------------+                                   +-------------+  |
++---------------------------------------------------------------------+
+
+Constructs used for Implementing NTB:
+
+       *) Config Region
+       *) Self Scratchpad Registers
+       *) Peer Scratchpad Registers
+       *) Doorbell Registers
+       *) Memory Window
+
+Modeling Constructs:
+
+  There are 5 or more distinct regions (config, self scratchpad, peer
+scratchpad, doorbell, one or more memory windows) to be modeled to achieve
+NTB functionality. At least one memory window is required while more than
+one is permitted. All these regions should be mapped to BAR for hosts to
+access these regions.
+
+If one 32-bit BAR is allocated for each of these regions, the scheme would
+look like
+       BAR0 -> Config Region
+       BAR1 -> Self Scratchpad
+       BAR2 -> Peer Scratchpad
+       BAR3 -> Doorbell
+       BAR4 -> Memory Window 1
+       BAR5 -> Memory Window 2
+
+However if we allocate a separate BAR for each of the region, there would not
+be enough BARs for all the regions in a platform that supports only 64-bit
+BAR.
+
+In order to be supported by most of the platforms, the regions should be
+packed and mapped to BARs in a way that provides NTB functionality and
+also making sure a host doesn't access any region that it is not supposed
+to.
+
+The following scheme is used in EPF NTB Function
+
+       BAR0 -> Config Region + Self Scratchpad
+       BAR1 -> Peer Scratchpad
+       BAR2 -> Doorbell + Memory Window 1
+       BAR3 -> Memory Window 2
+       BAR4 -> Memory Window 3
+       BAR5 -> Memory Window 4
+
+With this scheme, for the basic NTB functionality 3 BARs should be sufficient.
+
+Modeling Config/Scratchpad Region:
+
++-----------------+------->+------------------+        +-----------------+
+|       BAR0      |        |  CONFIG REGION   |        |       BAR0      |
++-----------------+----+   +------------------+<-------+-----------------+
+|       BAR1      |    |   |SCRATCHPAD REGION |        |       BAR1      |
++-----------------+    +-->+------------------+<-------+-----------------+
+|       BAR2      |            Local Memory            |       BAR2      |
++-----------------+                                    +-----------------+
+|       BAR3      |                                    |       BAR3      |
++-----------------+                                    +-----------------+
+|       BAR4      |                                    |       BAR4      |
++-----------------+                                    +-----------------+
+|       BAR5      |                                    |       BAR5      |
++-----------------+                                    +-----------------+
+  EP CONTROLLER 1                                        EP CONTROLLER 2
+
+Above diagram shows Config region + Scratchpad region for HOST1 (connected to
+EP controller 1) allocated in local memory. The HOST1 can access the config
+region and scratchpad region (self scratchpad) using BAR0 of EP controller 1.
+The peer host (HOST2 connected to EP controller 2) can also access this
+scratchpad region (peer scratchpad) using BAR1 of EP controller 2. This
+diagram shows the case where the Config region and Scratchpad region are
+allocated for HOST1, however the same is applicable for HOST2.
+
+Modeling Doorbell/Memory Window 1:
+
++-----------------+    +----->+----------------+-----------+-----------------+
+|       BAR0      |    |      |   Doorbell 1   +-----------> MSI|X ADDRESS 1 |
++-----------------+    |      +----------------+           +-----------------+
+|       BAR1      |    |      |   Doorbell 2   +---------+ |                 |
++-----------------+    |      +----------------+         | |                 |
+|       BAR2      |    |      |   Doorbell 3   +-------+ | +-----------------+
++-----------------+    |      +----------------+       | +-> MSI|X ADDRESS 2 |
+|       BAR3      |    |      |   Doorbell 4   +-----+ |   +-----------------+
++----------------------+      +----------------+     | |   |                 |
+|       BAR4      |           |                |     | |   +-----------------+
++----------------------+      |      MW1       +---+ | +-->+ MSI|X ADDRESS 3||
+|       BAR5      |    |      |                |   | |     +-----------------+
++-----------------+    +----->-----------------+   | |     |                 |
+  EP CONTROLLER 1             |                |   | |     +-----------------+
+                              |                |   | +---->+ MSI|X ADDRESS 4 |
+                              +----------------+   |       +-----------------+
+                               EP CONTROLLER 2     |       |                 |
+                                 (OB SPACE)        |       |                 |
+                                                   +------->      MW1        |
+                                                           |                 |
+                                                           |                 |
+                                                           +-----------------+
+                                                           |                 |
+                                                           |                 |
+                                                           |                 |
+                                                           |                 |
+                                                           |                 |
+                                                           +-----------------+
+                                                           PCI Address Space
+                                                           (Managed by HOST2)
+
+Above diagram shows how the doorbell and memory window 1 is mapped so that
+HOST1 can raise doorbell interrupt on HOST2 and also how HOST1 can access
+buffers exposed by HOST2 using memory window1 (MW1). Here doorbell and
+memory window 1 regions are allocated in EP controller 2 outbound (OB) address
+space. Allocating and configuring BARs for doorbell and memory window1
+is done during the initialization phase of NTB endpoint function driver.
+Mapping from EP controller 2 OB space to PCI address space is done when HOST2
+sends CMD_CONFIGURE_MW/CMD_CONFIGURE_DOORBELL. The commands are explained
+below.
+
+Modeling Optional Memory Windows:
+
+This is modeled the same way as MW1 but each of the additional memory windows
+is mapped to separate BARs.
+
+Config Region:
+
+Config Region is a construct that is specific to NTB implemented using NTB
+Endpoint Function Driver. The host and endpoint side NTB function driver will
+exchange information with each other using this region. Config Region has
+Control/Status Registers for configuring the Endpoint Controller. Host can
+write into this region for configuring the outbound ATU and to indicate the
+link status. Endpoint can indicate the status of commands issued by the host
+this region. Endpoint can also indicate the scratchpad offset, number of
+memory windows to the host using this region.
+
+The format of Config Region is given below. Each of the fields here are 32
+bits.
+
+       +------------------------+
+       |         COMMAND        |
+       +------------------------+
+       |         ARGUMENT       |
+       +------------------------+
+       |         STATUS         |
+       +------------------------+
+       |         TOPOLOGY       |
+       +------------------------+
+       |    ADDRESS (LOWER 32)  |
+       +------------------------+
+       |    ADDRESS (UPPER 32)  |
+       +------------------------+
+       |           SIZE         |
+       +------------------------+
+       |  MEMORY WINDOW1 OFFSET |
+       +------------------------+
+       |   NO OF MEMORY WINDOW  |
+       +------------------------+
+       |       SPAD OFFSET      |
+       +------------------------+
+       |        SPAD COUNT      |
+       +------------------------+
+       |      DB ENTRY SIZE     |
+       +------------------------+
+       |         DB DATA        |
+       +------------------------+
+       |            :           |
+       +------------------------+
+       |            :           |
+       +------------------------+
+       |         DB DATA        |
+       +------------------------+
+
+
+  COMMAND:
+
+       NTB function supports three commands:
+
+         CMD_CONFIGURE_DOORBELL (0x1): Command to configure doorbell. Before
+       invoking this command, the host should allocate and initialize
+       MSI/MSI-X vectors (i.e. initialize the MSI/MSI-X capability in the
+       Endpoint). The endpoint on receiving this command will configure
+       the outbound ATU such that transaction to DB BAR will be routed
+       to the MSI/MSI-X address programmed by the host. The ARGUMENT
+       register should be populated with number of DBs to configure (in the
+       lower 16 bits) and if MSI or MSI-X should be configured (BIT 16).
+       (TODO: Add support for MSI-X).
+
+         CMD_CONFIGURE_MW (0x2): Command to configure memory window. The
+       host invokes this command after allocating a buffer that can be
+       accessed by remote host. The allocated address should be programmed
+       in the ADDRESS register (64 bit), the size should be programmed in
+       the SIZE register and the memory window index should be programmed
+       in the ARGUMENT register. The endpoint on receiving this command
+       will configure the outbound ATU such that transactions to the MW BAR
+       will be routed to the address provided by the host.
+
+         CMD_LINK_UP (0x3): Command to indicate an NTB application is
+       bound to the EP device on the host side. Once the endpoint
+       receives this command from both the hosts, the endpoint will
+       raise a LINK_UP event to both the hosts to indicate the hosts
+       can start communicating with each other.
+
+  ARGUMENT:
+
+       The value of this register is based on the commands issued in
+       command register. See COMMAND section for more information.
+
+  For instance, ARGUMENT is used while configuring a memory window and to
+  indicate that the host side NTB application has initialized.
+
+  TOPOLOGY:
+
+       Set to NTB_TOPO_B2B_USD for Primary interface
+       Set to NTB_TOPO_B2B_DSD for Secondary interface
+
+  ADDRESS/SIZE:
+
+       Address and Size to be used while configuring the memory window.
+       See "CMD_CONFIGURE_MW" for more info.
+
+  MEMORY WINDOW1 OFFSET:
+
+       Memory Window 1 and Doorbell registers are packed together in the
+       same BAR. The initial portion of the region will have doorbell
+       registers and the latter portion of the region is for memory window 1.
+       This register will specify the offset of the memory window 1.
+
+  NO OF MEMORY WINDOW:
+
+       Specifies the number of memory windows supported by the NTB device.
+
+  SPAD OFFSET:
+
+       Self scratchpad region and config region are packed together in the
+       same BAR. The initial portion of the region will have the config region
+       the latter portion of the region is for self scratchpad. This
+       register will specify the offset of the self scratchpad registers.
+
+  SPAD COUNT:
+
+       Specifies the number of scratchpad registers supported by the NTB
+       device.
+
+  DB ENTRY SIZE:
+
+       Used to determine the offset within the DB BAR that should be written
+       in order to raise doorbell. EPF NTB can use either MSI/MSI-X to
+       ring doorbell (MSI-X support will be added later). MSI uses same
+       address for all the interrupts and MSI-X can provide different
+       addresses for different interrupts. The MSI/MSI-X address is provided
+       by the host and the address it gives is based on the MSI/MSI-X
+       implementation supported by the host. For instance, ARM platform
+       using GIC ITS will have the same MSI-X address for all the interrupts.
+       In order to support all the combinations and use the same mechanism
+       for both MSI and MSI-X, EPF NTB allocates separate region in the
+       Outbound Address Space for each of the interrupts. This region will
+       be mapped to the MSI/MSI-X address provided by the host. If a host
+       provides the same address for all the interrupts, all the regions
+       will be translated to the same address. If a host provides different
+       address, the regions will be translated to different address. This
+       will ensure there is no difference while raising the doorbell.
+
+  DB DATA:
+
+       EPF NTB supports 32 interrupts. So there are 32 DB DATA registers.
+       This holds the MSI/MSI-X data that has to be written to MSI address
+       for raising doorbell interrupt. This will be populated by EPF NTB
+       while invoking CMD_CONFIGURE_DOORBELL.
+
+Scratchpad Registers:
+
+  Each host has its own register space allocated in the memory of NTB EPC.
+  They are both readable and writable from both sides of the bridge. They
+  are used by applications built over NTB and can be used to pass control
+  and status information between both sides of a device.
+
+  Scratchpad registers have 2 parts
+       1) Self Scratchpad: Host's own register space
+       2) Peer Scratchpad: Remote host's register space.
+
+Doorbell Registers:
+
+  Registers using which one host can interrupt the other host.
+
+Memory Window:
+
+  Actual transfer of data between the two hosts will happen using the
+  memory window.
index 25f3b253219859a6fa120850b9fcd99bd775b1bd..e05e581af5cfe617f38112907aadea8d03f2a9d6 100644 (file)
@@ -41,10 +41,11 @@ Related CVEs
 
 The following CVE entries describe Spectre variants:
 
-   =============   =======================  =================
+   =============   =======================  ==========================
    CVE-2017-5753   Bounds check bypass      Spectre variant 1
    CVE-2017-5715   Branch target injection  Spectre variant 2
-   =============   =======================  =================
+   CVE-2019-1125   Spectre v1 swapgs        Spectre variant 1 (swapgs)
+   =============   =======================  ==========================
 
 Problem
 -------
@@ -78,6 +79,13 @@ There are some extensions of Spectre variant 1 attacks for reading data
 over the network, see :ref:`[12] <spec_ref12>`. However such attacks
 are difficult, low bandwidth, fragile, and are considered low risk.
 
+Note that, despite "Bounds Check Bypass" name, Spectre variant 1 is not
+only about user-controlled array bounds checks.  It can affect any
+conditional checks.  The kernel entry code interrupt, exception, and NMI
+handlers all have conditional swapgs checks.  Those may be problematic
+in the context of Spectre v1, as kernel code can speculatively run with
+a user GS.
+
 Spectre variant 2 (Branch Target Injection)
 -------------------------------------------
 
@@ -132,6 +140,9 @@ not cover all possible attack vectors.
 1. A user process attacking the kernel
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
+Spectre variant 1
+~~~~~~~~~~~~~~~~~
+
    The attacker passes a parameter to the kernel via a register or
    via a known address in memory during a syscall. Such parameter may
    be used later by the kernel as an index to an array or to derive
@@ -144,7 +155,40 @@ not cover all possible attack vectors.
    potentially be influenced for Spectre attacks, new "nospec" accessor
    macros are used to prevent speculative loading of data.
 
-   Spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
+Spectre variant 1 (swapgs)
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+   An attacker can train the branch predictor to speculatively skip the
+   swapgs path for an interrupt or exception.  If they initialize
+   the GS register to a user-space value, if the swapgs is speculatively
+   skipped, subsequent GS-related percpu accesses in the speculation
+   window will be done with the attacker-controlled GS value.  This
+   could cause privileged memory to be accessed and leaked.
+
+   For example:
+
+   ::
+
+     if (coming from user space)
+         swapgs
+     mov %gs:<percpu_offset>, %reg
+     mov (%reg), %reg1
+
+   When coming from user space, the CPU can speculatively skip the
+   swapgs, and then do a speculative percpu load using the user GS
+   value.  So the user can speculatively force a read of any kernel
+   value.  If a gadget exists which uses the percpu value as an address
+   in another load/store, then the contents of the kernel value may
+   become visible via an L1 side channel attack.
+
+   A similar attack exists when coming from kernel space.  The CPU can
+   speculatively do the swapgs, causing the user GS to get used for the
+   rest of the speculative window.
+
+Spectre variant 2
+~~~~~~~~~~~~~~~~~
+
+   A spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
    target buffer (BTB) before issuing syscall to launch an attack.
    After entering the kernel, the kernel could use the poisoned branch
    target buffer on indirect jump and jump to gadget code in speculative
@@ -280,11 +324,18 @@ The sysfs file showing Spectre variant 1 mitigation status is:
 
 The possible values in this file are:
 
-  =======================================  =================================
-  'Mitigation: __user pointer sanitation'  Protection in kernel on a case by
-                                           case base with explicit pointer
-                                           sanitation.
-  =======================================  =================================
+  .. list-table::
+
+     * - 'Not affected'
+       - The processor is not vulnerable.
+     * - 'Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers'
+       - The swapgs protections are disabled; otherwise it has
+         protection in the kernel on a case by case base with explicit
+         pointer sanitation and usercopy LFENCE barriers.
+     * - 'Mitigation: usercopy/swapgs barriers and __user pointer sanitization'
+       - Protection in the kernel on a case by case base with explicit
+         pointer sanitation, usercopy LFENCE barriers, and swapgs LFENCE
+         barriers.
 
 However, the protections are put in place on a case by case basis,
 and there is no guarantee that all possible attack vectors for Spectre
@@ -366,12 +417,27 @@ Turning on mitigation for Spectre variant 1 and Spectre variant 2
 1. Kernel mitigation
 ^^^^^^^^^^^^^^^^^^^^
 
+Spectre variant 1
+~~~~~~~~~~~~~~~~~
+
    For the Spectre variant 1, vulnerable kernel code (as determined
    by code audit or scanning tools) is annotated on a case by case
    basis to use nospec accessor macros for bounds clipping :ref:`[2]
    <spec_ref2>` to avoid any usable disclosure gadgets. However, it may
    not cover all attack vectors for Spectre variant 1.
 
+   Copy-from-user code has an LFENCE barrier to prevent the access_ok()
+   check from being mis-speculated.  The barrier is done by the
+   barrier_nospec() macro.
+
+   For the swapgs variant of Spectre variant 1, LFENCE barriers are
+   added to interrupt, exception and NMI entry where needed.  These
+   barriers are done by the FENCE_SWAPGS_KERNEL_ENTRY and
+   FENCE_SWAPGS_USER_ENTRY macros.
+
+Spectre variant 2
+~~~~~~~~~~~~~~~~~
+
    For Spectre variant 2 mitigation, the compiler turns indirect calls or
    jumps in the kernel into equivalent return trampolines (retpolines)
    :ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` to go to the target
@@ -473,6 +539,12 @@ Mitigation control on the kernel command line
 Spectre variant 2 mitigation can be disabled or force enabled at the
 kernel command line.
 
+       nospectre_v1
+
+               [X86,PPC] Disable mitigations for Spectre Variant 1
+               (bounds check bypass). With this option data leaks are
+               possible in the system.
+
        nospectre_v2
 
                [X86] Disable all mitigations for the Spectre variant 2
index 1cee1174cde6e5cf1aa36cfde763ae2a87bb10c1..16607b178b4744e30f3c50ebe5eda30de4dfc782 100644 (file)
                        http://repo.or.cz/w/linux-2.6/mini2440.git
 
        mitigations=
-                       [X86,PPC,S390] Control optional mitigations for CPU
-                       vulnerabilities.  This is a set of curated,
+                       [X86,PPC,S390,ARM64] Control optional mitigations for
+                       CPU vulnerabilities.  This is a set of curated,
                        arch-independent options, each of which is an
                        aggregation of existing arch-specific options.
 
                                improves system performance, but it may also
                                expose users to several CPU vulnerabilities.
                                Equivalent to: nopti [X86,PPC]
+                                              kpti=0 [ARM64]
                                               nospectre_v1 [PPC]
                                               nobp=0 [S390]
-                                              nospectre_v2 [X86,PPC,S390]
+                                              nospectre_v1 [X86]
+                                              nospectre_v2 [X86,PPC,S390,ARM64]
                                               spectre_v2_user=off [X86]
                                               spec_store_bypass_disable=off [X86,PPC]
+                                              ssbd=force-off [ARM64]
                                               l1tf=off [X86]
                                               mds=off [X86]
 
                        nosmt=force: Force disable SMT, cannot be undone
                                     via the sysfs control file.
 
-       nospectre_v1    [PPC] Disable mitigations for Spectre Variant 1 (bounds
-                       check bypass). With this option data leaks are possible
-                       in the system.
+       nospectre_v1    [X86,PPC] Disable mitigations for Spectre Variant 1
+                       (bounds check bypass). With this option data leaks
+                       are possible in the system.
 
-       nospectre_v2    [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
-                       (indirect branch prediction) vulnerability. System may
-                       allow data leaks with this option, which is equivalent
-                       to spectre_v2=off.
+       nospectre_v2    [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
+                       the Spectre variant 2 (indirect branch prediction)
+                       vulnerability. System may allow data leaks with this
+                       option.
 
        nospec_store_bypass_disable
                        [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
                        Run specified binary instead of /init from the ramdisk,
                        used for early userspace startup. See initrd.
 
+       rdrand=         [X86]
+                       force - Override the decision by the kernel to hide the
+                               advertisement of RDRAND support (this affects
+                               certain AMD processors because of buggy BIOS
+                               support, specifically around the suspend/resume
+                               path).
+
        rdt=            [HW,X86,RDT]
                        Turn on/off individual RDT features. List is:
                        cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
index d6aff2c5e9e2d5f923f25aad9ad71bd2fb8642f1..6feaffe90e22cb59ae54df2a3c936098b4e4bd64 100644 (file)
@@ -178,3 +178,7 @@ HWCAP_ILRCPC
 HWCAP_FLAGM
 
     Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001.
+
+HWCAP_SSBS
+
+    Functionality implied by ID_AA64PFR1_EL1.SSBS == 0b0010.
index 913396ac582431cb3acbdc96a1bd7f4293661d0b..ed0d814df7e06a25daf0b381f9929d83e152a57c 100644 (file)
@@ -177,6 +177,9 @@ These helper barriers exist because architectures have varying implicit
 ordering on their SMP atomic primitives. For example our TSO architectures
 provide full ordered atomics and these barriers are no-ops.
 
+NOTE: when the atomic RmW ops are fully ordered, they should also imply a
+compiler barrier.
+
 Thus:
 
   atomic_fetch_add();
index df2a00163ccfba30d3ebbccd512bd92c4545f4bb..e584a3a4975c247e9f5abdecf875e19b6f5e5f2a 100644 (file)
@@ -5,11 +5,12 @@ The Cadence MHDP bridge is a DPI to DP bridge.
 
 Required properties:
 - compatible: should be "cdns,mhdp8546",
+  Use "ti,j721e-mhdp8546" for TI J7 SoCs.
 - reg: physical base address and length of the controller's registers,
 - clocks: DP bridge clock, it's used by the IP to know how to translate
        a number of clock cycles into a time (which is used to comply
        with DP standard timings and delays),
-- phys: see the Documentation/devicetree/bindings/phy/phy-cadence-dp.txt
+- phys: see the Documentation/devicetree/bindings/phy/phy-cadence-torrent.txt
 - phy-names: must be "dpphy"
 
 Required subnodes:
diff --git a/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt
new file mode 100644 (file)
index 0000000..a30d63d
--- /dev/null
@@ -0,0 +1,9 @@
+Armadeus ST0700 Adapt. A Santek ST0700I5Y-RBSLW 7.0" WVGA (800x480) TFT with
+an adapter board.
+
+Required properties:
+- compatible: "armadeus,st0700-adapt"
+- power-supply: see panel-common.txt
+
+Optional properties:
+- backlight: see panel-common.txt
index 6c49db7f8ad2597128b644316fe60936e994198c..e1fe02f3e3e9c421b170ffebc699085e47f9f0ec 100644 (file)
@@ -11,11 +11,13 @@ New driver handles the following
 
 Required properties:
 - compatible:          Must be "samsung,exynos-adc-v1"
-                               for exynos4412/5250 and s5pv210 controllers.
+                               for Exynos5250 controllers.
                        Must be "samsung,exynos-adc-v2" for
                                future controllers.
                        Must be "samsung,exynos3250-adc" for
                                controllers compatible with ADC of Exynos3250.
+                       Must be "samsung,exynos4212-adc" for
+                               controllers compatible with ADC of Exynos4212 and Exynos4412.
                        Must be "samsung,exynos7-adc" for
                                the ADC in Exynos7 and compatibles
                        Must be "samsung,s3c2410-adc" for
@@ -28,6 +30,8 @@ Required properties:
                                the ADC in s3c2443 and compatibles
                        Must be "samsung,s3c6410-adc" for
                                the ADC in s3c6410 and compatibles
+                       Must be "samsung,s5pv210-adc" for
+                               the ADC in s5pv210 and compatibles
 - reg:                 List of ADC register address range
                        - The base address and range of ADC register
                        - The base address and range of ADC_PHY register (every
index f5a0923b34ca1e5dfd11f3c9ba03792a6963b5c7..c269dbe384feab159be98e713a328585499e473a 100644 (file)
@@ -62,6 +62,10 @@ Optional properties:
   be referred to mmc-pwrseq-simple.txt. But now it's reused as a tunable delay
   waiting for I/O signalling and card power supply to be stable, regardless of
   whether pwrseq-simple is used. Default to 10ms if no available.
+- supports-cqe : The presence of this property indicates that the corresponding
+  MMC host controller supports HW command queue feature.
+- disable-cqe-dcmd: This property indicates that the MMC controller's command
+  queue engine (CQE) does not support direct commands (DCMDs).
 
 *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
 polarity properties, we have to fix the meaning of the "normal" and "inverted"
index c6ccecb9ae5a5c16f0cbc0f5c8acaa6be0b7469e..ed15c4a2d9795c1192375f124a3ffb480838f3ff 100644 (file)
@@ -39,6 +39,7 @@ Optional Properties (Required for ti,am654-sdhci-5.1 and ti,j721e-sdhci-8bit):
                                  Valid values are 33, 40, 50, 66 and 100 ohms.
 Optional Properties:
        - ti,strobe-sel: strobe select delay for HS400 speed mode. Default value: 0x0.
+       - ti,clkbuf-sel: clock delay buffer select. Default value: 0x0
 
 Example:
 
index 42cd81090a2c77f959020328aee5f953b02703db..3f3cfc1d8d4d855485c048c85b330b52970cae11 100644 (file)
@@ -16,7 +16,7 @@ Required properties:
 
 Optional properties:
 - interrupts: interrupt line number for the SMI error/done interrupt
-- clocks: phandle for up to three required clocks for the MDIO instance
+- clocks: phandle for up to four required clocks for the MDIO instance
 
 The child nodes of the MDIO driver are the individual PHY devices
 connected to this MDIO bus. They must have a "reg" property given the
diff --git a/Documentation/devicetree/bindings/pci/endpoint/pci-epf-bus.txt b/Documentation/devicetree/bindings/pci/endpoint/pci-epf-bus.txt
new file mode 100644 (file)
index 0000000..16727dd
--- /dev/null
@@ -0,0 +1,27 @@
+PCI Endpoint Function Bus
+
+This describes the bindings for endpoint function bus to which endpoint
+function devices should be attached.
+
+Required Properties:
+ - compatible: Should be "pci-epf-bus"
+
+One or more subnodes representing PCIe endpoint function device exposed
+to the remote host.
+
+Example:
+Following is an example of NTB device exposed to the remote host.
+
+epf_bus {
+       compatible = "pci-epf-bus";
+
+       ntb {
+               compatible = "pci-epf-ntb";
+               epcs = <&pcie0_ep>, <&pcie1_ep>;
+               epc-names = "primary", "secondary";
+               vendor-id = /bits/ 16 <0x104c>;
+               device-id = /bits/ 16 <0xb00d>;
+               num-mws = <4>;
+               mws-size = <0x100000>, <0x100000>, <0x100000>, <0x100000>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/pci/endpoint/pci-epf-ntb.txt b/Documentation/devicetree/bindings/pci/endpoint/pci-epf-ntb.txt
new file mode 100644 (file)
index 0000000..e789693
--- /dev/null
@@ -0,0 +1,31 @@
+PCI Endpoint NTB Function Device
+
+This describes the bindings to be used when a NTB device has to be
+exposed to the remote host over PCIe.
+
+Required Properties:
+ - compatible: Should be "pci-epf-ntb"
+ - epcs: As defined in generic pci-epf bindings defined in pci-epf.txt
+ - epc-names: As defined in generic pci-epf bindings defined in pci-epf.txt
+ - vendor-id: As defined in generic pci-epf bindings defined in pci-epf.txt
+ - device-id: As defined in generic pci-epf bindings defined in pci-epf.txt
+ - num-mws: Specify the number of memory windows. Should not be more than 4.
+ - mws-size: List of 'num-mws' entries containing size of each memory window.
+
+Optional Properties:
+ - spad-count: Specify the number of scratchpad registers to be supported
+ - db-count: Specify the number of doorbell interrupts to be supported. Must
+            not be greater than 32.
+
+Example:
+Following is an example of NTB device exposed to the remote host.
+
+ntb {
+       compatible = "pci-epf-ntb";
+       epcs = <&pcie0_ep>, <&pcie1_ep>;
+       epc-names = "primary", "secondary";
+       vendor-id = /bits/ 16 <0x104c>;
+       device-id = /bits/ 16 <0xb00d>;
+       num-mws = <4>;
+       mws-size = <0x100000>, <0x100000>, <0x100000>, <0x100000>;
+};
diff --git a/Documentation/devicetree/bindings/pci/endpoint/pci-epf.txt b/Documentation/devicetree/bindings/pci/endpoint/pci-epf.txt
new file mode 100644 (file)
index 0000000..f006395
--- /dev/null
@@ -0,0 +1,28 @@
+PCI Endpoint Function Device
+
+This describes the generic bindings to be used when a device has to be
+exposed to the remote host over PCIe. The device could be an actual
+peripheral in the platform or a virtual device created by the software.
+
+epcs : phandle to the endpoint controller device
+epc-names : the names of the endpoint controller device corresponding
+           to the EPCs present in the *epcs* phandle
+vendor-id: used to identify device manufacturer
+device-id: used to identify a particular device
+baseclass-code: used to classify the type of function the device performs
+subclass-code: used to identify more specifically the function of the device
+subsys-vendor-id: used to identify vendor of the add-in card or subsystem
+subsys-id: used to specify an id that is specific to a vendor
+
+Example:
+Following is an example of NTB device exposed to the remote host.
+
+ntb {
+       compatible = "pci-epf-ntb";
+       epcs = <&pcie0_ep>, <&pcie1_ep>;
+       epc-names = "primary", "secondary";
+       vendor-id = /bits/ 16 <0x104c>;
+       device-id = /bits/ 16 <0xb00d>;
+       num-mws = <4>;
+       mws-size = <0x100000>, <0x100000>, <0x100000>, <0x100000>;
+};
similarity index 73%
rename from Documentation/devicetree/bindings/phy/phy-cadence-dp.txt
rename to Documentation/devicetree/bindings/phy/phy-cadence-torrent.txt
index 7f49fd54ebc1fbeda35935d18940f5b8a9460616..b053c4b0a35d7573a943dc155d837898dde0e202 100644 (file)
@@ -1,12 +1,15 @@
-Cadence MHDP DisplayPort SD0801 PHY binding
-===========================================
+Cadence Torrent SD0801 PHY binding for DisplayPort
+===================================================
 
 This binding describes the Cadence SD0801 PHY hardware included with
 the Cadence MHDP DisplayPort controller.
 
 -------------------------------------------------------------------------------
 Required properties (controller (parent) node):
-- compatible   : Should be "cdns,dp-phy"
+- compatible   : Should be "cdns,torrent-phy"
+- clocks       : PHY reference clock. Must contain an entry in clock-names.
+                 See ../clocks/clock-bindings.txt for details.
+- clock-names  : Must be "refclk"
 - reg          : Defines the following sets of registers in the parent
                  mhdp device:
                        - Offset of the DPTX PHY configuration registers
@@ -21,7 +24,7 @@ Optional properties:
 
 Example:
        dp_phy: phy@f0fb030a00 {
-               compatible = "cdns,dp-phy";
+               compatible = "cdns,torrent-phy";
                reg = <0xf0 0xfb030a00 0x0 0x00000040>,
                      <0xf0 0xfb500000 0x0 0x00100000>;
                num_lanes = <4>;
index b551b4673486f4e262e10806c371505e0ca8a37b..f3bbca9a6615e3b4c345cd1e90e66f4a218aea29 100644 (file)
@@ -1,7 +1,8 @@
 TI J721E WIZ (SERDES Wrapper)
 
 Required properties:
- - compatible: Should be "ti,j721e-wiz"
+ - compatible: Should be "ti,j721e-wiz-16g" for Sierra phy wrapper,
+              or "ti,j721e-wiz-10g" for Torrent phy wrapper.
  - #address-cells : should be 2 to indicate the child node should use 2 cell
      for address
  - #size-cells: should be 2 to indicate the child node should use 2 cell for
@@ -27,6 +28,17 @@ clock bindings in Documentation/devicetree/bindings/clock/clock-bindings.txt
     If GPIO is active, lane 0 and lane 1 of SERDES will be swapped to
     achieve the functionality of an external type-C plug flip mux.
 
+ - typec-dir-debounce: Number of milliseconds to wait before sampling
+     typec-dir-gpio. If not specified, the GPIO will be sampled ASAP.
+     Type-C spec states minimum CC pin debounce of 100 ms and maximum
+     of 200 ms.
+
+ - lane<n>-mode : Integer describing static lane usage for lane n. For
+     Sierra there may be properties for n = 0 and n = 1, for Torrent n = 0,
+     n = 1, n = 2, and n = 3. The constants to indicate the lane usage
+     are defined in "include/dt-bindings/phy/phy.h". The lane is assumed
+     to be unused if the lane<n>-mode property does not exist.
+
 Required subnodes:
  - Clock Subnode: WIZ node should have '3' subnodes for each of the clock
      selects it supports. The clock subnodes should have the following names
@@ -45,7 +57,7 @@ Required subnodes:
 
 Example: Example shows binding for SERDES_16G (Sierra SERDES with WIZ wrapper)
 serdes_wiz0: wiz@5000000 {
-       compatible = "ti,j721e-wiz";
+       compatible = "ti,j721e-wiz-16g";
        #address-cells = <2>;
        #size-cells = <2>;
        power-domains = <&k3_pds 292 TI_SCI_PD_EXCLUSIVE>;
@@ -53,6 +65,8 @@ serdes_wiz0: wiz@5000000 {
        num-lanes = <2>;
        #reset-cells = <1>;
        ranges;
+       lane0-mode = <PHY_TYPE_PCIE>;
+       lane1-mode = <PHY_TYPE_PCIE>;
 
        pll0_refclk: pll0_refclk {
                clocks = <&k3_clks 292 11>, <&cmn_refclk>;
diff --git a/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.txt b/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.txt
new file mode 100644 (file)
index 0000000..aaa20cd
--- /dev/null
@@ -0,0 +1,18 @@
+Texas Instruments RTI (Real Time Interrupt) module. RTI is used for
+implementing a watchdog on TI SoCs.
+
+Required properties:
+       compatible      = "ti,rti-wdt";
+       reg             = Physical IO space for RTI
+       clocks          = Functional clock for the RTI
+
+Example:
+
+main_rti0: rti@2200000 {
+       compatible = "ti,rti-wdt";
+       reg = <0x0 0x2200000 0x0 0x100>;
+       clocks = <&k3_clks 252 1>;
+       power-domains = <&k3_pds 252 TI_SCI_PD_EXCLUSIVE>;
+       assigned-clocks = <&k3_clks 252 1>;
+       assigned-clock-parents = <&k3_clks 252 5>;
+};
index eef7d9d259e8570d102be8c7f1641158950262c2..d7dc9c818b830d45316a2b9a5a11275390939253 100644 (file)
@@ -302,7 +302,7 @@ beneath or above the path of another overlay lower layer path.
 
 Using an upper layer path and/or a workdir path that are already used by
 another overlay mount is not allowed and may fail with EBUSY.  Using
-partially overlapping paths is not allowed but will not fail with EBUSY.
+partially overlapping paths is not allowed and may fail with EBUSY.
 If files are accessed from two overlayfs mounts which share or overlap the
 upper layer and/or workdir path the behavior of the overlay is undefined,
 though it will not result in a crash or deadlock.
index e4219139386ae805575a7c1cae5dc83f5b3e52ca..7238b355919c757cc7b8bc38b905470b1a69a805 100644 (file)
@@ -20,7 +20,8 @@ void calc_runnable_avg_yN_inv(void)
        int i;
        unsigned int x;
 
-       printf("static const u32 runnable_avg_yN_inv[] = {");
+       /* To silence -Wunused-but-set-variable warnings. */
+       printf("static const u32 runnable_avg_yN_inv[] __maybe_unused = {");
        for (i = 0; i < HALFLIFE; i++) {
                x = ((1UL<<32)-1)*pow(y, i);
 
diff --git a/Documentation/usb/rio.txt b/Documentation/usb/rio.txt
deleted file mode 100644 (file)
index aee715a..0000000
+++ /dev/null
@@ -1,138 +0,0 @@
-Copyright (C) 1999, 2000 Bruce Tenison
-Portions Copyright (C) 1999, 2000 David Nelson
-Thanks to David Nelson for guidance and the usage of the scanner.txt
-and scanner.c files to model our driver and this informative file.
-
-Mar. 2, 2000
-
-CHANGES
-
-- Initial Revision
-
-
-OVERVIEW
-
-This README will address issues regarding how to configure the kernel
-to access a RIO 500 mp3 player.  
-Before I explain how to use this to access the Rio500 please be warned:
-
-W A R N I N G:
---------------
-
-Please note that this software is still under development.  The authors
-are in no way responsible for any damage that may occur, no matter how
-inconsequential.
-
-It seems that the Rio has a problem when sending .mp3 with low batteries.
-I suggest when the batteries are low and you want to transfer stuff that you
-replace it with a fresh one. In my case, what happened is I lost two 16kb
-blocks (they are no longer usable to store information to it). But I don't
-know if that's normal or not; it could simply be a problem with the flash 
-memory.
-
-In an extreme case, I left my Rio playing overnight and the batteries wore 
-down to nothing and appear to have corrupted the flash memory. My RIO 
-needed to be replaced as a result.  Diamond tech support is aware of the 
-problem.  Do NOT allow your batteries to wear down to nothing before 
-changing them.  It appears RIO 500 firmware does not handle low battery 
-power well at all. 
-
-On systems with OHCI controllers, the kernel OHCI code appears to have 
-power on problems with some chipsets.  If you are having problems 
-connecting to your RIO 500, try turning it on first and then plugging it 
-into the USB cable.  
-
-Contact information:
---------------------
-
-   The main page for the project is hosted at sourceforge.net in the following
-   URL: <http://rio500.sourceforge.net>. You can also go to the project's
-   sourceforge home page at: <http://sourceforge.net/projects/rio500/>.
-   There is also a mailing list: rio500-users@lists.sourceforge.net
-
-Authors:
--------
-
-Most of the code was written by Cesar Miquel <miquel@df.uba.ar>. Keith 
-Clayton <kclayton@jps.net> is incharge of the PPC port and making sure
-things work there. Bruce Tenison <btenison@dibbs.net> is adding support
-for .fon files and also does testing. The program will mostly sure be
-re-written and Pete Ikusz along with the rest will re-design it. I would
-also like to thank Tri Nguyen <tmn_3022000@hotmail.com> who provided use 
-with some important information regarding the communication with the Rio.
-
-ADDITIONAL INFORMATION and Userspace tools
-
-http://rio500.sourceforge.net/
-
-
-REQUIREMENTS
-
-A host with a USB port.  Ideally, either a UHCI (Intel) or OHCI
-(Compaq and others) hardware port should work.
-
-A Linux development kernel (2.3.x) with USB support enabled or a
-backported version to linux-2.2.x.  See http://www.linux-usb.org for
-more information on accomplishing this.
-
-A Linux kernel with RIO 500 support enabled.
-
-'lspci' which is only needed to determine the type of USB hardware
-available in your machine.
-
-CONFIGURATION
-
-Using `lspci -v`, determine the type of USB hardware available.
-
-  If you see something like:
-
-    USB Controller: ......
-    Flags: .....
-    I/O ports at ....
-
-  Then you have a UHCI based controller.
-
-  If you see something like:
-
-     USB Controller: .....
-     Flags: ....
-     Memory at .....
-
-  Then you have a OHCI based controller.
-
-Using `make menuconfig` or your preferred method for configuring the
-kernel, select 'Support for USB', 'OHCI/UHCI' depending on your
-hardware (determined from the steps above), 'USB Diamond Rio500 support', and
-'Preliminary USB device filesystem'.  Compile and install the modules
-(you may need to execute `depmod -a` to update the module
-dependencies).
-
-Add a device for the USB rio500:
-  `mknod /dev/usb/rio500 c 180 64`
-
-Set appropriate permissions for /dev/usb/rio500 (don't forget about
-group and world permissions).  Both read and write permissions are
-required for proper operation.
-
-Load the appropriate modules (if compiled as modules):
-
-  OHCI:
-    modprobe usbcore
-    modprobe usb-ohci
-    modprobe rio500
-
-  UHCI:
-    modprobe usbcore
-    modprobe usb-uhci  (or uhci)
-    modprobe rio500
-
-That's it.  The Rio500 Utils at: http://rio500.sourceforge.net should
-be able to access the rio500.
-
-BUGS
-
-If you encounter any problems feel free to drop me an email.
-
-Bruce Tenison
-btenison@dibbs.net
-
index 4bf5940a2c0b03002ed5431836cb49bb64f02e0c..8ef6fedbc8da97eed6f611ad099e572238f80412 100644 (file)
@@ -15176,13 +15176,6 @@ W:     http://www.linux-usb.org/usbnet
 S:     Maintained
 F:     drivers/net/usb/dm9601.c
 
-USB DIAMOND RIO500 DRIVER
-M:     Cesar Miquel <miquel@df.uba.ar>
-L:     rio500-users@lists.sourceforge.net
-W:     http://rio500.sourceforge.net
-S:     Maintained
-F:     drivers/usb/misc/rio500*
-
 USB EHCI DRIVER
 M:     Alan Stern <stern@rowland.harvard.edu>
 L:     linux-usb@vger.kernel.org
index 38f2150457fddee2490d2c15fa0970145c940332..89cd3e6360945846c676664cd5486f553454bf8c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 59
-EXTRAVERSION =
+SUBLEVEL = 81
+EXTRAVERSION = -rc1
 NAME = "People's Front"
 
 # *DOCUMENTATION*
@@ -430,6 +430,7 @@ KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
 KBUILD_LDFLAGS :=
 GCC_PLUGINS_CFLAGS :=
+CLANG_FLAGS :=
 
 export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
 export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
@@ -482,7 +483,7 @@ endif
 
 ifeq ($(cc-name),clang)
 ifneq ($(CROSS_COMPILE),)
-CLANG_FLAGS    := --target=$(notdir $(CROSS_COMPILE:%-=%))
+CLANG_FLAGS    += --target=$(notdir $(CROSS_COMPILE:%-=%))
 GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
 CLANG_FLAGS    += --prefix=$(GCC_TOOLCHAIN_DIR)
 GCC_TOOLCHAIN  := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
@@ -491,6 +492,7 @@ ifneq ($(GCC_TOOLCHAIN),)
 CLANG_FLAGS    += --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
 CLANG_FLAGS    += -no-integrated-as
+CLANG_FLAGS    += -Werror=unknown-warning-option
 KBUILD_CFLAGS  += $(CLANG_FLAGS)
 KBUILD_AFLAGS  += $(CLANG_FLAGS)
 export CLANG_FLAGS
index 74953e76a57d581a795227d0570974f217161e05..0cce54182cc578119084c96f1dab8c9158bfc96c 100644 (file)
@@ -199,7 +199,6 @@ config NR_CPUS
 
 config ARC_SMP_HALT_ON_RESET
        bool "Enable Halt-on-reset boot mode"
-       default y if ARC_UBOOT_SUPPORT
        help
          In SMP configuration cores can be configured as Halt-on-reset
          or they could all start at same time. For Halt-on-reset, non
@@ -539,18 +538,6 @@ config ARC_DBG_TLB_PARANOIA
 
 endif
 
-config ARC_UBOOT_SUPPORT
-       bool "Support uboot arg Handling"
-       default n
-       help
-         ARC Linux by default checks for uboot provided args as pointers to
-         external cmdline or DTB. This however breaks in absence of uboot,
-         when booting from Metaware debugger directly, as the registers are
-         not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus
-         registers look like uboot args to kernel which then chokes.
-         So only enable the uboot arg checking/processing if users are sure
-         of uboot being in play.
-
 config ARC_BUILTIN_DTB_NAME
        string "Built in DTB"
        help
index 6e84060e7c90a2cbba081a46f87ab607aee1d22e..621f59407d7693057f642d64cfe31dbdb7cd7d9d 100644 (file)
@@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5
 # CONFIG_ARC_HAS_LLSC is not set
 CONFIG_ARC_KVADDR_SIZE=402
 CONFIG_ARC_EMUL_UNALIGNED=y
-CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_PREEMPT=y
 CONFIG_NET=y
 CONFIG_UNIX=y
index 1e59a2e9c602fa2736cfc0d6fdd439b07a11105b..e447ace6fa1cab14f00f6ebadb7b15d2812c616a 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARC_PLAT_AXS10X=y
 CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
-CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
 CONFIG_PREEMPT=y
 CONFIG_NET=y
index b5c3f6c54b032d2a84510737272cacbe1ec89b1c..c82cdb10aaf4fba577b43188809a395298ee3c5e 100644 (file)
@@ -15,8 +15,6 @@ CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 # CONFIG_ARC_TIMERS_64BIT is not set
-# CONFIG_ARC_SMP_HALT_ON_RESET is not set
-CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
 CONFIG_PREEMPT=y
 CONFIG_NET=y
index 208bf2c9e7b0d98b97e1778addd3bfea3e3424ce..a72bbda2f7aad0099860ef3f5e99af12722defae 100644 (file)
@@ -100,7 +100,6 @@ ENTRY(stext)
        st.ab   0, [r5, 4]
 1:
 
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
        ; Uboot - kernel ABI
        ;    r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
        ;    r1 = magic number (always zero as of now)
@@ -109,7 +108,6 @@ ENTRY(stext)
        st      r0, [@uboot_tag]
        st      r1, [@uboot_magic]
        st      r2, [@uboot_arg]
-#endif
 
        ; setup "current" tsk and optionally cache it in dedicated r25
        mov     r9, @init_task
index a1218937abd68ac1ba3905762cde0be5a1bcf027..89c97dcfa3602b3f5e1de180c9a9b7f47a76f23b 100644 (file)
@@ -493,7 +493,6 @@ void __init handle_uboot_args(void)
        bool use_embedded_dtb = true;
        bool append_cmdline = false;
 
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
        /* check that we know this tag */
        if (uboot_tag != UBOOT_TAG_NONE &&
            uboot_tag != UBOOT_TAG_CMDLINE &&
@@ -525,7 +524,6 @@ void __init handle_uboot_args(void)
                append_cmdline = true;
 
 ignore_uboot_args:
-#endif
 
        if (use_embedded_dtb) {
                machine_desc = setup_machine_fdt(__dtb_start);
index 5c6663321e873541339d9d0c72f55cfb178efee4..215f515442e03d53ee3a18ade4c62e2a06987b3b 100644 (file)
@@ -179,6 +179,12 @@ void show_regs(struct pt_regs *regs)
        struct task_struct *tsk = current;
        struct callee_regs *cregs;
 
+       /*
+        * generic code calls us with preemption disabled, but some calls
+        * here could sleep, so re-enable to avoid lockdep splat
+        */
+       preempt_enable();
+
        print_task_path_n_nm(tsk);
        show_regs_print_info(KERN_INFO);
 
@@ -221,6 +227,8 @@ void show_regs(struct pt_regs *regs)
        cregs = (struct callee_regs *)current->thread.callee_reg;
        if (cregs)
                show_callee_regs(cregs);
+
+       preempt_disable();
 }
 
 void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
index 183391d4d33a4138d04418da4b90d61efe38c6c4..9cf2ee8b434937e14e03f4ef29090d56dd0cbdbb 100644 (file)
@@ -185,11 +185,6 @@ static void *__init unw_hdr_alloc_early(unsigned long sz)
                                       MAX_DMA_ADDRESS);
 }
 
-static void *unw_hdr_alloc(unsigned long sz)
-{
-       return kmalloc(sz, GFP_KERNEL);
-}
-
 static void init_unwind_table(struct unwind_table *table, const char *name,
                              const void *core_start, unsigned long core_size,
                              const void *init_start, unsigned long init_size,
@@ -370,6 +365,10 @@ ret_err:
 }
 
 #ifdef CONFIG_MODULES
+static void *unw_hdr_alloc(unsigned long sz)
+{
+       return kmalloc(sz, GFP_KERNEL);
+}
 
 static struct unwind_table *last_table;
 
index db6913094be3c9b2bc9ad87e91f7aab779da728d..4e8143de32e70107e733ae197c095c27ac443423 100644 (file)
@@ -66,14 +66,12 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
        struct vm_area_struct *vma = NULL;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
-       siginfo_t info;
+       int si_code = SEGV_MAPERR;
        int ret;
        vm_fault_t fault;
        int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-       clear_siginfo(&info);
-
        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
@@ -83,16 +81,14 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
         * only copy the information from the master page table,
         * nothing more.
         */
-       if (address >= VMALLOC_START) {
+       if (address >= VMALLOC_START && !user_mode(regs)) {
                ret = handle_kernel_vaddr_fault(address);
                if (unlikely(ret))
-                       goto bad_area_nosemaphore;
+                       goto no_context;
                else
                        return;
        }
 
-       info.si_code = SEGV_MAPERR;
-
        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
@@ -119,7 +115,7 @@ retry:
         * we can handle it..
         */
 good_area:
-       info.si_code = SEGV_ACCERR;
+       si_code = SEGV_ACCERR;
 
        /* Handle protection violation, execute on heap or stack */
 
@@ -143,12 +139,17 @@ good_area:
         */
        fault = handle_mm_fault(vma, address, flags);
 
-       /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
        if (unlikely(fatal_signal_pending(current))) {
-               if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
-                       up_read(&mm->mmap_sem);
-               if (user_mode(regs))
+
+               /*
+                * if fault retry, mmap_sem already relinquished by core mm
+                * so OK to return to user mode (with signal handled first)
+                */
+               if (fault & VM_FAULT_RETRY) {
+                       if (!user_mode(regs))
+                               goto no_context;
                        return;
+               }
        }
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
@@ -195,15 +196,10 @@ good_area:
 bad_area:
        up_read(&mm->mmap_sem);
 
-bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                tsk->thread.fault_address = address;
-               info.si_signo = SIGSEGV;
-               info.si_errno = 0;
-               /* info.si_code has been set above */
-               info.si_addr = (void __user *)address;
-               force_sig_info(SIGSEGV, &info, tsk);
+               force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
                return;
        }
 
@@ -238,9 +234,5 @@ do_sigbus:
                goto no_context;
 
        tsk->thread.fault_address = address;
-       info.si_signo = SIGBUS;
-       info.si_errno = 0;
-       info.si_code = BUS_ADRERR;
-       info.si_addr = (void __user *)address;
-       force_sig_info(SIGBUS, &info, tsk);
+       force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
 }
index 51794c7fa6d5bf18bb83296149756733cb68dd84..185e552f1461057b697a0de07539628cacc13dd0 100644 (file)
@@ -1586,8 +1586,9 @@ config ARM_PATCH_IDIV
          code to do integer division.
 
 config AEABI
-       bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K
-       default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K
+       bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \
+               !CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG
+       default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG
        help
          This option allows for the kernel to be compiled using the latest
          ARM ABI (aka EABI).  This is only useful if you are using a user
index 74ce471c82be30117e52600e1cfb8d5ede132714..51845368fdaa82e9f89bb2f1fbc0191b5a4b7730 100644 (file)
        reset-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
        reset-delay-us = <2>;   /* PHY datasheet states 1uS min */
 };
+
+#include "dra7-ipu-common-early-boot.dtsi"
index b1213c7ae310ebafec3c581a60d298fe28a6a8d0..25ced01da8ac8368c510c27adf4d633c51ba9983 100644 (file)
 &vip2 {
        status = "okay";
 };
+
+#include "dra7-ipu-common-early-boot.dtsi"
index d7a48e2ea91533e972aac11026157317e881a860..09f123c939bf96985419ffa4ad4ca31f5885fad6 100644 (file)
        status = "okay";
        memory-region = <&dsp2_memory_region>;
 };
+
+#include "dra7-ipu-common-early-boot.dtsi"
index 36efe410dcd71a54ae6f802e405fb9b60296b58f..9e33c41f541125f1d5d4a497f0f7ef06bd1d8e06 100644 (file)
        };
 
        mdio-bus-mux {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
                /* BIT(9) = 1 => external mdio */
                mdio_ext: mdio@200 {
                        reg = <0x200>;
index ed1af942d860ad1d62e54bb4b76cbbb838b6aa15..86f100daedd34f4e2a5e588e990078d6ffc7edfe 100644 (file)
                        reusable;
                        status = "okay";
                };
+
+               gpu_memory_region: gpu-memory@c0000000 {
+                       compatible = "shared-dma-pool";
+                       reg = <0x0 0xc0000000 0x0 0x10000000>;
+                       reusable;
+                       status = "okay";
+               };
        };
 
        evm_3v3_sd: fixedregulator-sd {
        memory-region = <&dsp2_memory_region>;
 };
 
+&gpu {
+       memory-region = <&gpu_memory_region>;
+};
+
 &bb2d {
        status = "okay";
 };
+
+#include "dra7-ipu-common-early-boot.dtsi"
diff --git a/arch/arm/boot/dts/dra7-ipu-common-early-boot.dtsi b/arch/arm/boot/dts/dra7-ipu-common-early-boot.dtsi
new file mode 100644 (file)
index 0000000..9cad6c8
--- /dev/null
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Common dtsi file that needs to be included in corresponding TI DRA7xx
+ * and AM57xx board dts files that have the IPU1 _and_ IPU2 remote processors
+ * booted early from TI U-Boot/SPL.
+ */
+
+/ {
+       reserved-memory {
+               mmu-early-page-tables@95700000 {
+                       /* address need to match the usage within U-Boot */
+                       reg = <0x0 0x95700000 0x0 0x100000>;
+                       no-map;
+               };
+       };
+};
+
+/* IPU2 */
+&timer3 {
+       ti,no-idle-on-init;
+       ti,no-reset-on-init;
+};
+
+&timer4 {
+       ti,no-idle-on-init;
+       ti,no-reset-on-init;
+};
+
+&timer9 {
+       ti,no-idle-on-init;
+       ti,no-reset-on-init;
+};
+
+&mmu_ipu2{
+       ti,no-idle-on-init;
+       ti,no-reset-on-init;
+};
+
+&ipu2 {
+       ti,no-idle-on-init;
+       ti,no-reset-on-init;
+};
+
+/* IPU1 */
+&timer11 {
+       ti,no-idle-on-init;
+       ti,no-reset-on-init;
+};
+
+&timer7 {
+       ti,no-idle-on-init;
+       ti,no-reset-on-init;
+};
+
+&timer8 {
+       ti,no-idle-on-init;
+       ti,no-reset-on-init;
+};
+
+&mmu_ipu1{
+       ti,no-idle-on-init;
+       ti,no-reset-on-init;
+};
+
+&ipu1 {
+       ti,no-idle-on-init;
+       ti,no-reset-on-init;
+};
index b61c55176bcf0f19513810b49b3fe79f472cee9b..3c32394e04a5157307507683c36b1e6b6a3bdc8a 100644 (file)
                                regulator-name = "lp8733-ldo0";
                                regulator-min-microvolt = <3300000>;
                                regulator-max-microvolt = <3300000>;
+                               regulator-boot-on;
+                               regulator-always-on;
                        };
 
                        lp8733_ldo1_reg: ldo1 {
        status = "okay";
        memory-region = <&dsp1_memory_region>;
 };
+
+#include "dra7-ipu-common-early-boot.dtsi"
index c665ca1b9e75e94396d793a4268ff15498727125..bd0a4421b620b2ac6bf8c2497e5bac7a8b904e6b 100644 (file)
                #size-cells = <2>;
                ranges;
 
-               ipu2_cma_pool: ipu2_cma@95800000 {
+               ipu2_memory_region: ipu2-memory@95800000 {
                        compatible = "shared-dma-pool";
                        reg = <0x0 0x95800000 0x0 0x3800000>;
                        reusable;
                        status = "okay";
                };
 
-               dsp1_cma_pool: dsp1_cma@99000000 {
+               dsp1_memory_region: dsp1-memory@99000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x0 0x99000000 0x0 0x4000000>;
                        reusable;
                        status = "okay";
                };
 
-               ipu1_cma_pool: ipu1_cma@9d000000 {
+               ipu1_memory_region: ipu1-memory@9d000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x0 0x9d000000 0x0 0x2000000>;
                        reusable;
 
 &ipu2 {
        status = "okay";
-       memory-region = <&ipu2_cma_pool>;
+       memory-region = <&ipu2_memory_region>;
 };
 
 &ipu1 {
        status = "okay";
-       memory-region = <&ipu1_cma_pool>;
+       memory-region = <&ipu1_memory_region>;
 };
 
 &dsp1 {
        status = "okay";
-       memory-region = <&dsp1_cma_pool>;
+       memory-region = <&dsp1_memory_region>;
 };
+
+#include "dra7-ipu-common-early-boot.dtsi"
index d7c06f7020a556518d8e6d891262002cdbe04c29..e4d3266c71ff0e52832d003044c5d03de77fd38f 100644 (file)
        status = "okay";
        memory-region = <&dsp1_memory_region>;
 };
+
+#include "dra7-ipu-common-early-boot.dtsi"
index 29a0cba18e84f206ffb147542b2f70380454bc25..bf0946fdb13be5232f14c902dc8bee72cc09cf14 100644 (file)
                #size-cells = <2>;
                ranges;
 
-               ipu2_cma_pool: ipu2_cma@95800000 {
+               ipu2_memory_region: ipu2-memory@95800000 {
                        compatible = "shared-dma-pool";
                        reg = <0x0 0x95800000 0x0 0x3800000>;
                        reusable;
                        status = "okay";
                };
 
-               dsp1_cma_pool: dsp1_cma@99000000 {
+               dsp1_memory_region: dsp1-memory@99000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x0 0x99000000 0x0 0x4000000>;
                        reusable;
                        status = "okay";
                };
 
-               ipu1_cma_pool: ipu1_cma@9d000000 {
+               ipu1_memory_region: ipu1-memory@9d000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x0 0x9d000000 0x0 0x2000000>;
                        reusable;
                        status = "okay";
                };
 
-               dsp2_cma_pool: dsp2_cma@9f000000 {
+               dsp2_memory_region: dsp2-memory@9f000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x0 0x9f000000 0x0 0x800000>;
                        reusable;
                        status = "okay";
                };
+
+               gpu_memory_region: gpu-memory@c0000000 {
+                       compatible = "shared-dma-pool";
+                       reg = <0x0 0xc0000000 0x0 0x10000000>;
+                       reusable;
+                       status = "okay";
+               };
        };
 
        vsys_12v0: fixedregulator-vsys12v0 {
 
 &ipu2 {
        status = "okay";
-       memory-region = <&ipu2_cma_pool>;
+       memory-region = <&ipu2_memory_region>;
 };
 
 &ipu1 {
        status = "okay";
-       memory-region = <&ipu1_cma_pool>;
+       memory-region = <&ipu1_memory_region>;
 };
 
 &dsp1 {
        status = "okay";
-       memory-region = <&dsp1_cma_pool>;
+       memory-region = <&dsp1_memory_region>;
 };
 
 &dsp2 {
        status = "okay";
-       memory-region = <&dsp2_cma_pool>;
+       memory-region = <&dsp2_memory_region>;
+};
+
+&gpu {
+       memory-region = <&gpu_memory_region>;
 };
+
+#include "dra7-ipu-common-early-boot.dtsi"
index 57c2332bf28247b354592c0dbe839fa5a8ed6560..25bdc9d97a4dfde6ffa6a3edd9e06d4902f16ae3 100644 (file)
                                regulator-name = "vdd_ldo10";
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
+                               regulator-always-on;
                                regulator-state-mem {
                                        regulator-off-in-suspend;
                                };
index d80ab9085da19330b877643345e24cb50439d89c..7989631b39ccf506333bb9db6f157ab3e1327483 100644 (file)
                                regulator-name = "vdd_ldo10";
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
+                               regulator-always-on;
                                regulator-state-mem {
                                        regulator-off-in-suspend;
                                };
index 502a361d1fe90d508abe60c337eba5c5c9d50764..15d6157b661dbbbfd3d14d7c6e342a53078b7cb0 100644 (file)
@@ -65,7 +65,7 @@
                gpio-miso = <&gpio1 8 GPIO_ACTIVE_HIGH>;
                gpio-mosi = <&gpio1 7 GPIO_ACTIVE_HIGH>;
                /* Collides with pflash CE1, not so cool */
-               cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+               cs-gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
                num-chipselects = <1>;
 
                panel: display@0 {
index d1329322b968540825459b040c96006b9f21eea8..361dccd6c7eeeecce84399a7553c6b43d4089110 100644 (file)
@@ -11,7 +11,7 @@
 
 / {
        model = "D-Link DNS-313 1-Bay Network Storage Enclosure";
-       compatible = "dlink,dir-313", "cortina,gemini";
+       compatible = "dlink,dns-313", "cortina,gemini";
        #address-cells = <1>;
        #size-cells = <1>;
 
index 2366f093cc76d822426fcff6f95fd0f7eb8bd999..336cdead3da54cd137eeaa45897685b489ebb5a0 100644 (file)
                        pwm1: pwm@2080000 {
                                compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
                                reg = <0x02080000 0x4000>;
-                               interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6UL_CLK_PWM1>,
                                         <&clks IMX6UL_CLK_PWM1>;
                                clock-names = "ipg", "per";
                        pwm2: pwm@2084000 {
                                compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
                                reg = <0x02084000 0x4000>;
-                               interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6UL_CLK_PWM2>,
                                         <&clks IMX6UL_CLK_PWM2>;
                                clock-names = "ipg", "per";
                        pwm3: pwm@2088000 {
                                compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
                                reg = <0x02088000 0x4000>;
-                               interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6UL_CLK_PWM3>,
                                         <&clks IMX6UL_CLK_PWM3>;
                                clock-names = "ipg", "per";
                        pwm4: pwm@208c000 {
                                compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
                                reg = <0x0208c000 0x4000>;
-                               interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6UL_CLK_PWM4>,
                                         <&clks IMX6UL_CLK_PWM4>;
                                clock-names = "ipg", "per";
index 895fbde4d4333a3d5f37397feac66163911e4833..c1ed83131b495b7f2811c03f783a086ec48c7800 100644 (file)
        vmmc-supply = <&reg_module_3v3>;
        vqmmc-supply = <&reg_DCDC3>;
        non-removable;
+       sdhci-caps-mask = <0x80000000 0x0>;
 };
 
 &iomuxc {
index 8bf365d28cacfd3731648922e705b011cb81b7b1..584418f517a88228053a65c63a90758d891f3817 100644 (file)
@@ -43,7 +43,7 @@
                          <&clks IMX7D_ENET1_TIME_ROOT_CLK>;
        assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
        assigned-clock-rates = <0>, <100000000>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        phy-handle = <&ethphy0>;
        fsl,magic-packet;
        status = "okay";
@@ -69,7 +69,7 @@
                          <&clks IMX7D_ENET2_TIME_ROOT_CLK>;
        assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
        assigned-clock-rates = <0>, <100000000>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        phy-handle = <&ethphy1>;
        fsl,magic-packet;
        status = "okay";
index 78db67337ed4a3ce90a8962f183444296c27fc53..54d056b01bb514ce84420ec9e918fa67b88c2081 100644 (file)
                        #address-cells = <3>;
                        #size-cells = <2>;
 
-                       ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000
-                                 0x82000000 0 0x48000000 0x48000000 0 0x10000000>;
+                       ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>,
+                                <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>;
 
-                       interrupts = <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>;
+                       interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "msi";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
index 1e0158acf895d99f8bde234891011e019e172a92..a593d0a998fc8bcb7cbf75960af21b4a2bac44ca 100644 (file)
        };
 };
 
-&emmc {
-       /delete-property/mmc-hs200-1_8v;
-};
-
 &i2c2 {
        status = "disabled";
 };
index f95d0c5fcf71263f044cb84a7efd6599878895be..6e8946052c78b12d688d4ec4a7621576f0cb4ab7 100644 (file)
        pwm-off-delay-ms = <200>;
 };
 
-&emmc {
-       /delete-property/mmc-hs200-1_8v;
-};
-
 &gpio_keys {
        pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;
 
index c706adf4aed2f893e12805b899ac43a7ce91d0f6..440d6783faca55ad6073bfd2fdaae5fb3a2c9957 100644 (file)
                             <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
                             <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
                clock-frequency = <24000000>;
+               arm,no-tick-in-suspend;
        };
 
        timer: timer@ff810000 {
index 5ae5b5228467adb793e4a847dbbec838c4537300..ef484c4cfd1a252f30aba80e459beacf01ecb199 100644 (file)
@@ -91,7 +91,6 @@ CONFIG_USB_SERIAL_PL2303=m
 CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=m
 CONFIG_MSDOS_FS=y
index 09e1672777c9b0e76ea42acde31cb7e011e159ff..0ba8df0d48b9702a9d89a226bc820b1de939733e 100644 (file)
@@ -197,7 +197,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
index 6bb506edb1f53a4867e810b8d86e53957aa79ddb..cc63d09a1f86693dc63455c0d51daa1ada4bd923 100644 (file)
@@ -588,7 +588,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
index 2afb359f3168d50d2fb2a7ed47bf3f515ff3e7af..bd71d5bf98c911ed391adb01437b6bd97573931e 100644 (file)
@@ -334,7 +334,6 @@ CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
 CONFIG_USB_ADUTUX=m
 CONFIG_USB_SEVSEG=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYPRESS_CY7C63=m
index 9ea82c118661b179dea1284ac55b7ac7fde12d1e..3aff4ca2a94e2c6ba9683db626c6591506a0318b 100644 (file)
@@ -191,7 +191,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
index 8436f6ade57dd145a717c11aa5207854a68c25a0..b16cfe99d35f2723c4cd3752f2aa64d36c0207a6 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/xen/hypervisor.h>
 
 extern const struct dma_map_ops arm_dma_ops;
+extern const struct dma_map_ops arm_dma_m_ops;
 extern const struct dma_map_ops arm_coherent_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
index fd6cde23bb5d0dd4f5080dbe1845ead9e446ac80..871fa50a09f19ef4b291d7609ff911f297494f91 100644 (file)
@@ -658,13 +658,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
-                             const struct coproc_reg *table, size_t num)
+                             const struct coproc_reg *table, size_t num,
+                             unsigned long *bmap)
 {
        unsigned long i;
 
        for (i = 0; i < num; i++)
-               if (table[i].reset)
+               if (table[i].reset) {
+                       int reg = table[i].reg;
+
                        table[i].reset(vcpu, &table[i]);
+                       if (reg > 0 && reg < NR_CP15_REGS) {
+                               set_bit(reg, bmap);
+                               if (table[i].is_64bit)
+                                       set_bit(reg + 1, bmap);
+                       }
+               }
 }
 
 static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
@@ -1439,17 +1448,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
 {
        size_t num;
        const struct coproc_reg *table;
-
-       /* Catch someone adding a register without putting in reset entry. */
-       memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
+       DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };
 
        /* Generic chip reset first (so target could override). */
-       reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
+       reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
 
        table = get_target_table(vcpu->arch.target, &num);
-       reset_coproc_regs(vcpu, table, num);
+       reset_coproc_regs(vcpu, table, num, bmap);
 
        for (num = 1; num < NR_CP15_REGS; num++)
-               WARN(vcpu_cp15(vcpu, num) == 0x42424242,
+               WARN(!test_bit(num, bmap),
                     "Didn't reset vcpu_cp15(vcpu, %zi)", num);
 }
index 3c42bf9fa061875145038937dbfe732728370f9d..708931b470909fa12a8e6a3c0877321cb614c81c 100644 (file)
@@ -704,6 +704,46 @@ static struct resource da8xx_gpio_resources[] = {
        },
        { /* interrupt */
                .start  = IRQ_DA8XX_GPIO0,
+               .end    = IRQ_DA8XX_GPIO0,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO1,
+               .end    = IRQ_DA8XX_GPIO1,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO2,
+               .end    = IRQ_DA8XX_GPIO2,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO3,
+               .end    = IRQ_DA8XX_GPIO3,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO4,
+               .end    = IRQ_DA8XX_GPIO4,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO5,
+               .end    = IRQ_DA8XX_GPIO5,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO6,
+               .end    = IRQ_DA8XX_GPIO6,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO7,
+               .end    = IRQ_DA8XX_GPIO7,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO8,
                .end    = IRQ_DA8XX_GPIO8,
                .flags  = IORESOURCE_IRQ,
        },
index 9f7d38d12c8886134a0b4d149b6593bf228efc4a..2b0f5d97ab7c1acd694c0edab73120ff408f7338 100644 (file)
@@ -548,6 +548,36 @@ static struct resource dm355_gpio_resources[] = {
        },
        {       /* interrupt */
                .start  = IRQ_DM355_GPIOBNK0,
+               .end    = IRQ_DM355_GPIOBNK0,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM355_GPIOBNK1,
+               .end    = IRQ_DM355_GPIOBNK1,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM355_GPIOBNK2,
+               .end    = IRQ_DM355_GPIOBNK2,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM355_GPIOBNK3,
+               .end    = IRQ_DM355_GPIOBNK3,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM355_GPIOBNK4,
+               .end    = IRQ_DM355_GPIOBNK4,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM355_GPIOBNK5,
+               .end    = IRQ_DM355_GPIOBNK5,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM355_GPIOBNK6,
                .end    = IRQ_DM355_GPIOBNK6,
                .flags  = IORESOURCE_IRQ,
        },
index abcf2a5ed89b5e4780430911b1ef584711cf0142..42665914166a318e50fd741c60cea0a7c6e9a8a0 100644 (file)
@@ -267,6 +267,41 @@ static struct resource dm365_gpio_resources[] = {
        },
        {       /* interrupt */
                .start  = IRQ_DM365_GPIO0,
+               .end    = IRQ_DM365_GPIO0,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO1,
+               .end    = IRQ_DM365_GPIO1,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO2,
+               .end    = IRQ_DM365_GPIO2,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO3,
+               .end    = IRQ_DM365_GPIO3,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO4,
+               .end    = IRQ_DM365_GPIO4,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO5,
+               .end    = IRQ_DM365_GPIO5,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO6,
+               .end    = IRQ_DM365_GPIO6,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO7,
                .end    = IRQ_DM365_GPIO7,
                .flags  = IORESOURCE_IRQ,
        },
index 0720da7809a693eee06c22a80b0449a1cad06e17..de1ec6dc01e949cf1ee27fe89a375424f5f545f8 100644 (file)
@@ -492,6 +492,26 @@ static struct resource dm644_gpio_resources[] = {
        },
        {       /* interrupt */
                .start  = IRQ_GPIOBNK0,
+               .end    = IRQ_GPIOBNK0,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_GPIOBNK1,
+               .end    = IRQ_GPIOBNK1,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_GPIOBNK2,
+               .end    = IRQ_GPIOBNK2,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_GPIOBNK3,
+               .end    = IRQ_GPIOBNK3,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_GPIOBNK4,
                .end    = IRQ_GPIOBNK4,
                .flags  = IORESOURCE_IRQ,
        },
index 6bd2ed069d0d7491a28b5af9665838e030e41ed3..d9b93e2806d222e7959a9d2b626e192205e60c2d 100644 (file)
@@ -442,6 +442,16 @@ static struct resource dm646x_gpio_resources[] = {
        },
        {       /* interrupt */
                .start  = IRQ_DM646X_GPIOBNK0,
+               .end    = IRQ_DM646X_GPIOBNK0,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM646X_GPIOBNK1,
+               .end    = IRQ_DM646X_GPIOBNK1,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM646X_GPIOBNK2,
                .end    = IRQ_DM646X_GPIOBNK2,
                .flags  = IORESOURCE_IRQ,
        },
index cd350dee4df376a3452299df86ba53815b50649c..efcd400b2abb3a876d9b36a7918ff6d0d3bf93cd 100644 (file)
@@ -37,6 +37,7 @@
 #define DEEPSLEEP_SLEEPENABLE_BIT      BIT(31)
 
        .text
+       .arch   armv5te
 /*
  * Move DaVinci into deep sleep state
  *
index ddc27638ba2a5e7807b9a904df874c5e913ef812..017c792be0a076469fa9231039f6115e91a20ed6 100644 (file)
@@ -135,6 +135,8 @@ restart:
        orr r11, r11, r13                       @ mask all requested interrupts
        str r11, [r12, #OMAP1510_GPIO_INT_MASK]
 
+       str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts
+
        ands r10, r13, #KEYBRD_CLK_MASK         @ extract keyboard status - set?
        beq hksw                                @ no - try next source
 
@@ -142,7 +144,6 @@ restart:
        @@@@@@@@@@@@@@@@@@@@@@
        @ Keyboard clock FIQ mode interrupt handler
        @ r10 now contains KEYBRD_CLK_MASK, use it
-       str r10, [r12, #OMAP1510_GPIO_INT_STATUS]       @ ack the interrupt
        bic r11, r11, r10                               @ unmask it
        str r11, [r12, #OMAP1510_GPIO_INT_MASK]
 
index b0dc7ddf5877d70eeda21df28b331acdf99a4cdb..b8ba763fe10863293378ba15d17744e44fdeb589 100644 (file)
@@ -73,9 +73,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
                         * interrupts default to since commit 80ac93c27441
                         * requires interrupt already acked and unmasked.
                         */
-                       if (irq_chip->irq_ack)
-                               irq_chip->irq_ack(d);
-                       if (irq_chip->irq_unmask)
+                       if (!WARN_ON_ONCE(!irq_chip->irq_unmask))
                                irq_chip->irq_unmask(d);
                }
                for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
index b226c8aaf8b1c94c687cadbbad55c89e845cf5cd..7074cfd1ff413b57ded78cf2c4bc150ecef77775 100644 (file)
@@ -131,6 +131,9 @@ static int __init omap4_sram_init(void)
        struct device_node *np;
        struct gen_pool *sram_pool;
 
+       if (!soc_is_omap44xx() && !soc_is_omap54xx())
+               return 0;
+
        np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
        if (!np)
                pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
index 946768eb197e264c98c1e00355c74ca3e898742a..2ee93d5e6fc463f53f6f4cd1a356c21e6702965d 100644 (file)
@@ -2471,7 +2471,7 @@ static void _setup_iclk_autoidle(struct omap_hwmod *oh)
  */
 static int _setup_reset(struct omap_hwmod *oh)
 {
-       int r;
+       int r = 0;
 
        if (oh->_state != _HWMOD_STATE_INITIALIZED)
                return -EINVAL;
@@ -3498,6 +3498,7 @@ int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
        struct omap_hwmod_class *class;
        void __iomem *regs = NULL;
        unsigned long flags;
+       int ret = 0;
 
        sysc = kzalloc(sizeof(*sysc), GFP_KERNEL);
        if (!sysc)
@@ -3514,8 +3515,10 @@ int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
        if (!oh->_mpu_rt_va) {
                regs = ioremap(data->module_pa,
                               data->module_size);
-               if (!regs)
-                       return -ENOMEM;
+               if (!regs) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
        }
 
        /*
@@ -3523,8 +3526,10 @@ int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
         * may not yet have ioremapped their registers.
         */
        class = kmemdup(oh->class, sizeof(*oh->class), GFP_KERNEL);
-       if (!class)
-               return -ENOMEM;
+       if (!class) {
+               ret = -ENOMEM;
+               goto err;
+       }
 
        class->sysc = sysc;
 
@@ -3537,6 +3542,9 @@ int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
        spin_unlock_irqrestore(&oh->_lock, flags);
 
        return 0;
+err:
+       kfree(sysc);
+       return ret;
 }
 
 /**
@@ -3789,6 +3797,7 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
        struct omap_hwmod_ocp_if *oi;
        struct clockdomain *clkdm;
        struct clk_hw_omap *clk;
+       struct clk_hw *hw;
 
        if (!oh)
                return NULL;
@@ -3805,7 +3814,14 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
                c = oi->_clk;
        }
 
-       clk = to_clk_hw_omap(__clk_get_hw(c));
+       hw = __clk_get_hw(c);
+       if (!hw)
+               return NULL;
+
+       clk = to_clk_hw_omap(hw);
+       if (!clk)
+               return NULL;
+
        clkdm = clk->clkdm;
        if (!clkdm)
                return NULL;
index 186db132ac84ecee38ef601d6a7779dc9cceba5d..2919f38c69e9f3bf4674dbfa9dfeaac9e504c406 100644 (file)
@@ -947,7 +947,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
        .rev_offs       = 0x0000,
        .sysc_offs      = 0x0010,
        .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+       .sysc_flags     = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+                         SYSC_HAS_RESET_STATUS,
        .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
                          SIDLE_SMART_WKUP),
        .sysc_fields    = &omap_hwmod_sysc_type2,
index 6c43691b84db2efbf4ab3e58c0e6b342b8b9c708..63212585351f5af9c8bb2e2de13f3cba4a16418a 100644 (file)
@@ -539,7 +539,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
 static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
        .rev_offs       = 0x0,
        .sysc_offs      = 0x4,
-       .sysc_flags     = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
+       .sysc_flags     = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+                         SYSC_HAS_RESET_STATUS,
        .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
        .sysc_fields    = &omap_hwmod_sysc_type2,
 };
index a9fbf512ae0d0db72f978dcdcbabaf4f56201495..4d2e10826c84007ca7dabe14c7f896ec4e318ce1 100644 (file)
@@ -58,7 +58,18 @@ static struct gfx_sgx_platform_data sgx_pdata = {
 };
 #endif
 
+static bool __maybe_unused omap_device_is_enabled(struct platform_device *pdev)
+{
+       struct omap_device *od = to_omap_device(pdev);
+
+       if (od->_state == OMAP_DEVICE_STATE_ENABLED)
+               return true;
+       else
+               return false;
+}
+
 #if IS_ENABLED(CONFIG_OMAP_IOMMU)
+
 int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request,
                                    u8 *pwrst);
 #else
@@ -445,6 +456,7 @@ static void __init omap3_pandora_legacy_init(void)
 static struct omap_rproc_pdata omap4_ipu_dsp_pdata = {
        .device_enable = omap_rproc_device_enable,
        .device_shutdown = omap_rproc_device_shutdown,
+       .device_is_enabled = omap_device_is_enabled,
 };
 #endif
 
@@ -456,6 +468,7 @@ static struct iommu_platform_data omap4_iommu_pdata = {
        .deassert_reset = omap_device_deassert_hardreset,
        .device_enable = omap_device_enable,
        .device_idle = omap_device_idle,
+       .device_is_enabled = omap_device_is_enabled,
 };
 #endif
 
@@ -485,6 +498,7 @@ static struct iommu_platform_data dra7_ipu1_dsp_iommu_pdata = {
        .assert_reset = omap_device_assert_hardreset,
        .deassert_reset = omap_device_deassert_hardreset,
        .device_enable = omap_device_enable,
+       .device_is_enabled = omap_device_is_enabled,
        .device_idle = omap_device_idle,
        .set_pwrdm_constraint = omap_iommu_set_pwrdm_constraint,
 };
index ca03af8fe43ffc21a0252233e4cc2e1bb38cd771..ddf96adf65ab38ac13b957a0d70b57f621e6ad8e 100644 (file)
@@ -77,83 +77,6 @@ int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
        return 0;
 }
 
-/*
- * This API is to be called during init to set the various voltage
- * domains to the voltage as per the opp table. Typically we boot up
- * at the nominal voltage. So this function finds out the rate of
- * the clock associated with the voltage domain, finds out the correct
- * opp entry and sets the voltage domain to the voltage specified
- * in the opp entry
- */
-static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
-                                        const char *oh_name)
-{
-       struct voltagedomain *voltdm;
-       struct clk *clk;
-       struct dev_pm_opp *opp;
-       unsigned long freq, bootup_volt;
-       struct device *dev;
-
-       if (!vdd_name || !clk_name || !oh_name) {
-               pr_err("%s: invalid parameters\n", __func__);
-               goto exit;
-       }
-
-       if (!strncmp(oh_name, "mpu", 3))
-               /* 
-                * All current OMAPs share voltage rail and clock
-                * source, so CPU0 is used to represent the MPU-SS.
-                */
-               dev = get_cpu_device(0);
-       else
-               dev = omap_device_get_by_hwmod_name(oh_name);
-
-       if (IS_ERR(dev)) {
-               pr_err("%s: Unable to get dev pointer for hwmod %s\n",
-                       __func__, oh_name);
-               goto exit;
-       }
-
-       voltdm = voltdm_lookup(vdd_name);
-       if (!voltdm) {
-               pr_err("%s: unable to get vdd pointer for vdd_%s\n",
-                       __func__, vdd_name);
-               goto exit;
-       }
-
-       clk =  clk_get(NULL, clk_name);
-       if (IS_ERR(clk)) {
-               pr_err("%s: unable to get clk %s\n", __func__, clk_name);
-               goto exit;
-       }
-
-       freq = clk_get_rate(clk);
-       clk_put(clk);
-
-       opp = dev_pm_opp_find_freq_ceil(dev, &freq);
-       if (IS_ERR(opp)) {
-               pr_err("%s: unable to find boot up OPP for vdd_%s\n",
-                       __func__, vdd_name);
-               goto exit;
-       }
-
-       bootup_volt = dev_pm_opp_get_voltage(opp);
-       dev_pm_opp_put(opp);
-
-       if (!bootup_volt) {
-               pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
-                      __func__, vdd_name);
-               goto exit;
-       }
-
-       voltdm_scale(voltdm, bootup_volt);
-       return 0;
-
-exit:
-       pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name);
-       return -EINVAL;
-}
-
 #ifdef CONFIG_SUSPEND
 static int omap_pm_enter(suspend_state_t suspend_state)
 {
@@ -211,25 +134,6 @@ void omap_common_suspend_init(void *pm_suspend)
 }
 #endif /* CONFIG_SUSPEND */
 
-static void __init omap3_init_voltages(void)
-{
-       if (!soc_is_omap34xx())
-               return;
-
-       omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu");
-       omap2_set_init_voltage("core", "l3_ick", "l3_main");
-}
-
-static void __init omap4_init_voltages(void)
-{
-       if (!soc_is_omap44xx())
-               return;
-
-       omap2_set_init_voltage("mpu", "dpll_mpu_ck", "mpu");
-       omap2_set_init_voltage("core", "l3_div_ck", "l3_main_1");
-       omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva");
-}
-
 int __maybe_unused omap_pm_nop_init(void)
 {
        return 0;
@@ -249,10 +153,6 @@ int __init omap2_common_pm_late_init(void)
        omap4_twl_init();
        omap_voltage_late_init();
 
-       /* Initialize the voltages */
-       omap3_init_voltages();
-       omap4_init_voltages();
-
        /* Smartreflex device init */
        omap_devinit_smartreflex();
 
index 05858f966f7d9443f776fe2ee924c05e3e7b99a9..dfa65fc2c82bc14dbf69de2ccd27b9de9e83f037 100644 (file)
@@ -433,7 +433,7 @@ static void omap3_prm_reconfigure_io_chain(void)
  * registers, and omap3xxx_prm_reconfigure_io_chain() must be called.
  * No return value.
  */
-static void __init omap3xxx_prm_enable_io_wakeup(void)
+static void omap3xxx_prm_enable_io_wakeup(void)
 {
        if (prm_features & PRM_HAS_IO_WAKEUP)
                omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
index fb48f3141fb4d7cd2403aaece876ae338a308bf2..c4c96661eb89ae2d60ba3eecf4c84e17a401243f 100644 (file)
@@ -131,7 +131,7 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
        } while (1);
 
        idma->state = ~DMA_ST_AB;
-       disable_irq(irq);
+       disable_irq_nosync(irq);
 
        return IRQ_HANDLED;
 }
@@ -174,6 +174,9 @@ static void iomd_enable_dma(unsigned int chan, dma_t *dma)
                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
                }
 
+               idma->dma_addr = idma->dma.sg->dma_address;
+               idma->dma_len = idma->dma.sg->length;
+
                iomd_writeb(DMA_CR_C, dma_base + CR);
                idma->state = DMA_ST_AB;
        }
index caa6d5fe9078326ea65d29d8d6359e1a82306969..b296ada974091b4a8f3fc10be453034e807918ac 100644 (file)
@@ -65,7 +65,7 @@ int zynq_cpun_start(u32 address, int cpu)
                        * 0x4: Jump by mov instruction
                        * 0x8: Jumping address
                        */
-                       memcpy((__force void *)zero, &zynq_secondary_trampoline,
+                       memcpy_toio(zero, &zynq_secondary_trampoline,
                                                        trampoline_size);
                        writel(address, zero + trampoline_size);
 
index 8211cf45ece17f46762191cf45853db0998eba0f..c20602947ce4f027becf90c856ddae0fdc644656 100644 (file)
@@ -50,6 +50,7 @@ struct arm_dma_alloc_args {
        const void *caller;
        bool want_vaddr;
        int coherent_flag;
+       bool zero;
 };
 
 struct arm_dma_free_args {
@@ -203,6 +204,27 @@ const struct dma_map_ops arm_dma_ops = {
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
+static void *arm_dma_malloc(struct device *dev, size_t size, dma_addr_t *handle,
+                           gfp_t gfp, unsigned long dma_attrs);
+
+const struct dma_map_ops arm_dma_m_ops = {
+       .alloc                  = arm_dma_malloc,
+       .free                   = arm_dma_free,
+       .mmap                   = arm_dma_mmap,
+       .get_sgtable            = arm_dma_get_sgtable,
+       .map_page               = arm_dma_map_page,
+       .unmap_page             = arm_dma_unmap_page,
+       .map_sg                 = arm_dma_map_sg,
+       .unmap_sg               = arm_dma_unmap_sg,
+       .sync_single_for_cpu    = arm_dma_sync_single_for_cpu,
+       .sync_single_for_device = arm_dma_sync_single_for_device,
+       .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
+       .sync_sg_for_device     = arm_dma_sync_sg_for_device,
+       .mapping_error          = arm_dma_mapping_error,
+       .dma_supported          = arm_dma_supported,
+};
+EXPORT_SYMBOL(arm_dma_m_ops);
+
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
@@ -356,7 +378,7 @@ static void __dma_free_buffer(struct page *page, size_t size)
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                     pgprot_t prot, struct page **ret_page,
                                     const void *caller, bool want_vaddr,
-                                    int coherent_flag, gfp_t gfp);
+                                    int coherent_flag, gfp_t gfp, bool zero);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                 pgprot_t prot, struct page **ret_page,
@@ -413,7 +435,7 @@ static int __init atomic_pool_init(void)
        if (dev_get_cma_area(NULL))
                ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
                                      &page, atomic_pool_init, true, NORMAL,
-                                     GFP_KERNEL);
+                                     GFP_KERNEL, true);
        else
                ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
                                           &page, atomic_pool_init, true);
@@ -587,7 +609,7 @@ static int __free_from_pool(void *start, size_t size)
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                     pgprot_t prot, struct page **ret_page,
                                     const void *caller, bool want_vaddr,
-                                    int coherent_flag, gfp_t gfp)
+                                    int coherent_flag, gfp_t gfp, bool zero)
 {
        unsigned long order = get_order(size);
        size_t count = size >> PAGE_SHIFT;
@@ -598,7 +620,8 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
        if (!page)
                return NULL;
 
-       __dma_clear_buffer(page, size, coherent_flag);
+       if (zero)
+               __dma_clear_buffer(page, size, coherent_flag);
 
        if (!want_vaddr)
                goto out;
@@ -675,7 +698,7 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
        return __alloc_from_contiguous(args->dev, args->size, args->prot,
                                       ret_page, args->caller,
                                       args->want_vaddr, args->coherent_flag,
-                                      args->gfp);
+                                      args->gfp, args->zero);
 }
 
 static void cma_allocator_free(struct arm_dma_free_args *args)
@@ -728,7 +751,7 @@ static struct arm_dma_allocator remap_allocator = {
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                         gfp_t gfp, pgprot_t prot, bool is_coherent,
-                        unsigned long attrs, const void *caller)
+                        unsigned long attrs, const void *caller, bool zero)
 {
        u64 mask = get_coherent_dma_mask(dev);
        struct page *page = NULL;
@@ -743,6 +766,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                .caller = caller,
                .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
                .coherent_flag = is_coherent ? COHERENT : NORMAL,
+               .zero = zero,
        };
 
 #ifdef CONFIG_DMA_API_DEBUG
@@ -816,14 +840,27 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 
        return __dma_alloc(dev, size, handle, gfp, prot, false,
-                          attrs, __builtin_return_address(0));
+                          attrs, __builtin_return_address(0), true);
+}
+
+/*
+ * Same as arm_dma_alloc except don't zero memory on alloc
+ */
+void *arm_dma_malloc(struct device *dev, size_t size, dma_addr_t *handle,
+                    gfp_t gfp, unsigned long attrs)
+{
+       pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
+
+       return __dma_alloc(dev, size, handle, gfp, prot, false,
+                          attrs, __builtin_return_address(0),
+                          false);
 }
 
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
        return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
-                          attrs, __builtin_return_address(0));
+                          attrs, __builtin_return_address(0), true);
 }
 
 static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
index 3232afb6fdc00be7da29c521068d3ba08e08e500..a9ee0d9dc740a0582028a447d969d9ff0e39afbb 100644 (file)
@@ -216,7 +216,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 {
        unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
 
-       if (fsr & FSR_WRITE)
+       if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
                mask = VM_WRITE;
        if (fsr & FSR_LNX_PF)
                mask = VM_EXEC;
@@ -287,7 +287,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
-       if (fsr & FSR_WRITE)
+       if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
                flags |= FAULT_FLAG_WRITE;
 
        /*
index c063708fa5032a5b4393be25ecfdc886d534aa31..9ecc2097a87a07e0c03bb5c915a1e175664db961 100644 (file)
@@ -6,6 +6,7 @@
  * Fault status register encodings.  We steal bit 31 for our own purposes.
  */
 #define FSR_LNX_PF             (1 << 31)
+#define FSR_CM                 (1 << 13)
 #define FSR_WRITE              (1 << 11)
 #define FSR_FS4                        (1 << 10)
 #define FSR_FS3_0              (15)
index 0cc8e04295a40dc1d16f308396afdfb7540aa48c..e1d330a269212e3b0176166a8b1be370ec01a3d8 100644 (file)
@@ -196,6 +196,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
+       phys_addr_t addr = __pfn_to_phys(pfn);
+
+       if (__phys_to_pfn(addr) != pfn)
+               return 0;
+
        return memblock_is_map_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
@@ -713,7 +718,8 @@ static void update_sections_early(struct section_perm perms[], int n)
                if (t->flags & PF_KTHREAD)
                        continue;
                for_each_thread(t, s)
-                       set_section_perms(perms, n, true, s->mm);
+                       if (s->mm)
+                               set_section_perms(perms, n, true, s->mm);
        }
        set_section_perms(perms, n, true, current->active_mm);
        set_section_perms(perms, n, true, &init_mm);
index f866870db749c4bf2b0e5ff03f687cda5569e651..0b94b674aa91fa5b8ac994d143c96cece97f02c6 100644 (file)
@@ -18,8 +18,9 @@
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
 
 /* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
+#define MIN_GAP                (128*1024*1024UL)
+#define MAX_GAP                ((STACK_TOP)/6*5)
+#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
 
 static int mmap_is_legacy(struct rlimit *rlim_stack)
 {
@@ -35,13 +36,22 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 {
        unsigned long gap = rlim_stack->rlim_cur;
+       unsigned long pad = stack_guard_gap;
+
+       /* Account for stack randomization if necessary */
+       if (current->flags & PF_RANDOMIZE)
+               pad += (STACK_RND_MASK << PAGE_SHIFT);
+
+       /* Values close to RLIM_INFINITY can overflow. */
+       if (gap + pad > gap)
+               gap += pad;
 
        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;
 
-       return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+       return PAGE_ALIGN(STACK_TOP - gap - rnd);
 }
 
 /*
index e46a6a446cdd27126869bb97574d5bf51075e9e4..70e560cf8ca03dfff5a02b0726d8be9644c87f87 100644 (file)
@@ -1175,6 +1175,22 @@ void __init adjust_lowmem_bounds(void)
         */
        vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
 
+       /*
+        * The first usable region must be PMD aligned. Mark its start
+        * as MEMBLOCK_NOMAP if it isn't
+        */
+       for_each_memblock(memory, reg) {
+               if (!memblock_is_nomap(reg)) {
+                       if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
+                               phys_addr_t len;
+
+                               len = round_up(reg->base, PMD_SIZE) - reg->base;
+                               memblock_mark_nomap(reg->base, len);
+                       }
+                       break;
+               }
+       }
+
        for_each_memblock(memory, reg) {
                phys_addr_t block_start = reg->base;
                phys_addr_t block_end = reg->base + reg->size;
index ce42cc640a61a32b66ad84fdb8e2f82af976f4ef..71d85ff323f73d0e13d98ad5803287c1aba38e18 100644 (file)
@@ -62,6 +62,7 @@ void samsung_wdt_reset(void)
 #ifdef CONFIG_OF
 static const struct of_device_id s3c2410_wdt_match[] = {
        { .compatible = "samsung,s3c2410-wdt" },
+       { .compatible = "samsung,s3c6410-wdt" },
        {},
 };
 
index b4d78959cadf08df3cd3f983c162fdbe0dab46e3..bc9a37b3cecd6247c92c98c03221240898241bc8 100644 (file)
@@ -31,7 +31,9 @@ void __init xen_efi_runtime_setup(void)
        efi.get_variable             = xen_efi_get_variable;
        efi.get_next_variable        = xen_efi_get_next_variable;
        efi.set_variable             = xen_efi_set_variable;
+       efi.set_variable_nonblocking = xen_efi_set_variable;
        efi.query_variable_info      = xen_efi_query_variable_info;
+       efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
        efi.update_capsule           = xen_efi_update_capsule;
        efi.query_capsule_caps       = xen_efi_query_capsule_caps;
        efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
index 8790a29d0af43f8b1f34b20e10fad8586f550fb1..51fe21f5d0783745b70e38c6be86c0d30bdcba64 100644 (file)
@@ -84,6 +84,7 @@ config ARM64
        select GENERIC_CLOCKEVENTS
        select GENERIC_CLOCKEVENTS_BROADCAST
        select GENERIC_CPU_AUTOPROBE
+       select GENERIC_CPU_VULNERABILITIES
        select GENERIC_EARLY_IOREMAP
        select GENERIC_IDLE_POLL_SETUP
        select GENERIC_IRQ_MULTI_HANDLER
@@ -251,7 +252,8 @@ config GENERIC_CALIBRATE_DELAY
        def_bool y
 
 config ZONE_DMA32
-       def_bool y
+       bool "Support DMA32 zone" if EXPERT
+       default y
 
 config HAVE_GENERIC_GUP
        def_bool y
index 5089aa64088fcfe7e4f437248040969dd55a0e67..9a1ea8a464057a970c4eefbca99999e283043143 100644 (file)
                        tx-fifo-depth = <16384>;
                        rx-fifo-depth = <16384>;
                        snps,multicast-filter-bins = <256>;
+                       altr,sysmgr-syscon = <&sysmgr 0x44 0>;
                        status = "disabled";
                };
 
                        tx-fifo-depth = <16384>;
                        rx-fifo-depth = <16384>;
                        snps,multicast-filter-bins = <256>;
+                       altr,sysmgr-syscon = <&sysmgr 0x48 0>;
                        status = "disabled";
                };
 
                        tx-fifo-depth = <16384>;
                        rx-fifo-depth = <16384>;
                        snps,multicast-filter-bins = <256>;
+                       altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
                        status = "disabled";
                };
 
index 212e6634c9baa5173efd128eb9e37c28a6714468..7398ae8856dc0ecf425d8eaac7f753c1855800c5 100644 (file)
                        regulator-max-microvolt = <1320000>;
                        enable-gpios = <&pmic 6 GPIO_ACTIVE_HIGH>;
                        regulator-ramp-delay = <80>;
-                       regulator-enable-ramp-delay = <1000>;
+                       regulator-enable-ramp-delay = <2000>;
+                       regulator-settling-time-us = <160>;
                };
        };
 };
index 3be920efee823a2913f7695bbd09a4ca10f03eb8..6597c0894137a471546ab9a6b02d44ec04d32211 100644 (file)
                        compatible = "nvidia,tegra210-agic";
                        #interrupt-cells = <3>;
                        interrupt-controller;
-                       reg = <0x702f9000 0x2000>,
+                       reg = <0x702f9000 0x1000>,
                              <0x702fa000 0x2000>;
                        interrupts = <GIC_SPI 102 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
                        clocks = <&tegra_car TEGRA210_CLK_APE>;
index c142169a58fc5039c3b0c95bde31ec80c2c33b0d..e9147e35b7396d1aedaba08280e62ffb58c7e14e 100644 (file)
@@ -40,6 +40,7 @@
                pinctrl-0 = <&usb30_host_drv>;
                regulator-name = "vcc_host_5v";
                regulator-always-on;
+               regulator-boot-on;
                vin-supply = <&vcc_sys>;
        };
 
@@ -50,6 +51,7 @@
                pinctrl-0 = <&usb20_host_drv>;
                regulator-name = "vcc_host1_5v";
                regulator-always-on;
+               regulator-boot-on;
                vin-supply = <&vcc_sys>;
        };
 
index e065394360bbbf012ddabc678495f8873d1409c9..92186edefeb96e751ad25df2f2d2bdca5c2f7119 100644 (file)
                         <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
                clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
+               max-frequency = <150000000>;
                status = "disabled";
        };
 
                         <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
                clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
+               max-frequency = <150000000>;
                status = "disabled";
        };
 
                         <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
                clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
+               max-frequency = <150000000>;
                status = "disabled";
        };
 
index df7e62d9a670842d4f9a89ec114e7beddab921b2..cea44a7c7cf998566f2c9f99d3b400d1f11acc5a 100644 (file)
                reg = <0x0 0xff914000 0x0 0x100>, <0x0 0xff915000 0x0 0x100>;
                interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH 0>;
                interrupt-names = "isp0_mmu";
-               clocks = <&cru ACLK_ISP0_NOC>, <&cru HCLK_ISP0_NOC>;
+               clocks = <&cru ACLK_ISP0_WRAPPER>, <&cru HCLK_ISP0_WRAPPER>;
                clock-names = "aclk", "iface";
                #iommu-cells = <0>;
+               power-domains = <&power RK3399_PD_ISP0>;
                rockchip,disable-mmu-reset;
-               status = "disabled";
        };
 
        isp1_mmu: iommu@ff924000 {
                reg = <0x0 0xff924000 0x0 0x100>, <0x0 0xff925000 0x0 0x100>;
                interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH 0>;
                interrupt-names = "isp1_mmu";
-               clocks = <&cru ACLK_ISP1_NOC>, <&cru HCLK_ISP1_NOC>;
+               clocks = <&cru ACLK_ISP1_WRAPPER>, <&cru HCLK_ISP1_WRAPPER>;
                clock-names = "aclk", "iface";
                #iommu-cells = <0>;
+               power-domains = <&power RK3399_PD_ISP1>;
                rockchip,disable-mmu-reset;
-               status = "disabled";
        };
 
        hdmi_sound: hdmi-sound {
index be117c0ada131bea98f991be623767aa3cf8a9d7..ea9474ca5ff5b8479d0748ac43e6ac16f1868374 100644 (file)
@@ -25,7 +25,8 @@ dtb-$(CONFIG_ARCH_K3_J721E_SOC) += k3-j721e-common-proc-board.dtb \
                                   k3-j721e-proc-board-tps65917.dtb \
                                   k3-j721e-common-proc-board-infotainment.dtbo \
                                   k3-j721e-common-proc-board-infotainment-display-sharing.dtbo \
-                                  k3-j721e-common-proc-board-jailhouse.dtbo
+                                  k3-j721e-common-proc-board-jailhouse.dtbo \
+                                  k3-j721e-pcie-backplane.dtbo
 
 $(obj)/%.dtbo: $(src)/%.dtso FORCE
        $(call if_changed_dep,dtc)
index f86e18783205e106ebc79bae7087e5e9e28ed07b..2ebb59fa737d1ece2dd5e2ad57e7fa75cd85efc3 100644 (file)
@@ -66,7 +66,7 @@
                };
 
                rtos_ipc_memory_region: ipc-memories@a2000000 {
-                       reg = <0x00 0xa2000000 0x00 0x00100000>;
+                       reg = <0x00 0xa2000000 0x00 0x00200000>;
                        alignment = <0x1000>;
                        no-map;
                };
        bus-width = <8>;
        non-removable;
        ti,driver-strength-ohm = <50>;
+       disable-wp;
 };
 
 &sdhci1 {
        pinctrl-names = "default";
        pinctrl-0 = <&main_mmc1_pins_default>;
        ti,driver-strength-ohm = <50>;
+       disable-wp;
 };
 
 &gpu {
index 1328eb416d96a623d70390a07f3a2d47cbc5b9da..0bc4805ecfb2735562135efd774bccf8a967cde6 100644 (file)
        };
 
        dp0: connector {
-               compatible = "dp-connector"; /* No such binding exists yet.. */
+               compatible = "dp-connector";
+               label = "DP0";
 
                port {
                        dp_connector_in: endpoint {
-                       remote-endpoint = <&dp_bridge_output>;
+                               remote-endpoint = <&dp_bridge_output>;
                        };
                };
        };
                        ti,psd-size = <16>;
                };
        };
+
+       vdd_sd_dv_alt: gpio-regulator-TLV71033 {
+               compatible = "regulator-gpio";
+               pinctrl-names = "default";
+               pinctrl-0 = <&vdd_sd_dv_alt_pins_default>;
+               regulator-name = "tlv71033";
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <3300000>;
+               regulator-boot-on;
+               vin-supply = <&vsys_5v0>;
+               gpios = <&main_gpio0 117 GPIO_ACTIVE_HIGH>;
+               states = <1800000 0x0
+                         3300000 0x1>;
+       };
 };
 
 &wkup_pmx0 {
                        J721E_IOPAD(0x0, PIN_INPUT, 7) /* (AC18) EXTINTn.GPIO0_0 */
                >;
        };
+
+       vdd_sd_dv_alt_pins_default: vdd_sd_dv_alt_pins_default {
+               pinctrl-single,pins = <
+                       J721E_IOPAD(0x1d8, PIN_INPUT, 7) /* (W4) SPI1_CS1.GPIO0_117 */
+               >;
+       };
 };
 
 &dss {
        };
 };
 
+&serdes_wiz4 {
+       lane0-mode = <PHY_TYPE_DP>;
+       lane1-mode = <PHY_TYPE_DP>;
+       lane2-mode = <PHY_TYPE_DP>;
+       lane3-mode = <PHY_TYPE_DP>;
+};
+
+&mhdp {
+       status = "ok";
+       pinctrl-names = "default";
+       pinctrl-0 = <&dp0_pins_default>;
+};
+
+&dp0_ports {
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       port@0 {
+               reg = <0>;
+               dp_bridge_input: endpoint {
+                       remote-endpoint = <&dpi_out_real0>;
+               };
+       };
+
+       port@1 {
+               reg = <1>;
+               dp_bridge_output: endpoint {
+                       remote-endpoint = <&dp_connector_in>;
+               };
+       };
+};
+
 &main_i2c0 {
        pinctrl-names = "default";
        pinctrl-0 = <&main_i2c0_pins_default>;
        phy0: ethernet-phy@0 {
                reg = <0>;
                ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
-               ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
                ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
        };
 };
 
 &cpsw_port1 {
-       phy-mode = "rgmii-id";
+       phy-mode = "rgmii-rxid";
        phy-handle = <&phy0>;
 };
 
 
 &serdes_wiz3 {
        typec-dir-gpios = <&main_gpio1 3 GPIO_ACTIVE_HIGH>;
+       typec-dir-debounce = <300>;     /* TUSB321, tCCB_DEFAULT 133 ms */
+       lane0-mode = <PHY_TYPE_USB3>;
+       lane1-mode = <PHY_TYPE_USB3>;
 };
 
 &serdes3 {
        /* eMMC */
        non-removable;
        ti,driver-strength-ohm = <50>;
+       disable-wp;
 };
 
 &main_sdhci1 {
        /* SD/MMC */
        vmmc-supply = <&vdd_mmc1>;
+       vqmmc-supply = <&vdd_sd_dv_alt>;
        pinctrl-names = "default";
        pinctrl-0 = <&main_mmc1_pins_default>;
+       disable-wp;
 };
 
 &main_sdhci2 {
        status = "disabled";
 };
 
+&serdes_wiz0 {
+       lane0-mode = <PHY_TYPE_PCIE>;
+       lane1-mode = <PHY_TYPE_PCIE>;
+};
+
 &serdes0 {
        serdes0_pcie_link: link@0 {
                reg = <0>;
        };
 };
 
+&serdes_wiz1 {
+       lane0-mode = <PHY_TYPE_PCIE>;
+       lane1-mode = <PHY_TYPE_PCIE>;
+};
+
 &serdes1 {
        serdes1_pcie_link: link@0 {
                reg = <0>;
        };
 };
 
+&serdes_wiz2 {
+       lane0-mode = <PHY_TYPE_PCIE>;
+       lane1-mode = <PHY_TYPE_PCIE>;
+};
+
 &serdes2 {
        serdes2_pcie_link: link@0 {
                reg = <0>;
 &main_uart2 {
        status = "disabled";
 };
+
+&pcie0_ep {
+       phys = <&serdes0_pcie_link>;
+       phy-names = "pcie_phy";
+};
+
+&pcie1_ep {
+       phys = <&serdes1_pcie_link>;
+       phy-names = "pcie_phy";
+};
+
+&pcie2_ep {
+       phys = <&serdes2_pcie_link>;
+       phy-names = "pcie_phy";
+};
index d4e4fc9f396655019c4d8db8a57e503c9a889b55..31fe6ffba7a992eb380611cbc81def04a0f49b5f 100644 (file)
@@ -92,6 +92,7 @@
        smmu0: smmu@36600000 {
                compatible = "arm,smmu-v3";
                reg = <0x0 0x36600000 0x0 0x100000>;
+               power-domains = <&k3_pds 229 TI_SCI_PD_EXCLUSIVE>;
                interrupt-parent = <&gic500>;
                interrupts = <GIC_SPI 772 IRQ_TYPE_EDGE_RISING>,
                             <GIC_SPI 768 IRQ_TYPE_EDGE_RISING>;
        };
 
        serdes_wiz0: wiz@5000000 {
-               compatible = "ti,j721e-wiz";
+               compatible = "ti,j721e-wiz-16g";
                #address-cells = <2>;
                #size-cells = <2>;
                power-domains = <&k3_pds 292 TI_SCI_PD_EXCLUSIVE>;
        };
 
        serdes_wiz1: wiz@5010000 {
-               compatible = "ti,j721e-wiz";
+               compatible = "ti,j721e-wiz-16g";
                #address-cells = <2>;
                #size-cells = <2>;
                power-domains = <&k3_pds 293 TI_SCI_PD_EXCLUSIVE>;
        };
 
        serdes_wiz2: wiz@5020000 {
-               compatible = "ti,j721e-wiz";
+               compatible = "ti,j721e-wiz-16g";
                #address-cells = <2>;
                #size-cells = <2>;
                power-domains = <&k3_pds 294 TI_SCI_PD_EXCLUSIVE>;
        };
 
        serdes_wiz3: wiz@5030000 {
-               compatible = "ti,j721e-wiz";
+               compatible = "ti,j721e-wiz-16g";
                #address-cells = <2>;
                #size-cells = <2>;
                power-domains = <&k3_pds 295 TI_SCI_PD_EXCLUSIVE>;
        };
 
        serdes_wiz4: wiz@5050000 {
-               compatible = "ti,j721e-wiz";
+               compatible = "ti,j721e-wiz-10g";
                #address-cells = <2>;
                #size-cells = <2>;
                power-domains = <&k3_pds 297 TI_SCI_PD_EXCLUSIVE>;
 
                serdes4: serdes@5050000 {
                        /* XXX we also map EDP0 registers here as the PHY driver needs those... */
-                       compatible = "cdns,dp-phy";
+                       compatible = "cdns,torrent-phy";
                        reg = <0x00 0x05050000 0x0 0x00010000>, /* SERDES_10G0 */
                              <0x00 0x0A030A00 0x0 0x00000040>; /* DSS_EDP0_V2A_CORE_VP_REGS_APB + 30A00 */
 
                        num_lanes = <4>;
                        max_bit_rate = <5400>;
                        #phy-cells = <0>;
+                       clocks = <&wiz4_pll0_refclk>;
+                       clock-names = "refclk";
                };
        };
 
        mhdp: dp-bridge@000A000000 {
-               compatible = "cdns,mhdp8546";
+               compatible = "ti,j721e-mhdp8546", "cdns,mhdp8546";
                reg = <0x00 0x0A000000 0x0 0x30A00>, /* DSS_EDP0_V2A_CORE_VP_REGS_APB - upto PHY mapped area */
                      <0x00 0x04F40000 0x0 0x20>;    /* DSS_EDP0_INTG_CFG_VP */
 
-               pinctrl-names = "default";
-               pinctrl-0 = <&dp0_pins_default>;
+               status = "disabled";
 
                clocks = <&k3_clks 151 36>;
 
                power-domains = <&k3_pds 151 TI_SCI_PD_EXCLUSIVE>;
 
                /* TODO: No audio config yet */
-               /* TODO: Pinmux for eDP output pins */
 
-               ports {
+               dp0_ports: ports {
                        #address-cells = <1>;
                        #size-cells = <0>;
-
-                       port@0 {
-                               reg = <0>;
-                               dp_bridge_input: endpoint {
-                                       remote-endpoint = <&dpi_out_real0>;
-                               };
-                       };
-
-                       port@1 {
-                               reg = <1>;
-                               dp_bridge_output: endpoint {
-                                       remote-endpoint = <&dp_connector_in>;
-                               };
-                       };
                };
        };
 
                assigned-clocks = <&k3_clks 92 0>;
                assigned-clock-parents = <&k3_clks 92 1>;
                ti,otap-del-sel-legacy = <0x0>;
-               ti,otap-del-sel-sd-hs = <0x0>;
-               ti,otap-del-sel-sdr12 = <0x0>;
-               ti,otap-del-sel-sdr25 = <0x0>;
+               ti,otap-del-sel-sd-hs = <0xf>;
+               ti,otap-del-sel-sdr12 = <0xf>;
+               ti,otap-del-sel-sdr25 = <0xf>;
                ti,otap-del-sel-sdr50 = <0xc>;
                ti,otap-del-sel-sdr104 = <0x5>;
                ti,otap-del-sel-ddr50 = <0xc>;
                ti,trm-icp = <0x8>;
+               ti,clkbuf-sel = <0x7>;
                dma-coherent;
-               no-1-8-v;
-
+               sdhci-caps-mask = <0x2 0x0>;
        };
 
        main_sdhci2: sdhci@4f98000 {
                assigned-clocks = <&k3_clks 93 0>;
                assigned-clock-parents = <&k3_clks 93 1>;
                ti,otap-del-sel-legacy = <0x0>;
-               ti,otap-del-sel-sd-hs = <0x0>;
-               ti,otap-del-sel-sdr12 = <0x0>;
-               ti,otap-del-sel-sdr25 = <0x0>;
+               ti,otap-del-sel-sd-hs = <0xf>;
+               ti,otap-del-sel-sdr12 = <0xf>;
+               ti,otap-del-sel-sdr25 = <0xf>;
                ti,otap-del-sel-sdr50 = <0xc>;
                ti,otap-del-sel-sdr104 = <0x5>;
                ti,otap-del-sel-ddr50 = <0xc>;
                ti,trm-icp = <0x8>;
+               ti,clkbuf-sel = <0x7>;
                dma-coherent;
-               no-1-8-v;
+               sdhci-caps-mask = <0x2 0x0>;
        };
 
        main_r5fss0: r5fss@5c00000 {
                        };
                };
        };
+
+       main_rti0: rti@2200000 {
+               compatible = "ti,rti-wdt";
+               reg = <0x0 0x2200000 0x0 0x100>;
+               clocks = <&k3_clks 252 1>;
+               power-domains = <&k3_pds 252 TI_SCI_PD_EXCLUSIVE>;
+               assigned-clocks = <&k3_clks 252 1>;
+               assigned-clock-parents = <&k3_clks 252 5>;
+       };
+
+       main_rti1: rti@2210000 {
+               compatible = "ti,rti-wdt";
+               reg = <0x0 0x2210000 0x0 0x100>;
+               clocks = <&k3_clks 253 1>;
+               power-domains = <&k3_pds 253 TI_SCI_PD_EXCLUSIVE>;
+               assigned-clocks = <&k3_clks 253 1>;
+               assigned-clock-parents = <&k3_clks 253 5>;
+       };
 };
index bed5f0ff7ecd953c30c0e7690be5f7b6e88ed9b7..2776e807ea803e32df7f0ed8008daf81cfc03d33 100644 (file)
                        power-domains = <&k3_pds 102 TI_SCI_PD_EXCLUSIVE>;
                        clocks = <&k3_clks 102 0>;
                        assigned-clocks = <&k3_clks 102 0>;
-                       assigned-clock-rates = <166666666>;
+                       assigned-clock-rates = <250000000>;
                        #address-cells = <2>;
                        #size-cells = <1>;
                        mux-controls = <&hbmc_mux 0>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-pcie-backplane.dtso b/arch/arm64/boot/dts/ti/k3-j721e-pcie-backplane.dtso
new file mode 100644 (file)
index 0000000..8008063
--- /dev/null
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * J7 PCIe Backplane
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/pci/pci.h>
+
+/ {
+  fragment@101 {
+       target-path = "/";
+
+       __overlay__ {
+               epf_bus {
+                       compatible = "pci-epf-bus";
+
+                       ntb {
+                               compatible = "pci-epf-ntb";
+                               epcs = <&pcie0_ep>, <&pcie1_ep>;
+                               epc-names = "primary", "secondary";
+                               vendor-id = /bits/ 16 <0x104c>;
+                               device-id = /bits/ 16 <0xb00d>;
+                               num-mws = <4>;
+                               mws-size = <0x100000>, <0x100000>, <0x100000>, <0x100000>;
+                       };
+               };
+       };
+  };
+};
+
+&pcie0 {
+       pci-mode = <PCI_MODE_EP>;
+};
+
+&pcie1 {
+       pci-mode = <PCI_MODE_EP>;
+};
index 4fcf2be253bac3fb2a9a0b22005b88ba39c0c000..8d0d5c947fbdfff4f7013edeeb64cc5b67f1f1ab 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "k3-j721e-som-tps65917.dtsi"
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
 #include <dt-bindings/net/ti-dp83867.h>
 #include <dt-bindings/pci/pci.h>
 #include <dt-bindings/sound/ti-mcasp.h>
                };
        };
 
+       gpio_keys: gpio-keys {
+               compatible = "gpio-keys";
+               autorepeat;
+               pinctrl-names = "default";
+               pinctrl-0 = <&sw10_button_pins_default &sw11_button_pins_default>;
+
+               sw10: sw10 {
+                       label = "GPIO Key USER1";
+                       linux,code = <BTN_0>;
+                       gpios = <&main_gpio0 0 GPIO_ACTIVE_LOW>;
+               };
+
+               sw11: sw11 {
+                       label = "GPIO Key USER2";
+                       linux,code = <BTN_1>;
+                       gpios = <&wkup_gpio0 7 GPIO_ACTIVE_LOW>;
+               };
+       };
+
        vdd_mmc1: fixedregulator-sd {
                compatible = "regulator-fixed";
                regulator-name = "vdd_mmc1";
        };
 
        dp0: connector {
-               compatible = "dp-connector"; /* No such binding exists yet.. */
+               compatible = "dp-connector";
+               label = "DP0";
 
                port {
                        dp_connector_in: endpoint {
-                       remote-endpoint = <&dp_bridge_output>;
+                               remote-endpoint = <&dp_bridge_output>;
                        };
                };
        };
+
+       cpsw9g_virt_mac: main_r5fss_cpsw9g_virt_mac0 {
+               compatible = "ti,j721e-cpsw-virt-mac";
+               dma-coherent;
+               ti,psil-base = <0x4a00>;
+               ti,remote-name = "mpu_1_0_ethswitch-device-0";
+               interrupt-parent = <&main_udmass_inta>;
+
+               dmas = <&main_udmap &cpsw9g_virt_mac 0 UDMA_DIR_TX>,
+                      <&main_udmap &cpsw9g_virt_mac 1 UDMA_DIR_TX>,
+                      <&main_udmap &cpsw9g_virt_mac 2 UDMA_DIR_TX>,
+                      <&main_udmap &cpsw9g_virt_mac 3 UDMA_DIR_TX>,
+                      <&main_udmap &cpsw9g_virt_mac 4 UDMA_DIR_TX>,
+                      <&main_udmap &cpsw9g_virt_mac 5 UDMA_DIR_TX>,
+                      <&main_udmap &cpsw9g_virt_mac 6 UDMA_DIR_TX>,
+                      <&main_udmap &cpsw9g_virt_mac 0 UDMA_DIR_RX>;
+               dma-names = "tx0", "tx1", "tx2", "tx3",
+                           "tx4", "tx5", "tx6",
+                           "rx";
+
+               virt_emac_port {
+                       ti,label = "virt-port";
+                       /* local-mac-address = [0 0 0 0 0 0]; */
+               };
+
+               ti,psil-config0 {
+                       linux,udma-mode = <UDMA_PKT_MODE>;
+                       statictr-type = <PSIL_STATIC_TR_NONE>;
+                       ti,needs-epib;
+                       ti,psd-size = <16>;
+               };
+
+               ti,psil-config1 {
+                       linux,udma-mode = <UDMA_PKT_MODE>;
+                       statictr-type = <PSIL_STATIC_TR_NONE>;
+                       ti,needs-epib;
+                       ti,psd-size = <16>;
+               };
+
+               ti,psil-config2 {
+                       linux,udma-mode = <UDMA_PKT_MODE>;
+                       statictr-type = <PSIL_STATIC_TR_NONE>;
+                       ti,needs-epib;
+                       ti,psd-size = <16>;
+               };
+
+               ti,psil-config3 {
+                       linux,udma-mode = <UDMA_PKT_MODE>;
+                       statictr-type = <PSIL_STATIC_TR_NONE>;
+                       ti,needs-epib;
+                       ti,psd-size = <16>;
+               };
+
+               ti,psil-config4 {
+                       linux,udma-mode = <UDMA_PKT_MODE>;
+                       statictr-type = <PSIL_STATIC_TR_NONE>;
+                       ti,needs-epib;
+                       ti,psd-size = <16>;
+               };
+
+               ti,psil-config5 {
+                       linux,udma-mode = <UDMA_PKT_MODE>;
+                       statictr-type = <PSIL_STATIC_TR_NONE>;
+                       ti,needs-epib;
+                       ti,psd-size = <16>;
+               };
+
+               ti,psil-config6 {
+                       linux,udma-mode = <UDMA_PKT_MODE>;
+                       statictr-type = <PSIL_STATIC_TR_NONE>;
+                       ti,needs-epib;
+                       ti,psd-size = <16>;
+               };
+
+               ti,psil-config7 {
+                       linux,udma-mode = <UDMA_PKT_MODE>;
+                       statictr-type = <PSIL_STATIC_TR_NONE>;
+                       ti,needs-epib;
+                       ti,psd-size = <16>;
+               };
+       };
 };
 
 &wkup_pmx0 {
                        J721E_WKUP_IOPAD(0x38, PIN_INPUT, 0) /* (A23) MCU_OSPI1_LBCLKO */
                >;
        };
+
+       sw11_button_pins_default: sw11_button_pins_default {
+               pinctrl-single,pins = <
+                       J721E_WKUP_IOPAD(0xcc, PIN_INPUT, 7) /* (G28) WKUP_GPIO0_7 */
+               >;
+       };
 };
 
 &wkup_uart0 {
                pinctrl-single,pins = <
                        J721E_IOPAD(0x254, PIN_INPUT, 0) /* (R29) MMC1_CMD */
                        J721E_IOPAD(0x250, PIN_INPUT, 0) /* (P25) MMC1_CLK */
+                       J721E_IOPAD(0x2ac, PIN_INPUT, 0) /* (P25) MMC1_CLKLB */
                        J721E_IOPAD(0x24c, PIN_INPUT, 0) /* (R24) MMC1_DAT0 */
                        J721E_IOPAD(0x248, PIN_INPUT, 0) /* (P24) MMC1_DAT1 */
                        J721E_IOPAD(0x244, PIN_INPUT, 0) /* (R25) MMC1_DAT2 */
                        J721E_IOPAD(0x240, PIN_INPUT, 0) /* (R26) MMC1_DAT3 */
                        J721E_IOPAD(0x258, PIN_INPUT, 0) /* (P23) MMC1_SDCD */
                        J721E_IOPAD(0x25c, PIN_INPUT, 0) /* (R28) MMC1_SDWP */
-                       J721E_IOPAD(0x2ac, PIN_INPUT, 0)
                >;
        };
 
                        J721E_IOPAD(0x1c4, PIN_INPUT, 5) /* SPI0_CS1.DP0_HPD */
                >;
        };
+
+       sw10_button_pins_default: sw10_button_pins_default {
+               pinctrl-single,pins = <
+                       J721E_IOPAD(0x0, PIN_INPUT, 7) /* (AC18) EXTINTn.GPIO0_0 */
+               >;
+       };
 };
 
 &dss {
        };
 };
 
+&serdes_wiz4 {
+       lane0-mode = <PHY_TYPE_DP>;
+       lane1-mode = <PHY_TYPE_DP>;
+       lane2-mode = <PHY_TYPE_DP>;
+       lane3-mode = <PHY_TYPE_DP>;
+};
+
+&mhdp {
+       status = "ok";
+       pinctrl-names = "default";
+       pinctrl-0 = <&dp0_pins_default>;
+};
+
+&dp0_ports {
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       port@0 {
+               reg = <0>;
+               dp_bridge_input: endpoint {
+                       remote-endpoint = <&dpi_out_real0>;
+               };
+       };
+
+       port@1 {
+               reg = <1>;
+               dp_bridge_output: endpoint {
+                       remote-endpoint = <&dp_connector_in>;
+               };
+       };
+};
+
 &main_i2c0 {
        pinctrl-names = "default";
        pinctrl-0 = <&main_i2c0_pins_default>;
        phy0: ethernet-phy@0 {
                reg = <0>;
                ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
-               ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
                ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
        };
 };
 
 &cpsw_port1 {
-       phy-mode = "rgmii-id";
+       phy-mode = "rgmii-rxid";
        phy-handle = <&phy0>;
 };
 
 
 &serdes_wiz3 {
        typec-dir-gpios = <&main_gpio1 3 GPIO_ACTIVE_HIGH>;
+       typec-dir-debounce = <300>;     /* TUSB321, tCCB_DEFAULT 133 ms */
+       lane0-mode = <PHY_TYPE_USB3>;
+       lane1-mode = <PHY_TYPE_USB3>;
 };
 
 &serdes3 {
        dr_mode = "otg";
        maximum-speed = "super-speed";
        phys = <&serdes3_usb_link>;
-       phy-names = "cdns3,usbphy";
+       phy-names = "cdns3,usb3-phy";
 };
 
 &usbss1 {
        /* eMMC */
        non-removable;
        ti,driver-strength-ohm = <50>;
+       disable-wp;
 };
 
 &main_sdhci1 {
        vqmmc-supply = <&ldo1_reg>;
        pinctrl-names = "default";
        pinctrl-0 = <&main_mmc1_pins_default>;
+       disable-wp;
 };
 
 &main_sdhci2 {
        status = "disabled";
 };
 
+&serdes_wiz0 {
+       lane0-mode = <PHY_TYPE_PCIE>;
+       lane1-mode = <PHY_TYPE_PCIE>;
+};
+
 &serdes0 {
        serdes0_pcie_link: link@0 {
                reg = <0>;
        };
 };
 
+&serdes_wiz1 {
+       lane0-mode = <PHY_TYPE_PCIE>;
+       lane1-mode = <PHY_TYPE_PCIE>;
+};
+
 &serdes1 {
        serdes1_pcie_link: link@0 {
                reg = <0>;
        };
 };
 
+&serdes_wiz2 {
+       lane0-mode = <PHY_TYPE_PCIE>;
+       lane1-mode = <PHY_TYPE_PCIE>;
+};
+
 &serdes2 {
        serdes2_pcie_link: link@0 {
                reg = <0>;
                ti,adc-channels = <0 1 2 3 4 5 6 7>;
        };
 };
+
+&pcie0_ep {
+       phys = <&serdes0_pcie_link>;
+       phy-names = "pcie_phy";
+};
+
+&pcie1_ep {
+       phys = <&serdes1_pcie_link>;
+       phy-names = "pcie_phy";
+};
+
+&pcie2_ep {
+       phys = <&serdes2_pcie_link>;
+       phy-names = "pcie_phy";
+};
+
+/* uart2 assigned to cpsw9g eth-switch fw running on remote CPU core */
+&main_uart2 {
+       status = "disabled";
+};
index 17fac2889f56ea99dbcca0c87e8c6c6b45934235..d8c521c757e836717b5a80b70e1e83f293c9bc2c 100644 (file)
@@ -54,7 +54,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
                         unsigned int len, u8 *out)
 {
        struct sha1_ce_state *sctx = shash_desc_ctx(desc);
-       bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
+       bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
 
        if (!may_use_simd())
                return crypto_sha1_finup(desc, data, len, out);
index 261f5195cab74b2952ee2158daf08ab968939701..c47d1a28ff6bb6274180c6063e2034be1b991eca 100644 (file)
@@ -59,7 +59,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
                           unsigned int len, u8 *out)
 {
        struct sha256_ce_state *sctx = shash_desc_ctx(desc);
-       bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
+       bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
 
        if (!may_use_simd()) {
                if (len)
index f90f5d83b228aebab81b73a01f066b050a201c7c..5a97ac853168222a0525f3a3d5b75aa02d5bb0cf 100644 (file)
  * RAS Error Synchronization barrier
  */
        .macro  esb
+#ifdef CONFIG_ARM64_RAS_EXTN
        hint    #16
+#else
+       nop
+#endif
        .endm
 
 /*
index 3b0938281541912aae9fbe94ae97f799a0b342e4..d8b01c7c9cd3fa4d2aa58a0048bb582f37f74870 100644 (file)
@@ -74,7 +74,7 @@ __XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")
 #undef __XCHG_CASE
 
 #define __XCHG_GEN(sfx)                                                        \
-static inline unsigned long __xchg##sfx(unsigned long x,               \
+static __always_inline  unsigned long __xchg##sfx(unsigned long x,     \
                                        volatile void *ptr,             \
                                        int size)                       \
 {                                                                      \
@@ -116,7 +116,7 @@ __XCHG_GEN(_mb)
 #define xchg(...)              __xchg_wrapper( _mb, __VA_ARGS__)
 
 #define __CMPXCHG_GEN(sfx)                                             \
-static inline unsigned long __cmpxchg##sfx(volatile void *ptr,         \
+static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr,        \
                                           unsigned long old,           \
                                           unsigned long new,           \
                                           int size)                    \
@@ -223,7 +223,7 @@ __CMPWAIT_CASE( ,  , 8);
 #undef __CMPWAIT_CASE
 
 #define __CMPWAIT_GEN(sfx)                                             \
-static inline void __cmpwait##sfx(volatile void *ptr,                  \
+static __always_inline void __cmpwait##sfx(volatile void *ptr,         \
                                  unsigned long val,                    \
                                  int size)                             \
 {                                                                      \
index 1a037b94eba10d481866063bfcc8c5f59adf2e35..cee28a05ee98f0a63dabac43b939f46e457214a6 100644 (file)
@@ -159,6 +159,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 }
 
 #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
+#define COMPAT_MINSIGSTKSZ     2048
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
index 25ce9056cf641806ce8dd0a45f2392b546f47e78..c3de0bbf0e9a2d783b1b9cdfd2b5c6b66b23f0b5 100644 (file)
@@ -52,7 +52,8 @@
 #define ARM64_MISMATCHED_CACHE_TYPE            31
 #define ARM64_HAS_STAGE2_FWB                   32
 #define ARM64_WORKAROUND_1463225               33
+#define ARM64_SSBS                             34
 
-#define ARM64_NCAPS                            34
+#define ARM64_NCAPS                            35
 
 #endif /* __ASM_CPUCAPS_H */
index 1717ba1db35ddb935720c20ec46c318d59ca9b83..dda6e50568107e190366b43ac2942b0639b6f41f 100644 (file)
  */
 
 enum ftr_type {
-       FTR_EXACT,      /* Use a predefined safe value */
-       FTR_LOWER_SAFE, /* Smaller value is safe */
-       FTR_HIGHER_SAFE,/* Bigger value is safe */
+       FTR_EXACT,                      /* Use a predefined safe value */
+       FTR_LOWER_SAFE,                 /* Smaller value is safe */
+       FTR_HIGHER_SAFE,                /* Bigger value is safe */
+       FTR_HIGHER_OR_ZERO_SAFE,        /* Bigger value is safe, but 0 is biggest */
 };
 
 #define FTR_STRICT     true    /* SANITY check strict matching required */
@@ -524,11 +525,7 @@ static inline int arm64_get_ssbd_state(void)
 #endif
 }
 
-#ifdef CONFIG_ARM64_SSBD
 void arm64_set_ssbd_mitigation(bool state);
-#else
-static inline void arm64_set_ssbd_mitigation(bool state) {}
-#endif
 
 #endif /* __ASSEMBLY__ */
 
index b4a48419769f2886f3561df90126db74c90949e8..9b7d5abd04afd32169d1ee29d0f87e33b5e48136 100644 (file)
 #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
                             MIDR_ARCHITECTURE_MASK)
 
-#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max)           \
-({                                                                     \
-       u32 _model = (midr) & MIDR_CPU_MODEL_MASK;                      \
-       u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);     \
-                                                                       \
-       _model == (model) && rv >= (rv_min) && rv <= (rv_max);          \
- })
-
 #define ARM_CPU_IMP_ARM                        0x41
 #define ARM_CPU_IMP_APM                        0x50
 #define ARM_CPU_IMP_CAVIUM             0x43
@@ -153,10 +145,19 @@ struct midr_range {
 
 #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
 
+static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
+                                          u32 rv_max)
+{
+       u32 _model = midr & MIDR_CPU_MODEL_MASK;
+       u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
+
+       return _model == model && rv >= rv_min && rv <= rv_max;
+}
+
 static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
 {
-       return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
-                                range->rv_min, range->rv_max);
+       return midr_is_cpu_model_range(midr, range->model,
+                                      range->rv_min, range->rv_max);
 }
 
 static inline bool
index 7ed320895d1f463d1e95cd9ec6328a49eed765ae..f52a2968a3b696270bc73adbb7adfdca1ce01744 100644 (file)
@@ -94,7 +94,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
        ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
 
 #define alloc_screen_info(x...)                &screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+                                   struct screen_info *si)
+{
+}
 
 /* redeclare as 'hidden' so the compiler will generate relative references */
 extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
index 6abe4002945f672ac7fd8c93ab95c1f9cb4b2d92..367b2e0b6d769a0a07c7ee307c267036cf19207a 100644 (file)
@@ -398,6 +398,8 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
 DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
 
+void __kvm_enable_ssbs(void);
+
 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
                                       unsigned long hyp_stack_ptr,
                                       unsigned long vector_ptr)
@@ -418,6 +420,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
         */
        BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
        __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
+
+       /*
+        * Disabling SSBD on a non-VHE system requires us to enable SSBS
+        * at EL2.
+        */
+       if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
+           arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+               kvm_call_hyp(__kvm_enable_ssbs);
+       }
 }
 
 static inline bool kvm_arch_check_sve_has_vhe(void)
index ea423db39364456585192cee8eebf3e7432f2d35..212a4882665514a711a454d2360d892087b5f3b7 100644 (file)
@@ -224,8 +224,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
         * Only if the new pte is valid and kernel, otherwise TLB maintenance
         * or update_mmu_cache() have the necessary barriers.
         */
-       if (pte_valid_not_user(pte))
+       if (pte_valid_not_user(pte)) {
                dsb(ishst);
+               isb();
+       }
 }
 
 extern void __sync_icache_dcache(pte_t pteval);
@@ -419,8 +421,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                 PMD_TYPE_SECT)
 
 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud)          (0)
-#define pud_table(pud)         (1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
 #else
 #define pud_sect(pud)          ((pud_val(pud) & PUD_TYPE_MASK) == \
                                 PUD_TYPE_SECT)
@@ -432,6 +434,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
        WRITE_ONCE(*pmdp, pmd);
        dsb(ishst);
+       isb();
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -483,6 +486,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 {
        WRITE_ONCE(*pudp, pud);
        dsb(ishst);
+       isb();
 }
 
 static inline void pud_clear(pud_t *pudp)
index def5a5e807f02d4a81db3edba771fead4d7de80e..773ea8e0e442136269f9633c6abe0fec47ed1978 100644 (file)
@@ -177,11 +177,25 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
        regs->pc = pc;
 }
 
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+       regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+       regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
                                unsigned long sp)
 {
        start_thread_common(regs, pc);
        regs->pstate = PSR_MODE_EL0t;
+
+       if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+               set_ssbs_bit(regs);
+
        regs->sp = sp;
 }
 
@@ -198,6 +212,9 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
        regs->pstate |= PSR_AA32_E_BIT;
 #endif
 
+       if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+               set_compat_ssbs_bit(regs);
+
        regs->compat_sp = sp;
 }
 #endif
index 177b851ca6d997741580e73c89e448f274ac3ba7..6bc43889d11e46cbfb79882da231eb9728e6b7e7 100644 (file)
@@ -50,6 +50,7 @@
 #define PSR_AA32_I_BIT         0x00000080
 #define PSR_AA32_A_BIT         0x00000100
 #define PSR_AA32_E_BIT         0x00000200
+#define PSR_AA32_SSBS_BIT      0x00800000
 #define PSR_AA32_DIT_BIT       0x01000000
 #define PSR_AA32_Q_BIT         0x08000000
 #define PSR_AA32_V_BIT         0x10000000
index c1470931b8974936ed2a86fb231c15764d08f573..3091ae5975a3ad0846ff038ab9712d6effa2b127 100644 (file)
 
 #define REG_PSTATE_PAN_IMM             sys_reg(0, 0, 4, 0, 4)
 #define REG_PSTATE_UAO_IMM             sys_reg(0, 0, 4, 0, 3)
+#define REG_PSTATE_SSBS_IMM            sys_reg(0, 3, 4, 0, 1)
 
 #define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM |        \
                                      (!!x)<<8 | 0x1f)
 #define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM |        \
                                      (!!x)<<8 | 0x1f)
+#define SET_PSTATE_SSBS(x) __emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM | \
+                                      (!!x)<<8 | 0x1f)
 
 #define SYS_DC_ISW                     sys_insn(1, 0, 7, 6, 2)
 #define SYS_DC_CSW                     sys_insn(1, 0, 7, 10, 2)
 #define SYS_ICH_LR15_EL2               __SYS__LR8_EL2(7)
 
 /* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_DSSBS        (1UL << 44)
 #define SCTLR_ELx_EE    (1 << 25)
 #define SCTLR_ELx_IESB (1 << 21)
 #define SCTLR_ELx_WXN  (1 << 19)
                         (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
                         (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
                         (1 << 27) | (1 << 30) | (1 << 31) | \
-                        (0xffffffffUL << 32))
+                        (0xffffefffUL << 32))
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
 #define ENDIAN_SET_EL2         SCTLR_ELx_EE
 #define SCTLR_EL2_SET  (SCTLR_ELx_IESB   | ENDIAN_SET_EL2   | SCTLR_EL2_RES1)
 #define SCTLR_EL2_CLEAR        (SCTLR_ELx_M      | SCTLR_ELx_A    | SCTLR_ELx_C   | \
                         SCTLR_ELx_SA     | SCTLR_ELx_I    | SCTLR_ELx_WXN | \
-                        ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
+                        SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
 
 #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
 #error "Inconsistent SCTLR_EL2 set/clear bits"
                         (1 << 29))
 #define SCTLR_EL1_RES0  ((1 << 6)  | (1 << 10) | (1 << 13) | (1 << 17) | \
                         (1 << 27) | (1 << 30) | (1 << 31) | \
-                        (0xffffffffUL << 32))
+                        (0xffffefffUL << 32))
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
 #define ENDIAN_SET_EL1         (SCTLR_EL1_E0E | SCTLR_ELx_EE)
                         ENDIAN_SET_EL1 | SCTLR_EL1_UCI  | SCTLR_EL1_RES1)
 #define SCTLR_EL1_CLEAR        (SCTLR_ELx_A   | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD    |\
                         SCTLR_EL1_UMA | SCTLR_ELx_WXN     | ENDIAN_CLEAR_EL1 |\
-                        SCTLR_EL1_RES0)
+                        SCTLR_ELx_DSSBS | SCTLR_EL1_RES0)
 
 #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
 #error "Inconsistent SCTLR_EL1 set/clear bits"
 #define ID_AA64PFR0_EL0_64BIT_ONLY     0x1
 #define ID_AA64PFR0_EL0_32BIT_64BIT    0x2
 
+/* id_aa64pfr1 */
+#define ID_AA64PFR1_SSBS_SHIFT         4
+
+#define ID_AA64PFR1_SSBS_PSTATE_NI     0
+#define ID_AA64PFR1_SSBS_PSTATE_ONLY   1
+#define ID_AA64PFR1_SSBS_PSTATE_INSNS  2
+
 /* id_aa64mmfr0 */
 #define ID_AA64MMFR0_TGRAN4_SHIFT      28
 #define ID_AA64MMFR0_TGRAN64_SHIFT     24
index a4a1901140ee98d21863f4f2978a7b627c5d0014..fc247b96619cc4bb38ea565c18c253806f0ec3d5 100644 (file)
@@ -224,6 +224,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
 
        __tlbi(vaae1is, addr);
        dsb(ish);
+       isb();
 }
 #endif
 
index 17c65c8f33cb6073acda182e7b1c943e08730870..2bcd6e4f34740337c0d122fef5a2b513dcb75dd1 100644 (file)
@@ -48,5 +48,6 @@
 #define HWCAP_USCAT            (1 << 25)
 #define HWCAP_ILRCPC           (1 << 26)
 #define HWCAP_FLAGM            (1 << 27)
+#define HWCAP_SSBS             (1 << 28)
 
 #endif /* _UAPI__ASM_HWCAP_H */
index 5dff8eccd17d4c4ff2e7514519eed1bf959b0fa6..b0fd1d3001543f9462d33a4ca1f0b342a7b7a68f 100644 (file)
@@ -46,6 +46,7 @@
 #define PSR_I_BIT      0x00000080
 #define PSR_A_BIT      0x00000100
 #define PSR_D_BIT      0x00000200
+#define PSR_SSBS_BIT   0x00001000
 #define PSR_PAN_BIT    0x00400000
 #define PSR_UAO_BIT    0x00800000
 #define PSR_V_BIT      0x10000000
index ed46dc188b225d2d0aec587f435b5b2ce774c5cd..970f15c76bace5bebf097e550c3b8b5b32de3fd4 100644 (file)
@@ -154,10 +154,14 @@ static int __init acpi_fadt_sanity_check(void)
         */
        if (table->revision < 5 ||
           (table->revision == 5 && fadt->minor_revision < 1)) {
-               pr_err("Unsupported FADT revision %d.%d, should be 5.1+\n",
+               pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
                       table->revision, fadt->minor_revision);
-               ret = -EINVAL;
-               goto out;
+
+               if (!fadt->arm_boot_flags) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               pr_err("FADT has ARM boot flags set, assuming 5.1\n");
        }
 
        if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
index dc6c535cbd130c5e7d1c9946bbb16e7cb6b712ba..71888808ded72ade5574fd067b991e40a93e6cc7 100644 (file)
 #include <linux/arm-smccc.h>
 #include <linux/psci.h>
 #include <linux/types.h>
+#include <linux/cpu.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/smp_plat.h>
 
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -87,7 +89,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
 
 atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -109,9 +110,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
        __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
 }
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
-                                     const char *hyp_vecs_start,
-                                     const char *hyp_vecs_end)
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+                                   const char *hyp_vecs_start,
+                                   const char *hyp_vecs_end)
 {
        static DEFINE_SPINLOCK(bp_lock);
        int cpu, slot = -1;
@@ -138,7 +139,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 #define __smccc_workaround_1_smc_start         NULL
 #define __smccc_workaround_1_smc_end           NULL
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
 {
@@ -146,23 +147,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 }
 #endif /* CONFIG_KVM_INDIRECT_VECTORS */
 
-static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
-                                    bp_hardening_cb_t fn,
-                                    const char *hyp_vecs_start,
-                                    const char *hyp_vecs_end)
-{
-       u64 pfr0;
-
-       if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-               return;
-
-       pfr0 = read_cpuid(ID_AA64PFR0_EL1);
-       if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
-               return;
-
-       __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
-}
-
 #include <uapi/linux/psci.h>
 #include <linux/arm-smccc.h>
 #include <linux/psci.h>
@@ -189,60 +173,83 @@ static void qcom_link_stack_sanitization(void)
                     : "=&r" (tmp));
 }
 
-static void
-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+static bool __nospectre_v2;
+static int __init parse_nospectre_v2(char *str)
+{
+       __nospectre_v2 = true;
+       return 0;
+}
+early_param("nospectre_v2", parse_nospectre_v2);
+
+/*
+ * -1: No workaround
+ *  0: No workaround required
+ *  1: Workaround installed
+ */
+static int detect_harden_bp_fw(void)
 {
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;
        u32 midr = read_cpuid_id();
 
-       if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-               return;
-
        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
-               return;
+               return -1;
 
        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-               if ((int)res.a0 < 0)
-                       return;
-               cb = call_hvc_arch_workaround_1;
-               /* This is a guest, no need to patch KVM vectors */
-               smccc_start = NULL;
-               smccc_end = NULL;
+               switch ((int)res.a0) {
+               case 1:
+                       /* Firmware says we're just fine */
+                       return 0;
+               case 0:
+                       cb = call_hvc_arch_workaround_1;
+                       /* This is a guest, no need to patch KVM vectors */
+                       smccc_start = NULL;
+                       smccc_end = NULL;
+                       break;
+               default:
+                       return -1;
+               }
                break;
 
        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-               if ((int)res.a0 < 0)
-                       return;
-               cb = call_smc_arch_workaround_1;
-               smccc_start = __smccc_workaround_1_smc_start;
-               smccc_end = __smccc_workaround_1_smc_end;
+               switch ((int)res.a0) {
+               case 1:
+                       /* Firmware says we're just fine */
+                       return 0;
+               case 0:
+                       cb = call_smc_arch_workaround_1;
+                       smccc_start = __smccc_workaround_1_smc_start;
+                       smccc_end = __smccc_workaround_1_smc_end;
+                       break;
+               default:
+                       return -1;
+               }
                break;
 
        default:
-               return;
+               return -1;
        }
 
        if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
            ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
                cb = qcom_link_stack_sanitization;
 
-       install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+       if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
+               install_bp_hardening_cb(cb, smccc_start, smccc_end);
 
-       return;
+       return 1;
 }
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
-#ifdef CONFIG_ARM64_SSBD
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;
 
 static const struct ssbd_options {
        const char      *str;
@@ -312,6 +319,19 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
 
 void arm64_set_ssbd_mitigation(bool state)
 {
+       if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
+               pr_info_once("SSBD disabled by kernel configuration\n");
+               return;
+       }
+
+       if (this_cpu_has_cap(ARM64_SSBS)) {
+               if (state)
+                       asm volatile(SET_PSTATE_SSBS(0));
+               else
+                       asm volatile(SET_PSTATE_SSBS(1));
+               return;
+       }
+
        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
@@ -333,11 +353,28 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
        struct arm_smccc_res res;
        bool required = true;
        s32 val;
+       bool this_cpu_safe = false;
 
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 
+       if (cpu_mitigations_off())
+               ssbd_state = ARM64_SSBD_FORCE_DISABLE;
+
+       /* delay setting __ssb_safe until we get a firmware response */
+       if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+               this_cpu_safe = true;
+
+       if (this_cpu_has_cap(ARM64_SSBS)) {
+               if (!this_cpu_safe)
+                       __ssb_safe = false;
+               required = false;
+               goto out_printmsg;
+       }
+
        if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
                ssbd_state = ARM64_SSBD_UNKNOWN;
+               if (!this_cpu_safe)
+                       __ssb_safe = false;
                return false;
        }
 
@@ -354,6 +391,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
        default:
                ssbd_state = ARM64_SSBD_UNKNOWN;
+               if (!this_cpu_safe)
+                       __ssb_safe = false;
                return false;
        }
 
@@ -362,14 +401,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
        switch (val) {
        case SMCCC_RET_NOT_SUPPORTED:
                ssbd_state = ARM64_SSBD_UNKNOWN;
+               if (!this_cpu_safe)
+                       __ssb_safe = false;
                return false;
 
+       /* machines with mixed mitigation requirements must not return this */
        case SMCCC_RET_NOT_REQUIRED:
                pr_info_once("%s mitigation not required\n", entry->desc);
                ssbd_state = ARM64_SSBD_MITIGATED;
                return false;
 
        case SMCCC_RET_SUCCESS:
+               __ssb_safe = false;
                required = true;
                break;
 
@@ -379,12 +422,13 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
        default:
                WARN_ON(1);
+               if (!this_cpu_safe)
+                       __ssb_safe = false;
                return false;
        }
 
        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
-               pr_info_once("%s disabled from command-line\n", entry->desc);
                arm64_set_ssbd_mitigation(false);
                required = false;
                break;
@@ -397,7 +441,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
                break;
 
        case ARM64_SSBD_FORCE_ENABLE:
-               pr_info_once("%s forced from command-line\n", entry->desc);
                arm64_set_ssbd_mitigation(true);
                required = true;
                break;
@@ -407,9 +450,27 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
                break;
        }
 
+out_printmsg:
+       switch (ssbd_state) {
+       case ARM64_SSBD_FORCE_DISABLE:
+               pr_info_once("%s disabled from command-line\n", entry->desc);
+               break;
+
+       case ARM64_SSBD_FORCE_ENABLE:
+               pr_info_once("%s forced from command-line\n", entry->desc);
+               break;
+       }
+
        return required;
 }
-#endif /* CONFIG_ARM64_SSBD */
+
+/* known invulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+       {},
+};
 
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -464,6 +525,10 @@ has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE_LIST(midr_list)
 
+/* Track overall mitigation state. We are only mitigated if all cores are ok */
+static bool __hardenbp_enab = true;
+static bool __spectrev2_safe = true;
+
 /*
  * Generic helper for handling capabilties with multiple (match,enable) pairs
  * of call backs, sharing the same capability bit.
@@ -496,26 +561,87 @@ multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
                        caps->cpu_enable(caps);
 }
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * List of CPUs that do not need any Spectre-v2 mitigation at all.
+ */
+static const struct midr_range spectre_v2_safe_list[] = {
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+       { /* sentinel */ }
+};
 
 /*
- * List of CPUs where we need to issue a psci call to
- * harden the branch predictor.
+ * Track overall bp hardening for all heterogeneous cores in the machine.
+ * We are only considered "safe" if all booted cores are known safe.
  */
-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+static bool __maybe_unused
+check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+{
+       int need_wa;
+
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+       /* If the CPU has CSV2 set, we're safe */
+       if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
+                                                ID_AA64PFR0_CSV2_SHIFT))
+               return false;
+
+       /* Alternatively, we have a list of unaffected CPUs */
+       if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+               return false;
+
+       /* Fallback to firmware detection */
+       need_wa = detect_harden_bp_fw();
+       if (!need_wa)
+               return false;
+
+       __spectrev2_safe = false;
+
+       if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
+               pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
+               __hardenbp_enab = false;
+               return false;
+       }
+
+       /* forced off */
+       if (__nospectre_v2 || cpu_mitigations_off()) {
+               pr_info_once("spectrev2 mitigation disabled by command line option\n");
+               __hardenbp_enab = false;
+               return false;
+       }
+
+       if (need_wa < 0) {
+               pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
+               __hardenbp_enab = false;
+       }
+
+       return (need_wa > 0);
+}
+
+static const __maybe_unused struct midr_range tx2_family_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
        MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
-       MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
        {},
 };
 
-#endif
+static bool __maybe_unused
+needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
+                        int scope)
+{
+       int i;
+
+       if (!is_affected_midr_range_list(entry, scope) ||
+           !is_hyp_mode_available())
+               return false;
+
+       for_each_possible_cpu(i) {
+               if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
+                       return true;
+       }
+
+       return false;
+}
 
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 
@@ -674,13 +800,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
 #endif
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-               .cpu_enable = enable_smccc_arch_workaround_1,
-               ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = check_branch_predictor,
        },
-#endif
 #ifdef CONFIG_HARDEN_EL2_VECTORS
        {
                .desc = "EL2 vector hardening",
@@ -688,14 +812,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
        },
 #endif
-#ifdef CONFIG_ARM64_SSBD
        {
                .desc = "Speculative Store Bypass Disable",
                .capability = ARM64_SSBD,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_ssbd_mitigation,
+               .midr_range_list = arm64_ssb_cpus,
        },
-#endif
 #ifdef CONFIG_ARM64_ERRATUM_1463225
        {
                .desc = "ARM erratum 1463225",
@@ -703,7 +826,50 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
        },
+#endif
+#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+       {
+               .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
+               .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
+               ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+               .matches = needs_tx2_tvm_workaround,
+       },
 #endif
        {
        }
 };
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       if (__spectrev2_safe)
+               return sprintf(buf, "Not affected\n");
+
+       if (__hardenbp_enab)
+               return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+
+       return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       if (__ssb_safe)
+               return sprintf(buf, "Not affected\n");
+
+       switch (ssbd_state) {
+       case ARM64_SSBD_KERNEL:
+       case ARM64_SSBD_FORCE_ENABLE:
+               if (IS_ENABLED(CONFIG_ARM64_SSBD))
+                       return sprintf(buf,
+                           "Mitigation: Speculative Store Bypass disabled via prctl\n");
+       }
+
+       return sprintf(buf, "Vulnerable\n");
+}
index 93f69d82225de1b59cd03e63710ed8031c5b4c76..ff5beb59b3dc377bb762534e4beaaa1d5acab250 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/stop_machine.h>
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/cpu.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
@@ -164,10 +165,23 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
        ARM64_FTR_END,
 };
 
+static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+       ARM64_FTR_END,
+};
+
 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
-       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
-       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+       /*
+        * We already refuse to boot CPUs that don't support our configured
+        * page size, so we can only detect mismatches for a page size other
+        * than the one we're currently using. Unfortunately, SoCs like this
+        * exist in the wild so, even though we don't like it, we'll have to go
+        * along with it and treat them as non-strict.
+        */
+       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
        /* Linux shouldn't care about secure memory */
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
@@ -206,8 +220,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
        /*
         * Linux can handle differing I-cache policies. Userspace JITs will
@@ -371,7 +385,7 @@ static const struct __ftr_reg_entry {
 
        /* Op1 = 0, CRn = 0, CRm = 4 */
        ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
-       ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
+       ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
        ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
 
        /* Op1 = 0, CRn = 0, CRm = 5 */
@@ -449,6 +463,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
        case FTR_LOWER_SAFE:
                ret = new < cur ? new : cur;
                break;
+       case FTR_HIGHER_OR_ZERO_SAFE:
+               if (!cur || !new)
+                       break;
+               /* Fallthrough */
        case FTR_HIGHER_SAFE:
                ret = new > cur ? new : cur;
                break;
@@ -657,7 +675,6 @@ void update_cpu_features(int cpu,
 
        /*
         * EL3 is not our concern.
-        * ID_AA64PFR1 is currently RES0.
         */
        taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
                                      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
@@ -834,7 +851,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
        u32 midr = read_cpuid_id();
 
        /* Cavium ThunderX pass 1.x and 2.x */
-       return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
+       return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
                MIDR_CPU_VAR_REV(0, 0),
                MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
 }
@@ -873,7 +890,7 @@ static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
        return ctr & BIT(CTR_DIC_SHIFT);
 }
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static bool __meltdown_safe = true;
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
@@ -883,9 +900,25 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
        static const struct midr_range kpti_safe_list[] = {
                MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
                MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
                { /* sentinel */ }
        };
-       char const *str = "command line option";
+       char const *str = "kpti command line option";
+       bool meltdown_safe;
+
+       meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+
+       /* Defer to CPU feature registers */
+       if (has_cpuid_feature(entry, scope))
+               meltdown_safe = true;
+
+       if (!meltdown_safe)
+               __meltdown_safe = false;
 
        /*
         * For reasons that aren't entirely clear, enabling KPTI on Cavium
@@ -897,6 +930,24 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
                __kpti_forced = -1;
        }
 
+       /* Useful for KASLR robustness */
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+               if (!__kpti_forced) {
+                       str = "KASLR";
+                       __kpti_forced = 1;
+               }
+       }
+
+       if (cpu_mitigations_off() && !__kpti_forced) {
+               str = "mitigations=off";
+               __kpti_forced = -1;
+       }
+
+       if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+               pr_info_once("kernel page table isolation disabled by kernel configuration\n");
+               return false;
+       }
+
        /* Forced? */
        if (__kpti_forced) {
                pr_info_once("kernel page table isolation forced %s by %s\n",
@@ -904,18 +955,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
                return __kpti_forced > 0;
        }
 
-       /* Useful for KASLR robustness */
-       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-               return true;
-
-       /* Don't force KPTI for CPUs that are not vulnerable */
-       if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
-               return false;
-
-       /* Defer to CPU feature registers */
-       return !has_cpuid_feature(entry, scope);
+       return !meltdown_safe;
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static void
 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 {
@@ -940,6 +983,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 
        return;
 }
+#else
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+{
+}
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 static int __init parse_kpti(char *str)
 {
@@ -953,7 +1002,6 @@ static int __init parse_kpti(char *str)
        return 0;
 }
 early_param("kpti", parse_kpti);
-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 static inline void __cpu_enable_hw_dbm(void)
@@ -1049,6 +1097,48 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
        WARN_ON(val & (7 << 27 | 7 << 21));
 }
 
+#ifdef CONFIG_ARM64_SSBD
+static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
+{
+       if (user_mode(regs))
+               return 1;
+
+       if (instr & BIT(CRm_shift))
+               regs->pstate |= PSR_SSBS_BIT;
+       else
+               regs->pstate &= ~PSR_SSBS_BIT;
+
+       arm64_skip_faulting_instruction(regs, 4);
+       return 0;
+}
+
+static struct undef_hook ssbs_emulation_hook = {
+       .instr_mask     = ~(1U << CRm_shift),
+       .instr_val      = 0xd500001f | REG_PSTATE_SSBS_IMM,
+       .fn             = ssbs_emulation_handler,
+};
+
+static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
+{
+       static bool undef_hook_registered = false;
+       static DEFINE_SPINLOCK(hook_lock);
+
+       spin_lock(&hook_lock);
+       if (!undef_hook_registered) {
+               register_undef_hook(&ssbs_emulation_hook);
+               undef_hook_registered = true;
+       }
+       spin_unlock(&hook_lock);
+
+       if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+               sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
+               arm64_set_ssbd_mitigation(false);
+       } else {
+               arm64_set_ssbd_mitigation(true);
+       }
+}
+#endif /* CONFIG_ARM64_SSBD */
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
@@ -1132,7 +1222,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .field_pos = ID_AA64PFR0_EL0_SHIFT,
                .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
        },
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
        {
                .desc = "Kernel page table isolation (KPTI)",
                .capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -1148,7 +1237,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .matches = unmap_kernel_at_el0,
                .cpu_enable = kpti_install_ng_mappings,
        },
-#endif
        {
                /* FP/SIMD is not implemented */
                .capability = ARM64_HAS_NO_FPSIMD,
@@ -1235,6 +1323,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .matches = has_hw_dbm,
                .cpu_enable = cpu_enable_hw_dbm,
        },
+#endif
+#ifdef CONFIG_ARM64_SSBD
+       {
+               .desc = "Speculative Store Bypassing Safe (SSBS)",
+               .capability = ARM64_SSBS,
+               .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64PFR1_EL1,
+               .field_pos = ID_AA64PFR1_SSBS_SHIFT,
+               .sign = FTR_UNSIGNED,
+               .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
+               .cpu_enable = cpu_enable_ssbs,
+       },
 #endif
        {},
 };
@@ -1281,6 +1382,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 #ifdef CONFIG_ARM64_SVE
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
 #endif
+       HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
        {},
 };
 
@@ -1775,3 +1877,15 @@ void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
        /* Firmware may have left a deferred SError in this register. */
        write_sysreg_s(0, SYS_DISR_EL1);
 }
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       if (__meltdown_safe)
+               return sprintf(buf, "Not affected\n");
+
+       if (arm64_kernel_unmapped_at_el0())
+               return sprintf(buf, "Mitigation: PTI\n");
+
+       return sprintf(buf, "Vulnerable\n");
+}
index e9ab7b3ed31765e2a915c9841d515679f8e8749a..dce971f2c1673ed8dd0dca5e6d4833d8a16d086f 100644 (file)
@@ -81,6 +81,7 @@ static const char *const hwcap_str[] = {
        "uscat",
        "ilrcpc",
        "flagm",
+       "ssbs",
        NULL
 };
 
index 8556876c91096065b8fba4a740eefdc816d6006b..5f800384cb9a8c141f97e55b6d3b15a56f31494a 100644 (file)
@@ -824,7 +824,7 @@ el0_dbg:
        mov     x1, x25
        mov     x2, sp
        bl      do_debug_exception
-       enable_daif
+       enable_da_f
        ct_user_exit
        b       ret_to_user
 el0_inv:
@@ -876,7 +876,7 @@ el0_error_naked:
        enable_dbg
        mov     x0, sp
        bl      do_serror
-       enable_daif
+       enable_da_f
        ct_user_exit
        b       ret_to_user
 ENDPROC(el0_error)
index 57e962290df3a0aee4aaeddbb6a8c9369b5c7931..7eff8afa035fdbca60383a946558ca06befde450 100644 (file)
@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-               struct plt_entry trampoline;
+               struct plt_entry trampoline, *dst;
                struct module *mod;
 
                /*
@@ -104,24 +104,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
                 * is added in the future, but for now, the pr_err() below
                 * deals with a theoretical issue only.
                 */
+               dst = mod->arch.ftrace_trampoline;
                trampoline = get_plt_entry(addr);
-               if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-                                      &trampoline)) {
-                       if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-                                              &(struct plt_entry){})) {
+               if (!plt_entries_equal(dst, &trampoline)) {
+                       if (!plt_entries_equal(dst, &(struct plt_entry){})) {
                                pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
                                return -EINVAL;
                        }
 
                        /* point the trampoline to our ftrace entry point */
                        module_disable_ro(mod);
-                       *mod->arch.ftrace_trampoline = trampoline;
+                       *dst = trampoline;
                        module_enable_ro(mod, true);
 
-                       /* update trampoline before patching in the branch */
-                       smp_wmb();
+                       /*
+                        * Ensure updated trampoline is visible to instruction
+                        * fetch before we patch in the branch.
+                        */
+                       __flush_icache_range((unsigned long)&dst[0],
+                                            (unsigned long)&dst[1]);
                }
-               addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+               addr = (unsigned long)dst;
 #else /* CONFIG_ARM64_MODULE_PLTS */
                return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */
index 8c9644376326fe96f05645d298c1b4fcd383d652..7c0611f5d2ce7594bd33b8e967db8aa9210befa3 100644 (file)
@@ -547,13 +547,14 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
                        /* Aligned */
                        break;
                case 1:
-                       /* Allow single byte watchpoint. */
-                       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
-                               break;
                case 2:
                        /* Allow halfword watchpoints and breakpoints. */
                        if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
                                break;
+               case 3:
+                       /* Allow single byte watchpoint. */
+                       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
+                               break;
                default:
                        return -EINVAL;
                }
index 8da289dc843a0aeeb6f8a27b67325c3de8678d8d..eff6a564ab8081c5726b4dce0258ff8d350dc9f1 100644 (file)
 
 #ifdef CONFIG_EFI
 
-__efistub_stext_offset = stext - _text;
+/*
+ * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
+ * https://github.com/ClangBuiltLinux/linux/issues/561
+ */
+__efistub_stext_offset = ABSOLUTE(stext - _text);
 
 /*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to
index 7f1628effe6d7b866e60712b338fedb6e885d6ed..d668c13218b807b5faad18d1d39c1f2817c6794a 100644 (file)
@@ -285,22 +285,27 @@ void arch_release_task_struct(struct task_struct *tsk)
        fpsimd_release_task(tsk);
 }
 
-/*
- * src and dst may temporarily have aliased sve_state after task_struct
- * is copied.  We cannot fix this properly here, because src may have
- * live SVE state and dst's thread_info may not exist yet, so tweaking
- * either src's or dst's TIF_SVE is not safe.
- *
- * The unaliasing is done in copy_thread() instead.  This works because
- * dst is not schedulable or traceable until both of these functions
- * have been called.
- */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
        if (current->mm)
                fpsimd_preserve_current_state();
        *dst = *src;
 
+       /* We rely on the above assignment to initialize dst's thread_flags: */
+       BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
+
+       /*
+        * Detach src's sve_state (if any) from dst so that it does not
+        * get erroneously used or freed prematurely.  dst's sve_state
+        * will be allocated on demand later on if dst uses SVE.
+        * For consistency, also clear TIF_SVE here: this could be done
+        * later in copy_process(), but to avoid tripping up future
+        * maintainers it is best not to leave TIF_SVE and sve_state in
+        * an inconsistent state, even temporarily.
+        */
+       dst->thread.sve_state = NULL;
+       clear_tsk_thread_flag(dst, TIF_SVE);
+
        return 0;
 }
 
@@ -313,13 +318,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 
-       /*
-        * Unalias p->thread.sve_state (if any) from the parent task
-        * and disable discard SVE state for p:
-        */
-       clear_tsk_thread_flag(p, TIF_SVE);
-       p->thread.sve_state = NULL;
-
        /*
         * In case p was allocated the same task_struct pointer as some
         * other recently-exited task, make sure p is disassociated from
@@ -358,6 +356,10 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                if (IS_ENABLED(CONFIG_ARM64_UAO) &&
                    cpus_have_const_cap(ARM64_HAS_UAO))
                        childregs->pstate |= PSR_UAO_BIT;
+
+               if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+                       set_ssbs_bit(childregs);
+
                p->thread.cpu_context.x19 = stack_start;
                p->thread.cpu_context.x20 = stk_sz;
        }
@@ -397,6 +399,32 @@ void uao_thread_switch(struct task_struct *next)
        }
 }
 
+/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+       struct pt_regs *regs = task_pt_regs(next);
+
+       /*
+        * Nothing to do for kernel threads, but 'regs' may be junk
+        * (e.g. idle task) so check the flags and bail early.
+        */
+       if (unlikely(next->flags & PF_KTHREAD))
+               return;
+
+       /* If the mitigation is enabled, then we leave SSBS clear. */
+       if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+           test_tsk_thread_flag(next, TIF_SSBD))
+               return;
+
+       if (compat_user_mode(regs))
+               set_compat_ssbs_bit(regs);
+       else if (user_mode(regs))
+               set_ssbs_bit(regs);
+}
+
 /*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
@@ -425,6 +453,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
        contextidr_thread_switch(next);
        entry_task_switch(next);
        uao_thread_switch(next);
+       ssbs_thread_switch(next);
 
        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
index 6219486fa25fa4490d4397f6aee15e18fc208559..0211c3c7533b0a194e53cfbaca9fa3e7abd0d435 100644 (file)
@@ -1666,19 +1666,20 @@ void syscall_trace_exit(struct pt_regs *regs)
 }
 
 /*
- * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
- * We also take into account DIT (bit 24), which is not yet documented, and
- * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
- * allocated an EL0 meaning in future.
+ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
+ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
+ * not described in ARM DDI 0487D.a.
+ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
+ * be allocated an EL0 meaning in future.
  * Userspace cannot use these until they have an architectural meaning.
  * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
  * We also reserve IL for the kernel; SS is handled dynamically.
  */
 #define SPSR_EL1_AARCH64_RES0_BITS \
-       (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
-        GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
+       (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+        GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
 #define SPSR_EL1_AARCH32_RES0_BITS \
-       (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
+       (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
 
 static int valid_compat_regs(struct user_pt_regs *regs)
 {
index 933adbc0f654d84e2e7331a93455ee74c747a091..0311fe52c8ffb5be933f0546dd95cfe702cec6db 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
@@ -32,6 +33,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
                return 0;
        }
 }
+NOKPROBE_SYMBOL(save_return_addr);
 
 void *return_address(unsigned int level)
 {
@@ -55,3 +57,4 @@ void *return_address(unsigned int level)
                return NULL;
 }
 EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
index 388f8fc130800ff7d69c14a50b96f1086b964d6d..f496fb2f71227e4204d3c7d3bc2f8f7012a61cdc 100644 (file)
@@ -3,13 +3,31 @@
  * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
  */
 
+#include <linux/compat.h>
 #include <linux/errno.h>
 #include <linux/prctl.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/thread_info.h>
 
 #include <asm/cpufeature.h>
 
+static void ssbd_ssbs_enable(struct task_struct *task)
+{
+       u64 val = is_compat_thread(task_thread_info(task)) ?
+                 PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+       task_pt_regs(task)->pstate |= val;
+}
+
+static void ssbd_ssbs_disable(struct task_struct *task)
+{
+       u64 val = is_compat_thread(task_thread_info(task)) ?
+                 PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+       task_pt_regs(task)->pstate &= ~val;
+}
+
 /*
  * prctl interface for SSBD
  * FIXME: Drop the below ifdefery once merged in 4.18.
@@ -47,12 +65,14 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                clear_tsk_thread_flag(task, TIF_SSBD);
+               ssbd_ssbs_enable(task);
                break;
        case PR_SPEC_DISABLE:
                if (state == ARM64_SSBD_FORCE_DISABLE)
                        return -EPERM;
                task_set_spec_ssb_disable(task);
                set_tsk_thread_flag(task, TIF_SSBD);
+               ssbd_ssbs_disable(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                if (state == ARM64_SSBD_FORCE_DISABLE)
@@ -60,6 +80,7 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                set_tsk_thread_flag(task, TIF_SSBD);
+               ssbd_ssbs_disable(task);
                break;
        default:
                return -ERANGE;
index 4989f7ea1e59925ff9eda5ac68c7ac3cedb3d4fe..bb482ec044b61d43fe5071605b6a039ba8985c28 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
@@ -85,6 +86,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 
        return 0;
 }
+NOKPROBE_SYMBOL(unwind_frame);
 
 void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
                     int (*fn)(struct stackframe *, void *), void *data)
@@ -99,6 +101,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
                        break;
        }
 }
+NOKPROBE_SYMBOL(walk_stackframe);
 
 #ifdef CONFIG_STACKTRACE
 struct stack_trace_data {
index 0825c4a856e33da0d794f403fd2a6464543467c3..6106c49f84bc8abb794fb6d51aa5444be973861d 100644 (file)
@@ -340,17 +340,28 @@ void remove_cpu_topology(unsigned int cpu)
 }
 
 #ifdef CONFIG_ACPI
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+       int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+       /*
+        * if the PPTT doesn't have thread information, assume a homogeneous
+        * machine and return the current CPU's thread state.
+        */
+       if (is_threaded < 0)
+               is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
+
+       return !!is_threaded;
+}
+
 /*
  * Propagate the topology information of the processor_topology_node tree to the
  * cpu_topology array.
  */
 static int __init parse_acpi_topology(void)
 {
-       bool is_threaded;
        int cpu, topology_id;
 
-       is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
-
        for_each_possible_cpu(cpu) {
                int i, cache_id;
 
@@ -358,7 +369,7 @@ static int __init parse_acpi_topology(void)
                if (topology_id < 0)
                        return topology_id;
 
-               if (is_threaded) {
+               if (acpi_cpu_is_threaded(cpu)) {
                        cpu_topology[cpu].thread_id = topology_id;
                        topology_id = find_acpi_cpu_topology(cpu, 1);
                        cpu_topology[cpu].core_id   = topology_id;
index 963d669ae3a2d3db799a96c89f02ce430e7bafb2..7414b76191c2c97376c9cf47c99cd3802978c9a9 100644 (file)
@@ -293,3 +293,14 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
 
        vcpu->arch.sysregs_loaded_on_cpu = false;
 }
+
+void __hyp_text __kvm_enable_ssbs(void)
+{
+       u64 tmp;
+
+       asm volatile(
+       "mrs    %0, sctlr_el2\n"
+       "orr    %0, %0, %1\n"
+       "msr    sctlr_el2, %0"
+       : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
+}
index 7a5173ea227648b4ec534ee60d557c8e5894cf73..4c2e96ef306ed9e81cabdb6dd6eb49303d668128 100644 (file)
@@ -189,13 +189,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
        switch (spsr_idx) {
        case KVM_SPSR_SVC:
                write_sysreg_el1(v, spsr);
+               break;
        case KVM_SPSR_ABT:
                write_sysreg(v, spsr_abt);
+               break;
        case KVM_SPSR_UND:
                write_sysreg(v, spsr_und);
+               break;
        case KVM_SPSR_IRQ:
                write_sysreg(v, spsr_irq);
+               break;
        case KVM_SPSR_FIQ:
                write_sysreg(v, spsr_fiq);
+               break;
        }
 }
index d112af75680bbdf7c409c2aadb824da7672d915d..6da2bbdb9648fa4944920a2e12c6760048f61f09 100644 (file)
@@ -626,7 +626,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
         */
        val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
               | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
-       __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+       __vcpu_sys_reg(vcpu, r->reg) = val;
 }
 
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
@@ -968,13 +968,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                     \
        { SYS_DESC(SYS_DBGBVRn_EL1(n)),                                 \
-         trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },                \
+         trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },                \
        { SYS_DESC(SYS_DBGBCRn_EL1(n)),                                 \
-         trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },                \
+         trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },                \
        { SYS_DESC(SYS_DBGWVRn_EL1(n)),                                 \
-         trap_wvr, reset_wvr, n, 0,  get_wvr, set_wvr },               \
+         trap_wvr, reset_wvr, 0, 0,  get_wvr, set_wvr },               \
        { SYS_DESC(SYS_DBGWCRn_EL1(n)),                                 \
-         trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }
+         trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }
 
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)                                            \
@@ -1359,7 +1359,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
        { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
 
-       { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
+       { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
        { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
        { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
        { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
@@ -2072,13 +2072,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
 }
 
 static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
-                             const struct sys_reg_desc *table, size_t num)
+                               const struct sys_reg_desc *table, size_t num,
+                               unsigned long *bmap)
 {
        unsigned long i;
 
        for (i = 0; i < num; i++)
-               if (table[i].reset)
+               if (table[i].reset) {
+                       int reg = table[i].reg;
+
                        table[i].reset(vcpu, &table[i]);
+                       if (reg > 0 && reg < NR_SYS_REGS)
+                               set_bit(reg, bmap);
+               }
 }
 
 /**
@@ -2576,18 +2582,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 {
        size_t num;
        const struct sys_reg_desc *table;
-
-       /* Catch someone adding a register without putting in reset entry. */
-       memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
+       DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
 
        /* Generic chip reset first (so target could override). */
-       reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+       reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
 
        table = get_target_table(vcpu->arch.target, true, &num);
-       reset_sys_reg_descs(vcpu, table, num);
+       reset_sys_reg_descs(vcpu, table, num, bmap);
 
        for (num = 1; num < NR_SYS_REGS; num++) {
-               if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
+               if (WARN(!test_bit(num, bmap),
                         "Didn't reset __vcpu_sys_reg(%zi)\n", num))
                        break;
        }
index 774c3e17c79824a0d118625020fff5b3b703e4e0..29d2f425806e3ff181d82c65fc6c6ab944599991 100644 (file)
@@ -233,8 +233,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};
 
-       if (IS_ENABLED(CONFIG_ZONE_DMA32))
-               max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
+#ifdef CONFIG_ZONE_DMA32
+       max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
+#endif
        max_zone_pfns[ZONE_NORMAL] = max;
 
        free_area_init_nodes(max_zone_pfns);
index 842c8a5fcd53c0f5573bdf79c072c671c441ae54..157f2caa13516b66db0538a67a148036912dd57c 100644 (file)
@@ -65,7 +65,11 @@ unsigned long arch_mmap_rnd(void)
 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 {
        unsigned long gap = rlim_stack->rlim_cur;
-       unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
+       unsigned long pad = stack_guard_gap;
+
+       /* Account for stack randomization if necessary */
+       if (current->flags & PF_RANDOMIZE)
+               pad += (STACK_RND_MASK << PAGE_SHIFT);
 
        /* Values close to RLIM_INFINITY can overflow. */
        if (gap + pad > gap)
index 8cce091b6c21e256fcac74f4cc7b5e59a46e1929..ec6aa1863316210129d6dde178cdc27cc8ff261a 100644 (file)
@@ -294,6 +294,15 @@ skip_pgd:
        msr     sctlr_el1, x18
        isb
 
+       /*
+        * Invalidate the local I-cache so that any instructions fetched
+        * speculatively from the PoC are discarded, since they may have
+        * been dynamically patched at the PoU.
+        */
+       ic      iallu
+       dsb     nsh
+       isb
+
        /* Set the flag to zero to indicate that we're all done */
        str     wzr, [flag_ptr]
        ret
index 326448f9df16068392e49af096afd1c8b2713131..1a42ba885188a5c8e865bb28108c685ed1a37515 100644 (file)
@@ -914,10 +914,14 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
 void
 module_arch_cleanup (struct module *mod)
 {
-       if (mod->arch.init_unw_table)
+       if (mod->arch.init_unw_table) {
                unw_remove_unwind_table(mod->arch.init_unw_table);
-       if (mod->arch.core_unw_table)
+               mod->arch.init_unw_table = NULL;
+       }
+       if (mod->arch.core_unw_table) {
                unw_remove_unwind_table(mod->arch.core_unw_table);
+               mod->arch.core_unw_table = NULL;
+       }
 }
 
 void *dereference_module_function_descriptor(struct module *mod, void *ptr)
index 9000b249d225e97bbc1cd4af21c50f327493e624..407a617fa3a2bca1d9dbe85675a7bfefddda0f95 100644 (file)
@@ -22,7 +22,6 @@
 
 #include <linux/types.h>
 #include <asm/bootinfo-atari.h>
-#include <asm/raw_io.h>
 #include <asm/kmap.h>
 
 extern u_long atari_mch_cookie;
@@ -126,14 +125,6 @@ extern struct atari_hw_present atari_hw_present;
  */
 
 
-#define atari_readb   raw_inb
-#define atari_writeb  raw_outb
-
-#define atari_inb_p   raw_inb
-#define atari_outb_p  raw_outb
-
-
-
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
 
index 782b78f8a04890b315685b0ea78cab16a2d219af..e056feabbaf0b38ff08442edccb8e92ba524fef6 100644 (file)
 #include <asm-generic/iomap.h>
 
 #ifdef CONFIG_ATARI
-#include <asm/atarihw.h>
+#define atari_readb   raw_inb
+#define atari_writeb  raw_outb
+
+#define atari_inb_p   raw_inb
+#define atari_outb_p  raw_outb
 #endif
 
 
index 08cee11180e6998d2069b1591e78ad57288f9134..e441517785fda77020b6297826ddc33038fbf7b1 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include <asm/bootinfo-mac.h>
 
index 3c453a1f1ff10c218e809ec1f5efe2bb70b1ef7d..172801ed35b89994f6a52e492a7c486cb517d821 100644 (file)
@@ -78,6 +78,8 @@ OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \
 $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
        $(call if_changed,objcopy)
 
+HOSTCFLAGS_calc_vmlinuz_load_addr.o += $(LINUXINCLUDE)
+
 # Calculate the load address of the compressed kernel image
 hostprogs-y := calc_vmlinuz_load_addr
 
index 542c3ede97222faec0e38d8b88c280609068fd38..d14f75ec827323702d61d6ef6eb2871ecfda3513 100644 (file)
@@ -13,7 +13,7 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include "../../../../include/linux/sizes.h"
+#include <linux/sizes.h>
 
 int main(int argc, char *argv[])
 {
index 2bae201aa365106ac262865b0ba407fe36905599..1c7bf11f8450b42f0c2bb032b901fc5124060fa5 100644 (file)
@@ -99,7 +99,7 @@
 
                        miscintc: interrupt-controller@18060010 {
                                compatible = "qca,ar7240-misc-intc";
-                               reg = <0x18060010 0x4>;
+                               reg = <0x18060010 0x8>;
 
                                interrupt-parent = <&cpuintc>;
                                interrupts = <6>;
index c3d0d0a6e04483247baeec72c6704966a4ee559c..6895430b5b2c47beb99a33de6c39a34ae7d94f28 100644 (file)
@@ -623,7 +623,6 @@ CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
 CONFIG_USB_ADUTUX=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYPRESS_CY7C63=m
index 5f71aa598b06df7d00038a5062a95e7e5dadfe42..1a3e1fec4e86f5bd34189d0d867b9b1b7cbc6f0b 100644 (file)
@@ -335,7 +335,6 @@ CONFIG_USB_SERIAL_SAFE_PADDED=y
 CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
index 0edba3e757471b74d441fa520c3ab44084c5f630..4e2ee743088fd4deedadfa4f780595b6ce4d0729 100644 (file)
 #define cpu_has_dsp3           __ase(MIPS_ASE_DSP3)
 #endif
 
+#ifndef cpu_has_loongson_mmi
+#define cpu_has_loongson_mmi           __ase(MIPS_ASE_LOONGSON_MMI)
+#endif
+
+#ifndef cpu_has_loongson_cam
+#define cpu_has_loongson_cam           __ase(MIPS_ASE_LOONGSON_CAM)
+#endif
+
+#ifndef cpu_has_loongson_ext
+#define cpu_has_loongson_ext           __ase(MIPS_ASE_LOONGSON_EXT)
+#endif
+
+#ifndef cpu_has_loongson_ext2
+#define cpu_has_loongson_ext2          __ase(MIPS_ASE_LOONGSON_EXT2)
+#endif
+
 #ifndef cpu_has_mipsmt
 #define cpu_has_mipsmt         __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
 #endif
index dacbdb84516a09134896edd704d129b4f5f516f7..2b4b14a56575ca005bd315f63d21198b3b14340e 100644 (file)
@@ -436,5 +436,9 @@ enum cpu_type_enum {
 #define MIPS_ASE_MSA           0x00000100 /* MIPS SIMD Architecture */
 #define MIPS_ASE_DSP3          0x00000200 /* Signal Processing ASE Rev 3*/
 #define MIPS_ASE_MIPS16E2      0x00000400 /* MIPS16e2 */
+#define MIPS_ASE_LOONGSON_MMI  0x00000800 /* Loongson MultiMedia extensions Instructions */
+#define MIPS_ASE_LOONGSON_CAM  0x00001000 /* Loongson CAM */
+#define MIPS_ASE_LOONGSON_EXT  0x00002000 /* Loongson EXTensions */
+#define MIPS_ASE_LOONGSON_EXT2 0x00004000 /* Loongson EXTensions R2 */
 
 #endif /* _ASM_CPU_H */
index c2917b39966bf18797b6e4885d5f713b52e2b11d..bba2c883795163a43056c61946285eddd80cf282 100644 (file)
@@ -27,8 +27,8 @@
 #define AR933X_UART_CS_PARITY_S                0
 #define AR933X_UART_CS_PARITY_M                0x3
 #define          AR933X_UART_CS_PARITY_NONE    0
-#define          AR933X_UART_CS_PARITY_ODD     1
-#define          AR933X_UART_CS_PARITY_EVEN    2
+#define          AR933X_UART_CS_PARITY_ODD     2
+#define          AR933X_UART_CS_PARITY_EVEN    3
 #define AR933X_UART_CS_IF_MODE_S       2
 #define AR933X_UART_CS_IF_MODE_M       0x3
 #define          AR933X_UART_CS_IF_MODE_NONE   0
index 01df9ad62fb83d3b8e50006b342a6f182f6fc862..1bb9448777c5c59ec9a4332bdfb63feb70f1b3ee 100644 (file)
 #define MIPS_CONF7_IAR         (_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR          (_ULCAST_(1) << 16)
 
+/* Ingenic Config7 bits */
+#define MIPS_CONF7_BTB_LOOP_EN (_ULCAST_(1) << 4)
+
 /* Config7 Bits specific to MIPS Technologies. */
 
 /* Performance counters implemented Per TC */
@@ -2774,6 +2777,7 @@ __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
 __BUILD_SET_C0(config5)
+__BUILD_SET_C0(config7)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
index a2aba4b059e63ff535ac3f46d35efa67ef258bf0..1ade1daa49210713c53bdd20af2564eeb628e3cd 100644 (file)
@@ -6,5 +6,16 @@
 #define HWCAP_MIPS_R6          (1 << 0)
 #define HWCAP_MIPS_MSA         (1 << 1)
 #define HWCAP_MIPS_CRC32       (1 << 2)
+#define HWCAP_MIPS_MIPS16      (1 << 3)
+#define HWCAP_MIPS_MDMX     (1 << 4)
+#define HWCAP_MIPS_MIPS3D   (1 << 5)
+#define HWCAP_MIPS_SMARTMIPS (1 << 6)
+#define HWCAP_MIPS_DSP      (1 << 7)
+#define HWCAP_MIPS_DSP2     (1 << 8)
+#define HWCAP_MIPS_DSP3     (1 << 9)
+#define HWCAP_MIPS_MIPS16E2 (1 << 10)
+#define HWCAP_LOONGSON_MMI  (1 << 11)
+#define HWCAP_LOONGSON_EXT  (1 << 12)
+#define HWCAP_LOONGSON_EXT2 (1 << 13)
 
 #endif /* _UAPI_ASM_HWCAP_H */
index 705593d40d120d24a3c4ac3c56fe3859a4ba7a9c..05c60fa4fa06b1c22a25546b2fd3c9a263631193 100644 (file)
@@ -471,27 +471,27 @@ static unsigned long pin_cfg_bias_disable[] = {
 static struct pinctrl_map pin_map[] __initdata = {
        /* NAND pin configuration */
        PIN_MAP_MUX_GROUP_DEFAULT("jz4740-nand",
-                       "10010000.jz4740-pinctrl", "nand", "nand-cs1"),
+                       "10010000.pin-controller", "nand-cs1", "nand"),
 
        /* fbdev pin configuration */
        PIN_MAP_MUX_GROUP("jz4740-fb", PINCTRL_STATE_DEFAULT,
-                       "10010000.jz4740-pinctrl", "lcd", "lcd-8bit"),
+                       "10010000.pin-controller", "lcd-8bit", "lcd"),
        PIN_MAP_MUX_GROUP("jz4740-fb", PINCTRL_STATE_SLEEP,
-                       "10010000.jz4740-pinctrl", "lcd", "lcd-no-pins"),
+                       "10010000.pin-controller", "lcd-no-pins", "lcd"),
 
        /* MMC pin configuration */
        PIN_MAP_MUX_GROUP_DEFAULT("jz4740-mmc.0",
-                       "10010000.jz4740-pinctrl", "mmc", "mmc-1bit"),
+                       "10010000.pin-controller", "mmc-1bit", "mmc"),
        PIN_MAP_MUX_GROUP_DEFAULT("jz4740-mmc.0",
-                       "10010000.jz4740-pinctrl", "mmc", "mmc-4bit"),
+                       "10010000.pin-controller", "mmc-4bit", "mmc"),
        PIN_MAP_CONFIGS_PIN_DEFAULT("jz4740-mmc.0",
-                       "10010000.jz4740-pinctrl", "PD0", pin_cfg_bias_disable),
+                       "10010000.pin-controller", "PD0", pin_cfg_bias_disable),
        PIN_MAP_CONFIGS_PIN_DEFAULT("jz4740-mmc.0",
-                       "10010000.jz4740-pinctrl", "PD2", pin_cfg_bias_disable),
+                       "10010000.pin-controller", "PD2", pin_cfg_bias_disable),
 
        /* PWM pin configuration */
        PIN_MAP_MUX_GROUP_DEFAULT("jz4740-pwm",
-                       "10010000.jz4740-pinctrl", "pwm4", "pwm4"),
+                       "10010000.pin-controller", "pwm4", "pwm4"),
 };
 
 
index 97d5239ca47baef7602b7500f46dc3d4cc8681f1..428ef218920398c6162b82e5688ab61dfe311580 100644 (file)
@@ -80,6 +80,8 @@ static int __populate_cache_leaves(unsigned int cpu)
        if (c->tcache.waysize)
                populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
 
+       this_cpu_ci->cpu_map_populated = true;
+
        return 0;
 }
 
index d535fc706a8b38a07c8e4c7de12ea111fcd95a60..581defb369c36049aaebc3f4055f09efceba8aa9 100644 (file)
@@ -1489,6 +1489,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                        __cpu_name[cpu] = "ICT Loongson-3";
                        set_elf_platform(cpu, "loongson3a");
                        set_isa(c, MIPS_CPU_ISA_M64R1);
+                       c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+                               MIPS_ASE_LOONGSON_EXT);
                        break;
                case PRID_REV_LOONGSON3B_R1:
                case PRID_REV_LOONGSON3B_R2:
@@ -1496,6 +1498,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                        __cpu_name[cpu] = "ICT Loongson-3";
                        set_elf_platform(cpu, "loongson3b");
                        set_isa(c, MIPS_CPU_ISA_M64R1);
+                       c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+                               MIPS_ASE_LOONGSON_EXT);
                        break;
                }
 
@@ -1861,6 +1865,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
                decode_configs(c);
                c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
                c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+               c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+                       MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
                break;
        default:
                panic("Unknown Loongson Processor ID!");
@@ -1879,6 +1885,13 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
                c->cputype = CPU_JZRISC;
                c->writecombine = _CACHE_UNCACHED_ACCELERATED;
                __cpu_name[cpu] = "Ingenic JZRISC";
+               /*
+                * The XBurst core by default attempts to avoid branch target
+                * buffer lookups by detecting & special casing loops. This
+                * feature will cause BogoMIPS and lpj calculate in error.
+                * Set cp0 config7 bit 4 to disable this feature.
+                */
+               set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
                break;
        default:
                panic("Unknown Ingenic Processor ID!");
@@ -2092,6 +2105,39 @@ void cpu_probe(void)
                elf_hwcap |= HWCAP_MIPS_MSA;
        }
 
+       if (cpu_has_mips16)
+               elf_hwcap |= HWCAP_MIPS_MIPS16;
+
+       if (cpu_has_mdmx)
+               elf_hwcap |= HWCAP_MIPS_MDMX;
+
+       if (cpu_has_mips3d)
+               elf_hwcap |= HWCAP_MIPS_MIPS3D;
+
+       if (cpu_has_smartmips)
+               elf_hwcap |= HWCAP_MIPS_SMARTMIPS;
+
+       if (cpu_has_dsp)
+               elf_hwcap |= HWCAP_MIPS_DSP;
+
+       if (cpu_has_dsp2)
+               elf_hwcap |= HWCAP_MIPS_DSP2;
+
+       if (cpu_has_dsp3)
+               elf_hwcap |= HWCAP_MIPS_DSP3;
+
+       if (cpu_has_mips16e2)
+               elf_hwcap |= HWCAP_MIPS_MIPS16E2;
+
+       if (cpu_has_loongson_mmi)
+               elf_hwcap |= HWCAP_LOONGSON_MMI;
+
+       if (cpu_has_loongson_ext)
+               elf_hwcap |= HWCAP_LOONGSON_EXT;
+
+       if (cpu_has_loongson_ext2)
+               elf_hwcap |= HWCAP_LOONGSON_EXT2;
+
        if (cpu_has_vz)
                cpu_probe_vz(c);
 
index 5f209f111e59e3ad9c6e31bee8352623cacc3e68..df7ddd246eaac730333bc24cc361f1893be100aa 100644 (file)
@@ -32,7 +32,8 @@ void __init setup_pit_timer(void)
 
 static int __init init_pit_clocksource(void)
 {
-       if (num_possible_cpus() > 1) /* PIT does not scale! */
+       if (num_possible_cpus() > 1 || /* PIT does not scale! */
+           !clockevent_state_periodic(&i8253_clockevent))
                return 0;
 
        return clocksource_i8253_init();
index b2de408a259e493b65a4b28abe0a746081f0aa78..f8d36710cd581f8b628056d09c406f8195c43619 100644 (file)
@@ -124,6 +124,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (cpu_has_eva)        seq_printf(m, "%s", " eva");
        if (cpu_has_htw)        seq_printf(m, "%s", " htw");
        if (cpu_has_xpa)        seq_printf(m, "%s", " xpa");
+       if (cpu_has_loongson_mmi)       seq_printf(m, "%s", " loongson-mmi");
+       if (cpu_has_loongson_cam)       seq_printf(m, "%s", " loongson-cam");
+       if (cpu_has_loongson_ext)       seq_printf(m, "%s", " loongson-ext");
+       if (cpu_has_loongson_ext2)      seq_printf(m, "%s", " loongson-ext2");
        seq_printf(m, "\n");
 
        if (cpu_has_mmips) {
index c4ef1c31e0c4f03ca22725668b2931d34a7cd749..37caeadb2964c956256040ff201743c140de310c 100644 (file)
@@ -156,8 +156,9 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
                        if (edge)
                                irq_set_handler(d->hwirq, handle_edge_irq);
 
-                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
-                               (val << (i * 4)), LTQ_EIU_EXIN_C);
+                       ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
+                                   (~(7 << (i * 4)))) | (val << (i * 4)),
+                                   LTQ_EIU_EXIN_C);
                }
        }
 
index 0fce4608aa88665febfcad2964690beff5000a0d..12abf14aed4a37569ff19044a74bf1b854205d97 100644 (file)
@@ -43,6 +43,10 @@ else
       $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
 endif
 
+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
 #
 # Loongson Machines' Support
 #
index ffefc1cb26121e7b653b64b661b267cac4b4e1cd..98c3a7feb10f8b2391c968661e5e76c8d6c2770e 100644 (file)
@@ -110,7 +110,7 @@ static int __init serial_init(void)
 }
 module_init(serial_init);
 
-static void __init serial_exit(void)
+static void __exit serial_exit(void)
 {
        platform_device_unregister(&uart8250_device);
 }
index 1b705fb2f10c4c72b68192342debd4c4de92898a..233033f99d8fc62e2dc1d5dfc0c583f6ea4b1809 100644 (file)
@@ -21,8 +21,9 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);
 
 /* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
+#define MIN_GAP                (128*1024*1024UL)
+#define MAX_GAP                ((TASK_SIZE)/6*5)
+#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
 
 static int mmap_is_legacy(struct rlimit *rlim_stack)
 {
@@ -38,6 +39,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 {
        unsigned long gap = rlim_stack->rlim_cur;
+       unsigned long pad = stack_guard_gap;
+
+       /* Account for stack randomization if necessary */
+       if (current->flags & PF_RANDOMIZE)
+               pad += (STACK_RND_MASK << PAGE_SHIFT);
+
+       /* Values close to RLIM_INFINITY can overflow. */
+       if (gap + pad > gap)
+               gap += pad;
 
        if (gap < MIN_GAP)
                gap = MIN_GAP;
index 8c4fda52b91dc4d2f2841e69fba27fb635c5fb19..3944c49eee0c4c2ebb77e62a686e05efd3e37399 100644 (file)
@@ -630,7 +630,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
                return;
        }
 
-       if (cpu_has_rixi && _PAGE_NO_EXEC) {
+       if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
                if (fill_includes_sw_bits) {
                        UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
                } else {
@@ -654,6 +654,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
                                   int restore_scratch)
 {
        if (restore_scratch) {
+               /*
+                * Ensure the MFC0 below observes the value written to the
+                * KScratch register by the prior MTC0.
+                */
+               if (scratch_reg >= 0)
+                       uasm_i_ehb(p);
+
                /* Reset default page size */
                if (PM_DEFAULT_MASK >> 16) {
                        uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
@@ -668,12 +675,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
                        uasm_i_mtc0(p, 0, C0_PAGEMASK);
                        uasm_il_b(p, r, lid);
                }
-               if (scratch_reg >= 0) {
-                       uasm_i_ehb(p);
+               if (scratch_reg >= 0)
                        UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-               } else {
+               else
                        UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-               }
        } else {
                /* Reset default page size */
                if (PM_DEFAULT_MASK >> 16) {
@@ -922,6 +927,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
        }
        if (mode != not_refill && check_for_high_segbits) {
                uasm_l_large_segbits_fault(l, *p);
+
+               if (mode == refill_scratch && scratch_reg >= 0)
+                       uasm_i_ehb(p);
+
                /*
                 * We get here if we are an xsseg address, or if we are
                 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
@@ -938,12 +947,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                uasm_i_jr(p, ptr);
 
                if (mode == refill_scratch) {
-                       if (scratch_reg >= 0) {
-                               uasm_i_ehb(p);
+                       if (scratch_reg >= 0)
                                UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-                       } else {
+                       else
                                UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-                       }
                } else {
                        uasm_i_nop(p);
                }
index 6f10312e0c76cbb52726648c0359915cf6c48a06..c99fa1c1bd9ccec73423197edc3a5c201ca0aa3e 100644 (file)
@@ -9,6 +9,7 @@ ccflags-vdso := \
        $(filter -mmicromips,$(KBUILD_CFLAGS)) \
        $(filter -march=%,$(KBUILD_CFLAGS)) \
        $(filter -m%-float,$(KBUILD_CFLAGS)) \
+       $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
        -D__VDSO__
 
 ifeq ($(cc-name),clang)
index 4ebd4e65524cd3cf6347a2948dede9163a4807b9..41ebe97fad1097f85a0c5d602db8bb74f474b946 100644 (file)
@@ -42,8 +42,8 @@ SECTIONS
 #endif
        _startcode_end = .;
 
-       /* bootloader code and data starts behind area of extracted kernel */
-       . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START);
+       /* bootloader code and data starts at least behind area of extracted kernel */
+       . = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START));
 
        /* align on next page boundary */
        . = ALIGN(4096);
index 0964c236e3e5a711056e058a8a5ee63343f9a496..de2998cb189e8814d4cc55121cd7e9fb03476807 100644 (file)
@@ -167,6 +167,9 @@ long arch_ptrace(struct task_struct *child, long request,
                if ((addr & (sizeof(unsigned long)-1)) ||
                     addr >= sizeof(struct pt_regs))
                        break;
+               if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
+                       data |= 3; /* ensure userspace privilege */
+               }
                if ((addr >= PT_GR1 && addr <= PT_GR31) ||
                                addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
                                (addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
@@ -228,16 +231,18 @@ long arch_ptrace(struct task_struct *child, long request,
 
 static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
 {
-       if (offset < 0)
-               return sizeof(struct pt_regs);
-       else if (offset <= 32*4)        /* gr[0..31] */
-               return offset * 2 + 4;
-       else if (offset <= 32*4+32*8)   /* gr[0..31] + fr[0..31] */
-               return offset + 32*4;
-       else if (offset < sizeof(struct pt_regs)/2 + 32*4)
-               return offset * 2 + 4 - 32*8;
+       compat_ulong_t pos;
+
+       if (offset < 32*4)      /* gr[0..31] */
+               pos = offset * 2 + 4;
+       else if (offset < 32*4+32*8)    /* fr[0] ... fr[31] */
+               pos = (offset - 32*4) + PT_FR0;
+       else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */
+               pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4;
        else
-               return sizeof(struct pt_regs);
+               pos = sizeof(struct pt_regs);
+
+       return pos;
 }
 
 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
@@ -281,9 +286,12 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        addr = translate_usr_offset(addr);
                        if (addr >= sizeof(struct pt_regs))
                                break;
+                       if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
+                               data |= 3; /* ensure userspace privilege */
+                       }
                        if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
                                /* Special case, fp regs are 64 bits anyway */
-                               *(__u64 *) ((char *) task_regs(child) + addr) = data;
+                               *(__u32 *) ((char *) task_regs(child) + addr) = data;
                                ret = 0;
                        }
                        else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
@@ -496,7 +504,8 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val)
                        return;
        case RI(iaoq[0]):
        case RI(iaoq[1]):
-                       regs->iaoq[num - RI(iaoq[0])] = val;
+                       /* set 2 lowest bits to ensure userspace privilege: */
+                       regs->iaoq[num - RI(iaoq[0])] = val | 3;
                        return;
        case RI(sar):   regs->sar = val;
                        return;
index 92a9b5f12f98adfe4e87e6a18ff381e528108946..f29f682352f017fd2c55dd0ba34df9ac82a8c0c5 100644 (file)
@@ -3,7 +3,7 @@
  * arch/parisc/mm/ioremap.c
  *
  * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
  * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
  */
 
@@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
        addr = (void __iomem *) area->addr;
        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
                               phys_addr, pgprot)) {
-               vfree(addr);
+               vunmap(addr);
                return NULL;
        }
 
@@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 }
 EXPORT_SYMBOL(__ioremap);
 
-void iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *io_addr)
 {
-       if (addr > high_memory)
-               return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
+       unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
+
+       if (is_vmalloc_addr((void *)addr))
+               vunmap((void *)addr);
 }
 EXPORT_SYMBOL(iounmap);
index e22e5b3770ddcc32b4ee37fecc63893b96522535..ebfadd39e1924a357bdb818a5a60d3353c1dfa6c 100644 (file)
@@ -20,10 +20,30 @@ static inline uint32_t swab32p(void *p)
 
 #ifdef __LITTLE_ENDIAN__
 #define get_le32(p) (*((uint32_t *) (p)))
+#define cpu_to_be32(x) swab32(x)
+static inline u32 be32_to_cpup(const u32 *p)
+{
+       return swab32p((u32 *)p);
+}
 #else
 #define get_le32(p) swab32p(p)
+#define cpu_to_be32(x) (x)
+static inline u32 be32_to_cpup(const u32 *p)
+{
+       return *p;
+}
 #endif
 
+static inline uint32_t get_unaligned_be32(const void *p)
+{
+       return be32_to_cpup(p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+       *((u32 *)p) = cpu_to_be32(val);
+}
+
 #define memeq(a, b, size) (memcmp(a, b, size) == 0)
 #define memzero(buf, size) memset(buf, 0, size)
 
index d5a8d7bf07594b0e0e127db45bf03d6129c1bfbc..b189f7aee222e3277e73c98e0a16ce704f8e8797 100644 (file)
  * not expect this type of fault. flush_cache_vmap is not exactly the right
  * place to put this, but it seems to work well enough.
  */
-#define flush_cache_vmap(start, end)           do { asm volatile("ptesync" ::: "memory"); } while (0)
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+       asm volatile("ptesync" ::: "memory");
+}
 #else
-#define flush_cache_vmap(start, end)           do { } while (0)
+static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
 #endif
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
index 29f49a35d6eecee903ee04ccf3732fadc2971070..6a6804c2e1b08656d67f9955d14fee6d2823c09d 100644 (file)
@@ -212,7 +212,7 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTR_POWER9_DD2_1           LONG_ASM_CONST(0x0000080000000000)
 #define CPU_FTR_P9_TM_HV_ASSIST                LONG_ASM_CONST(0x0000100000000000)
 #define CPU_FTR_P9_TM_XER_SO_BUG       LONG_ASM_CONST(0x0000200000000000)
-#define CPU_FTR_P9_TLBIE_BUG           LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_P9_TLBIE_STQ_BUG       LONG_ASM_CONST(0x0000400000000000)
 #define CPU_FTR_P9_TIDR                        LONG_ASM_CONST(0x0000800000000000)
 
 #ifndef __ASSEMBLY__
@@ -460,7 +460,7 @@ static inline void cpu_feature_keys_init(void) { }
            CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
            CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
            CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
-           CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
+           CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TIDR)
 #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
 #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
 #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
index 94542776a62d630f6c9037e28264fc371d839245..2a7b01f97a56be1956a753b1934b1021d309ca7f 100644 (file)
@@ -59,8 +59,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 
        pagefault_enable();
 
-       if (!ret)
-               *oval = oldval;
+       *oval = oldval;
 
        return ret;
 }
index 83a9aa3cf689172648d234da87ec7d4e95add69d..dd18d8174504f5550538a6536aff04f7f6cd38ea 100644 (file)
@@ -301,12 +301,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-       vcpu->arch.cr = val;
+       vcpu->arch.regs.ccr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.cr;
+       return vcpu->arch.regs.ccr;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
index dc435a5af7d6cfd04ddb81e81d0476f8b214bdb2..14fa07c73f44dfc9aaceaf805e89e5db34982aa7 100644 (file)
@@ -482,7 +482,7 @@ static inline u64 sanitize_msr(u64 msr)
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.cr  = vcpu->arch.cr_tm;
+       vcpu->arch.regs.ccr  = vcpu->arch.cr_tm;
        vcpu->arch.regs.xer = vcpu->arch.xer_tm;
        vcpu->arch.regs.link  = vcpu->arch.lr_tm;
        vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
@@ -499,7 +499,7 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 
 static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.cr_tm  = vcpu->arch.cr;
+       vcpu->arch.cr_tm  = vcpu->arch.regs.ccr;
        vcpu->arch.xer_tm = vcpu->arch.regs.xer;
        vcpu->arch.lr_tm  = vcpu->arch.regs.link;
        vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
index d513e3ed1c659c711d4a68e5db5924f720c66833..f0cef625f17ce0afcb1d08510d27a43fce77f6fc 100644 (file)
@@ -46,12 +46,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-       vcpu->arch.cr = val;
+       vcpu->arch.regs.ccr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.cr;
+       return vcpu->arch.regs.ccr;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
index 2b6049e839706cf4c9ecf8fef3f9aac71c8e140a..2f95e38f05491a6dcb60a1adda6ff850ce39c1b9 100644 (file)
@@ -538,8 +538,6 @@ struct kvm_vcpu_arch {
        ulong tar;
 #endif
 
-       u32 cr;
-
 #ifdef CONFIG_PPC_BOOK3S
        ulong hflags;
        ulong guest_owned_ext;
index b694d6af115080765cad72b749bea690dbb6fb79..ae953958c0f33cf9d671b14af6fd11d682a8392f 100644 (file)
@@ -217,12 +217,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 #endif
 }
 
-static inline int arch_dup_mmap(struct mm_struct *oldmm,
-                               struct mm_struct *mm)
-{
-       return 0;
-}
-
 #ifndef CONFIG_PPC_BOOK3S_64
 static inline void arch_exit_mmap(struct mm_struct *mm)
 {
@@ -247,6 +241,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 #ifdef CONFIG_PPC_MEM_KEYS
 bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
                               bool execute, bool foreign);
+void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
 #else /* CONFIG_PPC_MEM_KEYS */
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                bool write, bool execute, bool foreign)
@@ -259,6 +254,7 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 #define thread_pkey_regs_save(thread)
 #define thread_pkey_regs_restore(new_thread, old_thread)
 #define thread_pkey_regs_init(thread)
+#define arch_dup_pkeys(oldmm, mm)
 
 static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
 {
@@ -267,5 +263,12 @@ static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
 
 #endif /* CONFIG_PPC_MEM_KEYS */
 
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+                               struct mm_struct *mm)
+{
+       arch_dup_pkeys(oldmm, mm);
+       return 0;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
index ff3866473afe411f63c3d4e968ff6b574694402b..d8d886dee54e3c92356d23f370dbc2a3dcc66b79 100644 (file)
@@ -275,7 +275,7 @@ int64_t opal_xive_get_vp_info(uint64_t vp,
 int64_t opal_xive_set_vp_info(uint64_t vp,
                              uint64_t flags,
                              uint64_t report_cl_pair);
-int64_t opal_xive_allocate_irq(uint32_t chip_id);
+int64_t opal_xive_allocate_irq_raw(uint32_t chip_id);
 int64_t opal_xive_free_irq(uint32_t girq);
 int64_t opal_xive_sync(uint32_t type, uint32_t id);
 int64_t opal_xive_dump(uint32_t type, uint32_t id);
index e5b314ed054e027d69782c9fd7261eaaefe85eb8..640a4d818772a4afe0e07ba1018b62fadaa7ee10 100644 (file)
 #define MSR_TS_S       __MASK(MSR_TS_S_LG)     /*  Transaction Suspended */
 #define MSR_TS_T       __MASK(MSR_TS_T_LG)     /*  Transaction Transactional */
 #define MSR_TS_MASK    (MSR_TS_T | MSR_TS_S)   /* Transaction State bits */
-#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
 #define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
 #define MSR_TM_TRANSACTIONAL(x)        (((x) & MSR_TS_MASK) == MSR_TS_T)
 #define MSR_TM_SUSPENDED(x)    (((x) & MSR_TS_MASK) == MSR_TS_S)
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
+#else
+#define MSR_TM_ACTIVE(x) 0
+#endif
+
 #if defined(CONFIG_PPC_BOOK3S_64)
 #define MSR_64BIT      MSR_SF
 
index 23bea99bf8d59e229fb0304508182542a81b4952..1ca9e37f7cc998c11eaeceab9ad6215431ae5ebb 100644 (file)
@@ -306,6 +306,7 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 static inline unsigned long
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
+       barrier_nospec();
        return __copy_tofrom_user(to, from, n);
 }
 #endif /* __powerpc64__ */
index 89cf15566c4e80ba4e81d500e1d86f204e1241b2..7c3738d890e8b21d93581c09ce46620afe88cef9 100644 (file)
@@ -438,7 +438,7 @@ int main(void)
 #ifdef CONFIG_PPC_BOOK3S
        OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
 #endif
-       OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
+       OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
        OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
@@ -695,7 +695,7 @@ int main(void)
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 #else /* CONFIG_PPC_BOOK3S */
-       OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
+       OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
        OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
        OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
        OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
index f432054234a472eae7077afda818b6dcb51ccf00..f3b8e04eca9c3283e0e2c64ea8e2354a22b20ce8 100644 (file)
@@ -694,9 +694,35 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
        return true;
 }
 
+/*
+ * Handle POWER9 broadcast tlbie invalidation issue using
+ * cpu feature flag.
+ */
+static __init void update_tlbie_feature_flag(unsigned long pvr)
+{
+       if (PVR_VER(pvr) == PVR_POWER9) {
+               /*
+                * Set the tlbie feature flag for anything below
+                * Nimbus DD 2.3 and Cumulus DD 1.3
+                */
+               if ((pvr & 0xe000) == 0) {
+                       /* Nimbus */
+                       if ((pvr & 0xfff) < 0x203)
+                               cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
+               } else if ((pvr & 0xc000) == 0) {
+                       /* Cumulus */
+                       if ((pvr & 0xfff) < 0x103)
+                               cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
+               } else {
+                       WARN_ONCE(1, "Unknown PVR");
+                       cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
+               }
+       }
+}
+
 static __init void cpufeatures_cpu_quirks(void)
 {
-       int version = mfspr(SPRN_PVR);
+       unsigned long version = mfspr(SPRN_PVR);
 
        /*
         * Not all quirks can be derived from the cpufeatures device tree.
@@ -715,10 +741,10 @@ static __init void cpufeatures_cpu_quirks(void)
 
        if ((version & 0xffff0000) == 0x004e0000) {
                cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
-               cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
                cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
        }
 
+       update_tlbie_feature_flag(version);
        /*
         * PKEY was not in the initial base or feature node
         * specification, but it should become optional in the next
index c72767a5327ad63d5de84d5e3787ad0b24f6b7e0..fe3c6f3bd3b6226727fffc12b0ec0e3bef1fb4b4 100644 (file)
@@ -360,10 +360,19 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
        ptep = find_init_mm_pte(token, &hugepage_shift);
        if (!ptep)
                return token;
-       WARN_ON(hugepage_shift);
-       pa = pte_pfn(*ptep) << PAGE_SHIFT;
 
-       return pa | (token & (PAGE_SIZE-1));
+       pa = pte_pfn(*ptep);
+
+       /* On radix we can do hugepage mappings for io, so handle that */
+       if (hugepage_shift) {
+               pa <<= hugepage_shift;
+               pa |= token & ((1ul << hugepage_shift) - 1);
+       } else {
+               pa <<= PAGE_SHIFT;
+               pa |= token & (PAGE_SIZE - 1);
+       }
+
+       return pa;
 }
 
 /*
index 67619b4b3f96c721993ce8b94b0f56cca6b4f418..110eba400de7cc07cb1c3a9942f2bec64c4d5ad1 100644 (file)
@@ -811,6 +811,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
        pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
                pe->freeze_count, eeh_max_freezes);
 
+       eeh_for_each_pe(pe, tmp_pe)
+               eeh_pe_for_each_dev(tmp_pe, edev, tmp)
+                       edev->mode &= ~EEH_DEV_NO_HANDLER;
+
        /* Walk the various device drivers attached to this slot through
         * a reset sequence, giving each an opportunity to do what it needs
         * to accomplish the reset.  Each child gets a report of the
@@ -1004,7 +1008,8 @@ final:
  */
 void eeh_handle_special_event(void)
 {
-       struct eeh_pe *pe, *phb_pe;
+       struct eeh_pe *pe, *phb_pe, *tmp_pe;
+       struct eeh_dev *edev, *tmp_edev;
        struct pci_bus *bus;
        struct pci_controller *hose;
        unsigned long flags;
@@ -1075,6 +1080,10 @@ void eeh_handle_special_event(void)
                                    (phb_pe->state & EEH_PE_RECOVERING))
                                        continue;
 
+                               eeh_for_each_pe(pe, tmp_pe)
+                                       eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
+                                               edev->mode &= ~EEH_DEV_NO_HANDLER;
+
                                /* Notify all devices to be down */
                                eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                                eeh_set_channel_state(pe, pci_channel_io_perm_failure);
index 2d8fc8c9da7a1f210816bd9734c3d8453d8fc04e..90af86f143a911365ecaf53b64f5d2b7fb653abd 100644 (file)
@@ -520,6 +520,10 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
        RFI_TO_USER_OR_KERNEL
 9:
        /* Deliver the machine check to host kernel in V mode. */
+BEGIN_FTR_SECTION
+       ld      r10,ORIG_GPR3(r1)
+       mtspr   SPRN_CFAR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
        MACHINE_CHECK_HANDLER_WINDUP
        b       machine_check_pSeries
 
@@ -1745,7 +1749,7 @@ handle_page_fault:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_page_fault
        cmpdi   r3,0
-       beq+    12f
+       beq+    ret_from_except_lite
        bl      save_nvgprs
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
@@ -1760,7 +1764,12 @@ handle_dabr_fault:
        ld      r5,_DSISR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_break
-12:    b       ret_from_except_lite
+       /*
+        * do_break() may have changed the NV GPRS while handling a breakpoint.
+        * If so, we need to restore them with their updated values. Don't use
+        * ret_from_except_lite here.
+        */
+       b       ret_from_except
 
 
 #ifdef CONFIG_PPC_BOOK3S_64
index 9168a247e24ff23c46f1de495c1017ac91141abc..3fb564f3e8874eeddfb5b31c6e4b2a489f2fbbde 100644 (file)
@@ -906,6 +906,7 @@ p_toc:      .8byte  __toc_start + 0x8000 - 0b
 /*
  * This is where the main kernel code starts.
  */
+__REF
 start_here_multiplatform:
        /* set up the TOC */
        bl      relative_toc
@@ -981,6 +982,7 @@ start_here_multiplatform:
        RFI
        b       .       /* prevent speculative execution */
 
+       .previous
        /* This is where all platforms converge execution */
 
 start_here_common:
index efdd16a79075f699ecf866f4dfc22abd794699fa..93e06778b136b14db884e864f26f9befdee93016 100644 (file)
@@ -45,6 +45,7 @@ static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
                                        mce_ue_event_queue);
 
 static void machine_check_process_queued_event(struct irq_work *work);
+static void machine_check_ue_irq_work(struct irq_work *work);
 void machine_check_ue_event(struct machine_check_event *evt);
 static void machine_process_ue_event(struct work_struct *work);
 
@@ -52,6 +53,10 @@ static struct irq_work mce_event_process_work = {
         .func = machine_check_process_queued_event,
 };
 
+static struct irq_work mce_ue_event_irq_work = {
+       .func = machine_check_ue_irq_work,
+};
+
 DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
 
 static void mce_set_error_info(struct machine_check_event *mce,
@@ -208,6 +213,10 @@ void release_mce_event(void)
        get_mce_event(NULL, true);
 }
 
+static void machine_check_ue_irq_work(struct irq_work *work)
+{
+       schedule_work(&mce_ue_event_work);
+}
 
 /*
  * Queue up the MCE event which then can be handled later.
@@ -225,7 +234,7 @@ void machine_check_ue_event(struct machine_check_event *evt)
        memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
 
        /* Queue work to process this event later. */
-       schedule_work(&mce_ue_event_work);
+       irq_work_queue(&mce_ue_event_irq_work);
 }
 
 /*
index 3022d67f0c48b9ead4d7e76ae2590f78c0d1d617..37a110b8e7e1724571ee7e3d22036b5b9a51832b 100644 (file)
@@ -39,6 +39,7 @@
 static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
 {
        pte_t *ptep;
+       unsigned int shift;
        unsigned long flags;
        struct mm_struct *mm;
 
@@ -48,13 +49,18 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
                mm = &init_mm;
 
        local_irq_save(flags);
-       if (mm == current->mm)
-               ptep = find_current_mm_pte(mm->pgd, addr, NULL, NULL);
-       else
-               ptep = find_init_mm_pte(addr, NULL);
+       ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
        local_irq_restore(flags);
+
        if (!ptep || pte_special(*ptep))
                return ULONG_MAX;
+
+       if (shift > PAGE_SHIFT) {
+               unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
+
+               return pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
+       }
+
        return pte_pfn(*ptep);
 }
 
@@ -339,7 +345,7 @@ static const struct mce_derror_table mce_p9_derror_table[] = {
   MCE_INITIATOR_CPU,   MCE_SEV_ERROR_SYNC, },
 { 0, false, 0, 0, 0, 0 } };
 
-static int mce_find_instr_ea_and_pfn(struct pt_regs *regs, uint64_t *addr,
+static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
                                        uint64_t *phys_addr)
 {
        /*
@@ -530,7 +536,8 @@ static int mce_handle_derror(struct pt_regs *regs,
                         * kernel/exception-64s.h
                         */
                        if (get_paca()->in_mce < MAX_MCE_DEPTH)
-                               mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
+                               mce_find_instr_ea_and_phys(regs, addr,
+                                                          phys_addr);
                }
                found = 1;
        }
index 262ba948178107a8180a7aff69603e226a248cec..1bf6aaefd26a295553b063dc04c672fa95d3eb20 100644 (file)
@@ -135,7 +135,7 @@ _GLOBAL_TOC(flush_dcache_range)
        subf    r8,r6,r4                /* compute length */
        add     r8,r8,r5                /* ensure we get enough */
        lwz     r9,DCACHEL1LOGBLOCKSIZE(r10)    /* Get log-2 of dcache block size */
-       srw.    r8,r8,r9                /* compute line count */
+       srd.    r8,r8,r9                /* compute line count */
        beqlr                           /* nothing to do? */
        mtctr   r8
 0:     dcbst   0,r6
@@ -153,7 +153,7 @@ _GLOBAL(flush_inval_dcache_range)
        subf    r8,r6,r4                /* compute length */
        add     r8,r8,r5                /* ensure we get enough */
        lwz     r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */
-       srw.    r8,r8,r9                /* compute line count */
+       srd.    r8,r8,r9                /* compute line count */
        beqlr                           /* nothing to do? */
        sync
        isync
index 98f04725def75f09dc924f7861d795c986ca98f0..c101b321dece8480f3474d709e7d10fcc17bc1e6 100644 (file)
@@ -45,6 +45,8 @@ unsigned int pci_parse_of_flags(u32 addr0, int bridge)
        if (addr0 & 0x02000000) {
                flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
                flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
+               if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+                       flags |= IORESOURCE_MEM_64;
                flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
                if (addr0 & 0x40000000)
                        flags |= IORESOURCE_PREFETCH
index d29f2dca725b8bbbc20e4bc403d99b7208790908..909c9407e392a454222c7b8f7376e70a5d890e66 100644 (file)
@@ -102,27 +102,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
        }
 }
 
-static inline bool msr_tm_active(unsigned long msr)
-{
-       return MSR_TM_ACTIVE(msr);
-}
-
-static bool tm_active_with_fp(struct task_struct *tsk)
-{
-       return msr_tm_active(tsk->thread.regs->msr) &&
-               (tsk->thread.ckpt_regs.msr & MSR_FP);
-}
-
-static bool tm_active_with_altivec(struct task_struct *tsk)
-{
-       return msr_tm_active(tsk->thread.regs->msr) &&
-               (tsk->thread.ckpt_regs.msr & MSR_VEC);
-}
 #else
-static inline bool msr_tm_active(unsigned long msr) { return false; }
 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
-static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
-static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 bool strict_msr_control;
@@ -247,7 +228,8 @@ void enable_kernel_fp(void)
                 * giveup as this would save  to the 'live' structure not the
                 * checkpointed structure.
                 */
-               if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+               if (!MSR_TM_ACTIVE(cpumsr) &&
+                    MSR_TM_ACTIVE(current->thread.regs->msr))
                        return;
                __giveup_fpu(current);
        }
@@ -256,7 +238,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
 
 static int restore_fp(struct task_struct *tsk)
 {
-       if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
+       if (tsk->thread.load_fp) {
                load_fp_state(&current->thread.fp_state);
                current->thread.load_fp++;
                return 1;
@@ -311,7 +293,8 @@ void enable_kernel_altivec(void)
                 * giveup as this would save  to the 'live' structure not the
                 * checkpointed structure.
                 */
-               if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+               if (!MSR_TM_ACTIVE(cpumsr) &&
+                    MSR_TM_ACTIVE(current->thread.regs->msr))
                        return;
                __giveup_altivec(current);
        }
@@ -337,8 +320,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 
 static int restore_altivec(struct task_struct *tsk)
 {
-       if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
-               (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
+       if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
                load_vr_state(&tsk->thread.vr_state);
                tsk->thread.used_vr = 1;
                tsk->thread.load_vec++;
@@ -397,7 +379,8 @@ void enable_kernel_vsx(void)
                 * giveup as this would save  to the 'live' structure not the
                 * checkpointed structure.
                 */
-               if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+               if (!MSR_TM_ACTIVE(cpumsr) &&
+                    MSR_TM_ACTIVE(current->thread.regs->msr))
                        return;
                __giveup_vsx(current);
        }
@@ -499,13 +482,14 @@ void giveup_all(struct task_struct *tsk)
        if (!tsk->thread.regs)
                return;
 
+       check_if_tm_restore_required(tsk);
+
        usermsr = tsk->thread.regs->msr;
 
        if ((usermsr & msr_all_available) == 0)
                return;
 
        msr_check_and_set(msr_all_available);
-       check_if_tm_restore_required(tsk);
 
        WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
 
@@ -530,7 +514,7 @@ void restore_math(struct pt_regs *regs)
 {
        unsigned long msr;
 
-       if (!msr_tm_active(regs->msr) &&
+       if (!MSR_TM_ACTIVE(regs->msr) &&
                !current->thread.load_fp && !loadvec(current->thread))
                return;
 
index 8afd146bc9c70dc6480e2fff20d6239d327e33d3..9e41a9de432355414cdb9d6ce0057fecd5393369 100644 (file)
@@ -875,15 +875,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
                return 0;
 
        for_each_cpu(cpu, cpus) {
+               struct device *dev = get_cpu_device(cpu);
+
                switch (state) {
                case DOWN:
-                       cpuret = cpu_down(cpu);
+                       cpuret = device_offline(dev);
                        break;
                case UP:
-                       cpuret = cpu_up(cpu);
+                       cpuret = device_online(dev);
                        break;
                }
-               if (cpuret) {
+               if (cpuret < 0) {
                        pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
                                        __func__,
                                        ((state == UP) ? "up" : "down"),
@@ -972,6 +974,8 @@ int rtas_ibm_suspend_me(u64 handle)
        data.token = rtas_token("ibm,suspend-me");
        data.complete = &done;
 
+       lock_device_hotplug();
+
        /* All present CPUs must be online */
        cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
        cpuret = rtas_online_cpus_mask(offline_mask);
@@ -1003,6 +1007,7 @@ int rtas_ibm_suspend_me(u64 handle)
                                __func__);
 
 out:
+       unlock_device_hotplug();
        free_cpumask_var(offline_mask);
        return atomic_read(&data.error);
 }
index fd59fef9931bf1ca4721ec2729d482514616febb..906b05c2adae3f2d0770d7c6ae6a9603168e5776 100644 (file)
@@ -1202,6 +1202,9 @@ SYSCALL_DEFINE0(rt_sigreturn)
                        goto bad;
 
                if (MSR_TM_ACTIVE(msr_hi<<32)) {
+                       /* Trying to start TM on non TM system */
+                       if (!cpu_has_feature(CPU_FTR_TM))
+                               goto bad;
                        /* We only recheckpoint on return if we're
                         * transaction.
                         */
index 14b0f5b6a373da21f567c5e5cd4e27ca66430c4a..b5933d7219db60623868286ee386891c63b2cd4f 100644 (file)
@@ -750,6 +750,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
        if (MSR_TM_ACTIVE(msr)) {
                /* We recheckpoint on return. */
                struct ucontext __user *uc_transact;
+
+               /* Trying to start TM on non TM system */
+               if (!cpu_has_feature(CPU_FTR_TM))
+                       goto badframe;
+
                if (__get_user(uc_transact, &uc->uc_link))
                        goto badframe;
                if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
index 7a919e9a3400bb9a41cc98b42875b1285e9f5f02..cbdf86228eaaa7c0d64082979d265e332de42bbc 100644 (file)
 #define SL_IBAT2       0x48
 #define SL_DBAT3       0x50
 #define SL_IBAT3       0x58
-#define SL_TB          0x60
-#define SL_R2          0x68
-#define SL_CR          0x6c
-#define SL_LR          0x70
-#define SL_R12         0x74    /* r12 to r31 */
+#define SL_DBAT4       0x60
+#define SL_IBAT4       0x68
+#define SL_DBAT5       0x70
+#define SL_IBAT5       0x78
+#define SL_DBAT6       0x80
+#define SL_IBAT6       0x88
+#define SL_DBAT7       0x90
+#define SL_IBAT7       0x98
+#define SL_TB          0xa0
+#define SL_R2          0xa8
+#define SL_CR          0xac
+#define SL_LR          0xb0
+#define SL_R12         0xb4    /* r12 to r31 */
 #define SL_SIZE                (SL_R12 + 80)
 
        .section .data
@@ -114,6 +122,41 @@ _GLOBAL(swsusp_arch_suspend)
        mfibatl r4,3
        stw     r4,SL_IBAT3+4(r11)
 
+BEGIN_MMU_FTR_SECTION
+       mfspr   r4,SPRN_DBAT4U
+       stw     r4,SL_DBAT4(r11)
+       mfspr   r4,SPRN_DBAT4L
+       stw     r4,SL_DBAT4+4(r11)
+       mfspr   r4,SPRN_DBAT5U
+       stw     r4,SL_DBAT5(r11)
+       mfspr   r4,SPRN_DBAT5L
+       stw     r4,SL_DBAT5+4(r11)
+       mfspr   r4,SPRN_DBAT6U
+       stw     r4,SL_DBAT6(r11)
+       mfspr   r4,SPRN_DBAT6L
+       stw     r4,SL_DBAT6+4(r11)
+       mfspr   r4,SPRN_DBAT7U
+       stw     r4,SL_DBAT7(r11)
+       mfspr   r4,SPRN_DBAT7L
+       stw     r4,SL_DBAT7+4(r11)
+       mfspr   r4,SPRN_IBAT4U
+       stw     r4,SL_IBAT4(r11)
+       mfspr   r4,SPRN_IBAT4L
+       stw     r4,SL_IBAT4+4(r11)
+       mfspr   r4,SPRN_IBAT5U
+       stw     r4,SL_IBAT5(r11)
+       mfspr   r4,SPRN_IBAT5L
+       stw     r4,SL_IBAT5+4(r11)
+       mfspr   r4,SPRN_IBAT6U
+       stw     r4,SL_IBAT6(r11)
+       mfspr   r4,SPRN_IBAT6L
+       stw     r4,SL_IBAT6+4(r11)
+       mfspr   r4,SPRN_IBAT7U
+       stw     r4,SL_IBAT7(r11)
+       mfspr   r4,SPRN_IBAT7L
+       stw     r4,SL_IBAT7+4(r11)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
 #if  0
        /* Backup various CPU config stuffs */
        bl      __save_cpu_setup
@@ -279,27 +322,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        mtibatu 3,r4
        lwz     r4,SL_IBAT3+4(r11)
        mtibatl 3,r4
-#endif
-
 BEGIN_MMU_FTR_SECTION
-       li      r4,0
+       lwz     r4,SL_DBAT4(r11)
        mtspr   SPRN_DBAT4U,r4
+       lwz     r4,SL_DBAT4+4(r11)
        mtspr   SPRN_DBAT4L,r4
+       lwz     r4,SL_DBAT5(r11)
        mtspr   SPRN_DBAT5U,r4
+       lwz     r4,SL_DBAT5+4(r11)
        mtspr   SPRN_DBAT5L,r4
+       lwz     r4,SL_DBAT6(r11)
        mtspr   SPRN_DBAT6U,r4
+       lwz     r4,SL_DBAT6+4(r11)
        mtspr   SPRN_DBAT6L,r4
+       lwz     r4,SL_DBAT7(r11)
        mtspr   SPRN_DBAT7U,r4
+       lwz     r4,SL_DBAT7+4(r11)
        mtspr   SPRN_DBAT7L,r4
+       lwz     r4,SL_IBAT4(r11)
        mtspr   SPRN_IBAT4U,r4
+       lwz     r4,SL_IBAT4+4(r11)
        mtspr   SPRN_IBAT4L,r4
+       lwz     r4,SL_IBAT5(r11)
        mtspr   SPRN_IBAT5U,r4
+       lwz     r4,SL_IBAT5+4(r11)
        mtspr   SPRN_IBAT5L,r4
+       lwz     r4,SL_IBAT6(r11)
        mtspr   SPRN_IBAT6U,r4
+       lwz     r4,SL_IBAT6+4(r11)
        mtspr   SPRN_IBAT6L,r4
+       lwz     r4,SL_IBAT7(r11)
        mtspr   SPRN_IBAT7U,r4
+       lwz     r4,SL_IBAT7+4(r11)
        mtspr   SPRN_IBAT7L,r4
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+#endif
 
        /* Flush all TLBs */
        lis     r4,0x1000
index 02fe6d02017415dbba758ae21a8c01651c654ea5..d5f351f02c1534847925bdf50db3b9489456d67a 100644 (file)
@@ -399,6 +399,7 @@ void system_reset_exception(struct pt_regs *regs)
        if (debugger(regs))
                goto out;
 
+       kmsg_dump(KMSG_DUMP_OOPS);
        /*
         * A system reset is a request to dump, so we always send
         * it through the crashdump code (if fadump or kdump are
index 68e14afecac85b1d1fd0eff661f6a3aabe0217a6..a488c105b9234858d669ebd936077a7b0820fc5b 100644 (file)
@@ -744,12 +744,15 @@ void kvmppc_rmap_reset(struct kvm *kvm)
        srcu_idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
+               /* Mutual exclusion with kvm_unmap_hva_range etc. */
+               spin_lock(&kvm->mmu_lock);
                /*
                 * This assumes it is acceptable to lose reference and
                 * change bits across a reset.
                 */
                memset(memslot->arch.rmap, 0,
                       memslot->npages * sizeof(*memslot->arch.rmap));
+               spin_unlock(&kvm->mmu_lock);
        }
        srcu_read_unlock(&kvm->srcu, srcu_idx);
 }
index 9a3f2646ecc7e87bd24c03c02198eb9f45763725..07a8004c3c237520954136dfd5409a80a692dfe9 100644 (file)
@@ -602,8 +602,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 
                if (kvmppc_gpa_to_ua(vcpu->kvm,
                                tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
-                               &ua, NULL))
-                       return H_PARAMETER;
+                               &ua, NULL)) {
+                       ret = H_PARAMETER;
+                       goto unlock_exit;
+               }
 
                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
index 6821ead4b4ebc128a9a772a712feefb213b1bad0..eb8b11515a7ffe8439c26995aa6eb4d9183fde59 100644 (file)
@@ -528,8 +528,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                ua = 0;
                if (kvmppc_gpa_to_ua(vcpu->kvm,
                                tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
-                               &ua, NULL))
-                       return H_PARAMETER;
+                               &ua, NULL)) {
+                       ret = H_PARAMETER;
+                       goto unlock_exit;
+               }
 
                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
index 36b11c5a0dbb968444da85b230cbc7020f69f583..2654df220d05487cfd27394b0bb8360e2ab11c20 100644 (file)
@@ -110,7 +110,7 @@ static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
        vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
        vcpu->arch.tar_tm = vcpu->arch.tar;
        vcpu->arch.lr_tm = vcpu->arch.regs.link;
-       vcpu->arch.cr_tm = vcpu->arch.cr;
+       vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
        vcpu->arch.xer_tm = vcpu->arch.regs.xer;
        vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
 }
@@ -129,7 +129,7 @@ static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
        vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
        vcpu->arch.tar = vcpu->arch.tar_tm;
        vcpu->arch.regs.link = vcpu->arch.lr_tm;
-       vcpu->arch.cr = vcpu->arch.cr_tm;
+       vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
        vcpu->arch.regs.xer = vcpu->arch.xer_tm;
        vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
 }
@@ -141,7 +141,7 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
        uint64_t texasr;
 
        /* CR0 = 0 | MSR[TS] | 0 */
-       vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
+       vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
                (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
                 << CR0_SHIFT);
 
@@ -220,7 +220,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
        tm_abort(ra_val);
 
        /* CR0 = 0 | MSR[TS] | 0 */
-       vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
+       vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
                (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
                 << CR0_SHIFT);
 
@@ -494,8 +494,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
                        if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
                                preempt_disable();
-                               vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
-                                 (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));
+                               vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
+                                 (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));
 
                                vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
                                        (((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
index 083dcedba11ce1331303e637c77ba46ca3d1702e..3ae3e8d141e3e19f593fbc5acfe0392c3f646d07 100644 (file)
@@ -410,8 +410,8 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
               vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
        pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
               vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
-       pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
-              vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
+       pr_err("cr = %.8lx  xer = %.16lx  dsisr = %.8x\n",
+              vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
        pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
        pr_err("fault dar = %.16lx dsisr = %.8x\n",
               vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
@@ -1407,7 +1407,14 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                *val = get_reg_val(id, vcpu->arch.pspb);
                break;
        case KVM_REG_PPC_DPDES:
-               *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
+               /*
+                * On POWER9, where we are emulating msgsndp etc.,
+                * we return 1 bit for each vcpu, which can come from
+                * either vcore->dpdes or doorbell_request.
+                * On POWER8, doorbell_request is 0.
+                */
+               *val = get_reg_val(id, vcpu->arch.vcore->dpdes |
+                                  vcpu->arch.doorbell_request);
                break;
        case KVM_REG_PPC_VTB:
                *val = get_reg_val(id, vcpu->arch.vcore->vtb);
@@ -2550,7 +2557,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
                if (!spin_trylock(&pvc->lock))
                        continue;
                prepare_threads(pvc);
-               if (!pvc->n_runnable) {
+               if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
                        list_del_init(&pvc->preempt_list);
                        if (pvc->runner == NULL) {
                                pvc->vcore_state = VCORE_INACTIVE;
@@ -2571,15 +2578,20 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
        spin_unlock(&lp->lock);
 }
 
-static bool recheck_signals(struct core_info *cip)
+static bool recheck_signals_and_mmu(struct core_info *cip)
 {
        int sub, i;
        struct kvm_vcpu *vcpu;
+       struct kvmppc_vcore *vc;
 
-       for (sub = 0; sub < cip->n_subcores; ++sub)
-               for_each_runnable_thread(i, vcpu, cip->vc[sub])
+       for (sub = 0; sub < cip->n_subcores; ++sub) {
+               vc = cip->vc[sub];
+               if (!vc->kvm->arch.mmu_ready)
+                       return true;
+               for_each_runnable_thread(i, vcpu, vc)
                        if (signal_pending(vcpu->arch.run_task))
                                return true;
+       }
        return false;
 }
 
@@ -2800,7 +2812,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        local_irq_disable();
        hard_irq_disable();
        if (lazy_irq_pending() || need_resched() ||
-           recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
+           recheck_signals_and_mmu(&core_info)) {
                local_irq_enable();
                vc->vcore_state = VCORE_INACTIVE;
                /* Unlock all except the primary vcore */
@@ -3813,12 +3825,15 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 {
+       kvmppc_rmap_reset(kvm);
+       kvm->arch.process_table = 0;
+       /* Mutual exclusion with kvm_unmap_hva_range etc. */
+       spin_lock(&kvm->mmu_lock);
+       kvm->arch.radix = 0;
+       spin_unlock(&kvm->mmu_lock);
        kvmppc_free_radix(kvm);
        kvmppc_update_lpcr(kvm, LPCR_VPM1,
                           LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
-       kvmppc_rmap_reset(kvm);
-       kvm->arch.radix = 0;
-       kvm->arch.process_table = 0;
        return 0;
 }
 
@@ -3831,10 +3846,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
        if (err)
                return err;
 
+       kvmppc_rmap_reset(kvm);
+       /* Mutual exclusion with kvm_unmap_hva_range etc. */
+       spin_lock(&kvm->mmu_lock);
+       kvm->arch.radix = 1;
+       spin_unlock(&kvm->mmu_lock);
        kvmppc_free_hpt(&kvm->arch.hpt);
        kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
                           LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
-       kvm->arch.radix = 1;
        return 0;
 }
 
index a67cf1cdeda400cd659ba0858ab9040d10ced4c8..7c68d834c94a75dcfbc97a18fa0bcaa9fec63417 100644 (file)
@@ -452,7 +452,7 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
                                     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
                }
 
-               if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+               if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                        /*
                         * Need the extra ptesync to make sure we don't
                         * re-order the tlbie
index 1d14046124a01afffda02a70d73e7666b7b4b6bc..f1878e13dd5691b11743364d57e6f75a953fe8ec 100644 (file)
@@ -56,6 +56,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 #define STACK_SLOT_DAWR                (SFS-56)
 #define STACK_SLOT_DAWRX       (SFS-64)
 #define STACK_SLOT_HFSCR       (SFS-72)
+#define STACK_SLOT_AMR         (SFS-80)
+#define STACK_SLOT_UAMOR       (SFS-88)
 
 /*
  * Call kvmppc_hv_entry in real mode.
@@ -760,11 +762,9 @@ BEGIN_FTR_SECTION
        mfspr   r5, SPRN_TIDR
        mfspr   r6, SPRN_PSSCR
        mfspr   r7, SPRN_PID
-       mfspr   r8, SPRN_IAMR
        std     r5, STACK_SLOT_TID(r1)
        std     r6, STACK_SLOT_PSSCR(r1)
        std     r7, STACK_SLOT_PID(r1)
-       std     r8, STACK_SLOT_IAMR(r1)
        mfspr   r5, SPRN_HFSCR
        std     r5, STACK_SLOT_HFSCR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@@ -772,11 +772,18 @@ BEGIN_FTR_SECTION
        mfspr   r5, SPRN_CIABR
        mfspr   r6, SPRN_DAWR
        mfspr   r7, SPRN_DAWRX
+       mfspr   r8, SPRN_IAMR
        std     r5, STACK_SLOT_CIABR(r1)
        std     r6, STACK_SLOT_DAWR(r1)
        std     r7, STACK_SLOT_DAWRX(r1)
+       std     r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
+       mfspr   r5, SPRN_AMR
+       std     r5, STACK_SLOT_AMR(r1)
+       mfspr   r6, SPRN_UAMOR
+       std     r6, STACK_SLOT_UAMOR(r1)
+
 BEGIN_FTR_SECTION
        /* Set partition DABR */
        /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -1202,7 +1209,7 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
        ld      r5, VCPU_LR(r4)
-       lwz     r6, VCPU_CR(r4)
+       ld      r6, VCPU_CR(r4)
        mtlr    r5
        mtcr    r6
 
@@ -1313,7 +1320,7 @@ kvmppc_interrupt_hv:
        std     r3, VCPU_GPR(R12)(r9)
        /* CR is in the high half of r12 */
        srdi    r4, r12, 32
-       stw     r4, VCPU_CR(r9)
+       std     r4, VCPU_CR(r9)
 BEGIN_FTR_SECTION
        ld      r3, HSTATE_CFAR(r13)
        std     r3, VCPU_CFAR(r9)
@@ -1713,22 +1720,25 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
        mtspr   SPRN_PSPB, r0
        mtspr   SPRN_WORT, r0
 BEGIN_FTR_SECTION
-       mtspr   SPRN_IAMR, r0
        mtspr   SPRN_TCSCR, r0
        /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
        li      r0, 1
        sldi    r0, r0, 31
        mtspr   SPRN_MMCRS, r0
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
-8:
 
-       /* Save and reset AMR and UAMOR before turning on the MMU */
+       /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
+       ld      r8, STACK_SLOT_IAMR(r1)
+       mtspr   SPRN_IAMR, r8
+
+8:     /* Power7 jumps back in here */
        mfspr   r5,SPRN_AMR
        mfspr   r6,SPRN_UAMOR
        std     r5,VCPU_AMR(r9)
        std     r6,VCPU_UAMOR(r9)
-       li      r6,0
-       mtspr   SPRN_AMR,r6
+       ld      r5,STACK_SLOT_AMR(r1)
+       ld      r6,STACK_SLOT_UAMOR(r1)
+       mtspr   SPRN_AMR, r5
        mtspr   SPRN_UAMOR, r6
 
        /* Switch DSCR back to host value */
@@ -1897,11 +1907,9 @@ BEGIN_FTR_SECTION
        ld      r5, STACK_SLOT_TID(r1)
        ld      r6, STACK_SLOT_PSSCR(r1)
        ld      r7, STACK_SLOT_PID(r1)
-       ld      r8, STACK_SLOT_IAMR(r1)
        mtspr   SPRN_TIDR, r5
        mtspr   SPRN_PSSCR, r6
        mtspr   SPRN_PID, r7
-       mtspr   SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
 #ifdef CONFIG_PPC_RADIX_MMU
@@ -2895,29 +2903,39 @@ kvm_cede_prodded:
 kvm_cede_exit:
        ld      r9, HSTATE_KVM_VCPU(r13)
 #ifdef CONFIG_KVM_XICS
-       /* Abort if we still have a pending escalation */
+       /* are we using XIVE with single escalation? */
+       ld      r10, VCPU_XIVE_ESC_VADDR(r9)
+       cmpdi   r10, 0
+       beq     3f
+       li      r6, XIVE_ESB_SET_PQ_00
+       /*
+        * If we still have a pending escalation, abort the cede,
+        * and we must set PQ to 10 rather than 00 so that we don't
+        * potentially end up with two entries for the escalation
+        * interrupt in the XIVE interrupt queue.  In that case
+        * we also don't want to set xive_esc_on to 1 here in
+        * case we race with xive_esc_irq().
+        */
        lbz     r5, VCPU_XIVE_ESC_ON(r9)
        cmpwi   r5, 0
-       beq     1f
+       beq     4f
        li      r0, 0
        stb     r0, VCPU_CEDED(r9)
-1:     /* Enable XIVE escalation */
-       li      r5, XIVE_ESB_SET_PQ_00
+       li      r6, XIVE_ESB_SET_PQ_10
+       b       5f
+4:     li      r0, 1
+       stb     r0, VCPU_XIVE_ESC_ON(r9)
+       /* make sure store to xive_esc_on is seen before xive_esc_irq runs */
+       sync
+5:     /* Enable XIVE escalation */
        mfmsr   r0
        andi.   r0, r0, MSR_DR          /* in real mode? */
        beq     1f
-       ld      r10, VCPU_XIVE_ESC_VADDR(r9)
-       cmpdi   r10, 0
-       beq     3f
-       ldx     r0, r10, r5
+       ldx     r0, r10, r6
        b       2f
 1:     ld      r10, VCPU_XIVE_ESC_RADDR(r9)
-       cmpdi   r10, 0
-       beq     3f
-       ldcix   r0, r10, r5
+       ldcix   r0, r10, r6
 2:     sync
-       li      r0, 1
-       stb     r0, VCPU_XIVE_ESC_ON(r9)
 #endif /* CONFIG_KVM_XICS */
 3:     b       guest_exit_cont
 
index 008285058f9b554616cb52fdc53a85843826ae23..31cd0f327c8a2d5af48401001be86a1747b8ae51 100644 (file)
@@ -130,8 +130,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                        return RESUME_GUEST;
                }
                /* Set CR0 to indicate previous transactional state */
-               vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
-                       (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
+               vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+                       (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
                /* L=1 => tresume, L=0 => tsuspend */
                if (instr & (1 << 21)) {
                        if (MSR_TM_SUSPENDED(msr))
@@ -174,8 +174,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                copy_from_checkpoint(vcpu);
 
                /* Set CR0 to indicate previous transactional state */
-               vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
-                       (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
+               vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+                       (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
                vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
                return RESUME_GUEST;
 
@@ -204,8 +204,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                copy_to_checkpoint(vcpu);
 
                /* Set CR0 to indicate previous transactional state */
-               vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
-                       (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
+               vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+                       (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
                vcpu->arch.shregs.msr = msr | MSR_TS_S;
                return RESUME_GUEST;
        }
index b2c7c6fca4f96e5a315371e39ec9d25dc220da8d..3cf5863bc06e8513d5cb7d359f401846ba07aab5 100644 (file)
@@ -89,7 +89,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
                if (instr & (1 << 21))
                        vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
                /* Set CR0 to 0b0010 */
-               vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000;
+               vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+                       0x20000000;
                return 1;
        }
 
@@ -105,5 +106,5 @@ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
        vcpu->arch.shregs.msr &= ~MSR_TS_MASK;  /* go to N state */
        vcpu->arch.regs.nip = vcpu->arch.tfhar;
        copy_from_checkpoint(vcpu);
-       vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
+       vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000;
 }
index 614ebb4261f76593bb07f52f2fd0a2db7307d4fe..de9702219dee9442e23a59eeced647d99b476465 100644 (file)
@@ -167,7 +167,7 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
        svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
        svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
        svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
-       svcpu->cr  = vcpu->arch.cr;
+       svcpu->cr  = vcpu->arch.regs.ccr;
        svcpu->xer = vcpu->arch.regs.xer;
        svcpu->ctr = vcpu->arch.regs.ctr;
        svcpu->lr  = vcpu->arch.regs.link;
@@ -249,7 +249,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
        vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
        vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
        vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
-       vcpu->arch.cr  = svcpu->cr;
+       vcpu->arch.regs.ccr  = svcpu->cr;
        vcpu->arch.regs.xer = svcpu->xer;
        vcpu->arch.regs.ctr = svcpu->ctr;
        vcpu->arch.regs.link  = svcpu->lr;
index aae34f218ab455d2df214813831b3bfa63d4208a..031f07f048afde7c8761f2eaa8eab4e2f357e278 100644 (file)
@@ -1037,20 +1037,22 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
        /* Mask the VP IPI */
        xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
 
-       /* Disable the VP */
-       xive_native_disable_vp(xc->vp_id);
-
-       /* Free the queues & associated interrupts */
+       /* Free escalations */
        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
-               struct xive_q *q = &xc->queues[i];
-
-               /* Free the escalation irq */
                if (xc->esc_virq[i]) {
                        free_irq(xc->esc_virq[i], vcpu);
                        irq_dispose_mapping(xc->esc_virq[i]);
                        kfree(xc->esc_virq_names[i]);
                }
-               /* Free the queue */
+       }
+
+       /* Disable the VP */
+       xive_native_disable_vp(xc->vp_id);
+
+       /* Free the queues */
+       for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+               struct xive_q *q = &xc->queues[i];
+
                xive_native_disable_queue(xc->vp_id, q, i);
                if (q->qpage) {
                        free_pages((unsigned long)q->qpage,
index 612b7f6a887f8a2508a2add10bd574b9417dbdea..4e5081e584098b38f64a8774b7e64b6352e1f674 100644 (file)
@@ -186,7 +186,7 @@ END_BTB_FLUSH_SECTION
         */
        PPC_LL  r4, PACACURRENT(r13)
        PPC_LL  r4, (THREAD + THREAD_KVM_VCPU)(r4)
-       stw     r10, VCPU_CR(r4)
+       PPC_STL r10, VCPU_CR(r4)
        PPC_STL r11, VCPU_GPR(R4)(r4)
        PPC_STL r5, VCPU_GPR(R5)(r4)
        PPC_STL r6, VCPU_GPR(R6)(r4)
@@ -296,7 +296,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
        PPC_STL r4, VCPU_GPR(R4)(r11)
        PPC_LL  r4, THREAD_NORMSAVE(0)(r10)
        PPC_STL r5, VCPU_GPR(R5)(r11)
-       stw     r13, VCPU_CR(r11)
+       PPC_STL r13, VCPU_CR(r11)
        mfspr   r5, \srr0
        PPC_STL r3, VCPU_GPR(R10)(r11)
        PPC_LL  r3, THREAD_NORMSAVE(2)(r10)
@@ -323,7 +323,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
        PPC_STL r4, VCPU_GPR(R4)(r11)
        PPC_LL  r4, GPR9(r8)
        PPC_STL r5, VCPU_GPR(R5)(r11)
-       stw     r9, VCPU_CR(r11)
+       PPC_STL r9, VCPU_CR(r11)
        mfspr   r5, \srr0
        PPC_STL r3, VCPU_GPR(R8)(r11)
        PPC_LL  r3, GPR10(r8)
@@ -647,7 +647,7 @@ lightweight_exit:
        PPC_LL  r3, VCPU_LR(r4)
        PPC_LL  r5, VCPU_XER(r4)
        PPC_LL  r6, VCPU_CTR(r4)
-       lwz     r7, VCPU_CR(r4)
+       PPC_LL  r7, VCPU_CR(r4)
        PPC_LL  r8, VCPU_PC(r4)
        PPC_LD(r9, VCPU_SHARED_MSR, r11)
        PPC_LL  r0, VCPU_GPR(R0)(r4)
index 75dce1ef3bc83473e23689ded4c7bfdf72637a7e..f91b1309a0a861688c9c794f691eacaf2e8351c2 100644 (file)
@@ -117,7 +117,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 
        emulated = EMULATE_FAIL;
        vcpu->arch.regs.msr = vcpu->arch.shared->msr;
-       vcpu->arch.regs.ccr = vcpu->arch.cr;
        if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
                int type = op.type & INSTR_TYPE_MASK;
                int size = GETSIZE(op.type);
index 578174a33d229732c0fef060ef95d3a1f0176b81..51cd66dc1bb09955a0b4a7b42698f69911272b98 100644 (file)
@@ -61,6 +61,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
        return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
 }
 
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+       return kvm_arch_vcpu_runnable(vcpu);
+}
+
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
        return false;
index aaa28fd918fe4ce26a5bb0a46909ecff9ec877c3..0c13561d8b807d4695dc5ba0973b93c9daabb311 100644 (file)
@@ -203,7 +203,7 @@ static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
 
 static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 {
-       if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                /* Need the extra ptesync to ensure we don't reorder tlbie*/
                asm volatile("ptesync": : :"memory");
                ___tlbie(vpn, psize, apsize, ssize);
index f23a89d8e4ce6c8fecf0816d23b88d621c68428b..b1007e9a31ba7b16573803d4ce3f0beafff0ccde 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/context_tracking.h>
 #include <linux/libfdt.h>
 #include <linux/pkeys.h>
+#include <linux/cpu.h>
 
 #include <asm/debugfs.h>
 #include <asm/processor.h>
@@ -1859,11 +1860,20 @@ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
         *
         * For guests on platforms before POWER9, we clamp the it limit to 1G
         * to avoid some funky things such as RTAS bugs etc...
+        *
+        * On POWER9 we limit to 1TB in case the host erroneously told us that
+        * the RMA was >1TB. Effective address bits 0:23 are treated as zero
+        * (meaning the access is aliased to zero i.e. addr = addr % 1TB)
+        * for virtual real mode addressing and so it doesn't make sense to
+        * have an area larger than 1TB as it can't be addressed.
         */
        if (!early_cpu_has_feature(CPU_FTR_HVMODE)) {
                ppc64_rma_size = first_memblock_size;
                if (!early_cpu_has_feature(CPU_FTR_ARCH_300))
                        ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000);
+               else
+                       ppc64_rma_size = min_t(u64, ppc64_rma_size,
+                                              1UL << SID_SHIFT_1T);
 
                /* Finally limit subsequent allocations */
                memblock_set_current_limit(ppc64_rma_size);
@@ -1882,10 +1892,16 @@ static int hpt_order_get(void *data, u64 *val)
 
 static int hpt_order_set(void *data, u64 val)
 {
+       int ret;
+
        if (!mmu_hash_ops.resize_hpt)
                return -ENODEV;
 
-       return mmu_hash_ops.resize_hpt(val);
+       cpus_read_lock();
+       ret = mmu_hash_ops.resize_hpt(val);
+       cpus_read_unlock();
+
+       return ret;
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n");
index 7296a42eb62e42f3ad6561f5833d638c990d11ab..cef0b7ee1024646cdc0edeb4d9c5cbbc0a4837fc 100644 (file)
@@ -150,6 +150,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
+               if (!pu)
+                       return NULL;
                if (pshift == PUD_SHIFT)
                        return (pte_t *)pu;
                else if (pshift > PMD_SHIFT) {
@@ -158,6 +160,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
+                       if (!pm)
+                               return NULL;
                        if (pshift == PMD_SHIFT)
                                /* 16MB hugepage */
                                return (pte_t *)pm;
@@ -174,12 +178,16 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
+               if (!pu)
+                       return NULL;
                if (pshift >= PUD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
+                       if (!pm)
+                               return NULL;
                        ptl = pmd_lockptr(mm, pm);
                        hpdp = (hugepd_t *)pm;
                }
index c879979faa73b766cc1336bfacf0d3b6c65db5ea..3ea4c1f107d7ef99ad5699d16b6cc24dfc41d453 100644 (file)
@@ -521,14 +521,6 @@ void __init radix__early_init_devtree(void)
        mmu_psize_defs[MMU_PAGE_64K].shift = 16;
        mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
 found:
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-       if (mmu_psize_defs[MMU_PAGE_2M].shift) {
-               /*
-                * map vmemmap using 2M if available
-                */
-               mmu_vmemmap_psize = MMU_PAGE_2M;
-       }
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
        return;
 }
 
@@ -567,7 +559,13 @@ void __init radix__early_init_mmu(void)
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* vmemmap mapping */
-       mmu_vmemmap_psize = mmu_virtual_psize;
+       if (mmu_psize_defs[MMU_PAGE_2M].shift) {
+               /*
+                * map vmemmap using 2M if available
+                */
+               mmu_vmemmap_psize = MMU_PAGE_2M;
+       } else
+               mmu_vmemmap_psize = mmu_virtual_psize;
 #endif
        /*
         * initialize page table size
index b271b283c785e3a07589ea81c6b8e40e7def5a69..25a8dd9cd71dbbae59a8768e86febe5ea22e93df 100644 (file)
@@ -414,3 +414,13 @@ bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
 
        return pkey_access_permitted(vma_pkey(vma), write, execute);
 }
+
+void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+       if (static_branch_likely(&pkey_disabled))
+               return;
+
+       /* Duplicate the oldmm pkey state in mm: */
+       mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
+       mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
+}
index fef3e1eb3a1998158287884cc08bbe0736cbbc30..0cddae4263f9699d5eddb6635cc630e85b6febd2 100644 (file)
@@ -220,7 +220,7 @@ static inline void fixup_tlbie(void)
        unsigned long pid = 0;
        unsigned long va = ((1UL << 52) - 1);
 
-       if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
        }
@@ -230,7 +230,7 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
 {
        unsigned long va = ((1UL << 52) - 1);
 
-       if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
        }
index 8b4dd0da08395556950c50029af05bcfad41085c..9e27cfe2702686fe1dc718e05341dd025c3bc3e6 100644 (file)
@@ -158,6 +158,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 
        mtdcr(uic->dcrbase + UIC_PR, pr);
        mtdcr(uic->dcrbase + UIC_TR, tr);
+       mtdcr(uic->dcrbase + UIC_SR, ~mask);
 
        raw_spin_unlock_irqrestore(&uic->lock, flags);
 
index f89808b9713d0671d51d7a38ac0e8ae320b4d575..b0660ef691779407ec1296d80586093683aa8dc2 100644 (file)
 #define SL_IBAT2       0x48
 #define SL_DBAT3       0x50
 #define SL_IBAT3       0x58
-#define SL_TB          0x60
-#define SL_R2          0x68
-#define SL_CR          0x6c
-#define SL_R12         0x70    /* r12 to r31 */
+#define SL_DBAT4       0x60
+#define SL_IBAT4       0x68
+#define SL_DBAT5       0x70
+#define SL_IBAT5       0x78
+#define SL_DBAT6       0x80
+#define SL_IBAT6       0x88
+#define SL_DBAT7       0x90
+#define SL_IBAT7       0x98
+#define SL_TB          0xa0
+#define SL_R2          0xa8
+#define SL_CR          0xac
+#define SL_R12         0xb0    /* r12 to r31 */
 #define SL_SIZE                (SL_R12 + 80)
 
        .section .text
@@ -126,6 +134,41 @@ _GLOBAL(low_sleep_handler)
        mfibatl r4,3
        stw     r4,SL_IBAT3+4(r1)
 
+BEGIN_MMU_FTR_SECTION
+       mfspr   r4,SPRN_DBAT4U
+       stw     r4,SL_DBAT4(r1)
+       mfspr   r4,SPRN_DBAT4L
+       stw     r4,SL_DBAT4+4(r1)
+       mfspr   r4,SPRN_DBAT5U
+       stw     r4,SL_DBAT5(r1)
+       mfspr   r4,SPRN_DBAT5L
+       stw     r4,SL_DBAT5+4(r1)
+       mfspr   r4,SPRN_DBAT6U
+       stw     r4,SL_DBAT6(r1)
+       mfspr   r4,SPRN_DBAT6L
+       stw     r4,SL_DBAT6+4(r1)
+       mfspr   r4,SPRN_DBAT7U
+       stw     r4,SL_DBAT7(r1)
+       mfspr   r4,SPRN_DBAT7L
+       stw     r4,SL_DBAT7+4(r1)
+       mfspr   r4,SPRN_IBAT4U
+       stw     r4,SL_IBAT4(r1)
+       mfspr   r4,SPRN_IBAT4L
+       stw     r4,SL_IBAT4+4(r1)
+       mfspr   r4,SPRN_IBAT5U
+       stw     r4,SL_IBAT5(r1)
+       mfspr   r4,SPRN_IBAT5L
+       stw     r4,SL_IBAT5+4(r1)
+       mfspr   r4,SPRN_IBAT6U
+       stw     r4,SL_IBAT6(r1)
+       mfspr   r4,SPRN_IBAT6L
+       stw     r4,SL_IBAT6+4(r1)
+       mfspr   r4,SPRN_IBAT7U
+       stw     r4,SL_IBAT7(r1)
+       mfspr   r4,SPRN_IBAT7L
+       stw     r4,SL_IBAT7+4(r1)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
        /* Backup various CPU config stuffs */
        bl      __save_cpu_setup
 
@@ -326,22 +369,37 @@ grackle_wake_up:
        mtibatl 3,r4
 
 BEGIN_MMU_FTR_SECTION
-       li      r4,0
+       lwz     r4,SL_DBAT4(r1)
        mtspr   SPRN_DBAT4U,r4
+       lwz     r4,SL_DBAT4+4(r1)
        mtspr   SPRN_DBAT4L,r4
+       lwz     r4,SL_DBAT5(r1)
        mtspr   SPRN_DBAT5U,r4
+       lwz     r4,SL_DBAT5+4(r1)
        mtspr   SPRN_DBAT5L,r4
+       lwz     r4,SL_DBAT6(r1)
        mtspr   SPRN_DBAT6U,r4
+       lwz     r4,SL_DBAT6+4(r1)
        mtspr   SPRN_DBAT6L,r4
+       lwz     r4,SL_DBAT7(r1)
        mtspr   SPRN_DBAT7U,r4
+       lwz     r4,SL_DBAT7+4(r1)
        mtspr   SPRN_DBAT7L,r4
+       lwz     r4,SL_IBAT4(r1)
        mtspr   SPRN_IBAT4U,r4
+       lwz     r4,SL_IBAT4+4(r1)
        mtspr   SPRN_IBAT4L,r4
+       lwz     r4,SL_IBAT5(r1)
        mtspr   SPRN_IBAT5U,r4
+       lwz     r4,SL_IBAT5+4(r1)
        mtspr   SPRN_IBAT5L,r4
+       lwz     r4,SL_IBAT6(r1)
        mtspr   SPRN_IBAT6U,r4
+       lwz     r4,SL_IBAT6+4(r1)
        mtspr   SPRN_IBAT6L,r4
+       lwz     r4,SL_IBAT7(r1)
        mtspr   SPRN_IBAT7U,r4
+       lwz     r4,SL_IBAT7+4(r1)
        mtspr   SPRN_IBAT7L,r4
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 
index 8006c54a91e3fb6c08fa22e87631b9297e5dca5e..fd8166ffbffa74c12d8aec8561631bedfba5af69 100644 (file)
@@ -56,9 +56,22 @@ static struct dentry *atsd_threshold_dentry;
 static struct pci_dev *get_pci_dev(struct device_node *dn)
 {
        struct pci_dn *pdn = PCI_DN(dn);
+       struct pci_dev *pdev;
 
-       return pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
+       pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
                                           pdn->busno, pdn->devfn);
+
+       /*
+        * pci_get_domain_bus_and_slot() increased the reference count of
+        * the PCI device, but callers don't need that actually as the PE
+        * already holds a reference to the device. Since callers aren't
+        * aware of the reference count change, call pci_dev_put() now to
+        * avoid leaks.
+        */
+       if (pdev)
+               pci_dev_put(pdev);
+
+       return pdev;
 }
 
 /* Given a NPU device get the associated PCI device. */
index 828f6656f8f745c36316b9c6bca5279fe5628b8e..649fb268f446141bd255f4414f1f692be187fba1 100644 (file)
@@ -57,9 +57,9 @@ static void export_imc_mode_and_cmd(struct device_node *node,
                                    struct imc_pmu *pmu_ptr)
 {
        static u64 loc, *imc_mode_addr, *imc_cmd_addr;
-       int chip = 0, nid;
        char mode[16], cmd[16];
        u32 cb_offset;
+       struct imc_mem_info *ptr = pmu_ptr->mem_info;
 
        imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
 
@@ -73,20 +73,20 @@ static void export_imc_mode_and_cmd(struct device_node *node,
        if (of_property_read_u32(node, "cb_offset", &cb_offset))
                cb_offset = IMC_CNTL_BLK_OFFSET;
 
-       for_each_node(nid) {
-               loc = (u64)(pmu_ptr->mem_info[chip].vbase) + cb_offset;
+       while (ptr->vbase != NULL) {
+               loc = (u64)(ptr->vbase) + cb_offset;
                imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
-               sprintf(mode, "imc_mode_%d", nid);
+               sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
                if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
                                            imc_mode_addr))
                        goto err;
 
                imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
-               sprintf(cmd, "imc_cmd_%d", nid);
+               sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
                if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
                                            imc_cmd_addr))
                        goto err;
-               chip++;
+               ptr++;
        }
        return;
 
index f4875fe3f8ff288827ca142a0a1b47c5da9bfa2c..74215ebda142da25794fff581fd6bfaffb5a7cb0 100644 (file)
@@ -303,7 +303,7 @@ OPAL_CALL(opal_xive_set_queue_info,         OPAL_XIVE_SET_QUEUE_INFO);
 OPAL_CALL(opal_xive_donate_page,               OPAL_XIVE_DONATE_PAGE);
 OPAL_CALL(opal_xive_alloc_vp_block,            OPAL_XIVE_ALLOCATE_VP_BLOCK);
 OPAL_CALL(opal_xive_free_vp_block,             OPAL_XIVE_FREE_VP_BLOCK);
-OPAL_CALL(opal_xive_allocate_irq,              OPAL_XIVE_ALLOCATE_IRQ);
+OPAL_CALL(opal_xive_allocate_irq_raw,          OPAL_XIVE_ALLOCATE_IRQ);
 OPAL_CALL(opal_xive_free_irq,                  OPAL_XIVE_FREE_IRQ);
 OPAL_CALL(opal_xive_get_vp_info,               OPAL_XIVE_GET_VP_INFO);
 OPAL_CALL(opal_xive_set_vp_info,               OPAL_XIVE_SET_VP_INFO);
index 38fe4087484a61e7ead4fed3ea89db130b596b8b..edf9032e2e5ccacac0de15e9d902a139c21941e9 100644 (file)
@@ -680,7 +680,10 @@ static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
                                       bin_attr->size);
 }
 
-static BIN_ATTR_RO(symbol_map, 0);
+static struct bin_attribute symbol_map_attr = {
+       .attr = {.name = "symbol_map", .mode = 0400},
+       .read = symbol_map_read
+};
 
 static void opal_export_symmap(void)
 {
@@ -697,10 +700,10 @@ static void opal_export_symmap(void)
                return;
 
        /* Setup attributes */
-       bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
-       bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
+       symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
+       symbol_map_attr.size = be64_to_cpu(syms[1]);
 
-       rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
+       rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr);
        if (rc)
                pr_warn("Error %d creating OPAL symbols file\n", rc);
 }
index f5adb6b756f7547507d07ae3f7ece52df68807fe..15a567128c0f1d99423e8bfef3881fd1330bee3c 100644 (file)
@@ -36,7 +36,8 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
        struct page *tce_mem = NULL;
        __be64 *addr;
 
-       tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT);
+       tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN,
+                       shift - PAGE_SHIFT);
        if (!tce_mem) {
                pr_err("Failed to allocate a TCE memory, level shift=%d\n",
                                shift);
@@ -48,6 +49,9 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
        return addr;
 }
 
+static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
+               unsigned long size, unsigned int levels);
+
 static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
 {
        __be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
@@ -57,9 +61,9 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
 
        while (level) {
                int n = (idx & mask) >> (level * shift);
-               unsigned long tce;
+               unsigned long oldtce, tce = be64_to_cpu(READ_ONCE(tmp[n]));
 
-               if (tmp[n] == 0) {
+               if (!tce) {
                        __be64 *tmp2;
 
                        if (!alloc)
@@ -70,10 +74,15 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
                        if (!tmp2)
                                return NULL;
 
-                       tmp[n] = cpu_to_be64(__pa(tmp2) |
-                                       TCE_PCI_READ | TCE_PCI_WRITE);
+                       tce = __pa(tmp2) | TCE_PCI_READ | TCE_PCI_WRITE;
+                       oldtce = be64_to_cpu(cmpxchg(&tmp[n], 0,
+                                       cpu_to_be64(tce)));
+                       if (oldtce) {
+                               pnv_pci_ioda2_table_do_free_pages(tmp2,
+                                       ilog2(tbl->it_level_size) + 3, 1);
+                               tce = oldtce;
+                       }
                }
-               tce = be64_to_cpu(tmp[n]);
 
                tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
                idx &= ~mask;
@@ -161,6 +170,9 @@ void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
 
                if (ptce)
                        *ptce = cpu_to_be64(0);
+               else
+                       /* Skip the rest of the level */
+                       i |= tbl->it_level_size - 1;
        }
 }
 
@@ -260,7 +272,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
        unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
                        PAGE_SHIFT);
        const unsigned long tce_table_size = 1UL << table_shift;
-       unsigned int tmplevels = levels;
 
        if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
                return -EINVAL;
@@ -268,9 +279,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
        if (!is_power_of_2(window_size))
                return -EINVAL;
 
-       if (alloc_userspace_copy && (window_size > (1ULL << 32)))
-               tmplevels = 1;
-
        /* Adjust direct table size from window_size and levels */
        entries_shift = (entries_shift + levels - 1) / levels;
        level_shift = entries_shift + 3;
@@ -281,7 +289,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 
        /* Allocate TCE table */
        addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-                       tmplevels, tce_table_size, &offset, &total_allocated);
+                       1, tce_table_size, &offset, &total_allocated);
 
        /* addr==NULL means that the first level allocation failed */
        if (!addr)
@@ -292,18 +300,18 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
         * we did not allocate as much as we wanted,
         * release partially allocated table.
         */
-       if (tmplevels == levels && offset < tce_table_size)
+       if (levels == 1 && offset < tce_table_size)
                goto free_tces_exit;
 
        /* Allocate userspace view of the TCE table */
        if (alloc_userspace_copy) {
                offset = 0;
                uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-                               tmplevels, tce_table_size, &offset,
+                               1, tce_table_size, &offset,
                                &total_allocated_uas);
                if (!uas)
                        goto free_tces_exit;
-               if (tmplevels == levels && (offset < tce_table_size ||
+               if (levels == 1 && (offset < tce_table_size ||
                                total_allocated_uas != total_allocated))
                        goto free_uas_exit;
        }
@@ -318,7 +326,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 
        pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
                        window_size, tce_table_size, bus_offset, tbl->it_base,
-                       tbl->it_userspace, tmplevels, levels);
+                       tbl->it_userspace, 1, levels);
 
        return 0;
 
index 8b37b28e383181bfd7fb8a0b24e44983d17efc81..e302aa092d4f1e09f8076873ea4ede348919ad4f 100644 (file)
@@ -243,7 +243,7 @@ extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe);
 extern int pnv_npu2_init(struct pnv_phb *phb);
 
 /* pci-ioda-tce.c */
-#define POWERNV_IOMMU_DEFAULT_LEVELS   1
+#define POWERNV_IOMMU_DEFAULT_LEVELS   2
 #define POWERNV_IOMMU_MAX_LEVELS       5
 
 extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
index e4c658cda3a732e489c931d5985bd5d84fca354e..f99cd31b6fd1a96ed5a6886ba650412f6fe4d6df 100644 (file)
@@ -1012,6 +1012,9 @@ static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
        if (!memblock_size)
                return -EINVAL;
 
+       if (!pr->old_prop)
+               return 0;
+
        p = (__be32 *) pr->old_prop->value;
        if (!p)
                return -EINVAL;
index 9e52b686a8fa436674f8b19b277c7f98074520a6..ea602f7f97ce1df5386f02808438cdb3272dd6d5 100644 (file)
@@ -647,7 +647,10 @@ static int pseries_lpar_resize_hpt_commit(void *data)
        return 0;
 }
 
-/* Must be called in user context */
+/*
+ * Must be called in process context. The caller must hold the
+ * cpus_lock.
+ */
 static int pseries_lpar_resize_hpt(unsigned long shift)
 {
        struct hpt_resize_state state = {
@@ -699,7 +702,8 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
 
        t1 = ktime_get();
 
-       rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL);
+       rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
+                                    &state, NULL);
 
        t2 = ktime_get();
 
index f0e30dc949888a578ecfd6939c65b1eecc210604..e4ea713833832273bbc3e1b0a5fc9b13c214bb6b 100644 (file)
@@ -9,8 +9,10 @@
  * 2 as published by the Free Software Foundation.
  */
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/kobject.h>
+#include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/stat.h>
 #include <linux/completion.h>
@@ -208,7 +210,11 @@ static int update_dt_node(__be32 phandle, s32 scope)
 
                                prop_data += vd;
                        }
+
+                       cond_resched();
                }
+
+               cond_resched();
        } while (rtas_rc == 1);
 
        of_node_put(dn);
@@ -317,8 +323,12 @@ int pseries_devicetree_update(s32 scope)
                                        add_dt_node(phandle, drc_index);
                                        break;
                                }
+
+                               cond_resched();
                        }
                }
+
+               cond_resched();
        } while (rc == 1);
 
        kfree(rtas_buf);
@@ -344,11 +354,19 @@ void post_mobility_fixup(void)
        if (rc)
                printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc);
 
+       /*
+        * We don't want CPUs to go online/offline while the device
+        * tree is being updated.
+        */
+       cpus_read_lock();
+
        rc = pseries_devicetree_update(MIGRATION_SCOPE);
        if (rc)
                printk(KERN_ERR "Post-mobility device tree update "
                        "failed: %d\n", rc);
 
+       cpus_read_unlock();
+
        /* Possibly switch to a new RFI flush type */
        pseries_setup_rfi_flush();
 
index ba1791fd3234dbbfd5fac997ea1433119d44f92d..67f49159ea708236a085a6293ced89c80fcdd76b 100644 (file)
@@ -325,6 +325,9 @@ static void pseries_lpar_idle(void)
         * low power mode by ceding processor to hypervisor
         */
 
+       if (!prep_irq_for_idle())
+               return;
+
        /* Indicate to hypervisor that we are idle. */
        get_lppaca()->idle = 1;
 
index 959a2a62f23329775beba45473e944395005192a..0b24b10312213e60d7aff0d1575cb075dab1f79e 100644 (file)
@@ -483,7 +483,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
         * Now go through the entire mask until we find a valid
         * target.
         */
-       for (;;) {
+       do {
                /*
                 * We re-check online as the fallback case passes us
                 * an untested affinity mask
@@ -491,12 +491,11 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
                if (cpu_online(cpu) && xive_try_pick_target(cpu))
                        return cpu;
                cpu = cpumask_next(cpu, mask);
-               if (cpu == first)
-                       break;
                /* Wrap around */
                if (cpu >= nr_cpu_ids)
                        cpu = cpumask_first(mask);
-       }
+       } while (cpu != first);
+
        return -1;
 }
 
index 5b20a678d755b6f3e2bdb29a0bec9132521e70d4..6d5b2802245285a700924258dea315b7c3629a44 100644 (file)
@@ -235,6 +235,17 @@ static bool xive_native_match(struct device_node *node)
        return of_device_is_compatible(node, "ibm,opal-xive-vc");
 }
 
+static s64 opal_xive_allocate_irq(u32 chip_id)
+{
+       s64 irq = opal_xive_allocate_irq_raw(chip_id);
+
+       /*
+        * Old versions of skiboot can incorrectly return 0xffffffff to
+        * indicate no space, fix it up here.
+        */
+       return irq == 0xffffffff ? OPAL_RESOURCE : irq;
+}
+
 #ifdef CONFIG_SMP
 static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
 {
index dd6badc31f458488b1f0a37209adbf7a5536dc43..bb5db7bfd8539edaaf68af6c6e67e6c5df6bf8de 100644 (file)
@@ -466,8 +466,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
        local_irq_save(flags);
        hard_irq_disable();
 
-       tracing_enabled = tracing_is_on();
-       tracing_off();
+       if (!fromipi) {
+               tracing_enabled = tracing_is_on();
+               tracing_off();
+       }
 
        bp = in_breakpoint_table(regs->nip, &offset);
        if (bp != NULL) {
@@ -2495,13 +2497,16 @@ static void dump_pacas(void)
 static void dump_one_xive(int cpu)
 {
        unsigned int hwid = get_hard_smp_processor_id(cpu);
+       bool hv = cpu_has_feature(CPU_FTR_HVMODE);
 
-       opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
-       opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
-       opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
-       opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
-       opal_xive_dump(XIVE_DUMP_VP, hwid);
-       opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
+       if (hv) {
+               opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
+               opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
+               opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
+               opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
+               opal_xive_dump(XIVE_DUMP_VP, hwid);
+               opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
+       }
 
        if (setjmp(bus_error_jmp) != 0) {
                catch_memory_errors = 0;
index dd6b05bff75b6fd41b3fd12239ace6edcf022bad..d911a8c2314d209871c8cb29ac9f1a81d4236f53 100644 (file)
@@ -23,7 +23,7 @@ extern void __fstate_restore(struct task_struct *restore_from);
 
 static inline void __fstate_clean(struct pt_regs *regs)
 {
-       regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+       regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
 }
 
 static inline void fstate_save(struct task_struct *task,
index fa2c08e3c05e6ee74ea0258b62cdf6580f98a385..a03821b2656aa80d8879ae614ce67ae47b40ad04 100644 (file)
@@ -171,9 +171,13 @@ ENTRY(handle_exception)
        move a1, s4 /* scause */
        tail do_IRQ
 1:
-       /* Exceptions run with interrupts enabled */
+       /* Exceptions run with interrupts enabled or disabled
+          depending on the state of sstatus.SR_SPIE */
+       andi t0, s1, SR_SPIE
+       beqz t0, 1f
        csrs sstatus, SR_SIE
 
+1:
        /* Handle syscalls */
        li t0, EXC_SYSCALL
        beq s4, t0, handle_syscall
index c433f6d3dd64f0b4eec0a567854d1cfca59c650f..a840b7d074f7d3028bb04f7d10e849db68c08c06 100644 (file)
@@ -132,7 +132,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 {
        unsigned long return_hooker = (unsigned long)&return_to_handler;
        unsigned long old;
-       int err;
 
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
index 8ff7cb3da1cbac77c0b148145b71de92a9d09e77..2bc189187ed402fb5676709ee192d02ca8647dcd 100644 (file)
@@ -585,6 +585,9 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
+       if (!nbytes)
+               return -EINVAL;
+
        if (unlikely(!xts_ctx->fc))
                return xts_fallback_encrypt(desc, dst, src, nbytes);
 
@@ -599,6 +602,9 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
+       if (!nbytes)
+               return -EINVAL;
+
        if (unlikely(!xts_ctx->fc))
                return xts_fallback_decrypt(desc, dst, src, nbytes);
 
index c681329fdeec6ba42c95cb321b4ea8ae23b0df06..e4d17d9ea93d86692ca25985e3c0fe1fc0df983a 100644 (file)
@@ -269,7 +269,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
 static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct inode *root_inode;
-       struct dentry *root_dentry;
+       struct dentry *root_dentry, *update_file;
        int rc = 0;
        struct hypfs_sb_info *sbi;
 
@@ -300,9 +300,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
                rc = hypfs_diag_create_files(root_dentry);
        if (rc)
                return rc;
-       sbi->update_file = hypfs_create_update_file(root_dentry);
-       if (IS_ERR(sbi->update_file))
-               return PTR_ERR(sbi->update_file);
+       update_file = hypfs_create_update_file(root_dentry);
+       if (IS_ERR(update_file))
+               return PTR_ERR(update_file);
+       sbi->update_file = update_file;
        hypfs_update_update(sb);
        pr_info("Hypervisor filesystem mounted\n");
        return 0;
index 99c8ce30b3cd1a4f70540ffa0d03cbb99e35df4d..7ffbc5d7ccf380fde85b0c13df4c019d50a87a88 100644 (file)
@@ -59,6 +59,18 @@ static inline int test_facility(unsigned long nr)
        return __test_facility(nr, &S390_lowcore.stfle_fac_list);
 }
 
+static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
+{
+       register unsigned long reg0 asm("0") = size - 1;
+
+       asm volatile(
+               ".insn s,0xb2b00000,0(%1)" /* stfle */
+               : "+d" (reg0)
+               : "a" (stfle_fac_list)
+               : "memory", "cc");
+       return reg0;
+}
+
 /**
  * stfle - Store facility list extended
  * @stfle_fac_list: array where facility list can be stored
@@ -76,13 +88,8 @@ static inline void stfle(u64 *stfle_fac_list, int size)
        memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
        if (S390_lowcore.stfl_fac_list & 0x01000000) {
                /* More facility bits available with stfle */
-               register unsigned long reg0 asm("0") = size - 1;
-
-               asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
-                            : "+d" (reg0)
-                            : "a" (stfle_fac_list)
-                            : "memory", "cc");
-               nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
+               nr = __stfle_asm(stfle_fac_list, size);
+               nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
        }
        memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
        preempt_enable();
index 41e3908b397f8f2faa5bab59266fec25c635a6a7..0d753291c43c0f2427a2234b2fde9074b290ae3c 100644 (file)
@@ -176,6 +176,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
 #define VM_DATA_DEFAULT_FLAGS  (VM_READ | VM_WRITE | \
                                 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
+#define ARCH_ZONE_DMA_BITS     31
+
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 
index 6e758bb6cd29b70821bec49fd69b6d8d76111d2e..99ef537e548a372e920a825386431621f0a703c0 100644 (file)
@@ -183,20 +183,30 @@ unsigned long get_wchan(struct task_struct *p)
 
        if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
                return 0;
+
+       if (!try_get_task_stack(p))
+               return 0;
+
        low = task_stack_page(p);
        high = (struct stack_frame *) task_pt_regs(p);
        sf = (struct stack_frame *) p->thread.ksp;
-       if (sf <= low || sf > high)
-               return 0;
+       if (sf <= low || sf > high) {
+               return_address = 0;
+               goto out;
+       }
        for (count = 0; count < 16; count++) {
                sf = (struct stack_frame *) sf->back_chain;
-               if (sf <= low || sf > high)
-                       return 0;
+               if (sf <= low || sf > high) {
+                       return_address = 0;
+                       goto out;
+               }
                return_address = sf->gprs[8];
                if (!in_sched_functions(return_address))
-                       return return_address;
+                       goto out;
        }
-       return 0;
+out:
+       put_task_stack(p);
+       return return_address;
 }
 
 unsigned long arch_align_stack(unsigned long sp)
index e8184a15578a332eae08bcb60e21fbb3ec0dd780..7b96888974db11ece70015bf3caf3450c41174bf 100644 (file)
@@ -311,7 +311,8 @@ int arch_update_cpu_topology(void)
        on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
        for_each_online_cpu(cpu) {
                dev = get_cpu_device(cpu);
-               kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+               if (dev)
+                       kobject_uevent(&dev->kobj, KOBJ_CHANGE);
        }
        return rc;
 }
index b43f8d33a3697de32e7c9f7dae4cbf2e7cf3bc46..18ede6e806b917ce8a302bf864219de236282b4a 100644 (file)
@@ -31,10 +31,9 @@ PHDRS {
 SECTIONS
 {
        . = 0x100000;
-       _stext = .;             /* Start of text section */
        .text : {
-               /* Text and read-only data */
-               _text = .;
+               _stext = .;             /* Start of text section */
+               _text = .;              /* Text and read-only data */
                HEAD_TEXT
                TEXT_TEXT
                SCHED_TEXT
@@ -46,11 +45,10 @@ SECTIONS
                *(.text.*_indirect_*)
                *(.fixup)
                *(.gnu.warning)
+               . = ALIGN(PAGE_SIZE);
+               _etext = .;             /* End of text section */
        } :text = 0x0700
 
-       . = ALIGN(PAGE_SIZE);
-       _etext = .;             /* End of text section */
-
        NOTES :text :note
 
        .dummy : { *(.dummy) } :data
index fcb55b02990ef96e20148472828de2e324c6a56f..05ea466b9e403d23f29562523a4154ce2a31e6d8 100644 (file)
@@ -1879,6 +1879,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
        case KVM_S390_MCHK:
                irq->u.mchk.mcic = s390int->parm64;
                break;
+       case KVM_S390_INT_PFAULT_INIT:
+               irq->u.ext.ext_params = s390int->parm;
+               irq->u.ext.ext_params2 = s390int->parm64;
+               break;
+       case KVM_S390_RESTART:
+       case KVM_S390_INT_CLOCK_COMP:
+       case KVM_S390_INT_CPU_TIMER:
+               break;
+       default:
+               return -EINVAL;
        }
        return 0;
 }
index fc7de27960e73dab4ad96d8a6c987056a6b35cd2..fac1d4eaa426868882e8082bc840b3861d33a7cf 100644 (file)
@@ -928,6 +928,8 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
        /* mark all the pages in active slots as dirty */
        for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
                ms = slots->memslots + slotnr;
+               if (!ms->dirty_bitmap)
+                       return -EINVAL;
                /*
                 * The second half of the bitmap is only used on x86,
                 * and would be wasted otherwise, so we put it to good
@@ -3888,7 +3890,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
        const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
                                    | KVM_S390_MEMOP_F_CHECK_ONLY;
 
-       if (mop->flags & ~supported_flags)
+       if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
                return -EINVAL;
 
        if (mop->size > MEM_OP_MAX_SIZE)
@@ -3956,7 +3958,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
        }
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;
-               struct kvm_s390_irq s390irq;
+               struct kvm_s390_irq s390irq = {};
 
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        return -EFAULT;
index d7052cbe984f81c02d203a6b34e7af6d658bd4f9..2617e426c7926206b6e5ba8430c052d0557d0a60 100644 (file)
@@ -841,7 +841,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                break;
        case BPF_ALU64 | BPF_NEG: /* dst = -dst */
                /* lcgr %dst,%dst */
-               EMIT4(0xb9130000, dst_reg, dst_reg);
+               EMIT4(0xb9030000, dst_reg, dst_reg);
                break;
        /*
         * BPF_FROM_BE/LE
@@ -1015,8 +1015,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                /* llgf %w1,map.max_entries(%b2) */
                EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
                              offsetof(struct bpf_array, map.max_entries));
-               /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
-               EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+               /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
+               EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
                                  REG_W1, 0, 0xa);
 
                /*
@@ -1042,8 +1042,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                 *         goto out;
                 */
 
-               /* sllg %r1,%b3,3: %r1 = index * 8 */
-               EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+               /* llgfr %r1,%b3: %r1 = (u32) index */
+               EMIT4(0xb9160000, REG_1, BPF_REG_3);
+               /* sllg %r1,%r1,3: %r1 *= 8 */
+               EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
                /* lg %r1,prog(%b2,%r1) */
                EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
                              REG_1, offsetof(struct bpf_array, ptrs));
index 6394b4f0a69be95d4a44b1c347ceb4f09dfdb166..f42feab25dcf1deddf7deeb1e54daf2d50eb9f14 100644 (file)
@@ -8,27 +8,19 @@ config SH_ALPHA_BOARD
        bool
 
 config SH_DEVICE_TREE
-       bool "Board Described by Device Tree"
+       bool
        select OF
        select OF_EARLY_FLATTREE
        select TIMER_OF
        select COMMON_CLK
        select GENERIC_CALIBRATE_DELAY
-       help
-         Select Board Described by Device Tree to build a kernel that
-         does not hard-code any board-specific knowledge but instead uses
-         a device tree blob provided by the boot-loader. You must enable
-         drivers for any hardware you want to use separately. At this
-         time, only boards based on the open-hardware J-Core processors
-         have sufficient driver coverage to use this option; do not
-         select it if you are using original SuperH hardware.
 
 config SH_JCORE_SOC
        bool "J-Core SoC"
-       depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2)
+       select SH_DEVICE_TREE
        select CLKSRC_JCORE_PIT
        select JCORE_AIC
-       default y if CPU_J2
+       depends on CPU_J2
        help
          Select this option to include drivers core components of the
          J-Core SoC, including interrupt controllers and timers.
index 98cb8c802b1a8cccafb1cd52d4717a149490792c..0ae60d6800004f332b25f6e447077b2ee8227184 100644 (file)
@@ -371,7 +371,11 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
 
 #define ioremap_nocache        ioremap
 #define ioremap_uc     ioremap
-#define iounmap                __iounmap
+
+static inline void iounmap(void __iomem *addr)
+{
+       __iounmap(addr);
+}
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
index d9ff3b42da7cb11a3e6d62ec39dcfc2c35ce40d2..2569ffc061f9c69e7896ef47eb0bc53ed9947768 100644 (file)
@@ -160,6 +160,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
        switch (sh_type) {
        case SH_BREAKPOINT_READ:
                *gen_type = HW_BREAKPOINT_R;
+               break;
        case SH_BREAKPOINT_WRITE:
                *gen_type = HW_BREAKPOINT_W;
                break;
index fca34b2177e28a055663055d01c4fb7d78420285..129fb1d1f1c5b346b06a2f5da43ce8eae858e940 100644 (file)
@@ -53,7 +53,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
         * when the new ->mm is used for the first time.
         */
        __switch_mm(&new->context.id);
-       down_write(&new->mmap_sem);
+       down_write_nested(&new->mmap_sem, 1);
        uml_setup_stubs(new);
        up_write(&new->mmap_sem);
 }
index ce0d0424a53d6a1d6ce3c53b160b53cd0a856476..4833dd7e2cc0311f6ed36cab657d7f7959787e48 100644 (file)
@@ -38,6 +38,7 @@ REALMODE_CFLAGS       := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
 
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
 export REALMODE_CFLAGS
 
index 8dd1d5ccae58023fb7cb1e3c0cf83255b6b1988b..0387d7a96c842b335ba836dfe4a44c1af5120f19 100644 (file)
@@ -17,6 +17,7 @@
 #include "pgtable.h"
 #include "../string.h"
 #include "../voffset.h"
+#include <asm/bootparam_utils.h>
 
 /*
  * WARNING!!
index a423bdb426862dc25c7df6ab8309b3d0d6108a38..47fd18db6b3bf54fa40e20f4856df7d07e8f15e0 100644 (file)
@@ -22,7 +22,6 @@
 #include <asm/page.h>
 #include <asm/boot.h>
 #include <asm/bootparam.h>
-#include <asm/bootparam_utils.h>
 
 #define BOOT_BOOT_H
 #include "../ctype.h"
index f8debf7aeb4c144b286c8a12570cad23d46decc8..76e1edf5bf12a0ffec3401fef160a2c5fe26e37f 100644 (file)
@@ -73,6 +73,8 @@ static unsigned long find_trampoline_placement(void)
 
        /* Find the first usable memory region under bios_start. */
        for (i = boot_params->e820_entries - 1; i >= 0; i--) {
+               unsigned long new = bios_start;
+
                entry = &boot_params->e820_table[i];
 
                /* Skip all entries above bios_start. */
@@ -85,15 +87,20 @@ static unsigned long find_trampoline_placement(void)
 
                /* Adjust bios_start to the end of the entry if needed. */
                if (bios_start > entry->addr + entry->size)
-                       bios_start = entry->addr + entry->size;
+                       new = entry->addr + entry->size;
 
                /* Keep bios_start page-aligned. */
-               bios_start = round_down(bios_start, PAGE_SIZE);
+               new = round_down(new, PAGE_SIZE);
 
                /* Skip the entry if it's too small. */
-               if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
+               if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
                        continue;
 
+               /* Protect against underflow. */
+               if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
+                       break;
+
+               bios_start = new;
                break;
        }
 
index c4428a176973311950429e0ba00c4ce13e9f6fe6..2622c0742c92d0e4a6b515d37c2a2f6b1a461758 100644 (file)
@@ -34,6 +34,14 @@ int memcmp(const void *s1, const void *s2, size_t len)
        return diff;
 }
 
+/*
+ * Clang may lower `memcmp == 0` to `bcmp == 0`.
+ */
+int bcmp(const void *s1, const void *s2, size_t len)
+{
+       return memcmp(s1, s2, len);
+}
+
 int strcmp(const char *str1, const char *str2)
 {
        const unsigned char *s1 = (const unsigned char *)str1;
index e699b2041665360024b4904809c514bf481ff801..578b5455334f01cbd7daba2c8acc3196caac2461 100644 (file)
@@ -329,6 +329,23 @@ For 32-bit we have the following conventions - kernel is built with
 
 #endif
 
+/*
+ * Mitigate Spectre v1 for conditional swapgs code paths.
+ *
+ * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
+ * prevent a speculative swapgs when coming from kernel space.
+ *
+ * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
+ * to prevent the swapgs from getting speculatively skipped when coming from
+ * user space.
+ */
+.macro FENCE_SWAPGS_USER_ENTRY
+       ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
+.endm
+.macro FENCE_SWAPGS_KERNEL_ENTRY
+       ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
+.endm
+
 #endif /* CONFIG_X86_64 */
 
 /*
index b5c2b1091b18dbe7ef6acc57a4cb60c2f61bc886..8059d4fd915c923423f0907923dd564489c86ebf 100644 (file)
@@ -1098,6 +1098,30 @@ ENTRY(irq_entries_start)
     .endr
 END(irq_entries_start)
 
+#ifdef CONFIG_X86_LOCAL_APIC
+       .align 8
+ENTRY(spurious_entries_start)
+    vector=FIRST_SYSTEM_VECTOR
+    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+       pushl   $(~vector+0x80)                 /* Note: always in signed byte range */
+    vector=vector+1
+       jmp     common_spurious
+       .align  8
+    .endr
+END(spurious_entries_start)
+
+common_spurious:
+       ASM_CLAC
+       addl    $-0x80, (%esp)                  /* Adjust vector into the [-256, -1] range */
+       SAVE_ALL switch_stacks=1
+       ENCODE_FRAME_POINTER
+       TRACE_IRQS_OFF
+       movl    %esp, %eax
+       call    smp_spurious_interrupt
+       jmp     ret_from_intr
+ENDPROC(common_spurious)
+#endif
+
 /*
  * the CPU automatically disables interrupts when executing an IRQ vector,
  * so IRQ-flags tracing has to follow that:
index c90e00db5c13c6e215daec6cbb8b9449d9e2da7b..ccb5e3486aee79b485d05f0e99628461fc9019ec 100644 (file)
@@ -438,6 +438,18 @@ ENTRY(irq_entries_start)
     .endr
 END(irq_entries_start)
 
+       .align 8
+ENTRY(spurious_entries_start)
+    vector=FIRST_SYSTEM_VECTOR
+    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+       UNWIND_HINT_IRET_REGS
+       pushq   $(~vector+0x80)                 /* Note: always in signed byte range */
+       jmp     common_spurious
+       .align  8
+       vector=vector+1
+    .endr
+END(spurious_entries_start)
+
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
        pushq %rax
@@ -570,7 +582,7 @@ ENTRY(interrupt_entry)
        testb   $3, CS-ORIG_RAX+8(%rsp)
        jz      1f
        SWAPGS
-
+       FENCE_SWAPGS_USER_ENTRY
        /*
         * Switch to the thread stack. The IRET frame and orig_ax are
         * on the stack, as well as the return address. RDI..R12 are
@@ -600,8 +612,10 @@ ENTRY(interrupt_entry)
        UNWIND_HINT_FUNC
 
        movq    (%rdi), %rdi
+       jmp     2f
 1:
-
+       FENCE_SWAPGS_KERNEL_ENTRY
+2:
        PUSH_AND_CLEAR_REGS save_ret=1
        ENCODE_FRAME_POINTER 8
 
@@ -634,10 +648,20 @@ _ASM_NOKPROBE(interrupt_entry)
 
 /* Interrupt entry/exit. */
 
-       /*
-        * The interrupt stubs push (~vector+0x80) onto the stack and
-        * then jump to common_interrupt.
-        */
+/*
+ * The interrupt stubs push (~vector+0x80) onto the stack and
+ * then jump to common_spurious/interrupt.
+ */
+common_spurious:
+       addq    $-0x80, (%rsp)                  /* Adjust vector to [-256, -1] range */
+       call    interrupt_entry
+       UNWIND_HINT_REGS indirect=1
+       call    smp_spurious_interrupt          /* rdi points to pt_regs */
+       jmp     ret_from_intr
+END(common_spurious)
+_ASM_NOKPROBE(common_spurious)
+
+/* common_interrupt is a hotpath. Align it */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
        addq    $-0x80, (%rsp)                  /* Adjust vector to [-256, -1] range */
@@ -1174,7 +1198,6 @@ idtentry stack_segment            do_stack_segment        has_error_code=1
 #ifdef CONFIG_XEN
 idtentry xennmi                        do_nmi                  has_error_code=0
 idtentry xendebug              do_debug                has_error_code=0
-idtentry xenint3               do_int3                 has_error_code=0
 #endif
 
 idtentry general_protection    do_general_protection   has_error_code=1
@@ -1219,6 +1242,13 @@ ENTRY(paranoid_entry)
         */
        SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 
+       /*
+        * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
+        * unconditional CR3 write, even in the PTI case.  So do an lfence
+        * to prevent GS speculation, regardless of whether PTI is enabled.
+        */
+       FENCE_SWAPGS_KERNEL_ENTRY
+
        ret
 END(paranoid_entry)
 
@@ -1269,6 +1299,7 @@ ENTRY(error_entry)
         * from user mode due to an IRET fault.
         */
        SWAPGS
+       FENCE_SWAPGS_USER_ENTRY
        /* We have user CR3.  Change to kernel CR3. */
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 
@@ -1290,6 +1321,8 @@ ENTRY(error_entry)
        CALL_enter_from_user_mode
        ret
 
+.Lerror_entry_done_lfence:
+       FENCE_SWAPGS_KERNEL_ENTRY
 .Lerror_entry_done:
        TRACE_IRQS_OFF
        ret
@@ -1308,7 +1341,7 @@ ENTRY(error_entry)
        cmpq    %rax, RIP+8(%rsp)
        je      .Lbstep_iret
        cmpq    $.Lgs_change, RIP+8(%rsp)
-       jne     .Lerror_entry_done
+       jne     .Lerror_entry_done_lfence
 
        /*
         * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
@@ -1316,6 +1349,7 @@ ENTRY(error_entry)
         * .Lgs_change's error handler with kernel gsbase.
         */
        SWAPGS
+       FENCE_SWAPGS_USER_ENTRY
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
        jmp .Lerror_entry_done
 
@@ -1330,6 +1364,7 @@ ENTRY(error_entry)
         * gsbase and CR3.  Switch to kernel gsbase and CR3:
         */
        SWAPGS
+       FENCE_SWAPGS_USER_ENTRY
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 
        /*
@@ -1421,6 +1456,7 @@ ENTRY(nmi)
 
        swapgs
        cld
+       FENCE_SWAPGS_USER_ENTRY
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
        movq    %rsp, %rdx
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
index e48ca3afa0912cc8bb03bd6dba84b0999abe1982..8a88e738f87db7d49265814cfc99e0d262677fa5 100644 (file)
@@ -29,12 +29,12 @@ extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
 extern time_t __vdso_time(time_t *t);
 
 #ifdef CONFIG_PARAVIRT_CLOCK
-extern u8 pvclock_page
+extern u8 pvclock_page[PAGE_SIZE]
        __attribute__((visibility("hidden")));
 #endif
 
 #ifdef CONFIG_HYPERV_TSCPAGE
-extern u8 hvclock_page
+extern u8 hvclock_page[PAGE_SIZE]
        __attribute__((visibility("hidden")));
 #endif
 
@@ -191,13 +191,24 @@ notrace static inline u64 vgetsns(int *mode)
 
        if (gtod->vclock_mode == VCLOCK_TSC)
                cycles = vread_tsc();
+
+       /*
+        * For any memory-mapped vclock type, we need to make sure that gcc
+        * doesn't cleverly hoist a load before the mode check.  Otherwise we
+        * might end up touching the memory-mapped page even if the vclock in
+        * question isn't enabled, which will segfault.  Hence the barriers.
+        */
 #ifdef CONFIG_PARAVIRT_CLOCK
-       else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
+       else if (gtod->vclock_mode == VCLOCK_PVCLOCK) {
+               barrier();
                cycles = vread_pvclock(mode);
+       }
 #endif
 #ifdef CONFIG_HYPERV_TSCPAGE
-       else if (gtod->vclock_mode == VCLOCK_HVCLOCK)
+       else if (gtod->vclock_mode == VCLOCK_HVCLOCK) {
+               barrier();
                cycles = vread_hvclock(mode);
+       }
 #endif
        else
                return 0;
index d50bb4dc065036181f7fc3b05182c7c8dce6b8ec..80c6d84cad67ba116c3e1881d4989f761a5adfc8 100644 (file)
@@ -672,10 +672,17 @@ fail:
 
        throttle = perf_event_overflow(event, &data, &regs);
 out:
-       if (throttle)
+       if (throttle) {
                perf_ibs_stop(event, 0);
-       else
-               perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
+       } else {
+               period >>= 4;
+
+               if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
+                   (*config & IBS_OP_CNT_CTL))
+                       period |= *config & IBS_OP_CUR_CNT_RAND;
+
+               perf_ibs_enable_event(perf_ibs, hwc, period);
+       }
 
        perf_event_update_userpage(event);
 
index 8671de126eac09e0a63358d72305ce0a5e9f4f31..baa7e36073f907b19a983459aa15e66a682b9f95 100644 (file)
@@ -210,15 +210,22 @@ static int amd_uncore_event_init(struct perf_event *event)
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;
 
+       if (event->cpu < 0)
+               return -EINVAL;
+
        /*
         * SliceMask and ThreadMask need to be set for certain L3 events in
         * Family 17h. For other events, the two fields do not affect the count.
         */
-       if (l3_mask)
-               hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+       if (l3_mask && is_llc_event(event)) {
+               int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4);
 
-       if (event->cpu < 0)
-               return -EINVAL;
+               if (smp_num_siblings > 1)
+                       thread += cpu_data(event->cpu).apicid & 1;
+
+               hwc->config |= (1ULL << (AMD64_L3_THREAD_SHIFT + thread) &
+                               AMD64_L3_THREAD_MASK) | AMD64_L3_SLICE_MASK;
+       }
 
        uncore = event_to_amd_uncore(event);
        if (!uncore)
index c8b0bf2b0d5e40b4eeae55f0437283080c1fe947..2dd8b0d64295a5f82c71a3360c3c5deae62a87ae 100644 (file)
@@ -2074,12 +2074,10 @@ static void intel_pmu_disable_event(struct perf_event *event)
        cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
        cpuc->intel_cp_status &= ~(1ull << hwc->idx);
 
-       if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+       if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                intel_pmu_disable_fixed(hwc);
-               return;
-       }
-
-       x86_pmu_disable_event(event);
+       else
+               x86_pmu_disable_event(event);
 
        /*
         * Needs to be called after x86_pmu_disable_event,
@@ -3321,6 +3319,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
        return left;
 }
 
+static u64 nhm_limit_period(struct perf_event *event, u64 left)
+{
+       return max(left, 32ULL);
+}
+
 PMU_FORMAT_ATTR(event, "config:0-7"    );
 PMU_FORMAT_ATTR(umask, "config:8-15"   );
 PMU_FORMAT_ATTR(edge,  "config:18"     );
@@ -4117,6 +4120,7 @@ __init int intel_pmu_init(void)
                x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                x86_pmu.extra_regs = intel_nehalem_extra_regs;
+               x86_pmu.limit_period = nhm_limit_period;
 
                x86_pmu.cpu_events = nhm_events_attrs;
 
index cc6dd4f78158b91a7b56f960ef8b5acebe4c8b34..42fa3974c421c01ec79ccb9809bf023689919519 100644 (file)
@@ -402,6 +402,16 @@ static inline bool is_freerunning_event(struct perf_event *event)
               (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
 }
 
+/* Check and reject invalid config */
+static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
+                                              struct perf_event *event)
+{
+       if (is_freerunning_event(event))
+               return 0;
+
+       return -EINVAL;
+}
+
 static inline void uncore_disable_box(struct intel_uncore_box *box)
 {
        if (box->pmu->type->ops->disable_box)
index b10e04387f380342c9b9427012b09b9534a347d1..8e4e8e423839a1dc77e4e18279ce98f07fc9f2cc 100644 (file)
@@ -3585,6 +3585,7 @@ static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
 
 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
        .read_counter           = uncore_msr_read_counter,
+       .hw_config              = uncore_freerunning_hw_config,
 };
 
 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
index ef5f29f913d7b064f1a086ac0674bebb4c8a845a..2f34d52753526bf30a6457c93c94f09fe334c7f0 100644 (file)
@@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
                 * Lower 12 bits encode the number of additional
                 * pages to flush (in addition to the 'cur' page).
                 */
-               if (diff >= HV_TLB_FLUSH_UNIT)
+               if (diff >= HV_TLB_FLUSH_UNIT) {
                        gva_list[gva_n] |= ~PAGE_MASK;
-               else if (diff)
+                       cur += HV_TLB_FLUSH_UNIT;
+               }  else if (diff) {
                        gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
+                       cur = end;
+               }
 
-               cur += HV_TLB_FLUSH_UNIT;
                gva_n++;
 
        } while (cur < end);
index 130e81e10fc7cfae6dd6e193965f65648a0d1a79..050368db9d35763d3afae17bf2a28cb2343f4548 100644 (file)
@@ -48,7 +48,7 @@ static inline void generic_apic_probe(void)
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
-extern unsigned int apic_verbosity;
+extern int apic_verbosity;
 extern int local_apic_timer_c2_ok;
 
 extern int disable_apic;
index ce84388e540c918a01e31599bc78881a5b9d2954..d266a4066289364e7af0733d42dcfecd6045802d 100644 (file)
@@ -54,7 +54,7 @@ static __always_inline void arch_atomic_add(int i, atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "addl %1,%0"
                     : "+m" (v->counter)
-                    : "ir" (i));
+                    : "ir" (i) : "memory");
 }
 
 /**
@@ -68,7 +68,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "subl %1,%0"
                     : "+m" (v->counter)
-                    : "ir" (i));
+                    : "ir" (i) : "memory");
 }
 
 /**
@@ -95,7 +95,7 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 static __always_inline void arch_atomic_inc(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "incl %0"
-                    : "+m" (v->counter));
+                    : "+m" (v->counter) :: "memory");
 }
 #define arch_atomic_inc arch_atomic_inc
 
@@ -108,7 +108,7 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
 static __always_inline void arch_atomic_dec(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "decl %0"
-                    : "+m" (v->counter));
+                    : "+m" (v->counter) :: "memory");
 }
 #define arch_atomic_dec arch_atomic_dec
 
index 5f851d92eecd9ee8eaad86c6b002937633e9144f..55ca027f8c1c3c6c2a03ae17877085530b977f89 100644 (file)
@@ -45,7 +45,7 @@ static __always_inline void arch_atomic64_add(long i, atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "addq %1,%0"
                     : "=m" (v->counter)
-                    : "er" (i), "m" (v->counter));
+                    : "er" (i), "m" (v->counter) : "memory");
 }
 
 /**
@@ -59,7 +59,7 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "subq %1,%0"
                     : "=m" (v->counter)
-                    : "er" (i), "m" (v->counter));
+                    : "er" (i), "m" (v->counter) : "memory");
 }
 
 /**
@@ -87,7 +87,7 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "incq %0"
                     : "=m" (v->counter)
-                    : "m" (v->counter));
+                    : "m" (v->counter) : "memory");
 }
 #define arch_atomic64_inc arch_atomic64_inc
 
@@ -101,7 +101,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "decq %0"
                     : "=m" (v->counter)
-                    : "m" (v->counter));
+                    : "m" (v->counter) : "memory");
 }
 #define arch_atomic64_dec arch_atomic64_dec
 
index 14de0432d288414bd1437e44b8cb13facc6f12e9..84f848c2541a6e5febb218fc64d209270c80e9bc 100644 (file)
@@ -80,8 +80,8 @@ do {                                                                  \
 })
 
 /* Atomic operations are already serializing on x86 */
-#define __smp_mb__before_atomic()      barrier()
-#define __smp_mb__after_atomic()       barrier()
+#define __smp_mb__before_atomic()      do { } while (0)
+#define __smp_mb__after_atomic()       do { } while (0)
 
 #include <asm-generic/barrier.h>
 
index a07ffd23e4dd67d3e182bd803eb868eaef1bcdf5..8fa49cf1211d36ec9547363d15cb2ee3c73d1909 100644 (file)
  * Note: efi_info is commonly left uninitialized, but that field has a
  * private magic, so it is better to leave it unchanged.
  */
+
+#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
+
+#define BOOT_PARAM_PRESERVE(struct_member)                             \
+       {                                                               \
+               .start = offsetof(struct boot_params, struct_member),   \
+               .len   = sizeof_mbr(struct boot_params, struct_member), \
+       }
+
+struct boot_params_to_save {
+       unsigned int start;
+       unsigned int len;
+};
+
 static void sanitize_boot_params(struct boot_params *boot_params)
 {
        /* 
@@ -36,19 +50,41 @@ static void sanitize_boot_params(struct boot_params *boot_params)
         */
        if (boot_params->sentinel) {
                /* fields in boot_params are left uninitialized, clear them */
-               memset(&boot_params->ext_ramdisk_image, 0,
-                      (char *)&boot_params->efi_info -
-                       (char *)&boot_params->ext_ramdisk_image);
-               memset(&boot_params->kbd_status, 0,
-                      (char *)&boot_params->hdr -
-                      (char *)&boot_params->kbd_status);
-               memset(&boot_params->_pad7[0], 0,
-                      (char *)&boot_params->edd_mbr_sig_buffer[0] -
-                       (char *)&boot_params->_pad7[0]);
-               memset(&boot_params->_pad8[0], 0,
-                      (char *)&boot_params->eddbuf[0] -
-                       (char *)&boot_params->_pad8[0]);
-               memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+               static struct boot_params scratch;
+               char *bp_base = (char *)boot_params;
+               char *save_base = (char *)&scratch;
+               int i;
+
+               const struct boot_params_to_save to_save[] = {
+                       BOOT_PARAM_PRESERVE(screen_info),
+                       BOOT_PARAM_PRESERVE(apm_bios_info),
+                       BOOT_PARAM_PRESERVE(tboot_addr),
+                       BOOT_PARAM_PRESERVE(ist_info),
+                       BOOT_PARAM_PRESERVE(hd0_info),
+                       BOOT_PARAM_PRESERVE(hd1_info),
+                       BOOT_PARAM_PRESERVE(sys_desc_table),
+                       BOOT_PARAM_PRESERVE(olpc_ofw_header),
+                       BOOT_PARAM_PRESERVE(efi_info),
+                       BOOT_PARAM_PRESERVE(alt_mem_k),
+                       BOOT_PARAM_PRESERVE(scratch),
+                       BOOT_PARAM_PRESERVE(e820_entries),
+                       BOOT_PARAM_PRESERVE(eddbuf_entries),
+                       BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
+                       BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+                       BOOT_PARAM_PRESERVE(secure_boot),
+                       BOOT_PARAM_PRESERVE(hdr),
+                       BOOT_PARAM_PRESERVE(e820_table),
+                       BOOT_PARAM_PRESERVE(eddbuf),
+               };
+
+               memset(&scratch, 0, sizeof(scratch));
+
+               for (i = 0; i < ARRAY_SIZE(to_save); i++) {
+                       memcpy(save_base + to_save[i].start,
+                              bp_base + to_save[i].start, to_save[i].len);
+               }
+
+               memcpy(boot_params, save_base, sizeof(*boot_params));
        }
 }
 
index ce95b8cbd2296b1e33de2e0f520a00f3981e3f23..68889ace9c4c6b9f8e12752f8d66039ef02e375a 100644 (file)
@@ -22,8 +22,8 @@ enum cpuid_leafs
        CPUID_LNX_3,
        CPUID_7_0_EBX,
        CPUID_D_1_EAX,
-       CPUID_F_0_EDX,
-       CPUID_F_1_EDX,
+       CPUID_LNX_4,
+       CPUID_DUMMY,
        CPUID_8000_0008_EBX,
        CPUID_6_EAX,
        CPUID_8000_000A_EDX,
index 69037da75ea024602c66d4eb7602abfd4eec0141..759f0a1766124513cf015cf2f2a547710477b9f8 100644 (file)
 #define X86_FEATURE_BMI1               ( 9*32+ 3) /* 1st group bit manipulation extensions */
 #define X86_FEATURE_HLE                        ( 9*32+ 4) /* Hardware Lock Elision */
 #define X86_FEATURE_AVX2               ( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_FDP_EXCPTN_ONLY    ( 9*32+ 6) /* "" FPU data pointer updated only on x87 exceptions */
 #define X86_FEATURE_SMEP               ( 9*32+ 7) /* Supervisor Mode Execution Protection */
 #define X86_FEATURE_BMI2               ( 9*32+ 8) /* 2nd group bit manipulation extensions */
 #define X86_FEATURE_ERMS               ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
 #define X86_FEATURE_INVPCID            ( 9*32+10) /* Invalidate Processor Context ID */
 #define X86_FEATURE_RTM                        ( 9*32+11) /* Restricted Transactional Memory */
 #define X86_FEATURE_CQM                        ( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_ZERO_FCS_FDS       ( 9*32+13) /* "" Zero out FPU CS and FPU DS */
 #define X86_FEATURE_MPX                        ( 9*32+14) /* Memory Protection Extension */
 #define X86_FEATURE_RDT_A              ( 9*32+15) /* Resource Director Technology Allocation */
 #define X86_FEATURE_AVX512F            ( 9*32+16) /* AVX-512 Foundation */
 #define X86_FEATURE_XGETBV1            (10*32+ 2) /* XGETBV with ECX = 1 instruction */
 #define X86_FEATURE_XSAVES             (10*32+ 3) /* XSAVES/XRSTORS instructions */
 
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
-#define X86_FEATURE_CQM_LLC            (11*32+ 1) /* LLC QoS if 1 */
-
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
-#define X86_FEATURE_CQM_OCCUP_LLC      (12*32+ 0) /* LLC occupancy monitoring */
-#define X86_FEATURE_CQM_MBM_TOTAL      (12*32+ 1) /* LLC Total MBM monitoring */
-#define X86_FEATURE_CQM_MBM_LOCAL      (12*32+ 2) /* LLC Local MBM monitoring */
+/*
+ * Extended auxiliary flags: Linux defined - for features scattered in various
+ * CPUID levels like 0xf, etc.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+#define X86_FEATURE_CQM_LLC            (11*32+ 0) /* LLC QoS if 1 */
+#define X86_FEATURE_CQM_OCCUP_LLC      (11*32+ 1) /* LLC occupancy monitoring */
+#define X86_FEATURE_CQM_MBM_TOTAL      (11*32+ 2) /* LLC Total MBM monitoring */
+#define X86_FEATURE_CQM_MBM_LOCAL      (11*32+ 3) /* LLC Local MBM monitoring */
+#define X86_FEATURE_FENCE_SWAPGS_USER  (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
+#define X86_FEATURE_FENCE_SWAPGS_KERNEL        (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
 
 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
 #define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
 #define X86_BUG_L1TF                   X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
 #define X86_BUG_MDS                    X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
 #define X86_BUG_MSBDS_ONLY             X86_BUG(20) /* CPU is only affected by the  MSDBS variant of BUG_MDS */
+#define X86_BUG_SWAPGS                 X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index 32e666e1231e77f128252b2f9354f708081cafdb..cbd97e22d2f3197053f785dd45f59e08448e7d6a 100644 (file)
@@ -150,8 +150,11 @@ extern char irq_entries_start[];
 #define trace_irq_entries_start irq_entries_start
 #endif
 
+extern char spurious_entries_start[];
+
 #define VECTOR_UNUSED          NULL
-#define VECTOR_RETRIGGERED     ((void *)~0UL)
+#define VECTOR_SHUTDOWN                ((void *)~0UL)
+#define VECTOR_RETRIGGERED     ((void *)~1UL)
 
 typedef struct irq_desc* vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
index 2e38fb82b91d1286f1c4adc13a8741ab6dc828dd..5d0b72f2814029d8cb3b970e51f3e2c78056d9ef 100644 (file)
 #define INTEL_FAM6_ICELAKE_XEON_D      0x6C
 #define INTEL_FAM6_ICELAKE_DESKTOP     0x7D
 #define INTEL_FAM6_ICELAKE_MOBILE      0x7E
+#define INTEL_FAM6_ICELAKE_NNPI                0x9D
+
+#define INTEL_FAM6_TIGERLAKE_L         0x8C
+#define INTEL_FAM6_TIGERLAKE           0x8D
 
 /* "Small Core" Processors (Atom) */
 
index 7014dba23d20ceda17a760ad6dc9ee4f8ec020ac..0d3f5cf3ff3eae84e05d723bd76fdc44492d83dc 100644 (file)
@@ -117,7 +117,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 }
 
 #define KVM_PERMILLE_MMU_PAGES 20
-#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_MIN_ALLOC_MMU_PAGES 64UL
 #define KVM_MMU_HASH_SHIFT 12
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
@@ -784,6 +784,9 @@ struct kvm_hv {
        u64 hv_reenlightenment_control;
        u64 hv_tsc_emulation_control;
        u64 hv_tsc_emulation_status;
+
+       /* How many vCPUs have VP index != vCPU index */
+       atomic_t num_mismatched_vp_indexes;
 };
 
 enum kvm_irqchip_mode {
@@ -793,9 +796,9 @@ enum kvm_irqchip_mode {
 };
 
 struct kvm_arch {
-       unsigned int n_used_mmu_pages;
-       unsigned int n_requested_mmu_pages;
-       unsigned int n_max_mmu_pages;
+       unsigned long n_used_mmu_pages;
+       unsigned long n_requested_mmu_pages;
+       unsigned long n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
        unsigned long mmu_valid_gen;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
@@ -1113,6 +1116,7 @@ struct kvm_x86_ops {
        int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
                              uint32_t guest_irq, bool set);
        void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+       bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
 
        int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
        void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
@@ -1197,8 +1201,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 bool pdptrs_changed(struct kvm_vcpu *vcpu);
@@ -1427,25 +1431,29 @@ enum {
 #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
 #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
 
+asmlinkage void __noreturn kvm_spurious_fault(void);
+
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
- * Trap the fault and ignore the instruction if that happens.
+ * Usually after catching the fault we just panic; during reboot
+ * instead the instruction is ignored.
  */
-asmlinkage void kvm_spurious_fault(void);
-
-#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)     \
-       "666: " insn "\n\t" \
-       "668: \n\t"                           \
-       ".pushsection .fixup, \"ax\" \n" \
-       "667: \n\t" \
-       cleanup_insn "\n\t"                   \
-       "cmpb $0, kvm_rebooting \n\t"         \
-       "jne 668b \n\t"                       \
-       __ASM_SIZE(push) " $666b \n\t"        \
-       "jmp kvm_spurious_fault \n\t"         \
-       ".popsection \n\t" \
-       _ASM_EXTABLE(666b, 667b)
+#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)             \
+       "666: \n\t"                                                     \
+       insn "\n\t"                                                     \
+       "jmp    668f \n\t"                                              \
+       "667: \n\t"                                                     \
+       "call   kvm_spurious_fault \n\t"                                \
+       "668: \n\t"                                                     \
+       ".pushsection .fixup, \"ax\" \n\t"                              \
+       "700: \n\t"                                                     \
+       cleanup_insn "\n\t"                                             \
+       "cmpb   $0, kvm_rebooting\n\t"                                  \
+       "je     667b \n\t"                                              \
+       "jmp    668b \n\t"                                              \
+       ".popsection \n\t"                                              \
+       _ASM_EXTABLE(666b, 700b)
 
 #define __kvm_handle_fault_on_reboot(insn)             \
        ____kvm_handle_fault_on_reboot(insn, "")
index f85f43db922545d1a61344679c0a135e51aee483..a1d22e4428f63732330c4d68b146a58228dd63b2 100644 (file)
 #define MSR_AMD64_PATCH_LEVEL          0x0000008b
 #define MSR_AMD64_TSC_RATIO            0xc0000104
 #define MSR_AMD64_NB_CFG               0xc001001f
+#define MSR_AMD64_CPUID_FN_1           0xc0011004
 #define MSR_AMD64_PATCH_LOADER         0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH       0xc0010140
 #define MSR_AMD64_OSVW_STATUS          0xc0010141
index eb0f80ce8524d415884d11ecbe555e6a778edfa5..3aa82deeab5ae862111e242138267bb53ddc622a 100644 (file)
@@ -21,7 +21,7 @@
 #define MWAIT_ECX_INTERRUPT_BREAK      0x1
 #define MWAITX_ECX_TIMER_ENABLE                BIT(1)
 #define MWAITX_MAX_LOOPS               ((u32)-1)
-#define MWAITX_DISABLE_CSTATES         0xf
+#define MWAITX_DISABLE_CSTATES         0xf0
 
 static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
index 599c273f5d006a904e70e5639c906b482413d37d..28cb2b31527a3c5fe5e45f29fff540acef65c73c 100644 (file)
        "       lfence;\n"                                      \
        "       jmp    902b;\n"                                 \
        "       .align 16\n"                                    \
-       "903:   addl   $4, %%esp;\n"                            \
+       "903:   lea    4(%%esp), %%esp;\n"                      \
        "       pushl  %[thunk_target];\n"                      \
        "       ret;\n"                                         \
        "       .align 16\n"                                    \
index e375d4266b53e35bbe2383d7633d478f29871c4f..a04677038872c89f0cf8c0f00d4703630d8a4080 100644 (file)
@@ -768,6 +768,7 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
            PV_RESTORE_ALL_CALLER_REGS                                  \
            FRAME_END                                                   \
            "ret;"                                                      \
+           ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
            ".popsection")
 
 /* Get a reference to a callee-save function */
index 78241b736f2a04aa4ccac261727ecd8798042cda..f6c4915a863e03313d4ded9b7722eb6d74e52cd8 100644 (file)
@@ -209,16 +209,20 @@ struct x86_pmu_capability {
 #define IBSCTL_LVT_OFFSET_VALID                (1ULL<<8)
 #define IBSCTL_LVT_OFFSET_MASK         0x0F
 
-/* ibs fetch bits/masks */
+/* IBS fetch bits/masks */
 #define IBS_FETCH_RAND_EN      (1ULL<<57)
 #define IBS_FETCH_VAL          (1ULL<<49)
 #define IBS_FETCH_ENABLE       (1ULL<<48)
 #define IBS_FETCH_CNT          0xFFFF0000ULL
 #define IBS_FETCH_MAX_CNT      0x0000FFFFULL
 
-/* ibs op bits/masks */
-/* lower 4 bits of the current count are ignored: */
-#define IBS_OP_CUR_CNT         (0xFFFF0ULL<<32)
+/*
+ * IBS op bits/masks
+ * The lower 7 bits of the current count are random bits
+ * preloaded by hardware and ignored in software
+ */
+#define IBS_OP_CUR_CNT         (0xFFF80ULL<<32)
+#define IBS_OP_CUR_CNT_RAND    (0x0007FULL<<32)
 #define IBS_OP_CNT_CTL         (1ULL<<19)
 #define IBS_OP_VAL             (1ULL<<18)
 #define IBS_OP_ENABLE          (1ULL<<17)
index afbc87206886e76d73995fe4be47c3568485ea76..b771bb3d159bc8ce27f7897a01f3af4d8be2a78a 100644 (file)
@@ -40,7 +40,7 @@ asmlinkage void simd_coprocessor_error(void);
 asmlinkage void xen_divide_error(void);
 asmlinkage void xen_xennmi(void);
 asmlinkage void xen_xendebug(void);
-asmlinkage void xen_xenint3(void);
+asmlinkage void xen_int3(void);
 asmlinkage void xen_overflow(void);
 asmlinkage void xen_bounds(void);
 asmlinkage void xen_invalid_op(void);
index 4111edb3188e2531e04167ab41def2aa6422ec87..9718303410614a8a503408b290d5617bc656d0c5 100644 (file)
@@ -451,8 +451,10 @@ do {                                                                       \
 ({                                                                     \
        int __gu_err;                                                   \
        __inttype(*(ptr)) __gu_val;                                     \
+       __typeof__(ptr) __gu_ptr = (ptr);                               \
+       __typeof__(size) __gu_size = (size);                            \
        __uaccess_begin_nospec();                                       \
-       __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
+       __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT);      \
        __uaccess_end();                                                \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __builtin_expect(__gu_err, 0);                                  \
index 84132eddb5a858ff97b4c83a7ac6708eb49f4ab1..dfdd1caf0d55db514bacc3f3b881fc88b14f36b9 100644 (file)
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 /*
  * Debug level, exported for io_apic.c
  */
-unsigned int apic_verbosity;
+int apic_verbosity;
 
 int pic_mode;
 
@@ -715,7 +715,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
 
 /*
- * Temporary interrupt handler.
+ * Temporary interrupt handler and polled calibration function.
  */
 static void __init lapic_cal_handler(struct clock_event_device *dev)
 {
@@ -799,7 +799,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
 static int __init calibrate_APIC_clock(void)
 {
        struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
-       void (*real_handler)(struct clock_event_device *dev);
+       u64 tsc_perj = 0, tsc_start = 0;
+       unsigned long jif_start;
        unsigned long deltaj;
        long delta, deltatsc;
        int pm_referenced = 0;
@@ -830,28 +831,64 @@ static int __init calibrate_APIC_clock(void)
        apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
                    "calibrating APIC timer ...\n");
 
+       /*
+        * There are platforms w/o global clockevent devices. Instead of
+        * making the calibration conditional on that, use a polling based
+        * approach everywhere.
+        */
        local_irq_disable();
 
-       /* Replace the global interrupt handler */
-       real_handler = global_clock_event->event_handler;
-       global_clock_event->event_handler = lapic_cal_handler;
-
        /*
         * Setup the APIC counter to maximum. There is no way the lapic
         * can underflow in the 100ms detection time frame
         */
        __setup_APIC_LVTT(0xffffffff, 0, 0);
 
-       /* Let the interrupts run */
+       /*
+        * Methods to terminate the calibration loop:
+        *  1) Global clockevent if available (jiffies)
+        *  2) TSC if available and frequency is known
+        */
+       jif_start = READ_ONCE(jiffies);
+
+       if (tsc_khz) {
+               tsc_start = rdtsc();
+               tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
+       }
+
+       /*
+        * Enable interrupts so the tick can fire, if a global
+        * clockevent device is available
+        */
        local_irq_enable();
 
-       while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
-               cpu_relax();
+       while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
+               /* Wait for a tick to elapse */
+               while (1) {
+                       if (tsc_khz) {
+                               u64 tsc_now = rdtsc();
+                               if ((tsc_now - tsc_start) >= tsc_perj) {
+                                       tsc_start += tsc_perj;
+                                       break;
+                               }
+                       } else {
+                               unsigned long jif_now = READ_ONCE(jiffies);
 
-       local_irq_disable();
+                               if (time_after(jif_now, jif_start)) {
+                                       jif_start = jif_now;
+                                       break;
+                               }
+                       }
+                       cpu_relax();
+               }
+
+               /* Invoke the calibration routine */
+               local_irq_disable();
+               lapic_cal_handler(NULL);
+               local_irq_enable();
+       }
 
-       /* Restore the real event handler */
-       global_clock_event->event_handler = real_handler;
+       local_irq_disable();
 
        /* Build delta t1-t2 as apic timer counts down */
        delta = lapic_cal_t1 - lapic_cal_t2;
@@ -904,10 +941,11 @@ static int __init calibrate_APIC_clock(void)
        levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
 
        /*
-        * PM timer calibration failed or not turned on
-        * so lets try APIC timer based calibration
+        * PM timer calibration failed or not turned on so lets try APIC
+        * timer based calibration, if a global clockevent device is
+        * available.
         */
-       if (!pm_referenced) {
+       if (!pm_referenced && global_clock_event) {
                apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
 
                /*
@@ -1412,53 +1450,72 @@ static void lapic_setup_esr(void)
                        oldvalue, value);
 }
 
-static void apic_pending_intr_clear(void)
+#define APIC_IR_REGS           APIC_ISR_NR
+#define APIC_IR_BITS           (APIC_IR_REGS * 32)
+#define APIC_IR_MAPSIZE                (APIC_IR_BITS / BITS_PER_LONG)
+
+union apic_ir {
+       unsigned long   map[APIC_IR_MAPSIZE];
+       u32             regs[APIC_IR_REGS];
+};
+
+static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
 {
-       long long max_loops = cpu_khz ? cpu_khz : 1000000;
-       unsigned long long tsc = 0, ntsc;
-       unsigned int queued;
-       unsigned long value;
-       int i, j, acked = 0;
+       int i, bit;
+
+       /* Read the IRRs */
+       for (i = 0; i < APIC_IR_REGS; i++)
+               irr->regs[i] = apic_read(APIC_IRR + i * 0x10);
+
+       /* Read the ISRs */
+       for (i = 0; i < APIC_IR_REGS; i++)
+               isr->regs[i] = apic_read(APIC_ISR + i * 0x10);
 
-       if (boot_cpu_has(X86_FEATURE_TSC))
-               tsc = rdtsc();
        /*
-        * After a crash, we no longer service the interrupts and a pending
-        * interrupt from previous kernel might still have ISR bit set.
-        *
-        * Most probably by now CPU has serviced that pending interrupt and
-        * it might not have done the ack_APIC_irq() because it thought,
-        * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
-        * does not clear the ISR bit and cpu thinks it has already serivced
-        * the interrupt. Hence a vector might get locked. It was noticed
-        * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
+        * If the ISR map is not empty. ACK the APIC and run another round
+        * to verify whether a pending IRR has been unblocked and turned
+        * into a ISR.
         */
-       do {
-               queued = 0;
-               for (i = APIC_ISR_NR - 1; i >= 0; i--)
-                       queued |= apic_read(APIC_IRR + i*0x10);
-
-               for (i = APIC_ISR_NR - 1; i >= 0; i--) {
-                       value = apic_read(APIC_ISR + i*0x10);
-                       for_each_set_bit(j, &value, 32) {
-                               ack_APIC_irq();
-                               acked++;
-                       }
-               }
-               if (acked > 256) {
-                       pr_err("LAPIC pending interrupts after %d EOI\n", acked);
-                       break;
-               }
-               if (queued) {
-                       if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
-                               ntsc = rdtsc();
-                               max_loops = (cpu_khz << 10) - (ntsc - tsc);
-                       } else {
-                               max_loops--;
-                       }
-               }
-       } while (queued && max_loops > 0);
-       WARN_ON(max_loops <= 0);
+       if (!bitmap_empty(isr->map, APIC_IR_BITS)) {
+               /*
+                * There can be multiple ISR bits set when a high priority
+                * interrupt preempted a lower priority one. Issue an ACK
+                * per set bit.
+                */
+               for_each_set_bit(bit, isr->map, APIC_IR_BITS)
+                       ack_APIC_irq();
+               return true;
+       }
+
+       return !bitmap_empty(irr->map, APIC_IR_BITS);
+}
+
+/*
+ * After a crash, we no longer service the interrupts and a pending
+ * interrupt from previous kernel might still have ISR bit set.
+ *
+ * Most probably by now the CPU has serviced that pending interrupt and it
+ * might not have done the ack_APIC_irq() because it thought, interrupt
+ * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear
+ * the ISR bit and cpu thinks it has already serivced the interrupt. Hence
+ * a vector might get locked. It was noticed for timer irq (vector
+ * 0x31). Issue an extra EOI to clear ISR.
+ *
+ * If there are pending IRR bits they turn into ISR bits after a higher
+ * priority ISR bit has been acked.
+ */
+static void apic_pending_intr_clear(void)
+{
+       union apic_ir irr, isr;
+       unsigned int i;
+
+       /* 512 loops are way oversized and give the APIC a chance to obey. */
+       for (i = 0; i < 512; i++) {
+               if (!apic_check_and_ack(&irr, &isr))
+                       return;
+       }
+       /* Dump the IRR/ISR content if that failed */
+       pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map);
 }
 
 /**
@@ -1481,6 +1538,14 @@ static void setup_local_APIC(void)
                return;
        }
 
+       /*
+        * If this comes from kexec/kcrash the APIC might be enabled in
+        * SPIV. Soft disable it before doing further initialization.
+        */
+       value = apic_read(APIC_SPIV);
+       value &= ~APIC_SPIV_APIC_ENABLED;
+       apic_write(APIC_SPIV, value);
+
 #ifdef CONFIG_X86_32
        /* Pound the ESR really hard over the head with a big hammer - mbligh */
        if (lapic_is_integrated() && apic->disable_esr) {
@@ -1526,6 +1591,7 @@ static void setup_local_APIC(void)
        value &= ~APIC_TPRI_MASK;
        apic_write(APIC_TASKPRI, value);
 
+       /* Clear eventually stale ISR/IRR bits */
        apic_pending_intr_clear();
 
        /*
@@ -2026,21 +2092,32 @@ __visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
        entering_irq();
        trace_spurious_apic_entry(vector);
 
+       inc_irq_stat(irq_spurious_count);
+
+       /*
+        * If this is a spurious interrupt then do not acknowledge
+        */
+       if (vector == SPURIOUS_APIC_VECTOR) {
+               /* See SDM vol 3 */
+               pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n",
+                       smp_processor_id());
+               goto out;
+       }
+
        /*
-        * Check if this really is a spurious interrupt and ACK it
-        * if it is a vectored one.  Just in case...
-        * Spurious interrupts should not be ACKed.
+        * If it is a vectored one, verify it's set in the ISR. If set,
+        * acknowledge it.
         */
        v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
-       if (v & (1 << (vector & 0x1f)))
+       if (v & (1 << (vector & 0x1f))) {
+               pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
+                       vector, smp_processor_id());
                ack_APIC_irq();
-
-       inc_irq_stat(irq_spurious_count);
-
-       /* see sw-dev-man vol 3, chapter 7.4.13.5 */
-       pr_info("spurious APIC interrupt through vector %02x on CPU#%d, "
-               "should never happen.\n", vector, smp_processor_id());
-
+       } else {
+               pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
+                       vector, smp_processor_id());
+       }
+out:
        trace_spurious_apic_exit(vector);
        exiting_irq();
 }
index afee386ff711e95132dd5c01e79317ac22bd9a56..caedd8d60d3610b8b9bc11f5669de16521b3ac2a 100644 (file)
@@ -38,32 +38,12 @@ static int bigsmp_early_logical_apicid(int cpu)
        return early_per_cpu(x86_cpu_to_apicid, cpu);
 }
 
-static inline unsigned long calculate_ldr(int cpu)
-{
-       unsigned long val, id;
-
-       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-       id = per_cpu(x86_bios_cpu_apicid, cpu);
-       val |= SET_APIC_LOGICAL_ID(id);
-
-       return val;
-}
-
 /*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
+ * bigsmp enables physical destination mode
+ * and doesn't use LDR and DFR
  */
 static void bigsmp_init_apic_ldr(void)
 {
-       unsigned long val;
-       int cpu = smp_processor_id();
-
-       apic_write(APIC_DFR, APIC_DFR_FLAT);
-       val = calculate_ldr(cpu);
-       apic_write(APIC_LDR, val);
 }
 
 static void bigsmp_setup_apic_routing(void)
index ff0d14cd9e827ef78b1ee4c8fdcebf08c56dfe71..ab22eded61d2587fc5cdade2e4ac31d3f37360b8 100644 (file)
@@ -1891,6 +1891,50 @@ static int ioapic_set_affinity(struct irq_data *irq_data,
        return ret;
 }
 
+/*
+ * Interrupt shutdown masks the ioapic pin, but the interrupt might already
+ * be in flight, but not yet serviced by the target CPU. That means
+ * __synchronize_hardirq() would return and claim that everything is calmed
+ * down. So free_irq() would proceed and deactivate the interrupt and free
+ * resources.
+ *
+ * Once the target CPU comes around to service it it will find a cleared
+ * vector and complain. While the spurious interrupt is harmless, the full
+ * release of resources might prevent the interrupt from being acknowledged
+ * which keeps the hardware in a weird state.
+ *
+ * Verify that the corresponding Remote-IRR bits are clear.
+ */
+static int ioapic_irq_get_chip_state(struct irq_data *irqd,
+                                  enum irqchip_irq_state which,
+                                  bool *state)
+{
+       struct mp_chip_data *mcd = irqd->chip_data;
+       struct IO_APIC_route_entry rentry;
+       struct irq_pin_list *p;
+
+       if (which != IRQCHIP_STATE_ACTIVE)
+               return -EINVAL;
+
+       *state = false;
+       raw_spin_lock(&ioapic_lock);
+       for_each_irq_pin(p, mcd->irq_2_pin) {
+               rentry = __ioapic_read_entry(p->apic, p->pin);
+               /*
+                * The remote IRR is only valid in level trigger mode. It's
+                * meaning is undefined for edge triggered interrupts and
+                * irrelevant because the IO-APIC treats them as fire and
+                * forget.
+                */
+               if (rentry.irr && rentry.trigger) {
+                       *state = true;
+                       break;
+               }
+       }
+       raw_spin_unlock(&ioapic_lock);
+       return 0;
+}
+
 static struct irq_chip ioapic_chip __read_mostly = {
        .name                   = "IO-APIC",
        .irq_startup            = startup_ioapic_irq,
@@ -1900,6 +1944,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
        .irq_eoi                = ioapic_ack_level,
        .irq_set_affinity       = ioapic_set_affinity,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
+       .irq_get_irqchip_state  = ioapic_irq_get_chip_state,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -1912,6 +1957,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
        .irq_eoi                = ioapic_ir_ack_level,
        .irq_set_affinity       = ioapic_set_affinity,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
+       .irq_get_irqchip_state  = ioapic_irq_get_chip_state,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -2386,7 +2432,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
         * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
         * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
         */
-       return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
+       if (!ioapic_initialized)
+               return gsi_top;
+       /*
+        * For DT enabled machines ioapic_dynirq_base is irrelevant and not
+        * updated. So simply return @from if ioapic_dynirq_base == 0.
+        */
+       return ioapic_dynirq_base ? : from;
 }
 
 #ifdef CONFIG_X86_32
index 652e7ffa9b9de5d616b8da67624466c00ad14722..c352ca2e1456f56150baff018416082e0b90db8a 100644 (file)
@@ -342,7 +342,7 @@ static void clear_irq_vector(struct irq_data *irqd)
        trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
                           apicd->prev_cpu);
 
-       per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED;
+       per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
        irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
        apicd->vector = 0;
 
@@ -351,7 +351,7 @@ static void clear_irq_vector(struct irq_data *irqd)
        if (!vector)
                return;
 
-       per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
+       per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
        irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
        apicd->prev_vector = 0;
        apicd->move_in_progress = 0;
@@ -400,6 +400,17 @@ static int activate_reserved(struct irq_data *irqd)
                if (!irqd_can_reserve(irqd))
                        apicd->can_reserve = false;
        }
+
+       /*
+        * Check to ensure that the effective affinity mask is a subset
+        * the user supplied affinity mask, and warn the user if it is not
+        */
+       if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
+                           irq_data_get_affinity_mask(irqd))) {
+               pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
+                       irqd->irq);
+       }
+
        return ret;
 }
 
index 7685444a106bb29a3994a5d85066e60b2b4c0d09..145517934171e17b9e6295850361859577a043bd 100644 (file)
@@ -158,7 +158,8 @@ static int x2apic_dead_cpu(unsigned int dead_cpu)
 {
        struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
 
-       cpumask_clear_cpu(dead_cpu, &cmsk->mask);
+       if (cmsk)
+               cpumask_clear_cpu(dead_cpu, &cmsk->mask);
        free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
        return 0;
 }
index da1f5e78363e91f4d9e53fd59e4344531f372553..f86f912ce215867cfb54a5c827c1947cc4bba4d3 100644 (file)
@@ -799,6 +799,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
        msr_set_bit(MSR_AMD64_DE_CFG, 31);
 }
 
+static bool rdrand_force;
+
+static int __init rdrand_cmdline(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       if (!strcmp(str, "force"))
+               rdrand_force = true;
+       else
+               return -EINVAL;
+
+       return 0;
+}
+early_param("rdrand", rdrand_cmdline);
+
+static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
+{
+       /*
+        * Saving of the MSR used to hide the RDRAND support during
+        * suspend/resume is done by arch/x86/power/cpu.c, which is
+        * dependent on CONFIG_PM_SLEEP.
+        */
+       if (!IS_ENABLED(CONFIG_PM_SLEEP))
+               return;
+
+       /*
+        * The nordrand option can clear X86_FEATURE_RDRAND, so check for
+        * RDRAND support using the CPUID function directly.
+        */
+       if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
+               return;
+
+       msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
+
+       /*
+        * Verify that the CPUID change has occurred in case the kernel is
+        * running virtualized and the hypervisor doesn't support the MSR.
+        */
+       if (cpuid_ecx(1) & BIT(30)) {
+               pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
+               return;
+       }
+
+       clear_cpu_cap(c, X86_FEATURE_RDRAND);
+       pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
+}
+
+static void init_amd_jg(struct cpuinfo_x86 *c)
+{
+       /*
+        * Some BIOS implementations do not restore proper RDRAND support
+        * across suspend and resume. Check on whether to hide the RDRAND
+        * instruction support via CPUID.
+        */
+       clear_rdrand_cpuid_bit(c);
+}
+
 static void init_amd_bd(struct cpuinfo_x86 *c)
 {
        u64 value;
@@ -813,6 +871,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
                        wrmsrl_safe(MSR_F15H_IC_CFG, value);
                }
        }
+
+       /*
+        * Some BIOS implementations do not restore proper RDRAND support
+        * across suspend and resume. Check on whether to hide the RDRAND
+        * instruction support via CPUID.
+        */
+       clear_rdrand_cpuid_bit(c);
 }
 
 static void init_amd_zn(struct cpuinfo_x86 *c)
@@ -855,6 +920,7 @@ static void init_amd(struct cpuinfo_x86 *c)
        case 0x10: init_amd_gh(c); break;
        case 0x12: init_amd_ln(c); break;
        case 0x15: init_amd_bd(c); break;
+       case 0x16: init_amd_jg(c); break;
        case 0x17: init_amd_zn(c); break;
        }
 
index a5cde748cf76bd39948f6d90c39b7a7c09b5c1e2..ee7d17611ead47f87774400982503b65553f209a 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/e820/api.h>
 #include <asm/hypervisor.h>
 
+static void __init spectre_v1_select_mitigation(void);
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
@@ -96,17 +97,11 @@ void __init check_bugs(void)
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
 
-       /* Select the proper spectre mitigation before patching alternatives */
+       /* Select the proper CPU mitigations before patching alternatives: */
+       spectre_v1_select_mitigation();
        spectre_v2_select_mitigation();
-
-       /*
-        * Select proper mitigation for any exposure to the Speculative Store
-        * Bypass vulnerability.
-        */
        ssb_select_mitigation();
-
        l1tf_select_mitigation();
-
        mds_select_mitigation();
 
        arch_smt_update();
@@ -271,6 +266,98 @@ static int __init mds_cmdline(char *str)
 }
 early_param("mds", mds_cmdline);
 
+#undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V1 : " fmt
+
+enum spectre_v1_mitigation {
+       SPECTRE_V1_MITIGATION_NONE,
+       SPECTRE_V1_MITIGATION_AUTO,
+};
+
+static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
+       SPECTRE_V1_MITIGATION_AUTO;
+
+static const char * const spectre_v1_strings[] = {
+       [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
+       [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
+};
+
+/*
+ * Does SMAP provide full mitigation against speculative kernel access to
+ * userspace?
+ */
+static bool smap_works_speculatively(void)
+{
+       if (!boot_cpu_has(X86_FEATURE_SMAP))
+               return false;
+
+       /*
+        * On CPUs which are vulnerable to Meltdown, SMAP does not
+        * prevent speculative access to user data in the L1 cache.
+        * Consider SMAP to be non-functional as a mitigation on these
+        * CPUs.
+        */
+       if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
+               return false;
+
+       return true;
+}
+
+static void __init spectre_v1_select_mitigation(void)
+{
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
+               spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+               return;
+       }
+
+       if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
+               /*
+                * With Spectre v1, a user can speculatively control either
+                * path of a conditional swapgs with a user-controlled GS
+                * value.  The mitigation is to add lfences to both code paths.
+                *
+                * If FSGSBASE is enabled, the user can put a kernel address in
+                * GS, in which case SMAP provides no protection.
+                *
+                * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
+                *         FSGSBASE enablement patches have been merged. ]
+                *
+                * If FSGSBASE is disabled, the user can only put a user space
+                * address in GS.  That makes an attack harder, but still
+                * possible if there's no SMAP protection.
+                */
+               if (!smap_works_speculatively()) {
+                       /*
+                        * Mitigation can be provided from SWAPGS itself or
+                        * PTI as the CR3 write in the Meltdown mitigation
+                        * is serializing.
+                        *
+                        * If neither is there, mitigate with an LFENCE to
+                        * stop speculation through swapgs.
+                        */
+                       if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
+                           !boot_cpu_has(X86_FEATURE_PTI))
+                               setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
+
+                       /*
+                        * Enable lfences in the kernel entry (non-swapgs)
+                        * paths, to prevent user entry from speculatively
+                        * skipping swapgs.
+                        */
+                       setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
+               }
+       }
+
+       pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+}
+
+static int __init nospectre_v1_cmdline(char *str)
+{
+       spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+       return 0;
+}
+early_param("nospectre_v1", nospectre_v1_cmdline);
+
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 
@@ -1196,7 +1283,7 @@ static ssize_t l1tf_show_state(char *buf)
 
 static ssize_t mds_show_state(char *buf)
 {
-       if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
                return sprintf(buf, "%s; SMT Host state unknown\n",
                               mds_strings[mds_mitigation]);
        }
@@ -1258,7 +1345,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                break;
 
        case X86_BUG_SPECTRE_V1:
-               return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+               return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 
        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
index 0c5fcbd998cf11badefad906a2122400a3512d58..9d863e8f9b3f22b7119a7e98556a722c819c2e17 100644 (file)
@@ -651,8 +651,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
        if (c->x86 < 0x17) {
                /* LLC is at the node level. */
                per_cpu(cpu_llc_id, cpu) = node_id;
-       } else if (c->x86 == 0x17 &&
-                  c->x86_model >= 0 && c->x86_model <= 0x1F) {
+       } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
                /*
                 * LLC is at the core complex level.
                 * Core complex ID is ApicId[3] for these processors.
index 1073118b9bf03b43fad3bd523b1ca72fdedeabcc..b33fdfa0ff49e6f9e70754051216eee0d08eed16 100644 (file)
@@ -808,6 +808,30 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
        }
 }
 
+static void init_cqm(struct cpuinfo_x86 *c)
+{
+       if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
+               c->x86_cache_max_rmid  = -1;
+               c->x86_cache_occ_scale = -1;
+               return;
+       }
+
+       /* will be overridden if occupancy monitoring exists */
+       c->x86_cache_max_rmid = cpuid_ebx(0xf);
+
+       if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
+           cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
+           cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
+               u32 eax, ebx, ecx, edx;
+
+               /* QoS sub-leaf, EAX=0Fh, ECX=1 */
+               cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
+
+               c->x86_cache_max_rmid  = ecx;
+               c->x86_cache_occ_scale = ebx;
+       }
+}
+
 void get_cpu_cap(struct cpuinfo_x86 *c)
 {
        u32 eax, ebx, ecx, edx;
@@ -839,33 +863,6 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
                c->x86_capability[CPUID_D_1_EAX] = eax;
        }
 
-       /* Additional Intel-defined flags: level 0x0000000F */
-       if (c->cpuid_level >= 0x0000000F) {
-
-               /* QoS sub-leaf, EAX=0Fh, ECX=0 */
-               cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
-               c->x86_capability[CPUID_F_0_EDX] = edx;
-
-               if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
-                       /* will be overridden if occupancy monitoring exists */
-                       c->x86_cache_max_rmid = ebx;
-
-                       /* QoS sub-leaf, EAX=0Fh, ECX=1 */
-                       cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
-                       c->x86_capability[CPUID_F_1_EDX] = edx;
-
-                       if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
-                             ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
-                              (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
-                               c->x86_cache_max_rmid = ecx;
-                               c->x86_cache_occ_scale = ebx;
-                       }
-               } else {
-                       c->x86_cache_max_rmid = -1;
-                       c->x86_cache_occ_scale = -1;
-               }
-       }
-
        /* AMD-defined flags: level 0x80000001 */
        eax = cpuid_eax(0x80000000);
        c->extended_cpuid_level = eax;
@@ -896,6 +893,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 
        init_scattered_cpuid_features(c);
        init_speculation_control(c);
+       init_cqm(c);
 
        /*
         * Clear/Set all flags overridden by options, after probe.
@@ -954,6 +952,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define NO_L1TF                BIT(3)
 #define NO_MDS         BIT(4)
 #define MSBDS_ONLY     BIT(5)
+#define NO_SWAPGS      BIT(6)
 
 #define VULNWL(_vendor, _family, _model, _whitelist)   \
        { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -977,29 +976,37 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION),
        VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION),
 
-       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(ATOM_SILVERMONT_X,         NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY),
-       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY),
+       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_SILVERMONT_X,         NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
 
        VULNWL_INTEL(CORE_YONAH,                NO_SSB),
 
-       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY),
+       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
 
-       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF),
-       VULNWL_INTEL(ATOM_GOLDMONT_X,           NO_MDS | NO_L1TF),
-       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF),
+       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_GOLDMONT_X,           NO_MDS | NO_L1TF | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS),
+
+       /*
+        * Technically, swapgs isn't serializing on AMD (despite it previously
+        * being documented as such in the APM).  But according to AMD, %gs is
+        * updated non-speculatively, and the issuing of %gs-relative memory
+        * operands will be blocked until the %gs update completes, which is
+        * good enough for our purposes.
+        */
 
        /* AMD Family 0xf - 0x12 */
-       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
 
        /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS),
+       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
        {}
 };
 
@@ -1036,6 +1043,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                        setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
        }
 
+       if (!cpu_matches(NO_SWAPGS))
+               setup_force_cpu_bug(X86_BUG_SWAPGS);
+
        if (cpu_matches(NO_MELTDOWN))
                return;
 
index 2c0bd38a44ab125c6a6f9627eea0cc69c3d882f5..fa07a224e7b98187469161cdfd86644e673acb82 100644 (file)
@@ -59,6 +59,9 @@ static const struct cpuid_dep cpuid_deps[] = {
        { X86_FEATURE_AVX512_4VNNIW,    X86_FEATURE_AVX512F   },
        { X86_FEATURE_AVX512_4FMAPS,    X86_FEATURE_AVX512F   },
        { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F   },
+       { X86_FEATURE_CQM_OCCUP_LLC,    X86_FEATURE_CQM_LLC   },
+       { X86_FEATURE_CQM_MBM_TOTAL,    X86_FEATURE_CQM_LLC   },
+       { X86_FEATURE_CQM_MBM_LOCAL,    X86_FEATURE_CQM_LLC   },
        {}
 };
 
index d0dfb892c72fe7e3648fba753e0d7351d19006f9..aed45b8895d5b5f5c293ae4f6a47499e580f5825 100644 (file)
@@ -4,6 +4,8 @@
 # Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
 #
 
+set -e
+
 IN=$1
 OUT=$2
 
index 772c219b688989926eac0e082d9dbdba1b4109f1..5a52672e3f8bada2e92305f1a83722f57e8a8088 100644 (file)
@@ -21,6 +21,10 @@ struct cpuid_bit {
 static const struct cpuid_bit cpuid_bits[] = {
        { X86_FEATURE_APERFMPERF,       CPUID_ECX,  0, 0x00000006, 0 },
        { X86_FEATURE_EPB,              CPUID_ECX,  3, 0x00000006, 0 },
+       { X86_FEATURE_CQM_LLC,          CPUID_EDX,  1, 0x0000000f, 0 },
+       { X86_FEATURE_CQM_OCCUP_LLC,    CPUID_EDX,  0, 0x0000000f, 1 },
+       { X86_FEATURE_CQM_MBM_TOTAL,    CPUID_EDX,  1, 0x0000000f, 1 },
+       { X86_FEATURE_CQM_MBM_LOCAL,    CPUID_EDX,  2, 0x0000000f, 1 },
        { X86_FEATURE_CAT_L3,           CPUID_EBX,  1, 0x00000010, 0 },
        { X86_FEATURE_CAT_L2,           CPUID_EBX,  2, 0x00000010, 0 },
        { X86_FEATURE_CDP_L3,           CPUID_ECX,  2, 0x00000010, 1 },
index 50d309662d78cd7721ba0bc46c374c42a11acc0c..5790671857e55822c15286dc1e257315056abcfb 100644 (file)
@@ -53,7 +53,7 @@ int ftrace_arch_code_modify_post_process(void)
 union ftrace_code_union {
        char code[MCOUNT_INSN_SIZE];
        struct {
-               unsigned char e8;
+               unsigned char op;
                int offset;
        } __attribute__((packed));
 };
@@ -63,20 +63,23 @@ static int ftrace_calc_offset(long ip, long addr)
        return (int)(addr - ip);
 }
 
-static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *
+ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr)
 {
        static union ftrace_code_union calc;
 
-       calc.e8         = 0xe8;
+       calc.op         = op;
        calc.offset     = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
 
-       /*
-        * No locking needed, this must be called via kstop_machine
-        * which in essence is like running on a uniprocessor machine.
-        */
        return calc.code;
 }
 
+static unsigned char *
+ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+       return ftrace_text_replace(0xe8, ip, addr);
+}
+
 static inline int
 within(unsigned long addr, unsigned long start, unsigned long end)
 {
@@ -686,22 +689,6 @@ int __init ftrace_dyn_arch_init(void)
        return 0;
 }
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
-static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
-{
-       static union ftrace_code_union calc;
-
-       /* Jmp not a call (ignore the .e8) */
-       calc.e8         = 0xe9;
-       calc.offset     = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
-
-       /*
-        * ftrace external locks synchronize the access to the static variable.
-        */
-       return calc.code;
-}
-#endif
-
 /* Currently only x86_64 supports dynamic trampolines */
 #ifdef CONFIG_X86_64
 
@@ -923,8 +910,8 @@ static void *addr_from_call(void *ptr)
                return NULL;
 
        /* Make sure this is a call */
-       if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
-               pr_warn("Expected e8, got %x\n", calc.e8);
+       if (WARN_ON_ONCE(calc.op != 0xe8)) {
+               pr_warn("Expected e8, got %x\n", calc.op);
                return NULL;
        }
 
@@ -995,6 +982,11 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern void ftrace_graph_call(void);
 
+static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
+{
+       return ftrace_text_replace(0xe9, ip, addr);
+}
+
 static int ftrace_mod_jmp(unsigned long ip, void *func)
 {
        unsigned char *new;
index ddee1f0870c4b091cae110f35d92a2d87a05d8b6..88dc38b4a1479ed91d113d191203431cfd20fa49 100644 (file)
@@ -184,24 +184,25 @@ unsigned long __head __startup_64(unsigned long physaddr,
        pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
 
        if (la57) {
-               p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+               p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
+                                   physaddr);
 
                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
                pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
 
-               i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
-               p4d[i + 0] = (pgdval_t)pud + pgtable_flags;
-               p4d[i + 1] = (pgdval_t)pud + pgtable_flags;
+               i = physaddr >> P4D_SHIFT;
+               p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
+               p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
        } else {
                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
                pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
        }
 
-       i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
-       pud[i + 0] = (pudval_t)pmd + pgtable_flags;
-       pud[i + 1] = (pudval_t)pmd + pgtable_flags;
+       i = physaddr >> PUD_SHIFT;
+       pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
+       pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
 
        pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
        /* Filter out unsupported __PAGE_KERNEL_* bits: */
@@ -211,8 +212,9 @@ unsigned long __head __startup_64(unsigned long physaddr,
        pmd_entry +=  physaddr;
 
        for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
-               int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
-               pmd[idx] = pmd_entry + i * PMD_SIZE;
+               int idx = i + (physaddr >> PMD_SHIFT);
+
+               pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
        }
 
        /*
@@ -220,13 +222,31 @@ unsigned long __head __startup_64(unsigned long physaddr,
         * we might write invalid pmds, when the kernel is relocated
         * cleanup_highmap() fixes this up along with the mappings
         * beyond _end.
+        *
+        * Only the region occupied by the kernel image has so far
+        * been checked against the table of usable memory regions
+        * provided by the firmware, so invalidate pages outside that
+        * region. A page table entry that maps to a reserved area of
+        * memory would allow processor speculation into that area,
+        * and on some hardware (particularly the UV platform) even
+        * speculative access to some reserved areas is caught as an
+        * error, causing the BIOS to halt the system.
         */
 
        pmd = fixup_pointer(level2_kernel_pgt, physaddr);
-       for (i = 0; i < PTRS_PER_PMD; i++) {
+
+       /* invalidate pages before the kernel image */
+       for (i = 0; i < pmd_index((unsigned long)_text); i++)
+               pmd[i] &= ~_PAGE_PRESENT;
+
+       /* fixup pages that are part of the kernel image */
+       for (; i <= pmd_index((unsigned long)_end); i++)
                if (pmd[i] & _PAGE_PRESENT)
                        pmd[i] += load_delta;
-       }
+
+       /* invalidate pages after the kernel image */
+       for (; i < PTRS_PER_PMD; i++)
+               pmd[i] &= ~_PAGE_PRESENT;
 
        /*
         * Fixup phys_base - remove the memory encryption mask to obtain
index 01adea278a71024076137a2571794fcd5ab9d246..a7e0e975043fd05cf6d7510c82f827728ac555fa 100644 (file)
@@ -321,7 +321,8 @@ void __init idt_setup_apic_and_irq_gates(void)
 #ifdef CONFIG_X86_LOCAL_APIC
        for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
                set_bit(i, system_vectors);
-               set_intr_gate(i, spurious_interrupt);
+               entry = spurious_entries_start + 8 * (i - FIRST_SYSTEM_VECTOR);
+               set_intr_gate(i, entry);
        }
 #endif
 }
index 59b5f2ea7c2f32d8c02181be432262b2df97ea1a..a975246074b5c571e7346228553467e8f8244895 100644 (file)
@@ -246,7 +246,7 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
        if (!handle_irq(desc, regs)) {
                ack_APIC_irq();
 
-               if (desc != VECTOR_RETRIGGERED) {
+               if (desc != VECTOR_RETRIGGERED && desc != VECTOR_SHUTDOWN) {
                        pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
                                             __func__, smp_processor_id(),
                                             vector);
index 7f89d609095acddecc45419deaebcb896c01a021..cee45d46e67dccdaf4382554450bca412d7ed1e1 100644 (file)
@@ -830,6 +830,7 @@ asm(
 "cmpb  $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
 "setne %al;"
 "ret;"
+".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
 ".popsection");
 
 #endif
index 013fe3d21dbb3f4d5f834f74e0ca3d791a6f0b06..2ec202cb9dfd45b789e6d3a31a843f726d5987a5 100644 (file)
@@ -117,12 +117,8 @@ static u64 kvm_sched_clock_read(void)
 
 static inline void kvm_sched_clock_init(bool stable)
 {
-       if (!stable) {
-               pv_time_ops.sched_clock = kvm_clock_read;
+       if (!stable)
                clear_sched_clock_stable();
-               return;
-       }
-
        kvm_sched_clock_offset = kvm_clock_read();
        pv_time_ops.sched_clock = kvm_sched_clock_read;
 
index ddb1ca6923b1bccda07a52fb5e98256d0c2a744f..5b4c3279909474aa8443e828dadfb580633361e5 100644 (file)
@@ -547,17 +547,15 @@ void __init default_get_smp_config(unsigned int early)
                         * local APIC has default address
                         */
                        mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-                       return;
+                       goto out;
                }
 
                pr_info("Default MP configuration #%d\n", mpf->feature1);
                construct_default_ISA_mptable(mpf->feature1);
 
        } else if (mpf->physptr) {
-               if (check_physptr(mpf, early)) {
-                       early_memunmap(mpf, sizeof(*mpf));
-                       return;
-               }
+               if (check_physptr(mpf, early))
+                       goto out;
        } else
                BUG();
 
@@ -566,7 +564,7 @@ void __init default_get_smp_config(unsigned int early)
        /*
         * Only use the first configuration found.
         */
-
+out:
        early_memunmap(mpf, sizeof(*mpf));
 }
 
index aeba77881d85476a2aa9a71306c757b3eb014fe1..516ec7586a5fbd9ce820b93f5d66a0816ab6d449 100644 (file)
@@ -652,11 +652,10 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
 {
        struct thread_struct *thread = &tsk->thread;
        unsigned long val = 0;
-       int index = n;
 
        if (n < HBP_NUM) {
+               int index = array_index_nospec(n, HBP_NUM);
                struct perf_event *bp = thread->ptrace_bps[index];
-               index = array_index_nospec(index, HBP_NUM);
 
                if (bp)
                        val = bp->hw.info.address;
index b4866badb235a64be119989e642764d0becac65f..90ecc108bc8a52ea1091544c8927c9755a75495f 100644 (file)
@@ -1251,7 +1251,7 @@ void __init setup_arch(char **cmdline_p)
        x86_init.hyper.guest_late_init();
 
        e820__reserve_resources();
-       e820__register_nosave_regions(max_low_pfn);
+       e820__register_nosave_regions(max_pfn);
 
        x86_init.resources.reserve_resources();
 
index 04adc8d60aed82178caf3a099d66b497a6c11bcf..b2b87b91f3361dc4ec2806685d8bc7b21052ba6e 100644 (file)
@@ -181,6 +181,12 @@ asmlinkage __visible void smp_reboot_interrupt(void)
        irq_exit();
 }
 
+static int register_stop_handler(void)
+{
+       return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
+                                   NMI_FLAG_FIRST, "smp_stop");
+}
+
 static void native_stop_other_cpus(int wait)
 {
        unsigned long flags;
@@ -214,39 +220,41 @@ static void native_stop_other_cpus(int wait)
                apic->send_IPI_allbutself(REBOOT_VECTOR);
 
                /*
-                * Don't wait longer than a second if the caller
-                * didn't ask us to wait.
+                * Don't wait longer than a second for IPI completion. The
+                * wait request is not checked here because that would
+                * prevent an NMI shutdown attempt in case that not all
+                * CPUs reach shutdown state.
                 */
                timeout = USEC_PER_SEC;
-               while (num_online_cpus() > 1 && (wait || timeout--))
+               while (num_online_cpus() > 1 && timeout--)
                        udelay(1);
        }
-       
-       /* if the REBOOT_VECTOR didn't work, try with the NMI */
-       if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi))  {
-               if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
-                                        NMI_FLAG_FIRST, "smp_stop"))
-                       /* Note: we ignore failures here */
-                       /* Hope the REBOOT_IRQ is good enough */
-                       goto finish;
-
-               /* sync above data before sending IRQ */
-               wmb();
 
-               pr_emerg("Shutting down cpus with NMI\n");
+       /* if the REBOOT_VECTOR didn't work, try with the NMI */
+       if (num_online_cpus() > 1) {
+               /*
+                * If NMI IPI is enabled, try to register the stop handler
+                * and send the IPI. In any case try to wait for the other
+                * CPUs to stop.
+                */
+               if (!smp_no_nmi_ipi && !register_stop_handler()) {
+                       /* Sync above data before sending IRQ */
+                       wmb();
 
-               apic->send_IPI_allbutself(NMI_VECTOR);
+                       pr_emerg("Shutting down cpus with NMI\n");
 
+                       apic->send_IPI_allbutself(NMI_VECTOR);
+               }
                /*
-                * Don't wait longer than a 10 ms if the caller
-                * didn't ask us to wait.
+                * Don't wait longer than 10 ms if the caller didn't
+                * reqeust it. If wait is true, the machine hangs here if
+                * one or more CPUs do not reach shutdown state.
                 */
                timeout = USEC_PER_MSEC * 10;
                while (num_online_cpus() > 1 && (wait || timeout--))
                        udelay(1);
        }
 
-finish:
        local_irq_save(flags);
        disable_local_APIC();
        mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
index 623965e86b65eda431b8e5fdbc2204c47bcb8b33..897da526e40e66027a34b9657c73aea3bdf78c51 100644 (file)
@@ -231,9 +231,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
        {},
 };
 
+/*
+ * Some devices have a portrait LCD but advertise a landscape resolution (and
+ * pitch). We simply swap width and height for these devices so that we can
+ * correctly deal with some of them coming with multiple resolutions.
+ */
+static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+       {
+               /*
+                * Lenovo MIIX310-10ICR, only some batches have the troublesome
+                * 800x1280 portrait screen. Luckily the portrait version has
+                * its own BIOS version, so we match on that.
+                */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
+                       DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
+               },
+       },
+       {
+               /* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+                                       "Lenovo MIIX 320-10ICR"),
+               },
+       },
+       {
+               /* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+                                       "Lenovo ideapad D330-10IGM"),
+               },
+       },
+       {},
+};
+
 __init void sysfb_apply_efi_quirks(void)
 {
        if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
            !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
                dmi_check_system(efifb_dmi_system_table);
+
+       if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+           dmi_check_system(efifb_dmi_swap_width_height)) {
+               u16 temp = screen_info.lfb_width;
+
+               screen_info.lfb_width = screen_info.lfb_height;
+               screen_info.lfb_height = temp;
+               screen_info.lfb_linelength = 4 * screen_info.lfb_width;
+       }
 }
index deb576b23b7cf49817533d00555d0dc976c42486..9119859ba78714d882a09fe49b263d2e085a5329 100644 (file)
@@ -521,9 +521,12 @@ struct uprobe_xol_ops {
        void    (*abort)(struct arch_uprobe *, struct pt_regs *);
 };
 
-static inline int sizeof_long(void)
+static inline int sizeof_long(struct pt_regs *regs)
 {
-       return in_ia32_syscall() ? 4 : 8;
+       /*
+        * Check registers for mode as in_xxx_syscall() does not apply here.
+        */
+       return user_64bit_mode(regs) ? 8 : 4;
 }
 
 static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
@@ -534,9 +537,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 
 static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
 {
-       unsigned long new_sp = regs->sp - sizeof_long();
+       unsigned long new_sp = regs->sp - sizeof_long(regs);
 
-       if (copy_to_user((void __user *)new_sp, &val, sizeof_long()))
+       if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
                return -EFAULT;
 
        regs->sp = new_sp;
@@ -569,7 +572,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
                long correction = utask->vaddr - utask->xol_vaddr;
                regs->ip += correction;
        } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
-               regs->sp += sizeof_long(); /* Pop incorrect return address */
+               regs->sp += sizeof_long(regs); /* Pop incorrect return address */
                if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
                        return -ERESTART;
        }
@@ -688,7 +691,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
         * "call" insn was executed out-of-line. Just restore ->sp and restart.
         * We could also restore ->ip and try to call branch_emulate_op() again.
         */
-       regs->sp += sizeof_long();
+       regs->sp += sizeof_long(regs);
        return -ERESTART;
 }
 
@@ -1068,7 +1071,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 unsigned long
 arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
 {
-       int rasize = sizeof_long(), nleft;
+       int rasize = sizeof_long(regs), nleft;
        unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
 
        if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
index 9a327d5b6d1f5bf420c7f15c22cefcdddc921f73..d78a61408243f20e8369bc5acaa3562d7c34d0aa 100644 (file)
@@ -47,8 +47,6 @@ static const struct cpuid_reg reverse_cpuid[] = {
        [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
        [CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
        [CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
-       [CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
-       [CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
        [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
        [CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
        [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
index 4a688ef9e4481c1698b026e960a9a75b6acf4424..e699f4d2a450dbcd1a47eb61e69ef676e4a4c993 100644 (file)
@@ -2331,12 +2331,16 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
 
 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
 {
+#ifdef CONFIG_X86_64
        u32 eax, ebx, ecx, edx;
 
        eax = 0x80000001;
        ecx = 0;
        ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
        return edx & bit(X86_FEATURE_LM);
+#else
+       return false;
+#endif
 }
 
 #define GET_SMSTATE(type, smbase, offset)                                \
@@ -2381,6 +2385,7 @@ static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
        return X86EMUL_CONTINUE;
 }
 
+#ifdef CONFIG_X86_64
 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 {
        struct desc_struct desc;
@@ -2399,6 +2404,7 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
        return X86EMUL_CONTINUE;
 }
+#endif
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                                    u64 cr0, u64 cr3, u64 cr4)
@@ -2499,6 +2505,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
        return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
+#ifdef CONFIG_X86_64
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
        struct desc_struct desc;
@@ -2560,6 +2567,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
        return X86EMUL_CONTINUE;
 }
+#endif
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
 {
@@ -2616,9 +2624,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
        if (ctxt->ops->pre_leave_smm(ctxt, smbase))
                return X86EMUL_UNHANDLEABLE;
 
+#ifdef CONFIG_X86_64
        if (emulator_has_longmode(ctxt))
                ret = rsm_load_state_64(ctxt, smbase + 0x8000);
        else
+#endif
                ret = rsm_load_state_32(ctxt, smbase + 0x8000);
 
        if (ret != X86EMUL_CONTINUE) {
@@ -5358,6 +5368,8 @@ done_prefixes:
                                        ctxt->memopp->addr.mem.ea + ctxt->_eip);
 
 done:
+       if (rc == X86EMUL_PROPAGATE_FAULT)
+               ctxt->have_exception = true;
        return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
 }
 
index 229d996051653ad48803d30c2cd68ba704dc2835..5842c5f587fe910b9358eda88cb619bd958413c4 100644 (file)
@@ -132,8 +132,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
        struct kvm_vcpu *vcpu = NULL;
        int i;
 
-       if (vpidx < KVM_MAX_VCPUS)
-               vcpu = kvm_get_vcpu(kvm, vpidx);
+       if (vpidx >= KVM_MAX_VCPUS)
+               return NULL;
+
+       vcpu = kvm_get_vcpu(kvm, vpidx);
        if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
@@ -689,6 +691,24 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
                stimer_cleanup(&hv_vcpu->stimer[i]);
 }
 
+bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
+{
+       if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
+               return false;
+       return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
+}
+EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
+
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
+                           struct hv_vp_assist_page *assist_page)
+{
+       if (!kvm_hv_assist_page_enabled(vcpu))
+               return false;
+       return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
+                                     assist_page, sizeof(*assist_page));
+}
+EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
+
 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
 {
        struct hv_message *msg = &stimer->msg;
@@ -1040,21 +1060,41 @@ static u64 current_task_runtime_100ns(void)
 
 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 {
-       struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+       struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 
        switch (msr) {
-       case HV_X64_MSR_VP_INDEX:
-               if (!host)
+       case HV_X64_MSR_VP_INDEX: {
+               struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+               int vcpu_idx = kvm_vcpu_get_idx(vcpu);
+               u32 new_vp_index = (u32)data;
+
+               if (!host || new_vp_index >= KVM_MAX_VCPUS)
                        return 1;
-               hv->vp_index = (u32)data;
+
+               if (new_vp_index == hv_vcpu->vp_index)
+                       return 0;
+
+               /*
+                * The VP index is initialized to vcpu_index by
+                * kvm_hv_vcpu_postcreate so they initially match.  Now the
+                * VP index is changing, adjust num_mismatched_vp_indexes if
+                * it now matches or no longer matches vcpu_idx.
+                */
+               if (hv_vcpu->vp_index == vcpu_idx)
+                       atomic_inc(&hv->num_mismatched_vp_indexes);
+               else if (new_vp_index == vcpu_idx)
+                       atomic_dec(&hv->num_mismatched_vp_indexes);
+
+               hv_vcpu->vp_index = new_vp_index;
                break;
+       }
        case HV_X64_MSR_VP_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;
 
                if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
-                       hv->hv_vapic = data;
-                       if (kvm_lapic_enable_pv_eoi(vcpu, 0))
+                       hv_vcpu->hv_vapic = data;
+                       if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
                                return 1;
                        break;
                }
@@ -1064,10 +1104,11 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
-               hv->hv_vapic = data;
+               hv_vcpu->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
-                                           gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
+                                           gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
+                                           sizeof(struct hv_vp_assist_page)))
                        return 1;
                break;
        }
@@ -1080,7 +1121,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
        case HV_X64_MSR_VP_RUNTIME:
                if (!host)
                        return 1;
-               hv->runtime_offset = data - current_task_runtime_100ns();
+               hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
@@ -1172,11 +1213,11 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
                          bool host)
 {
        u64 data = 0;
-       struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+       struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 
        switch (msr) {
        case HV_X64_MSR_VP_INDEX:
-               data = hv->vp_index;
+               data = hv_vcpu->vp_index;
                break;
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
@@ -1185,10 +1226,10 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        case HV_X64_MSR_VP_ASSIST_PAGE:
-               data = hv->hv_vapic;
+               data = hv_vcpu->hv_vapic;
                break;
        case HV_X64_MSR_VP_RUNTIME:
-               data = current_task_runtime_100ns() + hv->runtime_offset;
+               data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
index d6aa969e20f19f518e420be2ff9d8b7f91282994..0e66c12ed2c3d5b9b50797cee84ad24de1878e01 100644 (file)
@@ -62,6 +62,10 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
 
+bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
+                           struct hv_vp_assist_page *assist_page);
+
 static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
                                                        int timer_index)
 {
index faa264822cee3c658280d122cc3495fc93587cf5..007bc654f928a17e731963f2ba1c6cae00f653d5 100644 (file)
@@ -172,3 +172,10 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
        __kvm_migrate_apic_timer(vcpu);
        __kvm_migrate_pit_timer(vcpu);
 }
+
+bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
+{
+       bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;
+
+       return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm);
+}
index d5005cc265217c4fa4d5de7c2ecc2a29fb599c19..fd210cdd49839dea0f33367376e13a089177c3af 100644 (file)
@@ -114,6 +114,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
        return mode != KVM_IRQCHIP_NONE;
 }
 
+bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
index 031bd7f91f98a73c0e543d07ae65dfc54f175621..262e49301cae61caf76b7e875769f20301a71c83 100644 (file)
@@ -209,6 +209,9 @@ static void recalculate_apic_map(struct kvm *kvm)
                if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
                        new->phys_map[xapic_id] = apic;
 
+               if (!kvm_apic_sw_enabled(apic))
+                       continue;
+
                ldr = kvm_lapic_get_reg(apic, APIC_LDR);
 
                if (apic_x2apic_mode(apic)) {
@@ -252,6 +255,8 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
                        recalculate_apic_map(apic->vcpu->kvm);
                } else
                        static_key_slow_inc(&apic_sw_disabled.key);
+
+               recalculate_apic_map(apic->vcpu->kvm);
        }
 }
 
@@ -2628,17 +2633,25 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
        return 0;
 }
 
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
+int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
 {
        u64 addr = data & ~KVM_MSR_ENABLED;
+       struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
+       unsigned long new_len;
+
        if (!IS_ALIGNED(addr, 4))
                return 1;
 
        vcpu->arch.pv_eoi.msr_val = data;
        if (!pv_eoi_enabled(vcpu))
                return 0;
-       return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
-                                        addr, sizeof(u8));
+
+       if (addr == ghc->gpa && len <= ghc->len)
+               new_len = ghc->len;
+       else
+               new_len = len;
+
+       return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
 }
 
 void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
index ed0ed39abd36970601e39ed04edde1850c849ece..ff6ef9c3d760c7d6db6d5ee86d1a0bb21c1c63b5 100644 (file)
@@ -120,7 +120,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
        return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
 }
 
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
+int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len);
 void kvm_lapic_init(void);
 void kvm_lapic_exit(void);
 
index e0f982e35c96b8ac9a286d6472884709fa4350a4..88940261fb5379841c7e516f2c78aa912baddb57 100644 (file)
@@ -1954,7 +1954,7 @@ static int is_empty_shadow_page(u64 *spt)
  * aggregate version in order to make the slab shrinker
  * faster
  */
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
 {
        kvm->arch.n_used_mmu_pages += nr;
        percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2704,7 +2704,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
 {
        LIST_HEAD(invalid_list);
 
@@ -4532,11 +4532,11 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
                 */
 
                /* Faults from writes to non-writable pages */
-               u8 wf = (pfec & PFERR_WRITE_MASK) ? ~w : 0;
+               u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
                /* Faults from user mode accesses to supervisor pages */
-               u8 uf = (pfec & PFERR_USER_MASK) ? ~u : 0;
+               u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
                /* Faults from fetches of non-executable pages*/
-               u8 ff = (pfec & PFERR_FETCH_MASK) ? ~x : 0;
+               u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
                /* Faults from kernel mode fetches of user pages */
                u8 smepf = 0;
                /* Faults from kernel mode accesses of user pages */
@@ -5926,10 +5926,10 @@ out:
 /*
  * Caculate mmu pages needed for kvm.
  */
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 {
-       unsigned int nr_mmu_pages;
-       unsigned int  nr_pages = 0;
+       unsigned long nr_mmu_pages;
+       unsigned long nr_pages = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int i;
@@ -5942,8 +5942,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
        }
 
        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
-       nr_mmu_pages = max(nr_mmu_pages,
-                          (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+       nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
 
        return nr_mmu_pages;
 }
index 1fab69c0b2f32851b86ce62deb3e162848f985a8..65892288bf510fbf56916137cc194c6175df7d50 100644 (file)
@@ -69,7 +69,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                                u64 fault_address, char *insn, int insn_len);
 
-static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
 {
        if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
                return kvm->arch.n_max_mmu_pages -
index e9ea2d45ae66baa65a2e3818e7120189bcd61e57..9f72cc427158e637b1852e9843c5eddfcf78c43a 100644 (file)
@@ -48,11 +48,6 @@ static bool msr_mtrr_valid(unsigned msr)
        return false;
 }
 
-static bool valid_pat_type(unsigned t)
-{
-       return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
-}
-
 static bool valid_mtrr_type(unsigned t)
 {
        return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
@@ -67,10 +62,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                return false;
 
        if (msr == MSR_IA32_CR_PAT) {
-               for (i = 0; i < 8; i++)
-                       if (!valid_pat_type((data >> (i * 8)) & 0xff))
-                               return false;
-               return true;
+               return kvm_pat_valid(data);
        } else if (msr == MSR_MTRRdefType) {
                if (data & ~0xcff)
                        return false;
index 952aebd0a8a34fa750993a3d6b7b626be31192a0..acc8d217f6565cfa877f5a72bd24b644f97bd198 100644 (file)
@@ -131,8 +131,8 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                                                 intr ? kvm_perf_overflow_intr :
                                                 kvm_perf_overflow, pmc);
        if (IS_ERR(event)) {
-               printk_once("kvm_pmu: event creation failed %ld\n",
-                           PTR_ERR(event));
+               pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
+                           PTR_ERR(event), pmc->idx);
                return;
        }
 
index ea454d3f7763f260d798d3d76a99185f2aca505c..ac2cc2ed7a85f8b22c996ad71e9baf418dbdbbf7 100644 (file)
@@ -5146,6 +5146,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
                kvm_vcpu_wake_up(vcpu);
 }
 
+static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+
 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
 {
        unsigned long flags;
@@ -5617,6 +5622,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
        clgi();
+       kvm_load_guest_xcr0(vcpu);
 
        /*
         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -5764,6 +5770,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
                kvm_before_interrupt(&svm->vcpu);
 
+       kvm_put_guest_xcr0(vcpu);
        stgi();
 
        /* Any pending NMI will happen here */
@@ -7203,6 +7210,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
        .pmu_ops = &amd_pmu_ops,
        .deliver_posted_interrupt = svm_deliver_avic_intr,
+       .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
        .update_pi_irte = svm_update_pi_irte,
        .setup_mce = svm_setup_mce,
 
index 73d6d585dd66d8567d62abe0d226a0ed9722bb13..6f7b3acdab263b6ef26ac70d55e02c11d6bf95b3 100644 (file)
@@ -4135,7 +4135,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
                                       &msr_info->data);
        case MSR_IA32_XSS:
-               if (!vmx_xsaves_supported())
+               if (!vmx_xsaves_supported() ||
+                   (!msr_info->host_initiated &&
+                    !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+                      guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
                        return 1;
                msr_info->data = vcpu->arch.ia32_xss;
                break;
@@ -4265,9 +4268,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                              MSR_TYPE_W);
                break;
        case MSR_IA32_CR_PAT:
+               if (!kvm_pat_valid(data))
+                       return 1;
+
                if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
-                       if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
-                               return 1;
                        vmcs_write64(GUEST_IA32_PAT, data);
                        vcpu->arch.pat = data;
                        break;
@@ -4301,7 +4305,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                return vmx_set_vmx_msr(vcpu, msr_index, data);
        case MSR_IA32_XSS:
-               if (!vmx_xsaves_supported())
+               if (!vmx_xsaves_supported() ||
+                   (!msr_info->host_initiated &&
+                    !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+                      guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
                        return 1;
                /*
                 * The only supported bit as of Skylake is bit 8, but
@@ -8457,6 +8464,7 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
 {
        vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
        vmcs_write64(VMCS_LINK_POINTER, -1ull);
+       vmx->nested.sync_shadow_vmcs = false;
 }
 
 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
@@ -8468,7 +8476,6 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
                /* copy to memory all shadowed fields in case
                   they were modified */
                copy_shadow_to_vmcs12(vmx);
-               vmx->nested.sync_shadow_vmcs = false;
                vmx_disable_shadow_vmcs(vmx);
        }
        vmx->nested.posted_intr_nv = -1;
@@ -8490,6 +8497,8 @@ static void free_nested(struct vcpu_vmx *vmx)
        if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
                return;
 
+       kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, &vmx->vcpu);
+
        hrtimer_cancel(&vmx->nested.preemption_timer);
        vmx->nested.vmxon = false;
        vmx->nested.smm.vmxon = false;
@@ -8668,6 +8677,9 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
        u64 field_value;
        struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
 
+       if (WARN_ON(!shadow_vmcs))
+               return;
+
        preempt_disable();
 
        vmcs_load(shadow_vmcs);
@@ -8706,6 +8718,9 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
        u64 field_value = 0;
        struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
 
+       if (WARN_ON(!shadow_vmcs))
+               return;
+
        vmcs_load(shadow_vmcs);
 
        for (q = 0; q < ARRAY_SIZE(fields); q++) {
@@ -8742,6 +8757,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
        u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
        gva_t gva = 0;
        struct vmcs12 *vmcs12;
+       struct x86_exception e;
 
        if (!nested_vmx_check_permission(vcpu))
                return 1;
@@ -8783,8 +8799,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                                vmx_instruction_info, true, &gva))
                        return 1;
                /* _system ok, nested_vmx_check_permission has verified cpl=0 */
-               kvm_write_guest_virt_system(vcpu, gva, &field_value,
-                                           (is_long_mode(vcpu) ? 8 : 4), NULL);
+               if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
+                                               (is_long_mode(vcpu) ? 8 : 4),
+                                               &e))
+                       kvm_inject_page_fault(vcpu, &e);
        }
 
        nested_vmx_succeed(vcpu);
@@ -10403,6 +10421,11 @@ static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
        return ((rvi & 0xf0) > (vppr & 0xf0));
 }
 
+static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+       return pi_test_on(vcpu_to_pi_desc(vcpu));
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
        if (!kvm_vcpu_apicv_active(vcpu))
@@ -10424,28 +10447,21 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 
 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 {
-       u32 exit_intr_info = 0;
-       u16 basic_exit_reason = (u16)vmx->exit_reason;
-
-       if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
-             || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
+       if (vmx->exit_reason != EXIT_REASON_EXCEPTION_NMI)
                return;
 
-       if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
-               exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-       vmx->exit_intr_info = exit_intr_info;
+       vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
        /* if exit due to PF check for async PF */
-       if (is_page_fault(exit_intr_info))
+       if (is_page_fault(vmx->exit_intr_info))
                vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
 
        /* Handle machine checks before interrupts are enabled */
-       if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
-           is_machine_check(exit_intr_info))
+       if (is_machine_check(vmx->exit_intr_info))
                kvm_machine_check();
 
        /* We need to handle NMIs before interrupts are enabled */
-       if (is_nmi(exit_intr_info)) {
+       if (is_nmi(vmx->exit_intr_info)) {
                kvm_before_interrupt(&vmx->vcpu);
                asm("int $2");
                kvm_after_interrupt(&vmx->vcpu);
@@ -10743,6 +10759,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                vmx_set_interrupt_shadow(vcpu, 0);
 
+       kvm_load_guest_xcr0(vcpu);
+
        if (static_cpu_has(X86_FEATURE_PKU) &&
            kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
            vcpu->arch.pkru != vmx->host_pkru)
@@ -10795,7 +10813,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                "mov %%" _ASM_AX", %%cr2 \n\t"
                "3: \n\t"
                /* Check if vmlaunch of vmresume is needed */
-               "cmpl $0, %c[launched](%0) \n\t"
+               "cmpb $0, %c[launched](%0) \n\t"
                /* Load guest registers.  Don't clobber flags. */
                "mov %c[rax](%0), %%" _ASM_AX " \n\t"
                "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
@@ -10958,10 +10976,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                        __write_pkru(vmx->host_pkru);
        }
 
+       kvm_put_guest_xcr0(vcpu);
+
        vmx->nested.nested_run_pending = 0;
        vmx->idt_vectoring_info = 0;
 
        vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
+       if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
+               kvm_machine_check();
+
        if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
                return;
 
@@ -12551,7 +12574,7 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
                /* VM-entry exception error code */
                if (has_error_code &&
-                   vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+                   vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))
                        return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
                /* VM-entry interruption-info field: reserved bits */
@@ -14379,6 +14402,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
        .sync_pir_to_irr = vmx_sync_pir_to_irr,
        .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+       .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
 
        .set_tss_addr = vmx_set_tss_addr,
        .set_identity_map_addr = vmx_set_identity_map_addr,
index cea6568667c425e9931e37b5a857d45a9f976055..6ae8a013af31ac36b7fcd57768cd3e5b85c9cd7d 100644 (file)
@@ -581,8 +581,14 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
                                       data, offset, len, access);
 }
 
+static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
+{
+       return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) |
+              rsvd_bits(1, 2);
+}
+
 /*
- * Load the pae pdptrs.  Return true is they are all valid.
+ * Load the pae pdptrs.  Return 1 if they are all valid, 0 otherwise.
  */
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 {
@@ -601,8 +607,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
        }
        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if ((pdpte[i] & PT_PRESENT_MASK) &&
-                   (pdpte[i] &
-                    vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
+                   (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
                        ret = 0;
                        goto out;
                }
@@ -713,7 +718,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 {
        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
                        !vcpu->guest_xcr0_loaded) {
@@ -723,8 +728,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
                vcpu->guest_xcr0_loaded = 1;
        }
 }
+EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
 
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 {
        if (vcpu->guest_xcr0_loaded) {
                if (vcpu->arch.xcr0 != host_xcr0)
@@ -732,6 +738,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
                vcpu->guest_xcr0_loaded = 0;
        }
 }
+EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
 
 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
@@ -784,34 +791,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
-int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-       unsigned long old_cr4 = kvm_read_cr4(vcpu);
-       unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
-                                  X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
-
        if (cr4 & CR4_RESERVED_BITS)
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
+               return -EINVAL;
+
+       return 0;
+}
+
+int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+       unsigned long old_cr4 = kvm_read_cr4(vcpu);
+       unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+                                  X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+
+       if (kvm_valid_cr4(vcpu, cr4))
                return 1;
 
        if (is_long_mode(vcpu)) {
@@ -2494,7 +2509,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
                break;
        case MSR_KVM_PV_EOI_EN:
-               if (kvm_lapic_enable_pv_eoi(vcpu, data))
+               if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
                        return 1;
                break;
 
@@ -4116,7 +4131,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
 }
 
 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
-                                         u32 kvm_nr_mmu_pages)
+                                        unsigned long kvm_nr_mmu_pages)
 {
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;
@@ -4130,7 +4145,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
        return 0;
 }
 
-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
        return kvm->arch.n_max_mmu_pages;
 }
@@ -5014,6 +5029,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
        /* kvm_write_guest_virt_system can pull in tons of pages. */
        vcpu->arch.l1tf_flush_l1d = true;
 
+       /*
+        * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+        * is returned, but our callers are not ready for that and they blindly
+        * call kvm_inject_page_fault.  Ensure that they at least do not leak
+        * uninitialized kernel stack memory into cr2 and error code.
+        */
+       memset(exception, 0, sizeof(*exception));
        return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
                                           PFERR_WRITE_MASK, exception);
 }
@@ -6235,8 +6257,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                        if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
                                                emulation_type))
                                return EMULATE_DONE;
-                       if (ctxt->have_exception && inject_emulated_exception(vcpu))
+                       if (ctxt->have_exception) {
+                               /*
+                                * #UD should result in just EMULATION_FAILED, and trap-like
+                                * exception should not be encountered during decode.
+                                */
+                               WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
+                                            exception_type(ctxt->exception.vector) == EXCPT_TRAP);
+                               inject_emulated_exception(vcpu);
                                return EMULATE_DONE;
+                       }
                        if (emulation_type & EMULTYPE_SKIP)
                                return EMULATE_FAIL;
                        return handle_emulation_failure(vcpu, emulation_type);
@@ -6308,12 +6338,13 @@ restart:
                unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
                toggle_interruptibility(vcpu, ctxt->interruptibility);
                vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
-               kvm_rip_write(vcpu, ctxt->eip);
-               if (r == EMULATE_DONE && ctxt->tf)
-                       kvm_vcpu_do_singlestep(vcpu, &r);
                if (!ctxt->have_exception ||
-                   exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+                   exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+                       kvm_rip_write(vcpu, ctxt->eip);
+                       if (r == EMULATE_DONE && ctxt->tf)
+                               kvm_vcpu_do_singlestep(vcpu, &r);
                        __kvm_set_rflags(vcpu, ctxt->eflags);
+               }
 
                /*
                 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
@@ -7224,9 +7255,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
        put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
+#ifdef CONFIG_X86_64
 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 {
-#ifdef CONFIG_X86_64
        struct desc_ptr dt;
        struct kvm_segment seg;
        unsigned long val;
@@ -7276,10 +7307,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 
        for (i = 0; i < 6; i++)
                enter_smm_save_seg_64(vcpu, buf, i);
-#else
-       WARN_ON_ONCE(1);
-#endif
 }
+#endif
 
 static void enter_smm(struct kvm_vcpu *vcpu)
 {
@@ -7290,9 +7319,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 
        trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
        memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                enter_smm_save_state_64(vcpu, buf);
        else
+#endif
                enter_smm_save_state_32(vcpu, buf);
 
        /*
@@ -7350,8 +7381,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
        kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
        kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                kvm_x86_ops->set_efer(vcpu, 0);
+#endif
 
        kvm_update_cpuid(vcpu);
        kvm_mmu_reset_context(vcpu);
@@ -7648,8 +7681,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                goto cancel_injection;
        }
 
-       kvm_load_guest_xcr0(vcpu);
-
        if (req_immediate_exit) {
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_x86_ops->request_immediate_exit(vcpu);
@@ -7702,8 +7733,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
 
-       kvm_put_guest_xcr0(vcpu);
-
        kvm_before_interrupt(vcpu);
        kvm_x86_ops->handle_external_intr(vcpu);
        kvm_after_interrupt(vcpu);
@@ -8216,10 +8245,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 
 static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-       if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
-                       (sregs->cr4 & X86_CR4_OSXSAVE))
-               return  -EINVAL;
-
        if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
                /*
                 * When EFER.LME and CR0.PG are set, the processor is in
@@ -8238,7 +8263,7 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
                        return -EINVAL;
        }
 
-       return 0;
+       return kvm_valid_cr4(vcpu, sregs->cr4);
 }
 
 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
@@ -9336,6 +9361,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
        return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+       if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
+               return true;
+
+       if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+               kvm_test_request(KVM_REQ_SMI, vcpu) ||
+                kvm_test_request(KVM_REQ_EVENT, vcpu))
+               return true;
+
+       if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
+               return true;
+
+       return false;
+}
+
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.preempted_in_kernel;
index 1826ed9dd1c8fa2064685c0cfe9a52560714a306..3a91ea760f073974a382d090cbe74ca4067a817b 100644 (file)
@@ -345,4 +345,16 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
        __this_cpu_write(current_vcpu, NULL);
 }
 
+
+static inline bool kvm_pat_valid(u64 data)
+{
+       if (data & 0xF8F8F8F8F8F8F8F8ull)
+               return false;
+       /* 0, 1, 4, 5, 6, 7 are valid values.  */
+       return (data | ((data & 0x0202020202020202ull) << 1)) == data;
+}
+
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
+
 #endif
index 2dd1fe13a37b36aacfeca12733178f62a89ba309..19f707992db22b3d2fdc5f8c9be85f63bf7aeb93 100644 (file)
@@ -1,5 +1,6 @@
 #include <linux/types.h>
 #include <linux/export.h>
+#include <asm/cpu.h>
 
 unsigned int x86_family(unsigned int sig)
 {
index f5b7f1b3b6d75ce6024f7ab7459586b1f124902d..614c2c6b195909fc5051f251100433a181b58195 100644 (file)
@@ -113,8 +113,8 @@ static void delay_mwaitx(unsigned long __loops)
                __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
                /*
-                * AMD, like Intel, supports the EAX hint and EAX=0xf
-                * means, do not enter any deep C-state and we use it
+                * AMD, like Intel's MWAIT version, supports the EAX hint and
+                * EAX=0xf0 means, do not enter any deep C-state and we use it
                 * here in delay() to minimize wakeup latency.
                 */
                __mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
index a5a41ec5807211d8cb634f2ef009a9ad867da4a4..0c122226ca56f5b2e98549868939ea2457ef5adf 100644 (file)
@@ -177,7 +177,7 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
 #define setexponentpos(x,y) { (*(short *)&((x)->exp)) = \
   ((y) + EXTENDED_Ebias) & 0x7fff; }
 #define exponent16(x)         (*(short *)&((x)->exp))
-#define setexponent16(x,y)  { (*(short *)&((x)->exp)) = (y); }
+#define setexponent16(x,y)  { (*(short *)&((x)->exp)) = (u16)(y); }
 #define addexponent(x,y)    { (*(short *)&((x)->exp)) += (y); }
 #define stdexp(x)           { (*(short *)&((x)->exp)) += EXTENDED_Ebias; }
 
index 8dc9095bab224daab73338b7551d26b71319b9a2..742619e94bdf281a0dc19a36b1fe2b5a63a19a7b 100644 (file)
@@ -18,7 +18,7 @@
 #include "control_w.h"
 
 #define MAKE_REG(s, e, l, h) { l, h, \
-               ((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
+               (u16)((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
 
 FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
 #if 0
index 9d9765e4d1ef19661a0c3765755340f0814408b5..1bcb7242ad79a099880e055cb378df1ee695e9d9 100644 (file)
@@ -261,13 +261,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 
        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
-       if (!pmd_present(*pmd_k))
-               return NULL;
 
-       if (!pmd_present(*pmd))
+       if (pmd_present(*pmd) != pmd_present(*pmd_k))
                set_pmd(pmd, *pmd_k);
+
+       if (!pmd_present(*pmd_k))
+               return NULL;
        else
-               BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+               BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 
        return pmd_k;
 }
@@ -287,17 +288,13 @@ void vmalloc_sync_all(void)
                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        spinlock_t *pgt_lock;
-                       pmd_t *ret;
 
                        /* the pgt_lock only for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
                        spin_lock(pgt_lock);
-                       ret = vmalloc_sync_one(page_address(page), address);
+                       vmalloc_sync_one(page_address(page), address);
                        spin_unlock(pgt_lock);
-
-                       if (!ret)
-                               break;
                }
                spin_unlock(&pgd_lock);
        }
index 4df3e5c89d57c99ad05785128ac5b8ed965d576d..622d5968c9795198471e8f2271a52762322b12f4 100644 (file)
@@ -338,13 +338,15 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
 
                pud = pud_offset(p4d, addr);
                if (pud_none(*pud)) {
-                       addr += PUD_SIZE;
+                       WARN_ON_ONCE(addr & ~PUD_MASK);
+                       addr = round_up(addr + 1, PUD_SIZE);
                        continue;
                }
 
                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
-                       addr += PMD_SIZE;
+                       WARN_ON_ONCE(addr & ~PMD_MASK);
+                       addr = round_up(addr + 1, PMD_SIZE);
                        continue;
                }
 
@@ -643,6 +645,8 @@ void __init pti_init(void)
  */
 void pti_finalize(void)
 {
+       if (!boot_cpu_has(X86_FEATURE_PTI))
+               return;
        /*
         * We need to clone everything (again) that maps parts of the
         * kernel image.
index 513ce09e99504368eaac0c39f6a1d1346bad1d36..3aa3149df07f9d5e596867f4afd11604ec62dcd0 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/perf_event.h>
 #include <linux/tboot.h>
+#include <linux/dmi.h>
 
 #include <asm/pgtable.h>
 #include <asm/proto.h>
@@ -24,7 +25,7 @@
 #include <asm/debugreg.h>
 #include <asm/cpu.h>
 #include <asm/mmu_context.h>
-#include <linux/dmi.h>
+#include <asm/cpu_device_id.h>
 
 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -398,15 +399,14 @@ static int __init bsp_pm_check_init(void)
 
 core_initcall(bsp_pm_check_init);
 
-static int msr_init_context(const u32 *msr_id, const int total_num)
+static int msr_build_context(const u32 *msr_id, const int num)
 {
-       int i = 0;
+       struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
        struct saved_msr *msr_array;
+       int total_num;
+       int i, j;
 
-       if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
-               pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
-               return -EINVAL;
-       }
+       total_num = saved_msrs->num + num;
 
        msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
        if (!msr_array) {
@@ -414,19 +414,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
                return -ENOMEM;
        }
 
-       for (i = 0; i < total_num; i++) {
-               msr_array[i].info.msr_no        = msr_id[i];
+       if (saved_msrs->array) {
+               /*
+                * Multiple callbacks can invoke this function, so copy any
+                * MSR save requests from previous invocations.
+                */
+               memcpy(msr_array, saved_msrs->array,
+                      sizeof(struct saved_msr) * saved_msrs->num);
+
+               kfree(saved_msrs->array);
+       }
+
+       for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+               msr_array[i].info.msr_no        = msr_id[j];
                msr_array[i].valid              = false;
                msr_array[i].info.reg.q         = 0;
        }
-       saved_context.saved_msrs.num    = total_num;
-       saved_context.saved_msrs.array  = msr_array;
+       saved_msrs->num   = total_num;
+       saved_msrs->array = msr_array;
 
        return 0;
 }
 
 /*
- * The following section is a quirk framework for problematic BIOSen:
+ * The following sections are a quirk framework for problematic BIOSen:
  * Sometimes MSRs are modified by the BIOSen after suspended to
  * RAM, this might cause unexpected behavior after wakeup.
  * Thus we save/restore these specified MSRs across suspend/resume
@@ -441,7 +452,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
        u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
 
        pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
-       return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+       return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
 }
 
 static const struct dmi_system_id msr_save_dmi_table[] = {
@@ -456,9 +467,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
        {}
 };
 
+static int msr_save_cpuid_features(const struct x86_cpu_id *c)
+{
+       u32 cpuid_msr_id[] = {
+               MSR_AMD64_CPUID_FN_1,
+       };
+
+       pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
+               c->family);
+
+       return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
+}
+
+static const struct x86_cpu_id msr_save_cpu_table[] = {
+       {
+               .vendor = X86_VENDOR_AMD,
+               .family = 0x15,
+               .model = X86_MODEL_ANY,
+               .feature = X86_FEATURE_ANY,
+               .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+       },
+       {
+               .vendor = X86_VENDOR_AMD,
+               .family = 0x16,
+               .model = X86_MODEL_ANY,
+               .feature = X86_FEATURE_ANY,
+               .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+       },
+       {}
+};
+
+typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
+static int pm_cpu_check(const struct x86_cpu_id *c)
+{
+       const struct x86_cpu_id *m;
+       int ret = 0;
+
+       m = x86_match_cpu(msr_save_cpu_table);
+       if (m) {
+               pm_cpu_match_t fn;
+
+               fn = (pm_cpu_match_t)m->driver_data;
+               ret = fn(m);
+       }
+
+       return ret;
+}
+
 static int pm_check_save_msr(void)
 {
        dmi_check_system(msr_save_dmi_table);
+       pm_cpu_check(msr_save_cpu_table);
+
        return 0;
 }
 
index 3cf302b2633222ff45323b2528f3499233b650c5..b81b5172cf994761f0aea779e686ce742868c685 100644 (file)
@@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
 targets += $(purgatory-y)
 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 
+$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE
+       $(call if_changed_rule,cc_o_c)
+
 $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
        $(call if_changed_rule,cc_o_c)
 
@@ -15,13 +18,40 @@ targets += purgatory.ro
 KASAN_SANITIZE := n
 KCOV_INSTRUMENT := n
 
+# These are adjustments to the compiler flags used for objects that
+# make up the standalone purgatory.ro
+
+PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
+PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
+PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
+
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
 # in turn leaves some undefined symbols like __fentry__ in purgatory and not
-# sure how to relocate those. Like kexec-tools, use custom flags.
+# sure how to relocate those.
+ifdef CONFIG_FUNCTION_TRACER
+PURGATORY_CFLAGS_REMOVE                += $(CC_FLAGS_FTRACE)
+endif
+
+ifdef CONFIG_STACKPROTECTOR
+PURGATORY_CFLAGS_REMOVE                += -fstack-protector
+endif
+
+ifdef CONFIG_STACKPROTECTOR_STRONG
+PURGATORY_CFLAGS_REMOVE                += -fstack-protector-strong
+endif
+
+ifdef CONFIG_RETPOLINE
+PURGATORY_CFLAGS_REMOVE                += $(RETPOLINE_CFLAGS)
+endif
+
+CFLAGS_REMOVE_purgatory.o      += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_purgatory.o             += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_sha256.o         += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_sha256.o                        += $(PURGATORY_CFLAGS)
 
-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large
-KBUILD_CFLAGS += -m$(BITS)
-KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+CFLAGS_REMOVE_string.o         += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_string.o                        += $(PURGATORY_CFLAGS)
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
                $(call if_changed,ld)
index 025c34ac0d848f642a4b4b1c2c9577c52aa72614..7971f7a8af59f1aa84bec3e9ea2129c8b0c629ee 100644 (file)
@@ -70,3 +70,9 @@ void purgatory(void)
        }
        copy_backup_region();
 }
+
+/*
+ * Defined in order to reuse memcpy() and memset() from
+ * arch/x86/boot/compressed/string.c
+ */
+void warn(const char *msg) {}
diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c
deleted file mode 100644 (file)
index 795ca4f..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Simple string functions.
- *
- * Copyright (C) 2014 Red Hat Inc.
- *
- * Author:
- *       Vivek Goyal <vgoyal@redhat.com>
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2.  See the file COPYING for more details.
- */
-
-#include <linux/types.h>
-
-#include "../boot/string.c"
-
-void *memcpy(void *dst, const void *src, size_t len)
-{
-       return __builtin_memcpy(dst, src, len);
-}
-
-void *memset(void *dst, int c, size_t len)
-{
-       return __builtin_memset(dst, c, len);
-}
index 1804b27f9632a12b4ed9877c8e97bce35177a2be..66bcdeeee639a0a81b3a2ab4f24a59d9327e2593 100644 (file)
@@ -77,7 +77,9 @@ static efi_system_table_t __init *xen_efi_probe(void)
        efi.get_variable             = xen_efi_get_variable;
        efi.get_next_variable        = xen_efi_get_next_variable;
        efi.set_variable             = xen_efi_set_variable;
+       efi.set_variable_nonblocking = xen_efi_set_variable;
        efi.query_variable_info      = xen_efi_query_variable_info;
+       efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
        efi.update_capsule           = xen_efi_update_capsule;
        efi.query_capsule_caps       = xen_efi_query_capsule_caps;
        efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
index 782f98b332f05b9632bd156163a719a5f7cd745f..1730a26ff6abcc2a1f4a682748b88c1ff0de8752 100644 (file)
@@ -597,12 +597,12 @@ struct trap_array_entry {
 
 static struct trap_array_entry trap_array[] = {
        { debug,                       xen_xendebug,                    true },
-       { int3,                        xen_xenint3,                     true },
        { double_fault,                xen_double_fault,                true },
 #ifdef CONFIG_X86_MCE
        { machine_check,               xen_machine_check,               true },
 #endif
        { nmi,                         xen_xennmi,                      true },
+       { int3,                        xen_int3,                        false },
        { overflow,                    xen_overflow,                    false },
 #ifdef CONFIG_IA32_EMULATION
        { entry_INT80_compat,          xen_entry_INT80_compat,          false },
index 417b339e5c8e1aadedd20231c9be82ac93dbe728..3a6feed76dfc167736984315390e9b8b0cb81e6c 100644 (file)
@@ -30,7 +30,6 @@ xen_pv_trap divide_error
 xen_pv_trap debug
 xen_pv_trap xendebug
 xen_pv_trap int3
-xen_pv_trap xenint3
 xen_pv_trap xennmi
 xen_pv_trap overflow
 xen_pv_trap bounds
index a285fbd0fd9be96660260c10f7420be687ad1b20..15580e4fc766a24191fe93dadf47590c424a1818 100644 (file)
@@ -515,6 +515,7 @@ void cpu_reset(void)
                                      "add      %2, %2, %7\n\t"
                                      "addi     %0, %0, -1\n\t"
                                      "bnez     %0, 1b\n\t"
+                                     "isync\n\t"
                                      /* Jump to identity mapping */
                                      "jx       %3\n"
                                      "2:\n\t"
index 04f19de4670033142537a6271b749c30836ea47e..4092555828b13a25305f250863ad2e4d42016727 100644 (file)
@@ -119,13 +119,6 @@ EXPORT_SYMBOL(__invalidate_icache_range);
 // FIXME EXPORT_SYMBOL(screen_info);
 #endif
 
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(insl);
-
 extern long common_exception_return;
 EXPORT_SYMBOL(common_exception_return);
 
index becd793a258c81b19db969b2b34404a35c29efd9..d8d2ac294b0c092ddae80eac160f27ae5357b5c6 100644 (file)
@@ -1886,9 +1886,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
            blk_rq_pos(container_of(rb_prev(&req->rb_node),
                                    struct request, rb_node))) {
                struct bfq_queue *bfqq = bfq_init_rq(req);
-               struct bfq_data *bfqd = bfqq->bfqd;
+               struct bfq_data *bfqd;
                struct request *prev, *next_rq;
 
+               if (!bfqq)
+                       return;
+
+               bfqd = bfqq->bfqd;
+
                /* Reposition request in its sort_list */
                elv_rb_del(&bfqq->sort_list, req);
                elv_rb_add(&bfqq->sort_list, req);
@@ -1930,6 +1935,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
        struct bfq_queue *bfqq = bfq_init_rq(rq),
                *next_bfqq = bfq_init_rq(next);
 
+       if (!bfqq)
+               return;
+
        /*
         * If next and rq belong to the same bfq_queue and next is older
         * than rq, then reposition rq in the fifo (by substituting next
@@ -4590,12 +4598,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
        spin_lock_irq(&bfqd->lock);
        bfqq = bfq_init_rq(rq);
-       if (at_head || blk_rq_is_passthrough(rq)) {
+       if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
                if (at_head)
                        list_add(&rq->queuelist, &bfqd->dispatch);
                else
                        list_add_tail(&rq->queuelist, &bfqd->dispatch);
-       } else { /* bfqq is assumed to be non null here */
+       } else {
                idle_timer_disabled = __bfq_insert_request(bfqd, rq);
                /*
                 * Update bfqq, because, if a queue merge has occurred
index 67b5fb861a5100c5294e572668881a187e478a6b..5bd90cd4b51e3c3251cf794395e96a9ab0963395 100644 (file)
@@ -291,8 +291,12 @@ bool bio_integrity_prep(struct bio *bio)
                ret = bio_integrity_add_page(bio, virt_to_page(buf),
                                             bytes, offset);
 
-               if (ret == 0)
-                       return false;
+               if (ret == 0) {
+                       printk(KERN_ERR "could not attach integrity payload\n");
+                       kfree(buf);
+                       status = BLK_STS_RESOURCE;
+                       goto err_end_io;
+               }
 
                if (ret < bytes)
                        break;
index c630e02836a80d7d406778208c659aebda8fcf06..527524134693005624d508b6720f8dfeefbcc47b 100644 (file)
@@ -1016,8 +1016,12 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
                }
 next:
                if (has_stats) {
-                       off += scnprintf(buf+off, size-off, "\n");
-                       seq_commit(sf, off);
+                       if (off < size - 1) {
+                               off += scnprintf(buf+off, size-off, "\n");
+                               seq_commit(sf, off);
+                       } else {
+                               seq_commit(sf, -1);
+                       }
                }
        }
 
index 682bc561b77b85199e6b70986aeef1a0d73578fd..074ae9376189b346e0eef9487eef6863a5a348d8 100644 (file)
@@ -198,6 +198,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        rq->internal_tag = -1;
        rq->start_time_ns = ktime_get_ns();
        rq->part = NULL;
+       refcount_set(&rq->ref, 1);
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -420,24 +421,25 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
- * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
+ * blk_set_pm_only - increment pm_only counter
  * @q: request queue pointer
- *
- * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
- * set and 1 if the flag was already set.
  */
-int blk_set_preempt_only(struct request_queue *q)
+void blk_set_pm_only(struct request_queue *q)
 {
-       return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
+       atomic_inc(&q->pm_only);
 }
-EXPORT_SYMBOL_GPL(blk_set_preempt_only);
+EXPORT_SYMBOL_GPL(blk_set_pm_only);
 
-void blk_clear_preempt_only(struct request_queue *q)
+void blk_clear_pm_only(struct request_queue *q)
 {
-       blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
-       wake_up_all(&q->mq_freeze_wq);
+       int pm_only;
+
+       pm_only = atomic_dec_return(&q->pm_only);
+       WARN_ON_ONCE(pm_only < 0);
+       if (pm_only == 0)
+               wake_up_all(&q->mq_freeze_wq);
 }
-EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
+EXPORT_SYMBOL_GPL(blk_clear_pm_only);
 
 /**
  * __blk_run_queue_uncond - run a queue whether or not it has been stopped
@@ -814,7 +816,8 @@ void blk_cleanup_queue(struct request_queue *q)
        blk_exit_queue(q);
 
        if (q->mq_ops)
-               blk_mq_free_queue(q);
+               blk_mq_exit_queue(q);
+
        percpu_ref_exit(&q->q_usage_counter);
 
        spin_lock_irq(lock);
@@ -915,7 +918,7 @@ EXPORT_SYMBOL(blk_alloc_queue);
  */
 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
-       const bool preempt = flags & BLK_MQ_REQ_PREEMPT;
+       const bool pm = flags & BLK_MQ_REQ_PREEMPT;
 
        while (true) {
                bool success = false;
@@ -923,11 +926,11 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
                rcu_read_lock();
                if (percpu_ref_tryget_live(&q->q_usage_counter)) {
                        /*
-                        * The code that sets the PREEMPT_ONLY flag is
-                        * responsible for ensuring that that flag is globally
-                        * visible before the queue is unfrozen.
+                        * The code that increments the pm_only counter is
+                        * responsible for ensuring that that counter is
+                        * globally visible before the queue is unfrozen.
                         */
-                       if (preempt || !blk_queue_preempt_only(q)) {
+                       if (pm || !blk_queue_pm_only(q)) {
                                success = true;
                        } else {
                                percpu_ref_put(&q->q_usage_counter);
@@ -952,7 +955,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
                wait_event(q->mq_freeze_wq,
                           (atomic_read(&q->mq_freeze_depth) == 0 &&
-                           (preempt || !blk_queue_preempt_only(q))) ||
+                           (pm || !blk_queue_pm_only(q))) ||
                           blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
@@ -1162,7 +1165,7 @@ int blk_init_allocated_queue(struct request_queue *q)
 {
        WARN_ON_ONCE(q->mq_ops);
 
-       q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
+       q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);
        if (!q->fq)
                return -ENOMEM;
 
index 76487948a27faeb258538a65f73a9219587a551b..256fa1ccc2bd6e1bc5be35ed992350fd7fec28d8 100644 (file)
@@ -232,6 +232,16 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 
                /* release the tag's ownership to the req cloned from */
                spin_lock_irqsave(&fq->mq_flush_lock, flags);
+
+               if (!refcount_dec_and_test(&flush_rq->ref)) {
+                       fq->rq_status = error;
+                       spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+                       return;
+               }
+
+               if (fq->rq_status != BLK_STS_OK)
+                       error = fq->rq_status;
+
                hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
                if (!q->elevator) {
                        blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
@@ -566,12 +576,12 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 EXPORT_SYMBOL(blkdev_issue_flush);
 
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
-               int node, int cmd_size)
+               int node, int cmd_size, gfp_t flags)
 {
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);
 
-       fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
+       fq = kzalloc_node(sizeof(*fq), flags, node);
        if (!fq)
                goto fail;
 
@@ -579,7 +589,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
                spin_lock_init(&fq->mq_flush_lock);
 
        rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
-       fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
+       fq->flush_rq = kzalloc_node(rq_sz, flags, node);
        if (!fq->flush_rq)
                goto fail_rq;
 
index 6b8396ccb5c44b70cb4205d584665bde592f15fc..0529e94a20f7f54f7bb8019a5cefdc0d53d69114 100644 (file)
@@ -560,6 +560,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
        u64 now = ktime_to_ns(ktime_get());
        bool issue_as_root = bio_issue_as_root_blkg(bio);
        bool enabled = false;
+       int inflight = 0;
 
        blkg = bio->bi_blkg;
        if (!blkg)
@@ -581,45 +582,28 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
                }
                rqw = &iolat->rq_wait;
 
-               atomic_dec(&rqw->inflight);
-               if (iolat->min_lat_nsec == 0)
-                       goto next;
-               iolatency_record_time(iolat, &bio->bi_issue, now,
-                                     issue_as_root);
-               window_start = atomic64_read(&iolat->window_start);
-               if (now > window_start &&
-                   (now - window_start) >= iolat->cur_win_nsec) {
-                       if (atomic64_cmpxchg(&iolat->window_start,
-                                       window_start, now) == window_start)
-                               iolatency_check_latencies(iolat, now);
+               inflight = atomic_dec_return(&rqw->inflight);
+               WARN_ON_ONCE(inflight < 0);
+               /*
+                * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
+                * submitted, so do not account for it.
+                */
+               if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
+                       iolatency_record_time(iolat, &bio->bi_issue, now,
+                                             issue_as_root);
+                       window_start = atomic64_read(&iolat->window_start);
+                       if (now > window_start &&
+                           (now - window_start) >= iolat->cur_win_nsec) {
+                               if (atomic64_cmpxchg(&iolat->window_start,
+                                            window_start, now) == window_start)
+                                       iolatency_check_latencies(iolat, now);
+                       }
                }
-next:
                wake_up(&rqw->wait);
                blkg = blkg->parent;
        }
 }
 
-static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
-{
-       struct blkcg_gq *blkg;
-
-       blkg = bio->bi_blkg;
-       while (blkg && blkg->parent) {
-               struct rq_wait *rqw;
-               struct iolatency_grp *iolat;
-
-               iolat = blkg_to_lat(blkg);
-               if (!iolat)
-                       goto next;
-
-               rqw = &iolat->rq_wait;
-               atomic_dec(&rqw->inflight);
-               wake_up(&rqw->wait);
-next:
-               blkg = blkg->parent;
-       }
-}
-
 static void blkcg_iolatency_exit(struct rq_qos *rqos)
 {
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
@@ -631,7 +615,6 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
 
 static struct rq_qos_ops blkcg_iolatency_ops = {
        .throttle = blkcg_iolatency_throttle,
-       .cleanup = blkcg_iolatency_cleanup,
        .done_bio = blkcg_iolatency_done_bio,
        .exit = blkcg_iolatency_exit,
 };
@@ -742,8 +725,10 @@ static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
 
        if (!oldval && val)
                return 1;
-       if (oldval && !val)
+       if (oldval && !val) {
+               blkcg_clear_delay(blkg);
                return -1;
+       }
        return 0;
 }
 
index cb1e6cf7ac48f4e187e915896376cdfec79c9d2a..a5ea86835fcb2dc2bb7f1a1593637709bce62b2a 100644 (file)
@@ -102,6 +102,14 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags,
        return 0;
 }
 
+static int queue_pm_only_show(void *data, struct seq_file *m)
+{
+       struct request_queue *q = data;
+
+       seq_printf(m, "%d\n", atomic_read(&q->pm_only));
+       return 0;
+}
+
 #define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
 static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(QUEUED),
@@ -132,7 +140,6 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(REGISTERED),
        QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
        QUEUE_FLAG_NAME(QUIESCED),
-       QUEUE_FLAG_NAME(PREEMPT_ONLY),
 };
 #undef QUEUE_FLAG_NAME
 
@@ -209,6 +216,7 @@ static ssize_t queue_write_hint_store(void *data, const char __user *buf,
 static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
        { "poll_stat", 0400, queue_poll_stat_show },
        { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
+       { "pm_only", 0600, queue_pm_only_show, NULL },
        { "state", 0600, queue_state_show, queue_state_write },
        { "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
        { "zone_wlock", 0400, queue_zone_wlock_show, NULL },
index aafb44224c896ca24782032da5bb6193dfb9c269..0b7297a43ccd25748c8b088b351b812fe346dc55 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/smp.h>
 
 #include <linux/blk-mq.h>
+#include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 
@@ -21,6 +22,11 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
 {
        struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
                                                  kobj);
+
+       if (hctx->flags & BLK_MQ_F_BLOCKING)
+               cleanup_srcu_struct(hctx->srcu);
+       blk_free_flush_queue(hctx->fq);
+       sbitmap_free(&hctx->ctx_map);
        free_cpumask_var(hctx->cpumask);
        kfree(hctx->ctxs);
        kfree(hctx);
index 70d839b9c3b09cdde54bf3606d0dd638cc7c97cc..684acaa96db7e11893b8ddd56b05de75e05ffaef 100644 (file)
@@ -844,7 +844,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
         */
        if (blk_mq_req_expired(rq, next))
                blk_mq_rq_timed_out(rq, reserved);
-       if (refcount_dec_and_test(&rq->ref))
+
+       if (is_flush_rq(rq, hctx))
+               rq->end_io(rq, 0);
+       else if (refcount_dec_and_test(&rq->ref))
                __blk_mq_free_request(rq);
 }
 
@@ -2157,12 +2160,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
 
-       if (hctx->flags & BLK_MQ_F_BLOCKING)
-               cleanup_srcu_struct(hctx->srcu);
-
        blk_mq_remove_cpuhp(hctx);
-       blk_free_flush_queue(hctx->fq);
-       sbitmap_free(&hctx->ctx_map);
 }
 
 static void blk_mq_exit_hw_queues(struct request_queue *q,
@@ -2203,12 +2201,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
         * runtime
         */
        hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
-                                       GFP_KERNEL, node);
+                       GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
        if (!hctx->ctxs)
                goto unregister_cpu_notifier;
 
-       if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
-                             node))
+       if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
+                               GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
                goto free_ctxs;
 
        hctx->nr_ctx = 0;
@@ -2221,7 +2219,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
            set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
                goto free_bitmap;
 
-       hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
+       hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
+                       GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
        if (!hctx->fq)
                goto exit_hctx;
 
@@ -2465,8 +2464,6 @@ void blk_mq_release(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
 
-       cancel_delayed_work_sync(&q->requeue_work);
-
        /* hctx kobj stays in hctx */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx)
@@ -2535,12 +2532,14 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
                node = blk_mq_hw_queue_to_node(q->mq_map, i);
                hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
-                                       GFP_KERNEL, node);
+                               GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+                               node);
                if (!hctxs[i])
                        break;
 
-               if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
-                                               node)) {
+               if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask,
+                                       GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+                                       node)) {
                        kfree(hctxs[i]);
                        hctxs[i] = NULL;
                        break;
@@ -2662,7 +2661,8 @@ err_exit:
 }
 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
 
-void blk_mq_free_queue(struct request_queue *q)
+/* tags can _not_ be used after returning from blk_mq_exit_queue */
+void blk_mq_exit_queue(struct request_queue *q)
 {
        struct blk_mq_tag_set   *set = q->tag_set;
 
index 9497b47e2526c62006f00101cc7acffd35e7e299..5ad9251627f80567e1fb459cf699f5d1a76a3342 100644 (file)
@@ -31,7 +31,7 @@ struct blk_mq_ctx {
 } ____cacheline_aligned_in_smp;
 
 void blk_mq_freeze_queue(struct request_queue *q);
-void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_exit_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
index 0005dfd568dd5baaf3804106b25ef6a37b2a79ee..43bcd4e7a7f9ae5da1fa005b5bca0110e7be9aa9 100644 (file)
@@ -148,24 +148,27 @@ bool rq_depth_calc_max_depth(struct rq_depth *rqd)
        return ret;
 }
 
-void rq_depth_scale_up(struct rq_depth *rqd)
+/* Returns true on success and false if scaling up wasn't possible */
+bool rq_depth_scale_up(struct rq_depth *rqd)
 {
        /*
         * Hit max in previous round, stop here
         */
        if (rqd->scaled_max)
-               return;
+               return false;
 
        rqd->scale_step--;
 
        rqd->scaled_max = rq_depth_calc_max_depth(rqd);
+       return true;
 }
 
 /*
  * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
+ * had a latency violation. Returns true on success and returns false if
+ * scaling down wasn't possible.
  */
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 {
        /*
         * Stop scaling down when we've hit the limit. This also prevents
@@ -173,7 +176,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
         * keep up.
         */
        if (rqd->max_depth == 1)
-               return;
+               return false;
 
        if (rqd->scale_step < 0 && hard_throttle)
                rqd->scale_step = 0;
@@ -182,6 +185,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 
        rqd->scaled_max = false;
        rq_depth_calc_max_depth(rqd);
+       return true;
 }
 
 void rq_qos_exit(struct request_queue *q)
index 32b02efbfa66dda638a02d07aa00c63df48a5dab..98caba3e962eeb63bd9bbdc78ce0398d6fa414ca 100644 (file)
@@ -80,22 +80,19 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
 
 static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
 {
-       struct rq_qos *cur, *prev = NULL;
-       for (cur = q->rq_qos; cur; cur = cur->next) {
-               if (cur == rqos) {
-                       if (prev)
-                               prev->next = rqos->next;
-                       else
-                               q->rq_qos = cur;
+       struct rq_qos **cur;
+
+       for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
+               if (*cur == rqos) {
+                       *cur = rqos->next;
                        break;
                }
-               prev = cur;
        }
 }
 
 bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
-void rq_depth_scale_up(struct rq_depth *rqd);
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
+bool rq_depth_scale_up(struct rq_depth *rqd);
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
 bool rq_depth_calc_max_depth(struct rq_depth *rqd);
 
 void rq_qos_cleanup(struct request_queue *, struct bio *);
index 3772671cf2bc5ad6322786b9531e9eb8bb6c7edd..bab47a17b96f4b2dc786d81b4a7d4c88435b821c 100644 (file)
@@ -836,6 +836,9 @@ static void __blk_release_queue(struct work_struct *work)
 
        blk_free_queue_stats(q->stats);
 
+       if (q->mq_ops)
+               cancel_delayed_work_sync(&q->requeue_work);
+
        blk_exit_rl(q, &q->root_rl);
 
        if (q->queue_tags)
index 01d0620a4e4a5e829c9de5c2dec0ac5e0b2b3ab3..caee658609d7385937bf7d08d4258aec70346692 100644 (file)
@@ -892,13 +892,10 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
        u64 tmp;
 
-       jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
-
-       /* Slice has just started. Consider one slice interval */
-       if (!jiffy_elapsed)
-               jiffy_elapsed_rnd = tg->td->throtl_slice;
+       jiffy_elapsed = jiffies - tg->slice_start[rw];
 
-       jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
+       /* Round up to the next throttle slice, wait time must be nonzero */
+       jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
 
        /*
         * jiffy_elapsed_rnd should not be a big value as minimum iops can be
index 0c62bf4eca7574637739b6e6b243a1cc5f72a5a7..f1de8ba483a978bd40b4fa1de8af56313e8ab52f 100644 (file)
@@ -307,7 +307,8 @@ static void calc_wb_limits(struct rq_wb *rwb)
 
 static void scale_up(struct rq_wb *rwb)
 {
-       rq_depth_scale_up(&rwb->rq_depth);
+       if (!rq_depth_scale_up(&rwb->rq_depth))
+               return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_wake_all(rwb);
@@ -316,7 +317,8 @@ static void scale_up(struct rq_wb *rwb)
 
 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
 {
-       rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
+       if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
+               return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_trace_step(rwb, "scale down");
index 977d4b5d968d542c966b094c9ff68472dcc2c1bf..1a5b67b57e6b247c12a64476b722d8dd72c685de 100644 (file)
@@ -23,6 +23,7 @@ struct blk_flush_queue {
        unsigned int            flush_queue_delayed:1;
        unsigned int            flush_pending_idx:1;
        unsigned int            flush_running_idx:1;
+       blk_status_t            rq_status;
        unsigned long           flush_pending_since;
        struct list_head        flush_queue[2];
        struct list_head        flush_data_in_flight;
@@ -123,8 +124,14 @@ static inline void __blk_get_queue(struct request_queue *q)
        kobject_get(&q->kobj);
 }
 
+static inline bool
+is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
+{
+       return hctx->fq->flush_rq == req;
+}
+
 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
-               int node, int cmd_size);
+               int node, int cmd_size, gfp_t flags);
 void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
index d5e21ce44d2ccfe79dc8bd90fa9efecb3c4706a4..69094d64106238057be1ded6766916e03b23b369 100644 (file)
@@ -376,13 +376,6 @@ done:
  * hardware queue, but we may return a request that is for a
  * different hardware queue. This is because mq-deadline has shared
  * state for all hardware queues, in terms of sorting, FIFOs, etc.
- *
- * For a zoned block device, __dd_dispatch_request() may return NULL
- * if all the queued write requests are directed at zones that are already
- * locked due to on-going write requests. In this case, make sure to mark
- * the queue as needing a restart to ensure that the queue is run again
- * and the pending writes dispatched once the target zones for the ongoing
- * write requests are unlocked in dd_finish_request().
  */
 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 {
@@ -391,9 +384,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 
        spin_lock(&dd->lock);
        rq = __dd_dispatch_request(dd);
-       if (!rq && blk_queue_is_zoned(hctx->queue) &&
-           !list_empty(&dd->fifo_list[WRITE]))
-               blk_mq_sched_mark_restart_hctx(hctx);
        spin_unlock(&dd->lock);
 
        return rq;
@@ -559,6 +549,13 @@ static void dd_prepare_request(struct request *rq, struct bio *bio)
  * spinlock so that the zone is never unlocked while deadline_fifo_request()
  * or deadline_next_request() are executing. This function is called for
  * all requests, whether or not these requests complete successfully.
+ *
+ * For a zoned block device, __dd_dispatch_request() may have stopped
+ * dispatching requests if all the queued requests are write requests directed
+ * at zones that are already locked due to on-going write requests. To ensure
+ * write request dispatch progress in this case, mark the queue as needing a
+ * restart to ensure that the queue is run again after completion of the
+ * request and zones being unlocked.
  */
 static void dd_finish_request(struct request *rq)
 {
@@ -570,6 +567,12 @@ static void dd_finish_request(struct request *rq)
 
                spin_lock_irqsave(&dd->zone_lock, flags);
                blk_req_zone_write_unlock(rq);
+               if (!list_empty(&dd->fifo_list[WRITE])) {
+                       struct blk_mq_hw_ctx *hctx;
+
+                       hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
+                       blk_mq_sched_mark_restart_hctx(hctx);
+               }
                spin_unlock_irqrestore(&dd->zone_lock, flags);
        }
 }
index f3702e533ff41044694625aad813abc58b8af4dd..d8a73d94bb309a524e47cca954a66a55f385b1f4 100644 (file)
@@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
        select MPILIB
        select CRYPTO_HASH_INFO
        select CRYPTO_AKCIPHER
+       select CRYPTO_HASH
        help
          This option provides support for asymmetric public key type handling.
          If signature generation and/or verification are to be used,
@@ -34,6 +35,7 @@ config X509_CERTIFICATE_PARSER
 config PKCS7_MESSAGE_PARSER
        tristate "PKCS#7 message parser"
        depends on X509_CERTIFICATE_PARSER
+       select CRYPTO_HASH
        select ASN1
        select OID_REGISTRY
        help
@@ -56,6 +58,7 @@ config SIGNED_PE_FILE_VERIFICATION
        bool "Support for PE file signature verification"
        depends on PKCS7_MESSAGE_PARSER=y
        depends on SYSTEM_DATA_VERIFICATION
+       select CRYPTO_HASH
        select ASN1
        select OID_REGISTRY
        help
index 4d6f51bcdfabe903c804845ecaa6182f21a3c702..af8afe5c06ea9a827df39c65b0bda11176943043 100644 (file)
@@ -67,6 +67,8 @@ struct chachapoly_req_ctx {
        unsigned int cryptlen;
        /* Actual AD, excluding IV */
        unsigned int assoclen;
+       /* request flags, with MAY_SLEEP cleared if needed */
+       u32 flags;
        union {
                struct poly_req poly;
                struct chacha_req chacha;
@@ -76,8 +78,12 @@ struct chachapoly_req_ctx {
 static inline void async_done_continue(struct aead_request *req, int err,
                                       int (*cont)(struct aead_request *))
 {
-       if (!err)
+       if (!err) {
+               struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+
+               rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
                err = cont(req);
+       }
 
        if (err != -EINPROGRESS && err != -EBUSY)
                aead_request_complete(req, err);
@@ -144,7 +150,7 @@ static int chacha_decrypt(struct aead_request *req)
                dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
        }
 
-       skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+       skcipher_request_set_callback(&creq->req, rctx->flags,
                                      chacha_decrypt_done, req);
        skcipher_request_set_tfm(&creq->req, ctx->chacha);
        skcipher_request_set_crypt(&creq->req, src, dst,
@@ -188,7 +194,7 @@ static int poly_tail(struct aead_request *req)
        memcpy(&preq->tail.cryptlen, &len, sizeof(len));
        sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail));
 
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_tail_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
        ahash_request_set_crypt(&preq->req, preq->src,
@@ -219,7 +225,7 @@ static int poly_cipherpad(struct aead_request *req)
        sg_init_table(preq->src, 1);
        sg_set_buf(preq->src, &preq->pad, padlen);
 
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_cipherpad_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
        ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
@@ -250,7 +256,7 @@ static int poly_cipher(struct aead_request *req)
        sg_init_table(rctx->src, 2);
        crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
 
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_cipher_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
        ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
@@ -280,7 +286,7 @@ static int poly_adpad(struct aead_request *req)
        sg_init_table(preq->src, 1);
        sg_set_buf(preq->src, preq->pad, padlen);
 
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_adpad_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
        ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
@@ -304,7 +310,7 @@ static int poly_ad(struct aead_request *req)
        struct poly_req *preq = &rctx->u.poly;
        int err;
 
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_ad_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
        ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
@@ -331,7 +337,7 @@ static int poly_setkey(struct aead_request *req)
        sg_init_table(preq->src, 1);
        sg_set_buf(preq->src, rctx->key, sizeof(rctx->key));
 
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_setkey_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
        ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
@@ -355,7 +361,7 @@ static int poly_init(struct aead_request *req)
        struct poly_req *preq = &rctx->u.poly;
        int err;
 
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_init_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
 
@@ -393,7 +399,7 @@ static int poly_genkey(struct aead_request *req)
 
        chacha_iv(creq->iv, req, 0);
 
-       skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+       skcipher_request_set_callback(&creq->req, rctx->flags,
                                      poly_genkey_done, req);
        skcipher_request_set_tfm(&creq->req, ctx->chacha);
        skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
@@ -433,7 +439,7 @@ static int chacha_encrypt(struct aead_request *req)
                dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
        }
 
-       skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+       skcipher_request_set_callback(&creq->req, rctx->flags,
                                      chacha_encrypt_done, req);
        skcipher_request_set_tfm(&creq->req, ctx->chacha);
        skcipher_request_set_crypt(&creq->req, src, dst,
@@ -451,6 +457,7 @@ static int chachapoly_encrypt(struct aead_request *req)
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 
        rctx->cryptlen = req->cryptlen;
+       rctx->flags = aead_request_flags(req);
 
        /* encrypt call chain:
         * - chacha_encrypt/done()
@@ -472,6 +479,7 @@ static int chachapoly_decrypt(struct aead_request *req)
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 
        rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
+       rctx->flags = aead_request_flags(req);
 
        /* decrypt call chain:
         * - poly_genkey/done()
index d9f192b953b22b06ea4adc6b921a1d59a1dde1f2..591b52d3bdca31f4546c4f6b7dd1e1cd6727ed95 100644 (file)
@@ -34,6 +34,7 @@ static int ghash_setkey(struct crypto_shash *tfm,
                        const u8 *key, unsigned int keylen)
 {
        struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+       be128 k;
 
        if (keylen != GHASH_BLOCK_SIZE) {
                crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -42,7 +43,12 @@ static int ghash_setkey(struct crypto_shash *tfm,
 
        if (ctx->gf128)
                gf128mul_free_4k(ctx->gf128);
-       ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
+
+       BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
+       memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
+       ctx->gf128 = gf128mul_init_4k_lle(&k);
+       memzero_explicit(&k, GHASH_BLOCK_SIZE);
+
        if (!ctx->gf128)
                return -ENOMEM;
 
index 7c3382facc82e8bb706a48029d90875cafb6a156..600bd288881ddd69c6bb5d5487eb73eaea5c1789 100644 (file)
        x4 ^= x2;                                       \
        })
 
-static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k)
+/*
+ * both gcc and clang have misoptimized this function in the past,
+ * producing horrible object code from spilling temporary variables
+ * on the stack. Forcing this part out of line avoids that.
+ */
+static noinline void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2,
+                                          u32 r3, u32 r4, u32 *k)
 {
        k += 100;
        S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
index b664cf867f5fb2cb358234f139b16b90795c5f64..a8750b4ebf264cd4a52724199e389fcb06b55d9f 100644 (file)
@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
        return max(start, end_page);
 }
 
-static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 {
        u8 *addr;
 
@@ -103,19 +103,21 @@ static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
        addr = skcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize,
                               (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+       return 0;
 }
 
 int skcipher_walk_done(struct skcipher_walk *walk, int err)
 {
-       unsigned int n; /* bytes processed */
-       bool more;
+       unsigned int n = walk->nbytes;
+       unsigned int nbytes = 0;
 
-       if (unlikely(err < 0))
+       if (!n)
                goto finish;
 
-       n = walk->nbytes - err;
-       walk->total -= n;
-       more = (walk->total != 0);
+       if (likely(err >= 0)) {
+               n -= err;
+               nbytes = walk->total - n;
+       }
 
        if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
                                    SKCIPHER_WALK_SLOW |
@@ -131,7 +133,7 @@ unmap_src:
                memcpy(walk->dst.virt.addr, walk->page, n);
                skcipher_unmap_dst(walk);
        } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
-               if (err) {
+               if (err > 0) {
                        /*
                         * Didn't process all bytes.  Either the algorithm is
                         * broken, or this was the last step and it turned out
@@ -139,27 +141,29 @@ unmap_src:
                         * the algorithm requires it.
                         */
                        err = -EINVAL;
-                       goto finish;
-               }
-               skcipher_done_slow(walk, n);
-               goto already_advanced;
+                       nbytes = 0;
+               } else
+                       n = skcipher_done_slow(walk, n);
        }
 
+       if (err > 0)
+               err = 0;
+
+       walk->total = nbytes;
+       walk->nbytes = 0;
+
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
-already_advanced:
-       scatterwalk_done(&walk->in, 0, more);
-       scatterwalk_done(&walk->out, 1, more);
+       scatterwalk_done(&walk->in, 0, nbytes);
+       scatterwalk_done(&walk->out, 1, nbytes);
 
-       if (more) {
+       if (nbytes) {
                crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
                             CRYPTO_TFM_REQ_MAY_SLEEP : 0);
                return skcipher_walk_next(walk);
        }
-       err = 0;
-finish:
-       walk->nbytes = 0;
 
+finish:
        /* Short-circuit for the common/fast path. */
        if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
                goto out;
index fc447410ae4d17709adc0e1c56aba6ff7e02f633..a448cdf567188ea50f533a2cbfdb4d527c7a2576 100644 (file)
@@ -282,9 +282,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
        }
 
        if (acpi_duplicate_processor_id(pr->acpi_id)) {
-               dev_err(&device->dev,
-                       "Failed to get unique processor _UID (0x%x)\n",
-                       pr->acpi_id);
+               if (pr->acpi_id == 0xff)
+                       dev_info_once(&device->dev,
+                               "Entry not well-defined, consider updating BIOS\n");
+               else
+                       dev_err(&device->dev,
+                               "Failed to get unique processor _UID (0x%x)\n",
+                               pr->acpi_id);
                return -ENODEV;
        }
 
index d73afb562ad9587b828ce9471958dc77046b082b..1a23e7aa74df7f258361f72eb94a4349949e862e 100644 (file)
@@ -73,6 +73,12 @@ module_param(report_key_events, int, 0644);
 MODULE_PARM_DESC(report_key_events,
        "0: none, 1: output changes, 2: brightness changes, 3: all");
 
+static int hw_changes_brightness = -1;
+module_param(hw_changes_brightness, int, 0644);
+MODULE_PARM_DESC(hw_changes_brightness,
+       "Set this to 1 on buggy hw which changes the brightness itself when "
+       "a hotkey is pressed: -1: auto, 0: normal 1: hw-changes-brightness");
+
 /*
  * Whether the struct acpi_video_device_attrib::device_id_scheme bit should be
  * assumed even if not actually set.
@@ -418,6 +424,14 @@ static int video_set_report_key_events(const struct dmi_system_id *id)
        return 0;
 }
 
+static int video_hw_changes_brightness(
+       const struct dmi_system_id *d)
+{
+       if (hw_changes_brightness == -1)
+               hw_changes_brightness = 1;
+       return 0;
+}
+
 static const struct dmi_system_id video_dmi_table[] = {
        /*
         * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -542,6 +556,21 @@ static const struct dmi_system_id video_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
                },
        },
+       /*
+        * Some machines change the brightness themselves when a brightness
+        * hotkey gets pressed, despite us telling them not to. In this case
+        * acpi_video_device_notify() should only call backlight_force_update(
+        * BACKLIGHT_UPDATE_HOTKEY) and not do anything else.
+        */
+       {
+        /* https://bugzilla.kernel.org/show_bug.cgi?id=204077 */
+        .callback = video_hw_changes_brightness,
+        .ident = "Packard Bell EasyNote MZ35",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Packard Bell"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "EasyNote MZ35"),
+               },
+       },
        {}
 };
 
@@ -1625,6 +1654,14 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
        bus = video_device->video;
        input = bus->input;
 
+       if (hw_changes_brightness > 0) {
+               if (video_device->backlight)
+                       backlight_force_update(video_device->backlight,
+                                              BACKLIGHT_UPDATE_HOTKEY);
+               acpi_notifier_call_chain(device, event, 0);
+               return;
+       }
+
        switch (event) {
        case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS:        /* Cycle brightness */
                brightness_switch_event(video_device, event);
index 704bebbd35b06adc0d9faeb98f22535586330d61..298180bf7e3c16d73ac3d71d2d4d208633ba44f5 100644 (file)
@@ -69,7 +69,8 @@ acpi_status
 acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked);
 
 acpi_status
-acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
+acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
+                         u8 clear_on_enable);
 
 acpi_status
 acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
index e10fec99a182eca363167e27e1a6338e42586c09..4b5d3b4c627a723f931bc94713f3cacbb799e1bb 100644 (file)
@@ -146,6 +146,7 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
  * FUNCTION:    acpi_ev_add_gpe_reference
  *
  * PARAMETERS:  gpe_event_info          - Add a reference to this GPE
+ *              clear_on_enable         - Clear GPE status before enabling it
  *
  * RETURN:      Status
  *
@@ -155,7 +156,8 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
  ******************************************************************************/
 
 acpi_status
-acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
+acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
+                         u8 clear_on_enable)
 {
        acpi_status status = AE_OK;
 
@@ -170,6 +172,10 @@ acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
 
                /* Enable on first reference */
 
+               if (clear_on_enable) {
+                       (void)acpi_hw_clear_gpe(gpe_event_info);
+               }
+
                status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
                if (ACPI_SUCCESS(status)) {
                        status = acpi_ev_enable_gpe(gpe_event_info);
index b253063b09d39c1c3c5bb3c81a375c8cec8756cf..8d96270ed8c738e6376280877562745238a801cf 100644 (file)
@@ -453,7 +453,7 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                                continue;
                        }
 
-                       status = acpi_ev_add_gpe_reference(gpe_event_info);
+                       status = acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
                        if (ACPI_FAILURE(status)) {
                                ACPI_EXCEPTION((AE_INFO, status,
                                        "Could not enable GPE 0x%02X",
index febc332b00ac1313716b96e01c6fc3cc977e3ac3..841557bda64191602f2766425ee19d85a6d8f88b 100644 (file)
@@ -971,7 +971,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
              ACPI_GPE_DISPATCH_METHOD) ||
             (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
              ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) {
-               (void)acpi_ev_add_gpe_reference(gpe_event_info);
+               (void)acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
                if (ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
 
                        /* Poll edge triggered GPEs to handle existing events */
index b2d5f66cc1b055863492567007fee2444bfa1150..4188731e7c406ea20d4be04c65eb3a478e121d40 100644 (file)
@@ -108,7 +108,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
        if (gpe_event_info) {
                if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
                    ACPI_GPE_DISPATCH_NONE) {
-                       status = acpi_ev_add_gpe_reference(gpe_event_info);
+                       status = acpi_ev_add_gpe_reference(gpe_event_info, TRUE);
                        if (ACPI_SUCCESS(status) &&
                            ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
 
index 43c2615434b48b8a6576d70662f9fd5af4e6f79b..e11b5da6f828f3bd8fffa23094b1a31e797d7c9c 100644 (file)
@@ -616,8 +616,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
 
        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)node->node_data;
-       if (idx > its->its_count) {
-               dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
+       if (idx >= its->its_count) {
+               dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
                        idx, its->its_count);
                return -ENXIO;
        }
index 995c4d8922b12eef963a9cc1cab591ee7b404b1d..761f0c19a451266856838ad221afae623f7f6610 100644 (file)
@@ -30,7 +30,9 @@
 
 #include "internal.h"
 
+#ifdef CONFIG_DMI
 static const struct dmi_system_id acpi_rev_dmi_table[] __initconst;
+#endif
 
 /*
  * POLICY: If *anything* doesn't work, put it on the blacklist.
@@ -74,7 +76,9 @@ int __init acpi_blacklisted(void)
        }
 
        (void)early_acpi_osi_init();
+#ifdef CONFIG_DMI
        dmi_check_system(acpi_rev_dmi_table);
+#endif
 
        return blacklisted;
 }
index d9ce4b162e2ce0533039cbe2d5ef4704ecb24642..41228e545e82ada88268f2f28157ad05554733c6 100644 (file)
@@ -369,8 +369,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
        union acpi_object  *psd = NULL;
        struct acpi_psd_package *pdomain;
 
-       status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
-                       ACPI_TYPE_PACKAGE);
+       status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
+                                           &buffer, ACPI_TYPE_PACKAGE);
+       if (status == AE_NOT_FOUND)     /* _PSD is optional */
+               return 0;
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
@@ -907,8 +909,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
                        pcc_data[pcc_ss_id]->refcount--;
                        if (!pcc_data[pcc_ss_id]->refcount) {
                                pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
-                               pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
                                kfree(pcc_data[pcc_ss_id]);
+                               pcc_data[pcc_ss_id] = NULL;
                        }
                }
        }
index e967c1173ba3280ac1cb5b11c9785a785619b320..222ea3f12f41e132f1d14b3ade837bbf205987c5 100644 (file)
@@ -48,8 +48,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
        if ((*ppos > max_size) ||
            (*ppos + count > max_size) ||
            (*ppos + count < count) ||
-           (count > uncopied_bytes))
+           (count > uncopied_bytes)) {
+               kfree(buf);
                return -EINVAL;
+       }
 
        if (copy_from_user(buf + (*ppos), user_buf, count)) {
                kfree(buf);
@@ -69,6 +71,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
                add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
        }
 
+       kfree(buf);
        return count;
 }
 
index c576a6fe4ebb3044fc8f268fe338d3a50084cd6f..94ded9513c73b0bede043e85448251813b32806c 100644 (file)
@@ -462,8 +462,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
                 * No IRQ known to the ACPI subsystem - maybe the BIOS /
                 * driver reported one, then use it. Exit in any case.
                 */
-               if (!acpi_pci_irq_valid(dev, pin))
+               if (!acpi_pci_irq_valid(dev, pin)) {
+                       kfree(entry);
                        return 0;
+               }
 
                if (acpi_isa_register_gsi(dev))
                        dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
index da031b1df6f5c4fcfa7ed603995b8e910e729d4d..9dbf86a0c8277aa6d19ce183536049bc88d755bd 100644 (file)
@@ -509,6 +509,44 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
        return retval;
 }
 
+/**
+ * check_acpi_cpu_flag() - Determine if CPU node has a flag set
+ * @cpu: Kernel logical CPU number
+ * @rev: The minimum PPTT revision defining the flag
+ * @flag: The flag itself
+ *
+ * Check the node representing a CPU for a given flag.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found or
+ *        the table revision isn't new enough.
+ *        1, any passed flag set
+ *        0, flag unset
+ */
+static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
+{
+       struct acpi_table_header *table;
+       acpi_status status;
+       u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+       struct acpi_pptt_processor *cpu_node = NULL;
+       int ret = -ENOENT;
+
+       status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+       if (ACPI_FAILURE(status)) {
+               pr_warn_once("No PPTT table found, cpu topology may be inaccurate\n");
+               return ret;
+       }
+
+       if (table->revision >= rev)
+               cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+
+       if (cpu_node)
+               ret = (cpu_node->flags & flag) != 0;
+
+       acpi_put_table(table);
+
+       return ret;
+}
+
 /**
  * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
  * @cpu: Kernel logical cpu number
@@ -573,6 +611,20 @@ int cache_setup_acpi(unsigned int cpu)
        return status;
 }
 
+/**
+ * acpi_pptt_cpu_is_thread() - Determine if CPU is a thread
+ * @cpu: Kernel logical CPU number
+ *
+ * Return: 1, a thread
+ *         0, not a thread
+ *         -ENOENT ,if the PPTT doesn't exist, the CPU cannot be found or
+ *         the table revision isn't new enough.
+ */
+int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+       return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD);
+}
+
 /**
  * find_acpi_cpu_topology() - Determine a unique topology value for a given cpu
  * @cpu: Kernel logical cpu number
index 5d67f5fec6c1bf82bff8fa90113a812d4292a1f2..6e04e7a707a12c3ab5d4bb7fd7eef6172e588a4e 100644 (file)
@@ -1960,8 +1960,18 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
 
 static void binder_free_transaction(struct binder_transaction *t)
 {
-       if (t->buffer)
-               t->buffer->transaction = NULL;
+       struct binder_proc *target_proc = t->to_proc;
+
+       if (target_proc) {
+               binder_inner_proc_lock(target_proc);
+               if (t->buffer)
+                       t->buffer->transaction = NULL;
+               binder_inner_proc_unlock(target_proc);
+       }
+       /*
+        * If the transaction has no target_proc, then
+        * t->buffer->transaction has already been cleared.
+        */
        kfree(t);
        binder_stats_deleted(BINDER_STAT_TRANSACTION);
 }
@@ -2838,7 +2848,7 @@ static void binder_transaction(struct binder_proc *proc,
                        else
                                return_error = BR_DEAD_REPLY;
                        mutex_unlock(&context->context_mgr_node_lock);
-                       if (target_node && target_proc == proc) {
+                       if (target_node && target_proc->pid == proc->pid) {
                                binder_user_error("%d:%d got transaction to context manager from process owning it\n",
                                                  proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
@@ -3484,10 +3494,12 @@ static int binder_thread_write(struct binder_proc *proc,
                                     buffer->debug_id,
                                     buffer->transaction ? "active" : "finished");
 
+                       binder_inner_proc_lock(proc);
                        if (buffer->transaction) {
                                buffer->transaction->buffer = NULL;
                                buffer->transaction = NULL;
                        }
+                       binder_inner_proc_unlock(proc);
                        if (buffer->async_transaction && buffer->target_node) {
                                struct binder_node *buf_node;
                                struct binder_work *w;
index 021ce46e2e57343b181976a980abef2e16e798d5..fa1c5a4429579c6a8396ed075852c2e33c8135ec 100644 (file)
@@ -81,6 +81,12 @@ enum board_ids {
        board_ahci_sb700,       /* for SB700 and SB800 */
        board_ahci_vt8251,
 
+       /*
+        * board IDs for Intel chipsets that support more than 6 ports
+        * *and* end up needing the PCS quirk.
+        */
+       board_ahci_pcs7,
+
        /* aliases */
        board_ahci_mcp_linux    = board_ahci_mcp65,
        board_ahci_mcp67        = board_ahci_mcp65,
@@ -236,6 +242,12 @@ static const struct ata_port_info ahci_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_vt8251_ops,
        },
+       [board_ahci_pcs7] = {
+               .flags          = AHCI_FLAG_COMMON,
+               .pio_mask       = ATA_PIO4,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &ahci_ops,
+       },
 };
 
 static const struct pci_device_id ahci_pci_tbl[] = {
@@ -280,26 +292,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
        { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
        { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
-       { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
-       { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
        { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
        { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
        { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
@@ -639,30 +651,6 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
        ahci_save_initial_config(&pdev->dev, hpriv);
 }
 
-static int ahci_pci_reset_controller(struct ata_host *host)
-{
-       struct pci_dev *pdev = to_pci_dev(host->dev);
-       int rc;
-
-       rc = ahci_reset_controller(host);
-       if (rc)
-               return rc;
-
-       if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
-               struct ahci_host_priv *hpriv = host->private_data;
-               u16 tmp16;
-
-               /* configure PCS */
-               pci_read_config_word(pdev, 0x92, &tmp16);
-               if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
-                       tmp16 |= hpriv->port_map;
-                       pci_write_config_word(pdev, 0x92, tmp16);
-               }
-       }
-
-       return 0;
-}
-
 static void ahci_pci_init_controller(struct ata_host *host)
 {
        struct ahci_host_priv *hpriv = host->private_data;
@@ -865,7 +853,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
        struct ata_host *host = pci_get_drvdata(pdev);
        int rc;
 
-       rc = ahci_pci_reset_controller(host);
+       rc = ahci_reset_controller(host);
        if (rc)
                return rc;
        ahci_pci_init_controller(host);
@@ -900,7 +888,7 @@ static int ahci_pci_device_resume(struct device *dev)
                ahci_mcp89_apple_enable(pdev);
 
        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
-               rc = ahci_pci_reset_controller(host);
+               rc = ahci_reset_controller(host);
                if (rc)
                        return rc;
 
@@ -1635,6 +1623,36 @@ update_policy:
                ap->target_lpm_policy = policy;
 }
 
+static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
+{
+       const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev);
+       u16 tmp16;
+
+       /*
+        * Only apply the 6-port PCS quirk for known legacy platforms.
+        */
+       if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
+               return;
+
+       /* Skip applying the quirk on Denverton and beyond */
+       if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
+               return;
+
+       /*
+        * port_map is determined from PORTS_IMPL PCI register which is
+        * implemented as write or write-once register.  If the register
+        * isn't programmed, ahci automatically generates it from number
+        * of ports, which is good enough for PCS programming. It is
+        * otherwise expected that platform firmware enables the ports
+        * before the OS boots.
+        */
+       pci_read_config_word(pdev, PCS_6, &tmp16);
+       if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
+               tmp16 |= hpriv->port_map;
+               pci_write_config_word(pdev, PCS_6, tmp16);
+       }
+}
+
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        unsigned int board_id = ent->driver_data;
@@ -1747,6 +1765,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* save initial config */
        ahci_pci_save_initial_config(pdev, hpriv);
 
+       /*
+        * If platform firmware failed to enable ports, try to enable
+        * them here.
+        */
+       ahci_intel_pcs_quirk(pdev, hpriv);
+
        /* prepare host */
        if (hpriv->cap & HOST_CAP_NCQ) {
                pi.flags |= ATA_FLAG_NCQ;
@@ -1856,7 +1880,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc)
                return rc;
 
-       rc = ahci_pci_reset_controller(host);
+       rc = ahci_reset_controller(host);
        if (rc)
                return rc;
 
index 6a1515f0da4021d755d433bf618cc423487fe930..9290e787abdc4579cee228f366a26b53e08ec843 100644 (file)
@@ -261,6 +261,8 @@ enum {
                                          ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
 
        ICH_MAP                         = 0x90, /* ICH MAP register */
+       PCS_6                           = 0x92, /* 6 port PCS */
+       PCS_7                           = 0x94, /* 7+ port PCS (Denverton) */
 
        /* em constants */
        EM_MAX_SLOTS                    = 8,
index c92c10d553746da95702677b9b380a0da099b242..5bece9752ed6892f69bf54d61d94316d84aba9d0 100644 (file)
@@ -313,6 +313,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
                hpriv->phys[port] = NULL;
                rc = 0;
                break;
+       case -EPROBE_DEFER:
+               /* Do not complain yet */
+               break;
 
        default:
                dev_err(dev,
index 01306c018398fa16583cab46bd1e51b9ccf86309..ccc80ff57eb2018adf9657aaa284a5aa3b1e52a4 100644 (file)
@@ -1490,7 +1490,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
        tf->hob_lbah = buf[10];
        tf->nsect = buf[12];
        tf->hob_nsect = buf[13];
-       if (ata_id_has_ncq_autosense(dev->id))
+       if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
                tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
 
        return 0;
@@ -1737,7 +1737,8 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
        memcpy(&qc->result_tf, &tf, sizeof(tf));
        qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
        qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
-       if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) {
+       if (dev->class == ATA_DEV_ZAC &&
+           ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
                char sense_key, asc, ascq;
 
                sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
@@ -1791,10 +1792,11 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
        }
 
        switch (qc->dev->class) {
-       case ATA_DEV_ATA:
        case ATA_DEV_ZAC:
                if (stat & ATA_SENSE)
                        ata_eh_request_sense(qc, qc->scsicmd);
+               /* fall through */
+       case ATA_DEV_ATA:
                if (err & ATA_ICRC)
                        qc->err_mask |= AC_ERR_ATA_BUS;
                if (err & (ATA_UNC | ATA_AMNF))
index 1984fc78c750b42505a5178761366dce33fa4089..3a64fa4aaf7e34257a30b0141b40b589fbbd3005 100644 (file)
@@ -1803,6 +1803,21 @@ nothing_to_do:
        return 1;
 }
 
+static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
+{
+       struct request *rq = scmd->request;
+       u32 req_blocks;
+
+       if (!blk_rq_is_passthrough(rq))
+               return true;
+
+       req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
+       if (n_blocks > req_blocks)
+               return false;
+
+       return true;
+}
+
 /**
  *     ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
  *     @qc: Storage for translated ATA taskfile
@@ -1847,6 +1862,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
                scsi_10_lba_len(cdb, &block, &n_block);
                if (cdb[1] & (1 << 3))
                        tf_flags |= ATA_TFLAG_FUA;
+               if (!ata_check_nblocks(scmd, n_block))
+                       goto invalid_fld;
                break;
        case READ_6:
        case WRITE_6:
@@ -1861,6 +1878,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
                 */
                if (!n_block)
                        n_block = 256;
+               if (!ata_check_nblocks(scmd, n_block))
+                       goto invalid_fld;
                break;
        case READ_16:
        case WRITE_16:
@@ -1871,6 +1890,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
                scsi_16_lba_len(cdb, &block, &n_block);
                if (cdb[1] & (1 << 3))
                        tf_flags |= ATA_TFLAG_FUA;
+               if (!ata_check_nblocks(scmd, n_block))
+                       goto invalid_fld;
                break;
        default:
                DPRINTK("no-byte command\n");
index c5ea0fc635e54eb800cb12d8812ea5e508c388cc..873cc0906055129eff641428db31c118cbdbeb21 100644 (file)
@@ -674,6 +674,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
        unsigned int offset;
        unsigned char *buf;
 
+       if (!qc->cursg) {
+               qc->curbytes = qc->nbytes;
+               return;
+       }
        if (qc->curbytes == qc->nbytes - qc->sect_size)
                ap->hsm_task_state = HSM_ST_LAST;
 
@@ -699,6 +703,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 
        if (qc->cursg_ofs == qc->cursg->length) {
                qc->cursg = sg_next(qc->cursg);
+               if (!qc->cursg)
+                       ap->hsm_task_state = HSM_ST_LAST;
                qc->cursg_ofs = 0;
        }
 }
index 173e6f2dd9af0f12afdc1fee7e372cfa4291e0aa..eefda51f97d351bda5d8437e2d83aaeae16b30f6 100644 (file)
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
        unsigned int ret;
        struct rm_feature_desc *desc;
        struct ata_taskfile tf;
-       static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
+       static const char cdb[ATAPI_CDB_LEN] = {  GPCMD_GET_CONFIGURATION,
                        2,      /* only 1 feature descriptor requested */
                        0, 3,   /* 3, removable medium feature */
                        0, 0, 0,/* reserved */
index 2e2efa577437e82ecdeb20d9d41b6ce63635bf16..8c37294f1d1ee4c07dacaec843a0190b0320d5fc 100644 (file)
@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI
          make the card work).
 
 config ATM_NICSTAR_USE_IDT77105
-       bool "Use IDT77015 PHY driver (25Mbps)"
+       bool "Use IDT77105 PHY driver (25Mbps)"
        depends on ATM_NICSTAR
        help
          Support for the PHYsical layer chip in ForeRunner LE25 cards. In
index 82532c299bb5964a429e81353b9c5f94d9bb5ed2..008905d4152a39e843ac339f59cbfb989ab1fd4e 100644 (file)
@@ -63,6 +63,7 @@
 #include <asm/byteorder.h>  
 #include <linux/vmalloc.h>
 #include <linux/jiffies.h>
+#include <linux/nospec.h>
 #include "iphase.h"              
 #include "suni.h"                
 #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
@@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
    }
    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
    board = ia_cmds.status;
-   if ((board < 0) || (board > iadev_count))
-         board = 0;    
+
+       if ((board < 0) || (board > iadev_count))
+               board = 0;
+       board = array_index_nospec(board, iadev_count + 1);
+
    iadev = ia_dev[board];
    switch (ia_cmds.cmd) {
    case MEMDUMP:
index 3b25a643058c9dde38511d646da8700e53c46837..0b8e2a7d6e9344009c48d78115ebf853f8ce924e 100644 (file)
@@ -1618,6 +1618,8 @@ static void panel_attach(struct parport *port)
        return;
 
 err_lcd_unreg:
+       if (scan_timer.function)
+               del_timer_sync(&scan_timer);
        if (lcd.enabled)
                charlcd_unregister(lcd.charlcd);
 err_unreg_device:
index 7a419a7a6235b166625bcc4216de79a66249188c..559b047de9f757db3788874d7cfe05b59402b4d7 100644 (file)
@@ -66,6 +66,9 @@ struct driver_private {
  *     probed first.
  * @device - pointer back to the struct device that this structure is
  * associated with.
+ * @dead - This device is currently either in the process of or has been
+ *     removed from the system. Any asynchronous events scheduled for this
+ *     device should exit without taking any action.
  *
  * Nothing outside of the driver core should ever touch these fields.
  */
@@ -76,6 +79,7 @@ struct device_private {
        struct klist_node knode_bus;
        struct list_head deferred_probe;
        struct device *device;
+       u8 dead:1;
 };
 #define to_device_private_parent(obj)  \
        container_of(obj, struct device_private, knode_parent)
index dd6a6850cb450d5029474bc805dcd9624a65e529..ce015ce2977c47a898b6c1d0de66ac2e9b2d89c4 100644 (file)
@@ -653,7 +653,8 @@ static int cacheinfo_cpu_pre_down(unsigned int cpu)
 
 static int __init cacheinfo_sysfs_init(void)
 {
-       return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
+       return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
+                                "base/cacheinfo:online",
                                 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
 }
 device_initcall(cacheinfo_sysfs_init);
index 92e2c32c2227015140ecad6def65e765b54942ed..985ccced33a2152f0b49e11f8149bc4ec6f4cdc8 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright (c) 2006 Novell, Inc.
  */
 
+#include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/fwnode.h>
@@ -1648,12 +1649,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
  */
 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
 {
+       unsigned int ref;
+
        /* see if we live in a "glue" directory */
        if (!live_in_glue_dir(glue_dir, dev))
                return;
 
        mutex_lock(&gdp_mutex);
-       if (!kobject_has_children(glue_dir))
+       /**
+        * There is a race condition between removing glue directory
+        * and adding a new device under the glue directory.
+        *
+        * CPU1:                                         CPU2:
+        *
+        * device_add()
+        *   get_device_parent()
+        *     class_dir_create_and_add()
+        *       kobject_add_internal()
+        *         create_dir()    // create glue_dir
+        *
+        *                                               device_add()
+        *                                                 get_device_parent()
+        *                                                   kobject_get() // get glue_dir
+        *
+        * device_del()
+        *   cleanup_glue_dir()
+        *     kobject_del(glue_dir)
+        *
+        *                                               kobject_add()
+        *                                                 kobject_add_internal()
+        *                                                   create_dir() // in glue_dir
+        *                                                     sysfs_create_dir_ns()
+        *                                                       kernfs_create_dir_ns(sd)
+        *
+        *       sysfs_remove_dir() // glue_dir->sd=NULL
+        *       sysfs_put()        // free glue_dir->sd
+        *
+        *                                                         // sd is freed
+        *                                                         kernfs_new_node(sd)
+        *                                                           kernfs_get(glue_dir)
+        *                                                           kernfs_add_one()
+        *                                                           kernfs_put()
+        *
+        * Before CPU1 remove last child device under glue dir, if CPU2 add
+        * a new device under glue dir, the glue_dir kobject reference count
+        * will be increase to 2 in kobject_get(k). And CPU2 has been called
+        * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir()
+        * and sysfs_put(). This result in glue_dir->sd is freed.
+        *
+        * Then the CPU2 will see a stale "empty" but still potentially used
+        * glue dir around in kernfs_new_node().
+        *
+        * In order to avoid this happening, we also should make sure that
+        * kernfs_node for glue_dir is released in CPU1 only when refcount
+        * for glue_dir kobj is 1.
+        */
+       ref = kref_read(&glue_dir->kref);
+       if (!kobject_has_children(glue_dir) && !--ref)
                kobject_del(glue_dir);
        kobject_put(glue_dir);
        mutex_unlock(&gdp_mutex);
@@ -2031,6 +2083,24 @@ void put_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(put_device);
 
+bool kill_device(struct device *dev)
+{
+       /*
+        * Require the device lock and set the "dead" flag to guarantee that
+        * the update behavior is consistent with the other bitfields near
+        * it and that we cannot have an asynchronous probe routine trying
+        * to run while we are tearing out the bus/class/sysfs from
+        * underneath the device.
+        */
+       lockdep_assert_held(&dev->mutex);
+
+       if (dev->p->dead)
+               return false;
+       dev->p->dead = true;
+       return true;
+}
+EXPORT_SYMBOL_GPL(kill_device);
+
 /**
  * device_del - delete device from system.
  * @dev: device.
@@ -2050,6 +2120,10 @@ void device_del(struct device *dev)
        struct kobject *glue_dir = NULL;
        struct class_interface *class_intf;
 
+       device_lock(dev);
+       kill_device(dev);
+       device_unlock(dev);
+
        /* Notify clients of device removal.  This call must come
         * before dpm_sysfs_remove().
         */
@@ -2870,6 +2944,8 @@ void device_shutdown(void)
        wait_for_device_probe();
        device_block_probing();
 
+       cpufreq_suspend();
+
        spin_lock(&devices_kset->list_lock);
        /*
         * Walk the devices list backward, shutting down each in turn.
index d48b310c4760377408863f5ac017cb4901ba2f15..11d24a552ee499103a35b9ef18d858d98cf4798b 100644 (file)
@@ -725,15 +725,6 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
        bool async_allowed;
        int ret;
 
-       /*
-        * Check if device has already been claimed. This may
-        * happen with driver loading, device discovery/registration,
-        * and deferred probe processing happens all at once with
-        * multiple threads.
-        */
-       if (dev->driver)
-               return -EBUSY;
-
        ret = driver_match_device(drv, dev);
        if (ret == 0) {
                /* no match */
@@ -768,6 +759,15 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
 
        device_lock(dev);
 
+       /*
+        * Check if device has already been removed or claimed. This may
+        * happen with driver loading, device discovery/registration,
+        * and deferred probe processing happens all at once with
+        * multiple threads.
+        */
+       if (dev->p->dead || dev->driver)
+               goto out_unlock;
+
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);
 
@@ -778,7 +778,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
 
        if (dev->parent)
                pm_runtime_put(dev->parent);
-
+out_unlock:
        device_unlock(dev);
 
        put_device(dev);
@@ -891,7 +891,7 @@ static int __driver_attach(struct device *dev, void *data)
        if (dev->parent && dev->bus->need_parent_lock)
                device_lock(dev->parent);
        device_lock(dev);
-       if (!dev->driver)
+       if (!dev->p->dead && !dev->driver)
                driver_probe_device(drv, dev);
        device_unlock(dev);
        if (dev->parent && dev->bus->need_parent_lock)
index b5c865fe263b25303b72bc42a5715e4fba0e746d..818d8c37d70a9b7dbf7d93fb1383574bcc275c00 100644 (file)
@@ -659,7 +659,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
        /* Also permit LSMs and IMA to fail firmware sysfs fallback */
        ret = security_kernel_load_data(LOADING_FIRMWARE);
        if (ret < 0)
-               return ret;
+               return false;
 
        return fw_force_sysfs_fallback(opt_flags);
 }
index 817320c7c4c1b72cf248b73a184bc8dee6ef28ac..85ee64d0a44e9009a7d92dc38af6b2888141c675 100644 (file)
@@ -554,6 +554,9 @@ store_soft_offline_page(struct device *dev,
        pfn >>= PAGE_SHIFT;
        if (!pfn_valid(pfn))
                return -ENXIO;
+       /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
+       if (!pfn_to_online_page(pfn))
+               return -EIO;
        ret = soft_offline_page(pfn_to_page(pfn), 0);
        return ret == 0 ? count : ret;
 }
index 6ad5ef48b61eee488e81ec4364b9097f31973a1f..8cd2ac650b50551bf716b8d9fd699bbec6d5b42f 100644 (file)
@@ -44,7 +44,7 @@ config REGMAP_IRQ
 
 config REGMAP_SOUNDWIRE
        tristate
-       depends on SOUNDWIRE_BUS
+       depends on SOUNDWIRE
 
 config REGMAP_SCCB
        tristate
index 87b562e49a435ffedac0f71b4652b669611e9a0e..c9687c8b23478138fe5824be4a7bf1737a384d88 100644 (file)
@@ -575,6 +575,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
        }
 
        if (!strcmp(name, "dummy")) {
+               kfree(map->debugfs_name);
+
                map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
                                                dummy_index);
                name = map->debugfs_name;
index 429ca8ed7e518087bc1ddaebd9f4c8393eb5179e..982c7ac311b8524eb2b5bbda1275d4a3c81aab50 100644 (file)
@@ -91,6 +91,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
         * suppress pointless writes.
         */
        for (i = 0; i < d->chip->num_regs; i++) {
+               if (!d->chip->mask_base)
+                       continue;
+
                reg = d->chip->mask_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                if (d->chip->mask_invert) {
@@ -526,6 +529,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
        /* Mask all the interrupts by default */
        for (i = 0; i < chip->num_regs; i++) {
                d->mask_buf[i] = d->mask_buf_def[i];
+               if (!chip->mask_base)
+                       continue;
+
                reg = chip->mask_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                if (chip->mask_invert)
index 0360a90ad6b623530f0053dbc3b3d29748266aae..6c9f6988bc09331ca12054da6d73f959d5f40de0 100644 (file)
@@ -1618,6 +1618,8 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
                                             map->format.reg_bytes +
                                             map->format.pad_bytes,
                                             val, val_len);
+       else
+               ret = -ENOTSUPP;
 
        /* If that didn't work fall back on linearising by hand. */
        if (ret == -ENOTSUPP) {
index 10b280f30217bcc6ec72cea2d7ca9612fc299293..7e91894a380b5387fbabe8ac13cb56e2900409da 100644 (file)
@@ -157,6 +157,7 @@ out2:
 out1:
        return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(soc_device_register);
 
 /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
 void soc_device_unregister(struct soc_device *soc_dev)
@@ -166,6 +167,7 @@ void soc_device_unregister(struct soc_device *soc_dev)
        device_unregister(&soc_dev->dev);
        early_soc_dev_attr = NULL;
 }
+EXPORT_SYMBOL_GPL(soc_device_unregister);
 
 static int __init soc_bus_register(void)
 {
index cb919b964066022f9099cead35bb443f07a073ae..3cdadf75c82da1835a81953dfd3e1daf349c3aad 100644 (file)
@@ -5240,7 +5240,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
        unsigned int key_len;
        char secret[SHARED_SECRET_MAX]; /* 64 byte */
        unsigned int resp_size;
-       SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
+       struct shash_desc *desc;
        struct packet_info pi;
        struct net_conf *nc;
        int err, rv;
@@ -5253,6 +5253,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
        memcpy(secret, nc->shared_secret, key_len);
        rcu_read_unlock();
 
+       desc = kmalloc(sizeof(struct shash_desc) +
+                      crypto_shash_descsize(connection->cram_hmac_tfm),
+                      GFP_KERNEL);
+       if (!desc) {
+               rv = -1;
+               goto fail;
+       }
        desc->tfm = connection->cram_hmac_tfm;
        desc->flags = 0;
 
@@ -5395,7 +5402,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
        kfree(peers_ch);
        kfree(response);
        kfree(right_response);
-       shash_desc_zero(desc);
+       if (desc) {
+               shash_desc_zero(desc);
+               kfree(desc);
+       }
 
        return rv;
 }
index a8de56f1936db166919c6bb1afa0c0e910be3a43..e71589e244fb24c3b97640c38d6acfd25c712d6e 100644 (file)
@@ -2119,6 +2119,9 @@ static void setup_format_params(int track)
        raw_cmd->kernel_data = floppy_track_buffer;
        raw_cmd->length = 4 * F_SECT_PER_TRACK;
 
+       if (!F_SECT_PER_TRACK)
+               return;
+
        /* allow for about 30ms for data transport per track */
        head_shift = (F_SECT_PER_TRACK + 5) / 6;
 
@@ -3241,8 +3244,12 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
        int cnt;
 
        /* sanity checking for parameters. */
-       if (g->sect <= 0 ||
-           g->head <= 0 ||
+       if ((int)g->sect <= 0 ||
+           (int)g->head <= 0 ||
+           /* check for overflow in max_sector */
+           (int)(g->sect * g->head) <= 0 ||
+           /* check for zero in F_SECT_PER_TRACK */
+           (unsigned char)((g->sect << 2) >> FD_SIZECODE(g)) == 0 ||
            g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
            /* check if reserved bits are set */
            (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
@@ -3386,6 +3393,24 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
        return 0;
 }
 
+static bool valid_floppy_drive_params(const short autodetect[8],
+               int native_format)
+{
+       size_t floppy_type_size = ARRAY_SIZE(floppy_type);
+       size_t i = 0;
+
+       for (i = 0; i < 8; ++i) {
+               if (autodetect[i] < 0 ||
+                   autodetect[i] >= floppy_type_size)
+                       return false;
+       }
+
+       if (native_format < 0 || native_format >= floppy_type_size)
+               return false;
+
+       return true;
+}
+
 static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
                    unsigned long param)
 {
@@ -3512,6 +3537,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
                SUPBOUND(size, strlen((const char *)outparam) + 1);
                break;
        case FDSETDRVPRM:
+               if (!valid_floppy_drive_params(inparam.dp.autodetect,
+                               inparam.dp.native_format))
+                       return -EINVAL;
                *UDP = inparam.dp;
                break;
        case FDGETDRVPRM:
@@ -3709,6 +3737,8 @@ static int compat_setdrvprm(int drive,
                return -EPERM;
        if (copy_from_user(&v, arg, sizeof(struct compat_floppy_drive_params)))
                return -EFAULT;
+       if (!valid_floppy_drive_params(v.autodetect, v.native_format))
+               return -EINVAL;
        mutex_lock(&floppy_mutex);
        UDP->cmos = v.cmos;
        UDP->max_dtr = v.max_dtr;
@@ -3761,7 +3791,7 @@ static int compat_getdrvprm(int drive,
        v.native_format = UDP->native_format;
        mutex_unlock(&floppy_mutex);
 
-       if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
+       if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
                return -EFAULT;
        return 0;
 }
@@ -3797,7 +3827,7 @@ static int compat_getdrvstat(int drive, bool poll,
        v.bufblocks = UDRS->bufblocks;
        mutex_unlock(&floppy_mutex);
 
-       if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
+       if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
                return -EFAULT;
        return 0;
 Eintr:
index f1e63eb7cbca6dc11dea38581a53bef34a761873..126c2c51467328bc2190cca1003bcb270e9930ad 100644 (file)
@@ -886,7 +886,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
 
 static int loop_kthread_worker_fn(void *worker_ptr)
 {
-       current->flags |= PF_LESS_THROTTLE;
+       current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
        return kthread_worker_fn(worker_ptr);
 }
 
@@ -1719,6 +1719,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
        case LOOP_SET_FD:
        case LOOP_CHANGE_FD:
        case LOOP_SET_BLOCK_SIZE:
+       case LOOP_SET_DIRECT_IO:
                err = lo_ioctl(bdev, mode, cmd, arg);
                break;
        default:
index c13a6d1796a776938be1d9e4c002be471e8a0431..bc2fa4e85f0cac87a7949a3b472ff2f35504dced 100644 (file)
@@ -106,6 +106,7 @@ struct nbd_device {
        struct nbd_config *config;
        struct mutex config_lock;
        struct gendisk *disk;
+       struct workqueue_struct *recv_workq;
 
        struct list_head list;
        struct task_struct *task_recv;
@@ -132,9 +133,10 @@ static struct dentry *nbd_dbg_dir;
 
 #define NBD_MAGIC 0x68797548
 
+#define NBD_DEF_BLKSIZE 1024
+
 static unsigned int nbds_max = 16;
 static int max_part = 16;
-static struct workqueue_struct *recv_workqueue;
 static int part_shift;
 
 static int nbd_dev_dbg_init(struct nbd_device *nbd);
@@ -353,8 +355,10 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        }
        config = nbd->config;
 
-       if (!mutex_trylock(&cmd->lock))
+       if (!mutex_trylock(&cmd->lock)) {
+               nbd_config_put(nbd);
                return BLK_EH_RESET_TIMER;
+       }
 
        if (config->num_connections > 1) {
                dev_err_ratelimited(nbd_to_dev(nbd),
@@ -1023,7 +1027,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
                /* We take the tx_mutex in an error path in the recv_work, so we
                 * need to queue_work outside of the tx_mutex.
                 */
-               queue_work(recv_workqueue, &args->work);
+               queue_work(nbd->recv_workq, &args->work);
 
                atomic_inc(&config->live_connections);
                wake_up(&config->conn_wait);
@@ -1124,6 +1128,10 @@ static void nbd_config_put(struct nbd_device *nbd)
                kfree(nbd->config);
                nbd->config = NULL;
 
+               if (nbd->recv_workq)
+                       destroy_workqueue(nbd->recv_workq);
+               nbd->recv_workq = NULL;
+
                nbd->tag_set.timeout = 0;
                nbd->disk->queue->limits.discard_granularity = 0;
                nbd->disk->queue->limits.discard_alignment = 0;
@@ -1152,6 +1160,14 @@ static int nbd_start_device(struct nbd_device *nbd)
                return -EINVAL;
        }
 
+       nbd->recv_workq = alloc_workqueue("knbd%d-recv",
+                                         WQ_MEM_RECLAIM | WQ_HIGHPRI |
+                                         WQ_UNBOUND, 0, nbd->index);
+       if (!nbd->recv_workq) {
+               dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
+               return -ENOMEM;
+       }
+
        blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
        nbd->task_recv = current;
 
@@ -1182,7 +1198,7 @@ static int nbd_start_device(struct nbd_device *nbd)
                INIT_WORK(&args->work, recv_work);
                args->nbd = nbd;
                args->index = i;
-               queue_work(recv_workqueue, &args->work);
+               queue_work(nbd->recv_workq, &args->work);
        }
        nbd_size_update(nbd);
        return error;
@@ -1202,8 +1218,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
        mutex_unlock(&nbd->config_lock);
        ret = wait_event_interruptible(config->recv_wq,
                                         atomic_read(&config->recv_threads) == 0);
-       if (ret)
+       if (ret) {
                sock_shutdown(nbd);
+               flush_workqueue(nbd->recv_workq);
+       }
        mutex_lock(&nbd->config_lock);
        nbd_bdev_reset(bdev);
        /* user requested, ignore socket errors */
@@ -1218,13 +1236,21 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
                                 struct block_device *bdev)
 {
        sock_shutdown(nbd);
-       kill_bdev(bdev);
+       __invalidate_device(bdev, true);
        nbd_bdev_reset(bdev);
        if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
                               &nbd->config->runtime_flags))
                nbd_config_put(nbd);
 }
 
+static bool nbd_is_valid_blksize(unsigned long blksize)
+{
+       if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
+           blksize > PAGE_SIZE)
+               return false;
+       return true;
+}
+
 /* Must be called with config_lock held */
 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
@@ -1240,8 +1266,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
        case NBD_SET_SOCK:
                return nbd_add_socket(nbd, arg, false);
        case NBD_SET_BLKSIZE:
-               if (!arg || !is_power_of_2(arg) || arg < 512 ||
-                   arg > PAGE_SIZE)
+               if (!arg)
+                       arg = NBD_DEF_BLKSIZE;
+               if (!nbd_is_valid_blksize(arg))
                        return -EINVAL;
                nbd_size_set(nbd, arg,
                             div_s64(config->bytesize, arg));
@@ -1321,7 +1348,7 @@ static struct nbd_config *nbd_alloc_config(void)
        atomic_set(&config->recv_threads, 0);
        init_waitqueue_head(&config->recv_wq);
        init_waitqueue_head(&config->conn_wait);
-       config->blksize = 1024;
+       config->blksize = NBD_DEF_BLKSIZE;
        atomic_set(&config->live_connections, 0);
        try_module_get(THIS_MODULE);
        return config;
@@ -1757,6 +1784,12 @@ again:
        if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
                u64 bsize =
                        nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
+               if (!bsize)
+                       bsize = NBD_DEF_BLKSIZE;
+               if (!nbd_is_valid_blksize(bsize)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
                nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
        }
        if (info->attrs[NBD_ATTR_TIMEOUT]) {
@@ -1833,6 +1866,12 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
        nbd_disconnect(nbd);
        nbd_clear_sock(nbd);
        mutex_unlock(&nbd->config_lock);
+       /*
+        * Make sure recv thread has finished, so it does not drop the last
+        * config ref and try to destroy the workqueue from inside the work
+        * queue.
+        */
+       flush_workqueue(nbd->recv_workq);
        if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
                               &nbd->config->runtime_flags))
                nbd_config_put(nbd);
@@ -2213,20 +2252,12 @@ static int __init nbd_init(void)
 
        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;
-       recv_workqueue = alloc_workqueue("knbd-recv",
-                                        WQ_MEM_RECLAIM | WQ_HIGHPRI |
-                                        WQ_UNBOUND, 0);
-       if (!recv_workqueue)
-               return -ENOMEM;
 
-       if (register_blkdev(NBD_MAJOR, "nbd")) {
-               destroy_workqueue(recv_workqueue);
+       if (register_blkdev(NBD_MAJOR, "nbd"))
                return -EIO;
-       }
 
        if (genl_register_family(&nbd_genl_family)) {
                unregister_blkdev(NBD_MAJOR, "nbd");
-               destroy_workqueue(recv_workqueue);
                return -EINVAL;
        }
        nbd_dbg_init();
@@ -2268,7 +2299,6 @@ static void __exit nbd_cleanup(void)
 
        idr_destroy(&nbd_index_idr);
        genl_unregister_family(&nbd_genl_family);
-       destroy_workqueue(recv_workqueue);
        unregister_blkdev(NBD_MAJOR, "nbd");
 }
 
index 093b614d652445a337db00ea8beaded767073415..c5c0b7c89481555074bda218c8a545a2539d1ee4 100644 (file)
@@ -321,11 +321,12 @@ static ssize_t nullb_device_power_store(struct config_item *item,
                set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
                dev->power = newp;
        } else if (dev->power && !newp) {
-               mutex_lock(&lock);
-               dev->power = newp;
-               null_del_dev(dev->nullb);
-               mutex_unlock(&lock);
-               clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+               if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
+                       mutex_lock(&lock);
+                       dev->power = newp;
+                       null_del_dev(dev->nullb);
+                       mutex_unlock(&lock);
+               }
                clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
        }
 
index 6f1d25c1eb640b8a0cffe94c3bd7e0cfa0ea691b..0bc344d22f0135c68e66a36311d8cb35085d7c10 100644 (file)
@@ -2596,7 +2596,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
        if (ret)
                return ret;
        if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
-               WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
                blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
                return -EINVAL;
        }
index a4bc74e72c394965f31dcbe7b55c8e5cd0fc6cd5..55869b362fdfb7c500177890443a6617e2107081 100644 (file)
@@ -974,6 +974,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
        }
        blkif->nr_ring_pages = nr_grefs;
 
+       err = -ENOMEM;
        for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
                req = kzalloc(sizeof(*req), GFP_KERNEL);
                if (!req)
@@ -996,7 +997,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
        err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
-               return err;
+               goto fail;
        }
 
        return 0;
@@ -1016,8 +1017,7 @@ fail:
                }
                kfree(req);
        }
-       return -ENOMEM;
-
+       return err;
 }
 
 static int connect_ring(struct backend_info *be)
index ec9e03a6b7786084b634b73f24ee5cf0dfc2c70f..9e70f7c7e5659b87dedfaddc529c04bbd9cd7cb2 100644 (file)
@@ -363,6 +363,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
                return err;
        }
 
+       /* Give the controller some time to get ready to receive the NVM */
+       msleep(10);
+
        /* Download NVM configuration */
        config.type = TLV_TYPE_NVM;
        if (soc_type == QCA_WCN3990)
index 1342f8e6025ccb1d39d02f8e145001f013a96142..8d1cd2479e36f5dd5c0d057b3cdf4dd92d7ef019 100644 (file)
@@ -639,6 +639,26 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
 }
 EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
 
+int btrtl_shutdown_realtek(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+       int ret;
+
+       /* According to the vendor driver, BT must be reset on close to avoid
+        * firmware crash.
+        */
+       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               ret = PTR_ERR(skb);
+               bt_dev_err(hdev, "HCI reset during shutdown failed");
+               return ret;
+       }
+       kfree_skb(skb);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(btrtl_shutdown_realtek);
+
 static unsigned int btrtl_convert_baudrate(u32 device_baudrate)
 {
        switch (device_baudrate) {
index f5e36f3993a8163e945619ede58a01e03496a9cc..852f27d4ee289ecf161245fb29b258776088b66c 100644 (file)
@@ -65,6 +65,7 @@ void btrtl_free(struct btrtl_device_info *btrtl_dev);
 int btrtl_download_firmware(struct hci_dev *hdev,
                            struct btrtl_device_info *btrtl_dev);
 int btrtl_setup_realtek(struct hci_dev *hdev);
+int btrtl_shutdown_realtek(struct hci_dev *hdev);
 int btrtl_get_uart_settings(struct hci_dev *hdev,
                            struct btrtl_device_info *btrtl_dev,
                            unsigned int *controller_baudrate,
@@ -93,6 +94,11 @@ static inline int btrtl_setup_realtek(struct hci_dev *hdev)
        return -EOPNOTSUPP;
 }
 
+static inline int btrtl_shutdown_realtek(struct hci_dev *hdev)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline int btrtl_get_uart_settings(struct hci_dev *hdev,
                                          struct btrtl_device_info *btrtl_dev,
                                          unsigned int *controller_baudrate,
index 40a4f95f6178158a3212fd7424c1813ef13a2aba..08936bf696d33b901be1b5f36693b4727e5aeddf 100644 (file)
@@ -277,7 +277,9 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME },
+       { USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME },
+       { USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME },
 
        /* Broadcom BCM2035 */
        { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -389,6 +391,9 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK },
        { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
 
+       /* Additional Realtek 8822CE Bluetooth devices */
+       { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
+
        /* Silicon Wave based devices */
        { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
 
@@ -1137,10 +1142,6 @@ static int btusb_open(struct hci_dev *hdev)
        }
 
        data->intf->needs_remote_wakeup = 1;
-       /* device specific wakeup source enabled and required for USB
-        * remote wakeup while host is suspended
-        */
-       device_wakeup_enable(&data->udev->dev);
 
        if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
                goto done;
@@ -1204,7 +1205,6 @@ static int btusb_close(struct hci_dev *hdev)
                goto failed;
 
        data->intf->needs_remote_wakeup = 0;
-       device_wakeup_disable(&data->udev->dev);
        usb_autopm_put_interface(data->intf);
 
 failed:
@@ -3131,6 +3131,7 @@ static int btusb_probe(struct usb_interface *intf,
 #ifdef CONFIG_BT_HCIBTUSB_RTL
        if (id->driver_info & BTUSB_REALTEK) {
                hdev->setup = btrtl_setup_realtek;
+               hdev->shutdown = btrtl_shutdown_realtek;
 
                /* Realtek devices lose their updated firmware over suspend,
                 * but the USB hub doesn't notice any status change.
index d568fbd94d6c84cc0045be68204f46b995e76cdb..20235925344dd224327a12ededac2589906be334 100644 (file)
@@ -112,6 +112,9 @@ static int ath_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        ath = kzalloc(sizeof(*ath), GFP_KERNEL);
        if (!ath)
                return -ENOMEM;
index 800132369134973122122a07e4de434f87462d3d..aa6b7ed9fdf12552c3bdbe4fe95e24ae4589bf4f 100644 (file)
@@ -369,6 +369,9 @@ static int bcm_open(struct hci_uart *hu)
 
        bt_dev_dbg(hu->hdev, "hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
        if (!bcm)
                return -ENOMEM;
index 1a7f0c82fb362ec2e200c2b6428b923fb24aa3ab..66fe1e6dc631feaf401b9f9c900262f570e7a68d 100644 (file)
@@ -759,6 +759,11 @@ static int bcsp_close(struct hci_uart *hu)
        skb_queue_purge(&bcsp->rel);
        skb_queue_purge(&bcsp->unrel);
 
+       if (bcsp->rx_skb) {
+               kfree_skb(bcsp->rx_skb);
+               bcsp->rx_skb = NULL;
+       }
+
        kfree(bcsp);
        return 0;
 }
index 46ace321bf60ebb1a1cfa3446838765cb025d812..e9228520e4c7af163b9ddeefedd200a01d4ca46c 100644 (file)
@@ -406,6 +406,9 @@ static int intel_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        intel = kzalloc(sizeof(*intel), GFP_KERNEL);
        if (!intel)
                return -ENOMEM;
index c915daf01a89d22475f615717a7ea51b92bc2aad..efeb8137ec67fdaac80c6c14660df7acb3e508b2 100644 (file)
@@ -299,6 +299,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        return 0;
 }
 
+/* Check the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+       /* serdev nodes check if the needed operations are present */
+       if (hu->serdev)
+               return true;
+
+       if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+               return true;
+
+       return false;
+}
+
 /* Flow control or un-flow control the device */
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
 {
index ffb00669346f0ee70045043e82af0651e80591ba..23791df081bab0935fed1a91ec3e36f67da9f3e7 100644 (file)
@@ -66,6 +66,9 @@ static int mrvl_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
        if (!mrvl)
                return -ENOMEM;
index 77004c29da089f9d3a21423af5f024b19bebfdcd..f96e58de049b3b98ad46420695d7b9db2487e80f 100644 (file)
@@ -450,6 +450,9 @@ static int qca_open(struct hci_uart *hu)
 
        BT_DBG("hu %p qca_open", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
        if (!qca)
                return -ENOMEM;
index 00cab2fd7a1b8302ef164940a784485a9198784d..067a610f1372a4b171a9e0446f76d3eaf52fbfc8 100644 (file)
@@ -118,6 +118,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu);
 int hci_uart_init_ready(struct hci_uart *hu);
 void hci_uart_init_work(struct work_struct *work);
 void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
 void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
                         unsigned int oper_speed);
index d5f85455fa6216fc2c7660e5f543f5aab71f50f5..e31c02dc777098ca99901e033a34a5b66f63dbb0 100644 (file)
@@ -456,6 +456,17 @@ struct hisi_lpc_acpi_cell {
        size_t pdata_size;
 };
 
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+       struct acpi_device *adev = ACPI_COMPANION(hostdev);
+       struct acpi_device *child;
+
+       device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
+
+       list_for_each_entry(child, &adev->children, node)
+               acpi_device_clear_enumerated(child);
+}
+
 /*
  * hisi_lpc_acpi_probe - probe children for ACPI FW
  * @hostdev: LPC host device pointer
@@ -556,8 +567,7 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
        return 0;
 
 fail:
-       device_for_each_child(hostdev, NULL,
-                             hisi_lpc_acpi_remove_subdev);
+       hisi_lpc_acpi_remove(hostdev);
        return ret;
 }
 
@@ -570,6 +580,10 @@ static int hisi_lpc_acpi_probe(struct device *dev)
 {
        return -ENODEV;
 }
+
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+}
 #endif // CONFIG_ACPI
 
 /*
@@ -607,24 +621,27 @@ static int hisi_lpc_probe(struct platform_device *pdev)
        range->fwnode = dev->fwnode;
        range->flags = LOGIC_PIO_INDIRECT;
        range->size = PIO_INDIRECT_SIZE;
+       range->hostdata = lpcdev;
+       range->ops = &hisi_lpc_ops;
+       lpcdev->io_host = range;
 
        ret = logic_pio_register_range(range);
        if (ret) {
                dev_err(dev, "register IO range failed (%d)!\n", ret);
                return ret;
        }
-       lpcdev->io_host = range;
 
        /* register the LPC host PIO resources */
        if (acpi_device)
                ret = hisi_lpc_acpi_probe(dev);
        else
                ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
-       if (ret)
+       if (ret) {
+               logic_pio_unregister_range(range);
                return ret;
+       }
 
-       lpcdev->io_host->hostdata = lpcdev;
-       lpcdev->io_host->ops = &hisi_lpc_ops;
+       dev_set_drvdata(dev, lpcdev);
 
        io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
        dev_info(dev, "registered range [%pa - %pa]\n",
@@ -633,6 +650,23 @@ static int hisi_lpc_probe(struct platform_device *pdev)
        return ret;
 }
 
+static int hisi_lpc_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct acpi_device *acpi_device = ACPI_COMPANION(dev);
+       struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
+       struct logic_pio_hwaddr *range = lpcdev->io_host;
+
+       if (acpi_device)
+               hisi_lpc_acpi_remove(dev);
+       else
+               of_platform_depopulate(dev);
+
+       logic_pio_unregister_range(range);
+
+       return 0;
+}
+
 static const struct of_device_id hisi_lpc_of_match[] = {
        { .compatible = "hisilicon,hip06-lpc", },
        { .compatible = "hisilicon,hip07-lpc", },
@@ -646,5 +680,6 @@ static struct platform_driver hisi_lpc_driver = {
                .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
        },
        .probe = hisi_lpc_probe,
+       .remove = hisi_lpc_remove,
 };
 builtin_platform_driver(hisi_lpc_driver);
index e4fe954e63a9be53b74397c825bf4f57b06dcca5..e95b26319cd91076695cacf94737902afe1d255b 100644 (file)
@@ -1022,10 +1022,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata)
        if (error)
                return 0;
 
-       if (val)
-               ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
-       else
-               ddata->cfg.sysc_val = ddata->cap->sysc_mask;
+       ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
 
        return 0;
 }
@@ -1688,7 +1685,7 @@ static int sysc_probe(struct platform_device *pdev)
 
        error = sysc_init_dts_quirks(ddata);
        if (error)
-               goto unprepare;
+               return error;
 
        error = sysc_get_clocks(ddata);
        if (error)
@@ -1696,27 +1693,27 @@ static int sysc_probe(struct platform_device *pdev)
 
        error = sysc_map_and_check_registers(ddata);
        if (error)
-               goto unprepare;
+               return error;
 
        error = sysc_init_sysc_mask(ddata);
        if (error)
-               goto unprepare;
+               return error;
 
        error = sysc_init_idlemodes(ddata);
        if (error)
-               goto unprepare;
+               return error;
 
        error = sysc_init_syss_mask(ddata);
        if (error)
-               goto unprepare;
+               return error;
 
        error = sysc_init_pdata(ddata);
        if (error)
-               goto unprepare;
+               return error;
 
        error = sysc_init_resets(ddata);
        if (error)
-               return error;
+               goto unprepare;
 
        pm_runtime_enable(ddata->dev);
        error = sysc_init_module(ddata);
index 9bffcd37cc7bd65b93fbac59f996b62d118d084f..c0732f032248479fb6ea2386ec76bb46e3800798 100644 (file)
@@ -570,8 +570,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
        unsigned long long m;
 
        m = hpets->hp_tick_freq + (dis >> 1);
-       do_div(m, dis);
-       return (unsigned long)m;
+       return div64_ul(m, dis);
 }
 
 static int
index aaf9e5afaad435e2342a15fc963aa91367079957..0ef7cb0448e8635bc5b17bbe5cba84017e5033ea 100644 (file)
@@ -67,7 +67,7 @@ static void add_early_randomness(struct hwrng *rng)
        size_t size = min_t(size_t, 16, rng_buffer_size());
 
        mutex_lock(&reading_mutex);
-       bytes_read = rng_get_data(rng, rng_buffer, size, 1);
+       bytes_read = rng_get_data(rng, rng_buffer, size, 0);
        mutex_unlock(&reading_mutex);
        if (bytes_read > 0)
                add_device_randomness(rng_buffer, bytes_read);
index 75e5006f395a5fc5d5050b79fe587097364a7d78..006d765256782c0cd46b87f2c70b7c0838ff256b 100644 (file)
@@ -221,6 +221,9 @@ struct smi_info {
         */
        bool irq_enable_broken;
 
+       /* Is the driver in maintenance mode? */
+       bool in_maintenance_mode;
+
        /*
         * Did we get an attention that we did not handle?
         */
@@ -1013,11 +1016,20 @@ static int ipmi_thread(void *data)
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
                                                  &busy_until);
-               if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+               if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
                        ; /* do nothing */
-               else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
-                       schedule();
-               else if (smi_result == SI_SM_IDLE) {
+               } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
+                       /*
+                        * In maintenance mode we run as fast as
+                        * possible to allow firmware updates to
+                        * complete as fast as possible, but normally
+                        * don't bang on the scheduler.
+                        */
+                       if (smi_info->in_maintenance_mode)
+                               schedule();
+                       else
+                               usleep_range(100, 200);
+               } else if (smi_result == SI_SM_IDLE) {
                        if (atomic_read(&smi_info->need_watch)) {
                                schedule_timeout_interruptible(100);
                        } else {
@@ -1025,8 +1037,9 @@ static int ipmi_thread(void *data)
                                __set_current_state(TASK_INTERRUPTIBLE);
                                schedule();
                        }
-               } else
+               } else {
                        schedule_timeout_interruptible(1);
+               }
        }
        return 0;
 }
@@ -1201,6 +1214,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
 
        if (!enable)
                atomic_set(&smi_info->req_events, 0);
+       smi_info->in_maintenance_mode = enable;
 }
 
 static void shutdown_smi(void *send_info);
index 7b4e4de778e45f7900732a243f6d53f783089d32..54b86490d9cabc41f4a20bb9d56c95daba4b8007 100644 (file)
@@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 }
 #endif
 
+static inline bool should_stop_iteration(void)
+{
+       if (need_resched())
+               cond_resched();
+       return fatal_signal_pending(current);
+}
+
 /*
  * This funcion reads the *physical* memory. The f_pos points directly to the
  * memory location.
@@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
                p += sz;
                count -= sz;
                read += sz;
+               if (should_stop_iteration())
+                       break;
        }
        kfree(bounce);
 
@@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
                p += sz;
                count -= sz;
                written += sz;
+               if (should_stop_iteration())
+                       break;
        }
 
        *ppos += written;
@@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
                        read += sz;
                        low_count -= sz;
                        count -= sz;
+                       if (should_stop_iteration()) {
+                               count = 0;
+                               break;
+                       }
                }
        }
 
@@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
                        buf += sz;
                        read += sz;
                        p += sz;
+                       if (should_stop_iteration())
+                               break;
                }
                free_page((unsigned long)kbuf);
        }
@@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                p += sz;
                count -= sz;
                written += sz;
+               if (should_stop_iteration())
+                       break;
        }
 
        *ppos += written;
@@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
                        buf += sz;
                        virtr += sz;
                        p += sz;
+                       if (should_stop_iteration())
+                               break;
                }
                free_page((unsigned long)kbuf);
        }
index be5d1abd3e8ef05d9ce66ba8d61d120750303c9e..8390c5b54c3bedbdd712bc55de00769d7fd9ca0a 100644 (file)
@@ -33,7 +33,7 @@
 
 struct st33zp24_i2c_phy {
        struct i2c_client *client;
-       u8 buf[TPM_BUFSIZE + 1];
+       u8 buf[ST33ZP24_BUFSIZE + 1];
        int io_lpcpd;
 };
 
index d7909ab287a85c7b75d2f44e70a443451e84a22f..ff019a1e3c68f9b9a672c9fe5946acaaea624504 100644 (file)
@@ -63,7 +63,7 @@
  * some latency byte before the answer is available (max 15).
  * We have 2048 + 1024 + 15.
  */
-#define ST33ZP24_SPI_BUFFER_SIZE (TPM_BUFSIZE + (TPM_BUFSIZE / 2) +\
+#define ST33ZP24_SPI_BUFFER_SIZE (ST33ZP24_BUFSIZE + (ST33ZP24_BUFSIZE / 2) +\
                                  MAX_SPI_LATENCY)
 
 
index 6f4a4198af6aa2637fb2347dff6cdb9109054b24..20da0a84988d6bde88c2bd95296484c366d1b206 100644 (file)
@@ -18,8 +18,8 @@
 #ifndef __LOCAL_ST33ZP24_H__
 #define __LOCAL_ST33ZP24_H__
 
-#define TPM_WRITE_DIRECTION             0x80
-#define TPM_BUFSIZE                     2048
+#define TPM_WRITE_DIRECTION    0x80
+#define ST33ZP24_BUFSIZE       2048
 
 struct st33zp24_dev {
        struct tpm_chip *chip;
index 46caadca916a0a0d3e23b35ca53d3c87142dc97e..0b01eb7b14e536052381f953c5c2582ca936f5d9 100644 (file)
@@ -187,12 +187,13 @@ static int tpm_class_shutdown(struct device *dev)
 {
        struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
 
+       down_write(&chip->ops_sem);
        if (chip->flags & TPM_CHIP_FLAG_TPM2) {
-               down_write(&chip->ops_sem);
                tpm2_shutdown(chip, TPM2_SU_CLEAR);
                chip->ops = NULL;
-               up_write(&chip->ops_sem);
        }
+       chip->ops = NULL;
+       up_write(&chip->ops_sem);
 
        return 0;
 }
index 83a77a4455380276ef8d4786cac2c330f3242460..177a60e5c6ec9a725c48c1d8e53e7fe0e7c22326 100644 (file)
@@ -39,7 +39,6 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
 {
        struct tpm_buf tpm_buf;
        struct tpm_readpubek_out *out;
-       ssize_t rc;
        int i;
        char *str = buf;
        struct tpm_chip *chip = to_tpm_chip(dev);
@@ -47,19 +46,18 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
 
        memset(&anti_replay, 0, sizeof(anti_replay));
 
-       rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK);
-       if (rc)
-               return rc;
+       if (tpm_try_get_ops(chip))
+               return 0;
+
+       if (tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK))
+               goto out_ops;
 
        tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
 
-       rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
+       if (tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
                              READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
-                             "attempting to read the PUBEK");
-       if (rc) {
-               tpm_buf_destroy(&tpm_buf);
-               return 0;
-       }
+                             "attempting to read the PUBEK"))
+               goto out_buf;
 
        out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
        str +=
@@ -90,9 +88,11 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
                        str += sprintf(str, "\n");
        }
 
-       rc = str - buf;
+out_buf:
        tpm_buf_destroy(&tpm_buf);
-       return rc;
+out_ops:
+       tpm_put_ops(chip);
+       return str - buf;
 }
 static DEVICE_ATTR_RO(pubek);
 
@@ -106,12 +106,16 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
        char *str = buf;
        struct tpm_chip *chip = to_tpm_chip(dev);
 
-       rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
-                       "attempting to determine the number of PCRS",
-                       sizeof(cap.num_pcrs));
-       if (rc)
+       if (tpm_try_get_ops(chip))
                return 0;
 
+       if (tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
+                      "attempting to determine the number of PCRS",
+                      sizeof(cap.num_pcrs))) {
+               tpm_put_ops(chip);
+               return 0;
+       }
+
        num_pcrs = be32_to_cpu(cap.num_pcrs);
        for (i = 0; i < num_pcrs; i++) {
                rc = tpm_pcr_read_dev(chip, i, digest);
@@ -122,6 +126,7 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
                        str += sprintf(str, "%02X ", digest[j]);
                str += sprintf(str, "\n");
        }
+       tpm_put_ops(chip);
        return str - buf;
 }
 static DEVICE_ATTR_RO(pcrs);
@@ -129,16 +134,21 @@ static DEVICE_ATTR_RO(pcrs);
 static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
                     char *buf)
 {
+       struct tpm_chip *chip = to_tpm_chip(dev);
+       ssize_t rc = 0;
        cap_t cap;
-       ssize_t rc;
 
-       rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
-                       "attempting to determine the permanent enabled state",
-                       sizeof(cap.perm_flags));
-       if (rc)
+       if (tpm_try_get_ops(chip))
                return 0;
 
+       if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
+                      "attempting to determine the permanent enabled state",
+                      sizeof(cap.perm_flags)))
+               goto out_ops;
+
        rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
+out_ops:
+       tpm_put_ops(chip);
        return rc;
 }
 static DEVICE_ATTR_RO(enabled);
@@ -146,16 +156,21 @@ static DEVICE_ATTR_RO(enabled);
 static ssize_t active_show(struct device *dev, struct device_attribute *attr,
                    char *buf)
 {
+       struct tpm_chip *chip = to_tpm_chip(dev);
+       ssize_t rc = 0;
        cap_t cap;
-       ssize_t rc;
 
-       rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
-                       "attempting to determine the permanent active state",
-                       sizeof(cap.perm_flags));
-       if (rc)
+       if (tpm_try_get_ops(chip))
                return 0;
 
+       if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
+                      "attempting to determine the permanent active state",
+                      sizeof(cap.perm_flags)))
+               goto out_ops;
+
        rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
+out_ops:
+       tpm_put_ops(chip);
        return rc;
 }
 static DEVICE_ATTR_RO(active);
@@ -163,16 +178,21 @@ static DEVICE_ATTR_RO(active);
 static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
 {
+       struct tpm_chip *chip = to_tpm_chip(dev);
+       ssize_t rc = 0;
        cap_t cap;
-       ssize_t rc;
 
-       rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
-                       "attempting to determine the owner state",
-                       sizeof(cap.owned));
-       if (rc)
+       if (tpm_try_get_ops(chip))
                return 0;
 
+       if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
+                      "attempting to determine the owner state",
+                      sizeof(cap.owned)))
+               goto out_ops;
+
        rc = sprintf(buf, "%d\n", cap.owned);
+out_ops:
+       tpm_put_ops(chip);
        return rc;
 }
 static DEVICE_ATTR_RO(owned);
@@ -180,16 +200,21 @@ static DEVICE_ATTR_RO(owned);
 static ssize_t temp_deactivated_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
+       struct tpm_chip *chip = to_tpm_chip(dev);
+       ssize_t rc = 0;
        cap_t cap;
-       ssize_t rc;
 
-       rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
-                       "attempting to determine the temporary state",
-                       sizeof(cap.stclear_flags));
-       if (rc)
+       if (tpm_try_get_ops(chip))
                return 0;
 
+       if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
+                      "attempting to determine the temporary state",
+                      sizeof(cap.stclear_flags)))
+               goto out_ops;
+
        rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
+out_ops:
+       tpm_put_ops(chip);
        return rc;
 }
 static DEVICE_ATTR_RO(temp_deactivated);
@@ -198,15 +223,18 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
        struct tpm_chip *chip = to_tpm_chip(dev);
-       cap_t cap;
-       ssize_t rc;
+       ssize_t rc = 0;
        char *str = buf;
+       cap_t cap;
 
-       rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
-                       "attempting to determine the manufacturer",
-                       sizeof(cap.manufacturer_id));
-       if (rc)
+       if (tpm_try_get_ops(chip))
                return 0;
+
+       if (tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
+                      "attempting to determine the manufacturer",
+                      sizeof(cap.manufacturer_id)))
+               goto out_ops;
+
        str += sprintf(str, "Manufacturer: 0x%x\n",
                       be32_to_cpu(cap.manufacturer_id));
 
@@ -223,20 +251,22 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
                               cap.tpm_version_1_2.revMinor);
        } else {
                /* Otherwise just use TPM_STRUCT_VER */
-               rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
-                               "attempting to determine the 1.1 version",
-                               sizeof(cap.tpm_version));
-               if (rc)
-                       return 0;
+               if (tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+                              "attempting to determine the 1.1 version",
+                              sizeof(cap.tpm_version)))
+                       goto out_ops;
+
                str += sprintf(str,
                               "TCG version: %d.%d\nFirmware version: %d.%d\n",
                               cap.tpm_version.Major,
                               cap.tpm_version.Minor,
                               cap.tpm_version.revMajor,
                               cap.tpm_version.revMinor);
-       }
-
-       return str - buf;
+}
+       rc = str - buf;
+out_ops:
+       tpm_put_ops(chip);
+       return rc;
 }
 static DEVICE_ATTR_RO(caps);
 
@@ -244,10 +274,12 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
 {
        struct tpm_chip *chip = to_tpm_chip(dev);
-       if (chip == NULL)
+
+       if (tpm_try_get_ops(chip))
                return 0;
 
        chip->ops->cancel(chip);
+       tpm_put_ops(chip);
        return count;
 }
 static DEVICE_ATTR_WO(cancel);
index 977fd42daa1b1fcadc81d7338d21c8919d88d5e6..3b4e9672ff6cdb622fd063021c146975120f8fb8 100644 (file)
@@ -26,8 +26,7 @@
 #include <linux/wait.h>
 #include "tpm.h"
 
-/* max. buffer size supported by our TPM */
-#define TPM_BUFSIZE 1260
+#define TPM_I2C_INFINEON_BUFSIZE 1260
 
 /* max. number of iterations after I2C NAK */
 #define MAX_COUNT 3
@@ -63,11 +62,13 @@ enum i2c_chip_type {
        UNKNOWN,
 };
 
-/* Structure to store I2C TPM specific stuff */
 struct tpm_inf_dev {
        struct i2c_client *client;
        int locality;
-       u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */
+       /* In addition to the data itself, the buffer must fit the 7-bit I2C
+        * address and the direction bit.
+        */
+       u8 buf[TPM_I2C_INFINEON_BUFSIZE + 1];
        struct tpm_chip *chip;
        enum i2c_chip_type chip_type;
        unsigned int adapterlimit;
@@ -219,7 +220,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
                .buf = tpm_dev.buf
        };
 
-       if (len > TPM_BUFSIZE)
+       if (len > TPM_I2C_INFINEON_BUFSIZE)
                return -EINVAL;
 
        if (!tpm_dev.client->adapter->algo->master_xfer)
@@ -527,8 +528,8 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
        u8 retries = 0;
        u8 sts = TPM_STS_GO;
 
-       if (len > TPM_BUFSIZE)
-               return -E2BIG;  /* command is too long for our tpm, sorry */
+       if (len > TPM_I2C_INFINEON_BUFSIZE)
+               return -E2BIG;
 
        if (request_locality(chip, 0) < 0)
                return -EBUSY;
index b8defdfdf2dc6e3bac94bcf9b02d036d5d9cba7d..2803080097841ed96f1b7f3dbf177c060517d18d 100644 (file)
 #include "tpm.h"
 
 /* I2C interface offsets */
-#define TPM_STS                0x00
-#define TPM_BURST_COUNT        0x01
-#define TPM_DATA_FIFO_W        0x20
-#define TPM_DATA_FIFO_R        0x40
-#define TPM_VID_DID_RID        0x60
-/* TPM command header size */
-#define TPM_HEADER_SIZE        10
-#define TPM_RETRY      5
+#define TPM_STS                        0x00
+#define TPM_BURST_COUNT                0x01
+#define TPM_DATA_FIFO_W                0x20
+#define TPM_DATA_FIFO_R                0x40
+#define TPM_VID_DID_RID                0x60
+#define TPM_I2C_RETRIES                5
 /*
  * I2C bus device maximum buffer size w/o counting I2C address or command
  * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data
@@ -292,7 +290,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
                dev_err(dev, "%s() count < header size\n", __func__);
                return -EIO;
        }
-       for (retries = 0; retries < TPM_RETRY; retries++) {
+       for (retries = 0; retries < TPM_I2C_RETRIES; retries++) {
                if (retries > 0) {
                        /* if this is not the first trial, set responseRetry */
                        i2c_nuvoton_write_status(client,
index 61c1071b5180a8168cff7382e31f1d81ee536b49..e9be34b17f3f565e370b8e44230bc2fb4eebcd5e 100644 (file)
@@ -67,16 +67,17 @@ int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks)
        struct clk_hw *hw;
 
        for (i = 0; i < hw_clks->num; i++) {
+               const char *name;
 
                hw = hw_clks->hws[i];
-
                if (IS_ERR_OR_NULL(hw))
                        continue;
 
+               name = hw->init->name;
                ret = devm_clk_hw_register(dev, hw);
                if (ret) {
                        dev_err(dev, "Couldn't register clock %d - %s\n",
-                               i, hw->init->name);
+                               i, name);
                        return ret;
                }
        }
index 33481368740e7dc038fe1e32065e2659d8cec730..113152425a95dc102d1affc016706a6866aeefd8 100644 (file)
@@ -153,6 +153,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
                        continue;
 
                div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
+               if (div > GENERATED_MAX_DIV + 1)
+                       div = GENERATED_MAX_DIV + 1;
 
                clk_generated_best_diff(req, parent, parent_rate, div,
                                        &best_diff, &best_rate);
index c813c27f2e58c6f8e62c523a1a9a8b1d212a96d8..2f97a843d6d6bcbd7a6fe7f90849dabfbbe54db6 100644 (file)
 
 #define MOR_KEY_MASK           (0xff << 16)
 
+#define clk_main_parent_select(s)      (((s) & \
+                                       (AT91_PMC_MOSCEN | \
+                                       AT91_PMC_OSCBYPASS)) ? 1 : 0)
+
 struct clk_main_osc {
        struct clk_hw hw;
        struct regmap *regmap;
@@ -119,7 +123,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
 
        regmap_read(regmap, AT91_PMC_SR, &status);
 
-       return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
+       return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
 }
 
 static const struct clk_ops main_osc_ops = {
@@ -530,7 +534,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
 
        regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
 
-       return status & AT91_PMC_MOSCEN ? 1 : 0;
+       return clk_main_parent_select(status);
 }
 
 static const struct clk_ops sam9x5_main_ops = {
@@ -572,7 +576,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
        clkmain->hw.init = &init;
        clkmain->regmap = regmap;
        regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
-       clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
+       clkmain->parent = clk_main_parent_select(status);
 
        hw = &clkmain->hw;
        ret = clk_hw_register(NULL, &clkmain->hw);
index 3a1812f65e5d823242d4428672736ce04f865a33..8abc5c8cb8b8c2535bc355f9c03ed7c9cd234b57 100644 (file)
@@ -610,7 +610,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
                .guts_compat = "fsl,qoriq-device-config-1.0",
                .init_periph = p5020_init_periph,
                .cmux_groups = {
-                       &p2041_cmux_grp1, &p2041_cmux_grp2
+                       &p5020_cmux_grp1, &p5020_cmux_grp2
                },
                .cmux_to_group = {
                        0, 1, -1
index 0934d3724495a50ede3d9873eb7c1c2c45ddb3cd..4080d4e78e8e404b0af645619442508992c4e06f 100644 (file)
@@ -255,7 +255,7 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
  * This requires of_device_id table.  In the same time this will not change the
  * actual *device* matching so do not add .of_match_table.
  */
-static const struct of_device_id s2mps11_dt_match[] = {
+static const struct of_device_id s2mps11_dt_match[] __used = {
        {
                .compatible = "samsung,s2mps11-clk",
                .data = (void *)S2MPS11X,
index 3bf11a6200942f8ac8a3c89f3a04c1c85510856e..ada3e4aeb38f96d039dac687a44bd80bf8e28d2b 100644 (file)
@@ -647,7 +647,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
                .name = "gcc_sdcc2_apps_clk_src",
                .parent_names = gcc_parent_names_10,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_floor_ops,
        },
 };
 
@@ -671,7 +671,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
                .name = "gcc_sdcc4_apps_clk_src",
                .parent_names = gcc_parent_names_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_floor_ops,
        },
 };
 
index e82adcb16a52a3b790c34fc2a49079615a7bf907..45d94fb9703d27e818a8fb25ef869aa8d74eccab 100644 (file)
@@ -341,7 +341,8 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
                return;
 
        pd->name = np->name;
-       pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+       pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
+                   GENPD_FLAG_ACTIVE_WAKEUP;
        pd->attach_dev = cpg_mstp_attach_dev;
        pd->detach_dev = cpg_mstp_detach_dev;
        pm_genpd_init(pd, &pm_domain_always_on_gov, false);
index f4b013e9352d9efca6260c5ca763e8fcc53eb6f5..d7a2ad6173694c2f3dfaf54812356da11299c78e 100644 (file)
@@ -514,7 +514,8 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
 
        genpd = &pd->genpd;
        genpd->name = np->name;
-       genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+       genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
+                      GENPD_FLAG_ACTIVE_WAKEUP;
        genpd->attach_dev = cpg_mssr_attach_dev;
        genpd->detach_dev = cpg_mssr_detach_dev;
        pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
@@ -535,17 +536,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
        unsigned int reg = id / 32;
        unsigned int bit = id % 32;
        u32 bitmask = BIT(bit);
-       unsigned long flags;
-       u32 value;
 
        dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
 
        /* Reset module */
-       spin_lock_irqsave(&priv->rmw_lock, flags);
-       value = readl(priv->base + SRCR(reg));
-       value |= bitmask;
-       writel(value, priv->base + SRCR(reg));
-       spin_unlock_irqrestore(&priv->rmw_lock, flags);
+       writel(bitmask, priv->base + SRCR(reg));
 
        /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
        udelay(35);
@@ -562,16 +557,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
        unsigned int reg = id / 32;
        unsigned int bit = id % 32;
        u32 bitmask = BIT(bit);
-       unsigned long flags;
-       u32 value;
 
        dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
 
-       spin_lock_irqsave(&priv->rmw_lock, flags);
-       value = readl(priv->base + SRCR(reg));
-       value |= bitmask;
-       writel(value, priv->base + SRCR(reg));
-       spin_unlock_irqrestore(&priv->rmw_lock, flags);
+       writel(bitmask, priv->base + SRCR(reg));
        return 0;
 }
 
index 026a26bb702d9b9f6ab53a6ffaa8864149dfc01b..dbec84238ecdc7936ddc0c098dcb174a5d70e65a 100644 (file)
@@ -61,10 +61,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
        u32 delay_num = 0;
 
        /* See the comment for rockchip_mmc_set_phase below */
-       if (!rate) {
-               pr_err("%s: invalid clk rate\n", __func__);
+       if (!rate)
                return -EINVAL;
-       }
 
        raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
 
index d8f9efa5129adf4d7e9731ba1dd7967f71d9c946..25351d6a55ba24bc2dbf357d6649b800c33db01e 100644 (file)
@@ -298,9 +298,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
 {
        struct clk_dmn *clk = to_dmnclk(hw);
        u32 cfg = clkc_readl(clk->regofs);
+       const char *name = clk_hw_get_name(hw);
 
        /* parent of io domain can only be pll3 */
-       if (strcmp(hw->init->name, "io") == 0)
+       if (strcmp(name, "io") == 0)
                return 4;
 
        WARN_ON((cfg & (BIT(3) - 1)) > 4);
@@ -312,9 +313,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
 {
        struct clk_dmn *clk = to_dmnclk(hw);
        u32 cfg = clkc_readl(clk->regofs);
+       const char *name = clk_hw_get_name(hw);
 
        /* parent of io domain can only be pll3 */
-       if (strcmp(hw->init->name, "io") == 0)
+       if (strcmp(name, "io") == 0)
                return -EINVAL;
 
        cfg &= ~(BIT(3) - 1);
@@ -354,7 +356,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 {
        unsigned long fin;
        unsigned ratio, wait, hold;
-       unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
+       const char *name = clk_hw_get_name(hw);
+       unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
 
        fin = *parent_rate;
        ratio = fin / rate;
@@ -376,7 +379,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
        struct clk_dmn *clk = to_dmnclk(hw);
        unsigned long fin;
        unsigned ratio, wait, hold, reg;
-       unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
+       const char *name = clk_hw_get_name(hw);
+       unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
 
        fin = parent_rate;
        ratio = fin / rate;
index 568f59b58ddfa94852462ed8fd3531d48fff151c..e7c877d354c7bff5777fec95eef40a48b3879188 100644 (file)
@@ -37,7 +37,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
        if (socfpgaclk->fixed_div) {
                div = socfpgaclk->fixed_div;
        } else {
-               if (!socfpgaclk->bypass_reg)
+               if (socfpgaclk->hw.reg)
                        div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
        }
 
index 87892471eb96c3549ced203976eee9cbf7cfbb43..bad8099832d4806cd1a300549b2df66efc1df6f8 100644 (file)
@@ -2,6 +2,7 @@ config SPRD_COMMON_CLK
        tristate "Clock support for Spreadtrum SoCs"
        depends on ARCH_SPRD || COMPILE_TEST
        default ARCH_SPRD
+       select REGMAP_MMIO
 
 if SPRD_COMMON_CLK
 
index e038b044720611ab5a30382a3bc7772e2649fbe8..8bdab1c3013b82859df8a0391b54c1f61bf607fa 100644 (file)
@@ -71,16 +71,17 @@ int sprd_clk_probe(struct device *dev, struct clk_hw_onecell_data *clkhw)
        struct clk_hw *hw;
 
        for (i = 0; i < clkhw->num; i++) {
+               const char *name;
 
                hw = clkhw->hws[i];
-
                if (!hw)
                        continue;
 
+               name = hw->init->name;
                ret = devm_clk_hw_register(dev, hw);
                if (ret) {
                        dev_err(dev, "Couldn't register clock %d - %s\n",
-                               i, hw->init->name);
+                               i, name);
                        return ret;
                }
        }
index 36b4402bf09e363618ff3453e43ba6a02158cc73..640270f51aa56ce413c5528a9854ad4919005f86 100644 (file)
@@ -136,6 +136,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
                                         k2 + refin * nint * CLK_PLL_1M;
        }
 
+       kfree(cfg);
        return rate;
 }
 
@@ -222,6 +223,7 @@ static int _sprd_pll_set_rate(const struct sprd_pll *pll,
        if (!ret)
                udelay(pll->udelay);
 
+       kfree(cfg);
        return ret;
 }
 
index 9980ab55271ba2f71f7ad09ba04d2bbd23468eb7..f76305b4bc8df9488bf099a3fa2e5e2d7f09a56d 100644 (file)
@@ -2023,6 +2023,7 @@ static int sc9860_clk_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match;
        const struct sprd_clk_desc *desc;
+       int ret;
 
        match = of_match_node(sprd_sc9860_clk_ids, pdev->dev.of_node);
        if (!match) {
@@ -2031,7 +2032,9 @@ static int sc9860_clk_probe(struct platform_device *pdev)
        }
 
        desc = match->data;
-       sprd_clk_regmap_init(pdev, desc);
+       ret = sprd_clk_regmap_init(pdev, desc);
+       if (ret)
+               return ret;
 
        return sprd_clk_probe(&pdev->dev, desc->hw_clks);
 }
index ac12f261f8caa3f76d0b8407be287506ed7ff7cc..9e3f4088724b430f111c2996e8d5e51cf368877b 100644 (file)
@@ -499,6 +499,9 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
                [CLK_MMC1]              = &mmc1_clk.common.hw,
                [CLK_MMC1_SAMPLE]       = &mmc1_sample_clk.common.hw,
                [CLK_MMC1_OUTPUT]       = &mmc1_output_clk.common.hw,
+               [CLK_MMC2]              = &mmc2_clk.common.hw,
+               [CLK_MMC2_SAMPLE]       = &mmc2_sample_clk.common.hw,
+               [CLK_MMC2_OUTPUT]       = &mmc2_output_clk.common.hw,
                [CLK_CE]                = &ce_clk.common.hw,
                [CLK_SPI0]              = &spi0_clk.common.hw,
                [CLK_USB_PHY0]          = &usb_phy0_clk.common.hw,
index 92d04ce2dee6b7e4e5131a1f9052bbee7d5de134..53cdc0ec40f33a752aef895f8157a4f65258ed1d 100644 (file)
@@ -55,7 +55,7 @@ const struct clk_ops tegra_clk_sync_source_ops = {
 };
 
 struct clk *tegra_clk_register_sync_source(const char *name,
-               unsigned long rate, unsigned long max_rate)
+                                          unsigned long max_rate)
 {
        struct tegra_clk_sync_source *sync;
        struct clk_init_data init;
@@ -67,7 +67,6 @@ struct clk *tegra_clk_register_sync_source(const char *name,
                return ERR_PTR(-ENOMEM);
        }
 
-       sync->rate = rate;
        sync->max_rate = max_rate;
 
        init.ops = &tegra_clk_sync_source_ops;
index b37cae7af26da031c01a6fb584802bb4640ba099..02dd6487d855d001083579b9c7841383bd8e380b 100644 (file)
@@ -49,8 +49,6 @@ struct tegra_sync_source_initdata {
 #define SYNC(_name) \
        {\
                .name           = #_name,\
-               .rate           = 24000000,\
-               .max_rate       = 24000000,\
                .clk_id         = tegra_clk_ ## _name,\
        }
 
@@ -176,7 +174,7 @@ static void __init tegra_audio_sync_clk_init(void __iomem *clk_base,
 void __init tegra_audio_clk_init(void __iomem *clk_base,
                        void __iomem *pmc_base, struct tegra_clk *tegra_clks,
                        struct tegra_audio_clk_info *audio_info,
-                       unsigned int num_plls)
+                       unsigned int num_plls, unsigned long sync_max_rate)
 {
        struct clk *clk;
        struct clk **dt_clk;
@@ -221,8 +219,7 @@ void __init tegra_audio_clk_init(void __iomem *clk_base,
                if (!dt_clk)
                        continue;
 
-               clk = tegra_clk_register_sync_source(data->name,
-                                       data->rate, data->max_rate);
+               clk = tegra_clk_register_sync_source(data->name, sync_max_rate);
                *dt_clk = clk;
        }
 
index 1824f014202b0351f2587a7dcc3b9e666a69b15a..625d11091330896149c051910b3f5b791117d034 100644 (file)
@@ -1190,6 +1190,13 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        { TEGRA114_CLK_XUSB_FALCON_SRC, TEGRA114_CLK_PLL_P, 204000000, 0 },
        { TEGRA114_CLK_XUSB_HOST_SRC, TEGRA114_CLK_PLL_P, 102000000, 0 },
        { TEGRA114_CLK_VDE, TEGRA114_CLK_CLK_MAX, 600000000, 0 },
+       { TEGRA114_CLK_SPDIF_IN_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA114_CLK_I2S0_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA114_CLK_I2S1_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA114_CLK_I2S2_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA114_CLK_I2S3_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA114_CLK_I2S4_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA114_CLK_VIMCLK_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
        /* must be the last entry */
        { TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0 },
 };
@@ -1362,7 +1369,7 @@ static void __init tegra114_clock_init(struct device_node *np)
        tegra114_periph_clk_init(clk_base, pmc_base);
        tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks,
                             tegra114_audio_plls,
-                            ARRAY_SIZE(tegra114_audio_plls));
+                            ARRAY_SIZE(tegra114_audio_plls), 24000000);
        tegra_pmc_clk_init(pmc_base, tegra114_clks);
        tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
                                        &pll_x_params);
index b6cf28ca2ed291174e7a24576f89afe90638f2d6..df0018f7bf7ed8668d04a6ab5ef47994b4ef3ac3 100644 (file)
@@ -1291,6 +1291,13 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
        { TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1 },
        { TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0 },
        { TEGRA124_CLK_VIC03, TEGRA124_CLK_PLL_C3, 0, 0 },
+       { TEGRA124_CLK_SPDIF_IN_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA124_CLK_I2S0_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA124_CLK_I2S1_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA124_CLK_I2S2_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA124_CLK_I2S3_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA124_CLK_I2S4_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA124_CLK_VIMCLK_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
        /* must be the last entry */
        { TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0 },
 };
@@ -1455,7 +1462,7 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
        tegra124_periph_clk_init(clk_base, pmc_base);
        tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks,
                             tegra124_audio_plls,
-                            ARRAY_SIZE(tegra124_audio_plls));
+                            ARRAY_SIZE(tegra124_audio_plls), 24576000);
        tegra_pmc_clk_init(pmc_base, tegra124_clks);
 
        /* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
index 9eb1cb14fce11ca5dd0ddd725102a343712d3b24..080bfa24863ee46284c9427b8937655e97d21aee 100644 (file)
@@ -2214,9 +2214,9 @@ static struct div_nmp pllu_nmp = {
 };
 
 static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
-       { 12000000, 480000000, 40, 1, 0, 0 },
-       { 13000000, 480000000, 36, 1, 0, 0 }, /* actual: 468.0 MHz */
-       { 38400000, 480000000, 25, 2, 0, 0 },
+       { 12000000, 480000000, 40, 1, 1, 0 },
+       { 13000000, 480000000, 36, 1, 1, 0 }, /* actual: 468.0 MHz */
+       { 38400000, 480000000, 25, 2, 1, 0 },
        {        0,         0,  0, 0, 0, 0 },
 };
 
@@ -3343,6 +3343,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
        { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
        { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
+       { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
        { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
        { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
        { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
@@ -3367,8 +3368,16 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        { TEGRA210_CLK_PLL_DP, TEGRA210_CLK_CLK_MAX, 270000000, 0 },
        { TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 },
        { TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 },
-       { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
        { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
+       { TEGRA210_CLK_SPDIF_IN_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA210_CLK_I2S0_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA210_CLK_I2S1_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA210_CLK_I2S2_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+       { TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 },
+       { TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 },
        /* This MUST be the last entry. */
        { TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
 };
@@ -3562,7 +3571,7 @@ static void __init tegra210_clock_init(struct device_node *np)
        tegra210_periph_clk_init(clk_base, pmc_base);
        tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks,
                             tegra210_audio_plls,
-                            ARRAY_SIZE(tegra210_audio_plls));
+                            ARRAY_SIZE(tegra210_audio_plls), 24576000);
        tegra_pmc_clk_init(pmc_base, tegra210_clks);
 
        /* For Tegra210, PLLD is the only source for DSIA & DSIB */
index acfe661b2ae7249db0c130e0897e1b6fea164d67..e0aaecd98fbff1635c2ee78f877ad84df734eed5 100644 (file)
@@ -1267,6 +1267,13 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        { TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
        { TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
        { TEGRA30_CLK_VDE, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
+       { TEGRA30_CLK_SPDIF_IN_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA30_CLK_I2S0_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA30_CLK_I2S1_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA30_CLK_I2S2_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
        /* must be the last entry */
        { TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
 };
@@ -1344,7 +1351,7 @@ static void __init tegra30_clock_init(struct device_node *np)
        tegra30_periph_clk_init();
        tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks,
                             tegra30_audio_plls,
-                            ARRAY_SIZE(tegra30_audio_plls));
+                            ARRAY_SIZE(tegra30_audio_plls), 24000000);
        tegra_pmc_clk_init(pmc_base, tegra30_clks);
 
        tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
index d2c3a010f8e9b358906cc99f4eaf80b54fb1c18a..09bccbb9640c48c802dff657d166645641ebada0 100644 (file)
@@ -41,7 +41,7 @@ extern const struct clk_ops tegra_clk_sync_source_ops;
 extern int *periph_clk_enb_refcnt;
 
 struct clk *tegra_clk_register_sync_source(const char *name,
-               unsigned long fixed_rate, unsigned long max_rate);
+                                          unsigned long max_rate);
 
 /**
  * struct tegra_clk_frac_div - fractional divider clock
@@ -796,7 +796,7 @@ void tegra_register_devclks(struct tegra_devclk *dev_clks, int num);
 void tegra_audio_clk_init(void __iomem *clk_base,
                        void __iomem *pmc_base, struct tegra_clk *tegra_clks,
                        struct tegra_audio_clk_info *audio_info,
-                       unsigned int num_plls);
+                       unsigned int num_plls, unsigned long sync_max_rate);
 
 void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base,
                        struct tegra_clk *tegra_clks,
index 7bb9afbe4058974112ac7a025a136777d67e414e..9bd2e50e4359994f0a40b0612a37c18600ee600f 100644 (file)
@@ -47,6 +47,9 @@ int omap2_clk_deny_idle(struct clk *clk)
 {
        struct clk_hw_omap *c;
 
+       if (!clk)
+               return -EINVAL;
+
        c = to_clk_hw_omap(__clk_get_hw(clk));
        if (c->ops && c->ops->deny_idle)
                c->ops->deny_idle(c);
@@ -63,6 +66,9 @@ int omap2_clk_allow_idle(struct clk *clk)
 {
        struct clk_hw_omap *c;
 
+       if (!clk)
+               return -EINVAL;
+
        c = to_clk_hw_omap(__clk_get_hw(clk));
        if (c->ops && c->ops->allow_idle)
                c->ops->allow_idle(c);
index 14881547043130d1e686055387a6276e49fd11f9..beb672a215b6cca9dbc8f1c58dd7bf1bbc57864b 100644 (file)
@@ -174,7 +174,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
        struct clk_init_data init = { NULL };
        const char **parent_names = NULL;
        struct clk *clk;
-       int ret;
 
        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        if (!clk_hw) {
@@ -207,11 +206,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
        clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
 
        if (!IS_ERR(clk)) {
-               ret = ti_clk_add_alias(NULL, clk, node->name);
-               if (ret) {
-                       clk_unregister(clk);
-                       goto cleanup;
-               }
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
                kfree(parent_names);
                return;
index e31ac136c7653f987ea44f4c3c6b33046a079a47..6051a2267dbed129577aedceca1efcd6401da791 100644 (file)
@@ -229,6 +229,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
 {
        struct omap_clkctrl_provider *provider = data;
        struct omap_clkctrl_clk *entry;
+       bool found = false;
 
        if (clkspec->args_count != 2)
                return ERR_PTR(-EINVAL);
@@ -238,11 +239,13 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
 
        list_for_each_entry(entry, &provider->clocks, node) {
                if (entry->reg_offset == clkspec->args[0] &&
-                   entry->bit_offset == clkspec->args[1])
+                   entry->bit_offset == clkspec->args[1]) {
+                       found = true;
                        break;
+               }
        }
 
-       if (!entry)
+       if (!found)
                return ERR_PTR(-EINVAL);
 
        return entry->clk;
index 07a805125e98cff774ed864f40f25032b49c8822..11d92311e162f7818185a36b20ca4b0d11c79c14 100644 (file)
@@ -146,10 +146,12 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
                if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) {
                        pr_warn("can't setup clkdm for basic clk %s\n",
                                __clk_get_name(clk));
+                       clk_put(clk);
                        continue;
                }
                to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
                omap2_init_clk_clkdm(clk_hw);
+               clk_put(clk);
        }
 }
 
index 354dd508c51692b540255edaa3edde19cf56d506..8dfb8523b79db137a2af81784de34b2a62f7f330 100644 (file)
@@ -567,6 +567,7 @@ static int __init top_clocks_init(struct device_node *np)
 {
        void __iomem *reg_base;
        int i, ret;
+       const char *name;
 
        reg_base = of_iomap(np, 0);
        if (!reg_base) {
@@ -576,11 +577,10 @@ static int __init top_clocks_init(struct device_node *np)
 
        for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
                zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
+               name = zx296718_pll_clk[i].hw.init->name;
                ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
-               if (ret) {
-                       pr_warn("top clk %s init error!\n",
-                               zx296718_pll_clk[i].hw.init->name);
-               }
+               if (ret)
+                       pr_warn("top clk %s init error!\n", name);
        }
 
        for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
@@ -588,11 +588,10 @@ static int __init top_clocks_init(struct device_node *np)
                        top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
                                        &top_ffactor_clk[i].factor.hw;
 
+               name = top_ffactor_clk[i].factor.hw.init->name;
                ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw);
-               if (ret) {
-                       pr_warn("top clk %s init error!\n",
-                               top_ffactor_clk[i].factor.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("top clk %s init error!\n", name);
        }
 
        for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) {
@@ -601,11 +600,10 @@ static int __init top_clocks_init(struct device_node *np)
                                        &top_mux_clk[i].mux.hw;
 
                top_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+               name = top_mux_clk[i].mux.hw.init->name;
                ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw);
-               if (ret) {
-                       pr_warn("top clk %s init error!\n",
-                               top_mux_clk[i].mux.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("top clk %s init error!\n", name);
        }
 
        for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) {
@@ -614,11 +612,10 @@ static int __init top_clocks_init(struct device_node *np)
                                        &top_gate_clk[i].gate.hw;
 
                top_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+               name = top_gate_clk[i].gate.hw.init->name;
                ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw);
-               if (ret) {
-                       pr_warn("top clk %s init error!\n",
-                               top_gate_clk[i].gate.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("top clk %s init error!\n", name);
        }
 
        for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) {
@@ -627,11 +624,10 @@ static int __init top_clocks_init(struct device_node *np)
                                        &top_div_clk[i].div.hw;
 
                top_div_clk[i].div.reg += (uintptr_t)reg_base;
+               name = top_div_clk[i].div.hw.init->name;
                ret = clk_hw_register(NULL, &top_div_clk[i].div.hw);
-               if (ret) {
-                       pr_warn("top clk %s init error!\n",
-                               top_div_clk[i].div.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("top clk %s init error!\n", name);
        }
 
        ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -757,6 +753,7 @@ static int __init lsp0_clocks_init(struct device_node *np)
 {
        void __iomem *reg_base;
        int i, ret;
+       const char *name;
 
        reg_base = of_iomap(np, 0);
        if (!reg_base) {
@@ -770,11 +767,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
                                        &lsp0_mux_clk[i].mux.hw;
 
                lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+               name = lsp0_mux_clk[i].mux.hw.init->name;
                ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw);
-               if (ret) {
-                       pr_warn("lsp0 clk %s init error!\n",
-                               lsp0_mux_clk[i].mux.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("lsp0 clk %s init error!\n", name);
        }
 
        for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) {
@@ -783,11 +779,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
                                        &lsp0_gate_clk[i].gate.hw;
 
                lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+               name = lsp0_gate_clk[i].gate.hw.init->name;
                ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw);
-               if (ret) {
-                       pr_warn("lsp0 clk %s init error!\n",
-                               lsp0_gate_clk[i].gate.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("lsp0 clk %s init error!\n", name);
        }
 
        for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) {
@@ -796,11 +791,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
                                        &lsp0_div_clk[i].div.hw;
 
                lsp0_div_clk[i].div.reg += (uintptr_t)reg_base;
+               name = lsp0_div_clk[i].div.hw.init->name;
                ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw);
-               if (ret) {
-                       pr_warn("lsp0 clk %s init error!\n",
-                               lsp0_div_clk[i].div.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("lsp0 clk %s init error!\n", name);
        }
 
        ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -865,6 +859,7 @@ static int __init lsp1_clocks_init(struct device_node *np)
 {
        void __iomem *reg_base;
        int i, ret;
+       const char *name;
 
        reg_base = of_iomap(np, 0);
        if (!reg_base) {
@@ -878,11 +873,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
                                        &lsp0_mux_clk[i].mux.hw;
 
                lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+               name = lsp1_mux_clk[i].mux.hw.init->name;
                ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw);
-               if (ret) {
-                       pr_warn("lsp1 clk %s init error!\n",
-                               lsp1_mux_clk[i].mux.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("lsp1 clk %s init error!\n", name);
        }
 
        for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) {
@@ -891,11 +885,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
                                        &lsp1_gate_clk[i].gate.hw;
 
                lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+               name = lsp1_gate_clk[i].gate.hw.init->name;
                ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw);
-               if (ret) {
-                       pr_warn("lsp1 clk %s init error!\n",
-                               lsp1_gate_clk[i].gate.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("lsp1 clk %s init error!\n", name);
        }
 
        for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) {
@@ -904,11 +897,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
                                        &lsp1_div_clk[i].div.hw;
 
                lsp1_div_clk[i].div.reg += (uintptr_t)reg_base;
+               name = lsp1_div_clk[i].div.hw.init->name;
                ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw);
-               if (ret) {
-                       pr_warn("lsp1 clk %s init error!\n",
-                               lsp1_div_clk[i].div.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("lsp1 clk %s init error!\n", name);
        }
 
        ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -982,6 +974,7 @@ static int __init audio_clocks_init(struct device_node *np)
 {
        void __iomem *reg_base;
        int i, ret;
+       const char *name;
 
        reg_base = of_iomap(np, 0);
        if (!reg_base) {
@@ -995,11 +988,10 @@ static int __init audio_clocks_init(struct device_node *np)
                                        &audio_mux_clk[i].mux.hw;
 
                audio_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+               name = audio_mux_clk[i].mux.hw.init->name;
                ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw);
-               if (ret) {
-                       pr_warn("audio clk %s init error!\n",
-                               audio_mux_clk[i].mux.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("audio clk %s init error!\n", name);
        }
 
        for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) {
@@ -1008,11 +1000,10 @@ static int __init audio_clocks_init(struct device_node *np)
                                        &audio_adiv_clk[i].hw;
 
                audio_adiv_clk[i].reg_base += (uintptr_t)reg_base;
+               name = audio_adiv_clk[i].hw.init->name;
                ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw);
-               if (ret) {
-                       pr_warn("audio clk %s init error!\n",
-                               audio_adiv_clk[i].hw.init->name);
-               }
+               if (ret)
+                       pr_warn("audio clk %s init error!\n", name);
        }
 
        for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) {
@@ -1021,11 +1012,10 @@ static int __init audio_clocks_init(struct device_node *np)
                                        &audio_div_clk[i].div.hw;
 
                audio_div_clk[i].div.reg += (uintptr_t)reg_base;
+               name = audio_div_clk[i].div.hw.init->name;
                ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw);
-               if (ret) {
-                       pr_warn("audio clk %s init error!\n",
-                               audio_div_clk[i].div.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("audio clk %s init error!\n", name);
        }
 
        for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) {
@@ -1034,11 +1024,10 @@ static int __init audio_clocks_init(struct device_node *np)
                                        &audio_gate_clk[i].gate.hw;
 
                audio_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+               name = audio_gate_clk[i].gate.hw.init->name;
                ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw);
-               if (ret) {
-                       pr_warn("audio clk %s init error!\n",
-                               audio_gate_clk[i].gate.hw.init->name);
-               }
+               if (ret)
+                       pr_warn("audio clk %s init error!\n", name);
        }
 
        ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
index d55c30f6981dcc377b7ad6a2bb76b2b37a182ce1..aaf5bfa9bd9c915efdca1b2ac5fba58d86199a01 100644 (file)
@@ -211,7 +211,7 @@ static void exynos4_frc_resume(struct clocksource *cs)
 
 static struct clocksource mct_frc = {
        .name           = "mct-frc",
-       .rating         = 400,
+       .rating         = 450,  /* use value higher than ARM arch timer */
        .read           = exynos4_frc_read,
        .mask           = CLOCKSOURCE_MASK(32),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -466,7 +466,7 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
        evt->set_state_oneshot_stopped = set_state_shutdown;
        evt->tick_resume = set_state_shutdown;
        evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-       evt->rating = 450;
+       evt->rating = 500;      /* use value higher than ARM arch timer */
 
        exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
 
index d7adbc102649619bef817a522e35934d2612a111..b2b7613f7602aa3bc91a36c132c8cc17bdb07e0a 100644 (file)
@@ -94,6 +94,13 @@ static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
 
 static void omap_timer_restore_context(struct omap_dm_timer *timer)
 {
+       /*
+        * Do not restore the context during late attach. Kernel data
+        * structure is not in sync with the register settings of the timer.
+        */
+       if (timer->late_attach)
+               return;
+
        omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
                                timer->context.twer);
        omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
@@ -194,6 +201,20 @@ static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
        return ret;
 }
 
+static int omap_dm_timer_is_enabled(struct omap_dm_timer *timer)
+{
+       u32 val;
+
+       val = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+
+       /* Check if timer ST bit is set or the Counter register is loaded */
+       if (val & OMAP_TIMER_CTRL_ST ||
+           omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG))
+               return 1;
+       else
+               return 0;
+}
+
 static void omap_dm_timer_enable(struct omap_dm_timer *timer)
 {
        int c;
@@ -247,6 +268,14 @@ static int omap_dm_timer_prepare(struct omap_dm_timer *timer)
        __omap_dm_timer_enable_posted(timer);
        omap_dm_timer_disable(timer);
 
+       /*
+        * During late attach, do not set the timer source during prepare
+        * as the timer might be clocked from a different source. It will
+        * be set properly from remoteproc.
+        */
+       if (timer->late_attach)
+               return 0;
+
        rc = omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
 
        return rc;
@@ -503,6 +532,16 @@ static int omap_dm_timer_start(struct omap_dm_timer *timer)
 
        /* Save the context */
        timer->context.tclr = l;
+
+       /*
+        * Now that timer has been started, call pm_runtime_put_noidle to
+        * balance the pm_runtime device usage count to the proper value as
+        * the regular case, and reset the late_attach flag.
+        */
+       if (timer->late_attach)
+               pm_runtime_put_noidle(&timer->pdev->dev);
+       timer->late_attach = 0;
+
        return 0;
 }
 
@@ -543,10 +582,18 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
                l |= OMAP_TIMER_CTRL_AR;
        else
                l &= ~OMAP_TIMER_CTRL_AR;
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
 
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
+       /*
+        * If late attach is enabled, do not modify the dmtimer registers.
+        * The registers would have been configured already.
+        */
+       if (!timer->late_attach) {
+               omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+               omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+
+               omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
+       }
+
        /* Save the context */
        timer->context.tclr = l;
        timer->context.tldr = load;
@@ -568,13 +615,21 @@ int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
        l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
        if (autoreload) {
                l |= OMAP_TIMER_CTRL_AR;
-               omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+               /*
+                * If late attach is enabled, do not modify the dmtimer
+                * registers. The registers would have been configured
+                * already.
+                */
+               if (!timer->late_attach)
+                       omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG,
+                                               load);
        } else {
                l &= ~OMAP_TIMER_CTRL_AR;
        }
        l |= OMAP_TIMER_CTRL_ST;
 
-       __omap_dm_timer_load_start(timer, l, load, timer->posted);
+       if (!timer->late_attach)
+               __omap_dm_timer_load_start(timer, l, load, timer->posted);
 
        /* Save the context */
        timer->context.tclr = l;
@@ -847,6 +902,16 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
                        goto err_get_sync;
                }
                __omap_dm_timer_init_regs(timer);
+
+               if (omap_dm_timer_is_enabled(timer))
+                       timer->late_attach = 1;
+               /*
+                * Increase the pm_runtime usage count and prevent kernel power
+                * management from idling or disabling the timer.
+                */
+               if (timer->late_attach)
+                       pm_runtime_get_noresume(dev);
+
                pm_runtime_put(dev);
        }
 
@@ -884,6 +949,12 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
                if (!strcmp(dev_name(&timer->pdev->dev),
                            dev_name(&pdev->dev))) {
                        list_del(&timer->node);
+                       /*
+                        * Reset device usage counter if late_attach is still
+                        * set
+                        */
+                       if (timer->late_attach)
+                               pm_runtime_put_noidle(&timer->pdev->dev);
                        ret = 0;
                        break;
                }
@@ -905,6 +976,7 @@ const static struct omap_dm_timer_ops dmtimer_ops = {
        .free = omap_dm_timer_free,
        .enable = omap_dm_timer_enable,
        .disable = omap_dm_timer_disable,
+       .is_enabled = omap_dm_timer_is_enabled,
        .get_fclk = omap_dm_timer_get_fclk,
        .start = omap_dm_timer_start,
        .stop = omap_dm_timer_stop,
index d3213594d1a7a9027f5b013b7b72f0525a53d21d..ace5ec65e36f2ccd2989dc18e63603efc93937dd 100644 (file)
@@ -2578,14 +2578,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
 
-/*
- * Stop cpufreq at shutdown to make sure it isn't holding any locks
- * or mutexes when secondary CPUs are halted.
- */
-static struct syscore_ops cpufreq_syscore_ops = {
-       .shutdown = cpufreq_suspend,
-};
-
 struct kobject *cpufreq_global_kobject;
 EXPORT_SYMBOL(cpufreq_global_kobject);
 
@@ -2597,8 +2589,6 @@ static int __init cpufreq_core_init(void)
        cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
        BUG_ON(!cpufreq_global_kobject);
 
-       register_syscore_ops(&cpufreq_syscore_ops);
-
        return 0;
 }
 module_param(off, int, 0444);
index c7710c149de85a57fc6fa9e784dee83a0b6cf11d..a0620c9ec064957c963f31a665a92106def6c929 100644 (file)
@@ -145,10 +145,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
        int err = -ENODEV;
 
        cpu = of_get_cpu_node(policy->cpu, NULL);
+       if (!cpu)
+               goto out;
 
+       max_freqp = of_get_property(cpu, "clock-frequency", NULL);
        of_node_put(cpu);
-       if (!cpu)
+       if (!max_freqp) {
+               err = -EINVAL;
                goto out;
+       }
+
+       /* we need the freq in kHz */
+       max_freq = *max_freqp / 1000;
 
        dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
        if (!dn)
@@ -185,16 +193,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
        }
 
        pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
-       max_freqp = of_get_property(cpu, "clock-frequency", NULL);
-       if (!max_freqp) {
-               err = -EINVAL;
-               goto out_unmap_sdcpwr;
-       }
-
-       /* we need the freq in kHz */
-       max_freq = *max_freqp / 1000;
-
        pr_debug("max clock-frequency is at %u kHz\n", max_freq);
        pr_debug("initializing frequency table\n");
 
@@ -212,9 +210,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
        return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
 
-out_unmap_sdcpwr:
-       iounmap(sdcpwr_mapbase);
-
 out_unmap_sdcasr:
        iounmap(sdcasr_mapbase);
 out:
index 98ec18200fb5c9697c9eacb24d9b1bf1d8846433..30c072cfd0fd06ad3a7bc495101d5f7250f14372 100644 (file)
@@ -769,9 +769,12 @@ config CRYPTO_DEV_SA2UL
        select CRYPTO_AES
        select CRYPTO_AES_ARM64
        select CRYPTO_SHA1
+       select CRYPTO_SHA512
+       select CRYPTO_SHA256
        select CRYPTO_MD5
        select CRYPTO_ALGAPI
        select CRYPTO_AUTHENC
+       select CRYPTO_HMAC
        select HW_RANDOM
        default m if ARCH_K3
        help
index 0c85a5123f85d976d6b808d24e78b12b5508b852..1d87deca32ed55be2290e186073b6ed183804566 100644 (file)
@@ -76,12 +76,16 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
 }
 
 static inline int crypto4xx_crypt(struct skcipher_request *req,
-                                 const unsigned int ivlen, bool decrypt)
+                                 const unsigned int ivlen, bool decrypt,
+                                 bool check_blocksize)
 {
        struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
        struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
        __le32 iv[AES_IV_SIZE];
 
+       if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+               return -EINVAL;
+
        if (ivlen)
                crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
 
@@ -90,24 +94,34 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
                ctx->sa_len, 0, NULL);
 }
 
-int crypto4xx_encrypt_noiv(struct skcipher_request *req)
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
+{
+       return crypto4xx_crypt(req, 0, false, true);
+}
+
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
+{
+       return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
+}
+
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
 {
-       return crypto4xx_crypt(req, 0, false);
+       return crypto4xx_crypt(req, 0, true, true);
 }
 
-int crypto4xx_encrypt_iv(struct skcipher_request *req)
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
 {
-       return crypto4xx_crypt(req, AES_IV_SIZE, false);
+       return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
 }
 
-int crypto4xx_decrypt_noiv(struct skcipher_request *req)
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
 {
-       return crypto4xx_crypt(req, 0, true);
+       return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
 }
 
-int crypto4xx_decrypt_iv(struct skcipher_request *req)
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
 {
-       return crypto4xx_crypt(req, AES_IV_SIZE, true);
+       return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
 }
 
 /**
@@ -278,8 +292,8 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
                return ret;
        }
 
-       return encrypt ? crypto4xx_encrypt_iv(req)
-                      : crypto4xx_decrypt_iv(req);
+       return encrypt ? crypto4xx_encrypt_iv_stream(req)
+                      : crypto4xx_decrypt_iv_stream(req);
 }
 
 static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
index d2ec9fd1b8bb03f5e869b8a1122ccd5196ed4fff..6386e1784fe4198b385c063a93e4f5c3e2ca0ad4 100644 (file)
@@ -1153,8 +1153,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_IV_SIZE,
                .setkey = crypto4xx_setkey_aes_cbc,
-               .encrypt = crypto4xx_encrypt_iv,
-               .decrypt = crypto4xx_decrypt_iv,
+               .encrypt = crypto4xx_encrypt_iv_block,
+               .decrypt = crypto4xx_decrypt_iv_block,
                .init = crypto4xx_sk_init,
                .exit = crypto4xx_sk_exit,
        } },
@@ -1173,8 +1173,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_IV_SIZE,
                .setkey = crypto4xx_setkey_aes_cfb,
-               .encrypt = crypto4xx_encrypt_iv,
-               .decrypt = crypto4xx_decrypt_iv,
+               .encrypt = crypto4xx_encrypt_iv_stream,
+               .decrypt = crypto4xx_decrypt_iv_stream,
                .init = crypto4xx_sk_init,
                .exit = crypto4xx_sk_exit,
        } },
@@ -1186,7 +1186,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
                        .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_blocksize = 1,
                        .cra_ctxsize = sizeof(struct crypto4xx_ctx),
                        .cra_module = THIS_MODULE,
                },
@@ -1206,7 +1206,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
                        .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_blocksize = 1,
                        .cra_ctxsize = sizeof(struct crypto4xx_ctx),
                        .cra_module = THIS_MODULE,
                },
@@ -1226,15 +1226,15 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
                        .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_blocksize = 1,
                        .cra_ctxsize = sizeof(struct crypto4xx_ctx),
                        .cra_module = THIS_MODULE,
                },
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .setkey = crypto4xx_setkey_aes_ecb,
-               .encrypt = crypto4xx_encrypt_noiv,
-               .decrypt = crypto4xx_decrypt_noiv,
+               .encrypt = crypto4xx_encrypt_noiv_block,
+               .decrypt = crypto4xx_decrypt_noiv_block,
                .init = crypto4xx_sk_init,
                .exit = crypto4xx_sk_exit,
        } },
@@ -1245,7 +1245,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
                        .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_blocksize = 1,
                        .cra_ctxsize = sizeof(struct crypto4xx_ctx),
                        .cra_module = THIS_MODULE,
                },
@@ -1253,8 +1253,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_IV_SIZE,
                .setkey = crypto4xx_setkey_aes_ofb,
-               .encrypt = crypto4xx_encrypt_iv,
-               .decrypt = crypto4xx_decrypt_iv,
+               .encrypt = crypto4xx_encrypt_iv_stream,
+               .decrypt = crypto4xx_decrypt_iv_stream,
                .init = crypto4xx_sk_init,
                .exit = crypto4xx_sk_exit,
        } },
index e2ca56722f077242a0a356d14e15638db48ab49c..21a6bbcedc55db7741b770b91ab33e3a4750d86b 100644 (file)
@@ -179,10 +179,12 @@ int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
                             const u8 *key, unsigned int keylen);
 int crypto4xx_encrypt_ctr(struct skcipher_request *req);
 int crypto4xx_decrypt_ctr(struct skcipher_request *req);
-int crypto4xx_encrypt_iv(struct skcipher_request *req);
-int crypto4xx_decrypt_iv(struct skcipher_request *req);
-int crypto4xx_encrypt_noiv(struct skcipher_request *req);
-int crypto4xx_decrypt_noiv(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
 int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
 int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
 int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
index 53ab1f140a2658ca521b60de0be0ce69b4185134..8a3ed40312061f9e877c2219602a177a3491d5a0 100644 (file)
@@ -111,7 +111,6 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
        return;
 
 err_out:
-       of_node_put(trng);
        iounmap(dev->trng_base);
        kfree(rng);
        dev->trng_base = NULL;
index 9bc54c3c2cb97d9b539cffe29b7ece48d07214f9..1907945f82b787bf4baa49fa4aaf818d458b4839 100644 (file)
@@ -887,6 +887,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
        struct ablkcipher_request *req = context;
        struct ablkcipher_edesc *edesc;
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
 #ifdef DEBUG
@@ -911,10 +912,11 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 
        /*
         * The crypto API expects us to set the IV (req->info) to the last
-        * ciphertext block. This is used e.g. by the CTS mode.
+        * ciphertext block when running in CBC mode.
         */
-       scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
-                                ivsize, 0);
+       if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
+               scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+                                        ivsize, ivsize, 0);
 
        /* In case initial IV was generated, copy it in GIVCIPHER request */
        if (edesc->iv_dir == DMA_FROM_DEVICE) {
@@ -1651,10 +1653,11 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
 
        /*
         * The crypto API expects us to set the IV (req->info) to the last
-        * ciphertext block.
+        * ciphertext block when running in CBC mode.
         */
-       scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
-                                ivsize, 0);
+       if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
+               scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+                                        ivsize, ivsize, 0);
 
        /* Create and submit job descriptor*/
        init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
index a408edd84f34634b8cae6bfa6af2035c8788394e..edacf9b39b638ce543a20eec9b0259654c11e088 100644 (file)
@@ -509,6 +509,7 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
                               const bool is_qi, int era)
 {
        u32 geniv, moveiv;
+       u32 *wait_cmd;
 
        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
@@ -604,6 +605,14 @@ copy_iv:
 
        /* Will read cryptlen */
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+       /*
+        * Wait for IV transfer (ofifo -> class2) to finish before starting
+        * ciphertext transfer (ofifo -> external memory).
+        */
+       wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
+       set_jump_tgt_here(desc, wait_cmd);
+
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
                             FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
index a917af5776ce160dba8d5208f6ff9a4874d5998c..05516b0a4240c379b02da04ab5760453abcdd68d 100644 (file)
@@ -12,7 +12,7 @@
 #define DESC_AEAD_BASE                 (4 * CAAM_CMD_SZ)
 #define DESC_AEAD_ENC_LEN              (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
 #define DESC_AEAD_DEC_LEN              (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN           (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN           (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_ENC_LEN           (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_DEC_LEN           (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_GIVENC_LEN                (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
index 6183f9128a8a09aa5c1e55fada7f19027bb83d39..ea901bc5733ccd479966d3ca0bd24f646abc1c22 100644 (file)
@@ -593,6 +593,7 @@ static const struct file_operations zip_stats_fops = {
        .owner = THIS_MODULE,
        .open  = zip_stats_open,
        .read  = seq_read,
+       .release = single_release,
 };
 
 static int zip_clear_open(struct inode *inode, struct file *file)
@@ -604,6 +605,7 @@ static const struct file_operations zip_clear_fops = {
        .owner = THIS_MODULE,
        .open  = zip_clear_open,
        .read  = seq_read,
+       .release = single_release,
 };
 
 static int zip_regs_open(struct inode *inode, struct file *file)
@@ -615,6 +617,7 @@ static const struct file_operations zip_regs_fops = {
        .owner = THIS_MODULE,
        .open  = zip_regs_open,
        .read  = seq_read,
+       .release = single_release,
 };
 
 /* Root directory for thunderx_zip debugfs entry */
index ca1f0d780b61cee959cb326d60bbde7c43782a78..e5dcb29b687f6324621405218ed20ab2d781894e 100644 (file)
@@ -61,6 +61,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
 static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
                                   unsigned int authsize)
 {
+       switch (authsize) {
+       case 16:
+       case 15:
+       case 14:
+       case 13:
+       case 12:
+       case 8:
+       case 4:
+               break;
+       default:
+               return -EINVAL;
+       }
+
        return 0;
 }
 
@@ -107,6 +120,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
        memset(&rctx->cmd, 0, sizeof(rctx->cmd));
        INIT_LIST_HEAD(&rctx->cmd.entry);
        rctx->cmd.engine = CCP_ENGINE_AES;
+       rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
        rctx->cmd.u.aes.type = ctx->u.aes.type;
        rctx->cmd.u.aes.mode = ctx->u.aes.mode;
        rctx->cmd.u.aes.action = encrypt;
index 1b5035d562880a6d0b66458fae9fa15adda33c1c..b8c94a01cfc941c0808b24d88c639216086cdd8c 100644 (file)
@@ -35,56 +35,62 @@ struct ccp_tasklet_data {
 };
 
 /* Human-readable error strings */
+#define CCP_MAX_ERROR_CODE     64
 static char *ccp_error_codes[] = {
        "",
-       "ERR 01: ILLEGAL_ENGINE",
-       "ERR 02: ILLEGAL_KEY_ID",
-       "ERR 03: ILLEGAL_FUNCTION_TYPE",
-       "ERR 04: ILLEGAL_FUNCTION_MODE",
-       "ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
-       "ERR 06: ILLEGAL_FUNCTION_SIZE",
-       "ERR 07: Zlib_MISSING_INIT_EOM",
-       "ERR 08: ILLEGAL_FUNCTION_RSVD",
-       "ERR 09: ILLEGAL_BUFFER_LENGTH",
-       "ERR 10: VLSB_FAULT",
-       "ERR 11: ILLEGAL_MEM_ADDR",
-       "ERR 12: ILLEGAL_MEM_SEL",
-       "ERR 13: ILLEGAL_CONTEXT_ID",
-       "ERR 14: ILLEGAL_KEY_ADDR",
-       "ERR 15: 0xF Reserved",
-       "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
-       "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
-       "ERR 18: CMD_TIMEOUT",
-       "ERR 19: IDMA0_AXI_SLVERR",
-       "ERR 20: IDMA0_AXI_DECERR",
-       "ERR 21: 0x15 Reserved",
-       "ERR 22: IDMA1_AXI_SLAVE_FAULT",
-       "ERR 23: IDMA1_AIXI_DECERR",
-       "ERR 24: 0x18 Reserved",
-       "ERR 25: ZLIBVHB_AXI_SLVERR",
-       "ERR 26: ZLIBVHB_AXI_DECERR",
-       "ERR 27: 0x1B Reserved",
-       "ERR 27: ZLIB_UNEXPECTED_EOM",
-       "ERR 27: ZLIB_EXTRA_DATA",
-       "ERR 30: ZLIB_BTYPE",
-       "ERR 31: ZLIB_UNDEFINED_SYMBOL",
-       "ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
-       "ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
-       "ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
-       "ERR 35: ZLIB_UNCOMPRESSED_LEN",
-       "ERR 36: ZLIB_LIMIT_REACHED",
-       "ERR 37: ZLIB_CHECKSUM_MISMATCH0",
-       "ERR 38: ODMA0_AXI_SLVERR",
-       "ERR 39: ODMA0_AXI_DECERR",
-       "ERR 40: 0x28 Reserved",
-       "ERR 41: ODMA1_AXI_SLVERR",
-       "ERR 42: ODMA1_AXI_DECERR",
-       "ERR 43: LSB_PARITY_ERR",
+       "ILLEGAL_ENGINE",
+       "ILLEGAL_KEY_ID",
+       "ILLEGAL_FUNCTION_TYPE",
+       "ILLEGAL_FUNCTION_MODE",
+       "ILLEGAL_FUNCTION_ENCRYPT",
+       "ILLEGAL_FUNCTION_SIZE",
+       "Zlib_MISSING_INIT_EOM",
+       "ILLEGAL_FUNCTION_RSVD",
+       "ILLEGAL_BUFFER_LENGTH",
+       "VLSB_FAULT",
+       "ILLEGAL_MEM_ADDR",
+       "ILLEGAL_MEM_SEL",
+       "ILLEGAL_CONTEXT_ID",
+       "ILLEGAL_KEY_ADDR",
+       "0xF Reserved",
+       "Zlib_ILLEGAL_MULTI_QUEUE",
+       "Zlib_ILLEGAL_JOBID_CHANGE",
+       "CMD_TIMEOUT",
+       "IDMA0_AXI_SLVERR",
+       "IDMA0_AXI_DECERR",
+       "0x15 Reserved",
+       "IDMA1_AXI_SLAVE_FAULT",
+       "IDMA1_AIXI_DECERR",
+       "0x18 Reserved",
+       "ZLIBVHB_AXI_SLVERR",
+       "ZLIBVHB_AXI_DECERR",
+       "0x1B Reserved",
+       "ZLIB_UNEXPECTED_EOM",
+       "ZLIB_EXTRA_DATA",
+       "ZLIB_BTYPE",
+       "ZLIB_UNDEFINED_SYMBOL",
+       "ZLIB_UNDEFINED_DISTANCE_S",
+       "ZLIB_CODE_LENGTH_SYMBOL",
+       "ZLIB _VHB_ILLEGAL_FETCH",
+       "ZLIB_UNCOMPRESSED_LEN",
+       "ZLIB_LIMIT_REACHED",
+       "ZLIB_CHECKSUM_MISMATCH0",
+       "ODMA0_AXI_SLVERR",
+       "ODMA0_AXI_DECERR",
+       "0x28 Reserved",
+       "ODMA1_AXI_SLVERR",
+       "ODMA1_AXI_DECERR",
 };
 
-void ccp_log_error(struct ccp_device *d, int e)
+void ccp_log_error(struct ccp_device *d, unsigned int e)
 {
-       dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+       if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
+               return;
+
+       if (e < ARRAY_SIZE(ccp_error_codes))
+               dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
+       else
+               dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
 }
 
 /* List of CCPs, CCP count, read-write access lock, and access functions
@@ -537,6 +543,10 @@ int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
        unsigned long flags;
        unsigned int i;
 
+       /* If there's no device there's nothing to do */
+       if (!ccp)
+               return 0;
+
        spin_lock_irqsave(&ccp->cmd_lock, flags);
 
        ccp->suspending = 1;
@@ -561,6 +571,10 @@ int ccp_dev_resume(struct sp_device *sp)
        unsigned long flags;
        unsigned int i;
 
+       /* If there's no device there's nothing to do */
+       if (!ccp)
+               return 0;
+
        spin_lock_irqsave(&ccp->cmd_lock, flags);
 
        ccp->suspending = 0;
index 6810b65c1939c88abee1448f06e9d9c2b71460df..7442b0422f8ac032f3c0c7ee87f352b522671927 100644 (file)
@@ -632,7 +632,7 @@ struct ccp5_desc {
 void ccp_add_device(struct ccp_device *ccp);
 void ccp_del_device(struct ccp_device *ccp);
 
-extern void ccp_log_error(struct ccp_device *, int);
+extern void ccp_log_error(struct ccp_device *, unsigned int);
 
 struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
 bool ccp_queues_suspended(struct ccp_device *ccp);
index 0ea43cdeb05f0f4c5c38a332b30faf1cc67f4c6e..1e2e42106dee070d92ac543c8e1448327caffb8a 100644 (file)
@@ -625,6 +625,8 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 
        unsigned long long *final;
        unsigned int dm_offset;
+       unsigned int authsize;
+       unsigned int jobid;
        unsigned int ilen;
        bool in_place = true; /* Default value */
        int ret;
@@ -645,6 +647,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
        if (!aes->key) /* Gotta have a key SGL */
                return -EINVAL;
 
+       /* Zero defaults to 16 bytes, the maximum size */
+       authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
+       switch (authsize) {
+       case 16:
+       case 15:
+       case 14:
+       case 13:
+       case 12:
+       case 8:
+       case 4:
+               break;
+       default:
+               return -EINVAL;
+       }
+
        /* First, decompose the source buffer into AAD & PT,
         * and the destination buffer into AAD, CT & tag, or
         * the input into CT & tag.
@@ -659,13 +676,15 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
                p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
        } else {
                /* Input length for decryption includes tag */
-               ilen = aes->src_len - AES_BLOCK_SIZE;
+               ilen = aes->src_len - authsize;
                p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
        }
 
+       jobid = CCP_NEW_JOBID(cmd_q->ccp);
+
        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
-       op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+       op.jobid = jobid;
        op.sb_key = cmd_q->sb_key; /* Pre-allocated */
        op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
        op.init = 1;
@@ -766,8 +785,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
                while (src.sg_wa.bytes_left) {
                        ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
                        if (!src.sg_wa.bytes_left) {
-                               unsigned int nbytes = aes->src_len
-                                                     % AES_BLOCK_SIZE;
+                               unsigned int nbytes = ilen % AES_BLOCK_SIZE;
 
                                if (nbytes) {
                                        op.eom = 1;
@@ -816,6 +834,13 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
        final[0] = cpu_to_be64(aes->aad_len * 8);
        final[1] = cpu_to_be64(ilen * 8);
 
+       memset(&op, 0, sizeof(op));
+       op.cmd_q = cmd_q;
+       op.jobid = jobid;
+       op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+       op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+       op.init = 1;
+       op.u.aes.type = aes->type;
        op.u.aes.mode = CCP_AES_MODE_GHASH;
        op.u.aes.action = CCP_AES_GHASHFINAL;
        op.src.type = CCP_MEMTYPE_SYSTEM;
@@ -832,18 +857,19 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 
        if (aes->action == CCP_AES_ACTION_ENCRYPT) {
                /* Put the ciphered tag after the ciphertext. */
-               ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
+               ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
        } else {
                /* Does this ciphered tag match the input? */
-               ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
+               ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
                                           DMA_BIDIRECTIONAL);
                if (ret)
                        goto e_tag;
-               ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+               ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
                if (ret)
                        goto e_tag;
 
-               ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
+               ret = crypto_memneq(tag.address, final_wa.address,
+                                   authsize) ? -EBADMSG : 0;
                ccp_dm_free(&tag);
        }
 
@@ -851,11 +877,11 @@ e_tag:
        ccp_dm_free(&final_wa);
 
 e_dst:
-       if (aes->src_len && !in_place)
+       if (ilen > 0 && !in_place)
                ccp_free_data(&dst, cmd_q);
 
 e_src:
-       if (aes->src_len)
+       if (ilen > 0)
                ccp_free_data(&src, cmd_q);
 
 e_aad:
index 0669033f5be507260bfc098b4c36592b247ebbfb..aa6b45bc13b983ca382df769d4e0df9b2935d603 100644 (file)
@@ -227,7 +227,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
                        /* In case of payload authentication failure, MUST NOT
                         * revealed the decrypted message --> zero its memory.
                         */
-                       cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
+                       cc_zero_sgl(areq->dst, areq->cryptlen);
                        err = -EBADMSG;
                }
        } else { /*ENCRYPT*/
index 1ff229c2aeab13a464ad573af163fb61783959cf..186a2536fb8b9c8a3cbb44e5f33a329175ea4490 100644 (file)
@@ -364,7 +364,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
        rc = cc_ivgen_init(new_drvdata);
        if (rc) {
                dev_err(dev, "cc_ivgen_init failed\n");
-               goto post_power_mgr_err;
+               goto post_buf_mgr_err;
        }
 
        /* Allocate crypto algs */
@@ -387,6 +387,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
                goto post_hash_err;
        }
 
+       /* All set, we can allow autosuspend */
+       cc_pm_go(new_drvdata);
+
        /* If we got here and FIPS mode is enabled
         * it means all FIPS test passed, so let TEE
         * know we're good.
@@ -401,8 +404,6 @@ post_cipher_err:
        cc_cipher_free(new_drvdata);
 post_ivgen_err:
        cc_ivgen_fini(new_drvdata);
-post_power_mgr_err:
-       cc_pm_fini(new_drvdata);
 post_buf_mgr_err:
         cc_buffer_mgr_fini(new_drvdata);
 post_req_mgr_err:
index 09f708f6418ed63c11b051ac993685b1a0d2e4ac..bac278d274b0fde15e711c950f45e02e3c151c85 100644 (file)
@@ -21,7 +21,13 @@ static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
        u32 reg;
 
        reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
-       return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
+       /* Did the TEE report status? */
+       if (reg & CC_FIPS_SYNC_TEE_STATUS)
+               /* Yes. Is it OK? */
+               return (reg & CC_FIPS_SYNC_MODULE_OK);
+
+       /* No. It's either not in use or will be reported later */
+       return true;
 }
 
 /*
index 79fc0a37ba6e42a8e09e6460c79a46ca278ec3cf..638082dff183ac7c92ed7f4c5fc183fff21dcab4 100644 (file)
@@ -103,20 +103,19 @@ int cc_pm_put_suspend(struct device *dev)
 
 int cc_pm_init(struct cc_drvdata *drvdata)
 {
-       int rc = 0;
        struct device *dev = drvdata_to_dev(drvdata);
 
        /* must be before the enabling to avoid resdundent suspending */
        pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
        pm_runtime_use_autosuspend(dev);
        /* activate the PM module */
-       rc = pm_runtime_set_active(dev);
-       if (rc)
-               return rc;
-       /* enable the PM module*/
-       pm_runtime_enable(dev);
+       return pm_runtime_set_active(dev);
+}
 
-       return rc;
+/* enable the PM module*/
+void cc_pm_go(struct cc_drvdata *drvdata)
+{
+       pm_runtime_enable(drvdata_to_dev(drvdata));
 }
 
 void cc_pm_fini(struct cc_drvdata *drvdata)
index 020a5403c58bac36961632075856a916d87fca60..907a6db4d6c036fe1c5433d5bda095d157e166b0 100644 (file)
@@ -16,6 +16,7 @@
 extern const struct dev_pm_ops ccree_pm;
 
 int cc_pm_init(struct cc_drvdata *drvdata);
+void cc_pm_go(struct cc_drvdata *drvdata);
 void cc_pm_fini(struct cc_drvdata *drvdata);
 int cc_pm_suspend(struct device *dev);
 int cc_pm_resume(struct device *dev);
@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
        return 0;
 }
 
+static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
+
 static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
 
 static inline int cc_pm_suspend(struct device *dev)
index cdc4f9a171d986625352319d76ccf243e417410a..db2983c51f1e667b40f7cc793eca6d4b877c21d5 100644 (file)
@@ -215,17 +215,18 @@ static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
                            dma_addr_t psec_sgl, struct sec_dev_info *info)
 {
        struct sec_hw_sgl *sgl_current, *sgl_next;
+       dma_addr_t sgl_next_dma;
 
-       if (!hw_sgl)
-               return;
        sgl_current = hw_sgl;
-       while (sgl_current->next) {
+       while (sgl_current) {
                sgl_next = sgl_current->next;
-               dma_pool_free(info->hw_sgl_pool, sgl_current,
-                             sgl_current->next_sgl);
+               sgl_next_dma = sgl_current->next_sgl;
+
+               dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
+
                sgl_current = sgl_next;
+               psec_sgl = sgl_next_dma;
        }
-       dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
 }
 
 static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
index 3aef1d43e43510130dc359d61c911e51209e9a7f..42a3830fbd1901f02529d10071a9a6453bf42cbe 100644 (file)
@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {
 
 struct safexcel_cipher_req {
        enum safexcel_cipher_direction direction;
+       /* Number of result descriptors associated to the request */
+       unsigned int rdescs;
        bool needs_inv;
 };
 
@@ -333,7 +335,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 
        *ret = 0;
 
-       do {
+       if (unlikely(!sreq->rdescs))
+               return 0;
+
+       while (sreq->rdescs--) {
                rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                if (IS_ERR(rdesc)) {
                        dev_err(priv->dev,
@@ -346,7 +351,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
                        *ret = safexcel_rdesc_check_errors(priv, rdesc);
 
                ndesc++;
-       } while (!rdesc->last_seg);
+       }
 
        safexcel_complete(priv, ring);
 
@@ -501,6 +506,7 @@ cdesc_rollback:
 static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *base,
+                                     struct safexcel_cipher_req *sreq,
                                      bool *should_complete, int *ret)
 {
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
@@ -509,7 +515,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
        *ret = 0;
 
-       do {
+       if (unlikely(!sreq->rdescs))
+               return 0;
+
+       while (sreq->rdescs--) {
                rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                if (IS_ERR(rdesc)) {
                        dev_err(priv->dev,
@@ -522,7 +531,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                        *ret = safexcel_rdesc_check_errors(priv, rdesc);
 
                ndesc++;
-       } while (!rdesc->last_seg);
+       }
 
        safexcel_complete(priv, ring);
 
@@ -564,7 +573,7 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
 
        if (sreq->needs_inv) {
                sreq->needs_inv = false;
-               err = safexcel_handle_inv_result(priv, ring, async,
+               err = safexcel_handle_inv_result(priv, ring, async, sreq,
                                                 should_complete, ret);
        } else {
                err = safexcel_handle_req_result(priv, ring, async, req->src,
@@ -587,7 +596,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
 
        if (sreq->needs_inv) {
                sreq->needs_inv = false;
-               err = safexcel_handle_inv_result(priv, ring, async,
+               err = safexcel_handle_inv_result(priv, ring, async, sreq,
                                                 should_complete, ret);
        } else {
                err = safexcel_handle_req_result(priv, ring, async, req->src,
@@ -633,6 +642,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
                ret = safexcel_send_req(async, ring, sreq, req->src,
                                        req->dst, req->cryptlen, 0, 0, req->iv,
                                        commands, results);
+
+       sreq->rdescs = *results;
        return ret;
 }
 
@@ -655,6 +666,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
                                        req->cryptlen, req->assoclen,
                                        crypto_aead_authsize(tfm), req->iv,
                                        commands, results);
+       sreq->rdescs = *results;
        return ret;
 }
 
index c68df7e8bee185487cd6e9544fd6c4f31ab0a9c2..7ce2467c771eb63d7a2b932329670904ec40b027 100644 (file)
@@ -36,8 +36,6 @@ MODULE_ALIAS_CRYPTO("842-nx");
 #define WORKMEM_ALIGN  (CRB_ALIGN)
 #define CSB_WAIT_MAX   (5000) /* ms */
 #define VAS_RETRIES    (10)
-/* # of requests allowed per RxFIFO at a time. 0 for unlimited */
-#define MAX_CREDITS_PER_RXFIFO (1024)
 
 struct nx842_workmem {
        /* Below fields must be properly aligned */
@@ -821,7 +819,11 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
        rxattr.lnotify_lpid = lpid;
        rxattr.lnotify_pid = pid;
        rxattr.lnotify_tid = tid;
-       rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO;
+       /*
+        * Maximum RX window credits can not be more than #CRBs in
+        * RxFIFO. Otherwise, can get checkstop if RxFIFO overruns.
+        */
+       rxattr.wcreds_max = fifo_size / CRB_SIZE;
 
        /*
         * Open a VAS receice window which is used to configure RxFIFO
index 5c4c0a2531296a157ae146f19226d9d8f608830f..d78f8d5c89c3fe4f6906fca888ca4bc192255ef4 100644 (file)
@@ -95,7 +95,7 @@ struct service_hndl {
 
 static inline int get_current_node(void)
 {
-       return topology_physical_package_id(smp_processor_id());
+       return topology_physical_package_id(raw_smp_processor_id());
 }
 
 int adf_service_register(struct service_hndl *service);
index 474e79ef5e457f125471d0f2b992aa13d77b4b08..747fceaff998c7cfa6ba4aeb06955c102a209a0a 100644 (file)
@@ -1511,6 +1511,9 @@ static int zero_message_process(struct ahash_request *req)
        case SHA256_DIGEST_SIZE:
                memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
                break;
+       case SHA512_DIGEST_SIZE:
+               memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
+               break;
        default:
                return -EINVAL;
        }
@@ -1719,6 +1722,11 @@ static int sa_sha256_digest(struct ahash_request *req)
        return sa_sham_digest(req);
 }
 
+static int sa_sha512_digest(struct ahash_request *req)
+{
+       return sa_sham_digest(req);
+}
+
 static int sa_sham_init(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -1899,6 +1907,33 @@ static int sa_sham_cra_sha256_init(struct crypto_tfm *tfm)
        return 0;
 }
 
+static int sa_sham_cra_sha512_init(struct crypto_tfm *tfm)
+{
+       struct algo_data *ad = kzalloc(sizeof(*ad), GFP_KERNEL);
+       struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       sa_sham_cra_init_alg(tfm, "sha512");
+
+       ad->enc_eng.eng_id = SA_ENG_ID_NONE;
+       ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
+       ad->auth_eng.eng_id = SA_ENG_ID_AM1;
+       ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
+       ad->mci_enc = NULL;
+       ad->mci_dec = NULL;
+       ad->inv_key = false;
+       ad->keyed_mac = false;
+       ad->ealg_id = SA_EALG_ID_NONE;
+       ad->aalg_id = SA_AALG_ID_SHA2_512;
+       ad->hash_size = SHA512_DIGEST_SIZE;
+       ad->auth_ctrl = 0x6;
+
+       sa_sha_setup(ctx, ad);
+
+       kfree(ad);
+
+       return 0;
+}
+
 static void sa_sham_cra_exit(struct crypto_tfm *tfm)
 {
        struct crypto_alg *alg = tfm->__crt_alg;
@@ -2190,6 +2225,30 @@ static struct ahash_alg algs_sha[] = {
                .cra_exit               = sa_sham_cra_exit,
        }
 },
+{
+       .init           = sa_sham_init,
+       .update         = sa_sham_update,
+       .final          = sa_sham_final,
+       .finup          = sa_sham_finup,
+       .digest         = sa_sha512_digest,
+       .export         = sa_sham_export,
+       .import         = sa_sham_import,
+       .halg.digestsize        = SHA512_DIGEST_SIZE,
+       .halg.statesize         = 256,
+       .halg.base      = {
+               .cra_name               = "sha512",
+               .cra_driver_name        = "sa-sha512",
+               .cra_priority           = 400,
+               .cra_flags              = CRYPTO_ALG_ASYNC |
+                                               CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize          = SHA512_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct sa_tfm_ctx),
+               .cra_alignmask          = SA_ALIGN_MASK,
+               .cra_module             = THIS_MODULE,
+               .cra_init               = sa_sham_cra_sha512_init,
+               .cra_exit               = sa_sham_cra_exit,
+       }
+},
 {
        .init           = sa_sham_init,
        .update         = sa_sham_update,
@@ -2444,6 +2503,7 @@ static int sa_ul_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       spin_lock_init(&dev_data->scid_lock);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        saul_base = devm_ioremap_resource(dev, res);
 
index 6265ab91b8afaa7f3fbdb6bae4f02980073af57b..87573b0f2f6ec63ff1bbf405c28259e155979180 100644 (file)
@@ -351,6 +351,7 @@ enum sa_aalg_id {
        SA_AALG_ID_SHA1,          /* SHA1 mode */
        SA_AALG_ID_SHA2_224,      /* 224-bit SHA2 mode */
        SA_AALG_ID_SHA2_256,      /* 256-bit SHA2 mode */
+       SA_AALG_ID_SHA2_512,      /* 512-bit SHA2 mode */
        SA_AALG_ID_HMAC_MD5,      /* HMAC with MD5 mode */
        SA_AALG_ID_HMAC_SHA1,     /* HMAC with SHA1 mode */
        SA_AALG_ID_HMAC_SHA2_224, /* HMAC with 224-bit SHA2 mode */
index c5859d3cb82547d18992daf2d5bc1951ff2e0096..634ae487c372e0213ae9d8b123d8def8a6abcd80 100644 (file)
@@ -334,6 +334,21 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
 }
 EXPORT_SYMBOL(talitos_submit);
 
+static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
+{
+       struct talitos_edesc *edesc;
+
+       if (!is_sec1)
+               return request->desc->hdr;
+
+       if (!request->desc->next_desc)
+               return request->desc->hdr1;
+
+       edesc = container_of(request->desc, struct talitos_edesc, desc);
+
+       return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
+}
+
 /*
  * process what was done, notify callback of error if not
  */
@@ -355,12 +370,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
 
                /* descriptors with their done bits set don't get the error */
                rmb();
-               if (!is_sec1)
-                       hdr = request->desc->hdr;
-               else if (request->desc->next_desc)
-                       hdr = (request->desc + 1)->hdr1;
-               else
-                       hdr = request->desc->hdr1;
+               hdr = get_request_hdr(request, is_sec1);
 
                if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
                        status = 0;
@@ -490,8 +500,14 @@ static u32 current_desc_hdr(struct device *dev, int ch)
                }
        }
 
-       if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
-               return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
+       if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
+               struct talitos_edesc *edesc;
+
+               edesc = container_of(priv->chan[ch].fifo[iter].desc,
+                                    struct talitos_edesc, desc);
+               return ((struct talitos_desc *)
+                       (edesc->buf + edesc->dma_len))->hdr;
+       }
 
        return priv->chan[ch].fifo[iter].desc->hdr;
 }
@@ -913,36 +929,6 @@ badkey:
        return -EINVAL;
 }
 
-/*
- * talitos_edesc - s/w-extended descriptor
- * @src_nents: number of segments in input scatterlist
- * @dst_nents: number of segments in output scatterlist
- * @icv_ool: whether ICV is out-of-line
- * @iv_dma: dma address of iv for checking continuity and link table
- * @dma_len: length of dma mapped link_tbl space
- * @dma_link_tbl: bus physical address of link_tbl/buf
- * @desc: h/w descriptor
- * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
- * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
- *
- * if decrypting (with authcheck), or either one of src_nents or dst_nents
- * is greater than 1, an integrity check value is concatenated to the end
- * of link_tbl data
- */
-struct talitos_edesc {
-       int src_nents;
-       int dst_nents;
-       bool icv_ool;
-       dma_addr_t iv_dma;
-       int dma_len;
-       dma_addr_t dma_link_tbl;
-       struct talitos_desc desc;
-       union {
-               struct talitos_ptr link_tbl[0];
-               u8 buf[0];
-       };
-};
-
 static void talitos_sg_unmap(struct device *dev,
                             struct talitos_edesc *edesc,
                             struct scatterlist *src,
@@ -973,11 +959,13 @@ static void talitos_sg_unmap(struct device *dev,
 
 static void ipsec_esp_unmap(struct device *dev,
                            struct talitos_edesc *edesc,
-                           struct aead_request *areq)
+                           struct aead_request *areq, bool encrypt)
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_aead_ctx(aead);
        unsigned int ivsize = crypto_aead_ivsize(aead);
+       unsigned int authsize = crypto_aead_authsize(aead);
+       unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
        bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
        struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
 
@@ -986,7 +974,7 @@ static void ipsec_esp_unmap(struct device *dev,
                                         DMA_FROM_DEVICE);
        unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
 
-       talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
+       talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
                         areq->assoclen);
 
        if (edesc->dma_len)
@@ -997,7 +985,7 @@ static void ipsec_esp_unmap(struct device *dev,
                unsigned int dst_nents = edesc->dst_nents ? : 1;
 
                sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
-                                  areq->assoclen + areq->cryptlen - ivsize);
+                                  areq->assoclen + cryptlen - ivsize);
        }
 }
 
@@ -1015,12 +1003,11 @@ static void ipsec_esp_encrypt_done(struct device *dev,
        unsigned int authsize = crypto_aead_authsize(authenc);
        unsigned int ivsize = crypto_aead_ivsize(authenc);
        struct talitos_edesc *edesc;
-       struct scatterlist *sg;
        void *icvdata;
 
        edesc = container_of(desc, struct talitos_edesc, desc);
 
-       ipsec_esp_unmap(dev, edesc, areq);
+       ipsec_esp_unmap(dev, edesc, areq, true);
 
        /* copy the generated ICV to dst */
        if (edesc->icv_ool) {
@@ -1029,9 +1016,8 @@ static void ipsec_esp_encrypt_done(struct device *dev,
                else
                        icvdata = &edesc->link_tbl[edesc->src_nents +
                                                   edesc->dst_nents + 2];
-               sg = sg_last(areq->dst, edesc->dst_nents);
-               memcpy((char *)sg_virt(sg) + sg->length - authsize,
-                      icvdata, authsize);
+               sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
+                                    authsize, areq->assoclen + areq->cryptlen);
        }
 
        dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
@@ -1049,19 +1035,27 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_edesc *edesc;
-       struct scatterlist *sg;
        char *oicv, *icv;
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
 
        edesc = container_of(desc, struct talitos_edesc, desc);
 
-       ipsec_esp_unmap(dev, edesc, req);
+       ipsec_esp_unmap(dev, edesc, req, false);
 
        if (!err) {
+               char icvdata[SHA512_DIGEST_SIZE];
+               int nents = edesc->dst_nents ? : 1;
+               unsigned int len = req->assoclen + req->cryptlen;
+
                /* auth check */
-               sg = sg_last(req->dst, edesc->dst_nents ? : 1);
-               icv = (char *)sg_virt(sg) + sg->length - authsize;
+               if (nents > 1) {
+                       sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
+                                          len - authsize);
+                       icv = icvdata;
+               } else {
+                       icv = (char *)sg_virt(req->dst) + len - authsize;
+               }
 
                if (edesc->dma_len) {
                        if (is_sec1)
@@ -1093,7 +1087,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
 
        edesc = container_of(desc, struct talitos_edesc, desc);
 
-       ipsec_esp_unmap(dev, edesc, req);
+       ipsec_esp_unmap(dev, edesc, req, false);
 
        /* check ICV auth status */
        if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
@@ -1196,6 +1190,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
  * fill in and submit ipsec_esp descriptor
  */
 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+                    bool encrypt,
                     void (*callback)(struct device *dev,
                                      struct talitos_desc *desc,
                                      void *context, int error))
@@ -1205,7 +1200,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
        struct talitos_ctx *ctx = crypto_aead_ctx(aead);
        struct device *dev = ctx->dev;
        struct talitos_desc *desc = &edesc->desc;
-       unsigned int cryptlen = areq->cryptlen;
+       unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        int tbl_off = 0;
        int sg_count, ret;
@@ -1332,7 +1327,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 
        ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
        if (ret != -EINPROGRESS) {
-               ipsec_esp_unmap(dev, edesc, areq);
+               ipsec_esp_unmap(dev, edesc, areq, encrypt);
                kfree(edesc);
        }
        return ret;
@@ -1431,15 +1426,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
        edesc->dst_nents = dst_nents;
        edesc->iv_dma = iv_dma;
        edesc->dma_len = dma_len;
-       if (dma_len) {
-               void *addr = &edesc->link_tbl[0];
-
-               if (is_sec1 && !dst)
-                       addr += sizeof(struct talitos_desc);
-               edesc->dma_link_tbl = dma_map_single(dev, addr,
+       if (dma_len)
+               edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
                                                     edesc->dma_len,
                                                     DMA_BIDIRECTIONAL);
-       }
+
        return edesc;
 }
 
@@ -1450,9 +1441,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
        unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        unsigned int ivsize = crypto_aead_ivsize(authenc);
+       unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
 
        return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
-                                  iv, areq->assoclen, areq->cryptlen,
+                                  iv, areq->assoclen, cryptlen,
                                   authsize, ivsize, icv_stashing,
                                   areq->base.flags, encrypt);
 }
@@ -1471,7 +1463,7 @@ static int aead_encrypt(struct aead_request *req)
        /* set encrypt */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
 
-       return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
+       return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
 }
 
 static int aead_decrypt(struct aead_request *req)
@@ -1481,17 +1473,15 @@ static int aead_decrypt(struct aead_request *req)
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        struct talitos_private *priv = dev_get_drvdata(ctx->dev);
        struct talitos_edesc *edesc;
-       struct scatterlist *sg;
        void *icvdata;
 
-       req->cryptlen -= authsize;
-
        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, req->iv, 1, false);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);
 
-       if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+       if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+           (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
            ((!edesc->src_nents && !edesc->dst_nents) ||
             priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
 
@@ -1502,7 +1492,8 @@ static int aead_decrypt(struct aead_request *req)
 
                /* reset integrity check result bits */
 
-               return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
+               return ipsec_esp(edesc, req, false,
+                                ipsec_esp_decrypt_hwauth_done);
        }
 
        /* Have to check the ICV with software */
@@ -1515,11 +1506,10 @@ static int aead_decrypt(struct aead_request *req)
        else
                icvdata = &edesc->link_tbl[0];
 
-       sg = sg_last(req->src, edesc->src_nents ? : 1);
+       sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
+                          req->assoclen + req->cryptlen - authsize);
 
-       memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
-
-       return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
+       return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
 }
 
 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@ -1552,6 +1542,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
        return 0;
 }
 
+static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
+                                 const u8 *key, unsigned int keylen)
+{
+       if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
+           keylen == AES_KEYSIZE_256)
+               return ablkcipher_setkey(cipher, key, keylen);
+
+       crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+       return -EINVAL;
+}
+
 static void common_nonsnoop_unmap(struct device *dev,
                                  struct talitos_edesc *edesc,
                                  struct ablkcipher_request *areq)
@@ -1571,11 +1573,15 @@ static void ablkcipher_done(struct device *dev,
                            int err)
 {
        struct ablkcipher_request *areq = context;
+       struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+       unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
        struct talitos_edesc *edesc;
 
        edesc = container_of(desc, struct talitos_edesc, desc);
 
        common_nonsnoop_unmap(dev, edesc, areq);
+       memcpy(areq->info, ctx->iv, ivsize);
 
        kfree(edesc);
 
@@ -1670,6 +1676,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct talitos_edesc *edesc;
+       unsigned int blocksize =
+                       crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+       if (!areq->nbytes)
+               return 0;
+
+       if (areq->nbytes % blocksize)
+               return -EINVAL;
 
        /* allocate extended descriptor */
        edesc = ablkcipher_edesc_alloc(areq, true);
@@ -1687,6 +1701,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct talitos_edesc *edesc;
+       unsigned int blocksize =
+                       crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+       if (!areq->nbytes)
+               return 0;
+
+       if (areq->nbytes % blocksize)
+               return -EINVAL;
 
        /* allocate extended descriptor */
        edesc = ablkcipher_edesc_alloc(areq, false);
@@ -1706,14 +1728,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
        struct talitos_desc *desc = &edesc->desc;
-       struct talitos_desc *desc2 = desc + 1;
+       struct talitos_desc *desc2 = (struct talitos_desc *)
+                                    (edesc->buf + edesc->dma_len);
 
        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
        if (desc->next_desc &&
            desc->ptr[5].ptr != desc2->ptr[5].ptr)
                unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
 
-       talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
+       if (req_ctx->psrc)
+               talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
 
        /* When using hashctx-in, must unmap it. */
        if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
@@ -1780,7 +1804,6 @@ static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
 
 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
                                struct ahash_request *areq, unsigned int length,
-                               unsigned int offset,
                                void (*callback) (struct device *dev,
                                                  struct talitos_desc *desc,
                                                  void *context, int error))
@@ -1819,9 +1842,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
 
        sg_count = edesc->src_nents ?: 1;
        if (is_sec1 && sg_count > 1)
-               sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
-                                  edesc->buf + sizeof(struct talitos_desc),
-                                  length, req_ctx->nbuf);
+               sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
        else if (length)
                sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
                                      DMA_TO_DEVICE);
@@ -1834,7 +1855,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
                                       DMA_TO_DEVICE);
        } else {
                sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
-                                         &desc->ptr[3], sg_count, offset, 0);
+                                         &desc->ptr[3], sg_count, 0, 0);
                if (sg_count > 1)
                        sync_needed = true;
        }
@@ -1858,7 +1879,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
                talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
 
        if (is_sec1 && req_ctx->nbuf && length) {
-               struct talitos_desc *desc2 = desc + 1;
+               struct talitos_desc *desc2 = (struct talitos_desc *)
+                                            (edesc->buf + edesc->dma_len);
                dma_addr_t next_desc;
 
                memset(desc2, 0, sizeof(*desc2));
@@ -1879,7 +1901,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
                                                      DMA_TO_DEVICE);
                copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
                sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
-                                         &desc2->ptr[3], sg_count, offset, 0);
+                                         &desc2->ptr[3], sg_count, 0, 0);
                if (sg_count > 1)
                        sync_needed = true;
                copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
@@ -1990,7 +2012,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
        struct device *dev = ctx->dev;
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
-       int offset = 0;
        u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
 
        if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
@@ -2030,6 +2051,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
                        sg_chain(req_ctx->bufsl, 2, areq->src);
                req_ctx->psrc = req_ctx->bufsl;
        } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
+               int offset;
+
                if (nbytes_to_hash > blocksize)
                        offset = blocksize - req_ctx->nbuf;
                else
@@ -2042,7 +2065,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
                sg_copy_to_buffer(areq->src, nents,
                                  ctx_buf + req_ctx->nbuf, offset);
                req_ctx->nbuf += offset;
-               req_ctx->psrc = areq->src;
+               req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
+                                                offset);
        } else
                req_ctx->psrc = areq->src;
 
@@ -2082,8 +2106,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
        if (ctx->keylen && (req_ctx->first || req_ctx->last))
                edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
 
-       return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
-                                   ahash_done);
+       return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
 }
 
 static int ahash_update(struct ahash_request *areq)
@@ -2714,6 +2737,7 @@ static struct talitos_alg_template driver_algs[] = {
                                .min_keysize = AES_MIN_KEY_SIZE,
                                .max_keysize = AES_MAX_KEY_SIZE,
                                .ivsize = AES_BLOCK_SIZE,
+                               .setkey = ablkcipher_aes_setkey,
                        }
                },
                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2724,13 +2748,13 @@ static struct talitos_alg_template driver_algs[] = {
                .alg.crypto = {
                        .cra_name = "ctr(aes)",
                        .cra_driver_name = "ctr-aes-talitos",
-                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_blocksize = 1,
                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                     CRYPTO_ALG_ASYNC,
                        .cra_ablkcipher = {
                                .min_keysize = AES_MIN_KEY_SIZE,
                                .max_keysize = AES_MAX_KEY_SIZE,
-                               .ivsize = AES_BLOCK_SIZE,
+                               .setkey = ablkcipher_aes_setkey,
                        }
                },
                .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
@@ -3100,6 +3124,7 @@ static int talitos_remove(struct platform_device *ofdev)
                        break;
                case CRYPTO_ALG_TYPE_AEAD:
                        crypto_unregister_aead(&t_alg->algt.alg.aead);
+                       break;
                case CRYPTO_ALG_TYPE_AHASH:
                        crypto_unregister_ahash(&t_alg->algt.alg.hash);
                        break;
@@ -3202,7 +3227,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
                alg->cra_priority = t_alg->algt.priority;
        else
                alg->cra_priority = TALITOS_CRA_PRIORITY;
-       alg->cra_alignmask = 0;
+       if (has_ftr_sec1(priv))
+               alg->cra_alignmask = 3;
+       else
+               alg->cra_alignmask = 0;
        alg->cra_ctxsize = sizeof(struct talitos_ctx);
        alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
 
index a65a63e0d6c10e7d31c0010a97344802896f0679..979f6a61e545f1aa54d9d895932b838572f3fd86 100644 (file)
@@ -65,6 +65,36 @@ struct talitos_desc {
 
 #define TALITOS_DESC_SIZE      (sizeof(struct talitos_desc) - sizeof(__be32))
 
+/*
+ * talitos_edesc - s/w-extended descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @icv_ool: whether ICV is out-of-line
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @dma_len: length of dma mapped link_tbl space
+ * @dma_link_tbl: bus physical address of link_tbl/buf
+ * @desc: h/w descriptor
+ * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
+ * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
+ *
+ * if decrypting (with authcheck), or either one of src_nents or dst_nents
+ * is greater than 1, an integrity check value is concatenated to the end
+ * of link_tbl data
+ */
+struct talitos_edesc {
+       int src_nents;
+       int dst_nents;
+       bool icv_ool;
+       dma_addr_t iv_dma;
+       int dma_len;
+       dma_addr_t dma_link_tbl;
+       struct talitos_desc desc;
+       union {
+               struct talitos_ptr link_tbl[0];
+               u8 buf[0];
+       };
+};
+
 /**
  * talitos_request - descriptor submission request
  * @desc: descriptor pointer (kernel virtual)
index c25658b265988b4fdd2a3f4304fdbfab83b7b9f3..24a9658348d7861470354af7246ae632f25815cf 100644 (file)
@@ -194,11 +194,10 @@ static void exynos_bus_exit(struct device *dev)
        if (ret < 0)
                dev_warn(dev, "failed to disable the devfreq-event devices\n");
 
-       if (bus->regulator)
-               regulator_disable(bus->regulator);
-
        dev_pm_opp_of_remove_table(dev);
        clk_disable_unprepare(bus->clk);
+       if (bus->regulator)
+               regulator_disable(bus->regulator);
 }
 
 /*
@@ -386,6 +385,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
        struct exynos_bus *bus;
        int ret, max_state;
        unsigned long min_freq, max_freq;
+       bool passive = false;
 
        if (!np) {
                dev_err(dev, "failed to find devicetree node\n");
@@ -399,27 +399,27 @@ static int exynos_bus_probe(struct platform_device *pdev)
        bus->dev = &pdev->dev;
        platform_set_drvdata(pdev, bus);
 
-       /* Parse the device-tree to get the resource information */
-       ret = exynos_bus_parse_of(np, bus);
-       if (ret < 0)
-               return ret;
-
        profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
-       if (!profile) {
-               ret = -ENOMEM;
-               goto err;
-       }
+       if (!profile)
+               return -ENOMEM;
 
        node = of_parse_phandle(dev->of_node, "devfreq", 0);
        if (node) {
                of_node_put(node);
-               goto passive;
+               passive = true;
        } else {
                ret = exynos_bus_parent_parse_of(np, bus);
+               if (ret < 0)
+                       return ret;
        }
 
+       /* Parse the device-tree to get the resource information */
+       ret = exynos_bus_parse_of(np, bus);
        if (ret < 0)
-               goto err;
+               goto err_reg;
+
+       if (passive)
+               goto passive;
 
        /* Initialize the struct profile and governor data for parent device */
        profile->polling_ms = 50;
@@ -510,6 +510,9 @@ out:
 err:
        dev_pm_opp_of_remove_table(dev);
        clk_disable_unprepare(bus->clk);
+err_reg:
+       if (!passive)
+               regulator_disable(bus->regulator);
 
        return ret;
 }
index 3bc29acbd54e85480d375514fd56e2878681fa96..8cfb69749d4984036ee29de4a403361fadbdebdd 100644 (file)
@@ -152,7 +152,6 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb,
 static int devfreq_passive_event_handler(struct devfreq *devfreq,
                                unsigned int event, void *data)
 {
-       struct device *dev = devfreq->dev.parent;
        struct devfreq_passive_data *p_data
                        = (struct devfreq_passive_data *)devfreq->data;
        struct devfreq *parent = (struct devfreq *)p_data->parent;
@@ -168,12 +167,12 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
                        p_data->this = devfreq;
 
                nb->notifier_call = devfreq_passive_notifier_call;
-               ret = devm_devfreq_register_notifier(dev, parent, nb,
+               ret = devfreq_register_notifier(parent, nb,
                                        DEVFREQ_TRANSITION_NOTIFIER);
                break;
        case DEVFREQ_GOV_STOP:
-               devm_devfreq_unregister_notifier(dev, parent, nb,
-                                       DEVFREQ_TRANSITION_NOTIFIER);
+               WARN_ON(devfreq_unregister_notifier(parent, nb,
+                                       DEVFREQ_TRANSITION_NOTIFIER));
                break;
        default:
                break;
index c59d2eee5d3091ce0315092975a2692e1589ce34..06768074d2d822cb0e837e953edb68ddcb02917d 100644 (file)
@@ -486,11 +486,11 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
 {
        struct tegra_devfreq *tegra = dev_get_drvdata(dev);
        struct dev_pm_opp *opp;
-       unsigned long rate = *freq * KHZ;
+       unsigned long rate;
 
-       opp = devfreq_recommended_opp(dev, &rate, flags);
+       opp = devfreq_recommended_opp(dev, freq, flags);
        if (IS_ERR(opp)) {
-               dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
+               dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
                return PTR_ERR(opp);
        }
        rate = dev_pm_opp_get_freq(opp);
@@ -499,8 +499,6 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
        clk_set_min_rate(tegra->emc_clock, rate);
        clk_set_rate(tegra->emc_clock, 0);
 
-       *freq = rate;
-
        return 0;
 }
 
@@ -510,7 +508,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
        struct tegra_devfreq *tegra = dev_get_drvdata(dev);
        struct tegra_devfreq_device *actmon_dev;
 
-       stat->current_frequency = tegra->cur_freq;
+       stat->current_frequency = tegra->cur_freq * KHZ;
 
        /* To be used by the tegra governor */
        stat->private_data = tegra;
@@ -565,7 +563,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
                target_freq = max(target_freq, dev->target_freq);
        }
 
-       *freq = target_freq;
+       *freq = target_freq * KHZ;
 
        return 0;
 }
index 007986e501b001645825cd944606b845bd7aa90e..f47fdd5b22de9cd4438c408162369c91636fdda6 100644 (file)
@@ -1075,6 +1075,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
                                   fence->ops->get_driver_name(fence),
                                   fence->ops->get_timeline_name(fence),
                                   dma_fence_is_signaled(fence) ? "" : "un");
+                       dma_fence_put(fence);
                }
                rcu_read_unlock();
 
index 6c95f61a32e73d54ed70f461e676826075419f45..49ab09468ba15c12ec80e3e34b1656e450f1fa70 100644 (file)
@@ -416,6 +416,10 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
                                           GFP_NOWAIT | __GFP_NOWARN);
                        if (!nshared) {
                                rcu_read_unlock();
+
+                               dma_fence_put(fence_excl);
+                               fence_excl = NULL;
+
                                nshared = krealloc(shared, sz, GFP_KERNEL);
                                if (nshared) {
                                        shared = nshared;
index 53c1d6d36a642f04ec49aa0e3cfeabeb84205609..81ba4eb34890952468b877fa2107363adcdc42bb 100644 (file)
@@ -141,17 +141,14 @@ static void timeline_fence_release(struct dma_fence *fence)
 {
        struct sync_pt *pt = dma_fence_to_sync_pt(fence);
        struct sync_timeline *parent = dma_fence_parent(fence);
+       unsigned long flags;
 
+       spin_lock_irqsave(fence->lock, flags);
        if (!list_empty(&pt->link)) {
-               unsigned long flags;
-
-               spin_lock_irqsave(fence->lock, flags);
-               if (!list_empty(&pt->link)) {
-                       list_del(&pt->link);
-                       rb_erase(&pt->node, &parent->pt_tree);
-               }
-               spin_unlock_irqrestore(fence->lock, flags);
+               list_del(&pt->link);
+               rb_erase(&pt->node, &parent->pt_tree);
        }
+       spin_unlock_irqrestore(fence->lock, flags);
 
        sync_timeline_put(parent);
        dma_fence_free(fence);
@@ -274,7 +271,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
                                p = &parent->rb_left;
                        } else {
                                if (dma_fence_get_rcu(&other->base)) {
-                                       dma_fence_put(&pt->base);
+                                       sync_timeline_put(obj);
+                                       kfree(pt);
                                        pt = other;
                                        goto unlock;
                                }
index 2b11d967acd0285b5e18f64458b173eac3712a90..9d782cc95c6a0589a9db6be6fc495e4a73baa33a 100644 (file)
@@ -898,8 +898,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
                pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
 
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-       if (rc)
+       if (rc) {
+               dev_err(&pdev->dev, "Unable to set DMA mask\n");
                return rc;
+       }
 
        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
index 1c658ec3cbf42d5e515067d48a2bb943948d8158..3f5a01cb4ab45b05c37aaac65512be88587615db 100644 (file)
@@ -2039,27 +2039,6 @@ static int sdma_probe(struct platform_device *pdev)
        if (pdata && pdata->script_addrs)
                sdma_add_scripts(sdma, pdata->script_addrs);
 
-       if (pdata) {
-               ret = sdma_get_firmware(sdma, pdata->fw_name);
-               if (ret)
-                       dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
-       } else {
-               /*
-                * Because that device tree does not encode ROM script address,
-                * the RAM script in firmware is mandatory for device tree
-                * probe, otherwise it fails.
-                */
-               ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
-                                             &fw_name);
-               if (ret)
-                       dev_warn(&pdev->dev, "failed to get firmware name\n");
-               else {
-                       ret = sdma_get_firmware(sdma, fw_name);
-                       if (ret)
-                               dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
-               }
-       }
-
        sdma->dma_device.dev = &pdev->dev;
 
        sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
@@ -2103,6 +2082,33 @@ static int sdma_probe(struct platform_device *pdev)
                of_node_put(spba_bus);
        }
 
+       /*
+        * Kick off firmware loading as the very last step:
+        * attempt to load firmware only if we're not on the error path, because
+        * the firmware callback requires a fully functional and allocated sdma
+        * instance.
+        */
+       if (pdata) {
+               ret = sdma_get_firmware(sdma, pdata->fw_name);
+               if (ret)
+                       dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
+       } else {
+               /*
+                * Because that device tree does not encode ROM script address,
+                * the RAM script in firmware is mandatory for device tree
+                * probe, otherwise it fails.
+                */
+               ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+                                             &fw_name);
+               if (ret) {
+                       dev_warn(&pdev->dev, "failed to get firmware name\n");
+               } else {
+                       ret = sdma_get_firmware(sdma, fw_name);
+                       if (ret)
+                               dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
+               }
+       }
+
        return 0;
 
 err_register:
index a410657f7bcd6f61c0335281e1d0997e540f6dbc..012584cf3c17bf727f843afe70505c59255b0a9d 100644 (file)
@@ -125,9 +125,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
        list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
                                        chain_node) {
                pr_debug("\tcookie: %d slot: %d busy: %d "
-                       "this_desc: %#x next_desc: %#x ack: %d\n",
+                       "this_desc: %#x next_desc: %#llx ack: %d\n",
                        iter->async_tx.cookie, iter->idx, busy,
-                       iter->async_tx.phys, iop_desc_get_next_desc(iter),
+                       iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
                        async_tx_test_ack(&iter->async_tx));
                prefetch(_iter);
                prefetch(&_iter->async_tx);
@@ -315,9 +315,9 @@ retry:
                                int i;
                                dev_dbg(iop_chan->device->common.dev,
                                        "allocated slot: %d "
-                                       "(desc %p phys: %#x) slots_per_op %d\n",
+                                       "(desc %p phys: %#llx) slots_per_op %d\n",
                                        iter->idx, iter->hw_desc,
-                                       iter->async_tx.phys, slots_per_op);
+                                       (u64)iter->async_tx.phys, slots_per_op);
 
                                /* pre-ack all but the last descriptor */
                                if (num_slots != slots_per_op)
@@ -525,7 +525,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
                return NULL;
        BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
 
-       dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
+       dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
                __func__, len);
 
        spin_lock_bh(&iop_chan->lock);
@@ -558,7 +558,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
        BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 
        dev_dbg(iop_chan->device->common.dev,
-               "%s src_cnt: %d len: %u flags: %lx\n",
+               "%s src_cnt: %d len: %zu flags: %lx\n",
                __func__, src_cnt, len, flags);
 
        spin_lock_bh(&iop_chan->lock);
@@ -591,7 +591,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
        if (unlikely(!len))
                return NULL;
 
-       dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+       dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
                __func__, src_cnt, len);
 
        spin_lock_bh(&iop_chan->lock);
@@ -629,7 +629,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
        BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 
        dev_dbg(iop_chan->device->common.dev,
-               "%s src_cnt: %d len: %u flags: %lx\n",
+               "%s src_cnt: %d len: %zu flags: %lx\n",
                __func__, src_cnt, len, flags);
 
        if (dmaf_p_disabled_continue(flags))
@@ -692,7 +692,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
                return NULL;
        BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 
-       dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+       dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
                __func__, src_cnt, len);
 
        spin_lock_bh(&iop_chan->lock);
index 0b05a1e08d213a649dc45219e1a3d28df4a85385..041ce864097e49804f6b9f8dcdd4cd3cf344cb6a 100644 (file)
@@ -1164,7 +1164,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
 
        /* Someone calling slave DMA on a generic channel? */
-       if (rchan->mid_rid < 0 || !sg_len) {
+       if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
                dev_warn(chan->device->dev,
                         "%s: bad parameter: len=%d, id=%d\n",
                         __func__, sg_len, rchan->mid_rid);
index f4edfc56f34ef65dc34e50e38fd3c9aa258364fd..3d55405c49cacc409c937a0048f31d0de18edd64 100644 (file)
@@ -142,7 +142,7 @@ enum d40_events {
  * when the DMA hw is powered off.
  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
  */
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
        D40_DREG_LCPA,
        D40_DREG_LCLA,
        D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
 
 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
 
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
        D40_CHAN_REG_SSCFG,
        D40_CHAN_REG_SSELT,
        D40_CHAN_REG_SSPTR,
index 06dd1725375e514710c245305069d61586ac2961..8c3c3e5b812a85d66c1b6b04797cdd2bf5ff6e64 100644 (file)
@@ -1376,7 +1376,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
 
        chan = &dmadev->chan[id];
        if (!chan) {
-               dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+               dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
                goto exit;
        }
 
index 8219ab88a507cddbca62a89f2465dd82eea1ee69..fb23993430d31490b66da870954187f0b80e05f7 100644 (file)
@@ -981,8 +981,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
                csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
        }
 
-       if (flags & DMA_PREP_INTERRUPT)
+       if (flags & DMA_PREP_INTERRUPT) {
                csr |= TEGRA_APBDMA_CSR_IE_EOC;
+       } else {
+               WARN_ON_ONCE(1);
+               return NULL;
+       }
 
        apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
 
@@ -1124,8 +1128,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
                csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
        }
 
-       if (flags & DMA_PREP_INTERRUPT)
+       if (flags & DMA_PREP_INTERRUPT) {
                csr |= TEGRA_APBDMA_CSR_IE_EOC;
+       } else {
+               WARN_ON_ONCE(1);
+               return NULL;
+       }
 
        apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
 
index 9272b173c74655203ec4c1997988dccaa9ba02d3..6574cb5a12fee8dd30d2ec4a20b10c9bb7065146 100644 (file)
@@ -395,8 +395,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
 
                ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
                                                 nelm * 2);
-               if (ret)
+               if (ret) {
+                       kfree(rsv_events);
                        return ret;
+               }
 
                for (i = 0; i < nelm; i++) {
                        ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
index ceabdea40ae0fd20aca3e54fb30f2d1e7f562a28..982631d4e1f8af4b83d9952dcfe30512d185c639 100644 (file)
@@ -2273,9 +2273,6 @@ static int edma_probe(struct platform_device *pdev)
 
        ecc->default_queue = info->default_queue;
 
-       for (i = 0; i < ecc->num_slots; i++)
-               edma_write_slot(ecc, i, &dummy_paramset);
-
        if (info->rsv) {
                /* Set the reserved slots in inuse list */
                rsv_slots = info->rsv->rsv_slots;
@@ -2288,6 +2285,12 @@ static int edma_probe(struct platform_device *pdev)
                }
        }
 
+       for (i = 0; i < ecc->num_slots; i++) {
+               /* Reset only unused - not reserved - paRAM slots */
+               if (!test_bit(i, ecc->slot_inuse))
+                       edma_write_slot(ecc, i, &dummy_paramset);
+       }
+
        /* Clear the xbar mapped channels in unused list */
        xbar_chans = info->xbar_chans;
        if (xbar_chans) {
index c0f03dc4b670d15dc12550b0a33d6f7d53b21791..49ecc5dc17873ad420204e28e3d4a3da5fdd3f04 100644 (file)
@@ -552,7 +552,9 @@ static int k3_nav_udmax_cfg_rx_chn(struct k3_nav_udmax_rx_channel *rx_chn)
 
        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
-                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
 
        req.nav_id = tisci_rm->tisci_dev_id;
        req.index = rx_chn->udma_rchan_id;
@@ -567,9 +569,6 @@ static int k3_nav_udmax_cfg_rx_chn(struct k3_nav_udmax_rx_channel *rx_chn)
                /* Default flow + extra ones */
                req.flowid_start = rx_chn->flow_id_base;
                req.flowid_cnt = rx_chn->flow_num;
-               req.valid_params |=
-                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
-                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
        }
        req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
 
index a9255609de24feb1609feb68f4ada036670041e2..84980fea86af4891f44d8a097149463c833753e5 100644 (file)
@@ -1629,7 +1629,9 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |
-                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID;
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
 
                req_rx.nav_id = tisci_rm->tisci_dev_id;
                req_rx.index = rchan->id;
@@ -1639,6 +1641,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
                req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
                req_rx.rx_ignore_short = 0;
                req_rx.rx_ignore_long = 0;
+               req_rx.flowid_start = 0;
+               req_rx.flowid_cnt = 0;
 
                ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
                if (ret) {
@@ -1711,7 +1715,9 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |
-                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID;
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
 
                        req_rx.nav_id = tisci_rm->tisci_dev_id;
                        req_rx.index = rchan->id;
@@ -1721,6 +1727,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
                        req_rx.rx_chan_type = mode;
                        req_rx.rx_ignore_short = 0;
                        req_rx.rx_ignore_long = 0;
+                       req_rx.flowid_start = 0;
+                       req_rx.flowid_cnt = 0;
 
                        ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
                        if (ret) {
index a4a931ddf6f695fa21a25a359a94ee0c57f92beb..c192bdc30aae1170ca6d3a3df6e3dd6a0519c659 100644 (file)
@@ -1237,7 +1237,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
        if (src_icg) {
                d->ccr |= CCR_SRC_AMODE_DBLIDX;
                d->ei = 1;
-               d->fi = src_icg;
+               d->fi = src_icg + 1;
        } else if (xt->src_inc) {
                d->ccr |= CCR_SRC_AMODE_POSTINC;
                d->fi = 0;
@@ -1252,7 +1252,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
        if (dst_icg) {
                d->ccr |= CCR_DST_AMODE_DBLIDX;
                sg->ei = 1;
-               sg->fi = dst_icg;
+               sg->fi = dst_icg + 1;
        } else if (xt->dst_inc) {
                d->ccr |= CCR_DST_AMODE_POSTINC;
                sg->fi = 0;
@@ -1543,8 +1543,10 @@ static int omap_dma_probe(struct platform_device *pdev)
 
                rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
                                      IRQF_SHARED, "omap-dma-engine", od);
-               if (rc)
+               if (rc) {
+                       omap_dma_free(od);
                        return rc;
+               }
        }
 
        if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
index 5762c3c383f2ee05d67641bc1e2bd18f60c07f92..56de378ad13dce008e92131df715b22c8b580100 100644 (file)
@@ -1956,6 +1956,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
        struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        int irq = irq_desc_get_irq(desc);
+       unsigned long bits;
 
        dberr = (irq == edac->db_irq) ? 1 : 0;
        sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
@@ -1965,7 +1966,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
 
        regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
 
-       for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
+       bits = irq_status;
+       for_each_set_bit(bit, &bits, 32) {
                irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
                if (irq)
                        generic_handle_irq(irq);
index e2addb2bca29669240acaed03d09b6f601b729b9..94265e4385146558cef181a5d4637fccd6ea5efa 100644 (file)
@@ -2501,13 +2501,6 @@ static void decode_umc_error(int node_id, struct mce *m)
                goto log_error;
        }
 
-       if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
-               err.err_code = ERR_NORM_ADDR;
-               goto log_error;
-       }
-
-       error_address_to_page_and_offset(sys_addr, &err);
-
        if (!(m->status & MCI_STATUS_SYNDV)) {
                err.err_code = ERR_SYND;
                goto log_error;
@@ -2524,6 +2517,13 @@ static void decode_umc_error(int node_id, struct mce *m)
 
        err.csrow = m->synd & 0x7;
 
+       if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+               err.err_code = ERR_NORM_ADDR;
+               goto log_error;
+       }
+
+       error_address_to_page_and_offset(sys_addr, &err);
+
 log_error:
        __log_ecc_error(mci, &err, ecc_type);
 }
@@ -3101,12 +3101,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
 static inline void
 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
 {
-       u8 i, ecc_en = 1, cpk_en = 1;
+       u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
 
        for (i = 0; i < NUM_UMCS; i++) {
                if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
                        ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
                        cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
+
+                       dev_x4  &= !!(pvt->umc[i].dimm_cfg & BIT(6));
+                       dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
                }
        }
 
@@ -3114,8 +3117,15 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
        if (ecc_en) {
                mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
 
-               if (cpk_en)
+               if (!cpk_en)
+                       return;
+
+               if (dev_x4)
                        mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+               else if (dev_x16)
+                       mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
+               else
+                       mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
        }
 }
 
index 7d3edd7139328b5466dfe2041d24e353e166ea7a..f59511bd99261b5797b19bdc6c969516f756eef5 100644 (file)
@@ -1246,9 +1246,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
        if (p > e->location)
                *(p - 1) = '\0';
 
-       /* Report the error via the trace interface */
-       grain_bits = fls_long(e->grain) + 1;
+       /* Sanity-check driver-supplied grain value. */
+       if (WARN_ON_ONCE(!e->grain))
+               e->grain = 1;
+
+       grain_bits = fls_long(e->grain - 1);
 
+       /* Report the error via the trace interface */
        if (IS_ENABLED(CONFIG_RAS))
                trace_mc_event(type, e->msg, e->label, e->error_count,
                               mci->mc_idx, e->top_layer, e->mid_layer,
index 20374b8248f087e343738bee152a8b34e746d6d6..d4545a9222a07c6684938521d8395385b00ac2ed 100644 (file)
@@ -26,7 +26,7 @@
 static int edac_mc_log_ue = 1;
 static int edac_mc_log_ce = 1;
 static int edac_mc_panic_on_ue;
-static int edac_mc_poll_msec = 1000;
+static unsigned int edac_mc_poll_msec = 1000;
 
 /* Getter functions for above */
 int edac_mc_get_log_ue(void)
@@ -45,30 +45,30 @@ int edac_mc_get_panic_on_ue(void)
 }
 
 /* this is temporary */
-int edac_mc_get_poll_msec(void)
+unsigned int edac_mc_get_poll_msec(void)
 {
        return edac_mc_poll_msec;
 }
 
 static int edac_set_poll_msec(const char *val, const struct kernel_param *kp)
 {
-       unsigned long l;
+       unsigned int i;
        int ret;
 
        if (!val)
                return -EINVAL;
 
-       ret = kstrtoul(val, 0, &l);
+       ret = kstrtouint(val, 0, &i);
        if (ret)
                return ret;
 
-       if (l < 1000)
+       if (i < 1000)
                return -EINVAL;
 
-       *((unsigned long *)kp->arg) = l;
+       *((unsigned int *)kp->arg) = i;
 
        /* notify edac_mc engine to reset the poll period */
-       edac_mc_reset_delay_period(l);
+       edac_mc_reset_delay_period(i);
 
        return 0;
 }
@@ -82,7 +82,7 @@ MODULE_PARM_DESC(edac_mc_log_ue,
 module_param(edac_mc_log_ce, int, 0644);
 MODULE_PARM_DESC(edac_mc_log_ce,
                 "Log correctable error to console: 0=off 1=on");
-module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
+module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_uint,
                  &edac_mc_poll_msec, 0644);
 MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
 
@@ -404,6 +404,8 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow)
 static int edac_create_csrow_object(struct mem_ctl_info *mci,
                                    struct csrow_info *csrow, int index)
 {
+       int err;
+
        csrow->dev.type = &csrow_attr_type;
        csrow->dev.bus = mci->bus;
        csrow->dev.groups = csrow_dev_groups;
@@ -416,7 +418,11 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
        edac_dbg(0, "creating (virtual) csrow node %s\n",
                 dev_name(&csrow->dev));
 
-       return device_add(&csrow->dev);
+       err = device_add(&csrow->dev);
+       if (err)
+               put_device(&csrow->dev);
+
+       return err;
 }
 
 /* Create a CSROW object under specifed edac_mc_device */
index dec88dcea036f5b7672409fb2f2cfd78bc863609..c9f0e73872a6445b66e7669937fe5e014f8cff4a 100644 (file)
@@ -36,7 +36,7 @@ extern int edac_mc_get_log_ue(void);
 extern int edac_mc_get_log_ce(void);
 extern int edac_mc_get_panic_on_ue(void);
 extern int edac_get_poll_msec(void);
-extern int edac_mc_get_poll_msec(void);
+extern unsigned int edac_mc_get_poll_msec(void);
 
 unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
                                 unsigned len);
index 473aeec4b1da44ec4c069d1225db85c67ea691a8..574bce603337fe46d4b34ed0f95ae8ac3ecd4628 100644 (file)
@@ -532,7 +532,11 @@ void ghes_edac_unregister(struct ghes *ghes)
        if (!ghes_pvt)
                return;
 
+       if (atomic_dec_return(&ghes_init))
+               return;
+
        mci = ghes_pvt->mci;
+       ghes_pvt = NULL;
        edac_mc_del_mc(mci->pdev);
        edac_mc_free(mci);
 }
index 903a4f1fadcc394adec7a41ed6c4926694b96047..0153c730750e5b10ea91280d77cdf87db09fe0f8 100644 (file)
@@ -268,11 +268,14 @@ static u64 get_sideband_reg_base_addr(void)
        }
 }
 
+#define DNV_MCHBAR_SIZE  0x8000
+#define DNV_SB_PORT_SIZE 0x10000
 static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
 {
        struct pci_dev *pdev;
        char *base;
        u64 addr;
+       unsigned long size;
 
        if (op == 4) {
                pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
@@ -287,15 +290,17 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
                        addr = get_mem_ctrl_hub_base_addr();
                        if (!addr)
                                return -ENODEV;
+                       size = DNV_MCHBAR_SIZE;
                } else {
                        /* MMIO via sideband register base address */
                        addr = get_sideband_reg_base_addr();
                        if (!addr)
                                return -ENODEV;
                        addr += (port << 16);
+                       size = DNV_SB_PORT_SIZE;
                }
 
-               base = ioremap((resource_size_t)addr, 0x10000);
+               base = ioremap((resource_size_t)addr, size);
                if (!base)
                        return -ENODEV;
 
index 6e83880046d787d978cddaa1fcf2c5b930843186..ed212c8b4108370152fda28275843e65fc4d50c9 100644 (file)
@@ -198,7 +198,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
 
 config ISCSI_IBFT_FIND
        bool "iSCSI Boot Firmware Table Attributes"
-       depends on X86 && ACPI
+       depends on X86 && ISCSI_IBFT
        default n
        help
          This option enables the kernel to find the region of memory
@@ -209,7 +209,8 @@ config ISCSI_IBFT_FIND
 config ISCSI_IBFT
        tristate "iSCSI Boot Firmware Table Attributes module"
        select ISCSI_BOOT_SYSFS
-       depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
+       select ISCSI_IBFT_FIND if X86
+       depends on ACPI && SCSI && SCSI_LOWLEVEL
        default n
        help
          This option enables support for detection and exposing of iSCSI
index 8f952f2f1a29203f8b7729cbfd10aafeba3cd9f6..09119e3f5c018b9495068eeda50ef6ad63534d89 100644 (file)
@@ -271,6 +271,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
        struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
        struct scmi_shared_mem __iomem *mem = cinfo->payload;
 
+       /*
+        * Ideally channel must be free by now unless OS timeout last
+        * request and platform continued to process the same, wait
+        * until it releases the shared memory, otherwise we may endup
+        * overwriting its response with new message payload or vice-versa
+        */
+       spin_until_cond(ioread32(&mem->channel_status) &
+                       SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
        /* Mark channel busy + clear error */
        iowrite32(0x0, &mem->channel_status);
        iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
index 6090d25dce85e301ff66988553384caf726e8ad3..4045098ddb860ee4a32528246f6add85ccc8b0b2 100644 (file)
@@ -402,6 +402,21 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
                printk(
        "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
        pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+
+       /* Fatal errors call __ghes_panic() before AER handler prints this */
+       if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
+           (gdata->error_severity & CPER_SEV_FATAL)) {
+               struct aer_capability_regs *aer;
+
+               aer = (struct aer_capability_regs *)pcie->aer_info;
+               printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
+                      pfx, aer->uncor_status, aer->uncor_mask);
+               printk("%saer_uncor_severity: 0x%08x\n",
+                      pfx, aer->uncor_severity);
+               printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
+                      aer->header_log.dw0, aer->header_log.dw1,
+                      aer->header_log.dw2, aer->header_log.dw3);
+       }
 }
 
 static void cper_print_tstamp(const char *pfx,
index b22ccfb0c991bde8c6d222d0a44ce527e304c7ae..2bf4d31f4967566e2cb5e72853ddad0876e3aad1 100644 (file)
@@ -50,11 +50,6 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
                       bgrt->version);
                goto out;
        }
-       if (bgrt->status & 0xfe) {
-               pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
-                      bgrt->status);
-               goto out;
-       }
        if (bgrt->image_type != 0) {
                pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
                       bgrt->image_type);
index 2a29dd9c986d4e2df7663aa1305df8adbaa59739..d54fca902e64f8ba565d6540f4a51cc511370a94 100644 (file)
@@ -281,6 +281,9 @@ static __init int efivar_ssdt_load(void)
        void *data;
        int ret;
 
+       if (!efivar_ssdt[0])
+               return 0;
+
        ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
 
        list_for_each_entry_safe(entry, aux, &entries, list) {
index 1aa67bb5d8c0b02f10f49f0f8cdf6a17aca56955..ebd3ffc7ca0063f0ef9b525334e543578b45e4c7 100644 (file)
@@ -100,8 +100,8 @@ static int vpd_section_check_key_name(const u8 *key, s32 key_len)
        return VPD_OK;
 }
 
-static int vpd_section_attrib_add(const u8 *key, s32 key_len,
-                                 const u8 *value, s32 value_len,
+static int vpd_section_attrib_add(const u8 *key, u32 key_len,
+                                 const u8 *value, u32 value_len,
                                  void *arg)
 {
        int ret;
index 943acaa8aa765f8ee433954306822f6149b19f94..6c7ab2ba85d2fa8942361b563b222f73d54e18e9 100644 (file)
@@ -19,8 +19,8 @@
 
 #include "vpd_decode.h"
 
-static int vpd_decode_len(const s32 max_len, const u8 *in,
-                         s32 *length, s32 *decoded_len)
+static int vpd_decode_len(const u32 max_len, const u8 *in,
+                         u32 *length, u32 *decoded_len)
 {
        u8 more;
        int i = 0;
@@ -40,18 +40,39 @@ static int vpd_decode_len(const s32 max_len, const u8 *in,
        } while (more);
 
        *decoded_len = i;
+       return VPD_OK;
+}
+
+static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
+                           u32 *_consumed, const u8 **entry, u32 *entry_len)
+{
+       u32 decoded_len;
+       u32 consumed = *_consumed;
+
+       if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
+                          entry_len, &decoded_len) != VPD_OK)
+               return VPD_FAIL;
+       if (max_len - consumed < decoded_len)
+               return VPD_FAIL;
+
+       consumed += decoded_len;
+       *entry = input_buf + consumed;
+
+       /* entry_len is untrusted data and must be checked again. */
+       if (max_len - consumed < *entry_len)
+               return VPD_FAIL;
 
+       consumed += *entry_len;
+       *_consumed = consumed;
        return VPD_OK;
 }
 
-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
                      vpd_decode_callback callback, void *callback_arg)
 {
        int type;
-       int res;
-       s32 key_len;
-       s32 value_len;
-       s32 decoded_len;
+       u32 key_len;
+       u32 value_len;
        const u8 *key;
        const u8 *value;
 
@@ -66,26 +87,14 @@ int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
        case VPD_TYPE_STRING:
                (*consumed)++;
 
-               /* key */
-               res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
-                                    &key_len, &decoded_len);
-               if (res != VPD_OK || *consumed + decoded_len >= max_len)
+               if (vpd_decode_entry(max_len, input_buf, consumed, &key,
+                                    &key_len) != VPD_OK)
                        return VPD_FAIL;
 
-               *consumed += decoded_len;
-               key = &input_buf[*consumed];
-               *consumed += key_len;
-
-               /* value */
-               res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
-                                    &value_len, &decoded_len);
-               if (res != VPD_OK || *consumed + decoded_len > max_len)
+               if (vpd_decode_entry(max_len, input_buf, consumed, &value,
+                                    &value_len) != VPD_OK)
                        return VPD_FAIL;
 
-               *consumed += decoded_len;
-               value = &input_buf[*consumed];
-               *consumed += value_len;
-
                if (type == VPD_TYPE_STRING)
                        return callback(key, key_len, value, value_len,
                                        callback_arg);
index be3d62c5ca2fb967e51f7ae292627f014ac90970..e921456b8e78a34dcf1a280481c37377b09802ad 100644 (file)
@@ -33,8 +33,8 @@ enum {
 };
 
 /* Callback for vpd_decode_string to invoke. */
-typedef int vpd_decode_callback(const u8 *key, s32 key_len,
-                               const u8 *value, s32 value_len,
+typedef int vpd_decode_callback(const u8 *key, u32 key_len,
+                               const u8 *value, u32 value_len,
                                void *arg);
 
 /*
@@ -52,7 +52,7 @@ typedef int vpd_decode_callback(const u8 *key, s32 key_len,
  * If one entry is successfully decoded, sends it to callback and returns the
  * result.
  */
-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
                      vpd_decode_callback callback, void *callback_arg);
 
 #endif  /* __VPD_DECODE_H */
index c51462f5aa1e4f52d01cb7c88db42bdc006f7767..966aef334c420f3015e475c349dae84adb151f41 100644 (file)
@@ -93,6 +93,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IBFT_ISCSI_VERSION);
 
+#ifndef CONFIG_ISCSI_IBFT_FIND
+struct acpi_table_ibft *ibft_addr;
+#endif
+
 struct ibft_hdr {
        u8 id;
        u8 version;
index 3469436579622b957fcf805aff1edb0da41adc1c..cbd53cb1b2d4783bbacca41f4ada309f24af642f 100644 (file)
@@ -366,16 +366,16 @@ static int suspend_test_thread(void *arg)
        for (;;) {
                /* Needs to be set first to avoid missing a wakeup. */
                set_current_state(TASK_INTERRUPTIBLE);
-               if (kthread_should_stop()) {
-                       __set_current_state(TASK_RUNNING);
+               if (kthread_should_park())
                        break;
-               }
                schedule();
        }
 
        pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
                cpu, nb_suspend, nb_shallow_sleep, nb_err);
 
+       kthread_parkme();
+
        return nb_err;
 }
 
@@ -440,8 +440,10 @@ static int suspend_tests(void)
 
 
        /* Stop and destroy all threads, get return status. */
-       for (i = 0; i < nb_threads; ++i)
+       for (i = 0; i < nb_threads; ++i) {
+               err += kthread_park(threads[i]);
                err += kthread_stop(threads[i]);
+       }
  out:
        cpuidle_resume_and_unlock();
        kfree(threads);
index e778af766fae3c2c88d20e8f7ae6f47f9114935c..98c987188835bcf8e5fb0e39ef3218fd6cab7370 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/cpumask.h>
 #include <linux/export.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -449,6 +450,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
        phys_addr_t mem_to_map_phys;
        phys_addr_t dest_phys;
        phys_addr_t ptr_phys;
+       dma_addr_t ptr_dma;
        size_t mem_to_map_sz;
        size_t dest_sz;
        size_t src_sz;
@@ -466,9 +468,10 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
        ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
                        ALIGN(dest_sz, SZ_64);
 
-       ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+       ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;
+       ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
 
        /* Fill source vmid detail */
        src = ptr;
@@ -498,7 +501,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 
        ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
                                    ptr_phys, src_sz, dest_phys, dest_sz);
-       dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
+       dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
        if (ret) {
                dev_err(__scm->dev,
                        "Assign memory protection call failed %d.\n", ret);
index 1ebcef4bab5b8f52b4676ab63d732b42ffd8d349..87337fcfbc0d2bd862127bea2208b8afa143ee81 100644 (file)
@@ -39,6 +39,7 @@ config ALTERA_PR_IP_CORE_PLAT
 config FPGA_MGR_ALTERA_PS_SPI
        tristate "Altera FPGA Passive Serial over SPI"
        depends on SPI
+       select BITREVERSE
        help
          FPGA manager driver support for Altera Arria/Cyclone/Stratix
          using the passive serial interface over SPI.
index 24b25c62603665aee105e394121110c59f65ad91..4925cae7dcddeb7aa6c8c47ca954f6ca6cf5aefd 100644 (file)
@@ -207,7 +207,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
                return -EIO;
        }
 
-       if (!IS_ERR(conf->confd)) {
+       if (conf->confd) {
                if (!gpiod_get_raw_value_cansleep(conf->confd)) {
                        dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
                        return -EIO;
@@ -265,10 +265,13 @@ static int altera_ps_probe(struct spi_device *spi)
                return PTR_ERR(conf->status);
        }
 
-       conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
+       conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
        if (IS_ERR(conf->confd)) {
-               dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
-                        PTR_ERR(conf->confd));
+               dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
+                       PTR_ERR(conf->confd));
+               return PTR_ERR(conf->confd);
+       } else if (!conf->confd) {
+               dev_warn(&spi->dev, "Not using confd gpio");
        }
 
        /* Register manager with unique name */
index df94021dd9d12bc32b18873076151d3fccbae5c7..fdc0e458dbaaf9c2b59488e6b1a4d7102926657a 100644 (file)
@@ -47,8 +47,7 @@
 #define SCOM_STATUS_PIB_RESP_MASK      0x00007000
 #define SCOM_STATUS_PIB_RESP_SHIFT     12
 
-#define SCOM_STATUS_ANY_ERR            (SCOM_STATUS_ERR_SUMMARY | \
-                                        SCOM_STATUS_PROTECTION | \
+#define SCOM_STATUS_ANY_ERR            (SCOM_STATUS_PROTECTION | \
                                         SCOM_STATUS_PARITY |     \
                                         SCOM_STATUS_PIB_ABORT | \
                                         SCOM_STATUS_PIB_RESP_MASK)
@@ -260,11 +259,6 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
        /* Return -EBUSY on PIB abort to force a retry */
        if (status & SCOM_STATUS_PIB_ABORT)
                return -EBUSY;
-       if (status & SCOM_STATUS_ERR_SUMMARY) {
-               fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
-                                sizeof(uint32_t));
-               return -EIO;
-       }
        return 0;
 }
 
index 6cf2e2ce40933dae63de7c924b127d244a4cef30..4935cda5301ea8e7ff414807a3675b10a1337062 100644 (file)
@@ -529,11 +529,12 @@ static void sprd_eic_handle_one_type(struct gpio_chip *chip)
                }
 
                for_each_set_bit(n, &reg, SPRD_EIC_PER_BANK_NR) {
-                       girq = irq_find_mapping(chip->irq.domain,
-                                       bank * SPRD_EIC_PER_BANK_NR + n);
+                       u32 offset = bank * SPRD_EIC_PER_BANK_NR + n;
+
+                       girq = irq_find_mapping(chip->irq.domain, offset);
 
                        generic_handle_irq(girq);
-                       sprd_eic_toggle_trigger(chip, girq, n);
+                       sprd_eic_toggle_trigger(chip, girq, offset);
                }
        }
 }
index 7789f09f3dafd5bcc13ce5703ee8c2ee3f07592b..2a4a11634dd1815aa499fba798cd50870f032653 100644 (file)
@@ -837,9 +837,9 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
 
        raw_spin_lock_irqsave(&bank->lock, flags);
        bank->irq_usage &= ~(BIT(offset));
-       omap_set_gpio_irqenable(bank, offset, 0);
-       omap_clear_gpio_irqstatus(bank, offset);
        omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+       omap_clear_gpio_irqstatus(bank, offset);
+       omap_set_gpio_irqenable(bank, offset, 0);
        if (!LINE_USED(bank->mod_usage, offset))
                omap_clear_gpio_debounce(bank, offset);
        omap_disable_gpio_module(bank, offset);
@@ -881,8 +881,8 @@ static void omap_gpio_mask_irq(struct irq_data *d)
        unsigned long flags;
 
        raw_spin_lock_irqsave(&bank->lock, flags);
-       omap_set_gpio_irqenable(bank, offset, 0);
        omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+       omap_set_gpio_irqenable(bank, offset, 0);
        raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
@@ -894,9 +894,6 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
        unsigned long flags;
 
        raw_spin_lock_irqsave(&bank->lock, flags);
-       if (trigger)
-               omap_set_gpio_triggering(bank, offset, trigger);
-
        omap_set_gpio_irqenable(bank, offset, 1);
 
        /*
@@ -904,9 +901,13 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
         * is cleared, thus after the handler has run. OMAP4 needs this done
         * after enabing the interrupt to clear the wakeup status.
         */
-       if (bank->level_mask & BIT(offset))
+       if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
+           trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
                omap_clear_gpio_irqstatus(bank, offset);
 
+       if (trigger)
+               omap_set_gpio_triggering(bank, offset, trigger);
+
        raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
@@ -1689,6 +1690,8 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
        .clr_dataout =          OMAP4_GPIO_CLEARDATAOUT,
        .irqstatus =            OMAP4_GPIO_IRQSTATUS0,
        .irqstatus2 =           OMAP4_GPIO_IRQSTATUS1,
+       .irqstatus_raw0 =       OMAP4_GPIO_IRQSTATUSRAW0,
+       .irqstatus_raw1 =       OMAP4_GPIO_IRQSTATUSRAW1,
        .irqenable =            OMAP4_GPIO_IRQSTATUSSET0,
        .irqenable2 =           OMAP4_GPIO_IRQSTATUSSET1,
        .set_irqenable =        OMAP4_GPIO_IRQSTATUSSET0,
index c5e009f610210d5781dfef0e90f8049ce19eaa86..cf2604e635999dcace0818728af94771961f7fb2 100644 (file)
@@ -10,6 +10,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/dmi.h>
 #include <linux/errno.h>
 #include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 
 #include "gpiolib.h"
 
+static int run_edge_events_on_boot = -1;
+module_param(run_edge_events_on_boot, int, 0444);
+MODULE_PARM_DESC(run_edge_events_on_boot,
+                "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+
 /**
  * struct acpi_gpio_event - ACPI GPIO event handler data
  *
@@ -174,10 +180,13 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
        event->irq_requested = true;
 
        /* Make sure we trigger the initial state of edge-triggered IRQs */
-       value = gpiod_get_raw_value_cansleep(event->desc);
-       if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-           ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
-               event->handler(event->irq, event);
+       if (run_edge_events_on_boot &&
+           (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
+               value = gpiod_get_raw_value_cansleep(event->desc);
+               if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+                   ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+                       event->handler(event->irq, event);
+       }
 }
 
 static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
@@ -1253,3 +1262,28 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
 late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+
+static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+               }
+       },
+       {} /* Terminating entry */
+};
+
+static int acpi_gpio_setup_params(void)
+{
+       if (run_edge_events_on_boot < 0) {
+               if (dmi_check_system(run_edge_events_on_boot_blacklist))
+                       run_edge_events_on_boot = 0;
+               else
+                       run_edge_events_on_boot = 1;
+       }
+
+       return 0;
+}
+
+/* Directly after dmi_setup() which runs as core_initcall() */
+postcore_initcall(acpi_gpio_setup_params);
index fd713326dcfcffa9bd746962f789afb7642e76b8..565ab945698caa2b5ddbfd827f3ee6bba10b8aaa 100644 (file)
@@ -524,6 +524,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
        if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
                return -EINVAL;
 
+       /*
+        * Do not allow both INPUT & OUTPUT flags to be set as they are
+        * contradictory.
+        */
+       if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
+           (lflags & GPIOHANDLE_REQUEST_OUTPUT))
+               return -EINVAL;
+
        /*
         * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
         * the hardware actually supports enabling both at the same time the
@@ -916,7 +924,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
        }
 
        /* This is just wrong: we don't look for events on output lines */
-       if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+       if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
+           (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+           (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
                ret = -EINVAL;
                goto out_free_label;
        }
@@ -930,10 +940,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 
        if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
                set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-       if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
-               set_bit(FLAG_OPEN_DRAIN, &desc->flags);
-       if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
-               set_bit(FLAG_OPEN_SOURCE, &desc->flags);
 
        ret = gpiod_direction_input(desc);
        if (ret)
@@ -946,9 +952,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
        }
 
        if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
-               irqflags |= IRQF_TRIGGER_RISING;
+               irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+                       IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
        if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
-               irqflags |= IRQF_TRIGGER_FALLING;
+               irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+                       IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
        irqflags |= IRQF_ONESHOT;
        irqflags |= IRQF_SHARED;
 
@@ -1080,9 +1088,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
                        lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
                if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
-                       lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
+                       lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
+                                          GPIOLINE_FLAG_IS_OUT);
                if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
-                       lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
+                       lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
+                                          GPIOLINE_FLAG_IS_OUT);
 
                if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
                        return -EFAULT;
@@ -2639,8 +2649,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
                if (!ret)
                        goto set_output_value;
                /* Emulate open drain by not actively driving the line high */
-               if (value)
-                       return gpiod_direction_input(desc);
+               if (value) {
+                       ret = gpiod_direction_input(desc);
+                       goto set_output_flag;
+               }
        }
        else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
                ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
@@ -2648,8 +2660,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
                if (!ret)
                        goto set_output_value;
                /* Emulate open source by not actively driving the line low */
-               if (!value)
-                       return gpiod_direction_input(desc);
+               if (!value) {
+                       ret = gpiod_direction_input(desc);
+                       goto set_output_flag;
+               }
        } else {
                gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
                                            PIN_CONFIG_DRIVE_PUSH_PULL);
@@ -2657,6 +2671,17 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
 
 set_output_value:
        return gpiod_direction_output_raw_commit(desc, value);
+
+set_output_flag:
+       /*
+        * When emulating open-source or open-drain functionalities by not
+        * actively driving the line (setting mode to input) we still need to
+        * set the IS_OUT flag or otherwise we won't be able to set the line
+        * value anymore.
+        */
+       if (ret == 0)
+               set_bit(FLAG_IS_OUT, &desc->flags);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(gpiod_direction_output);
 
@@ -2877,7 +2902,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
 int gpiod_get_raw_value(const struct gpio_desc *desc)
 {
        VALIDATE_DESC(desc);
-       /* Should be using gpio_get_value_cansleep() */
+       /* Should be using gpiod_get_raw_value_cansleep() */
        WARN_ON(desc->gdev->chip->can_sleep);
        return gpiod_get_raw_value_commit(desc);
 }
@@ -2898,7 +2923,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
        int value;
 
        VALIDATE_DESC(desc);
-       /* Should be using gpio_get_value_cansleep() */
+       /* Should be using gpiod_get_value_cansleep() */
        WARN_ON(desc->gdev->chip->can_sleep);
 
        value = gpiod_get_raw_value_commit(desc);
@@ -2970,8 +2995,6 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
 
        if (value) {
                err = chip->direction_input(chip, offset);
-               if (!err)
-                       clear_bit(FLAG_IS_OUT, &desc->flags);
        } else {
                err = chip->direction_output(chip, offset, 0);
                if (!err)
@@ -3001,8 +3024,6 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value
                        set_bit(FLAG_IS_OUT, &desc->flags);
        } else {
                err = chip->direction_input(chip, offset);
-               if (!err)
-                       clear_bit(FLAG_IS_OUT, &desc->flags);
        }
        trace_gpio_direction(desc_to_gpio(desc), !value, err);
        if (err < 0)
@@ -3123,7 +3144,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
 void gpiod_set_raw_value(struct gpio_desc *desc, int value)
 {
        VALIDATE_DESC_VOID(desc);
-       /* Should be using gpiod_set_value_cansleep() */
+       /* Should be using gpiod_set_raw_value_cansleep() */
        WARN_ON(desc->gdev->chip->can_sleep);
        gpiod_set_raw_value_commit(desc, value);
 }
@@ -3164,6 +3185,7 @@ static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
 void gpiod_set_value(struct gpio_desc *desc, int value)
 {
        VALIDATE_DESC_VOID(desc);
+       /* Should be using gpiod_set_value_cansleep() */
        WARN_ON(desc->gdev->chip->can_sleep);
        gpiod_set_value_nocheck(desc, value);
 }
index 92b11de1958132c28e4ffd68e1fd782a8e2e5771..354c8b6106dc273b52f1d9e898d060038e4d6214 100644 (file)
@@ -575,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
        { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+       { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0, 0, 0, 0, 0 },
index f5fb93795a69a8955cd0327db6e6eddac37a7c1e..65cecfdd9b454f344416d14115ef85574579f98d 100644 (file)
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
        thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
        bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
 
-       data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+       data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
index b40e9c76af0c33f2927057b08f673d93b9174fc4..5e29f14f4b301bea74c28153724668c9c6c0bef3 100644 (file)
@@ -841,6 +841,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
        if (ret == -EPROBE_DEFER)
                return ret;
 
+#ifdef CONFIG_DRM_AMDGPU_SI
+       if (!amdgpu_si_support) {
+               switch (flags & AMD_ASIC_MASK) {
+               case CHIP_TAHITI:
+               case CHIP_PITCAIRN:
+               case CHIP_VERDE:
+               case CHIP_OLAND:
+               case CHIP_HAINAN:
+                       dev_info(&pdev->dev,
+                                "SI support provided by radeon.\n");
+                       dev_info(&pdev->dev,
+                                "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+                               );
+                       return -ENODEV;
+               }
+       }
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+       if (!amdgpu_cik_support) {
+               switch (flags & AMD_ASIC_MASK) {
+               case CHIP_KAVERI:
+               case CHIP_BONAIRE:
+               case CHIP_HAWAII:
+               case CHIP_KABINI:
+               case CHIP_MULLINS:
+                       dev_info(&pdev->dev,
+                                "CIK support provided by radeon.\n");
+                       dev_info(&pdev->dev,
+                                "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+                               );
+                       return -ENODEV;
+               }
+       }
+#endif
+
        /* Get rid of things like offb */
        ret = amdgpu_kick_out_firmware_fb(pdev);
        if (ret)
index 51b5e977ca885ef1f7d7df49698f3c6843bab437..f4e9d1b10e3edef7de36431f2a0085958b9a1278 100644 (file)
@@ -139,7 +139,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        /* ring tests don't use a job */
        if (job) {
                vm = job->vm;
-               fence_ctx = job->base.s_fence->scheduled.context;
+               fence_ctx = job->base.s_fence ?
+                       job->base.s_fence->scheduled.context : 0;
        } else {
                vm = NULL;
                fence_ctx = 0;
index c0396e83f352671ab67b3284a52175624b86dec4..ba10577569f856d42e8393a8d7e6010430c3a803 100644 (file)
@@ -87,41 +87,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
        struct amdgpu_device *adev;
        int r, acpi_status;
 
-#ifdef CONFIG_DRM_AMDGPU_SI
-       if (!amdgpu_si_support) {
-               switch (flags & AMD_ASIC_MASK) {
-               case CHIP_TAHITI:
-               case CHIP_PITCAIRN:
-               case CHIP_VERDE:
-               case CHIP_OLAND:
-               case CHIP_HAINAN:
-                       dev_info(dev->dev,
-                                "SI support provided by radeon.\n");
-                       dev_info(dev->dev,
-                                "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
-                               );
-                       return -ENODEV;
-               }
-       }
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
-       if (!amdgpu_cik_support) {
-               switch (flags & AMD_ASIC_MASK) {
-               case CHIP_KAVERI:
-               case CHIP_BONAIRE:
-               case CHIP_HAWAII:
-               case CHIP_KABINI:
-               case CHIP_MULLINS:
-                       dev_info(dev->dev,
-                                "CIK support provided by radeon.\n");
-                       dev_info(dev->dev,
-                                "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
-                               );
-                       return -ENODEV;
-               }
-       }
-#endif
-
        adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
        if (adev == NULL) {
                return -ENOMEM;
@@ -562,6 +527,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
                        sh_num = 0xffffffff;
 
+               if (info->read_mmr_reg.count > 128)
+                       return -EINVAL;
+
                regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
                if (!regs)
                        return -ENOMEM;
index 5f3f540738187c6db03a7975bced71ea4163c9e0..17862b9ecccd73914af0b2659bed33498f8b255a 100644 (file)
@@ -1070,7 +1070,7 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       uint32_t rptr = amdgpu_ring_get_rptr(ring);
+       uint32_t rptr;
        unsigned i;
        int r, timeout = adev->usec_timeout;
 
@@ -1084,6 +1084,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
                          ring->idx, r);
                return r;
        }
+
+       rptr = amdgpu_ring_get_rptr(ring);
+
        amdgpu_ring_write(ring, VCE_CMD_END);
        amdgpu_ring_commit(ring);
 
index 400fc74bbae27e878aebe4e6e27f6eaf22ca8e15..205e683fb92060ad407fdbb18696fe7cf2a699df 100644 (file)
@@ -431,7 +431,7 @@ error:
 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       uint32_t rptr = amdgpu_ring_get_rptr(ring);
+       uint32_t rptr;
        unsigned i;
        int r;
 
@@ -441,6 +441,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
                          ring->idx, r);
                return r;
        }
+
+       rptr = amdgpu_ring_get_rptr(ring);
+
        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
        amdgpu_ring_commit(ring);
 
index 46568497ef181af6828e6bed9e50f416fab1327c..7824116498169b71a21adf6c0ca5e05ebcca055c 100644 (file)
@@ -82,7 +82,8 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
 
 static const struct soc15_reg_golden golden_settings_gc_9_0[] =
 {
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
index 72f8018fa2a836572b9c898785bb99deecc1ca91..ede27dab675facf137f2a80b2b738b485c864874 100644 (file)
@@ -1037,6 +1037,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
        tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
        WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
+       WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
+       WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+
        /* After HDP is initialized, flush HDP.*/
        adev->nbio_funcs->hdp_flush(adev, NULL);
 
index c364ef94cc366e515533728dc05997492de58cab..77c9f4d8668adf96e2042240206c90d606ec1b7c 100644 (file)
@@ -1813,7 +1813,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
                        if (orig != data)
                                si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
 
-                       if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
+                       if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
                                orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
                                data &= ~PLL_RAMP_UP_TIME_0_MASK;
                                if (orig != data)
@@ -1862,14 +1862,14 @@ static void si_program_aspm(struct amdgpu_device *adev)
 
                        orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
                        data &= ~LS2_EXIT_TIME_MASK;
-                       if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+                       if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
                                data |= LS2_EXIT_TIME(5);
                        if (orig != data)
                                si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
 
                        orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
                        data &= ~LS2_EXIT_TIME_MASK;
-                       if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+                       if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
                                data |= LS2_EXIT_TIME(5);
                        if (orig != data)
                                si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
index d4070839ac809759c310b0b9f7ba38fca79d30b2..80613a74df4207827281f3c17b691fb81763419e 100644 (file)
@@ -170,7 +170,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       uint32_t rptr = amdgpu_ring_get_rptr(ring);
+       uint32_t rptr;
        unsigned i;
        int r;
 
@@ -180,6 +180,9 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
                          ring->idx, r);
                return r;
        }
+
+       rptr = amdgpu_ring_get_rptr(ring);
+
        amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
        amdgpu_ring_commit(ring);
 
index 057151b17b456a208a7abe5b22aca762666257b8..ce16b8329af044da43d19a000bd6e0848dc80ab9 100644 (file)
@@ -175,7 +175,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       uint32_t rptr = amdgpu_ring_get_rptr(ring);
+       uint32_t rptr;
        unsigned i;
        int r;
 
@@ -188,6 +188,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
                          ring->me, ring->idx, r);
                return r;
        }
+
+       rptr = amdgpu_ring_get_rptr(ring);
+
        amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
        amdgpu_ring_commit(ring);
 
index 5aba50f63ac6f0f736341d2ff0172f8f6199e5b0..938d0053a820810de353cb07d1411b730c533a22 100644 (file)
@@ -310,6 +310,7 @@ static const struct kfd_deviceid supported_devices[] = {
        { 0x67CF, &polaris10_device_info },     /* Polaris10 */
        { 0x67D0, &polaris10_vf_device_info },  /* Polaris10 vf*/
        { 0x67DF, &polaris10_device_info },     /* Polaris10 */
+       { 0x6FDF, &polaris10_device_info },     /* Polaris10 */
        { 0x67E0, &polaris11_device_info },     /* Polaris11 */
        { 0x67E1, &polaris11_device_info },     /* Polaris11 */
        { 0x67E3, &polaris11_device_info },     /* Polaris11 */
index 4f22e745df51b4c2aad4ec842f04ac96b4070f68..189212cb3547585b4c5f6f6e64309953a4982a5e 100644 (file)
@@ -1268,12 +1268,17 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
        return 0;
 }
 
-static int unmap_sdma_queues(struct device_queue_manager *dqm,
-                               unsigned int sdma_engine)
+static int unmap_sdma_queues(struct device_queue_manager *dqm)
 {
-       return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
-                       KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
-                       sdma_engine);
+       int i, retval = 0;
+
+       for (i = 0; i < dqm->dev->device_info->num_sdma_engines; i++) {
+               retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
+                       KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
+               if (retval)
+                       return retval;
+       }
+       return retval;
 }
 
 /* dqm->lock mutex has to be locked before calling this function */
@@ -1312,10 +1317,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
        pr_debug("Before destroying queues, sdma queue count is : %u\n",
                dqm->sdma_queue_count);
 
-       if (dqm->sdma_queue_count > 0) {
-               unmap_sdma_queues(dqm, 0);
-               unmap_sdma_queues(dqm, 1);
-       }
+       if (dqm->sdma_queue_count > 0)
+               unmap_sdma_queues(dqm);
 
        retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
                        filter, filter_param, false, 0);
index 0cedb37cf513563dc6fea50e6b40ef0889c3bb61..985bebde5a343a04e473711e975be7726176acdb 100644 (file)
@@ -75,6 +75,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
        struct v9_mqd *m;
        struct kfd_dev *kfd = mm->dev;
 
+       *mqd_mem_obj = NULL;
        /* From V9,  for CWSR, the control stack is located on the next page
         * boundary after the mqd, we will use the gtt allocation function
         * instead of sub-allocation function.
@@ -92,8 +93,10 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
        } else
                retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
                                mqd_mem_obj);
-       if (retval != 0)
+       if (retval) {
+               kfree(*mqd_mem_obj);
                return -ENOMEM;
+       }
 
        m = (struct v9_mqd *) (*mqd_mem_obj)->cpu_ptr;
        addr = (*mqd_mem_obj)->gpu_addr;
index dac7978f5ee1f91b32b72464189daaf52147c9ff..3b07a316680c23d53e27f7814740aa5448752e6b 100644 (file)
@@ -1462,6 +1462,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
 }
 
 static const struct backlight_ops amdgpu_dm_backlight_ops = {
+       .options = BL_CORE_SUSPENDRESUME,
        .get_brightness = amdgpu_dm_backlight_get_brightness,
        .update_status  = amdgpu_dm_backlight_update_status,
 };
@@ -3644,6 +3645,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 {
        struct amdgpu_device *adev = dm->ddev->dev_private;
 
+       /*
+        * Some of the properties below require access to state, like bpc.
+        * Allocate some default initial connector state with our reset helper.
+        */
+       if (aconnector->base.funcs->reset)
+               aconnector->base.funcs->reset(&aconnector->base);
+
        aconnector->connector_id = link_index;
        aconnector->dc_link = link;
        aconnector->base.interlace_allowed = false;
@@ -3811,9 +3819,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                        &aconnector->base,
                        &amdgpu_dm_connector_helper_funcs);
 
-       if (aconnector->base.funcs->reset)
-               aconnector->base.funcs->reset(&aconnector->base);
-
        amdgpu_dm_connector_init_helper(
                dm,
                aconnector,
index 59445c83f02389a1e6b839c74863ddbabcaeeb80..c85bea70d96522a841d61543e5a8674d244f420e 100644 (file)
@@ -377,9 +377,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        drm_connector_attach_encoder(&aconnector->base,
                                     &aconnector->mst_encoder->base);
 
-       /*
-        * TODO: understand why this one is needed
-        */
        drm_object_attach_property(
                &connector->base,
                dev->mode_config.path_property,
index 95f332ee3e7e6e3858294d485c03c23c1c25f613..16614d73a5fcf61535fc1aac6a3a8e75753c589c 100644 (file)
@@ -32,6 +32,10 @@ endif
 
 calcs_ccflags := -mhard-float -msse $(cc_stack_align)
 
+ifdef CONFIG_CC_IS_CLANG
+calcs_ccflags += -msse2
+endif
+
 CFLAGS_dcn_calcs.o := $(calcs_ccflags)
 CFLAGS_dcn_calc_auto.o := $(calcs_ccflags)
 CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
index e3f5e5d6f0c18ece19bf88a3a0ee30fdd041318a..2b2efe443c36d27b70b4591193efe2a19648378f 100644 (file)
@@ -462,8 +462,10 @@ void dc_link_set_test_pattern(struct dc_link *link,
 
 static void destruct(struct dc *dc)
 {
-       dc_release_state(dc->current_state);
-       dc->current_state = NULL;
+       if (dc->current_state) {
+               dc_release_state(dc->current_state);
+               dc->current_state = NULL;
+       }
 
        destroy_links(dc);
 
@@ -1583,6 +1585,14 @@ void dc_set_power_state(
                dc_resource_state_construct(dc, dc->current_state);
 
                dc->hwss.init_hw(dc);
+
+#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+               if (dc->hwss.init_sys_ctx != NULL &&
+                       dc->vm_pa_config.valid) {
+                       dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
+               }
+#endif
+
                break;
        default:
 
index e0a96abb3c46c7196c4c09770a038ac864c10849..d440b28ee43fb94ce99ae8dadfae321fb80625a2 100644 (file)
@@ -222,19 +222,17 @@ bool resource_construct(
                 * PORT_CONNECTIVITY == 1 (as instructed by HW team).
                 */
                update_num_audio(&straps, &num_audio, &pool->audio_support);
-               for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
+               for (i = 0; i < caps->num_audio; i++) {
                        struct audio *aud = create_funcs->create_audio(ctx, i);
 
                        if (aud == NULL) {
                                DC_ERR("DC: failed to create audio!\n");
                                return false;
                        }
-
                        if (!aud->funcs->endpoint_valid(aud)) {
                                aud->funcs->destroy(&aud);
                                break;
                        }
-
                        pool->audios[i] = aud;
                        pool->audio_count++;
                }
@@ -1703,18 +1701,25 @@ static struct audio *find_first_free_audio(
                const struct resource_pool *pool,
                enum engine_id id)
 {
-       int i;
-       for (i = 0; i < pool->audio_count; i++) {
+       int i, available_audio_count;
+
+       available_audio_count = pool->audio_count;
+
+       for (i = 0; i < available_audio_count; i++) {
                if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
                        /*we have enough audio endpoint, find the matching inst*/
                        if (id != i)
                                continue;
-
                        return pool->audios[i];
                }
        }
+
+       /* use engine id to find free audio */
+       if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
+               return pool->audios[id];
+       }
        /*not found the matching one, first come first serve*/
-       for (i = 0; i < pool->audio_count; i++) {
+       for (i = 0; i < available_audio_count; i++) {
                if (res_ctx->is_audio_acquired[i] == false) {
                        return pool->audios[i];
                }
@@ -1866,6 +1871,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
                pix_clk /= 2;
        if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
                switch (timing->display_color_depth) {
+               case COLOR_DEPTH_666:
                case COLOR_DEPTH_888:
                        normalized_pix_clk = pix_clk;
                        break;
@@ -1949,7 +1955,7 @@ enum dc_status resource_map_pool_resources(
        /* TODO: Add check if ASIC support and EDID audio */
        if (!stream->sink->converter_disable_audio &&
            dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
-           stream->audio_info.mode_count) {
+           stream->audio_info.mode_count && stream->audio_info.flags.all) {
                pipe_ctx->stream_res.audio = find_first_free_audio(
                &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
 
index 29294db1a96b775013635d080725e392cda4766b..da8b198538e5fdea8ba6dadbab666589ab28c1da 100644 (file)
@@ -242,6 +242,10 @@ static void dmcu_set_backlight_level(
        s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
 
        REG_WRITE(BIOS_SCRATCH_2, s2);
+
+       /* waitDMCUReadyForCmd */
+       REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
+                       0, 1, 80000);
 }
 
 static void dce_abm_init(struct abm *abm)
@@ -474,6 +478,8 @@ void dce_abm_destroy(struct abm **abm)
 {
        struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
 
+       abm_dce->base.funcs->set_abm_immediate_disable(*abm);
+
        kfree(abm_dce);
        *abm = NULL;
 }
index 7f6d724686f1adc7255c244568d140565b3a8fd3..abb559ce640850f38a6a9cb8c36a673c1730a30c 100644 (file)
@@ -611,6 +611,8 @@ void dce_aud_az_configure(
 
        AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
                value);
+       DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u data, 0x%x, displayName %s: \n",
+               audio->inst, value, audio_info->display_name);
 
        /*
        *write the port ID:
@@ -922,7 +924,6 @@ static const struct audio_funcs funcs = {
        .az_configure = dce_aud_az_configure,
        .destroy = dce_aud_destroy,
 };
-
 void dce_aud_destroy(struct audio **audio)
 {
        struct dce_audio *aud = DCE_AUD(*audio);
@@ -953,7 +954,6 @@ struct audio *dce_audio_create(
        audio->regs = reg;
        audio->shifts = shifts;
        audio->masks = masks;
-
        return &audio->base;
 }
 
index 53ccacf99eca416d129045add530367d41634685..c3ad2bbec1a5278beb92a46c5c84204455f8ee01 100644 (file)
@@ -242,6 +242,9 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,
        prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
 
        switch (plane_state->format) {
+       case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+               prescale_params->scale = 0x2082;
+               break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
                prescale_params->scale = 0x2020;
index 5d95a997fd9f96e5539cf6e91fa3d05483cf9a8f..f8904f73f57b0b1d59b3520a4cf432a91e56c98a 100644 (file)
@@ -292,9 +292,10 @@ bool cm_helper_translate_curve_to_hw_format(
                seg_distr[7] = 4;
                seg_distr[8] = 4;
                seg_distr[9] = 4;
+               seg_distr[10] = 1;
 
                region_start = -10;
-               region_end = 0;
+               region_end = 1;
        }
 
        for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
index 7736ef123e9be097836e217e9e9b6d55747e19b6..ead221ccb93e0aa78f84c5152a1716c13ca85983 100644 (file)
@@ -23,6 +23,7 @@
  *
  */
 
+#include <linux/delay.h>
 #include "dm_services.h"
 #include "core_types.h"
 #include "resource.h"
index d97ca6528f9d9d943f41bb416466277d2fcdf2bc..934ffe1b4b00e2e11423292021a7388432c4dc2a 100644 (file)
@@ -32,6 +32,10 @@ endif
 
 dml_ccflags := -mhard-float -msse $(cc_stack_align)
 
+ifdef CONFIG_CC_IS_CLANG
+dml_ccflags += -msse2
+endif
+
 CFLAGS_display_mode_lib.o := $(dml_ccflags)
 CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
 CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)
index c0b9ca13393b61a502902b4950dc43e462b8c718..f4469fa5afb553e9828c5c25e412926bb6c9508f 100644 (file)
@@ -159,7 +159,7 @@ struct resource_pool {
        struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
        unsigned int clk_src_count;
 
-       struct audio *audios[MAX_PIPES];
+       struct audio *audios[MAX_AUDIOS];
        unsigned int audio_count;
        struct audio_support audio_support;
 
index cf7433ebf91a07557b1f4328980ba4bdcf15ab07..71901743a9387b705e5e54b465f2ffbea1afa333 100644 (file)
@@ -34,6 +34,7 @@
  * Data types shared between different Virtual HW blocks
  ******************************************************************************/
 
+#define MAX_AUDIOS 7
 #define MAX_PIPES 6
 
 struct gamma_curve {
index b52ccab428a9ed8b5b11a7ce07c881e78483f987..c7c505095402df3d5f8291c8592ae391e8ec65fb 100644 (file)
@@ -4052,6 +4052,11 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
 
        data->frame_time_x2 = frame_time_in_us * 2 / 100;
 
+       if (data->frame_time_x2 < 280) {
+               pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
+               data->frame_time_x2 = 280;
+       }
+
        display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
 
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
index c9a15baf2c10fc004551af4f02b6b13efee2d2c6..0adfc5392cd375f425da25ffd49767518c927c23 100644 (file)
@@ -1222,17 +1222,14 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 
 static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
-       if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
-               smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
+       if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
                return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
-       }
        return 0;
 }
 
 static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
 {
        if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
-               smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
                return smum_send_msg_to_smc_with_parameter(
                        hwmgr,
                        PPSMC_MSG_UVDPowerON,
index 2aab1b4759459fb421443b30d563a1a4e1860e3a..cede78cdf28db5c271895be9b94857717a1974ab 100644 (file)
@@ -669,20 +669,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
        for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
                table->WatermarkRow[1][i].MinClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+                       1000));
                table->WatermarkRow[1][i].MaxClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
-                       100);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+                       1000));
                table->WatermarkRow[1][i].MinUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+                       1000));
                table->WatermarkRow[1][i].MaxUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+                       1000));
                table->WatermarkRow[1][i].WmSetting = (uint8_t)
                                wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
        }
@@ -690,20 +690,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
        for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
                table->WatermarkRow[0][i].MinClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+                       1000));
                table->WatermarkRow[0][i].MaxClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+                       1000));
                table->WatermarkRow[0][i].MinUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+                       1000));
                table->WatermarkRow[0][i].MaxUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+                       1000));
                table->WatermarkRow[0][i].WmSetting = (uint8_t)
                                wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
        }
index 373700c05a00f9f3890d48fee79317edf77cedd8..224fa1ef87ff92125c5ada667f41c0defac43e86 100644 (file)
@@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 
 
        /* Enable extended register access */
-       ast_enable_mmio(dev);
        ast_open_key(ast);
+       ast_enable_mmio(dev);
 
        /* Find out whether P2A works or whether to use device-tree */
        ast_detect_config_mode(dev, &scu_rev);
@@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev)
 {
        struct ast_private *ast = dev->dev_private;
 
+       /* enable standard VGA decode */
+       ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
+
        ast_release_firmware(dev);
        kfree(ast->dp501_fw_addr);
        ast_mode_fini(dev);
index 8bb355d5d43d80169fbfeb73954008b772616e15..9d92d2d2fcfc7c1ccb85fa6ffce1485fc874f979 100644 (file)
@@ -600,7 +600,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
                return -EINVAL;
        ast_open_key(ast);
 
-       ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+       ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
 
        ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
        ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
index f7d421359d564756ff86d78c64293e37eea74c7e..c1d1ac51d1c207c0cb0b2f08825aa19ca7761bde 100644 (file)
@@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev)
 {
        struct ast_private *ast = dev->dev_private;
 
-       ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+       ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
 }
 
 
index f5985a77072e1e06c877a866e570c7157b33be09..5b70211147d11b03bba08cbb9f30540887969114 100644 (file)
@@ -65,6 +65,7 @@ config DRM_DUMB_VGA_DAC
 config DRM_LVDS_ENCODER
        tristate "Transparent parallel to LVDS encoder support"
        depends on OF
+       select DRM_KMS_HELPER
        select DRM_PANEL_BRIDGE
        help
          Support for transparent parallel to LVDS encoders that don't require
index 786bd649684036adb8354b266a712d5550f1d222..befcf8119e1d5080f5b2783478551611d0255970 100644 (file)
@@ -18,7 +18,7 @@ obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
 obj-$(CONFIG_DRM_CDNS_MHDP) += mhdp8546.o
 obj-y += synopsys/
 
-mhdp8546-objs := cdns-mhdp-common.o cdns-mhdp.o
+mhdp8546-objs := cdns-mhdp.o
 ifeq ($(CONFIG_DRM_CDNS_MHDP_J721E),y)
        mhdp8546-objs += cdns-mhdp-j721e.o
 endif
index d68986cea13258bed331d8d023db7b493f720e5b..84abf5d6f760a4c6530c613e718baf512e599258 100644 (file)
@@ -1040,16 +1040,17 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
        if (ret)
                return ret;
 
+       /* Check whether panel supports fast training */
+       ret = analogix_dp_fast_link_train_detection(dp);
+       if (ret)
+               dp->psr_enable = false;
+
        if (dp->psr_enable) {
                ret = analogix_dp_enable_sink_psr(dp);
                if (ret)
                        return ret;
        }
 
-       /* Check whether panel supports fast training */
-       ret =  analogix_dp_fast_link_train_detection(dp);
-       if (ret)
-               dp->psr_enable = false;
 
        return ret;
 }
diff --git a/drivers/gpu/drm/bridge/cdns-mhdp-common.c b/drivers/gpu/drm/bridge/cdns-mhdp-common.c
deleted file mode 100644 (file)
index 21acd6b..0000000
+++ /dev/null
@@ -1,1103 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
- * Author: Chris Zhong <zyw@rock-chips.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/reset.h>
-
-#include <asm/unaligned.h>
-
-#include <drm/bridge/cdns-mhdp-common.h>
-#include <drm/drm_modes.h>
-#include <drm/drm_print.h>
-
-#define CDNS_DP_SPDIF_CLK              200000000
-#define FW_ALIVE_TIMEOUT_US            1000000
-#define MAILBOX_RETRY_US               1000
-#define MAILBOX_TIMEOUT_US             5000000
-#define LINK_TRAINING_RETRY_MS         20
-#define LINK_TRAINING_TIMEOUT_MS       500
-
-static inline u32 get_unaligned_be24(const void *p)
-{
-       const u8 *_p = p;
-
-       return _p[0] << 16 | _p[1] << 8 | _p[2];
-}
-
-static inline void put_unaligned_be24(u32 val, void *p)
-{
-       u8 *_p = p;
-
-       _p[0] = val >> 16;
-       _p[1] = val >> 8;
-       _p[2] = val;
-}
-
-void cdns_mhdp_set_fw_clk(struct cdns_mhdp_device *mhdp, unsigned long clk)
-{
-       writel(clk / 1000000, mhdp->regs + SW_CLK_H);
-}
-EXPORT_SYMBOL(cdns_mhdp_set_fw_clk);
-
-void cdns_mhdp_clock_reset(struct cdns_mhdp_device *mhdp)
-{
-       u32 val;
-
-       val = DPTX_FRMR_DATA_CLK_RSTN_EN |
-             DPTX_FRMR_DATA_CLK_EN |
-             DPTX_PHY_DATA_RSTN_EN |
-             DPTX_PHY_DATA_CLK_EN |
-             DPTX_PHY_CHAR_RSTN_EN |
-             DPTX_PHY_CHAR_CLK_EN |
-             SOURCE_AUX_SYS_CLK_RSTN_EN |
-             SOURCE_AUX_SYS_CLK_EN |
-             DPTX_SYS_CLK_RSTN_EN |
-             DPTX_SYS_CLK_EN |
-             CFG_DPTX_VIF_CLK_RSTN_EN |
-             CFG_DPTX_VIF_CLK_EN;
-       writel(val, mhdp->regs + SOURCE_DPTX_CAR);
-
-       val = SOURCE_PHY_RSTN_EN | SOURCE_PHY_CLK_EN;
-       writel(val, mhdp->regs + SOURCE_PHY_CAR);
-
-       val = SOURCE_PKT_SYS_RSTN_EN |
-             SOURCE_PKT_SYS_CLK_EN |
-             SOURCE_PKT_DATA_RSTN_EN |
-             SOURCE_PKT_DATA_CLK_EN;
-       writel(val, mhdp->regs + SOURCE_PKT_CAR);
-
-       val = SPDIF_CDR_CLK_RSTN_EN |
-             SPDIF_CDR_CLK_EN |
-             SOURCE_AIF_SYS_RSTN_EN |
-             SOURCE_AIF_SYS_CLK_EN |
-             SOURCE_AIF_CLK_RSTN_EN |
-             SOURCE_AIF_CLK_EN;
-       writel(val, mhdp->regs + SOURCE_AIF_CAR);
-
-       val = SOURCE_CIPHER_SYSTEM_CLK_RSTN_EN |
-             SOURCE_CIPHER_SYS_CLK_EN |
-             SOURCE_CIPHER_CHAR_CLK_RSTN_EN |
-             SOURCE_CIPHER_CHAR_CLK_EN;
-       writel(val, mhdp->regs + SOURCE_CIPHER_CAR);
-
-       val = SOURCE_CRYPTO_SYS_CLK_RSTN_EN |
-             SOURCE_CRYPTO_SYS_CLK_EN;
-       writel(val, mhdp->regs + SOURCE_CRYPTO_CAR);
-
-       /* enable Mailbox and PIF interrupt */
-       writel(0, mhdp->regs + APB_INT_MASK);
-}
-EXPORT_SYMBOL(cdns_mhdp_clock_reset);
-
-static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
-{
-       int val, ret;
-
-       ret = readx_poll_timeout(readl, mhdp->regs + MAILBOX_EMPTY_ADDR,
-                                val, !val, MAILBOX_RETRY_US,
-                                MAILBOX_TIMEOUT_US);
-       if (ret < 0)
-               return ret;
-
-       return readl(mhdp->regs + MAILBOX0_RD_DATA) & 0xff;
-}
-
-static int cdp_dp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
-{
-       int ret, full;
-
-       ret = readx_poll_timeout(readl, mhdp->regs + MAILBOX_FULL_ADDR,
-                                full, !full, MAILBOX_RETRY_US,
-                                MAILBOX_TIMEOUT_US);
-       if (ret < 0)
-               return ret;
-
-       writel(val, mhdp->regs + MAILBOX0_WR_DATA);
-
-       return 0;
-}
-
-static int cdns_mhdp_mailbox_validate_receive(struct cdns_mhdp_device *mhdp,
-                                             u8 module_id, u8 opcode,
-                                             u16 req_size)
-{
-       u32 mbox_size, i;
-       u8 header[4];
-       int ret;
-
-       /* read the header of the message */
-       for (i = 0; i < 4; i++) {
-               ret = cdns_mhdp_mailbox_read(mhdp);
-               if (ret < 0)
-                       return ret;
-
-               header[i] = ret;
-       }
-
-       mbox_size = get_unaligned_be16(header + 2);
-
-       if (opcode != header[0] || module_id != header[1] ||
-           req_size != mbox_size) {
-               /*
-                * If the message in mailbox is not what we want, we need to
-                * clear the mailbox by reading its contents.
-                */
-               for (i = 0; i < mbox_size; i++)
-                       if (cdns_mhdp_mailbox_read(mhdp) < 0)
-                               break;
-
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int cdns_mhdp_mailbox_read_receive(struct cdns_mhdp_device *mhdp,
-                                         u8 *buff, u16 buff_size)
-{
-       u32 i;
-       int ret;
-
-       for (i = 0; i < buff_size; i++) {
-               ret = cdns_mhdp_mailbox_read(mhdp);
-               if (ret < 0)
-                       return ret;
-
-               buff[i] = ret;
-       }
-
-       return 0;
-}
-
-static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
-                                 u8 opcode, u16 size, u8 *message)
-{
-       u8 header[4];
-       int ret, i;
-
-       header[0] = opcode;
-       header[1] = module_id;
-       put_unaligned_be16(size, header + 2);
-
-       for (i = 0; i < 4; i++) {
-               ret = cdp_dp_mailbox_write(mhdp, header[i]);
-               if (ret)
-                       return ret;
-       }
-
-       for (i = 0; i < size; i++) {
-               ret = cdp_dp_mailbox_write(mhdp, message[i]);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
-{
-       u8 msg[4], resp[8];
-       int ret;
-
-       if (addr == 0) {
-               ret = -EINVAL;
-               goto err_reg_read;
-       }
-
-       put_unaligned_be32(addr, msg);
-
-       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
-                                    GENERAL_REGISTER_READ,
-                                    sizeof(msg), msg);
-       if (ret)
-               goto err_reg_read;
-
-       ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_GENERAL,
-                                                GENERAL_REGISTER_READ,
-                                                sizeof(resp));
-       if (ret)
-               goto err_reg_read;
-
-       ret = cdns_mhdp_mailbox_read_receive(mhdp, resp, sizeof(resp));
-       if (ret)
-               goto err_reg_read;
-
-       /* Returned address value should be the same as requested */
-       if (memcmp(msg, resp, sizeof(msg))) {
-               ret = -EINVAL;
-               goto err_reg_read;
-       }
-
-       *value = get_unaligned_be32(resp + 4);
-
-err_reg_read:
-       if (ret) {
-               DRM_DEV_ERROR(mhdp->dev, "Failed to read register.\n");
-               *value = 0;
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_reg_read);
-
-int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
-{
-       u8 msg[6];
-
-       put_unaligned_be16(addr, msg);
-       put_unaligned_be32(val, msg + 2);
-
-       return cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                     DPTX_WRITE_REGISTER, sizeof(msg), msg);
-}
-EXPORT_SYMBOL(cdns_mhdp_reg_write);
-
-int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
-                                  u8 start_bit, u8 bits_no, u32 val)
-{
-       u8 field[8];
-
-       put_unaligned_be16(addr, field);
-       field[2] = start_bit;
-       field[3] = bits_no;
-       put_unaligned_be32(val, field + 4);
-
-       return cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                     DPTX_WRITE_FIELD, sizeof(field), field);
-}
-EXPORT_SYMBOL(cdns_mhdp_reg_write_bit);
-
-int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
-                       u32 addr, u8 *data, u16 len)
-{
-       u8 msg[5], reg[5];
-       int ret;
-
-       put_unaligned_be16(len, msg);
-       put_unaligned_be24(addr, msg + 2);
-
-       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                    DPTX_READ_DPCD, sizeof(msg), msg);
-       if (ret)
-               goto err_dpcd_read;
-
-       ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
-                                                DPTX_READ_DPCD,
-                                                sizeof(reg) + len);
-       if (ret)
-               goto err_dpcd_read;
-
-       ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
-       if (ret)
-               goto err_dpcd_read;
-
-       ret = cdns_mhdp_mailbox_read_receive(mhdp, data, len);
-
-err_dpcd_read:
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_dpcd_read);
-
-int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
-{
-       u8 msg[6], reg[5];
-       int ret;
-
-       put_unaligned_be16(1, msg);
-       put_unaligned_be24(addr, msg + 2);
-       msg[5] = value;
-
-       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                    DPTX_WRITE_DPCD, sizeof(msg), msg);
-       if (ret)
-               goto err_dpcd_write;
-
-       ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
-                                                DPTX_WRITE_DPCD, sizeof(reg));
-       if (ret)
-               goto err_dpcd_write;
-
-       ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
-       if (ret)
-               goto err_dpcd_write;
-
-       if (addr != get_unaligned_be24(reg + 2))
-               ret = -EINVAL;
-
-err_dpcd_write:
-       if (ret)
-               DRM_DEV_ERROR(mhdp->dev, "dpcd write failed: %d\n", ret);
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_dpcd_write);
-
-int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp, const u32 *i_mem,
-                           u32 i_size, const u32 *d_mem, u32 d_size)
-{
-       u32 reg;
-       int i, ret;
-
-       /* reset ucpu before load firmware*/
-       writel(APB_IRAM_PATH | APB_DRAM_PATH | APB_XT_RESET,
-              mhdp->regs + APB_CTRL);
-
-       for (i = 0; i < i_size; i += 4)
-               writel(*i_mem++, mhdp->regs + ADDR_IMEM + i);
-
-       for (i = 0; i < d_size; i += 4)
-               writel(*d_mem++, mhdp->regs + ADDR_DMEM + i);
-
-       /* un-reset ucpu */
-       writel(0, mhdp->regs + APB_CTRL);
-
-       /* check the keep alive register to make sure fw working */
-       ret = readx_poll_timeout(readl, mhdp->regs + KEEP_ALIVE,
-                                reg, reg, 2000, FW_ALIVE_TIMEOUT_US);
-       if (ret < 0) {
-               DRM_DEV_ERROR(mhdp->dev, "failed to loaded the FW reg = %x\n",
-                             reg);
-               return -EINVAL;
-       }
-
-       reg = readl(mhdp->regs + VER_L) & 0xff;
-       mhdp->fw_version = reg;
-       reg = readl(mhdp->regs + VER_H) & 0xff;
-       mhdp->fw_version |= reg << 8;
-       reg = readl(mhdp->regs + VER_LIB_L_ADDR) & 0xff;
-       mhdp->fw_version |= reg << 16;
-       reg = readl(mhdp->regs + VER_LIB_H_ADDR) & 0xff;
-       mhdp->fw_version |= reg << 24;
-
-       DRM_DEV_DEBUG(mhdp->dev, "firmware version: %x\n", mhdp->fw_version);
-
-       return 0;
-}
-EXPORT_SYMBOL(cdns_mhdp_load_firmware);
-
-int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
-{
-       u8 msg[5];
-       int ret, i;
-
-       msg[0] = GENERAL_MAIN_CONTROL;
-       msg[1] = MB_MODULE_ID_GENERAL;
-       msg[2] = 0;
-       msg[3] = 1;
-       msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
-
-       for (i = 0; i < sizeof(msg); i++) {
-               ret = cdp_dp_mailbox_write(mhdp, msg[i]);
-               if (ret)
-                       goto err_set_firmware_active;
-       }
-
-       /* read the firmware state */
-       for (i = 0; i < sizeof(msg); i++)  {
-               ret = cdns_mhdp_mailbox_read(mhdp);
-               if (ret < 0)
-                       goto err_set_firmware_active;
-
-               msg[i] = ret;
-       }
-
-       ret = 0;
-
-err_set_firmware_active:
-       if (ret < 0)
-               DRM_DEV_ERROR(mhdp->dev, "set firmware active failed\n");
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_set_firmware_active);
-
-int cdns_mhdp_set_host_cap(struct cdns_mhdp_device *mhdp, u8 lanes, bool flip)
-{
-       u8 msg[8];
-       int ret;
-
-       msg[0] = CDNS_DP_MAX_LINK_RATE;
-       msg[1] = lanes | SCRAMBLER_EN;
-       msg[2] = VOLTAGE_LEVEL_2;
-       msg[3] = PRE_EMPHASIS_LEVEL_3;
-       msg[4] = PTS1 | PTS2 | PTS3 | PTS4;
-       msg[5] = FAST_LT_NOT_SUPPORT;
-       msg[6] = flip ? LANE_MAPPING_FLIPPED : LANE_MAPPING_NORMAL;
-       msg[7] = ENHANCED;
-
-       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                    DPTX_SET_HOST_CAPABILITIES,
-                                    sizeof(msg), msg);
-       if (ret)
-               goto err_set_host_cap;
-
-       ret = cdns_mhdp_reg_write(mhdp, DP_AUX_SWAP_INVERSION_CONTROL,
-                                 AUX_HOST_INVERT);
-
-err_set_host_cap:
-       if (ret)
-               DRM_DEV_ERROR(mhdp->dev, "set host cap failed: %d\n", ret);
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_set_host_cap);
-
-int cdns_mhdp_event_config(struct cdns_mhdp_device *mhdp)
-{
-       u8 msg[5];
-       int ret;
-
-       memset(msg, 0, sizeof(msg));
-
-       msg[0] = DPTX_EVENT_ENABLE_HPD | DPTX_EVENT_ENABLE_TRAINING;
-
-       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                    DPTX_ENABLE_EVENT, sizeof(msg), msg);
-       if (ret)
-               DRM_DEV_ERROR(mhdp->dev, "set event config failed: %d\n", ret);
-
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_event_config);
-
-u32 cdns_mhdp_get_event(struct cdns_mhdp_device *mhdp)
-{
-       return readl(mhdp->regs + SW_EVENTS0);
-}
-EXPORT_SYMBOL(cdns_mhdp_get_event);
-
-int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
-{
-       u8 status;
-       int ret;
-
-       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                    DPTX_HPD_STATE, 0, NULL);
-       if (ret)
-               goto err_get_hpd;
-
-       ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
-                                                DPTX_HPD_STATE,
-                                                sizeof(status));
-       if (ret)
-               goto err_get_hpd;
-
-       ret = cdns_mhdp_mailbox_read_receive(mhdp, &status, sizeof(status));
-       if (ret)
-               goto err_get_hpd;
-
-       return status;
-
-err_get_hpd:
-       DRM_DEV_ERROR(mhdp->dev, "get hpd status failed: %d\n", ret);
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_get_hpd_status);
-
-int cdns_mhdp_get_edid_block(void *data, u8 *edid,
-                         unsigned int block, size_t length)
-{
-       struct cdns_mhdp_device *mhdp = data;
-       u8 msg[2], reg[2], i;
-       int ret;
-
-       for (i = 0; i < 4; i++) {
-               msg[0] = block / 2;
-               msg[1] = block % 2;
-
-               ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                            DPTX_GET_EDID, sizeof(msg), msg);
-               if (ret)
-                       continue;
-
-               ret = cdns_mhdp_mailbox_validate_receive(mhdp,
-                                                        MB_MODULE_ID_DP_TX,
-                                                        DPTX_GET_EDID,
-                                                        sizeof(reg) + length);
-               if (ret)
-                       continue;
-
-               ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
-               if (ret)
-                       continue;
-
-               ret = cdns_mhdp_mailbox_read_receive(mhdp, edid, length);
-               if (ret)
-                       continue;
-
-               if (reg[0] == length && reg[1] == block / 2)
-                       break;
-       }
-
-       if (ret)
-               DRM_DEV_ERROR(mhdp->dev, "get block[%d] edid failed: %d\n",
-                             block, ret);
-
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_get_edid_block);
-
-static int cdns_mhdp_training_start(struct cdns_mhdp_device *mhdp)
-{
-       unsigned long timeout;
-       u8 msg, event[2];
-       int ret;
-
-       msg = LINK_TRAINING_RUN;
-
-       /* start training */
-       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                    DPTX_TRAINING_CONTROL, sizeof(msg), &msg);
-       if (ret)
-               goto err_training_start;
-
-       timeout = jiffies + msecs_to_jiffies(LINK_TRAINING_TIMEOUT_MS);
-       while (time_before(jiffies, timeout)) {
-               msleep(LINK_TRAINING_RETRY_MS);
-               ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                            DPTX_READ_EVENT, 0, NULL);
-               if (ret)
-                       goto err_training_start;
-
-               ret = cdns_mhdp_mailbox_validate_receive(mhdp,
-                                                        MB_MODULE_ID_DP_TX,
-                                                        DPTX_READ_EVENT,
-                                                        sizeof(event));
-               if (ret)
-                       goto err_training_start;
-
-               ret = cdns_mhdp_mailbox_read_receive(mhdp, event,
-                                                    sizeof(event));
-               if (ret)
-                       goto err_training_start;
-
-               if (event[1] & EQ_PHASE_FINISHED)
-                       return 0;
-       }
-
-       ret = -ETIMEDOUT;
-
-err_training_start:
-       DRM_DEV_ERROR(mhdp->dev, "training failed: %d\n", ret);
-       return ret;
-}
-
-static int cdns_mhdp_get_training_status(struct cdns_mhdp_device *mhdp)
-{
-       u8 status[10];
-       int ret;
-
-       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                    DPTX_READ_LINK_STAT, 0, NULL);
-       if (ret)
-               goto err_get_training_status;
-
-       ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
-                                                DPTX_READ_LINK_STAT,
-                                                sizeof(status));
-       if (ret)
-               goto err_get_training_status;
-
-       ret = cdns_mhdp_mailbox_read_receive(mhdp, status, sizeof(status));
-       if (ret)
-               goto err_get_training_status;
-
-       mhdp->link.rate = status[0];
-       mhdp->link.num_lanes = status[1];
-
-err_get_training_status:
-       if (ret)
-               DRM_DEV_ERROR(mhdp->dev, "get training status failed: %d\n",
-                             ret);
-       return ret;
-}
-
-int cdns_mhdp_train_link(struct cdns_mhdp_device *mhdp)
-{
-       int ret;
-
-       ret = cdns_mhdp_training_start(mhdp);
-       if (ret) {
-               DRM_DEV_ERROR(mhdp->dev, "Failed to start training %d\n",
-                             ret);
-               return ret;
-       }
-
-       ret = cdns_mhdp_get_training_status(mhdp);
-       if (ret) {
-               DRM_DEV_ERROR(mhdp->dev, "Failed to get training stat %d\n",
-                             ret);
-               return ret;
-       }
-
-       DRM_DEV_DEBUG_KMS(mhdp->dev, "rate:0x%x, lanes:%d\n", mhdp->link.rate,
-                         mhdp->link.num_lanes);
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_train_link);
-
-int cdns_mhdp_set_video_status(struct cdns_mhdp_device *mhdp, int active)
-{
-       u8 msg;
-       int ret;
-
-       msg = !!active;
-
-       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                    DPTX_SET_VIDEO, sizeof(msg), &msg);
-       if (ret)
-               DRM_DEV_ERROR(mhdp->dev, "set video status failed: %d\n", ret);
-
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_set_video_status);
-
-static int cdns_mhdp_get_msa_misc(struct video_info *video,
-                                 struct drm_display_mode *mode)
-{
-       u32 msa_misc;
-       u8 val[2] = {0};
-
-       switch (video->color_fmt) {
-       case PXL_RGB:
-       case Y_ONLY:
-               val[0] = 0;
-               break;
-       /* set YUV default color space conversion to BT601 */
-       case YCBCR_4_4_4:
-               val[0] = 6 + BT_601 * 8;
-               break;
-       case YCBCR_4_2_2:
-               val[0] = 5 + BT_601 * 8;
-               break;
-       case YCBCR_4_2_0:
-               val[0] = 5;
-               break;
-       }
-
-       switch (video->color_depth) {
-       case 6:
-               val[1] = 0;
-               break;
-       case 8:
-               val[1] = 1;
-               break;
-       case 10:
-               val[1] = 2;
-               break;
-       case 12:
-               val[1] = 3;
-               break;
-       case 16:
-               val[1] = 4;
-               break;
-       }
-
-       msa_misc = 2 * val[0] + 32 * val[1] +
-                  ((video->color_fmt == Y_ONLY) ? (1 << 14) : 0);
-
-       return msa_misc;
-}
-
-int cdns_mhdp_config_video(struct cdns_mhdp_device *mhdp)
-{
-       struct video_info *video = &mhdp->video_info;
-       struct drm_display_mode *mode = &mhdp->mode;
-       u64 symbol;
-       u32 val, link_rate, rem;
-       u8 bit_per_pix, tu_size_reg = TU_SIZE;
-       int ret;
-
-       bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ?
-                     (video->color_depth * 2) : (video->color_depth * 3);
-
-       link_rate = drm_dp_bw_code_to_link_rate(mhdp->link.rate) / 1000;
-
-       ret = cdns_mhdp_reg_write(mhdp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE);
-       if (ret)
-               goto err_config_video;
-
-       ret = cdns_mhdp_reg_write(mhdp, HSYNC2VSYNC_POL_CTRL, 0);
-       if (ret)
-               goto err_config_video;
-
-       /*
-        * get a best tu_size and valid symbol:
-        * 1. chose Lclk freq(162Mhz, 270Mhz, 540Mhz), set TU to 32
-        * 2. calculate VS(valid symbol) = TU * Pclk * Bpp / (Lclk * Lanes)
-        * 3. if VS > *.85 or VS < *.1 or VS < 2 or TU < VS + 4, then set
-        *    TU += 2 and repeat 2nd step.
-        */
-       do {
-               tu_size_reg += 2;
-               symbol = tu_size_reg * mode->clock * bit_per_pix;
-               do_div(symbol, mhdp->link.num_lanes * link_rate * 8);
-               rem = do_div(symbol, 1000);
-               if (tu_size_reg > 64) {
-                       ret = -EINVAL;
-                       DRM_DEV_ERROR(mhdp->dev,
-                                     "tu error, clk:%d, lanes:%d, rate:%d\n",
-                                     mode->clock, mhdp->link.num_lanes,
-                                     link_rate);
-                       goto err_config_video;
-               }
-       } while ((symbol <= 1) || (tu_size_reg - symbol < 4) ||
-                (rem > 850) || (rem < 100));
-
-       val = symbol + (tu_size_reg << 8);
-       val |= TU_CNT_RST_EN;
-       ret = cdns_mhdp_reg_write(mhdp, DP_FRAMER_TU, val);
-       if (ret)
-               goto err_config_video;
-
-       /* set the FIFO Buffer size */
-       val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate;
-       val /= (mhdp->link.num_lanes * link_rate);
-       val = div_u64(8 * (symbol + 1), bit_per_pix) - val;
-       val += 2;
-       ret = cdns_mhdp_reg_write(mhdp, DP_VC_TABLE(15), val);
-
-       switch (video->color_depth) {
-       case 6:
-               val = BCS_6;
-               break;
-       case 8:
-               val = BCS_8;
-               break;
-       case 10:
-               val = BCS_10;
-               break;
-       case 12:
-               val = BCS_12;
-               break;
-       case 16:
-               val = BCS_16;
-               break;
-       }
-
-       val += video->color_fmt << 8;
-       ret = cdns_mhdp_reg_write(mhdp, DP_FRAMER_PXL_REPR, val);
-       if (ret)
-               goto err_config_video;
-
-       val = video->h_sync_polarity ? DP_FRAMER_SP_HSP : 0;
-       val |= video->v_sync_polarity ? DP_FRAMER_SP_VSP : 0;
-       ret = cdns_mhdp_reg_write(mhdp, DP_FRAMER_SP, val);
-       if (ret)
-               goto err_config_video;
-
-       val = (mode->hsync_start - mode->hdisplay) << 16;
-       val |= mode->htotal - mode->hsync_end;
-       ret = cdns_mhdp_reg_write(mhdp, DP_FRONT_BACK_PORCH, val);
-       if (ret)
-               goto err_config_video;
-
-       val = mode->hdisplay * bit_per_pix / 8;
-       ret = cdns_mhdp_reg_write(mhdp, DP_BYTE_COUNT, val);
-       if (ret)
-               goto err_config_video;
-
-       val = mode->htotal | ((mode->htotal - mode->hsync_start) << 16);
-       ret = cdns_mhdp_reg_write(mhdp, MSA_HORIZONTAL_0, val);
-       if (ret)
-               goto err_config_video;
-
-       val = mode->hsync_end - mode->hsync_start;
-       val |= (mode->hdisplay << 16) | (video->h_sync_polarity << 15);
-       ret = cdns_mhdp_reg_write(mhdp, MSA_HORIZONTAL_1, val);
-       if (ret)
-               goto err_config_video;
-
-       val = mode->vtotal;
-       val |= (mode->vtotal - mode->vsync_start) << 16;
-       ret = cdns_mhdp_reg_write(mhdp, MSA_VERTICAL_0, val);
-       if (ret)
-               goto err_config_video;
-
-       val = mode->vsync_end - mode->vsync_start;
-       val |= (mode->vdisplay << 16) | (video->v_sync_polarity << 15);
-       ret = cdns_mhdp_reg_write(mhdp, MSA_VERTICAL_1, val);
-       if (ret)
-               goto err_config_video;
-
-       val = cdns_mhdp_get_msa_misc(video, mode);
-       ret = cdns_mhdp_reg_write(mhdp, MSA_MISC, val);
-       if (ret)
-               goto err_config_video;
-
-       ret = cdns_mhdp_reg_write(mhdp, STREAM_CONFIG, 1);
-       if (ret)
-               goto err_config_video;
-
-       val = mode->hsync_end - mode->hsync_start;
-       val |= mode->hdisplay << 16;
-       ret = cdns_mhdp_reg_write(mhdp, DP_HORIZONTAL, val);
-       if (ret)
-               goto err_config_video;
-
-       val = mode->vdisplay;
-       val |= (mode->vtotal - mode->vsync_start) << 16;
-       ret = cdns_mhdp_reg_write(mhdp, DP_VERTICAL_0, val);
-       if (ret)
-               goto err_config_video;
-
-       val = mode->vtotal;
-       ret = cdns_mhdp_reg_write(mhdp, DP_VERTICAL_1, val);
-       if (ret)
-               goto err_config_video;
-
-       ret = cdns_mhdp_reg_write_bit(mhdp, DP_VB_ID, 2, 1, 0);
-
-err_config_video:
-       if (ret)
-               DRM_DEV_ERROR(mhdp->dev, "config video failed: %d\n", ret);
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_config_video);
-
-int cdns_mhdp_audio_stop(struct cdns_mhdp_device *mhdp,
-                        struct audio_info *audio)
-{
-       int ret;
-
-       ret = cdns_mhdp_reg_write(mhdp, AUDIO_PACK_CONTROL, 0);
-       if (ret) {
-               DRM_DEV_ERROR(mhdp->dev, "audio stop failed: %d\n", ret);
-               return ret;
-       }
-
-       writel(0, mhdp->regs + SPDIF_CTRL_ADDR);
-
-       /* clearn the audio config and reset */
-       writel(0, mhdp->regs + AUDIO_SRC_CNTL);
-       writel(0, mhdp->regs + AUDIO_SRC_CNFG);
-       writel(AUDIO_SW_RST, mhdp->regs + AUDIO_SRC_CNTL);
-       writel(0, mhdp->regs + AUDIO_SRC_CNTL);
-
-       /* reset smpl2pckt component  */
-       writel(0, mhdp->regs + SMPL2PKT_CNTL);
-       writel(AUDIO_SW_RST, mhdp->regs + SMPL2PKT_CNTL);
-       writel(0, mhdp->regs + SMPL2PKT_CNTL);
-
-       /* reset FIFO */
-       writel(AUDIO_SW_RST, mhdp->regs + FIFO_CNTL);
-       writel(0, mhdp->regs + FIFO_CNTL);
-
-       if (audio->format == AFMT_SPDIF)
-               clk_disable_unprepare(mhdp->spdif_clk);
-
-       return 0;
-}
-EXPORT_SYMBOL(cdns_mhdp_audio_stop);
-
-int cdns_mhdp_audio_mute(struct cdns_mhdp_device *mhdp, bool enable)
-{
-       int ret;
-
-       ret = cdns_mhdp_reg_write_bit(mhdp, DP_VB_ID, 4, 1, enable);
-       if (ret)
-               DRM_DEV_ERROR(mhdp->dev, "audio mute failed: %d\n", ret);
-
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_audio_mute);
-
-static void cdns_mhdp_audio_config_i2s(struct cdns_mhdp_device *mhdp,
-                                      struct audio_info *audio)
-{
-       int sub_pckt_num = 1, i2s_port_en_val = 0xf, i;
-       u32 val;
-
-       if (audio->channels == 2) {
-               if (mhdp->link.num_lanes == 1)
-                       sub_pckt_num = 2;
-               else
-                       sub_pckt_num = 4;
-
-               i2s_port_en_val = 1;
-       } else if (audio->channels == 4) {
-               i2s_port_en_val = 3;
-       }
-
-       writel(0x0, mhdp->regs + SPDIF_CTRL_ADDR);
-
-       writel(SYNC_WR_TO_CH_ZERO, mhdp->regs + FIFO_CNTL);
-
-       val = MAX_NUM_CH(audio->channels);
-       val |= NUM_OF_I2S_PORTS(audio->channels);
-       val |= AUDIO_TYPE_LPCM;
-       val |= CFG_SUB_PCKT_NUM(sub_pckt_num);
-       writel(val, mhdp->regs + SMPL2PKT_CNFG);
-
-       if (audio->sample_width == 16)
-               val = 0;
-       else if (audio->sample_width == 24)
-               val = 1 << 9;
-       else
-               val = 2 << 9;
-
-       val |= AUDIO_CH_NUM(audio->channels);
-       val |= I2S_DEC_PORT_EN(i2s_port_en_val);
-       val |= TRANS_SMPL_WIDTH_32;
-       writel(val, mhdp->regs + AUDIO_SRC_CNFG);
-
-       for (i = 0; i < (audio->channels + 1) / 2; i++) {
-               if (audio->sample_width == 16)
-                       val = (0x02 << 8) | (0x02 << 20);
-               else if (audio->sample_width == 24)
-                       val = (0x0b << 8) | (0x0b << 20);
-
-               val |= ((2 * i) << 4) | ((2 * i + 1) << 16);
-               writel(val, mhdp->regs + STTS_BIT_CH(i));
-       }
-
-       switch (audio->sample_rate) {
-       case 32000:
-               val = SAMPLING_FREQ(3) |
-                     ORIGINAL_SAMP_FREQ(0xc);
-               break;
-       case 44100:
-               val = SAMPLING_FREQ(0) |
-                     ORIGINAL_SAMP_FREQ(0xf);
-               break;
-       case 48000:
-               val = SAMPLING_FREQ(2) |
-                     ORIGINAL_SAMP_FREQ(0xd);
-               break;
-       case 88200:
-               val = SAMPLING_FREQ(8) |
-                     ORIGINAL_SAMP_FREQ(0x7);
-               break;
-       case 96000:
-               val = SAMPLING_FREQ(0xa) |
-                     ORIGINAL_SAMP_FREQ(5);
-               break;
-       case 176400:
-               val = SAMPLING_FREQ(0xc) |
-                     ORIGINAL_SAMP_FREQ(3);
-               break;
-       case 192000:
-               val = SAMPLING_FREQ(0xe) |
-                     ORIGINAL_SAMP_FREQ(1);
-               break;
-       }
-       val |= 4;
-       writel(val, mhdp->regs + COM_CH_STTS_BITS);
-
-       writel(SMPL2PKT_EN, mhdp->regs + SMPL2PKT_CNTL);
-       writel(I2S_DEC_START, mhdp->regs + AUDIO_SRC_CNTL);
-}
-
-static void cdns_mhdp_audio_config_spdif(struct cdns_mhdp_device *mhdp)
-{
-       u32 val;
-
-       writel(SYNC_WR_TO_CH_ZERO, mhdp->regs + FIFO_CNTL);
-
-       val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
-       writel(val, mhdp->regs + SMPL2PKT_CNFG);
-       writel(SMPL2PKT_EN, mhdp->regs + SMPL2PKT_CNTL);
-
-       val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
-       writel(val, mhdp->regs + SPDIF_CTRL_ADDR);
-
-       clk_prepare_enable(mhdp->spdif_clk);
-       clk_set_rate(mhdp->spdif_clk, CDNS_DP_SPDIF_CLK);
-}
-
-int cdns_mhdp_audio_config(struct cdns_mhdp_device *mhdp,
-                          struct audio_info *audio)
-{
-       int ret;
-
-       /* reset the spdif clk before config */
-       if (audio->format == AFMT_SPDIF) {
-               reset_control_assert(mhdp->spdif_rst);
-               reset_control_deassert(mhdp->spdif_rst);
-       }
-
-       ret = cdns_mhdp_reg_write(mhdp, CM_LANE_CTRL, LANE_REF_CYC);
-       if (ret)
-               goto err_audio_config;
-
-       ret = cdns_mhdp_reg_write(mhdp, CM_CTRL, 0);
-       if (ret)
-               goto err_audio_config;
-
-       if (audio->format == AFMT_I2S)
-               cdns_mhdp_audio_config_i2s(mhdp, audio);
-       else if (audio->format == AFMT_SPDIF)
-               cdns_mhdp_audio_config_spdif(mhdp);
-
-       ret = cdns_mhdp_reg_write(mhdp, AUDIO_PACK_CONTROL, AUDIO_PACK_EN);
-
-err_audio_config:
-       if (ret)
-               DRM_DEV_ERROR(mhdp->dev, "audio config failed: %d\n", ret);
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_audio_config);
-
-int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp,
-                       u8 nlanes, u16 udelay, u8 *lanes_data, u8 *dpcd)
-{
-       u8 payload[7];
-       u8 hdr[5]; /* For DPCD read response header */
-       u32 addr;
-       u8 const nregs = 6; /* Registers 0x202-0x207 */
-       int ret;
-
-       if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
-               DRM_DEV_ERROR(mhdp->dev, "invalid number of lanes: %d\n",
-                             nlanes);
-               ret = -EINVAL;
-               goto err_adjust_lt;
-       }
-
-       payload[0] = nlanes;
-       put_unaligned_be16(udelay, payload + 1);
-       memcpy(payload + 3, lanes_data, nlanes);
-
-       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-                                    DPTX_ADJUST_LT,
-                                    sizeof(payload), payload);
-       if (ret)
-               goto err_adjust_lt;
-
-       /* Yes, read the DPCD read command response */
-       ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
-                                                DPTX_READ_DPCD,
-                                                sizeof(hdr) + nregs);
-       if (ret)
-               goto err_adjust_lt;
-
-       ret = cdns_mhdp_mailbox_read_receive(mhdp, hdr, sizeof(hdr));
-       if (ret)
-               goto err_adjust_lt;
-
-       addr = get_unaligned_be24(hdr + 2);
-       if (addr != DP_LANE0_1_STATUS)
-               goto err_adjust_lt;
-
-       ret = cdns_mhdp_mailbox_read_receive(mhdp, dpcd, nregs);
-
-err_adjust_lt:
-       if (ret)
-               DRM_DEV_ERROR(mhdp->dev, "Failed to adjust Link Training.\n");
-
-       return ret;
-}
-EXPORT_SYMBOL(cdns_mhdp_adjust_lt);
index be397a34c875daf853b2f964747ee8fd57381657..bd53508efaae72e5078595c610fbf081280921c4 100644 (file)
@@ -14,7 +14,7 @@
 #define CDNS_MHDP_J721E_H
 
 #include <linux/platform_device.h>
-#include <drm/bridge/cdns-mhdp-common.h>
+#include "cdns-mhdp.h"
 
 struct cdns_mhdp_j721e_wrap;
 
index d1f21f260c19327560a8fb3b5a1c42d4391c5a9f..fffd048c2187c91444b7958394f2b21af5c929a3 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/phy/phy.h>
 #include <linux/phy/phy-dp.h>
 
-#include <drm/bridge/cdns-mhdp-common.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_connector.h>
 #include <drm/drm_print.h>
 #include <drm/drm_crtc_helper.h>
 
-#include <sound/hdmi-codec.h>
 #include <linux/irq.h>
 #include <linux/of_irq.h>
+#include <linux/of_device.h>
+
+#include <asm/unaligned.h>
 
 #include "cdns-mhdp.h"
 #include "cdns-mhdp-j721e.h"
 
+/* CDNS MHDP Helpers */
+#define MAILBOX_RETRY_US               1000
+#define MAILBOX_TIMEOUT_US             5000000
+
+/* mailbox */
+#define MB_OPCODE_ID                   0
+#define MB_MODULE_ID                   1
+#define MB_SIZE_MSB_ID                 2
+#define MB_SIZE_LSB_ID                 3
+#define MB_DATA_ID                     4
+
+#define MB_MODULE_ID_DP_TX             0x01
+#define MB_MODULE_ID_HDCP_TX           0x07
+#define MB_MODULE_ID_HDCP_RX           0x08
+#define MB_MODULE_ID_HDCP_GENERAL      0x09
+#define MB_MODULE_ID_GENERAL           0x0a
+
+/* general opcode */
+#define GENERAL_MAIN_CONTROL           0x01
+#define GENERAL_TEST_ECHO              0x02
+#define GENERAL_BUS_SETTINGS           0x03
+#define GENERAL_TEST_ACCESS            0x04
+#define GENERAL_REGISTER_READ          0x07
+
+#define DPTX_SET_POWER_MNG                     0x00
+#define DPTX_SET_HOST_CAPABILITIES             0x01
+#define DPTX_GET_EDID                          0x02
+#define DPTX_READ_DPCD                         0x03
+#define DPTX_WRITE_DPCD                                0x04
+#define DPTX_ENABLE_EVENT                      0x05
+#define DPTX_WRITE_REGISTER                    0x06
+#define DPTX_READ_REGISTER                     0x07
+#define DPTX_WRITE_FIELD                       0x08
+#define DPTX_TRAINING_CONTROL                  0x09
+#define DPTX_READ_EVENT                                0x0a
+#define DPTX_READ_LINK_STAT                    0x0b
+#define DPTX_SET_VIDEO                         0x0c
+#define DPTX_SET_AUDIO                         0x0d
+#define DPTX_GET_LAST_AUX_STAUS                        0x0e
+#define DPTX_SET_LINK_BREAK_POINT              0x0f
+#define DPTX_FORCE_LANES                       0x10
+#define DPTX_HPD_STATE                         0x11
+#define DPTX_ADJUST_LT                         0x12
+
+#define FW_STANDBY                             0
+#define FW_ACTIVE                              1
+
+#define DPTX_READ_EVENT_HPD_TO_HIGH            BIT(0)
+#define DPTX_READ_EVENT_HPD_TO_LOW             BIT(1)
+#define DPTX_READ_EVENT_HPD_PULSE              BIT(2)
+#define DPTX_READ_EVENT_HPD_STATE              BIT(3)
+
+static inline u32 get_unaligned_be24(const void *p)
+{
+       const u8 *_p = p;
+
+       return _p[0] << 16 | _p[1] << 8 | _p[2];
+}
+
+static inline void put_unaligned_be24(u32 val, void *p)
+{
+       u8 *_p = p;
+
+       _p[0] = val >> 16;
+       _p[1] = val >> 8;
+       _p[2] = val;
+}
+
+static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
+{
+       int val, ret;
+
+       WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
+
+       ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
+                                val, !val, MAILBOX_RETRY_US,
+                                MAILBOX_TIMEOUT_US);
+       if (ret < 0)
+               return ret;
+
+       return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
+}
+
+static int cdp_dp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
+{
+       int ret, full;
+
+       WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
+
+       ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
+                                full, !full, MAILBOX_RETRY_US,
+                                MAILBOX_TIMEOUT_US);
+       if (ret < 0)
+               return ret;
+
+       writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
+
+       return 0;
+}
+
+static int cdns_mhdp_mailbox_validate_receive(struct cdns_mhdp_device *mhdp,
+                                             u8 module_id, u8 opcode,
+                                             u16 req_size)
+{
+       u32 mbox_size, i;
+       u8 header[4];
+       int ret;
+
+       /* read the header of the message */
+       for (i = 0; i < 4; i++) {
+               ret = cdns_mhdp_mailbox_read(mhdp);
+               if (ret < 0)
+                       return ret;
+
+               header[i] = ret;
+       }
+
+       mbox_size = get_unaligned_be16(header + 2);
+
+       if (opcode != header[0] || module_id != header[1] ||
+           req_size != mbox_size) {
+               /*
+                * If the message in mailbox is not what we want, we need to
+                * clear the mailbox by reading its contents.
+                */
+               for (i = 0; i < mbox_size; i++)
+                       if (cdns_mhdp_mailbox_read(mhdp) < 0)
+                               break;
+
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int cdns_mhdp_mailbox_read_receive(struct cdns_mhdp_device *mhdp,
+                                         u8 *buff, u16 buff_size)
+{
+       u32 i;
+       int ret;
+
+       for (i = 0; i < buff_size; i++) {
+               ret = cdns_mhdp_mailbox_read(mhdp);
+               if (ret < 0)
+                       return ret;
+
+               buff[i] = ret;
+       }
+
+       return 0;
+}
+
+static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
+                                 u8 opcode, u16 size, u8 *message)
+{
+       u8 header[4];
+       int ret, i;
+
+       header[0] = opcode;
+       header[1] = module_id;
+       put_unaligned_be16(size, header + 2);
+
+       for (i = 0; i < 4; i++) {
+               ret = cdp_dp_mailbox_write(mhdp, header[i]);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < size; i++) {
+               ret = cdp_dp_mailbox_write(mhdp, message[i]);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static
+int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
+{
+       u8 msg[4], resp[8];
+       int ret;
+
+       if (addr == 0) {
+               ret = -EINVAL;
+               goto err_reg_read;
+       }
+
+       put_unaligned_be32(addr, msg);
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
+                                    GENERAL_REGISTER_READ,
+                                    sizeof(msg), msg);
+       if (ret)
+               goto err_reg_read;
+
+       ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_GENERAL,
+                                                GENERAL_REGISTER_READ,
+                                                sizeof(resp));
+       if (ret)
+               goto err_reg_read;
+
+       ret = cdns_mhdp_mailbox_read_receive(mhdp, resp, sizeof(resp));
+       if (ret)
+               goto err_reg_read;
+
+       /* Returned address value should be the same as requested */
+       if (memcmp(msg, resp, sizeof(msg))) {
+               ret = -EINVAL;
+               goto err_reg_read;
+       }
+
+       *value = get_unaligned_be32(resp + 4);
+
+err_reg_read:
+       mutex_unlock(&mhdp->mbox_mutex);
+       if (ret) {
+               DRM_DEV_ERROR(mhdp->dev, "Failed to read register.\n");
+               *value = 0;
+       }
+
+       return ret;
+}
+
+static
+int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
+{
+       u8 msg[6];
+       int ret;
+
+       put_unaligned_be16(addr, msg);
+       put_unaligned_be32(val, msg + 2);
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_WRITE_REGISTER, sizeof(msg), msg);
+
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       return ret;
+}
+
+static
+int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
+                           u8 start_bit, u8 bits_no, u32 val)
+{
+       u8 field[8];
+       int ret;
+
+       put_unaligned_be16(addr, field);
+       field[2] = start_bit;
+       field[3] = bits_no;
+       put_unaligned_be32(val, field + 4);
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_WRITE_FIELD, sizeof(field), field);
+
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       return ret;
+}
+
+static
+int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
+                       u32 addr, u8 *data, u16 len)
+{
+       u8 msg[5], reg[5];
+       int ret;
+
+       put_unaligned_be16(len, msg);
+       put_unaligned_be24(addr, msg + 2);
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_READ_DPCD, sizeof(msg), msg);
+       if (ret)
+               goto err_dpcd_read;
+
+       ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
+                                                DPTX_READ_DPCD,
+                                                sizeof(reg) + len);
+       if (ret)
+               goto err_dpcd_read;
+
+       ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
+       if (ret)
+               goto err_dpcd_read;
+
+       ret = cdns_mhdp_mailbox_read_receive(mhdp, data, len);
+
+err_dpcd_read:
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       return ret;
+}
+
+static
+int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
+{
+       u8 msg[6], reg[5];
+       int ret;
+
+       put_unaligned_be16(1, msg);
+       put_unaligned_be24(addr, msg + 2);
+       msg[5] = value;
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_WRITE_DPCD, sizeof(msg), msg);
+       if (ret)
+               goto err_dpcd_write;
+
+       ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
+                                                DPTX_WRITE_DPCD, sizeof(reg));
+       if (ret)
+               goto err_dpcd_write;
+
+       ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
+       if (ret)
+               goto err_dpcd_write;
+
+       if (addr != get_unaligned_be24(reg + 2))
+               ret = -EINVAL;
+
+err_dpcd_write:
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       if (ret)
+               DRM_DEV_ERROR(mhdp->dev, "dpcd write failed: %d\n", ret);
+       return ret;
+}
+
+static
+int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
+{
+       u8 msg[5];
+       int ret, i;
+
+       msg[0] = GENERAL_MAIN_CONTROL;
+       msg[1] = MB_MODULE_ID_GENERAL;
+       msg[2] = 0;
+       msg[3] = 1;
+       msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       for (i = 0; i < sizeof(msg); i++) {
+               ret = cdp_dp_mailbox_write(mhdp, msg[i]);
+               if (ret)
+                       goto err_set_firmware_active;
+       }
+
+       /* read the firmware state */
+       for (i = 0; i < sizeof(msg); i++)  {
+               ret = cdns_mhdp_mailbox_read(mhdp);
+               if (ret < 0)
+                       goto err_set_firmware_active;
+
+               msg[i] = ret;
+       }
+
+       ret = 0;
+
+err_set_firmware_active:
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       if (ret < 0)
+               DRM_DEV_ERROR(mhdp->dev, "set firmware active failed\n");
+       return ret;
+}
+
+static
+int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
+{
+       u8 status;
+       int ret;
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_HPD_STATE, 0, NULL);
+       if (ret)
+               goto err_get_hpd;
+
+       ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
+                                                DPTX_HPD_STATE,
+                                                sizeof(status));
+       if (ret)
+               goto err_get_hpd;
+
+       ret = cdns_mhdp_mailbox_read_receive(mhdp, &status, sizeof(status));
+       if (ret)
+               goto err_get_hpd;
+
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       return status;
+
+err_get_hpd:
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       DRM_DEV_ERROR(mhdp->dev, "get hpd status failed: %d\n", ret);
+       return ret;
+}
+
+static
+int cdns_mhdp_get_edid_block(void *data, u8 *edid,
+                            unsigned int block, size_t length)
+{
+       struct cdns_mhdp_device *mhdp = data;
+       u8 msg[2], reg[2], i;
+       int ret;
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       for (i = 0; i < 4; i++) {
+               msg[0] = block / 2;
+               msg[1] = block % 2;
+
+               ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                            DPTX_GET_EDID, sizeof(msg), msg);
+               if (ret)
+                       continue;
+
+               ret = cdns_mhdp_mailbox_validate_receive(mhdp,
+                                                        MB_MODULE_ID_DP_TX,
+                                                        DPTX_GET_EDID,
+                                                        sizeof(reg) + length);
+               if (ret)
+                       continue;
+
+               ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
+               if (ret)
+                       continue;
+
+               ret = cdns_mhdp_mailbox_read_receive(mhdp, edid, length);
+               if (ret)
+                       continue;
+
+               if (reg[0] == length && reg[1] == block / 2)
+                       break;
+       }
+
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       if (ret)
+               DRM_DEV_ERROR(mhdp->dev, "get block[%d] edid failed: %d\n",
+                             block, ret);
+
+       return ret;
+}
+
+static
+int cdns_mhdp_read_event(struct cdns_mhdp_device *mhdp)
+{
+       u8 event = 0;
+       int ret;
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_READ_EVENT, 0, NULL);
+       if (ret)
+               goto out;
+
+       ret = cdns_mhdp_mailbox_validate_receive(mhdp,
+                                                MB_MODULE_ID_DP_TX,
+                                                DPTX_READ_EVENT,
+                                                sizeof(event));
+       if (ret < 0)
+               goto out;
+
+       ret = cdns_mhdp_mailbox_read_receive(mhdp, &event,
+                                            sizeof(event));
+out:
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       if (ret < 0)
+               return ret;
+
+       return event;
+}
+
+static
+int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp,
+                       u8 nlanes, u16 udelay, u8 *lanes_data, u8 *link_status)
+{
+       u8 payload[7];
+       u8 hdr[5]; /* For DPCD read response header */
+       u32 addr;
+       u8 const nregs = 6; /* Registers 0x202-0x207 */
+       int ret;
+
+       if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
+               DRM_DEV_ERROR(mhdp->dev, "invalid number of lanes: %d\n",
+                             nlanes);
+               ret = -EINVAL;
+               goto err_adjust_lt;
+       }
+
+       payload[0] = nlanes;
+       put_unaligned_be16(udelay, payload + 1);
+       memcpy(payload + 3, lanes_data, nlanes);
+
+       mutex_lock(&mhdp->mbox_mutex);
+
+       ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+                                    DPTX_ADJUST_LT,
+                                    sizeof(payload), payload);
+       if (ret)
+               goto err_adjust_lt;
+
+       /* Yes, read the DPCD read command response */
+       ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
+                                                DPTX_READ_DPCD,
+                                                sizeof(hdr) + nregs);
+       if (ret)
+               goto err_adjust_lt;
+
+       ret = cdns_mhdp_mailbox_read_receive(mhdp, hdr, sizeof(hdr));
+       if (ret)
+               goto err_adjust_lt;
+
+       addr = get_unaligned_be24(hdr + 2);
+       if (addr != DP_LANE0_1_STATUS)
+               goto err_adjust_lt;
+
+       ret = cdns_mhdp_mailbox_read_receive(mhdp, link_status, nregs);
+
+err_adjust_lt:
+       mutex_unlock(&mhdp->mbox_mutex);
+
+       if (ret)
+               DRM_DEV_ERROR(mhdp->dev, "Failed to adjust Link Training.\n");
+
+       return ret;
+}
+
+/* EOF CDNS MHDP Helpers */
+
 #define FW_NAME                                        "cadence/mhdp8546.bin"
 #define CDNS_MHDP_IMEM                         0x10000
 
 
 #define CDNS_KEEP_ALIVE_TIMEOUT                        2000
 
+#ifdef CONFIG_DRM_CDNS_MHDP_J721E
+static const struct mhdp_platform_ops mhdp_ti_j721e_ops = {
+       .init = cdns_mhdp_j721e_init,
+       .exit = cdns_mhdp_j721e_fini,
+       .enable = cdns_mhdp_j721e_enable,
+       .disable = cdns_mhdp_j721e_disable,
+};
+#endif
+
 static const struct of_device_id mhdp_ids[] = {
        { .compatible = "cdns,mhdp8546", },
+#ifdef CONFIG_DRM_CDNS_MHDP_J721E
+       { .compatible = "ti,j721e-mhdp8546", .data = &mhdp_ti_j721e_ops },
+#endif
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, mhdp_ids);
@@ -75,38 +635,218 @@ MODULE_DEVICE_TABLE(of, mhdp_ids);
 #define CDNS_DP_TEST_VSC_SDP                   (1 << 6) /* 1.3+ */
 #define CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY   (1 << 7)
 
-static inline struct cdns_mhdp_device *connector_to_mhdp(
-       struct drm_connector *conn)
+static unsigned int max_link_rate(struct cdns_mhdp_host host,
+                                 struct cdns_mhdp_sink sink)
 {
-       struct cdns_mhdp_connector *mhdp_connector = to_mhdp_connector(conn);
+       return min(host.link_rate, sink.link_rate);
+}
 
-       return mhdp_connector->bridge->mhdp;
+static u8 mhdp_max_num_lanes(struct cdns_mhdp_host host,
+                            struct cdns_mhdp_sink sink)
+{
+       return min_t(u8, sink.lanes_cnt, host.lanes_cnt);
 }
 
-static inline struct cdns_mhdp_device *bridge_to_mhdp(
-       struct drm_bridge *bridge)
+static u8 eq_training_pattern_supported(struct cdns_mhdp_host host,
+                                       struct cdns_mhdp_sink sink)
 {
-       struct cdns_mhdp_bridge *mhdp_bridge = to_mhdp_bridge(bridge);
+       return fls(host.pattern_supp & sink.pattern_supp);
+}
 
-       return mhdp_bridge->mhdp;
+static bool mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
+{
+       /* Check if SSC is supported by both sides */
+       return (mhdp->host.ssc) && (mhdp->sink.ssc);
 }
 
-static unsigned int max_link_rate(struct cdns_mhdp_host host,
-                                 struct cdns_mhdp_sink sink)
+static int mhdp_fw_activate(const struct firmware *fw,
+                           struct cdns_mhdp_device *mhdp)
 {
-       return min(host.link_rate, sink.link_rate);
+       unsigned int reg;
+       int ret = 0;
+
+       dev_dbg(mhdp->dev, "%s\n", __func__);
+
+       if (!fw || !fw->data) {
+               dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
+               return -EINVAL;
+       }
+
+       spin_lock(&mhdp->start_lock);
+       if (mhdp->hw_state != MHDP_HW_INACTIVE) {
+               spin_unlock(&mhdp->start_lock);
+               if (mhdp->hw_state != MHDP_HW_STOPPED)
+                       dev_err(mhdp->dev, "%s: Bad HW state: %d\n",
+                               __func__, mhdp->hw_state);
+               return -EBUSY;
+       }
+       mhdp->hw_state = MHDP_HW_LOADING;
+       spin_unlock(&mhdp->start_lock);
+
+       /* Release uCPU reset and stall it. */
+       writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);
+
+       memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);
+
+       /* Leave debug mode, release stall */
+       writel(0, mhdp->regs + CDNS_APB_CTRL);
+
+       /*
+        * Wait for the KEEP_ALIVE "message" on the first 8 bits.
+        * Updated each sched "tick" (~2ms)
+        */
+       ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
+                                reg & CDNS_KEEP_ALIVE_MASK, 500,
+                                CDNS_KEEP_ALIVE_TIMEOUT);
+       if (ret) {
+               dev_err(mhdp->dev,
+                       "device didn't give any life sign: reg %d\n", reg);
+               goto error;
+       }
+
+       /* Init events to 0 as it's not cleared by FW at boot but on read */
+       readl(mhdp->regs + CDNS_SW_EVENT0);
+       readl(mhdp->regs + CDNS_SW_EVENT1);
+       readl(mhdp->regs + CDNS_SW_EVENT2);
+       readl(mhdp->regs + CDNS_SW_EVENT3);
+
+       /* Activate uCPU */
+       ret = cdns_mhdp_set_firmware_active(mhdp, true);
+       if (ret) {
+               dev_err(mhdp->dev, "%s: Failed to activate FW: %d\n",
+                       __func__, ret);
+               goto error;
+       }
+
+       spin_lock(&mhdp->start_lock);
+
+       mhdp->hw_state = MHDP_HW_READY;
+
+       /*
+        * Here we must keep the lock while enabling the interrupts
+        * since it would otherwise be possible that interrupt enable
+        * code is executed after the bridge is detached. The similar
+        * situation is not possible in attach()/detach() callbacks
+        * since the hw_state changes from MHDP_HW_READY to
+        * MHDP_HW_STOPPED happens only due to driver removal when
+        * bridge should already be detached.
+        */
+       if (mhdp->bridge_attached) {
+               /* enable interrupts */
+               writel(0, mhdp->regs + CDNS_APB_INT_MASK);
+               writel(0, mhdp->regs + CDNS_MB_INT_MASK);
+       }
+
+       spin_unlock(&mhdp->start_lock);
+
+       dev_dbg(mhdp->dev, "DP FW activated\n");
+
+       return 0;
+error:
+       spin_lock(&mhdp->start_lock);
+       mhdp->hw_state = MHDP_HW_INACTIVE;
+       spin_unlock(&mhdp->start_lock);
+
+       return ret;
 }
 
-static u8 eq_training_pattern_supported(struct cdns_mhdp_host host,
-                                       struct cdns_mhdp_sink sink)
+static void mhdp_fw_cb(const struct firmware *fw, void *context)
 {
-       return fls(host.pattern_supp & sink.pattern_supp);
+       struct cdns_mhdp_device *mhdp = context;
+       bool bridge_attached;
+       int ret;
+
+       dev_dbg(mhdp->dev, "firmware callback\n");
+
+       ret = mhdp_fw_activate(fw, mhdp);
+
+       release_firmware(fw);
+
+       if (ret)
+               return;
+
+       /*
+        *  XXX how to make sure the bridge is still attached when
+        *      calling drm_kms_helper_hotplug_event() after releasing
+        *      the lock? We should not hold the spin lock when
+        *      calling drm_kms_helper_hotplug_event() since it may
+        *      cause a dead lock. FB-dev console calls detect from the
+        *      same thread just down the call stack started here.
+        */
+       spin_lock(&mhdp->start_lock);
+       bridge_attached = mhdp->bridge_attached;
+       spin_unlock(&mhdp->start_lock);
+       if (bridge_attached)
+               drm_kms_helper_hotplug_event(mhdp->bridge.dev);
+}
+
+static int load_firmware(struct cdns_mhdp_device *mhdp)
+{
+       int ret;
+
+       ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
+                                     GFP_KERNEL, mhdp, mhdp_fw_cb);
+       if (ret) {
+               dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
+                       FW_NAME, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void mhdp_check_link(struct cdns_mhdp_device *mhdp)
+{
+       struct drm_connector *conn = &mhdp->connector;
+       u8 status[DP_LINK_STATUS_SIZE];
+       bool hpd_state;
+       int hpd_event;
+       int ret;
+
+       /* Nothing to check if there is no link */
+       if (!mhdp->link_up)
+               return;
+
+       hpd_event = cdns_mhdp_read_event(mhdp);
+
+       /* Geting event bits failed, bail out */
+       if (hpd_event < 0) {
+               dev_warn(mhdp->dev, "%s: read event failed: %d\n",
+                        __func__, hpd_event);
+               return;
+       }
+
+       hpd_state = !!(hpd_event & DPTX_READ_EVENT_HPD_STATE);
+
+       /* No point the check the link if HPD is down (cable is unplugged) */
+       if (!hpd_state)
+               return;
+
+       /*
+        * Prevent display reconfiguration between link check and link
+        * status property setting. We must use the legacy giant-lock
+        * since drm_connector_set_link_status_property()'s fine
+        * grained DRM locking implementation is broken.
+        */
+       mutex_lock(&conn->dev->mode_config.mutex);
+
+       /* Check if the link is still up */
+       ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);
+
+       if (ret < 0 || /* If dpcd read fails, assume the link is down too */
+           !drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) ||
+           !drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
+               /* Link is broken, indicate it with the link status property */
+               drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
+
+       mutex_unlock(&conn->dev->mode_config.mutex);
 }
 
 static irqreturn_t mhdp_irq_handler(int irq, void *data)
 {
        struct cdns_mhdp_device *mhdp = (struct cdns_mhdp_device *)data;
        u32 mbox_stat, apb_stat, sw_ev0, sw_ev1, sw_ev2, sw_ev3;
+       bool bridge_attached;
 
        apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
        mbox_stat = readl(mhdp->regs + CDNS_MB_INT_STATUS);
@@ -117,8 +857,21 @@ static irqreturn_t mhdp_irq_handler(int irq, void *data)
 
        //dev_dbg(mhdp->dev, "MHDP IRQ apb %x, mbox %x, sw_ev %x/%x/%x/%x\n", apb_stat, mbox_stat, sw_ev0, sw_ev1, sw_ev2, sw_ev3);
 
-       if (sw_ev0 & CDNS_DPTX_HPD)
-               drm_kms_helper_hotplug_event(mhdp->bridge.base.dev);
+       /*
+        *  Calling drm_kms_helper_hotplug_event() when not attached
+        *  to drm device causes an oops because the drm_bridge->dev
+        *  is NULL. See mhdp_fw_cb() comments for details about the
+        *  problems related drm_kms_helper_hotplug_event() call.
+        */
+       spin_lock(&mhdp->start_lock);
+       bridge_attached = mhdp->bridge_attached;
+       spin_unlock(&mhdp->start_lock);
+
+       if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
+               mhdp_check_link(mhdp);
+
+               drm_kms_helper_hotplug_event(mhdp->bridge.dev);
+       }
 
        return IRQ_HANDLED;
 }
@@ -181,13 +934,15 @@ static int cdns_mhdp_get_modes(struct drm_connector *connector)
         * HACK: Warn about unsupported display formats until we deal
         *       with them correctly.
         */
-       if (!(connector->display_info.color_formats &
+       if (connector->display_info.color_formats &&
+           !(connector->display_info.color_formats &
              mhdp->display_fmt.color_format))
                dev_warn(mhdp->dev,
                         "%s: No supported color_format found (0x%08x)\n",
                        __func__, connector->display_info.color_formats);
 
-       if (connector->display_info.bpc < mhdp->display_fmt.bpc)
+       if (connector->display_info.bpc &&
+           connector->display_info.bpc < mhdp->display_fmt.bpc)
                dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
                         __func__, connector->display_info.bpc,
                         mhdp->display_fmt.bpc);
@@ -200,8 +955,20 @@ static int cdns_mhdp_detect(struct drm_connector *conn,
                            bool force)
 {
        struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
+       bool hw_ready;
        int ret;
 
+       dev_dbg(mhdp->dev, "%s\n", __func__);
+
+       spin_lock(&mhdp->start_lock);
+
+       hw_ready = mhdp->hw_state == MHDP_HW_READY;
+
+       spin_unlock(&mhdp->start_lock);
+
+       if (!hw_ready || WARN_ON(!mhdp->bridge_attached))
+               return connector_status_disconnected;
+
        ret = cdns_mhdp_get_hpd_status(mhdp);
        if (ret > 0) {
                mhdp->plugged = true;
@@ -215,9 +982,46 @@ static int cdns_mhdp_detect(struct drm_connector *conn,
        return connector_status_disconnected;
 }
 
+static
+bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
+                           const struct drm_display_mode *mode,
+                           int lanes, int rate)
+{
+       u32 max_bw, req_bw, bpp;
+
+       bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
+       req_bw = mode->clock * bpp / 8;
+
+       max_bw = lanes * rate;
+
+       if (req_bw > max_bw) {
+               dev_dbg(mhdp->dev, "%s: %s (%u * %u/8 =) %u > %u (= %u * %u)\n",
+                       __func__, mode->name, mode->clock, bpp, req_bw,
+                       max_bw, lanes, rate);
+
+               return false;
+       }
+
+       return true;
+}
+
+static
+enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
+                                         struct drm_display_mode *mode)
+{
+       struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
+
+       if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->host.lanes_cnt,
+                                   mhdp->host.link_rate))
+               return MODE_CLOCK_HIGH;
+
+       return MODE_OK;
+}
+
 static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
        .detect_ctx = cdns_mhdp_detect,
        .get_modes = cdns_mhdp_get_modes,
+       .mode_valid = cdns_mhdp_mode_valid,
 };
 
 static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
@@ -232,10 +1036,13 @@ static int cdns_mhdp_attach(struct drm_bridge *bridge)
 {
        struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
        u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
-       struct drm_connector *conn = &mhdp->connector.base;
+       struct drm_connector *conn = &mhdp->connector;
+       bool hw_ready;
        int ret;
 
-       if (&mhdp->bridge.base != bridge)
+       dev_dbg(mhdp->dev, "%s\n", __func__);
+
+       if (&mhdp->bridge != bridge)
                return -ENODEV;
 
        conn->polled = DRM_CONNECTOR_POLL_HPD;
@@ -269,19 +1076,30 @@ static int cdns_mhdp_attach(struct drm_bridge *bridge)
                return ret;
        }
 
-       /* enable interrupts */
+       spin_lock(&mhdp->start_lock);
+
+       mhdp->bridge_attached = true;
+       hw_ready = mhdp->hw_state == MHDP_HW_READY;
+
+       spin_unlock(&mhdp->start_lock);
+
+       if (hw_ready) {
+               /* enable interrupts */
+               writel(0, mhdp->regs + CDNS_APB_INT_MASK);
+               writel(0, mhdp->regs + CDNS_MB_INT_MASK);
+       }
+
        //writel(~CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
-       writel(0, mhdp->regs + CDNS_APB_INT_MASK);
-       writel(0, mhdp->regs + CDNS_MB_INT_MASK);
 
        return 0;
 }
 
-static void mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
+static int mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
 {
        u32 reg32;
        u8 i;
        union phy_configure_opts phy_cfg;
+       int ret;
 
        drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
                           DP_TRAINING_PATTERN_DISABLE);
@@ -306,11 +1124,16 @@ static void mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
                phy_cfg.dp.voltage[i] = 0;
                phy_cfg.dp.pre[i] = 0;
        }
-       phy_cfg.dp.ssc = false;
+       phy_cfg.dp.ssc = mhdp_get_ssc_supported(mhdp);
        phy_cfg.dp.set_lanes = true;
        phy_cfg.dp.set_rate = true;
        phy_cfg.dp.set_voltages = true;
-       phy_configure(mhdp->phy,  &phy_cfg);
+       ret = phy_configure(mhdp->phy,  &phy_cfg);
+       if (ret) {
+               dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
+                       __func__, ret);
+               return ret;
+       }
 
        cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
                            CDNS_PHY_COMMON_CONFIG |
@@ -320,6 +1143,8 @@ static void mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
 
        drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
                           DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);
+
+       return 0;
 }
 
 static void mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
@@ -416,9 +1241,10 @@ static bool mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
                                          unsigned int training_interval)
 {
        u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
-       u8 dpcd[DP_LINK_STATUS_SIZE];
+       u8 link_status[DP_LINK_STATUS_SIZE];
        u32 reg32;
        union phy_configure_opts phy_cfg;
+       int ret;
 
        dev_dbg(mhdp->dev, "Starting EQ phase\n");
 
@@ -433,31 +1259,36 @@ static bool mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
                           (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
                           CDNS_DP_TRAINING_PATTERN_4);
 
-       drm_dp_dpcd_read_link_status(&mhdp->aux, dpcd);
+       drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
 
        do {
-               mhdp_get_adjust_train(mhdp, dpcd, lanes_data, &phy_cfg);
+               mhdp_get_adjust_train(mhdp, link_status, lanes_data, &phy_cfg);
                phy_cfg.dp.lanes = (mhdp->link.num_lanes);
-               phy_cfg.dp.ssc = false;
+               phy_cfg.dp.ssc = mhdp_get_ssc_supported(mhdp);
                phy_cfg.dp.set_lanes = false;
                phy_cfg.dp.set_rate = false;
                phy_cfg.dp.set_voltages = true;
-               phy_configure(mhdp->phy,  &phy_cfg);
+               ret = phy_configure(mhdp->phy,  &phy_cfg);
+               if (ret) {
+                       dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
+                               __func__, ret);
+                       goto err;
+               }
 
                cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
-                                   training_interval, lanes_data, dpcd);
+                                   training_interval, lanes_data, link_status);
 
-               if (!drm_dp_clock_recovery_ok(dpcd, mhdp->link.num_lanes))
+               if (!drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes))
                        goto err;
 
-               if (drm_dp_channel_eq_ok(dpcd, mhdp->link.num_lanes)) {
+               if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
                        dev_dbg(mhdp->dev, "EQ phase succeeded\n");
                        return true;
                }
 
                fail_counter_short++;
 
-               mhdp_adjust_requested_eq(mhdp, dpcd);
+               mhdp_adjust_requested_eq(mhdp, link_status);
        } while (fail_counter_short < 5);
 
 err:
@@ -528,38 +1359,46 @@ static void mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
        }
 }
 
-static bool mhdp_link_training_clock_recovery(struct cdns_mhdp_device *mhdp)
+static bool mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
 {
        u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
        fail_counter_short = 0, fail_counter_cr_long = 0;
-       u8 dpcd[DP_LINK_STATUS_SIZE];
+       u8 link_status[DP_LINK_STATUS_SIZE];
        bool cr_done;
        union phy_configure_opts phy_cfg;
+       int ret;
 
        dev_dbg(mhdp->dev, "Starting CR phase\n");
 
-       mhdp_link_training_init(mhdp);
+       ret = mhdp_link_training_init(mhdp);
+       if (ret)
+               goto err;
 
-       drm_dp_dpcd_read_link_status(&mhdp->aux, dpcd);
+       drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
 
        do {
-               u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {},
-                                                                       requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
+               u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
+               u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
                bool same_before_adjust, max_swing_reached;
 
-               mhdp_get_adjust_train(mhdp, dpcd, lanes_data, &phy_cfg);
+               mhdp_get_adjust_train(mhdp, link_status, lanes_data, &phy_cfg);
                phy_cfg.dp.lanes = (mhdp->link.num_lanes);
-               phy_cfg.dp.ssc = false;
+               phy_cfg.dp.ssc = mhdp_get_ssc_supported(mhdp);
                phy_cfg.dp.set_lanes = false;
                phy_cfg.dp.set_rate = false;
                phy_cfg.dp.set_voltages = true;
-               phy_configure(mhdp->phy,  &phy_cfg);
+               ret = phy_configure(mhdp->phy,  &phy_cfg);
+               if (ret) {
+                       dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
+                               __func__, ret);
+                       goto err;
+               }
 
                cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
-                                   lanes_data, dpcd);
+                                   lanes_data, link_status);
 
                mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
-                                &max_swing_reached, lanes_data, dpcd,
+                                &max_swing_reached, lanes_data, link_status,
                                 requested_adjust_volt_swing,
                                 requested_adjust_pre_emphasis);
 
@@ -586,7 +1425,7 @@ static bool mhdp_link_training_clock_recovery(struct cdns_mhdp_device *mhdp)
                 * Voltage swing/pre-emphasis adjust requested
                 * during CR phase
                 */
-               mhdp_adjust_requested_cr(mhdp, dpcd,
+               mhdp_adjust_requested_cr(mhdp, link_status,
                                         requested_adjust_volt_swing,
                                         requested_adjust_pre_emphasis);
        } while (fail_counter_short < 5 && fail_counter_cr_long < 10);
@@ -617,24 +1456,15 @@ static int mhdp_link_training(struct cdns_mhdp_device *mhdp,
                              unsigned int training_interval)
 {
        u32 reg32;
-       union phy_configure_opts phy_cfg;
        const u8 eq_tps = eq_training_pattern_supported(mhdp->host, mhdp->sink);
 
        while (1) {
-               if (!mhdp_link_training_clock_recovery(mhdp)) {
+               if (!mhdp_link_training_cr(mhdp)) {
                        if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
                            DP_LINK_BW_1_62) {
                                dev_dbg(mhdp->dev,
                                        "Reducing link rate during CR phase\n");
                                lower_link_rate(&mhdp->link);
-                               drm_dp_link_configure(&mhdp->aux, &mhdp->link);
-                               phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
-                               phy_cfg.dp.lanes = (mhdp->link.num_lanes);
-                               phy_cfg.dp.ssc = false;
-                               phy_cfg.dp.set_lanes = false;
-                               phy_cfg.dp.set_rate = true;
-                               phy_cfg.dp.set_voltages = false;
-                               phy_configure(mhdp->phy,  &phy_cfg);
 
                                continue;
                        } else if (mhdp->link.num_lanes > 1) {
@@ -643,14 +1473,6 @@ static int mhdp_link_training(struct cdns_mhdp_device *mhdp,
                                mhdp->link.num_lanes >>= 1;
                                mhdp->link.rate = max_link_rate(mhdp->host,
                                                                mhdp->sink);
-                               drm_dp_link_configure(&mhdp->aux, &mhdp->link);
-                               phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
-                               phy_cfg.dp.lanes = (mhdp->link.num_lanes);
-                               phy_cfg.dp.ssc = false;
-                               phy_cfg.dp.set_lanes = true;
-                               phy_cfg.dp.set_rate = false;
-                               phy_cfg.dp.set_voltages = false;
-                               phy_configure(mhdp->phy,  &phy_cfg);
 
                                continue;
                        }
@@ -668,14 +1490,6 @@ static int mhdp_link_training(struct cdns_mhdp_device *mhdp,
                        dev_dbg(mhdp->dev,
                                "Reducing lanes number during EQ phase\n");
                        mhdp->link.num_lanes >>= 1;
-                       drm_dp_link_configure(&mhdp->aux, &mhdp->link);
-                       phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
-                       phy_cfg.dp.lanes = (mhdp->link.num_lanes);
-                       phy_cfg.dp.ssc = false;
-                       phy_cfg.dp.set_lanes = true;
-                       phy_cfg.dp.set_rate = false;
-                       phy_cfg.dp.set_voltages = false;
-                       phy_configure(mhdp->phy,  &phy_cfg);
 
                        continue;
                } else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
@@ -683,14 +1497,7 @@ static int mhdp_link_training(struct cdns_mhdp_device *mhdp,
                        dev_dbg(mhdp->dev,
                                "Reducing link rate during EQ phase\n");
                        lower_link_rate(&mhdp->link);
-                       drm_dp_link_configure(&mhdp->aux, &mhdp->link);
-                       phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
-                       phy_cfg.dp.lanes = (mhdp->link.num_lanes);
-                       phy_cfg.dp.ssc = false;
-                       phy_cfg.dp.set_lanes = false;
-                       phy_cfg.dp.set_rate = true;
-                       phy_cfg.dp.set_voltages = false;
-                       phy_configure(mhdp->phy,  &phy_cfg);
+                       mhdp->link.num_lanes = mhdp_max_num_lanes(mhdp->host, mhdp->sink);
 
                        continue;
                }
@@ -708,6 +1515,7 @@ static int mhdp_link_training(struct cdns_mhdp_device *mhdp,
        cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
        reg32 &= ~GENMASK(1, 0);
        reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
+       reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
        cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);
 
        /* Reset PHY config */
@@ -735,7 +1543,7 @@ static void cdns_mhdp_disable(struct drm_bridge *bridge)
        struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
        u32 resp;
 
-       dev_dbg(mhdp->dev, "bridge disable\n");
+       dev_dbg(mhdp->dev, "%s\n", __func__);
 
        cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
        resp &= ~CDNS_DP_FRAMER_EN;
@@ -752,7 +1560,8 @@ static void cdns_mhdp_disable(struct drm_bridge *bridge)
        cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
                            resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));
 
-       cdns_mhdp_j721e_disable(mhdp);
+       if (mhdp->ops && mhdp->ops->disable)
+               mhdp->ops->disable(mhdp);
 }
 
 static u32 get_training_interval_us(struct cdns_mhdp_device *mhdp,
@@ -767,16 +1576,37 @@ static u32 get_training_interval_us(struct cdns_mhdp_device *mhdp,
        return 0;
 }
 
+static void mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
+                               u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+       mhdp->sink.link_rate = mhdp->link.rate;
+       mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
+       mhdp->sink.enhanced = !!(mhdp->link.capabilities &
+                                DP_LINK_CAP_ENHANCED_FRAMING);
+
+       /* Set SSC support */
+       mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
+                                 DP_MAX_DOWNSPREAD_0_5);
+
+       /* Set TPS support */
+       mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
+       if (drm_dp_tps3_supported(dpcd))
+               mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
+       if (drm_dp_tps4_supported(dpcd))
+               mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
+
+       /* Set fast link support */
+       mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
+                                 DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
+}
+
 static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
 {
        u32 resp;
        u8 reg0[DP_RECEIVER_CAP_SIZE], amp[2];
-
-       /*
-        * Upon power-on reset/device disconnection: [2:0] bits should be 0b001
-        * and [7:5] bits 0b000.
-        */
-       drm_dp_dpcd_writeb(&mhdp->aux, DP_SET_POWER, 1);
+       u8 ext_cap_chk = 0;
+       unsigned int addr;
+       int err;
 
        drm_dp_link_probe(&mhdp->aux, &mhdp->link);
 
@@ -785,25 +1615,23 @@ static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
        /* FIXME (CDNS): do we have to wait for 100ms before going on? */
        mdelay(100);
 
-       mhdp->sink.link_rate = mhdp->link.rate;
-       mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
-       mhdp->sink.enhanced = !!(mhdp->link.capabilities &
-                                DP_LINK_CAP_ENHANCED_FRAMING);
+       drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL, &ext_cap_chk);
 
-       drm_dp_dpcd_read(&mhdp->aux, DP_DPCD_REV, reg0, DP_RECEIVER_CAP_SIZE);
+       if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
+               addr = DP_DP13_DPCD_REV;
+       else
+               addr = DP_DPCD_REV;
 
-       mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
-       if (drm_dp_tps3_supported(reg0))
-               mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
-       if (drm_dp_tps4_supported(reg0))
-               mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
+       err = drm_dp_dpcd_read(&mhdp->aux, addr, reg0, DP_RECEIVER_CAP_SIZE);
+       if (err < 0) {
+               dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
+               return err;
+       }
 
-       mhdp->sink.fast_link = !!(reg0[DP_MAX_DOWNSPREAD] &
-                                 DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
+       mhdp_fill_sink_caps(mhdp, reg0);
 
        mhdp->link.rate = max_link_rate(mhdp->host, mhdp->sink);
-       mhdp->link.num_lanes = min_t(u8, mhdp->sink.lanes_cnt,
-                                    mhdp->host.lanes_cnt & GENMASK(2, 0));
+       mhdp->link.num_lanes = mhdp_max_num_lanes(mhdp->host, mhdp->sink);
 
        /* Disable framer for link training */
        cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
@@ -811,7 +1639,7 @@ static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
        cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
 
        /* Spread AMP if required, enable 8b/10b coding */
-       amp[0] = mhdp->host.ssc ? DP_SPREAD_AMP_0_5 : 0;
+       amp[0] = mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
        amp[1] = DP_SET_ANSI_8B10B;
        drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);
 
@@ -863,8 +1691,7 @@ u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
 
 static int cdns_mhdp_sst_enable(struct drm_bridge *bridge)
 {
-       struct cdns_mhdp_bridge *mhdp_bridge = to_mhdp_bridge(bridge);
-       struct cdns_mhdp_device *mhdp = mhdp_bridge->mhdp;
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
        u32 rate, vs, vs_f, required_bandwidth, available_bandwidth;
        u32 tu_size = 30, line_thresh1, line_thresh2, line_thresh = 0;
        struct drm_display_mode *mode;
@@ -877,12 +1704,20 @@ static int cdns_mhdp_sst_enable(struct drm_bridge *bridge)
        mode = &bridge->encoder->crtc->state->mode;
        pxlclock = mode->crtc_clock;
 
-       mhdp_bridge->stream_id = 0;
+       mhdp->stream_id = 0;
 
        rate = mhdp->link.rate / 1000;
 
        bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
 
+       if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
+                                   mhdp->link.rate)) {
+               dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
+                       __func__, mode->name, mhdp->link.num_lanes,
+                       mhdp->link.rate / 100);
+               return -EINVAL;
+       }
+
        /* find optimal tu_size */
        required_bandwidth = pxlclock * bpp / 8;
        available_bandwidth = mhdp->link.num_lanes * rate;
@@ -899,8 +1734,13 @@ static int cdns_mhdp_sst_enable(struct drm_bridge *bridge)
        } while ((vs == 1 || ((vs_f > 850 || vs_f < 100) && vs_f != 0) ||
                  tu_size - vs < 2) && tu_size < 64);
 
-       if (vs > 64)
+       if (vs > 64) {
+               dev_err(mhdp->dev,
+                       "%s: No space for framing %s (%u lanes at %u Mbps)\n",
+                       __func__, mode->name, mhdp->link.num_lanes,
+                       mhdp->link.rate / 100);
                return -EINVAL;
+       }
 
        cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
                            CDNS_DP_FRAMER_TU_VS(vs) |
@@ -925,8 +1765,7 @@ static int cdns_mhdp_sst_enable(struct drm_bridge *bridge)
 
 void cdns_mhdp_configure_video(struct drm_bridge *bridge)
 {
-       struct cdns_mhdp_bridge *mhdp_bridge = to_mhdp_bridge(bridge);
-       struct cdns_mhdp_device *mhdp = mhdp_bridge->mhdp;
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
        unsigned int dp_framer_sp = 0, msa_horizontal_1,
                           msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
                           misc0 = 0, misc1 = 0, pxl_repr,
@@ -935,7 +1774,7 @@ void cdns_mhdp_configure_video(struct drm_bridge *bridge)
        struct drm_display_mode *mode;
        u32 bpp, bpc, pxlfmt;
        u32 tmp;
-       u8 stream_id = mhdp_bridge->stream_id;
+       u8 stream_id = mhdp->stream_id;
 
        mode = &bridge->encoder->crtc->state->mode;
 
@@ -1094,13 +1933,13 @@ void cdns_mhdp_configure_video(struct drm_bridge *bridge)
 
 void cdns_mhdp_enable(struct drm_bridge *bridge)
 {
-       struct cdns_mhdp_bridge *mhdp_bridge = to_mhdp_bridge(bridge);
-       struct cdns_mhdp_device *mhdp = mhdp_bridge->mhdp;
+       struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
        u32 resp;
 
        dev_dbg(mhdp->dev, "bridge enable\n");
 
-       cdns_mhdp_j721e_enable(mhdp);
+       if (mhdp->ops && mhdp->ops->enable)
+               mhdp->ops->enable(mhdp);
 
        /* Enable VIF clock for stream 0 */
        cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
@@ -1117,6 +1956,14 @@ static void cdns_mhdp_detach(struct drm_bridge *bridge)
 {
        struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
 
+       dev_dbg(mhdp->dev, "%s\n", __func__);
+
+       spin_lock(&mhdp->start_lock);
+
+       mhdp->bridge_attached = false;
+
+       spin_unlock(&mhdp->start_lock);
+
        writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
        writel(~0, mhdp->regs + CDNS_MB_INT_MASK);
 }
@@ -1128,105 +1975,17 @@ static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
        .detach = cdns_mhdp_detach,
 };
 
-static int load_firmware(struct cdns_mhdp_device *mhdp, const char *name,
-                        unsigned int addr)
-{
-       const struct firmware *fw;
-       int ret;
-
-       ret = request_firmware(&fw, name, mhdp->dev);
-       if (ret) {
-               dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
-                       name, ret);
-               return ret;
-       }
-
-       memcpy_toio(mhdp->regs + addr, fw->data, fw->size);
-
-       release_firmware(fw);
-
-       return 0;
-}
-
-static int cdns_mhdp_audio_hw_params(struct device *dev, void *data,
-                                    struct hdmi_codec_daifmt *daifmt,
-                                    struct hdmi_codec_params *params)
-{
-       struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
-       struct audio_info audio = {
-               .sample_width = params->sample_width,
-               .sample_rate = params->sample_rate,
-               .channels = params->channels,
-       };
-       int ret;
-
-       if (daifmt->fmt != HDMI_I2S) {
-               DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
-               return -EINVAL;
-       }
-
-       audio.format = AFMT_I2S;
-
-       ret = cdns_mhdp_audio_config(mhdp, &audio);
-       if (!ret)
-               mhdp->audio_info = audio;
-
-       return 0;
-}
-
-static void cdns_mhdp_audio_shutdown(struct device *dev, void *data)
-{
-       struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
-       int ret;
-
-       ret = cdns_mhdp_audio_stop(mhdp, &mhdp->audio_info);
-       if (!ret)
-               mhdp->audio_info.format = AFMT_UNUSED;
-}
-
-static int cdns_mhdp_audio_digital_mute(struct device *dev, void *data,
-                                       bool enable)
-{
-       struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
-
-       return cdns_mhdp_audio_mute(mhdp, enable);
-}
-
-static int cdns_mhdp_audio_get_eld(struct device *dev, void *data,
-                                  u8 *buf, size_t len)
-{
-       struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
-
-       memcpy(buf, mhdp->connector.base.eld,
-              min(sizeof(mhdp->connector.base.eld), len));
-
-       return 0;
-}
-
-static const struct hdmi_codec_ops audio_codec_ops = {
-       .hw_params = cdns_mhdp_audio_hw_params,
-       .audio_shutdown = cdns_mhdp_audio_shutdown,
-       .digital_mute = cdns_mhdp_audio_digital_mute,
-       .get_eld = cdns_mhdp_audio_get_eld,
-};
-
 static int mhdp_probe(struct platform_device *pdev)
 {
+       const struct of_device_id *match;
        struct resource *regs;
        struct cdns_mhdp_device *mhdp;
        struct clk *clk;
        int ret;
-       unsigned int reg;
        unsigned long rate;
        int irq;
        u32 lanes_prop;
 
-       struct hdmi_codec_pdata codec_data = {
-               .i2s = 1,
-               .max_i2s_channels = 8,
-               .ops = &audio_codec_ops,
-       };
-
        mhdp = devm_kzalloc(&pdev->dev, sizeof(struct cdns_mhdp_device),
                            GFP_KERNEL);
        if (!mhdp)
@@ -1240,6 +1999,8 @@ static int mhdp_probe(struct platform_device *pdev)
 
        mhdp->clk = clk;
        mhdp->dev = &pdev->dev;
+       mutex_init(&mhdp->mbox_mutex);
+       spin_lock_init(&mhdp->start_lock);
        dev_set_drvdata(&pdev->dev, mhdp);
 
        drm_dp_aux_init(&mhdp->aux);
@@ -1261,36 +2022,34 @@ static int mhdp_probe(struct platform_device *pdev)
 
        clk_prepare_enable(clk);
 
+       match = of_match_device(mhdp_ids, &pdev->dev);
+       if (!match)
+               return -ENODEV;
+       mhdp->ops = (struct mhdp_platform_ops *)match->data;
+
        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "pm_runtime_get_sync failed\n");
-               return ret;
+               pm_runtime_disable(&pdev->dev);
+               goto clk_disable;
        }
 
-       ret = cdns_mhdp_j721e_init(mhdp);
-       if (ret != 0) {
-               dev_err(&pdev->dev, "J721E Wrapper initialization failed: %d\n",
-                       ret);
-               return ret;
+       if (mhdp->ops && mhdp->ops->init) {
+               ret = mhdp->ops->init(mhdp);
+               if (ret != 0) {
+                       dev_err(&pdev->dev, "MHDP platform initialization failed: %d\n",
+                               ret);
+                       goto runtime_put;
+               }
        }
 
-       /* Release uCPU reset and stall it. */
-       writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);
-
-       ret = load_firmware(mhdp, FW_NAME, CDNS_MHDP_IMEM);
-       if (ret)
-               return ret;
-
        rate = clk_get_rate(clk);
        writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
        writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
 
        dev_dbg(&pdev->dev, "func clk rate %lu Hz\n", rate);
 
-       /* Leave debug mode, release stall */
-       writel(0, mhdp->regs + CDNS_APB_CTRL);
-
        writel(~0, mhdp->regs + CDNS_MB_INT_MASK);
        writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
 
@@ -1298,22 +2057,9 @@ static int mhdp_probe(struct platform_device *pdev)
        ret = devm_request_threaded_irq(mhdp->dev, irq, NULL, mhdp_irq_handler,
                                        IRQF_ONESHOT, "mhdp8546", mhdp);
        if (ret) {
-               dev_err(&pdev->dev,
-                       "cannot install IRQ %d\n", irq);
-               return -EIO;
-       }
-
-       /*
-        * Wait for the KEEP_ALIVE "message" on the first 8 bits.
-        * Updated each sched "tick" (~2ms)
-        */
-       ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
-                                reg & CDNS_KEEP_ALIVE_MASK, 500,
-                                CDNS_KEEP_ALIVE_TIMEOUT);
-       if (ret) {
-               dev_err(&pdev->dev,
-                       "device didn't give any life sign: reg %d\n", reg);
-               return -EIO;
+               dev_err(&pdev->dev, "cannot install IRQ %d\n", irq);
+               ret = -EIO;
+               goto j721e_fini;
        }
 
        /* Read source capabilities, based on PHY's device tree properties. */
@@ -1348,40 +2094,34 @@ static int mhdp_probe(struct platform_device *pdev)
        mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
        mhdp->display_fmt.bpc = 8;
 
-       mhdp->bridge.base.of_node = pdev->dev.of_node;
-       mhdp->bridge.base.funcs = &cdns_mhdp_bridge_funcs;
-
-       /* Init events to 0 as it's not cleared by FW at boot but on read */
-       readl(mhdp->regs + CDNS_SW_EVENT0);
-       readl(mhdp->regs + CDNS_SW_EVENT1);
-       readl(mhdp->regs + CDNS_SW_EVENT2);
-       readl(mhdp->regs + CDNS_SW_EVENT3);
-
-       /* Activate uCPU */
-       ret = cdns_mhdp_set_firmware_active(mhdp, true);
-       if (ret) {
-               dev_err(mhdp->dev, "Failed to activate DP\n");
-               return ret;
-       }
-
-       mhdp->audio_pdev = platform_device_register_data(
-                                  mhdp->dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
-                                  &codec_data, sizeof(codec_data));
+       mhdp->bridge.of_node = pdev->dev.of_node;
+       mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
 
        ret = phy_init(mhdp->phy);
        if (ret) {
                dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
-               return ret;
+               goto runtime_put;
        }
 
-       mhdp->bridge.connector = &mhdp->connector;
-       mhdp->connector.bridge = &mhdp->bridge;
-       mhdp->bridge.mhdp = mhdp;
-       mhdp->bridge.is_active = false;
+       drm_bridge_add(&mhdp->bridge);
 
-       drm_bridge_add(&mhdp->bridge.base);
+       ret = load_firmware(mhdp);
+       if (ret)
+               goto phy_exit;
 
        return 0;
+
+phy_exit:
+       phy_exit(mhdp->phy);
+j721e_fini:
+       cdns_mhdp_j721e_fini(mhdp);
+runtime_put:
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+clk_disable:
+       clk_disable_unprepare(mhdp->clk);
+
+       return ret;
 }
 
 MODULE_FIRMWARE(FW_NAME);
@@ -1389,17 +2129,42 @@ MODULE_FIRMWARE(FW_NAME);
 static int mhdp_remove(struct platform_device *pdev)
 {
        struct cdns_mhdp_device *mhdp = dev_get_drvdata(&pdev->dev);
-       int ret;
+       unsigned int timeout = 10;
+       bool stop_fw = false;
+       int ret = 0;
+
+       if (mhdp->ops && mhdp->ops->exit)
+               mhdp->ops->exit(mhdp);
+
+       drm_bridge_remove(&mhdp->bridge);
+
+wait_loading:
+       spin_lock(&mhdp->start_lock);
+       if (mhdp->hw_state == MHDP_HW_LOADING && timeout-- > 0) {
+               spin_unlock(&mhdp->start_lock);
+               msleep(100);
+               goto wait_loading;
+       } else if (mhdp->hw_state == MHDP_HW_READY) {
+               stop_fw = true;
+               timeout = 1; /* We were successful even if counter reached 0 */
+       }
+       mhdp->hw_state = MHDP_HW_STOPPED;
+       spin_unlock(&mhdp->start_lock);
+
+       if (timeout == 0)
+               dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
+                       __func__);
+
+       if (stop_fw) {
+               ret = cdns_mhdp_set_firmware_active(mhdp, false);
+               if (ret)
+                       dev_err(mhdp->dev, "%s: De-activate FW failed: %d\n",
+                               __func__, ret);
+       }
 
-       platform_device_unregister(mhdp->audio_pdev);
+       phy_exit(mhdp->phy);
 
-       drm_bridge_remove(&mhdp->bridge.base);
-
-       ret = cdns_mhdp_set_firmware_active(mhdp, false);
-       if (ret) {
-               dev_err(mhdp->dev, "Failed to de-activate DP\n");
-               return ret;
-       }
+       cdns_mhdp_j721e_fini(mhdp);
 
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
@@ -1408,7 +2173,7 @@ static int mhdp_remove(struct platform_device *pdev)
 
        /* FIXME: check for missing functions */
 
-       return 0;
+       return ret;
 }
 
 static struct platform_driver mhdp_driver = {
index 0861e4ffda60297cfb94cc2d8c0e3a6cd8b9b2f0..acf748b09c04e5f335fa10d8d04e6d875d421d95 100644 (file)
 #ifndef CDNS_MHDP_H
 #define CDNS_MHDP_H
 
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_dp_helper.h>
+
 #define CDNS_APB_CFG                           0x00000
 #define CDNS_APB_CTRL                          (CDNS_APB_CFG + 0x00)
 #define CDNS_CPU_STALL                         BIT(3)
 #define CDNS_DP_LANE_EN_LANES(x)               GENMASK(x - 1, 0)
 #define CDNS_DP_ENHNCD                         (CDNS_DPTX_GLOBAL + 0x04)
 
+struct cdns_mhdp_host {
+       unsigned int link_rate;
+       u8 lanes_cnt;
+       u8 volt_swing;
+       u8 pre_emphasis;
+       u8 pattern_supp;
+       u8 lane_mapping;
+       u8 fast_link : 1;
+       u8 enhanced : 1;
+       u8 scrambler : 1;
+       u8 ssc : 1;
+};
+
+struct cdns_mhdp_sink {
+       unsigned int link_rate;
+       u8 lanes_cnt;
+       u8 pattern_supp;
+       u8 fast_link : 1;
+       u8 enhanced : 1;
+       u8 ssc : 1;
+};
+
+struct cdns_mhdp_display_fmt {
+       u32 color_format;
+       u32 bpc;
+       u8 y_only : 1;
+};
 
-#define to_mhdp_connector(x) container_of(x, struct cdns_mhdp_connector, base)
-#define to_mhdp_bridge(x) container_of(x, struct cdns_mhdp_bridge, base)
+/*
+ * These enums present MHDP hw initialization state
+ * Legal state transitions are:
+ * MHDP_HW_INACTIVE <-> MHDP_HW_LOADING -> MHDP_HW_READY
+ *        |                                     |
+ *        '----------> MHDP_HW_STOPPED <--------'
+ */
+enum mhdp_hw_state { MHDP_HW_INACTIVE = 0, /* HW not initialized */
+                    MHDP_HW_LOADING,      /* HW initialization in progress */
+                    MHDP_HW_READY,        /* HW ready, FW active */
+                    MHDP_HW_STOPPED };    /* Driver removal FW to be stopped */
+
+struct cdns_mhdp_device;
+
+struct mhdp_platform_ops {
+       int (*init)(struct cdns_mhdp_device *mhdp);
+       void (*exit)(struct cdns_mhdp_device *mhdp);
+       void (*enable)(struct cdns_mhdp_device *mhdp);
+       void (*disable)(struct cdns_mhdp_device *mhdp);
+};
+
+struct cdns_mhdp_device {
+       void __iomem *regs;
+       void __iomem *j721e_regs;
+
+       struct device *dev;
+       struct clk *clk;
+       struct phy *phy;
+
+       const struct mhdp_platform_ops *ops;
+
+       /* This is to protect mailbox communications with the firmware */
+       struct mutex mbox_mutex;
+
+       struct drm_connector connector;
+       struct drm_bridge bridge;
+
+       struct drm_dp_link link;
+       struct drm_dp_aux aux;
+
+       struct cdns_mhdp_host host;
+       struct cdns_mhdp_sink sink;
+       struct cdns_mhdp_display_fmt display_fmt;
+       s8 stream_id;
+
+       u8 link_up : 1;
+       u8 plugged : 1;
+
+       /*
+        * "start_lock" protects the access to bridge_attached and
+        * hw_state data members that control the delayed firmware
+        * loading and attaching the bridge. They are accessed from
+        * both the DRM core and mhdp_fw_cb(). In most cases just
+        * protecting the data members is enough, but the irq mask
+        * setting needs to be protected when enabling the FW.
+        */
+       spinlock_t start_lock;
+       u8 bridge_attached : 1;
+       enum mhdp_hw_state hw_state;
+};
+
+#define connector_to_mhdp(x) container_of(x, struct cdns_mhdp_device, connector)
+#define bridge_to_mhdp(x) container_of(x, struct cdns_mhdp_device, bridge)
 
 #define CDNS_MHDP_MAX_STREAMS   4
 
index 77c24883a65d971ee706836a4cd02f9032364f17..de6223428c604a783270243068ffc762145c5c80 100644 (file)
@@ -366,7 +366,7 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
        u16 pixel_clock_10kHz = adj->clock / 10;
        int ret;
 
-       buf[0] = pixel_clock_10kHz & 0xFF;
+       buf[0] = pixel_clock_10kHz & 0xff;
        buf[1] = pixel_clock_10kHz >> 8;
        buf[2] = adj->vrefresh;
        buf[3] = 0x00;
@@ -662,7 +662,7 @@ static void sii902x_audio_shutdown(struct device *dev, void *data)
        }
 }
 
-int sii902x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int sii902x_audio_digital_mute(struct device *dev, void *data, bool enable)
 {
        struct sii902x *sii902x = dev_get_drvdata(dev);
 
index 665bc3f382759fc0fbad20de93c4c650bbc467ad..1c2f0cc926c0dc673379c3dc42c3ddb0e9c5ae37 100644 (file)
@@ -322,7 +322,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
                               struct drm_dp_aux_msg *msg)
 {
        struct tc_data *tc = aux_to_tc(aux);
-       size_t size = min_t(size_t, 8, msg->size);
+       size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
        u8 request = msg->request & ~DP_AUX_I2C_MOT;
        u8 *buf = msg->buffer;
        u32 tmp = 0;
@@ -686,6 +686,8 @@ static int tc_set_video_mode(struct tc_data *tc,
        int upper_margin = mode->vtotal - mode->vsync_end;
        int lower_margin = mode->vsync_start - mode->vdisplay;
        int vsync_len = mode->vsync_end - mode->vsync_start;
+       u32 bits_per_pixel = 24;
+       u32 in_bw, out_bw;
 
        /*
         * Recommended maximum number of symbols transferred in a transfer unit:
@@ -693,7 +695,10 @@ static int tc_set_video_mode(struct tc_data *tc,
         *              (output active video bandwidth in bytes))
         * Must be less than tu_size.
         */
-       max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
+       in_bw = mode->clock * bits_per_pixel / 8;
+       out_bw = tc->link.base.num_lanes * tc->link.base.rate;
+       max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);
 
        dev_dbg(tc->dev, "set mode %dx%d\n",
                mode->hdisplay, mode->vdisplay);
@@ -1200,6 +1205,13 @@ static int tc_connector_get_modes(struct drm_connector *connector)
        struct tc_data *tc = connector_to_tc(connector);
        struct edid *edid;
        unsigned int count;
+       int ret;
+
+       ret = tc_get_display_props(tc);
+       if (ret < 0) {
+               dev_err(tc->dev, "failed to read display props: %d\n", ret);
+               return 0;
+       }
 
        if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
                count = tc->panel->funcs->get_modes(tc->panel);
index b50910bd500eae8082647dc0c6e9e4c397f9fd75..905e504df6992549ccaf535c9c83d8332fce9d96 100644 (file)
@@ -1713,6 +1713,27 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
        struct drm_connector *connector = conn_state->connector;
        struct drm_crtc_state *crtc_state;
 
+       /*
+        * For compatibility with legacy users, we want to make sure that
+        * we allow DPMS On<->Off modesets on unregistered connectors, since
+        * legacy modesetting users will not be expecting these to fail. We do
+        * not however, want to allow legacy users to assign a connector
+        * that's been unregistered from sysfs to another CRTC, since doing
+        * this with a now non-existent connector could potentially leave us
+        * in an invalid state.
+        *
+        * Since the connector can be unregistered at any point during an
+        * atomic check or commit, this is racy. But that's OK: all we care
+        * about is ensuring that userspace can't use this connector for new
+        * configurations after it's been notified that the connector is no
+        * longer present.
+        */
+       if (!READ_ONCE(connector->registered) && crtc) {
+               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
+                                connector->base.id, connector->name);
+               return -EINVAL;
+       }
+
        if (conn_state->crtc == crtc)
                return 0;
 
index 99961192bf034f893cbac5521c996dc98aa49887..c88e5ff41add6a4898186672070b03293c3e9ffa 100644 (file)
@@ -379,12 +379,13 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
        struct drm_crtc_crc *crc = &crtc->crc;
        struct drm_crtc_crc_entry *entry;
        int head, tail;
+       unsigned long flags;
 
-       spin_lock(&crc->lock);
+       spin_lock_irqsave(&crc->lock, flags);
 
        /* Caller may not have noticed yet that userspace has stopped reading */
        if (!crc->entries) {
-               spin_unlock(&crc->lock);
+               spin_unlock_irqrestore(&crc->lock, flags);
                return -EINVAL;
        }
 
@@ -395,7 +396,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
                bool was_overflow = crc->overflow;
 
                crc->overflow = true;
-               spin_unlock(&crc->lock);
+               spin_unlock_irqrestore(&crc->lock, flags);
 
                if (!was_overflow)
                        DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
@@ -411,7 +412,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
        head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
        crc->head = head;
 
-       spin_unlock(&crc->lock);
+       spin_unlock_irqrestore(&crc->lock, flags);
 
        wake_up_interruptible(&crc->wq);
 
index 5965f6383ada343708ec64a37280d43dc719f008..f5926bf5dabd9a80d1d71b596aa550d07c8a2db2 100644 (file)
@@ -166,6 +166,9 @@ static const struct edid_quirk {
        /* Medion MD 30217 PG */
        { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
 
+       /* Lenovo G50 */
+       { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+
        /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
        { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 
@@ -1349,6 +1352,7 @@ MODULE_PARM_DESC(edid_fixup,
 
 static void drm_get_displayid(struct drm_connector *connector,
                              struct edid *edid);
+static int validate_displayid(u8 *displayid, int length, int idx);
 
 static int drm_edid_block_checksum(const u8 *raw_edid)
 {
@@ -2932,16 +2936,46 @@ static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
        return edid_ext;
 }
 
-static u8 *drm_find_cea_extension(const struct edid *edid)
-{
-       return drm_find_edid_extension(edid, CEA_EXT);
-}
 
 static u8 *drm_find_displayid_extension(const struct edid *edid)
 {
        return drm_find_edid_extension(edid, DISPLAYID_EXT);
 }
 
+static u8 *drm_find_cea_extension(const struct edid *edid)
+{
+       int ret;
+       int idx = 1;
+       int length = EDID_LENGTH;
+       struct displayid_block *block;
+       u8 *cea;
+       u8 *displayid;
+
+       /* Look for a top level CEA extension block */
+       cea = drm_find_edid_extension(edid, CEA_EXT);
+       if (cea)
+               return cea;
+
+       /* CEA blocks can also be found embedded in a DisplayID block */
+       displayid = drm_find_displayid_extension(edid);
+       if (!displayid)
+               return NULL;
+
+       ret = validate_displayid(displayid, length, idx);
+       if (ret)
+               return NULL;
+
+       idx += sizeof(struct displayid_hdr);
+       for_each_displayid_db(displayid, block, idx, length) {
+               if (block->tag == DATA_BLOCK_CTA) {
+                       cea = (u8 *)block;
+                       break;
+               }
+       }
+
+       return cea;
+}
+
 /*
  * Calculate the alternate clock for the CEA mode
  * (60Hz vs. 59.94Hz etc.)
@@ -3665,13 +3699,38 @@ cea_revision(const u8 *cea)
 static int
 cea_db_offsets(const u8 *cea, int *start, int *end)
 {
-       /* Data block offset in CEA extension block */
-       *start = 4;
-       *end = cea[2];
-       if (*end == 0)
-               *end = 127;
-       if (*end < 4 || *end > 127)
-               return -ERANGE;
+       /* DisplayID CTA extension blocks and top-level CEA EDID
+        * block header definitions differ in the following bytes:
+        *   1) Byte 2 of the header specifies length differently,
+        *   2) Byte 3 is only present in the CEA top level block.
+        *
+        * The different definitions for byte 2 follow.
+        *
+        * DisplayID CTA extension block defines byte 2 as:
+        *   Number of payload bytes
+        *
+        * CEA EDID block defines byte 2 as:
+        *   Byte number (decimal) within this block where the 18-byte
+        *   DTDs begin. If no non-DTD data is present in this extension
+        *   block, the value should be set to 04h (the byte after next).
+        *   If set to 00h, there are no DTDs present in this block and
+        *   no non-DTD data.
+        */
+       if (cea[0] == DATA_BLOCK_CTA) {
+               *start = 3;
+               *end = *start + cea[2];
+       } else if (cea[0] == CEA_EXT) {
+               /* Data block offset in CEA extension block */
+               *start = 4;
+               *end = cea[2];
+               if (*end == 0)
+                       *end = 127;
+               if (*end < 4 || *end > 127)
+                       return -ERANGE;
+       } else {
+               return -ENOTSUPP;
+       }
+
        return 0;
 }
 
@@ -5218,6 +5277,9 @@ static int drm_parse_display_id(struct drm_connector *connector,
                case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
                        /* handled in mode gathering code. */
                        break;
+               case DATA_BLOCK_CTA:
+                       /* handled in the cea parser code. */
+                       break;
                default:
                        DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
                        break;
index a4915099aaa99223b56de4181c26510303769966..a0e107abc40d722c7d5bf06c74c5c4bbb509dd68 100644 (file)
@@ -290,6 +290,8 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector)
         * the last one found one as a fallback.
         */
        fwstr = kstrdup(edid_firmware, GFP_KERNEL);
+       if (!fwstr)
+               return ERR_PTR(-ENOMEM);
        edidstr = fwstr;
 
        while ((edidname = strsep(&edidstr, ","))) {
index 781af1d42d766bf63db12801ace4703132db84fa..b64a6ffc0aed72c6429e81b68527bb60b4a447a7 100644 (file)
@@ -793,7 +793,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
        struct drm_device *dev = fb->dev;
        struct drm_atomic_state *state;
        struct drm_plane *plane;
-       struct drm_connector *conn;
+       struct drm_connector *conn __maybe_unused;
        struct drm_connector_state *conn_state;
        int i, ret;
        unsigned plane_mask;
index 138680b37c709aa3216bddb1897c6a96ed57ee75..f8672238d444b705b6512244183c7986e74735b6 100644 (file)
@@ -185,7 +185,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
        m32.size = map.size;
        m32.type = map.type;
        m32.flags = map.flags;
-       m32.handle = ptr_to_compat(map.handle);
+       m32.handle = ptr_to_compat((void __user *)map.handle);
        m32.mtrr = map.mtrr;
        if (copy_to_user(argp, &m32, sizeof(m32)))
                return -EFAULT;
@@ -216,7 +216,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
 
        m32.offset = map.offset;
        m32.mtrr = map.mtrr;
-       m32.handle = ptr_to_compat(map.handle);
+       m32.handle = ptr_to_compat((void __user *)map.handle);
        if (map.handle != compat_ptr(m32.handle))
                pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
                                   map.handle, m32.type, m32.offset);
@@ -529,7 +529,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
        if (err)
                return err;
 
-       req32.handle = ptr_to_compat(req.handle);
+       req32.handle = ptr_to_compat((void __user *)req.handle);
        if (copy_to_user(argp, &req32, sizeof(req32)))
                return -EFAULT;
 
index b44bed554211705cc0ca8ffa92cf3fadf3146f15..cc354b49177422e265c96d00ef2afaef3d1b66e5 100644 (file)
@@ -82,6 +82,12 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
        .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
 };
 
+static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
+       .width = 720,
+       .height = 1280,
+       .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
 static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
        .width = 800,
        .height = 1280,
@@ -109,6 +115,12 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
                },
                .driver_data = (void *)&gpd_micropc,
+       }, {    /* GPD MicroPC (later BIOS versions with proper DMI strings) */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"),
+               },
+               .driver_data = (void *)&lcd720x1280_rightside_up,
        }, {    /*
                 * GPD Pocket, note that the the DMI data is less generic then
                 * it seems, devices with a board-vendor of "AMI Corporation"
index d18b7e27ef64c9324b35300a46f14315f5e3cd84..c0b26135dbd5b52f7dd1aa64e49dbce003a7238a 100644 (file)
@@ -581,6 +581,9 @@ static void output_poll_execute(struct work_struct *work)
        enum drm_connector_status old_status;
        bool repoll = false, changed;
 
+       if (!dev->mode_config.poll_enabled)
+               return;
+
        /* Pick up any changes detected by the probe functions. */
        changed = dev->mode_config.delayed_event;
        dev->mode_config.delayed_event = false;
@@ -735,7 +738,11 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
  */
 void drm_kms_helper_poll_fini(struct drm_device *dev)
 {
-       drm_kms_helper_poll_disable(dev);
+       if (!dev->mode_config.poll_enabled)
+               return;
+
+       dev->mode_config.poll_enabled = false;
+       cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
index 28cdcf76b6f9988a24d6126090264d99fd8bd4c3..d1859bcc7ccbc172c7b7f3bff122b95769d3ded3 100644 (file)
@@ -105,13 +105,20 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
        write_sequnlock(&vblank->seqlock);
 }
 
+static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe)
+{
+       struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+       return vblank->max_vblank_count ?: dev->max_vblank_count;
+}
+
 /*
  * "No hw counter" fallback implementation of .get_vblank_counter() hook,
  * if there is no useable hardware frame counter available.
  */
 static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
 {
-       WARN_ON_ONCE(dev->max_vblank_count != 0);
+       WARN_ON_ONCE(drm_max_vblank_count(dev, pipe) != 0);
        return 0;
 }
 
@@ -198,6 +205,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
        ktime_t t_vblank;
        int count = DRM_TIMESTAMP_MAXRETRIES;
        int framedur_ns = vblank->framedur_ns;
+       u32 max_vblank_count = drm_max_vblank_count(dev, pipe);
 
        /*
         * Interrupts were disabled prior to this call, so deal with counter
@@ -216,9 +224,9 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
                rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq);
        } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
 
-       if (dev->max_vblank_count != 0) {
+       if (max_vblank_count) {
                /* trust the hw counter when it's around */
-               diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
+               diff = (cur_vblank - vblank->last) & max_vblank_count;
        } else if (rc && framedur_ns) {
                u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
 
@@ -1204,6 +1212,37 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_crtc_vblank_reset);
 
+/**
+ * drm_crtc_set_max_vblank_count - configure the hw max vblank counter value
+ * @crtc: CRTC in question
+ * @max_vblank_count: max hardware vblank counter value
+ *
+ * Update the maximum hardware vblank counter value for @crtc
+ * at runtime. Useful for hardware where the operation of the
+ * hardware vblank counter depends on the currently active
+ * display configuration.
+ *
+ * For example, if the hardware vblank counter does not work
+ * when a specific connector is active the maximum can be set
+ * to zero. And when that specific connector isn't active the
+ * maximum can again be set to the appropriate non-zero value.
+ *
+ * If used, must be called before drm_vblank_on().
+ */
+void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
+                                  u32 max_vblank_count)
+{
+       struct drm_device *dev = crtc->dev;
+       unsigned int pipe = drm_crtc_index(crtc);
+       struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+       WARN_ON(dev->max_vblank_count);
+       WARN_ON(!READ_ONCE(vblank->inmodeset));
+
+       vblank->max_vblank_count = max_vblank_count;
+}
+EXPORT_SYMBOL(drm_crtc_set_max_vblank_count);
+
 /**
  * drm_crtc_vblank_on - enable vblank events on a CRTC
  * @crtc: CRTC in question
index 0ddb6eec7b113ea306fea4bde563e8ecb9945495..df228436a03d92965d65178598df9c923a21f7c6 100644 (file)
@@ -108,12 +108,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
        scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
        do {
                cpu_relax();
-       } while (retry > 1 &&
+       } while (--retry > 1 &&
                 scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
        do {
                cpu_relax();
                scaler_write(1, SCALER_INT_EN);
-       } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+       } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
 
        return retry ? 0 : -EIO;
 }
index 12e4203c06dbd1e817e4afece3eee0f6c232c57b..66abe061f07b09397c228d76a177bea184ac2329 100644 (file)
@@ -1741,6 +1741,18 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 
        entry = __gvt_cache_find_gfn(info->vgpu, gfn);
        if (!entry) {
+               ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
+               if (ret)
+                       goto err_unlock;
+
+               ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+               if (ret)
+                       goto err_unmap;
+       } else if (entry->size != size) {
+               /* the same gfn with different size: unmap and re-map */
+               gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
+               __gvt_cache_remove_entry(vgpu, entry);
+
                ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
                if (ret)
                        goto err_unlock;
index 663a7c9ca3d3042dfef8b373e6fb769ac9c7babb..d0e216d85a22e80896aac3effa29a422741a13f2 100644 (file)
@@ -1276,9 +1276,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
                ((a)->lrca == (b)->lrca))
 
-#define get_last_workload(q) \
-       (list_empty(q) ? NULL : container_of(q->prev, \
-       struct intel_vgpu_workload, list))
 /**
  * intel_vgpu_create_workload - create a vGPU workload
  * @vgpu: a vGPU
@@ -1297,7 +1294,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 {
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct list_head *q = workload_q_head(vgpu, ring_id);
-       struct intel_vgpu_workload *last_workload = get_last_workload(q);
+       struct intel_vgpu_workload *last_workload = NULL;
        struct intel_vgpu_workload *workload = NULL;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        u64 ring_context_gpa;
@@ -1320,15 +1317,20 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
        head &= RB_HEAD_OFF_MASK;
        tail &= RB_TAIL_OFF_MASK;
 
-       if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
-               gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
-               gvt_dbg_el("ctx head %x real head %lx\n", head,
-                               last_workload->rb_tail);
-               /*
-                * cannot use guest context head pointer here,
-                * as it might not be updated at this time
-                */
-               head = last_workload->rb_tail;
+       list_for_each_entry_reverse(last_workload, q, list) {
+
+               if (same_context(&last_workload->ctx_desc, desc)) {
+                       gvt_dbg_el("ring id %d cur workload == last\n",
+                                       ring_id);
+                       gvt_dbg_el("ctx head %x real head %lx\n", head,
+                                       last_workload->rb_tail);
+                       /*
+                        * cannot use guest context head pointer here,
+                        * as it might not be updated at this time
+                        */
+                       head = last_workload->rb_tail;
+                       break;
+               }
        }
 
        gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
index f9ce35da4123ec52657f55f6a704c12c9c286080..e063e98d1e82ec993ce357898952318f38f9c324 100644 (file)
@@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
        if (!IS_GEN5(dev_priv))
                return -ENODEV;
 
+       intel_runtime_pm_get(dev_priv);
+
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
@@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
        seq_printf(m, "GFX power: %ld\n", gfx);
        seq_printf(m, "Total power: %ld\n", chipset + gfx);
 
+       intel_runtime_pm_put(dev_priv);
+
        return 0;
 }
 
index f8cfd16be534cf3eece97c4a59a456e5d8769bde..a4b4ab7b9f8ef323592b9e8f162de009c21673b3 100644 (file)
@@ -1120,6 +1120,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
        pci_set_master(pdev);
 
+       /*
+        * We don't have a max segment size, so set it to the max so sg's
+        * debugging layer doesn't complain
+        */
+       dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
        /* overlay on gen2 is broken and can't address above 1G */
        if (IS_GEN2(dev_priv)) {
                ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
index 03cda197fb6b8c46873b7bfe39b40ac33e45c09f..9372877100420ba7fc9c7129b273b763144e266c 100644 (file)
@@ -1874,20 +1874,28 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
         * pages from.
         */
        if (!obj->base.filp) {
-               i915_gem_object_put(obj);
-               return -ENXIO;
+               addr = -ENXIO;
+               goto err;
+       }
+
+       if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
+               addr = -EINVAL;
+               goto err;
        }
 
        addr = vm_mmap(obj->base.filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
+       if (IS_ERR_VALUE(addr))
+               goto err;
+
        if (args->flags & I915_MMAP_WC) {
                struct mm_struct *mm = current->mm;
                struct vm_area_struct *vma;
 
                if (down_write_killable(&mm->mmap_sem)) {
-                       i915_gem_object_put(obj);
-                       return -EINTR;
+                       addr = -EINTR;
+                       goto err;
                }
                vma = find_vma(mm, addr);
                if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
@@ -1896,17 +1904,20 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                else
                        addr = -ENOMEM;
                up_write(&mm->mmap_sem);
+               if (IS_ERR_VALUE(addr))
+                       goto err;
 
                /* This may race, but that's ok, it only gets set */
                WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
        }
        i915_gem_object_put(obj);
-       if (IS_ERR((void *)addr))
-               return addr;
 
        args->addr_ptr = (uint64_t) addr;
-
        return 0;
+
+err:
+       i915_gem_object_put(obj);
+       return addr;
 }
 
 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
@@ -5595,6 +5606,8 @@ err_uc_misc:
                i915_gem_cleanup_userptr(dev_priv);
 
        if (ret == -EIO) {
+               mutex_lock(&dev_priv->drm.struct_mutex);
+
                /*
                 * Allow engine initialisation to fail by marking the GPU as
                 * wedged. But we only want to do this where the GPU is angry,
@@ -5605,7 +5618,14 @@ err_uc_misc:
                                        "Failed to initialize GPU, declaring it wedged!\n");
                        i915_gem_set_wedged(dev_priv);
                }
-               ret = 0;
+
+               /* Minimal basic recovery for KMS */
+               ret = i915_ggtt_enable_hw(dev_priv);
+               i915_gem_restore_gtt_mappings(dev_priv);
+               i915_gem_restore_fences(dev_priv);
+               intel_init_clock_gating(dev_priv);
+
+               mutex_unlock(&dev_priv->drm.struct_mutex);
        }
 
        i915_gem_drain_freed_objects(dev_priv);
@@ -5615,6 +5635,7 @@ err_uc_misc:
 void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
        i915_gem_suspend_late(dev_priv);
+       intel_disable_gt_powersave(dev_priv);
 
        /* Flush any outstanding unpin_work. */
        i915_gem_drain_workqueue(dev_priv);
@@ -5626,6 +5647,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
        i915_gem_contexts_fini(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
+       intel_cleanup_gt_powersave(dev_priv);
+
        intel_uc_fini_misc(dev_priv);
        i915_gem_cleanup_userptr(dev_priv);
 
index 16f5d2d9380149db4c0c18882a79318fbaa836a6..4e070afb2738b741700ffe510951159334a9fcb0 100644 (file)
@@ -6531,7 +6531,7 @@ enum {
 #define   PLANE_CTL_YUV422_UYVY                        (1 << 16)
 #define   PLANE_CTL_YUV422_YVYU                        (2 << 16)
 #define   PLANE_CTL_YUV422_VYUY                        (3 << 16)
-#define   PLANE_CTL_DECOMPRESSION_ENABLE       (1 << 15)
+#define   PLANE_CTL_RENDER_DECOMPRESSION_ENABLE        (1 << 15)
 #define   PLANE_CTL_TRICKLE_FEED_DISABLE       (1 << 14)
 #define   PLANE_CTL_PLANE_GAMMA_DISABLE                (1 << 13) /* Pre-GLK */
 #define   PLANE_CTL_TILED_MASK                 (0x7 << 10)
index 869cf4a3b6de75fee593c0f66c953cc1035434a6..a6cb3e034dd5a72bfb388c7e414ba6005f97e507 100644 (file)
@@ -100,6 +100,9 @@ static struct _balloon_info_ bl_info;
 static void vgt_deballoon_space(struct i915_ggtt *ggtt,
                                struct drm_mm_node *node)
 {
+       if (!drm_mm_node_allocated(node))
+               return;
+
        DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
                         node->start,
                         node->start + node->size,
index 29075c763428055ddb3625a80b59643e694f3d76..7b4906ede148b425b663c894573d1fc613b36f15 100644 (file)
@@ -2208,6 +2208,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
        if (INTEL_GEN(dev_priv) >= 9)
                min_cdclk = max(2 * 96000, min_cdclk);
 
+       /*
+        * "For DP audio configuration, cdclk frequency shall be set to
+        *  meet the following requirements:
+        *  DP Link Frequency(MHz) | Cdclk frequency(MHz)
+        *  270                    | 320 or higher
+        *  162                    | 200 or higher"
+        */
+       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+           intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
+               min_cdclk = max(crtc_state->port_clock, min_cdclk);
+
        /*
         * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
         * than 320000KHz.
index 0ef0c6448d53a835fbdf5319a8010c64d613bd0f..01fa98299bae65a125862e57c307cdbce07c3d32 100644 (file)
@@ -474,7 +474,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
                        u8 eu_disabled_mask;
                        u32 n_disabled;
 
-                       if (!(sseu->subslice_mask[ss] & BIT(ss)))
+                       if (!(sseu->subslice_mask[s] & BIT(ss)))
                                /* skip disabled subslice */
                                continue;
 
index 3bd44d042a1d900d0eee9e7524bc297524af32ce..6902fd2da19ca43eb8337774d79f732a24723716 100644 (file)
@@ -2712,6 +2712,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
        if (size_aligned * 2 > dev_priv->stolen_usable_size)
                return false;
 
+       switch (fb->modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+       case I915_FORMAT_MOD_Y_TILED:
+               break;
+       default:
+               DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
+                                fb->modifier);
+               return false;
+       }
+
        mutex_lock(&dev->struct_mutex);
        obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
                                                             base_aligned,
@@ -2721,8 +2732,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
        if (!obj)
                return false;
 
-       if (plane_config->tiling == I915_TILING_X)
-               obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
+       switch (plane_config->tiling) {
+       case I915_TILING_NONE:
+               break;
+       case I915_TILING_X:
+       case I915_TILING_Y:
+               obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
+               break;
+       default:
+               MISSING_CASE(plane_config->tiling);
+               return false;
+       }
 
        mode_cmd.pixel_format = fb->format->format;
        mode_cmd.width = fb->width;
@@ -3561,11 +3581,11 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
        case I915_FORMAT_MOD_Y_TILED:
                return PLANE_CTL_TILED_Y;
        case I915_FORMAT_MOD_Y_TILED_CCS:
-               return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE;
+               return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
        case I915_FORMAT_MOD_Yf_TILED:
                return PLANE_CTL_TILED_YF;
        case I915_FORMAT_MOD_Yf_TILED_CCS:
-               return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE;
+               return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
        default:
                MISSING_CASE(fb_modifier);
        }
@@ -8812,13 +8832,14 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
                fb->modifier = I915_FORMAT_MOD_X_TILED;
                break;
        case PLANE_CTL_TILED_Y:
-               if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+               plane_config->tiling = I915_TILING_Y;
+               if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
                        fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
                else
                        fb->modifier = I915_FORMAT_MOD_Y_TILED;
                break;
        case PLANE_CTL_TILED_YF:
-               if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+               if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
                        fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
                else
                        fb->modifier = I915_FORMAT_MOD_Yf_TILED;
@@ -15951,8 +15972,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
        flush_work(&dev_priv->atomic_helper.free_work);
        WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
 
-       intel_disable_gt_powersave(dev_priv);
-
        /*
         * Interrupts and polling as the first thing to avoid creating havoc.
         * Too much stuff here (turning of connectors, ...) would
@@ -15980,8 +15999,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
        intel_cleanup_overlay(dev_priv);
 
-       intel_cleanup_gt_powersave(dev_priv);
-
        intel_teardown_gmbus(dev_priv);
 
        destroy_workqueue(dev_priv->modeset_wq);
index f92079e19de8df139be974237b04ddcc784110dc..20cd4c8acecc31d22da45116738a82259971cfa6 100644 (file)
@@ -4739,6 +4739,22 @@ intel_dp_long_pulse(struct intel_connector *connector,
                 */
                status = connector_status_disconnected;
                goto out;
+       } else {
+               /*
+                * If the display is now connected, check link status;
+                * there have been known issues of link loss triggering
+                * a long pulse.
+                *
+                * Some sinks (eg. ASUS PB287Q) seem to perform some
+                * weird HPD ping pong during modesets. So we can apparently
+                * end up with HPD going low during a modeset, and then
+                * going back up soon after. And once that happens we must
+                * retrain the link to get a picture. That's in case no
+                * userspace component reacted to intermittent HPD dip.
+                */
+               struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+               intel_dp_retrain_link(encoder, ctx);
        }
 
        /*
index 1fec0c71b4d95a12baf8acc06c4fc58cbb910d1b..58ba14966d4f1128ec396da03db01370033f6212 100644 (file)
@@ -408,8 +408,6 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
        struct intel_dp *intel_dp = intel_connector->mst_port;
        struct intel_crtc *crtc = to_intel_crtc(state->crtc);
 
-       if (!READ_ONCE(connector->registered))
-               return NULL;
        return &intel_dp->mst_encoders[crtc->pipe]->base.base;
 }
 
index a132a8037ecc6b2a317229918e8d2471cdbcb9dd..77df7903e071e3e74550ee1567fdfa970b44a650 100644 (file)
@@ -413,8 +413,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
        else
                txesc2_div = 10;
 
-       I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
-       I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
+       I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
+       I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
 }
 
 /* Program BXT Mipi clocks and dividers */
index fd83046d8376bd38ca76746d333a0ec1f0fe2828..947bc6d6230205eb141df8b6ea471ca7053dc35c 100644 (file)
@@ -220,6 +220,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
        struct mtk_drm_private *private = drm->dev_private;
        struct platform_device *pdev;
        struct device_node *np;
+       struct device *dma_dev;
        int ret;
 
        if (!iommu_present(&platform_bus_type))
@@ -282,7 +283,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
                goto err_component_unbind;
        }
 
-       private->dma_dev = &pdev->dev;
+       dma_dev = &pdev->dev;
+       private->dma_dev = dma_dev;
+
+       /*
+        * Configure the DMA segment size to make sure we get contiguous IOVA
+        * when importing PRIME buffers.
+        */
+       if (!dma_dev->dma_parms) {
+               private->dma_parms_allocated = true;
+               dma_dev->dma_parms =
+                       devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
+                                    GFP_KERNEL);
+       }
+       if (!dma_dev->dma_parms) {
+               ret = -ENOMEM;
+               goto err_component_unbind;
+       }
+
+       ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
+       if (ret) {
+               dev_err(dma_dev, "Failed to set DMA segment size\n");
+               goto err_unset_dma_parms;
+       }
 
        /*
         * We don't use the drm_irq_install() helpers provided by the DRM
@@ -292,13 +315,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
        drm->irq_enabled = true;
        ret = drm_vblank_init(drm, MAX_CRTC);
        if (ret < 0)
-               goto err_component_unbind;
+               goto err_unset_dma_parms;
 
        drm_kms_helper_poll_init(drm);
        drm_mode_config_reset(drm);
 
        return 0;
 
+err_unset_dma_parms:
+       if (private->dma_parms_allocated)
+               dma_dev->dma_parms = NULL;
 err_component_unbind:
        component_unbind_all(drm->dev, drm);
 err_config_cleanup:
@@ -309,9 +335,14 @@ err_config_cleanup:
 
 static void mtk_drm_kms_deinit(struct drm_device *drm)
 {
+       struct mtk_drm_private *private = drm->dev_private;
+
        drm_kms_helper_poll_fini(drm);
        drm_atomic_helper_shutdown(drm);
 
+       if (private->dma_parms_allocated)
+               private->dma_dev->dma_parms = NULL;
+
        component_unbind_all(drm->dev, drm);
        drm_mode_config_cleanup(drm);
 }
@@ -327,6 +358,18 @@ static const struct file_operations mtk_drm_fops = {
        .compat_ioctl = drm_compat_ioctl,
 };
 
+/*
+ * We need to override this because the device used to import the memory is
+ * not dev->dev, as drm_gem_prime_import() expects.
+ */
+struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
+                                               struct dma_buf *dma_buf)
+{
+       struct mtk_drm_private *private = dev->dev_private;
+
+       return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
+}
+
 static struct drm_driver mtk_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
                           DRIVER_ATOMIC,
@@ -338,7 +381,7 @@ static struct drm_driver mtk_drm_driver = {
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = drm_gem_prime_export,
-       .gem_prime_import = drm_gem_prime_import,
+       .gem_prime_import = mtk_drm_gem_prime_import,
        .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
        .gem_prime_mmap = mtk_drm_gem_mmap_buf,
@@ -523,12 +566,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
                        comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
                        if (!comp) {
                                ret = -ENOMEM;
+                               of_node_put(node);
                                goto err_node;
                        }
 
                        ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
-                       if (ret)
+                       if (ret) {
+                               of_node_put(node);
                                goto err_node;
+                       }
 
                        private->ddp_comp[comp_id] = comp;
                }
index ecc00ca3221daa80f6952f3083f37dc5f1fad0cf..8fa60d46f8605c05203670b35204b5fabd3ddffb 100644 (file)
@@ -59,6 +59,8 @@ struct mtk_drm_private {
        } commit;
 
        struct drm_atomic_state *suspend_state;
+
+       bool dma_parms_allocated;
 };
 
 extern struct platform_driver mtk_ddp_driver;
index 12c80dfcff59bc9cb40d2e4ccbf2dbb98b6af3a5..c7daae53fa1f5f6b541f45da3b68193ba32f897f 100644 (file)
@@ -120,6 +120,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
                priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
                                              OSD_COLOR_MATRIX_32_ARGB;
                break;
+       case DRM_FORMAT_XBGR8888:
+               /* For XBGR, replace the pixel's alpha by 0xFF */
+               writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
+                                   priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+               priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+                                             OSD_COLOR_MATRIX_32_ABGR;
+               break;
        case DRM_FORMAT_ARGB8888:
                /* For ARGB, use the pixel's alpha */
                writel_bits_relaxed(OSD_REPLACE_EN, 0,
@@ -127,6 +134,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
                priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
                                              OSD_COLOR_MATRIX_32_ARGB;
                break;
+       case DRM_FORMAT_ABGR8888:
+               /* For ABGR, use the pixel's alpha */
+               writel_bits_relaxed(OSD_REPLACE_EN, 0,
+                                   priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+               priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+                                             OSD_COLOR_MATRIX_32_ABGR;
+               break;
        case DRM_FORMAT_RGB888:
                priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
                                              OSD_COLOR_MATRIX_24_RGB;
@@ -196,7 +210,9 @@ static const struct drm_plane_funcs meson_plane_funcs = {
 
 static const uint32_t supported_drm_formats[] = {
        DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
        DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
        DRM_FORMAT_RGB888,
        DRM_FORMAT_RGB565,
 };
index 96fb5f63531482fcf688cb4d69b56f9eec2d719c..cc4ea5502d6c3ac57a32728aea7857d823e630bf 100644 (file)
@@ -429,15 +429,15 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
        }
 
        msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
-       if (!msm_host->byte_clk_src) {
-               ret = -ENODEV;
+       if (IS_ERR(msm_host->byte_clk_src)) {
+               ret = PTR_ERR(msm_host->byte_clk_src);
                pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
                goto exit;
        }
 
        msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
-       if (!msm_host->pixel_clk_src) {
-               ret = -ENODEV;
+       if (IS_ERR(msm_host->pixel_clk_src)) {
+               ret = PTR_ERR(msm_host->pixel_clk_src);
                pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
                goto exit;
        }
index c1abad8a8612683a237dfae4fec766c806c9caf8..dbfd2c006f7406ed31614dc3de1b412c8ee37b09 100644 (file)
@@ -1284,7 +1284,8 @@ static int add_gpu_components(struct device *dev,
        if (!np)
                return 0;
 
-       drm_of_component_match_add(dev, matchptr, compare_of, np);
+       if (of_device_is_available(np))
+               drm_of_component_match_add(dev, matchptr, compare_of, np);
 
        of_node_put(np);
 
@@ -1321,16 +1322,24 @@ static int msm_pdev_probe(struct platform_device *pdev)
 
        ret = add_gpu_components(&pdev->dev, &match);
        if (ret)
-               return ret;
+               goto fail;
 
        /* on all devices that I am aware of, iommu's which can map
         * any address the cpu can see are used:
         */
        ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
        if (ret)
-               return ret;
+               goto fail;
+
+       ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+       if (ret)
+               goto fail;
 
-       return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+       return 0;
+
+fail:
+       of_platform_depopulate(&pdev->dev);
+       return ret;
 }
 
 static int msm_pdev_remove(struct platform_device *pdev)
index f889d41a281fa6e093e3ac18484139643c9e3aff..10107e551fac35d5c18a1e92402df77c05174acc 100644 (file)
@@ -759,7 +759,8 @@ nv50_msto_enable(struct drm_encoder *encoder)
 
        slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
        r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
-       WARN_ON(!r);
+       if (!r)
+               DRM_DEBUG_KMS("Failed to allocate VCPI\n");
 
        if (!mstm->links++)
                nv50_outp_acquire(mstm->outp);
@@ -1516,7 +1517,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
                        nv_encoder->aux = aux;
                }
 
-               if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
+               if (nv_connector->type != DCB_CONNECTOR_eDP &&
+                   (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
                    ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
                        ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
                                            nv_connector->base.base.id,
index d81a99bb2ac319cbfad9ea72edf60618dd421f3b..b041ffb3af27049c8a4df327b9d51657eefe22e1 100644 (file)
@@ -169,14 +169,34 @@ nv50_head_atomic_check_view(struct nv50_head_atom *armh,
         */
        switch (mode) {
        case DRM_MODE_SCALE_CENTER:
-               asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
-               asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
-               /* fall-through */
+               /* NOTE: This will cause scaling when the input is
+                * larger than the output.
+                */
+               asyh->view.oW = min(asyh->view.iW, asyh->view.oW);
+               asyh->view.oH = min(asyh->view.iH, asyh->view.oH);
+               break;
        case DRM_MODE_SCALE_ASPECT:
-               if (asyh->view.oH < asyh->view.oW) {
+               /* Determine whether the scaling should be on width or on
+                * height. This is done by comparing the aspect ratios of the
+                * sizes. If the output AR is larger than input AR, that means
+                * we want to change the width (letterboxed on the
+                * left/right), otherwise on the height (letterboxed on the
+                * top/bottom).
+                *
+                * E.g. 4:3 (1.333) AR image displayed on a 16:10 (1.6) AR
+                * screen will have letterboxes on the left/right. However a
+                * 16:9 (1.777) AR image on that same screen will have
+                * letterboxes on the top/bottom.
+                *
+                * inputAR = iW / iH; outputAR = oW / oH
+                * outputAR > inputAR is equivalent to oW * iH > iW * oH
+                */
+               if (asyh->view.oW * asyh->view.iH > asyh->view.iW * asyh->view.oH) {
+                       /* Recompute output width, i.e. left/right letterbox */
                        u32 r = (asyh->view.iW << 19) / asyh->view.iH;
                        asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
                } else {
+                       /* Recompute output height, i.e. top/bottom letterbox */
                        u32 r = (asyh->view.iH << 19) / asyh->view.iW;
                        asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
                }
index 247f72cc4d10a4547309effb6b8904e9c0d60d49..fb0094fc55834aa7ba69090a517d38ad2b332001 100644 (file)
@@ -251,7 +251,7 @@ nouveau_conn_reset(struct drm_connector *connector)
                return;
 
        if (connector->state)
-               __drm_atomic_helper_connector_destroy_state(connector->state);
+               nouveau_conn_atomic_destroy_state(connector, connector->state);
        __drm_atomic_helper_connector_reset(connector, &asyc->state);
        asyc->dither.mode = DITHERING_MODE_AUTO;
        asyc->dither.depth = DITHERING_DEPTH_AUTO;
index 7143ea4611aa3ef7d809d1d1b90148a97b1c7c67..33a9fb5ac558577fe7879c2b760d5b5d1beef3aa 100644 (file)
@@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
                info->min     = min(info->base,
                                    info->base + info->step * info->vidmask);
                info->max     = nvbios_rd32(bios, volt + 0x0e);
+               if (!info->max)
+                       info->max = max(info->base, info->base + info->step * info->vidmask);
                break;
        case 0x50:
                info->min     = nvbios_rd32(bios, volt + 0x0a);
index b4e7404fe660e24937bde1e518533feecad376d9..a11637b0f6ccf43cc39c417dc64a8fb349815a22 100644 (file)
@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
                u8 *ptr = msg->buf;
 
                while (remaining) {
-                       u8 cnt = (remaining > 16) ? 16 : remaining;
-                       u8 cmd;
+                       u8 cnt, retries, cmd;
 
                        if (msg->flags & I2C_M_RD)
                                cmd = 1;
@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
                        if (mcnt || remaining > 16)
                                cmd |= 4; /* MOT */
 
-                       ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
-                       if (ret < 0) {
-                               nvkm_i2c_aux_release(aux);
-                               return ret;
+                       for (retries = 0, cnt = 0;
+                            retries < 32 && !cnt;
+                            retries++) {
+                               cnt = min_t(u8, remaining, 16);
+                               ret = aux->func->xfer(aux, true, cmd,
+                                                     msg->addr, ptr, &cnt);
+                               if (ret < 0)
+                                       goto out;
+                       }
+                       if (!cnt) {
+                               AUX_TRACE(aux, "no data after 32 retries");
+                               ret = -EIO;
+                               goto out;
                        }
 
                        ptr += cnt;
@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
                msg++;
        }
 
+       ret = num;
+out:
        nvkm_i2c_aux_release(aux);
-       return num;
+       return ret;
 }
 
 static u32
index ecacb22834d76d24c9ee656748715ca552ed10ae..719345074711144d13b85b7e75ccac2c8eeb0c5b 100644 (file)
@@ -184,6 +184,25 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
        return 0;
 }
 
+static int
+nvkm_i2c_preinit(struct nvkm_subdev *subdev)
+{
+       struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+       struct nvkm_i2c_bus *bus;
+       struct nvkm_i2c_pad *pad;
+
+       /*
+        * We init our i2c busses as early as possible, since they may be
+        * needed by the vbios init scripts on some cards
+        */
+       list_for_each_entry(pad, &i2c->pad, head)
+               nvkm_i2c_pad_init(pad);
+       list_for_each_entry(bus, &i2c->bus, head)
+               nvkm_i2c_bus_init(bus);
+
+       return 0;
+}
+
 static int
 nvkm_i2c_init(struct nvkm_subdev *subdev)
 {
@@ -238,6 +257,7 @@ nvkm_i2c_dtor(struct nvkm_subdev *subdev)
 static const struct nvkm_subdev_func
 nvkm_i2c = {
        .dtor = nvkm_i2c_dtor,
+       .preinit = nvkm_i2c_preinit,
        .init = nvkm_i2c_init,
        .fini = nvkm_i2c_fini,
        .intr = nvkm_i2c_intr,
index 39fe89e5312ea0c322b3ab0f3ce46b1a0694d737..ca12dfe4fec160fdb52e78176271f3c47901610f 100644 (file)
@@ -1101,7 +1101,7 @@ static const struct dss_features omap34xx_dss_feats = {
 
 static const struct dss_features omap3630_dss_feats = {
        .model                  =       DSS_MODEL_OMAP3,
-       .fck_div_max            =       32,
+       .fck_div_max            =       31,
        .fck_freq_max           =       173000000,
        .dss_fck_multiplier     =       1,
        .parent_clk_name        =       "dpll4_ck",
index 813ba42f27539ce94b85afc20295e411f6b4c123..52517a8ab9dee61cb72bab6c4b76e12edb2df141 100644 (file)
@@ -553,8 +553,9 @@ static void hdmi_core_audio_config(struct hdmi_core_data *core,
        }
 
        /* Set ACR clock divisor */
-       REG_FLD_MOD(av_base,
-                       HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
+       if (cfg->use_mclk)
+               REG_FLD_MOD(av_base, HDMI_CORE_AV_FREQ_SVAL,
+                           cfg->mclk_mode, 2, 0);
 
        r = hdmi_read_reg(av_base, HDMI_CORE_AV_ACR_CTRL);
        /*
index f311d581f2a8899b1c3f16a01515cfd6e97e2e5a..54d66a493c352f976bc28993aca520f8801a7b3b 100644 (file)
@@ -283,7 +283,13 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
                }
        }
 
-       min_scale = FRAC_16_16(1, 4);
+       /*
+        * Note: these are just sanity checks to filter out totally bad scaling
+        * factors. The real limits must be calculated case by case, and
+        * unfortunately we currently do those checks only at the commit
+        * phase in dispc.
+        */
+       min_scale = FRAC_16_16(1, 8);
        max_scale = FRAC_16_16(8, 1);
 
        ret = drm_atomic_helper_check_plane_state(state, crtc_state,
index 2c9c9722734f586d27af64d9df5ca156a46a31fd..9a2cb8aeab3a480d047933068210811c9fd140a9 100644 (file)
@@ -400,7 +400,13 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
 
        /* Look up the DSI host.  It needs to probe before we do. */
        endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+       if (!endpoint)
+               return -ENODEV;
+
        dsi_host_node = of_graph_get_remote_port_parent(endpoint);
+       if (!dsi_host_node)
+               goto error;
+
        host = of_find_mipi_dsi_host_by_node(dsi_host_node);
        of_node_put(dsi_host_node);
        if (!host) {
@@ -409,6 +415,9 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
        }
 
        info.node = of_graph_get_remote_port(endpoint);
+       if (!info.node)
+               goto error;
+
        of_node_put(endpoint);
 
        ts->dsi = mipi_dsi_device_register_full(host, &info);
@@ -429,6 +438,10 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
                return ret;
 
        return 0;
+
+error:
+       of_node_put(endpoint);
+       return -ENODEV;
 }
 
 static int rpi_touchscreen_remove(struct i2c_client *i2c)
index a3d5a08971c01969a59caf69a9cbdadd8cd041e7..02ddc58b84d3c0dc9978c9e8ac7b4c2622cadbd8 100644 (file)
@@ -436,6 +436,32 @@ static const struct panel_desc ampire_am800480r3tmqwa1h = {
        .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct display_timing santek_st0700i5y_rbslw_f_timing = {
+       .pixelclock = { 26400000, 33300000, 46800000 },
+       .hactive = { 800, 800, 800 },
+       .hfront_porch = { 16, 210, 354 },
+       .hback_porch = { 45, 36, 6 },
+       .hsync_len = { 1, 10, 40 },
+       .vactive = { 480, 480, 480 },
+       .vfront_porch = { 7, 22, 147 },
+       .vback_porch = { 22, 13, 3 },
+       .vsync_len = { 1, 10, 20 },
+       .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+               DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE
+};
+
+static const struct panel_desc armadeus_st0700_adapt = {
+       .timings = &santek_st0700i5y_rbslw_f_timing,
+       .num_timings = 1,
+       .bpc = 6,
+       .size = {
+               .width = 154,
+               .height = 86,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
 static const struct drm_display_mode auo_b101aw03_mode = {
        .clock = 51450,
        .hdisplay = 1024,
@@ -689,9 +715,9 @@ static const struct panel_desc auo_g133han01 = {
 static const struct display_timing auo_g185han01_timings = {
        .pixelclock = { 120000000, 144000000, 175000000 },
        .hactive = { 1920, 1920, 1920 },
-       .hfront_porch = { 18, 60, 74 },
-       .hback_porch = { 12, 44, 54 },
-       .hsync_len = { 10, 24, 32 },
+       .hfront_porch = { 36, 120, 148 },
+       .hback_porch = { 24, 88, 108 },
+       .hsync_len = { 20, 48, 64 },
        .vactive = { 1080, 1080, 1080 },
        .vfront_porch = { 6, 10, 40 },
        .vback_porch = { 2, 5, 20 },
@@ -2410,6 +2436,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "ampire,am800480r3tmqwa1h",
                .data = &ampire_am800480r3tmqwa1h,
+       }, {
+               .compatible = "armadeus,st0700-adapt",
+               .data = &armadeus_st0700_adapt,
        }, {
                .compatible = "auo,b101aw03",
                .data = &auo_b101aw03,
index 414642e5b7a3110353bafb3ad022f0d7fee4e78f..de656f55538392fef2ec2d74365b250ff97418d0 100644 (file)
@@ -751,7 +751,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
 
                radeon_encoder->output_csc = val;
 
-               if (connector->encoder->crtc) {
+               if (connector->encoder && connector->encoder->crtc) {
                        struct drm_crtc *crtc  = connector->encoder->crtc;
                        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 
index 2a7977a23b31cdf84c391ab86d55406ccc74d591..c26f09b47ecb2b1d305de20cbded4332047d69c0 100644 (file)
@@ -340,8 +340,39 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
 static int radeon_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
 {
+       unsigned long flags = 0;
        int ret;
 
+       if (!ent)
+               return -ENODEV; /* Avoid NULL-ptr deref in drm_get_pci_dev */
+
+       flags = ent->driver_data;
+
+       if (!radeon_si_support) {
+               switch (flags & RADEON_FAMILY_MASK) {
+               case CHIP_TAHITI:
+               case CHIP_PITCAIRN:
+               case CHIP_VERDE:
+               case CHIP_OLAND:
+               case CHIP_HAINAN:
+                       dev_info(&pdev->dev,
+                                "SI support disabled by module param\n");
+                       return -ENODEV;
+               }
+       }
+       if (!radeon_cik_support) {
+               switch (flags & RADEON_FAMILY_MASK) {
+               case CHIP_KAVERI:
+               case CHIP_BONAIRE:
+               case CHIP_HAWAII:
+               case CHIP_KABINI:
+               case CHIP_MULLINS:
+                       dev_info(&pdev->dev,
+                                "CIK support disabled by module param\n");
+                       return -ENODEV;
+               }
+       }
+
        if (vga_switcheroo_client_probe_defer(pdev))
                return -EPROBE_DEFER;
 
index 6a8fb6fd183c3fc80a6b557fba5e4751307f9b53..3ff835767ac58fbab6526d7cb4df6ab7b53b217a 100644 (file)
@@ -95,31 +95,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
        struct radeon_device *rdev;
        int r, acpi_status;
 
-       if (!radeon_si_support) {
-               switch (flags & RADEON_FAMILY_MASK) {
-               case CHIP_TAHITI:
-               case CHIP_PITCAIRN:
-               case CHIP_VERDE:
-               case CHIP_OLAND:
-               case CHIP_HAINAN:
-                       dev_info(dev->dev,
-                                "SI support disabled by module param\n");
-                       return -ENODEV;
-               }
-       }
-       if (!radeon_cik_support) {
-               switch (flags & RADEON_FAMILY_MASK) {
-               case CHIP_KAVERI:
-               case CHIP_BONAIRE:
-               case CHIP_HAWAII:
-               case CHIP_KABINI:
-               case CHIP_MULLINS:
-                       dev_info(dev->dev,
-                                "CIK support disabled by module param\n");
-                       return -ENODEV;
-               }
-       }
-
        rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
        if (rdev == NULL) {
                return -ENOMEM;
index 341572184b17f7403bc47ef21a0963d041fa5e6c..0ccc76217ee4e08c31993438da1e7d600d131053 100644 (file)
@@ -27,9 +27,7 @@ config ROCKCHIP_ANALOGIX_DP
 
 config ROCKCHIP_CDN_DP
         bool "Rockchip cdn DP"
-       depends on DRM_ROCKCHIP
-       select EXTCON
-       select DRM_CDNS_MHDP
+       depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
         help
          This selects support for Rockchip SoC specific extensions
          for the cdn DP driver. If you want to enable Dp on
index f2d462779186f9e44f34990d51eb87e47b5a26a1..a314e2109e76ce532b901886579e6f63a9c55b63 100644 (file)
@@ -9,7 +9,7 @@ rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
 rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
 
 rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
-rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o
+rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
 rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
 rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
 rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
index 080f0535219502306774ea799c10cc30793006ca..6a4da3a0ff1c3f8c4fb765d6d03fcf3c0428200c 100644 (file)
@@ -436,7 +436,7 @@ static int rockchip_dp_resume(struct device *dev)
 
 static const struct dev_pm_ops rockchip_dp_pm_ops = {
 #ifdef CONFIG_PM_SLEEP
-       .suspend = rockchip_dp_suspend,
+       .suspend_late = rockchip_dp_suspend,
        .resume_early = rockchip_dp_resume,
 #endif
 };
index a5a668f1912b87d96fda7980045e9598b91e50a4..8ad0d773dc33a63c09a8bc246b4ceb7920e10f94 100644 (file)
 #include <sound/hdmi-codec.h>
 
 #include "cdn-dp-core.h"
+#include "cdn-dp-reg.h"
 #include "rockchip_drm_vop.h"
 
 #define connector_to_dp(c) \
-               container_of(c, struct cdn_dp_device, mhdp.connector.base)
+               container_of(c, struct cdn_dp_device, connector)
 
 #define encoder_to_dp(c) \
                container_of(c, struct cdn_dp_device, encoder)
@@ -69,18 +70,17 @@ MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);
 static int cdn_dp_grf_write(struct cdn_dp_device *dp,
                            unsigned int reg, unsigned int val)
 {
-       struct device *dev = dp->mhdp.dev;
        int ret;
 
        ret = clk_prepare_enable(dp->grf_clk);
        if (ret) {
-               DRM_DEV_ERROR(dev, "Failed to prepare_enable grf clock\n");
+               DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
                return ret;
        }
 
        ret = regmap_write(dp->grf, reg, val);
        if (ret) {
-               DRM_DEV_ERROR(dev, "Could not write to GRF: %d\n", ret);
+               DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
                return ret;
        }
 
@@ -91,25 +91,24 @@ static int cdn_dp_grf_write(struct cdn_dp_device *dp,
 
 static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
 {
-       struct device *dev = dp->mhdp.dev;
        int ret;
        unsigned long rate;
 
        ret = clk_prepare_enable(dp->pclk);
        if (ret < 0) {
-               DRM_DEV_ERROR(dev, "cannot enable dp pclk %d\n", ret);
+               DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
                goto err_pclk;
        }
 
        ret = clk_prepare_enable(dp->core_clk);
        if (ret < 0) {
-               DRM_DEV_ERROR(dev, "cannot enable core_clk %d\n", ret);
+               DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
                goto err_core_clk;
        }
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_get_sync(dp->dev);
        if (ret < 0) {
-               DRM_DEV_ERROR(dev, "cannot get pm runtime %d\n", ret);
+               DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
                goto err_pm_runtime_get;
        }
 
@@ -122,18 +121,18 @@ static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
 
        rate = clk_get_rate(dp->core_clk);
        if (!rate) {
-               DRM_DEV_ERROR(dev, "get clk rate failed\n");
+               DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
                ret = -EINVAL;
                goto err_set_rate;
        }
 
-       cdns_mhdp_set_fw_clk(&dp->mhdp, rate);
-       cdns_mhdp_clock_reset(&dp->mhdp);
+       cdn_dp_set_fw_clk(dp, rate);
+       cdn_dp_clock_reset(dp);
 
        return 0;
 
 err_set_rate:
-       pm_runtime_put(dev);
+       pm_runtime_put(dp->dev);
 err_pm_runtime_get:
        clk_disable_unprepare(dp->core_clk);
 err_core_clk:
@@ -144,7 +143,7 @@ err_pclk:
 
 static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
 {
-       pm_runtime_put_sync(dp->mhdp.dev);
+       pm_runtime_put_sync(dp->dev);
        clk_disable_unprepare(dp->pclk);
        clk_disable_unprepare(dp->core_clk);
 }
@@ -177,7 +176,7 @@ static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
        u8 value;
 
        *sink_count = 0;
-       ret = cdns_mhdp_dpcd_read(&dp->mhdp, DP_SINK_COUNT, &value, 1);
+       ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
        if (ret)
                return ret;
 
@@ -201,13 +200,12 @@ static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
 
 static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
 {
-       struct device *dev = dp->mhdp.dev;
        unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
        struct cdn_dp_port *port;
        u8 sink_count = 0;
 
        if (dp->active_port < 0 || dp->active_port >= dp->ports) {
-               DRM_DEV_ERROR(dev, "active_port is wrong!\n");
+               DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
                return false;
        }
 
@@ -229,7 +227,7 @@ static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
                usleep_range(5000, 10000);
        }
 
-       DRM_DEV_ERROR(dev, "Get sink capability timed out\n");
+       DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
        return false;
 }
 
@@ -271,8 +269,7 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
        mutex_lock(&dp->lock);
        edid = dp->edid;
        if (edid) {
-               DRM_DEV_DEBUG_KMS(dp->mhdp.dev,
-                                 "got edid: width[%d] x height[%d]\n",
+               DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
                                  edid->width_cm, edid->height_cm);
 
                dp->sink_has_audio = drm_detect_monitor_audio(edid);
@@ -290,8 +287,7 @@ static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
                                       struct drm_display_mode *mode)
 {
        struct cdn_dp_device *dp = connector_to_dp(connector);
-       struct drm_display_info *display_info =
-               &dp->mhdp.connector.base.display_info;
+       struct drm_display_info *display_info = &dp->connector.display_info;
        u32 requested, actual, rate, sink_max, source_max = 0;
        u8 lanes, bpc;
 
@@ -317,7 +313,7 @@ static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
        sink_max = drm_dp_max_lane_count(dp->dpcd);
        lanes = min(source_max, sink_max);
 
-       source_max = drm_dp_bw_code_to_link_rate(CDNS_DP_MAX_LINK_RATE);
+       source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
        sink_max = drm_dp_max_link_rate(dp->dpcd);
        rate = min(source_max, sink_max);
 
@@ -327,7 +323,7 @@ static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
        actual = actual * 8 / 10;
 
        if (requested > actual) {
-               DRM_DEV_DEBUG_KMS(dp->mhdp.dev,
+               DRM_DEV_DEBUG_KMS(dp->dev,
                                  "requested=%d, actual=%d, clock=%d\n",
                                  requested, actual, mode->clock);
                return MODE_CLOCK_HIGH;
@@ -347,29 +343,28 @@ static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
        const u32 *iram_data, *dram_data;
        const struct firmware *fw = dp->fw;
        const struct cdn_firmware_header *hdr;
-       struct device *dev = dp->mhdp.dev;
 
        hdr = (struct cdn_firmware_header *)fw->data;
        if (fw->size != le32_to_cpu(hdr->size_bytes)) {
-               DRM_DEV_ERROR(dev, "firmware is invalid\n");
+               DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
                return -EINVAL;
        }
 
        iram_data = (const u32 *)(fw->data + hdr->header_size);
        dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);
 
-       ret = cdns_mhdp_load_firmware(&dp->mhdp, iram_data, hdr->iram_size,
-                                     dram_data, hdr->dram_size);
+       ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
+                                  dram_data, hdr->dram_size);
        if (ret)
                return ret;
 
-       ret = cdns_mhdp_set_firmware_active(&dp->mhdp, true);
+       ret = cdn_dp_set_firmware_active(dp, true);
        if (ret) {
-               DRM_DEV_ERROR(dev, "active ucpu failed: %d\n", ret);
+               DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
                return ret;
        }
 
-       return cdns_mhdp_event_config(&dp->mhdp);
+       return cdn_dp_event_config(dp);
 }
 
 static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
@@ -379,29 +374,28 @@ static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
        if (!cdn_dp_check_sink_connection(dp))
                return -ENODEV;
 
-       ret = cdns_mhdp_dpcd_read(&dp->mhdp, DP_DPCD_REV, dp->dpcd,
-                                 DP_RECEIVER_CAP_SIZE);
+       ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
+                              DP_RECEIVER_CAP_SIZE);
        if (ret) {
-               DRM_DEV_ERROR(dp->mhdp.dev, "Failed to get caps %d\n", ret);
+               DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
                return ret;
        }
 
        kfree(dp->edid);
-       dp->edid = drm_do_get_edid(&dp->mhdp.connector.base,
-                                  cdns_mhdp_get_edid_block, &dp->mhdp);
+       dp->edid = drm_do_get_edid(&dp->connector,
+                                  cdn_dp_get_edid_block, dp);
        return 0;
 }
 
 static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
 {
-       struct device *dev = dp->mhdp.dev;
        union extcon_property_value property;
        int ret;
 
        if (!port->phy_enabled) {
                ret = phy_power_on(port->phy);
                if (ret) {
-                       DRM_DEV_ERROR(dev, "phy power on failed: %d\n",
+                       DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
                                      ret);
                        goto err_phy;
                }
@@ -411,28 +405,28 @@ static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
        ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
                               DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
        if (ret) {
-               DRM_DEV_ERROR(dev, "Failed to write HPD_SEL %d\n", ret);
+               DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
                goto err_power_on;
        }
 
-       ret = cdns_mhdp_get_hpd_status(&dp->mhdp);
+       ret = cdn_dp_get_hpd_status(dp);
        if (ret <= 0) {
                if (!ret)
-                       DRM_DEV_ERROR(dev, "hpd does not exist\n");
+                       DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
                goto err_power_on;
        }
 
        ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
                                  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
        if (ret) {
-               DRM_DEV_ERROR(dev, "get property failed\n");
+               DRM_DEV_ERROR(dp->dev, "get property failed\n");
                goto err_power_on;
        }
 
        port->lanes = cdn_dp_get_port_lanes(port);
-       ret = cdns_mhdp_set_host_cap(&dp->mhdp, port->lanes, property.intval);
+       ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
        if (ret) {
-               DRM_DEV_ERROR(dev, "set host capabilities failed: %d\n",
+               DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
                              ret);
                goto err_power_on;
        }
@@ -442,7 +436,7 @@ static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
 
 err_power_on:
        if (phy_power_off(port->phy))
-               DRM_DEV_ERROR(dev, "phy power off failed: %d", ret);
+               DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
        else
                port->phy_enabled = false;
 
@@ -460,8 +454,7 @@ static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
        if (port->phy_enabled) {
                ret = phy_power_off(port->phy);
                if (ret) {
-                       DRM_DEV_ERROR(dp->mhdp.dev,
-                                     "phy power off failed: %d", ret);
+                       DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
                        return ret;
                }
        }
@@ -485,16 +478,16 @@ static int cdn_dp_disable(struct cdn_dp_device *dp)
        ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
                               DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
        if (ret) {
-               DRM_DEV_ERROR(dp->mhdp.dev, "Failed to clear hpd sel %d\n",
+               DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
                              ret);
                return ret;
        }
 
-       cdns_mhdp_set_firmware_active(&dp->mhdp, false);
+       cdn_dp_set_firmware_active(dp, false);
        cdn_dp_clk_disable(dp);
        dp->active = false;
-       dp->mhdp.link.rate = 0;
-       dp->mhdp.link.num_lanes = 0;
+       dp->link.rate = 0;
+       dp->link.num_lanes = 0;
        if (!dp->connected) {
                kfree(dp->edid);
                dp->edid = NULL;
@@ -507,11 +500,11 @@ static int cdn_dp_enable(struct cdn_dp_device *dp)
 {
        int ret, i, lanes;
        struct cdn_dp_port *port;
-       struct device *dev = dp->mhdp.dev;
 
        port = cdn_dp_connected_port(dp);
        if (!port) {
-               DRM_DEV_ERROR(dev, "Can't enable without connection\n");
+               DRM_DEV_ERROR(dp->dev,
+                             "Can't enable without connection\n");
                return -ENODEV;
        }
 
@@ -524,7 +517,7 @@ static int cdn_dp_enable(struct cdn_dp_device *dp)
 
        ret = cdn_dp_firmware_init(dp);
        if (ret) {
-               DRM_DEV_ERROR(dp->mhdp.dev, "firmware init failed: %d", ret);
+               DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
                goto err_clk_disable;
        }
 
@@ -558,9 +551,8 @@ static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
                                    struct drm_display_mode *adjusted)
 {
        struct cdn_dp_device *dp = encoder_to_dp(encoder);
-       struct drm_display_info *display_info =
-               &dp->mhdp.connector.base.display_info;
-       struct video_info *video = &dp->mhdp.video_info;
+       struct drm_display_info *display_info = &dp->connector.display_info;
+       struct video_info *video = &dp->video_info;
 
        switch (display_info->bpc) {
        case 10:
@@ -578,7 +570,7 @@ static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
        video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
        video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
 
-       memcpy(&dp->mhdp.mode, adjusted, sizeof(*mode));
+       memcpy(&dp->mode, adjusted, sizeof(*mode));
 }
 
 static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
@@ -587,11 +579,11 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
        struct cdn_dp_port *port = cdn_dp_connected_port(dp);
        u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);
 
-       if (!port || !dp->mhdp.link.rate || !dp->mhdp.link.num_lanes)
+       if (!port || !dp->link.rate || !dp->link.num_lanes)
                return false;
 
-       if (cdns_mhdp_dpcd_read(&dp->mhdp, DP_LANE0_1_STATUS, link_status,
-                               DP_LINK_STATUS_SIZE)) {
+       if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
+                            DP_LINK_STATUS_SIZE)) {
                DRM_ERROR("Failed to get link status\n");
                return false;
        }
@@ -603,16 +595,15 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
 static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 {
        struct cdn_dp_device *dp = encoder_to_dp(encoder);
-       struct device *dev = dp->mhdp.dev;
        int ret, val;
 
-       ret = drm_of_encoder_active_endpoint_id(dev->of_node, encoder);
+       ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
        if (ret < 0) {
-               DRM_DEV_ERROR(dev, "Could not get vop id, %d", ret);
+               DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
                return;
        }
 
-       DRM_DEV_DEBUG_KMS(dev, "vop %s output to cdn-dp\n",
+       DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
                          (ret) ? "LIT" : "BIG");
        if (ret)
                val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
@@ -627,33 +618,33 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 
        ret = cdn_dp_enable(dp);
        if (ret) {
-               DRM_DEV_ERROR(dev, "Failed to enable encoder %d\n",
+               DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
                              ret);
                goto out;
        }
        if (!cdn_dp_check_link_status(dp)) {
-               ret = cdns_mhdp_train_link(&dp->mhdp);
+               ret = cdn_dp_train_link(dp);
                if (ret) {
-                       DRM_DEV_ERROR(dev, "Failed link train %d\n", ret);
+                       DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
                        goto out;
                }
        }
 
-       ret = cdns_mhdp_set_video_status(&dp->mhdp, CONTROL_VIDEO_IDLE);
+       ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
        if (ret) {
-               DRM_DEV_ERROR(dev, "Failed to idle video %d\n", ret);
+               DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
                goto out;
        }
 
-       ret = cdns_mhdp_config_video(&dp->mhdp);
+       ret = cdn_dp_config_video(dp);
        if (ret) {
-               DRM_DEV_ERROR(dev, "Failed to config video %d\n", ret);
+               DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
                goto out;
        }
 
-       ret = cdns_mhdp_set_video_status(&dp->mhdp, CONTROL_VIDEO_VALID);
+       ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
        if (ret) {
-               DRM_DEV_ERROR(dev, "Failed to valid video %d\n", ret);
+               DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
                goto out;
        }
 out:
@@ -669,8 +660,7 @@ static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
        if (dp->active) {
                ret = cdn_dp_disable(dp);
                if (ret) {
-                       DRM_DEV_ERROR(dp->mhdp.dev,
-                                     "Failed to disable encoder %d\n",
+                       DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
                                      ret);
                }
        }
@@ -714,7 +704,7 @@ static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
 
 static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
 {
-       struct device *dev = dp->mhdp.dev;
+       struct device *dev = dp->dev;
        struct device_node *np = dev->of_node;
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *res;
@@ -726,10 +716,10 @@ static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       dp->mhdp.regs = devm_ioremap_resource(dev, res);
-       if (IS_ERR(dp->mhdp.regs)) {
+       dp->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(dp->regs)) {
                DRM_DEV_ERROR(dev, "ioremap reg failed\n");
-               return PTR_ERR(dp->mhdp.regs);
+               return PTR_ERR(dp->regs);
        }
 
        dp->core_clk = devm_clk_get(dev, "core-clk");
@@ -744,10 +734,10 @@ static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
                return PTR_ERR(dp->pclk);
        }
 
-       dp->mhdp.spdif_clk = devm_clk_get(dev, "spdif");
-       if (IS_ERR(dp->mhdp.spdif_clk)) {
+       dp->spdif_clk = devm_clk_get(dev, "spdif");
+       if (IS_ERR(dp->spdif_clk)) {
                DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
-               return PTR_ERR(dp->mhdp.spdif_clk);
+               return PTR_ERR(dp->spdif_clk);
        }
 
        dp->grf_clk = devm_clk_get(dev, "grf");
@@ -756,10 +746,10 @@ static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
                return PTR_ERR(dp->grf_clk);
        }
 
-       dp->mhdp.spdif_rst = devm_reset_control_get(dev, "spdif");
-       if (IS_ERR(dp->mhdp.spdif_rst)) {
+       dp->spdif_rst = devm_reset_control_get(dev, "spdif");
+       if (IS_ERR(dp->spdif_rst)) {
                DRM_DEV_ERROR(dev, "no spdif reset control found\n");
-               return PTR_ERR(dp->mhdp.spdif_rst);
+               return PTR_ERR(dp->spdif_rst);
        }
 
        dp->dptx_rst = devm_reset_control_get(dev, "dptx");
@@ -814,9 +804,9 @@ static int cdn_dp_audio_hw_params(struct device *dev,  void *data,
                goto out;
        }
 
-       ret = cdns_mhdp_audio_config(&dp->mhdp, &audio);
+       ret = cdn_dp_audio_config(dp, &audio);
        if (!ret)
-               dp->mhdp.audio_info = audio;
+               dp->audio_info = audio;
 
 out:
        mutex_unlock(&dp->lock);
@@ -832,9 +822,9 @@ static void cdn_dp_audio_shutdown(struct device *dev, void *data)
        if (!dp->active)
                goto out;
 
-       ret = cdns_mhdp_audio_stop(&dp->mhdp, &dp->mhdp.audio_info);
+       ret = cdn_dp_audio_stop(dp, &dp->audio_info);
        if (!ret)
-               dp->mhdp.audio_info.format = AFMT_UNUSED;
+               dp->audio_info.format = AFMT_UNUSED;
 out:
        mutex_unlock(&dp->lock);
 }
@@ -851,7 +841,7 @@ static int cdn_dp_audio_digital_mute(struct device *dev, void *data,
                goto out;
        }
 
-       ret = cdns_mhdp_audio_mute(&dp->mhdp, enable);
+       ret = cdn_dp_audio_mute(dp, enable);
 
 out:
        mutex_unlock(&dp->lock);
@@ -863,8 +853,7 @@ static int cdn_dp_audio_get_eld(struct device *dev, void *data,
 {
        struct cdn_dp_device *dp = dev_get_drvdata(dev);
 
-       memcpy(buf, dp->mhdp.connector.base.eld,
-              min(sizeof(dp->mhdp.connector.base.eld), len));
+       memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));
 
        return 0;
 }
@@ -886,11 +875,11 @@ static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
                .max_i2s_channels = 8,
        };
 
-       dp->mhdp.audio_pdev = platform_device_register_data(
-                             dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
-                             &codec_data, sizeof(codec_data));
+       dp->audio_pdev = platform_device_register_data(
+                        dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+                        &codec_data, sizeof(codec_data));
 
-       return PTR_ERR_OR_ZERO(dp->mhdp.audio_pdev);
+       return PTR_ERR_OR_ZERO(dp->audio_pdev);
 }
 
 static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
@@ -898,7 +887,6 @@ static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
        int ret;
        unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
        unsigned long sleep = 1000;
-       struct device *dev = dp->mhdp.dev;
 
        WARN_ON(!mutex_is_locked(&dp->lock));
 
@@ -909,13 +897,13 @@ static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
        mutex_unlock(&dp->lock);
 
        while (time_before(jiffies, timeout)) {
-               ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dev);
+               ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
                if (ret == -ENOENT) {
                        msleep(sleep);
                        sleep *= 2;
                        continue;
                } else if (ret) {
-                       DRM_DEV_ERROR(dev,
+                       DRM_DEV_ERROR(dp->dev,
                                      "failed to request firmware: %d\n", ret);
                        goto out;
                }
@@ -925,7 +913,7 @@ static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
                goto out;
        }
 
-       DRM_DEV_ERROR(dev, "Timed out trying to load firmware\n");
+       DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
        ret = -ETIMEDOUT;
 out:
        mutex_lock(&dp->lock);
@@ -936,9 +924,8 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
 {
        struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
                                                event_work);
-       struct drm_connector *connector = &dp->mhdp.connector.base;
+       struct drm_connector *connector = &dp->connector;
        enum drm_connector_status old_status;
-       struct device *dev = dp->mhdp.dev;
 
        int ret;
 
@@ -955,45 +942,44 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
 
        /* Not connected, notify userspace to disable the block */
        if (!cdn_dp_connected_port(dp)) {
-               DRM_DEV_INFO(dev, "Not connected. Disabling cdn\n");
+               DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
                dp->connected = false;
 
        /* Connected but not enabled, enable the block */
        } else if (!dp->active) {
-               DRM_DEV_INFO(dev, "Connected, not enabled. Enabling cdn\n");
+               DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
                ret = cdn_dp_enable(dp);
                if (ret) {
-                       DRM_DEV_ERROR(dev, "Enable dp failed %d\n", ret);
+                       DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
                        dp->connected = false;
                }
 
        /* Enabled and connected to a dongle without a sink, notify userspace */
        } else if (!cdn_dp_check_sink_connection(dp)) {
-               DRM_DEV_INFO(dev, "Connected without sink. Assert hpd\n");
+               DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
                dp->connected = false;
 
        /* Enabled and connected with a sink, re-train if requested */
        } else if (!cdn_dp_check_link_status(dp)) {
-               unsigned int rate = dp->mhdp.link.rate;
-               unsigned int lanes = dp->mhdp.link.num_lanes;
-               struct drm_display_mode *mode = &dp->mhdp.mode;
+               unsigned int rate = dp->link.rate;
+               unsigned int lanes = dp->link.num_lanes;
+               struct drm_display_mode *mode = &dp->mode;
 
-               DRM_DEV_INFO(dev, "Connected with sink. Re-train link\n");
-               ret = cdns_mhdp_train_link(&dp->mhdp);
+               DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
+               ret = cdn_dp_train_link(dp);
                if (ret) {
                        dp->connected = false;
-                       DRM_DEV_ERROR(dev, "Train link failed %d\n", ret);
+                       DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
                        goto out;
                }
 
                /* If training result is changed, update the video config */
                if (mode->clock &&
-                   (rate != dp->mhdp.link.rate ||
-                    lanes != dp->mhdp.link.num_lanes)) {
-                       ret = cdns_mhdp_config_video(&dp->mhdp);
+                   (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
+                       ret = cdn_dp_config_video(dp);
                        if (ret) {
                                dp->connected = false;
-                               DRM_DEV_ERROR(dev,
+                               DRM_DEV_ERROR(dp->dev,
                                              "Failed to config video %d\n",
                                              ret);
                        }
@@ -1062,7 +1048,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
 
        drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);
 
-       connector = &dp->mhdp.connector.base;
+       connector = &dp->connector;
        connector->polled = DRM_CONNECTOR_POLL_HPD;
        connector->dpms = DRM_MODE_DPMS_OFF;
 
@@ -1086,7 +1072,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
                port = dp->port[i];
 
                port->event_nb.notifier_call = cdn_dp_pd_event;
-               ret = devm_extcon_register_notifier(dp->mhdp.dev, port->extcon,
+               ret = devm_extcon_register_notifier(dp->dev, port->extcon,
                                                    EXTCON_DISP_DP,
                                                    &port->event_nb);
                if (ret) {
@@ -1113,7 +1099,7 @@ static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
 {
        struct cdn_dp_device *dp = dev_get_drvdata(dev);
        struct drm_encoder *encoder = &dp->encoder;
-       struct drm_connector *connector = &dp->mhdp.connector.base;
+       struct drm_connector *connector = &dp->connector;
 
        cancel_work_sync(&dp->event_work);
        cdn_dp_encoder_disable(encoder);
@@ -1173,7 +1159,7 @@ static int cdn_dp_probe(struct platform_device *pdev)
        dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
        if (!dp)
                return -ENOMEM;
-       dp->mhdp.dev = dev;
+       dp->dev = dev;
 
        match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
        dp_data = (struct cdn_dp_data *)match->data;
@@ -1217,8 +1203,8 @@ static int cdn_dp_remove(struct platform_device *pdev)
 {
        struct cdn_dp_device *dp = platform_get_drvdata(pdev);
 
-       platform_device_unregister(dp->mhdp.audio_pdev);
-       cdn_dp_suspend(dp->mhdp.dev);
+       platform_device_unregister(dp->audio_pdev);
+       cdn_dp_suspend(dp->dev);
        component_del(&pdev->dev, &cdn_dp_component_ops);
 
        return 0;
@@ -1228,7 +1214,7 @@ static void cdn_dp_shutdown(struct platform_device *pdev)
 {
        struct cdn_dp_device *dp = platform_get_drvdata(pdev);
 
-       cdn_dp_suspend(dp->mhdp.dev);
+       cdn_dp_suspend(dp->dev);
 }
 
 static const struct dev_pm_ops cdn_dp_pm_ops = {
index da66d1e1f2af94be382dabed1d908ea68771d3ff..f57e296401b8906b5bd0fb0fb259cd92d391009c 100644 (file)
@@ -15,7 +15,6 @@
 #ifndef _CDN_DP_CORE_H
 #define _CDN_DP_CORE_H
 
-#include <drm/bridge/cdns-mhdp-common.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
 
 #define MAX_PHY                2
 
+enum audio_format {
+       AFMT_I2S = 0,
+       AFMT_SPDIF = 1,
+       AFMT_UNUSED,
+};
+
+struct audio_info {
+       enum audio_format format;
+       int sample_rate;
+       int channels;
+       int sample_width;
+};
+
+enum vic_pxl_encoding_format {
+       PXL_RGB = 0x1,
+       YCBCR_4_4_4 = 0x2,
+       YCBCR_4_2_2 = 0x4,
+       YCBCR_4_2_0 = 0x8,
+       Y_ONLY = 0x10,
+};
+
+struct video_info {
+       bool h_sync_polarity;
+       bool v_sync_polarity;
+       bool interlaced;
+       int color_depth;
+       enum vic_pxl_encoding_format color_fmt;
+};
+
 struct cdn_firmware_header {
        u32 size_bytes; /* size of the entire header+image(s) in bytes */
        u32 header_size; /* size of just the header in bytes */
@@ -42,9 +70,12 @@ struct cdn_dp_port {
 };
 
 struct cdn_dp_device {
-       struct cdns_mhdp_device mhdp;
+       struct device *dev;
        struct drm_device *drm_dev;
+       struct drm_connector connector;
        struct drm_encoder encoder;
+       struct drm_display_mode mode;
+       struct platform_device *audio_pdev;
        struct work_struct event_work;
        struct edid *edid;
 
@@ -54,15 +85,22 @@ struct cdn_dp_device {
        bool suspended;
 
        const struct firmware *fw;      /* cdn dp firmware */
+       unsigned int fw_version;        /* cdn fw version */
        bool fw_loaded;
 
+       void __iomem *regs;
        struct regmap *grf;
        struct clk *core_clk;
        struct clk *pclk;
+       struct clk *spdif_clk;
        struct clk *grf_clk;
+       struct reset_control *spdif_rst;
        struct reset_control *dptx_rst;
        struct reset_control *apb_rst;
        struct reset_control *core_rst;
+       struct audio_info audio_info;
+       struct video_info video_info;
+       struct drm_dp_link link;
        struct cdn_dp_port *port[MAX_PHY];
        u8 ports;
        u8 lanes;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
new file mode 100644 (file)
index 0000000..6c8b14f
--- /dev/null
@@ -0,0 +1,969 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Chris Zhong <zyw@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#include "cdn-dp-core.h"
+#include "cdn-dp-reg.h"
+
+#define CDN_DP_SPDIF_CLK               200000000
+#define FW_ALIVE_TIMEOUT_US            1000000
+#define MAILBOX_RETRY_US               1000
+#define MAILBOX_TIMEOUT_US             5000000
+#define LINK_TRAINING_RETRY_MS         20
+#define LINK_TRAINING_TIMEOUT_MS       500
+
+void cdn_dp_set_fw_clk(struct cdn_dp_device *dp, unsigned long clk)
+{
+       writel(clk / 1000000, dp->regs + SW_CLK_H);
+}
+
+void cdn_dp_clock_reset(struct cdn_dp_device *dp)
+{
+       u32 val;
+
+       val = DPTX_FRMR_DATA_CLK_RSTN_EN |
+             DPTX_FRMR_DATA_CLK_EN |
+             DPTX_PHY_DATA_RSTN_EN |
+             DPTX_PHY_DATA_CLK_EN |
+             DPTX_PHY_CHAR_RSTN_EN |
+             DPTX_PHY_CHAR_CLK_EN |
+             SOURCE_AUX_SYS_CLK_RSTN_EN |
+             SOURCE_AUX_SYS_CLK_EN |
+             DPTX_SYS_CLK_RSTN_EN |
+             DPTX_SYS_CLK_EN |
+             CFG_DPTX_VIF_CLK_RSTN_EN |
+             CFG_DPTX_VIF_CLK_EN;
+       writel(val, dp->regs + SOURCE_DPTX_CAR);
+
+       val = SOURCE_PHY_RSTN_EN | SOURCE_PHY_CLK_EN;
+       writel(val, dp->regs + SOURCE_PHY_CAR);
+
+       val = SOURCE_PKT_SYS_RSTN_EN |
+             SOURCE_PKT_SYS_CLK_EN |
+             SOURCE_PKT_DATA_RSTN_EN |
+             SOURCE_PKT_DATA_CLK_EN;
+       writel(val, dp->regs + SOURCE_PKT_CAR);
+
+       val = SPDIF_CDR_CLK_RSTN_EN |
+             SPDIF_CDR_CLK_EN |
+             SOURCE_AIF_SYS_RSTN_EN |
+             SOURCE_AIF_SYS_CLK_EN |
+             SOURCE_AIF_CLK_RSTN_EN |
+             SOURCE_AIF_CLK_EN;
+       writel(val, dp->regs + SOURCE_AIF_CAR);
+
+       val = SOURCE_CIPHER_SYSTEM_CLK_RSTN_EN |
+             SOURCE_CIPHER_SYS_CLK_EN |
+             SOURCE_CIPHER_CHAR_CLK_RSTN_EN |
+             SOURCE_CIPHER_CHAR_CLK_EN;
+       writel(val, dp->regs + SOURCE_CIPHER_CAR);
+
+       val = SOURCE_CRYPTO_SYS_CLK_RSTN_EN |
+             SOURCE_CRYPTO_SYS_CLK_EN;
+       writel(val, dp->regs + SOURCE_CRYPTO_CAR);
+
+       /* enable Mailbox and PIF interrupt */
+       writel(0, dp->regs + APB_INT_MASK);
+}
+
+static int cdn_dp_mailbox_read(struct cdn_dp_device *dp)
+{
+       int val, ret;
+
+       ret = readx_poll_timeout(readl, dp->regs + MAILBOX_EMPTY_ADDR,
+                                val, !val, MAILBOX_RETRY_US,
+                                MAILBOX_TIMEOUT_US);
+       if (ret < 0)
+               return ret;
+
+       return readl(dp->regs + MAILBOX0_RD_DATA) & 0xff;
+}
+
+static int cdp_dp_mailbox_write(struct cdn_dp_device *dp, u8 val)
+{
+       int ret, full;
+
+       ret = readx_poll_timeout(readl, dp->regs + MAILBOX_FULL_ADDR,
+                                full, !full, MAILBOX_RETRY_US,
+                                MAILBOX_TIMEOUT_US);
+       if (ret < 0)
+               return ret;
+
+       writel(val, dp->regs + MAILBOX0_WR_DATA);
+
+       return 0;
+}
+
+static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
+                                          u8 module_id, u8 opcode,
+                                          u16 req_size)
+{
+       u32 mbox_size, i;
+       u8 header[4];
+       int ret;
+
+       /* read the header of the message */
+       for (i = 0; i < 4; i++) {
+               ret = cdn_dp_mailbox_read(dp);
+               if (ret < 0)
+                       return ret;
+
+               header[i] = ret;
+       }
+
+       mbox_size = (header[2] << 8) | header[3];
+
+       if (opcode != header[0] || module_id != header[1] ||
+           req_size != mbox_size) {
+               /*
+                * If the message in mailbox is not what we want, we need to
+                * clear the mailbox by reading its contents.
+                */
+               for (i = 0; i < mbox_size; i++)
+                       if (cdn_dp_mailbox_read(dp) < 0)
+                               break;
+
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
+                                      u8 *buff, u16 buff_size)
+{
+       u32 i;
+       int ret;
+
+       for (i = 0; i < buff_size; i++) {
+               ret = cdn_dp_mailbox_read(dp);
+               if (ret < 0)
+                       return ret;
+
+               buff[i] = ret;
+       }
+
+       return 0;
+}
+
+static int cdn_dp_mailbox_send(struct cdn_dp_device *dp, u8 module_id,
+                              u8 opcode, u16 size, u8 *message)
+{
+       u8 header[4];
+       int ret, i;
+
+       header[0] = opcode;
+       header[1] = module_id;
+       header[2] = (size >> 8) & 0xff;
+       header[3] = size & 0xff;
+
+       for (i = 0; i < 4; i++) {
+               ret = cdp_dp_mailbox_write(dp, header[i]);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < size; i++) {
+               ret = cdp_dp_mailbox_write(dp, message[i]);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int cdn_dp_reg_write(struct cdn_dp_device *dp, u16 addr, u32 val)
+{
+       u8 msg[6];
+
+       msg[0] = (addr >> 8) & 0xff;
+       msg[1] = addr & 0xff;
+       msg[2] = (val >> 24) & 0xff;
+       msg[3] = (val >> 16) & 0xff;
+       msg[4] = (val >> 8) & 0xff;
+       msg[5] = val & 0xff;
+       return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_REGISTER,
+                                  sizeof(msg), msg);
+}
+
+static int cdn_dp_reg_write_bit(struct cdn_dp_device *dp, u16 addr,
+                               u8 start_bit, u8 bits_no, u32 val)
+{
+       u8 field[8];
+
+       field[0] = (addr >> 8) & 0xff;
+       field[1] = addr & 0xff;
+       field[2] = start_bit;
+       field[3] = bits_no;
+       field[4] = (val >> 24) & 0xff;
+       field[5] = (val >> 16) & 0xff;
+       field[6] = (val >> 8) & 0xff;
+       field[7] = val & 0xff;
+
+       return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_FIELD,
+                                  sizeof(field), field);
+}
+
+int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len)
+{
+       u8 msg[5], reg[5];
+       int ret;
+
+       msg[0] = (len >> 8) & 0xff;
+       msg[1] = len & 0xff;
+       msg[2] = (addr >> 16) & 0xff;
+       msg[3] = (addr >> 8) & 0xff;
+       msg[4] = addr & 0xff;
+       ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_DPCD,
+                                 sizeof(msg), msg);
+       if (ret)
+               goto err_dpcd_read;
+
+       ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+                                             DPTX_READ_DPCD,
+                                             sizeof(reg) + len);
+       if (ret)
+               goto err_dpcd_read;
+
+       ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
+       if (ret)
+               goto err_dpcd_read;
+
+       ret = cdn_dp_mailbox_read_receive(dp, data, len);
+
+err_dpcd_read:
+       return ret;
+}
+
+int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value)
+{
+       u8 msg[6], reg[5];
+       int ret;
+
+       msg[0] = 0;
+       msg[1] = 1;
+       msg[2] = (addr >> 16) & 0xff;
+       msg[3] = (addr >> 8) & 0xff;
+       msg[4] = addr & 0xff;
+       msg[5] = value;
+       ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_DPCD,
+                                 sizeof(msg), msg);
+       if (ret)
+               goto err_dpcd_write;
+
+       ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+                                             DPTX_WRITE_DPCD, sizeof(reg));
+       if (ret)
+               goto err_dpcd_write;
+
+       ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
+       if (ret)
+               goto err_dpcd_write;
+
+       if (addr != (reg[2] << 16 | reg[3] << 8 | reg[4]))
+               ret = -EINVAL;
+
+err_dpcd_write:
+       if (ret)
+               DRM_DEV_ERROR(dp->dev, "dpcd write failed: %d\n", ret);
+       return ret;
+}
+
+int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem,
+                        u32 i_size, const u32 *d_mem, u32 d_size)
+{
+       u32 reg;
+       int i, ret;
+
+       /* reset ucpu before loading firmware */
+       writel(APB_IRAM_PATH | APB_DRAM_PATH | APB_XT_RESET,
+              dp->regs + APB_CTRL);
+
+       for (i = 0; i < i_size; i += 4)
+               writel(*i_mem++, dp->regs + ADDR_IMEM + i);
+
+       for (i = 0; i < d_size; i += 4)
+               writel(*d_mem++, dp->regs + ADDR_DMEM + i);
+
+       /* un-reset ucpu */
+       writel(0, dp->regs + APB_CTRL);
+
+       /* check the keep alive register to make sure fw working */
+       ret = readx_poll_timeout(readl, dp->regs + KEEP_ALIVE,
+                                reg, reg, 2000, FW_ALIVE_TIMEOUT_US);
+       if (ret < 0) {
+               DRM_DEV_ERROR(dp->dev, "failed to loaded the FW reg = %x\n",
+                             reg);
+               return -EINVAL;
+       }
+
+       reg = readl(dp->regs + VER_L) & 0xff;
+       dp->fw_version = reg;
+       reg = readl(dp->regs + VER_H) & 0xff;
+       dp->fw_version |= reg << 8;
+       reg = readl(dp->regs + VER_LIB_L_ADDR) & 0xff;
+       dp->fw_version |= reg << 16;
+       reg = readl(dp->regs + VER_LIB_H_ADDR) & 0xff;
+       dp->fw_version |= reg << 24;
+
+       DRM_DEV_DEBUG(dp->dev, "firmware version: %x\n", dp->fw_version);
+
+       return 0;
+}
+
+int cdn_dp_set_firmware_active(struct cdn_dp_device *dp, bool enable)
+{
+       u8 msg[5];
+       int ret, i;
+
+       msg[0] = GENERAL_MAIN_CONTROL;
+       msg[1] = MB_MODULE_ID_GENERAL;
+       msg[2] = 0;
+       msg[3] = 1;
+       msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
+
+       for (i = 0; i < sizeof(msg); i++) {
+               ret = cdp_dp_mailbox_write(dp, msg[i]);
+               if (ret)
+                       goto err_set_firmware_active;
+       }
+
+       /* read the firmware state */
+       for (i = 0; i < sizeof(msg); i++)  {
+               ret = cdn_dp_mailbox_read(dp);
+               if (ret < 0)
+                       goto err_set_firmware_active;
+
+               msg[i] = ret;
+       }
+
+       ret = 0;
+
+err_set_firmware_active:
+       if (ret < 0)
+               DRM_DEV_ERROR(dp->dev, "set firmware active failed\n");
+       return ret;
+}
+
+int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip)
+{
+       u8 msg[8];
+       int ret;
+
+       msg[0] = CDN_DP_MAX_LINK_RATE;
+       msg[1] = lanes | SCRAMBLER_EN;
+       msg[2] = VOLTAGE_LEVEL_2;
+       msg[3] = PRE_EMPHASIS_LEVEL_3;
+       msg[4] = PTS1 | PTS2 | PTS3 | PTS4;
+       msg[5] = FAST_LT_NOT_SUPPORT;
+       msg[6] = flip ? LANE_MAPPING_FLIPPED : LANE_MAPPING_NORMAL;
+       msg[7] = ENHANCED;
+
+       ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX,
+                                 DPTX_SET_HOST_CAPABILITIES,
+                                 sizeof(msg), msg);
+       if (ret)
+               goto err_set_host_cap;
+
+       ret = cdn_dp_reg_write(dp, DP_AUX_SWAP_INVERSION_CONTROL,
+                              AUX_HOST_INVERT);
+
+err_set_host_cap:
+       if (ret)
+               DRM_DEV_ERROR(dp->dev, "set host cap failed: %d\n", ret);
+       return ret;
+}
+
+int cdn_dp_event_config(struct cdn_dp_device *dp)
+{
+       u8 msg[5];
+       int ret;
+
+       memset(msg, 0, sizeof(msg));
+
+       msg[0] = DPTX_EVENT_ENABLE_HPD | DPTX_EVENT_ENABLE_TRAINING;
+
+       ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_ENABLE_EVENT,
+                                 sizeof(msg), msg);
+       if (ret)
+               DRM_DEV_ERROR(dp->dev, "set event config failed: %d\n", ret);
+
+       return ret;
+}
+
+u32 cdn_dp_get_event(struct cdn_dp_device *dp)
+{
+       return readl(dp->regs + SW_EVENTS0);
+}
+
+int cdn_dp_get_hpd_status(struct cdn_dp_device *dp)
+{
+       u8 status;
+       int ret;
+
+       ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_HPD_STATE,
+                                 0, NULL);
+       if (ret)
+               goto err_get_hpd;
+
+       ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+                                             DPTX_HPD_STATE, sizeof(status));
+       if (ret)
+               goto err_get_hpd;
+
+       ret = cdn_dp_mailbox_read_receive(dp, &status, sizeof(status));
+       if (ret)
+               goto err_get_hpd;
+
+       return status;
+
+err_get_hpd:
+       DRM_DEV_ERROR(dp->dev, "get hpd status failed: %d\n", ret);
+       return ret;
+}
+
+int cdn_dp_get_edid_block(void *data, u8 *edid,
+                         unsigned int block, size_t length)
+{
+       struct cdn_dp_device *dp = data;
+       u8 msg[2], reg[2], i;
+       int ret;
+
+       for (i = 0; i < 4; i++) {
+               msg[0] = block / 2;
+               msg[1] = block % 2;
+
+               ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_GET_EDID,
+                                         sizeof(msg), msg);
+               if (ret)
+                       continue;
+
+               ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+                                                     DPTX_GET_EDID,
+                                                     sizeof(reg) + length);
+               if (ret)
+                       continue;
+
+               ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
+               if (ret)
+                       continue;
+
+               ret = cdn_dp_mailbox_read_receive(dp, edid, length);
+               if (ret)
+                       continue;
+
+               if (reg[0] == length && reg[1] == block / 2)
+                       break;
+       }
+
+       if (ret)
+               DRM_DEV_ERROR(dp->dev, "get block[%d] edid failed: %d\n", block,
+                             ret);
+
+       return ret;
+}
+
+static int cdn_dp_training_start(struct cdn_dp_device *dp)
+{
+       unsigned long timeout;
+       u8 msg, event[2];
+       int ret;
+
+       msg = LINK_TRAINING_RUN;
+
+       /* start training */
+       ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_TRAINING_CONTROL,
+                                 sizeof(msg), &msg);
+       if (ret)
+               goto err_training_start;
+
+       timeout = jiffies + msecs_to_jiffies(LINK_TRAINING_TIMEOUT_MS);
+       while (time_before(jiffies, timeout)) {
+               msleep(LINK_TRAINING_RETRY_MS);
+               ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX,
+                                         DPTX_READ_EVENT, 0, NULL);
+               if (ret)
+                       goto err_training_start;
+
+               ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+                                                     DPTX_READ_EVENT,
+                                                     sizeof(event));
+               if (ret)
+                       goto err_training_start;
+
+               ret = cdn_dp_mailbox_read_receive(dp, event, sizeof(event));
+               if (ret)
+                       goto err_training_start;
+
+               if (event[1] & EQ_PHASE_FINISHED)
+                       return 0;
+       }
+
+       ret = -ETIMEDOUT;
+
+err_training_start:
+       DRM_DEV_ERROR(dp->dev, "training failed: %d\n", ret);
+       return ret;
+}
+
+static int cdn_dp_get_training_status(struct cdn_dp_device *dp)
+{
+       u8 status[10];
+       int ret;
+
+       ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_LINK_STAT,
+                                 0, NULL);
+       if (ret)
+               goto err_get_training_status;
+
+       ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+                                             DPTX_READ_LINK_STAT,
+                                             sizeof(status));
+       if (ret)
+               goto err_get_training_status;
+
+       ret = cdn_dp_mailbox_read_receive(dp, status, sizeof(status));
+       if (ret)
+               goto err_get_training_status;
+
+       dp->link.rate = status[0];
+       dp->link.num_lanes = status[1];
+
+err_get_training_status:
+       if (ret)
+               DRM_DEV_ERROR(dp->dev, "get training status failed: %d\n", ret);
+       return ret;
+}
+
+int cdn_dp_train_link(struct cdn_dp_device *dp)
+{
+       int ret;
+
+       ret = cdn_dp_training_start(dp);
+       if (ret) {
+               DRM_DEV_ERROR(dp->dev, "Failed to start training %d\n", ret);
+               return ret;
+       }
+
+       ret = cdn_dp_get_training_status(dp);
+       if (ret) {
+               DRM_DEV_ERROR(dp->dev, "Failed to get training stat %d\n", ret);
+               return ret;
+       }
+
+       DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate,
+                         dp->link.num_lanes);
+       return ret;
+}
+
+int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active)
+{
+       u8 msg;
+       int ret;
+
+       msg = !!active;
+
+       ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_SET_VIDEO,
+                                 sizeof(msg), &msg);
+       if (ret)
+               DRM_DEV_ERROR(dp->dev, "set video status failed: %d\n", ret);
+
+       return ret;
+}
+
+static int cdn_dp_get_msa_misc(struct video_info *video,
+                              struct drm_display_mode *mode)
+{
+       u32 msa_misc;
+       u8 val[2] = {0};
+
+       switch (video->color_fmt) {
+       case PXL_RGB:
+       case Y_ONLY:
+               val[0] = 0;
+               break;
+       /* set YUV default color space conversion to BT601 */
+       case YCBCR_4_4_4:
+               val[0] = 6 + BT_601 * 8;
+               break;
+       case YCBCR_4_2_2:
+               val[0] = 5 + BT_601 * 8;
+               break;
+       case YCBCR_4_2_0:
+               val[0] = 5;
+               break;
+       };
+
+       switch (video->color_depth) {
+       case 6:
+               val[1] = 0;
+               break;
+       case 8:
+               val[1] = 1;
+               break;
+       case 10:
+               val[1] = 2;
+               break;
+       case 12:
+               val[1] = 3;
+               break;
+       case 16:
+               val[1] = 4;
+               break;
+       };
+
+       msa_misc = 2 * val[0] + 32 * val[1] +
+                  ((video->color_fmt == Y_ONLY) ? (1 << 14) : 0);
+
+       return msa_misc;
+}
+
+int cdn_dp_config_video(struct cdn_dp_device *dp)
+{
+       struct video_info *video = &dp->video_info;
+       struct drm_display_mode *mode = &dp->mode;
+       u64 symbol;
+       u32 val, link_rate, rem;
+       u8 bit_per_pix, tu_size_reg = TU_SIZE;
+       int ret;
+
+       bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ?
+                     (video->color_depth * 2) : (video->color_depth * 3);
+
+       link_rate = drm_dp_bw_code_to_link_rate(dp->link.rate) / 1000;
+
+       ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE);
+       if (ret)
+               goto err_config_video;
+
+       ret = cdn_dp_reg_write(dp, HSYNC2VSYNC_POL_CTRL, 0);
+       if (ret)
+               goto err_config_video;
+
+       /*
+        * get a best tu_size and valid symbol:
+        * 1. choose Lclk freq(162Mhz, 270Mhz, 540Mhz), set TU to 32
+        * 2. calculate VS(valid symbol) = TU * Pclk * Bpp / (Lclk * Lanes)
+        * 3. if VS > *.85 or VS < *.1 or VS < 2 or TU < VS + 4, then set
+        *    TU += 2 and repeat 2nd step.
+        */
+       do {
+               tu_size_reg += 2;
+               symbol = tu_size_reg * mode->clock * bit_per_pix;
+               do_div(symbol, dp->link.num_lanes * link_rate * 8);
+               rem = do_div(symbol, 1000);
+               if (tu_size_reg > 64) {
+                       ret = -EINVAL;
+                       DRM_DEV_ERROR(dp->dev,
+                                     "tu error, clk:%d, lanes:%d, rate:%d\n",
+                                     mode->clock, dp->link.num_lanes,
+                                     link_rate);
+                       goto err_config_video;
+               }
+       } while ((symbol <= 1) || (tu_size_reg - symbol < 4) ||
+                (rem > 850) || (rem < 100));
+
+       val = symbol + (tu_size_reg << 8);
+       val |= TU_CNT_RST_EN;
+       ret = cdn_dp_reg_write(dp, DP_FRAMER_TU, val);
+       if (ret)
+               goto err_config_video;
+
+       /* set the FIFO Buffer size */
+       val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate;
+       val /= (dp->link.num_lanes * link_rate);
+       val = div_u64(8 * (symbol + 1), bit_per_pix) - val;
+       val += 2;
+       ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val);
+
+       switch (video->color_depth) {
+       case 6:
+               val = BCS_6;
+               break;
+       case 8:
+               val = BCS_8;
+               break;
+       case 10:
+               val = BCS_10;
+               break;
+       case 12:
+               val = BCS_12;
+               break;
+       case 16:
+               val = BCS_16;
+               break;
+       };
+
+       val += video->color_fmt << 8;
+       ret = cdn_dp_reg_write(dp, DP_FRAMER_PXL_REPR, val);
+       if (ret)
+               goto err_config_video;
+
+       val = video->h_sync_polarity ? DP_FRAMER_SP_HSP : 0;
+       val |= video->v_sync_polarity ? DP_FRAMER_SP_VSP : 0;
+       ret = cdn_dp_reg_write(dp, DP_FRAMER_SP, val);
+       if (ret)
+               goto err_config_video;
+
+       val = (mode->hsync_start - mode->hdisplay) << 16;
+       val |= mode->htotal - mode->hsync_end;
+       ret = cdn_dp_reg_write(dp, DP_FRONT_BACK_PORCH, val);
+       if (ret)
+               goto err_config_video;
+
+       val = mode->hdisplay * bit_per_pix / 8;
+       ret = cdn_dp_reg_write(dp, DP_BYTE_COUNT, val);
+       if (ret)
+               goto err_config_video;
+
+       val = mode->htotal | ((mode->htotal - mode->hsync_start) << 16);
+       ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_0, val);
+       if (ret)
+               goto err_config_video;
+
+       val = mode->hsync_end - mode->hsync_start;
+       val |= (mode->hdisplay << 16) | (video->h_sync_polarity << 15);
+       ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_1, val);
+       if (ret)
+               goto err_config_video;
+
+       val = mode->vtotal;
+       val |= (mode->vtotal - mode->vsync_start) << 16;
+       ret = cdn_dp_reg_write(dp, MSA_VERTICAL_0, val);
+       if (ret)
+               goto err_config_video;
+
+       val = mode->vsync_end - mode->vsync_start;
+       val |= (mode->vdisplay << 16) | (video->v_sync_polarity << 15);
+       ret = cdn_dp_reg_write(dp, MSA_VERTICAL_1, val);
+       if (ret)
+               goto err_config_video;
+
+       val = cdn_dp_get_msa_misc(video, mode);
+       ret = cdn_dp_reg_write(dp, MSA_MISC, val);
+       if (ret)
+               goto err_config_video;
+
+       ret = cdn_dp_reg_write(dp, STREAM_CONFIG, 1);
+       if (ret)
+               goto err_config_video;
+
+       val = mode->hsync_end - mode->hsync_start;
+       val |= mode->hdisplay << 16;
+       ret = cdn_dp_reg_write(dp, DP_HORIZONTAL, val);
+       if (ret)
+               goto err_config_video;
+
+       val = mode->vdisplay;
+       val |= (mode->vtotal - mode->vsync_start) << 16;
+       ret = cdn_dp_reg_write(dp, DP_VERTICAL_0, val);
+       if (ret)
+               goto err_config_video;
+
+       val = mode->vtotal;
+       ret = cdn_dp_reg_write(dp, DP_VERTICAL_1, val);
+       if (ret)
+               goto err_config_video;
+
+       ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 2, 1, 0);
+
+err_config_video:
+       if (ret)
+               DRM_DEV_ERROR(dp->dev, "config video failed: %d\n", ret);
+       return ret;
+}
+
+int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
+{
+       int ret;
+
+       ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0);
+       if (ret) {
+               DRM_DEV_ERROR(dp->dev, "audio stop failed: %d\n", ret);
+               return ret;
+       }
+
+       writel(0, dp->regs + SPDIF_CTRL_ADDR);
+
+       /* clean the audio config and reset */
+       writel(0, dp->regs + AUDIO_SRC_CNTL);
+       writel(0, dp->regs + AUDIO_SRC_CNFG);
+       writel(AUDIO_SW_RST, dp->regs + AUDIO_SRC_CNTL);
+       writel(0, dp->regs + AUDIO_SRC_CNTL);
+
+       /* reset smpl2pckt component */
+       writel(0, dp->regs + SMPL2PKT_CNTL);
+       writel(AUDIO_SW_RST, dp->regs + SMPL2PKT_CNTL);
+       writel(0, dp->regs + SMPL2PKT_CNTL);
+
+       /* reset FIFO */
+       writel(AUDIO_SW_RST, dp->regs + FIFO_CNTL);
+       writel(0, dp->regs + FIFO_CNTL);
+
+       if (audio->format == AFMT_SPDIF)
+               clk_disable_unprepare(dp->spdif_clk);
+
+       return 0;
+}
+
+int cdn_dp_audio_mute(struct cdn_dp_device *dp, bool enable)
+{
+       int ret;
+
+       ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 4, 1, enable);
+       if (ret)
+               DRM_DEV_ERROR(dp->dev, "audio mute failed: %d\n", ret);
+
+       return ret;
+}
+
+static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp,
+                                   struct audio_info *audio)
+{
+       int sub_pckt_num = 1, i2s_port_en_val = 0xf, i;
+       u32 val;
+
+       if (audio->channels == 2) {
+               if (dp->link.num_lanes == 1)
+                       sub_pckt_num = 2;
+               else
+                       sub_pckt_num = 4;
+
+               i2s_port_en_val = 1;
+       } else if (audio->channels == 4) {
+               i2s_port_en_val = 3;
+       }
+
+       writel(0x0, dp->regs + SPDIF_CTRL_ADDR);
+
+       writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
+
+       val = MAX_NUM_CH(audio->channels);
+       val |= NUM_OF_I2S_PORTS(audio->channels);
+       val |= AUDIO_TYPE_LPCM;
+       val |= CFG_SUB_PCKT_NUM(sub_pckt_num);
+       writel(val, dp->regs + SMPL2PKT_CNFG);
+
+       if (audio->sample_width == 16)
+               val = 0;
+       else if (audio->sample_width == 24)
+               val = 1 << 9;
+       else
+               val = 2 << 9;
+
+       val |= AUDIO_CH_NUM(audio->channels);
+       val |= I2S_DEC_PORT_EN(i2s_port_en_val);
+       val |= TRANS_SMPL_WIDTH_32;
+       writel(val, dp->regs + AUDIO_SRC_CNFG);
+
+       for (i = 0; i < (audio->channels + 1) / 2; i++) {
+               if (audio->sample_width == 16)
+                       val = (0x02 << 8) | (0x02 << 20);
+               else if (audio->sample_width == 24)
+                       val = (0x0b << 8) | (0x0b << 20);
+
+               val |= ((2 * i) << 4) | ((2 * i + 1) << 16);
+               writel(val, dp->regs + STTS_BIT_CH(i));
+       }
+
+       switch (audio->sample_rate) {
+       case 32000:
+               val = SAMPLING_FREQ(3) |
+                     ORIGINAL_SAMP_FREQ(0xc);
+               break;
+       case 44100:
+               val = SAMPLING_FREQ(0) |
+                     ORIGINAL_SAMP_FREQ(0xf);
+               break;
+       case 48000:
+               val = SAMPLING_FREQ(2) |
+                     ORIGINAL_SAMP_FREQ(0xd);
+               break;
+       case 88200:
+               val = SAMPLING_FREQ(8) |
+                     ORIGINAL_SAMP_FREQ(0x7);
+               break;
+       case 96000:
+               val = SAMPLING_FREQ(0xa) |
+                     ORIGINAL_SAMP_FREQ(5);
+               break;
+       case 176400:
+               val = SAMPLING_FREQ(0xc) |
+                     ORIGINAL_SAMP_FREQ(3);
+               break;
+       case 192000:
+               val = SAMPLING_FREQ(0xe) |
+                     ORIGINAL_SAMP_FREQ(1);
+               break;
+       }
+       val |= 4;
+       writel(val, dp->regs + COM_CH_STTS_BITS);
+
+       writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
+       writel(I2S_DEC_START, dp->regs + AUDIO_SRC_CNTL);
+}
+
+static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
+{
+       u32 val;
+
+       writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
+
+       val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
+       writel(val, dp->regs + SMPL2PKT_CNFG);
+       writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
+
+       val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
+       writel(val, dp->regs + SPDIF_CTRL_ADDR);
+
+       clk_prepare_enable(dp->spdif_clk);
+       clk_set_rate(dp->spdif_clk, CDN_DP_SPDIF_CLK);
+}
+
+int cdn_dp_audio_config(struct cdn_dp_device *dp, struct audio_info *audio)
+{
+       int ret;
+
+       /* reset the spdif clk before config */
+       if (audio->format == AFMT_SPDIF) {
+               reset_control_assert(dp->spdif_rst);
+               reset_control_deassert(dp->spdif_rst);
+       }
+
+       ret = cdn_dp_reg_write(dp, CM_LANE_CTRL, LANE_REF_CYC);
+       if (ret)
+               goto err_audio_config;
+
+       ret = cdn_dp_reg_write(dp, CM_CTRL, 0);
+       if (ret)
+               goto err_audio_config;
+
+       if (audio->format == AFMT_I2S)
+               cdn_dp_audio_config_i2s(dp, audio);
+       else if (audio->format == AFMT_SPDIF)
+               cdn_dp_audio_config_spdif(dp);
+
+       ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, AUDIO_PACK_EN);
+
+err_audio_config:
+       if (ret)
+               DRM_DEV_ERROR(dp->dev, "audio config failed: %d\n", ret);
+       return ret;
+}
similarity index 77%
rename from include/drm/bridge/cdns-mhdp-common.h
rename to drivers/gpu/drm/rockchip/cdn-dp-reg.h
index 74dda5593c2874b906021938466e31b87aa17581..c4bbb4a833197f366d822fc87d3b5b9d928a4308 100644 (file)
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  * Author: Chris Zhong <zyw@rock-chips.com>
  * GNU General Public License for more details.
  */
 
-#ifndef CDNS_MHDP_COMMON_H_
-#define CDNS_MHDP_COMMON_H_
-
-#include <drm/drm_bridge.h>
-#include <drm/drm_connector.h>
-#include <drm/drm_dp_helper.h>
+#ifndef _CDN_DP_REG_H
+#define _CDN_DP_REG_H
 
 #include <linux/bitops.h>
 
 #define MB_MODULE_ID_GENERAL           0x0a
 
 /* general opcode */
-#define GENERAL_MAIN_CONTROL           0x01
-#define GENERAL_TEST_ECHO              0x02
-#define GENERAL_BUS_SETTINGS           0x03
-#define GENERAL_TEST_ACCESS            0x04
-#define GENERAL_REGISTER_READ          0x07
+#define GENERAL_MAIN_CONTROL            0x01
+#define GENERAL_TEST_ECHO               0x02
+#define GENERAL_BUS_SETTINGS            0x03
+#define GENERAL_TEST_ACCESS             0x04
 
 #define DPTX_SET_POWER_MNG                     0x00
 #define DPTX_SET_HOST_CAPABILITIES             0x01
 #define DPTX_SET_LINK_BREAK_POINT              0x0f
 #define DPTX_FORCE_LANES                       0x10
 #define DPTX_HPD_STATE                         0x11
-#define DPTX_ADJUST_LT                         0x12
 
 #define FW_STANDBY                             0
 #define FW_ACTIVE                              1
 #define HDCP_TX_IS_RECEIVER_ID_VALID_EVENT     BIT(7)
 
 #define TU_SIZE                                        30
-#define CDNS_DP_MAX_LINK_RATE                  DP_LINK_BW_5_4
+#define CDN_DP_MAX_LINK_RATE                   DP_LINK_BW_5_4
 
 /* audio */
 #define AUDIO_PACK_EN                          BIT(8)
@@ -466,139 +459,24 @@ enum vic_bt_type {
        BT_709 = 0x1,
 };
 
-enum audio_format {
-       AFMT_I2S = 0,
-       AFMT_SPDIF = 1,
-       AFMT_UNUSED,
-};
-
-struct audio_info {
-       enum audio_format format;
-       int sample_rate;
-       int channels;
-       int sample_width;
-};
-
-enum vic_pxl_encoding_format {
-       PXL_RGB = 0x1,
-       YCBCR_4_4_4 = 0x2,
-       YCBCR_4_2_2 = 0x4,
-       YCBCR_4_2_0 = 0x8,
-       Y_ONLY = 0x10,
-};
-
-struct video_info {
-       bool h_sync_polarity;
-       bool v_sync_polarity;
-       bool interlaced;
-       int color_depth;
-       enum vic_pxl_encoding_format color_fmt;
-};
-
-struct cdns_mhdp_host {
-       unsigned int    link_rate;
-       u8      lanes_cnt;
-       u8      volt_swing;
-       u8      pre_emphasis;
-       u8      pattern_supp;
-       u8      lane_mapping;
-       u8      fast_link : 1;
-       u8      enhanced : 1;
-       u8      scrambler : 1;
-       u8      ssc : 1;
-};
-
-struct cdns_mhdp_sink {
-       unsigned int    link_rate;
-       u8      lanes_cnt;
-       u8      pattern_supp;
-       u8      fast_link;
-       u8      enhanced;
-};
-
-struct cdns_mhdp_display_fmt {
-       u32 color_format;
-       u32 bpc;
-       bool y_only;
-};
-
-struct cdns_mhdp_bridge;
-struct cdns_mhdp_connector;
-
-struct cdns_mhdp_bridge {
-       struct cdns_mhdp_device *mhdp;
-       struct drm_bridge base;
-       int pbn;
-       int8_t stream_id;
-       struct cdns_mhdp_connector *connector;
-       bool is_active;
-};
-
-
-struct cdns_mhdp_connector {
-       struct drm_connector base;
-       struct cdns_mhdp_bridge *bridge;
-};
-
-
-struct cdns_mhdp_device {
-       void __iomem            *regs;
-
-       struct device           *dev;
-       struct clk              *clk;
-
-       struct drm_dp_link      link;
-       struct cdns_mhdp_connector  connector;
-       struct clk              *spdif_clk;
-       struct reset_control    *spdif_rst;
-
-       struct platform_device  *audio_pdev;
-       struct audio_info       audio_info;
-
-       struct drm_dp_aux       aux;
-       struct cdns_mhdp_host   host;
-       struct cdns_mhdp_sink   sink;
-       struct cdns_mhdp_display_fmt display_fmt;
-       struct cdns_mhdp_bridge bridge;
-       struct phy              *phy;
-
-       struct video_info       video_info;
-       struct drm_display_mode mode;
-       unsigned int            fw_version;
-
-       bool link_up;
-       bool plugged;
-
-       void __iomem            *j721e_regs;
-};
-
-void cdns_mhdp_clock_reset(struct cdns_mhdp_device *mhdp);
-void cdns_mhdp_set_fw_clk(struct cdns_mhdp_device *mhdp, unsigned long clk);
-int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp, const u32 *i_mem,
-                           u32 i_size, const u32 *d_mem, u32 d_size);
-int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable);
-int cdns_mhdp_set_host_cap(struct cdns_mhdp_device *mhdp, u8 lanes, bool flip);
-int cdns_mhdp_event_config(struct cdns_mhdp_device *mhdp);
-u32 cdns_mhdp_get_event(struct cdns_mhdp_device *mhdp);
-int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp);
-int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value);
-int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
-                       u32 addr, u8 *data, u16 len);
-int cdns_mhdp_get_edid_block(void *mhdp, u8 *edid,
-                            unsigned int block, size_t length);
-int cdns_mhdp_train_link(struct cdns_mhdp_device *mhdp);
-int cdns_mhdp_set_video_status(struct cdns_mhdp_device *mhdp, int active);
-int cdns_mhdp_config_video(struct cdns_mhdp_device *mhdp);
-int cdns_mhdp_audio_stop(struct cdns_mhdp_device *mhdp,
-                        struct audio_info *audio);
-int cdns_mhdp_audio_mute(struct cdns_mhdp_device *mhdp, bool enable);
-int cdns_mhdp_audio_config(struct cdns_mhdp_device *mhdp,
-                          struct audio_info *audio);
-int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value);
-int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val);
-int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
-                           u8 start_bit, u8 bits_no, u32 val);
-int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, u8 nlanes,
-                       u16 udelay, u8 *lanes_data,
-                       u8 *dpcd);
-#endif /* CDNS_MHDP_COMMON_H_ */
+void cdn_dp_clock_reset(struct cdn_dp_device *dp);
+
+void cdn_dp_set_fw_clk(struct cdn_dp_device *dp, unsigned long clk);
+int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem,
+                        u32 i_size, const u32 *d_mem, u32 d_size);
+int cdn_dp_set_firmware_active(struct cdn_dp_device *dp, bool enable);
+int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip);
+int cdn_dp_event_config(struct cdn_dp_device *dp);
+u32 cdn_dp_get_event(struct cdn_dp_device *dp);
+int cdn_dp_get_hpd_status(struct cdn_dp_device *dp);
+int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value);
+int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len);
+int cdn_dp_get_edid_block(void *dp, u8 *edid,
+                         unsigned int block, size_t length);
+int cdn_dp_train_link(struct cdn_dp_device *dp);
+int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active);
+int cdn_dp_config_video(struct cdn_dp_device *dp);
+int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio);
+int cdn_dp_audio_mute(struct cdn_dp_device *dp, bool enable);
+int cdn_dp_audio_config(struct cdn_dp_device *dp, struct audio_info *audio);
+#endif /* _CDN_DP_REG_H */
index f8f9ae6622eb539783be0c8a02ef3b616bdea1b3..873624a11ce8805c702e3544eae227af957edb27 100644 (file)
@@ -880,7 +880,8 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
        struct vop *vop = to_vop(crtc);
 
        adjusted_mode->clock =
-               clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
+               DIV_ROUND_UP(clk_round_rate(vop->dclk, mode->clock * 1000),
+                            1000);
 
        return true;
 }
index 808d9fb627e97ab07562c17183ade0508abfe0b7..477d0a27b9a5d7a0a4a4ee919e4515845015d6f0 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_of.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_plane_helper.h>
@@ -825,6 +826,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
 };
 
 static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
+       .prepare_fb = drm_gem_fb_prepare_fb,
        .atomic_check = ltdc_plane_atomic_check,
        .atomic_update = ltdc_plane_atomic_update,
        .atomic_disable = ltdc_plane_atomic_disable,
index daf01189cf28f76a0d43b19122dfc169c26c369e..832806963f6c5874d3dd9db1055dad874ddefa06 100644 (file)
@@ -574,8 +574,7 @@ static enum drm_mode_status dispc6_vp_mode_valid(struct dispc_device *dispc,
                return MODE_BAD_HVALUE;
 
        if (vsw < 1 || vsw > 256 ||
-           vfp < 0 || vfp > 4095 ||
-           vbp < 0 || vbp > 4095)
+           vfp > 4095 || vbp > 4095)
                return MODE_BAD_VVALUE;
 
        if (dispc->memory_bandwidth_limit) {
@@ -743,26 +742,6 @@ static const struct dispc6_vid_fir_coefs dispc6_fir_coefs_null = {
        .c0 = { 512, 512, 512, 512, 512, 512, 512, 512, 256,  },
 };
 
-/* M=8, Upscale x >= 1 */
-static const struct dispc6_vid_fir_coefs dispc6_fir_coefs_m8 = {
-       .c2 = { 0, -4, -8, -16, -24, -32, -40, -48, 0, 2, 4, 6, 8, 6, 4, 2,  },
-       .c1 = { 0, 28, 56, 94, 132, 176, 220, 266, -56, -60, -64, -62, -60, -50, -40, -20,  },
-       .c0 = { 512, 506, 500, 478, 456, 424, 392, 352, 312,  },
-};
-
-/* 5-tap, M=22, Downscale Ratio 2.5 < x < 3 */
-static const struct dispc6_vid_fir_coefs dispc6_fir_coefs_m22_5tap = {
-       .c2 = { 16, 20, 24, 30, 36, 42, 48, 56, 0, 0, 0, 2, 4, 8, 12, 14,  },
-       .c1 = { 132, 140, 148, 156, 164, 172, 180, 186, 64, 72, 80, 88, 96, 104, 112, 122,  },
-       .c0 = { 216, 216, 216, 214, 212, 208, 204, 198, 192,  },
-};
-
-/* 3-tap, M=22, Downscale Ratio 2.5 < x < 3 */
-static const struct dispc6_vid_fir_coefs dispc6_fir_coefs_m22_3tap = {
-       .c1 = { 100, 118, 136, 156, 176, 196, 216, 236, 0, 10, 20, 30, 40, 54, 68, 84,  },
-       .c0 = { 312, 310, 308, 302, 296, 286, 276, 266, 256,  },
-};
-
 enum dispc6_vid_fir_coef_set {
        DISPC6_VID_FIR_COEF_HORIZ,
        DISPC6_VID_FIR_COEF_HORIZ_UV,
index e9463d1a490b49d1cee6a07ce42c85a61d23e561..fecb377105a64fcb1bd2615b793d5de09de67040 100644 (file)
 #include "tidss_scale_coefs.h"
 #include "tidss_dispc7.h"
 
+static const char *dispc7_plane_name(struct dispc_device *dispc, u32 hw_plane);
+
 static const struct dispc7_features dispc7_am6_feats = {
-       .min_pclk = 1000,
-       .max_pclk = 200000000,
+       .max_pclk_kHz = {
+               [DISPC7_VP_DPI] = 165000,
+               [DISPC7_VP_OLDI] = 165000,
+       },
 
        .num_commons = 1,
        .common_name = { "common" },
@@ -86,18 +90,19 @@ static const struct dispc7_features dispc7_am6_feats = {
 };
 
 static const struct dispc7_features dispc7_j721e_feats = {
-       .min_pclk = 1000,
-       .max_pclk = 600000000,
+       .max_pclk_kHz = {
+               [DISPC7_VP_DPI] = 170000,
+               [DISPC7_VP_INTERNAL] = 600000,
+       },
 
        .num_commons = 4,
        .common_name = { "common_m", "common_s0", "common_s1", "common_s2" },
        .common_cfg = { true, false, false, false },
 
-       /* XXX: Scaling features are copied from AM6 and should be checked */
        .scaling = {
-               .in_width_max_5tap_rgb = 1280,
-               .in_width_max_3tap_rgb = 2560,
-               .in_width_max_5tap_yuv = 2560,
+               .in_width_max_5tap_rgb = 2048,
+               .in_width_max_3tap_rgb = 4096,
+               .in_width_max_5tap_yuv = 4096,
                .in_width_max_3tap_yuv = 4096,
                .upscale_limit = 16,
                .downscale_limit_5tap = 4,
@@ -118,8 +123,9 @@ static const struct dispc7_features dispc7_j721e_feats = {
        .vp_name = { "vp1", "vp2", "vp3", "vp4" },
        .ovr_name = { "ovr1", "ovr2", "ovr3", "ovr4" },
        .vpclk_name = { "vp1", "vp2", "vp3", "vp4" },
-       .vp_bus_type =  { DISPC7_VP_DPI, DISPC7_VP_DPI,
-                         DISPC7_VP_DPI, DISPC7_VP_DPI, },
+       /* Currently hard coded VP routing (see dispc7_initial_config()) */
+       .vp_bus_type =  { DISPC7_VP_INTERNAL, DISPC7_VP_DPI,
+                         DISPC7_VP_INTERNAL, DISPC7_VP_DPI, },
        .vp_feat = { .color = {
                        .has_ctm = true,
                        .gamma_size = 1024,
@@ -708,25 +714,25 @@ static void dispc7_write_irqenable(struct dispc_device *dispc, u64 mask)
        dispc7_intr_read(dispc, DISPC_IRQENABLE_SET);
 }
 
-enum dispc7_oldi_mode { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
+enum dispc7_oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
 
 struct dispc7_bus_format {
        u32 bus_fmt;
        u32 data_width;
-       enum dispc7_vp_bus_type bus_type;
-       enum dispc7_oldi_mode oldi_mode;
+       bool is_oldi_fmt;
+       enum dispc7_oldi_mode_reg_val oldi_mode_reg_val;
 };
 
 static const struct dispc7_bus_format dispc7_bus_formats[] = {
-       { MEDIA_BUS_FMT_RGB444_1X12,            12, DISPC7_VP_DPI, 0 },
-       { MEDIA_BUS_FMT_RGB565_1X16,            16, DISPC7_VP_DPI, 0 },
-       { MEDIA_BUS_FMT_RGB666_1X18,            18, DISPC7_VP_DPI, 0 },
-       { MEDIA_BUS_FMT_RGB888_1X24,            24, DISPC7_VP_DPI, 0 },
-       { MEDIA_BUS_FMT_RGB101010_1X30,         30, DISPC7_VP_DPI, 0 },
-       { MEDIA_BUS_FMT_RGB121212_1X36,         36, DISPC7_VP_DPI, 0 },
-       { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,      18, DISPC7_VP_OLDI, SPWG_18 },
-       { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,      24, DISPC7_VP_OLDI, SPWG_24 },
-       { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,     24, DISPC7_VP_OLDI, JEIDA_24 },
+       { MEDIA_BUS_FMT_RGB444_1X12,            12, false, 0 },
+       { MEDIA_BUS_FMT_RGB565_1X16,            16, false, 0 },
+       { MEDIA_BUS_FMT_RGB666_1X18,            18, false, 0 },
+       { MEDIA_BUS_FMT_RGB888_1X24,            24, false, 0 },
+       { MEDIA_BUS_FMT_RGB101010_1X30,         30, false, 0 },
+       { MEDIA_BUS_FMT_RGB121212_1X36,         36, false, 0 },
+       { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,      18, true, SPWG_18 },
+       { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,      24, true, SPWG_24 },
+       { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,     24, true, JEIDA_24 },
 };
 
 static const
@@ -809,7 +815,7 @@ static void dispc7_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
 
        oldi_cfg |= BIT(7); /* DEPOL */
 
-       oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode, 3, 1);
+       oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode_reg_val, 3, 1);
 
        oldi_cfg |= BIT(12); /* SOFTRST */
 
@@ -838,9 +844,6 @@ static void dispc7_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
        if (WARN_ON(!fmt))
                return;
 
-       if (WARN_ON(dispc->feat->vp_bus_type[hw_videoport] != fmt->bus_type))
-               return;
-
        if (dispc->feat->vp_bus_type[hw_videoport] == DISPC7_VP_OLDI) {
                dispc7_oldi_tx_power(dispc, true);
 
@@ -1003,11 +1006,17 @@ static enum drm_mode_status dispc7_vp_mode_valid(struct dispc_device *dispc,
                                                 const struct drm_display_mode *mode)
 {
        u32 hsw, hfp, hbp, vsw, vfp, vbp;
+       enum dispc7_vp_bus_type bus_type;
+       int max_pclk;
+
+       bus_type = dispc->feat->vp_bus_type[hw_videoport];
+
+       max_pclk = dispc->feat->max_pclk_kHz[bus_type];
 
-       if (mode->clock * 1000 < dispc->feat->min_pclk)
-               return MODE_CLOCK_LOW;
+       if (WARN_ON(max_pclk == 0))
+               return MODE_BAD;
 
-       if (mode->clock * 1000 > dispc->feat->max_pclk)
+       if (mode->clock max_pclk)
                return MODE_CLOCK_HIGH;
 
        if (mode->hdisplay > 4096)
@@ -1020,6 +1029,17 @@ static enum drm_mode_status dispc7_vp_mode_valid(struct dispc_device *dispc,
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                return MODE_NO_INTERLACE;
 
+       /*
+        * Enforce the output width is divisible by 2. Actually this
+        * is only needed in following cases:
+        * - YUV output selected (BT656, BT1120)
+        * - Dithering enabled
+        * - TDM with TDMCycleFormat == 3
+        * But for simplicity we enforce that always.
+        */
+       if ((mode->hdisplay % 2) != 0)
+               return MODE_BAD_HVALUE;
+
        hfp = mode->hsync_start - mode->hdisplay;
        hsw = mode->hsync_end - mode->hsync_start;
        hbp = mode->htotal - mode->hsync_end;
@@ -1034,8 +1054,7 @@ static enum drm_mode_status dispc7_vp_mode_valid(struct dispc_device *dispc,
                return MODE_BAD_HVALUE;
 
        if (vsw < 1 || vsw > 256 ||
-           vfp < 0 || vfp > 4095 ||
-           vbp < 0 || vbp > 4095)
+           vfp > 4095 || vbp > 4095)
                return MODE_BAD_VVALUE;
 
        return MODE_OK;
@@ -1064,10 +1083,10 @@ static int dispc7_vp_check(struct dispc_device *dispc, u32 hw_videoport,
                return -EINVAL;
        }
 
-       if (dispc->feat->vp_bus_type[hw_videoport] != fmt->bus_type) {
-               dev_dbg(dispc->dev, "%s: %s is not %s-port\n",
-                       __func__, dispc->feat->vp_name[hw_videoport],
-                       fmt->bus_type == DISPC7_VP_OLDI ? "OLDI" : "DPI");
+       if (dispc->feat->vp_bus_type[hw_videoport] != DISPC7_VP_OLDI &&
+           fmt->is_oldi_fmt) {
+               dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n",
+                       __func__, dispc->feat->vp_name[hw_videoport]);
                return -EINVAL;
        }
 
@@ -1297,7 +1316,7 @@ static void dispc7_wb_write_csc(struct dispc_device *dispc,
 }
 
 /* YUV -> RGB, ITU-R BT.601, full range */
-const static struct dispc7_csc_coef csc_yuv2rgb_bt601_full = {
+static const struct dispc7_csc_coef csc_yuv2rgb_bt601_full = {
        dispc7_csc_yuv2rgb_regval,
        { 256,   0,  358,       /* ry, rcb, rcr |1.000  0.000  1.402|*/
          256, -88, -182,       /* gy, gcb, gcr |1.000 -0.344 -0.714|*/
@@ -1309,7 +1328,7 @@ const static struct dispc7_csc_coef csc_yuv2rgb_bt601_full = {
 };
 
 /* YUV -> RGB, ITU-R BT.601, limited range */
-const static struct dispc7_csc_coef csc_yuv2rgb_bt601_lim = {
+static const struct dispc7_csc_coef csc_yuv2rgb_bt601_lim = {
        dispc7_csc_yuv2rgb_regval,
        { 298,    0,  409,      /* ry, rcb, rcr |1.164  0.000  1.596|*/
          298, -100, -208,      /* gy, gcb, gcr |1.164 -0.392 -0.813|*/
@@ -1321,7 +1340,7 @@ const static struct dispc7_csc_coef csc_yuv2rgb_bt601_lim = {
 };
 
 /* YUV -> RGB, ITU-R BT.709, full range */
-const static struct dispc7_csc_coef csc_yuv2rgb_bt709_full = {
+static const struct dispc7_csc_coef csc_yuv2rgb_bt709_full = {
        dispc7_csc_yuv2rgb_regval,
        { 256,    0,  402,      /* ry, rcb, rcr |1.000  0.000  1.570|*/
          256,  -48, -120,      /* gy, gcb, gcr |1.000 -0.187 -0.467|*/
@@ -1333,7 +1352,7 @@ const static struct dispc7_csc_coef csc_yuv2rgb_bt709_full = {
 };
 
 /* YUV -> RGB, ITU-R BT.709, limited range */
-const static struct dispc7_csc_coef csc_yuv2rgb_bt709_lim = {
+static const struct dispc7_csc_coef csc_yuv2rgb_bt709_lim = {
        dispc7_csc_yuv2rgb_regval,
        { 298,    0,  459,      /* ry, rcb, rcr |1.164  0.000  1.793|*/
          298,  -55, -136,      /* gy, gcb, gcr |1.164 -0.213 -0.533|*/
@@ -1345,7 +1364,7 @@ const static struct dispc7_csc_coef csc_yuv2rgb_bt709_lim = {
 };
 
 /* RGB -> YUV, ITU-R BT.601, full range */
-const static struct dispc7_csc_coef csc_rgb2yuv_bt601_full = {
+static const struct dispc7_csc_coef csc_rgb2yuv_bt601_full = {
        dispc7_csc_rgb2yuv_regval,
        { 77,  150,  29,        /* yr,   yg,  yb | 0.299  0.587  0.114|*/
         -43,  -85, 128,        /* cbr, cbg, cbb |-0.173 -0.339  0.511|*/
@@ -1357,7 +1376,7 @@ const static struct dispc7_csc_coef csc_rgb2yuv_bt601_full = {
 };
 
 /* RGB -> YUV, ITU-R BT.601, limited range */
-const static struct dispc7_csc_coef csc_rgb2yuv_bt601_lim = {
+static const struct dispc7_csc_coef csc_rgb2yuv_bt601_lim = {
        dispc7_csc_rgb2yuv_regval,
        { 66,  129,  25,        /* yr,   yg,  yb | 0.257  0.504  0.098|*/
         -38,  -74, 112,        /* cbr, cbg, cbb |-0.148 -0.291  0.439|*/
@@ -1369,7 +1388,7 @@ const static struct dispc7_csc_coef csc_rgb2yuv_bt601_lim = {
 };
 
 /* RGB -> YUV, ITU-R BT.709, full range */
-const static struct dispc7_csc_coef csc_rgb2yuv_bt709_full = {
+static const struct dispc7_csc_coef csc_rgb2yuv_bt709_full = {
        dispc7_csc_rgb2yuv_regval,
        { 54,  183,  18,        /* yr,   yg,  yb | 0.1826  0.6142  0.0620|*/
         -30, -101, 131,        /* cbr, cbg, cbb |-0.1006 -0.3386  0.4392|*/
@@ -1381,7 +1400,7 @@ const static struct dispc7_csc_coef csc_rgb2yuv_bt709_full = {
 };
 
 /* RGB -> YUV, ITU-R BT.709, limited range */
-const static struct dispc7_csc_coef csc_rgb2yuv_bt709_lim = {
+static const struct dispc7_csc_coef csc_rgb2yuv_bt709_lim = {
        dispc7_csc_rgb2yuv_regval,
        { 47,  157,   16,       /* yr,   yg,  yb | 0.1826  0.6142  0.0620|*/
         -26,  -87,  112,       /* cbr, cbg, cbb |-0.1006 -0.3386  0.4392|*/
@@ -1398,7 +1417,7 @@ struct dispc7_csc_entry {
        const struct dispc7_csc_coef *csc;
 };
 
-const static struct dispc7_csc_entry dispc7_yuv2rgb_table[] = {
+static const struct dispc7_csc_entry dispc7_yuv2rgb_table[] = {
        { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_FULL_RANGE,
          &csc_yuv2rgb_bt601_full, },
        { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE,
@@ -1409,7 +1428,7 @@ const static struct dispc7_csc_entry dispc7_yuv2rgb_table[] = {
          &csc_yuv2rgb_bt709_lim, },
 };
 
-const static struct dispc7_csc_entry dispc7_rgb2yuv_table[] = {
+static const struct dispc7_csc_entry dispc7_rgb2yuv_table[] = {
        { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_FULL_RANGE,
          &csc_rgb2yuv_bt601_full, },
        { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE,
@@ -1450,7 +1469,7 @@ struct dispc7_csc_coef *dispc7_find_csc(enum dispc7_csc_direction direction,
 static void dispc7_vid_csc_setup(struct dispc_device *dispc, u32 hw_plane,
                                 const struct drm_plane_state *state)
 {
-       const static struct dispc7_csc_coef *coef;
+       static const struct dispc7_csc_coef *coef;
 
        coef = dispc7_find_csc(DISPC7_YUV2RGB, state->color_encoding,
                               state->color_range);
@@ -1472,7 +1491,7 @@ static void dispc7_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane,
 static void dispc7_wb_csc_setup(struct dispc_device *dispc,
                                const struct drm_plane_state *state)
 {
-       const static struct dispc7_csc_coef *coef;
+       static const struct dispc7_csc_coef *coef;
 
        coef = dispc7_find_csc(DISPC7_RGB2YUV, state->color_encoding,
                               state->color_range);
@@ -2399,13 +2418,9 @@ static bool dispc7_has_writeback(struct dispc_device *dispc)
        return dispc->wb_managed;
 }
 
-static u32 dispc7_vid_get_fifo_size(struct dispc_device *dispc,
-                                   u32 hw_plane)
+static u32 dispc7_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)
 {
-       const u32 unit_size = 16;       /* 128-bits */
-
-       return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0) *
-              unit_size;
+       return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0);
 }
 
 static void dispc7_vid_set_mflag_threshold(struct dispc_device *dispc,
@@ -2415,31 +2430,16 @@ static void dispc7_vid_set_mflag_threshold(struct dispc_device *dispc,
                         FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
 }
 
-static void dispc7_vid_mflag_setup(struct dispc_device *dispc,
-                                  u32 hw_plane)
+static void dispc7_vid_set_buf_threshold(struct dispc_device *dispc,
+                                        u32 hw_plane, u32 low, u32 high)
 {
-       const u32 unit_size = 16;       /* 128-bits */
-       u32 size = dispc7_vid_get_fifo_size(dispc, hw_plane);
-       u32 low, high;
-
-       /*
-        * Simulation team suggests below thesholds:
-        * HT = fifosize * 5 / 8;
-        * LT = fifosize * 4 / 8;
-        */
-
-       low = size * 4 / 8 / unit_size;
-       high = size * 5 / 8 / unit_size;
-
-       dispc7_vid_set_mflag_threshold(dispc, hw_plane, low, high);
+       dispc7_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD,
+                        FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
 }
 
 static u32 dispc7_wb_get_fifo_size(struct dispc_device *dispc)
 {
-       const u32 unit_size = 16;       /* 128-bits */
-
-       return WB_REG_GET(dispc, DISPC_VID_BUF_SIZE_STATUS, 15, 0) *
-              unit_size;
+       return WB_REG_GET(dispc, DISPC_VID_BUF_SIZE_STATUS, 15, 0);
 }
 
 static void dispc7_wb_set_mflag_threshold(struct dispc_device *dispc,
@@ -2449,55 +2449,85 @@ static void dispc7_wb_set_mflag_threshold(struct dispc_device *dispc,
                        FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
 }
 
-static void dispc7_wb_mflag_setup(struct dispc_device *dispc)
+static void dispc7_wb_set_buf_threshold(struct dispc_device *dispc,
+                                        u32 low, u32 high)
 {
-       const u32 unit_size = 16;       /* 128-bits */
-       u32 size = dispc7_wb_get_fifo_size(dispc);
-       u32 low, high;
+       dispc7_wb_write(dispc, DISPC_WB_BUF_THRESHOLD,
+                       FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
 
-       /*
-        * Simulation team suggests below thesholds:
-        * HT = fifosize * 5 / 8;
-        * LT = fifosize * 4 / 8;
-        */
+static void dispc7_plane_init(struct dispc_device *dispc)
+{
+       unsigned int hw_plane;
+
+       dev_dbg(dispc->dev, "%s()\n", __func__);
 
-       low = size * 4 / 8 / unit_size;
-       high = size * 5 / 8 / unit_size;
+       if (dispc->has_cfg_common) {
+               u32 cba_lo_pri = 1;
+               u32 cba_hi_pri = 0;
 
-       dispc7_wb_set_mflag_threshold(dispc, low, high);
-}
+               CFG_REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0);
+               CFG_REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3);
 
-static void dispc7_mflag_setup(struct dispc_device *dispc)
-{
-       unsigned int i;
+               /* MFLAG_CTRL = ENABLED */
+               CFG_REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+               /* MFLAG_START = MFLAGNORMALSTARTMODE */
+               CFG_REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+       }
 
-       if (!dispc->has_cfg_common)
-               goto no_cfg;
+       dispc_for_each_managed_plane(dispc, hw_plane) {
+               u32 size = dispc7_vid_get_fifo_size(dispc, hw_plane);
+               u32 thr_low, thr_high;
+               u32 mflag_low, mflag_high;
+               u32 preload;
 
-       /* MFLAG_CTRL = ENABLED */
-       CFG_REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
-       /* MFLAG_START = MFLAGNORMALSTARTMODE */
-       CFG_REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+               thr_high = size - 1;
+               thr_low = size / 2;
 
-no_cfg:
-       dispc_for_each_managed_plane(dispc, i)
-               dispc7_vid_mflag_setup(dispc, i);
+               mflag_high = size * 2 / 3;
+               mflag_low = size / 3;
 
-       if (dispc7_has_writeback(dispc))
-               dispc7_wb_mflag_setup(dispc);
-}
+               preload = thr_low;
 
-static void dispc7_plane_init(struct dispc_device *dispc)
-{
-       unsigned int i;
+               dev_dbg(dispc->dev,
+                       "%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
+                       dispc7_plane_name(dispc, hw_plane),
+                       size,
+                       thr_high, thr_low,
+                       mflag_high, mflag_low,
+                       preload);
 
-       dev_dbg(dispc->dev, "%s()\n", __func__);
+               dispc7_vid_set_buf_threshold(dispc, hw_plane,
+                                            thr_low, thr_high);
+               dispc7_vid_set_mflag_threshold(dispc, hw_plane,
+                                              mflag_low, mflag_high);
 
-       /* FIFO underflows when scaling if preload is not high enough */
-       dispc_for_each_managed_plane(dispc, i)
-               if (!dispc->feat->vid_lite[i])
-                       VID_REG_FLD_MOD(dispc, i, DISPC_VID_PRELOAD,
-                                       0x7FF, 11, 0);
+               dispc7_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload);
+
+               /* Prefech up to PRELOAD value */
+               VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0, 19, 19);
+       }
+
+       if (dispc7_has_writeback(dispc)) {
+               u32 size = dispc7_wb_get_fifo_size(dispc);
+               u32 thr_low, thr_high;
+               u32 mflag_low, mflag_high;
+
+               thr_high = size - 1;
+               thr_low = size / 2;
+
+               mflag_high = size * 2 / 3;
+               mflag_low = size / 3;
+
+               dev_dbg(dispc->dev,
+                       "wb: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u\n",
+                       size,
+                       thr_high, thr_low,
+                       mflag_high, mflag_low);
+
+               dispc7_wb_set_buf_threshold(dispc, thr_low, thr_high);
+               dispc7_wb_set_mflag_threshold(dispc, mflag_low, mflag_high);
+       }
 }
 
 static void dispc7_vp_init(struct dispc_device *dispc)
@@ -2513,7 +2543,6 @@ static void dispc7_vp_init(struct dispc_device *dispc)
 
 static void dispc7_initial_config(struct dispc_device *dispc)
 {
-       dispc7_mflag_setup(dispc);
        dispc7_plane_init(dispc);
        dispc7_vp_init(dispc);
 
@@ -2800,10 +2829,9 @@ no_cfg:
        return 0;
 }
 
-static int dispc7_wb_find_free_ovr(struct dispc_device *dispc)
+static void dispc7_wb_find_free_ovr(struct dispc_device *dispc)
 {
        struct tidss_device *tidss = dispc->tidss;
-       struct device *dev = tidss->dev;
        int i, j;
        bool found;
        u32 ovr_id = 0xff;
@@ -2826,18 +2854,10 @@ static int dispc7_wb_find_free_ovr(struct dispc_device *dispc)
                }
        }
 
-       if (ovr_id != 0xff) {
+       if (ovr_id != 0xff)
                dispc->wb_reserved_ovr = ovr_id;
-
-               dev_info(dev, "%s: found ovr %s (%d)\n", __func__,
-                        tidss->dispc_ops->vp_name(tidss->dispc, ovr_id), ovr_id);
-
-               return 0;
-       }
-
-       dispc->wb_managed = false;
-       dev_warn(dev, "%s: No OVR available for WB, disabling WB.\n", __func__);
-       return -1;
+       else
+               dispc->wb_managed = false;
 }
 
 static u32 dispc7_wb_get_reserved_ovr(struct dispc_device *dispc)
@@ -2906,6 +2926,8 @@ static int dispc7_modeset_init(struct dispc_device *dispc)
                                conn_type = DRM_MODE_CONNECTOR_DPI;
                                break;
                        default:
+                               dev_warn(dev, "%s: Bad vp bus type: %d\n",
+                                        __func__, dispc->feat->vp_bus_type[i]);
                                conn_type = DRM_MODE_CONNECTOR_Unknown;
                                break;
                        }
index 7ac3092acfe28627064cd946cf05f64e86c4b6b7..da78176f422f285a07d9a8f0fe9e6cb9df99bf44 100644 (file)
@@ -27,8 +27,10 @@ struct dispc7_errata {
 };
 
 enum dispc7_vp_bus_type {
-       DISPC7_VP_DPI,
-       DISPC7_VP_OLDI,
+       DISPC7_VP_DPI,          /* DPI output */
+       DISPC7_VP_OLDI,         /* OLDI (LVDS) output */
+       DISPC7_VP_INTERNAL,     /* SoC internal routing */
+       DISPC7_VP_MAX_BUS_TYPE,
 };
 
 enum dispc7_dss_subrevision {
@@ -37,9 +39,7 @@ enum dispc7_dss_subrevision {
 };
 
 struct dispc7_features {
-       /* XXX should these come from the .dts? Min pclk is not feature of DSS IP */
-       unsigned long min_pclk;
-       unsigned long max_pclk;
+       int max_pclk_kHz[DISPC7_VP_MAX_BUS_TYPE];
 
        u32 num_commons;
        const char *common_name[DISPC7_MAX_COMMONS];
index 0a85ee6a2efb5e4867eb577b14767e5d66e4bd9b..7f1e4c3c5cc81e3d5e1463ab72bb565b13aafddf 100644 (file)
@@ -273,7 +273,7 @@ err_poll_fini:
        drm_irq_uninstall(ddev);
 
 err_modeset_cleanup:
-       drm_mode_config_cleanup(ddev);
+       tidss_modeset_cleanup(tidss);
 
 err_runtime_suspend:
 #ifndef CONFIG_PM
@@ -314,7 +314,7 @@ static int tidss_remove(struct platform_device *pdev)
 
        drm_irq_uninstall(ddev);
 
-       drm_mode_config_cleanup(ddev);
+       tidss_modeset_cleanup(tidss);
 
 #ifndef CONFIG_PM
        /* If we don't have PM, we need to call suspend manually */
index 5330dd6843f3d7ed417e5e68c978e358ded123a6..6a852fdf019f79973c4e274770add6bbad9a2e32 100644 (file)
@@ -68,11 +68,6 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
        .atomic_commit = drm_atomic_helper_commit,
 };
 
-static int tidss_modeset_init_properties(struct tidss_device *tidss)
-{
-       return 0;
-}
-
 static struct drm_crtc *tidss_v_modeset_init_v_crtc(struct tidss_device *tidss, struct rpmsg_remotedev_display_disp *vp)
 {
        struct drm_device *dev = tidss->ddev;
@@ -128,7 +123,7 @@ connector_fail:
        return NULL;
 }
 
-struct rpmsg_remotedev_display_cb tidss_rdev_cb  = {
+static const struct rpmsg_remotedev_display_cb tidss_rdev_cb  = {
        .commit_done = v_crtc_commit_done,
        .buffer_done = v_crtc_buffer_done,
 };
@@ -151,13 +146,9 @@ int tidss_modeset_init(struct tidss_device *tidss)
        ddev->mode_config.funcs = &mode_config_funcs;
        ddev->mode_config.helper_private = &mode_config_helper_funcs;
 
-       ret = tidss_modeset_init_properties(tidss);
-       if (ret < 0)
-               return ret;
-
        ret = tidss->dispc_ops->modeset_init(tidss->dispc);
        if (ret)
-               return ret;
+               goto err_mode_config_cleanup;
 
        if (tidss->rdev) {
                tidss->rdev->device.display.ops->get_res_info(tidss->rdev, &tidss->rres);
@@ -167,14 +158,16 @@ int tidss_modeset_init(struct tidss_device *tidss)
 
                for (i = 0; i < tidss->rres.num_disps; i++) {
                        tidss->v_crtcs[i] = tidss_v_modeset_init_v_crtc(tidss, &tidss->rres.disps[i]);
-                       if (!tidss->v_crtcs[i])
-                               return -ENOMEM;
+                       if (!tidss->v_crtcs[i]) {
+                               ret = -ENOMEM;
+                               goto err_mode_config_cleanup;
+                       }
                }
        }
 
        ret = drm_vblank_init(ddev, tidss->num_crtcs + tidss->num_v_crtcs);
        if (ret)
-               return ret;
+               goto err_mode_config_cleanup;
 
        /* Start with vertical blanking interrupt reporting disabled. */
        for (i = 0; i < tidss->num_crtcs; ++i)
@@ -188,4 +181,15 @@ int tidss_modeset_init(struct tidss_device *tidss)
        dev_dbg(tidss->dev, "%s done\n", __func__);
 
        return 0;
+
+err_mode_config_cleanup:
+       drm_mode_config_cleanup(ddev);
+       return ret;
+}
+
+void tidss_modeset_cleanup(struct tidss_device *tidss)
+{
+       struct drm_device *ddev = tidss->ddev;
+
+       drm_mode_config_cleanup(ddev);
 }
index 99aaff099f22980a1c384fab3fcfe01ca097caf4..dda5625d01283d034870c1b13643d03251e08b7f 100644 (file)
@@ -10,5 +10,6 @@
 struct tidss_device;
 
 int tidss_modeset_init(struct tidss_device *tidss);
+void tidss_modeset_cleanup(struct tidss_device *tidss);
 
 #endif
index d9aee5ba9be9b39fd08db48a67b82d8d79e31e28..771c2d3c99b2ff59e35d34c3d807d44348bb3c89 100644 (file)
@@ -248,9 +248,6 @@ struct drm_plane *tidss_plane_reserve_wb(struct drm_device *dev)
        int i;
        u32 ovr_id = tidss->dispc_ops->wb_get_reserved_ovr(tidss->dispc);
 
-       dev_dbg(dev->dev, "%s: found ovr %s (%d)\n", __func__,
-               tidss->dispc_ops->vp_name(tidss->dispc, ovr_id), ovr_id);
-
        for (i = tidss->num_planes - 1; i >= 0; --i) {
                struct drm_plane *plane = tidss->planes[i];
                struct tidss_plane *tplane = to_tidss_plane(plane);
index 2cc5f6760afdb2b54ff39492b5a5fdc1df3c07fa..81a87a037ba377bf67bad7c694b7c090ea13060d 100644 (file)
@@ -121,13 +121,6 @@ static const struct tidss_scale_coefs coef3_M8 = {
        .c0 = { 512, 502, 492, 462, 432, 390, 348, 174, 256, },
 };
 
-/* Nearest neigbor coefficients for testing */
-static const struct tidss_scale_coefs coefs_null = {
-       .c2 = { 0 },
-       .c1 = { 0 },
-       .c0 = { 512, 512, 512, 512, 512, 512, 512, 512, 256,  },
-};
-
 const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
                                                      u32 firinc,
                                                      bool five_taps)
index 138f0de368674ed080edec17f2197572b8350c48..a7711e2ba3f3e82d5113df5d3b8ba2da82f36f76 100644 (file)
@@ -69,7 +69,7 @@ static void v_crtc_finish_page_flip(struct drm_crtc *crtc)
        drm_crtc_vblank_put(crtc);
 }
 
-void v_crtc_vblank_irq(struct drm_crtc *crtc)
+static void v_crtc_vblank_irq(struct drm_crtc *crtc)
 {
        struct v_crtc *v_crtc = to_v_crtc(crtc);
 
index 62ca19539f1bbb7c0e735291d1a046e26de27a4d..ef0301ffb5f7a46da31712220af7f0a78f8f9b9c 100644 (file)
@@ -172,7 +172,7 @@ static void v_plane_destroy(struct drm_plane *plane)
        kfree(v_plane);
 }
 
-struct drm_plane_state *
+static struct drm_plane_state *
 v_plane_atomic_duplicate_state(struct drm_plane *plane)
 {
        struct v_plane_state *v_state;
@@ -193,7 +193,7 @@ v_plane_atomic_duplicate_state(struct drm_plane *plane)
        return state;
 }
 
-void v_plane_atomic_destroy_state(struct drm_plane *plane,
+static void v_plane_atomic_destroy_state(struct drm_plane *plane,
                                           struct drm_plane_state *state)
 {
        struct v_plane_state *v_state = to_v_plane_state(state);
index 332d81aed80c4b96266bfb9aa9c91942b3cbf866..8a0ad7b6080d781559e344f2fe7bfca44c10b361 100644 (file)
@@ -147,12 +147,11 @@ int tidss_wb_init(struct drm_device *drmdev)
        ret = tidss_wbm2m_init(wdev);
        if (ret) {
                log_err(wdev, "Failed to initialize wb m2m\n");
+               return ret;
        }
 
        log_dbg(wdev, "WB loaded\n");
        return 0;
-
-       return ret;
 }
 
 void tidss_wb_cleanup(struct drm_device *drmdev)
index 306d7d059412d353beb119a664aabb92ff4c40f2..ed044256bdd354099cd14fbc51063e7971a24635 100644 (file)
@@ -74,7 +74,7 @@ enum tidss_wb_mode {
        TIDSS_WB_CAPTURE_MGR = 3
 };
 
-enum wb_state {
+enum tidss_wb_state {
        WB_STATE_NONE = 0,
        WB_STATE_FIRST_FRAME,
        WB_STATE_CAPTURING,
@@ -165,6 +165,15 @@ struct wbm2m_ctx {
 
        /* src & dst queue data */
        struct wb_q_data        q_data[2];
+
+       /* src & dst state data */
+       struct drm_plane_state s_state;
+       struct drm_framebuffer s_fb;
+       struct drm_gem_cma_object s_cma_gem_obj[2];
+
+       struct drm_plane_state d_state;
+       struct drm_framebuffer d_fb;
+       struct drm_gem_cma_object d_cma_gem_obj[2];
 };
 
 static inline struct wb_buffer *to_wb_buffer(struct vb2_buffer *vb2)
index 1d92733eb69db517febe818a87d997c52283c76a..da60aa0996de908379fab96fe9294261e7666b26 100644 (file)
@@ -119,6 +119,11 @@ static void prepare_plane_state(struct drm_plane_state *state,
 {
        int drm_fourcc = tidss_wb_fourcc_v4l2_to_drm(v4l2_pixelformat);
 
+       memset(state, 0, sizeof(*state));
+       memset(fb, 0, sizeof(*fb));
+       memset(&gem_obj[0], 0, sizeof(gem_obj[0]));
+       memset(&gem_obj[1], 0, sizeof(gem_obj[1]));
+
        state->fb = fb;
        state->fb->format = drm_format_info(drm_fourcc);
        state->fb->obj[0] = &gem_obj[0].base;
@@ -137,12 +142,6 @@ static void device_run(void *priv)
        struct wb_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
        struct wb_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
        struct vb2_v4l2_buffer *s_vb, *d_vb;
-       struct drm_plane_state s_state = {};
-       struct drm_framebuffer s_fb = {};
-       struct drm_gem_cma_object s_cma_gem_obj[2] = { {}, {} };
-       struct drm_plane_state d_state = {};
-       struct drm_framebuffer d_fb = {};
-       struct drm_gem_cma_object d_cma_gem_obj[2] = { {}, {} };
        struct v4l2_pix_format_mplane *spix, *dpix;
        struct v4l2_rect *srect, *drect;
        bool ok;
@@ -153,29 +152,32 @@ static void device_run(void *priv)
        /* fill source info */
        srect = &s_q_data->c_rect;
        spix = &s_q_data->format.fmt.pix_mp;
-       prepare_plane_state(&s_state, &s_fb, spix->pixelformat, s_cma_gem_obj);
-
-       s_state.src_w = srect->width << 16;
-       s_state.src_h = srect->height << 16;
-       s_state.src_x = srect->left << 16;
-       s_state.src_y = srect->top << 16;
-       s_state.crtc_w = srect->width;
-       s_state.crtc_h = srect->height;
-       s_state.fb->pitches[0] = spix->plane_fmt[0].bytesperline;
-       s_state.alpha = DRM_BLEND_ALPHA_OPAQUE;
-       s_state.color_encoding = DRM_COLOR_YCBCR_BT601;
-       s_state.color_range = DRM_COLOR_YCBCR_FULL_RANGE;
-
-       s_cma_gem_obj[0].paddr = vb2_dma_contig_plane_dma_addr(&s_vb->vb2_buf, 0);
+       prepare_plane_state(&ctx->s_state, &ctx->s_fb,
+                           spix->pixelformat, ctx->s_cma_gem_obj);
+
+       ctx->s_state.src_w = srect->width << 16;
+       ctx->s_state.src_h = srect->height << 16;
+       ctx->s_state.src_x = srect->left << 16;
+       ctx->s_state.src_y = srect->top << 16;
+       ctx->s_state.crtc_w = srect->width;
+       ctx->s_state.crtc_h = srect->height;
+       ctx->s_state.fb->pitches[0] = spix->plane_fmt[0].bytesperline;
+       ctx->s_state.alpha = DRM_BLEND_ALPHA_OPAQUE;
+       ctx->s_state.color_encoding = DRM_COLOR_YCBCR_BT601;
+       ctx->s_state.color_range = DRM_COLOR_YCBCR_FULL_RANGE;
+
+       ctx->s_cma_gem_obj[0].paddr =
+                       vb2_dma_contig_plane_dma_addr(&s_vb->vb2_buf, 0);
        if (spix->num_planes == 2) {
-               s_cma_gem_obj[1].paddr = vb2_dma_contig_plane_dma_addr(&s_vb->vb2_buf, 1);
-               s_state.fb->pitches[1] = spix->plane_fmt[1].bytesperline;
+               ctx->s_cma_gem_obj[1].paddr =
+                       vb2_dma_contig_plane_dma_addr(&s_vb->vb2_buf, 1);
+               ctx->s_state.fb->pitches[1] = spix->plane_fmt[1].bytesperline;
        } else if (spix->pixelformat == V4L2_PIX_FMT_NV12) {
-               s_cma_gem_obj[1].paddr = s_cma_gem_obj[0].paddr +
+               ctx->s_cma_gem_obj[1].paddr = ctx->s_cma_gem_obj[0].paddr +
                        (spix->plane_fmt[0].bytesperline * spix->height);
-               s_state.fb->pitches[1] = spix->plane_fmt[0].bytesperline;
+               ctx->s_state.fb->pitches[1] = spix->plane_fmt[0].bytesperline;
        }
-       if (!s_cma_gem_obj[0].paddr) {
+       if (!ctx->s_cma_gem_obj[0].paddr) {
                log_err(dev,
                        "acquiring source buffer(%d) dma_addr failed\n",
                        (&s_vb->vb2_buf)->index);
@@ -184,41 +186,46 @@ static void device_run(void *priv)
 
        log_dbg(dev, "SRC: ctx %pa buf_index %d %dx%d, pitches: %d, cpp: %d, sw %d\n",
                &ctx, (&s_vb->vb2_buf)->index,
-               s_state.crtc_w, s_state.crtc_h,
-               s_state.fb->pitches[0], s_state.fb->format->cpp[0],
-               s_state.fb->pitches[0] / s_state.fb->format->cpp[0]);
+               ctx->s_state.crtc_w, ctx->s_state.crtc_h,
+               ctx->s_state.fb->pitches[0], ctx->s_state.fb->format->cpp[0],
+               ctx->s_state.fb->pitches[0] / ctx->s_state.fb->format->cpp[0]);
        if (spix->num_planes == 2 || spix->pixelformat == V4L2_PIX_FMT_NV12) {
                log_dbg(dev, "SRC: pitches_uv: %d, cpp_uv: %d, sw_uv %d\n",
-                       s_state.fb->pitches[1], s_state.fb->format->cpp[1],
-                       s_state.fb->pitches[1] / s_state.fb->format->cpp[1]);
+                       ctx->s_state.fb->pitches[1],
+                       ctx->s_state.fb->format->cpp[1],
+                       ctx->s_state.fb->pitches[1] /
+                               ctx->s_state.fb->format->cpp[1]);
        }
 
        /* fill WB info */
        drect = &d_q_data->c_rect;
        dpix = &d_q_data->format.fmt.pix_mp;
-       prepare_plane_state(&d_state, &d_fb, dpix->pixelformat, d_cma_gem_obj);
-
-       d_state.src_w = s_state.crtc_w << 16;
-       d_state.src_h = s_state.crtc_h << 16;
-       d_state.src_x = drect->left << 16;
-       d_state.src_y = drect->top << 16;
-       d_state.crtc_w = drect->width;
-       d_state.crtc_h = drect->height;
-       d_state.fb->pitches[0] = dpix->plane_fmt[0].bytesperline;
-       d_state.alpha = DRM_BLEND_ALPHA_OPAQUE;
-       d_state.color_encoding = DRM_COLOR_YCBCR_BT601;
-       d_state.color_range = DRM_COLOR_YCBCR_FULL_RANGE;
-
-       d_cma_gem_obj[0].paddr = vb2_dma_contig_plane_dma_addr(&d_vb->vb2_buf, 0);
+       prepare_plane_state(&ctx->d_state, &ctx->d_fb,
+                           dpix->pixelformat, ctx->d_cma_gem_obj);
+
+       ctx->d_state.src_w = ctx->s_state.crtc_w << 16;
+       ctx->d_state.src_h = ctx->s_state.crtc_h << 16;
+       ctx->d_state.src_x = drect->left << 16;
+       ctx->d_state.src_y = drect->top << 16;
+       ctx->d_state.crtc_w = drect->width;
+       ctx->d_state.crtc_h = drect->height;
+       ctx->d_state.fb->pitches[0] = dpix->plane_fmt[0].bytesperline;
+       ctx->d_state.alpha = DRM_BLEND_ALPHA_OPAQUE;
+       ctx->d_state.color_encoding = DRM_COLOR_YCBCR_BT601;
+       ctx->d_state.color_range = DRM_COLOR_YCBCR_FULL_RANGE;
+
+       ctx->d_cma_gem_obj[0].paddr =
+                       vb2_dma_contig_plane_dma_addr(&d_vb->vb2_buf, 0);
        if (dpix->num_planes == 2) {
-               d_cma_gem_obj[1].paddr = vb2_dma_contig_plane_dma_addr(&d_vb->vb2_buf, 1);
-               d_state.fb->pitches[1] = dpix->plane_fmt[1].bytesperline;
+               ctx->d_cma_gem_obj[1].paddr =
+                       vb2_dma_contig_plane_dma_addr(&d_vb->vb2_buf, 1);
+               ctx->d_state.fb->pitches[1] = dpix->plane_fmt[1].bytesperline;
        } else if (dpix->pixelformat == V4L2_PIX_FMT_NV12) {
-               d_cma_gem_obj[1].paddr = d_cma_gem_obj[0].paddr +
+               ctx->d_cma_gem_obj[1].paddr = ctx->d_cma_gem_obj[0].paddr +
                        (dpix->plane_fmt[0].bytesperline * dpix->height);
-               d_state.fb->pitches[1] = dpix->plane_fmt[0].bytesperline;
+               ctx->d_state.fb->pitches[1] = dpix->plane_fmt[0].bytesperline;
        }
-       if (!d_cma_gem_obj[0].paddr) {
+       if (!ctx->d_cma_gem_obj[0].paddr) {
                log_err(dev,
                        "acquiring destination buffer(%d) dma_addr failed\n",
                        (&d_vb->vb2_buf)->index);
@@ -227,30 +234,34 @@ static void device_run(void *priv)
 
        log_dbg(dev, "DST: ctx %pa buf_index %d %dx%d, pitches: %d, cpp: %d, sw %d\n",
                &ctx, (&d_vb->vb2_buf)->index,
-               d_state.crtc_w, d_state.crtc_h,
-               d_state.fb->pitches[0], d_state.fb->format->cpp[0],
-               d_state.fb->pitches[0] / d_state.fb->format->cpp[0]);
+               ctx->d_state.crtc_w, ctx->d_state.crtc_h,
+               ctx->d_state.fb->pitches[0], ctx->d_state.fb->format->cpp[0],
+               ctx->d_state.fb->pitches[0] / ctx->d_state.fb->format->cpp[0]);
        if (dpix->num_planes == 2 || dpix->pixelformat == V4L2_PIX_FMT_NV12) {
                log_dbg(dev, "DST: pitches_uv: %d, cpp_uv: %d, sw_uv %d\n",
-                       d_state.fb->pitches[1], d_state.fb->format->cpp[1],
-                       d_state.fb->pitches[1] / d_state.fb->format->cpp[1]);
+                       ctx->d_state.fb->pitches[1],
+                       ctx->d_state.fb->format->cpp[1],
+                       ctx->d_state.fb->pitches[1] /
+                               ctx->d_state.fb->format->cpp[1]);
        }
 
        ok = wbm2m_convert(dev, ctx->dev->plane,
-                         (const struct drm_plane_state *)&s_state,
-                         (const struct drm_plane_state *)&d_state);
+                          (const struct drm_plane_state *)&ctx->s_state,
+                          (const struct drm_plane_state *)&ctx->d_state);
        if (!ok) {
                log_err(dev,
                        "Conversion setup failed, check source and destination parameters\n"
                        );
                log_err(dev, "\tSRC: %dx%d, fmt: %4.4s sw %d\n",
-                       s_state.crtc_w, s_state.crtc_h,
+                       ctx->s_state.crtc_w, ctx->s_state.crtc_h,
                        (char *)&spix->pixelformat,
-                       s_state.fb->pitches[0] / s_state.fb->format->cpp[0]);
+                       ctx->s_state.fb->pitches[0] /
+                               ctx->s_state.fb->format->cpp[0]);
                log_err(dev, "\tDST: %dx%d, fmt: %4.4s sw %d\n",
-                       d_state.crtc_w, d_state.crtc_h,
+                       ctx->d_state.crtc_w, ctx->d_state.crtc_h,
                        (char *)&dpix->pixelformat,
-                       d_state.fb->pitches[0] / d_state.fb->format->cpp[0]);
+                       ctx->d_state.fb->pitches[0] /
+                               ctx->d_state.fb->format->cpp[0]);
                return;
        }
 }
index 6fe91c1b692d6fd547fcfff08b1633c229e77b0a..185655f22f8986e8708ee3a9fbddbb1258d6bac4 100644 (file)
@@ -273,15 +273,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
                else
                        ret = vmf_insert_pfn(&cvma, address, pfn);
 
-               /*
-                * Somebody beat us to this PTE or prefaulting to
-                * an already populated PTE, or prefaulting error.
-                */
-
-               if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
-                       break;
-               else if (unlikely(ret & VM_FAULT_ERROR))
-                       goto out_io_unlock;
+               /* Never error on prefaulted PTEs */
+               if (unlikely((ret & VM_FAULT_ERROR))) {
+                       if (i == 0)
+                               goto out_io_unlock;
+                       else
+                               break;
+               }
 
                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
index 54e767bd5ddb7c5fc282f18cde0a10dd65094682..f28703db8dbd65a4f68b2f7d960a3157858b72e5 100644 (file)
@@ -47,10 +47,16 @@ static const struct file_operations udl_driver_fops = {
        .llseek = noop_llseek,
 };
 
+static void udl_driver_release(struct drm_device *dev)
+{
+       udl_fini(dev);
+       udl_modeset_cleanup(dev);
+       drm_dev_fini(dev);
+       kfree(dev);
+}
+
 static struct drm_driver driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
-       .load = udl_driver_load,
-       .unload = udl_driver_unload,
        .release = udl_driver_release,
 
        /* gem hooks */
@@ -74,28 +80,56 @@ static struct drm_driver driver = {
        .patchlevel = DRIVER_PATCHLEVEL,
 };
 
+static struct udl_device *udl_driver_create(struct usb_interface *interface)
+{
+       struct usb_device *udev = interface_to_usbdev(interface);
+       struct udl_device *udl;
+       int r;
+
+       udl = kzalloc(sizeof(*udl), GFP_KERNEL);
+       if (!udl)
+               return ERR_PTR(-ENOMEM);
+
+       r = drm_dev_init(&udl->drm, &driver, &interface->dev);
+       if (r) {
+               kfree(udl);
+               return ERR_PTR(r);
+       }
+
+       udl->udev = udev;
+       udl->drm.dev_private = udl;
+
+       r = udl_init(udl);
+       if (r) {
+               drm_dev_fini(&udl->drm);
+               kfree(udl);
+               return ERR_PTR(r);
+       }
+
+       usb_set_intfdata(interface, udl);
+       return udl;
+}
+
 static int udl_usb_probe(struct usb_interface *interface,
                         const struct usb_device_id *id)
 {
-       struct usb_device *udev = interface_to_usbdev(interface);
-       struct drm_device *dev;
        int r;
+       struct udl_device *udl;
 
-       dev = drm_dev_alloc(&driver, &interface->dev);
-       if (IS_ERR(dev))
-               return PTR_ERR(dev);
+       udl = udl_driver_create(interface);
+       if (IS_ERR(udl))
+               return PTR_ERR(udl);
 
-       r = drm_dev_register(dev, (unsigned long)udev);
+       r = drm_dev_register(&udl->drm, 0);
        if (r)
                goto err_free;
 
-       usb_set_intfdata(interface, dev);
-       DRM_INFO("Initialized udl on minor %d\n", dev->primary->index);
+       DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
 
        return 0;
 
 err_free:
-       drm_dev_unref(dev);
+       drm_dev_put(&udl->drm);
        return r;
 }
 
index 4ae67d882eae928e6b39fb4f240a46bfc272ed15..35c1f33fbc1a0b455c9d2dcf3d23d638923bc863 100644 (file)
@@ -50,8 +50,8 @@ struct urb_list {
 struct udl_fbdev;
 
 struct udl_device {
+       struct drm_device drm;
        struct device *dev;
-       struct drm_device *ddev;
        struct usb_device *udev;
        struct drm_crtc *crtc;
 
@@ -71,6 +71,8 @@ struct udl_device {
        atomic_t cpu_kcycles_used; /* transpired during pixel processing */
 };
 
+#define to_udl(x) container_of(x, struct udl_device, drm)
+
 struct udl_gem_object {
        struct drm_gem_object base;
        struct page **pages;
@@ -102,9 +104,8 @@ struct urb *udl_get_urb(struct drm_device *dev);
 int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
 void udl_urb_completion(struct urb *urb);
 
-int udl_driver_load(struct drm_device *dev, unsigned long flags);
-void udl_driver_unload(struct drm_device *dev);
-void udl_driver_release(struct drm_device *dev);
+int udl_init(struct udl_device *udl);
+void udl_fini(struct drm_device *dev);
 
 int udl_fbdev_init(struct drm_device *dev);
 void udl_fbdev_cleanup(struct drm_device *dev);
index dd9ffded223b5fb09c025d518d5090b27716b560..4ab101bf1df010b58c377079e892fc8142c7064d 100644 (file)
@@ -82,7 +82,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
                      int width, int height)
 {
        struct drm_device *dev = fb->base.dev;
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        int i, ret;
        char *cmd;
        cycles_t start_cycles, end_cycles;
@@ -210,10 +210,10 @@ static int udl_fb_open(struct fb_info *info, int user)
 {
        struct udl_fbdev *ufbdev = info->par;
        struct drm_device *dev = ufbdev->ufb.base.dev;
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
 
        /* If the USB device is gone, we don't accept new opens */
-       if (drm_dev_is_unplugged(udl->ddev))
+       if (drm_dev_is_unplugged(&udl->drm))
                return -ENODEV;
 
        ufbdev->fb_count++;
@@ -441,7 +441,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
 
 int udl_fbdev_init(struct drm_device *dev)
 {
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        int bpp_sel = fb_bpp;
        struct udl_fbdev *ufbdev;
        int ret;
@@ -480,7 +480,7 @@ free:
 
 void udl_fbdev_cleanup(struct drm_device *dev)
 {
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        if (!udl->fbdev)
                return;
 
@@ -491,7 +491,7 @@ void udl_fbdev_cleanup(struct drm_device *dev)
 
 void udl_fbdev_unplug(struct drm_device *dev)
 {
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        struct udl_fbdev *ufbdev;
        if (!udl->fbdev)
                return;
index bb7b58407039bbbb099a371b9a432dc12983f886..3b3e17652bb20f341e68fb33302c8635a8db18c9 100644 (file)
@@ -203,7 +203,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 {
        struct udl_gem_object *gobj;
        struct drm_gem_object *obj;
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        int ret = 0;
 
        mutex_lock(&udl->gem_lock);
index 19055dda31409d0d1bcc50084a37b4caf5fc7b85..8d22b6cd524123f60d2220bc32441cbf5ff8c285 100644 (file)
@@ -29,7 +29,7 @@
 static int udl_parse_vendor_descriptor(struct drm_device *dev,
                                       struct usb_device *usbdev)
 {
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        char *desc;
        char *buf;
        char *desc_end;
@@ -165,7 +165,7 @@ void udl_urb_completion(struct urb *urb)
 
 static void udl_free_urb_list(struct drm_device *dev)
 {
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        int count = udl->urbs.count;
        struct list_head *node;
        struct urb_node *unode;
@@ -198,7 +198,7 @@ static void udl_free_urb_list(struct drm_device *dev)
 
 static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
 {
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        struct urb *urb;
        struct urb_node *unode;
        char *buf;
@@ -262,7 +262,7 @@ retry:
 
 struct urb *udl_get_urb(struct drm_device *dev)
 {
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        int ret = 0;
        struct list_head *entry;
        struct urb_node *unode;
@@ -295,7 +295,7 @@ error:
 
 int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
 {
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
        int ret;
 
        BUG_ON(len > udl->urbs.size);
@@ -310,20 +310,12 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
        return ret;
 }
 
-int udl_driver_load(struct drm_device *dev, unsigned long flags)
+int udl_init(struct udl_device *udl)
 {
-       struct usb_device *udev = (void*)flags;
-       struct udl_device *udl;
+       struct drm_device *dev = &udl->drm;
        int ret = -ENOMEM;
 
        DRM_DEBUG("\n");
-       udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
-       if (!udl)
-               return -ENOMEM;
-
-       udl->udev = udev;
-       udl->ddev = dev;
-       dev->dev_private = udl;
 
        mutex_init(&udl->gem_lock);
 
@@ -357,7 +349,6 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
 err:
        if (udl->urbs.count)
                udl_free_urb_list(dev);
-       kfree(udl);
        DRM_ERROR("%d\n", ret);
        return ret;
 }
@@ -368,9 +359,9 @@ int udl_drop_usb(struct drm_device *dev)
        return 0;
 }
 
-void udl_driver_unload(struct drm_device *dev)
+void udl_fini(struct drm_device *dev)
 {
-       struct udl_device *udl = dev->dev_private;
+       struct udl_device *udl = to_udl(dev);
 
        drm_kms_helper_poll_fini(dev);
 
@@ -378,12 +369,4 @@ void udl_driver_unload(struct drm_device *dev)
                udl_free_urb_list(dev);
 
        udl_fbdev_cleanup(dev);
-       kfree(udl);
-}
-
-void udl_driver_release(struct drm_device *dev)
-{
-       udl_modeset_cleanup(dev);
-       drm_dev_fini(dev);
-       kfree(dev);
 }
index 7bdf6f0e58a5343a880470e61e69ee2292b7bd42..8d2f5ded86d66e19241528c51e7474e86283bbc2 100644 (file)
@@ -528,6 +528,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
        if (!ret)
                return -EBUSY;
 
+       /* is_valid check must proceed before copy of the cache entry. */
+       smp_rmb();
+
        ptr = cache_ent->caps_cache;
 
 copy_exit:
index 020070d483d350a58695c7fa9f21f7c2be4db463..c8a581b1f4c40355a4bcb0eacc58d8043712bbbb 100644 (file)
@@ -588,6 +588,8 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
+                       /* Copy must occur before is_valid is signalled. */
+                       smp_wmb();
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
index e4e09d47c5c0e001934a14a2df103e13a7a328aa..0af048d1a8156acfdb24af337f3b3479c4bb1004 100644 (file)
@@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
                                     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
                if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
                        kfree(reply);
-
+                       reply = NULL;
                        if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
                                /* A checkpoint occurred. Retry. */
                                continue;
@@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 
                if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
                        kfree(reply);
-
+                       reply = NULL;
                        if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
                                /* A checkpoint occurred. Retry. */
                                continue;
@@ -389,7 +389,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
                break;
        }
 
-       if (retries == RETRIES)
+       if (!reply)
                return -EINVAL;
 
        *msg_len = reply_len;
index 815bdb42e3f0368f78c8397f4b2fbda3e94f8ccf..0121fe7a4548dbbfbc6a24ba21667db8f0ff93e7 100644 (file)
@@ -423,6 +423,9 @@ static int host1x_device_add(struct host1x *host1x,
 
        of_dma_configure(&device->dev, host1x->dev->of_node, true);
 
+       device->dev.dma_parms = &device->dma_parms;
+       dma_set_max_seg_size(&device->dev, SZ_4M);
+
        err = host1x_device_parse_dt(device, driver);
        if (err < 0) {
                kfree(device);
index 67cc820253a99b84341825437e9e0bfce3cd16d6..fb79e118f26c8759cc37ec1b222b89f609a53fea 100644 (file)
@@ -257,7 +257,7 @@ static int init_csc(struct ipu_ic *ic,
        writel(param, base++);
 
        param = ((a[0] & 0x1fe0) >> 5) | (params->scale << 8) |
-               (params->sat << 9);
+               (params->sat << 10);
        writel(param, base++);
 
        param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) |
index 9428ea7cdf8a00dc686e00c6da77be5cb60215b7..c52bd163abb3e1a93714f0ddc64a418858f52881 100644 (file)
 #define A4_2WHEEL_MOUSE_HACK_7 0x01
 #define A4_2WHEEL_MOUSE_HACK_B8        0x02
 
+#define A4_WHEEL_ORIENTATION   (HID_UP_GENDESK | 0x000000b8)
+
 struct a4tech_sc {
        unsigned long quirks;
        unsigned int hw_wheel;
        __s32 delayed_value;
 };
 
+static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+                           struct hid_field *field, struct hid_usage *usage,
+                           unsigned long **bit, int *max)
+{
+       struct a4tech_sc *a4 = hid_get_drvdata(hdev);
+
+       if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
+           usage->hid == A4_WHEEL_ORIENTATION) {
+               /*
+                * We do not want to have this usage mapped to anything as it's
+                * nonstandard and doesn't really behave like an HID report.
+                * It's only selecting the orientation (vertical/horizontal) of
+                * the previous mouse wheel report. The input_events will be
+                * generated once both reports are recorded in a4_event().
+                */
+               return -1;
+       }
+
+       return 0;
+
+}
+
 static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
                struct hid_field *field, struct hid_usage *usage,
                unsigned long **bit, int *max)
@@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
        struct a4tech_sc *a4 = hid_get_drvdata(hdev);
        struct input_dev *input;
 
-       if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
-                       !usage->type)
+       if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
                return 0;
 
        input = field->hidinput->input;
@@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
                        return 1;
                }
 
-               if (usage->hid == 0x000100b8) {
+               if (usage->hid == A4_WHEEL_ORIENTATION) {
                        input_event(input, EV_REL, value ? REL_HWHEEL :
                                        REL_WHEEL, a4->delayed_value);
                        return 1;
@@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
 static struct hid_driver a4_driver = {
        .name = "a4tech",
        .id_table = a4_devices,
+       .input_mapping = a4_input_mapping,
        .input_mapped = a4_input_mapped,
        .event = a4_event,
        .probe = a4_probe,
index 1cb41992aaa1f650f89cbf5ace72bd46786d7abc..d0a81a03ddbdd5cb206db6760a2954feaff0de41 100644 (file)
@@ -57,7 +57,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
 struct apple_sc {
        unsigned long quirks;
        unsigned int fn_on;
-       DECLARE_BITMAP(pressed_fn, KEY_CNT);
        DECLARE_BITMAP(pressed_numlock, KEY_CNT);
 };
 
@@ -184,6 +183,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
 {
        struct apple_sc *asc = hid_get_drvdata(hid);
        const struct apple_key_translation *trans, *table;
+       bool do_translate;
+       u16 code = 0;
 
        if (usage->code == KEY_FN) {
                asc->fn_on = !!value;
@@ -192,8 +193,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
        }
 
        if (fnmode) {
-               int do_translate;
-
                if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
                                hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
                        table = macbookair_fn_keys;
@@ -205,25 +204,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
                trans = apple_find_translation (table, usage->code);
 
                if (trans) {
-                       if (test_bit(usage->code, asc->pressed_fn))
-                               do_translate = 1;
-                       else if (trans->flags & APPLE_FLAG_FKEY)
-                               do_translate = (fnmode == 2 && asc->fn_on) ||
-                                       (fnmode == 1 && !asc->fn_on);
-                       else
-                               do_translate = asc->fn_on;
-
-                       if (do_translate) {
-                               if (value)
-                                       set_bit(usage->code, asc->pressed_fn);
-                               else
-                                       clear_bit(usage->code, asc->pressed_fn);
-
-                               input_event(input, usage->type, trans->to,
-                                               value);
-
-                               return 1;
+                       if (test_bit(trans->from, input->key))
+                               code = trans->from;
+                       else if (test_bit(trans->to, input->key))
+                               code = trans->to;
+
+                       if (!code) {
+                               if (trans->flags & APPLE_FLAG_FKEY) {
+                                       switch (fnmode) {
+                                       case 1:
+                                               do_translate = !asc->fn_on;
+                                               break;
+                                       case 2:
+                                               do_translate = asc->fn_on;
+                                               break;
+                                       default:
+                                               /* should never happen */
+                                               do_translate = false;
+                                       }
+                               } else {
+                                       do_translate = asc->fn_on;
+                               }
+
+                               code = do_translate ? trans->to : trans->from;
                        }
+
+                       input_event(input, usage->type, code, value);
+                       return 1;
                }
 
                if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
index 271f31461da427d93459632b096c578d71f3ee44..6f65f525723688d7847664bc98e41e65ebc93cfe 100644 (file)
@@ -1160,8 +1160,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
 
        INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
 
-       cp2112_gpio_direction_input(gc, d->hwirq);
-
        if (!dev->gpio_poll) {
                dev->gpio_poll = true;
                schedule_delayed_work(&dev->gpio_poll_worker, 0);
@@ -1209,6 +1207,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
                return PTR_ERR(dev->desc[pin]);
        }
 
+       ret = cp2112_gpio_direction_input(&dev->gc, pin);
+       if (ret < 0) {
+               dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
+               goto err_desc;
+       }
+
        ret = gpiochip_lock_as_irq(&dev->gc, pin);
        if (ret) {
                dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
index 6e1a4a4fc0c109f7bf60083963d8c239a10939cf..ab9da597106fa5f49d4a12c07cbfff76ad33088d 100644 (file)
@@ -126,9 +126,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
 
        /* Locate the boot interface, to receive the LED change events */
        struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+       struct hid_device *boot_hid;
+       struct hid_input *boot_hid_input;
 
-       struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
-       struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+       if (unlikely(boot_interface == NULL))
+               return -ENODEV;
+
+       boot_hid = usb_get_intfdata(boot_interface);
+       boot_hid_input = list_first_entry(&boot_hid->inputs,
                struct hid_input, list);
 
        return boot_hid_input->input->event(boot_hid_input->input, type, code,
index 92452992b3681545546e9ea91abe33cc1dc5c64a..0eeb273fb73d22ceeefe535976fd4c173002dbb3 100644 (file)
@@ -82,6 +82,7 @@
 #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP    0x1220
 #define HID_DEVICE_ID_ALPS_U1          0x1215
 #define HID_DEVICE_ID_ALPS_T4_BTNLESS  0x120C
+#define HID_DEVICE_ID_ALPS_1222                0x1222
 
 
 #define USB_VENDOR_ID_AMI              0x046b
 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH      0xb19d
 #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
 #define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053
+#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2        0x0939
 #define USB_DEVICE_ID_CHICONY_WIRELESS2        0x1123
 #define USB_DEVICE_ID_ASUS_AK1D                0x1125
 #define USB_DEVICE_ID_CHICONY_ACER_SWITCH12    0x1421
 #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A  0x0b4a
 #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE         0x134a
 #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A    0x094a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941    0x0941
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641    0x0641
 
 #define USB_VENDOR_ID_HUION            0x256c
 #define USB_DEVICE_ID_HUION_TABLET     0x006e
 #define USB_DEVICE_ID_SAITEK_RAT7      0x0cd7
 #define USB_DEVICE_ID_SAITEK_RAT9      0x0cfa
 #define USB_DEVICE_ID_SAITEK_MMO7      0x0cd0
+#define USB_DEVICE_ID_SAITEK_X52       0x075c
 
 #define USB_VENDOR_ID_SAMSUNG          0x0419
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE        0x0001
index 596227ddb6e078af028b2d83102d40e5006b147a..17d6123f7930fd62b835a303944fafdbae6d79a9 100644 (file)
@@ -763,7 +763,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
                if (!buf) {
                        ret = -ENOMEM;
-                       goto err_free;
+                       goto err_stop;
                }
 
                ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
@@ -795,9 +795,12 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
                ret = lg4ff_init(hdev);
 
        if (ret)
-               goto err_free;
+               goto err_stop;
 
        return 0;
+
+err_stop:
+       hid_hw_stop(hdev);
 err_free:
        kfree(drv_data);
        return ret;
@@ -808,8 +811,7 @@ static void lg_remove(struct hid_device *hdev)
        struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
        if (drv_data->quirks & LG_FF4)
                lg4ff_deinit(hdev);
-       else
-               hid_hw_stop(hdev);
+       hid_hw_stop(hdev);
        kfree(drv_data);
 }
 
index 512d67e1aae386e37b90288f800734b5a826aebe..4b26928cb2b658676d614d8fef5dfe216f37332c 100644 (file)
@@ -1483,7 +1483,6 @@ int lg4ff_deinit(struct hid_device *hid)
                }
        }
 #endif
-       hid_hw_stop(hid);
        drv_data->device_props = NULL;
 
        kfree(entry);
index 184e49036e1dce14c332aa5a071bc3d43e22b63a..f9167d0e095ceff56b6aec3422692ae328be57a4 100644 (file)
@@ -1788,6 +1788,10 @@ static const struct hid_device_id mt_devices[] = {
                HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
                        USB_VENDOR_ID_ALPS_JP,
                        HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
+       { .driver_data = MT_CLS_WIN_8_DUAL,
+               HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+                       USB_VENDOR_ID_ALPS_JP,
+                       HID_DEVICE_ID_ALPS_1222) },
 
        /* Lenovo X1 TAB Gen 2 */
        { .driver_data = MT_CLS_WIN_8_DUAL,
index 87eda34ea2f86aa2abb26ac40793ad2e9df40f56..d3773251b3745fd6622411244ec0e0a6dedb267a 100644 (file)
@@ -555,10 +555,14 @@ static void pcmidi_setup_extra_keys(
 
 static int pcmidi_set_operational(struct pcmidi_snd *pm)
 {
+       int rc;
+
        if (pm->ifnum != 1)
                return 0; /* only set up ONCE for interace 1 */
 
-       pcmidi_get_output_report(pm);
+       rc = pcmidi_get_output_report(pm);
+       if (rc < 0)
+               return rc;
        pcmidi_submit_output_report(pm, 0xc1);
        return 0;
 }
@@ -687,7 +691,11 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
        spin_lock_init(&pm->rawmidi_in_lock);
 
        init_sustain_timers(pm);
-       pcmidi_set_operational(pm);
+       err = pcmidi_set_operational(pm);
+       if (err < 0) {
+               pk_error("failed to find output report\n");
+               goto fail_register;
+       }
 
        /* register it */
        err = snd_card_register(card);
index 5892f1bd037ec4f4aba27f609aac246e33e1cc7f..a407fd2399ff435f4828dfa641fea78af7094a74 100644 (file)
@@ -45,6 +45,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD },
        { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET },
@@ -93,6 +94,8 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
@@ -141,6 +144,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
index 9671a4bad64392b2348d977c424027b559994c6d..09f2c617b09fd0fda62ae9793274b3e397a31278 100644 (file)
@@ -587,10 +587,14 @@ static void sony_set_leds(struct sony_sc *sc);
 static inline void sony_schedule_work(struct sony_sc *sc,
                                      enum sony_worker which)
 {
+       unsigned long flags;
+
        switch (which) {
        case SONY_WORKER_STATE:
-               if (!sc->defer_initialization)
+               spin_lock_irqsave(&sc->lock, flags);
+               if (!sc->defer_initialization && sc->state_worker_initialized)
                        schedule_work(&sc->state_worker);
+               spin_unlock_irqrestore(&sc->lock, flags);
                break;
        case SONY_WORKER_HOTPLUG:
                if (sc->hotplug_worker_initialized)
@@ -2553,13 +2557,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
 
 static inline void sony_cancel_work_sync(struct sony_sc *sc)
 {
+       unsigned long flags;
+
        if (sc->hotplug_worker_initialized)
                cancel_work_sync(&sc->hotplug_worker);
-       if (sc->state_worker_initialized)
+       if (sc->state_worker_initialized) {
+               spin_lock_irqsave(&sc->lock, flags);
+               sc->state_worker_initialized = 0;
+               spin_unlock_irqrestore(&sc->lock, flags);
                cancel_work_sync(&sc->state_worker);
+       }
 }
 
-
 static int sony_input_configured(struct hid_device *hdev,
                                        struct hid_input *hidinput)
 {
@@ -2797,7 +2806,6 @@ err_stop:
        sony_cancel_work_sync(sc);
        sony_remove_dev_list(sc);
        sony_release_device_id(sc);
-       hid_hw_stop(hdev);
        return ret;
 }
 
@@ -2859,6 +2867,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
         */
        if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
                hid_err(hdev, "failed to claim input\n");
+               hid_hw_stop(hdev);
                return -ENODEV;
        }
 
index bea8def64f437ed13ed5576a479634cee0a27ca4..30b8c3256c9917e88f6dcff081ee7a9306bc00f9 100644 (file)
@@ -34,6 +34,8 @@
 
 #include "hid-ids.h"
 
+#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT       0xb320
+
 static const signed short ff_rumble[] = {
        FF_RUMBLE,
        -1
@@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
        struct hid_field *ff_field = tmff->ff_field;
        int x, y;
        int left, right;        /* Rumbling */
+       int motor_swap;
 
        switch (effect->type) {
        case FF_CONSTANT:
@@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
                                        ff_field->logical_minimum,
                                        ff_field->logical_maximum);
 
+               /* 2-in-1 strong motor is left */
+               if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
+                       motor_swap = left;
+                       left = right;
+                       right = motor_swap;
+               }
+
                dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
                ff_field->value[0] = left;
                ff_field->value[1] = right;
@@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
                .driver_data = (unsigned long)ff_rumble },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304),   /* FireStorm Dual Power 2 (and 3) */
                .driver_data = (unsigned long)ff_rumble },
+       { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT),   /* Dual Trigger 2-in-1 */
+               .driver_data = (unsigned long)ff_rumble },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323),   /* Dual Trigger 3-in-1 (PC Mode) */
                .driver_data = (unsigned long)ff_rumble },
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324),   /* Dual Trigger 3-in-1 (PS3 Mode) */
index 4a44e48e08b225a6180ad014604dabc83ce65c2d..c7cff929b4190874790602119498e9331537956d 100644 (file)
@@ -378,7 +378,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
 
        mutex_lock(&minors_lock);
        dev = hidraw_table[minor];
-       if (!dev) {
+       if (!dev || !dev->exist) {
                ret = -ENODEV;
                goto out;
        }
index a746017fac170ca15895435fd4df4fbe3a04d51f..5a949ca42b1d06c4279e08cbe0674f7fa85ef676 100644 (file)
@@ -297,6 +297,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
        spin_unlock_irq(&list->hiddev->list_lock);
 
        mutex_lock(&hiddev->existancelock);
+       /*
+        * recheck exist with existance lock held to
+        * avoid opening a disconnected device
+        */
+       if (!list->hiddev->exist) {
+               res = -ENODEV;
+               goto bail_unlock;
+       }
        if (!list->hiddev->open++)
                if (list->hiddev->exist) {
                        struct hid_device *hid = hiddev->hid;
@@ -313,6 +321,10 @@ bail_normal_power:
        hid_hw_power(hid, PM_HINT_NORMAL);
 bail_unlock:
        mutex_unlock(&hiddev->existancelock);
+
+       spin_lock_irq(&list->hiddev->list_lock);
+       list_del(&list->node);
+       spin_unlock_irq(&list->hiddev->list_lock);
 bail:
        file->private_data = NULL;
        vfree(list);
index 0bdd85d486feeb2fec33f7651f201df146c8cd71..3038c975e417c2e4c58fb13e5e9f815d87445f25 100644 (file)
@@ -91,7 +91,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
 }
 
 static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
-               struct hid_report *report, u8 *raw_data, int size)
+               struct hid_report *report, u8 *raw_data, int report_size)
 {
        struct wacom *wacom = hid_get_drvdata(hdev);
        struct wacom_wac *wacom_wac = &wacom->wacom_wac;
@@ -152,7 +152,8 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
        if (flush)
                wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
        else if (insert)
-               wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size);
+               wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
+                                      raw_data, report_size);
 
        return insert && !flush;
 }
@@ -275,18 +276,23 @@ static void wacom_feature_mapping(struct hid_device *hdev,
        wacom_hid_usage_quirk(hdev, field, usage);
 
        switch (equivalent_usage) {
+       case WACOM_HID_WD_TOUCH_RING_SETTING:
+               wacom->generic_has_leds = true;
+               break;
        case HID_DG_CONTACTMAX:
                /* leave touch_max as is if predefined */
                if (!features->touch_max) {
                        /* read manually */
-                       data = kzalloc(2, GFP_KERNEL);
+                       n = hid_report_len(field->report);
+                       data = hid_alloc_report_buf(field->report, GFP_KERNEL);
                        if (!data)
                                break;
                        data[0] = field->report->id;
                        ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
-                                               data, 2, WAC_CMD_RETRIES);
-                       if (ret == 2) {
-                               features->touch_max = data[1];
+                                              data, n, WAC_CMD_RETRIES);
+                       if (ret == n) {
+                               ret = hid_report_raw_event(hdev,
+                                       HID_FEATURE_REPORT, data, n, 0);
                        } else {
                                features->touch_max = 16;
                                hid_warn(hdev, "wacom_feature_mapping: "
@@ -2142,7 +2148,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
 {
        struct wacom_wac *wacom_wac = &wacom->wacom_wac;
        struct wacom_features *features = &wacom_wac->features;
-       char name[WACOM_NAME_MAX];
+       char name[WACOM_NAME_MAX - 20]; /* Leave some room for suffixes */
 
        /* Generic devices name unspecified */
        if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
index d7c3f4ac2c045d230f81eaa9880cdba7606f0243..1df037e7f0b4212f5b6432d8ee6183681f2167fc 100644 (file)
@@ -255,7 +255,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
 
 static int wacom_dtus_irq(struct wacom_wac *wacom)
 {
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
        struct input_dev *input = wacom->pen_input;
        unsigned short prox, pressure = 0;
 
@@ -537,14 +537,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
                 */
                buttons = (data[4] << 1) | (data[3] & 0x01);
        } else if (features->type == CINTIQ_COMPANION_2) {
-               /* d-pad right  -> data[4] & 0x10
-                * d-pad up     -> data[4] & 0x20
-                * d-pad left   -> data[4] & 0x40
-                * d-pad down   -> data[4] & 0x80
-                * d-pad center -> data[3] & 0x01
+               /* d-pad right  -> data[2] & 0x10
+                * d-pad up     -> data[2] & 0x20
+                * d-pad left   -> data[2] & 0x40
+                * d-pad down   -> data[2] & 0x80
+                * d-pad center -> data[1] & 0x01
                 */
                buttons = ((data[2] >> 4) << 7) |
-                         ((data[1] & 0x04) << 6) |
+                         ((data[1] & 0x04) << 4) |
                          ((data[2] & 0x0F) << 2) |
                          (data[1] & 0x03);
        } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
@@ -576,7 +576,7 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
                strip2 = ((data[3] & 0x1f) << 8) | data[4];
        }
 
-       prox = (buttons & ~(~0 << nbuttons)) | (keys & ~(~0 << nkeys)) |
+       prox = (buttons & ~(~0U << nbuttons)) | (keys & ~(~0U << nkeys)) |
               (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2;
 
        wacom_report_numbered_buttons(input, nbuttons, buttons);
@@ -848,6 +848,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
                y >>= 1;
                distance >>= 1;
        }
+       if (features->type == INTUOSHT2)
+               distance = features->distance_max - distance;
        input_report_abs(input, ABS_X, x);
        input_report_abs(input, ABS_Y, y);
        input_report_abs(input, ABS_DISTANCE, distance);
@@ -1061,7 +1063,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
        input_report_key(input, BTN_BASE2, (data[11] & 0x02));
 
        if (data[12] & 0x80)
-               input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
+               input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
        else
                input_report_abs(input, ABS_WHEEL, 0);
 
@@ -1928,8 +1930,6 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
                features->device_type |= WACOM_DEVICETYPE_PAD;
                break;
        case WACOM_HID_WD_BUTTONCENTER:
-               wacom->generic_has_leds = true;
-               /* fall through */
        case WACOM_HID_WD_BUTTONHOME:
        case WACOM_HID_WD_BUTTONUP:
        case WACOM_HID_WD_BUTTONDOWN:
@@ -2121,14 +2121,12 @@ static void wacom_wac_pad_report(struct hid_device *hdev,
        bool active = wacom_wac->hid_data.inrange_state != 0;
 
        /* report prox for expresskey events */
-       if ((wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) &&
-           wacom_wac->hid_data.pad_input_event_flag) {
+       if (wacom_wac->hid_data.pad_input_event_flag) {
                input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
                input_sync(input);
                if (!active)
                        wacom_wac->hid_data.pad_input_event_flag = false;
        }
-
 }
 
 static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
@@ -2535,6 +2533,7 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
        struct wacom *wacom = hid_get_drvdata(hdev);
        struct wacom_wac *wacom_wac = &wacom->wacom_wac;
        unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+       struct wacom_features *features = &wacom->wacom_wac.features;
 
        switch (equivalent_usage) {
        case HID_GD_X:
@@ -2555,6 +2554,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
        case HID_DG_TIPSWITCH:
                wacom_wac->hid_data.tipswitch = value;
                break;
+       case HID_DG_CONTACTMAX:
+               features->touch_max = value;
+               return;
        }
 
 
@@ -2725,9 +2727,7 @@ static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *repo
        if (report->type != HID_INPUT_REPORT)
                return -1;
 
-       if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
-               wacom_wac_pad_report(hdev, report, field);
-       else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+       if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
                wacom_wac_pen_report(hdev, report);
        else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
                wacom_wac_finger_report(hdev, report);
@@ -2741,7 +2741,7 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
        struct wacom_wac *wacom_wac = &wacom->wacom_wac;
        struct hid_field *field;
        bool pad_in_hid_field = false, pen_in_hid_field = false,
-               finger_in_hid_field = false;
+               finger_in_hid_field = false, true_pad = false;
        int r;
        int prev_collection = -1;
 
@@ -2757,6 +2757,8 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
                        pen_in_hid_field = true;
                if (WACOM_FINGER_FIELD(field))
                        finger_in_hid_field = true;
+               if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY)
+                       true_pad = true;
        }
 
        wacom_wac_battery_pre_report(hdev, report);
@@ -2780,6 +2782,9 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
        }
 
        wacom_wac_battery_report(hdev, report);
+
+       if (true_pad && wacom->wacom_wac.pad_input)
+               wacom_wac_pad_report(hdev, report, field);
 }
 
 static int wacom_bpt_touch(struct wacom_wac *wacom)
@@ -3735,7 +3740,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
                                             0, 5920, 4, 0);
                }
                input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
-               input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
+               input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40);
 
                /* fall through */
 
index 295fd3718caa0bed767683775c852d47de92fd7b..f67d871841c0c62859a963e2566b23835b7e2b27 100644 (file)
 #define WACOM_HID_WD_OFFSETBOTTOM       (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33)
 #define WACOM_HID_WD_DATAMODE           (WACOM_HID_UP_WACOMDIGITIZER | 0x1002)
 #define WACOM_HID_WD_DIGITIZERINFO      (WACOM_HID_UP_WACOMDIGITIZER | 0x1013)
+#define WACOM_HID_WD_TOUCH_RING_SETTING (WACOM_HID_UP_WACOMDIGITIZER | 0x1032)
 #define WACOM_HID_UP_G9                 0xff090000
 #define WACOM_HID_G9_PEN                (WACOM_HID_UP_G9 | 0x02)
 #define WACOM_HID_G9_TOUCHSCREEN        (WACOM_HID_UP_G9 | 0x11)
index 2f164bd746874549709a7c51122217d52ad05111..fdb0f832fadefe33aff3642406ae7fc380657078 100644 (file)
@@ -38,7 +38,7 @@
 
 static unsigned long virt_to_hvpfn(void *addr)
 {
-       unsigned long paddr;
+       phys_addr_t paddr;
 
        if (is_vmalloc_addr(addr))
                paddr = page_to_phys(vmalloc_to_page(addr)) +
index 5eed1e7da15c4c2eff2302504078db826ab5bd7e..d6106e1a0d4af597d04cb833215c3b27c0b2aa0d 100644 (file)
@@ -353,7 +353,9 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
 
                out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
 
-       default:
+               /* fallthrough */
+
+       case KVP_OP_GET_IP_INFO:
                utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
                                MAX_ADAPTER_ID_SIZE,
                                UTF16_LITTLE_ENDIAN,
@@ -406,6 +408,10 @@ kvp_send_key(struct work_struct *dummy)
                process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
                break;
        case KVP_OP_GET_IP_INFO:
+               /*
+                * We only need to pass on the info of operation, adapter_id
+                * and addr_family to the userland kvp daemon.
+                */
                process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
                break;
        case KVP_OP_SET:
@@ -421,7 +427,7 @@ kvp_send_key(struct work_struct *dummy)
                                UTF16_LITTLE_ENDIAN,
                                message->body.kvp_set.data.value,
                                HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1) + 1;
-                               break;
+                       break;
 
                case REG_U32:
                        /*
@@ -446,7 +452,10 @@ kvp_send_key(struct work_struct *dummy)
                        break;
 
                }
-       case KVP_OP_GET:
+
+               /*
+                * The key is always a string - utf16 encoding.
+                */
                message->body.kvp_set.data.key_size =
                        utf16s_to_utf8s(
                        (wchar_t *)in_msg->body.kvp_set.data.key,
@@ -454,7 +463,18 @@ kvp_send_key(struct work_struct *dummy)
                        UTF16_LITTLE_ENDIAN,
                        message->body.kvp_set.data.key,
                        HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
-                       break;
+
+               break;
+
+       case KVP_OP_GET:
+               message->body.kvp_get.data.key_size =
+                       utf16s_to_utf8s(
+                       (wchar_t *)in_msg->body.kvp_get.data.key,
+                       in_msg->body.kvp_get.data.key_size,
+                       UTF16_LITTLE_ENDIAN,
+                       message->body.kvp_get.data.key,
+                       HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
+               break;
 
        case KVP_OP_DELETE:
                message->body.kvp_delete.key_size =
@@ -464,12 +484,12 @@ kvp_send_key(struct work_struct *dummy)
                        UTF16_LITTLE_ENDIAN,
                        message->body.kvp_delete.key,
                        HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
-                       break;
+               break;
 
        case KVP_OP_ENUMERATE:
                message->body.kvp_enum_data.index =
                        in_msg->body.kvp_enum_data.index;
-                       break;
+               break;
        }
 
        kvp_transaction.state = HVUTIL_USERSPACE_REQ;
index 34e45b97629ed73869baf28edf3acef0376290c5..2f2fb19669580a1de5a17939e3bc0e9a87be5951 100644 (file)
@@ -694,8 +694,8 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
 
        if (resource->caps.flags & POWER_METER_CAN_CAP) {
                if (!can_cap_in_hardware()) {
-                       dev_err(&resource->acpi_dev->dev,
-                               "Ignoring unsafe software power cap!\n");
+                       dev_warn(&resource->acpi_dev->dev,
+                                "Ignoring unsafe software power cap!\n");
                        goto skip_unsafe_cap;
                }
 
index 78603b78cf410de903aa22d55147e6b600ab0398..eba692cddbdee721ece32e4b943d1a244575e991 100644 (file)
@@ -818,7 +818,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
 static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
 static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
 static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
+static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
 static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
 static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
 
@@ -3673,6 +3673,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
                data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
                data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
+               data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
                data->REG_PWM[0] = NCT6106_REG_PWM;
                data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
                data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
index 2876c18ed84115dbd97e7eb2c4f9b899ebf2cfe7..38ffbdb0a85fba288cdcac8e76cc7ff3eadec65d 100644 (file)
@@ -768,7 +768,7 @@ static struct attribute *nct7802_in_attrs[] = {
        &sensor_dev_attr_in3_alarm.dev_attr.attr,
        &sensor_dev_attr_in3_beep.dev_attr.attr,
 
-       &sensor_dev_attr_in4_input.dev_attr.attr,       /* 17 */
+       &sensor_dev_attr_in4_input.dev_attr.attr,       /* 16 */
        &sensor_dev_attr_in4_min.dev_attr.attr,
        &sensor_dev_attr_in4_max.dev_attr.attr,
        &sensor_dev_attr_in4_alarm.dev_attr.attr,
@@ -794,9 +794,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
 
        if (index >= 6 && index < 11 && (reg & 0x03) != 0x03)   /* VSEN1 */
                return 0;
-       if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c)  /* VSEN2 */
+       if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c)  /* VSEN2 */
                return 0;
-       if (index >= 17 && (reg & 0x30) != 0x30)                /* VSEN3 */
+       if (index >= 16 && (reg & 0x30) != 0x30)                /* VSEN3 */
                return 0;
 
        return attr->mode;
index 2bce7cf0b0af4fb3424f08064e679d834bbcb80f..e45b5ec2f4512d10cc69d5719eb0fa3b1f19ff0f 100644 (file)
@@ -174,6 +174,12 @@ static void etm4_enable_hw(void *info)
        if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
                dev_err(drvdata->dev,
                        "timeout while waiting for Idle Trace Status\n");
+       /*
+        * As recommended by section 4.3.7 ("Synchronization when using the
+        * memory-mapped interface") of ARM IHI 0064D
+        */
+       dsb(sy);
+       isb();
 
        CS_LOCK(drvdata->base);
 
@@ -324,8 +330,12 @@ static void etm4_disable_hw(void *info)
        /* EN, bit[0] Trace unit enable bit */
        control &= ~0x1;
 
-       /* make sure everything completes before disabling */
-       mb();
+       /*
+        * Make sure everything completes before disabling, as recommended
+        * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
+        * SSTATUS") of ARM IHI 0064D
+        */
+       dsb(sy);
        isb();
        writel_relaxed(control, drvdata->base + TRCPRGCTLR);
 
index 8ff326c0c406c9ac2a5f58815d0057d39978c90d..3cdf85b1ce4fee2366bef46a53aa4a92319aa889 100644 (file)
@@ -632,7 +632,7 @@ static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
                goto err_out;
 
        ret = -ENOMEM;
-       page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+       page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
        if (!page)
                goto err_free_sgt;
 
index 70f2cb90adc5eb2e15441dcc7f189f9e5495f5aa..968319f4e5f101e5f1371d7701bc1493630e3d0e 100644 (file)
@@ -140,6 +140,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6),
                .driver_data = (kernel_ulong_t)0,
        },
+       {
+               /* Lewisburg PCH */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226),
+               .driver_data = (kernel_ulong_t)0,
+       },
        {
                /* Gemini Lake */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
@@ -170,6 +175,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Ice Lake NNPI */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
+       {
+               /* Tiger Lake PCH */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        { 0 },
 };
 
index 9ec9197edffaf64bd041e9e6071bcd59e6a3ea48..eeba421dc823d71f1864697cd861eb6e875fe593 100644 (file)
@@ -1098,7 +1098,6 @@ int stm_source_register_device(struct device *parent,
 
 err:
        put_device(&src->dev);
-       kfree(src);
 
        return err;
 }
index 3f3e8b3bf5ff9df550991d18530fe45f41c870d3..d51bf536bdf7509b6e1ce70fd51f60c02a761272 100644 (file)
@@ -270,9 +270,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
        writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);
 
        /* send stop when last byte has been written */
-       if (--dev->buf_len == 0)
+       if (--dev->buf_len == 0) {
                if (!dev->use_alt_cmd)
                        at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
+               at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
+       }
 
        dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);
 
@@ -690,9 +692,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
                } else {
                        at91_twi_write_next_byte(dev);
                        at91_twi_write(dev, AT91_TWI_IER,
-                                      AT91_TWI_TXCOMP |
-                                      AT91_TWI_NACK |
-                                      AT91_TWI_TXRDY);
+                                      AT91_TWI_TXCOMP | AT91_TWI_NACK |
+                                      (dev->buf_len ? AT91_TWI_TXRDY : 0));
                }
        }
 
@@ -913,7 +914,7 @@ static struct at91_twi_pdata sama5d4_config = {
 
 static struct at91_twi_pdata sama5d2_config = {
        .clk_max_div = 7,
-       .clk_offset = 4,
+       .clk_offset = 3,
        .has_unre_flag = true,
        .has_alt_cmd = true,
        .has_hold_field = true,
index c4d176f5ed793c76c78c412d081c21bc8dff2327..f890af67f50173b727c5b388a29735bea3a481db 100644 (file)
@@ -187,6 +187,51 @@ static const struct i2c_algorithm cht_wc_i2c_adap_algo = {
        .smbus_xfer = cht_wc_i2c_adap_smbus_xfer,
 };
 
+/*
+ * We are an i2c-adapter which itself is part of an i2c-client. This means that
+ * transfers done through us take adapter->bus_lock twice, once for our parent
+ * i2c-adapter and once to take our own bus_lock. Lockdep does not like this
+ * nested locking, to make lockdep happy in the case of busses with muxes, the
+ * i2c-core's i2c_adapter_lock_bus function calls:
+ * rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
+ *
+ * But i2c_adapter_depth only works when the direct parent of the adapter is
+ * another adapter, as it is only meant for muxes. In our case there is an
+ * i2c-client and MFD instantiated platform_device in the parent->child chain
+ * between the 2 devices.
+ *
+ * So we override the default i2c_lock_operations and pass a hardcoded
+ * depth of 1 to rt_mutex_lock_nested, to make lockdep happy.
+ *
+ * Note that if there were to be a mux attached to our adapter, this would
+ * break things again since the i2c-mux code expects the root-adapter to have
+ * a locking depth of 0. But we always have only 1 client directly attached
+ * in the form of the Charger IC paired with the CHT Whiskey Cove PMIC.
+ */
+static void cht_wc_i2c_adap_lock_bus(struct i2c_adapter *adapter,
+                                unsigned int flags)
+{
+       rt_mutex_lock_nested(&adapter->bus_lock, 1);
+}
+
+static int cht_wc_i2c_adap_trylock_bus(struct i2c_adapter *adapter,
+                                  unsigned int flags)
+{
+       return rt_mutex_trylock(&adapter->bus_lock);
+}
+
+static void cht_wc_i2c_adap_unlock_bus(struct i2c_adapter *adapter,
+                                  unsigned int flags)
+{
+       rt_mutex_unlock(&adapter->bus_lock);
+}
+
+static const struct i2c_lock_operations cht_wc_i2c_adap_lock_ops = {
+       .lock_bus =    cht_wc_i2c_adap_lock_bus,
+       .trylock_bus = cht_wc_i2c_adap_trylock_bus,
+       .unlock_bus =  cht_wc_i2c_adap_unlock_bus,
+};
+
 /**** irqchip for the client connected to the extchgr i2c adapter ****/
 static void cht_wc_i2c_irq_lock(struct irq_data *data)
 {
@@ -295,6 +340,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
        adap->adapter.owner = THIS_MODULE;
        adap->adapter.class = I2C_CLASS_HWMON;
        adap->adapter.algo = &cht_wc_i2c_adap_algo;
+       adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
        strlcpy(adap->adapter.name, "PMIC I2C Adapter",
                sizeof(adap->adapter.name));
        adap->adapter.dev.parent = &pdev->dev;
index e7f9305b2dd9f661c8863839dee346aa49438c4e..f5f001738df5e2b1b8c6e9db4973a9ce8f065140 100644 (file)
@@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
 
        dev->disable_int(dev);
        dev->disable(dev);
+       synchronize_irq(dev->irq);
        dev->slave = NULL;
        pm_runtime_put(dev->dev);
 
index 35b302d983e0d93d74b255d57f113226f9c799f9..959d4912ec0d5ce1082c4713cee86e13b10a228d 100644 (file)
@@ -69,6 +69,7 @@ struct em_i2c_device {
        struct completion msg_done;
        struct clk *sclk;
        struct i2c_client *slave;
+       int irq;
 };
 
 static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
@@ -339,6 +340,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
 
        writeb(0, priv->base + I2C_OFS_SVA0);
 
+       /*
+        * Wait for interrupt to finish. New slave irqs cannot happen because we
+        * cleared the slave address and, thus, only extension codes will be
+        * detected which do not use the slave ptr.
+        */
+       synchronize_irq(priv->irq);
        priv->slave = NULL;
 
        return 0;
@@ -355,7 +362,7 @@ static int em_i2c_probe(struct platform_device *pdev)
 {
        struct em_i2c_device *priv;
        struct resource *r;
-       int irq, ret;
+       int ret;
 
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
@@ -390,8 +397,8 @@ static int em_i2c_probe(struct platform_device *pdev)
 
        em_i2c_reset(&priv->adap);
 
-       irq = platform_get_irq(pdev, 0);
-       ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
+       priv->irq = platform_get_irq(pdev, 0);
+       ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
                                "em_i2c", priv);
        if (ret)
                goto err_clk;
@@ -401,7 +408,8 @@ static int em_i2c_probe(struct platform_device *pdev)
        if (ret)
                goto err_clk;
 
-       dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
+       dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
+                priv->irq);
 
        return 0;
 
index 90946a8b9a75a94a8eadc4d50071b166a9d44c81..9ff3371ec385da0221ec530b0bfadfb4233d0a33 100644 (file)
@@ -98,7 +98,7 @@
 #define SB800_PIIX4_PORT_IDX_MASK      0x06
 #define SB800_PIIX4_PORT_IDX_SHIFT     1
 
-/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
 #define SB800_PIIX4_PORT_IDX_KERNCZ            0x02
 #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ       0x18
 #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ      3
@@ -362,18 +362,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 
        /* Find which register is used for port selection */
        if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
-               switch (PIIX4_dev->device) {
-               case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+               if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
+                   (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+                    PIIX4_dev->revision >= 0x1F)) {
                        piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
                        piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
                        piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
-                       break;
-               case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
-               default:
+               } else {
                        piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
                        piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
                        piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
-                       break;
                }
        } else {
                if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
index 254e6219e5389f17114185c57470914562c2bed6..2c29f901d30908b3f0b4f9b5c29b6639ea46b398 100644 (file)
@@ -139,6 +139,7 @@ struct rcar_i2c_priv {
        enum dma_data_direction dma_direction;
 
        struct reset_control *rstc;
+       int irq;
 };
 
 #define rcar_i2c_priv_to_dev(p)                ((p)->adap.dev.parent)
@@ -859,9 +860,11 @@ static int rcar_unreg_slave(struct i2c_client *slave)
 
        WARN_ON(!priv->slave);
 
+       /* disable irqs and ensure none is running before clearing ptr */
        rcar_i2c_write(priv, ICSIER, 0);
        rcar_i2c_write(priv, ICSCR, 0);
 
+       synchronize_irq(priv->irq);
        priv->slave = NULL;
 
        pm_runtime_put(rcar_i2c_priv_to_dev(priv));
@@ -916,7 +919,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
        struct i2c_adapter *adap;
        struct device *dev = &pdev->dev;
        struct i2c_timings i2c_t;
-       int irq, ret;
+       int ret;
 
        priv = devm_kzalloc(dev, sizeof(struct rcar_i2c_priv), GFP_KERNEL);
        if (!priv)
@@ -979,10 +982,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
                pm_runtime_put(dev);
 
 
-       irq = platform_get_irq(pdev, 0);
-       ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv);
+       priv->irq = platform_get_irq(pdev, 0);
+       ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
        if (ret < 0) {
-               dev_err(dev, "cannot get irq %d\n", irq);
+               dev_err(dev, "cannot get irq %d\n", priv->irq);
                goto out_pm_disable;
        }
 
index b75ff144b5704293e0946c4919e747d91ede0b85..e6f351c92c02df434586ea8540176825211535a0 100644 (file)
@@ -203,6 +203,7 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
        if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) {
                /* We got a NACKIE */
                readb(riic->base + RIIC_ICDRR); /* dummy read */
+               riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2);
                riic->err = -ENXIO;
        } else if (riic->bytes_left) {
                return IRQ_NONE;
index a492da9fd0d326db0128ada9d2f940985b2851c8..ac9c9486b834cc78cb71ab7be34dfcc4b68f1daa 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
@@ -1782,15 +1781,14 @@ static struct i2c_algorithm stm32f7_i2c_algo = {
 
 static int stm32f7_i2c_probe(struct platform_device *pdev)
 {
-       struct device_node *np = pdev->dev.of_node;
        struct stm32f7_i2c_dev *i2c_dev;
        const struct stm32f7_i2c_setup *setup;
        struct resource *res;
-       u32 irq_error, irq_event, clk_rate, rise_time, fall_time;
+       u32 clk_rate, rise_time, fall_time;
        struct i2c_adapter *adap;
        struct reset_control *rst;
        dma_addr_t phy_addr;
-       int ret;
+       int irq_error, irq_event, ret;
 
        i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
        if (!i2c_dev)
@@ -1802,16 +1800,20 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
                return PTR_ERR(i2c_dev->base);
        phy_addr = (dma_addr_t)res->start;
 
-       irq_event = irq_of_parse_and_map(np, 0);
-       if (!irq_event) {
-               dev_err(&pdev->dev, "IRQ event missing or invalid\n");
-               return -EINVAL;
+       irq_event = platform_get_irq(pdev, 0);
+       if (irq_event <= 0) {
+               if (irq_event != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "Failed to get IRQ event: %d\n",
+                               irq_event);
+               return irq_event ? : -ENOENT;
        }
 
-       irq_error = irq_of_parse_and_map(np, 1);
-       if (!irq_error) {
-               dev_err(&pdev->dev, "IRQ error missing or invalid\n");
-               return -EINVAL;
+       irq_error = platform_get_irq(pdev, 1);
+       if (irq_error <= 0) {
+               if (irq_error != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "Failed to get IRQ error: %d\n",
+                               irq_error);
+               return irq_error ? : -ENOENT;
        }
 
        i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
index 8ab0dab075fae7d40f9b91683e2c2e46105d6448..b6e026c5d815791884d20e2abca70df41fe4ce91 100644 (file)
@@ -185,7 +185,7 @@ static int i2c_generic_bus_free(struct i2c_adapter *adap)
 int i2c_generic_scl_recovery(struct i2c_adapter *adap)
 {
        struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
-       int i = 0, scl = 1, ret;
+       int i = 0, scl = 1, ret = 0;
 
        if (bri->prepare_recovery)
                bri->prepare_recovery(adap);
index 063e89eff791a7de06ec2abfa5f814d9ab0193ee..c776a3509a7173720435d70e3611f287c550fad2 100644 (file)
@@ -328,7 +328,6 @@ static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
                .modified = 1,                                          \
                .info_mask_separate =                                   \
                        BIT(IIO_CHAN_INFO_RAW) |                        \
-                       BIT(IIO_CHAN_INFO_SCALE) |                      \
                        BIT(IIO_CHAN_INFO_CALIBBIAS),                   \
                .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE),    \
                .ext_info = cros_ec_accel_legacy_ext_info,              \
index e1da67d5ee2204b6ad1e97281653cb3e89ffbc86..9e61720db7eaf17a8a97fe539d97e738449846cf 100644 (file)
@@ -814,10 +814,10 @@ static int ad799x_probe(struct i2c_client *client,
 
        ret = ad799x_write_config(st, st->chip_config->default_config);
        if (ret < 0)
-               goto error_disable_reg;
+               goto error_disable_vref;
        ret = ad799x_read_config(st);
        if (ret < 0)
-               goto error_disable_reg;
+               goto error_disable_vref;
        st->config = ret;
 
        ret = iio_triggered_buffer_setup(indio_dev, NULL,
index 4e339cfd0c546db8751f7d547ce14d2ce33bb5ed..e6ce25bcc01cac6946119cfb83698bf21bd43b37 100644 (file)
@@ -16,6 +16,7 @@
  *
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
 #define AXP288_ADC_EN_MASK                             0xF0
 #define AXP288_ADC_TS_ENABLE                           0x01
 
+#define AXP288_ADC_TS_BIAS_MASK                                GENMASK(5, 4)
+#define AXP288_ADC_TS_BIAS_20UA                                (0 << 4)
+#define AXP288_ADC_TS_BIAS_40UA                                (1 << 4)
+#define AXP288_ADC_TS_BIAS_60UA                                (2 << 4)
+#define AXP288_ADC_TS_BIAS_80UA                                (3 << 4)
 #define AXP288_ADC_TS_CURRENT_ON_OFF_MASK              GENMASK(1, 0)
 #define AXP288_ADC_TS_CURRENT_OFF                      (0 << 0)
 #define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING         (1 << 0)
@@ -186,10 +192,36 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
        return ret;
 }
 
+/*
+ * We rely on the machine's firmware to correctly setup the TS pin bias current
+ * at boot. This lists systems with broken fw where we need to set it ourselves.
+ */
+static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
+       {
+               /* Lenovo Ideapad 100S (11 inch) */
+               .matches = {
+                 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 100S-11IBY"),
+               },
+               .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
+       },
+       {}
+};
+
 static int axp288_adc_initialize(struct axp288_adc_info *info)
 {
+       const struct dmi_system_id *bias_override;
        int ret, adc_enable_val;
 
+       bias_override = dmi_first_match(axp288_adc_ts_bias_override);
+       if (bias_override) {
+               ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+                                        AXP288_ADC_TS_BIAS_MASK,
+                                        (uintptr_t)bias_override->driver_data);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * Determine if the TS pin is enabled and set the TS current-source
         * accordingly.
index 4be29ed447559c0f736694f56de3e2013e61c310..1ca2c4d39f87851dac86f3bf4724c3c2d7be9510 100644 (file)
 #define MAX_ADC_V2_CHANNELS            10
 #define MAX_ADC_V1_CHANNELS            8
 #define MAX_EXYNOS3250_ADC_CHANNELS    2
+#define MAX_EXYNOS4212_ADC_CHANNELS    4
+#define MAX_S5PV210_ADC_CHANNELS       10
 
 /* Bit definitions common for ADC_V1 and ADC_V2 */
 #define ADC_CON_EN_START       (1u << 0)
@@ -270,6 +272,19 @@ static void exynos_adc_v1_start_conv(struct exynos_adc *info,
        writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
 }
 
+/* Exynos4212 and 4412 is like ADCv1 but with four channels only */
+static const struct exynos_adc_data exynos4212_adc_data = {
+       .num_channels   = MAX_EXYNOS4212_ADC_CHANNELS,
+       .mask           = ADC_DATX_MASK,        /* 12 bit ADC resolution */
+       .needs_adc_phy  = true,
+       .phy_offset     = EXYNOS_ADCV1_PHY_OFFSET,
+
+       .init_hw        = exynos_adc_v1_init_hw,
+       .exit_hw        = exynos_adc_v1_exit_hw,
+       .clear_irq      = exynos_adc_v1_clear_irq,
+       .start_conv     = exynos_adc_v1_start_conv,
+};
+
 static const struct exynos_adc_data exynos_adc_v1_data = {
        .num_channels   = MAX_ADC_V1_CHANNELS,
        .mask           = ADC_DATX_MASK,        /* 12 bit ADC resolution */
@@ -282,6 +297,16 @@ static const struct exynos_adc_data exynos_adc_v1_data = {
        .start_conv     = exynos_adc_v1_start_conv,
 };
 
+static const struct exynos_adc_data exynos_adc_s5pv210_data = {
+       .num_channels   = MAX_S5PV210_ADC_CHANNELS,
+       .mask           = ADC_DATX_MASK,        /* 12 bit ADC resolution */
+
+       .init_hw        = exynos_adc_v1_init_hw,
+       .exit_hw        = exynos_adc_v1_exit_hw,
+       .clear_irq      = exynos_adc_v1_clear_irq,
+       .start_conv     = exynos_adc_v1_start_conv,
+};
+
 static void exynos_adc_s3c2416_start_conv(struct exynos_adc *info,
                                          unsigned long addr)
 {
@@ -478,6 +503,12 @@ static const struct of_device_id exynos_adc_match[] = {
        }, {
                .compatible = "samsung,s3c6410-adc",
                .data = &exynos_adc_s3c64xx_data,
+       }, {
+               .compatible = "samsung,s5pv210-adc",
+               .data = &exynos_adc_s5pv210_data,
+       }, {
+               .compatible = "samsung,exynos4212-adc",
+               .data = &exynos4212_adc_data,
        }, {
                .compatible = "samsung,exynos-adc-v1",
                .data = &exynos_adc_v1_data,
index 36b59d8957fb850a78638c2cff7dcd2ef222f67a..6c5d81a89aec9c6f7c077e2d78e19d9bfecd08d1 100644 (file)
@@ -109,14 +109,14 @@ struct hx711_data {
 
 static int hx711_cycle(struct hx711_data *hx711_data)
 {
-       int val;
+       unsigned long flags;
 
        /*
         * if preempted for more then 60us while PD_SCK is high:
         * hx711 is going in reset
         * ==> measuring is false
         */
-       preempt_disable();
+       local_irq_save(flags);
        gpiod_set_value(hx711_data->gpiod_pd_sck, 1);
 
        /*
@@ -126,7 +126,6 @@ static int hx711_cycle(struct hx711_data *hx711_data)
         */
        ndelay(hx711_data->data_ready_delay_ns);
 
-       val = gpiod_get_value(hx711_data->gpiod_dout);
        /*
         * here we are not waiting for 0.2 us as suggested by the datasheet,
         * because the oscilloscope showed in a test scenario
@@ -134,7 +133,7 @@ static int hx711_cycle(struct hx711_data *hx711_data)
         * and 0.56 us for PD_SCK low on TI Sitara with 800 MHz
         */
        gpiod_set_value(hx711_data->gpiod_pd_sck, 0);
-       preempt_enable();
+       local_irq_restore(flags);
 
        /*
         * make it a square wave for addressing cases with capacitance on
@@ -142,7 +141,8 @@ static int hx711_cycle(struct hx711_data *hx711_data)
         */
        ndelay(hx711_data->data_ready_delay_ns);
 
-       return val;
+       /* sample as late as possible */
+       return gpiod_get_value(hx711_data->gpiod_dout);
 }
 
 static int hx711_read(struct hx711_data *hx711_data)
index 0538ff8c4ac1d2f1e242898daf644d3b69345c4d..49c1956e6a6742f6189e3ce7dcb19cd9af4fb039 100644 (file)
@@ -86,7 +86,7 @@
 #define MAX9611_TEMP_MAX_POS           0x7f80
 #define MAX9611_TEMP_MAX_NEG           0xff80
 #define MAX9611_TEMP_MIN_NEG           0xd980
-#define MAX9611_TEMP_MASK              GENMASK(7, 15)
+#define MAX9611_TEMP_MASK              GENMASK(15, 7)
 #define MAX9611_TEMP_SHIFT             0x07
 #define MAX9611_TEMP_RAW(_r)           ((_r) >> MAX9611_TEMP_SHIFT)
 #define MAX9611_TEMP_SCALE_NUM         1000000
@@ -483,7 +483,7 @@ static int max9611_init(struct max9611_dev *max9611)
        if (ret)
                return ret;
 
-       regval = ret & MAX9611_TEMP_MASK;
+       regval &= MAX9611_TEMP_MASK;
 
        if ((regval > MAX9611_TEMP_MAX_POS &&
             regval < MAX9611_TEMP_MIN_NEG) ||
index dcb50172186f49ab62722997cc34a55bdb667a06..f3a966ab35dcb41a614204527bc7b829d7033d92 100644 (file)
@@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
                                dev_err(dev,
                                        "Only %i channels supported with %s, but reg = <%i>.\n",
                                        num_channels, child->name, reg);
-                               return ret;
+                               return -EINVAL;
                        }
                }
 
@@ -400,7 +400,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
                        dev_err(dev,
                                "Channel %i uses different ADC mode than the rest.\n",
                                reg);
-                       return ret;
+                       return -EINVAL;
                }
 
                /* Channel is valid, grab the regulator. */
index ca432e7b6ff1dbb61f5c30bdd20105cc37cec753..38eb966930793c78b10d396e3127b4169d4858ab 100644 (file)
 
 #include "stm32-adc-core.h"
 
-/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
-#define STM32F4_ADC_CSR                        (STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32F4_ADC_CCR                        (STM32_ADCX_COMN_OFFSET + 0x04)
-
-/* STM32F4_ADC_CSR - bit fields */
-#define STM32F4_EOC3                   BIT(17)
-#define STM32F4_EOC2                   BIT(9)
-#define STM32F4_EOC1                   BIT(1)
-
-/* STM32F4_ADC_CCR - bit fields */
-#define STM32F4_ADC_ADCPRE_SHIFT       16
-#define STM32F4_ADC_ADCPRE_MASK                GENMASK(17, 16)
-
-/* STM32H7 - common registers for all ADC instances */
-#define STM32H7_ADC_CSR                        (STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32H7_ADC_CCR                        (STM32_ADCX_COMN_OFFSET + 0x08)
-
-/* STM32H7_ADC_CSR - bit fields */
-#define STM32H7_EOC_SLV                        BIT(18)
-#define STM32H7_EOC_MST                        BIT(2)
-
-/* STM32H7_ADC_CCR - bit fields */
-#define STM32H7_PRESC_SHIFT            18
-#define STM32H7_PRESC_MASK             GENMASK(21, 18)
-#define STM32H7_CKMODE_SHIFT           16
-#define STM32H7_CKMODE_MASK            GENMASK(17, 16)
-
 /**
  * stm32_adc_common_regs - stm32 common registers, compatible dependent data
  * @csr:       common status register offset
  * @eoc1:      adc1 end of conversion flag in @csr
  * @eoc2:      adc2 end of conversion flag in @csr
  * @eoc3:      adc3 end of conversion flag in @csr
+ * @ier:       interrupt enable register offset for each adc
+ * @eocie_msk: end of conversion interrupt enable mask in @ier
  */
 struct stm32_adc_common_regs {
        u32 csr;
        u32 eoc1_msk;
        u32 eoc2_msk;
        u32 eoc3_msk;
+       u32 ier;
+       u32 eocie_msk;
 };
 
 struct stm32_adc_priv;
@@ -268,6 +245,8 @@ static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
        .eoc1_msk = STM32F4_EOC1,
        .eoc2_msk = STM32F4_EOC2,
        .eoc3_msk = STM32F4_EOC3,
+       .ier = STM32F4_ADC_CR1,
+       .eocie_msk = STM32F4_EOCIE,
 };
 
 /* STM32H7 common registers definitions */
@@ -275,8 +254,24 @@ static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
        .csr = STM32H7_ADC_CSR,
        .eoc1_msk = STM32H7_EOC_MST,
        .eoc2_msk = STM32H7_EOC_SLV,
+       .ier = STM32H7_ADC_IER,
+       .eocie_msk = STM32H7_EOCIE,
+};
+
+static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
+       0, STM32_ADC_OFFSET, STM32_ADC_OFFSET * 2,
 };
 
+static unsigned int stm32_adc_eoc_enabled(struct stm32_adc_priv *priv,
+                                         unsigned int adc)
+{
+       u32 ier, offset = stm32_adc_offset[adc];
+
+       ier = readl_relaxed(priv->common.base + offset + priv->cfg->regs->ier);
+
+       return ier & priv->cfg->regs->eocie_msk;
+}
+
 /* ADC common interrupt for all instances */
 static void stm32_adc_irq_handler(struct irq_desc *desc)
 {
@@ -287,13 +282,28 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
        chained_irq_enter(chip, desc);
        status = readl_relaxed(priv->common.base + priv->cfg->regs->csr);
 
-       if (status & priv->cfg->regs->eoc1_msk)
+       /*
+        * End of conversion may be handled by using IRQ or DMA. There may be a
+        * race here when two conversions complete at the same time on several
+        * ADCs. EOC may be read 'set' for several ADCs, with:
+        * - an ADC configured to use DMA (EOC triggers the DMA request, and
+        *   is then automatically cleared by DR read in hardware)
+        * - an ADC configured to use IRQs (EOCIE bit is set. The handler must
+        *   be called in this case)
+        * So both EOC status bit in CSR and EOCIE control bit must be checked
+        * before invoking the interrupt handler (e.g. call ISR only for
+        * IRQ-enabled ADCs).
+        */
+       if (status & priv->cfg->regs->eoc1_msk &&
+           stm32_adc_eoc_enabled(priv, 0))
                generic_handle_irq(irq_find_mapping(priv->domain, 0));
 
-       if (status & priv->cfg->regs->eoc2_msk)
+       if (status & priv->cfg->regs->eoc2_msk &&
+           stm32_adc_eoc_enabled(priv, 1))
                generic_handle_irq(irq_find_mapping(priv->domain, 1));
 
-       if (status & priv->cfg->regs->eoc3_msk)
+       if (status & priv->cfg->regs->eoc3_msk &&
+           stm32_adc_eoc_enabled(priv, 2))
                generic_handle_irq(irq_find_mapping(priv->domain, 2));
 
        chained_irq_exit(chip, desc);
index 8af507b3f32d914d70307bfff7e8819b4284581d..2579d514c2a3448cbd877ec8f3567e8103fc1399 100644 (file)
  * --------------------------------------------------------
  */
 #define STM32_ADC_MAX_ADCS             3
+#define STM32_ADC_OFFSET               0x100
 #define STM32_ADCX_COMN_OFFSET         0x300
 
+/* STM32F4 - Registers for each ADC instance */
+#define STM32F4_ADC_SR                 0x00
+#define STM32F4_ADC_CR1                        0x04
+#define STM32F4_ADC_CR2                        0x08
+#define STM32F4_ADC_SMPR1              0x0C
+#define STM32F4_ADC_SMPR2              0x10
+#define STM32F4_ADC_HTR                        0x24
+#define STM32F4_ADC_LTR                        0x28
+#define STM32F4_ADC_SQR1               0x2C
+#define STM32F4_ADC_SQR2               0x30
+#define STM32F4_ADC_SQR3               0x34
+#define STM32F4_ADC_JSQR               0x38
+#define STM32F4_ADC_JDR1               0x3C
+#define STM32F4_ADC_JDR2               0x40
+#define STM32F4_ADC_JDR3               0x44
+#define STM32F4_ADC_JDR4               0x48
+#define STM32F4_ADC_DR                 0x4C
+
+/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
+#define STM32F4_ADC_CSR                        (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32F4_ADC_CCR                        (STM32_ADCX_COMN_OFFSET + 0x04)
+
+/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_STRT                   BIT(4)
+#define STM32F4_EOC                    BIT(1)
+
+/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_RES_SHIFT              24
+#define STM32F4_RES_MASK               GENMASK(25, 24)
+#define STM32F4_SCAN                   BIT(8)
+#define STM32F4_EOCIE                  BIT(5)
+
+/* STM32F4_ADC_CR2 - bit fields */
+#define STM32F4_SWSTART                        BIT(30)
+#define STM32F4_EXTEN_SHIFT            28
+#define STM32F4_EXTEN_MASK             GENMASK(29, 28)
+#define STM32F4_EXTSEL_SHIFT           24
+#define STM32F4_EXTSEL_MASK            GENMASK(27, 24)
+#define STM32F4_EOCS                   BIT(10)
+#define STM32F4_DDS                    BIT(9)
+#define STM32F4_DMA                    BIT(8)
+#define STM32F4_ADON                   BIT(0)
+
+/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_EOC3                   BIT(17)
+#define STM32F4_EOC2                   BIT(9)
+#define STM32F4_EOC1                   BIT(1)
+
+/* STM32F4_ADC_CCR - bit fields */
+#define STM32F4_ADC_ADCPRE_SHIFT       16
+#define STM32F4_ADC_ADCPRE_MASK                GENMASK(17, 16)
+
+/* STM32H7 - Registers for each ADC instance */
+#define STM32H7_ADC_ISR                        0x00
+#define STM32H7_ADC_IER                        0x04
+#define STM32H7_ADC_CR                 0x08
+#define STM32H7_ADC_CFGR               0x0C
+#define STM32H7_ADC_SMPR1              0x14
+#define STM32H7_ADC_SMPR2              0x18
+#define STM32H7_ADC_PCSEL              0x1C
+#define STM32H7_ADC_SQR1               0x30
+#define STM32H7_ADC_SQR2               0x34
+#define STM32H7_ADC_SQR3               0x38
+#define STM32H7_ADC_SQR4               0x3C
+#define STM32H7_ADC_DR                 0x40
+#define STM32H7_ADC_DIFSEL             0xC0
+#define STM32H7_ADC_CALFACT            0xC4
+#define STM32H7_ADC_CALFACT2           0xC8
+
+/* STM32H7 - common registers for all ADC instances */
+#define STM32H7_ADC_CSR                        (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32H7_ADC_CCR                        (STM32_ADCX_COMN_OFFSET + 0x08)
+
+/* STM32H7_ADC_ISR - bit fields */
+#define STM32MP1_VREGREADY             BIT(12)
+#define STM32H7_EOC                    BIT(2)
+#define STM32H7_ADRDY                  BIT(0)
+
+/* STM32H7_ADC_IER - bit fields */
+#define STM32H7_EOCIE                  STM32H7_EOC
+
+/* STM32H7_ADC_CR - bit fields */
+#define STM32H7_ADCAL                  BIT(31)
+#define STM32H7_ADCALDIF               BIT(30)
+#define STM32H7_DEEPPWD                        BIT(29)
+#define STM32H7_ADVREGEN               BIT(28)
+#define STM32H7_LINCALRDYW6            BIT(27)
+#define STM32H7_LINCALRDYW5            BIT(26)
+#define STM32H7_LINCALRDYW4            BIT(25)
+#define STM32H7_LINCALRDYW3            BIT(24)
+#define STM32H7_LINCALRDYW2            BIT(23)
+#define STM32H7_LINCALRDYW1            BIT(22)
+#define STM32H7_ADCALLIN               BIT(16)
+#define STM32H7_BOOST                  BIT(8)
+#define STM32H7_ADSTP                  BIT(4)
+#define STM32H7_ADSTART                        BIT(2)
+#define STM32H7_ADDIS                  BIT(1)
+#define STM32H7_ADEN                   BIT(0)
+
+/* STM32H7_ADC_CFGR bit fields */
+#define STM32H7_EXTEN_SHIFT            10
+#define STM32H7_EXTEN_MASK             GENMASK(11, 10)
+#define STM32H7_EXTSEL_SHIFT           5
+#define STM32H7_EXTSEL_MASK            GENMASK(9, 5)
+#define STM32H7_RES_SHIFT              2
+#define STM32H7_RES_MASK               GENMASK(4, 2)
+#define STM32H7_DMNGT_SHIFT            0
+#define STM32H7_DMNGT_MASK             GENMASK(1, 0)
+
+enum stm32h7_adc_dmngt {
+       STM32H7_DMNGT_DR_ONLY,          /* Regular data in DR only */
+       STM32H7_DMNGT_DMA_ONESHOT,      /* DMA one shot mode */
+       STM32H7_DMNGT_DFSDM,            /* DFSDM mode */
+       STM32H7_DMNGT_DMA_CIRC,         /* DMA circular mode */
+};
+
+/* STM32H7_ADC_CALFACT - bit fields */
+#define STM32H7_CALFACT_D_SHIFT                16
+#define STM32H7_CALFACT_D_MASK         GENMASK(26, 16)
+#define STM32H7_CALFACT_S_SHIFT                0
+#define STM32H7_CALFACT_S_MASK         GENMASK(10, 0)
+
+/* STM32H7_ADC_CALFACT2 - bit fields */
+#define STM32H7_LINCALFACT_SHIFT       0
+#define STM32H7_LINCALFACT_MASK                GENMASK(29, 0)
+
+/* STM32H7_ADC_CSR - bit fields */
+#define STM32H7_EOC_SLV                        BIT(18)
+#define STM32H7_EOC_MST                        BIT(2)
+
+/* STM32H7_ADC_CCR - bit fields */
+#define STM32H7_PRESC_SHIFT            18
+#define STM32H7_PRESC_MASK             GENMASK(21, 18)
+#define STM32H7_CKMODE_SHIFT           16
+#define STM32H7_CKMODE_MASK            GENMASK(17, 16)
+
 /**
  * struct stm32_adc_common - stm32 ADC driver common data (for all instances)
  * @base:              control registers base cpu addr
index 378411853d7516b1fc3121da560f9e1679583ed4..c52d20f7ca2ed46694ab1ee938ebaebacc7f660e 100644 (file)
 
 #include "stm32-adc-core.h"
 
-/* STM32F4 - Registers for each ADC instance */
-#define STM32F4_ADC_SR                 0x00
-#define STM32F4_ADC_CR1                        0x04
-#define STM32F4_ADC_CR2                        0x08
-#define STM32F4_ADC_SMPR1              0x0C
-#define STM32F4_ADC_SMPR2              0x10
-#define STM32F4_ADC_HTR                        0x24
-#define STM32F4_ADC_LTR                        0x28
-#define STM32F4_ADC_SQR1               0x2C
-#define STM32F4_ADC_SQR2               0x30
-#define STM32F4_ADC_SQR3               0x34
-#define STM32F4_ADC_JSQR               0x38
-#define STM32F4_ADC_JDR1               0x3C
-#define STM32F4_ADC_JDR2               0x40
-#define STM32F4_ADC_JDR3               0x44
-#define STM32F4_ADC_JDR4               0x48
-#define STM32F4_ADC_DR                 0x4C
-
-/* STM32F4_ADC_SR - bit fields */
-#define STM32F4_STRT                   BIT(4)
-#define STM32F4_EOC                    BIT(1)
-
-/* STM32F4_ADC_CR1 - bit fields */
-#define STM32F4_RES_SHIFT              24
-#define STM32F4_RES_MASK               GENMASK(25, 24)
-#define STM32F4_SCAN                   BIT(8)
-#define STM32F4_EOCIE                  BIT(5)
-
-/* STM32F4_ADC_CR2 - bit fields */
-#define STM32F4_SWSTART                        BIT(30)
-#define STM32F4_EXTEN_SHIFT            28
-#define STM32F4_EXTEN_MASK             GENMASK(29, 28)
-#define STM32F4_EXTSEL_SHIFT           24
-#define STM32F4_EXTSEL_MASK            GENMASK(27, 24)
-#define STM32F4_EOCS                   BIT(10)
-#define STM32F4_DDS                    BIT(9)
-#define STM32F4_DMA                    BIT(8)
-#define STM32F4_ADON                   BIT(0)
-
-/* STM32H7 - Registers for each ADC instance */
-#define STM32H7_ADC_ISR                        0x00
-#define STM32H7_ADC_IER                        0x04
-#define STM32H7_ADC_CR                 0x08
-#define STM32H7_ADC_CFGR               0x0C
-#define STM32H7_ADC_SMPR1              0x14
-#define STM32H7_ADC_SMPR2              0x18
-#define STM32H7_ADC_PCSEL              0x1C
-#define STM32H7_ADC_SQR1               0x30
-#define STM32H7_ADC_SQR2               0x34
-#define STM32H7_ADC_SQR3               0x38
-#define STM32H7_ADC_SQR4               0x3C
-#define STM32H7_ADC_DR                 0x40
-#define STM32H7_ADC_DIFSEL             0xC0
-#define STM32H7_ADC_CALFACT            0xC4
-#define STM32H7_ADC_CALFACT2           0xC8
-
-/* STM32H7_ADC_ISR - bit fields */
-#define STM32MP1_VREGREADY             BIT(12)
-#define STM32H7_EOC                    BIT(2)
-#define STM32H7_ADRDY                  BIT(0)
-
-/* STM32H7_ADC_IER - bit fields */
-#define STM32H7_EOCIE                  STM32H7_EOC
-
-/* STM32H7_ADC_CR - bit fields */
-#define STM32H7_ADCAL                  BIT(31)
-#define STM32H7_ADCALDIF               BIT(30)
-#define STM32H7_DEEPPWD                        BIT(29)
-#define STM32H7_ADVREGEN               BIT(28)
-#define STM32H7_LINCALRDYW6            BIT(27)
-#define STM32H7_LINCALRDYW5            BIT(26)
-#define STM32H7_LINCALRDYW4            BIT(25)
-#define STM32H7_LINCALRDYW3            BIT(24)
-#define STM32H7_LINCALRDYW2            BIT(23)
-#define STM32H7_LINCALRDYW1            BIT(22)
-#define STM32H7_ADCALLIN               BIT(16)
-#define STM32H7_BOOST                  BIT(8)
-#define STM32H7_ADSTP                  BIT(4)
-#define STM32H7_ADSTART                        BIT(2)
-#define STM32H7_ADDIS                  BIT(1)
-#define STM32H7_ADEN                   BIT(0)
-
-/* STM32H7_ADC_CFGR bit fields */
-#define STM32H7_EXTEN_SHIFT            10
-#define STM32H7_EXTEN_MASK             GENMASK(11, 10)
-#define STM32H7_EXTSEL_SHIFT           5
-#define STM32H7_EXTSEL_MASK            GENMASK(9, 5)
-#define STM32H7_RES_SHIFT              2
-#define STM32H7_RES_MASK               GENMASK(4, 2)
-#define STM32H7_DMNGT_SHIFT            0
-#define STM32H7_DMNGT_MASK             GENMASK(1, 0)
-
-enum stm32h7_adc_dmngt {
-       STM32H7_DMNGT_DR_ONLY,          /* Regular data in DR only */
-       STM32H7_DMNGT_DMA_ONESHOT,      /* DMA one shot mode */
-       STM32H7_DMNGT_DFSDM,            /* DFSDM mode */
-       STM32H7_DMNGT_DMA_CIRC,         /* DMA circular mode */
-};
-
-/* STM32H7_ADC_CALFACT - bit fields */
-#define STM32H7_CALFACT_D_SHIFT                16
-#define STM32H7_CALFACT_D_MASK         GENMASK(26, 16)
-#define STM32H7_CALFACT_S_SHIFT                0
-#define STM32H7_CALFACT_S_MASK         GENMASK(10, 0)
-
-/* STM32H7_ADC_CALFACT2 - bit fields */
-#define STM32H7_LINCALFACT_SHIFT       0
-#define STM32H7_LINCALFACT_MASK                GENMASK(29, 0)
-
 /* Number of linear calibration shadow registers / LINCALRDYW control bits */
 #define STM32H7_LINCALFACT_NUM         6
 
index fcd4a1c00ca0574d02326602f68da118a499fca4..f5586dd6414d2c579b0c7c927c8d58e37fea92ca 100644 (file)
@@ -981,11 +981,11 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
        ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
 
        if (adc->dev_data->type == DFSDM_AUDIO) {
-               ch->scan_type.sign = 's';
                ch->ext_info = dfsdm_adc_audio_ext_info;
        } else {
-               ch->scan_type.sign = 'u';
+               ch->scan_type.shift = 8;
        }
+       ch->scan_type.sign = 's';
        ch->scan_type.realbits = 24;
        ch->scan_type.storagebits = 32;
 
@@ -1144,6 +1144,12 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
         * So IRQ associated to filter instance 0 is dedicated to the Filter 0.
         */
        irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               if (irq != -EPROBE_DEFER)
+                       dev_err(dev, "Failed to get IRQ: %d\n", irq);
+               return irq;
+       }
+
        ret = devm_request_irq(dev, irq, stm32_dfsdm_irq,
                               0, pdev->name, adc);
        if (ret < 0) {
index bf089f5d622532740885146c0be6c513882d95a4..941630615e88535be61e6bb097b9af77ca8d2ac5 100644 (file)
@@ -213,6 +213,8 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
        }
        priv->dfsdm.phys_base = res->start;
        priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->dfsdm.base))
+               return PTR_ERR(priv->dfsdm.base);
 
        /*
         * "dfsdm" clock is mandatory for DFSDM peripheral clocking.
@@ -222,8 +224,10 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
         */
        priv->clk = devm_clk_get(&pdev->dev, "dfsdm");
        if (IS_ERR(priv->clk)) {
-               dev_err(&pdev->dev, "No stm32_dfsdm_clk clock found\n");
-               return -EINVAL;
+               ret = PTR_ERR(priv->clk);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "Failed to get clock (%d)\n", ret);
+               return ret;
        }
 
        priv->aclk = devm_clk_get(&pdev->dev, "audio");
index 54d88b60e30359be56561767c668fbd3a46e2969..f9d13e4ec1083191bc482cf230ae2ebc20d14ee9 100644 (file)
@@ -694,6 +694,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
        struct iio_dev *iio = _iio;
        struct opt3001 *opt = iio_priv(iio);
        int ret;
+       bool wake_result_ready_queue = false;
 
        if (!opt->ok_to_ignore_lock)
                mutex_lock(&opt->lock);
@@ -728,13 +729,16 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
                }
                opt->result = ret;
                opt->result_ready = true;
-               wake_up(&opt->result_ready_queue);
+               wake_result_ready_queue = true;
        }
 
 out:
        if (!opt->ok_to_ignore_lock)
                mutex_unlock(&opt->lock);
 
+       if (wake_result_ready_queue)
+               wake_up(&opt->result_ready_queue);
+
        return IRQ_HANDLED;
 }
 
index 39dc7be56884ab35b961c757c5235317dac89ea6..6257be21cbeddfd932a5cb116bcbbaf07825bbe2 100644 (file)
@@ -1723,8 +1723,8 @@ void rdma_destroy_id(struct rdma_cm_id *id)
        mutex_lock(&id_priv->handler_mutex);
        mutex_unlock(&id_priv->handler_mutex);
 
+       rdma_restrack_del(&id_priv->res);
        if (id_priv->cma_dev) {
-               rdma_restrack_del(&id_priv->res);
                if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
                        if (id_priv->cm_id.ib)
                                ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -3463,10 +3463,9 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 
        return 0;
 err2:
-       if (id_priv->cma_dev) {
-               rdma_restrack_del(&id_priv->res);
+       rdma_restrack_del(&id_priv->res);
+       if (id_priv->cma_dev)
                cma_release_dev(id_priv);
-       }
 err1:
        cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
        return ret;
index af5ad6a56ae404d1cd2aae64f95e59d7ccded0ac..9271f72900052aa7007727364c4746f7e94675f3 100644 (file)
@@ -112,12 +112,12 @@ static void ib_cq_poll_work(struct work_struct *work)
                                    IB_POLL_BATCH);
        if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
            ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
-               queue_work(ib_comp_wq, &cq->work);
+               queue_work(cq->comp_wq, &cq->work);
 }
 
 static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
 {
-       queue_work(ib_comp_wq, &cq->work);
+       queue_work(cq->comp_wq, &cq->work);
 }
 
 /**
@@ -175,9 +175,12 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
                ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
                break;
        case IB_POLL_WORKQUEUE:
+       case IB_POLL_UNBOUND_WORKQUEUE:
                cq->comp_handler = ib_cq_completion_workqueue;
                INIT_WORK(&cq->work, ib_cq_poll_work);
                ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+               cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
+                               ib_comp_wq : ib_comp_unbound_wq;
                break;
        default:
                ret = -EINVAL;
@@ -213,6 +216,7 @@ void ib_free_cq(struct ib_cq *cq)
                irq_poll_disable(&cq->iop);
                break;
        case IB_POLL_WORKQUEUE:
+       case IB_POLL_UNBOUND_WORKQUEUE:
                cancel_work_sync(&cq->work);
                break;
        default:
index db3b6271f09d5949a88c5f5a3ffab2d46fecb7f5..6d8ac51a39cc05600e6ea97b50ed62b13319e778 100644 (file)
@@ -61,6 +61,7 @@ struct ib_client_data {
 };
 
 struct workqueue_struct *ib_comp_wq;
+struct workqueue_struct *ib_comp_unbound_wq;
 struct workqueue_struct *ib_wq;
 EXPORT_SYMBOL_GPL(ib_wq);
 
@@ -1166,10 +1167,19 @@ static int __init ib_core_init(void)
                goto err;
        }
 
+       ib_comp_unbound_wq =
+               alloc_workqueue("ib-comp-unb-wq",
+                               WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
+                               WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
+       if (!ib_comp_unbound_wq) {
+               ret = -ENOMEM;
+               goto err_comp;
+       }
+
        ret = class_register(&ib_class);
        if (ret) {
                pr_warn("Couldn't create InfiniBand device class\n");
-               goto err_comp;
+               goto err_comp_unbound;
        }
 
        ret = rdma_nl_init();
@@ -1218,6 +1228,8 @@ err_ibnl:
        rdma_nl_exit();
 err_sysfs:
        class_unregister(&ib_class);
+err_comp_unbound:
+       destroy_workqueue(ib_comp_unbound_wq);
 err_comp:
        destroy_workqueue(ib_comp_wq);
 err:
@@ -1236,6 +1248,7 @@ static void __exit ib_core_cleanup(void)
        addr_cleanup();
        rdma_nl_exit();
        class_unregister(&ib_class);
+       destroy_workqueue(ib_comp_unbound_wq);
        destroy_workqueue(ib_comp_wq);
        /* Make sure that any pending umem accounting work is done. */
        destroy_workqueue(ib_wq);
index ef459f2f2eeb859c5a7c7a4b4501e00adedd7ae1..74aa3e651bc3c61e2c89a41b1117820d5a8de3e1 100644 (file)
@@ -3182,18 +3182,18 @@ static int ib_mad_port_open(struct ib_device *device,
        if (has_smi)
                cq_size *= 2;
 
-       port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
-                       IB_POLL_WORKQUEUE);
-       if (IS_ERR(port_priv->cq)) {
-               dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
-               ret = PTR_ERR(port_priv->cq);
-               goto error3;
-       }
-
        port_priv->pd = ib_alloc_pd(device, 0);
        if (IS_ERR(port_priv->pd)) {
                dev_err(&device->dev, "Couldn't create ib_mad PD\n");
                ret = PTR_ERR(port_priv->pd);
+               goto error3;
+       }
+
+       port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
+                       IB_POLL_UNBOUND_WORKQUEUE);
+       if (IS_ERR(port_priv->cq)) {
+               dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
+               ret = PTR_ERR(port_priv->cq);
                goto error4;
        }
 
@@ -3236,11 +3236,11 @@ error8:
 error7:
        destroy_mad_qp(&port_priv->qp_info[0]);
 error6:
-       ib_dealloc_pd(port_priv->pd);
-error4:
        ib_free_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+       ib_dealloc_pd(port_priv->pd);
 error3:
        kfree(port_priv);
 
@@ -3270,8 +3270,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
        destroy_workqueue(port_priv->wq);
        destroy_mad_qp(&port_priv->qp_info[1]);
        destroy_mad_qp(&port_priv->qp_info[0]);
-       ib_dealloc_pd(port_priv->pd);
        ib_free_cq(port_priv->cq);
+       ib_dealloc_pd(port_priv->pd);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
        /* XXX: Handle deallocation of MAD registration tables */
index 3b7fa0ccaa08a228d0d4523cb0a19a2cc96b23c2..279f0ae6591273b8e521e739bed5175039bd039b 100644 (file)
@@ -209,7 +209,7 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
        struct ib_device *dev;
 
        if (!res->valid)
-               return;
+               goto out;
 
        dev = res_to_dev(res);
        if (!dev)
@@ -222,8 +222,12 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
        down_write(&dev->res.rwsem);
        hash_del(&res->node);
        res->valid = false;
-       if (res->task)
-               put_task_struct(res->task);
        up_write(&dev->res.rwsem);
+
+out:
+       if (res->task) {
+               put_task_struct(res->task);
+               res->task = NULL;
+       }
 }
 EXPORT_SYMBOL(rdma_restrack_del);
index 7b794a14d6e851fbc4afc82711ecdb6ecf336c61..8be082edf986fd1e4d763c69bbd06ef3b3bd8cdc 100644 (file)
@@ -1232,7 +1232,6 @@ static int roce_resolve_route_from_path(struct sa_path_rec *rec,
 {
        struct rdma_dev_addr dev_addr = {};
        union {
-               struct sockaddr     _sockaddr;
                struct sockaddr_in  _sockaddr_in;
                struct sockaddr_in6 _sockaddr_in6;
        } sgid_addr, dgid_addr;
@@ -1249,12 +1248,12 @@ static int roce_resolve_route_from_path(struct sa_path_rec *rec,
         */
        dev_addr.net = &init_net;
 
-       rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
-       rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
+       rdma_gid2ip((struct sockaddr *)&sgid_addr, &rec->sgid);
+       rdma_gid2ip((struct sockaddr *)&dgid_addr, &rec->dgid);
 
        /* validate the route */
-       ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
-                                   &dgid_addr._sockaddr, &dev_addr);
+       ret = rdma_resolve_ip_route((struct sockaddr *)&sgid_addr,
+                                   (struct sockaddr *)&dgid_addr, &dev_addr);
        if (ret)
                return ret;
 
index c34a6852d691f666fb1d8deac965335c4f7c0840..a18f3f8ad77fe91944b54157a7003b84565d8f7d 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 
 #include <linux/uaccess.h>
 
@@ -868,11 +869,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 
        if (get_user(id, arg))
                return -EFAULT;
+       if (id >= IB_UMAD_MAX_AGENTS)
+               return -EINVAL;
 
        mutex_lock(&file->port->file_mutex);
        mutex_lock(&file->mutex);
 
-       if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+       id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+       if (!__get_agent(file, id)) {
                ret = -EINVAL;
                goto out;
        }
index 50152c1b100452f7a4c8a9f733739ac25cbe777d..357de3b4fdddff6ac2a8fea3d45712863587f0ab 100644 (file)
@@ -265,6 +265,9 @@ void ib_uverbs_release_file(struct kref *ref)
        if (atomic_dec_and_test(&file->device->refcount))
                ib_uverbs_comp_dev(file->device);
 
+       if (file->async_file)
+               kref_put(&file->async_file->ref,
+                        ib_uverbs_release_async_event_file);
        kobject_put(&file->device->kobj);
        kfree(file);
 }
@@ -915,10 +918,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
        }
        mutex_unlock(&file->device->lists_mutex);
 
-       if (file->async_file)
-               kref_put(&file->async_file->ref,
-                        ib_uverbs_release_async_event_file);
-
        kref_put(&file->ref, ib_uverbs_release_file);
 
        return 0;
index 7b76e6f81aeb477181afedc2f44fec990ce3090f..f2fb7318abc104e99e2495f42ace1950ab43bb34 100644 (file)
@@ -274,13 +274,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                           struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
 {
        int err;
-       struct fw_ri_tpte tpt;
+       struct fw_ri_tpte *tpt;
        u32 stag_idx;
        static atomic_t key;
 
        if (c4iw_fatal_error(rdev))
                return -EIO;
 
+       tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
+       if (!tpt)
+               return -ENOMEM;
+
        stag_state = stag_state > 0;
        stag_idx = (*stag) >> 8;
 
@@ -290,6 +294,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                        mutex_lock(&rdev->stats.lock);
                        rdev->stats.stag.fail++;
                        mutex_unlock(&rdev->stats.lock);
+                       kfree(tpt);
                        return -ENOMEM;
                }
                mutex_lock(&rdev->stats.lock);
@@ -304,28 +309,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 
        /* write TPT entry */
        if (reset_tpt_entry)
-               memset(&tpt, 0, sizeof(tpt));
+               memset(tpt, 0, sizeof(*tpt));
        else {
-               tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+               tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
                        FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
                        FW_RI_TPTE_STAGSTATE_V(stag_state) |
                        FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
-               tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+               tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
                        (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
                        FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
                                                      FW_RI_VA_BASED_TO))|
                        FW_RI_TPTE_PS_V(page_size));
-               tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+               tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
                        FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
-               tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
-               tpt.va_hi = cpu_to_be32((u32)(to >> 32));
-               tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
-               tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
-               tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+               tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+               tpt->va_hi = cpu_to_be32((u32)(to >> 32));
+               tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+               tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
+               tpt->len_hi = cpu_to_be32((u32)(len >> 32));
        }
        err = write_adapter_mem(rdev, stag_idx +
                                (rdev->lldi.vr->stag.start >> 5),
-                               sizeof(tpt), &tpt, skb, wr_waitp);
+                               sizeof(*tpt), tpt, skb, wr_waitp);
 
        if (reset_tpt_entry) {
                c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -333,6 +338,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                rdev->stats.stag.cur -= 32;
                mutex_unlock(&rdev->stats.lock);
        }
+       kfree(tpt);
        return err;
 }
 
index d8eb4dc04d69035c050c7c48a0dc1d1177709192..6aa5a8a242ffddb2cd9494932076a995dbb78310 100644 (file)
@@ -14586,7 +14586,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
                clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
 }
 
-static void init_rxe(struct hfi1_devdata *dd)
+static int init_rxe(struct hfi1_devdata *dd)
 {
        struct rsm_map_table *rmt;
        u64 val;
@@ -14595,6 +14595,9 @@ static void init_rxe(struct hfi1_devdata *dd)
        write_csr(dd, RCV_ERR_MASK, ~0ull);
 
        rmt = alloc_rsm_map_table(dd);
+       if (!rmt)
+               return -ENOMEM;
+
        /* set up QOS, including the QPN map table */
        init_qos(dd, rmt);
        init_user_fecn_handling(dd, rmt);
@@ -14621,6 +14624,7 @@ static void init_rxe(struct hfi1_devdata *dd)
        val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
                RCV_BYPASS_HDR_SIZE_SHIFT);
        write_csr(dd, RCV_BYPASS, val);
+       return 0;
 }
 
 static void init_other(struct hfi1_devdata *dd)
@@ -15163,7 +15167,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
                goto bail_cleanup;
 
        /* set initial RXE CSRs */
-       init_rxe(dd);
+       ret = init_rxe(dd);
+       if (ret)
+               goto bail_cleanup;
+
        /* set initial TXE CSRs */
        init_txe(dd);
        /* set initial non-RXE, non-TXE CSRs */
index 7eaff4dcbfd77dd668fe979235711cb6a91a3ea3..5bc811b7e6cf95d36b0242b5a969025520b05e33 100644 (file)
@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
        if (!data)
                return -ENOMEM;
        copy = min(len, datalen - 1);
-       if (copy_from_user(data, buf, copy))
-               return -EFAULT;
+       if (copy_from_user(data, buf, copy)) {
+               ret = -EFAULT;
+               goto free_data;
+       }
 
        ret = debugfs_file_get(file->f_path.dentry);
        if (unlikely(ret))
-               return ret;
+               goto free_data;
        ptr = data;
        token = ptr;
        for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
        ret = len;
 
        debugfs_file_put(file->f_path.dentry);
+free_data:
        kfree(data);
        return ret;
 }
@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
                return -ENOMEM;
        ret = debugfs_file_get(file->f_path.dentry);
        if (unlikely(ret))
-               return ret;
+               goto free_data;
        bit = find_first_bit(fault->opcodes, bitsize);
        while (bit < bitsize) {
                zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
        data[size - 1] = '\n';
        data[size] = '\0';
        ret = simple_read_from_buffer(buf, len, pos, data, size);
+free_data:
        kfree(data);
        return ret;
 }
index 0307405491e015e325d1ed692c3e873822b991fc..f208a25d0e4f5d8e387d630938a39e9bb0dba567 100644 (file)
@@ -2326,7 +2326,7 @@ struct opa_port_status_req {
        __be32 vl_select_mask;
 };
 
-#define VL_MASK_ALL            0x000080ff
+#define VL_MASK_ALL            0x00000000000080ffUL
 
 struct opa_port_status_rsp {
        __u8 port_num;
@@ -2625,15 +2625,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
 }
 
 static void a0_portstatus(struct hfi1_pportdata *ppd,
-                         struct opa_port_status_rsp *rsp, u32 vl_select_mask)
+                         struct opa_port_status_rsp *rsp)
 {
        if (!is_bx(ppd->dd)) {
                unsigned long vl;
                u64 sum_vl_xmit_wait = 0;
-               u32 vl_all_mask = VL_MASK_ALL;
+               unsigned long vl_all_mask = VL_MASK_ALL;
 
-               for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
-                                8 * sizeof(vl_all_mask)) {
+               for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
                        u64 tmp = sum_vl_xmit_wait +
                                  read_port_cntr(ppd, C_TX_WAIT_VL,
                                                 idx_from_vl(vl));
@@ -2730,12 +2729,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
                (struct opa_port_status_req *)pmp->data;
        struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
        struct opa_port_status_rsp *rsp;
-       u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
+       unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
        unsigned long vl;
        size_t response_data_size;
        u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
        u8 port_num = req->port_num;
-       u8 num_vls = hweight32(vl_select_mask);
+       u8 num_vls = hweight64(vl_select_mask);
        struct _vls_pctrs *vlinfo;
        struct hfi1_ibport *ibp = to_iport(ibdev, port);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@@ -2771,7 +2770,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
 
        hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
 
-       rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
+       rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask);
        rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
                                          CNTR_INVALID_VL));
        rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
@@ -2842,8 +2841,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
         * So in the for_each_set_bit() loop below, we don't need
         * any additional checks for vl.
         */
-       for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
-                        8 * sizeof(vl_select_mask)) {
+       for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
                memset(vlinfo, 0, sizeof(*vlinfo));
 
                tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
@@ -2884,7 +2882,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
                vfi++;
        }
 
-       a0_portstatus(ppd, rsp, vl_select_mask);
+       a0_portstatus(ppd, rsp);
 
        if (resp_len)
                *resp_len += response_data_size;
@@ -2931,16 +2929,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
        return error_counter_summary;
 }
 
-static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
-                           u32 vl_select_mask)
+static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
 {
        if (!is_bx(ppd->dd)) {
                unsigned long vl;
                u64 sum_vl_xmit_wait = 0;
-               u32 vl_all_mask = VL_MASK_ALL;
+               unsigned long vl_all_mask = VL_MASK_ALL;
 
-               for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
-                                8 * sizeof(vl_all_mask)) {
+               for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
                        u64 tmp = sum_vl_xmit_wait +
                                  read_port_cntr(ppd, C_TX_WAIT_VL,
                                                 idx_from_vl(vl));
@@ -2995,7 +2991,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
        u64 port_mask;
        u8 port_num;
        unsigned long vl;
-       u32 vl_select_mask;
+       unsigned long vl_select_mask;
        int vfi;
        u16 link_width;
        u16 link_speed;
@@ -3073,8 +3069,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
         * So in the for_each_set_bit() loop below, we don't need
         * any additional checks for vl.
         */
-       for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
-                        8 * sizeof(req->vl_select_mask)) {
+       for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
                memset(vlinfo, 0, sizeof(*vlinfo));
 
                rsp->vls[vfi].port_vl_xmit_data =
@@ -3122,7 +3117,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
                vfi++;
        }
 
-       a0_datacounters(ppd, rsp, vl_select_mask);
+       a0_datacounters(ppd, rsp);
 
        if (resp_len)
                *resp_len += response_data_size;
@@ -3217,7 +3212,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
        struct _vls_ectrs *vlinfo;
        unsigned long vl;
        u64 port_mask, tmp;
-       u32 vl_select_mask;
+       unsigned long vl_select_mask;
        int vfi;
 
        req = (struct opa_port_error_counters64_msg *)pmp->data;
@@ -3276,8 +3271,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
        vlinfo = &rsp->vls[0];
        vfi = 0;
        vl_select_mask = be32_to_cpu(req->vl_select_mask);
-       for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
-                        8 * sizeof(req->vl_select_mask)) {
+       for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
                memset(vlinfo, 0, sizeof(*vlinfo));
                rsp->vls[vfi].port_vl_xmit_discards =
                        cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
@@ -3488,7 +3482,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
        u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
        u64 portn = be64_to_cpu(req->port_select_mask[3]);
        u32 counter_select = be32_to_cpu(req->counter_select_mask);
-       u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
+       unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
        unsigned long vl;
 
        if ((nports != 1) || (portn != 1 << port)) {
@@ -3582,8 +3576,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
        if (counter_select & CS_UNCORRECTABLE_ERRORS)
                write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
 
-       for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
-                        8 * sizeof(vl_select_mask)) {
+       for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
                if (counter_select & CS_PORT_XMIT_DATA)
                        write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
 
index 88e326d6cc494b528f9675e7c5bf45e9fbcee8f8..d648a4167832c61cd02a071b0de11139cb8c3dc9 100644 (file)
@@ -410,10 +410,7 @@ static void sdma_flush(struct sdma_engine *sde)
        sdma_flush_descq(sde);
        spin_lock_irqsave(&sde->flushlist_lock, flags);
        /* copy flush list */
-       list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
-               list_del_init(&txp->list);
-               list_add_tail(&txp->list, &flushlist);
-       }
+       list_splice_init(&sde->flushlist, &flushlist);
        spin_unlock_irqrestore(&sde->flushlist_lock, flags);
        /* flush from flush list */
        list_for_each_entry_safe(txp, txp_next, &flushlist, list)
@@ -2426,7 +2423,7 @@ unlock_noconn:
                wait->tx_count++;
                wait->count += tx->num_desc;
        }
-       schedule_work(&sde->flush_worker);
+       queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
        ret = -ECOMM;
        goto unlock;
 nodesc:
@@ -2526,7 +2523,7 @@ unlock_noconn:
                }
        }
        spin_unlock(&sde->flushlist_lock);
-       schedule_work(&sde->flush_worker);
+       queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
        ret = -ECOMM;
        goto update_tail;
 nodesc:
index 27d9c4cefdc7a515ad5b183373652939ab9a09eb..1ad38c8c1ef97e2250f231b10f56c6d16f42abd6 100644 (file)
@@ -54,6 +54,7 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <rdma/opa_addr.h>
+#include <linux/nospec.h>
 
 #include "hfi.h"
 #include "common.h"
@@ -1596,6 +1597,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
        sl = rdma_ah_get_sl(ah_attr);
        if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
                return -EINVAL;
+       sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));
 
        sc5 = ibp->sl_to_sc[sl];
        if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
index e2e6c74a74522598e2fc02a7c5d78bb68939d746..a5e3349b8a7c31acf5e682306f4e483ae7ac5284 100644 (file)
@@ -806,6 +806,8 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
        struct i40iw_qp *iwqp = to_iwqp(ibqp);
        struct i40iw_sc_qp *qp = &iwqp->sc_qp;
 
+       attr->qp_state = iwqp->ibqp_state;
+       attr->cur_qp_state = attr->qp_state;
        attr->qp_access_flags = 0;
        attr->cap.max_send_wr = qp->qp_uk.sq_size;
        attr->cap.max_recv_wr = qp->qp_uk.rq_size;
index e5466d786bb1e9ed2ed8b600228a8e9e361a12f7..5aaa2a6c431b66c9bcc3f131cdd10184fd5003aa 100644 (file)
@@ -1668,8 +1668,6 @@ tx_err:
                                    tx_buf_size, DMA_TO_DEVICE);
                kfree(tun_qp->tx_ring[i].buf.addr);
        }
-       kfree(tun_qp->tx_ring);
-       tun_qp->tx_ring = NULL;
        i = MLX4_NUM_TUNNEL_BUFS;
 err:
        while (i > 0) {
@@ -1678,6 +1676,8 @@ err:
                                    rx_buf_size, DMA_FROM_DEVICE);
                kfree(tun_qp->ring[i].addr);
        }
+       kfree(tun_qp->tx_ring);
+       tun_qp->tx_ring = NULL;
        kfree(tun_qp->ring);
        tun_qp->ring = NULL;
        return -ENOMEM;
index 32a9e9228b13554c2d1d5db057a5a484efc4f2bf..cdf6e26ebc87da8ca5e4747f9835ea959d8885db 100644 (file)
@@ -197,19 +197,33 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
                             vl_15_dropped);
 }
 
-static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
+static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
                           const struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-       int err;
+       struct mlx5_core_dev *mdev;
+       bool native_port = true;
+       u8 mdev_port_num;
        void *out_cnt;
+       int err;
 
+       mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+       if (!mdev) {
+               /* Fail to get the native port, likely due to 2nd port is still
+                * unaffiliated. In such case default to 1st port and attached
+                * PF device.
+                */
+               native_port = false;
+               mdev = dev->mdev;
+               mdev_port_num = 1;
+       }
        /* Declaring support of extended counters */
        if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
                struct ib_class_port_info cpi = {};
 
                cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
                memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
-               return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+               err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+               goto done;
        }
 
        if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
@@ -218,11 +232,13 @@ static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
                int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
 
                out_cnt = kvzalloc(sz, GFP_KERNEL);
-               if (!out_cnt)
-                       return IB_MAD_RESULT_FAILURE;
+               if (!out_cnt) {
+                       err = IB_MAD_RESULT_FAILURE;
+                       goto done;
+               }
 
                err = mlx5_core_query_vport_counter(mdev, 0, 0,
-                                                   port_num, out_cnt, sz);
+                                                   mdev_port_num, out_cnt, sz);
                if (!err)
                        pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
        } else {
@@ -231,20 +247,23 @@ static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
                int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 
                out_cnt = kvzalloc(sz, GFP_KERNEL);
-               if (!out_cnt)
-                       return IB_MAD_RESULT_FAILURE;
+               if (!out_cnt) {
+                       err = IB_MAD_RESULT_FAILURE;
+                       goto done;
+               }
 
-               err = mlx5_core_query_ib_ppcnt(mdev, port_num,
+               err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
                                               out_cnt, sz);
                if (!err)
                        pma_cnt_assign(pma_cnt, out_cnt);
-               }
-
+       }
        kvfree(out_cnt);
-       if (err)
-               return IB_MAD_RESULT_FAILURE;
-
-       return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+       err = err ? IB_MAD_RESULT_FAILURE :
+                   IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+done:
+       if (native_port)
+               mlx5_ib_put_native_port_mdev(dev, port_num);
+       return err;
 }
 
 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -256,8 +275,6 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
-       struct mlx5_core_dev *mdev;
-       u8 mdev_port_num;
        int ret;
 
        if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
@@ -266,19 +283,14 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 
        memset(out_mad->data, 0, sizeof(out_mad->data));
 
-       mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
-       if (!mdev)
-               return IB_MAD_RESULT_FAILURE;
-
-       if (MLX5_CAP_GEN(mdev, vport_counters) &&
+       if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
            in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
-               ret = process_pma_cmd(mdev, mdev_port_num, in_mad, out_mad);
+               ret = process_pma_cmd(dev, port_num, in_mad, out_mad);
        } else {
                ret =  process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
                                   in_mad, out_mad);
        }
-       mlx5_ib_put_native_port_mdev(dev, port_num);
        return ret;
 }
 
index 8cc4da62f050e860878b29ba0e40ce0527bd9ae7..c05eae93170eb738e37fa968eede41207782e4b1 100644 (file)
@@ -939,15 +939,19 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        }
 
        if (MLX5_CAP_GEN(mdev, tag_matching)) {
-               props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
                props->tm_caps.max_num_tags =
                        (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
-               props->tm_caps.flags = IB_TM_CAP_RC;
                props->tm_caps.max_ops =
                        1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
                props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
        }
 
+       if (MLX5_CAP_GEN(mdev, tag_matching) &&
+           MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
+               props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
+               props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
+       }
+
        if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
                props->cq_caps.max_cq_moderation_count =
                                                MLX5_MAX_CQ_COUNT;
@@ -6366,6 +6370,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
                        mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
                list_del(&mpi->list);
                mutex_unlock(&mlx5_ib_multiport_mutex);
+               kfree(mpi);
                return;
        }
 
index 320d4dfe8c2f419cfdb9f53ddcdcc122eed294f1..941d1df54631afb0e283a83e75e18fec9e734f46 100644 (file)
@@ -467,6 +467,7 @@ struct mlx5_umr_wr {
        u64                             length;
        int                             access_flags;
        u32                             mkey;
+       u8                              ignore_free_state:1;
 };
 
 static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
index 7df4a4fe4af477727c1bdf26ad120a04c0916a7d..bd1fdadf7ba01907078b9cae895be3a14fa4151e 100644 (file)
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
-       return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
 
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
        return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
 }
 
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
-       return order <= mr_cache_max_order(dev) &&
-               umr_can_modify_entity_size(dev);
-}
-
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
        int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -548,13 +538,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                return;
 
        c = order2idx(dev, mr->order);
-       if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
-               mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
-               return;
-       }
+       WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
 
-       if (unreg_umr(dev, mr))
+       if (unreg_umr(dev, mr)) {
+               mr->allocated_from_cache = false;
+               destroy_mkey(dev, mr);
+               ent = &cache->ent[c];
+               if (ent->cur < ent->limit)
+                       queue_work(cache->wq, &ent->work);
                return;
+       }
 
        ent = &cache->ent[c];
        spin_lock_irq(&ent->lock);
@@ -1302,7 +1295,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr = NULL;
-       bool populate_mtts = false;
+       bool use_umr;
        struct ib_umem *umem;
        int page_shift;
        int npages;
@@ -1335,29 +1328,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        if (err < 0)
                return ERR_PTR(err);
 
-       if (use_umr(dev, order)) {
+       use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+                 (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+                  !MLX5_CAP_GEN(dev->mdev, atomic));
+
+       if (order <= mr_cache_max_order(dev) && use_umr) {
                mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
                                         page_shift, order, access_flags);
                if (PTR_ERR(mr) == -EAGAIN) {
                        mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
                        mr = NULL;
                }
-               populate_mtts = false;
        } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
                if (access_flags & IB_ACCESS_ON_DEMAND) {
                        err = -EINVAL;
                        pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
                        goto error;
                }
-               populate_mtts = true;
+               use_umr = false;
        }
 
        if (!mr) {
-               if (!umr_can_modify_entity_size(dev))
-                       populate_mtts = true;
                mutex_lock(&dev->slow_path_mutex);
                mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-                               page_shift, access_flags, populate_mtts);
+                               page_shift, access_flags, !use_umr);
                mutex_unlock(&dev->slow_path_mutex);
        }
 
@@ -1375,7 +1369,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        update_odp_mr(mr);
 #endif
 
-       if (!populate_mtts) {
+       if (use_umr) {
                int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
                if (access_flags & IB_ACCESS_ON_DEMAND)
@@ -1408,9 +1402,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                return 0;
 
        umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
-                             MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+                             MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
        umrwr.wr.opcode = MLX5_IB_WR_UMR;
+       umrwr.pd = dev->umrc.pd;
        umrwr.mkey = mr->mmkey.key;
+       umrwr.ignore_free_state = 1;
 
        return mlx5_ib_post_send_wait(dev, &umrwr);
 }
@@ -1615,10 +1611,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                mr->sig = NULL;
        }
 
-       mlx5_free_priv_descs(mr);
-
-       if (!allocated_from_cache)
+       if (!allocated_from_cache) {
                destroy_mkey(dev, mr);
+               mlx5_free_priv_descs(mr);
+       }
 }
 
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
index 9e1cac8cb2609ed6b3c9c301b997cc2be699beeb..453e5c4ac19f4bb6dea3f8ebdd2b76ba2b656ab0 100644 (file)
@@ -497,7 +497,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                        u64 io_virt, size_t bcnt, u32 *bytes_mapped)
 {
-       u64 access_mask = ODP_READ_ALLOWED_BIT;
+       u64 access_mask;
        int npages = 0, page_shift, np;
        u64 start_idx, page_mask;
        struct ib_umem_odp *odp;
@@ -522,6 +522,7 @@ next_mr:
        page_shift = mr->umem->page_shift;
        page_mask = ~(BIT(page_shift) - 1);
        start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+       access_mask = ODP_READ_ALLOWED_BIT;
 
        if (mr->umem->writable)
                access_mask |= ODP_WRITE_ALLOWED_BIT;
index 183fe5c8ceb77600b593d0f872e1225e9111c773..77b1f3fd086ad754564627d4f465b155767ef2f3 100644 (file)
@@ -1501,7 +1501,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                }
 
                MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
-               MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                memcpy(rss_key, ucmd.rx_hash_key, len);
                break;
        }
@@ -3717,10 +3716,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
 
        memset(umr, 0, sizeof(*umr));
 
-       if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
-               umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
-       else
-               umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+       if (!umrwr->ignore_free_state) {
+               if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+                        /* fail if free */
+                       umr->flags = MLX5_UMR_CHECK_FREE;
+               else
+                       /* fail if not free */
+                       umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+       }
 
        umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
        if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
index 4111b798fd3c9ab7e749675140690270b9cac46c..681d8e0913d069c5aca66dbc87d134793e5c3f6d 100644 (file)
@@ -435,6 +435,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
                        qp->resp.va = reth_va(pkt);
                        qp->resp.rkey = reth_rkey(pkt);
                        qp->resp.resid = reth_len(pkt);
+                       qp->resp.length = reth_len(pkt);
                }
                access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
                                                     : IB_ACCESS_REMOTE_WRITE;
@@ -859,7 +860,9 @@ static enum resp_states do_complete(struct rxe_qp *qp,
                                pkt->mask & RXE_WRITE_MASK) ?
                                        IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
                wc->vendor_err = 0;
-               wc->byte_len = wqe->dma.length - wqe->dma.resid;
+               wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
+                               pkt->mask & RXE_WRITE_MASK) ?
+                                       qp->resp.length : wqe->dma.length - wqe->dma.resid;
 
                /* fields after byte_len are different between kernel and user
                 * space
index 332a16dad2a7e66e16b892d9c84fbad739377fff..3b731c7682e5bcadf874e2b0f475ca92591beee8 100644 (file)
@@ -212,6 +212,7 @@ struct rxe_resp_info {
        struct rxe_mem          *mr;
        u32                     resid;
        u32                     rkey;
+       u32                     length;
        u64                     atomic_orig;
 
        /* SRQ only */
index 30f840f874b3c010ddf373b133ef0bdd8297fb25..78dd36daac00ec29889844f75330ef66dac583ac 100644 (file)
@@ -1892,12 +1892,6 @@ static void ipoib_child_init(struct net_device *ndev)
        struct ipoib_dev_priv *priv = ipoib_priv(ndev);
        struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
 
-       dev_hold(priv->parent);
-
-       down_write(&ppriv->vlan_rwsem);
-       list_add_tail(&priv->list, &ppriv->child_intfs);
-       up_write(&ppriv->vlan_rwsem);
-
        priv->max_ib_mtu = ppriv->max_ib_mtu;
        set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
        memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
@@ -1940,6 +1934,17 @@ static int ipoib_ndo_init(struct net_device *ndev)
        if (rc) {
                pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
                        priv->ca->name, priv->dev->name, priv->port, rc);
+               return rc;
+       }
+
+       if (priv->parent) {
+               struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+               dev_hold(priv->parent);
+
+               down_write(&ppriv->vlan_rwsem);
+               list_add_tail(&priv->list, &ppriv->child_intfs);
+               up_write(&ppriv->vlan_rwsem);
        }
 
        return 0;
@@ -1957,6 +1962,14 @@ static void ipoib_ndo_uninit(struct net_device *dev)
         */
        WARN_ON(!list_empty(&priv->child_intfs));
 
+       if (priv->parent) {
+               struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+               down_write(&ppriv->vlan_rwsem);
+               list_del(&priv->list);
+               up_write(&ppriv->vlan_rwsem);
+       }
+
        ipoib_neigh_hash_uninit(dev);
 
        ipoib_ib_dev_cleanup(dev);
@@ -1968,15 +1981,8 @@ static void ipoib_ndo_uninit(struct net_device *dev)
                priv->wq = NULL;
        }
 
-       if (priv->parent) {
-               struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
-
-               down_write(&ppriv->vlan_rwsem);
-               list_del(&priv->list);
-               up_write(&ppriv->vlan_rwsem);
-
+       if (priv->parent)
                dev_put(priv->parent);
-       }
 }
 
 static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
@@ -1997,6 +2003,7 @@ static int ipoib_get_vf_config(struct net_device *dev, int vf,
                return err;
 
        ivf->vf = vf;
+       memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
 
        return 0;
 }
index 2c1114ee0c6dac47471df814b34f4213fd78b970..bc6a44a16445c17422970faa9e9432e77c121ac4 100644 (file)
@@ -3401,13 +3401,17 @@ static const match_table_t srp_opt_tokens = {
 
 /**
  * srp_parse_in - parse an IP address and port number combination
+ * @net:          [in]  Network namespace.
+ * @sa:                   [out] Address family, IP address and port number.
+ * @addr_port_str: [in]  IP address and port number.
+ * @has_port:     [out] Whether or not @addr_port_str includes a port number.
  *
  * Parse the following address formats:
  * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
  * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
  */
 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
-                       const char *addr_port_str)
+                       const char *addr_port_str, bool *has_port)
 {
        char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
        char *port_str;
@@ -3416,9 +3420,12 @@ static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
        if (!addr)
                return -ENOMEM;
        port_str = strrchr(addr, ':');
-       if (!port_str)
-               return -EINVAL;
-       *port_str++ = '\0';
+       if (port_str && strchr(port_str, ']'))
+               port_str = NULL;
+       if (port_str)
+               *port_str++ = '\0';
+       if (has_port)
+               *has_port = port_str != NULL;
        ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
        if (ret && addr[0]) {
                addr_end = addr + strlen(addr) - 1;
@@ -3440,6 +3447,7 @@ static int srp_parse_options(struct net *net, const char *buf,
        char *p;
        substring_t args[MAX_OPT_ARGS];
        unsigned long long ull;
+       bool has_port;
        int opt_mask = 0;
        int token;
        int ret = -EINVAL;
@@ -3538,7 +3546,8 @@ static int srp_parse_options(struct net *net, const char *buf,
                                ret = -ENOMEM;
                                goto out;
                        }
-                       ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
+                       ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
+                                          NULL);
                        if (ret < 0) {
                                pr_warn("bad source parameter '%s'\n", p);
                                kfree(p);
@@ -3554,7 +3563,10 @@ static int srp_parse_options(struct net *net, const char *buf,
                                ret = -ENOMEM;
                                goto out;
                        }
-                       ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
+                       ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
+                                          &has_port);
+                       if (!has_port)
+                               ret = -EINVAL;
                        if (ret < 0) {
                                pr_warn("bad dest parameter '%s'\n", p);
                                kfree(p);
index 78073259c9a1ad3d49575379e675239566ea295c..c431df7401b44bc8767bec6fa88b88209e7ac17d 100644 (file)
@@ -141,7 +141,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
                return -ENODEV;
 
        epirq = &interface->endpoint[0].desc;
+       if (!usb_endpoint_is_int_in(epirq))
+               return -ENODEV;
+
        epout = &interface->endpoint[1].desc;
+       if (!usb_endpoint_is_int_out(epout))
+               return -ENODEV;
 
        if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
                goto fail;
index 3e9c353d82effcd4a6e29396470bde27e5f19000..a01b25facf464a8ae90126218a02fca5744edc71 100644 (file)
@@ -248,10 +248,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
        onkey->input->phys = onkey->phys;
        onkey->input->dev.parent = &pdev->dev;
 
-       if (onkey->key_power)
-               input_set_capability(onkey->input, EV_KEY, KEY_POWER);
-
-       input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
+       input_set_capability(onkey->input, EV_KEY, KEY_POWER);
 
        INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
 
index 0a6f7ca883e7fe816a82d0dae915ab120e6681e9..dd80ff6cc4273079fa8f75806d89df97e914fa01 100644 (file)
@@ -24,6 +24,7 @@
 
 #include "psmouse.h"
 #include "alps.h"
+#include "trackpoint.h"
 
 /*
  * Definitions for ALPS version 3 and 4 command mode protocol
@@ -2864,6 +2865,23 @@ static const struct alps_protocol_info *alps_match_table(unsigned char *e7,
        return NULL;
 }
 
+static bool alps_is_cs19_trackpoint(struct psmouse *psmouse)
+{
+       u8 param[2] = { 0 };
+
+       if (ps2_command(&psmouse->ps2dev,
+                       param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
+               return false;
+
+       /*
+        * param[0] contains the trackpoint device variant_id while
+        * param[1] contains the firmware_id. So far all alps
+        * trackpoint-only devices have their variant_ids equal
+        * TP_VARIANT_ALPS and their firmware_ids are in 0x20~0x2f range.
+        */
+       return param[0] == TP_VARIANT_ALPS && ((param[1] & 0xf0) == 0x20);
+}
+
 static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
 {
        const struct alps_protocol_info *protocol;
@@ -3164,6 +3182,20 @@ int alps_detect(struct psmouse *psmouse, bool set_properties)
        if (error)
                return error;
 
+       /*
+        * ALPS cs19 is a trackpoint-only device, and uses different
+        * protocol than DualPoint ones, so we return -EINVAL here and let
+        * trackpoint.c drive this device. If the trackpoint driver is not
+        * enabled, the device will fall back to a bare PS/2 mouse.
+        * If ps2_command() fails here, we depend on the immediately
+        * followed psmouse_reset() to reset the device to normal state.
+        */
+       if (alps_is_cs19_trackpoint(psmouse)) {
+               psmouse_dbg(psmouse,
+                           "ALPS CS19 trackpoint-only device detected, ignoring\n");
+               return -EINVAL;
+       }
+
        /*
         * Reset the device to make sure it is fully operational:
         * on some laptops, like certain Dell Latitudes, we may
index f9525d6f0bfe810c9ab1c2bd0a2a971f2e9695b4..ae012639ae1d52bf30ee59f03884a3fea52eeeac 100644 (file)
@@ -1358,7 +1358,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0618", 0 },
        { "ELAN0619", 0 },
        { "ELAN061A", 0 },
-       { "ELAN061B", 0 },
+/*     { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
        { "ELAN061C", 0 },
        { "ELAN061D", 0 },
        { "ELAN061E", 0 },
index 8e6077d8e434a30cf76598164aa95cffcf2439fb..06cebde2422ea7589928351da4b117edcd0239e9 100644 (file)
@@ -176,13 +176,16 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
        "LEN0073", /* X1 Carbon G5 (Elantech) */
        "LEN0092", /* X1 Carbon 6 */
+       "LEN0093", /* T480 */
        "LEN0096", /* X280 */
        "LEN0097", /* X280 -> ALPS trackpoint */
+       "LEN009b", /* T580 */
        "LEN200f", /* T450s */
        "LEN2054", /* E480 */
        "LEN2055", /* E580 */
        "SYN3052", /* HP EliteBook 840 G4 */
        "SYN3221", /* HP 15-ay000 */
+       "SYN323d", /* HP Spectre X360 13-w013dx */
        NULL
 };
 
index 10a0391482343d4ba482336367b204cf64f6e47c..538986e5ac5bcf4f5c38267db393a30fedb0dee6 100644 (file)
@@ -161,7 +161,8 @@ struct trackpoint_data {
 #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
 int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
 #else
-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+static inline int trackpoint_detect(struct psmouse *psmouse,
+                                   bool set_properties)
 {
        return -ENOSYS;
 }
index 7fb358f961957507969db706c780459b937d2ba0..162526a0d463eba4e465e211d63e449348d0a3f2 100644 (file)
@@ -149,7 +149,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
        }
 
        mutex_lock(&data->irq_mutex);
-       bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
+       bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
               data->irq_count);
        /*
         * At this point, irq_status has all bits that are set in the
@@ -388,6 +388,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
        bitmap_copy(data->current_irq_mask, data->new_irq_mask,
                    data->num_of_irq_regs);
 
+       bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
+
 error_unlock:
        mutex_unlock(&data->irq_mutex);
        return error;
@@ -401,6 +403,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
        struct device *dev = &rmi_dev->dev;
 
        mutex_lock(&data->irq_mutex);
+       bitmap_andnot(data->fn_irq_bits,
+                     data->fn_irq_bits, mask, data->irq_count);
        bitmap_andnot(data->new_irq_mask,
                  data->current_irq_mask, mask, data->irq_count);
 
index a8b9be3e28db709ef8769e29da2684c3d1e3bcf9..7d0a5ccf5775122f7240f32061372d72de77f089 100644 (file)
@@ -245,40 +245,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
 
 static void hv_kbd_on_channel_callback(void *context)
 {
+       struct vmpacket_descriptor *desc;
        struct hv_device *hv_dev = context;
-       void *buffer;
-       int bufferlen = 0x100; /* Start with sensible size */
        u32 bytes_recvd;
        u64 req_id;
-       int error;
 
-       buffer = kmalloc(bufferlen, GFP_ATOMIC);
-       if (!buffer)
-               return;
-
-       while (1) {
-               error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
-                                            &bytes_recvd, &req_id);
-               switch (error) {
-               case 0:
-                       if (bytes_recvd == 0) {
-                               kfree(buffer);
-                               return;
-                       }
-
-                       hv_kbd_handle_received_packet(hv_dev, buffer,
-                                                     bytes_recvd, req_id);
-                       break;
+       foreach_vmbus_pkt(desc, hv_dev->channel) {
+               bytes_recvd = desc->len8 * 8;
+               req_id = desc->trans_id;
 
-               case -ENOBUFS:
-                       kfree(buffer);
-                       /* Handle large packet */
-                       bufferlen = bytes_recvd;
-                       buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
-                       if (!buffer)
-                               return;
-                       break;
-               }
+               hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
+                                             req_id);
        }
 }
 
index 4b8b9d7aa75e2785991fc5838bf55728c715e6fe..35031228a6d076cf0dd91dab411f049ec679df6b 100644 (file)
@@ -78,6 +78,7 @@ Scott Hill shill@gtcocalcomp.com
 
 /* Max size of a single report */
 #define REPORT_MAX_SIZE       10
+#define MAX_COLLECTION_LEVELS  10
 
 
 /* Bitmask whether pen is in range */
@@ -223,8 +224,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
        char  maintype = 'x';
        char  globtype[12];
        int   indent = 0;
-       char  indentstr[10] = "";
-
+       char  indentstr[MAX_COLLECTION_LEVELS + 1] = { 0 };
 
        dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n");
 
@@ -350,6 +350,13 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
                        case TAG_MAIN_COL_START:
                                maintype = 'S';
 
+                               if (indent == MAX_COLLECTION_LEVELS) {
+                                       dev_err(ddev, "Collection level %d would exceed limit of %d\n",
+                                               indent + 1,
+                                               MAX_COLLECTION_LEVELS);
+                                       break;
+                               }
+
                                if (data == 0) {
                                        dev_dbg(ddev, "======>>>>>> Physical\n");
                                        strcpy(globtype, "Physical");
@@ -369,8 +376,15 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
                                break;
 
                        case TAG_MAIN_COL_END:
-                               dev_dbg(ddev, "<<<<<<======\n");
                                maintype = 'E';
+
+                               if (indent == 0) {
+                                       dev_err(ddev, "Collection level already at zero\n");
+                                       break;
+                               }
+
+                               dev_dbg(ddev, "<<<<<<======\n");
+
                                indent--;
                                for (x = 0; x < indent; x++)
                                        indentstr[x] = '-';
index 75b500651e4e4051f1ec403e02febed6d59cd72f..b1cf0c9712740dc9b552907160d11b37fedecfb2 100644 (file)
@@ -116,6 +116,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
        if (intf->cur_altsetting->desc.bNumEndpoints < 1)
                return -ENODEV;
 
+       endpoint = &intf->cur_altsetting->endpoint[0].desc;
+       if (!usb_endpoint_is_int_in(endpoint))
+               return -ENODEV;
+
        kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
        input_dev = input_allocate_device();
        if (!kbtab || !input_dev)
@@ -154,8 +158,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
        input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
        input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
 
-       endpoint = &intf->cur_altsetting->endpoint[0].desc;
-
        usb_fill_int_urb(kbtab->irq, dev,
                         usb_rcvintpipe(dev, endpoint->bEndpointAddress),
                         kbtab->data, 8,
index d61570d64ee76bd8ac161daa6a9dc55b23a5ee40..48304e26f988b15f0947d1ecccfa4e6e5878b995 100644 (file)
@@ -1672,6 +1672,8 @@ static int usbtouch_probe(struct usb_interface *intf,
        if (!usbtouch || !input_dev)
                goto out_free;
 
+       mutex_init(&usbtouch->pm_mutex);
+
        type = &usbtouch_dev_info[id->driver_info];
        usbtouch->type = type;
        if (!type->process_pkt)
index ab5eba6edf82b308f81a7c5acf9f992e8e0e2dde..e13ea199f5896ec4e7b994e6d18fec0802de45ed 100644 (file)
@@ -10,7 +10,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
 obj-$(CONFIG_IOMMU_IOVA) += iova.o
 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
-obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
 obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
index 8d9920ff41344801136872cb5249d13466da4e9f..1f2ed44de243865d48b07a2a8d06b008a65cb837 100644 (file)
@@ -1153,6 +1153,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
        iommu_completion_wait(iommu);
 }
 
+static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
+{
+       struct iommu_cmd cmd;
+
+       build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+                             dom_id, 1);
+       iommu_queue_command(iommu, &cmd);
+
+       iommu_completion_wait(iommu);
+}
+
 static void amd_iommu_flush_all(struct amd_iommu *iommu)
 {
        struct iommu_cmd cmd;
@@ -1329,18 +1340,21 @@ static void domain_flush_devices(struct protection_domain *domain)
  * another level increases the size of the address space by 9 bits to a size up
  * to 64 bits.
  */
-static bool increase_address_space(struct protection_domain *domain,
+static void increase_address_space(struct protection_domain *domain,
                                   gfp_t gfp)
 {
+       unsigned long flags;
        u64 *pte;
 
-       if (domain->mode == PAGE_MODE_6_LEVEL)
+       spin_lock_irqsave(&domain->lock, flags);
+
+       if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
                /* address space already 64 bit large */
-               return false;
+               goto out;
 
        pte = (void *)get_zeroed_page(gfp);
        if (!pte)
-               return false;
+               goto out;
 
        *pte             = PM_LEVEL_PDE(domain->mode,
                                        iommu_virt_to_phys(domain->pt_root));
@@ -1348,7 +1362,10 @@ static bool increase_address_space(struct protection_domain *domain,
        domain->mode    += 1;
        domain->updated  = true;
 
-       return true;
+out:
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return;
 }
 
 static u64 *alloc_pte(struct protection_domain *domain,
@@ -1838,6 +1855,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
 {
        u64 pte_root = 0;
        u64 flags = 0;
+       u32 old_domid;
 
        if (domain->mode != PAGE_MODE_NONE)
                pte_root = iommu_virt_to_phys(domain->pt_root);
@@ -1887,8 +1905,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
        flags &= ~DEV_DOMID_MASK;
        flags |= domain->id;
 
+       old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
        amd_iommu_dev_table[devid].data[1]  = flags;
        amd_iommu_dev_table[devid].data[0]  = pte_root;
+
+       /*
+        * A kdump kernel might be replacing a domain ID that was copied from
+        * the previous kernel--if so, it needs to flush the translation cache
+        * entries for the old domain ID that is being overwritten
+        */
+       if (old_domid) {
+               struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+               amd_iommu_flush_tlb_domid(iommu, old_domid);
+       }
 }
 
 static void clear_dte_entry(u16 devid)
@@ -2533,7 +2563,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 
                        bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
                        phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
-                       ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+                       ret = iommu_map_page(domain, bus_addr, phys_addr,
+                                            PAGE_SIZE, prot,
+                                            GFP_ATOMIC | __GFP_NOWARN);
                        if (ret)
                                goto out_unmap;
 
diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h
new file mode 100644 (file)
index 0000000..12d540d
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef AMD_IOMMU_H
+#define AMD_IOMMU_H
+
+int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line);
+
+#ifdef CONFIG_DMI
+void amd_iommu_apply_ivrs_quirks(void);
+#else
+static void amd_iommu_apply_ivrs_quirks(void) { }
+#endif
+
+#endif
index 3a1d30304f7e9167a0a448b8132b5c93a535abff..1e9a5da562f0d7dd28b2623ca3ec2e6e3900811c 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/irq_remapping.h>
 
 #include <linux/crash_dump.h>
+#include "amd_iommu.h"
 #include "amd_iommu_proto.h"
 #include "amd_iommu_types.h"
 #include "irq_remapping.h"
@@ -1002,7 +1003,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
        set_iommu_for_device(iommu, devid);
 }
 
-static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
+int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
 {
        struct devid_map *entry;
        struct list_head *list;
@@ -1153,6 +1154,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
        if (ret)
                return ret;
 
+       amd_iommu_apply_ivrs_quirks();
+
        /*
         * First save the recommended feature enable bits from ACPI
         */
@@ -1710,7 +1713,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
        NULL,
 };
 
-static int iommu_init_pci(struct amd_iommu *iommu)
+static int __init iommu_init_pci(struct amd_iommu *iommu)
 {
        int cap_ptr = iommu->cap_ptr;
        u32 range, misc, low, high;
diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c
new file mode 100644 (file)
index 0000000..c235f79
--- /dev/null
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Quirks for AMD IOMMU
+ *
+ * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com>
+ */
+
+#ifdef CONFIG_DMI
+#include <linux/dmi.h>
+
+#include "amd_iommu.h"
+
+#define IVHD_SPECIAL_IOAPIC            1
+
+struct ivrs_quirk_entry {
+       u8 id;
+       u16 devid;
+};
+
+enum {
+       DELL_INSPIRON_7375 = 0,
+       DELL_LATITUDE_5495,
+       LENOVO_IDEAPAD_330S_15ARR,
+};
+
+static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = {
+       /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */
+       [DELL_INSPIRON_7375] = {
+               { .id = 4, .devid = 0xa0 },
+               { .id = 5, .devid = 0x2 },
+               {}
+       },
+       /* ivrs_ioapic[4]=00:14.0 */
+       [DELL_LATITUDE_5495] = {
+               { .id = 4, .devid = 0xa0 },
+               {}
+       },
+       /* ivrs_ioapic[32]=00:14.0 */
+       [LENOVO_IDEAPAD_330S_15ARR] = {
+               { .id = 32, .devid = 0xa0 },
+               {}
+       },
+       {}
+};
+
+static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
+{
+       const struct ivrs_quirk_entry *i;
+
+       for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
+               add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
+
+       return 0;
+}
+
+static const struct dmi_system_id ivrs_quirks[] __initconst = {
+       {
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Dell Inspiron 7375",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375],
+       },
+       {
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Dell Latitude 5495",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+       },
+       {
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Lenovo ideapad 330S-15ARR",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "81FB"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR],
+       },
+       {}
+};
+
+void __init amd_iommu_apply_ivrs_quirks(void)
+{
+       dmi_check_system(ivrs_quirks);
+}
+#endif
index 511ff9a1d6d94087bffaaac226055bf892e08d91..f9dbb064f95719640be283979275f669b1462c63 100644 (file)
@@ -675,7 +675,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
                 * - and wouldn't make the resulting output segment too long
                 */
                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
-                   (cur_len + s_length <= max_len)) {
+                   (max_len - cur_len >= s_length)) {
                        /* ...then concatenate it with the previous one */
                        cur_len += s_length;
                } else {
index c1439019dd127edcba6a59e3aebf2ff35bdb023d..b9af2419006f8341563551d2f32c1226c081b249 100644 (file)
@@ -3721,7 +3721,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 
        freelist = domain_unmap(domain, start_pfn, last_pfn);
 
-       if (intel_iommu_strict) {
+       if (intel_iommu_strict || !has_iova_flush_queue(&domain->iovad)) {
                iommu_flush_iotlb_psi(iommu, domain, start_pfn,
                                      nrpages, !freelist, 0);
                /* free iova */
index 8c15c598029934484e26520aa4aa7aa703a04ca3..bc14825edc9cbe508ada87af24dfef388f7b72c7 100644 (file)
@@ -211,18 +211,21 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new,
                        pos = pos->next;
                } else if ((start >= a) && (end <= b)) {
                        if (new->type == type)
-                               goto done;
+                               return 0;
                        else
                                pos = pos->next;
                } else {
                        if (new->type == type) {
                                phys_addr_t new_start = min(a, start);
                                phys_addr_t new_end = max(b, end);
+                               int ret;
 
                                list_del(&entry->list);
                                entry->start = new_start;
                                entry->length = new_end - new_start + 1;
-                               iommu_insert_resv_region(entry, regions);
+                               ret = iommu_insert_resv_region(entry, regions);
+                               kfree(entry);
+                               return ret;
                        } else {
                                pos = pos->next;
                        }
@@ -235,7 +238,6 @@ insert:
                return -ENOMEM;
 
        list_add_tail(&region->list, pos);
-done:
        return 0;
 }
 
index 83fe2621effe72bc1cbeecd80df4030235d87328..da4516fbf5425f25a6ea88a833b94fe47401ee6e 100644 (file)
@@ -65,9 +65,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
 
+bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+       return !!iovad->fq;
+}
+
 static void free_iova_flush_queue(struct iova_domain *iovad)
 {
-       if (!iovad->fq)
+       if (!has_iova_flush_queue(iovad))
                return;
 
        if (timer_pending(&iovad->fq_timer))
@@ -85,13 +90,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
 int init_iova_flush_queue(struct iova_domain *iovad,
                          iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
 {
+       struct iova_fq __percpu *queue;
        int cpu;
 
        atomic64_set(&iovad->fq_flush_start_cnt,  0);
        atomic64_set(&iovad->fq_flush_finish_cnt, 0);
 
-       iovad->fq = alloc_percpu(struct iova_fq);
-       if (!iovad->fq)
+       queue = alloc_percpu(struct iova_fq);
+       if (!queue)
                return -ENOMEM;
 
        iovad->flush_cb   = flush_cb;
@@ -100,13 +106,17 @@ int init_iova_flush_queue(struct iova_domain *iovad,
        for_each_possible_cpu(cpu) {
                struct iova_fq *fq;
 
-               fq = per_cpu_ptr(iovad->fq, cpu);
+               fq = per_cpu_ptr(queue, cpu);
                fq->head = 0;
                fq->tail = 0;
 
                spin_lock_init(&fq->lock);
        }
 
+       smp_wmb();
+
+       iovad->fq = queue;
+
        timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
        atomic_set(&iovad->fq_timer_on, 0);
 
@@ -138,8 +148,9 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
        struct iova *cached_iova;
 
        cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
-       if (free->pfn_hi < iovad->dma_32bit_pfn &&
-           free->pfn_lo >= cached_iova->pfn_lo)
+       if (free == cached_iova ||
+           (free->pfn_hi < iovad->dma_32bit_pfn &&
+            free->pfn_lo >= cached_iova->pfn_lo))
                iovad->cached32_node = rb_next(&free->node);
 
        cached_iova = rb_entry(iovad->cached_node, struct iova, node);
@@ -569,7 +580,9 @@ void queue_iova(struct iova_domain *iovad,
 
        spin_unlock_irqrestore(&fq->lock, flags);
 
-       if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
+       /* Avoid false sharing as much as possible. */
+       if (!atomic_read(&iovad->fq_timer_on) &&
+           !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
                mod_timer(&iovad->fq_timer,
                          jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
 }
index 4abc0ef522a8ef80445947e19ccd21afda3df6f2..72954b2894acb006848a4347eb4c8965ffdea3e0 100644 (file)
@@ -197,7 +197,7 @@ static void dump_ioptable(struct seq_file *s)
                        continue;
                }
 
-               iopte = iopte_offset(iopgd, 0);
+               iopte = iopte_get(obj, iopgd, 0);
                for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
                        if (!*iopte)
                                continue;
index c69d8690052073e841062de9cd8558809630628d..b52ba5717e8e37a2a27f1e3714883a92f73bf636 100644 (file)
@@ -44,6 +44,12 @@ static const struct iommu_ops omap_iommu_ops;
 /* bitmap of the page sizes currently supported */
 #define OMAP_IOMMU_PGSIZES     (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
 
+/*
+ * total size of L1 and L2 page tables reserved/used by bootloader per rproc
+ * for early boot usecases, must match the value used in bootloader
+ */
+#define EARLY_PAGE_TABLES_SIZE SZ_256K
+
 #define MMU_LOCK_BASE_SHIFT    10
 #define MMU_LOCK_BASE_MASK     (0x1f << MMU_LOCK_BASE_SHIFT)
 #define MMU_LOCK_BASE(x)       \
@@ -163,7 +169,7 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
        if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd,  SZ_16K))
                return -EINVAL;
 
-       pa = virt_to_phys(obj->iopgd);
+       pa = obj->iopgd_pa;
        if (!IS_ALIGNED(pa, SZ_16K))
                return -EINVAL;
 
@@ -198,6 +204,15 @@ static int iommu_enable(struct omap_iommu *obj)
 {
        int ret;
 
+       /*
+        * now that the threat of idling has passed, decrement the
+        * device usage count to balance the increment done in probe,
+        * the pm runtime device usage count will be managed normally
+        * from here on
+        */
+       if (obj->late_attach)
+               pm_runtime_put_noidle(obj->dev);
+
        ret = pm_runtime_get_sync(obj->dev);
        if (ret < 0)
                pm_runtime_put_noidle(obj->dev);
@@ -537,7 +552,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
        }
 
 pte_ready:
-       iopte = iopte_offset(iopgd, da);
+       iopte = iopte_get(obj, iopgd, da);
        *pt_dma = iopgd_page_paddr(iopgd);
        dev_vdbg(obj->dev,
                 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
@@ -696,7 +711,7 @@ iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
                goto out;
 
        if (iopgd_is_table(*iopgd))
-               iopte = iopte_offset(iopgd, da);
+               iopte = iopte_get(obj, iopgd, da);
 out:
        *ppgd = iopgd;
        *ppte = iopte;
@@ -716,13 +731,13 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
 
        if (iopgd_is_table(*iopgd)) {
                int i;
-               u32 *iopte = iopte_offset(iopgd, da);
+               u32 *iopte = iopte_get(obj, iopgd, da);
 
                bytes = IOPTE_SIZE;
                if (*iopte & IOPTE_LARGE) {
                        nent *= 16;
                        /* rewind to the 1st entry */
-                       iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
+                       iopte = iopte_get(obj, iopgd, (da & IOLARGE_MASK));
                }
                bytes *= nent;
                memset(iopte, 0, nent * sizeof(*iopte));
@@ -732,7 +747,8 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
                /*
                 * do table walk to check if this table is necessary or not
                 */
-               iopte = iopte_offset(iopgd, 0);
+               iopte = iopte_get(obj, iopgd, 0);
+
                for (i = 0; i < PTRS_PER_IOPTE; i++)
                        if (iopte[i])
                                goto out;
@@ -791,8 +807,15 @@ static void iopgtable_clear_entry_all(struct omap_iommu *obj)
                if (!*iopgd)
                        continue;
 
-               if (iopgd_is_table(*iopgd))
-                       iopte_free(obj, iopte_offset(iopgd, 0), true);
+               if (iopgd_is_table(*iopgd)) {
+                       if (obj->late_attach)
+                               iopte_free(obj, iopte_offset_lateattach(obj,
+                                                                       iopgd,
+                                                                       0),
+                                          true);
+                       else
+                               iopte_free(obj, iopte_offset(iopgd, 0), true);
+               }
 
                *iopgd = 0;
                flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
@@ -835,7 +858,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
                return IRQ_NONE;
        }
 
-       iopte = iopte_offset(iopgd, da);
+       iopte = iopte_get(obj, iopgd, da);
 
        dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
                obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
@@ -851,6 +874,16 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
 static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
 {
        int err;
+       u32 iopgd_pa;
+
+       if (obj->late_attach) {
+               iopgd_pa = iommu_read_reg(obj, MMU_TTB);
+               iopgd = ioremap(iopgd_pa, EARLY_PAGE_TABLES_SIZE);
+               if (!iopgd)
+                       return -ENOMEM;
+       } else {
+               iopgd_pa = virt_to_phys(iopgd);
+       }
 
        spin_lock(&obj->iommu_lock);
 
@@ -862,11 +895,14 @@ static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
                goto out_err;
        }
 
+       obj->iopgd_pa = iopgd_pa;
        obj->iopgd = iopgd;
        err = iommu_enable(obj);
        if (err)
                goto out_err;
-       flush_iotlb_all(obj);
+
+       if (!obj->late_attach)
+               flush_iotlb_all(obj);
 
        spin_unlock(&obj->iommu_lock);
 
@@ -889,13 +925,19 @@ static void omap_iommu_detach(struct omap_iommu *obj)
        if (!obj || IS_ERR(obj))
                return;
 
+       if (obj->late_attach && obj->iopgd)
+               iounmap(obj->iopgd);
+
        spin_lock(&obj->iommu_lock);
 
        dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
                         DMA_TO_DEVICE);
        obj->pd_dma = 0;
+
+       obj->iopgd_pa = 0;
        obj->iopgd = NULL;
        iommu_disable(obj);
+       obj->late_attach = 0;
 
        spin_unlock(&obj->iommu_lock);
 
@@ -1069,7 +1111,9 @@ static int omap_iommu_runtime_resume(struct device *dev)
                }
        }
 
-       if (pdata && pdata->deassert_reset) {
+       /* do not deassert reset only during initial boot for late attach */
+       if ((!obj->late_attach || obj->domain) &&
+           pdata && pdata->deassert_reset) {
                ret = pdata->deassert_reset(pdev, pdata->reset_name);
                if (ret) {
                        dev_err(dev, "deassert_reset failed: %d\n", ret);
@@ -1170,6 +1214,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
        struct omap_iommu *obj;
        struct resource *res;
        struct device_node *of = pdev->dev.of_node;
+       struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
        if (!of) {
                pr_err("%s: only DT-based devices are supported\n", __func__);
@@ -1192,6 +1237,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
        obj->name = dev_name(&pdev->dev);
        obj->nr_tlb_entries = 32;
        err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
+
        if (err && err != -EINVAL)
                return err;
        if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
@@ -1199,6 +1245,10 @@ static int omap_iommu_probe(struct platform_device *pdev)
        if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
                obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
 
+       if (pdata && pdata->device_is_enabled &&
+           pdata->device_is_enabled(pdev))
+               obj->late_attach = 1;
+
        obj->dev = &pdev->dev;
        obj->ctx = (void *)obj + sizeof(*obj);
        obj->cr_ctx = devm_kzalloc(&pdev->dev,
@@ -1247,6 +1297,15 @@ static int omap_iommu_probe(struct platform_device *pdev)
        }
 
        pm_runtime_irq_safe(obj->dev);
+
+       /*
+        * increment the device usage count so that runtime_suspend is not
+        * invoked immediately after the probe (due to the ti,no-idle-on-init)
+        * and before any remoteproc has attached to the iommu
+        */
+       if (obj->late_attach)
+               pm_runtime_get_noresume(obj->dev);
+
        pm_runtime_enable(obj->dev);
 
        omap_iommu_debugfs_add(obj);
@@ -1428,6 +1487,11 @@ static int omap_iommu_attach_init(struct device *dev,
 
        iommu = odomain->iommus;
        for (i = 0; i < odomain->num_iommus; i++, iommu++) {
+               /*
+                * not necessary for late attach, the page table would be setup
+                * by the boot loader. Leaving the below code in place, it does
+                * not have any side effects during late attach.
+                */
                iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
                if (!iommu->pgtable)
                        return -ENOMEM;
@@ -1549,7 +1613,8 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
        arch_data += (omap_domain->num_iommus - 1);
        for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
                oiommu = iommu->iommu_dev;
-               iopgtable_clear_entry_all(oiommu);
+               if (!oiommu->late_attach)
+                       iopgtable_clear_entry_all(oiommu);
 
                omap_iommu_detach(oiommu);
                iommu->iommu_dev = NULL;
index ca07fbf287d0910f19188ab815b5a6998b5c8bf1..c83810aceb82d255fe30f931e445aa3122883a75 100644 (file)
@@ -69,6 +69,8 @@ struct omap_iommu {
         * but share it globally for each iommu.
         */
        u32             *iopgd;
+       u32             iopgd_pa;
+       u32             late_attach;
        spinlock_t      page_table_lock; /* protect iopgd */
        dma_addr_t      pd_dma;
 
@@ -272,4 +274,12 @@ static inline int iotlb_cr_valid(struct cr_regs *cr)
        return cr->cam & MMU_CAM_V;
 }
 
+static inline u32 *iopte_get(struct omap_iommu *obj, u32 *iopgd, u32 da)
+{
+       if (obj->late_attach)
+               return iopte_offset_lateattach(obj, iopgd, da);
+       else
+               return iopte_offset(iopgd, da);
+}
+
 #endif /* _OMAP_IOMMU_H */
index 01a315227bf052d03a0c1f72e6e4e48c6121b201..12fc64f9be5572895b921f01ee28739e4bccb9f3 100644 (file)
@@ -99,4 +99,16 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 #define iopte_index(da)                (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
 #define iopte_offset(iopgd, da)        (iopgd_page_vaddr(iopgd) + iopte_index(da))
 
+/*
+ * compute vaddr for second-level page table relative to page table directory
+ * for late-attach mode
+ */
+#define iopgd_page_vaddr_lateattach(obj, pgd)                          \
+       ((u32 *)((u32 *)((obj)->iopgd)) +                               \
+       ((u32 *)iopgd_page_paddr((pgd)) - (u32 *)((obj)->iopgd_pa)))
+
+/* to find an entry in the second-level page table for late-attach mode */
+#define iopte_offset_lateattach(obj, iopgd, da)                                \
+       (iopgd_page_vaddr_lateattach(obj, iopgd) + iopte_index(da))
+
 #endif /* _OMAP_IOPGTABLE_H */
index 65ab2c80529ce58001cf7ddd2df791979dae7e43..e7549a2b1482bef4628de7724dc2de5aaac0fde1 100644 (file)
@@ -740,32 +740,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
 }
 
 static int its_wait_for_range_completion(struct its_node *its,
-                                        struct its_cmd_block *from,
+                                        u64    prev_idx,
                                         struct its_cmd_block *to)
 {
-       u64 rd_idx, from_idx, to_idx;
+       u64 rd_idx, to_idx, linear_idx;
        u32 count = 1000000;    /* 1s! */
 
-       from_idx = its_cmd_ptr_to_offset(its, from);
+       /* Linearize to_idx if the command set has wrapped around */
        to_idx = its_cmd_ptr_to_offset(its, to);
+       if (to_idx < prev_idx)
+               to_idx += ITS_CMD_QUEUE_SZ;
+
+       linear_idx = prev_idx;
 
        while (1) {
+               s64 delta;
+
                rd_idx = readl_relaxed(its->base + GITS_CREADR);
 
-               /* Direct case */
-               if (from_idx < to_idx && rd_idx >= to_idx)
-                       break;
+               /*
+                * Compute the read pointer progress, taking the
+                * potential wrap-around into account.
+                */
+               delta = rd_idx - prev_idx;
+               if (rd_idx < prev_idx)
+                       delta += ITS_CMD_QUEUE_SZ;
 
-               /* Wrapped case */
-               if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
+               linear_idx += delta;
+               if (linear_idx >= to_idx)
                        break;
 
                count--;
                if (!count) {
-                       pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
-                                          from_idx, to_idx, rd_idx);
+                       pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
+                                          to_idx, linear_idx);
                        return -1;
                }
+               prev_idx = rd_idx;
                cpu_relax();
                udelay(1);
        }
@@ -782,6 +793,7 @@ void name(struct its_node *its,                                             \
        struct its_cmd_block *cmd, *sync_cmd, *next_cmd;                \
        synctype *sync_obj;                                             \
        unsigned long flags;                                            \
+       u64 rd_idx;                                                     \
                                                                        \
        raw_spin_lock_irqsave(&its->lock, flags);                       \
                                                                        \
@@ -803,10 +815,11 @@ void name(struct its_node *its,                                           \
        }                                                               \
                                                                        \
 post:                                                                  \
+       rd_idx = readl_relaxed(its->base + GITS_CREADR);                \
        next_cmd = its_post_commands(its);                              \
        raw_spin_unlock_irqrestore(&its->lock, flags);                  \
                                                                        \
-       if (its_wait_for_range_completion(its, cmd, next_cmd))          \
+       if (its_wait_for_range_completion(its, rd_idx, next_cmd))       \
                pr_err_ratelimited("ITS cmd %ps failed\n", builder);    \
 }
 
@@ -2501,14 +2514,13 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
        struct its_node *its = its_dev->its;
        int i;
 
+       bitmap_release_region(its_dev->event_map.lpi_map,
+                             its_get_event_id(irq_domain_get_irq_data(domain, virq)),
+                             get_count_order(nr_irqs));
+
        for (i = 0; i < nr_irqs; i++) {
                struct irq_data *data = irq_domain_get_irq_data(domain,
                                                                virq + i);
-               u32 event = its_get_event_id(data);
-
-               /* Mark interrupt index as unused */
-               clear_bit(event, its_dev->event_map.lpi_map);
-
                /* Nuke the entry in the domain */
                irq_domain_reset_irq_data(data);
        }
@@ -2870,7 +2882,7 @@ static int its_vpe_init(struct its_vpe *vpe)
 
        if (!its_alloc_vpe_table(vpe_id)) {
                its_vpe_id_free(vpe_id);
-               its_free_pending_table(vpe->vpt_page);
+               its_free_pending_table(vpt_page);
                return -ENOMEM;
        }
 
index 4760307ab43fc33404b6b2ec07b2c3b49a6f6405..cef8f5e2e8fce9a1bb5e9609ec500fa1a2d9d149 100644 (file)
@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
        .irq_unmask             = imx_gpcv2_irq_unmask,
        .irq_set_wake           = imx_gpcv2_irq_set_wake,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
+       .irq_set_type           = irq_chip_set_type_parent,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
 #endif
index 7b531fd075b885396c378ef84bf0aaa110dc3c0f..7599b10ecf09d153e8b2b563f2ce515f19eb3358 100644 (file)
@@ -73,6 +73,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
        { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
        { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
        { .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
+       { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
        { }
 };
 
index ef5560b848ab3a66adc29d119d5608bf726642e0..21786a44236877ee35f24bbc04c2c1838f507cb2 100644 (file)
@@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
        if (!cdev->ap.applid)
                return -ENODEV;
 
+       if (count < CAPIMSG_BASELEN)
+               return -EINVAL;
+
        skb = alloc_skb(count, GFP_USER);
        if (!skb)
                return -ENOMEM;
@@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
        }
        mlen = CAPIMSG_LEN(skb->data);
        if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
-               if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
+               if (count < CAPI_DATA_B3_REQ_LEN ||
+                   (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
                        kfree_skb(skb);
                        return -EINVAL;
                }
@@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
        CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
 
        if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
+               if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
+                       kfree_skb(skb);
+                       return -EINVAL;
+               }
                mutex_lock(&cdev->lock);
                capincci_free(cdev, CAPIMSG_NCCI(skb->data));
                mutex_unlock(&cdev->lock);
index 6d05946b445eb039aeb6c9c755e94dbe8b8f1dac..c952002c6301d8c219c51243e22d38c5bee8b2ec 100644 (file)
@@ -1406,6 +1406,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
                                printk(KERN_DEBUG
                                       "%s: %s: alloc urb for fifo %i failed",
                                       hw->name, __func__, fifo->fifonum);
+                               continue;
                        }
                        fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
                        fifo->iso[i].indx = i;
@@ -1704,13 +1705,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
 static int
 setup_hfcsusb(struct hfcsusb *hw)
 {
+       void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
        u_char b;
+       int ret;
 
        if (debug & DBG_HFC_CALL_TRACE)
                printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
 
+       if (!dmabuf)
+               return -ENOMEM;
+
+       ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
+
+       memcpy(&b, dmabuf, sizeof(u_char));
+       kfree(dmabuf);
+
        /* check the chip id */
-       if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
+       if (ret != 1) {
                printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
                       hw->name, __func__);
                return 1;
@@ -1967,6 +1978,9 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
                                /* get endpoint base */
                                idx = ((ep_addr & 0x7f) - 1) * 2;
+                               if (idx > 15)
+                                       return -EIO;
+
                                if (ep_addr & 0x80)
                                        idx++;
                                attr = ep->desc.bmAttributes;
index a73337b74f410c1bc70c84bddfa74caca32253c7..db588a79a9f043f2ef4a63329ac1842bf9391ae6 100644 (file)
@@ -764,6 +764,8 @@ base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
 
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;
+       if (!capable(CAP_NET_RAW))
+               return -EPERM;
 
        sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
        if (!sk)
index 17d73db1456ebb4009c9dfa396820aa2e2485acd..e4cb3811e82a3e8ac7901dc6523cddd40e42d98d 100644 (file)
@@ -177,6 +177,7 @@ err_activate:
        list_del(&led_cdev->trig_list);
        write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
        led_set_brightness(led_cdev, LED_OFF);
+       kfree(event);
 
        return ret;
 }
index 2a9009fe5545d059d625075eb04a0ed53dbb432e..18edc8bdc9f775d52867f1453d0976f770d82dd6 100644 (file)
@@ -263,7 +263,11 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip)
 {
        const struct firmware *fw = chip->fw;
 
-       if (fw->size > LP5562_PROGRAM_LENGTH) {
+       /*
+        * the firmware is encoded in ascii hex character, with 2 chars
+        * per byte
+        */
+       if (fw->size > (LP5562_PROGRAM_LENGTH * 2)) {
                dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
                        fw->size);
                return;
index 95be6e36c7ddf5f16d4ebf16363ae121ce65ad95..80710c62ac29313073399cc16b5c87d01cc81759 100644 (file)
@@ -288,14 +288,16 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
 {
-       struct bio_vec bv;
-       int i;
-
-       WARN_ON(off + nr_pages != bio->bi_vcnt);
-
-       for (i = off; i < nr_pages + off; i++) {
-               bv = bio->bi_io_vec[i];
-               mempool_free(bv.bv_page, &pblk->page_bio_pool);
+       struct bio_vec *bv;
+       struct page *page;
+       int i, e, nbv = 0;
+
+       for (i = 0; i < bio->bi_vcnt; i++) {
+               bv = &bio->bi_io_vec[i];
+               page = bv->bv_page;
+               for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
+                       if (nbv >= off)
+                               mempool_free(page++, &pblk->page_bio_pool);
        }
 }
 
index 674b35f402f5e939833bdbde4a39399689d653f9..055c90b8253cbe37749cb94338ef45b01574056b 100644 (file)
@@ -391,11 +391,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
 
        of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
                if (!strncmp(name, mbox_name, strlen(name)))
-                       break;
+                       return mbox_request_channel(cl, index);
                index++;
        }
 
-       return mbox_request_channel(cl, index);
+       dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+               __func__, name);
+       return ERR_PTR(-EINVAL);
 }
 EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
 
index 333ed4a9d4b8fc62d14fdc9abc8db0242a80026c..5255dcb551a788a3ca4802e3c9cfc13fe60f83c6 100644 (file)
@@ -55,7 +55,6 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
 
 static int qcom_apcs_ipc_probe(struct platform_device *pdev)
 {
-       struct device_node *np = pdev->dev.of_node;
        struct qcom_apcs_ipc *apcs;
        struct regmap *regmap;
        struct resource *res;
@@ -63,6 +62,11 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
        void __iomem *base;
        unsigned long i;
        int ret;
+       const struct of_device_id apcs_clk_match_table[] = {
+               { .compatible = "qcom,msm8916-apcs-kpss-global", },
+               { .compatible = "qcom,qcs404-apcs-apps-global", },
+               {}
+       };
 
        apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
        if (!apcs)
@@ -97,7 +101,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
                return ret;
        }
 
-       if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) {
+       if (of_match_device(apcs_clk_match_table, &pdev->dev)) {
                apcs->clk = platform_device_register_data(&pdev->dev,
                                                          "qcom-apcs-msm8916-clk",
                                                          -1, NULL, 0);
index de85b3af3b39dd289e117fc655032a757b5b98c4..9c3beb1e382b9ed616cc039662987d99618511e6 100644 (file)
@@ -393,6 +393,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
        struct bucket *b;
        long r;
 
+
+       /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+       if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
+               return -1;
+
        /* fastpath */
        if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
            fifo_pop(&ca->free[reserve], r))
@@ -484,6 +489,10 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 {
        int i;
 
+       /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+       if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+               return -1;
+
        lockdep_assert_held(&c->bucket_lock);
        BUG_ON(!n || n > c->caches_loaded || n > 8);
 
index 954dad29e6e8fca910b0ebd24171591f2acd0831..83f0b91aeb90d48d3e6db32bf3a83cdc06217e92 100644 (file)
@@ -708,8 +708,6 @@ struct cache_set {
 
 #define BUCKET_HASH_BITS       12
        struct hlist_head       bucket_hash[1 << BUCKET_HASH_BITS];
-
-       DECLARE_HEAP(struct btree *, flush_btree);
 };
 
 struct bbio {
index 3f4211b5cd3347329c16bac78b3fe2c04804d3ae..45f684689c357c96783e68a7b0b5568263ce6a74 100644 (file)
@@ -35,7 +35,7 @@
 #include <linux/rcupdate.h>
 #include <linux/sched/clock.h>
 #include <linux/rculist.h>
-
+#include <linux/delay.h>
 #include <trace/events/bcache.h>
 
 /*
@@ -649,7 +649,25 @@ static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
                up(&b->io_mutex);
        }
 
+retry:
+       /*
+        * BTREE_NODE_dirty might be cleared in btree_flush_btree() by
+        * __bch_btree_node_write(). To avoid an extra flush, acquire
+        * b->write_lock before checking BTREE_NODE_dirty bit.
+        */
        mutex_lock(&b->write_lock);
+       /*
+        * If this btree node is selected in btree_flush_write() by journal
+        * code, delay and retry until the node is flushed by journal code
+        * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
+        */
+       if (btree_node_journal_flush(b)) {
+               pr_debug("bnode %p is flushing by journal, retry", b);
+               mutex_unlock(&b->write_lock);
+               udelay(1);
+               goto retry;
+       }
+
        if (btree_node_dirty(b))
                __bch_btree_node_write(b, &cl);
        mutex_unlock(&b->write_lock);
@@ -772,10 +790,15 @@ void bch_btree_cache_free(struct cache_set *c)
        while (!list_empty(&c->btree_cache)) {
                b = list_first_entry(&c->btree_cache, struct btree, list);
 
-               if (btree_node_dirty(b))
+               /*
+                * This function is called by cache_set_free(), no I/O
+                * request on cache now, it is unnecessary to acquire
+                * b->write_lock before clearing BTREE_NODE_dirty anymore.
+                */
+               if (btree_node_dirty(b)) {
                        btree_complete_write(b, btree_current_write(b));
-               clear_bit(BTREE_NODE_dirty, &b->flags);
-
+                       clear_bit(BTREE_NODE_dirty, &b->flags);
+               }
                mca_data_free(b);
        }
 
@@ -1061,11 +1084,25 @@ static void btree_node_free(struct btree *b)
 
        BUG_ON(b == b->c->root);
 
+retry:
        mutex_lock(&b->write_lock);
+       /*
+        * If the btree node is selected and flushing in btree_flush_write(),
+        * delay and retry until the BTREE_NODE_journal_flush bit cleared,
+        * then it is safe to free the btree node here. Otherwise this btree
+        * node will be in race condition.
+        */
+       if (btree_node_journal_flush(b)) {
+               mutex_unlock(&b->write_lock);
+               pr_debug("bnode %p journal_flush set, retry", b);
+               udelay(1);
+               goto retry;
+       }
 
-       if (btree_node_dirty(b))
+       if (btree_node_dirty(b)) {
                btree_complete_write(b, btree_current_write(b));
-       clear_bit(BTREE_NODE_dirty, &b->flags);
+               clear_bit(BTREE_NODE_dirty, &b->flags);
+       }
 
        mutex_unlock(&b->write_lock);
 
index a68d6c55783bd97eaf49d5744c9422f8af24be07..4d0cca145f6992a4efe7be1ff930102b4ce6c620 100644 (file)
@@ -158,11 +158,13 @@ enum btree_flags {
        BTREE_NODE_io_error,
        BTREE_NODE_dirty,
        BTREE_NODE_write_idx,
+       BTREE_NODE_journal_flush,
 };
 
 BTREE_FLAG(io_error);
 BTREE_FLAG(dirty);
 BTREE_FLAG(write_idx);
+BTREE_FLAG(journal_flush);
 
 static inline struct btree_write *btree_current_write(struct btree *b)
 {
index 73f5319295bc9c1c8a3caf47ac7b7ab4ee5cfcdd..c12cd809ab1938c3aedf64e86c30dfeb0655b354 100644 (file)
@@ -105,8 +105,14 @@ struct closure_syncer {
 
 static void closure_sync_fn(struct closure *cl)
 {
-       cl->s->done = 1;
-       wake_up_process(cl->s->task);
+       struct closure_syncer *s = cl->s;
+       struct task_struct *p;
+
+       rcu_read_lock();
+       p = READ_ONCE(s->task);
+       s->done = 1;
+       wake_up_process(p);
+       rcu_read_unlock();
 }
 
 void __sched __closure_sync(struct closure *cl)
index c809724e6571e4be1d61ed0198a471ec8889c044..886710043025f21fe4f55fb4cb9fc1e6196a081e 100644 (file)
@@ -538,6 +538,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
 {
        struct btree *b = container_of(bk, struct btree, keys);
        unsigned int i, stale;
+       char buf[80];
 
        if (!KEY_PTRS(k) ||
            bch_extent_invalid(bk, k))
@@ -547,19 +548,19 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
                if (!ptr_available(b->c, k, i))
                        return true;
 
-       if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
-               return false;
-
        for (i = 0; i < KEY_PTRS(k); i++) {
                stale = ptr_stale(b->c, k, i);
 
-               btree_bug_on(stale > 96, b,
+               if (stale && KEY_DIRTY(k)) {
+                       bch_extent_to_text(buf, sizeof(buf), k);
+                       pr_info("stale dirty pointer, stale %u, key: %s",
+                               stale, buf);
+               }
+
+               btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
                             "key too stale: %i, need_gc %u",
                             stale, b->c->need_gc);
 
-               btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
-                            b, "stale dirty pointer");
-
                if (stale)
                        return true;
 
index c250979683194c69f8ea1b9585e13907c877eb3a..4d93f07f63e515c23e0792331d87f3c2cea1c485 100644 (file)
@@ -58,6 +58,18 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
 
        WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
 
+       /*
+        * Read-ahead requests on a degrading and recovering md raid
+        * (e.g. raid6) device might be failured immediately by md
+        * raid code, which is not a real hardware media failure. So
+        * we shouldn't count failed REQ_RAHEAD bio to dc->io_errors.
+        */
+       if (bio->bi_opf & REQ_RAHEAD) {
+               pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore",
+                                   dc->backing_dev_name);
+               return;
+       }
+
        errors = atomic_add_return(1, &dc->io_errors);
        if (errors < dc->error_limit)
                pr_err("%s: IO error on backing device, unrecoverable",
index f880e5eba8dd9a7e9e4030934453a3c2501e472c..7bb15cddca5ecb6dbd25d3f21b2ca201e9d106f8 100644 (file)
@@ -390,12 +390,6 @@ err:
 }
 
 /* Journalling */
-#define journal_max_cmp(l, r) \
-       (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \
-        fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
-#define journal_min_cmp(l, r) \
-       (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \
-        fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
 
 static void btree_flush_write(struct cache_set *c)
 {
@@ -403,38 +397,34 @@ static void btree_flush_write(struct cache_set *c)
         * Try to find the btree node with that references the oldest journal
         * entry, best is our current candidate and is locked if non NULL:
         */
-       struct btree *b;
-       int i;
+       struct btree *b, *best;
+       unsigned int i;
 
        atomic_long_inc(&c->flush_write);
-
 retry:
-       spin_lock(&c->journal.lock);
-       if (heap_empty(&c->flush_btree)) {
-               for_each_cached_btree(b, c, i)
-                       if (btree_current_write(b)->journal) {
-                               if (!heap_full(&c->flush_btree))
-                                       heap_add(&c->flush_btree, b,
-                                                journal_max_cmp);
-                               else if (journal_max_cmp(b,
-                                        heap_peek(&c->flush_btree))) {
-                                       c->flush_btree.data[0] = b;
-                                       heap_sift(&c->flush_btree, 0,
-                                                 journal_max_cmp);
-                               }
+       best = NULL;
+
+       mutex_lock(&c->bucket_lock);
+       for_each_cached_btree(b, c, i)
+               if (btree_current_write(b)->journal) {
+                       if (!best)
+                               best = b;
+                       else if (journal_pin_cmp(c,
+                                       btree_current_write(best)->journal,
+                                       btree_current_write(b)->journal)) {
+                               best = b;
                        }
+               }
 
-               for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
-                       heap_sift(&c->flush_btree, i, journal_min_cmp);
-       }
-
-       b = NULL;
-       heap_pop(&c->flush_btree, b, journal_min_cmp);
-       spin_unlock(&c->journal.lock);
+       b = best;
+       if (b)
+               set_btree_node_journal_flush(b);
+       mutex_unlock(&c->bucket_lock);
 
        if (b) {
                mutex_lock(&b->write_lock);
                if (!btree_current_write(b)->journal) {
+                       clear_bit(BTREE_NODE_journal_flush, &b->flags);
                        mutex_unlock(&b->write_lock);
                        /* We raced */
                        atomic_long_inc(&c->retry_flush_write);
@@ -442,6 +432,7 @@ retry:
                }
 
                __bch_btree_node_write(b, NULL);
+               clear_bit(BTREE_NODE_journal_flush, &b->flags);
                mutex_unlock(&b->write_lock);
        }
 }
@@ -810,6 +801,10 @@ atomic_t *bch_journal(struct cache_set *c,
        struct journal_write *w;
        atomic_t *ret;
 
+       /* No journaling if CACHE_SET_IO_DISABLE set already */
+       if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+               return NULL;
+
        if (!CACHE_SYNC(&c->sb))
                return NULL;
 
@@ -854,7 +849,6 @@ void bch_journal_free(struct cache_set *c)
        free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
        free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
        free_fifo(&c->journal.pin);
-       free_heap(&c->flush_btree);
 }
 
 int bch_journal_alloc(struct cache_set *c)
@@ -869,8 +863,7 @@ int bch_journal_alloc(struct cache_set *c)
        j->w[0].c = c;
        j->w[1].c = c;
 
-       if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) ||
-           !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
+       if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
            !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
            !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
                return -ENOMEM;
index 2409507d7bff854595578b161225c86f92a46053..2321643974dab5e7f1d23666405aab008c0cc440 100644 (file)
@@ -1180,18 +1180,16 @@ static void cached_dev_free(struct closure *cl)
 {
        struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
 
-       mutex_lock(&bch_register_lock);
-
        if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
                cancel_writeback_rate_update_dwork(dc);
 
        if (!IS_ERR_OR_NULL(dc->writeback_thread))
                kthread_stop(dc->writeback_thread);
-       if (dc->writeback_write_wq)
-               destroy_workqueue(dc->writeback_write_wq);
        if (!IS_ERR_OR_NULL(dc->status_update_thread))
                kthread_stop(dc->status_update_thread);
 
+       mutex_lock(&bch_register_lock);
+
        if (atomic_read(&dc->running))
                bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
        bcache_device_free(&dc->disk);
@@ -1425,8 +1423,6 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
 
 bool bch_cached_dev_error(struct cached_dev *dc)
 {
-       struct cache_set *c;
-
        if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
                return false;
 
@@ -1437,21 +1433,6 @@ bool bch_cached_dev_error(struct cached_dev *dc)
        pr_err("stop %s: too many IO errors on backing device %s\n",
                dc->disk.disk->disk_name, dc->backing_dev_name);
 
-       /*
-        * If the cached device is still attached to a cache set,
-        * even dc->io_disable is true and no more I/O requests
-        * accepted, cache device internal I/O (writeback scan or
-        * garbage collection) may still prevent bcache device from
-        * being stopped. So here CACHE_SET_IO_DISABLE should be
-        * set to c->flags too, to make the internal I/O to cache
-        * device rejected and stopped immediately.
-        * If c is NULL, that means the bcache device is not attached
-        * to any cache set, then no CACHE_SET_IO_DISABLE bit to set.
-        */
-       c = dc->disk.c;
-       if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
-               pr_info("CACHE_SET_IO_DISABLE already set");
-
        bcache_device_stop(&dc->disk);
        return true;
 }
@@ -1552,7 +1533,7 @@ static void cache_set_flush(struct closure *cl)
        kobject_put(&c->internal);
        kobject_del(&c->kobj);
 
-       if (c->gc_thread)
+       if (!IS_ERR_OR_NULL(c->gc_thread))
                kthread_stop(c->gc_thread);
 
        if (!IS_ERR_OR_NULL(c->root))
@@ -1787,7 +1768,6 @@ static int run_cache_set(struct cache_set *c)
        set_gc_sectors(c);
 
        if (CACHE_SYNC(&c->sb)) {
-               LIST_HEAD(journal);
                struct bkey *k;
                struct jset *j;
 
index 541454b4f479ca5e8b2f24ce76d8188e5f989b95..5bb81e564ce8827869b12f5da6f6fbf6a2ac5d90 100644 (file)
@@ -175,7 +175,7 @@ SHOW(__bch_cached_dev)
        var_print(writeback_percent);
        sysfs_hprint(writeback_rate,
                     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
-       sysfs_hprint(io_errors,         atomic_read(&dc->io_errors));
+       sysfs_printf(io_errors,         "%i", atomic_read(&dc->io_errors));
        sysfs_printf(io_error_limit,    "%i", dc->error_limit);
        sysfs_printf(io_disable,        "%i", dc->io_disable);
        var_print(writeback_rate_update_seconds);
@@ -426,7 +426,7 @@ static struct attribute *bch_cached_dev_files[] = {
        &sysfs_writeback_rate_p_term_inverse,
        &sysfs_writeback_rate_minimum,
        &sysfs_writeback_rate_debug,
-       &sysfs_errors,
+       &sysfs_io_errors,
        &sysfs_io_error_limit,
        &sysfs_io_disable,
        &sysfs_dirty_data,
index 00aab6abcfe4fd3bb4e625780de0badee109dbf0..b1f5b7aea8724e33fa9eb28d51983f6885e369a7 100644 (file)
@@ -113,8 +113,6 @@ do {                                                                        \
 
 #define heap_full(h)   ((h)->used == (h)->size)
 
-#define heap_empty(h)  ((h)->used == 0)
-
 #define DECLARE_FIFO(type, name)                                       \
        struct {                                                        \
                size_t front, back, size, mask;                         \
index 08c3a9f9676c9e89649b56f765fb378973149a62..ba5395fd386d562ac070158fa03666e9197e5ce7 100644 (file)
@@ -708,6 +708,10 @@ static int bch_writeback_thread(void *arg)
                }
        }
 
+       if (dc->writeback_write_wq) {
+               flush_workqueue(dc->writeback_write_wq);
+               destroy_workqueue(dc->writeback_write_wq);
+       }
        cached_dev_put(dc);
        wait_for_kthread_stop();
 
@@ -803,6 +807,7 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc)
                                              "bcache_writeback");
        if (IS_ERR(dc->writeback_thread)) {
                cached_dev_put(dc);
+               destroy_workqueue(dc->writeback_write_wq);
                return PTR_ERR(dc->writeback_thread);
        }
 
index b29a8327eed15641df9000e019c82ad5c1cffedc..84ff70027c2520d4e04da2b4b725d0abadc74b3b 100644 (file)
@@ -541,7 +541,7 @@ static void wake_migration_worker(struct cache *cache)
 
 static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
 {
-       return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
+       return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
 }
 
 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
@@ -553,9 +553,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
 {
        struct dm_cache_migration *mg;
 
-       mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
-       if (!mg)
-               return NULL;
+       mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
 
        memset(mg, 0, sizeof(*mg));
 
@@ -663,10 +661,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
        struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
 
        cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
-       if (!cell_prealloc) {
-               defer_bio(cache, bio);
-               return false;
-       }
 
        build_key(oblock, end, &key);
        r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
@@ -1492,11 +1486,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
        struct dm_bio_prison_cell_v2 *prealloc;
 
        prealloc = alloc_prison_cell(cache);
-       if (!prealloc) {
-               DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
-               mg_complete(mg, false);
-               return -ENOMEM;
-       }
 
        /*
         * Prevent writes to the block, but allow reads to continue.
@@ -1534,11 +1523,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
        }
 
        mg = alloc_migration(cache);
-       if (!mg) {
-               policy_complete_background_work(cache->policy, op, false);
-               background_work_end(cache);
-               return -ENOMEM;
-       }
 
        mg->op = op;
        mg->overwrite_bio = bio;
@@ -1627,10 +1611,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
        struct dm_bio_prison_cell_v2 *prealloc;
 
        prealloc = alloc_prison_cell(cache);
-       if (!prealloc) {
-               invalidate_complete(mg, false);
-               return -ENOMEM;
-       }
 
        build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
        r = dm_cell_lock_v2(cache->prison, &key,
@@ -1668,10 +1648,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
                return -EPERM;
 
        mg = alloc_migration(cache);
-       if (!mg) {
-               background_work_end(cache);
-               return -ENOMEM;
-       }
 
        mg->overwrite_bio = bio;
        mg->invalidate_cblock = cblock;
index 7d480c930eaf0a1f4fe90238d830ea4ee61daebb..7e426e4d1352823235e621aac3e7be22ca891b83 100644 (file)
@@ -130,6 +130,7 @@ struct mapped_device {
 };
 
 int md_in_flight(struct mapped_device *md);
+void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
 
index f3dcc7640319e27b03a8dd10fc75d0120abaea50..34f5de13a93d160c0058a45946ef905c67657d6d 100644 (file)
@@ -949,6 +949,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
 {
 #ifdef CONFIG_BLK_DEV_INTEGRITY
        struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
+       struct mapped_device *md = dm_table_get_md(ti->table);
 
        /* From now we require underlying device with our integrity profile */
        if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
@@ -968,7 +969,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
 
        if (crypt_integrity_aead(cc)) {
                cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
-               DMINFO("Integrity AEAD, tag size %u, IV size %u.",
+               DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
                       cc->integrity_tag_size, cc->integrity_iv_size);
 
                if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
@@ -976,7 +977,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
                        return -EINVAL;
                }
        } else if (cc->integrity_iv_size)
-               DMINFO("Additional per-sector space %u bytes for IV.",
+               DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
                       cc->integrity_iv_size);
 
        if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
index dbdcc543832dfe10da9dcac2330d91a692888e66..2e22d588f0563ccc5492889f4a13cd6f66905f1e 100644 (file)
@@ -1749,7 +1749,22 @@ offload_to_thread:
                        queue_work(ic->wait_wq, &dio->work);
                        return;
                }
+               if (journal_read_pos != NOT_FOUND)
+                       dio->range.n_sectors = ic->sectors_per_block;
                wait_and_add_new_range(ic, &dio->range);
+               /*
+                * wait_and_add_new_range drops the spinlock, so the journal
+                * may have been changed arbitrarily. We need to recheck.
+                * To simplify the code, we restrict I/O size to just one block.
+                */
+               if (journal_read_pos != NOT_FOUND) {
+                       sector_t next_sector;
+                       unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+                       if (unlikely(new_pos != journal_read_pos)) {
+                               remove_range_unlocked(ic, &dio->range);
+                               goto retry;
+                       }
+               }
        }
        spin_unlock_irq(&ic->endio_wait.lock);
 
index 671c24332802e5702de7353d3a9479c004616657..3f694d9061ec5fd53e2449f601cee75d3ec6023e 100644 (file)
@@ -548,8 +548,10 @@ static int run_io_job(struct kcopyd_job *job)
         * no point in continuing.
         */
        if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
-           job->master_job->write_err)
+           job->master_job->write_err) {
+               job->write_err = job->master_job->write_err;
                return -EIO;
+       }
 
        io_job_start(job->kc->throttle);
 
@@ -601,6 +603,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
                        else
                                job->read_err = 1;
                        push(&kc->complete_jobs, job);
+                       wake(kc);
                        break;
                }
 
index baa966e2778c0bd03c8cda561a940685d4d28d89..481e54ded9dc7a3e0651a0115f823187c328f3c4 100644 (file)
@@ -554,8 +554,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
        return DM_MAPIO_REMAPPED;
 }
 
-static void multipath_release_clone(struct request *clone)
+static void multipath_release_clone(struct request *clone,
+                                   union map_info *map_context)
 {
+       if (unlikely(map_context)) {
+               /*
+                * non-NULL map_context means caller is still map
+                * method; must undo multipath_clone_and_map()
+                */
+               struct dm_mpath_io *mpio = get_mpio(map_context);
+               struct pgpath *pgpath = mpio->pgpath;
+
+               if (pgpath && pgpath->pg->ps.type->end_io)
+                       pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
+                                                   &pgpath->path,
+                                                   mpio->nr_bytes);
+       }
+
        blk_put_request(clone);
 }
 
index c44925e4e4813d246d0d208fef5587019e4afb17..b78a8a4d061caf0b6f396b66e4a7f9a5478a5323 100644 (file)
@@ -3199,7 +3199,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                          */
                        r = rs_prepare_reshape(rs);
                        if (r)
-                               return r;
+                               goto bad;
 
                        /* Reshaping ain't recovery, so disable recovery */
                        rs_setup_recovery(rs, MaxSector);
index 6e547b8dd2982425ed51dff7b5c3a027ccb6cab7..4d36373e1c0f044264af149bfb46efe7aa6c25c5 100644 (file)
@@ -219,7 +219,7 @@ static void dm_end_request(struct request *clone, blk_status_t error)
        struct request *rq = tio->orig;
 
        blk_rq_unprep_clone(clone);
-       tio->ti->type->release_clone_rq(clone);
+       tio->ti->type->release_clone_rq(clone, NULL);
 
        rq_end_stats(md, rq);
        if (!rq->q->mq_ops)
@@ -270,7 +270,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
        rq_end_stats(md, rq);
        if (tio->clone) {
                blk_rq_unprep_clone(tio->clone);
-               tio->ti->type->release_clone_rq(tio->clone);
+               tio->ti->type->release_clone_rq(tio->clone, NULL);
        }
 
        if (!rq->q->mq_ops)
@@ -295,11 +295,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
        }
 
        if (unlikely(error == BLK_STS_TARGET)) {
-               if (req_op(clone) == REQ_OP_WRITE_SAME &&
-                   !clone->q->limits.max_write_same_sectors)
+               if (req_op(clone) == REQ_OP_DISCARD &&
+                   !clone->q->limits.max_discard_sectors)
+                       disable_discard(tio->md);
+               else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+                        !clone->q->limits.max_write_same_sectors)
                        disable_write_same(tio->md);
-               if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
-                   !clone->q->limits.max_write_zeroes_sectors)
+               else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+                        !clone->q->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(tio->md);
        }
 
@@ -492,7 +495,7 @@ check_again:
        case DM_MAPIO_REMAPPED:
                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
-                       ti->type->release_clone_rq(clone);
+                       ti->type->release_clone_rq(clone, &tio->info);
                        return DM_MAPIO_REQUEUE;
                }
 
@@ -502,7 +505,8 @@ check_again:
                ret = dm_dispatch_clone_request(clone, rq);
                if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
                        blk_rq_unprep_clone(clone);
-                       tio->ti->type->release_clone_rq(clone);
+                       blk_mq_cleanup_rq(clone);
+                       tio->ti->type->release_clone_rq(clone, &tio->info);
                        tio->clone = NULL;
                        if (!rq->q->mq_ops)
                                r = DM_MAPIO_DELAY_REQUEUE;
index c7fe4789c40efc6d0892c202773327bc6492e34c..36275c59e4e7b06aa41c45fc36d5d4b30637486b 100644 (file)
@@ -562,7 +562,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
                gfp = GFP_NOIO;
        }
        argv = kmalloc_array(new_size, sizeof(*argv), gfp);
-       if (argv) {
+       if (argv && old_argv) {
                memcpy(argv, old_argv, *size * sizeof(*argv));
                *size = new_size;
        }
@@ -1349,7 +1349,7 @@ void dm_table_event(struct dm_table *t)
 }
 EXPORT_SYMBOL(dm_table_event);
 
-sector_t dm_table_get_size(struct dm_table *t)
+inline sector_t dm_table_get_size(struct dm_table *t)
 {
        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
 }
@@ -1374,6 +1374,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
        unsigned int l, n = 0, k = 0;
        sector_t *node;
 
+       if (unlikely(sector >= dm_table_get_size(t)))
+               return &t->targets[t->num_targets];
+
        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);
                node = get_node(t, l, n);
index 314d17ca64668a70ea1f6445111ca19b2024141e..64dd0b34fcf490cee3779e179e9b8c7c543b7e53 100644 (file)
@@ -136,7 +136,8 @@ static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
        return DM_MAPIO_KILL;
 }
 
-static void io_err_release_clone_rq(struct request *clone)
+static void io_err_release_clone_rq(struct request *clone,
+                                   union map_info *map_context)
 {
 }
 
index ed3caceaed07c07c33e16b9038f0c3bffd7616d5..6a26afcc1fd6b15f9e17f8b860cdb193f0443446 100644 (file)
@@ -2001,16 +2001,19 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
 
 int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
 {
-       int r;
+       int r = -EINVAL;
        struct dm_block *sblock;
        struct thin_disk_superblock *disk_super;
 
        down_write(&pmd->root_lock);
+       if (pmd->fail_io)
+               goto out;
+
        pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
 
        r = superblock_lock(pmd, &sblock);
        if (r) {
-               DMERR("couldn't read superblock");
+               DMERR("couldn't lock superblock");
                goto out;
        }
 
index fc65f0dedf7f702b31d6adf21b8a2b26238c8b43..e3599b43f9eb984ccb52e8484ec5e7b6c3a8cd54 100644 (file)
@@ -236,8 +236,8 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
                BUG();
        }
 
-       DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
-               block);
+       DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
+                   type_str, block);
 
        if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
                DMERR("%s: reached maximum errors", v->data_dev->name);
index d8334cd45d7cb5eb90745b09463cf4daaa231021..7e8d7fc99410dff89b1fbd7380dbed5bb97c4a27 100644 (file)
@@ -401,15 +401,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
        sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
        struct bio *bio;
 
+       if (dmz_bdev_is_dying(zmd->dev))
+               return ERR_PTR(-EIO);
+
        /* Get a new block and a BIO to read it */
        mblk = dmz_alloc_mblock(zmd, mblk_no);
        if (!mblk)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        bio = bio_alloc(GFP_NOIO, 1);
        if (!bio) {
                dmz_free_mblock(zmd, mblk);
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
        spin_lock(&zmd->mblk_lock);
@@ -540,8 +543,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
        if (!mblk) {
                /* Cache miss: read the block from disk */
                mblk = dmz_get_mblock_slow(zmd, mblk_no);
-               if (!mblk)
-                       return ERR_PTR(-ENOMEM);
+               if (IS_ERR(mblk))
+                       return mblk;
        }
 
        /* Wait for on-going read I/O and check for error */
@@ -569,16 +572,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
 /*
  * Issue a metadata block write BIO.
  */
-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
-                            unsigned int set)
+static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+                           unsigned int set)
 {
        sector_t block = zmd->sb[set].block + mblk->no;
        struct bio *bio;
 
+       if (dmz_bdev_is_dying(zmd->dev))
+               return -EIO;
+
        bio = bio_alloc(GFP_NOIO, 1);
        if (!bio) {
                set_bit(DMZ_META_ERROR, &mblk->state);
-               return;
+               return -ENOMEM;
        }
 
        set_bit(DMZ_META_WRITING, &mblk->state);
@@ -590,6 +596,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
        bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
        submit_bio(bio);
+
+       return 0;
 }
 
 /*
@@ -601,6 +609,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
        struct bio *bio;
        int ret;
 
+       if (dmz_bdev_is_dying(zmd->dev))
+               return -EIO;
+
        bio = bio_alloc(GFP_NOIO, 1);
        if (!bio)
                return -ENOMEM;
@@ -658,22 +669,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
 {
        struct dmz_mblock *mblk;
        struct blk_plug plug;
-       int ret = 0;
+       int ret = 0, nr_mblks_submitted = 0;
 
        /* Issue writes */
        blk_start_plug(&plug);
-       list_for_each_entry(mblk, write_list, link)
-               dmz_write_mblock(zmd, mblk, set);
+       list_for_each_entry(mblk, write_list, link) {
+               ret = dmz_write_mblock(zmd, mblk, set);
+               if (ret)
+                       break;
+               nr_mblks_submitted++;
+       }
        blk_finish_plug(&plug);
 
        /* Wait for completion */
        list_for_each_entry(mblk, write_list, link) {
+               if (!nr_mblks_submitted)
+                       break;
                wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
                               TASK_UNINTERRUPTIBLE);
                if (test_bit(DMZ_META_ERROR, &mblk->state)) {
                        clear_bit(DMZ_META_ERROR, &mblk->state);
                        ret = -EIO;
                }
+               nr_mblks_submitted--;
        }
 
        /* Flush drive cache (this will also sync data) */
@@ -735,6 +753,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
         */
        dmz_lock_flush(zmd);
 
+       if (dmz_bdev_is_dying(zmd->dev)) {
+               ret = -EIO;
+               goto out;
+       }
+
        /* Get dirty blocks */
        spin_lock(&zmd->mblk_lock);
        list_splice_init(&zmd->mblk_dirty_list, &write_list);
@@ -1534,7 +1557,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
        struct dm_zone *zone;
 
        if (list_empty(&zmd->map_rnd_list))
-               return NULL;
+               return ERR_PTR(-EBUSY);
 
        list_for_each_entry(zone, &zmd->map_rnd_list, link) {
                if (dmz_is_buf(zone))
@@ -1545,7 +1568,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
                        return dzone;
        }
 
-       return NULL;
+       return ERR_PTR(-EBUSY);
 }
 
 /*
@@ -1556,7 +1579,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
        struct dm_zone *zone;
 
        if (list_empty(&zmd->map_seq_list))
-               return NULL;
+               return ERR_PTR(-EBUSY);
 
        list_for_each_entry(zone, &zmd->map_seq_list, link) {
                if (!zone->bzone)
@@ -1565,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
                        return zone;
        }
 
-       return NULL;
+       return ERR_PTR(-EBUSY);
 }
 
 /*
@@ -1593,30 +1616,6 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
        return zone;
 }
 
-/*
- * Activate a zone (increment its reference count).
- */
-void dmz_activate_zone(struct dm_zone *zone)
-{
-       set_bit(DMZ_ACTIVE, &zone->flags);
-       atomic_inc(&zone->refcount);
-}
-
-/*
- * Deactivate a zone. This decrement the zone reference counter
- * and clears the active state of the zone once the count reaches 0,
- * indicating that all BIOs to the zone have completed. Returns
- * true if the zone was deactivated.
- */
-void dmz_deactivate_zone(struct dm_zone *zone)
-{
-       if (atomic_dec_and_test(&zone->refcount)) {
-               WARN_ON(!test_bit(DMZ_ACTIVE, &zone->flags));
-               clear_bit_unlock(DMZ_ACTIVE, &zone->flags);
-               smp_mb__after_atomic();
-       }
-}
-
 /*
  * Get the zone mapping a chunk, if the chunk is mapped already.
  * If no mapping exist and the operation is WRITE, a zone is
@@ -1647,6 +1646,10 @@ again:
                /* Alloate a random zone */
                dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
                if (!dzone) {
+                       if (dmz_bdev_is_dying(zmd->dev)) {
+                               dzone = ERR_PTR(-EIO);
+                               goto out;
+                       }
                        dmz_wait_for_free_zones(zmd);
                        goto again;
                }
@@ -1744,6 +1747,10 @@ again:
        /* Alloate a random zone */
        bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
        if (!bzone) {
+               if (dmz_bdev_is_dying(zmd->dev)) {
+                       bzone = ERR_PTR(-EIO);
+                       goto out;
+               }
                dmz_wait_for_free_zones(zmd);
                goto again;
        }
index edf4b95eb0750dc6485513d49c240b2982017114..9470b8f77a337bb22e639ea8e45defc12cb59cb2 100644 (file)
@@ -37,7 +37,7 @@ enum {
 /*
  * Number of seconds of target BIO inactivity to consider the target idle.
  */
-#define DMZ_IDLE_PERIOD                (10UL * HZ)
+#define DMZ_IDLE_PERIOD                        (10UL * HZ)
 
 /*
  * Percentage of unmapped (free) random zones below which reclaim starts
@@ -134,6 +134,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
                set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
 
        while (block < end_block) {
+               if (dev->flags & DMZ_BDEV_DYING)
+                       return -EIO;
+
                /* Get a valid region from the source zone */
                ret = dmz_first_valid_block(zmd, src_zone, &block);
                if (ret <= 0)
@@ -215,7 +218,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 
        dmz_unlock_flush(zmd);
 
-       return 0;
+       return ret;
 }
 
 /*
@@ -259,7 +262,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 
        dmz_unlock_flush(zmd);
 
-       return 0;
+       return ret;
 }
 
 /*
@@ -312,7 +315,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 
        dmz_unlock_flush(zmd);
 
-       return 0;
+       return ret;
 }
 
 /*
@@ -334,7 +337,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 /*
  * Find a candidate zone for reclaim and process it.
  */
-static void dmz_reclaim(struct dmz_reclaim *zrc)
+static int dmz_do_reclaim(struct dmz_reclaim *zrc)
 {
        struct dmz_metadata *zmd = zrc->metadata;
        struct dm_zone *dzone;
@@ -344,8 +347,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
 
        /* Get a data zone */
        dzone = dmz_get_zone_for_reclaim(zmd);
-       if (!dzone)
-               return;
+       if (IS_ERR(dzone))
+               return PTR_ERR(dzone);
 
        start = jiffies;
 
@@ -391,13 +394,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
 out:
        if (ret) {
                dmz_unlock_zone_reclaim(dzone);
-               return;
+               return ret;
        }
 
-       (void) dmz_flush_metadata(zrc->metadata);
+       ret = dmz_flush_metadata(zrc->metadata);
+       if (ret) {
+               dmz_dev_debug(zrc->dev,
+                             "Metadata flush for zone %u failed, err %d\n",
+                             dmz_id(zmd, rzone), ret);
+               return ret;
+       }
 
        dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
                      dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
+       return 0;
 }
 
 /*
@@ -442,6 +452,10 @@ static void dmz_reclaim_work(struct work_struct *work)
        struct dmz_metadata *zmd = zrc->metadata;
        unsigned int nr_rnd, nr_unmap_rnd;
        unsigned int p_unmap_rnd;
+       int ret;
+
+       if (dmz_bdev_is_dying(zrc->dev))
+               return;
 
        if (!dmz_should_reclaim(zrc)) {
                mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
@@ -471,7 +485,17 @@ static void dmz_reclaim_work(struct work_struct *work)
                      (dmz_target_idle(zrc) ? "Idle" : "Busy"),
                      p_unmap_rnd, nr_unmap_rnd, nr_rnd);
 
-       dmz_reclaim(zrc);
+       ret = dmz_do_reclaim(zrc);
+       if (ret) {
+               dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
+               if (ret == -EIO)
+                       /*
+                        * LLD might be performing some error handling sequence
+                        * at the underlying device. To not interfere, do not
+                        * attempt to schedule the next reclaim run immediately.
+                        */
+                       return;
+       }
 
        dmz_schedule_reclaim(zrc);
 }
index 85fb2baa8a7fa0addb106936adb2e2719c1dbe40..3dd668f6940512903b9b76d6d642fd43a850349f 100644 (file)
@@ -277,8 +277,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
 
        /* Get the buffer zone. One will be allocated if needed */
        bzone = dmz_get_chunk_buffer(zmd, zone);
-       if (!bzone)
-               return -ENOSPC;
+       if (IS_ERR(bzone))
+               return PTR_ERR(bzone);
 
        if (dmz_is_readonly(bzone))
                return -EROFS;
@@ -389,6 +389,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
 
        dmz_lock_metadata(zmd);
 
+       if (dmz->dev->flags & DMZ_BDEV_DYING) {
+               ret = -EIO;
+               goto out;
+       }
+
        /*
         * Get the data zone mapping the chunk. There may be no
         * mapping for read and discard. If a mapping is obtained,
@@ -493,6 +498,8 @@ static void dmz_flush_work(struct work_struct *work)
 
        /* Flush dirty metadata blocks */
        ret = dmz_flush_metadata(dmz->metadata);
+       if (ret)
+               dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
 
        /* Process queued flush requests */
        while (1) {
@@ -513,22 +520,24 @@ static void dmz_flush_work(struct work_struct *work)
  * Get a chunk work and start it to process a new BIO.
  * If the BIO chunk has no work yet, create one.
  */
-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 {
        unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
        struct dm_chunk_work *cw;
+       int ret = 0;
 
        mutex_lock(&dmz->chunk_lock);
 
        /* Get the BIO chunk work. If one is not active yet, create one */
        cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
        if (!cw) {
-               int ret;
 
                /* Create a new chunk work */
                cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
-               if (!cw)
+               if (unlikely(!cw)) {
+                       ret = -ENOMEM;
                        goto out;
+               }
 
                INIT_WORK(&cw->work, dmz_chunk_work);
                atomic_set(&cw->refcount, 0);
@@ -539,7 +548,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
                ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
                if (unlikely(ret)) {
                        kfree(cw);
-                       cw = NULL;
                        goto out;
                }
        }
@@ -547,10 +555,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
        bio_list_add(&cw->bio_list, bio);
        dmz_get_chunk_work(cw);
 
+       dmz_reclaim_bio_acc(dmz->reclaim);
        if (queue_work(dmz->chunk_wq, &cw->work))
                dmz_get_chunk_work(cw);
 out:
        mutex_unlock(&dmz->chunk_lock);
+       return ret;
+}
+
+/*
+ * Check the backing device availability. If it's on the way out,
+ * start failing I/O. Reclaim and metadata components also call this
+ * function to cleanly abort operation in the event of such failure.
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
+{
+       struct gendisk *disk;
+
+       if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
+               disk = dmz_dev->bdev->bd_disk;
+               if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
+                       dmz_dev_warn(dmz_dev, "Backing device queue dying");
+                       dmz_dev->flags |= DMZ_BDEV_DYING;
+               } else if (disk->fops->check_events) {
+                       if (disk->fops->check_events(disk, 0) &
+                                       DISK_EVENT_MEDIA_CHANGE) {
+                               dmz_dev_warn(dmz_dev, "Backing device offline");
+                               dmz_dev->flags |= DMZ_BDEV_DYING;
+                       }
+               }
+       }
+
+       return dmz_dev->flags & DMZ_BDEV_DYING;
 }
 
 /*
@@ -564,6 +600,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
        sector_t sector = bio->bi_iter.bi_sector;
        unsigned int nr_sectors = bio_sectors(bio);
        sector_t chunk_sector;
+       int ret;
+
+       if (dmz_bdev_is_dying(dmz->dev))
+               return DM_MAPIO_KILL;
 
        dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
                      bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -601,8 +641,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
                dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
 
        /* Now ready to handle this BIO */
-       dmz_reclaim_bio_acc(dmz->reclaim);
-       dmz_queue_chunk_work(dmz, bio);
+       ret = dmz_queue_chunk_work(dmz, bio);
+       if (ret) {
+               dmz_dev_debug(dmz->dev,
+                             "BIO op %d, can't process chunk %llu, err %i\n",
+                             bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
+                             ret);
+               return DM_MAPIO_REQUEUE;
+       }
 
        return DM_MAPIO_SUBMITTED;
 }
@@ -856,6 +902,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
 {
        struct dmz_target *dmz = ti->private;
 
+       if (dmz_bdev_is_dying(dmz->dev))
+               return -ENODEV;
+
        *bdev = dmz->dev->bdev;
 
        return 0;
index 12419f0bfe78ba14fd8e0c29249f26a22a99095b..93a64529f21902968a705c6ea88a292ad6161d24 100644 (file)
@@ -56,6 +56,8 @@ struct dmz_dev {
 
        unsigned int            nr_zones;
 
+       unsigned int            flags;
+
        sector_t                zone_nr_sectors;
        unsigned int            zone_nr_sectors_shift;
 
@@ -67,6 +69,9 @@ struct dmz_dev {
                                 (dev)->zone_nr_sectors_shift)
 #define dmz_chunk_block(dev, b)        ((b) & ((dev)->zone_nr_blocks - 1))
 
+/* Device flags. */
+#define DMZ_BDEV_DYING         (1 << 0)
+
 /*
  * Zone descriptor.
  */
@@ -115,7 +120,6 @@ enum {
        DMZ_BUF,
 
        /* Zone internal state */
-       DMZ_ACTIVE,
        DMZ_RECLAIM,
        DMZ_SEQ_WRITE_ERR,
 };
@@ -128,7 +132,6 @@ enum {
 #define dmz_is_empty(z)                ((z)->wp_block == 0)
 #define dmz_is_offline(z)      test_bit(DMZ_OFFLINE, &(z)->flags)
 #define dmz_is_readonly(z)     test_bit(DMZ_READ_ONLY, &(z)->flags)
-#define dmz_is_active(z)       test_bit(DMZ_ACTIVE, &(z)->flags)
 #define dmz_in_reclaim(z)      test_bit(DMZ_RECLAIM, &(z)->flags)
 #define dmz_seq_write_err(z)   test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)
 
@@ -188,8 +191,30 @@ void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
 unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
 unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
 
-void dmz_activate_zone(struct dm_zone *zone);
-void dmz_deactivate_zone(struct dm_zone *zone);
+/*
+ * Activate a zone (increment its reference count).
+ */
+static inline void dmz_activate_zone(struct dm_zone *zone)
+{
+       atomic_inc(&zone->refcount);
+}
+
+/*
+ * Deactivate a zone. This decrement the zone reference counter
+ * indicating that all BIOs to the zone have completed when the count is 0.
+ */
+static inline void dmz_deactivate_zone(struct dm_zone *zone)
+{
+       atomic_dec(&zone->refcount);
+}
+
+/*
+ * Test if a zone is active, that is, has a refcount > 0.
+ */
+static inline bool dmz_is_active(struct dm_zone *zone)
+{
+       return atomic_read(&zone->refcount);
+}
 
 int dmz_lock_zone_reclaim(struct dm_zone *zone);
 void dmz_unlock_zone_reclaim(struct dm_zone *zone);
@@ -225,4 +250,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
 void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
 void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
 
+/*
+ * Functions defined in dm-zoned-target.c
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
+
 #endif /* DM_ZONED_H */
index 42768fe92b41b21a4f2cdd02efce6285d9248a6b..c9860e3b04ddf10d5a5c8c9ad9f5f0e048599a36 100644 (file)
@@ -910,6 +910,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
        }
 }
 
+void disable_discard(struct mapped_device *md)
+{
+       struct queue_limits *limits = dm_get_queue_limits(md);
+
+       /* device doesn't really support DISCARD, disable it */
+       limits->max_discard_sectors = 0;
+       blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
 void disable_write_same(struct mapped_device *md)
 {
        struct queue_limits *limits = dm_get_queue_limits(md);
@@ -935,11 +944,14 @@ static void clone_endio(struct bio *bio)
        dm_endio_fn endio = tio->ti->type->end_io;
 
        if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
-               if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-                   !bio->bi_disk->queue->limits.max_write_same_sectors)
+               if (bio_op(bio) == REQ_OP_DISCARD &&
+                   !bio->bi_disk->queue->limits.max_discard_sectors)
+                       disable_discard(md);
+               else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+                        !bio->bi_disk->queue->limits.max_write_same_sectors)
                        disable_write_same(md);
-               if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-                   !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+               else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+                        !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(md);
        }
 
index fb5d702e43b5b31f06fcdce0ba3cef2cdc7ebf6c..a8fbaa384e9ae5174327f57daafb24616f92c5f8 100644 (file)
@@ -1770,8 +1770,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
                                if (!(le32_to_cpu(sb->feature_map) &
                                      MD_FEATURE_RECOVERY_BITMAP))
                                        rdev->saved_raid_disk = -1;
-                       } else
-                               set_bit(In_sync, &rdev->flags);
+                       } else {
+                               /*
+                                * If the array is FROZEN, then the device can't
+                                * be in_sync with rest of array.
+                                */
+                               if (!test_bit(MD_RECOVERY_FROZEN,
+                                             &mddev->recovery))
+                                       set_bit(In_sync, &rdev->flags);
+                       }
                        rdev->raid_disk = role;
                        break;
                }
@@ -4116,7 +4123,7 @@ array_state_show(struct mddev *mddev, char *page)
 {
        enum array_state st = inactive;
 
-       if (mddev->pers)
+       if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags))
                switch(mddev->ro) {
                case 1:
                        st = readonly;
@@ -5671,9 +5678,6 @@ int md_run(struct mddev *mddev)
                md_update_sb(mddev, 0);
 
        md_new_event(mddev);
-       sysfs_notify_dirent_safe(mddev->sysfs_state);
-       sysfs_notify_dirent_safe(mddev->sysfs_action);
-       sysfs_notify(&mddev->kobj, NULL, "degraded");
        return 0;
 
 abort:
@@ -5687,6 +5691,7 @@ static int do_md_run(struct mddev *mddev)
 {
        int err;
 
+       set_bit(MD_NOT_READY, &mddev->flags);
        err = md_run(mddev);
        if (err)
                goto out;
@@ -5707,9 +5712,14 @@ static int do_md_run(struct mddev *mddev)
 
        set_capacity(mddev->gendisk, mddev->array_sectors);
        revalidate_disk(mddev->gendisk);
+       clear_bit(MD_NOT_READY, &mddev->flags);
        mddev->changed = 1;
        kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+       sysfs_notify_dirent_safe(mddev->sysfs_state);
+       sysfs_notify_dirent_safe(mddev->sysfs_action);
+       sysfs_notify(&mddev->kobj, NULL, "degraded");
 out:
+       clear_bit(MD_NOT_READY, &mddev->flags);
        return err;
 }
 
@@ -8797,6 +8807,7 @@ void md_check_recovery(struct mddev *mddev)
 
        if (mddev_trylock(mddev)) {
                int spares = 0;
+               bool try_set_sync = mddev->safemode != 0;
 
                if (!mddev->external && mddev->safemode == 1)
                        mddev->safemode = 0;
@@ -8842,7 +8853,7 @@ void md_check_recovery(struct mddev *mddev)
                        }
                }
 
-               if (!mddev->external && !mddev->in_sync) {
+               if (try_set_sync && !mddev->external && !mddev->in_sync) {
                        spin_lock(&mddev->lock);
                        set_in_sync(mddev);
                        spin_unlock(&mddev->lock);
@@ -8948,7 +8959,8 @@ void md_reap_sync_thread(struct mddev *mddev)
        /* resync has finished, collect result */
        md_unregister_thread(&mddev->sync_thread);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
-           !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+           !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
+           mddev->degraded != mddev->raid_disks) {
                /* success...*/
                /* activate any spares */
                if (mddev->pers->spare_active(mddev)) {
index 325cb2136a49e35503d1d98f0c8e638821b0970f..4f89463e0b01e6144c4327f0a86cc7259c065ac8 100644 (file)
@@ -243,6 +243,9 @@ enum mddev_flags {
        MD_UPDATING_SB,         /* md_check_recovery is updating the metadata
                                 * without explicitly holding reconfig_mutex.
                                 */
+       MD_NOT_READY,           /* do_md_run() is active, so 'array_state'
+                                * must not report that array is ready yet
+                                */
 };
 
 enum mddev_sb_flags {
index 58b319757b1e5a1c274bd98681d6ce229bbbabbe..8aae0624a2971e939fa79acd656f98035049460d 100644 (file)
@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
 
        new_parent = shadow_current(s);
 
+       pn = dm_block_data(new_parent);
+       size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+               sizeof(__le64) : s->info->value_type.size;
+
+       /* create & init the left block */
        r = new_block(s->info, &left);
        if (r < 0)
                return r;
 
+       ln = dm_block_data(left);
+       nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+
+       ln->header.flags = pn->header.flags;
+       ln->header.nr_entries = cpu_to_le32(nr_left);
+       ln->header.max_entries = pn->header.max_entries;
+       ln->header.value_size = pn->header.value_size;
+       memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+       memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
+
+       /* create & init the right block */
        r = new_block(s->info, &right);
        if (r < 0) {
                unlock_block(s->info, left);
                return r;
        }
 
-       pn = dm_block_data(new_parent);
-       ln = dm_block_data(left);
        rn = dm_block_data(right);
-
-       nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
        nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
 
-       ln->header.flags = pn->header.flags;
-       ln->header.nr_entries = cpu_to_le32(nr_left);
-       ln->header.max_entries = pn->header.max_entries;
-       ln->header.value_size = pn->header.value_size;
-
        rn->header.flags = pn->header.flags;
        rn->header.nr_entries = cpu_to_le32(nr_right);
        rn->header.max_entries = pn->header.max_entries;
        rn->header.value_size = pn->header.value_size;
-
-       memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
        memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
-
-       size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
-               sizeof(__le64) : s->info->value_type.size;
-       memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
        memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
               nr_right * size);
 
index aec44924396622729f64e84abda381d24606e14a..25328582cc4820dc29d0e79a8012eeeb8a7fb2f4 100644 (file)
@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
        }
 
        if (smm->recursion_count == 1)
-               apply_bops(smm);
+               r = apply_bops(smm);
 
        smm->recursion_count--;
 
index f4daa56d204dd1880044c61747faa84a2e0cf3c4..3cafbfd655f5df859c066af0ecd3cc6e84b83496 100644 (file)
@@ -26,6 +26,9 @@
 #include "raid0.h"
 #include "raid5.h"
 
+static int default_layout = 0;
+module_param(default_layout, int, 0644);
+
 #define UNSUPPORTED_MDDEV_FLAGS                \
        ((1L << MD_HAS_JOURNAL) |       \
         (1L << MD_JOURNAL_CLEAN) |     \
@@ -146,6 +149,19 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
        }
        pr_debug("md/raid0:%s: FINAL %d zones\n",
                 mdname(mddev), conf->nr_strip_zones);
+
+       if (conf->nr_strip_zones == 1) {
+               conf->layout = RAID0_ORIG_LAYOUT;
+       } else if (default_layout == RAID0_ORIG_LAYOUT ||
+                  default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
+               conf->layout = default_layout;
+       } else {
+               pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
+                      mdname(mddev));
+               pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
+               err = -ENOTSUPP;
+               goto abort;
+       }
        /*
         * now since we have the hard sector sizes, we can make sure
         * chunk size is a multiple of that sector size
@@ -555,10 +571,12 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
 
 static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
+       struct r0conf *conf = mddev->private;
        struct strip_zone *zone;
        struct md_rdev *tmp_dev;
        sector_t bio_sector;
        sector_t sector;
+       sector_t orig_sector;
        unsigned chunk_sects;
        unsigned sectors;
 
@@ -592,8 +610,21 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
                bio = split;
        }
 
+       orig_sector = sector;
        zone = find_zone(mddev->private, &sector);
-       tmp_dev = map_sector(mddev, zone, sector, &sector);
+       switch (conf->layout) {
+       case RAID0_ORIG_LAYOUT:
+               tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
+               break;
+       case RAID0_ALT_MULTIZONE_LAYOUT:
+               tmp_dev = map_sector(mddev, zone, sector, &sector);
+               break;
+       default:
+               WARN("md/raid0:%s: Invalid layout\n", mdname(mddev));
+               bio_io_error(bio);
+               return true;
+       }
+
        bio_set_dev(bio, tmp_dev->bdev);
        bio->bi_iter.bi_sector = sector + zone->dev_start +
                tmp_dev->data_offset;
index 540e65d92642d8427fbe3af4d5960834cc967a74..3816e5477db1e743afeaf707c1971f1dbdce1901 100644 (file)
@@ -8,11 +8,25 @@ struct strip_zone {
        int      nb_dev;        /* # of devices attached to the zone */
 };
 
+/* Linux 3.14 (20d0189b101) made an unintended change to
+ * the RAID0 layout for multi-zone arrays (where devices aren't all
+ * the same size.
+ * RAID0_ORIG_LAYOUT restores the original layout
+ * RAID0_ALT_MULTIZONE_LAYOUT uses the altered layout
+ * The layouts are identical when there is only one zone (all
+ * devices the same size).
+ */
+
+enum r0layout {
+       RAID0_ORIG_LAYOUT = 1,
+       RAID0_ALT_MULTIZONE_LAYOUT = 2,
+};
 struct r0conf {
        struct strip_zone       *strip_zone;
        struct md_rdev          **devlist; /* lists of rdevs, pointed to
                                            * by strip_zone->dev */
        int                     nr_strip_zones;
+       enum r0layout           layout;
 };
 
 #endif
index fa47249fa3e42819a76f2931963cebec4accda40..6929d110d80488e72bce1d8830289cbce904e434 100644 (file)
@@ -434,19 +434,21 @@ static void raid1_end_write_request(struct bio *bio)
                    /* We never try FailFast to WriteMostly devices */
                    !test_bit(WriteMostly, &rdev->flags)) {
                        md_error(r1_bio->mddev, rdev);
-                       if (!test_bit(Faulty, &rdev->flags))
-                               /* This is the only remaining device,
-                                * We need to retry the write without
-                                * FailFast
-                                */
-                               set_bit(R1BIO_WriteError, &r1_bio->state);
-                       else {
-                               /* Finished with this branch */
-                               r1_bio->bios[mirror] = NULL;
-                               to_put = bio;
-                       }
-               } else
+               }
+
+               /*
+                * When the device is faulty, it is not necessary to
+                * handle write error.
+                * For failfast, this is the only remaining device,
+                * We need to retry the write without FailFast.
+                */
+               if (!test_bit(Faulty, &rdev->flags))
                        set_bit(R1BIO_WriteError, &r1_bio->state);
+               else {
+                       /* Finished with this branch */
+                       r1_bio->bios[mirror] = NULL;
+                       to_put = bio;
+               }
        } else {
                /*
                 * Set R1BIO_Uptodate in our master bio, so that we
@@ -3103,6 +3105,13 @@ static int raid1_run(struct mddev *mddev)
                    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
                    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
                        mddev->degraded++;
+       /*
+        * RAID1 needs at least one disk in active
+        */
+       if (conf->raid_disks - mddev->degraded < 1) {
+               ret = -EINVAL;
+               goto abort;
+       }
 
        if (conf->raid_disks - mddev->degraded == 1)
                mddev->recovery_cp = MaxSector;
@@ -3136,8 +3145,12 @@ static int raid1_run(struct mddev *mddev)
        ret =  md_integrity_register(mddev);
        if (ret) {
                md_unregister_thread(&mddev->thread);
-               raid1_free(mddev, conf);
+               goto abort;
        }
+       return 0;
+
+abort:
+       raid1_free(mddev, conf);
        return ret;
 }
 
index f237d6f307529ae42b1ae47277a88ca80f2fb0af..4a5aad26ded75db670a25efbf0897c1d03d7e519 100644 (file)
@@ -2540,7 +2540,8 @@ static void raid5_end_read_request(struct bio * bi)
                int set_bad = 0;
 
                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
-               atomic_inc(&rdev->read_errors);
+               if (!(bi->bi_status == BLK_STS_PROTECTION))
+                       atomic_inc(&rdev->read_errors);
                if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
                        pr_warn_ratelimited(
                                "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
@@ -2572,7 +2573,9 @@ static void raid5_end_read_request(struct bio * bi)
                    && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
                        retry = 1;
                if (retry)
-                       if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
+                       if (sh->qd_idx >= 0 && sh->pd_idx == i)
+                               set_bit(R5_ReadError, &sh->dev[i].flags);
+                       else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
                                set_bit(R5_ReadError, &sh->dev[i].flags);
                                clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
                        } else
@@ -5721,7 +5724,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
                                do_flush = false;
                        }
 
-                       set_bit(STRIPE_HANDLE, &sh->state);
+                       if (!sh->batch_head)
+                               set_bit(STRIPE_HANDLE, &sh->state);
                        clear_bit(STRIPE_DELAYED, &sh->state);
                        if ((!sh->batch_head || sh == sh->batch_head) &&
                            (bi->bi_opf & REQ_SYNC) &&
@@ -7670,7 +7674,7 @@ abort:
 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 {
        struct r5conf *conf = mddev->private;
-       int err = -EEXIST;
+       int ret, err = -EEXIST;
        int disk;
        struct disk_info *p;
        int first = 0;
@@ -7685,7 +7689,14 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                 * The array is in readonly mode if journal is missing, so no
                 * write requests running. We should be safe
                 */
-               log_init(conf, rdev, false);
+               ret = log_init(conf, rdev, false);
+               if (ret)
+                       return ret;
+
+               ret = r5l_start(conf->log);
+               if (ret)
+                       return ret;
+
                return 0;
        }
        if (mddev->recovery_disabled == conf->recovery_disabled)
index 29a2ab9e77c5dffd8fadc034450f83bdbe9e2aad..ad8677d8c89679bb6b0eda8357a136dfb920fbf3 100644 (file)
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
+cec-objs := cec-core.o cec-adap.o cec-api.o
 
 ifeq ($(CONFIG_CEC_NOTIFIER),y)
   cec-objs += cec-notifier.o
index a7ea27d2aa8ef219d07265bc03fbae9e8c71756f..4a15d53f659ecf8496bc28c38a02780b66f30b17 100644 (file)
@@ -62,6 +62,19 @@ static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr
        return adap->log_addrs.primary_device_type[i < 0 ? 0 : i];
 }
 
+u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
+                          unsigned int *offset)
+{
+       unsigned int loc = cec_get_edid_spa_location(edid, size);
+
+       if (offset)
+               *offset = loc;
+       if (loc == 0)
+               return CEC_PHYS_ADDR_INVALID;
+       return (edid[loc] << 8) | edid[loc + 1];
+}
+EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
+
 /*
  * Queue a new event for this filehandle. If ts == 0, then set it
  * to the current time.
diff --git a/drivers/media/cec/cec-edid.c b/drivers/media/cec/cec-edid.c
deleted file mode 100644 (file)
index f587e8e..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * cec-edid - HDMI Consumer Electronics Control EDID & CEC helper functions
- *
- * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <media/cec.h>
-
-u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
-                          unsigned int *offset)
-{
-       unsigned int loc = cec_get_edid_spa_location(edid, size);
-
-       if (offset)
-               *offset = loc;
-       if (loc == 0)
-               return CEC_PHYS_ADDR_INVALID;
-       return (edid[loc] << 8) | edid[loc + 1];
-}
-EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
-
-void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
-{
-       unsigned int loc = cec_get_edid_spa_location(edid, size);
-       u8 sum = 0;
-       unsigned int i;
-
-       if (loc == 0)
-               return;
-       edid[loc] = phys_addr >> 8;
-       edid[loc + 1] = phys_addr & 0xff;
-       loc &= ~0x7f;
-
-       /* update the checksum */
-       for (i = loc; i < loc + 127; i++)
-               sum += edid[i];
-       edid[i] = 256 - sum;
-}
-EXPORT_SYMBOL_GPL(cec_set_edid_phys_addr);
-
-u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
-{
-       /* Check if input is sane */
-       if (WARN_ON(input == 0 || input > 0xf))
-               return CEC_PHYS_ADDR_INVALID;
-
-       if (phys_addr == 0)
-               return input << 12;
-
-       if ((phys_addr & 0x0fff) == 0)
-               return phys_addr | (input << 8);
-
-       if ((phys_addr & 0x00ff) == 0)
-               return phys_addr | (input << 4);
-
-       if ((phys_addr & 0x000f) == 0)
-               return phys_addr | input;
-
-       /*
-        * All nibbles are used so no valid physical addresses can be assigned
-        * to the input.
-        */
-       return CEC_PHYS_ADDR_INVALID;
-}
-EXPORT_SYMBOL_GPL(cec_phys_addr_for_input);
-
-int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
-{
-       int i;
-
-       if (parent)
-               *parent = phys_addr;
-       if (port)
-               *port = 0;
-       if (phys_addr == CEC_PHYS_ADDR_INVALID)
-               return 0;
-       for (i = 0; i < 16; i += 4)
-               if (phys_addr & (0xf << i))
-                       break;
-       if (i == 16)
-               return 0;
-       if (parent)
-               *parent = phys_addr & (0xfff0 << i);
-       if (port)
-               *port = (phys_addr >> i) & 0xf;
-       for (i += 4; i < 16; i += 4)
-               if ((phys_addr & (0xf << i)) == 0)
-                       return -EINVAL;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(cec_phys_addr_validate);
index dd2078b27a419e6a4cd5afeb9cd9e48220747b63..2424680f71c3db3abb62065251928a567237fa12 100644 (file)
@@ -123,6 +123,8 @@ void cec_notifier_unregister(struct cec_notifier *n)
 {
        mutex_lock(&n->lock);
        n->callback = NULL;
+       n->cec_adap->notifier = NULL;
+       n->cec_adap = NULL;
        mutex_unlock(&n->lock);
        cec_notifier_put(n);
 }
index 9226dca44e907d847b111b1acedd17e8b20c2f08..93d250db0b6f06616f0654f37a49c2c6b2e8741e 100644 (file)
@@ -207,6 +207,10 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
        for (plane = 0; plane < vb->num_planes; ++plane) {
                unsigned long size = PAGE_ALIGN(vb->planes[plane].length);
 
+               /* Did it wrap around? */
+               if (size < vb->planes[plane].length)
+                       goto free;
+
                mem_priv = call_ptr_memop(vb, alloc,
                                q->alloc_devs[plane] ? : q->dev,
                                q->dma_attrs, size, q->dma_dir, q->gfp_flags);
index 015e737095cdd6644b4e0332120aa3ee993eb1d0..e9bfea986cc47e4bba3064d9de1c53e6f59741ee 100644 (file)
@@ -59,7 +59,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
                gfp_t gfp_flags)
 {
        unsigned int last_page = 0;
-       int size = buf->size;
+       unsigned long size = buf->size;
 
        while (size > 0) {
                struct page *pages;
index c4e7ebfe4d2955c9d9eb8b0cd55e1302269d7f4b..8a61150ee249e60c28bed5274b2237e6028785ed 100644 (file)
@@ -164,6 +164,9 @@ static void dvb_frontend_free(struct kref *ref)
 
 static void dvb_frontend_put(struct dvb_frontend *fe)
 {
+       /* call detach before dropping the reference count */
+       if (fe->ops.detach)
+               fe->ops.detach(fe);
        /*
         * Check if the frontend was registered, as otherwise
         * kref was not initialized yet.
@@ -3035,7 +3038,6 @@ void dvb_frontend_detach(struct dvb_frontend *fe)
        dvb_frontend_invoke_release(fe, fe->ops.release_sec);
        dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release);
        dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release);
-       dvb_frontend_invoke_release(fe, fe->ops.detach);
        dvb_frontend_put(fe);
 }
 EXPORT_SYMBOL(dvb_frontend_detach);
index 3c87785703310fb97f342b1e42279b33f466b80f..04dc2f4bc7aafd4d40d6486f22cf9a79231307c2 100644 (file)
@@ -339,8 +339,10 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
        if (npads) {
                dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads),
                                       GFP_KERNEL);
-               if (!dvbdev->pads)
+               if (!dvbdev->pads) {
+                       kfree(dvbdev->entity);
                        return -ENOMEM;
+               }
        }
 
        switch (type) {
index 29836c1a40e987985937f89dc7574ca6205a489e..ee830c76e4b30b69361b5fbdc4d2708cc71463e3 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/idr.h>
 #include <linux/dvb/frontend.h>
 #include <asm/types.h>
 
@@ -43,8 +44,7 @@ struct dvb_pll_priv {
 };
 
 #define DVB_PLL_MAX 64
-
-static unsigned int dvb_pll_devcount;
+static DEFINE_IDA(pll_ida);
 
 static int debug;
 module_param(debug, int, 0644);
@@ -796,6 +796,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
        struct dvb_pll_priv *priv = NULL;
        int ret;
        const struct dvb_pll_desc *desc;
+       int nr;
 
        b1 = kmalloc(1, GFP_KERNEL);
        if (!b1)
@@ -804,9 +805,14 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
        b1[0] = 0;
        msg.buf = b1;
 
-       if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
-           (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
-               pll_desc_id = id[dvb_pll_devcount];
+       nr = ida_simple_get(&pll_ida, 0, DVB_PLL_MAX, GFP_KERNEL);
+       if (nr < 0) {
+               kfree(b1);
+               return NULL;
+       }
+
+       if (id[nr] > DVB_PLL_UNDEFINED && id[nr] < ARRAY_SIZE(pll_list))
+               pll_desc_id = id[nr];
 
        BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list));
 
@@ -817,24 +823,20 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
                        fe->ops.i2c_gate_ctrl(fe, 1);
 
                ret = i2c_transfer (i2c, &msg, 1);
-               if (ret != 1) {
-                       kfree(b1);
-                       return NULL;
-               }
+               if (ret != 1)
+                       goto out;
                if (fe->ops.i2c_gate_ctrl)
                             fe->ops.i2c_gate_ctrl(fe, 0);
        }
 
        priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
-       if (!priv) {
-               kfree(b1);
-               return NULL;
-       }
+       if (!priv)
+               goto out;
 
        priv->pll_i2c_address = pll_addr;
        priv->i2c = i2c;
        priv->pll_desc = desc;
-       priv->nr = dvb_pll_devcount++;
+       priv->nr = nr;
 
        memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops,
               sizeof(struct dvb_tuner_ops));
@@ -867,6 +869,11 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
        kfree(b1);
 
        return fe;
+out:
+       kfree(b1);
+       ida_simple_remove(&pll_ida, nr);
+
+       return NULL;
 }
 EXPORT_SYMBOL(dvb_pll_attach);
 
@@ -903,9 +910,10 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
 static int dvb_pll_remove(struct i2c_client *client)
 {
-       struct dvb_frontend *fe;
+       struct dvb_frontend *fe = i2c_get_clientdata(client);
+       struct dvb_pll_priv *priv = fe->tuner_priv;
 
-       fe = i2c_get_clientdata(client);
+       ida_simple_remove(&pll_ida, priv->nr);
        dvb_pll_release(fe);
        return 0;
 }
index b233b7be0b84aaee1eb621660c02a22614e988fa..e6aaf4973aef47bdf8e84628bf5aa471734b2b96 100644 (file)
@@ -75,8 +75,8 @@ static int tua6100_set_params(struct dvb_frontend *fe)
        struct i2c_msg msg1 = { .addr = priv->i2c_address, .flags = 0, .buf = reg1, .len = 4 };
        struct i2c_msg msg2 = { .addr = priv->i2c_address, .flags = 0, .buf = reg2, .len = 3 };
 
-#define _R 4
-#define _P 32
+#define _R_VAL 4
+#define _P_VAL 32
 #define _ri 4000000
 
        // setup register 0
@@ -91,14 +91,14 @@ static int tua6100_set_params(struct dvb_frontend *fe)
        else
                reg1[1] = 0x0c;
 
-       if (_P == 64)
+       if (_P_VAL == 64)
                reg1[1] |= 0x40;
        if (c->frequency >= 1525000)
                reg1[1] |= 0x80;
 
        // register 2
-       reg2[1] = (_R >> 8) & 0x03;
-       reg2[2] = _R;
+       reg2[1] = (_R_VAL >> 8) & 0x03;
+       reg2[2] = _R_VAL;
        if (c->frequency < 1455000)
                reg2[1] |= 0x1c;
        else if (c->frequency < 1630000)
@@ -110,18 +110,18 @@ static int tua6100_set_params(struct dvb_frontend *fe)
         * The N divisor ratio (note: c->frequency is in kHz, but we
         * need it in Hz)
         */
-       prediv = (c->frequency * _R) / (_ri / 1000);
-       div = prediv / _P;
+       prediv = (c->frequency * _R_VAL) / (_ri / 1000);
+       div = prediv / _P_VAL;
        reg1[1] |= (div >> 9) & 0x03;
        reg1[2] = div >> 1;
        reg1[3] = (div << 7);
-       priv->frequency = ((div * _P) * (_ri / 1000)) / _R;
+       priv->frequency = ((div * _P_VAL) * (_ri / 1000)) / _R_VAL;
 
        // Finally, calculate and store the value for A
-       reg1[3] |= (prediv - (div*_P)) & 0x7f;
+       reg1[3] |= (prediv - (div*_P_VAL)) & 0x7f;
 
-#undef _R
-#undef _P
+#undef _R_VAL
+#undef _P_VAL
 #undef _ri
 
        if (fe->ops.i2c_gate_ctrl)
index eb60c7315e26307277ba7864e7bff8e05c4b56ed..f0cc194765d45b4865ea6deea81edcde918f9d42 100644 (file)
@@ -60,8 +60,9 @@ config VIDEO_TDA1997X
        tristate "NXP TDA1997x HDMI receiver"
        depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
        depends on SND_SOC
-       select SND_PCM
        select HDMI
+       select SND_PCM
+       select V4L2_FWNODE
        ---help---
          V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
 
index 29d13338a1d3461fbb4ba0f9b22f6c547ebe063c..1425eb37d54f3181847e507aa541725934012453 100644 (file)
@@ -36,7 +36,7 @@ obj-$(CONFIG_VIDEO_ADV748X) += adv748x/
 obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o
 obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
 obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
-obj-$(CONFIG_VIDEO_ADV7511) += adv7511.o
+obj-$(CONFIG_VIDEO_ADV7511) += adv7511-v4l2.o
 obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
 obj-$(CONFIG_VIDEO_VS6624)  += vs6624.o
 obj-$(CONFIG_VIDEO_BT819) += bt819.o
similarity index 99%
rename from drivers/media/i2c/adv7511.c
rename to drivers/media/i2c/adv7511-v4l2.c
index 88349b5053cce0298734ace8ae247171cd439712..6869bb593a68275d36062268bcfc5e0a7af83859 100644 (file)
@@ -5,6 +5,11 @@
  * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
  */
 
+/*
+ * This file is named adv7511-v4l2.c so it doesn't conflict with the Analog
+ * Device ADV7511 (config fragment CONFIG_DRM_I2C_ADV7511).
+ */
+
 
 #include <linux/kernel.h>
 #include <linux/module.h>
index f01964c36ad5753d058876db904462711136b895..a4b0a89c7e7e632e703d2ea2724cecf56b2f8ab2 100644 (file)
@@ -2297,8 +2297,8 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
                edid->blocks = 2;
                return -E2BIG;
        }
-       pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, &spa_loc);
-       err = cec_phys_addr_validate(pa, &pa, NULL);
+       pa = v4l2_get_edid_phys_addr(edid->edid, edid->blocks * 128, &spa_loc);
+       err = v4l2_phys_addr_validate(pa, &pa, NULL);
        if (err)
                return err;
 
index bb43a75ed6d0b1204e819b9cc59733aa6b61bd14..58662ba92d4f8f79a27fbb5126c5d730da87ed7a 100644 (file)
@@ -791,8 +791,8 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
                return 0;
        }
 
-       pa = cec_get_edid_phys_addr(edid, 256, &spa_loc);
-       err = cec_phys_addr_validate(pa, &pa, NULL);
+       pa = v4l2_get_edid_phys_addr(edid, 256, &spa_loc);
+       err = v4l2_phys_addr_validate(pa, &pa, NULL);
        if (err)
                return err;
 
index c4958108c388e4937e217d65b17ac4ce81d99390..e9667f50ee8c1b630f14e36f29bc2a10739e7ad6 100644 (file)
@@ -2588,7 +2588,6 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
                v4l2_ctrl_new_std(hdl, ops,
                                  V4L2_CID_PIXEL_RATE, 0, INT_MAX, 1,
                                  55969920);
-       ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
 
        /* Auto/manual white balance */
        ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
@@ -2637,6 +2636,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
                goto free_ctrls;
        }
 
+       ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
        ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
        ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
 
@@ -2962,9 +2962,14 @@ static int ov5640_probe(struct i2c_client *client,
        /* request optional power down pin */
        sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown",
                                                    GPIOD_OUT_HIGH);
+       if (IS_ERR(sensor->pwdn_gpio))
+               return PTR_ERR(sensor->pwdn_gpio);
+
        /* request optional reset pin */
        sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
                                                     GPIOD_OUT_HIGH);
+       if (IS_ERR(sensor->reset_gpio))
+               return PTR_ERR(sensor->reset_gpio);
 
        v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
 
index 1722cdab0daf2ae1b4a7c444ddb29cc48be5d3f0..34343bc10007850e502844bdeded13c2ee64a3a6 100644 (file)
@@ -53,6 +53,8 @@
 #define                OV5645_CHIP_ID_HIGH_BYTE        0x56
 #define OV5645_CHIP_ID_LOW             0x300b
 #define                OV5645_CHIP_ID_LOW_BYTE         0x45
+#define OV5645_IO_MIPI_CTRL00          0x300e
+#define OV5645_PAD_OUTPUT00            0x3019
 #define OV5645_AWB_MANUAL_CONTROL      0x3406
 #define                OV5645_AWB_MANUAL_ENABLE        BIT(0)
 #define OV5645_AEC_PK_MANUAL           0x3503
@@ -63,6 +65,7 @@
 #define                OV5645_ISP_VFLIP                BIT(2)
 #define OV5645_TIMING_TC_REG21         0x3821
 #define                OV5645_SENSOR_MIRROR            BIT(1)
+#define OV5645_MIPI_CTRL00             0x4800
 #define OV5645_PRE_ISP_TEST_SETTING_1  0x503d
 #define                OV5645_TEST_PATTERN_MASK        0x3
 #define                OV5645_SET_TEST_PATTERN(x)      ((x) & OV5645_TEST_PATTERN_MASK)
@@ -129,7 +132,6 @@ static const struct reg_value ov5645_global_init_setting[] = {
        { 0x3503, 0x07 },
        { 0x3002, 0x1c },
        { 0x3006, 0xc3 },
-       { 0x300e, 0x45 },
        { 0x3017, 0x00 },
        { 0x3018, 0x00 },
        { 0x302e, 0x0b },
@@ -358,7 +360,10 @@ static const struct reg_value ov5645_global_init_setting[] = {
        { 0x3a1f, 0x14 },
        { 0x0601, 0x02 },
        { 0x3008, 0x42 },
-       { 0x3008, 0x02 }
+       { 0x3008, 0x02 },
+       { OV5645_IO_MIPI_CTRL00, 0x40 },
+       { OV5645_MIPI_CTRL00, 0x24 },
+       { OV5645_PAD_OUTPUT00, 0x70 }
 };
 
 static const struct reg_value ov5645_setting_sxga[] = {
@@ -745,13 +750,9 @@ static int ov5645_s_power(struct v4l2_subdev *sd, int on)
                                goto exit;
                        }
 
-                       ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
-                                              OV5645_SYSTEM_CTRL0_STOP);
-                       if (ret < 0) {
-                               ov5645_set_power_off(ov5645);
-                               goto exit;
-                       }
+                       usleep_range(500, 1000);
                } else {
+                       ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x58);
                        ov5645_set_power_off(ov5645);
                }
        }
@@ -1057,11 +1058,20 @@ static int ov5645_s_stream(struct v4l2_subdev *subdev, int enable)
                        dev_err(ov5645->dev, "could not sync v4l2 controls\n");
                        return ret;
                }
+
+               ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x45);
+               if (ret < 0)
+                       return ret;
+
                ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
                                       OV5645_SYSTEM_CTRL0_START);
                if (ret < 0)
                        return ret;
        } else {
+               ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x40);
+               if (ret < 0)
+                       return ret;
+
                ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
                                       OV5645_SYSTEM_CTRL0_STOP);
                if (ret < 0)
index f5a1ee90a6c5e27469192db4dbb96cd613ae7501..8a6a7a5929aa35ce32a3be4fb98b9ab9f973177f 100644 (file)
@@ -761,7 +761,11 @@ static int ov7740_try_fmt_internal(struct v4l2_subdev *sd,
 
                fsize++;
        }
-
+       if (i >= ARRAY_SIZE(ov7740_framesizes)) {
+               fsize = &ov7740_framesizes[0];
+               fmt->width = fsize->width;
+               fmt->height = fsize->height;
+       }
        if (ret_frmsize != NULL)
                *ret_frmsize = fsize;
 
index 5bea31cd41aa1e63b3e024e0ce9d55624d633b7e..33a21d585dc9c2b9eaee1dbd564ea7ef6b8994f7 100644 (file)
@@ -716,6 +716,11 @@ static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain)
                for (m = 6; m >= 0; m--)
                        if (gain >= (1 << m) * 16)
                                break;
+
+               /* Sanity check: don't adjust the gain with a negative value */
+               if (m < 0)
+                       return -EINVAL;
+
                rgain = (gain - ((1 << m) * 16)) / (1 << m);
                rgain |= (((1 << m) - 1) << 4);
 
index 26070fb6ce4ebca20b30dfe4cd1ce88a5f0ad7dc..e4c0a27b636aaa6901f8b5d5d2bddaf91bcece94 100644 (file)
@@ -1789,7 +1789,7 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
                return -E2BIG;
        }
        pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
-       err = cec_phys_addr_validate(pa, &pa, NULL);
+       err = v4l2_phys_addr_validate(pa, &pa, NULL);
        if (err)
                return err;
 
index 8b450fc53202f84d477bf6428626a8f72d99dd72..15a5e98b3d4585f2bb80005a72f943b114383c1c 100644 (file)
@@ -828,7 +828,7 @@ static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl)
                return 0;
        case V4L2_CID_HUE:
                tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->val);
-               break;
+               return 0;
        case V4L2_CID_TEST_PATTERN:
                decoder->enable = ctrl->val ? false : true;
                tvp5150_selmux(sd);
index 3bae24b15eaa4cbd5e25485121e50ddcb5991af2..ed518b1f82e4a941799ee2c0061b5dcbf5fbabbf 100644 (file)
@@ -487,6 +487,7 @@ static long media_device_enum_links32(struct media_device *mdev,
 {
        struct media_links_enum links;
        compat_uptr_t pads_ptr, links_ptr;
+       int ret;
 
        memset(&links, 0, sizeof(links));
 
@@ -498,7 +499,14 @@ static long media_device_enum_links32(struct media_device *mdev,
        links.pads = compat_ptr(pads_ptr);
        links.links = compat_ptr(links_ptr);
 
-       return media_device_enum_links(mdev, &links);
+       ret = media_device_enum_links(mdev, &links);
+       if (ret)
+               return ret;
+
+       if (copy_to_user(ulinks->reserved, links.reserved,
+                        sizeof(ulinks->reserved)))
+               return -EFAULT;
+       return 0;
 }
 
 #define MEDIA_IOC_ENUM_LINKS32         _IOWR('|', 0x02, struct media_links_enum32)
index cf1e526de56ac0fadd1c75b05b964481d8ec3e09..8a1128c60680b928835da0601fdc51d06383b593 100644 (file)
@@ -351,7 +351,11 @@ static const struct i2c_client saa7134_client_template = {
 
 /* ----------------------------------------------------------- */
 
-/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
+/*
+ * On Medion 7134 reading the SAA7134 chip config EEPROM needs DVB-T
+ * demod i2c gate closed due to an address clash between this EEPROM
+ * and the demod one.
+ */
 static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
 {
        u8 subaddr = 0x7, dmdregval;
@@ -368,14 +372,14 @@ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
 
        ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
        if ((ret == 2) && (dmdregval & 0x2)) {
-               pr_debug("%s: DVB-T demod i2c gate was left closed\n",
+               pr_debug("%s: DVB-T demod i2c gate was left open\n",
                         dev->name);
 
                data[0] = subaddr;
                data[1] = (dmdregval & ~0x2);
                if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
-                       pr_err("%s: EEPROM i2c gate open failure\n",
-                         dev->name);
+                       pr_err("%s: EEPROM i2c gate close failure\n",
+                              dev->name);
        }
 }
 
index 6d8e4afe9673afd1738d4f91f767be4b45111114..8c56d4c37a525a643daafff463b0cf794931a702 100644 (file)
@@ -304,6 +304,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
        ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
        if (ret < 0) {
                pr_err("cannot register capture v4l2 device. skipping.\n");
+               saa7146_vv_release(dev);
+               i2c_del_adapter(&hexium->i2c_adapter);
+               kfree(hexium);
                return ret;
        }
 
index d697e1ad929c2d8e765feb15f136563b103bef05..5102519df108328cccf15c061b4038dd673d9963 100644 (file)
@@ -1122,16 +1122,25 @@ static int saa7164_proc_show(struct seq_file *m, void *v)
        return 0;
 }
 
+static struct proc_dir_entry *saa7164_pe;
+
 static int saa7164_proc_create(void)
 {
-       struct proc_dir_entry *pe;
-
-       pe = proc_create_single("saa7164", S_IRUGO, NULL, saa7164_proc_show);
-       if (!pe)
+       saa7164_pe = proc_create_single("saa7164", 0444, NULL, saa7164_proc_show);
+       if (!saa7164_pe)
                return -ENOMEM;
 
        return 0;
 }
+
+static void saa7164_proc_destroy(void)
+{
+       if (saa7164_pe)
+               remove_proc_entry("saa7164", NULL);
+}
+#else
+static int saa7164_proc_create(void) { return 0; }
+static void saa7164_proc_destroy(void) {}
 #endif
 
 static int saa7164_thread_function(void *data)
@@ -1503,19 +1512,21 @@ static struct pci_driver saa7164_pci_driver = {
 
 static int __init saa7164_init(void)
 {
-       printk(KERN_INFO "saa7164 driver loaded\n");
+       int ret = pci_register_driver(&saa7164_pci_driver);
+
+       if (ret)
+               return ret;
 
-#ifdef CONFIG_PROC_FS
        saa7164_proc_create();
-#endif
-       return pci_register_driver(&saa7164_pci_driver);
+
+       pr_info("saa7164 driver loaded\n");
+
+       return 0;
 }
 
 static void __exit saa7164_fini(void)
 {
-#ifdef CONFIG_PROC_FS
-       remove_proc_entry("saa7164", NULL);
-#endif
+       saa7164_proc_destroy();
        pci_unregister_driver(&saa7164_pci_driver);
 }
 
index a3cfefdbee1275a45ff6d45d717a8b376f808049..c3eaddced72143a5c1969c56925d6ffd5de8b728 100644 (file)
@@ -1728,6 +1728,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
                v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
                return ret;
        }
+       ctx->sequence_offset = ~0U;
        ctx->initialized = 1;
 
        /* Update kfifo out pointer from coda bitstream read pointer */
@@ -2142,12 +2143,17 @@ static void coda_finish_decode(struct coda_ctx *ctx)
                else if (ctx->display_idx < 0)
                        ctx->hold = true;
        } else if (decoded_idx == -2) {
+               if (ctx->display_idx >= 0 &&
+                   ctx->display_idx < ctx->num_internal_frames)
+                       ctx->sequence_offset++;
                /* no frame was decoded, we still return remaining buffers */
        } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
                v4l2_err(&dev->v4l2_dev,
                         "decoded frame index out of range: %d\n", decoded_idx);
        } else {
-               val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
+               val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM);
+               if (ctx->sequence_offset == -1)
+                       ctx->sequence_offset = val;
                val -= ctx->sequence_offset;
                spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
                if (!list_empty(&ctx->buffer_meta_list)) {
@@ -2303,7 +2309,6 @@ irqreturn_t coda_irq_handler(int irq, void *data)
        if (ctx == NULL) {
                v4l2_err(&dev->v4l2_dev,
                         "Instance released before the end of transaction\n");
-               mutex_unlock(&dev->coda_mutex);
                return IRQ_HANDLED;
        }
 
index 19d92edcc98113b145427888632f281c72f7f3d6..4b0220f40b4253d0739553e51185fd9dc9d23484 100644 (file)
@@ -997,6 +997,8 @@ static int coda_encoder_cmd(struct file *file, void *fh,
        /* Set the stream-end flag on this context */
        ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
 
+       flush_work(&ctx->pic_run_work);
+
        /* If there is no buffer in flight, wake up */
        if (!ctx->streamon_out || ctx->qsequence == ctx->osequence) {
                dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
index 19cf6853411e223905e183585753ba2ded590e11..89a86c19579b8ab9d2cbee42a4cc25913dab9b96 100644 (file)
@@ -518,6 +518,11 @@ static int __init vpss_init(void)
                return -EBUSY;
 
        oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+       if (unlikely(!oper_cfg.vpss_regs_base2)) {
+               release_mem_region(VPSS_CLK_CTRL, 4);
+               return -ENOMEM;
+       }
+
        writel(VPSS_CLK_CTRL_VENCCLKEN |
                     VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
 
index 5ddb2321e9e48f74d41c803453cf2e112fcba22a..0fe9be93fabe2fd2feb0537ddae6092a9f0eee51 100644 (file)
@@ -819,6 +819,7 @@ static int fimc_is_probe(struct platform_device *pdev)
                return -ENODEV;
 
        is->pmu_regs = of_iomap(node, 0);
+       of_node_put(node);
        if (!is->pmu_regs)
                return -ENOMEM;
 
index deb499f76412a33f139aaedc467e038476f905aa..b5993532831da6b3aada06390b8338f3b32f66d4 100644 (file)
@@ -498,6 +498,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
                        continue;
 
                ret = fimc_md_parse_port_node(fmd, port, index);
+               of_node_put(port);
                if (ret < 0) {
                        of_node_put(node);
                        goto rpm_put;
@@ -531,6 +532,7 @@ static int __of_get_csis_id(struct device_node *np)
        if (!np)
                return -EINVAL;
        of_property_read_u32(np, "reg", &reg);
+       of_node_put(np);
        return reg - FIMC_INPUT_MIPI_CSI2_0;
 }
 
index 0273302aa7412f7c83a5b1cfe30ae3cd6050614e..83086eea145006bd8bcf6c3c055f4c3f18a7c13d 100644 (file)
@@ -37,7 +37,7 @@
 #define VIU_VERSION            "0.5.1"
 
 /* Allow building this driver with COMPILE_TEST */
-#ifndef CONFIG_PPC
+#if !defined(CONFIG_PPC) && !defined(CONFIG_MICROBLAZE)
 #define out_be32(v, a) iowrite32be(a, (void __iomem *)v)
 #define in_be32(a)     ioread32be((void __iomem *)a)
 #endif
index dfdbd4354b74cf700305c0b2c70a396938cd039a..eeee15ff007d878a3028d85e0f8a3eac2bcc4743 100644 (file)
@@ -200,7 +200,6 @@ struct mcam_vb_buffer {
        struct list_head queue;
        struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
        dma_addr_t dma_desc_pa;         /* Descriptor physical address */
-       int dma_desc_nent;              /* Number of mapped descriptors */
 };
 
 static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
@@ -608,9 +607,11 @@ static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
 static void mcam_sg_next_buffer(struct mcam_camera *cam)
 {
        struct mcam_vb_buffer *buf;
+       struct sg_table *sg_table;
 
        buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
        list_del_init(&buf->queue);
+       sg_table = vb2_dma_sg_plane_desc(&buf->vb_buf.vb2_buf, 0);
        /*
         * Very Bad Not Good Things happen if you don't clear
         * C1_DESC_ENA before making any descriptor changes.
@@ -618,7 +619,7 @@ static void mcam_sg_next_buffer(struct mcam_camera *cam)
        mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
        mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
        mcam_reg_write(cam, REG_DESC_LEN_Y,
-                       buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
+                       sg_table->nents * sizeof(struct mcam_dma_desc));
        mcam_reg_write(cam, REG_DESC_LEN_U, 0);
        mcam_reg_write(cam, REG_DESC_LEN_V, 0);
        mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
index bbb24fb95b9519b156428ee01846d3554cfada67..3deb0549b1a131855a4476e334164ed5e6d29409 100644 (file)
@@ -118,7 +118,9 @@ static int mtk_mdp_probe(struct platform_device *pdev)
        mutex_init(&mdp->vpulock);
 
        /* Old dts had the components as child nodes */
-       if (of_get_next_child(dev->of_node, NULL)) {
+       node = of_get_next_child(dev->of_node, NULL);
+       if (node) {
+               of_node_put(node);
                parent = dev->of_node;
                dev_warn(dev, "device tree is out of date\n");
        } else {
index 29e3f5da59c1ff61137f66d93b75b865521ed5e1..11ec048929e80109e6702249aea0b345e79378d3 100644 (file)
@@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
         */
 
        pixsize = vout->bpp * vout->vrfb_bpp;
-       dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
-                 (vout->pix.width * vout->bpp)) + 1;
+       dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
 
        xt->src_start = vout->buf_phy_addr[vb->i];
        xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
index 432bc7fbedc99196152e1d342c99d2f19f3ab156..addd03b51748136843eedb8c89f1eee0bc9b2d01 100644 (file)
@@ -722,6 +722,10 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
                                        s_stream, mode);
                        pipe->do_propagation = true;
                }
+
+               /* Stop at the first external sub-device. */
+               if (subdev->dev != isp->dev)
+                       break;
        }
 
        return 0;
@@ -836,6 +840,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
                                                      &subdev->entity);
                        failure = -ETIMEDOUT;
                }
+
+               /* Stop at the first external sub-device. */
+               if (subdev->dev != isp->dev)
+                       break;
        }
 
        return failure;
index 77b73e27a2746bf086d1bf86c7cb252582c94316..412438dce2854a353d257e1267b8cc797edddd8b 100644 (file)
@@ -2605,6 +2605,7 @@ int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
        int ret;
 
        /* Register the subdev and video node. */
+       ccdc->subdev.dev = vdev->mdev->dev;
        ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
        if (ret < 0)
                goto error;
index e062939d0d054386b533ab8d3ab13afb3bdc1fbf..47b0d3fe87d8076dee4625dcb35a6452620f1868 100644 (file)
@@ -1034,6 +1034,7 @@ int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
        int ret;
 
        /* Register the subdev and video nodes. */
+       ccp2->subdev.dev = vdev->mdev->dev;
        ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
        if (ret < 0)
                goto error;
index a4d3d030e81e20568b063d0544d18b64dd3a4d87..e45292a1bf6c5a35f3f370bd205c08b8fda4dd18 100644 (file)
@@ -1201,6 +1201,7 @@ int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
        int ret;
 
        /* Register the subdev and video nodes. */
+       csi2->subdev.dev = vdev->mdev->dev;
        ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
        if (ret < 0)
                goto error;
index 3195f7c8b8b7e43343af0908ad4a6ac8d0075a27..591c6de498f8913b15b43771696d1c1bba089100 100644 (file)
@@ -2228,6 +2228,7 @@ int omap3isp_preview_register_entities(struct isp_prev_device *prev,
        int ret;
 
        /* Register the subdev and video nodes. */
+       prev->subdev.dev = vdev->mdev->dev;
        ret = v4l2_device_register_subdev(vdev, &prev->subdev);
        if (ret < 0)
                goto error;
index 0b6a87508584f4eb5201f3f2b0bf284c1b7380f7..2035e3c6a9deeeb5660d25ae690b4719ac9e4ebb 100644 (file)
@@ -1684,6 +1684,7 @@ int omap3isp_resizer_register_entities(struct isp_res_device *res,
        int ret;
 
        /* Register the subdev and video nodes. */
+       res->subdev.dev = vdev->mdev->dev;
        ret = v4l2_device_register_subdev(vdev, &res->subdev);
        if (ret < 0)
                goto error;
index 47353fee26c3297234502d6d9ce197ff5098eb0c..bfa2d05046466a40baae760eecf11aa2e8e71936 100644 (file)
@@ -1029,6 +1029,8 @@ void omap3isp_stat_unregister_entities(struct ispstat *stat)
 int omap3isp_stat_register_entities(struct ispstat *stat,
                                    struct v4l2_device *vdev)
 {
+       stat->subdev.dev = vdev->mdev->dev;
+
        return v4l2_device_register_subdev(vdev, &stat->subdev);
 }
 
index 2a15b7cca338fe6445ae11a8ec275c9e40ac1245..5a30f1d84fe177021c050149d953f07a4c3196e9 100644 (file)
@@ -257,6 +257,8 @@ MODULE_PARM_DESC(debug, "activate debug info");
 #define FD1_IP_H3_ES1                  0x02010101
 #define FD1_IP_M3W                     0x02010202
 #define FD1_IP_H3                      0x02010203
+#define FD1_IP_M3N                     0x02010204
+#define FD1_IP_E3                      0x02010205
 
 /* LUTs */
 #define FD1_LUT_DIF_ADJ                        0x1000
@@ -2304,7 +2306,7 @@ static int fdp1_probe(struct platform_device *pdev)
                fdp1->fcp = rcar_fcp_get(fcp_node);
                of_node_put(fcp_node);
                if (IS_ERR(fdp1->fcp)) {
-                       dev_err(&pdev->dev, "FCP not found (%ld)\n",
+                       dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
                                PTR_ERR(fdp1->fcp));
                        return PTR_ERR(fdp1->fcp);
                }
@@ -2365,6 +2367,12 @@ static int fdp1_probe(struct platform_device *pdev)
        case FD1_IP_H3:
                dprintk(fdp1, "FDP1 Version R-Car H3\n");
                break;
+       case FD1_IP_M3N:
+               dprintk(fdp1, "FDP1 Version R-Car M3N\n");
+               break;
+       case FD1_IP_E3:
+               dprintk(fdp1, "FDP1 Version R-Car E3\n");
+               break;
        default:
                dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
                                hw_version);
index ca11f8a7569dca82883f4a124d881db62151c9d8..4b8516c35bc204700bdc1de88d6faf7503b71249 100644 (file)
@@ -527,7 +527,8 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
                                dev);
                ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
                                dev);
-               ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
+               if (FW_HAS_E_MIN_SCRATCH_BUF(dev))
+                       ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
                                                get_min_scratch_buf_size, dev);
                if (ctx->img_width == 0 || ctx->img_height == 0)
                        ctx->state = MFCINST_ERROR;
index eb85cedc5ef34a66ba1889c416ef7e401921c804..5e080f32b0e8247324ea513151cb97c362edd44a 100644 (file)
@@ -38,6 +38,11 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
        for (i = 0; i < pm->num_clocks; i++) {
                pm->clocks[i] = devm_clk_get(pm->device, pm->clk_names[i]);
                if (IS_ERR(pm->clocks[i])) {
+                       /* additional clocks are optional */
+                       if (i && PTR_ERR(pm->clocks[i]) == -ENOENT) {
+                               pm->clocks[i] = NULL;
+                               continue;
+                       }
                        mfc_err("Failed to get clock: %s\n",
                                pm->clk_names[i]);
                        return PTR_ERR(pm->clocks[i]);
index d38682265892239ab599701301f2f620d4c9468c..1d9c028e52cba8e4c2554912dd1313b7331e8d61 100644 (file)
@@ -1681,7 +1681,7 @@ static int dcmi_probe(struct platform_device *pdev)
        if (irq <= 0) {
                if (irq != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "Could not get irq\n");
-               return irq;
+               return irq ? irq : -ENXIO;
        }
 
        dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 462099a141e4aaae2da52bedad20d94b5197d554..7b8cf661f2386c22c3b95e92794dfadd8975820f 100644 (file)
@@ -3,7 +3,8 @@
  *
  * This is a virtual device driver for testing mem-to-mem videobuf framework.
  * It simulates a device that uses memory buffers for both source and
- * destination, processes the data and issues an "irq" (simulated by a timer).
+ * destination, processes the data and issues an "irq" (simulated by a delayed
+ * workqueue).
  * The device is capable of multi-instance, multi-buffer-per-transaction
  * operation (via the mem2mem framework).
  *
@@ -19,7 +20,6 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
-#include <linux/timer.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 
@@ -148,7 +148,7 @@ struct vim2m_dev {
        struct mutex            dev_mutex;
        spinlock_t              irqlock;
 
-       struct timer_list       timer;
+       struct delayed_work     work_run;
 
        struct v4l2_m2m_dev     *m2m_dev;
 };
@@ -336,12 +336,6 @@ static int device_process(struct vim2m_ctx *ctx,
        return 0;
 }
 
-static void schedule_irq(struct vim2m_dev *dev, int msec_timeout)
-{
-       dprintk(dev, "Scheduling a simulated irq\n");
-       mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
-}
-
 /*
  * mem2mem callbacks
  */
@@ -387,13 +381,14 @@ static void device_run(void *priv)
 
        device_process(ctx, src_buf, dst_buf);
 
-       /* Run a timer, which simulates a hardware irq  */
-       schedule_irq(dev, ctx->transtime);
+       /* Run delayed work, which simulates a hardware irq  */
+       schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime));
 }
 
-static void device_isr(struct timer_list *t)
+static void device_work(struct work_struct *w)
 {
-       struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer);
+       struct vim2m_dev *vim2m_dev =
+               container_of(w, struct vim2m_dev, work_run.work);
        struct vim2m_ctx *curr_ctx;
        struct vb2_v4l2_buffer *src_vb, *dst_vb;
        unsigned long flags;
@@ -802,9 +797,13 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
 static void vim2m_stop_streaming(struct vb2_queue *q)
 {
        struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
+       struct vim2m_dev *dev = ctx->dev;
        struct vb2_v4l2_buffer *vbuf;
        unsigned long flags;
 
+       if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
+               cancel_delayed_work_sync(&dev->work_run);
+
        for (;;) {
                if (V4L2_TYPE_IS_OUTPUT(q->type))
                        vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
@@ -1015,6 +1014,7 @@ static int vim2m_probe(struct platform_device *pdev)
        vfd = &dev->vfd;
        vfd->lock = &dev->dev_mutex;
        vfd->v4l2_dev = &dev->v4l2_dev;
+       INIT_DELAYED_WORK(&dev->work_run, device_work);
 
        ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
        if (ret) {
@@ -1026,7 +1026,6 @@ static int vim2m_probe(struct platform_device *pdev)
        v4l2_info(&dev->v4l2_dev,
                        "Device registered as /dev/video%d\n", vfd->num);
 
-       timer_setup(&dev->timer, device_isr, 0);
        platform_set_drvdata(pdev, dev);
 
        dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
@@ -1083,7 +1082,6 @@ static int vim2m_remove(struct platform_device *pdev)
        media_device_cleanup(&dev->mdev);
 #endif
        v4l2_m2m_release(dev->m2m_dev);
-       del_timer_sync(&dev->timer);
        video_unregister_device(&dev->vfd);
        v4l2_device_unregister(&dev->v4l2_dev);
 
index 65d657daf66f8e7b0da13aaaea893f733ce7fbec..8e014cc485f002ef8555794a4fa182f1f4e634e0 100644 (file)
@@ -132,12 +132,15 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
                                  struct v4l2_format *f)
 {
        struct vimc_cap_device *vcap = video_drvdata(file);
+       int ret;
 
        /* Do not change the format while stream is on */
        if (vb2_is_busy(&vcap->queue))
                return -EBUSY;
 
-       vimc_cap_try_fmt_vid_cap(file, priv, f);
+       ret = vimc_cap_try_fmt_vid_cap(file, priv, f);
+       if (ret)
+               return ret;
 
        dev_dbg(vcap->dev, "%s: format update: "
                "old:%dx%d (0x%x, %d, %d, %d, %d) "
index 3b09ffceefd56ac718a6944757377ff6a969c5d6..2e273f4dfc2951b53fee1c2261a5e2309bef32ed 100644 (file)
@@ -1724,7 +1724,7 @@ int vidioc_s_edid(struct file *file, void *_fh,
                return -E2BIG;
        }
        phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
-       ret = cec_phys_addr_validate(phys_addr, &phys_addr, NULL);
+       ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
        if (ret)
                return ret;
 
@@ -1740,7 +1740,7 @@ set_phys_addr:
 
        for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
                cec_s_phys_addr(dev->cec_tx_adap[i],
-                               cec_phys_addr_for_input(phys_addr, i + 1),
+                               v4l2_phys_addr_for_input(phys_addr, i + 1),
                                false);
        return 0;
 }
index 2079861d2270fad5e0870134dc089d4ff7564a37..e108e9befb77fc53d1f7fd33f4a68a049224d2d2 100644 (file)
@@ -863,7 +863,7 @@ int vidioc_g_edid(struct file *file, void *_fh,
        if (edid->blocks > dev->edid_blocks - edid->start_block)
                edid->blocks = dev->edid_blocks - edid->start_block;
        if (adap)
-               cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
+               v4l2_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
        memcpy(edid->edid, dev->edid + edid->start_block * 128, edid->blocks * 128);
        return 0;
 }
index 26289adaf658c1f4a2adda8e02d3618323ea1f1b..a5634ca85a3165c3aa51ca9f829abf1390c9f700 100644 (file)
@@ -557,8 +557,10 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
 
        /* Get a default body for our list. */
        dl->body0 = vsp1_dl_body_get(dlm->pool);
-       if (!dl->body0)
+       if (!dl->body0) {
+               kfree(dl);
                return NULL;
+       }
 
        header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
 
index 9a5079d64c4ab1fa96af7f0a22b0ebdb4793701b..729600c4a056b824b4fdfcc7e831ae939e951aa2 100644 (file)
@@ -271,6 +271,14 @@ static int vidioc_g_frequency(struct file *file, void *priv,
        return 0;
 }
 
+static void raremono_device_release(struct v4l2_device *v4l2_dev)
+{
+       struct raremono_device *radio = to_raremono_dev(v4l2_dev);
+
+       kfree(radio->buffer);
+       kfree(radio);
+}
+
 /* File system interface */
 static const struct v4l2_file_operations usb_raremono_fops = {
        .owner          = THIS_MODULE,
@@ -295,12 +303,14 @@ static int usb_raremono_probe(struct usb_interface *intf,
        struct raremono_device *radio;
        int retval = 0;
 
-       radio = devm_kzalloc(&intf->dev, sizeof(struct raremono_device), GFP_KERNEL);
-       if (radio)
-               radio->buffer = devm_kmalloc(&intf->dev, BUFFER_LENGTH, GFP_KERNEL);
-
-       if (!radio || !radio->buffer)
+       radio = kzalloc(sizeof(*radio), GFP_KERNEL);
+       if (!radio)
+               return -ENOMEM;
+       radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
+       if (!radio->buffer) {
+               kfree(radio);
                return -ENOMEM;
+       }
 
        radio->usbdev = interface_to_usbdev(intf);
        radio->intf = intf;
@@ -324,7 +334,8 @@ static int usb_raremono_probe(struct usb_interface *intf,
        if (retval != 3 ||
            (get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) {
                dev_info(&intf->dev, "this is not Thanko's Raremono.\n");
-               return -ENODEV;
+               retval = -ENODEV;
+               goto free_mem;
        }
 
        dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n",
@@ -333,7 +344,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
        retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
        if (retval < 0) {
                dev_err(&intf->dev, "couldn't register v4l2_device\n");
-               return retval;
+               goto free_mem;
        }
 
        mutex_init(&radio->lock);
@@ -345,6 +356,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
        radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops;
        radio->vdev.lock = &radio->lock;
        radio->vdev.release = video_device_release_empty;
+       radio->v4l2_dev.release = raremono_device_release;
 
        usb_set_intfdata(intf, &radio->v4l2_dev);
 
@@ -360,6 +372,10 @@ static int usb_raremono_probe(struct usb_interface *intf,
        }
        dev_err(&intf->dev, "could not register video device\n");
        v4l2_device_unregister(&radio->v4l2_dev);
+
+free_mem:
+       kfree(radio->buffer);
+       kfree(radio);
        return retval;
 }
 
index 313a95f195a27235bf2f1545e7bf2e25cde521e1..19e381dd58089130664471855922db71f3fd5243 100644 (file)
@@ -743,7 +743,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
        /* start radio */
        retval = si470x_start_usb(radio);
        if (retval < 0)
-               goto err_all;
+               goto err_buf;
 
        /* set initial frequency */
        si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
@@ -758,6 +758,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
 
        return 0;
 err_all:
+       usb_kill_urb(radio->int_in_urb);
+err_buf:
        kfree(radio->buffer);
 err_ctrl:
        v4l2_ctrl_handler_free(&radio->hdl);
@@ -831,6 +833,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
        mutex_lock(&radio->lock);
        v4l2_device_disconnect(&radio->v4l2_dev);
        video_unregister_device(&radio->videodev);
+       usb_kill_urb(radio->int_in_urb);
        usb_set_intfdata(intf, NULL);
        mutex_unlock(&radio->lock);
        v4l2_device_put(&radio->v4l2_dev);
index dccdf6558e6ab7ce4e1e613453acdd5558badd3a..33abc8616ecb8122bd24a6117680e1f22aaf3ef7 100644 (file)
@@ -549,6 +549,7 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
 
        /* Register with V4L2 subsystem as RADIO device */
        if (video_register_device(&gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
+               v4l2_device_unregister(&fmdev->v4l2_dev);
                fmerr("Could not register video device\n");
                return -ENOMEM;
        }
@@ -562,6 +563,8 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
        if (ret < 0) {
                fmerr("(fmdev): Can't init ctrl handler\n");
                v4l2_ctrl_handler_free(&fmdev->ctrl_handler);
+               video_unregister_device(fmdev->radio_dev);
+               v4l2_device_unregister(&fmdev->v4l2_dev);
                return -EBUSY;
        }
 
index 7daac8bab83b02d2db1f52f394ac701971cc6125..6f3030b2054d07ca0731df6552e108e1c1d6e54f 100644 (file)
@@ -424,6 +424,10 @@ static int iguanair_probe(struct usb_interface *intf,
        int ret, pipein, pipeout;
        struct usb_host_interface *idesc;
 
+       idesc = intf->altsetting;
+       if (idesc->desc.bNumEndpoints < 2)
+               return -ENODEV;
+
        ir = kzalloc(sizeof(*ir), GFP_KERNEL);
        rc = rc_allocate_device(RC_DRIVER_IR_RAW);
        if (!ir || !rc) {
@@ -438,18 +442,13 @@ static int iguanair_probe(struct usb_interface *intf,
        ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
        ir->urb_out = usb_alloc_urb(0, GFP_KERNEL);
 
-       if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out) {
+       if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out ||
+           !usb_endpoint_is_int_in(&idesc->endpoint[0].desc) ||
+           !usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) {
                ret = -ENOMEM;
                goto out;
        }
 
-       idesc = intf->altsetting;
-
-       if (idesc->desc.bNumEndpoints < 2) {
-               ret = -ENODEV;
-               goto out;
-       }
-
        ir->rc = rc;
        ir->dev = &intf->dev;
        ir->udev = udev;
index 1041c056854d5035d2d6c6072ad37537334c2927..f23a220352f7f970ddfd9a54470d6d7886aab27c 100644 (file)
@@ -1835,12 +1835,17 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
                break;
        /* iMON VFD, MCE IR */
        case 0x46:
-       case 0x7e:
        case 0x9e:
                dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
                detected_display_type = IMON_DISPLAY_TYPE_VFD;
                allowed_protos = RC_PROTO_BIT_RC6_MCE;
                break;
+       /* iMON VFD, iMON or MCE IR */
+       case 0x7e:
+               dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE IR");
+               detected_display_type = IMON_DISPLAY_TYPE_VFD;
+               allowed_protos |= RC_PROTO_BIT_RC6_MCE;
+               break;
        /* iMON LCD, MCE IR */
        case 0x9f:
                dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
index 66334e8d63baa8c41fb4e4afe742aeb994704148..c58f2d38a4582ec3ab126bd471a1861b7e42f215 100644 (file)
@@ -161,6 +161,7 @@ static const struct of_device_id ir_spi_of_match[] = {
        { .compatible = "ir-spi-led" },
        {},
 };
+MODULE_DEVICE_TABLE(of, ir_spi_of_match);
 
 static struct spi_driver ir_spi_driver = {
        .probe = ir_spi_probe,
index 4c0c8008872aed48b4dac05063eb4c8831f2283b..f1dfb84094328bb2865ca0e9be1ad7f88ef6e2d3 100644 (file)
 #include <linux/pm_wakeup.h>
 #include <media/rc-core.h>
 
-#define DRIVER_VERSION "1.94"
+#define DRIVER_VERSION "1.95"
 #define DRIVER_AUTHOR  "Jarod Wilson <jarod@redhat.com>"
 #define DRIVER_DESC    "Windows Media Center Ed. eHome Infrared Transceiver " \
                        "device driver"
 #define DRIVER_NAME    "mceusb"
 
+#define USB_TX_TIMEOUT         1000 /* in milliseconds */
 #define USB_CTRL_MSG_SZ                2  /* Size of usb ctrl msg on gen1 hw */
 #define MCE_G1_INIT_MSGS       40 /* Init messages on gen1 hw to throw out */
 
 /* MCE constants */
-#define MCE_CMDBUF_SIZE                384  /* MCE Command buffer length */
+#define MCE_IRBUF_SIZE         128  /* TX IR buffer length */
 #define MCE_TIME_UNIT          50   /* Approx 50us resolution */
-#define MCE_CODE_LENGTH                5    /* Normal length of packet (with header) */
-#define MCE_PACKET_SIZE                4    /* Normal length of packet (without header) */
-#define MCE_IRDATA_HEADER      0x84 /* Actual header format is 0x80 + num_bytes */
+#define MCE_PACKET_SIZE                31   /* Max length of packet (with header) */
+#define MCE_IRDATA_HEADER      (0x80 + MCE_PACKET_SIZE - 1)
+                                    /* Actual format is 0x80 + num_bytes */
 #define MCE_IRDATA_TRAILER     0x80 /* End of IR data */
 #define MCE_MAX_CHANNELS       2    /* Two transmitters, hardware dependent? */
 #define MCE_DEFAULT_TX_MASK    0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
@@ -609,9 +610,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
        if (len <= skip)
                return;
 
-       dev_dbg(dev, "%cx data: %*ph (length=%d)",
-               (out ? 't' : 'r'),
-               min(len, buf_len - offset), buf + offset, len);
+       dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
+               (out ? 't' : 'r'), offset,
+               min(len, buf_len - offset), buf + offset, len, buf_len);
 
        inout = out ? "Request" : "Got";
 
@@ -733,6 +734,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
                case MCE_RSP_CMD_ILLEGAL:
                        dev_dbg(dev, "Illegal PORT_IR command");
                        break;
+               case MCE_RSP_TX_TIMEOUT:
+                       dev_dbg(dev, "IR TX timeout (TX buffer underrun)");
+                       break;
                default:
                        dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
                                 cmd, subcmd);
@@ -747,13 +751,14 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
                dev_dbg(dev, "End of raw IR data");
        else if ((cmd != MCE_CMD_PORT_IR) &&
                 ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
-               dev_dbg(dev, "Raw IR data, %d pulse/space samples", ir->rem);
+               dev_dbg(dev, "Raw IR data, %d pulse/space samples",
+                       cmd & MCE_PACKET_LENGTH_MASK);
 #endif
 }
 
 /*
  * Schedule work that can't be done in interrupt handlers
- * (mceusb_dev_recv() and mce_async_callback()) nor tasklets.
+ * (mceusb_dev_recv() and mce_write_callback()) nor tasklets.
  * Invokes mceusb_deferred_kevent() for recovering from
  * error events specified by the kevent bit field.
  */
@@ -766,23 +771,80 @@ static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent)
                dev_dbg(ir->dev, "kevent %d scheduled", kevent);
 }
 
-static void mce_async_callback(struct urb *urb)
+static void mce_write_callback(struct urb *urb)
 {
-       struct mceusb_dev *ir;
-       int len;
-
        if (!urb)
                return;
 
-       ir = urb->context;
+       complete(urb->context);
+}
+
+/*
+ * Write (TX/send) data to MCE device USB endpoint out.
+ * Used for IR blaster TX and MCE device commands.
+ *
+ * Return: The number of bytes written (> 0) or errno (< 0).
+ */
+static int mce_write(struct mceusb_dev *ir, u8 *data, int size)
+{
+       int ret;
+       struct urb *urb;
+       struct device *dev = ir->dev;
+       unsigned char *buf_out;
+       struct completion tx_done;
+       unsigned long expire;
+       unsigned long ret_wait;
+
+       mceusb_dev_printdata(ir, data, size, 0, size, true);
+
+       urb = usb_alloc_urb(0, GFP_KERNEL);
+       if (unlikely(!urb)) {
+               dev_err(dev, "Error: mce write couldn't allocate urb");
+               return -ENOMEM;
+       }
+
+       buf_out = kmalloc(size, GFP_KERNEL);
+       if (!buf_out) {
+               usb_free_urb(urb);
+               return -ENOMEM;
+       }
+
+       init_completion(&tx_done);
+
+       /* outbound data */
+       if (usb_endpoint_xfer_int(ir->usb_ep_out))
+               usb_fill_int_urb(urb, ir->usbdev, ir->pipe_out,
+                                buf_out, size, mce_write_callback, &tx_done,
+                                ir->usb_ep_out->bInterval);
+       else
+               usb_fill_bulk_urb(urb, ir->usbdev, ir->pipe_out,
+                                 buf_out, size, mce_write_callback, &tx_done);
+       memcpy(buf_out, data, size);
+
+       ret = usb_submit_urb(urb, GFP_KERNEL);
+       if (ret) {
+               dev_err(dev, "Error: mce write submit urb error = %d", ret);
+               kfree(buf_out);
+               usb_free_urb(urb);
+               return ret;
+       }
+
+       expire = msecs_to_jiffies(USB_TX_TIMEOUT);
+       ret_wait = wait_for_completion_timeout(&tx_done, expire);
+       if (!ret_wait) {
+               dev_err(dev, "Error: mce write timed out (expire = %lu (%dms))",
+                       expire, USB_TX_TIMEOUT);
+               usb_kill_urb(urb);
+               ret = (urb->status == -ENOENT ? -ETIMEDOUT : urb->status);
+       } else {
+               ret = urb->status;
+       }
+       if (ret >= 0)
+               ret = urb->actual_length;       /* bytes written */
 
        switch (urb->status) {
        /* success */
        case 0:
-               len = urb->actual_length;
-
-               mceusb_dev_printdata(ir, urb->transfer_buffer, len,
-                                    0, len, true);
                break;
 
        case -ECONNRESET:
@@ -792,140 +854,135 @@ static void mce_async_callback(struct urb *urb)
                break;
 
        case -EPIPE:
-               dev_err(ir->dev, "Error: request urb status = %d (TX HALT)",
+               dev_err(ir->dev, "Error: mce write urb status = %d (TX HALT)",
                        urb->status);
                mceusb_defer_kevent(ir, EVENT_TX_HALT);
                break;
 
        default:
-               dev_err(ir->dev, "Error: request urb status = %d", urb->status);
+               dev_err(ir->dev, "Error: mce write urb status = %d",
+                       urb->status);
                break;
        }
 
-       /* the transfer buffer and urb were allocated in mce_request_packet */
-       kfree(urb->transfer_buffer);
-       usb_free_urb(urb);
-}
-
-/* request outgoing (send) usb packet - used to initialize remote */
-static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
-                                                               int size)
-{
-       int res;
-       struct urb *async_urb;
-       struct device *dev = ir->dev;
-       unsigned char *async_buf;
+       dev_dbg(dev, "tx done status = %d (wait = %lu, expire = %lu (%dms), urb->actual_length = %d, urb->status = %d)",
+               ret, ret_wait, expire, USB_TX_TIMEOUT,
+               urb->actual_length, urb->status);
 
-       async_urb = usb_alloc_urb(0, GFP_KERNEL);
-       if (unlikely(!async_urb)) {
-               dev_err(dev, "Error, couldn't allocate urb!");
-               return;
-       }
-
-       async_buf = kmalloc(size, GFP_KERNEL);
-       if (!async_buf) {
-               usb_free_urb(async_urb);
-               return;
-       }
-
-       /* outbound data */
-       if (usb_endpoint_xfer_int(ir->usb_ep_out))
-               usb_fill_int_urb(async_urb, ir->usbdev, ir->pipe_out,
-                                async_buf, size, mce_async_callback, ir,
-                                ir->usb_ep_out->bInterval);
-       else
-               usb_fill_bulk_urb(async_urb, ir->usbdev, ir->pipe_out,
-                                 async_buf, size, mce_async_callback, ir);
-
-       memcpy(async_buf, data, size);
-
-       dev_dbg(dev, "send request called (size=%#x)", size);
+       kfree(buf_out);
+       usb_free_urb(urb);
 
-       res = usb_submit_urb(async_urb, GFP_ATOMIC);
-       if (res) {
-               dev_err(dev, "send request FAILED! (res=%d)", res);
-               kfree(async_buf);
-               usb_free_urb(async_urb);
-               return;
-       }
-       dev_dbg(dev, "send request complete (res=%d)", res);
+       return ret;
 }
 
-static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
+static void mce_command_out(struct mceusb_dev *ir, u8 *data, int size)
 {
        int rsize = sizeof(DEVICE_RESUME);
 
        if (ir->need_reset) {
                ir->need_reset = false;
-               mce_request_packet(ir, DEVICE_RESUME, rsize);
+               mce_write(ir, DEVICE_RESUME, rsize);
                msleep(10);
        }
 
-       mce_request_packet(ir, data, size);
+       mce_write(ir, data, size);
        msleep(10);
 }
 
-/* Send data out the IR blaster port(s) */
+/*
+ * Transmit IR out the MCE device IR blaster port(s).
+ *
+ * Convert IR pulse/space sequence from LIRC to MCE format.
+ * Break up a long IR sequence into multiple parts (MCE IR data packets).
+ *
+ * u32 txbuf[] consists of IR pulse, space, ..., and pulse times in usec.
+ * Pulses and spaces are implicit by their position.
+ * The first IR sample, txbuf[0], is always a pulse.
+ *
+ * u8 irbuf[] consists of multiple IR data packets for the MCE device.
+ * A packet is 1 u8 MCE_IRDATA_HEADER and up to 30 u8 IR samples.
+ * An IR sample is 1-bit pulse/space flag with 7-bit time
+ * in MCE time units (50usec).
+ *
+ * Return: The number of IR samples sent (> 0) or errno (< 0).
+ */
 static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
 {
        struct mceusb_dev *ir = dev->priv;
-       int i, length, ret = 0;
-       int cmdcount = 0;
-       unsigned char cmdbuf[MCE_CMDBUF_SIZE];
-
-       /* MCE tx init header */
-       cmdbuf[cmdcount++] = MCE_CMD_PORT_IR;
-       cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS;
-       cmdbuf[cmdcount++] = ir->tx_mask;
+       u8 cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00 };
+       u8 irbuf[MCE_IRBUF_SIZE];
+       int ircount = 0;
+       unsigned int irsample;
+       int i, length, ret;
 
        /* Send the set TX ports command */
-       mce_async_out(ir, cmdbuf, cmdcount);
-       cmdcount = 0;
-
-       /* Generate mce packet data */
-       for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) {
-               txbuf[i] = txbuf[i] / MCE_TIME_UNIT;
-
-               do { /* loop to support long pulses/spaces > 127*50us=6.35ms */
-
-                       /* Insert mce packet header every 4th entry */
-                       if ((cmdcount < MCE_CMDBUF_SIZE) &&
-                           (cmdcount % MCE_CODE_LENGTH) == 0)
-                               cmdbuf[cmdcount++] = MCE_IRDATA_HEADER;
-
-                       /* Insert mce packet data */
-                       if (cmdcount < MCE_CMDBUF_SIZE)
-                               cmdbuf[cmdcount++] =
-                                       (txbuf[i] < MCE_PULSE_BIT ?
-                                        txbuf[i] : MCE_MAX_PULSE_LENGTH) |
-                                        (i & 1 ? 0x00 : MCE_PULSE_BIT);
-                       else {
-                               ret = -EINVAL;
-                               goto out;
+       cmdbuf[2] = ir->tx_mask;
+       mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+
+       /* Generate mce IR data packet */
+       for (i = 0; i < count; i++) {
+               irsample = txbuf[i] / MCE_TIME_UNIT;
+
+               /* loop to support long pulses/spaces > 6350us (127*50us) */
+               while (irsample > 0) {
+                       /* Insert IR header every 30th entry */
+                       if (ircount % MCE_PACKET_SIZE == 0) {
+                               /* Room for IR header and one IR sample? */
+                               if (ircount >= MCE_IRBUF_SIZE - 1) {
+                                       /* Send near full buffer */
+                                       ret = mce_write(ir, irbuf, ircount);
+                                       if (ret < 0)
+                                               return ret;
+                                       ircount = 0;
+                               }
+                               irbuf[ircount++] = MCE_IRDATA_HEADER;
                        }
 
-               } while ((txbuf[i] > MCE_MAX_PULSE_LENGTH) &&
-                        (txbuf[i] -= MCE_MAX_PULSE_LENGTH));
-       }
-
-       /* Check if we have room for the empty packet at the end */
-       if (cmdcount >= MCE_CMDBUF_SIZE) {
-               ret = -EINVAL;
-               goto out;
-       }
+                       /* Insert IR sample */
+                       if (irsample <= MCE_MAX_PULSE_LENGTH) {
+                               irbuf[ircount] = irsample;
+                               irsample = 0;
+                       } else {
+                               irbuf[ircount] = MCE_MAX_PULSE_LENGTH;
+                               irsample -= MCE_MAX_PULSE_LENGTH;
+                       }
+                       /*
+                        * Even i = IR pulse
+                        * Odd  i = IR space
+                        */
+                       irbuf[ircount] |= (i & 1 ? 0 : MCE_PULSE_BIT);
+                       ircount++;
+
+                       /* IR buffer full? */
+                       if (ircount >= MCE_IRBUF_SIZE) {
+                               /* Fix packet length in last header */
+                               length = ircount % MCE_PACKET_SIZE;
+                               if (length > 0)
+                                       irbuf[ircount - length] -=
+                                               MCE_PACKET_SIZE - length;
+                               /* Send full buffer */
+                               ret = mce_write(ir, irbuf, ircount);
+                               if (ret < 0)
+                                       return ret;
+                               ircount = 0;
+                       }
+               }
+       } /* after for loop, 0 <= ircount < MCE_IRBUF_SIZE */
 
        /* Fix packet length in last header */
-       length = cmdcount % MCE_CODE_LENGTH;
-       cmdbuf[cmdcount - length] -= MCE_CODE_LENGTH - length;
+       length = ircount % MCE_PACKET_SIZE;
+       if (length > 0)
+               irbuf[ircount - length] -= MCE_PACKET_SIZE - length;
 
-       /* All mce commands end with an empty packet (0x80) */
-       cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER;
+       /* Append IR trailer (0x80) to final partial (or empty) IR buffer */
+       irbuf[ircount++] = MCE_IRDATA_TRAILER;
 
-       /* Transmit the command to the mce device */
-       mce_async_out(ir, cmdbuf, cmdcount);
+       /* Send final buffer */
+       ret = mce_write(ir, irbuf, ircount);
+       if (ret < 0)
+               return ret;
 
-out:
-       return ret ? ret : count;
+       return count;
 }
 
 /* Sets active IR outputs -- mce devices typically have two */
@@ -965,7 +1022,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
                        cmdbuf[2] = MCE_CMD_SIG_END;
                        cmdbuf[3] = MCE_IRDATA_TRAILER;
                        dev_dbg(ir->dev, "disabling carrier modulation");
-                       mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+                       mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
                        return 0;
                }
 
@@ -979,7 +1036,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
                                                                carrier);
 
                                /* Transmit new carrier to mce device */
-                               mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+                               mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
                                return 0;
                        }
                }
@@ -1002,10 +1059,10 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
        cmdbuf[2] = units >> 8;
        cmdbuf[3] = units;
 
-       mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+       mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
 
        /* get receiver timeout value */
-       mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
+       mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
 
        return 0;
 }
@@ -1030,7 +1087,7 @@ static int mceusb_set_rx_wideband(struct rc_dev *dev, int enable)
                ir->wideband_rx_enabled = false;
                cmdbuf[2] = 1;  /* port 1 is long range receiver */
        }
-       mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+       mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
        /* response from device sets ir->learning_active */
 
        return 0;
@@ -1053,7 +1110,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
                ir->carrier_report_enabled = true;
                if (!ir->learning_active) {
                        cmdbuf[2] = 2;  /* port 2 is short range receiver */
-                       mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+                       mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
                }
        } else {
                ir->carrier_report_enabled = false;
@@ -1064,7 +1121,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
                 */
                if (ir->learning_active && !ir->wideband_rx_enabled) {
                        cmdbuf[2] = 1;  /* port 1 is long range receiver */
-                       mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+                       mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
                }
        }
 
@@ -1143,6 +1200,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
                }
                break;
        case MCE_RSP_CMD_ILLEGAL:
+       case MCE_RSP_TX_TIMEOUT:
                ir->need_reset = true;
                break;
        default:
@@ -1280,7 +1338,7 @@ static void mceusb_get_emulator_version(struct mceusb_dev *ir)
 {
        /* If we get no reply or an illegal command reply, its ver 1, says MS */
        ir->emver = 1;
-       mce_async_out(ir, GET_EMVER, sizeof(GET_EMVER));
+       mce_command_out(ir, GET_EMVER, sizeof(GET_EMVER));
 }
 
 static void mceusb_gen1_init(struct mceusb_dev *ir)
@@ -1326,10 +1384,10 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
        dev_dbg(dev, "set handshake  - retC = %d", ret);
 
        /* device resume */
-       mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
+       mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
 
        /* get hw/sw revision? */
-       mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
+       mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
 
        kfree(data);
 }
@@ -1337,13 +1395,13 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
 static void mceusb_gen2_init(struct mceusb_dev *ir)
 {
        /* device resume */
-       mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
+       mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
 
        /* get wake version (protocol, key, address) */
-       mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
+       mce_command_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
 
        /* unknown what this one actually returns... */
-       mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
+       mce_command_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
 }
 
 static void mceusb_get_parameters(struct mceusb_dev *ir)
@@ -1357,24 +1415,24 @@ static void mceusb_get_parameters(struct mceusb_dev *ir)
        ir->num_rxports = 2;
 
        /* get number of tx and rx ports */
-       mce_async_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
+       mce_command_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
 
        /* get the carrier and frequency */
-       mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
+       mce_command_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
 
        if (ir->num_txports && !ir->flags.no_tx)
                /* get the transmitter bitmask */
-               mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
+               mce_command_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
 
        /* get receiver timeout value */
-       mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
+       mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
 
        /* get receiver sensor setting */
-       mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
+       mce_command_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
 
        for (i = 0; i < ir->num_txports; i++) {
                cmdbuf[2] = i;
-               mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+               mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
        }
 }
 
@@ -1383,7 +1441,7 @@ static void mceusb_flash_led(struct mceusb_dev *ir)
        if (ir->emver < 2)
                return;
 
-       mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED));
+       mce_command_out(ir, FLASH_LED, sizeof(FLASH_LED));
 }
 
 /*
index e42efd9d382ec4290413e606b6f6040c28805cfe..d37b85d2bc750c0a3a5283b63c310c0663e33b02 100644 (file)
 /* Fields containing pulse width data */
 #define MTK_WIDTH_MASK           (GENMASK(7, 0))
 
+/* IR threshold */
+#define MTK_IRTHD               0x14
+#define MTK_DG_CNT_MASK                 (GENMASK(12, 8))
+#define MTK_DG_CNT(x)           ((x) << 8)
+
 /* Bit to enable interrupt */
 #define MTK_IRINT_EN             BIT(0)
 
@@ -409,6 +414,9 @@ static int mtk_ir_probe(struct platform_device *pdev)
        mtk_w32_mask(ir, val, ir->data->fields[MTK_HW_PERIOD].mask,
                     ir->data->fields[MTK_HW_PERIOD].reg);
 
+       /* Set de-glitch counter */
+       mtk_w32_mask(ir, MTK_DG_CNT(1), MTK_DG_CNT_MASK, MTK_IRTHD);
+
        /* Enable IR and PWM */
        val = mtk_r32(ir, MTK_CONFIG_HIGH_REG);
        val |= MTK_OK_COUNT(ir->data->ok_count) |  MTK_PWM_EN | MTK_IR_EN;
index 257ae0d8cfe27250d52a6d6ee3523c40d0028320..e3f63299f85c0906a31db16528f7af468b836e68 100644 (file)
@@ -623,6 +623,12 @@ static int au0828_usb_probe(struct usb_interface *interface,
        /* Setup */
        au0828_card_setup(dev);
 
+       /*
+        * Store the pointer to the au0828_dev so it can be accessed in
+        * au0828_usb_disconnect
+        */
+       usb_set_intfdata(interface, dev);
+
        /* Analog TV */
        retval = au0828_analog_register(dev, interface);
        if (retval) {
@@ -641,12 +647,6 @@ static int au0828_usb_probe(struct usb_interface *interface,
        /* Remote controller */
        au0828_rc_register(dev);
 
-       /*
-        * Store the pointer to the au0828_dev so it can be accessed in
-        * au0828_usb_disconnect
-        */
-       usb_set_intfdata(interface, dev);
-
        pr_info("Registered device AU0828 [%s]\n",
                dev->board.name == NULL ? "Unset" : dev->board.name);
 
index a771e0a52610c84e492f1bafccfebbfe9f73e7c5..4c191fcd3a7f55be1c7306bdfbb8b87d97e1ca36 100644 (file)
@@ -685,6 +685,10 @@ static int submit_urbs(struct camera_data *cam)
                if (!urb) {
                        for (j = 0; j < i; j++)
                                usb_free_urb(cam->sbuf[j].urb);
+                       for (j = 0; j < NUM_SBUF; j++) {
+                               kfree(cam->sbuf[j].data);
+                               cam->sbuf[j].data = NULL;
+                       }
                        return -ENOMEM;
                }
 
@@ -902,7 +906,6 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
        cpia2_unregister_camera(cam);
        v4l2_device_disconnect(&cam->v4l2_dev);
        mutex_unlock(&cam->v4l2_lock);
-       v4l2_device_put(&cam->v4l2_dev);
 
        if(cam->buffers) {
                DBG("Wakeup waiting processes\n");
@@ -911,6 +914,8 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
                wake_up_interruptible(&cam->wq_stream);
        }
 
+       v4l2_device_put(&cam->v4l2_dev);
+
        LOG("CPiA2 camera disconnected.\n");
 }
 
index 091389fdf89ee62f6e15f8c02f025cfd8ffe6c66..c8d79502827b7ecab4f7757624e30086de013bb1 100644 (file)
@@ -2442,9 +2442,13 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
                8, 0x0486,
        };
 
+       if (!IS_ENABLED(CONFIG_DVB_DIB9000))
+               return -ENODEV;
        if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL)
                return -ENODEV;
        i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
+       if (!i2c)
+               return -ENODEV;
        if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0)
                return -ENODEV;
        dib0700_set_i2c_speed(adap->dev, 1500);
@@ -2520,10 +2524,14 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
                0, 0x00ef,
                8, 0x0406,
        };
+       if (!IS_ENABLED(CONFIG_DVB_DIB9000))
+               return -ENODEV;
        i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe);
        if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL)
                return -ENODEV;
        i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
+       if (!i2c)
+               return -ENODEV;
        if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0)
                return -ENODEV;
 
index 40ca4eafb137412edd1bb58f24868da5069e017c..39ac22486bcd91b6823ba0055fc58444afccba59 100644 (file)
@@ -287,12 +287,15 @@ EXPORT_SYMBOL(dvb_usb_device_init);
 void dvb_usb_device_exit(struct usb_interface *intf)
 {
        struct dvb_usb_device *d = usb_get_intfdata(intf);
-       const char *name = "generic DVB-USB module";
+       const char *default_name = "generic DVB-USB module";
+       char name[40];
 
        usb_set_intfdata(intf, NULL);
        if (d != NULL && d->desc != NULL) {
-               name = d->desc->name;
+               strscpy(name, d->desc->name, sizeof(name));
                dvb_usb_exit(d);
+       } else {
+               strscpy(name, default_name, sizeof(name));
        }
        info("%s successfully deinitialized and disconnected.", name);
 
index 0af74383083d25ece0606a4826e5d9053b29d5ed..ae793dac49648a104db367f66f8e7efc7b8203de 100644 (file)
@@ -913,14 +913,6 @@ static int pctv452e_frontend_attach(struct dvb_usb_adapter *a)
                                                &a->dev->i2c_adap);
        if (!a->fe_adap[0].fe)
                return -ENODEV;
-
-       /*
-        * dvb_frontend will call dvb_detach for both stb0899_detach
-        * and stb0899_release but we only do dvb_attach(stb0899_attach).
-        * Increment the module refcount instead.
-        */
-       symbol_get(stb0899_attach);
-
        if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe,
                                        &a->dev->i2c_adap)) == NULL)
                err("Cannot attach lnbp22\n");
index 18d0f8f5283fa6cb96af7702229a3dc196950a24..8d8e9f56a8be5c7d5fa04384157a1eee783137f3 100644 (file)
@@ -607,10 +607,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
 {
        struct technisat_usb2_state *state = d->priv;
-       u8 *buf = state->buf;
-       u8 *b;
-       int ret;
        struct ir_raw_event ev;
+       u8 *buf = state->buf;
+       int i, ret;
 
        buf[0] = GET_IR_DATA_VENDOR_REQUEST;
        buf[1] = 0x08;
@@ -646,26 +645,25 @@ unlock:
                return 0; /* no key pressed */
 
        /* decoding */
-       b = buf+1;
 
 #if 0
        deb_rc("RC: %d ", ret);
-       debug_dump(b, ret, deb_rc);
+       debug_dump(buf + 1, ret, deb_rc);
 #endif
 
        ev.pulse = 0;
-       while (1) {
-               ev.pulse = !ev.pulse;
-               ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
-               ir_raw_event_store(d->rc_dev, &ev);
-
-               b++;
-               if (*b == 0xff) {
+       for (i = 1; i < ARRAY_SIZE(state->buf); i++) {
+               if (buf[i] == 0xff) {
                        ev.pulse = 0;
                        ev.duration = 888888*2;
                        ir_raw_event_store(d->rc_dev, &ev);
                        break;
                }
+
+               ev.pulse = !ev.pulse;
+               ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
+                              FIRMWARE_CLOCK_TICK) / 1000;
+               ir_raw_event_store(d->rc_dev, &ev);
        }
 
        ir_raw_event_handle(d->rc_dev);
index 87b887b7604ef31716faccaa12812897e9b20641..3f59a98dbf9a161e10dec9e37e5d204e83c9363a 100644 (file)
@@ -4020,7 +4020,6 @@ static void em28xx_usb_disconnect(struct usb_interface *intf)
                dev->dev_next->disconnected = 1;
                dev_info(&dev->intf->dev, "Disconnecting %s\n",
                         dev->dev_next->name);
-               flush_request_modules(dev->dev_next);
        }
 
        dev->disconnected = 1;
index 989ae997f66de8ccb174af33016556583294ef45..89b9293b31bef597b528814d0a95b4132050a49c 100644 (file)
@@ -123,6 +123,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index)
        if (ret < 0) {
                pr_err("reg_r err %d\n", ret);
                gspca_dev->usb_err = ret;
+               /*
+                * Make sure the buffer is zeroed to avoid uninitialized
+                * values.
+                */
+               memset(gspca_dev->usb_buf, 0, 2);
        }
 }
 
index bedc04a72e97e01f540c83283aaae4e786445339..bde4441f935e79ff1414bc474f47af0bd1d88d15 100644 (file)
@@ -1581,6 +1581,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
        if (ret < 0) {
                pr_err("reg_r err %d\n", ret);
                gspca_dev->usb_err = ret;
+               /*
+                * Make sure the buffer is zeroed to avoid uninitialized
+                * values.
+                */
+               memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
                return;
        }
        if (len == 1)
index 10fcbe9e8614b2f0d1d064a72573ac806309a43f..cb41e61d50dd31d695f261e1d2959fbc963d84dc 100644 (file)
@@ -2083,6 +2083,11 @@ static int reg_r(struct sd *sd, u16 index)
        } else {
                gspca_err(gspca_dev, "reg_r %02x failed %d\n", index, ret);
                sd->gspca_dev.usb_err = ret;
+               /*
+                * Make sure the result is zeroed to avoid uninitialized
+                * values.
+                */
+               gspca_dev->usb_buf[0] = 0;
        }
 
        return ret;
@@ -2111,6 +2116,11 @@ static int reg_r8(struct sd *sd,
        } else {
                gspca_err(gspca_dev, "reg_r8 %02x failed %d\n", index, ret);
                sd->gspca_dev.usb_err = ret;
+               /*
+                * Make sure the buffer is zeroed to avoid uninitialized
+                * values.
+                */
+               memset(gspca_dev->usb_buf, 0, 8);
        }
 
        return ret;
index d06dc0755b9a5586f93434a47440be353ee1e660..9e3326b66c7922fe067a9f6f2d7c4e01d3125b10 100644 (file)
@@ -642,6 +642,11 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
        if (ret < 0) {
                pr_err("read failed %d\n", ret);
                gspca_dev->usb_err = ret;
+               /*
+                * Make sure the result is zeroed to avoid uninitialized
+                * values.
+                */
+               gspca_dev->usb_buf[0] = 0;
        }
        return gspca_dev->usb_buf[0];
 }
index 3d1364d2f83e628d0b80adca5f683cf4d05b0003..4d4ae22e96406c53ee59a25871c46fbc294b4a62 100644 (file)
@@ -1154,6 +1154,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg)
        if (ret < 0) {
                pr_err("reg_r err %d\n", ret);
                gspca_dev->usb_err = ret;
+               return 0;
        }
        return gspca_dev->usb_buf[0];
 }
index 477da0664b7daf1a77537c6314c22bcb7778550d..40b87717bb5c5ab76e13970aead7d04b2520d361 100644 (file)
@@ -111,6 +111,11 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
                        pr_err("read req failed req %#04x error %d\n",
                               req, err);
                gspca_dev->usb_err = err;
+               /*
+                * Make sure the buffer is zeroed to avoid uninitialized
+                * values.
+                */
+               memset(gspca_dev->usb_buf, 0, READ_REQ_SIZE);
        }
 }
 
index cfa2a04d9f3f6a90cb4c34fd32f32c6f15ba3583..efca54ee0f35230f060d24930dcd771a7b245137 100644 (file)
@@ -132,6 +132,13 @@ static const struct dmi_system_id flip_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "0341")
                }
        },
+       {
+               .ident = "MSI MS-1039",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MS-1039"),
+               }
+       },
        {
                .ident = "MSI MS-1632",
                .matches = {
@@ -918,6 +925,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length)
        if (unlikely(result < 0 || result != length)) {
                pr_err("Read register %02x failed %d\n", reg, result);
                gspca_dev->usb_err = result;
+               /*
+                * Make sure the buffer is zeroed to avoid uninitialized
+                * values.
+                */
+               memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
        }
 }
 
index 5f3f2979540a64b7a868dd2573db3c329a6e08da..22de65d840dd3d980b5725b45762c02e9a1e6714 100644 (file)
@@ -462,6 +462,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
                dev_err(gspca_dev->v4l2_dev.dev,
                        "Error reading register %02x: %d\n", value, res);
                gspca_dev->usb_err = res;
+               /*
+                * Make sure the result is zeroed to avoid uninitialized
+                * values.
+                */
+               gspca_dev->usb_buf[0] = 0;
        }
 }
 
index df8d8482b79599974b5465b1fd4a995e02bcfb90..fa108ce000ad6e77143c97ed8769f87e958f7978 100644 (file)
@@ -1171,6 +1171,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
        if (ret < 0) {
                pr_err("reg_r err %d\n", ret);
                gspca_dev->usb_err = ret;
+               /*
+                * Make sure the buffer is zeroed to avoid uninitialized
+                * values.
+                */
+               memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
        }
 }
 
index d25924e430f37b8a3f2c9af0fb208db0079491be..a20eb8580db2ea944100ecfd32ecafc9825f49eb 100644 (file)
@@ -80,6 +80,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
        if (ret < 0) {
                pr_err("reg_r err %d\n", ret);
                gspca_dev->usb_err = ret;
+               /*
+                * Make sure the buffer is zeroed to avoid uninitialized
+                * values.
+                */
+               memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
        }
 }
 
index d7cbcf2b394794ac8d51529da921c2c51eb14065..3521f5ff428e9e28f545835497f5334cd0bd03a6 100644 (file)
@@ -434,6 +434,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
        if (ret < 0) {
                pr_err("reg_r %04x failed %d\n", value, ret);
                gspca_dev->usb_err = ret;
+               /*
+                * Make sure the buffer is zeroed to avoid uninitialized
+                * values.
+                */
+               memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
        }
 }
 
index 437a3367ab97488734b88685341a1eb8085aa98f..26eae69a2562f565a51f8f41895cbd44a2bb12f4 100644 (file)
@@ -264,6 +264,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
        if (ret < 0) {
                pr_err("reg_r err %d\n", ret);
                gspca_dev->usb_err = ret;
+               /*
+                * Make sure the buffer is zeroed to avoid uninitialized
+                * values.
+                */
+               memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
        }
 }
 
index 52d0716596343dbd44a93b444f6c48abdc16dee9..6e32264d3825a5e744c8ed5de9c35f317c41131a 100644 (file)
@@ -2915,6 +2915,11 @@ static void reg_r_i(struct gspca_dev *gspca_dev,
        if (ret < 0) {
                pr_err("reg_r err %d\n", ret);
                gspca_dev->usb_err = ret;
+               /*
+                * Make sure the buffer is zeroed to avoid uninitialized
+                * values.
+                */
+               memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
        }
 }
 static void reg_r(struct gspca_dev *gspca_dev,
index abfab3de18662b1ed60499538e3fbe0f5c88664c..ef0a839f9b8aeccf5e61a5fe6d9fbe9bf1e8ed20 100644 (file)
@@ -143,6 +143,11 @@ static int w9968cf_read_sb(struct sd *sd)
        } else {
                pr_err("Read SB reg [01] failed\n");
                sd->gspca_dev.usb_err = ret;
+               /*
+                * Make sure the buffer is zeroed to avoid uninitialized
+                * values.
+                */
+               memset(sd->gspca_dev.usb_buf, 0, 2);
        }
 
        udelay(W9968CF_I2C_BUS_DELAY);
index 29ac7fc5b039fe1a4f51c8becfe492cbc63e5a39..3316a17c141be265bbc5077896ca28daf0aeaa3c 100644 (file)
@@ -141,6 +141,7 @@ static int device_authorization(struct hdpvr_device *dev)
 
        dev->fw_ver = dev->usbc_buf[1];
 
+       dev->usbc_buf[46] = '\0';
        v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n",
                          dev->fw_ver, &dev->usbc_buf[2]);
 
@@ -275,6 +276,7 @@ static int hdpvr_probe(struct usb_interface *interface,
 #endif
        size_t buffer_size;
        int i;
+       int dev_num;
        int retval = -ENOMEM;
 
        /* allocate memory for our device state and initialize it */
@@ -372,8 +374,17 @@ static int hdpvr_probe(struct usb_interface *interface,
        }
 #endif
 
+       dev_num = atomic_inc_return(&dev_nr);
+       if (dev_num >= HDPVR_MAX) {
+               v4l2_err(&dev->v4l2_dev,
+                        "max device number reached, device register failed\n");
+               atomic_dec(&dev_nr);
+               retval = -ENODEV;
+               goto reg_fail;
+       }
+
        retval = hdpvr_register_videodev(dev, &interface->dev,
-                                   video_nr[atomic_inc_return(&dev_nr)]);
+                                   video_nr[dev_num]);
        if (retval < 0) {
                v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
                goto reg_fail;
index 1b89c77bad6673d03600f2961f4c8fc96667b5dc..0615996572e41a618626795d36822ee466d324d2 100644 (file)
@@ -439,7 +439,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
        /* wait for the first buffer */
        if (!(file->f_flags & O_NONBLOCK)) {
                if (wait_event_interruptible(dev->wait_data,
-                                            hdpvr_get_next_buffer(dev)))
+                                            !list_empty_careful(&dev->rec_buff_list)))
                        return -ERESTARTSYS;
        }
 
@@ -465,10 +465,17 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
                                goto err;
                        }
                        if (!err) {
-                               v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
-                                       "timeout: restart streaming\n");
+                               v4l2_info(&dev->v4l2_dev,
+                                         "timeout: restart streaming\n");
+                               mutex_lock(&dev->io_mutex);
                                hdpvr_stop_streaming(dev);
-                               msecs_to_jiffies(4000);
+                               mutex_unlock(&dev->io_mutex);
+                               /*
+                                * The FW needs about 4 seconds after streaming
+                                * stopped before it is ready to restart
+                                * streaming.
+                                */
+                               msleep(4000);
                                err = hdpvr_start_streaming(dev);
                                if (err) {
                                        ret = err;
@@ -1133,9 +1140,7 @@ static void hdpvr_device_release(struct video_device *vdev)
        struct hdpvr_device *dev = video_get_drvdata(vdev);
 
        hdpvr_delete(dev);
-       mutex_lock(&dev->io_mutex);
        flush_work(&dev->worker);
-       mutex_unlock(&dev->io_mutex);
 
        v4l2_device_unregister(&dev->v4l2_dev);
        v4l2_ctrl_handler_free(&dev->hdl);
index 673fdca8d2dac0fc09fc713b8f87e2ce5879bd5f..fcb201a40920e0281f15d05f55fb8550de245b0e 100644 (file)
@@ -1680,7 +1680,7 @@ static int pvr2_decoder_enable(struct pvr2_hdw *hdw,int enablefl)
        }
        if (!hdw->flag_decoder_missed) {
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-                          "WARNING: No decoder present");
+                          "***WARNING*** No decoder present");
                hdw->flag_decoder_missed = !0;
                trace_stbit("flag_decoder_missed",
                            hdw->flag_decoder_missed);
@@ -2366,7 +2366,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
        if (hdw_desc->flag_is_experimental) {
                pvr2_trace(PVR2_TRACE_INFO, "**********");
                pvr2_trace(PVR2_TRACE_INFO,
-                          "WARNING: Support for this device (%s) is experimental.",
+                          "***WARNING*** Support for this device (%s) is experimental.",
                                                              hdw_desc->description);
                pvr2_trace(PVR2_TRACE_INFO,
                           "Important functionality might not be entirely working.");
index f3003ca05f4ba370d3ccbc647b269ca5bd817c9c..922c06279663519e4e4240b229784cefb7702b1c 100644 (file)
@@ -343,11 +343,11 @@ static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
 
        if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-                          "WARNING: Detected a wedged cx25840 chip; the device will not work.");
+                          "***WARNING*** Detected a wedged cx25840 chip; the device will not work.");
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-                          "WARNING: Try power cycling the pvrusb2 device.");
+                          "***WARNING*** Try power cycling the pvrusb2 device.");
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-                          "WARNING: Disabling further access to the device to prevent other foul-ups.");
+                          "***WARNING*** Disabling further access to the device to prevent other foul-ups.");
                // This blocks all further communication with the part.
                hdw->i2c_func[0x44] = NULL;
                pvr2_hdw_render_useless(hdw);
index 6b651f8b54df0f7a713d34959b08c7bcac0cec39..37dc299a1ca2682e41913388ff2d11a87880dd58 100644 (file)
@@ -353,7 +353,7 @@ struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
                bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
                pvr2_trace(
                        PVR2_TRACE_ERROR_LEGS,
-                       "WARNING: Failed to classify the following standard(s): %.*s",
+                       "***WARNING*** Failed to classify the following standard(s): %.*s",
                        bcnt,buf);
        }
 
index 5accb52410720196b24181e18ee0620e68dbc223..6e3f234e790b849b5cd0c222556659f420c0f130 100644 (file)
@@ -641,8 +641,7 @@ static int v4l_stk_release(struct file *fp)
                dev->owner = NULL;
        }
 
-       if (is_present(dev))
-               usb_autopm_put_interface(dev->interface);
+       usb_autopm_put_interface(dev->interface);
        mutex_unlock(&dev->lock);
        return v4l2_fh_release(fp);
 }
index 3a4e545c603745126c4a4e8f6bfdbc58a54f1092..3db2fd7f5d7c49a2ec768d26a73374921fd7e82c 100644 (file)
@@ -105,6 +105,7 @@ static void tm6000_urb_received(struct urb *urb)
                        printk(KERN_ERR "tm6000:  error %s\n", __func__);
                        kfree(urb->transfer_buffer);
                        usb_free_urb(urb);
+                       dev->dvb->bulk_urb = NULL;
                }
        }
 }
@@ -135,6 +136,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
        dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
        if (!dvb->bulk_urb->transfer_buffer) {
                usb_free_urb(dvb->bulk_urb);
+               dvb->bulk_urb = NULL;
                return -ENOMEM;
        }
 
@@ -161,6 +163,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
 
                kfree(dvb->bulk_urb->transfer_buffer);
                usb_free_urb(dvb->bulk_urb);
+               dvb->bulk_urb = NULL;
                return ret;
        }
 
index 44ca66cb9b8f141e15d7ca0ccc0b7ae5c2abee60..f34efa7c61b40c9dd71c7bc6be2d519374664a07 100644 (file)
@@ -329,7 +329,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
 
        dprintk("%s\n", __func__);
 
-       b = kmalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
+       b = kzalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
        if (!b)
                return -ENOMEM;
 
index 467b1ddaf4e75ec641129eb16388bba7189de692..f2854337cdcac8073bd59854bfd58eb22d2480fc 100644 (file)
@@ -2350,7 +2350,9 @@ void uvc_ctrl_cleanup_device(struct uvc_device *dev)
        struct uvc_entity *entity;
        unsigned int i;
 
-       cancel_work_sync(&dev->async_ctrl.work);
+       /* Can be uninitialized if we are aborting on probe error. */
+       if (dev->async_ctrl.work.func)
+               cancel_work_sync(&dev->async_ctrl.work);
 
        /* Free controls and control mappings for all entities. */
        list_for_each_entry(entity, &dev->entities, list) {
index 6ac5f5d426154c60eddc6662bb544f96d851a3ed..0986572bbe881323f5675f6cb10b94c8191cdcc7 100644 (file)
@@ -2249,16 +2249,15 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
                v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
                                                                &def, &flags);
 
-       is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
-                  cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
+       is_menu = (type == V4L2_CTRL_TYPE_MENU ||
+                  type == V4L2_CTRL_TYPE_INTEGER_MENU);
        if (is_menu)
                WARN_ON(step);
        else
                WARN_ON(cfg->menu_skip_mask);
-       if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
+       if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
                qmenu = v4l2_ctrl_get_menu(cfg->id);
-       else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
-                qmenu_int == NULL) {
+       } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
                handler_set_err(hdl, -EINVAL);
                return NULL;
        }
index c7c600c1f63b85295daa75ea4dee4de4c10e82f4..a24b40dfec97a3b7b75b077791c28df9ff527af5 100644 (file)
@@ -15,6 +15,7 @@
 #include <media/v4l2-dv-timings.h>
 #include <linux/math64.h>
 #include <linux/hdmi.h>
+#include <media/cec.h>
 
 MODULE_AUTHOR("Hans Verkuil");
 MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
@@ -942,3 +943,153 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
        return c;
 }
 EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry);
+
+/**
+ * v4l2_get_edid_phys_addr() - find and return the physical address
+ *
+ * @edid:      pointer to the EDID data
+ * @size:      size in bytes of the EDID data
+ * @offset:    If not %NULL then the location of the physical address
+ *             bytes in the EDID will be returned here. This is set to 0
+ *             if there is no physical address found.
+ *
+ * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
+ */
+u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
+                           unsigned int *offset)
+{
+       unsigned int loc = cec_get_edid_spa_location(edid, size);
+
+       if (offset)
+               *offset = loc;
+       if (loc == 0)
+               return CEC_PHYS_ADDR_INVALID;
+       return (edid[loc] << 8) | edid[loc + 1];
+}
+EXPORT_SYMBOL_GPL(v4l2_get_edid_phys_addr);
+
+/**
+ * v4l2_set_edid_phys_addr() - find and set the physical address
+ *
+ * @edid:      pointer to the EDID data
+ * @size:      size in bytes of the EDID data
+ * @phys_addr: the new physical address
+ *
+ * This function finds the location of the physical address in the EDID
+ * and fills in the given physical address and updates the checksum
+ * at the end of the EDID block. It does nothing if the EDID doesn't
+ * contain a physical address.
+ */
+void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
+{
+       unsigned int loc = cec_get_edid_spa_location(edid, size);
+       u8 sum = 0;
+       unsigned int i;
+
+       if (loc == 0)
+               return;
+       edid[loc] = phys_addr >> 8;
+       edid[loc + 1] = phys_addr & 0xff;
+       loc &= ~0x7f;
+
+       /* update the checksum */
+       for (i = loc; i < loc + 127; i++)
+               sum += edid[i];
+       edid[i] = 256 - sum;
+}
+EXPORT_SYMBOL_GPL(v4l2_set_edid_phys_addr);
+
+/**
+ * v4l2_phys_addr_for_input() - calculate the PA for an input
+ *
+ * @phys_addr: the physical address of the parent
+ * @input:     the number of the input port, must be between 1 and 15
+ *
+ * This function calculates a new physical address based on the input
+ * port number. For example:
+ *
+ * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
+ *
+ * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
+ *
+ * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
+ *
+ * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
+ *
+ * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
+ */
+u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input)
+{
+       /* Check if input is sane */
+       if (WARN_ON(input == 0 || input > 0xf))
+               return CEC_PHYS_ADDR_INVALID;
+
+       if (phys_addr == 0)
+               return input << 12;
+
+       if ((phys_addr & 0x0fff) == 0)
+               return phys_addr | (input << 8);
+
+       if ((phys_addr & 0x00ff) == 0)
+               return phys_addr | (input << 4);
+
+       if ((phys_addr & 0x000f) == 0)
+               return phys_addr | input;
+
+       /*
+        * All nibbles are used so no valid physical addresses can be assigned
+        * to the input.
+        */
+       return CEC_PHYS_ADDR_INVALID;
+}
+EXPORT_SYMBOL_GPL(v4l2_phys_addr_for_input);
+
+/**
+ * v4l2_phys_addr_validate() - validate a physical address from an EDID
+ *
+ * @phys_addr: the physical address to validate
+ * @parent:    if not %NULL, then this is filled with the parents PA.
+ * @port:      if not %NULL, then this is filled with the input port.
+ *
+ * This validates a physical address as read from an EDID. If the
+ * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
+ * then it will return -EINVAL.
+ *
+ * The parent PA is passed into %parent and the input port is passed into
+ * %port. For example:
+ *
+ * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
+ *
+ * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
+ *
+ * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
+ *
+ * PA = f.f.f.f: has parent f.f.f.f and input port 0.
+ *
+ * Return: 0 if the PA is valid, -EINVAL if not.
+ */
+int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
+{
+       int i;
+
+       if (parent)
+               *parent = phys_addr;
+       if (port)
+               *port = 0;
+       if (phys_addr == CEC_PHYS_ADDR_INVALID)
+               return 0;
+       for (i = 0; i < 16; i += 4)
+               if (phys_addr & (0xf << i))
+                       break;
+       if (i == 16)
+               return 0;
+       if (parent)
+               *parent = phys_addr & (0xfff0 << i);
+       if (port)
+               *port = (phys_addr >> i) & 0xf;
+       for (i += 4; i < 16; i += 4)
+               if ((phys_addr & (0xf << i)) == 0)
+                       return -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_phys_addr_validate);
index 1246d69ba187422610fd65f68fba60a2dbd181dd..b1564cacd19e1b86dc26fabe945b4c892e5f4e40 100644 (file)
@@ -629,13 +629,18 @@ static int __init memstick_init(void)
                return -ENOMEM;
 
        rc = bus_register(&memstick_bus_type);
-       if (!rc)
-               rc = class_register(&memstick_host_class);
+       if (rc)
+               goto error_destroy_workqueue;
 
-       if (!rc)
-               return 0;
+       rc = class_register(&memstick_host_class);
+       if (rc)
+               goto error_bus_unregister;
+
+       return 0;
 
+error_bus_unregister:
        bus_unregister(&memstick_bus_type);
+error_destroy_workqueue:
        destroy_workqueue(workqueue);
 
        return rc;
index bcdca9fbef51cc802d34b74a4fab19abd308b1bc..29f5021d21ea62dc83344c2c5d3ea9f6630f85e0 100644 (file)
@@ -949,7 +949,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
        if (!cnt) {
                rc = -ENODEV;
                pci_dev_busy = 1;
-               goto err_out;
+               goto err_out_int;
        }
 
        jm = kzalloc(sizeof(struct jmb38x_ms)
index 11841f4b7b2ba4c9f1fe845fdf1c8daaf4e539ae..dd938a5d04094e5e2476e33972fa8cafc8078309 100644 (file)
@@ -509,10 +509,10 @@ config INTEL_SOC_PMIC
        bool "Support for Crystal Cove PMIC"
        depends on ACPI && HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK
        depends on X86 || COMPILE_TEST
+       depends on I2C_DESIGNWARE_PLATFORM=y
        select MFD_CORE
        select REGMAP_I2C
        select REGMAP_IRQ
-       select I2C_DESIGNWARE_PLATFORM
        help
          Select this option to enable support for Crystal Cove PMIC
          on some Intel SoC systems. The PMIC provides ADC, GPIO,
@@ -538,10 +538,10 @@ config INTEL_SOC_PMIC_CHTWC
        bool "Support for Intel Cherry Trail Whiskey Cove PMIC"
        depends on ACPI && HAS_IOMEM && I2C=y && COMMON_CLK
        depends on X86 || COMPILE_TEST
+       depends on I2C_DESIGNWARE_PLATFORM=y
        select MFD_CORE
        select REGMAP_I2C
        select REGMAP_IRQ
-       select I2C_DESIGNWARE_PLATFORM
        help
          Select this option to enable support for the Intel Cherry Trail
          Whiskey Cove PMIC found on some Intel Cherry Trail systems.
@@ -1403,9 +1403,9 @@ config MFD_TPS65217
 config MFD_TPS68470
        bool "TI TPS68470 Power Management / LED chips"
        depends on ACPI && I2C=y
+       depends on I2C_DESIGNWARE_PLATFORM=y
        select MFD_CORE
        select REGMAP_I2C
-       select I2C_DESIGNWARE_PLATFORM
        help
          If you say yes here you get support for the TPS68470 series of
          Power Management / LED chips.
index 5f1e37d23943a3986bcb416b42f4170fa8a478c5..47d6d40f41cd52fe47cb52462d6672a94e9597b6 100644 (file)
@@ -996,7 +996,7 @@ int arizona_dev_init(struct arizona *arizona)
        unsigned int reg, val;
        int (*apply_patch)(struct arizona *) = NULL;
        const struct mfd_cell *subdevs = NULL;
-       int n_subdevs, ret, i;
+       int n_subdevs = 0, ret, i;
 
        dev_set_drvdata(arizona->dev, arizona);
        mutex_init(&arizona->clk_lock);
index 96c07fa1802adcce7d48f971410096b4b2a4f479..6693f74aa6ab9a41307b1838ce21011e0fc0fc68 100644 (file)
@@ -112,6 +112,8 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
 
        pmic->regmap = devm_regmap_init_mmio_clk(dev, NULL, base,
                                                 &hi655x_regmap_config);
+       if (IS_ERR(pmic->regmap))
+               return PTR_ERR(pmic->regmap);
 
        regmap_read(pmic->regmap, HI655X_BUS_ADDR(HI655X_VER_REG), &pmic->ver);
        if ((pmic->ver < PMU_VER_START) || (pmic->ver > PMU_VER_END)) {
index 0e5282fc1467568aa3663be8872b1216d26a77da..c37c8bb860685d5f6e75d70c126018b32d77da8a 100644 (file)
@@ -39,6 +39,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
        info->mem = &pdev->resource[0];
        info->irq = pdev->irq;
 
+       pdev->d3cold_delay = 0;
+
        /* Probably it is enough to set this for iDMA capable devices only */
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
index 8cfea969b060277dd53633a114b534e3eefdb2e0..45c7d8b9734938625c58890191ab28c806621e4f 100644 (file)
@@ -278,6 +278,7 @@ const struct of_device_id madera_of_match[] = {
        { .compatible = "cirrus,wm1840", .data = (void *)WM1840 },
        {}
 };
+MODULE_DEVICE_TABLE(of, madera_of_match);
 EXPORT_SYMBOL_GPL(madera_of_match);
 
 static int madera_get_reset_gpio(struct madera *madera)
index 94e3f32ce935717e97f2e938b433f3673c38f22d..182973df1aed4b7c5f0ce6766f5fb94a2e675ab1 100644 (file)
@@ -179,6 +179,7 @@ static int mfd_add_device(struct device *parent, int id,
                for_each_child_of_node(parent->of_node, np) {
                        if (of_device_is_compatible(np, cell->of_compatible)) {
                                pdev->dev.of_node = np;
+                               pdev->dev.fwnode = &np->fwnode;
                                break;
                        }
                }
index ddfcf4ade7bf33b46fbc0f5256d32a10240c74c9..dc3537651b807a7d4e1d1ee22a0a81acfccf8c0a 100644 (file)
@@ -724,7 +724,7 @@ static int at24_probe(struct i2c_client *client)
        nvmem_config.name = dev_name(dev);
        nvmem_config.dev = dev;
        nvmem_config.read_only = !writable;
-       nvmem_config.root_only = true;
+       nvmem_config.root_only = !(pdata.flags & AT24_FLAG_IRUGO);
        nvmem_config.owner = THIS_MODULE;
        nvmem_config.compat = true;
        nvmem_config.base_dev = dev;
index a6f41f96f2a163a911acbba5e346b5b55e6d0145..198e030e5b3d543ab2d225776b41adce3017b791 100644 (file)
@@ -214,13 +214,21 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
 {
        int ret;
 
+       /* No need to enable the client if nothing is needed from it */
+       if (!cldev->bus->fw_f_fw_ver_supported &&
+           !cldev->bus->hbm_f_os_supported)
+               return;
+
        ret = mei_cldev_enable(cldev);
        if (ret)
                return;
 
-       ret = mei_fwver(cldev);
-       if (ret < 0)
-               dev_err(&cldev->dev, "FW version command failed %d\n", ret);
+       if (cldev->bus->fw_f_fw_ver_supported) {
+               ret = mei_fwver(cldev);
+               if (ret < 0)
+                       dev_err(&cldev->dev, "FW version command failed %d\n",
+                               ret);
+       }
 
        if (cldev->bus->hbm_f_os_supported) {
                ret = mei_osver(cldev);
index bb1ee9834a029d28fdd690ed660f6f6355c89b67..f85aa3f4042d90167bdd4f8f564879ff69bbe7b2 100644 (file)
 #define MEI_DEV_ID_CNP_H      0xA360  /* Cannon Point H */
 #define MEI_DEV_ID_CNP_H_4    0xA364  /* Cannon Point H 4 (iTouch) */
 
+#define MEI_DEV_ID_CMP_LP     0x02e0  /* Comet Point LP */
+#define MEI_DEV_ID_CMP_LP_3   0x02e4  /* Comet Point LP 3 (iTouch) */
+
 #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
 
+#define MEI_DEV_ID_TGP_LP     0xA0E0  /* Tiger Lake Point LP */
+
+#define MEI_DEV_ID_MCC        0x4B70  /* Mule Creek Canyon (EHL) */
+#define MEI_DEV_ID_MCC_4      0x4B75  /* Mule Creek Canyon 4 (EHL) */
+
 /*
  * MEI HW Section
  */
index 0759c3a668de7a7fc38968be27c619672c8f6ece..60c8c84181a97acb0b2fba6ac00a7b41e9b583e8 100644 (file)
@@ -1368,6 +1368,8 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
 #define MEI_CFG_FW_SPS                           \
        .quirk_probe = mei_me_fw_type_sps
 
+#define MEI_CFG_FW_VER_SUPP                     \
+       .fw_ver_supported = 1
 
 #define MEI_CFG_ICH_HFS                      \
        .fw_status.count = 0
@@ -1405,31 +1407,41 @@ static const struct mei_cfg mei_me_ich10_cfg = {
        MEI_CFG_ICH10_HFS,
 };
 
-/* PCH devices */
-static const struct mei_cfg mei_me_pch_cfg = {
+/* PCH6 devices */
+static const struct mei_cfg mei_me_pch6_cfg = {
        MEI_CFG_PCH_HFS,
 };
 
+/* PCH7 devices */
+static const struct mei_cfg mei_me_pch7_cfg = {
+       MEI_CFG_PCH_HFS,
+       MEI_CFG_FW_VER_SUPP,
+};
+
 /* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
 static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
        MEI_CFG_PCH_HFS,
+       MEI_CFG_FW_VER_SUPP,
        MEI_CFG_FW_NM,
 };
 
 /* PCH8 Lynx Point and newer devices */
 static const struct mei_cfg mei_me_pch8_cfg = {
        MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
 };
 
 /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
 static const struct mei_cfg mei_me_pch8_sps_cfg = {
        MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
        MEI_CFG_FW_SPS,
 };
 
 /* Cannon Lake and newer devices */
 static const struct mei_cfg mei_me_pch12_cfg = {
        MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
        MEI_CFG_DMA_128,
 };
 
@@ -1441,7 +1453,8 @@ static const struct mei_cfg *const mei_cfg_list[] = {
        [MEI_ME_UNDEF_CFG] = NULL,
        [MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
        [MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
-       [MEI_ME_PCH_CFG] = &mei_me_pch_cfg,
+       [MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
+       [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
        [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
        [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
        [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
@@ -1480,6 +1493,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
 
        mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
        hw->cfg = cfg;
+       dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
+
        return dev;
 }
 
index bbcc5fc106cdf46418acd2dcd4adfe6f0c1f5383..77597133978952cc805368dc25713236884d17ab 100644 (file)
  * @fw_status: FW status
  * @quirk_probe: device exclusion quirk
  * @dma_size: device DMA buffers size
+ * @fw_ver_supported: is fw version retrievable from FW
  */
 struct mei_cfg {
        const struct mei_fw_status fw_status;
        bool (*quirk_probe)(struct pci_dev *pdev);
        size_t dma_size[DMA_DSCR_NUM];
+       u32 fw_ver_supported:1;
 };
 
 
@@ -74,7 +76,8 @@ struct mei_me_hw {
  * @MEI_ME_UNDEF_CFG:      Lower sentinel.
  * @MEI_ME_ICH_CFG:        I/O Controller Hub legacy devices.
  * @MEI_ME_ICH10_CFG:      I/O Controller Hub platforms Gen10
- * @MEI_ME_PCH_CFG:        Platform Controller Hub platforms (Up to Gen8).
+ * @MEI_ME_PCH6_CFG:       Platform Controller Hub platforms (Gen6).
+ * @MEI_ME_PCH7_CFG:       Platform Controller Hub platforms (Gen7).
  * @MEI_ME_PCH_CPT_PBG_CFG:Platform Controller Hub workstations
  *                         with quirk for Node Manager exclusion.
  * @MEI_ME_PCH8_CFG:       Platform Controller Hub Gen8 and newer
@@ -89,7 +92,8 @@ enum mei_cfg_idx {
        MEI_ME_UNDEF_CFG,
        MEI_ME_ICH_CFG,
        MEI_ME_ICH10_CFG,
-       MEI_ME_PCH_CFG,
+       MEI_ME_PCH6_CFG,
+       MEI_ME_PCH7_CFG,
        MEI_ME_PCH_CPT_PBG_CFG,
        MEI_ME_PCH8_CFG,
        MEI_ME_PCH8_SPS_CFG,
index 377397e1b5a5bfd08f1a02a39ceeef3ffe3b6d03..fc7a5e3fbfcd16349c5ef58edd274ef8b22e401b 100644 (file)
@@ -422,6 +422,8 @@ struct mei_fw_version {
  *
  * @fw_ver : FW versions
  *
+ * @fw_f_fw_ver_supported : fw feature: fw version supported
+ *
  * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients  : list of FW clients
  * @me_clients_map : FW clients bit map
@@ -500,6 +502,8 @@ struct mei_device {
 
        struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
 
+       unsigned int fw_f_fw_ver_supported:1;
+
        struct rw_semaphore me_clients_rwsem;
        struct list_head me_clients;
        DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
index 4299658d48d63ab011271c264cc6af77cd94843c..28cdd87851cbaf254a86997aa2750b015ec4ab77 100644 (file)
@@ -70,13 +70,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},
 
-       {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
@@ -105,8 +105,16 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
+
+       {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
+
        /* required last entry */
        {0, }
 };
index b3fa738ae0050b48ba3f07ef3a02460d72898ea4..f005206d9033b53a83bf39955cc9d12fe732e0c3 100644 (file)
@@ -318,7 +318,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
 
        entry = container_of(resource, struct dbell_entry, resource);
        if (entry->run_delayed) {
-               schedule_work(&entry->work);
+               if (!schedule_work(&entry->work))
+                       vmci_resource_put(resource);
        } else {
                entry->notify_cb(entry->client_data);
                vmci_resource_put(resource);
@@ -366,7 +367,8 @@ static void dbell_fire_entries(u32 notify_idx)
                    atomic_read(&dbell->active) == 1) {
                        if (dbell->run_delayed) {
                                vmci_resource_get(&dbell->resource);
-                               schedule_work(&dbell->work);
+                               if (!schedule_work(&dbell->work))
+                                       vmci_resource_put(&dbell->resource);
                        } else {
                                dbell->notify_cb(dbell->client_data);
                        }
index cfb8ee24eaba153f00e13016987126e49e382436..04738359ec0292797136344778879999212d8651 100644 (file)
@@ -1277,6 +1277,12 @@ int mmc_attach_sd(struct mmc_host *host)
                        goto err;
        }
 
+       /*
+        * Some SD cards claims an out of spec VDD voltage range. Let's treat
+        * these bits as being in-valid and especially also bit7.
+        */
+       ocr &= ~0x7FFF;
+
        rocr = mmc_select_voltage(host, ocr);
 
        /*
index b299a24d33f96592e36c7463f80d601811c709d2..d206f2de80d2331130a106155c3fbc7862ab95c6 100644 (file)
@@ -35,6 +35,7 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
 {
        struct mmc_card *card = host->card;
        int i, ret, count;
+       bool sdio_irq_pending = host->sdio_irq_pending;
        unsigned char pending;
        struct sdio_func *func;
 
@@ -42,13 +43,16 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
        if (mmc_card_suspended(card))
                return 0;
 
+       /* Clear the flag to indicate that we have processed the IRQ. */
+       host->sdio_irq_pending = false;
+
        /*
         * Optimization, if there is only 1 function interrupt registered
         * and we know an IRQ was signaled then call irq handler directly.
         * Otherwise do the full probe.
         */
        func = card->sdio_single_irq;
-       if (func && host->sdio_irq_pending) {
+       if (func && sdio_irq_pending) {
                func->irq_handler(func);
                return 1;
        }
@@ -100,7 +104,6 @@ void sdio_run_irqs(struct mmc_host *host)
 {
        mmc_claim_host(host);
        if (host->sdio_irqs) {
-               host->sdio_irq_pending = true;
                process_sdio_pending_irqs(host);
                if (host->ops->ack_sdio_irq)
                        host->ops->ack_sdio_irq(host);
@@ -119,6 +122,7 @@ void sdio_irq_work(struct work_struct *work)
 
 void sdio_signal_irq(struct mmc_host *host)
 {
+       host->sdio_irq_pending = true;
        queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
 }
 EXPORT_SYMBOL_GPL(sdio_signal_irq);
@@ -164,7 +168,6 @@ static int sdio_irq_thread(void *_host)
                if (ret)
                        break;
                ret = process_sdio_pending_irqs(host);
-               host->sdio_irq_pending = false;
                mmc_release_host(host);
 
                /*
index ed5cefb8376838b401aba0ac394c6a69e37dd818..89deb451e0ac6225c9481dc21ca21373f46c2d7c 100644 (file)
@@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
 {
        data->bytes_xfered = data->blocks * data->blksz;
        data->error = 0;
+       dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        return 1;
 }
 
@@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
                mmc->max_segs = 1;
 
        /* DMA size field can address up to 8 MB */
-       mmc->max_seg_size = 8 * 1024 * 1024;
+       mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
+                                 dma_get_max_seg_size(host->dev));
        mmc->max_req_size = mmc->max_seg_size;
        /* External DMA is in 512 byte blocks */
        mmc->max_blk_size = 512;
index a8af682a9182160e1e9074f1714dd33f5ee39cdd..28f5aaca505acb668468b7ac34cd812e90219f36 100644 (file)
@@ -617,7 +617,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
        cq_host->slot[tag].flags = 0;
 
        cq_host->qcnt += 1;
-
+       /* Make sure descriptors are ready before ringing the doorbell */
+       wmb();
        cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
        if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
                pr_debug("%s: cqhci: doorbell not set for tag %d\n",
index 80dc2fd6576cf3f88afd695ad1f36ec1b4f52b41..22c454c7aaca6a6bdaa906d8d17a2a3394ba7dcb 100644 (file)
@@ -2038,8 +2038,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
                                 * delayed. Allowing the transfer to take place
                                 * avoids races and keeps things simple.
                                 */
-                               if ((err != -ETIMEDOUT) &&
-                                   (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
+                               if (err != -ETIMEDOUT) {
                                        state = STATE_SENDING_DATA;
                                        continue;
                                }
@@ -3487,6 +3486,10 @@ int dw_mci_runtime_resume(struct device *dev)
        /* Force setup bus to guarantee available clock output */
        dw_mci_setup_bus(host->slot, true);
 
+       /* Re-enable SDIO interrupts. */
+       if (sdio_irq_claimed(host->slot->mmc))
+               __dw_mci_enable_sdio_irq(host->slot, 1);
+
        /* Now that slots are all setup, we can enable card detect */
        dw_mci_enable_cd(host);
 
index 9841b447ccde0df5ba98d49782c793eb7b0fa1bf..f6c76be2be0d3fa0d5b93bdd1d79acee793f7a9d 100644 (file)
@@ -76,7 +76,7 @@
        #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK               GENMASK(7, 6)
        #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK               BIT(8)
        #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD               BIT(9)
-       #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK          GENMASK(10, 13)
+       #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK          GENMASK(13, 10)
        #define MESON_MX_SDIO_IRQC_SOFT_RESET                   BIT(15)
        #define MESON_MX_SDIO_IRQC_FORCE_HALT                   BIT(30)
        #define MESON_MX_SDIO_IRQC_HALT_HOLE                    BIT(31)
index 45baf5d9120e358f02007576d35120fd573bed6c..61f0faddfd8897d9684fcdbefa08279c2ca121d7 100644 (file)
@@ -636,6 +636,13 @@ int renesas_sdhi_probe(struct platform_device *pdev,
                host->ops.card_busy = renesas_sdhi_card_busy;
                host->ops.start_signal_voltage_switch =
                        renesas_sdhi_start_signal_voltage_switch;
+
+               /* SDR and HS200/400 registers requires HW reset */
+               if (of_data && of_data->scc_offset) {
+                       priv->scc_ctl = host->ctl + of_data->scc_offset;
+                       host->mmc->caps |= MMC_CAP_HW_RESET;
+                       host->hw_reset = renesas_sdhi_hw_reset;
+               }
        }
 
        /* Orginally registers were 16 bit apart, could be 32 or 64 nowadays */
@@ -693,8 +700,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
                const struct renesas_sdhi_scc *taps = of_data->taps;
                bool hit = false;
 
-               host->mmc->caps |= MMC_CAP_HW_RESET;
-
                for (i = 0; i < of_data->taps_num; i++) {
                        if (taps[i].clk_rate == 0 ||
                            taps[i].clk_rate == host->mmc->f_max) {
@@ -707,12 +712,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
                if (!hit)
                        dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
 
-               priv->scc_ctl = host->ctl + of_data->scc_offset;
                host->init_tuning = renesas_sdhi_init_tuning;
                host->prepare_tuning = renesas_sdhi_prepare_tuning;
                host->select_tuning = renesas_sdhi_select_tuning;
                host->check_scc_error = renesas_sdhi_check_scc_error;
-               host->hw_reset = renesas_sdhi_hw_reset;
                host->prepare_hs400_tuning =
                        renesas_sdhi_prepare_hs400_tuning;
                host->hs400_downgrade = renesas_sdhi_disable_scc;
index 8594659cb59238f70998fce6b8598d5fe86b0ec4..ad0275191d910b4ed73afda896b94adf108c3889 100644 (file)
@@ -582,11 +582,14 @@ static int msm_init_cm_dll(struct sdhci_host *host)
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        int wait_cnt = 50;
-       unsigned long flags;
+       unsigned long flags, xo_clk = 0;
        u32 config;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;
 
+       if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
+               xo_clk = clk_get_rate(msm_host->xo_clk);
+
        spin_lock_irqsave(&host->lock, flags);
 
        /*
@@ -634,10 +637,10 @@ static int msm_init_cm_dll(struct sdhci_host *host)
                config &= CORE_FLL_CYCLE_CNT;
                if (config)
                        mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
-                                       clk_get_rate(msm_host->xo_clk));
+                                       xo_clk);
                else
                        mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
-                                       clk_get_rate(msm_host->xo_clk));
+                                       xo_clk);
 
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
index 7fdac277e382f0de00b8b5d6f4347398a363cb0b..9c77bfe4334f3c67ab61fb1cac75d5adc4ef53bb 100644 (file)
@@ -788,7 +788,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
 
        ret = mmc_of_parse(host->mmc);
        if (ret) {
-               dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
                goto unreg_clk;
        }
 
index 682c573e20a727e118282b33eae9d982917e138f..e284102c16e97aa690e65198ab3390ecb1cc030c 100644 (file)
@@ -365,6 +365,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
        pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
        pm_runtime_use_autosuspend(&pdev->dev);
 
+       /* HS200 is broken at this moment */
+       host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
+
        ret = sdhci_add_host(host);
        if (ret)
                goto pm_runtime_disable;
index e5c598ae5f244164c71ee91187617c7a2ef6697e..6627523e728b9c0c20b8373d99b2eeb4c1e8e10f 100644 (file)
@@ -480,7 +480,12 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
 
        value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
-       value |= ESDHC_DMA_SNOOP;
+
+       if (of_dma_is_coherent(dev->of_node))
+               value |= ESDHC_DMA_SNOOP;
+       else
+               value &= ~ESDHC_DMA_SNOOP;
+
        sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
        return 0;
 }
index 56e7bc62c21898157d9732682babc0682f04c866..7367bf2a967d1c68084d8155bf1faa81dfee94c3 100644 (file)
@@ -383,7 +383,7 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
         * on temperature
         */
        if (temperature < -20000)
-               phase_delay = min(max_window + 4 * max_len - 24,
+               phase_delay = min((max_window + 4 * (max_len - 1)) - 24,
                                  max_window +
                                  DIV_ROUND_UP(13 * max_len, 16) * 4);
        else if (temperature < 20000)
index c4115bae5db187f1a331efd5c495b20bc31a5c86..71794391f48fac44138640391c05d63974ee511d 100644 (file)
@@ -1577,6 +1577,8 @@ static const struct pci_device_id pci_ids[] = {
        SDHCI_PCI_DEVICE(INTEL, CNPH_SD,   intel_byt_sd),
        SDHCI_PCI_DEVICE(INTEL, ICP_EMMC,  intel_glk_emmc),
        SDHCI_PCI_DEVICE(INTEL, ICP_SD,    intel_byt_sd),
+       SDHCI_PCI_DEVICE(INTEL, CML_EMMC,  intel_glk_emmc),
+       SDHCI_PCI_DEVICE(INTEL, CML_SD,    intel_byt_sd),
        SDHCI_PCI_DEVICE(O2, 8120,     o2),
        SDHCI_PCI_DEVICE(O2, 8220,     o2),
        SDHCI_PCI_DEVICE(O2, 8221,     o2),
index fa8d9da2ab7f6b18dd838ef8fc6d5504d46f7d53..e248d7945c062a561d035a5613c04e0a241721ac 100644 (file)
@@ -290,11 +290,21 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
 {
        struct sdhci_pci_chip *chip;
        struct sdhci_host *host;
-       u32 reg;
+       u32 reg, caps;
        int ret;
 
        chip = slot->chip;
        host = slot->host;
+
+       caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+
+       /*
+        * mmc_select_bus_width() will test the bus to determine the actual bus
+        * width.
+        */
+       if (caps & SDHCI_CAN_DO_8BIT)
+               host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
        switch (chip->pdev->device) {
        case PCI_DEVICE_ID_O2_SDS0:
        case PCI_DEVICE_ID_O2_SEABIRD0:
index 2ef0bdca919743baaf51ae78d4cce8e215c5e65b..6f04a62b2998ecd632c2e505ae8e1df6efb0d08c 100644 (file)
@@ -50,6 +50,8 @@
 #define PCI_DEVICE_ID_INTEL_CNPH_SD    0xa375
 #define PCI_DEVICE_ID_INTEL_ICP_EMMC   0x34c4
 #define PCI_DEVICE_ID_INTEL_ICP_SD     0x34f8
+#define PCI_DEVICE_ID_INTEL_CML_EMMC   0x02c4
+#define PCI_DEVICE_ID_INTEL_CML_SD     0x02f5
 
 #define PCI_DEVICE_ID_SYSKONNECT_8000  0x8000
 #define PCI_DEVICE_ID_VIA_95D0         0x95d0
index c749d3dc1d36dd448f6f4e3fa71eba76f62084ce..e99d5632d8fa397c244732b398990c6f8cd450f9 100644 (file)
@@ -1713,7 +1713,9 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
        else if (timing == MMC_TIMING_UHS_SDR12)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
-       else if (timing == MMC_TIMING_UHS_SDR25)
+       else if (timing == MMC_TIMING_SD_HS ||
+                timing == MMC_TIMING_MMC_HS ||
+                timing == MMC_TIMING_UHS_SDR25)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
        else if (timing == MMC_TIMING_UHS_SDR50)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
@@ -2718,6 +2720,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
 static void sdhci_adma_show_error(struct sdhci_host *host)
 {
        void *desc = host->adma_table;
+       dma_addr_t dma = host->adma_addr;
 
        sdhci_dumpregs(host);
 
@@ -2725,18 +2728,21 @@ static void sdhci_adma_show_error(struct sdhci_host *host)
                struct sdhci_adma2_64_desc *dma_desc = desc;
 
                if (host->flags & SDHCI_USE_64_BIT_DMA)
-                       DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
-                           desc, le32_to_cpu(dma_desc->addr_hi),
+                       SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
+                           (unsigned long long)dma,
+                           le32_to_cpu(dma_desc->addr_hi),
                            le32_to_cpu(dma_desc->addr_lo),
                            le16_to_cpu(dma_desc->len),
                            le16_to_cpu(dma_desc->cmd));
                else
-                       DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
-                           desc, le32_to_cpu(dma_desc->addr_lo),
+                       SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+                           (unsigned long long)dma,
+                           le32_to_cpu(dma_desc->addr_lo),
                            le16_to_cpu(dma_desc->len),
                            le16_to_cpu(dma_desc->cmd));
 
                desc += host->desc_sz;
+               dma += host->desc_sz;
 
                if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
                        break;
@@ -2812,7 +2818,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
                        != MMC_BUS_TEST_R)
                host->data->error = -EILSEQ;
        else if (intmask & SDHCI_INT_ADMA_ERROR) {
-               pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
+               pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
+                      intmask);
                sdhci_adma_show_error(host);
                host->data->error = -EIO;
                if (host->ops->adma_workaround)
index 7ef2d37fcfc649309fe53b5196e1758b48cd1a45..9b46b97317fa4cf71203d54fa0208b28f03e826c 100644 (file)
@@ -47,6 +47,8 @@
 #define SEL100_MASK            BIT(SEL100_SHIFT)
 #define FREQSEL_SHIFT          8
 #define FREQSEL_MASK           GENMASK(10, 8)
+#define CLKBUFSEL_SHIFT                0
+#define CLKBUFSEL_MASK         GENMASK(2, 0)
 #define DLL_TRIM_ICP_SHIFT     4
 #define DLL_TRIM_ICP_MASK      GENMASK(7, 4)
 #define DR_TY_SHIFT            20
@@ -84,6 +86,7 @@ struct sdhci_am654_data {
        struct regmap *base;
        bool legacy_otapdly;
        int otap_del_sel[11];
+       int clkbuf_sel;
        int trm_icp;
        int drv_strength;
        bool dll_on;
@@ -231,7 +234,6 @@ void sdhci_j721e_4bit_set_clock(struct sdhci_host *host, unsigned int clock)
        struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
        unsigned char timing = host->mmc->ios.timing;
        u32 otap_del_sel;
-       u32 otap_del_ena;
        u32 mask, val;
 
        /* Setup DLL Output TAP delay */
@@ -240,12 +242,14 @@ void sdhci_j721e_4bit_set_clock(struct sdhci_host *host, unsigned int clock)
        else
                otap_del_sel = sdhci_am654->otap_del_sel[timing];
 
-       otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 1 : 0;
        mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
-       val = (otap_del_ena << OTAPDLYENA_SHIFT) |
+       val = (0x1 << OTAPDLYENA_SHIFT) |
              (otap_del_sel << OTAPDLYSEL_SHIFT);
        regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
 
+       regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
+                          sdhci_am654->clkbuf_sel);
+
        sdhci_set_clock(host, clock);
 }
 
@@ -294,8 +298,7 @@ struct sdhci_ops sdhci_am654_ops = {
 
 static const struct sdhci_pltfm_data sdhci_am654_pdata = {
        .ops = &sdhci_am654_ops,
-       .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
@@ -331,8 +334,7 @@ struct sdhci_ops sdhci_j721e_8bit_ops = {
 
 static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
        .ops = &sdhci_j721e_8bit_ops,
-       .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
@@ -355,8 +357,7 @@ struct sdhci_ops sdhci_j721e_4bit_ops = {
 
 static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
        .ops = &sdhci_j721e_4bit_ops,
-       .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
@@ -560,6 +561,8 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
        }
 
        device_property_read_u32(dev, "ti,strobe-sel", &sdhci_am654->strb_sel);
+       device_property_read_u32(dev, "ti,clkbuf-sel",
+                                &sdhci_am654->clkbuf_sel);
 
        sdhci_get_of_property(pdev);
 
index 1d1215242055c4090da16f83d22c58cc21439da2..b009d44183996aeb9b0cde8f3886101f746be505 100644 (file)
@@ -1712,30 +1712,36 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
                        continue;
                }
 
-               if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
+               /*
+                * We check "time_after" and "!chip_good" before checking
+                * "chip_good" to avoid the failure due to scheduling.
+                */
+               if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) {
                        xip_enable(map, chip, adr);
                        printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
                        xip_disable(map, chip, adr);
+                       ret = -EIO;
                        break;
                }
 
-               if (chip_ready(map, adr))
+               if (chip_good(map, adr, datum))
                        break;
 
                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1);
        }
+
        /* Did we succeed? */
-       if (!chip_good(map, adr, datum)) {
+       if (ret) {
                /* reset on all failures. */
                cfi_check_err_status(map, adr);
                map_write(map, CMD(0xF0), chip->start);
                /* FIXME - should have reset delay before continuing */
 
-               if (++retry_cnt <= MAX_RETRIES)
+               if (++retry_cnt <= MAX_RETRIES) {
+                       ret = 0;
                        goto retry;
-
-               ret = -EIO;
+               }
        }
        xip_enable(map, chip, adr);
  op_done:
index 57b5ed1699e386e51865ba77c61c6561ffc1775f..ab5a8778c4b249ec08b8176dbc9b770898de1b84 100644 (file)
@@ -509,7 +509,8 @@ static int mtk_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
 {
        struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
        const struct nand_sdr_timings *timings;
-       u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt;
+       u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0;
+       u32 thold;
 
        timings = nand_get_sdr_timings(conf);
        if (IS_ERR(timings))
@@ -545,11 +546,28 @@ static int mtk_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
        twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
        twh &= 0xf;
 
-       twst = timings->tWP_min / 1000;
+       /* Calculate real WE#/RE# hold time in nanosecond */
+       thold = (twh + 1) * 1000000 / rate;
+       /* nanosecond to picosecond */
+       thold *= 1000;
+
+       /*
+        * WE# low level time should be expaned to meet WE# pulse time
+        * and WE# cycle time at the same time.
+        */
+       if (thold < timings->tWC_min)
+               twst = timings->tWC_min - thold;
+       twst = max(timings->tWP_min, twst) / 1000;
        twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
        twst &= 0xf;
 
-       trlt = max(timings->tREA_max, timings->tRP_min) / 1000;
+       /*
+        * RE# low level time should be expaned to meet RE# pulse time,
+        * RE# access time and RE# cycle time at the same time.
+        */
+       if (thold < timings->tRC_min)
+               trlt = timings->tRC_min - thold;
+       trlt = max3(trlt, timings->tREA_max, timings->tRP_min) / 1000;
        trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
        trlt &= 0xf;
 
@@ -845,19 +863,21 @@ static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
        return mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
 }
 
-static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
+static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
+                                   u32 sectors)
 {
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
        struct mtk_ecc_stats stats;
+       u32 reg_size = mtk_nand->fdm.reg_size;
        int rc, i;
 
        rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
        if (rc) {
                memset(buf, 0xff, sectors * chip->ecc.size);
                for (i = 0; i < sectors; i++)
-                       memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
+                       memset(oob_ptr(chip, start + i), 0xff, reg_size);
                return 0;
        }
 
@@ -877,7 +897,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
        u32 spare = mtk_nand->spare_per_sector;
        u32 column, sectors, start, end, reg;
        dma_addr_t addr;
-       int bitflips;
+       int bitflips = 0;
        size_t len;
        u8 *buf;
        int rc;
@@ -944,14 +964,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
        if (rc < 0) {
                dev_err(nfc->dev, "subpage done timeout\n");
                bitflips = -EIO;
-       } else {
-               bitflips = 0;
-               if (!raw) {
-                       rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
-                       bitflips = rc < 0 ? -ETIMEDOUT :
-                               mtk_nfc_update_ecc_stats(mtd, buf, sectors);
-                       mtk_nfc_read_fdm(chip, start, sectors);
-               }
+       } else if (!raw) {
+               rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+               bitflips = rc < 0 ? -ETIMEDOUT :
+                       mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
+               mtk_nfc_read_fdm(chip, start, sectors);
        }
 
        dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
index f5dc0a7a2456325a92142ce0f4b537a7dcbbe79d..fb401c25732c7abf61119a5830859d5992a4bc0f 100644 (file)
@@ -400,6 +400,14 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
            (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
                return MICRON_ON_DIE_UNSUPPORTED;
 
+       /*
+        * It seems that there are devices which do not support ECC officially.
+        * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices supports
+        * enabling the ECC feature but don't reflect that to the READ_ID table.
+        * So we have to guarantee that we disable the ECC feature directly
+        * after we did the READ_ID table command. Later we can evaluate the
+        * ECC_ENABLE support.
+        */
        ret = micron_nand_on_die_ecc_setup(chip, true);
        if (ret)
                return MICRON_ON_DIE_UNSUPPORTED;
@@ -408,13 +416,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
        if (ret)
                return MICRON_ON_DIE_UNSUPPORTED;
 
-       if (!(id[4] & MICRON_ID_ECC_ENABLED))
-               return MICRON_ON_DIE_UNSUPPORTED;
-
        ret = micron_nand_on_die_ecc_setup(chip, false);
        if (ret)
                return MICRON_ON_DIE_UNSUPPORTED;
 
+       if (!(id[4] & MICRON_ID_ECC_ENABLED))
+               return MICRON_ON_DIE_UNSUPPORTED;
+
        ret = nand_readid_op(chip, 0, id, sizeof(id));
        if (ret)
                return MICRON_ON_DIE_UNSUPPORTED;
index 8c7bf91ce4e1d3844e8247b60ecec208dee2e85f..48b3ab26b12492d8b2efbc41e1d8eb4a5eda72d6 100644 (file)
@@ -572,12 +572,12 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
                if (ret == -EBADMSG) {
                        ecc_failed = true;
                        mtd->ecc_stats.failed++;
-                       ret = 0;
                } else {
                        mtd->ecc_stats.corrected += ret;
                        max_bitflips = max_t(unsigned int, max_bitflips, ret);
                }
 
+               ret = 0;
                ops->retlen += iter.req.datalen;
                ops->oobretlen += iter.req.ooblen;
        }
index 8459115d9d4e51faf405335c9886908cee5c1b02..553776cc1d29ddceb9f4855d93e5d97366dd5a8f 100644 (file)
@@ -1063,31 +1063,34 @@ EXPORT_SYMBOL(arcnet_interrupt);
 static void arcnet_rx(struct net_device *dev, int bufnum)
 {
        struct arcnet_local *lp = netdev_priv(dev);
-       struct archdr pkt;
+       union {
+               struct archdr pkt;
+               char buf[512];
+       } rxdata;
        struct arc_rfc1201 *soft;
        int length, ofs;
 
-       soft = &pkt.soft.rfc1201;
+       soft = &rxdata.pkt.soft.rfc1201;
 
-       lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
-       if (pkt.hard.offset[0]) {
-               ofs = pkt.hard.offset[0];
+       lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE);
+       if (rxdata.pkt.hard.offset[0]) {
+               ofs = rxdata.pkt.hard.offset[0];
                length = 256 - ofs;
        } else {
-               ofs = pkt.hard.offset[1];
+               ofs = rxdata.pkt.hard.offset[1];
                length = 512 - ofs;
        }
 
        /* get the full header, if possible */
-       if (sizeof(pkt.soft) <= length) {
-               lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
+       if (sizeof(rxdata.pkt.soft) <= length) {
+               lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft));
        } else {
-               memset(&pkt.soft, 0, sizeof(pkt.soft));
+               memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft));
                lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
        }
 
        arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n",
-                  bufnum, pkt.hard.source, pkt.hard.dest, length);
+                  bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length);
 
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += length + ARC_HDR_SIZE;
@@ -1096,13 +1099,13 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
        if (arc_proto_map[soft->proto]->is_ip) {
                if (BUGLVL(D_PROTO)) {
                        struct ArcProto
-                       *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
+                       *oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]],
                        *newp = arc_proto_map[soft->proto];
 
                        if (oldp != newp) {
                                arc_printk(D_PROTO, dev,
                                           "got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n",
-                                          soft->proto, pkt.hard.source,
+                                          soft->proto, rxdata.pkt.hard.source,
                                           newp->suffix, oldp->suffix);
                        }
                }
@@ -1111,10 +1114,10 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
                lp->default_proto[0] = soft->proto;
 
                /* in striking contrast, the following isn't a hack. */
-               lp->default_proto[pkt.hard.source] = soft->proto;
+               lp->default_proto[rxdata.pkt.hard.source] = soft->proto;
        }
        /* call the protocol-specific receiver. */
-       arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length);
+       arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length);
 }
 
 static void null_rx(struct net_device *dev, int bufnum,
index 7e162fff01abbd3371e01dd8bff1d72af065bd83..0d2392c4b625a195c9727adab45c6bc933b5c794 100644 (file)
@@ -1102,6 +1102,8 @@ static void bond_compute_features(struct bonding *bond)
 done:
        bond_dev->vlan_features = vlan_features;
        bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+                                   NETIF_F_HW_VLAN_CTAG_TX |
+                                   NETIF_F_HW_VLAN_STAG_TX |
                                    NETIF_F_GSO_UDP_L4;
        bond_dev->gso_max_segs = gso_max_segs;
        netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -2188,6 +2190,15 @@ static void bond_miimon_commit(struct bonding *bond)
        bond_for_each_slave(bond, slave, iter) {
                switch (slave->new_link) {
                case BOND_LINK_NOCHANGE:
+                       /* For 802.3ad mode, check current slave speed and
+                        * duplex again in case its port was disabled after
+                        * invalid speed/duplex reporting but recovered before
+                        * link monitoring could make a decision on the actual
+                        * link status
+                        */
+                       if (BOND_MODE(bond) == BOND_MODE_8023AD &&
+                           slave->link == BOND_LINK_UP)
+                               bond_3ad_adapter_speed_duplex_changed(slave);
                        continue;
 
                case BOND_LINK_UP:
@@ -3852,8 +3863,8 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
                                        struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct iphdr *iph = ip_hdr(skb);
        struct slave *slave;
+       int slave_cnt;
        u32 slave_id;
 
        /* Start with the curr_active_slave that joined the bond as the
@@ -3862,23 +3873,32 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
         * send the join/membership reports.  The curr_active_slave found
         * will send all of this type of traffic.
         */
-       if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
-               slave = rcu_dereference(bond->curr_active_slave);
-               if (slave)
-                       bond_dev_queue_xmit(bond, skb, slave->dev);
-               else
-                       bond_xmit_slave_id(bond, skb, 0);
-       } else {
-               int slave_cnt = READ_ONCE(bond->slave_cnt);
+       if (skb->protocol == htons(ETH_P_IP)) {
+               int noff = skb_network_offset(skb);
+               struct iphdr *iph;
 
-               if (likely(slave_cnt)) {
-                       slave_id = bond_rr_gen_slave_id(bond);
-                       bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
-               } else {
-                       bond_tx_drop(bond_dev, skb);
+               if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
+                       goto non_igmp;
+
+               iph = ip_hdr(skb);
+               if (iph->protocol == IPPROTO_IGMP) {
+                       slave = rcu_dereference(bond->curr_active_slave);
+                       if (slave)
+                               bond_dev_queue_xmit(bond, skb, slave->dev);
+                       else
+                               bond_xmit_slave_id(bond, skb, 0);
+                       return NETDEV_TX_OK;
                }
        }
 
+non_igmp:
+       slave_cnt = READ_ONCE(bond->slave_cnt);
+       if (likely(slave_cnt)) {
+               slave_id = bond_rr_gen_slave_id(bond);
+               bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+       } else {
+               bond_tx_drop(bond_dev, skb);
+       }
        return NETDEV_TX_OK;
 }
 
index 433a14b9f731bc5a71ac431b2eb71274c31247da..253a1bbe37e8bd48caf8b59ea8de8a6d3797830e 100644 (file)
@@ -1455,7 +1455,7 @@ static void __exit cfhsi_exit_module(void)
        rtnl_lock();
        list_for_each_safe(list_node, n, &cfhsi_list) {
                cfhsi = list_entry(list_node, struct cfhsi, list);
-               unregister_netdev(cfhsi->ndev);
+               unregister_netdevice(cfhsi->ndev);
        }
        rtnl_unlock();
 }
index c05e4d50d43d74a32ef38c656fb85bbb8b06e962..bd127ce3aba245e3bc100538da2aebdcf2b0287e 100644 (file)
@@ -1260,6 +1260,8 @@ int register_candev(struct net_device *dev)
                return -EINVAL;
 
        dev->rtnl_link_ops = &can_link_ops;
+       netif_carrier_off(dev);
+
        return register_netdev(dev);
 }
 EXPORT_SYMBOL_GPL(register_candev);
index 602c19e23f052ed50bd691408bc9644bf5049cb5..786d852a70d5844f4531637e07a9fb2e040b90cd 100644 (file)
@@ -1512,10 +1512,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
 
        /* All packets processed */
        if (num_pkts < quota) {
-               napi_complete_done(napi, num_pkts);
-               /* Enable Rx FIFO interrupts */
-               rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
-                                  RCANFD_RFCC_RFIE);
+               if (napi_complete_done(napi, num_pkts)) {
+                       /* Enable Rx FIFO interrupts */
+                       rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+                                          RCANFD_RFCC_RFIE);
+               }
        }
        return num_pkts;
 }
index b8c39ede7cd51445b6ed653585811066ab75d7d4..179bfcd541f2f552253848d20342ab7c23409f58 100644 (file)
@@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
                if (!netdev)
                        continue;
 
-               strncpy(name, netdev->name, IFNAMSIZ);
+               strlcpy(name, netdev->name, IFNAMSIZ);
 
                unregister_sja1000dev(netdev);
 
index da64e71a62ee238adb65a8933f8bd0ec61bd7976..de8d9dceb123619102107a7d5ca6e6f9feb60697 100644 (file)
@@ -626,7 +626,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
 static int mcp251x_hw_reset(struct spi_device *spi)
 {
        struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       u8 reg;
+       unsigned long timeout;
        int ret;
 
        /* Wait for oscillator startup timer after power up */
@@ -640,10 +640,19 @@ static int mcp251x_hw_reset(struct spi_device *spi)
        /* Wait for oscillator startup timer after reset */
        mdelay(MCP251X_OST_DELAY_MS);
 
-       reg = mcp251x_read_reg(spi, CANSTAT);
-       if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
-               return -ENODEV;
-
+       /* Wait for reset to finish */
+       timeout = jiffies + HZ;
+       while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
+              CANCTRL_REQOP_CONF) {
+               usleep_range(MCP251X_OST_DELAY_MS * 1000,
+                            MCP251X_OST_DELAY_MS * 1000 * 2);
+
+               if (time_after(jiffies, timeout)) {
+                       dev_err(&spi->dev,
+                               "MCP251x didn't enter in conf mode after reset\n");
+                       return -EBUSY;
+               }
+       }
        return 0;
 }
 
@@ -678,17 +687,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable)
                return regulator_disable(reg);
 }
 
-static void mcp251x_open_clean(struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-       struct spi_device *spi = priv->spi;
-
-       free_irq(spi->irq, priv);
-       mcp251x_hw_sleep(spi);
-       mcp251x_power_enable(priv->transceiver, 0);
-       close_candev(net);
-}
-
 static int mcp251x_stop(struct net_device *net)
 {
        struct mcp251x_priv *priv = netdev_priv(net);
@@ -954,37 +952,43 @@ static int mcp251x_open(struct net_device *net)
                                   flags | IRQF_ONESHOT, DEVICE_NAME, priv);
        if (ret) {
                dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
-               mcp251x_power_enable(priv->transceiver, 0);
-               close_candev(net);
-               goto open_unlock;
+               goto out_close;
        }
 
        priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
                                   0);
+       if (!priv->wq) {
+               ret = -ENOMEM;
+               goto out_clean;
+       }
        INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
        INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
 
        ret = mcp251x_hw_reset(spi);
-       if (ret) {
-               mcp251x_open_clean(net);
-               goto open_unlock;
-       }
+       if (ret)
+               goto out_free_wq;
        ret = mcp251x_setup(net, spi);
-       if (ret) {
-               mcp251x_open_clean(net);
-               goto open_unlock;
-       }
+       if (ret)
+               goto out_free_wq;
        ret = mcp251x_set_normal_mode(spi);
-       if (ret) {
-               mcp251x_open_clean(net);
-               goto open_unlock;
-       }
+       if (ret)
+               goto out_free_wq;
 
        can_led_event(net, CAN_LED_EVENT_OPEN);
 
        netif_wake_queue(net);
+       mutex_unlock(&priv->mcp_lock);
+
+       return 0;
 
-open_unlock:
+out_free_wq:
+       destroy_workqueue(priv->wq);
+out_clean:
+       free_irq(spi->irq, priv);
+       mcp251x_hw_sleep(spi);
+out_close:
+       mcp251x_power_enable(priv->transceiver, 0);
+       close_candev(net);
        mutex_unlock(&priv->mcp_lock);
        return ret;
 }
index 611f9d31be5d0370612fe8d4f9771b88dd9f3d37..43b0fa2b99322e33a0ea0fb81529a4c6844b0050 100644 (file)
@@ -576,16 +576,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
        dev->state &= ~PCAN_USB_STATE_STARTED;
        netif_stop_queue(netdev);
 
+       close_candev(netdev);
+
+       dev->can.state = CAN_STATE_STOPPED;
+
        /* unlink all pending urbs and free used memory */
        peak_usb_unlink_all_urbs(dev);
 
        if (dev->adapter->dev_stop)
                dev->adapter->dev_stop(dev);
 
-       close_candev(netdev);
-
-       dev->can.state = CAN_STATE_STOPPED;
-
        /* can set bus off now */
        if (dev->adapter->dev_set_bus) {
                int err = dev->adapter->dev_set_bus(dev, 0);
@@ -863,7 +863,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
 
                dev_prev_siblings = dev->prev_siblings;
                dev->state &= ~PCAN_USB_STATE_CONNECTED;
-               strncpy(name, netdev->name, IFNAMSIZ);
+               strlcpy(name, netdev->name, IFNAMSIZ);
 
                unregister_netdev(netdev);
 
index dd161c5eea8ec7e01aaa7d4c759fe212e5d9b79d..41988358f63c86cdc307fe541e510ef5bcfd1b1a 100644 (file)
@@ -849,7 +849,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
                        goto err_out;
 
                /* allocate command buffer once for all for the interface */
-               pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+               pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
                                                GFP_KERNEL);
                if (!pdev->cmd_buffer_addr)
                        goto err_out_1;
index d516def846abec6c661dc185da1d64b73ca22441..b304198f0b3af0d670677bb0526549c49394a037 100644 (file)
@@ -502,7 +502,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
        u8 *buffer;
        int err;
 
-       buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+       buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
 
index 411cfb806459a6c994d27f2d10bdb8bd17a52755..703e6bdaf0e1fe1ee1b4dd0dba860da1d53155b9 100644 (file)
@@ -4816,6 +4816,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
                err = PTR_ERR(chip->reset);
                goto out;
        }
+       if (chip->reset)
+               usleep_range(1000, 2000);
 
        err = mv88e6xxx_detect(chip);
        if (err)
index bdd8f2df66303fc8ba83fdf65c2eef76edaf91f1..33232cc9fb04d2a0dcf8713e9f161e971a7a0bd0 100644 (file)
@@ -543,7 +543,7 @@ qca8k_setup(struct dsa_switch *ds)
                    BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
 
        /* Setup connection between CPU port & user ports */
-       for (i = 0; i < DSA_MAX_PORTS; i++) {
+       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
                /* CPU port gets connected to all user ports of the switch */
                if (dsa_is_cpu_port(ds, i)) {
                        qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
@@ -897,7 +897,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
        if (id != QCA8K_ID_QCA8337)
                return -ENODEV;
 
-       priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+       priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
        if (!priv->ds)
                return -ENOMEM;
 
index 35b767baf21f7c94100be0c4a33efe86bcc3cce4..c281c488a306fb6aed2839b0b0442a2877ad7328 100644 (file)
@@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
                         const struct switchdev_obj_port_vlan *vlan)
 {
        struct realtek_smi *smi = ds->priv;
+       u16 vid;
        int ret;
 
-       if (!smi->ops->is_vlan_valid(smi, port))
-               return -EINVAL;
+       for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
+               if (!smi->ops->is_vlan_valid(smi, vid))
+                       return -EINVAL;
 
        dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
                 vlan->vid_begin, vlan->vid_end);
@@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
        u16 vid;
        int ret;
 
-       if (!smi->ops->is_vlan_valid(smi, port))
-               return;
+       for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
+               if (!smi->ops->is_vlan_valid(smi, vid))
+                       return;
 
        dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
                 port,
index a4d5049df6928281edaddb198457c064989e3657..f4b14b6acd22d9d1683911d72378fa7e5c9cf697 100644 (file)
@@ -507,7 +507,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
        irq = of_irq_get(intc, 0);
        if (irq <= 0) {
                dev_err(smi->dev, "failed to get parent IRQ\n");
-               return irq ? irq : -EINVAL;
+               ret = irq ? irq : -EINVAL;
+               goto out_put_node;
        }
 
        /* This clears the IRQ status register */
@@ -515,7 +516,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
                          &val);
        if (ret) {
                dev_err(smi->dev, "can't read interrupt status\n");
-               return ret;
+               goto out_put_node;
        }
 
        /* Fetch IRQ edge information from the descriptor */
@@ -537,7 +538,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
                                 val);
        if (ret) {
                dev_err(smi->dev, "could not configure IRQ polarity\n");
-               return ret;
+               goto out_put_node;
        }
 
        ret = devm_request_threaded_irq(smi->dev, irq, NULL,
@@ -545,7 +546,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
                                        "RTL8366RB", smi);
        if (ret) {
                dev_err(smi->dev, "unable to request irq: %d\n", ret);
-               return ret;
+               goto out_put_node;
        }
        smi->irqdomain = irq_domain_add_linear(intc,
                                               RTL8366RB_NUM_INTERRUPT,
@@ -553,12 +554,15 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
                                               smi);
        if (!smi->irqdomain) {
                dev_err(smi->dev, "failed to create IRQ domain\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_put_node;
        }
        for (i = 0; i < smi->num_ports; i++)
                irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
 
-       return 0;
+out_put_node:
+       of_node_put(intc);
+       return ret;
 }
 
 static int rtl8366rb_set_addr(struct realtek_smi *smi)
index b41f23679a087b2a49676ae362044c1b8085820a..7ce9c69e9c44f3d4288d04f710b96222ebf2fb77 100644 (file)
@@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void)
 
        ret = xgbe_platform_init();
        if (ret)
-               return ret;
+               goto err_platform_init;
 
        ret = xgbe_pci_init();
        if (ret)
-               return ret;
+               goto err_pci_init;
 
        return 0;
+
+err_pci_init:
+       xgbe_platform_exit();
+err_platform_init:
+       unregister_netdevice_notifier(&xgbe_netdev_notifier);
+       return ret;
 }
 
 static void __exit xgbe_mod_exit(void)
index d335c334fa561ed2ae1a8dad45fcd9af822ee0a7..82582fa54d5d25fdce4b7d9bac10a2a146d3ab6a 100644 (file)
@@ -89,6 +89,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
                        }
                }
 
+err_exit:
                if (!was_tx_cleaned)
                        work_done = budget;
 
@@ -98,7 +99,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
                                        1U << self->aq_ring_param.vec_idx);
                }
        }
-err_exit:
+
        return work_done;
 }
 
index 5a727d4729da7348075b75101154cca3cf515073..cf01e73d1bcc888e0815a63b294ec2f32df61611 100644 (file)
@@ -286,6 +286,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        sw_cons = txdata->tx_pkt_cons;
 
+       /* Ensure subsequent loads occur after hw_cons */
+       smp_rmb();
+
        while (sw_cons != hw_cons) {
                u16 pkt_cons;
 
@@ -1933,8 +1936,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
        }
 
        /* select a non-FCoE queue */
-       return fallback(dev, skb, NULL) %
-              (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+       return fallback(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -3056,12 +3058,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
        /* if VF indicate to PF this function is going down (PF will delete sp
         * elements and clear initializations
         */
-       if (IS_VF(bp))
+       if (IS_VF(bp)) {
+               bnx2x_clear_vlan_info(bp);
                bnx2x_vfpf_close_vf(bp);
-       else if (unload_mode != UNLOAD_RECOVERY)
+       } else if (unload_mode != UNLOAD_RECOVERY) {
                /* if this is a normal/close unload need to clean up chip*/
                bnx2x_chip_cleanup(bp, unload_mode, keep_link);
-       else {
+       else {
                /* Send the UNLOAD_REQUEST to the MCP */
                bnx2x_send_unload_req(bp, unload_mode);
 
@@ -3858,9 +3861,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+                       bp->eth_stats.ptp_skip_tx_ts++;
                        BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
                } else if (bp->ptp_tx_skb) {
-                       BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+                       bp->eth_stats.ptp_skip_tx_ts++;
+                       netdev_err_once(bp->dev,
+                                       "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
                } else {
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        /* schedule check for Tx timestamp */
index 0e508e5defce315f2e5254ca238afe26b523054a..ee5159ef837e38e285b13653d70a5529dcf35310 100644 (file)
@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
 void bnx2x_disable_close_the_gate(struct bnx2x *bp);
 int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
 
+void bnx2x_clear_vlan_info(struct bnx2x *bp);
+
 /**
  * bnx2x_sp_event - handle ramrods completion.
  *
index c428b0655c26e924dfc8b1c2e2fe52e7017fc8e7..00f9ed93360c6707860c5aa3538d518f45224f59 100644 (file)
@@ -182,7 +182,9 @@ static const struct {
        { STATS_OFFSET32(driver_filtered_tx_pkt),
                                4, false, "driver_filtered_tx_pkt" },
        { STATS_OFFSET32(eee_tx_lpi),
-                               4, true, "Tx LPI entry count"}
+                               4, true, "Tx LPI entry count"},
+       { STATS_OFFSET32(ptp_skip_tx_ts),
+                               4, false, "ptp_skipped_tx_tstamp" },
 };
 
 #define BNX2X_NUM_STATS                ARRAY_SIZE(bnx2x_stats_arr)
index a585f1025a5802cb71c4ec8cb61bbaa1a513d074..68c62e32e882046c96f7a66d35c2a1ba78da6ede 100644 (file)
@@ -8488,11 +8488,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
        return rc;
 }
 
+void bnx2x_clear_vlan_info(struct bnx2x *bp)
+{
+       struct bnx2x_vlan_entry *vlan;
+
+       /* Mark that hw forgot all entries */
+       list_for_each_entry(vlan, &bp->vlan_reg, link)
+               vlan->hw = false;
+
+       bp->vlan_cnt = 0;
+}
+
 static int bnx2x_del_all_vlans(struct bnx2x *bp)
 {
        struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
        unsigned long ramrod_flags = 0, vlan_flags = 0;
-       struct bnx2x_vlan_entry *vlan;
        int rc;
 
        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8501,10 +8511,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
        if (rc)
                return rc;
 
-       /* Mark that hw forgot all entries */
-       list_for_each_entry(vlan, &bp->vlan_reg, link)
-               vlan->hw = false;
-       bp->vlan_cnt = 0;
+       bnx2x_clear_vlan_info(bp);
 
        return 0;
 }
@@ -15244,11 +15251,24 @@ static void bnx2x_ptp_task(struct work_struct *work)
        u32 val_seq;
        u64 timestamp, ns;
        struct skb_shared_hwtstamps shhwtstamps;
+       bool bail = true;
+       int i;
+
+       /* FW may take a while to complete timestamping; try a bit and if it's
+        * still not complete, may indicate an error state - bail out then.
+        */
+       for (i = 0; i < 10; i++) {
+               /* Read Tx timestamp registers */
+               val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+                                NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+               if (val_seq & 0x10000) {
+                       bail = false;
+                       break;
+               }
+               msleep(1 << i);
+       }
 
-       /* Read Tx timestamp registers */
-       val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
-                        NIG_REG_P0_TLLH_PTP_BUF_SEQID);
-       if (val_seq & 0x10000) {
+       if (!bail) {
                /* There is a valid timestamp value */
                timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
                                   NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
@@ -15263,16 +15283,18 @@ static void bnx2x_ptp_task(struct work_struct *work)
                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                shhwtstamps.hwtstamp = ns_to_ktime(ns);
                skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
-               dev_kfree_skb_any(bp->ptp_tx_skb);
-               bp->ptp_tx_skb = NULL;
 
                DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
                   timestamp, ns);
        } else {
-               DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
-               /* Reschedule to keep checking for a valid timestamp value */
-               schedule_work(&bp->ptp_task);
+               DP(BNX2X_MSG_PTP,
+                  "Tx timestamp is not recorded (register read=%u)\n",
+                  val_seq);
+               bp->eth_stats.ptp_skip_tx_ts++;
        }
+
+       dev_kfree_skb_any(bp->ptp_tx_skb);
+       bp->ptp_tx_skb = NULL;
 }
 
 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
index b2644ed13d064eacc3b34cf59d48b156bedac16b..d55e63692cf3bf5ab4c11b8ddc9d800c520d5e78 100644 (file)
@@ -207,6 +207,9 @@ struct bnx2x_eth_stats {
        u32 driver_filtered_tx_pkt;
        /* src: Clear-on-Read register; Will not survive PMF Migration */
        u32 eee_tx_lpi;
+
+       /* PTP */
+       u32 ptp_skip_tx_ts;
 };
 
 struct bnx2x_eth_q_stats {
index 2d6f090bf6440cc7253fe4f0764b10bde618ff73..fd587bed32ebd1f1c222708ae9052a481c73cb4b 100644 (file)
@@ -3086,39 +3086,42 @@ static void bcmgenet_timeout(struct net_device *dev)
        netif_tx_wake_all_queues(dev);
 }
 
-#define MAX_MC_COUNT   16
+#define MAX_MDF_FILTER 17
 
 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
                                         unsigned char *addr,
-                                        int *i,
-                                        int *mc)
+                                        int *i)
 {
-       u32 reg;
-
        bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
                             UMAC_MDF_ADDR + (*i * 4));
        bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
                             addr[4] << 8 | addr[5],
                             UMAC_MDF_ADDR + ((*i + 1) * 4));
-       reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
-       reg |= (1 << (MAX_MC_COUNT - *mc));
-       bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
        *i += 2;
-       (*mc)++;
 }
 
 static void bcmgenet_set_rx_mode(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct netdev_hw_addr *ha;
-       int i, mc;
+       int i, nfilter;
        u32 reg;
 
        netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
 
-       /* Promiscuous mode */
+       /* Number of filters needed */
+       nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
+
+       /*
+        * Turn on promicuous mode for three scenarios
+        * 1. IFF_PROMISC flag is set
+        * 2. IFF_ALLMULTI flag is set
+        * 3. The number of filters needed exceeds the number filters
+        *    supported by the hardware.
+       */
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       if (dev->flags & IFF_PROMISC) {
+       if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
+           (nfilter > MAX_MDF_FILTER)) {
                reg |= CMD_PROMISC;
                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
                bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
@@ -3128,32 +3131,24 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
        }
 
-       /* UniMac doesn't support ALLMULTI */
-       if (dev->flags & IFF_ALLMULTI) {
-               netdev_warn(dev, "ALLMULTI is not supported\n");
-               return;
-       }
-
        /* update MDF filter */
        i = 0;
-       mc = 0;
        /* Broadcast */
-       bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+       bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
        /* my own address.*/
-       bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
-       /* Unicast list*/
-       if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
-               return;
+       bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
 
-       if (!netdev_uc_empty(dev))
-               netdev_for_each_uc_addr(ha, dev)
-                       bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
-       /* Multicast */
-       if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
-               return;
+       /* Unicast */
+       netdev_for_each_uc_addr(ha, dev)
+               bcmgenet_set_mdf_addr(priv, ha->addr, &i);
 
+       /* Multicast */
        netdev_for_each_mc_addr(ha, dev)
-               bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+               bcmgenet_set_mdf_addr(priv, ha->addr, &i);
+
+       /* Enable filters */
+       reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
+       bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
 }
 
 /* Set the hardware MAC address. */
index 14b49612aa8639816c1d4b58bdbf5d9ff1995248..4dabf37319c84301773dabaf3011accda2e15e54 100644 (file)
@@ -369,6 +369,7 @@ struct bcmgenet_mib_counters {
 #define  EXT_PWR_DOWN_PHY_EN           (1 << 20)
 
 #define EXT_RGMII_OOB_CTRL             0x0C
+#define  RGMII_MODE_EN_V123            (1 << 0)
 #define  RGMII_LINK                    (1 << 4)
 #define  OOB_DISABLE                   (1 << 5)
 #define  RGMII_MODE_EN                 (1 << 6)
index de0e24d912fe9873dbedac6d6694d0c7d937d232..0d527fa5de610c26586ccc33dae468e2e43d2289 100644 (file)
@@ -261,7 +261,11 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
         */
        if (priv->ext_phy) {
                reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
-               reg |= RGMII_MODE_EN | id_mode_dis;
+               reg |= id_mode_dis;
+               if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+                       reg |= RGMII_MODE_EN_V123;
+               else
+                       reg |= RGMII_MODE_EN;
                bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
        }
 
@@ -276,11 +280,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct device_node *dn = priv->pdev->dev.of_node;
        struct phy_device *phydev;
-       u32 phy_flags;
+       u32 phy_flags = 0;
        int ret;
 
        /* Communicate the integrated PHY revision */
-       phy_flags = priv->gphy_rev;
+       if (priv->internal_phy)
+               phy_flags = priv->gphy_rev;
 
        /* Initialize link state variables that bcmgenet_mii_setup() uses */
        priv->old_link = -1;
index 6aeb1045c302ad4cb1dd4efab54e414eb2bc14b9..1ab40c97403bad5f9e6612367167b840c52e0bb8 100644 (file)
@@ -10,7 +10,7 @@
 
 #include "cavium_ptp.h"
 
-#define DRV_NAME       "Cavium PTP Driver"
+#define DRV_NAME "cavium_ptp"
 
 #define PCI_DEVICE_ID_CAVIUM_PTP       0xA00C
 #define PCI_DEVICE_ID_CAVIUM_RST       0xA00E
index 8f746e1348d4cf11272b4fb316b8ac084a32f6f0..3deb3c07681fd6a2b1d76f9c5a578522f4136de9 100644 (file)
@@ -238,8 +238,10 @@ int octeon_setup_iq(struct octeon_device *oct,
        }
 
        oct->num_iqs++;
-       if (oct->fn_list.enable_io_queues(oct))
+       if (oct->fn_list.enable_io_queues(oct)) {
+               octeon_delete_instr_queue(oct, iq_no);
                return 1;
+       }
 
        return 0;
 }
index c34ea385fe4a5b40d9f2905780ed6224fccfb11c..6be6de0774b61f7cd90e7e607326c1e5ac8052c5 100644 (file)
@@ -3270,7 +3270,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!adapter->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
-               goto out_free_adapter;
+               goto out_free_adapter_nofail;
        }
 
        adapter->pdev = pdev;
@@ -3398,6 +3398,9 @@ out_free_dev:
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);
 
+out_free_adapter_nofail:
+       kfree_skb(adapter->nofail_skb);
+
 out_free_adapter:
        kfree(adapter);
 
index d97e0d7e541afde772cf669a98f58d9a5a6eb626..b766362031c32e4be277d13c8f6b0c3397eab97d 100644 (file)
@@ -1065,14 +1065,12 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
        }
 }
 
-static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
-                                   struct cudbg_buffer *dbg_buff,
-                                   struct cudbg_error *cudbg_err,
-                                   u8 mem_type)
+static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
+                                          struct cudbg_error *cudbg_err,
+                                          u8 mem_type)
 {
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_meminfo mem_info;
-       unsigned long size;
        u8 mc_idx;
        int rc;
 
@@ -1086,7 +1084,16 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
        if (rc)
                return rc;
 
-       size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+       return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+}
+
+static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
+                                   struct cudbg_buffer *dbg_buff,
+                                   struct cudbg_error *cudbg_err,
+                                   u8 mem_type)
+{
+       unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
+
        return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
                                 cudbg_err);
 }
index 0f72f9c4ec74c6f42722d3e731071b535d1bab33..b429b726b987b23e0fe74a7a7f2724cd3e55033e 100644 (file)
@@ -3276,8 +3276,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
                return -ENOMEM;
 
        err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
-       if (err)
+       if (err) {
+               kvfree(t);
                return err;
+       }
 
        bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
        kvfree(t);
index f2aba5b160c2d55fd1e319d2c6c284e92a25d64c..d45c435a599d667c8595f1ed9fc1d1f62c65ec52 100644 (file)
@@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = {
 static struct ch_tc_flower_entry *allocate_flower_entry(void)
 {
        struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
-       spin_lock_init(&new->lock);
+       if (new)
+               spin_lock_init(&new->lock);
        return new;
 }
 
index 4bc211093c98e3564e628a7ccbdacbadbd1c040d..dba8a0c1eda3a5a235952cb878f381d60f4037fa 100644 (file)
@@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
 static int alloc_uld_rxqs(struct adapter *adap,
                          struct sge_uld_rxq_info *rxq_info, bool lro)
 {
-       struct sge *s = &adap->sge;
        unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+       int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
        struct sge_ofld_rxq *q = rxq_info->uldrxq;
        unsigned short *ids = rxq_info->rspq_id;
-       unsigned int bmap_idx = 0;
+       struct sge *s = &adap->sge;
        unsigned int per_chan;
-       int i, err, msi_idx, que_idx = 0;
 
        per_chan = rxq_info->nrxq / adap->params.nports;
 
@@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
 
                if (msi_idx >= 0) {
                        bmap_idx = get_msix_idx_from_bmap(adap);
+                       if (bmap_idx < 0) {
+                               err = -ENOSPC;
+                               goto freeout;
+                       }
                        msi_idx = adap->msix_info_ulds[bmap_idx].idx;
                }
                err = t4_sge_alloc_rxq(adap, &q->rspq, false,
index bfb16a4744901736b6db90352a4fdaf0e41b2290..d1905d50c26cb8a58dcee88648f09e1cd709ebd3 100644 (file)
@@ -895,7 +895,7 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
                         u64 *data)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       int status;
+       int status, cnt;
        u8 link_status = 0;
 
        if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
@@ -906,6 +906,9 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
 
        memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
 
+       /* check link status before offline tests */
+       link_status = netif_carrier_ok(netdev);
+
        if (test->flags & ETH_TEST_FL_OFFLINE) {
                if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
                        test->flags |= ETH_TEST_FL_FAILED;
@@ -926,13 +929,26 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
                test->flags |= ETH_TEST_FL_FAILED;
        }
 
-       status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
-       if (status) {
-               test->flags |= ETH_TEST_FL_FAILED;
-               data[4] = -1;
-       } else if (!link_status) {
+       /* link status was down prior to test */
+       if (!link_status) {
                test->flags |= ETH_TEST_FL_FAILED;
                data[4] = 1;
+               return;
+       }
+
+       for (cnt = 10; cnt; cnt--) {
+               status = be_cmd_link_status_query(adapter, NULL, &link_status,
+                                                 0);
+               if (status) {
+                       test->flags |= ETH_TEST_FL_FAILED;
+                       data[4] = -1;
+                       break;
+               }
+
+               if (link_status)
+                       break;
+
+               msleep_interruptible(500);
        }
 }
 
index bff74752cef16f4fd55b1702660ca048a236162c..3fe6a28027fe13fcde75b5d7848b3a8246b686ae 100644 (file)
@@ -4700,8 +4700,12 @@ int be_update_queues(struct be_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        int status;
 
-       if (netif_running(netdev))
+       if (netif_running(netdev)) {
+               /* device cannot transmit now, avoid dev_watchdog timeouts */
+               netif_carrier_off(netdev);
+
                be_close(netdev);
+       }
 
        be_cancel_worker(adapter);
 
index bf715a3672736eda537aefc64ebdb00d0835a39f..4cf80de4c471c425905da873c79ba3b5635da378 100644 (file)
@@ -1689,10 +1689,10 @@ static void fec_get_mac(struct net_device *ndev)
         */
        if (!is_valid_ether_addr(iap)) {
                /* Report it and use a random ethernet address instead */
-               netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
+               dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
                eth_hw_addr_random(ndev);
-               netdev_info(ndev, "Using random MAC address: %pM\n",
-                           ndev->dev_addr);
+               dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
+                        ndev->dev_addr);
                return;
        }
 
index 6127697ede120707fdc92f8d50cc9355487664f3..a91d49dd92ea6c32a308ea61d0e1a02785985aa1 100644 (file)
@@ -157,6 +157,7 @@ struct hip04_priv {
        unsigned int reg_inten;
 
        struct napi_struct napi;
+       struct device *dev;
        struct net_device *ndev;
 
        struct tx_desc *tx_desc;
@@ -185,7 +186,7 @@ struct hip04_priv {
 
 static inline unsigned int tx_count(unsigned int head, unsigned int tail)
 {
-       return (head - tail) % (TX_DESC_NUM - 1);
+       return (head - tail) % TX_DESC_NUM;
 }
 
 static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
@@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
                }
 
                if (priv->tx_phys[tx_tail]) {
-                       dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
+                       dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
                                         priv->tx_skb[tx_tail]->len,
                                         DMA_TO_DEVICE);
                        priv->tx_phys[tx_tail] = 0;
@@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                return NETDEV_TX_BUSY;
        }
 
-       phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
-       if (dma_mapping_error(&ndev->dev, phys)) {
+       phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+       if (dma_mapping_error(priv->dev, phys)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }
@@ -497,6 +498,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
        u16 len;
        u32 err;
 
+       /* clean up tx descriptors */
+       tx_remaining = hip04_tx_reclaim(ndev, false);
+
        while (cnt && !last) {
                buf = priv->rx_buf[priv->rx_head];
                skb = build_skb(buf, priv->rx_buf_size);
@@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
                        goto refill;
                }
 
-               dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
+               dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
                                 RX_BUF_SIZE, DMA_FROM_DEVICE);
                priv->rx_phys[priv->rx_head] = 0;
 
@@ -534,9 +538,9 @@ refill:
                buf = netdev_alloc_frag(priv->rx_buf_size);
                if (!buf)
                        goto done;
-               phys = dma_map_single(&ndev->dev, buf,
+               phys = dma_map_single(priv->dev, buf,
                                      RX_BUF_SIZE, DMA_FROM_DEVICE);
-               if (dma_mapping_error(&ndev->dev, phys))
+               if (dma_mapping_error(priv->dev, phys))
                        goto done;
                priv->rx_buf[priv->rx_head] = buf;
                priv->rx_phys[priv->rx_head] = phys;
@@ -557,8 +561,7 @@ refill:
        }
        napi_complete_done(napi, rx);
 done:
-       /* clean up tx descriptors and start a new timer if necessary */
-       tx_remaining = hip04_tx_reclaim(ndev, false);
+       /* start a new timer if necessary */
        if (rx < budget && tx_remaining)
                hip04_start_tx_timer(priv);
 
@@ -640,9 +643,9 @@ static int hip04_mac_open(struct net_device *ndev)
        for (i = 0; i < RX_DESC_NUM; i++) {
                dma_addr_t phys;
 
-               phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
+               phys = dma_map_single(priv->dev, priv->rx_buf[i],
                                      RX_BUF_SIZE, DMA_FROM_DEVICE);
-               if (dma_mapping_error(&ndev->dev, phys))
+               if (dma_mapping_error(priv->dev, phys))
                        return -EIO;
 
                priv->rx_phys[i] = phys;
@@ -676,7 +679,7 @@ static int hip04_mac_stop(struct net_device *ndev)
 
        for (i = 0; i < RX_DESC_NUM; i++) {
                if (priv->rx_phys[i]) {
-                       dma_unmap_single(&ndev->dev, priv->rx_phys[i],
+                       dma_unmap_single(priv->dev, priv->rx_phys[i],
                                         RX_BUF_SIZE, DMA_FROM_DEVICE);
                        priv->rx_phys[i] = 0;
                }
@@ -820,6 +823,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        priv = netdev_priv(ndev);
+       priv->dev = d;
        priv->ndev = ndev;
        platform_set_drvdata(pdev, ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);
index fff5be8078ac388102456f3e505a5a0d46b630bf..0594a6c3dccda161a9ecc3ac38e55eb456e16f49 100644 (file)
@@ -229,6 +229,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
 
                ae_algo->ops->uninit_ae_dev(ae_dev);
                hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+               ae_dev->ops = NULL;
        }
 
        list_del(&ae_algo->node);
@@ -316,6 +317,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
 
                ae_algo->ops->uninit_ae_dev(ae_dev);
                hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+               ae_dev->ops = NULL;
        }
 
        list_del(&ae_dev->node);
index 9684ad015c429dfdde7a82e614d390945eb80f60..6a3c6b02a77cd5ddb33b5de31dc58cd2442bf114 100644 (file)
@@ -245,11 +245,13 @@ static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
 
                skb_get(skb);
                tx_ret = hns3_nic_net_xmit(skb, ndev);
-               if (tx_ret == NETDEV_TX_OK)
+               if (tx_ret == NETDEV_TX_OK) {
                        good_cnt++;
-               else
+               } else {
+                       kfree_skb(skb);
                        netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
                                   tx_ret);
+               }
        }
        if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
                ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
index 4648c6a9d9e8654191060af4c88a7ac5549f71a2..89ca69fa2b97b2ea28647192db6747049dab18f2 100644 (file)
@@ -663,8 +663,7 @@ static u8 *hclge_comm_get_strings(u32 stringset,
                return buff;
 
        for (i = 0; i < size; i++) {
-               snprintf(buff, ETH_GSTRING_LEN,
-                        strs[i].desc);
+               snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
                buff = buff + ETH_GSTRING_LEN;
        }
 
index 48235dc2dd56f03ce482f10b17a5021fb73ca7fc..11e9259ca0407b255672e85e87f25eee554a9103 100644 (file)
@@ -54,7 +54,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
        u32 tick;
 
        /* Calc tick */
-       if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
+       if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
+           ir > HCLGE_ETHER_MAX_RATE)
                return -EINVAL;
 
        tick = tick_array[shaper_level];
@@ -1057,6 +1058,9 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
        int ret;
        u8 i;
 
+       if (vport->vport_id >= HNAE3_MAX_TC)
+               return -EINVAL;
+
        ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
        if (ret)
                return ret;
index baf5cc251f3299499f3fc03ee17513aa740110f6..9a3bc0994a1dbd3da534c3bbc7138e128d6a558e 100644 (file)
@@ -156,11 +156,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
 {
        u32 time_cnt;
        u32 reg_value;
+       int ret;
 
        regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
 
        for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
-               regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+               ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+               if (ret)
+                       return ret;
+
                reg_value &= st_msk;
                if ((!!check_st) == (!!reg_value))
                        break;
index b69c622ba8b2d027fff91468a15c88349edae3d7..6f0e4019adefa27b2cd50f80271b0b70f79b72df 100644 (file)
@@ -96,6 +96,8 @@
 
 #define OPT_SWAP_PORT  0x0001  /* Need to wordswp on the MPU port */
 
+#define LIB82596_DMA_ATTR      DMA_ATTR_NON_CONSISTENT
+
 #define DMA_WBACK(ndev, addr, len) \
        do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
 
@@ -199,7 +201,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
 
        unregister_netdev (dev);
        dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
-                      lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+                      lp->dma_addr, LIB82596_DMA_ATTR);
        free_netdev (dev);
        return 0;
 }
index 2f7ae118217fe881a53614fb436ed119bf56919f..d0e8193ca4708a6dbb4a06cdb3b847760fc65622 100644 (file)
@@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev)
 
        dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
                              &lp->dma_addr, GFP_KERNEL,
-                             DMA_ATTR_NON_CONSISTENT);
+                             LIB82596_DMA_ATTR);
        if (!dma) {
                printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
                return -ENOMEM;
@@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev)
        i = register_netdev(dev);
        if (i) {
                dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
-                              dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+                              dma, lp->dma_addr, LIB82596_DMA_ATTR);
                return i;
        }
 
index b2c04a789744fc770b798d79b96604910077dfd3..43c1fd18670b00373e099adcab94062a0ea93fdb 100644 (file)
@@ -23,6 +23,8 @@
 
 static const char sni_82596_string[] = "snirm_82596";
 
+#define LIB82596_DMA_ATTR      0
+
 #define DMA_WBACK(priv, addr, len)     do { } while (0)
 #define DMA_INV(priv, addr, len)       do { } while (0)
 #define DMA_WBACK_INV(priv, addr, len) do { } while (0)
@@ -151,7 +153,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
 
        unregister_netdev(dev);
        dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
-                      lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+                      lp->dma_addr, LIB82596_DMA_ATTR);
        iounmap(lp->ca);
        iounmap(lp->mpu_port);
        free_netdev (dev);
index f70cb4d3c684669b408d09aecf552388b8cb7d15..40ad1e5032553b9a833b0a7697f7cc9316fee382 100644 (file)
@@ -1618,7 +1618,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        struct net_device *netdev;
        struct ibmveth_adapter *adapter;
        unsigned char *mac_addr_p;
-       unsigned int *mcastFilterSize_p;
+       __be32 *mcastFilterSize_p;
        long ret;
        unsigned long ret_attr;
 
@@ -1640,8 +1640,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
                return -EINVAL;
        }
 
-       mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
-                                               VETH_MCAST_FILTER_SIZE, NULL);
+       mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
+                                                       VETH_MCAST_FILTER_SIZE,
+                                                       NULL);
        if (!mcastFilterSize_p) {
                dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
                        "attribute\n");
@@ -1658,7 +1659,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
        adapter->vdev = dev;
        adapter->netdev = netdev;
-       adapter->mcastFilterSize = *mcastFilterSize_p;
+       adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
        adapter->pool_config = 0;
 
        netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
index 0ae43d27cdcff5d90c060893df72bd8d255dff39..8fa14736449bcf34a9b44803c7467fba1ff7fc0e 100644 (file)
@@ -1586,6 +1586,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
                                               (u64)tx_buff->indir_dma,
                                               (u64)num_entries);
+               dma_unmap_single(dev, tx_buff->indir_dma,
+                                sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
        } else {
                tx_buff->num_entries = num_entries;
                lpar_rc = send_subcrq(adapter, handle_array[queue_num],
@@ -1996,6 +1998,13 @@ static void __ibmvnic_reset(struct work_struct *work)
 
        rwi = get_next_rwi(adapter);
        while (rwi) {
+               if (adapter->state == VNIC_REMOVING ||
+                   adapter->state == VNIC_REMOVED) {
+                       kfree(rwi);
+                       rc = EBUSY;
+                       break;
+               }
+
                if (adapter->force_reset_recovery) {
                        adapter->force_reset_recovery = false;
                        rc = do_hard_reset(adapter, rwi, reset_state);
@@ -2722,12 +2731,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 
        if (adapter->resetting &&
            adapter->reset_reason == VNIC_RESET_MOBILITY) {
-               u64 val = (0xff000000) | scrq->hw_irq;
+               struct irq_desc *desc = irq_to_desc(scrq->irq);
+               struct irq_chip *chip = irq_desc_get_chip(desc);
 
-               rc = plpar_hcall_norets(H_EOI, val);
-               if (rc)
-                       dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
-                               val, rc);
+               chip->irq_eoi(&desc->irq_data);
        }
 
        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
@@ -2747,7 +2754,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
        union sub_crq *next;
        int index;
        int i, j;
-       u8 *first;
 
 restart_loop:
        while (pending_scrq(adapter, scrq)) {
@@ -2777,14 +2783,6 @@ restart_loop:
 
                                txbuff->data_dma[j] = 0;
                        }
-                       /* if sub_crq was sent indirectly */
-                       first = &txbuff->indir_arr[0].generic.first;
-                       if (*first == IBMVNIC_CRQ_CMD) {
-                               dma_unmap_single(dev, txbuff->indir_dma,
-                                                sizeof(txbuff->indir_arr),
-                                                DMA_TO_DEVICE);
-                               *first = 0;
-                       }
 
                        if (txbuff->last_frag) {
                                dev_kfree_skb_any(txbuff->skb);
index cdae0efde8e6415a89afeaec45de49acab763164..7998a73b6a0fa175bab2e986a98116af752b4ab1 100644 (file)
@@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                        else
                                phy_reg |= 0xFA;
                        e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
+
+                       if (speed == SPEED_1000) {
+                               hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
+                                                           &phy_reg);
+
+                               phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
+
+                               hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
+                                                            phy_reg);
+                       }
                }
                hw->phy.ops.release(hw);
 
index eb09c755fa172314ffa8500b110d0c481d14e680..1502895eb45ddd996c9f75aefc4ac29ea61e16a5 100644 (file)
 
 /* PHY Power Management Control */
 #define HV_PM_CTRL             PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_CLK_REQ          0x200
 #define HV_PM_CTRL_K1_ENABLE           0x4000
 
 #define I217_PLL_CLOCK_GATE_REG        PHY_REG(772, 28)
index 8cd339c92c1afbe153652459b3f286af84f1fd1a..a7b5a47ab83d5b1ba2eb34d0f328bebd2e809d1d 100644 (file)
@@ -4208,7 +4208,7 @@ void e1000e_up(struct e1000_adapter *adapter)
                e1000_configure_msix(adapter);
        e1000_irq_enable(adapter);
 
-       netif_start_queue(adapter->netdev);
+       /* Tx queue started by watchdog timer when link is up */
 
        e1000e_trigger_lsc(adapter);
 }
@@ -4584,6 +4584,7 @@ int e1000e_open(struct net_device *netdev)
        pm_runtime_get_sync(&pdev->dev);
 
        netif_carrier_off(netdev);
+       netif_stop_queue(netdev);
 
        /* allocate transmit descriptors */
        err = e1000e_setup_tx_resources(adapter->tx_ring);
@@ -4644,7 +4645,6 @@ int e1000e_open(struct net_device *netdev)
        e1000_irq_enable(adapter);
 
        adapter->tx_hang_recheck = false;
-       netif_start_queue(netdev);
 
        hw->mac.get_link_status = true;
        pm_runtime_put(&pdev->dev);
@@ -5266,6 +5266,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                        if (phy->ops.cfg_on_link_up)
                                phy->ops.cfg_on_link_up(hw);
 
+                       netif_wake_queue(netdev);
                        netif_carrier_on(netdev);
 
                        if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -5279,6 +5280,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                        /* Link status message must follow this format */
                        pr_info("%s NIC Link is Down\n", adapter->netdev->name);
                        netif_carrier_off(netdev);
+                       netif_stop_queue(netdev);
                        if (!test_bit(__E1000_DOWN, &adapter->state))
                                mod_timer(&adapter->phy_info_timer,
                                          round_jiffies(jiffies + 2 * HZ));
@@ -5286,13 +5288,8 @@ static void e1000_watchdog_task(struct work_struct *work)
                        /* 8000ES2LAN requires a Rx packet buffer work-around
                         * on link down event; reset the controller to flush
                         * the Rx packet buffer.
-                        *
-                        * If the link is lost the controller stops DMA, but
-                        * if there is queued Tx work it cannot be done.  So
-                        * reset the controller to flush the Tx packet buffers.
                         */
-                       if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
-                           e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
+                       if (adapter->flags & FLAG_RX_NEEDS_RESTART)
                                adapter->flags |= FLAG_RESTART_NOW;
                        else
                                pm_schedule_suspend(netdev->dev.parent,
@@ -5315,6 +5312,14 @@ link_up:
        adapter->gotc_old = adapter->stats.gotc;
        spin_unlock(&adapter->stats64_lock);
 
+       /* If the link is lost the controller stops DMA, but
+        * if there is queued Tx work it cannot be done.  So
+        * reset the controller to flush the Tx packet buffers.
+        */
+       if (!netif_carrier_ok(netdev) &&
+           (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+               adapter->flags |= FLAG_RESTART_NOW;
+
        /* If reset is necessary, do it outside of interrupt context. */
        if (adapter->flags & FLAG_RESTART_NOW) {
                schedule_work(&adapter->reset_task);
index 4e04985fb4307a9cfaeca1e29e5e6ec591642200..055562c930fb0813a88e3e6fad7b6e66f3a63ae6 100644 (file)
@@ -2566,6 +2566,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
                return;
        if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
                return;
+       if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
+               set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+               return;
+       }
 
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (pf->vsi[v] &&
@@ -2580,6 +2584,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
                        }
                }
        }
+       clear_bit(__I40E_VF_DISABLE, pf->state);
 }
 
 /**
index a9730711e2579da0ed1621f57d99597868ca2c83..b56d22b530a7079ff740c56a490e0e79a2c27822 100644 (file)
@@ -1291,7 +1291,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
                                          struct i40e_rx_buffer *rx_buffer,
                                          unsigned int size)
 {
-       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+       void *va;
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
@@ -1301,6 +1301,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
        struct sk_buff *skb;
 
        /* prefetch first cache line of first page */
+       va = page_address(rx_buffer->page) + rx_buffer->page_offset;
        prefetch(va);
 #if L1_CACHE_BYTES < 128
        prefetch(va + L1_CACHE_BYTES);
@@ -1355,7 +1356,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
                                      struct i40e_rx_buffer *rx_buffer,
                                      unsigned int size)
 {
-       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+       void *va;
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
@@ -1365,6 +1366,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
        struct sk_buff *skb;
 
        /* prefetch first cache line of first page */
+       va = page_address(rx_buffer->page) + rx_buffer->page_offset;
        prefetch(va);
 #if L1_CACHE_BYTES < 128
        prefetch(va + L1_CACHE_BYTES);
index 5aa083d9a6c9ac0a604da886913811476b58f52b..ab76a5f77cd0e82f85665fcc7092bc7fde3e21cd 100644 (file)
@@ -5703,6 +5703,7 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
         */
        if (tx_ring->launchtime_enable) {
                ts = ns_to_timespec64(first->skb->tstamp);
+               first->skb->tstamp = 0;
                context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
        } else {
                context_desc->seqnum_seed = 0;
index e5a8461fe6a99bfbf8ab20b85e38c0f0c24e0bb5..8829bd95d0d36557acf0e669f77f6680e45485c5 100644 (file)
@@ -3223,7 +3223,8 @@ static int ixgbe_get_module_info(struct net_device *dev,
                page_swap = true;
        }
 
-       if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+       if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
+           !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
                /* We have a SFP, but it does not support SFF-8472 */
                modinfo->type = ETH_MODULE_SFF_8079;
                modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
index 410d5d3aa393f1e496b483c8fef8ea8e8fb5eb83..85280765d793de5239bd88807ef52e468852b0aa 100644 (file)
@@ -34,6 +34,7 @@
 #include <net/tc_act/tc_mirred.h>
 #include <net/vxlan.h>
 #include <net/mpls.h>
+#include <net/xfrm.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -2625,7 +2626,7 @@ adjust_by_size:
                /* 16K ints/sec to 9.2K ints/sec */
                avg_wire_size *= 15;
                avg_wire_size += 11452;
-       } else if (avg_wire_size <= 1980) {
+       } else if (avg_wire_size < 1968) {
                /* 9.2K ints/sec to 8K ints/sec */
                avg_wire_size *= 5;
                avg_wire_size += 22420;
@@ -2658,6 +2659,8 @@ adjust_by_size:
        case IXGBE_LINK_SPEED_2_5GB_FULL:
        case IXGBE_LINK_SPEED_1GB_FULL:
        case IXGBE_LINK_SPEED_10_FULL:
+               if (avg_wire_size > 8064)
+                       avg_wire_size = 8064;
                itr += DIV_ROUND_UP(avg_wire_size,
                                    IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
                       IXGBE_ITR_ADAPTIVE_MIN_INC;
@@ -8599,7 +8602,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 #endif /* IXGBE_FCOE */
 
 #ifdef CONFIG_XFRM_OFFLOAD
-       if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
+       if (xfrm_offload(skb) &&
+           !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
                goto out_drop;
 #endif
        tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
index 64e44e01c973fc4c047f04432c4694ac06271a25..c56baad04ee615067649c521aef12bedb99c150c 100644 (file)
@@ -45,6 +45,7 @@
 #define IXGBE_SFF_SOFT_RS_SELECT_10G           0x8
 #define IXGBE_SFF_SOFT_RS_SELECT_1G            0x0
 #define IXGBE_SFF_ADDRESSING_MODE              0x4
+#define IXGBE_SFF_DDM_IMPLEMENTED              0x40
 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE         0x1
 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE                0x8
 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
index c5dac6bd2be4d31b988df0f572deedff15ce18fc..ee7857298361ded94b87e7e715c87610a2a05924 100644 (file)
@@ -64,7 +64,7 @@
 
 struct orion_mdio_dev {
        void __iomem *regs;
-       struct clk *clk[3];
+       struct clk *clk[4];
        /*
         * If we have access to the error interrupt pin (which is
         * somewhat misnamed as it not only reflects internal errors
@@ -321,6 +321,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
 
        for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
                dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
+               if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
+                       ret = -EPROBE_DEFER;
+                       goto out_clk;
+               }
                if (IS_ERR(dev->clk[i]))
                        break;
                clk_prepare_enable(dev->clk[i]);
@@ -362,6 +366,7 @@ out_mdio:
        if (dev->err_interrupt > 0)
                writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
 
+out_clk:
        for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
                if (IS_ERR(dev->clk[i]))
                        break;
index 2bd4fd091424ddd84fadc175a54377bec68659fa..4958f51bb4f89c8af7d8631fadbb24079d243dd4 100644 (file)
@@ -3486,6 +3486,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
 {
        struct mvpp2_port *port = netdev_priv(dev);
+       bool running = netif_running(dev);
        int err;
 
        if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
@@ -3494,40 +3495,24 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
                mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
        }
 
-       if (!netif_running(dev)) {
-               err = mvpp2_bm_update_mtu(dev, mtu);
-               if (!err) {
-                       port->pkt_size =  MVPP2_RX_PKT_SIZE(mtu);
-                       return 0;
-               }
-
-               /* Reconfigure BM to the original MTU */
-               err = mvpp2_bm_update_mtu(dev, dev->mtu);
-               if (err)
-                       goto log_error;
-       }
-
-       mvpp2_stop_dev(port);
+       if (running)
+               mvpp2_stop_dev(port);
 
        err = mvpp2_bm_update_mtu(dev, mtu);
-       if (!err) {
+       if (err) {
+               netdev_err(dev, "failed to change MTU\n");
+               /* Reconfigure BM to the original MTU */
+               mvpp2_bm_update_mtu(dev, dev->mtu);
+       } else {
                port->pkt_size =  MVPP2_RX_PKT_SIZE(mtu);
-               goto out_start;
        }
 
-       /* Reconfigure BM to the original MTU */
-       err = mvpp2_bm_update_mtu(dev, dev->mtu);
-       if (err)
-               goto log_error;
-
-out_start:
-       mvpp2_start_dev(port);
-       mvpp2_egress_enable(port);
-       mvpp2_ingress_enable(port);
+       if (running) {
+               mvpp2_start_dev(port);
+               mvpp2_egress_enable(port);
+               mvpp2_ingress_enable(port);
+       }
 
-       return 0;
-log_error:
-       netdev_err(dev, "failed to change MTU\n");
        return err;
 }
 
@@ -4412,9 +4397,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
        if (state->pause & MLO_PAUSE_RX)
                ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
 
-       ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
-       ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
-                MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
+       ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
+                  MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
+       ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
 
        writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
        writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
@@ -5343,9 +5328,6 @@ static int mvpp2_remove(struct platform_device *pdev)
 
        mvpp2_dbgfs_cleanup(priv);
 
-       flush_workqueue(priv->stats_queue);
-       destroy_workqueue(priv->stats_queue);
-
        fwnode_for_each_available_child_node(fwnode, port_fwnode) {
                if (priv->port_list[i]) {
                        mutex_destroy(&priv->port_list[i]->gather_stats_lock);
@@ -5354,6 +5336,8 @@ static int mvpp2_remove(struct platform_device *pdev)
                i++;
        }
 
+       destroy_workqueue(priv->stats_queue);
+
        for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
                struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
 
index ae2240074d8ef8c3ab763af481acb8936db7ff25..5692c6087bbb0781ef473ea5dfe8f6148c4ae4f7 100644 (file)
@@ -312,7 +312,8 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
        }
 
        /* Set value */
-       pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;
+       pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
+               shift & MVPP2_PRS_SRAM_SHIFT_MASK;
 
        /* Reset and set operation */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
index 15dea48e019531fe01a97115775da5e1c45dd234..d6f8a41c3e35cd3c2e555b4cabd2e1f90a15973a 100644 (file)
@@ -3122,7 +3122,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
        skb_put(skb, len);
 
        if (dev->features & NETIF_F_RXCSUM) {
-               skb->csum = csum;
+               skb->csum = le16_to_cpu(csum);
                skb->ip_summed = CHECKSUM_COMPLETE;
        }
 
index 1485f66cf7b0ca5001f4559e03131a9b4430e0f9..d013f30019b69f61617157304974649de6359ae1 100644 (file)
@@ -4947,6 +4947,20 @@ static const struct dmi_system_id msi_blacklist[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
                },
        },
+       {
+               .ident = "ASUS P6T",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "P6T"),
+               },
+       },
+       {
+               .ident = "ASUS P6X",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "P6X"),
+               },
+       },
        {}
 };
 
index f5cd9539980f80384d89b21931ef8739a296f1b5..45d9a5f8fa1bcd11f89900c20c5e53f9e654a9ef 100644 (file)
@@ -1190,7 +1190,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
        err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
        if (err) {
                en_err(priv, "Failed to allocate RSS indirection QP\n");
-               goto rss_err;
+               goto qp_alloc_err;
        }
 
        rss_map->indir_qp->event = mlx4_en_sqp_event;
@@ -1244,6 +1244,7 @@ indir_err:
                       MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
        mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
        mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+qp_alloc_err:
        kfree(rss_map->indir_qp);
        rss_map->indir_qp = NULL;
 rss_err:
index 1c225be9c7db996f58c9033c54279fd84bbba2b0..3692d6a1cce8d6acebda01fa84bf0ce8d5a1fcb4 100644 (file)
@@ -307,7 +307,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
        struct mlx5_interface *intf;
 
        mutex_lock(&mlx5_intf_mutex);
-       list_for_each_entry(intf, &intf_list, list)
+       list_for_each_entry_reverse(intf, &intf_list, list)
                mlx5_remove_device(intf, priv);
        list_del(&priv->dev_list);
        mutex_unlock(&mlx5_intf_mutex);
index da52e60d4437c406bc5b1bdb3cec7000b86bea9d..d79e177f89901485afb77f255abdd37b19e954a2 100644 (file)
@@ -210,6 +210,7 @@ static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
        "tx_cqe_moder",
        "rx_cqe_compress",
        "rx_striding_rq",
+       "rx_no_csum_complete",
 };
 
 enum mlx5e_priv_flag {
@@ -217,6 +218,7 @@ enum mlx5e_priv_flag {
        MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
        MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
        MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
+       MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
 };
 
 #define MLX5E_SET_PFLAG(params, pflag, enable)                 \
@@ -298,6 +300,7 @@ struct mlx5e_dcbx_dp {
 enum {
        MLX5E_RQ_STATE_ENABLED,
        MLX5E_RQ_STATE_AM,
+       MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
 };
 
 struct mlx5e_cq {
index 45cdde694d20049af85391ffcc376438fadb921e..a4be04debe671e142bafd9b34a0dd50ab4dcd8e4 100644 (file)
@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
        return &arfs_t->rules_hash[bucket_idx];
 }
 
-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
-{
-       return (skb->protocol == htons(ETH_P_IP)) ?
-               ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
-}
-
 static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
                                         u8 ip_proto, __be16 etype)
 {
@@ -599,31 +593,9 @@ out:
        arfs_may_expire_flow(priv);
 }
 
-/* return L4 destination port from ip4/6 packets */
-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
-{
-       char *transport_header;
-
-       transport_header = skb_transport_header(skb);
-       if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-               return ((struct tcphdr *)transport_header)->dest;
-       return ((struct udphdr *)transport_header)->dest;
-}
-
-/* return L4 source port from ip4/6 packets */
-static __be16 arfs_get_src_port(const struct sk_buff *skb)
-{
-       char *transport_header;
-
-       transport_header = skb_transport_header(skb);
-       if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-               return ((struct tcphdr *)transport_header)->source;
-       return ((struct udphdr *)transport_header)->source;
-}
-
 static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
                                         struct arfs_table *arfs_t,
-                                        const struct sk_buff *skb,
+                                        const struct flow_keys *fk,
                                         u16 rxq, u32 flow_id)
 {
        struct arfs_rule *rule;
@@ -638,19 +610,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
        INIT_WORK(&rule->arfs_work, arfs_handle_work);
 
        tuple = &rule->tuple;
-       tuple->etype = skb->protocol;
+       tuple->etype = fk->basic.n_proto;
+       tuple->ip_proto = fk->basic.ip_proto;
        if (tuple->etype == htons(ETH_P_IP)) {
-               tuple->src_ipv4 = ip_hdr(skb)->saddr;
-               tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+               tuple->src_ipv4 = fk->addrs.v4addrs.src;
+               tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
        } else {
-               memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+               memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
                       sizeof(struct in6_addr));
-               memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+               memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
                       sizeof(struct in6_addr));
        }
-       tuple->ip_proto = arfs_get_ip_proto(skb);
-       tuple->src_port = arfs_get_src_port(skb);
-       tuple->dst_port = arfs_get_dst_port(skb);
+       tuple->src_port = fk->ports.src;
+       tuple->dst_port = fk->ports.dst;
 
        rule->flow_id = flow_id;
        rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -661,37 +633,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
        return rule;
 }
 
-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
-                        const struct sk_buff *skb)
+static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
 {
-       if (tuple->etype == htons(ETH_P_IP) &&
-           tuple->src_ipv4 == ip_hdr(skb)->saddr &&
-           tuple->dst_ipv4 == ip_hdr(skb)->daddr)
-               return true;
-       if (tuple->etype == htons(ETH_P_IPV6) &&
-           (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
-                    sizeof(struct in6_addr))) &&
-           (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
-                    sizeof(struct in6_addr))))
-               return true;
+       if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
+               return false;
+       if (tuple->etype != fk->basic.n_proto)
+               return false;
+       if (tuple->etype == htons(ETH_P_IP))
+               return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
+                      tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
+       if (tuple->etype == htons(ETH_P_IPV6))
+               return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+                              sizeof(struct in6_addr)) &&
+                      !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+                              sizeof(struct in6_addr));
        return false;
 }
 
 static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
-                                       const struct sk_buff *skb)
+                                       const struct flow_keys *fk)
 {
        struct arfs_rule *arfs_rule;
        struct hlist_head *head;
-       __be16 src_port = arfs_get_src_port(skb);
-       __be16 dst_port = arfs_get_dst_port(skb);
 
-       head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+       head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
        hlist_for_each_entry(arfs_rule, head, hlist) {
-               if (arfs_rule->tuple.src_port == src_port &&
-                   arfs_rule->tuple.dst_port == dst_port &&
-                   arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+               if (arfs_cmp(&arfs_rule->tuple, fk))
                        return arfs_rule;
-               }
        }
 
        return NULL;
@@ -704,20 +672,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
        struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
        struct arfs_table *arfs_t;
        struct arfs_rule *arfs_rule;
+       struct flow_keys fk;
+
+       if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+               return -EPROTONOSUPPORT;
 
-       if (skb->protocol != htons(ETH_P_IP) &&
-           skb->protocol != htons(ETH_P_IPV6))
+       if (fk.basic.n_proto != htons(ETH_P_IP) &&
+           fk.basic.n_proto != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;
 
        if (skb->encapsulation)
                return -EPROTONOSUPPORT;
 
-       arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+       arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
        if (!arfs_t)
                return -EPROTONOSUPPORT;
 
        spin_lock_bh(&arfs->arfs_lock);
-       arfs_rule = arfs_find_rule(arfs_t, skb);
+       arfs_rule = arfs_find_rule(arfs_t, &fk);
        if (arfs_rule) {
                if (arfs_rule->rxq == rxq_index) {
                        spin_unlock_bh(&arfs->arfs_lock);
@@ -725,8 +697,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                }
                arfs_rule->rxq = rxq_index;
        } else {
-               arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
-                                           rxq_index, flow_id);
+               arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
                if (!arfs_rule) {
                        spin_unlock_bh(&arfs->arfs_lock);
                        return -ENOMEM;
index 792bb8bc0cd34bda53ab35bdcfd4acd8391a2a18..10d72c83714db78efae06948bc74d30dd4a95f4c 100644 (file)
@@ -1083,6 +1083,9 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;
 
+       if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+               return -EOPNOTSUPP;
+
        if (pauseparam->autoneg)
                return -EINVAL;
 
@@ -1507,6 +1510,28 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
        return 0;
 }
 
+static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5e_channels *channels = &priv->channels;
+       struct mlx5e_channel *c;
+       int i;
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
+           priv->channels.params.xdp_prog)
+               return 0;
+
+       for (i = 0; i < channels->num; i++) {
+               c = channels->c[i];
+               if (enable)
+                       __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+               else
+                       __clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+       }
+
+       return 0;
+}
+
 static int mlx5e_handle_pflag(struct net_device *netdev,
                              u32 wanted_flags,
                              enum mlx5e_priv_flag flag,
@@ -1558,6 +1583,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
        err = mlx5e_handle_pflag(netdev, pflags,
                                 MLX5E_PFLAG_RX_STRIDING_RQ,
                                 set_pflag_rx_striding_rq);
+       if (err)
+               goto out;
+
+       err = mlx5e_handle_pflag(netdev, pflags,
+                                MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
+                                set_pflag_rx_no_csum_complete);
 
 out:
        mutex_unlock(&priv->state_lock);
index 0f1c296c3ce4ec97cbef95e9cce0b29ffffa148a..7e6706333fa8d27250e12fc5f276bd7603e4ab0e 100644 (file)
@@ -420,12 +420,11 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
 
 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
 {
-       struct mlx5e_wqe_frag_info next_frag, *prev;
+       struct mlx5e_wqe_frag_info next_frag = {};
+       struct mlx5e_wqe_frag_info *prev = NULL;
        int i;
 
        next_frag.di = &rq->wqe.di[0];
-       next_frag.offset = 0;
-       prev = NULL;
 
        for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
                struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
@@ -935,6 +934,13 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (params->rx_dim_enabled)
                __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
+       /* We disable csum_complete when XDP is enabled since
+        * XDP programs might manipulate packets which will render
+        * skb->checksum incorrect.
+        */
+       if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
+               __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+
        return 0;
 
 err_destroy_rq:
@@ -4534,6 +4540,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
                params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
 
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
+       MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
 
        /* RQ */
        /* Prefer Striding RQ, unless any of the following holds:
index d3f794d4fb96ebe332479a053e272ba2347179b8..df49dc143c47133c68ef5682e945d282576f6bdc 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/busy_poll.h>
 #include <net/ip6_checksum.h>
 #include <net/page_pool.h>
+#include <net/inet_ecn.h>
 #include "en.h"
 #include "en_tc.h"
 #include "eswitch.h"
@@ -688,27 +689,110 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
        skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
 }
 
-static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
+static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
+                                       __be16 *proto)
 {
-       __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+       *proto = ((struct ethhdr *)skb->data)->h_proto;
+       *proto = __vlan_get_protocol(skb, *proto, network_depth);
 
-       ethertype = __vlan_get_protocol(skb, ethertype, network_depth);
-       return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+       if (*proto == htons(ETH_P_IP))
+               return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
+
+       if (*proto == htons(ETH_P_IPV6))
+               return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
+
+       return false;
 }
 
-static u32 mlx5e_get_fcs(const struct sk_buff *skb)
+static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
 {
-       const void *fcs_bytes;
-       u32 _fcs_bytes;
+       int network_depth = 0;
+       __be16 proto;
+       void *ip;
+       int rc;
 
-       fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
-                                      ETH_FCS_LEN, &_fcs_bytes);
+       if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
+               return;
+
+       ip = skb->data + network_depth;
+       rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
+                                        IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
+
+       rq->stats->ecn_mark += !!rc;
+}
+
+static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
+{
+       void *ip_p = skb->data + network_depth;
 
-       return __get_unaligned_cpu32(fcs_bytes);
+       return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
+                                           ((struct ipv6hdr *)ip_p)->nexthdr;
 }
 
 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
 
+#define MAX_PADDING 8
+
+static void
+tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
+                      struct mlx5e_rq_stats *stats)
+{
+       stats->csum_complete_tail_slow++;
+       skb->csum = csum_block_add(skb->csum,
+                                  skb_checksum(skb, offset, len, 0),
+                                  offset);
+}
+
+static void
+tail_padding_csum(struct sk_buff *skb, int offset,
+                 struct mlx5e_rq_stats *stats)
+{
+       u8 tail_padding[MAX_PADDING];
+       int len = skb->len - offset;
+       void *tail;
+
+       if (unlikely(len > MAX_PADDING)) {
+               tail_padding_csum_slow(skb, offset, len, stats);
+               return;
+       }
+
+       tail = skb_header_pointer(skb, offset, len, tail_padding);
+       if (unlikely(!tail)) {
+               tail_padding_csum_slow(skb, offset, len, stats);
+               return;
+       }
+
+       stats->csum_complete_tail++;
+       skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
+}
+
+static void
+mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
+                      struct mlx5e_rq_stats *stats)
+{
+       struct ipv6hdr *ip6;
+       struct iphdr   *ip4;
+       int pkt_len;
+
+       switch (proto) {
+       case htons(ETH_P_IP):
+               ip4 = (struct iphdr *)(skb->data + network_depth);
+               pkt_len = network_depth + ntohs(ip4->tot_len);
+               break;
+       case htons(ETH_P_IPV6):
+               ip6 = (struct ipv6hdr *)(skb->data + network_depth);
+               pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
+               break;
+       default:
+               return;
+       }
+
+       if (likely(pkt_len >= skb->len))
+               return;
+
+       tail_padding_csum(skb, pkt_len, stats);
+}
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct mlx5_cqe64 *cqe,
                                     struct mlx5e_rq *rq,
@@ -717,6 +801,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 {
        struct mlx5e_rq_stats *stats = rq->stats;
        int network_depth = 0;
+       __be16 proto;
 
        if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
                goto csum_none;
@@ -727,6 +812,10 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                return;
        }
 
+       /* True when explicitly set via priv flag, or XDP prog is loaded */
+       if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
+               goto csum_unnecessary;
+
        /* CQE csum doesn't cover padding octets in short ethernet
         * frames. And the pad field is appended prior to calculating
         * and appending the FCS field.
@@ -738,7 +827,10 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
        if (short_frame(skb->len))
                goto csum_unnecessary;
 
-       if (likely(is_last_ethertype_ip(skb, &network_depth))) {
+       if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
+               if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
+                       goto csum_unnecessary;
+
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
                if (network_depth > ETH_HLEN)
@@ -749,10 +841,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                        skb->csum = csum_partial(skb->data + ETH_HLEN,
                                                 network_depth - ETH_HLEN,
                                                 skb->csum);
-               if (unlikely(netdev->features & NETIF_F_RXFCS))
-                       skb->csum = csum_block_add(skb->csum,
-                                                  (__force __wsum)mlx5e_get_fcs(skb),
-                                                  skb->len - ETH_FCS_LEN);
+
+               mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
                stats->csum_complete++;
                return;
        }
@@ -775,6 +865,8 @@ csum_none:
        stats->csum_none++;
 }
 
+#define MLX5E_CE_BIT_MASK 0x80
+
 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                      u32 cqe_bcnt,
                                      struct mlx5e_rq *rq,
@@ -819,6 +911,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
        skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
 
        mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
+       /* checking CE bit in cqe - MSB in ml_path field */
+       if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
+               mlx5e_enable_ecn(rq, skb);
+
        skb->protocol = eth_type_trans(skb, netdev);
 }
 
index 7047cc293545c4b07be25a6d0739459815f10711..8255d797ea9434829f82f93bbf44ce122e76c6e7 100644 (file)
@@ -53,10 +53,13 @@ static const struct counter_desc sw_stats_desc[] = {
 
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -144,9 +147,12 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                s->rx_bytes     += rq_stats->bytes;
                s->rx_lro_packets += rq_stats->lro_packets;
                s->rx_lro_bytes += rq_stats->lro_bytes;
+               s->rx_ecn_mark  += rq_stats->ecn_mark;
                s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
+               s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+               s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
                s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_xdp_drop     += rq_stats->xdp_drop;
@@ -1137,6 +1143,8 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
@@ -1144,6 +1152,7 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
index 0ad7a165443a25f1fc76317dee05fdcedcab8251..3ea8033ed6bddac0b473c2c24e11db576547b189 100644 (file)
@@ -66,10 +66,13 @@ struct mlx5e_sw_stats {
        u64 tx_nop;
        u64 rx_lro_packets;
        u64 rx_lro_bytes;
+       u64 rx_ecn_mark;
        u64 rx_removed_vlan_packets;
        u64 rx_csum_unnecessary;
        u64 rx_csum_none;
        u64 rx_csum_complete;
+       u64 rx_csum_complete_tail;
+       u64 rx_csum_complete_tail_slow;
        u64 rx_csum_unnecessary_inner;
        u64 rx_xdp_drop;
        u64 rx_xdp_redirect;
@@ -179,11 +182,14 @@ struct mlx5e_rq_stats {
        u64 packets;
        u64 bytes;
        u64 csum_complete;
+       u64 csum_complete_tail;
+       u64 csum_complete_tail_slow;
        u64 csum_unnecessary;
        u64 csum_unnecessary_inner;
        u64 csum_none;
        u64 lro_packets;
        u64 lro_bytes;
+       u64 ecn_mark;
        u64 removed_vlan_packets;
        u64 xdp_drop;
        u64 xdp_redirect;
index 9f7f8425f6767c94dde33332624123f317cc5959..c8928ce69185f287e3c8a6134229235a5fb013a7 100644 (file)
@@ -992,13 +992,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 {
        struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
-       u64 bytes, packets, lastuse = 0;
        struct mlx5e_tc_flow *flow;
        struct mlx5e_encap_entry *e;
        struct mlx5_fc *counter;
        struct neigh_table *tbl;
        bool neigh_used = false;
        struct neighbour *n;
+       u64 lastuse;
 
        if (m_neigh->family == AF_INET)
                tbl = &arp_tbl;
@@ -1015,7 +1015,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
                list_for_each_entry(flow, &e->flows, encap) {
                        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                                counter = mlx5_flow_rule_counter(flow->rule[0]);
-                               mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+                               lastuse = mlx5_fc_query_lastuse(counter);
                                if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
                                        neigh_used = true;
                                        break;
index 58af6be13dfa88a2e08d5d85dd54b3fdaeb4e4f6..808ddd732e04473b218dea25c5def432038896d0 100644 (file)
@@ -321,6 +321,11 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
 }
 EXPORT_SYMBOL(mlx5_fc_query);
 
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
+{
+       return counter->cache.lastuse;
+}
+
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
                          u64 *bytes, u64 *packets, u64 *lastuse)
 {
index 5b7fe826414473e51a35560bed7faa7ca048b5f9..db6aafcced0dd4410555e3d6c19fa70a3aff3d49 100644 (file)
@@ -662,7 +662,9 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
 
        profile->init(mdev, netdev, profile, ipriv);
 
-       mlx5e_attach_netdev(epriv);
+       err = mlx5e_attach_netdev(epriv);
+       if (err)
+               goto detach;
        netif_carrier_off(netdev);
 
        /* set rdma_netdev func pointers */
@@ -678,6 +680,11 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
 
        return netdev;
 
+detach:
+       profile->cleanup(epriv);
+       if (ipriv->sub_interface)
+               return NULL;
+       mlx5e_destroy_mdev_resources(mdev);
 destroy_ht:
        mlx5i_pkey_qpn_ht_cleanup(netdev);
 destroy_wq:
index 0e820cf92f8ad1644afe9a23c047ee29bc8e54e4..231ed508c240a45a191f3887dd2e7da975f2f04c 100644 (file)
@@ -1642,6 +1642,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},   /* ConnectX-6 VF */
        { PCI_VDEVICE(MELLANOX, 0xa2d2) },                      /* BlueField integrated ConnectX-5 network controller */
        { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},   /* BlueField integrated ConnectX-5 network controller VF */
+       { PCI_VDEVICE(MELLANOX, 0xa2d6) },                      /* BlueField-2 integrated ConnectX-6 Dx network controller */
        { 0, }
 };
 
index 0cab06046e5dc0344391be9d41d0296252021a5e..ee126bcf7c350936e519e3fbd5322d47ac076e03 100644 (file)
@@ -5032,7 +5032,7 @@ static int __init mlxsw_sp_module_init(void)
        return 0;
 
 err_sp2_pci_driver_register:
-       mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+       mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
 err_sp1_pci_driver_register:
        mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
 err_sp2_core_driver_register:
index b25048c6c7618e4b2e9fa210bcdfe7516bcb41d2..21296fa7f7fbf52060d51740e9c2247085c4e8d0 100644 (file)
@@ -408,14 +408,6 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
        have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
                                                        &prio_map);
 
-       if (!have_dscp) {
-               err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
-                                       MLXSW_REG_QPTS_TRUST_STATE_PCP);
-               if (err)
-                       netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
-               return err;
-       }
-
        mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
                                            &dscp_map);
        err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,
@@ -432,6 +424,14 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
                return err;
        }
 
+       if (!have_dscp) {
+               err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+                                       MLXSW_REG_QPTS_TRUST_STATE_PCP);
+               if (err)
+                       netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
+               return err;
+       }
+
        err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
                                             MLXSW_REG_QPTS_TRUST_STATE_DSCP);
        if (err) {
index 10291198decd61238024769c9e037beabe95db5f..732ba21d3369dcfec7307755889bb562e6e8f809 100644 (file)
@@ -1767,6 +1767,7 @@ EXPORT_SYMBOL(ocelot_init);
 
 void ocelot_deinit(struct ocelot *ocelot)
 {
+       cancel_delayed_work(&ocelot->stats_work);
        destroy_workqueue(ocelot->stats_queue);
        mutex_destroy(&ocelot->stats_lock);
 }
index b2d2ec8c11e2d15e0562ca89c2f76d58bc4e69c3..6789eed78ff70a29e96a2179f066bd028b066711 100644 (file)
@@ -3922,7 +3922,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * setup (if available). */
        status = myri10ge_request_irq(mgp);
        if (status != 0)
-               goto abort_with_firmware;
+               goto abort_with_slices;
        myri10ge_free_irq(mgp);
 
        /* Save configuration space to be restored if the
index e57d23746585f7abe1d7d52e0045fde2b2839852..c19e88efe958df2fce544a7995e5f9690d78e958 100644 (file)
@@ -259,6 +259,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
                repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
                if (!repr_priv) {
                        err = -ENOMEM;
+                       nfp_repr_free(repr);
                        goto err_reprs_clean;
                }
 
@@ -271,6 +272,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
                port = nfp_port_alloc(app, port_type, repr);
                if (IS_ERR(port)) {
                        err = PTR_ERR(port);
+                       kfree(repr_priv);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }
@@ -291,6 +293,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
                err = nfp_repr_init(app, repr,
                                    port_id, port, priv->nn->dp.netdev);
                if (err) {
+                       kfree(repr_priv);
                        nfp_port_free(port);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
@@ -373,6 +376,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
                repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
                if (!repr_priv) {
                        err = -ENOMEM;
+                       nfp_repr_free(repr);
                        goto err_reprs_clean;
                }
 
@@ -382,11 +386,13 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
                port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
                if (IS_ERR(port)) {
                        err = PTR_ERR(port);
+                       kfree(repr_priv);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }
                err = nfp_port_init_phy_port(app->pf, app, port, i);
                if (err) {
+                       kfree(repr_priv);
                        nfp_port_free(port);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
@@ -399,6 +405,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
                err = nfp_repr_init(app, repr,
                                    cmsg_port_id, port, priv->nn->dp.netdev);
                if (err) {
+                       kfree(repr_priv);
                        nfp_port_free(port);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
index 08381ef8bdb481df81eb448b2f8ceda61c13ba8e..41d30f55c946be682dd6d790f5ab5ca9f6c88cea 100644 (file)
@@ -1371,13 +1371,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
        pldat->dma_buff_base_p = dma_handle;
 
        netdev_dbg(ndev, "IO address space     :%pR\n", res);
-       netdev_dbg(ndev, "IO address size      :%d\n", resource_size(res));
+       netdev_dbg(ndev, "IO address size      :%zd\n",
+                       (size_t)resource_size(res));
        netdev_dbg(ndev, "IO address (mapped)  :0x%p\n",
                        pldat->net_base);
        netdev_dbg(ndev, "IRQ number           :%d\n", ndev->irq);
-       netdev_dbg(ndev, "DMA buffer size      :%d\n", pldat->dma_buff_size);
-       netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
-                       pldat->dma_buff_base_p);
+       netdev_dbg(ndev, "DMA buffer size      :%zd\n", pldat->dma_buff_size);
+       netdev_dbg(ndev, "DMA buffer P address :%pad\n",
+                       &pldat->dma_buff_base_p);
        netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
                        pldat->dma_buff_base_v);
 
@@ -1424,8 +1425,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
        if (ret)
                goto err_out_unregister_netdev;
 
-       netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
-              res->start, ndev->irq);
+       netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n",
+              (unsigned long)res->start, ndev->irq);
 
        phydev = ndev->phydev;
 
index 4dd82a1612aa800a17c665d41d1593a56902722c..a6a9688db307f50750ef5103a3fa612f48b2e94f 100644 (file)
@@ -3096,6 +3096,7 @@ static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
                                 void __iomem *p_regview,
                                 void __iomem *p_doorbells,
+                                u64 db_phys_addr,
                                 enum qed_pci_personality personality)
 {
        int rc = 0;
@@ -3103,6 +3104,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
        /* Split PCI bars evenly between hwfns */
        p_hwfn->regview = p_regview;
        p_hwfn->doorbells = p_doorbells;
+       p_hwfn->db_phys_addr = db_phys_addr;
 
        if (IS_VF(p_hwfn->cdev))
                return qed_vf_hw_prepare(p_hwfn);
@@ -3198,7 +3200,9 @@ int qed_hw_prepare(struct qed_dev *cdev,
        /* Initialize the first hwfn - will learn number of hwfns */
        rc = qed_hw_prepare_single(p_hwfn,
                                   cdev->regview,
-                                  cdev->doorbells, personality);
+                                  cdev->doorbells,
+                                  cdev->db_phys_addr,
+                                  personality);
        if (rc)
                return rc;
 
@@ -3207,22 +3211,25 @@ int qed_hw_prepare(struct qed_dev *cdev,
        /* Initialize the rest of the hwfns */
        if (cdev->num_hwfns > 1) {
                void __iomem *p_regview, *p_doorbell;
-               u8 __iomem *addr;
+               u64 db_phys_addr;
+               u32 offset;
 
                /* adjust bar offset for second engine */
-               addr = cdev->regview +
-                      qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
-                                      BAR_ID_0) / 2;
-               p_regview = addr;
+               offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+                                        BAR_ID_0) / 2;
+               p_regview = cdev->regview + offset;
 
-               addr = cdev->doorbells +
-                      qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
-                                      BAR_ID_1) / 2;
-               p_doorbell = addr;
+               offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+                                        BAR_ID_1) / 2;
+
+               p_doorbell = cdev->doorbells + offset;
+
+               db_phys_addr = cdev->db_phys_addr + offset;
 
                /* prepare second hw function */
                rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
-                                          p_doorbell, personality);
+                                          p_doorbell, db_phys_addr,
+                                          personality);
 
                /* in case of error, need to free the previously
                 * initiliazed hwfn 0.
index b22f464ea3fa770e94327640e32a35972ee35745..f9e475075d3ea249225b47538344403c59294db0 100644 (file)
@@ -939,7 +939,7 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
                                                snprintf(bit_name, 30,
                                                         p_aeu->bit_name, num);
                                        else
-                                               strncpy(bit_name,
+                                               strlcpy(bit_name,
                                                        p_aeu->bit_name, 30);
 
                                        /* We now need to pass bitmask in its
index b7471e48db7b19d97628900beceba5e4bc395528..7002a660b6b4c278d130388e4ac59fb7e7cddb93 100644 (file)
@@ -2709,6 +2709,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
        data.input.rx_num_desc = n_ooo_bufs * 2;
        data.input.tx_num_desc = data.input.rx_num_desc;
        data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
+       data.input.tx_tc = PKT_LB_TC;
+       data.input.tx_dest = QED_LL2_TX_DEST_LB;
        data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
        data.input.secondary_queue = true;
        data.cbs = &cbs;
index cf3b0e3dc350cca7cf709c73e188721dcab6bc6c..637687b766ff09637b45d563c0f30422a209f835 100644 (file)
@@ -1150,7 +1150,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
                                              &drv_version);
                if (rc) {
                        DP_NOTICE(cdev, "Failed sending drv version command\n");
-                       return rc;
+                       goto err4;
                }
        }
 
@@ -1158,6 +1158,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 
        return 0;
 
+err4:
+       qed_ll2_dealloc_if(cdev);
 err3:
        qed_hw_stop(cdev);
 err2:
index 7873d6dfd91f55607a6d60b23b568488edf7d360..909422d9390330c6c133a4d47af5ee2b7a8d5dac 100644 (file)
@@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
        /* Vendor specific information */
        dev->vendor_id = cdev->vendor_id;
        dev->vendor_part_id = cdev->device_id;
-       dev->hw_ver = 0;
+       dev->hw_ver = cdev->chip_rev;
        dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
                      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
 
@@ -803,7 +803,7 @@ static int qed_rdma_add_user(void *rdma_cxt,
                                     dpi_start_offset +
                                     ((out_params->dpi) * p_hwfn->dpi_size));
 
-       out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
+       out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
                                    dpi_start_offset +
                                    ((out_params->dpi) * p_hwfn->dpi_size);
 
index 10b075bc595966ac405751ade7cda6b78ed930d7..783ee6a32b5d2831589be0ed4188dd101835811e 100644 (file)
@@ -2788,6 +2788,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
                                netdev_err(qdev->ndev,
                                           "PCI mapping failed with error: %d\n",
                                           err);
+                               dev_kfree_skb_irq(skb);
                                ql_free_large_buffers(qdev);
                                return -ENOMEM;
                        }
index 884f1f52dcc25e88713a28978bb9bdaa4bc3a320..70879a3ab567c57004da73656e6c8e49237000de 100644 (file)
@@ -59,7 +59,7 @@ struct rmnet_map_dl_csum_trailer {
 struct rmnet_map_ul_csum_header {
        __be16 csum_start_offset;
        u16 csum_insert_offset:14;
-       u16 udp_ip4_ind:1;
+       u16 udp_ind:1;
        u16 csum_enabled:1;
 } __aligned(1);
 
index 57a9c314a665fc8aae9ce94073b9a256fa4973ee..b2090cedd2e965aa1dc901b97b05dae6a537ef30 100644 (file)
@@ -215,9 +215,9 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
        ul_header->csum_insert_offset = skb->csum_offset;
        ul_header->csum_enabled = 1;
        if (ip4h->protocol == IPPROTO_UDP)
-               ul_header->udp_ip4_ind = 1;
+               ul_header->udp_ind = 1;
        else
-               ul_header->udp_ip4_ind = 0;
+               ul_header->udp_ind = 0;
 
        /* Changing remaining fields to network order */
        hdr++;
@@ -248,6 +248,7 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
                              struct rmnet_map_ul_csum_header *ul_header,
                              struct sk_buff *skb)
 {
+       struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
        __be16 *hdr = (__be16 *)ul_header, offset;
 
        offset = htons((__force u16)(skb_transport_header(skb) -
@@ -255,7 +256,11 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
        ul_header->csum_start_offset = offset;
        ul_header->csum_insert_offset = skb->csum_offset;
        ul_header->csum_enabled = 1;
-       ul_header->udp_ip4_ind = 0;
+
+       if (ip6h->nexthdr == IPPROTO_UDP)
+               ul_header->udp_ind = 1;
+       else
+               ul_header->udp_ind = 0;
 
        /* Changing remaining fields to network order */
        hdr++;
@@ -428,7 +433,7 @@ sw_csum:
        ul_header->csum_start_offset = 0;
        ul_header->csum_insert_offset = 0;
        ul_header->csum_enabled = 0;
-       ul_header->udp_ip4_ind = 0;
+       ul_header->udp_ind = 0;
 
        priv->stats.csum_sw++;
 }
index 7a50b911b18046c38381ddf0df747693ea432985..0c8b7146637e1537c621108ea40ea5b852c27b67 100644 (file)
@@ -5202,6 +5202,143 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
        /* disable aspm and clock request before access ephy */
        rtl_hw_aspm_clkreq_enable(tp, false);
        rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
+
+       /* The following Realtek-provided magic fixes an issue with the RX unit
+        * getting confused after the PHY having been powered-down.
+        */
+       r8168_mac_ocp_write(tp, 0xFC28, 0x0000);
+       r8168_mac_ocp_write(tp, 0xFC2A, 0x0000);
+       r8168_mac_ocp_write(tp, 0xFC2C, 0x0000);
+       r8168_mac_ocp_write(tp, 0xFC2E, 0x0000);
+       r8168_mac_ocp_write(tp, 0xFC30, 0x0000);
+       r8168_mac_ocp_write(tp, 0xFC32, 0x0000);
+       r8168_mac_ocp_write(tp, 0xFC34, 0x0000);
+       r8168_mac_ocp_write(tp, 0xFC36, 0x0000);
+       mdelay(3);
+       r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
+
+       r8168_mac_ocp_write(tp, 0xF800, 0xE008);
+       r8168_mac_ocp_write(tp, 0xF802, 0xE00A);
+       r8168_mac_ocp_write(tp, 0xF804, 0xE00C);
+       r8168_mac_ocp_write(tp, 0xF806, 0xE00E);
+       r8168_mac_ocp_write(tp, 0xF808, 0xE027);
+       r8168_mac_ocp_write(tp, 0xF80A, 0xE04F);
+       r8168_mac_ocp_write(tp, 0xF80C, 0xE05E);
+       r8168_mac_ocp_write(tp, 0xF80E, 0xE065);
+       r8168_mac_ocp_write(tp, 0xF810, 0xC602);
+       r8168_mac_ocp_write(tp, 0xF812, 0xBE00);
+       r8168_mac_ocp_write(tp, 0xF814, 0x0000);
+       r8168_mac_ocp_write(tp, 0xF816, 0xC502);
+       r8168_mac_ocp_write(tp, 0xF818, 0xBD00);
+       r8168_mac_ocp_write(tp, 0xF81A, 0x074C);
+       r8168_mac_ocp_write(tp, 0xF81C, 0xC302);
+       r8168_mac_ocp_write(tp, 0xF81E, 0xBB00);
+       r8168_mac_ocp_write(tp, 0xF820, 0x080A);
+       r8168_mac_ocp_write(tp, 0xF822, 0x6420);
+       r8168_mac_ocp_write(tp, 0xF824, 0x48C2);
+       r8168_mac_ocp_write(tp, 0xF826, 0x8C20);
+       r8168_mac_ocp_write(tp, 0xF828, 0xC516);
+       r8168_mac_ocp_write(tp, 0xF82A, 0x64A4);
+       r8168_mac_ocp_write(tp, 0xF82C, 0x49C0);
+       r8168_mac_ocp_write(tp, 0xF82E, 0xF009);
+       r8168_mac_ocp_write(tp, 0xF830, 0x74A2);
+       r8168_mac_ocp_write(tp, 0xF832, 0x8CA5);
+       r8168_mac_ocp_write(tp, 0xF834, 0x74A0);
+       r8168_mac_ocp_write(tp, 0xF836, 0xC50E);
+       r8168_mac_ocp_write(tp, 0xF838, 0x9CA2);
+       r8168_mac_ocp_write(tp, 0xF83A, 0x1C11);
+       r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0);
+       r8168_mac_ocp_write(tp, 0xF83E, 0xE006);
+       r8168_mac_ocp_write(tp, 0xF840, 0x74F8);
+       r8168_mac_ocp_write(tp, 0xF842, 0x48C4);
+       r8168_mac_ocp_write(tp, 0xF844, 0x8CF8);
+       r8168_mac_ocp_write(tp, 0xF846, 0xC404);
+       r8168_mac_ocp_write(tp, 0xF848, 0xBC00);
+       r8168_mac_ocp_write(tp, 0xF84A, 0xC403);
+       r8168_mac_ocp_write(tp, 0xF84C, 0xBC00);
+       r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2);
+       r8168_mac_ocp_write(tp, 0xF850, 0x0C0A);
+       r8168_mac_ocp_write(tp, 0xF852, 0xE434);
+       r8168_mac_ocp_write(tp, 0xF854, 0xD3C0);
+       r8168_mac_ocp_write(tp, 0xF856, 0x49D9);
+       r8168_mac_ocp_write(tp, 0xF858, 0xF01F);
+       r8168_mac_ocp_write(tp, 0xF85A, 0xC526);
+       r8168_mac_ocp_write(tp, 0xF85C, 0x64A5);
+       r8168_mac_ocp_write(tp, 0xF85E, 0x1400);
+       r8168_mac_ocp_write(tp, 0xF860, 0xF007);
+       r8168_mac_ocp_write(tp, 0xF862, 0x0C01);
+       r8168_mac_ocp_write(tp, 0xF864, 0x8CA5);
+       r8168_mac_ocp_write(tp, 0xF866, 0x1C15);
+       r8168_mac_ocp_write(tp, 0xF868, 0xC51B);
+       r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0);
+       r8168_mac_ocp_write(tp, 0xF86C, 0xE013);
+       r8168_mac_ocp_write(tp, 0xF86E, 0xC519);
+       r8168_mac_ocp_write(tp, 0xF870, 0x74A0);
+       r8168_mac_ocp_write(tp, 0xF872, 0x48C4);
+       r8168_mac_ocp_write(tp, 0xF874, 0x8CA0);
+       r8168_mac_ocp_write(tp, 0xF876, 0xC516);
+       r8168_mac_ocp_write(tp, 0xF878, 0x74A4);
+       r8168_mac_ocp_write(tp, 0xF87A, 0x48C8);
+       r8168_mac_ocp_write(tp, 0xF87C, 0x48CA);
+       r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4);
+       r8168_mac_ocp_write(tp, 0xF880, 0xC512);
+       r8168_mac_ocp_write(tp, 0xF882, 0x1B00);
+       r8168_mac_ocp_write(tp, 0xF884, 0x9BA0);
+       r8168_mac_ocp_write(tp, 0xF886, 0x1B1C);
+       r8168_mac_ocp_write(tp, 0xF888, 0x483F);
+       r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2);
+       r8168_mac_ocp_write(tp, 0xF88C, 0x1B04);
+       r8168_mac_ocp_write(tp, 0xF88E, 0xC508);
+       r8168_mac_ocp_write(tp, 0xF890, 0x9BA0);
+       r8168_mac_ocp_write(tp, 0xF892, 0xC505);
+       r8168_mac_ocp_write(tp, 0xF894, 0xBD00);
+       r8168_mac_ocp_write(tp, 0xF896, 0xC502);
+       r8168_mac_ocp_write(tp, 0xF898, 0xBD00);
+       r8168_mac_ocp_write(tp, 0xF89A, 0x0300);
+       r8168_mac_ocp_write(tp, 0xF89C, 0x051E);
+       r8168_mac_ocp_write(tp, 0xF89E, 0xE434);
+       r8168_mac_ocp_write(tp, 0xF8A0, 0xE018);
+       r8168_mac_ocp_write(tp, 0xF8A2, 0xE092);
+       r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20);
+       r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0);
+       r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F);
+       r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4);
+       r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3);
+       r8168_mac_ocp_write(tp, 0xF8AE, 0xF007);
+       r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0);
+       r8168_mac_ocp_write(tp, 0xF8B2, 0xF103);
+       r8168_mac_ocp_write(tp, 0xF8B4, 0xC607);
+       r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00);
+       r8168_mac_ocp_write(tp, 0xF8B8, 0xC606);
+       r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00);
+       r8168_mac_ocp_write(tp, 0xF8BC, 0xC602);
+       r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00);
+       r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C);
+       r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28);
+       r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C);
+       r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00);
+       r8168_mac_ocp_write(tp, 0xF8C8, 0xC707);
+       r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00);
+       r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2);
+       r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1);
+       r8168_mac_ocp_write(tp, 0xF8D0, 0xC502);
+       r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00);
+       r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA);
+       r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0);
+       r8168_mac_ocp_write(tp, 0xF8D8, 0xC502);
+       r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00);
+       r8168_mac_ocp_write(tp, 0xF8DC, 0x0132);
+
+       r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
+
+       r8168_mac_ocp_write(tp, 0xFC2A, 0x0743);
+       r8168_mac_ocp_write(tp, 0xFC2C, 0x0801);
+       r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9);
+       r8168_mac_ocp_write(tp, 0xFC30, 0x02FD);
+       r8168_mac_ocp_write(tp, 0xFC32, 0x0C25);
+       r8168_mac_ocp_write(tp, 0xFC34, 0x00A9);
+       r8168_mac_ocp_write(tp, 0xFC36, 0x012D);
+
        rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
@@ -7102,13 +7239,18 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
 {
        unsigned int flags;
 
-       if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+       switch (tp->mac_version) {
+       case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
                RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
                RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
                RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+               /* fall through */
+       case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
                flags = PCI_IRQ_LEGACY;
-       } else {
+               break;
+       default:
                flags = PCI_IRQ_ALL_TYPES;
+               break;
        }
 
        return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
index 5f092bbd051484e1009102e8634611ed25a70237..5462d2e8a1b71aff17837ed13dceb49d4fccc398 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Renesas Ethernet AVB device driver
  *
- * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2014-2019 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
@@ -514,7 +514,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
                        kfree(ts_skb);
                        if (tag == tfa_tag) {
                                skb_tstamp_tx(skb, &shhwtstamps);
+                               dev_consume_skb_any(skb);
                                break;
+                       } else {
+                               dev_kfree_skb_any(skb);
                        }
                }
                ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
@@ -1556,7 +1559,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                                         DMA_TO_DEVICE);
                        goto unmap;
                }
-               ts_skb->skb = skb;
+               ts_skb->skb = skb_get(skb);
                ts_skb->tag = priv->ts_skb_tag++;
                priv->ts_skb_tag &= 0x3ff;
                list_add_tail(&ts_skb->list, &priv->ts_skb_list);
@@ -1685,6 +1688,7 @@ static int ravb_close(struct net_device *ndev)
        /* Clear the timestamp list */
        list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
                list_del(&ts_skb->list);
+               kfree_skb(ts_skb->skb);
                kfree(ts_skb);
        }
 
index 696037d5ac3d5a3ecebb6aaa924ca70da02a4bf5..ad557f457b2ce21bed258d73c37625fbc9939e45 100644 (file)
@@ -793,15 +793,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
                printk(KERN_ERR "Sgiseeq: Cannot register net device, "
                       "aborting.\n");
                err = -ENODEV;
-               goto err_out_free_page;
+               goto err_out_free_attrs;
        }
 
        printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
 
        return 0;
 
-err_out_free_page:
-       free_page((unsigned long) sp->srings);
+err_out_free_attrs:
+       dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
+                      sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
 err_out_free_dev:
        free_netdev(dev);
 
index 4bb89f74742c908386410c23640ddbe6462c9cfc..d5bcbc40a55fc1239b20b1c88b044ffdcedcf927 100644 (file)
@@ -1057,7 +1057,7 @@ sis900_open(struct net_device *net_dev)
        sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
 
        /* Enable all known interrupts by setting the interrupt mask. */
-       sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+       sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
        sw32(cr, RxENA | sr32(cr));
        sw32(ier, IE);
 
@@ -1578,7 +1578,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
        sw32(txdp, sis_priv->tx_ring_dma);
 
        /* Enable all known interrupts by setting the interrupt mask. */
-       sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+       sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
 }
 
 /**
@@ -1618,7 +1618,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
                        spin_unlock_irqrestore(&sis_priv->lock, flags);
                        return NETDEV_TX_OK;
        }
-       sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
+       sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len);
        sw32(cr, TxENA | sr32(cr));
 
        sis_priv->cur_tx ++;
@@ -1674,7 +1674,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
        do {
                status = sr32(isr);
 
-               if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
+               if ((status & (HIBERR|TxURN|TxERR|TxIDLE|TxDESC|RxORN|RxERR|RxOK)) == 0)
                        /* nothing intresting happened */
                        break;
                handled = 1;
@@ -1684,7 +1684,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
                        /* Rx interrupt */
                        sis900_rx(net_dev);
 
-               if (status & (TxURN | TxERR | TxIDLE))
+               if (status & (TxURN | TxERR | TxIDLE | TxDESC))
                        /* Tx interrupt */
                        sis900_finish_xmit(net_dev);
 
@@ -1896,8 +1896,8 @@ static void sis900_finish_xmit (struct net_device *net_dev)
 
                if (tx_status & OWN) {
                        /* The packet is not transmitted yet (owned by hardware) !
-                        * Note: the interrupt is generated only when Tx Machine
-                        * is idle, so this is an almost impossible case */
+                        * Note: this is an almost impossible condition
+                        * in case of TxDESC ('descriptor interrupt') */
                        break;
                }
 
@@ -2473,7 +2473,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
        sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
 
        /* Enable all known interrupts by setting the interrupt mask. */
-       sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+       sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
        sw32(cr, RxENA | sr32(cr));
        sw32(ier, IE);
 
index 272b9ca663148f36ccb7ae45363df773f2dd4c4c..b069b3a2453be2330b8790445e88ae32a6e26399 100644 (file)
@@ -261,7 +261,7 @@ struct stmmac_safety_stats {
 #define STMMAC_COAL_TX_TIMER   1000
 #define STMMAC_MAX_COAL_TX_TICK        100000
 #define STMMAC_TX_MAX_FRAMES   256
-#define STMMAC_TX_FRAMES       25
+#define STMMAC_TX_FRAMES       1
 
 /* Packets types */
 enum packets_types {
index 3b174eae77c10f3b9da3aa9d89d850535233a87c..f45df6df69328bfd359c9fa053dae5869f190fd3 100644 (file)
@@ -1203,10 +1203,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
        int ret;
        struct device *dev = &bsp_priv->pdev->dev;
 
-       if (!ldo) {
-               dev_err(dev, "no regulator found\n");
-               return -1;
-       }
+       if (!ldo)
+               return 0;
 
        if (enable) {
                ret = regulator_enable(ldo);
index 49a896a16391948162b9bc748d516dd5d307c5f7..79c91526f3ecce6470a73d0c52fde876508852e2 100644 (file)
@@ -893,6 +893,11 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
                 * address. No need to mask it again.
                 */
                reg |= 1 << H3_EPHY_ADDR_SHIFT;
+       } else {
+               /* For SoCs without internal PHY the PHY selection bit should be
+                * set to 0 (external PHY).
+                */
+               reg &= ~H3_EPHY_SELECT;
        }
 
        if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
index 0877bde6e860b24a4f6003e817ee82a0726f0f11..21d131347e2effb5f94d777a264a72ed79ef813d 100644 (file)
@@ -216,6 +216,12 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
                                            GMAC_ADDR_LOW(reg));
                        reg++;
                }
+
+               while (reg <= perfect_addr_number) {
+                       writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+                       writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+                       reg++;
+               }
        }
 
 #ifdef FRAME_FILTER_DEBUG
index 7e5d5db0d5165b5ff9b65d9e82f229c4ea5c5888..48cf5e2b24417f282fd6be1c7991c9a3a5dccacb 100644 (file)
@@ -88,6 +88,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
        u32 value;
 
        base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
+       if (queue >= 4)
+               queue -= 4;
 
        value = readl(ioaddr + base_register);
 
@@ -105,6 +107,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
        u32 value;
 
        base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
+       if (queue >= 4)
+               queue -= 4;
 
        value = readl(ioaddr + base_register);
 
@@ -444,14 +448,20 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
                 * are required
                 */
                value |= GMAC_PACKET_FILTER_PR;
-       } else if (!netdev_uc_empty(dev)) {
-               int reg = 1;
+       } else {
                struct netdev_hw_addr *ha;
+               int reg = 1;
 
                netdev_for_each_uc_addr(ha, dev) {
                        dwmac4_set_umac_addr(hw, ha->addr, reg);
                        reg++;
                }
+
+               while (reg <= GMAC_MAX_PERFECT_ADDRESSES) {
+                       writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+                       writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+                       reg++;
+               }
        }
 
        writel(value, ioaddr + GMAC_PACKET_FILTER);
@@ -469,8 +479,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
        if (fc & FLOW_RX) {
                pr_debug("\tReceive Flow-Control ON\n");
                flow |= GMAC_RX_FLOW_CTRL_RFE;
-               writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
        }
+       writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
+
        if (fc & FLOW_TX) {
                pr_debug("\tTransmit Flow-Control ON\n");
 
@@ -478,7 +489,7 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
                        pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
 
                for (queue = 0; queue < tx_cnt; queue++) {
-                       flow |= GMAC_TX_FLOW_CTRL_TFE;
+                       flow = GMAC_TX_FLOW_CTRL_TFE;
 
                        if (duplex)
                                flow |=
@@ -486,6 +497,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
 
                        writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
                }
+       } else {
+               for (queue = 0; queue < tx_cnt; queue++)
+                       writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
        }
 }
 
index d182f82f7b58608f0aa6e335bf6071562dc8cb6f..870302a7177e2334d19056dd4abc2df066851d16 100644 (file)
@@ -106,6 +106,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
        u32 value, reg;
 
        reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+       if (queue >= 4)
+               queue -= 4;
 
        value = readl(ioaddr + reg);
        value &= ~XGMAC_PSRQ(queue);
@@ -169,6 +171,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
        u32 value, reg;
 
        reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
+       if (queue >= 4)
+               queue -= 4;
 
        value = readl(ioaddr + reg);
        value &= ~XGMAC_QxMDMACH(queue);
index 5c18874614ba1a9bd16e898846c3dacdc544f73f..014fe93ed2d82ed28a84870a64fb209db843afe4 100644 (file)
@@ -3036,17 +3036,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* Manage oversized TCP frames for GMAC4 device */
        if (skb_is_gso(skb) && priv->tso) {
-               if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
-                       /*
-                        * There is no way to determine the number of TSO
-                        * capable Queues. Let's use always the Queue 0
-                        * because if TSO is supported then at least this
-                        * one will be capable.
-                        */
-                       skb_set_queue_mapping(skb, 0);
-
+               if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
                        return stmmac_tso_xmit(skb, dev);
-               }
        }
 
        if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3855,6 +3846,23 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
        }
 }
 
+static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
+                              struct net_device *sb_dev,
+                              select_queue_fallback_t fallback)
+{
+       if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+               /*
+                * There is no way to determine the number of TSO
+                * capable Queues. Let's use always the Queue 0
+                * because if TSO is supported then at least this
+                * one will be capable.
+                */
+               return 0;
+       }
+
+       return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+}
+
 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
 {
        struct stmmac_priv *priv = netdev_priv(ndev);
@@ -4097,6 +4105,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
        .ndo_tx_timeout = stmmac_tx_timeout,
        .ndo_do_ioctl = stmmac_ioctl,
        .ndo_setup_tc = stmmac_setup_tc,
+       .ndo_select_queue = stmmac_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = stmmac_poll_controller,
 #endif
@@ -4513,8 +4522,10 @@ int stmmac_suspend(struct device *dev)
                stmmac_mac_set(priv, priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
                /* Disable clock in case of PWM is off */
-               clk_disable(priv->plat->pclk);
-               clk_disable(priv->plat->stmmac_clk);
+               if (priv->plat->clk_ptp_ref)
+                       clk_disable_unprepare(priv->plat->clk_ptp_ref);
+               clk_disable_unprepare(priv->plat->pclk);
+               clk_disable_unprepare(priv->plat->stmmac_clk);
        }
        mutex_unlock(&priv->lock);
 
@@ -4579,8 +4590,10 @@ int stmmac_resume(struct device *dev)
        } else {
                pinctrl_pm_select_default_state(priv->device);
                /* enable the clk previously disabled */
-               clk_enable(priv->plat->stmmac_clk);
-               clk_enable(priv->plat->pclk);
+               clk_prepare_enable(priv->plat->stmmac_clk);
+               clk_prepare_enable(priv->plat->pclk);
+               if (priv->plat->clk_ptp_ref)
+                       clk_prepare_enable(priv->plat->clk_ptp_ref);
                /* reset the phy so that it's ready */
                if (priv->mii)
                        stmmac_mdio_reset(priv->mii);
index 58ea18af9813ab950b1252cde08d626abe61c30e..37c0bc699cd9ca80dbfdb77e7893abaad150fe62 100644 (file)
@@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
                entry = &priv->tc_entries[i];
                if (!entry->in_use && !first && free)
                        first = entry;
-               if (entry->handle == loc && !free)
+               if ((entry->handle == loc) && !free && !entry->is_frag)
                        dup = entry;
        }
 
index 8307064406ff6845ff4fdb463fdffc7ed755ea43..9246da38a0dd14ec253f980235c0ff380027a9a6 100644 (file)
@@ -187,6 +187,7 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
                                     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
 
                netif_tx_wake_all_queues(ndev);
+               netif_carrier_on(ndev);
        } else {
                int tmo;
                /* disable forwarding */
@@ -202,6 +203,7 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
 
                cpsw_sl_ctl_reset(port->slave.mac_sl);
 
+               netif_carrier_off(ndev);
                netif_tx_stop_all_queues(ndev);
        }
 
@@ -944,8 +946,7 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
 
                ndev = skb->dev;
 
-               if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
-                       am65_cpts_tx_timestamp(common->cpts, skb);
+               am65_cpts_tx_timestamp(common->cpts, skb);
 
                ndev_priv = netdev_priv(ndev);
                stats = this_cpu_ptr(ndev_priv->stats);
@@ -1059,8 +1060,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
        pkt_len = skb_headlen(skb);
 
        /* SKB TX timestamp */
-       if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && port->tx_ts_enabled)
-               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+       if (port->tx_ts_enabled)
+               am65_cpts_ask_tx_timestamp(common->cpts, skb);
 
        q_idx = skb_get_queue_mapping(skb);
        dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
index 6c54e5b78eb055794bb673c7032b9ab21482cf5b..955f636f2302dc6dae6d7800b60c7f0934f65f3c 100644 (file)
@@ -803,14 +803,11 @@ void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
 }
 EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
 
-void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
-                           struct sk_buff *skb)
+void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
 {
-       unsigned int ptp_class = ptp_classify_raw(skb);
-       struct am65_cpts_skb_cb_data *skb_cb =
-                       (struct am65_cpts_skb_cb_data *)skb->cb;
+       struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
 
-       if (ptp_class == PTP_CLASS_NONE)
+       if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
                return;
 
        /* add frame to queue for processing later.
@@ -819,12 +816,28 @@ void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
        skb_get(skb);
        /* get the timestamp for timeouts */
        skb_cb->tmo = jiffies + msecs_to_jiffies(100);
-       skb_cb->ptp_class = ptp_class;
        skb_queue_tail(&cpts->txq, skb);
        ptp_schedule_worker(cpts->ptp_clock, 0);
 }
 EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp);
 
+void am65_cpts_ask_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+       struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
+       unsigned int ptp_class;
+
+       if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+               return;
+
+       ptp_class = ptp_classify_raw(skb);
+       if (ptp_class == PTP_CLASS_NONE)
+               return;
+
+       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+       skb_cb->ptp_class = ptp_class;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_ask_tx_timestamp);
+
 int am65_cpts_phc_index(struct am65_cpts *cpts)
 {
        return cpts->phc_index;
index 881c5521ddcedf61fb98d8a4e6a9e5ad48aecd53..7e1f7760e162de11ef64a6c51fca60e078c55a41 100644 (file)
@@ -17,8 +17,8 @@ struct am65_cpts;
 struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
                                   struct device_node *node);
 int am65_cpts_phc_index(struct am65_cpts *cpts);
-void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
-                           struct sk_buff *skb);
+void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_ask_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
 void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en);
 #else
 static inline struct am65_cpts *am65_cpts_create(struct device *dev,
@@ -38,6 +38,11 @@ static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
 {
 }
 
+static inline void am65_cpts_ask_tx_timestamp(struct am65_cpts *cpts,
+                                             struct sk_buff *skb)
+{
+}
+
 static inline void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
 {
 }
index 6c38804a5d1974a93d62585a122f199678287e73..b9f72674b7fc0c3d55c5d3bee844c5031054085c 100644 (file)
@@ -1743,7 +1743,8 @@ static int prueth_probe(struct platform_device *pdev)
        }
 
        pruss = pruss_get(eth0_node ?
-                         prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
+                         prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1],
+                         &prueth->pruss_id);
        if (IS_ERR(pruss)) {
                ret = PTR_ERR(pruss);
                dev_err(dev, "unable to get pruss handle\n");
index 53d6e520f23c4507559716bcfd902f513a03682a..00632aef5722fec12307604168e76261d4734e8d 100644 (file)
@@ -158,6 +158,7 @@ struct prueth_emac {
  * @fw_data: firmware names to be used with PRU remoteprocs
  * @config: firmware load time configuration per slice
  * @miig_rt: regmap to mii_g_rt block
+ * @pruss_id: PRUSS instance id
  */
 struct prueth {
        struct device *dev;
@@ -175,6 +176,7 @@ struct prueth {
        struct icssg_config config[PRUSS_NUM_PRUS];
        struct regmap *miig_rt;
        struct regmap *mii_rt;
+       int pruss_id;
 };
 
 struct emac_tx_ts_response {
index 6dbf2cb67f10a404aff2a1dc4620fbe64dc1c2a8..3cf3fb2b75d161cdfcbe591fe8abaddefb9ac62f 100644 (file)
@@ -191,6 +191,7 @@ struct prueth_emac {
  * @eth_node: node for each emac node
  * @emac: emac data for three ports, one host and two physical
  * @registered_netdevs: net device for each registered emac
+ * @pruss_id: PRUSS instance id
  */
 struct prueth {
        struct device *dev;
@@ -204,6 +205,7 @@ struct prueth {
        struct device_node *eth_node[PRUETH_NUM_MACS];
        struct prueth_emac *emac[PRUETH_NUM_MACS];
        struct net_device *registered_netdevs[PRUETH_NUM_MACS];
+       int pruss_id;
 };
 
 static inline u32 prueth_read_reg(struct prueth *prueth,
@@ -1666,7 +1668,8 @@ static int prueth_probe(struct platform_device *pdev)
                }
        }
 
-       pruss = pruss_get(prueth->pru0 ? prueth->pru0 : prueth->pru1);
+       pruss = pruss_get(prueth->pru0 ? prueth->pru0 : prueth->pru1,
+                         &prueth->pruss_id);
        if (IS_ERR(pruss)) {
                ret = PTR_ERR(pruss);
                dev_err(dev, "unable to get pruss handle\n");
index cce9c9ed46aa9a8462080c5949bdb6621e247f0d..9146068979d2c65160092fbd929ede4c0160acfe 100644 (file)
@@ -1497,7 +1497,7 @@ tc35815_rx(struct net_device *dev, int limit)
                        pci_unmap_single(lp->pci_dev,
                                         lp->rx_skbs[cur_bd].skb_dma,
                                         RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-                       if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
+                       if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
                                memmove(skb->data, skb->data - NET_IP_ALIGN,
                                        pkt_len);
                        data = skb_put(skb, pkt_len);
index edcd1e60b30d17b729329a5d7558db854b233559..f076050c8ad37fe11c4a5bb37a2481c4c7561859 100644 (file)
@@ -383,9 +383,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
 static void tsi108_stat_carry(struct net_device *dev)
 {
        struct tsi108_prv_data *data = netdev_priv(dev);
+       unsigned long flags;
        u32 carry1, carry2;
 
-       spin_lock_irq(&data->misclock);
+       spin_lock_irqsave(&data->misclock, flags);
 
        carry1 = TSI_READ(TSI108_STAT_CARRY1);
        carry2 = TSI_READ(TSI108_STAT_CARRY2);
@@ -453,7 +454,7 @@ static void tsi108_stat_carry(struct net_device *dev)
                              TSI108_STAT_TXPAUSEDROP_CARRY,
                              &data->tx_pause_drop);
 
-       spin_unlock_irq(&data->misclock);
+       spin_unlock_irqrestore(&data->misclock, flags);
 }
 
 /* Read a stat counter atomically with respect to carries.
index 7cfd7ff38e86f04cf56f9465b1865c35a3ee465f..66b30ebd45ee8fcea48636ed19ba5db2a54e9c09 100644 (file)
@@ -614,6 +614,10 @@ static void axienet_start_xmit_done(struct net_device *ndev)
 
        ndev->stats.tx_packets += packets;
        ndev->stats.tx_bytes += size;
+
+       /* Matches barrier in axienet_start_xmit */
+       smp_mb();
+
        netif_wake_queue(ndev);
 }
 
@@ -668,9 +672,19 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
        if (axienet_check_tx_bd_space(lp, num_frag)) {
-               if (!netif_queue_stopped(ndev))
-                       netif_stop_queue(ndev);
-               return NETDEV_TX_BUSY;
+               if (netif_queue_stopped(ndev))
+                       return NETDEV_TX_BUSY;
+
+               netif_stop_queue(ndev);
+
+               /* Matches barrier in axienet_start_xmit_done */
+               smp_mb();
+
+               /* Space might have just been freed - check again */
+               if (axienet_check_tx_bd_space(lp, num_frag))
+                       return NETDEV_TX_BUSY;
+
+               netif_wake_queue(ndev);
        }
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
index 7a145172d50385f56f54e4b114ab6194fd672ca5..d178d5bad7e48aa0f8744a213d7660813290f978 100644 (file)
@@ -289,16 +289,29 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
        return gtp_rx(pctx, skb, hdrlen, gtp->role);
 }
 
-static void gtp_encap_destroy(struct sock *sk)
+static void __gtp_encap_destroy(struct sock *sk)
 {
        struct gtp_dev *gtp;
 
-       gtp = rcu_dereference_sk_user_data(sk);
+       lock_sock(sk);
+       gtp = sk->sk_user_data;
        if (gtp) {
+               if (gtp->sk0 == sk)
+                       gtp->sk0 = NULL;
+               else
+                       gtp->sk1u = NULL;
                udp_sk(sk)->encap_type = 0;
                rcu_assign_sk_user_data(sk, NULL);
                sock_put(sk);
        }
+       release_sock(sk);
+}
+
+static void gtp_encap_destroy(struct sock *sk)
+{
+       rtnl_lock();
+       __gtp_encap_destroy(sk);
+       rtnl_unlock();
 }
 
 static void gtp_encap_disable_sock(struct sock *sk)
@@ -306,7 +319,7 @@ static void gtp_encap_disable_sock(struct sock *sk)
        if (!sk)
                return;
 
-       gtp_encap_destroy(sk);
+       __gtp_encap_destroy(sk);
 }
 
 static void gtp_encap_disable(struct gtp_dev *gtp)
@@ -800,7 +813,8 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
                goto out_sock;
        }
 
-       if (rcu_dereference_sk_user_data(sock->sk)) {
+       lock_sock(sock->sk);
+       if (sock->sk->sk_user_data) {
                sk = ERR_PTR(-EBUSY);
                goto out_sock;
        }
@@ -816,6 +830,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
        setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
 
 out_sock:
+       release_sock(sock->sk);
        sockfd_put(sock);
        return sk;
 }
@@ -847,8 +862,13 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
 
        if (data[IFLA_GTP_ROLE]) {
                role = nla_get_u32(data[IFLA_GTP_ROLE]);
-               if (role > GTP_ROLE_SGSN)
+               if (role > GTP_ROLE_SGSN) {
+                       if (sk0)
+                               gtp_encap_disable_sock(sk0);
+                       if (sk1u)
+                               gtp_encap_disable_sock(sk1u);
                        return -EINVAL;
+               }
        }
 
        gtp->sk0 = sk0;
@@ -949,7 +969,7 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
 
        }
 
-       pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
+       pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
        if (pctx == NULL)
                return -ENOMEM;
 
@@ -1038,6 +1058,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
        }
 
+       rtnl_lock();
        rcu_read_lock();
 
        gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
@@ -1062,6 +1083,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
 
 out_unlock:
        rcu_read_unlock();
+       rtnl_unlock();
        return err;
 }
 
@@ -1363,9 +1385,9 @@ late_initcall(gtp_init);
 
 static void __exit gtp_fini(void)
 {
-       unregister_pernet_subsys(&gtp_net_ops);
        genl_unregister_family(&gtp_genl_family);
        rtnl_link_unregister(&gtp_link_ops);
+       unregister_pernet_subsys(&gtp_net_ops);
 
        pr_info("GTP module unloaded\n");
 }
index cf6b9b1771f12cff597648423511b5f146440be5..6f6c0dbd91fc84bb31cbb4411a0d1b665ac512dc 100644 (file)
@@ -847,7 +847,6 @@ int netvsc_recv_callback(struct net_device *net,
                                    csum_info, vlan, data, len);
        if (unlikely(!skb)) {
                ++net_device_ctx->eth_stats.rx_no_memory;
-               rcu_read_unlock();
                return NVSP_STAT_FAIL;
        }
 
@@ -1249,12 +1248,15 @@ static void netvsc_get_stats64(struct net_device *net,
                               struct rtnl_link_stats64 *t)
 {
        struct net_device_context *ndev_ctx = netdev_priv(net);
-       struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
+       struct netvsc_device *nvdev;
        struct netvsc_vf_pcpu_stats vf_tot;
        int i;
 
+       rcu_read_lock();
+
+       nvdev = rcu_dereference(ndev_ctx->nvdev);
        if (!nvdev)
-               return;
+               goto out;
 
        netdev_stats_to_stats64(t, &net->stats);
 
@@ -1293,6 +1295,8 @@ static void netvsc_get_stats64(struct net_device *net,
                t->rx_packets   += packets;
                t->multicast    += multicast;
        }
+out:
+       rcu_read_unlock();
 }
 
 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
index 4f684cbcdc57e4ce382f1b5e57f7885ded2f2af2..078027bbe0025e1d6f40f734f17edd2b6c06b162 100644 (file)
@@ -1140,10 +1140,11 @@ static void atusb_disconnect(struct usb_interface *interface)
 
        ieee802154_unregister_hw(atusb->hw);
 
+       usb_put_dev(atusb->usb_dev);
+
        ieee802154_free_hw(atusb->hw);
 
        usb_set_intfdata(interface, NULL);
-       usb_put_dev(atusb->usb_dev);
 
        pr_debug("%s done\n", __func__);
 }
index b2ff903a9cb6e56a47814be2559589b73325302a..38a41651e451cfcf4cc2345f5e21f90ee8624c3c 100644 (file)
@@ -3151,12 +3151,12 @@ static int ca8210_probe(struct spi_device *spi_device)
                goto error;
        }
 
+       priv->spi->dev.platform_data = pdata;
        ret = ca8210_get_platform_data(priv->spi, pdata);
        if (ret) {
                dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n");
                goto error;
        }
-       priv->spi->dev.platform_data = pdata;
 
        ret = ca8210_dev_com_init(priv);
        if (ret) {
index f1ed1744801c765aa030854bdf1a9b1882e44b3e..be1f1a86bcd61d2bbbe21376e6caeff3c1d64076 100644 (file)
@@ -821,7 +821,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
                err = hwsim_subscribe_all_others(phy);
                if (err < 0) {
                        mutex_unlock(&hwsim_phys_lock);
-                       goto err_reg;
+                       goto err_subscribe;
                }
        }
        list_add_tail(&phy->list, &hwsim_phys);
@@ -831,6 +831,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
 
        return idx;
 
+err_subscribe:
+       ieee802154_unregister_hw(phy->hw);
 err_reg:
        kfree(pib);
 err_pib:
@@ -920,9 +922,9 @@ static __init int hwsim_init_module(void)
        return 0;
 
 platform_drv:
-       genl_unregister_family(&hwsim_genl_family);
-platform_dev:
        platform_device_unregister(mac802154hwsim_dev);
+platform_dev:
+       genl_unregister_family(&hwsim_genl_family);
        return rc;
 }
 
index 7de88b33d5b96d7f18a5f7c242a54c935b587086..0dc92d2faa64dbeb9cefe3f9627c72e4e30f16d7 100644 (file)
@@ -869,6 +869,7 @@ static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
 
 static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
 {
+       skb->ip_summed = CHECKSUM_NONE;
        memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
        skb_pull(skb, hdr_len);
        pskb_trim_unique(skb, skb->len - icv_len);
@@ -1103,10 +1104,9 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
        }
 
        skb = skb_unshare(skb, GFP_ATOMIC);
-       if (!skb) {
-               *pskb = NULL;
+       *pskb = skb;
+       if (!skb)
                return RX_HANDLER_CONSUMED;
-       }
 
        pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
        if (!pulled_sci) {
@@ -1238,6 +1238,7 @@ deliver:
                macsec_rxsa_put(rx_sa);
        macsec_rxsc_put(rx_sc);
 
+       skb_orphan(skb);
        ret = gro_cells_receive(&macsec->gro_cells, skb);
        if (ret == NET_RX_SUCCESS)
                count_rx(dev, skb->len);
index 59ad9915e5e01f51a3dd6d9f4d146e3241b1198e..4e825cc328d767d86bf3b99c88260996957b30b5 100644 (file)
@@ -297,12 +297,13 @@ static int dp83867_config_init(struct phy_device *phydev)
                }
        }
 
+       val = phy_read(phydev, DP83867_CFG3);
+
        /* Enable Interrupt output INT_OE in CFG3 register */
-       if (phy_interrupt_is_valid(phydev)) {
-               val = phy_read(phydev, DP83867_CFG3);
+       if (phy_interrupt_is_valid(phydev))
                val |= BIT(7);
-               phy_write(phydev, DP83867_CFG3, val);
-       }
+       val |= BIT(9);
+       phy_write(phydev, DP83867_CFG3, val);
 
        if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP)
                dp83867_config_port_mirroring(phydev);
index 2b1e336961f9ce3034a5268c8a20d4535bc7744b..bf4070ef6b84f841823d354e99a91e0eb5cc7fea 100644 (file)
@@ -110,14 +110,17 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
 
 static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
 {
+       u16 lb_dis = BIT(1);
+
        if (disable)
-               ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
+               ns_exp_write(phydev, 0x1c0,
+                            ns_exp_read(phydev, 0x1c0) | lb_dis);
        else
                ns_exp_write(phydev, 0x1c0,
-                            ns_exp_read(phydev, 0x1c0) & 0xfffe);
+                            ns_exp_read(phydev, 0x1c0) & ~lb_dis);
 
        pr_debug("10BASE-T HDX loopback %s\n",
-                (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
+                (ns_exp_read(phydev, 0x1c0) & lb_dis) ? "off" : "on");
 }
 
 static int ns_config_init(struct phy_device *phydev)
index 51467d24b72371ac4587762df9e54426525b9555..1ddbd41a914f29948c67c8e2e49fa6fd04e6138c 100644 (file)
@@ -757,6 +757,9 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
 {
        int rc;
 
+       if (!dev)
+               return -EINVAL;
+
        rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
        if (rc)
                return rc;
@@ -1096,6 +1099,9 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
        struct device *d;
        int rc;
 
+       if (!dev)
+               return ERR_PTR(-EINVAL);
+
        /* Search the list of PHY devices on the mdio bus for the
         * PHY with the requested name
         */
index 491efc1bf5c4894a3c3a8b8d67d236f68205efa4..7278eca70f9f36db2c837f9640526df6b6bf53db 100644 (file)
@@ -58,8 +58,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
                if (!phy->last_triggered)
                        led_trigger_event(&phy->led_link_trigger->trigger,
                                          LED_FULL);
+               else
+                       led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
 
-               led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
                led_trigger_event(&plt->trigger, LED_FULL);
                phy->last_triggered = plt;
        }
index e029c7977a562db9247897b35ae476569cba9f72..723611ac910275069d4a882f6740d39bfe948a71 100644 (file)
@@ -226,6 +226,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
                               __ETHTOOL_LINK_MODE_MASK_NBITS, true);
        linkmode_zero(pl->supported);
        phylink_set(pl->supported, MII);
+       phylink_set(pl->supported, Pause);
+       phylink_set(pl->supported, Asym_Pause);
        if (s) {
                __set_bit(s->bit, pl->supported);
        } else {
@@ -378,8 +380,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
  *  Local device  Link partner
  *  Pause AsymDir Pause AsymDir Result
  *    1     X       1     X     TX+RX
- *    0     1       1     1     RX
- *    1     1       0     1     TX
+ *    0     1       1     1     TX
+ *    1     1       0     1     RX
  */
 static void phylink_resolve_flow(struct phylink *pl,
                                 struct phylink_link_state *state)
@@ -400,7 +402,7 @@ static void phylink_resolve_flow(struct phylink *pl,
                        new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
                else if (pause & MLO_PAUSE_ASYM)
                        new_pause = state->pause & MLO_PAUSE_SYM ?
-                                MLO_PAUSE_RX : MLO_PAUSE_TX;
+                                MLO_PAUSE_TX : MLO_PAUSE_RX;
        } else {
                new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
        }
index 8807a806cc47f304bb3c5197dba54e2935cdee14..998d08ae7431aef493ae70063268e7d2a0ebc53a 100644 (file)
@@ -185,10 +185,11 @@ struct sfp {
        struct gpio_desc *gpio[GPIO_MAX];
 
        bool attached;
+       struct mutex st_mutex;                  /* Protects state */
        unsigned int state;
        struct delayed_work poll;
        struct delayed_work timeout;
-       struct mutex sm_mutex;
+       struct mutex sm_mutex;                  /* Protects state machine */
        unsigned char sm_mod_state;
        unsigned char sm_dev_state;
        unsigned short sm_state;
@@ -513,7 +514,7 @@ static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value)
 
 static void sfp_hwmon_to_rx_power(long *value)
 {
-       *value = DIV_ROUND_CLOSEST(*value, 100);
+       *value = DIV_ROUND_CLOSEST(*value, 10);
 }
 
 static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset,
@@ -1718,6 +1719,7 @@ static void sfp_check_state(struct sfp *sfp)
 {
        unsigned int state, i, changed;
 
+       mutex_lock(&sfp->st_mutex);
        state = sfp_get_state(sfp);
        changed = state ^ sfp->state;
        changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
@@ -1743,6 +1745,7 @@ static void sfp_check_state(struct sfp *sfp)
                sfp_sm_event(sfp, state & SFP_F_LOS ?
                                SFP_E_LOS_HIGH : SFP_E_LOS_LOW);
        rtnl_unlock();
+       mutex_unlock(&sfp->st_mutex);
 }
 
 static irqreturn_t sfp_irq(int irq, void *data)
@@ -1773,6 +1776,7 @@ static struct sfp *sfp_alloc(struct device *dev)
        sfp->dev = dev;
 
        mutex_init(&sfp->sm_mutex);
+       mutex_init(&sfp->st_mutex);
        INIT_DELAYED_WORK(&sfp->poll, sfp_poll);
        INIT_DELAYED_WORK(&sfp->timeout, sfp_timeout);
 
index 02ad03a2fab773cd36707e50434cc559fc639f7c..3e014ecffef8ea1f831b61bfc92625706bfd9685 100644 (file)
@@ -1419,6 +1419,8 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
                        netif_wake_queue(ppp->dev);
                else
                        netif_stop_queue(ppp->dev);
+       } else {
+               kfree_skb(skb);
        }
        ppp_xmit_unlock(ppp);
 }
index a205750b431ba5565df39a3d7d94ac409513fd9e..8609c1a0777b21040f0a8f0127b2bde65fe299ad 100644 (file)
@@ -63,6 +63,7 @@ MODULE_AUTHOR("Frank Cusack <fcusack@fcusack.com>");
 MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
+MODULE_SOFTDEP("pre: arc4");
 MODULE_VERSION("1.0.2");
 
 static unsigned int
index f22639f0116a4268644e4e8af911d5791b21962f..c04f3dc17d76f1c6d476faae5542975ef1c2707b 100644 (file)
@@ -1120,6 +1120,9 @@ static const struct proto_ops pppoe_ops = {
        .recvmsg        = pppoe_recvmsg,
        .mmap           = sock_no_mmap,
        .ioctl          = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = pppox_compat_ioctl,
+#endif
 };
 
 static const struct pppox_proto pppoe_proto = {
index c0599b3b23c06b179c843562dc7026f3e7d2f8df..9128e42e33e74f5f744fa4b9564cb31d2394d71b 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/string.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/compat.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
 #include <linux/net.h>
@@ -103,6 +104,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 
 EXPORT_SYMBOL(pppox_ioctl);
 
+#ifdef CONFIG_COMPAT
+int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+       if (cmd == PPPOEIOCSFWD32)
+               cmd = PPPOEIOCSFWD;
+
+       return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
+}
+
+EXPORT_SYMBOL(pppox_compat_ioctl);
+#endif
+
 static int pppox_create(struct net *net, struct socket *sock, int protocol,
                        int kern)
 {
index 7321a4eca2354005f2704742f0324283f24d21cb..9ad3ff40a563f2428ac710895f8a5b24824b15a2 100644 (file)
@@ -633,6 +633,9 @@ static const struct proto_ops pptp_ops = {
        .recvmsg    = sock_no_recvmsg,
        .mmap       = sock_no_mmap,
        .ioctl      = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = pppox_compat_ioctl,
+#endif
 };
 
 static const struct pppox_proto pppox_pptp_proto = {
index dc30f11f47664641704a9af6a9d61760cbf4ef98..3feb49badda9c7e1a792e84b03f6b07f40d71f39 100644 (file)
@@ -1011,6 +1011,8 @@ static void __team_compute_features(struct team *team)
 
        team->dev->vlan_features = vlan_features;
        team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+                                    NETIF_F_HW_VLAN_CTAG_TX |
+                                    NETIF_F_HW_VLAN_STAG_TX |
                                     NETIF_F_GSO_UDP_L4;
        team->dev->hard_header_len = max_hard_header_len;
 
index b67fee56ec81b6462095ae3480bd4e323b2e92c3..e1ac1c57089ff6e9f2d1e5e97f8bd5ce8c62179d 100644 (file)
@@ -801,7 +801,8 @@ static void tun_detach_all(struct net_device *dev)
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file,
-                     bool skip_filter, bool napi, bool napi_frags)
+                     bool skip_filter, bool napi, bool napi_frags,
+                     bool publish_tun)
 {
        struct tun_file *tfile = file->private_data;
        struct net_device *dev = tun->dev;
@@ -881,7 +882,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
         * initialized tfile; otherwise we risk using half-initialized
         * object.
         */
-       rcu_assign_pointer(tfile->tun, tun);
+       if (publish_tun)
+               rcu_assign_pointer(tfile->tun, tun);
        rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
        tun->numqueues++;
        tun_set_real_num_queues(tun);
@@ -1682,6 +1684,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 
        skb_reserve(skb, pad - delta);
        skb_put(skb, len);
+       skb_set_owner_w(skb, tfile->socket.sk);
        get_page(alloc_frag->page);
        alloc_frag->offset += buflen;
 
@@ -2552,7 +2555,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
                                 ifr->ifr_flags & IFF_NAPI,
-                                ifr->ifr_flags & IFF_NAPI_FRAGS);
+                                ifr->ifr_flags & IFF_NAPI_FRAGS, true);
                if (err < 0)
                        return err;
 
@@ -2651,13 +2654,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                INIT_LIST_HEAD(&tun->disabled);
                err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
-                                ifr->ifr_flags & IFF_NAPI_FRAGS);
+                                ifr->ifr_flags & IFF_NAPI_FRAGS, false);
                if (err < 0)
                        goto err_free_flow;
 
                err = register_netdevice(tun->dev);
                if (err < 0)
                        goto err_detach;
+               /* free_netdev() won't check refcnt, to aovid race
+                * with dev_put() we need publish tun after registration.
+                */
+               rcu_assign_pointer(tfile->tun, tun);
        }
 
        netif_carrier_on(tun->dev);
@@ -2801,7 +2808,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
                if (ret < 0)
                        goto unlock;
                ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
-                                tun->flags & IFF_NAPI_FRAGS);
+                                tun->flags & IFF_NAPI_FRAGS, true);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
                tun = rtnl_dereference(tfile->tun);
                if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
index 3d93993e74da09abfa63252247c680be069401d3..2eca4168af2f0a658fcb2b1b0584653ac82e9efe 100644 (file)
@@ -238,7 +238,7 @@ static void asix_phy_reset(struct usbnet *dev, unsigned int reset_bits)
 static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
 {
        int ret = 0;
-       u8 buf[ETH_ALEN];
+       u8 buf[ETH_ALEN] = {0};
        int i;
        unsigned long gpio_bits = dev->driver_info->data;
 
@@ -689,7 +689,7 @@ static int asix_resume(struct usb_interface *intf)
 static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 {
        int ret, i;
-       u8 buf[ETH_ALEN], chipcode = 0;
+       u8 buf[ETH_ALEN] = {0}, chipcode = 0;
        u32 phyid;
        struct asix_common_private *priv;
 
@@ -1073,7 +1073,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
 static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
 {
        int ret;
-       u8 buf[ETH_ALEN];
+       u8 buf[ETH_ALEN] = {0};
 
        usbnet_get_endpoints(dev,intf);
 
index 5c42cf81a08b2c250bb98c792012436d88df9562..85fba64c3fcf7b528197fa39f2a2c84a4b3ec1bd 100644 (file)
@@ -221,9 +221,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
                goto bad_desc;
        }
 skip:
-       if (    rndis &&
-               header.usb_cdc_acm_descriptor &&
-               header.usb_cdc_acm_descriptor->bmCapabilities) {
+       /* Communcation class functions with bmCapabilities are not
+        * RNDIS.  But some Wireless class RNDIS functions use
+        * bmCapabilities for their own purpose. The failsafe is
+        * therefore applied only to Communication class RNDIS
+        * functions.  The rndis test is redundant, but a cheap
+        * optimization.
+        */
+       if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
+           header.usb_cdc_acm_descriptor &&
+           header.usb_cdc_acm_descriptor->bmCapabilities) {
                        dev_dbg(&intf->dev,
                                "ACM capabilities %02x, not really RNDIS?\n",
                                header.usb_cdc_acm_descriptor->bmCapabilities);
index 1eaec648bd1f716db3d06622cdfb7834e64e4e38..f53e3e4e25f37666e3c7cea639ae6b2741645360 100644 (file)
@@ -681,8 +681,12 @@ cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
        u8 ep;
 
        for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
-
                e = intf->cur_altsetting->endpoint + ep;
+
+               /* ignore endpoints which cannot transfer data */
+               if (!usb_endpoint_maxp(&e->desc))
+                       continue;
+
                switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
                case USB_ENDPOINT_XFER_INT:
                        if (usb_endpoint_dir_in(&e->desc)) {
index 947bea81d924124c3827e87f75e732e35adb2acd..dfbdea22fbad9b7f6ae0d2c3938af9565e4855e7 100644 (file)
@@ -175,7 +175,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
        }
        if (!timeout) {
                dev_err(&udev->dev, "firmware not ready in time\n");
-               return -ETIMEDOUT;
+               ret = -ETIMEDOUT;
+               goto err;
        }
 
        /* enable ethernet mode (?) */
index d6916f787fce98ae2ce4d51d0619eb5094738543..5251c5f6f96edf601891d123f534d8aab7306591 100644 (file)
@@ -2634,14 +2634,18 @@ static struct hso_device *hso_create_bulk_serial_device(
                 */
                if (serial->tiocmget) {
                        tiocmget = serial->tiocmget;
+                       tiocmget->endp = hso_get_ep(interface,
+                                                   USB_ENDPOINT_XFER_INT,
+                                                   USB_DIR_IN);
+                       if (!tiocmget->endp) {
+                               dev_err(&interface->dev, "Failed to find INT IN ep\n");
+                               goto exit;
+                       }
+
                        tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
                        if (tiocmget->urb) {
                                mutex_init(&tiocmget->mutex);
                                init_waitqueue_head(&tiocmget->waitq);
-                               tiocmget->endp = hso_get_ep(
-                                       interface,
-                                       USB_ENDPOINT_XFER_INT,
-                                       USB_DIR_IN);
                        } else
                                hso_free_tiomget(serial);
                }
index bd2ba365902883f1b4d66c8d83650d81f1813652..0cc6993c279a2c676e6928de7d5aa890e7ae3bbc 100644 (file)
@@ -117,16 +117,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
        status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
                                         usb_buf, 24);
        if (status != 0)
-               return status;
+               goto out;
 
        memcpy(usb_buf, init_msg_2, 12);
        status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
                                         usb_buf, 28);
        if (status != 0)
-               return status;
+               goto out;
 
        memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
-
+out:
        kfree(usb_buf);
        return status;
 }
index 8d140495da79dce522d3f3fa2382e6886f24eaa7..e20266bd209e24df2bfd5edd0b97013ceb715b0e 100644 (file)
@@ -3799,7 +3799,7 @@ static int lan78xx_probe(struct usb_interface *intf,
        ret = register_netdev(netdev);
        if (ret != 0) {
                netif_err(dev, probe, netdev, "couldn't register the device\n");
-               goto out3;
+               goto out4;
        }
 
        usb_set_intfdata(intf, dev);
@@ -3814,12 +3814,14 @@ static int lan78xx_probe(struct usb_interface *intf,
 
        ret = lan78xx_phy_init(dev);
        if (ret < 0)
-               goto out4;
+               goto out5;
 
        return 0;
 
-out4:
+out5:
        unregister_netdev(netdev);
+out4:
+       usb_free_urb(dev->urb_intr);
 out3:
        lan78xx_unbind(dev, intf);
 out2:
index f4247b275e0901a54ebf0b36d67adaf708bf6950..b7a0df95d4b0fe760b2f8a4976b5e4fc06003668 100644 (file)
@@ -285,7 +285,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 {
        int i;
-       __u8 tmp;
+       __u8 tmp = 0;
        __le16 retdatai;
        int ret;
 
index 128c8a327d8ee4ec192ef1b160d8504565c0f287..6f517e67302082a86d31455c1426ff16711abdd1 100644 (file)
@@ -1231,6 +1231,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
        {QMI_FIXED_INTF(0x2020, 0x2031, 4)},    /* Olicard 600 */
        {QMI_FIXED_INTF(0x2020, 0x2033, 4)},    /* BroadMobi BM806U */
+       {QMI_FIXED_INTF(0x2020, 0x2060, 4)},    /* BroadMobi BM818 */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 8)},    /* Sierra Wireless MC7710 in QMI mode */
@@ -1285,6 +1286,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},    /* Cinterion PHxx,PXxx (2 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)},    /* Cinterion PHxx,PXxx (2 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)},    /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
+       {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
        {QMI_FIXED_INTF(0x413c, 0x81a2, 8)},    /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a3, 8)},    /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
index f1b5201cc32075da27cf14d94b781c9f58c16189..a291e5f2daef629935bfc015cd3c795ff1415d13 100644 (file)
@@ -788,8 +788,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
        ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
                              RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
                              value, index, tmp, size, 500);
+       if (ret < 0)
+               memset(data, 0xff, size);
+       else
+               memcpy(data, tmp, size);
 
-       memcpy(data, tmp, size);
        kfree(tmp);
 
        return ret;
@@ -4471,10 +4474,9 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
        struct r8152 *tp = usb_get_intfdata(intf);
 
        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
-       mutex_lock(&tp->control);
        tp->rtl_ops.init(tp);
        queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
-       mutex_unlock(&tp->control);
+       set_ethernet_addr(tp);
        return rtl8152_resume(intf);
 }
 
index 10854977c55f1ae335bf9c90a87688de1d35497c..84b354f76dea8560100fe24134806d805398cc6e 100644 (file)
@@ -112,6 +112,11 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
                        int                             intr = 0;
 
                        e = alt->endpoint + ep;
+
+                       /* ignore endpoints which cannot transfer data */
+                       if (!usb_endpoint_maxp(&e->desc))
+                               continue;
+
                        switch (e->desc.bmAttributes) {
                        case USB_ENDPOINT_XFER_INT:
                                if (!usb_endpoint_dir_in(&e->desc))
@@ -351,6 +356,8 @@ void usbnet_update_max_qlen(struct usbnet *dev)
 {
        enum usb_device_speed speed = dev->udev->speed;
 
+       if (!dev->rx_urb_size || !dev->hard_mtu)
+               goto insanity;
        switch (speed) {
        case USB_SPEED_HIGH:
                dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
@@ -367,6 +374,7 @@ void usbnet_update_max_qlen(struct usbnet *dev)
                dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
                break;
        default:
+insanity:
                dev->rx_qlen = dev->tx_qlen = 4;
        }
 }
index 449fc52f9a89e9452793383f03b809002a7f1989..9f895083bc0aad15a2cadfca2fba43d3406f12a8 100644 (file)
@@ -169,23 +169,29 @@ static int vrf_ip6_local_out(struct net *net, struct sock *sk,
 static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
                                           struct net_device *dev)
 {
-       const struct ipv6hdr *iph = ipv6_hdr(skb);
+       const struct ipv6hdr *iph;
        struct net *net = dev_net(skb->dev);
-       struct flowi6 fl6 = {
-               /* needed to match OIF rule */
-               .flowi6_oif = dev->ifindex,
-               .flowi6_iif = LOOPBACK_IFINDEX,
-               .daddr = iph->daddr,
-               .saddr = iph->saddr,
-               .flowlabel = ip6_flowinfo(iph),
-               .flowi6_mark = skb->mark,
-               .flowi6_proto = iph->nexthdr,
-               .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
-       };
+       struct flowi6 fl6;
        int ret = NET_XMIT_DROP;
        struct dst_entry *dst;
        struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;
 
+       if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+               goto err;
+
+       iph = ipv6_hdr(skb);
+
+       memset(&fl6, 0, sizeof(fl6));
+       /* needed to match OIF rule */
+       fl6.flowi6_oif = dev->ifindex;
+       fl6.flowi6_iif = LOOPBACK_IFINDEX;
+       fl6.daddr = iph->daddr;
+       fl6.saddr = iph->saddr;
+       fl6.flowlabel = ip6_flowinfo(iph);
+       fl6.flowi6_mark = skb->mark;
+       fl6.flowi6_proto = iph->nexthdr;
+       fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
+
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst == dst_null)
                goto err;
@@ -241,21 +247,27 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk,
 static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
                                           struct net_device *vrf_dev)
 {
-       struct iphdr *ip4h = ip_hdr(skb);
+       struct iphdr *ip4h;
        int ret = NET_XMIT_DROP;
-       struct flowi4 fl4 = {
-               /* needed to match OIF rule */
-               .flowi4_oif = vrf_dev->ifindex,
-               .flowi4_iif = LOOPBACK_IFINDEX,
-               .flowi4_tos = RT_TOS(ip4h->tos),
-               .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
-               .flowi4_proto = ip4h->protocol,
-               .daddr = ip4h->daddr,
-               .saddr = ip4h->saddr,
-       };
+       struct flowi4 fl4;
        struct net *net = dev_net(vrf_dev);
        struct rtable *rt;
 
+       if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+               goto err;
+
+       ip4h = ip_hdr(skb);
+
+       memset(&fl4, 0, sizeof(fl4));
+       /* needed to match OIF rule */
+       fl4.flowi4_oif = vrf_dev->ifindex;
+       fl4.flowi4_iif = LOOPBACK_IFINDEX;
+       fl4.flowi4_tos = RT_TOS(ip4h->tos);
+       fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
+       fl4.flowi4_proto = ip4h->protocol;
+       fl4.daddr = ip4h->daddr;
+       fl4.saddr = ip4h->saddr;
+
        rt = ip_route_output_flow(net, &fl4, NULL);
        if (IS_ERR(rt))
                goto err;
index e9fc168bb734504e535be77067dd250a8197bffe..489cba9b284d1b3bf633853c2f1f5de57e1533c2 100644 (file)
@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
                        }
                        result = i2400m_barker_db_add(barker);
                        if (result < 0)
-                               goto error_add;
+                               goto error_parse_add;
                }
                kfree(options_orig);
        }
        return 0;
 
+error_parse_add:
 error_parse:
+       kfree(options_orig);
 error_add:
        kfree(i2400m_barker_db);
        return result;
index 677535b3d2070eea5d20983b89c9aba44be4a829..476e0535f06f0ca6745b0f43bd6c3a5fd78247fe 100644 (file)
@@ -168,7 +168,7 @@ const struct ath10k_hw_values qca6174_values = {
 };
 
 const struct ath10k_hw_values qca99x0_values = {
-       .rtc_state_val_on               = 5,
+       .rtc_state_val_on               = 7,
        .ce_count                       = 12,
        .msi_assign_ce_max              = 12,
        .num_target_ce_config_wlan      = 10,
index f3b1cfacfe9dfcb1a4bfb0ae9438ef3406c2a9cf..1419f9d1505fe2e9291eebdc81a1ac1e6e1176d2 100644 (file)
@@ -1624,6 +1624,10 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
        if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
                return 0;
 
+        /* For mesh, probe response and beacon share the same template */
+       if (ieee80211_vif_is_mesh(vif))
+               return 0;
+
        prb = ieee80211_proberesp_get(hw, vif);
        if (!prb) {
                ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
index 7f61591ce0de6b5c904fde8261c9a6689a587f5a..686759b5613f2c7f05685a8911c1f3a882e55732 100644 (file)
@@ -613,6 +613,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
                                                    full_len,
                                                    last_in_bundle,
                                                    last_in_bundle);
+               if (ret) {
+                       ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
+                       goto err;
+               }
        }
 
        ar_sdio->n_rx_pkts = i;
@@ -2069,6 +2073,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
        cancel_work_sync(&ar_sdio->wr_async_work);
        ath10k_core_unregister(ar);
        ath10k_core_destroy(ar);
+
+       flush_workqueue(ar_sdio->workqueue);
+       destroy_workqueue(ar_sdio->workqueue);
 }
 
 static const struct sdio_device_id ath10k_sdio_devices[] = {
index cda164f6e9f62f87e36c40c96f5fb7586d87a94c..6f62ddc0494c346b6718cb3bea872feafe0d74e1 100644 (file)
@@ -156,6 +156,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
 {
        struct ath10k_peer *peer;
 
+       if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
+               return NULL;
+
        lockdep_assert_held(&ar->data_lock);
 
        list_for_each_entry(peer, &ar->peers, list)
index d4803ff5a78a75eb7e2d63bc77a1623f146e283e..f09a4ad2e9de71c25973ae553b94d0086606bfe0 100644 (file)
@@ -1025,7 +1025,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
        }
 
        /* TODO: remove this once USB support is fully implemented */
-       ath10k_warn(ar, "WARNING: ath10k USB support is incomplete, don't expect anything to work!\n");
+       ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n");
 
        return 0;
 
index 777acc564ac9917d331de8f2ef87657199ce482a..bc7916f2add0971adb18b7fa3501b43a6a8bd7bf 100644 (file)
@@ -1178,6 +1178,10 @@ static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
                return -EINVAL;
 
        ev = (struct wmi_pstream_timeout_event *) datap;
+       if (ev->traffic_class >= WMM_NUM_AC) {
+               ath6kl_err("invalid traffic class: %d\n", ev->traffic_class);
+               return -EINVAL;
+       }
 
        /*
         * When the pstream (fat pipe == AC) timesout, it means there were
@@ -1519,6 +1523,10 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
                return -EINVAL;
 
        reply = (struct wmi_cac_event *) datap;
+       if (reply->ac >= WMM_NUM_AC) {
+               ath6kl_err("invalid AC: %d\n", reply->ac);
+               return -EINVAL;
+       }
 
        if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
            (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
@@ -2635,7 +2643,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
        u16 active_tsids = 0;
        int ret;
 
-       if (traffic_class > 3) {
+       if (traffic_class >= WMM_NUM_AC) {
                ath6kl_err("invalid traffic class: %d\n", traffic_class);
                return -EINVAL;
        }
index bb319f22761fbe46379837ed21dee4f343475dc5..b4f7ee423d4072f1a3baaf6ac11b548745de7dc8 100644 (file)
@@ -252,8 +252,9 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
 /* Chip Revisions */
 /******************/
 
-static void ath9k_hw_read_revisions(struct ath_hw *ah)
+static bool ath9k_hw_read_revisions(struct ath_hw *ah)
 {
+       u32 srev;
        u32 val;
 
        if (ah->get_mac_revision)
@@ -269,25 +270,33 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
                        val = REG_READ(ah, AR_SREV);
                        ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
                }
-               return;
+               return true;
        case AR9300_DEVID_AR9340:
                ah->hw_version.macVersion = AR_SREV_VERSION_9340;
-               return;
+               return true;
        case AR9300_DEVID_QCA955X:
                ah->hw_version.macVersion = AR_SREV_VERSION_9550;
-               return;
+               return true;
        case AR9300_DEVID_AR953X:
                ah->hw_version.macVersion = AR_SREV_VERSION_9531;
-               return;
+               return true;
        case AR9300_DEVID_QCA956X:
                ah->hw_version.macVersion = AR_SREV_VERSION_9561;
-               return;
+               return true;
        }
 
-       val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
+       srev = REG_READ(ah, AR_SREV);
+
+       if (srev == -EIO) {
+               ath_err(ath9k_hw_common(ah),
+                       "Failed to read SREV register");
+               return false;
+       }
+
+       val = srev & AR_SREV_ID;
 
        if (val == 0xFF) {
-               val = REG_READ(ah, AR_SREV);
+               val = srev;
                ah->hw_version.macVersion =
                        (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
                ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
@@ -306,6 +315,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
                if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)
                        ah->is_pciexpress = true;
        }
+
+       return true;
 }
 
 /************************************/
@@ -559,7 +570,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
        struct ath_common *common = ath9k_hw_common(ah);
        int r = 0;
 
-       ath9k_hw_read_revisions(ah);
+       if (!ath9k_hw_read_revisions(ah)) {
+               ath_err(common, "Could not read hardware revisions");
+               return -EOPNOTSUPP;
+       }
 
        switch (ah->hw_version.macVersion) {
        case AR_SREV_VERSION_5416_PCI:
index d52b31b45df7d1fd20f301573e7e5f0379df02b5..a274eb0d19688f8c8604647d03c38483ce07d923 100644 (file)
@@ -111,7 +111,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = {
        JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
        JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
        JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
-       JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
+       JP_PATTERN(3, 0, 4, 4000, 4000, 1, 18, 50, false),
        JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
        JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
        JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
index 5d287a8e1b458a8aca674275a55c007df944e4c3..0655cd8845142c244a260252d01b3d74e003d062 100644 (file)
@@ -296,21 +296,24 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
 static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
 {
        struct wil6210_priv *wil = cookie;
-       u32 isr = wil_ioread32_and_clear(wil->csr +
-                                        HOSTADDR(RGF_DMA_EP_RX_ICR) +
-                                        offsetof(struct RGF_ICR, ICR));
+       u32 isr;
        bool need_unmask = true;
 
+       wil6210_mask_irq_rx(wil);
+
+       isr = wil_ioread32_and_clear(wil->csr +
+                                    HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                                    offsetof(struct RGF_ICR, ICR));
+
        trace_wil6210_irq_rx(isr);
        wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
 
        if (unlikely(!isr)) {
                wil_err_ratelimited(wil, "spurious IRQ: RX\n");
+               wil6210_unmask_irq_rx(wil);
                return IRQ_NONE;
        }
 
-       wil6210_mask_irq_rx(wil);
-
        /* RX_DONE and RX_HTRSH interrupts are the same if interrupt
         * moderation is not used. Interrupt moderation may cause RX
         * buffer overflow while RX_DONE is delayed. The required
@@ -355,21 +358,24 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
 static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
 {
        struct wil6210_priv *wil = cookie;
-       u32 isr = wil_ioread32_and_clear(wil->csr +
-                                        HOSTADDR(RGF_INT_GEN_RX_ICR) +
-                                        offsetof(struct RGF_ICR, ICR));
+       u32 isr;
        bool need_unmask = true;
 
+       wil6210_mask_irq_rx_edma(wil);
+
+       isr = wil_ioread32_and_clear(wil->csr +
+                                    HOSTADDR(RGF_INT_GEN_RX_ICR) +
+                                    offsetof(struct RGF_ICR, ICR));
+
        trace_wil6210_irq_rx(isr);
        wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
 
        if (unlikely(!isr)) {
                wil_err(wil, "spurious IRQ: RX\n");
+               wil6210_unmask_irq_rx_edma(wil);
                return IRQ_NONE;
        }
 
-       wil6210_mask_irq_rx_edma(wil);
-
        if (likely(isr & BIT_RX_STATUS_IRQ)) {
                wil_dbg_irq(wil, "RX status ring\n");
                isr &= ~BIT_RX_STATUS_IRQ;
@@ -403,21 +409,24 @@ static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
 static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
 {
        struct wil6210_priv *wil = cookie;
-       u32 isr = wil_ioread32_and_clear(wil->csr +
-                                        HOSTADDR(RGF_INT_GEN_TX_ICR) +
-                                        offsetof(struct RGF_ICR, ICR));
+       u32 isr;
        bool need_unmask = true;
 
+       wil6210_mask_irq_tx_edma(wil);
+
+       isr = wil_ioread32_and_clear(wil->csr +
+                                    HOSTADDR(RGF_INT_GEN_TX_ICR) +
+                                    offsetof(struct RGF_ICR, ICR));
+
        trace_wil6210_irq_tx(isr);
        wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
 
        if (unlikely(!isr)) {
                wil_err(wil, "spurious IRQ: TX\n");
+               wil6210_unmask_irq_tx_edma(wil);
                return IRQ_NONE;
        }
 
-       wil6210_mask_irq_tx_edma(wil);
-
        if (likely(isr & BIT_TX_STATUS_IRQ)) {
                wil_dbg_irq(wil, "TX status ring\n");
                isr &= ~BIT_TX_STATUS_IRQ;
@@ -446,21 +455,24 @@ static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
 static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
 {
        struct wil6210_priv *wil = cookie;
-       u32 isr = wil_ioread32_and_clear(wil->csr +
-                                        HOSTADDR(RGF_DMA_EP_TX_ICR) +
-                                        offsetof(struct RGF_ICR, ICR));
+       u32 isr;
        bool need_unmask = true;
 
+       wil6210_mask_irq_tx(wil);
+
+       isr = wil_ioread32_and_clear(wil->csr +
+                                    HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                                    offsetof(struct RGF_ICR, ICR));
+
        trace_wil6210_irq_tx(isr);
        wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
 
        if (unlikely(!isr)) {
                wil_err_ratelimited(wil, "spurious IRQ: TX\n");
+               wil6210_unmask_irq_tx(wil);
                return IRQ_NONE;
        }
 
-       wil6210_mask_irq_tx(wil);
-
        if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
                wil_dbg_irq(wil, "TX done\n");
                isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
@@ -532,20 +544,23 @@ static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
 static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
 {
        struct wil6210_priv *wil = cookie;
-       u32 isr = wil_ioread32_and_clear(wil->csr +
-                                        HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-                                        offsetof(struct RGF_ICR, ICR));
+       u32 isr;
+
+       wil6210_mask_irq_misc(wil, false);
+
+       isr = wil_ioread32_and_clear(wil->csr +
+                                    HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                                    offsetof(struct RGF_ICR, ICR));
 
        trace_wil6210_irq_misc(isr);
        wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
 
        if (!isr) {
                wil_err(wil, "spurious IRQ: MISC\n");
+               wil6210_unmask_irq_misc(wil, false);
                return IRQ_NONE;
        }
 
-       wil6210_mask_irq_misc(wil, false);
-
        if (isr & ISR_MISC_FW_ERROR) {
                u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
                u32 ucode_assert_code =
index 75c8aa297107173362172b1924dbe42524e1eb73..1b1b58e0129a3abc74130f1b7f3524ab0831bc38 100644 (file)
@@ -736,6 +736,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
                [GRO_HELD]              = "GRO_HELD",
                [GRO_NORMAL]            = "GRO_NORMAL",
                [GRO_DROP]              = "GRO_DROP",
+               [GRO_CONSUMED]          = "GRO_CONSUMED",
        };
 
        wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
index 6e3b3031f29bd742544c775ba737bb1304d00905..2010f771478dfc8ce29526e15615ef38d3d1569f 100644 (file)
@@ -2816,7 +2816,18 @@ static void wmi_event_handle(struct wil6210_priv *wil,
                /* check if someone waits for this event */
                if (wil->reply_id && wil->reply_id == id &&
                    wil->reply_mid == mid) {
-                       WARN_ON(wil->reply_buf);
+                       if (wil->reply_buf) {
+                               /* event received while wmi_call is waiting
+                                * with a buffer. Such event should be handled
+                                * in wmi_recv_cmd function. Handling the event
+                                * here means a previous wmi_call was timeout.
+                                * Drop the event and do not handle it.
+                                */
+                               wil_err(wil,
+                                       "Old event (%d, %s) while wmi_call is waiting. Drop it and Continue waiting\n",
+                                       id, eventid2name(id));
+                               return;
+                       }
 
                        wmi_evt_call_handler(vif, id, evt_data,
                                             len - sizeof(*wmi));
index 91ca77c7571cebb2a6dd3265b1713fb6d3202bda..b4347806a59ed31f1e50d361094ee728af1736d5 100644 (file)
 #define IWL_22000_HR_FW_PRE            "iwlwifi-Qu-a0-hr-a0-"
 #define IWL_22000_HR_CDB_FW_PRE                "iwlwifi-QuIcp-z0-hrcdb-a0-"
 #define IWL_22000_HR_A_F0_FW_PRE       "iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_22000_HR_B_FW_PRE          "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_HR_B_F0_FW_PRE       "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_QU_B_HR_B_FW_PRE     "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_HR_B_FW_PRE          "iwlwifi-QuQnj-b0-hr-b0-"
 #define IWL_22000_JF_B0_FW_PRE         "iwlwifi-QuQnj-a0-jf-b0-"
 #define IWL_22000_HR_A0_FW_PRE         "iwlwifi-QuQnj-a0-hr-a0-"
 #define IWL_22000_SU_Z0_FW_PRE         "iwlwifi-su-z0-"
+#define IWL_QU_B_JF_B_FW_PRE           "iwlwifi-Qu-b0-jf-b0-"
 
 #define IWL_22000_HR_MODULE_FIRMWARE(api) \
        IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
        IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
        IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
+#define IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(api) \
+       IWL_22000_HR_B_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \
+       IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api)        \
        IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
        IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
        IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
        IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
+#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
+       IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_22000                10
 
@@ -190,7 +199,54 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = {
 
 const struct iwl_cfg iwl22000_2ax_cfg_hr = {
        .name = "Intel(R) Dual Band Wireless AX 22000",
-       .fw_name_pre = IWL_22000_HR_FW_PRE,
+       .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+       IWL_DEVICE_22500,
+       /*
+        * This device doesn't support receiving BlockAck with a large bitmap
+        * so we need to restrict the size of transmitted aggregation to the
+        * HT size; mac80211 would otherwise pick the HE max (256) by default.
+        */
+       .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+/*
+ * All JF radio modules are part of the 9000 series, but the MAC part
+ * looks more like 22000.  That's why this device is here, but called
+ * 9560 nevertheless.
+ */
+const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = {
+       .name = "Intel(R) Wireless-AC 9461",
+       .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+       IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = {
+       .name = "Intel(R) Wireless-AC 9462",
+       .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+       IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
+       .name = "Intel(R) Wireless-AC 9560",
+       .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+       IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
+       .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+       .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+       IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
+       .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+       .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+       IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_jf = {
+       .name = "Intel(R) Dual Band Wireless AX 22000",
+       .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
        IWL_DEVICE_22500,
        /*
         * This device doesn't support receiving BlockAck with a large bitmap
@@ -264,7 +320,10 @@ const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
 MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
index ff85d69c2a8cb3703bdc1bebe3148435a4cf2e43..557ee47bffd8c33baddf500fd13aebc337bcfa62 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -134,6 +134,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
                .len = { 0, },
        };
        struct iwl_rx_packet *pkt;
+       int ret;
 
        if (fw_has_capa(&fwrt->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
@@ -141,8 +142,13 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
        else
                cmd.id = SHARED_MEM_CFG;
 
-       if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd)))
+       ret = iwl_trans_send_cmd(fwrt->trans, &cmd);
+
+       if (ret) {
+               WARN(ret != -ERFKILL,
+                    "Could not send the SMEM command: %d\n", ret);
                return;
+       }
 
        pkt = cmd.resp_pkt;
        if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
index 12fddcf15bab394521122a2fecaab3d2dcd83579..2e9fd7a303985174970f538dcd0530ee6f7ba41e 100644 (file)
@@ -574,11 +574,18 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
 extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_jf;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
 extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
-#endif /* CONFIG_IWLMVM */
+#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
 
 #endif /* __IWL_CONFIG_H__ */
index 8b7d70e3a379305fa8709de30279cacc4391c9d2..9cb9f0544c9b172aecbb0d842516bfae1d1aab43 100644 (file)
@@ -724,7 +724,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
 
        for (i = 0; i < n_profiles; i++) {
                /* the tables start at element 3 */
-               static int pos = 3;
+               int pos = 3;
 
                /* The EWRD profiles officially go from 2 to 4, but we
                 * save them in sar_profiles[1-3] (because we don't
@@ -836,6 +836,24 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
        return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
 }
 
+static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
+{
+       /*
+        * The GEO_TX_POWER_LIMIT command is not supported on earlier
+        * firmware versions.  Unfortunately, we don't have a TLV API
+        * flag to rely on, so rely on the major version which is in
+        * the first byte of ucode_ver.  This was implemented
+        * initially on version 38 and then backported to29 and 17.
+        * The intention was to have it in 36 as well, but not all
+        * 8000 family got this feature enabled.  The 8000 family is
+        * the only one using version 36, so skip this version
+        * entirely.
+        */
+       return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
+              IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
+              IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+}
+
 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 {
        struct iwl_geo_tx_power_profiles_resp *resp;
@@ -851,6 +869,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
                .data = { &geo_cmd },
        };
 
+       if (!iwl_mvm_sar_geo_support(mvm))
+               return -EOPNOTSUPP;
+
        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (ret) {
                IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
@@ -876,13 +897,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
        int ret, i, j;
        u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
 
-       /*
-        * This command is not supported on earlier firmware versions.
-        * Unfortunately, we don't have a TLV API flag to rely on, so
-        * rely on the major version which is in the first byte of
-        * ucode_ver.
-        */
-       if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+       if (!iwl_mvm_sar_geo_support(mvm))
                return 0;
 
        ret = iwl_mvm_sar_get_wgds_table(mvm);
index d1c1a8069c7e58b9624efa6d2698293714762e81..5e1e671d200219579058074f97a607671b56fb77 100644 (file)
@@ -315,7 +315,7 @@ out:
 }
 
 void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                    enum nl80211_band band)
+                    enum nl80211_band band, bool update)
 {
        struct ieee80211_hw *hw = mvm->hw;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -324,7 +324,8 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        struct ieee80211_supported_band *sband;
        struct iwl_tlc_config_cmd cfg_cmd = {
                .sta_id = mvmsta->sta_id,
-               .max_ch_width = rs_fw_bw_from_sta_bw(sta),
+               .max_ch_width = update ?
+                       rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
                .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
                .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
                .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
index 6b9c670fcef86f83fd5df226d0e624902734c83c..6f4508d62a97e3b070cc983971d146b34a3c2069 100644 (file)
@@ -4113,7 +4113,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                          enum nl80211_band band, bool update)
 {
        if (iwl_mvm_has_tlc_offload(mvm))
-               rs_fw_rate_init(mvm, sta, band);
+               rs_fw_rate_init(mvm, sta, band, update);
        else
                rs_drv_rate_init(mvm, sta, band, update);
 }
index 8e7f993e29116b06a2ee16c0486188d8a0022d4a..d0f47899f2849505eb60d5343f19c216f8e3fb3c 100644 (file)
@@ -461,7 +461,7 @@ void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
 
 void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
 void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                    enum nl80211_band band);
+                    enum nl80211_band band, bool update);
 int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                        bool enable);
 void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
index 2d21f0a1fa006fc2e4eae4ef700e8dab535db6b5..5615ce55cef56a271609d353c8986d92bb62f134 100644 (file)
@@ -641,6 +641,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 
        memcpy(&info, skb->cb, sizeof(info));
 
+       if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
+               return -1;
+
        if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
                return -1;
 
@@ -668,7 +671,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
                    info.control.vif->type == NL80211_IFTYPE_AP ||
                    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
-                       if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+                       if (!ieee80211_is_data(hdr->frame_control))
                                sta_id = mvmvif->bcast_sta.sta_id;
                        else
                                sta_id = mvmvif->mcast_sta.sta_id;
index 2146fda8da2fdbdece661ceb3e177ab7c5c2b83e..64d976d872b84ab50de574703d60b081a58ad8ca 100644 (file)
@@ -164,7 +164,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 
        memcpy(iml_img, trans->iml, trans->iml_len);
 
-       iwl_enable_interrupts(trans);
+       iwl_enable_fw_load_int_ctx_info(trans);
 
        /* kick FW self load */
        iwl_write64(trans, CSR_CTXT_INFO_ADDR,
index b2cd7ef5fc3a9ba3b37351745d6fdedfda985cf9..6f25fd1bbd8f40fb30181613715e09b826cd530f 100644 (file)
@@ -206,7 +206,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
 
        trans_pcie->ctxt_info = ctxt_info;
 
-       iwl_enable_interrupts(trans);
+       iwl_enable_fw_load_int_ctx_info(trans);
 
        /* Configure debug, if exists */
        if (trans->dbg_dest_tlv)
index 5d65500a8aa750f51242d09939a814ba957203fb..0982bd99b1c3cfd03aa04ed57210c0aa414b1635 100644 (file)
@@ -601,6 +601,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
@@ -696,34 +697,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+       {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
        {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
index b63d44b7cd7c7be1e6a0a3fe0081edc5ea4820b7..00f9566bcc2136ab588beb05fada23eeea2b60f1 100644 (file)
@@ -896,6 +896,33 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
        }
 }
 
+static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");
+
+       if (!trans_pcie->msix_enabled) {
+               /*
+                * When we'll receive the ALIVE interrupt, the ISR will call
+                * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
+                * interrupt (which is not really needed anymore) but also the
+                * RX interrupt which will allow us to receive the ALIVE
+                * notification (which is Rx) and continue the flow.
+                */
+               trans_pcie->inta_mask =  CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
+               iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+       } else {
+               iwl_enable_hw_int_msk_msix(trans,
+                                          MSIX_HW_INT_CAUSES_REG_ALIVE);
+               /*
+                * Leave all the FH causes enabled to get the ALIVE
+                * notification.
+                */
+               iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
+       }
+}
+
 static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
 {
        return index & (q->n_window - 1);
index 6dcd5374d9b4d3b4c0aa60ad15c42428e2d90842..1d144985ea589d7a285c04279a70e5c09b570647 100644 (file)
@@ -1778,26 +1778,26 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                goto out;
        }
 
-       if (iwl_have_debug_level(IWL_DL_ISR)) {
-               /* NIC fires this, but we don't use it, redundant with WAKEUP */
-               if (inta & CSR_INT_BIT_SCD) {
-                       IWL_DEBUG_ISR(trans,
-                                     "Scheduler finished to transmit the frame/frames.\n");
-                       isr_stats->sch++;
-               }
+       /* NIC fires this, but we don't use it, redundant with WAKEUP */
+       if (inta & CSR_INT_BIT_SCD) {
+               IWL_DEBUG_ISR(trans,
+                             "Scheduler finished to transmit the frame/frames.\n");
+               isr_stats->sch++;
+       }
 
-               /* Alive notification via Rx interrupt will do the real work */
-               if (inta & CSR_INT_BIT_ALIVE) {
-                       IWL_DEBUG_ISR(trans, "Alive interrupt\n");
-                       isr_stats->alive++;
-                       if (trans->cfg->gen2) {
-                               /*
-                                * We can restock, since firmware configured
-                                * the RFH
-                                */
-                               iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
-                       }
+       /* Alive notification via Rx interrupt will do the real work */
+       if (inta & CSR_INT_BIT_ALIVE) {
+               IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+               isr_stats->alive++;
+               if (trans->cfg->gen2) {
+                       /*
+                        * We can restock, since firmware configured
+                        * the RFH
+                        */
+                       iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
                }
+
+               handled |= CSR_INT_BIT_ALIVE;
        }
 
        /* Safely ignore these bits for debug checks below */
@@ -1916,6 +1916,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
        /* Re-enable RF_KILL if it occurred */
        else if (handled & CSR_INT_BIT_RF_KILL)
                iwl_enable_rfkill_int(trans);
+       /* Re-enable the ALIVE / Rx interrupt if it occurred */
+       else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
+               iwl_enable_fw_load_int_ctx_info(trans);
        spin_unlock(&trans_pcie->irq_lock);
 
 out:
@@ -2060,10 +2063,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
                return IRQ_NONE;
        }
 
-       if (iwl_have_debug_level(IWL_DL_ISR))
-               IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
-                             inta_fh,
+       if (iwl_have_debug_level(IWL_DL_ISR)) {
+               IWL_DEBUG_ISR(trans,
+                             "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
+                             inta_fh, trans_pcie->fh_mask,
                              iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
+               if (inta_fh & ~trans_pcie->fh_mask)
+                       IWL_DEBUG_ISR(trans,
+                                     "We got a masked interrupt (0x%08x)\n",
+                                     inta_fh & ~trans_pcie->fh_mask);
+       }
+
+       inta_fh &= trans_pcie->fh_mask;
 
        if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
            inta_fh & MSIX_FH_INT_CAUSES_Q0) {
@@ -2103,11 +2114,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
        }
 
        /* After checking FH register check HW register */
-       if (iwl_have_debug_level(IWL_DL_ISR))
+       if (iwl_have_debug_level(IWL_DL_ISR)) {
                IWL_DEBUG_ISR(trans,
-                             "ISR inta_hw 0x%08x, enabled 0x%08x\n",
-                             inta_hw,
+                             "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
+                             inta_hw, trans_pcie->hw_mask,
                              iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
+               if (inta_hw & ~trans_pcie->hw_mask)
+                       IWL_DEBUG_ISR(trans,
+                                     "We got a masked interrupt 0x%08x\n",
+                                     inta_hw & ~trans_pcie->hw_mask);
+       }
+
+       inta_hw &= trans_pcie->hw_mask;
 
        /* Alive notification via Rx interrupt will do the real work */
        if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
index 2bc67219ed3efadd09597a28fe18e39ad5f1d736..31e72e1ff1e267ea5a57780b5b503e94d9b17cdd 100644 (file)
@@ -289,6 +289,15 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
         * paging memory cannot be freed included since FW will still use it
         */
        iwl_pcie_ctxt_info_free(trans);
+
+       /*
+        * Re-enable all the interrupts, including the RF-Kill one, now that
+        * the firmware is alive.
+        */
+       iwl_enable_interrupts(trans);
+       mutex_lock(&trans_pcie->mutex);
+       iwl_pcie_check_hw_rf_kill(trans);
+       mutex_unlock(&trans_pcie->mutex);
 }
 
 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
index 93f0d387688a1314a54a8f17f4c02153dadd802d..42fdb7970cfdcb6608934ddf25c72087628199dd 100644 (file)
@@ -403,6 +403,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
                                         DMA_TO_DEVICE);
        }
 
+       meta->tbs = 0;
+
        if (trans->cfg->use_tfh) {
                struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
 
index 7cd428c0af433a7e3e0ca0cde0825006cea7d0a8..ce2dd06af62e8b987a027d7cb097b08a5147e258 100644 (file)
@@ -3502,10 +3502,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
                hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, &hwsim_genl_family,
                                  NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
-               if (!hdr)
+               if (hdr) {
+                       genl_dump_check_consistent(cb, hdr);
+                       genlmsg_end(skb, hdr);
+               } else {
                        res = -EMSGSIZE;
-               genl_dump_check_consistent(cb, hdr);
-               genlmsg_end(skb, hdr);
+               }
        }
 
 done:
index 3dbfce972c56b46a91e85cd81cd566c875859519..9e82ec12564bb6128471ca2618ffa56481da887b 100644 (file)
@@ -49,7 +49,8 @@ static const struct lbs_fw_table fw_table[] = {
        { MODEL_8388, "libertas/usb8388_v5.bin", NULL },
        { MODEL_8388, "libertas/usb8388.bin", NULL },
        { MODEL_8388, "usb8388.bin", NULL },
-       { MODEL_8682, "libertas/usb8682.bin", NULL }
+       { MODEL_8682, "libertas/usb8682.bin", NULL },
+       { 0, NULL, NULL }
 };
 
 static const struct usb_device_id if_usb_table[] = {
index 801a2d7b020ac2fee1ca80d570cf6efd9aaf1d19..a3f4a5e92105df46d99ccce441d23ddef87f2ba4 100644 (file)
@@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
                }
 
                vs_ie = (struct ieee_types_header *)vendor_ie;
+               if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
+                       IEEE_MAX_IE_SIZE)
+                       return -EINVAL;
                memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
                       vs_ie, vs_ie->len + 2);
                le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
index b025ba164412813f6b675b9f096969898c940af2..e39bb5c42c9a540c07ea475d8557cbb7383cf7ff 100644 (file)
@@ -124,6 +124,7 @@ enum {
 
 #define MWIFIEX_MAX_TOTAL_SCAN_TIME    (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
 
+#define WPA_GTK_OUI_OFFSET                             2
 #define RSN_GTK_OUI_OFFSET                             2
 
 #define MWIFIEX_OUI_NOT_PRESENT                        0
index 6dd771ce68a356f24d695954af79d985b31450fa..ed27147efcb37b9d8318b2aaac2d8620ac7adbd6 100644 (file)
@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
        u8 ret = MWIFIEX_OUI_NOT_PRESENT;
 
        if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
-               iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
+               iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
+                                           WPA_GTK_OUI_OFFSET);
                oui = &mwifiex_wpa_oui[cipher][0];
                ret = mwifiex_search_oui_in_ie(iebody, oui);
                if (ret)
index 18f7d9bf30b28edc19acef376675b15d726bcc00..0939a8c8f3ab5cf74a55ae08c3feadc1c486f3ca 100644 (file)
@@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
 
        rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
        if (rate_ie) {
+               if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
+                       return;
                memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
                rate_len = rate_ie->len;
        }
@@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
        rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
                                           params->beacon.tail,
                                           params->beacon.tail_len);
-       if (rate_ie)
+       if (rate_ie) {
+               if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
+                       return;
                memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
+       }
 
        return;
 }
@@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
                                            params->beacon.tail_len);
        if (vendor_ie) {
                wmm_ie = vendor_ie;
+               if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
+                       return;
                memcpy(&bss_cfg->wmm_info, wmm_ie +
                       sizeof(struct ieee_types_header), *(wmm_ie + 1));
                priv->wmm_enabled = 1;
index 0a3e046d78db376d91fe99b603ccffe336e3a51a..da2ba51dec352d498422656e784664a894b81a83 100644 (file)
@@ -369,7 +369,7 @@ static void mt76x0_stop_hardware(struct mt76x0_dev *dev)
        mt76x0_chip_onoff(dev, false, false);
 }
 
-int mt76x0_init_hardware(struct mt76x0_dev *dev)
+int mt76x0_init_hardware(struct mt76x0_dev *dev, bool reset)
 {
        static const u16 beacon_offsets[16] = {
                /* 512 byte per beacon */
@@ -382,7 +382,7 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev)
 
        dev->beacon_offsets = beacon_offsets;
 
-       mt76x0_chip_onoff(dev, true, true);
+       mt76x0_chip_onoff(dev, true, reset);
 
        ret = mt76x0_wait_asic_ready(dev);
        if (ret)
index fc9857f61771ccbc9d712c94402c455f498b8853..f9dfe5097b099cf27fca0e27c3ae8e0e17e322fc 100644 (file)
@@ -279,7 +279,7 @@ void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr);
 
 /* Init */
 struct mt76x0_dev *mt76x0_alloc_device(struct device *dev);
-int mt76x0_init_hardware(struct mt76x0_dev *dev);
+int mt76x0_init_hardware(struct mt76x0_dev *dev, bool reset);
 int mt76x0_register_device(struct mt76x0_dev *dev);
 void mt76x0_cleanup(struct mt76x0_dev *dev);
 void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);
index 54ae1f113be23dd51b1ab7fafb79bd3d991af893..5aacb1f6a841d0720b5372112b2f3b325b598597 100644 (file)
@@ -300,7 +300,7 @@ static int mt76x0_probe(struct usb_interface *usb_intf,
        if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
                dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n");
 
-       ret = mt76x0_init_hardware(dev);
+       ret = mt76x0_init_hardware(dev, true);
        if (ret)
                goto err;
 
@@ -354,7 +354,7 @@ static int mt76x0_resume(struct usb_interface *usb_intf)
        struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
        int ret;
 
-       ret = mt76x0_init_hardware(dev);
+       ret = mt76x0_init_hardware(dev, false);
        if (ret)
                return ret;
 
index 6542644bc3259a6e12cff1d05242895eb81adaf3..cec31f0c3017b1b65084c4b9569a73e0514d45e7 100644 (file)
@@ -402,7 +402,7 @@ void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
                ccmp_pn[6] = pn >> 32;
                ccmp_pn[7] = pn >> 40;
                txwi->iv = *((__le32 *)&ccmp_pn[0]);
-               txwi->eiv = *((__le32 *)&ccmp_pn[1]);
+               txwi->eiv = *((__le32 *)&ccmp_pn[4]);
        }
 
        spin_lock_bh(&dev->mt76.lock);
index 7f3e3983b781d72af83b0fc47006b1a2ff34078c..47cebb2ec05c5a3939771b2c041e5c74ac8ca3f9 100644 (file)
@@ -193,10 +193,23 @@ static void mt7601u_complete_rx(struct urb *urb)
        struct mt7601u_rx_queue *q = &dev->rx_q;
        unsigned long flags;
 
-       spin_lock_irqsave(&dev->rx_lock, flags);
+       /* do no schedule rx tasklet if urb has been unlinked
+        * or the device has been removed
+        */
+       switch (urb->status) {
+       case -ECONNRESET:
+       case -ESHUTDOWN:
+       case -ENOENT:
+               return;
+       default:
+               dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
+                                   urb->status);
+               /* fall through */
+       case 0:
+               break;
+       }
 
-       if (mt7601u_urb_has_error(urb))
-               dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
+       spin_lock_irqsave(&dev->rx_lock, flags);
        if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
                goto out;
 
@@ -228,14 +241,25 @@ static void mt7601u_complete_tx(struct urb *urb)
        struct sk_buff *skb;
        unsigned long flags;
 
-       spin_lock_irqsave(&dev->tx_lock, flags);
+       switch (urb->status) {
+       case -ECONNRESET:
+       case -ESHUTDOWN:
+       case -ENOENT:
+               return;
+       default:
+               dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
+                                   urb->status);
+               /* fall through */
+       case 0:
+               break;
+       }
 
-       if (mt7601u_urb_has_error(urb))
-               dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
+       spin_lock_irqsave(&dev->tx_lock, flags);
        if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
                goto out;
 
        skb = q->e[q->start].skb;
+       q->e[q->start].skb = NULL;
        trace_mt_tx_dma_done(dev, skb);
 
        __skb_queue_tail(&dev->tx_skb_done, skb);
@@ -363,19 +387,9 @@ int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
 static void mt7601u_kill_rx(struct mt7601u_dev *dev)
 {
        int i;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->rx_lock, flags);
-
-       for (i = 0; i < dev->rx_q.entries; i++) {
-               int next = dev->rx_q.end;
 
-               spin_unlock_irqrestore(&dev->rx_lock, flags);
-               usb_poison_urb(dev->rx_q.e[next].urb);
-               spin_lock_irqsave(&dev->rx_lock, flags);
-       }
-
-       spin_unlock_irqrestore(&dev->rx_lock, flags);
+       for (i = 0; i < dev->rx_q.entries; i++)
+               usb_poison_urb(dev->rx_q.e[i].urb);
 }
 
 static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
@@ -445,10 +459,10 @@ static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
 {
        int i;
 
-       WARN_ON(q->used);
-
        for (i = 0; i < q->entries; i++)  {
                usb_poison_urb(q->e[i].urb);
+               if (q->e[i].skb)
+                       mt7601u_tx_status(q->dev, q->e[i].skb);
                usb_free_urb(q->e[i].urb);
        }
 }
index 3600e911a63e85d23ecc15f238c42db66865472d..4d81c45722fbb756dc3c91fa8559772dc8738fda 100644 (file)
@@ -117,9 +117,9 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
        info->status.rates[0].idx = -1;
        info->flags |= IEEE80211_TX_STAT_ACK;
 
-       spin_lock(&dev->mac_lock);
+       spin_lock_bh(&dev->mac_lock);
        ieee80211_tx_status(dev->hw, skb);
-       spin_unlock(&dev->mac_lock);
+       spin_unlock_bh(&dev->mac_lock);
 }
 
 static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
index 2ac5004d7a401ab5d1255126c5c0a00a5e233705..5adb939afee8832435c6ed07b408fa23ac8cd65c 100644 (file)
@@ -1081,13 +1081,13 @@ int rtl_usb_probe(struct usb_interface *intf,
        rtlpriv->cfg->ops->read_eeprom_info(hw);
        err = _rtl_usb_init(hw);
        if (err)
-               goto error_out;
+               goto error_out2;
        rtl_usb_init_sw(hw);
        /* Init mac80211 sw */
        err = rtl_init_core(hw);
        if (err) {
                pr_err("Can't allocate sw for mac80211\n");
-               goto error_out;
+               goto error_out2;
        }
        if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
                pr_err("Can't init_sw_vars\n");
@@ -1108,6 +1108,7 @@ int rtl_usb_probe(struct usb_interface *intf,
 
 error_out:
        rtl_deinit_core(hw);
+error_out2:
        _rtl_usb_io_handler_release(hw);
        usb_put_dev(udev);
        complete(&rtlpriv->firmware_loading_complete);
index f360690396dd02a536ac7664a2c477028b3c2524..14e56bee05484c33bc88db5f9ecfdf533684487a 100644 (file)
@@ -643,7 +643,6 @@ fail_rx:
        kfree(rsi_dev->tx_buffer);
 
 fail_eps:
-       kfree(rsi_dev);
 
        return status;
 }
index 82add0ac4a5f67590b9d39d4668a5bb747a38fb4..27b6b141cb71f3be8c3f64d6fd41387b9c70bbe8 100644 (file)
@@ -718,7 +718,6 @@ err_unmap:
        xenvif_unmap_frontend_data_rings(queue);
        netif_napi_del(&queue->napi);
 err:
-       module_put(THIS_MODULE);
        return err;
 }
 
index d5081ffdc8f035a5ae3f7b1ea7aef53b521b69e3..1c849106b7935274e79390fb7d2f748166b7d12d 100644 (file)
@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
                        nskb = xenvif_alloc_skb(0);
                        if (unlikely(nskb == NULL)) {
+                               skb_shinfo(skb)->nr_frags = 0;
                                kfree_skb(skb);
                                xenvif_tx_err(queue, &txreq, extra_count, idx);
                                if (net_ratelimit())
@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                        if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
                                /* Failure in xenvif_set_skb_gso is fatal. */
+                               skb_shinfo(skb)->nr_frags = 0;
                                kfree_skb(skb);
                                kfree_skb(nskb);
                                break;
index 5b97cc946d70a695cb68e1f3e2f93328c29d8bc2..6b4675a9494b2c3b8471c33a9da37278de418b74 100644 (file)
@@ -890,9 +890,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
        return 0;
 }
 
-static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
-                                 struct sk_buff *skb,
-                                 struct sk_buff_head *list)
+static int xennet_fill_frags(struct netfront_queue *queue,
+                            struct sk_buff *skb,
+                            struct sk_buff_head *list)
 {
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *nskb;
@@ -909,9 +909,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-                       queue->rx.rsp_cons = ++cons;
+                       queue->rx.rsp_cons = ++cons + skb_queue_len(list);
                        kfree_skb(nskb);
-                       return ~0U;
+                       return -ENOENT;
                }
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
@@ -922,7 +922,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                kfree_skb(nskb);
        }
 
-       return cons;
+       queue->rx.rsp_cons = cons;
+
+       return 0;
 }
 
 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
@@ -1048,8 +1050,7 @@ err:
                skb->data_len = rx->status;
                skb->len += rx->status;
 
-               i = xennet_fill_frags(queue, skb, &tmpq);
-               if (unlikely(i == ~0U))
+               if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
                        goto err;
 
                if (rx->flags & XEN_NETRXF_csum_blank)
@@ -1059,7 +1060,7 @@ err:
 
                __skb_queue_tail(&rxq, skb);
 
-               queue->rx.rsp_cons = ++i;
+               i = ++queue->rx.rsp_cons;
                work_done++;
        }
 
index e65d027b91fafbbd752970cff0afdc9e8cfb0d7c..529be35ac1782a62e9f18b71905fa597ea52a222 100644 (file)
@@ -244,7 +244,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
        /* Reset possible fault of previous session */
        clear_bit(NFCMRVL_PHY_ERROR, &priv->flags);
 
-       if (priv->config.reset_n_io) {
+       if (gpio_is_valid(priv->config.reset_n_io)) {
                nfc_info(priv->dev, "reset the chip\n");
                gpio_set_value(priv->config.reset_n_io, 0);
                usleep_range(5000, 10000);
@@ -255,7 +255,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
 
 void nfcmrvl_chip_halt(struct nfcmrvl_private *priv)
 {
-       if (priv->config.reset_n_io)
+       if (gpio_is_valid(priv->config.reset_n_io))
                gpio_set_value(priv->config.reset_n_io, 0);
 }
 
index 9a22056e8d9eea921b59a588318be8c47b6b1046..e5a622ce4b9517d299170f20d35bde2bd5e003da 100644 (file)
@@ -26,7 +26,7 @@
 static unsigned int hci_muxed;
 static unsigned int flow_control;
 static unsigned int break_control;
-static unsigned int reset_n_io;
+static int reset_n_io = -EINVAL;
 
 /*
 ** NFCMRVL NCI OPS
@@ -231,5 +231,5 @@ MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal.");
 module_param(hci_muxed, uint, 0);
 MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one.");
 
-module_param(reset_n_io, uint, 0);
+module_param(reset_n_io, int, 0);
 MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
index 945cc903d8f1123fd63a13c42564a3dfe7465a12..888e298f610b8ed7140c85c5d0355d55007b6cfc 100644 (file)
@@ -305,6 +305,7 @@ static int nfcmrvl_probe(struct usb_interface *intf,
 
        /* No configuration for USB */
        memset(&config, 0, sizeof(config));
+       config.reset_n_io = -EINVAL;
 
        nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
 
index f55d082ace71558c8bf23d1813d70da18c9c5a0d..5d6e7e931bc6cd06bc162a2672acba679d60d782 100644 (file)
@@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
 
                transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
                                            skb->len - 2, GFP_KERNEL);
+               if (!transaction)
+                       return -ENOMEM;
 
                transaction->aid_len = skb->data[1];
                memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
index 4bed9e842db38126859d74d4d585dee66ea80d33..fd967a38a94a5da4c4687fd9d058120887c3f8d7 100644 (file)
@@ -328,6 +328,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
 
                transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
                                                   skb->len - 2, GFP_KERNEL);
+               if (!transaction)
+                       return -ENOMEM;
 
                transaction->aid_len = skb->data[1];
                memcpy(transaction->aid, &skb->data[2],
index e51b581fd102e8db1f0b0a0ed597f789a4ba9929..623d43a2bec004f79528364b6b58622e02995869 100644 (file)
@@ -1,4 +1,5 @@
 source "drivers/ntb/hw/amd/Kconfig"
 source "drivers/ntb/hw/idt/Kconfig"
 source "drivers/ntb/hw/intel/Kconfig"
+source "drivers/ntb/hw/epf/Kconfig"
 source "drivers/ntb/hw/mscc/Kconfig"
index 923c442db750a16caa3e1bad74cc168ecb9fcfc0..48f672ca857a5ec2c217d6fe2718d63246579f1c 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_NTB_AMD)  += amd/
 obj-$(CONFIG_NTB_IDT)  += idt/
 obj-$(CONFIG_NTB_INTEL)        += intel/
+obj-$(CONFIG_NTB_EPF)  += epf/
 obj-$(CONFIG_NTB_SWITCHTEC) += mscc/
diff --git a/drivers/ntb/hw/epf/Kconfig b/drivers/ntb/hw/epf/Kconfig
new file mode 100644 (file)
index 0000000..3144855
--- /dev/null
@@ -0,0 +1,5 @@
+config NTB_EPF
+       tristate "Generic EPF Non-Transparent Bridge support"
+       help
+         This driver supports EPF NTB on configurable endpoint.
+         If unsure, say N.
diff --git a/drivers/ntb/hw/epf/Makefile b/drivers/ntb/hw/epf/Makefile
new file mode 100644 (file)
index 0000000..2f560a4
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_EPF) += ntb_hw_epf.o
diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c
new file mode 100644 (file)
index 0000000..0792b9e
--- /dev/null
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Host side endpoint driver to implement Non-Transparent Bridge functionality
+ *
+ * Copyright (C) 2019 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/ntb.h>
+
+#define NTB_EPF_COMMAND                0x0
+#define CMD_CONFIGURE_DOORBELL 1
+#define CMD_CONFIGURE_MW       2
+#define CMD_LINK_UP            3
+
+#define NTB_EPF_ARGUMENT       0x4
+
+#define NTB_EPF_STATUS         0x8
+#define COMMAND_STATUS_OK      BIT(0)
+#define COMMAND_STATUS_ERROR   BIT(1)
+#define LINK_STATUS_UP         BIT(2)
+
+#define NTB_EPF_TOPOLOGY       0xc
+#define NTB_EPF_ADDR           0x10
+#define NTB_EPF_SIZE           0x18
+#define NTB_EPF_MW1_OFFSET     0x1c
+#define NTB_EPF_MW_COUNT       0x20
+#define NTB_EPF_SPAD_OFFSET    0x24
+#define NTB_EPF_SPAD_COUNT     0x28
+#define NTB_EPF_DB_ENTRY_SIZE  0x2c
+#define NTB_EPF_DB_DATA(n)     (0x30 + (n) * 4)
+
+#define NTB_MIN_DB_COUNT       2
+#define NTB_MAX_DB_COUNT       32
+#define NTB_MW_OFFSET          2
+
+/* Zero-based PCI BAR numbers, used with pci_iomap()/pci_resource_*(). */
+enum pci_barno {
+       BAR_0,
+       BAR_1,
+       BAR_2,
+       BAR_3,
+       BAR_4,
+       BAR_5,
+};
+
+/* Host-side state for one EPF NTB PCI device. */
+struct ntb_epf_dev {
+       struct ntb_dev ntb;     /* embedded NTB core device */
+
+       /* BAR layout, taken from the pci_device_id match data */
+       enum pci_barno ctrl_reg_bar;    /* control + self scratchpad region */
+       enum pci_barno peer_spad_reg_bar;       /* peer scratchpad region */
+       enum pci_barno db_reg_bar;      /* doorbell region (and MW '1') */
+
+       unsigned int mw_count;          /* memory windows advertised by the EP */
+       unsigned int spad_count;        /* scratchpads advertised by the EP */
+       unsigned int db_count;          /* doorbells == MSI vectors allocated */
+
+       /* iomappings of the three BARs above */
+       void __iomem *ctrl_reg;
+       void __iomem *db_reg;
+       void __iomem *peer_spad_reg;
+
+       /* NOTE(review): self_spad/peer_spad are never used in this file */
+       unsigned int self_spad;
+       unsigned int peer_spad;
+
+       int db_val;             /* last doorbell recorded by the ISR */
+       u64 db_valid_mask;      /* BIT_ULL(db_count) - 1 */
+};
+
+#define ntb_ndev(__ntb) container_of(__ntb, struct ntb_epf_dev, ntb)
+
+/* Per-device BAR layout, attached via pci_device_id.driver_data. */
+struct ntb_epf_data {
+       /* BAR that contains both control region and self spad region */
+       enum pci_barno ctrl_reg_bar;
+       /* BAR that contains peer spad region */
+       enum pci_barno peer_spad_reg_bar;
+       /* BAR that contains Doorbell region and Memory window '1' */
+       enum pci_barno db_reg_bar;
+};
+
+/* Read a 32-bit register at byte @offset in the control region. */
+static inline u32 ntb_epf_ctrl_readl(struct ntb_epf_dev *ndev, u32 offset)
+{
+       return readl(ndev->ctrl_reg + offset);
+}
+
+/*
+ * Write @val to the 32-bit register at byte @offset in the control region.
+ * (Dropped the bogus 'return writel(...)': returning a void expression
+ * from a void function is invalid C and only compiles as a GNU extension.)
+ */
+static inline void ntb_epf_ctrl_writel(struct ntb_epf_dev *ndev, u32 offset,
+                                      u32 val)
+{
+       writel(val, ndev->ctrl_reg + offset);
+}
+
+/*
+ * ntb_epf_send_command() - issue a command to the endpoint and wait for ack
+ * @ndev: NTB EPF device
+ * @command: one of the CMD_* opcodes
+ * @argument: command-specific argument
+ *
+ * The argument is written first so it is in place when the command write
+ * triggers the endpoint.  The endpoint acknowledges by setting
+ * COMMAND_STATUS_OK or COMMAND_STATUS_ERROR in NTB_EPF_STATUS.
+ *
+ * Return: 0 on success, -EINVAL if the endpoint reports an error,
+ * -ETIMEDOUT (with a WARN) if no ack arrives within 50ms.
+ */
+static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command,
+                               u32 argument)
+{
+       ktime_t timeout;
+       bool timedout;
+       int ret = 0;
+       u32 status;
+
+       ntb_epf_ctrl_writel(ndev, NTB_EPF_ARGUMENT, argument);
+       ntb_epf_ctrl_writel(ndev, NTB_EPF_COMMAND, command);
+
+       /* wait 50ms */
+       timeout = ktime_add_ms(ktime_get(), 50);
+       while (1) {
+               /* sample the deadline before the read so a hit counts */
+               timedout = ktime_after(ktime_get(), timeout);
+               status = ntb_epf_ctrl_readl(ndev, NTB_EPF_STATUS);
+
+               if (status & COMMAND_STATUS_ERROR) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (status & COMMAND_STATUS_OK)
+                       break;
+
+               if (WARN_ON(timedout)) {
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+
+               usleep_range(5, 10);
+       }
+
+       /* clear the ack bits for the next command; keep e.g. LINK_STATUS_UP */
+       status &= ~(COMMAND_STATUS_ERROR | COMMAND_STATUS_OK);
+       ntb_epf_ctrl_writel(ndev, NTB_EPF_STATUS, status);
+
+       return ret;
+}
+
+/*
+ * Translate memory window index @idx to its PCI BAR number.  Memory
+ * windows start at BAR2 (BAR0/BAR1 hold the control and peer scratchpad
+ * regions), so MW 'n' lives in BAR 'n + NTB_MW_OFFSET'.
+ *
+ * Return: BAR number, or -EINVAL for an out-of-range index.
+ * (Fixed off-by-one: valid indices are 0 .. mw_count - 1, but the old
+ * check 'idx > mw_count' let idx == mw_count through.)
+ */
+static int ntb_epf_mw_to_bar(struct ntb_epf_dev *ndev, int idx)
+{
+       if (idx < 0 || idx >= ndev->mw_count)
+               return -EINVAL;
+
+       return idx + NTB_MW_OFFSET;
+}
+
+/* Number of memory windows towards @pidx (only the default peer exists). */
+static int ntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
+{
+       if (pidx != NTB_DEF_PEER_IDX)
+               return -EINVAL;
+
+       return ntb_ndev(ntb)->mw_count;
+}
+
+/*
+ * Alignment constraints for memory window @idx: 4K address alignment,
+ * byte-granular size, size capped by the backing BAR's length.
+ * Output pointers may be NULL when the caller is not interested.
+ */
+static int ntb_epf_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
+                               resource_size_t *addr_align,
+                               resource_size_t *size_align,
+                               resource_size_t *size_max)
+{
+       struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+       int bar;
+
+       if (pidx != NTB_DEF_PEER_IDX)
+               return -EINVAL;
+
+       bar = ntb_epf_mw_to_bar(ndev, idx);
+       if (bar < 0)
+               return bar;
+
+       if (addr_align)
+               *addr_align = SZ_4K;
+
+       if (size_align)
+               *size_align = 1;
+
+       if (size_max)
+               *size_max = pci_resource_len(ndev->ntb.pdev, bar);
+
+       return 0;
+}
+
+/*
+ * Report link state from the LINK_STATUS_UP bit of NTB_EPF_STATUS.
+ * NOTE(review): @speed and @width are never populated — confirm callers
+ * tolerate unset values.
+ */
+static u64 ntb_epf_link_is_up(struct ntb_dev *ntb,
+                             enum ntb_speed *speed,
+                             enum ntb_width *width)
+{
+       struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+       u32 status;
+
+       status = ntb_epf_ctrl_readl(ndev, NTB_EPF_STATUS);
+
+       return !!(status & LINK_STATUS_UP);
+}
+
+/*
+ * Read local scratchpad @idx.  The self-spad array lives inside the
+ * control BAR at the offset published in NTB_EPF_SPAD_OFFSET.
+ * Returns 0 for an out-of-range index.
+ */
+static u32 ntb_epf_spad_read(struct ntb_dev *ntb, int idx)
+{
+       struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+       u32 offset;
+
+       if (idx < 0 || idx >= ndev->spad_count)
+               return 0;
+
+       offset = ntb_epf_ctrl_readl(ndev, NTB_EPF_SPAD_OFFSET);
+       offset += (idx << 2);   /* 4 bytes per scratchpad */
+
+       return ntb_epf_ctrl_readl(ndev, offset);
+}
+
+/* Write @val to local scratchpad @idx (see ntb_epf_spad_read for layout). */
+static int ntb_epf_spad_write(struct ntb_dev *ntb,
+                             int idx, u32 val)
+{
+       struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+       u32 offset;
+
+       if (idx < 0 || idx >= ndev->spad_count)
+               return -EINVAL;
+
+       offset = ntb_epf_ctrl_readl(ndev, NTB_EPF_SPAD_OFFSET);
+       offset += (idx << 2);
+       ntb_epf_ctrl_writel(ndev, offset, val);
+
+       return 0;
+}
+
+/*
+ * Read peer scratchpad @idx from the dedicated peer-spad BAR.
+ * NOTE(review): returns -EINVAL through a u32 return type (unlike
+ * ntb_epf_spad_read which returns 0), so the error is indistinguishable
+ * from a valid scratchpad value — confirm this is intended.
+ */
+static u32 ntb_epf_peer_spad_read(struct ntb_dev *ntb, int pidx, int idx)
+{
+       struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+       u32 offset;
+
+       if (idx < 0 || idx >= ndev->spad_count)
+               return -EINVAL;
+
+       offset = (idx << 2);
+       return readl(ndev->peer_spad_reg + offset);
+}
+
+/* Write @val to peer scratchpad @idx via the peer-spad BAR. */
+static int ntb_epf_peer_spad_write(struct ntb_dev *ntb, int pidx,
+                                  int idx, u32 val)
+{
+       struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+       u32 offset;
+
+       if (idx < 0 || idx >= ndev->spad_count)
+               return -EINVAL;
+
+       offset = (idx << 2);
+       writel(val, ndev->peer_spad_reg + offset);
+
+       return 0;
+}
+
+/*
+ * Ask the endpoint to bring the link up.  @max_speed/@max_width are
+ * accepted for the ntb_dev_ops signature but not forwarded to the EP.
+ */
+static int ntb_epf_link_enable(struct ntb_dev *ntb,
+                              enum ntb_speed max_speed,
+                              enum ntb_width max_width)
+{
+       struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+       struct device *dev = &ntb->pdev->dev;
+       int ret;
+
+       ret = ntb_epf_send_command(ndev, CMD_LINK_UP, 0);
+       if (ret) {
+               dev_err(dev, "Fail to enable link\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Link teardown is a no-op: no CMD_LINK_DOWN exists in the command set
+ * visible here.
+ */
+static int ntb_epf_link_disable(struct ntb_dev *ntb)
+{
+       return 0;
+}
+
+/*
+ * Shared handler for all MSI vectors.  Vector 0 signals a link event;
+ * every other vector is a doorbell.  db_val records the 1-based vector
+ * number for ntb_epf_db_read().
+ *
+ * NOTE(review): irq_no is derived as 'irq - pdev->irq', which assumes the
+ * allocated MSI vectors are numbered consecutively from pdev->irq —
+ * confirm for the target platform.
+ */
+static irqreturn_t ndev_vec_isr(int irq, void *dev)
+{
+       struct ntb_epf_dev *ndev = dev;
+       int irq_no;
+
+       irq_no = irq - ndev->ntb.pdev->irq;
+       ndev->db_val = irq_no + 1;
+
+       if (irq_no == 0)
+               ntb_link_event(&ndev->ntb);
+       else
+               ntb_db_event(&ndev->ntb, irq_no);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Allocate between @msi_min and @msi_max MSI vectors, hook ndev_vec_isr
+ * to each, and tell the endpoint how many doorbells were configured.
+ * Vector 0 doubles as the link-event interrupt (see ndev_vec_isr).
+ *
+ * NOTE(review): on failure, pci_free_irq_vectors() runs while the
+ * devm-requested IRQ handlers are still registered (they are only freed
+ * on driver detach) — confirm this ordering is safe.
+ */
+static int ntb_epf_init_isr(struct ntb_epf_dev *ndev, int msi_min, int msi_max)
+{
+       struct pci_dev *pdev = ndev->ntb.pdev;
+       struct device *dev = &pdev->dev;
+       int irq;
+       int ret;
+       int i;
+
+       irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max, PCI_IRQ_MSI);
+       if (irq < 0) {
+               dev_err(dev, "Failed to get MSI interrupts\n");
+               return irq;
+       }
+
+       for (i = 0; i < irq; i++) {
+               ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
+                                      ndev_vec_isr, IRQF_SHARED, "ntb_epf",
+                                      ndev);
+               if (ret) {
+                       dev_err(dev, "Failed to request irq\n");
+                       goto err_request_irq;
+               }
+       }
+
+       ndev->db_count = irq;
+
+       /* let the endpoint size its doorbell table to the vector count */
+       ret = ntb_epf_send_command(ndev, CMD_CONFIGURE_DOORBELL, irq);
+       if (ret) {
+               dev_err(dev, "Failed to configure doorbell\n");
+               goto err_request_irq;
+       }
+
+       return 0;
+
+err_request_irq:
+       pci_free_irq_vectors(pdev);
+
+       return ret;
+}
+
+/* Peer exposes the same number of memory windows as the local side. */
+static int ntb_epf_peer_mw_count(struct ntb_dev *ntb)
+{
+       return ntb_ndev(ntb)->mw_count;
+}
+
+/* Scratchpad count as advertised by the endpoint at init time. */
+static int ntb_epf_spad_count(struct ntb_dev *ntb)
+{
+       return ntb_ndev(ntb)->spad_count;
+}
+
+/* Bitmask of usable doorbells: BIT_ULL(db_count) - 1. */
+static u64 ntb_epf_db_valid_mask(struct ntb_dev *ntb)
+{
+       return ntb_ndev(ntb)->db_valid_mask;
+}
+
+/* Doorbell masking is not supported by this hardware; pretend success. */
+static int ntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+       return 0;
+}
+
+/*
+ * Program inbound translation for memory window @idx: hand the endpoint
+ * the host DMA address and size, then issue CMD_CONFIGURE_MW.
+ *
+ * Fixed: the return value of ntb_epf_send_command() was dropped, so a
+ * rejected/timed-out configuration was reported as success.
+ *
+ * NOTE(review): NTB_EPF_ADDR spans 8 bytes (0x10-0x17, SIZE is at 0x18)
+ * but only the low 32 bits of @addr are written here — confirm endpoint
+ * behaviour for addresses above 4G.
+ */
+static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
+                               dma_addr_t addr, resource_size_t size)
+{
+       struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+       struct device *dev = &ntb->pdev->dev;
+       resource_size_t mw_size;
+       int ret;
+       int bar;
+
+       if (pidx != NTB_DEF_PEER_IDX)
+               return -EINVAL;
+
+       bar = idx + NTB_MW_OFFSET;
+
+       mw_size = pci_resource_len(ntb->pdev, bar);
+
+       if (size > mw_size) {
+               dev_err(dev, "Size is greater than the MW size\n");
+               return -EINVAL;
+       }
+
+       ntb_epf_ctrl_writel(ndev, NTB_EPF_ADDR, addr);
+       ntb_epf_ctrl_writel(ndev, NTB_EPF_SIZE, size);
+
+       ret = ntb_epf_send_command(ndev, CMD_CONFIGURE_MW, idx);
+       if (ret)
+               dev_err(dev, "Failed to configure memory window\n");
+
+       return ret;
+}
+
+/*
+ * Report the outbound (peer) address and size of memory window @idx.
+ * MW 0 shares its BAR with the doorbell region, so it starts at the
+ * offset the endpoint publishes in NTB_EPF_MW1_OFFSET; the other windows
+ * occupy their whole BAR.
+ */
+static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
+                                   phys_addr_t *base, resource_size_t *size)
+{
+       struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+       u32 offset = 0;
+       int bar;
+
+       if (idx == 0)
+               offset = ntb_epf_ctrl_readl(ndev, NTB_EPF_MW1_OFFSET);
+
+       bar = idx + NTB_MW_OFFSET;
+
+       if (base)
+               *base = pci_resource_start(ndev->ntb.pdev, bar) + offset;
+
+       if (size)
+               *size = pci_resource_len(ndev->ntb.pdev, bar) - offset;
+
+       return 0;
+}
+
+/*
+ * Ring the peer's doorbell for the lowest bit set in @db_bits by writing
+ * the MSI data word (published per-entry in NTB_EPF_DB_DATA) into the
+ * matching slot of the doorbell BAR.
+ *
+ * NOTE(review): ffs() already returns a 1-based bit index, so the extra
+ * '+ 1' maps doorbell bit 0 to entry 2 — confirm this matches the
+ * endpoint-side doorbell layout (vector 0 is the link event).
+ */
+static int ntb_epf_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+       struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+       u32 interrupt_num = ffs(db_bits) + 1;
+       u32 db_entry_size;
+       u32 db_data;
+
+       if (interrupt_num > ndev->db_count)
+               return -EINVAL;
+
+       db_entry_size = ntb_epf_ctrl_readl(ndev, NTB_EPF_DB_ENTRY_SIZE);
+
+       db_data = readl(ndev->ctrl_reg + NTB_EPF_DB_DATA(interrupt_num));
+       writel(db_data, ndev->db_reg + (db_entry_size * interrupt_num));
+
+       return 0;
+}
+
+/* Return the last doorbell value recorded by the ISR (not a bitmask). */
+static u64 ntb_epf_db_read(struct ntb_dev *ntb)
+{
+       struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+
+       return ndev->db_val;
+}
+
+/* Doorbell mask clearing is not supported; pretend success. */
+static int ntb_epf_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+       return 0;
+}
+
+/* Reset the recorded doorbell value; @db_bits is ignored. */
+static int ntb_epf_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+       struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+
+       ndev->db_val = 0;
+
+       return 0;
+}
+
+/* NTB core callbacks implemented by this host-side EPF driver. */
+static const struct ntb_dev_ops ntb_epf_ops = {
+       .mw_count               = ntb_epf_mw_count,
+       .spad_count             = ntb_epf_spad_count,
+       .peer_mw_count          = ntb_epf_peer_mw_count,
+       .db_valid_mask          = ntb_epf_db_valid_mask,
+       .db_set_mask            = ntb_epf_db_set_mask,
+       .mw_set_trans           = ntb_epf_mw_set_trans,
+       .peer_mw_get_addr       = ntb_epf_peer_mw_get_addr,
+       .link_enable            = ntb_epf_link_enable,
+       .spad_read              = ntb_epf_spad_read,
+       .spad_write             = ntb_epf_spad_write,
+       .peer_spad_read         = ntb_epf_peer_spad_read,
+       .peer_spad_write        = ntb_epf_peer_spad_write,
+       .peer_db_set            = ntb_epf_peer_db_set,
+       .db_read                = ntb_epf_db_read,
+       .mw_get_align           = ntb_epf_mw_get_align,
+       .link_is_up             = ntb_epf_link_is_up,
+       .db_clear_mask          = ntb_epf_db_clear_mask,
+       .db_clear               = ntb_epf_db_clear,
+       .link_disable           = ntb_epf_link_disable,
+};
+
+/* Tie the embedded ntb_dev to @pdev and install the callback table. */
+static inline void ntb_epf_init_struct(struct ntb_epf_dev *ndev,
+                                      struct pci_dev *pdev)
+{
+       ndev->ntb.pdev = pdev;
+       ndev->ntb.topo = NTB_TOPO_NONE;
+       ndev->ntb.ops = &ntb_epf_ops;
+}
+
+/*
+ * One-time device init: allocate MSI vectors / doorbells, then read the
+ * memory-window and scratchpad counts the endpoint advertises in its
+ * control registers.  Requires ctrl_reg to be mapped already.
+ */
+static int ntb_epf_init_dev(struct ntb_epf_dev *ndev)
+{
+       struct pci_dev *pdev = ndev->ntb.pdev;
+       struct device *dev = &pdev->dev;
+       int ret;
+
+       ret = ntb_epf_init_isr(ndev, NTB_MIN_DB_COUNT, NTB_MAX_DB_COUNT);
+       if (ret) {
+               dev_err(dev, "Failed to init ISR\n");
+               return ret;
+       }
+
+       ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+       ndev->mw_count = ntb_epf_ctrl_readl(ndev, NTB_EPF_MW_COUNT);
+       ndev->spad_count = ntb_epf_ctrl_readl(ndev, NTB_EPF_SPAD_COUNT);
+
+       return 0;
+}
+
+/*
+ * Enable the PCI device, claim its regions, set DMA masks, and iomap the
+ * three regions at the BARs selected from the match data.
+ *
+ * Fixes vs. the original:
+ *  - use the configured ctrl_reg_bar/peer_spad_reg_bar/db_reg_bar fields
+ *    instead of hardcoded BARs 0/1/2 (the fields were set but ignored);
+ *  - error paths now release the PCI regions and unmap any BARs that
+ *    were already mapped (previously both leaked).
+ */
+static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
+                           struct pci_dev *pdev)
+{
+       struct device *dev = &pdev->dev;
+       int ret;
+
+       pci_set_drvdata(pdev, ndev);
+
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               dev_err(dev, "Cannot enable PCI device\n");
+               goto err_pci_enable;
+       }
+
+       ret = pci_request_regions(pdev, "ntb");
+       if (ret) {
+               dev_err(dev, "Cannot obtain PCI resources\n");
+               goto err_pci_regions;
+       }
+
+       pci_set_master(pdev);
+
+       /* prefer 64-bit DMA, fall back to 32-bit with a warning */
+       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+       if (ret) {
+               ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+               if (ret) {
+                       dev_err(dev, "Cannot set DMA mask\n");
+                       goto err_dma_mask;
+               }
+               dev_warn(&pdev->dev, "Cannot DMA highmem\n");
+       }
+
+       /* mirror the mask onto the NTB device used by upper layers */
+       ret = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
+                                          dma_get_mask(&pdev->dev));
+       if (ret) {
+               dev_err(dev, "Cannot set DMA mask\n");
+               goto err_dma_mask;
+       }
+
+       ndev->ctrl_reg = pci_iomap(pdev, ndev->ctrl_reg_bar, 0);
+       if (!ndev->ctrl_reg) {
+               ret = -EIO;
+               goto err_dma_mask;
+       }
+
+       ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
+       if (!ndev->peer_spad_reg) {
+               ret = -EIO;
+               goto err_iounmap_ctrl;
+       }
+
+       ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0);
+       if (!ndev->db_reg) {
+               ret = -EIO;
+               goto err_iounmap_spad;
+       }
+
+       return 0;
+
+err_iounmap_spad:
+       pci_iounmap(pdev, ndev->peer_spad_reg);
+
+err_iounmap_ctrl:
+       pci_iounmap(pdev, ndev->ctrl_reg);
+
+err_dma_mask:
+       pci_clear_master(pdev);
+       pci_release_regions(pdev);
+
+err_pci_regions:
+       pci_disable_device(pdev);
+
+err_pci_enable:
+       pci_set_drvdata(pdev, NULL);
+
+       return ret;
+}
+
+/* Undo ntb_epf_init_pci(): unmap the BARs, release and disable the device. */
+static void ntb_epf_deinit_pci(struct ntb_epf_dev *ndev)
+{
+       struct pci_dev *pdev = ndev->ntb.pdev;
+
+       pci_iounmap(pdev, ndev->ctrl_reg);
+       pci_iounmap(pdev, ndev->peer_spad_reg);
+       pci_iounmap(pdev, ndev->db_reg);
+
+       pci_clear_master(pdev);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+}
+
+/*
+ * Probe: allocate per-device state (devm-managed), pick the BAR layout
+ * from the match data, map the device, initialize it, and register it
+ * with the NTB core.
+ */
+static int ntb_epf_pci_probe(struct pci_dev *pdev,
+                            const struct pci_device_id *id)
+{
+       /* defaults used when no driver_data overrides a BAR */
+       enum pci_barno peer_spad_reg_bar = BAR_1;
+       enum pci_barno ctrl_reg_bar = BAR_0;
+       enum pci_barno db_reg_bar = BAR_2;
+       struct device *dev = &pdev->dev;
+       struct ntb_epf_data *data;
+       struct ntb_epf_dev *ndev;
+       int ret;
+
+       if (pci_is_bridge(pdev))
+               return -ENODEV;
+
+       /* devm-managed: freed by the driver core on detach, never kfree()d */
+       ndev = devm_kzalloc(dev, sizeof(*ndev), GFP_KERNEL);
+       if (!ndev)
+               return -ENOMEM;
+
+       /*
+        * A zero (BAR_0) field in the match data means "keep the default";
+        * an explicit BAR_0 override is therefore indistinguishable from
+        * "unset", which the defaults above happen to cover.
+        */
+       data = (struct ntb_epf_data *)id->driver_data;
+       if (data) {
+               if (data->peer_spad_reg_bar)
+                       peer_spad_reg_bar = data->peer_spad_reg_bar;
+               if (data->ctrl_reg_bar)
+                       ctrl_reg_bar = data->ctrl_reg_bar;
+               if (data->db_reg_bar)
+                       db_reg_bar = data->db_reg_bar;
+       }
+
+       ndev->peer_spad_reg_bar = peer_spad_reg_bar;
+       ndev->ctrl_reg_bar = ctrl_reg_bar;
+       ndev->db_reg_bar = db_reg_bar;
+
+       ntb_epf_init_struct(ndev, pdev);
+
+       ret = ntb_epf_init_pci(ndev, pdev);
+       if (ret) {
+               dev_err(dev, "Failed to init PCI\n");
+               return ret;
+       }
+
+       ret = ntb_epf_init_dev(ndev);
+       if (ret) {
+               dev_err(dev, "Failed to init device\n");
+               goto err_init_dev;
+       }
+
+       ret = ntb_register_device(&ndev->ntb);
+       if (ret) {
+               dev_err(dev, "Failed to register NTB device\n");
+               goto err_register_dev;
+       }
+
+       return 0;
+
+err_register_dev:
+       pci_free_irq_vectors(pdev);
+
+err_init_dev:
+       ntb_epf_deinit_pci(ndev);
+
+       return ret;
+}
+
+/*
+ * Tear down in reverse order of probe.
+ *
+ * Fixes vs. the original:
+ *  - @ndev was allocated with devm_kzalloc() in probe and is released by
+ *    the driver core after detach, so the kfree(ndev) here was a double
+ *    free — removed;
+ *  - ntb_epf_deinit_pci() is now called (mirroring the probe error path)
+ *    so BAR mappings/regions no longer leak across unbind.
+ *
+ * NOTE(review): the IRQ handlers were requested with devm_request_irq()
+ * and are only freed after this returns — confirm that freeing the
+ * vectors first is safe, as the probe error path already does the same.
+ */
+static void ntb_epf_pci_remove(struct pci_dev *pdev)
+{
+       struct ntb_epf_dev *ndev = pci_get_drvdata(pdev);
+
+       ntb_unregister_device(&ndev->ntb);
+       pci_free_irq_vectors(pdev);
+       ntb_epf_deinit_pci(ndev);
+}
+
+static const struct ntb_epf_data j721e_data = {
+       .ctrl_reg_bar = BAR_0,
+       .peer_spad_reg_bar = BAR_1,
+       .db_reg_bar = BAR_2,
+};
+
+static const struct pci_device_id ntb_epf_pci_tbl[] = {
+       {
+               PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
+               .driver_data = (kernel_ulong_t)&j721e_data,
+       },
+       { },
+};
+MODULE_DEVICE_TABLE(pci, ntb_epf_pci_tbl);
+
+static struct pci_driver ntb_epf_pci_driver = {
+       .name           = KBUILD_MODNAME,
+       .id_table       = ntb_epf_pci_tbl,
+       .probe          = ntb_epf_pci_probe,
+       .remove         = ntb_epf_pci_remove,
+};
+module_pci_driver(ntb_epf_pci_driver);
+
+MODULE_DESCRIPTION("PCI ENDPOINT NTB HOST DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
index 2a9d6b0d1f193257266acd9b86ecfe68949d1eb9..80508da3c8b5cbe9f6f7aaf3ba9a043da1b10a84 100644 (file)
@@ -1373,7 +1373,7 @@ static int perf_setup_peer_mw(struct perf_peer *peer)
        int ret;
 
        /* Get outbound MW parameters and map it */
-       ret = ntb_peer_mw_get_addr(perf->ntb, peer->gidx, &phys_addr,
+       ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
                                   &peer->outbuf_size);
        if (ret)
                return ret;
index d592c0ffbd198f015e2a8920eb82a91c1fc019c0..04138e6a371bae6f62b50eafdf54d22209dd163a 100644 (file)
@@ -1638,6 +1638,7 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
 
        tool_setup_dbgfs(tc);
 
+       ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
        return 0;
 
 err_clear_mws:
index a3132a9eb91c21b2ecbac00d3d38f0efb966c6a0..54a633e8cb5d21ef3d2e6afcede959ce1e1e3b06 100644 (file)
@@ -86,7 +86,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
 {
        nvdimm_bus_lock(&nvdimm_bus->dev);
        if (--nvdimm_bus->probe_active == 0)
-               wake_up(&nvdimm_bus->probe_wait);
+               wake_up(&nvdimm_bus->wait);
        nvdimm_bus_unlock(&nvdimm_bus->dev);
 }
 
@@ -189,7 +189,7 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
        sector_t sector;
 
        /* make sure device is a region */
-       if (!is_nd_pmem(dev))
+       if (!is_memory(dev))
                return 0;
 
        nd_region = to_nd_region(dev);
@@ -348,7 +348,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
                return NULL;
        INIT_LIST_HEAD(&nvdimm_bus->list);
        INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
-       init_waitqueue_head(&nvdimm_bus->probe_wait);
+       init_waitqueue_head(&nvdimm_bus->wait);
        nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
        mutex_init(&nvdimm_bus->reconfig_mutex);
        badrange_init(&nvdimm_bus->badrange);
@@ -418,6 +418,9 @@ static int nd_bus_remove(struct device *dev)
        list_del_init(&nvdimm_bus->list);
        mutex_unlock(&nvdimm_bus_list_mutex);
 
+       wait_event(nvdimm_bus->wait,
+                       atomic_read(&nvdimm_bus->ioctl_active) == 0);
+
        nd_synchronize();
        device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
 
@@ -525,13 +528,38 @@ EXPORT_SYMBOL(nd_device_register);
 
 void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
 {
+       bool killed;
+
        switch (mode) {
        case ND_ASYNC:
+               /*
+                * In the async case this is being triggered with the
+                * device lock held and the unregistration work needs to
+                * be moved out of line iff this is thread has won the
+                * race to schedule the deletion.
+                */
+               if (!kill_device(dev))
+                       return;
+
                get_device(dev);
                async_schedule_domain(nd_async_device_unregister, dev,
                                &nd_async_domain);
                break;
        case ND_SYNC:
+               /*
+                * In the sync case the device is being unregistered due
+                * to a state change of the parent. Claim the kill state
+                * to synchronize against other unregistration requests,
+                * or otherwise let the async path handle it if the
+                * unregistration was already queued.
+                */
+               device_lock(dev);
+               killed = kill_device(dev);
+               device_unlock(dev);
+
+               if (!killed)
+                       return;
+
                nd_synchronize();
                device_unregister(dev);
                break;
@@ -837,10 +865,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
        do {
                if (nvdimm_bus->probe_active == 0)
                        break;
-               nvdimm_bus_unlock(&nvdimm_bus->dev);
-               wait_event(nvdimm_bus->probe_wait,
+               nvdimm_bus_unlock(dev);
+               device_unlock(dev);
+               wait_event(nvdimm_bus->wait,
                                nvdimm_bus->probe_active == 0);
-               nvdimm_bus_lock(&nvdimm_bus->dev);
+               device_lock(dev);
+               nvdimm_bus_lock(dev);
        } while (true);
 }
 
@@ -923,20 +953,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
                int read_only, unsigned int ioctl_cmd, unsigned long arg)
 {
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
-       static char out_env[ND_CMD_MAX_ENVELOPE];
-       static char in_env[ND_CMD_MAX_ENVELOPE];
        const struct nd_cmd_desc *desc = NULL;
        unsigned int cmd = _IOC_NR(ioctl_cmd);
        struct device *dev = &nvdimm_bus->dev;
        void __user *p = (void __user *) arg;
+       char *out_env = NULL, *in_env = NULL;
        const char *cmd_name, *dimm_name;
        u32 in_len = 0, out_len = 0;
        unsigned int func = cmd;
        unsigned long cmd_mask;
        struct nd_cmd_pkg pkg;
        int rc, i, cmd_rc;
+       void *buf = NULL;
        u64 buf_len = 0;
-       void *buf;
 
        if (nvdimm) {
                desc = nd_cmd_dimm_desc(cmd);
@@ -967,7 +996,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
                case ND_CMD_ARS_START:
                case ND_CMD_CLEAR_ERROR:
                case ND_CMD_CALL:
-                       dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
+                       dev_dbg(dev, "'%s' command while read-only.\n",
                                        nvdimm ? nvdimm_cmd_name(cmd)
                                        : nvdimm_bus_cmd_name(cmd));
                        return -EPERM;
@@ -976,6 +1005,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
                }
 
        /* process an input envelope */
+       in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+       if (!in_env)
+               return -ENOMEM;
        for (i = 0; i < desc->in_num; i++) {
                u32 in_size, copy;
 
@@ -983,14 +1015,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
                if (in_size == UINT_MAX) {
                        dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
                                        __func__, dimm_name, cmd_name, i);
-                       return -ENXIO;
+                       rc = -ENXIO;
+                       goto out;
                }
-               if (in_len < sizeof(in_env))
-                       copy = min_t(u32, sizeof(in_env) - in_len, in_size);
+               if (in_len < ND_CMD_MAX_ENVELOPE)
+                       copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
                else
                        copy = 0;
-               if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
-                       return -EFAULT;
+               if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
+                       rc = -EFAULT;
+                       goto out;
+               }
                in_len += in_size;
        }
 
@@ -1002,6 +1037,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
        }
 
        /* process an output envelope */
+       out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+       if (!out_env) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
        for (i = 0; i < desc->out_num; i++) {
                u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
                                (u32 *) in_env, (u32 *) out_env, 0);
@@ -1010,15 +1051,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
                if (out_size == UINT_MAX) {
                        dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
                                        dimm_name, cmd_name, i);
-                       return -EFAULT;
+                       rc = -EFAULT;
+                       goto out;
                }
-               if (out_len < sizeof(out_env))
-                       copy = min_t(u32, sizeof(out_env) - out_len, out_size);
+               if (out_len < ND_CMD_MAX_ENVELOPE)
+                       copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
                else
                        copy = 0;
                if (copy && copy_from_user(&out_env[out_len],
-                                       p + in_len + out_len, copy))
-                       return -EFAULT;
+                                       p + in_len + out_len, copy)) {
+                       rc = -EFAULT;
+                       goto out;
+               }
                out_len += out_size;
        }
 
@@ -1026,19 +1070,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
        if (buf_len > ND_IOCTL_MAX_BUFLEN) {
                dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
                                cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
-               return -EINVAL;
+               rc = -EINVAL;
+               goto out;
        }
 
        buf = vmalloc(buf_len);
-       if (!buf)
-               return -ENOMEM;
+       if (!buf) {
+               rc = -ENOMEM;
+               goto out;
+       }
 
        if (copy_from_user(buf, p, buf_len)) {
                rc = -EFAULT;
                goto out;
        }
 
-       nvdimm_bus_lock(&nvdimm_bus->dev);
+       device_lock(dev);
+       nvdimm_bus_lock(dev);
        rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
        if (rc)
                goto out_unlock;
@@ -1053,39 +1101,24 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
                nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
                                clear_err->cleared);
        }
-       nvdimm_bus_unlock(&nvdimm_bus->dev);
 
        if (copy_to_user(p, buf, buf_len))
                rc = -EFAULT;
 
-       vfree(buf);
-       return rc;
-
- out_unlock:
-       nvdimm_bus_unlock(&nvdimm_bus->dev);
- out:
+out_unlock:
+       nvdimm_bus_unlock(dev);
+       device_unlock(dev);
+out:
+       kfree(in_env);
+       kfree(out_env);
        vfree(buf);
        return rc;
 }
 
-static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       long id = (long) file->private_data;
-       int rc = -ENXIO, ro;
-       struct nvdimm_bus *nvdimm_bus;
-
-       ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
-       mutex_lock(&nvdimm_bus_list_mutex);
-       list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
-               if (nvdimm_bus->id == id) {
-                       rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
-                       break;
-               }
-       }
-       mutex_unlock(&nvdimm_bus_list_mutex);
-
-       return rc;
-}
+enum nd_ioctl_mode {
+       BUS_IOCTL,
+       DIMM_IOCTL,
+};
 
 static int match_dimm(struct device *dev, void *data)
 {
@@ -1100,31 +1133,62 @@ static int match_dimm(struct device *dev, void *data)
        return 0;
 }
 
-static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
+               enum nd_ioctl_mode mode)
+
 {
-       int rc = -ENXIO, ro;
-       struct nvdimm_bus *nvdimm_bus;
+       struct nvdimm_bus *nvdimm_bus, *found = NULL;
+       long id = (long) file->private_data;
+       struct nvdimm *nvdimm = NULL;
+       int rc, ro;
 
        ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
        mutex_lock(&nvdimm_bus_list_mutex);
        list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
-               struct device *dev = device_find_child(&nvdimm_bus->dev,
-                               file->private_data, match_dimm);
-               struct nvdimm *nvdimm;
-
-               if (!dev)
-                       continue;
+               if (mode == DIMM_IOCTL) {
+                       struct device *dev;
+
+                       dev = device_find_child(&nvdimm_bus->dev,
+                                       file->private_data, match_dimm);
+                       if (!dev)
+                               continue;
+                       nvdimm = to_nvdimm(dev);
+                       found = nvdimm_bus;
+               } else if (nvdimm_bus->id == id) {
+                       found = nvdimm_bus;
+               }
 
-               nvdimm = to_nvdimm(dev);
-               rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
-               put_device(dev);
-               break;
+               if (found) {
+                       atomic_inc(&nvdimm_bus->ioctl_active);
+                       break;
+               }
        }
        mutex_unlock(&nvdimm_bus_list_mutex);
 
+       if (!found)
+               return -ENXIO;
+
+       nvdimm_bus = found;
+       rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
+
+       if (nvdimm)
+               put_device(&nvdimm->dev);
+       if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
+               wake_up(&nvdimm_bus->wait);
+
        return rc;
 }
 
+static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       return nd_ioctl(file, cmd, arg, BUS_IOCTL);
+}
+
+static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       return nd_ioctl(file, cmd, arg, DIMM_IOCTL);
+}
+
 static int nd_open(struct inode *inode, struct file *file)
 {
        long minor = iminor(inode);
@@ -1136,16 +1200,16 @@ static int nd_open(struct inode *inode, struct file *file)
 static const struct file_operations nvdimm_bus_fops = {
        .owner = THIS_MODULE,
        .open = nd_open,
-       .unlocked_ioctl = nd_ioctl,
-       .compat_ioctl = nd_ioctl,
+       .unlocked_ioctl = bus_ioctl,
+       .compat_ioctl = bus_ioctl,
        .llseek = noop_llseek,
 };
 
 static const struct file_operations nvdimm_fops = {
        .owner = THIS_MODULE,
        .open = nd_open,
-       .unlocked_ioctl = nvdimm_ioctl,
-       .compat_ioctl = nvdimm_ioctl,
+       .unlocked_ioctl = dimm_ioctl,
+       .compat_ioctl = dimm_ioctl,
        .llseek = noop_llseek,
 };
 
index 0453f49dc70814f35d2e0988f46304777f3e2559..326f02ffca81f7ce638d60f3935469170c959594 100644 (file)
@@ -126,7 +126,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
        nvdimm_bus_unlock(&ndns->dev);
        if (!dax_dev)
                return -ENOMEM;
-       pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+       pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
        nd_pfn->pfn_sb = pfn_sb;
        rc = nd_pfn_validate(nd_pfn, DAX_SIG);
        dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
index 5ff254dc9b14fb52bc3c9b2dcc6d4d263e900d9c..adf62a6c0fe277cbd4cba1280490259e1031e49d 100644 (file)
@@ -25,10 +25,11 @@ extern int nvdimm_major;
 
 struct nvdimm_bus {
        struct nvdimm_bus_descriptor *nd_desc;
-       wait_queue_head_t probe_wait;
+       wait_queue_head_t wait;
        struct list_head list;
        struct device dev;
        int id, probe_active;
+       atomic_t ioctl_active;
        struct list_head mapping_list;
        struct mutex reconfig_mutex;
        struct badrange badrange;
index dde9853453d3c622c42511772ff807bcf1fd70d0..e901e3a3b04c9920007609f64443a8895bb2dca4 100644 (file)
@@ -36,6 +36,7 @@ struct nd_pfn_sb {
        __le32 end_trunc;
        /* minor-version-2 record the base alignment of the mapping */
        __le32 align;
+       /* minor-version-3 guarantee the padding and flags are zero */
        u8 padding[4000];
        __le64 checksum;
 };
index 3ee995a3bfc9bd9327cddc716787368566cb8377..86ed09b2a1929a52dcd16e0a6d85216331cf9b9f 100644 (file)
@@ -361,6 +361,15 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
        return dev;
 }
 
+/**
+ * nd_pfn_validate - read and validate info-block
+ * @nd_pfn: fsdax namespace runtime state / properties
+ * @sig: 'devdax' or 'fsdax' signature
+ *
+ * Upon return the info-block buffer contents (->pfn_sb) are
+ * indeterminate when validation fails, and a coherent info-block
+ * otherwise.
+ */
 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
        u64 checksum, offset;
@@ -506,7 +515,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
        nvdimm_bus_unlock(&ndns->dev);
        if (!pfn_dev)
                return -ENOMEM;
-       pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+       pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
        nd_pfn = to_nd_pfn(pfn_dev);
        nd_pfn->pfn_sb = pfn_sb;
        rc = nd_pfn_validate(nd_pfn, PFN_SIG);
@@ -638,7 +647,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        u64 checksum;
        int rc;
 
-       pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+       pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
        if (!pfn_sb)
                return -ENOMEM;
 
@@ -647,11 +656,14 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
                sig = DAX_SIG;
        else
                sig = PFN_SIG;
+
        rc = nd_pfn_validate(nd_pfn, sig);
        if (rc != -ENODEV)
                return rc;
 
        /* no info block, do init */;
+       memset(pfn_sb, 0, sizeof(*pfn_sb));
+
        nd_region = to_nd_region(nd_pfn->dev.parent);
        if (nd_region->ro) {
                dev_info(&nd_pfn->dev,
@@ -705,7 +717,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
        memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
        pfn_sb->version_major = cpu_to_le16(1);
-       pfn_sb->version_minor = cpu_to_le16(2);
+       pfn_sb->version_minor = cpu_to_le16(3);
        pfn_sb->start_pad = cpu_to_le32(start_pad);
        pfn_sb->end_trunc = cpu_to_le32(end_trunc);
        pfn_sb->align = cpu_to_le32(nd_pfn->align);
index b9ca0033cc9996b295fe28e96369531ef87ab20a..22224b21c34df51368a1522c3b132335cc66aa29 100644 (file)
@@ -42,18 +42,7 @@ static int nd_region_probe(struct device *dev)
        if (rc)
                return rc;
 
-       rc = nd_region_register_namespaces(nd_region, &err);
-       if (rc < 0)
-               return rc;
-
-       ndrd = dev_get_drvdata(dev);
-       ndrd->ns_active = rc;
-       ndrd->ns_count = rc + err;
-
-       if (rc && err && rc == err)
-               return -ENODEV;
-
-       if (is_nd_pmem(&nd_region->dev)) {
+       if (is_memory(&nd_region->dev)) {
                struct resource ndr_res;
 
                if (devm_init_badblocks(dev, &nd_region->bb))
@@ -68,6 +57,17 @@ static int nd_region_probe(struct device *dev)
                nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
        }
 
+       rc = nd_region_register_namespaces(nd_region, &err);
+       if (rc < 0)
+               return rc;
+
+       ndrd = dev_get_drvdata(dev);
+       ndrd->ns_active = rc;
+       ndrd->ns_count = rc + err;
+
+       if (rc && err && rc == err)
+               return -ENODEV;
+
        nd_region->btt_seed = nd_btt_create(nd_region);
        nd_region->pfn_seed = nd_pfn_create(nd_region);
        nd_region->dax_seed = nd_dax_create(nd_region);
@@ -131,7 +131,7 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
                struct nd_region *nd_region = to_nd_region(dev);
                struct resource res;
 
-               if (is_nd_pmem(&nd_region->dev)) {
+               if (is_memory(&nd_region->dev)) {
                        res.start = nd_region->ndr_start;
                        res.end = nd_region->ndr_start +
                                nd_region->ndr_size - 1;
index e7377f1028ef687637a4a9f481899b05cc264b1f..609fc450522a14979b5ab3b608ce8d3adb6647d8 100644 (file)
@@ -425,10 +425,12 @@ static ssize_t available_size_show(struct device *dev,
         * memory nvdimm_bus_lock() is dropped, but that's userspace's
         * problem to not race itself.
         */
+       device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_available_dpa(nd_region);
        nvdimm_bus_unlock(dev);
+       device_unlock(dev);
 
        return sprintf(buf, "%llu\n", available);
 }
@@ -440,10 +442,12 @@ static ssize_t max_available_extent_show(struct device *dev,
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;
 
+       device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_allocatable_dpa(nd_region);
        nvdimm_bus_unlock(dev);
+       device_unlock(dev);
 
        return sprintf(buf, "%llu\n", available);
 }
@@ -629,11 +633,11 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
        if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
                return 0;
 
-       if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
+       if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
                return 0;
 
        if (a == &dev_attr_resource.attr) {
-               if (is_nd_pmem(dev))
+               if (is_memory(dev))
                        return 0400;
                else
                        return 0;
index d8869d978c341474051a1f21981aca9f859cf3bc..5d0f99bcc987f4fb2350a9aac5b086717e271585 100644 (file)
@@ -111,10 +111,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
         */
        if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
                return;
-       revalidate_disk(ns->disk);
        blk_set_queue_dying(ns->queue);
        /* Forcibly unquiesce queues to avoid blocking dispatch */
        blk_mq_unquiesce_queue(ns->queue);
+       /*
+        * Revalidate after unblocking dispatchers that may be holding bd_butex
+        */
+       revalidate_disk(ns->disk);
 }
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -1183,6 +1186,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
         */
        if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
                mutex_lock(&ctrl->scan_lock);
+               mutex_lock(&ctrl->subsys->lock);
+               nvme_mpath_start_freeze(ctrl->subsys);
+               nvme_mpath_wait_freeze(ctrl->subsys);
                nvme_start_freeze(ctrl);
                nvme_wait_freeze(ctrl);
        }
@@ -1213,6 +1219,8 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
                nvme_update_formats(ctrl);
        if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
                nvme_unfreeze(ctrl);
+               nvme_mpath_unfreeze(ctrl->subsys);
+               mutex_unlock(&ctrl->subsys->lock);
                mutex_unlock(&ctrl->scan_lock);
        }
        if (effects & NVME_CMD_EFFECTS_CCC)
@@ -1557,6 +1565,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
        if (ns->head->disk) {
                nvme_update_disk_info(ns->head->disk, ns, id);
                blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+               revalidate_disk(ns->head->disk);
        }
 #endif
 }
@@ -3168,6 +3177,14 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                return;
 
        nvme_fault_inject_fini(ns);
+
+       mutex_lock(&ns->ctrl->subsys->lock);
+       list_del_rcu(&ns->siblings);
+       mutex_unlock(&ns->ctrl->subsys->lock);
+       synchronize_rcu(); /* guarantee not available in head->list */
+       nvme_mpath_clear_current_path(ns);
+       synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
+
        if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvme_ns_id_attr_group);
@@ -3179,16 +3196,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                        blk_integrity_unregister(ns->disk);
        }
 
-       mutex_lock(&ns->ctrl->subsys->lock);
-       list_del_rcu(&ns->siblings);
-       nvme_mpath_clear_current_path(ns);
-       mutex_unlock(&ns->ctrl->subsys->lock);
-
        down_write(&ns->ctrl->namespaces_rwsem);
        list_del_init(&ns->list);
        up_write(&ns->ctrl->namespaces_rwsem);
 
-       synchronize_srcu(&ns->head->srcu);
        nvme_mpath_check_last_path(ns);
        nvme_put_ns(ns);
 }
index 67dec8860bf3c9e24578359cefebedae8fe99a3b..565bddcfd130d5aac3ac1057956fbe4fa6d686f5 100644 (file)
@@ -206,7 +206,7 @@ static LIST_HEAD(nvme_fc_lport_list);
 static DEFINE_IDA(nvme_fc_local_port_cnt);
 static DEFINE_IDA(nvme_fc_ctrl_cnt);
 
-
+static struct workqueue_struct *nvme_fc_wq;
 
 /*
  * These items are short-term. They will eventually be moved into
@@ -2053,7 +2053,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
         */
        if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
                active = atomic_xchg(&ctrl->err_work_active, 1);
-               if (!active && !schedule_work(&ctrl->err_work)) {
+               if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
                        atomic_set(&ctrl->err_work_active, 0);
                        WARN_ON(1);
                }
@@ -3321,6 +3321,10 @@ static int __init nvme_fc_init_module(void)
 {
        int ret;
 
+       nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
+       if (!nvme_fc_wq)
+               return -ENOMEM;
+
        /*
         * NOTE:
         * It is expected that in the future the kernel will combine
@@ -3338,7 +3342,8 @@ static int __init nvme_fc_init_module(void)
        fc_class = class_create(THIS_MODULE, "fc");
        if (IS_ERR(fc_class)) {
                pr_err("couldn't register class fc\n");
-               return PTR_ERR(fc_class);
+               ret = PTR_ERR(fc_class);
+               goto out_destroy_wq;
        }
 
        /*
@@ -3362,6 +3367,9 @@ out_destroy_device:
        device_destroy(fc_class, MKDEV(0, 0));
 out_destroy_class:
        class_destroy(fc_class);
+out_destroy_wq:
+       destroy_workqueue(nvme_fc_wq);
+
        return ret;
 }
 
@@ -3378,6 +3386,7 @@ static void __exit nvme_fc_exit_module(void)
 
        device_destroy(fc_class, MKDEV(0, 0));
        class_destroy(fc_class);
+       destroy_workqueue(nvme_fc_wq);
 }
 
 module_init(nvme_fc_init_module);
index 260248fbb8feb4cbf7c9149c9b9e43939c7a431f..892ef52122329c3c8068ea87936dea8cffa9810f 100644 (file)
@@ -20,9 +20,34 @@ module_param(multipath, bool, 0444);
 MODULE_PARM_DESC(multipath,
        "turn on native support for multiple controllers per subsystem");
 
-inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
 {
-       return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
+       struct nvme_ns_head *h;
+
+       lockdep_assert_held(&subsys->lock);
+       list_for_each_entry(h, &subsys->nsheads, entry)
+               if (h->disk)
+                       blk_mq_unfreeze_queue(h->disk->queue);
+}
+
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+       struct nvme_ns_head *h;
+
+       lockdep_assert_held(&subsys->lock);
+       list_for_each_entry(h, &subsys->nsheads, entry)
+               if (h->disk)
+                       blk_mq_freeze_queue_wait(h->disk->queue);
+}
+
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+       struct nvme_ns_head *h;
+
+       lockdep_assert_held(&subsys->lock);
+       list_for_each_entry(h, &subsys->nsheads, entry)
+               if (h->disk)
+                       blk_freeze_queue_start(h->disk->queue);
 }
 
 /*
@@ -298,6 +323,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
                                 "failed to create id group.\n");
        }
 
+       synchronize_srcu(&ns->head->srcu);
        kblockd_schedule_work(&ns->head->requeue_work);
 }
 
@@ -378,14 +404,16 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 
        down_write(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
-               if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
+               unsigned nsid = le32_to_cpu(desc->nsids[n]);
+
+               if (ns->head->ns_id < nsid)
                        continue;
-               nvme_update_ns_ana_state(desc, ns);
+               if (ns->head->ns_id == nsid)
+                       nvme_update_ns_ana_state(desc, ns);
                if (++n == nr_nsids)
                        break;
        }
        up_write(&ctrl->namespaces_rwsem);
-       WARN_ON_ONCE(n < nr_nsids);
        return 0;
 }
 
@@ -516,7 +544,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 {
        int error;
 
-       if (!nvme_ctrl_use_ana(ctrl))
+       /* check if multipath is enabled and we have the capability */
+       if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
                return 0;
 
        ctrl->anacap = id->anacap;
index e82cdaec81c9c10e90f4f825c35ab4cfcc33d97b..2653e1f4196d508fdf7c11c4d8306fbdf1373f35 100644 (file)
@@ -464,7 +464,14 @@ extern const struct attribute_group nvme_ns_id_attr_group;
 extern const struct block_device_operations nvme_ns_head_ops;
 
 #ifdef CONFIG_NVME_MULTIPATH
-bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl);
+static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+{
+       return ctrl->ana_log_buf != NULL;
+}
+
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
 void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
                        struct nvme_ctrl *ctrl, int *flags);
 void nvme_failover_req(struct request *req);
@@ -549,6 +556,15 @@ static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
 {
 }
+static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 #ifdef CONFIG_NVM
index c8eeecc581154269afb726824f8caa17691eede2..a64a8bca0d5b9dcfe9056f9207568f2a3dd1e0b7 100644 (file)
@@ -2253,11 +2253,13 @@ static void nvme_reset_work(struct work_struct *work)
        struct nvme_dev *dev =
                container_of(work, struct nvme_dev, ctrl.reset_work);
        bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
-       int result = -ENODEV;
+       int result;
        enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
 
-       if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
+       if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
+               result = -ENODEV;
                goto out;
+       }
 
        /*
         * If we're called to reset a live controller first shut it down before
@@ -2294,6 +2296,7 @@ static void nvme_reset_work(struct work_struct *work)
        if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
                dev_warn(dev->ctrl.device,
                        "failed to mark controller CONNECTING\n");
+               result = -EBUSY;
                goto out;
        }
 
@@ -2354,6 +2357,7 @@ static void nvme_reset_work(struct work_struct *work)
        if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
                dev_warn(dev->ctrl.device,
                        "failed to mark controller state %d\n", new_state);
+               result = -ENODEV;
                goto out;
        }
 
@@ -2464,7 +2468,7 @@ static void nvme_async_probe(void *data, async_cookie_t cookie)
 {
        struct nvme_dev *dev = data;
 
-       nvme_reset_ctrl_sync(&dev->ctrl);
+       flush_work(&dev->ctrl.reset_work);
        flush_work(&dev->ctrl.scan_work);
        nvme_put_ctrl(&dev->ctrl);
 }
@@ -2531,6 +2535,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
+       nvme_reset_ctrl(&dev->ctrl);
        nvme_get_ctrl(&dev->ctrl);
        async_schedule(nvme_async_probe, dev);
 
index 2008fa62a373bb9ba84775e39b5b8956dd25bca6..a8eb8784e151fb38b0da838199bf19275dcb953e 100644 (file)
@@ -68,9 +68,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
                goto out;
 
        host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
-       data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
+       data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
+               sectors[READ]), 1000);
        host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
-       data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+       data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
+               sectors[WRITE]), 1000);
 
        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -98,11 +100,11 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
                if (!ns->bdev)
                        continue;
                host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
-               data_units_read +=
-                       part_stat_read(ns->bdev->bd_part, sectors[READ]);
+               data_units_read += DIV_ROUND_UP(
+                       part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
                host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
-               data_units_written +=
-                       part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+               data_units_written += DIV_ROUND_UP(
+                       part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
 
        }
        rcu_read_unlock();
index 9908082b32c4b42647085f18bd5dcdf1e0184652..137a27fa369cbf8bc6495878deb1127df4820b2b 100644 (file)
@@ -678,6 +678,14 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
        mutex_lock(&nvme_loop_ports_mutex);
        list_del_init(&port->entry);
        mutex_unlock(&nvme_loop_ports_mutex);
+
+       /*
+        * Ensure any ctrls that are in the process of being
+        * deleted are in fact deleted before we return
+        * and free the port. This is to prevent active
+        * ctrls from using a port after it's freed.
+        */
+       flush_workqueue(nvme_delete_wq);
 }
 
 static const struct nvmet_fabrics_ops nvme_loop_ops = {
index 99de51e87f7f848a891112ed2c4c15aac00d50b0..d32eba11c000f7a78803f5887f8a55ec730f9982 100644 (file)
@@ -415,10 +415,17 @@ static int nvmem_setup_compat(struct nvmem_device *nvmem,
        if (!config->base_dev)
                return -EINVAL;
 
-       if (nvmem->read_only)
-               nvmem->eeprom = bin_attr_ro_root_nvmem;
-       else
-               nvmem->eeprom = bin_attr_rw_root_nvmem;
+       if (nvmem->read_only) {
+               if (config->root_only)
+                       nvmem->eeprom = bin_attr_ro_root_nvmem;
+               else
+                       nvmem->eeprom = bin_attr_ro_nvmem;
+       } else {
+               if (config->root_only)
+                       nvmem->eeprom = bin_attr_rw_root_nvmem;
+               else
+                       nvmem->eeprom = bin_attr_rw_nvmem;
+       }
        nvmem->eeprom.attr.name = "eeprom";
        nvmem->eeprom.size = nvmem->size;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
index 7390fb8ca9d156c485f85aa1c0cc475988765b23..29df6ab29e95cd1fcb948f4e135978645d9deaf4 100644 (file)
@@ -160,6 +160,15 @@ struct dino_device
        (struct dino_device *)__pdata; })
 
 
+/* Check if PCI device is behind a Card-mode Dino. */
+static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
+{
+       struct dino_device *dino_dev;
+
+       dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
+       return is_card_dino(&dino_dev->hba.dev->id);
+}
+
 /*
  * Dino Configuration Space Accessor Functions
  */
@@ -442,6 +451,21 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
 
+#ifdef CONFIG_TULIP
+static void pci_fixup_tulip(struct pci_dev *dev)
+{
+       if (!pci_dev_is_behind_card_dino(dev))
+               return;
+       if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM))
+               return;
+       pr_warn("%s: HP HSC-PCI Cards with card-mode Dino not yet supported.\n",
+               pci_name(dev));
+       /* Disable this card by zeroing the PCI resources */
+       memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
+       memset(&dev->resource[1], 0, sizeof(dev->resource[1]));
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_DEC, PCI_ANY_ID, pci_fixup_tulip);
+#endif /* CONFIG_TULIP */
 
 static void __init
 dino_bios_init(void)
index d0d01f8d1feb11c47d7e2340ae5579ecdc342824..ca018b5bb0b034aa2cd08704d3b614b1452e5db3 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
 
 #include "../../pci.h"
 #include "pcie-designware.h"
@@ -728,7 +729,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
-       base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+       base = devm_ioremap_resource(dev, res);
        if (!base)
                return -ENOMEM;
 
index cee5f2f590e2d4c51e9531383225d16e69f0d15d..14a6ba4067fbec47c3c2ea5609e591b8c689faff 100644 (file)
@@ -465,7 +465,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
 
        ep->phy = devm_of_phy_get(dev, np, NULL);
        if (IS_ERR(ep->phy)) {
-               if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
+               if (PTR_ERR(ep->phy) != -ENODEV)
                        return PTR_ERR(ep->phy);
 
                ep->phy = NULL;
index 3826b444298c194cf32646ac10ce7c86322b302f..3b2ceb566728943a012afa6193a90c319d56d4dc 100644 (file)
@@ -807,8 +807,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
 
        imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
        if (IS_ERR(imx6_pcie->vpcie)) {
-               if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
+               if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
+                       return PTR_ERR(imx6_pcie->vpcie);
                imx6_pcie->vpcie = NULL;
        }
 
index fe241b3239eba20f7160c9ebe7db59f07e698f25..6efed8fba5dae513a7ac9cd37d79440dfff61144 100644 (file)
@@ -523,7 +523,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
                                   lower_32_bits(start) | OB_ENABLEN);
                ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
                                   upper_32_bits(start));
-               start += OB_WIN_SIZE;
+               start += (OB_WIN_SIZE * SZ_1M);
        }
 
        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
@@ -611,7 +611,7 @@ static void ks_pcie_stop_link(struct dw_pcie *pci)
        /* Disable Link training */
        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
        val &= ~LTSSM_EN_VAL;
-       ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+       ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
 }
 
 static int ks_pcie_start_link(struct dw_pcie *pci)
index f841538cd092d4b6372cb25bf51731cb2ddb26d8..7a6a1d68309dd191757f9a6416ac83a37d9a4cba 100644 (file)
@@ -334,7 +334,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
                dev_err(dev, "Missing *config* reg space\n");
        }
 
-       bridge = pci_alloc_host_bridge(0);
+       bridge = devm_pci_alloc_host_bridge(dev, 0);
        if (!bridge)
                return -ENOMEM;
 
@@ -345,7 +345,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 
        ret = devm_request_pci_bus_resources(dev, &bridge->windows);
        if (ret)
-               goto error;
+               return ret;
 
        /* Get the I/O and memory ranges from DT */
        resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
@@ -389,8 +389,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
                                                resource_size(pp->cfg));
                if (!pci->dbi_base) {
                        dev_err(dev, "Error with ioremap\n");
-                       ret = -ENOMEM;
-                       goto error;
+                       return -ENOMEM;
                }
        }
 
@@ -401,8 +400,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
                                        pp->cfg0_base, pp->cfg0_size);
                if (!pp->va_cfg0_base) {
                        dev_err(dev, "Error with ioremap in function\n");
-                       ret = -ENOMEM;
-                       goto error;
+                       return -ENOMEM;
                }
        }
 
@@ -412,8 +410,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
                                                pp->cfg1_size);
                if (!pp->va_cfg1_base) {
                        dev_err(dev, "Error with ioremap\n");
-                       ret = -ENOMEM;
-                       goto error;
+                       return -ENOMEM;
                }
        }
 
@@ -436,7 +433,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
                            pp->num_vectors == 0) {
                                dev_err(dev,
                                        "Invalid number of vectors\n");
-                               goto error;
+                               return -EINVAL;
                        }
                }
 
@@ -445,7 +442,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
 
                        ret = dw_pcie_allocate_domains(pp);
                        if (ret)
-                               goto error;
+                               return ret;
 
                        if (pp->msi_irq)
                                irq_set_chained_handler_and_data(pp->msi_irq,
@@ -454,7 +451,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
                } else {
                        ret = pp->ops->msi_host_init(pp);
                        if (ret < 0)
-                               goto error;
+                               return ret;
                }
        }
 
@@ -494,8 +491,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
 err_free_msi:
        if (pci_msi_enabled() && !pp->ops->msi_host_init)
                dw_pcie_free_msi(pp);
-error:
-       pci_free_host_bridge(bridge);
        return ret;
 }
 
index 7b32e619b959c8697eb16287c9e7d547581d97fd..a3489839a8fc3a06a340c03ed4b3c26f7af902fb 100644 (file)
@@ -340,8 +340,8 @@ static int histb_pcie_probe(struct platform_device *pdev)
 
        hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
        if (IS_ERR(hipcie->vpcie)) {
-               if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
+               if (PTR_ERR(hipcie->vpcie) != -ENODEV)
+                       return PTR_ERR(hipcie->vpcie);
                hipcie->vpcie = NULL;
        }
 
index 5352e0c3be8259e4237e3d6c12c604f1b61746ff..9b599296205dcc5333a23577ee6215bc2ab4d430 100644 (file)
@@ -467,8 +467,8 @@ static int kirin_pcie_add_msi(struct dw_pcie *pci,
        return 0;
 }
 
-static int __init kirin_add_pcie_port(struct dw_pcie *pci,
-                                     struct platform_device *pdev)
+static int kirin_add_pcie_port(struct dw_pcie *pci,
+                              struct platform_device *pdev)
 {
        int ret;
 
index 4352c1cb926d587532fd57d644b911b739026069..e292801fff7fd8ed4e434958cc4c5363457d19dc 100644 (file)
@@ -178,6 +178,8 @@ static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
 
 static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
 {
+       /* Ensure that PERST has been asserted for at least 100 ms */
+       msleep(100);
        gpiod_set_value_cansleep(pcie->reset, 0);
        usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
 }
@@ -1089,7 +1091,6 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
        struct qcom_pcie *pcie = to_qcom_pcie(pci);
        int ret;
 
-       pm_runtime_get_sync(pci->dev);
        qcom_ep_reset_assert(pcie);
 
        ret = pcie->ops->init(pcie);
@@ -1126,7 +1127,6 @@ err_disable_phy:
        phy_power_off(pcie->phy);
 err_deinit:
        pcie->ops->deinit(pcie);
-       pm_runtime_put(pci->dev);
 
        return ret;
 }
@@ -1216,6 +1216,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        pm_runtime_enable(dev);
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0) {
+               pm_runtime_disable(dev);
+               return ret;
+       }
+
        pci->dev = dev;
        pci->ops = &dw_pcie_ops;
        pp = &pci->pp;
@@ -1224,45 +1230,57 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 
        pcie->ops = of_device_get_match_data(dev);
 
-       pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
-       if (IS_ERR(pcie->reset))
-               return PTR_ERR(pcie->reset);
+       pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+       if (IS_ERR(pcie->reset)) {
+               ret = PTR_ERR(pcie->reset);
+               goto err_pm_runtime_put;
+       }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
        pcie->parf = devm_ioremap_resource(dev, res);
-       if (IS_ERR(pcie->parf))
-               return PTR_ERR(pcie->parf);
+       if (IS_ERR(pcie->parf)) {
+               ret = PTR_ERR(pcie->parf);
+               goto err_pm_runtime_put;
+       }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
        pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
-       if (IS_ERR(pci->dbi_base))
-               return PTR_ERR(pci->dbi_base);
+       if (IS_ERR(pci->dbi_base)) {
+               ret = PTR_ERR(pci->dbi_base);
+               goto err_pm_runtime_put;
+       }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
        pcie->elbi = devm_ioremap_resource(dev, res);
-       if (IS_ERR(pcie->elbi))
-               return PTR_ERR(pcie->elbi);
+       if (IS_ERR(pcie->elbi)) {
+               ret = PTR_ERR(pcie->elbi);
+               goto err_pm_runtime_put;
+       }
 
        pcie->phy = devm_phy_optional_get(dev, "pciephy");
-       if (IS_ERR(pcie->phy))
-               return PTR_ERR(pcie->phy);
+       if (IS_ERR(pcie->phy)) {
+               ret = PTR_ERR(pcie->phy);
+               goto err_pm_runtime_put;
+       }
 
        ret = pcie->ops->get_resources(pcie);
        if (ret)
-               return ret;
+               goto err_pm_runtime_put;
 
        pp->ops = &qcom_pcie_dw_ops;
 
        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                pp->msi_irq = platform_get_irq_byname(pdev, "msi");
-               if (pp->msi_irq < 0)
-                       return pp->msi_irq;
+               if (pp->msi_irq < 0) {
+                       ret = pp->msi_irq;
+                       goto err_pm_runtime_put;
+               }
        }
 
        ret = phy_init(pcie->phy);
        if (ret) {
                pm_runtime_disable(&pdev->dev);
-               return ret;
+               goto err_pm_runtime_put;
        }
 
        platform_set_drvdata(pdev, pcie);
@@ -1271,10 +1289,16 @@ static int qcom_pcie_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(dev, "cannot initialize host\n");
                pm_runtime_disable(&pdev->dev);
-               return ret;
+               goto err_pm_runtime_put;
        }
 
        return 0;
+
+err_pm_runtime_put:
+       pm_runtime_put(dev);
+       pm_runtime_disable(dev);
+
+       return ret;
 }
 
 static const struct of_device_id qcom_pcie_match[] = {
index 808a182830e5cd90648652c572557bbc7322e569..5c2849846641590d4fdb78f25c3165e30ffc6983 100644 (file)
@@ -1880,6 +1880,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
 static void hv_eject_device_work(struct work_struct *work)
 {
        struct pci_eject_response *ejct_pkt;
+       struct hv_pcibus_device *hbus;
        struct hv_pci_dev *hpdev;
        struct pci_dev *pdev;
        unsigned long flags;
@@ -1890,6 +1891,7 @@ static void hv_eject_device_work(struct work_struct *work)
        } ctxt;
 
        hpdev = container_of(work, struct hv_pci_dev, wrk);
+       hbus = hpdev->hbus;
 
        WARN_ON(hpdev->state != hv_pcichild_ejecting);
 
@@ -1900,8 +1902,7 @@ static void hv_eject_device_work(struct work_struct *work)
         * because hbus->pci_bus may not exist yet.
         */
        wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
-       pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
-                                          wslot);
+       pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
        if (pdev) {
                pci_lock_rescan_remove();
                pci_stop_and_remove_bus_device(pdev);
@@ -1909,9 +1910,9 @@ static void hv_eject_device_work(struct work_struct *work)
                pci_unlock_rescan_remove();
        }
 
-       spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
+       spin_lock_irqsave(&hbus->device_list_lock, flags);
        list_del(&hpdev->list_entry);
-       spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+       spin_unlock_irqrestore(&hbus->device_list_lock, flags);
 
        if (hpdev->pci_slot)
                pci_destroy_slot(hpdev->pci_slot);
@@ -1920,7 +1921,7 @@ static void hv_eject_device_work(struct work_struct *work)
        ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
        ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
        ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
-       vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
+       vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
                         sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
                         VM_PKT_DATA_INBAND, 0);
 
@@ -1929,7 +1930,9 @@ static void hv_eject_device_work(struct work_struct *work)
        /* For the two refs got in new_pcichild_device() */
        put_pcichild(hpdev);
        put_pcichild(hpdev);
-       put_hvpcibus(hpdev->hbus);
+       /* hpdev has been freed. Do not use it any more. */
+
+       put_hvpcibus(hbus);
 }
 
 /**
@@ -2703,8 +2706,8 @@ static int hv_pci_remove(struct hv_device *hdev)
                /* Remove the bus from PCI's point of view. */
                pci_lock_rescan_remove();
                pci_stop_root_bus(hbus->pci_bus);
-               pci_remove_root_bus(hbus->pci_bus);
                hv_pci_remove_slots(hbus);
+               pci_remove_root_bus(hbus->pci_bus);
                pci_unlock_rescan_remove();
                hbus->state = hv_pcibus_removed;
        }
index f4f53d092e00526cd4dec08e575334fd9f400c1e..976eaa9a9f2662e71444d35f20d7d384a765eaab 100644 (file)
@@ -1975,14 +1975,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
                err = of_pci_get_devfn(port);
                if (err < 0) {
                        dev_err(dev, "failed to parse address: %d\n", err);
-                       return err;
+                       goto err_node_put;
                }
 
                index = PCI_SLOT(err);
 
                if (index < 1 || index > soc->num_ports) {
                        dev_err(dev, "invalid port number: %d\n", index);
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto err_node_put;
                }
 
                index--;
@@ -1991,12 +1992,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
                if (err < 0) {
                        dev_err(dev, "failed to parse # of lanes: %d\n",
                                err);
-                       return err;
+                       goto err_node_put;
                }
 
                if (value > 16) {
                        dev_err(dev, "invalid # of lanes: %u\n", value);
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto err_node_put;
                }
 
                lanes |= value << (index << 3);
@@ -2010,13 +2012,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
                lane += value;
 
                rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
-               if (!rp)
-                       return -ENOMEM;
+               if (!rp) {
+                       err = -ENOMEM;
+                       goto err_node_put;
+               }
 
                err = of_address_to_resource(port, 0, &rp->regs);
                if (err < 0) {
                        dev_err(dev, "failed to parse address: %d\n", err);
-                       return err;
+                       goto err_node_put;
                }
 
                INIT_LIST_HEAD(&rp->list);
@@ -2043,6 +2047,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
                return err;
 
        return 0;
+
+err_node_put:
+       of_node_put(port);
+       return err;
 }
 
 /*
index ff4b4b8eb017f785c558aa55b05b8af136faa685..6906f7c0994907a4e6b8183b97d42c2d2d6c8361 100644 (file)
@@ -517,6 +517,64 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
        return 0;
 }
 
+static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
+                                   phys_addr_t addr, u8 interrupt_num,
+                                   u32 entry_size, u32 *msi_data)
+{
+       u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
+       struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+       u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+       struct cdns_pcie *pcie = &ep->pcie;
+       u16 flags, mme, data, data_mask;
+       u32 first_vf_offset, stride;
+       u8 msi_count;
+       u64 pci_addr;
+       int ret;
+       int i;
+
+       if (vfn > 0) {
+               first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+                                                       PCI_SRIOV_VF_OFFSET);
+               stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+                                              PCI_SRIOV_VF_STRIDE);
+               fn = fn + first_vf_offset + ((vfn - 1) * stride);
+       }
+
+       /* Check whether the MSI feature has been enabled by the PCI host. */
+       flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+       if (!(flags & PCI_MSI_FLAGS_ENABLE))
+               return -EINVAL;
+
+       /* Get the number of enabled MSIs */
+       mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+       msi_count = 1 << mme;
+       if (!interrupt_num || interrupt_num > msi_count)
+               return -EINVAL;
+
+       /* Compute the data value to be written. */
+       data_mask = msi_count - 1;
+       data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
+       data = data & ~data_mask;
+
+       /* Get the PCI address where to write the data into. */
+       pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
+       pci_addr <<= 32;
+       pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
+       pci_addr &= GENMASK_ULL(63, 2);
+
+       for (i = 0; i < interrupt_num; i++) {
+               ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr, pci_addr,
+                                           entry_size);
+               if (ret)
+                       return ret;
+               addr = addr + entry_size;
+       }
+
+       *msi_data = data;
+
+       return 0;
+}
+
 static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
                                      u16 interrupt_num)
 {
@@ -555,7 +613,7 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
        tbl_addr = ((u64)bar_addr_upper) << 32 | bar_addr_lower;
        tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
        tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
-       msix_tbl = phys_to_virt(tbl_addr);
+       msix_tbl = (void __iomem *)phys_to_virt(tbl_addr);
        if (!msix_tbl)
                return -EINVAL;
 
@@ -678,6 +736,7 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
        .set_msix       = cdns_pcie_ep_set_msix,
        .get_msix       = cdns_pcie_ep_get_msix,
        .raise_irq      = cdns_pcie_ep_raise_irq,
+       .map_msi_irq    = cdns_pcie_ep_map_msi_irq,
        .start          = cdns_pcie_ep_start,
        .get_features   = cdns_pcie_ep_get_features,
 };
index a939e8d31735a7bc0cb56b3fdc231dae50751ab0..a2d1e89d48674842e8e0afb0b711ec6370c602fb 100644 (file)
@@ -508,6 +508,12 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
                return err;
        }
 
+       /* setup bus numbers */
+       value = csr_readl(pcie, PCI_PRIMARY_BUS);
+       value &= 0xff000000;
+       value |= 0x00ff0100;
+       csr_writel(pcie, value, PCI_PRIMARY_BUS);
+
        /*
         * program Bus Master Enable Bit in Command Register in PAB Config
         * Space
@@ -547,7 +553,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
                        resource_size(pcie->ob_io_res));
 
        /* memory inbound translation window */
-       program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+       program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
 
        /* Get the I/O and memory ranges from DT */
        resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
@@ -559,11 +565,18 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
                if (type) {
                        /* configure outbound translation window */
                        program_ob_windows(pcie, pcie->ob_wins_configured,
-                               win->res->start, 0, type,
-                               resource_size(win->res));
+                                          win->res->start,
+                                          win->res->start - win->offset,
+                                          type, resource_size(win->res));
                }
        }
 
+       /* fixup for PCIe class register */
+       value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
+       value &= 0xff;
+       value |= (PCI_CLASS_BRIDGE_PCI << 16);
+       csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
+
        /* setup MSI hardware registers */
        mobiveil_pcie_enable_msi(pcie);
 
@@ -804,9 +817,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
                goto error;
        }
 
-       /* fixup for PCIe class register */
-       csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
-
        /* initialize the IRQ domains */
        ret = mobiveil_pcie_init_irq_domain(pcie);
        if (ret) {
index 1372d270764f9e8c863007260191a27ec65b39d7..5ce8e6375687572a3f981d974ab893abe0040353 100644 (file)
@@ -608,29 +608,29 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
 
        rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
        if (IS_ERR(rockchip->vpcie12v)) {
-               if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
+               if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
+                       return PTR_ERR(rockchip->vpcie12v);
                dev_info(dev, "no vpcie12v regulator found\n");
        }
 
        rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
        if (IS_ERR(rockchip->vpcie3v3)) {
-               if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
+               if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
+                       return PTR_ERR(rockchip->vpcie3v3);
                dev_info(dev, "no vpcie3v3 regulator found\n");
        }
 
        rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
        if (IS_ERR(rockchip->vpcie1v8)) {
-               if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
+               if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
+                       return PTR_ERR(rockchip->vpcie1v8);
                dev_info(dev, "no vpcie1v8 regulator found\n");
        }
 
        rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
        if (IS_ERR(rockchip->vpcie0v9)) {
-               if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
+               if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
+                       return PTR_ERR(rockchip->vpcie0v9);
                dev_info(dev, "no vpcie0v9 regulator found\n");
        }
 
index fb32840ce8e66ac75f7c164a402ac9a0e0ae094b..4850a1b8eec127628eb390a3cf5401078dbc3e2d 100644 (file)
@@ -483,15 +483,13 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
        int i;
 
        mutex_lock(&msi->lock);
-       bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
-                                        nr_irqs, 0);
-       if (bit >= INT_PCI_MSI_NR) {
+       bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
+                                     get_count_order(nr_irqs));
+       if (bit < 0) {
                mutex_unlock(&msi->lock);
                return -ENOSPC;
        }
 
-       bitmap_set(msi->bitmap, bit, nr_irqs);
-
        for (i = 0; i < nr_irqs; i++) {
                irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
                                domain->host_data, handle_simple_irq,
@@ -509,7 +507,8 @@ static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
        struct nwl_msi *msi = &pcie->msi;
 
        mutex_lock(&msi->lock);
-       bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
+       bitmap_release_region(msi->bitmap, data->hwirq,
+                             get_count_order(nr_irqs));
        mutex_unlock(&msi->lock);
 }
 
index fd2dbd7eed7bca808f44470ba060725acc1ec061..65eaa6b61868585d3258f17d178f326eaf1bf4ad 100644 (file)
@@ -31,6 +31,9 @@
 #define PCI_REG_VMLOCK         0x70
 #define MB2_SHADOW_EN(vmlock)  (vmlock & 0x2)
 
+#define MB2_SHADOW_OFFSET      0x2000
+#define MB2_SHADOW_SIZE                16
+
 enum vmd_features {
        /*
         * Device may contain registers which hint the physical location of the
@@ -94,6 +97,7 @@ struct vmd_dev {
        struct resource         resources[3];
        struct irq_domain       *irq_domain;
        struct pci_bus          *bus;
+       u8                      busn_start;
 
 #ifdef CONFIG_X86_DEV_DMA_OPS
        struct dma_map_ops      dma_ops;
@@ -465,7 +469,8 @@ static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
                                  unsigned int devfn, int reg, int len)
 {
        char __iomem *addr = vmd->cfgbar +
-                            (bus->number << 20) + (devfn << 12) + reg;
+                            ((bus->number - vmd->busn_start) << 20) +
+                            (devfn << 12) + reg;
 
        if ((addr - vmd->cfgbar) + len >=
            resource_size(&vmd->dev->resource[VMD_CFGBAR]))
@@ -588,7 +593,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
        unsigned long flags;
        LIST_HEAD(resources);
        resource_size_t offset[2] = {0};
-       resource_size_t membar2_offset = 0x2000, busn_start = 0;
+       resource_size_t membar2_offset = 0x2000;
 
        /*
         * Shadow registers may exist in certain VMD device ids which allow
@@ -600,7 +605,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
                u32 vmlock;
                int ret;
 
-               membar2_offset = 0x2018;
+               membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
                ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
                if (ret || vmlock == ~0)
                        return -ENODEV;
@@ -612,9 +617,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
                        if (!membar2)
                                return -ENOMEM;
                        offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
-                                               readq(membar2 + 0x2008);
+                                       readq(membar2 + MB2_SHADOW_OFFSET);
                        offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
-                                               readq(membar2 + 0x2010);
+                                       readq(membar2 + MB2_SHADOW_OFFSET + 8);
                        pci_iounmap(vmd->dev, membar2);
                }
        }
@@ -630,14 +635,14 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
                pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
                if (BUS_RESTRICT_CAP(vmcap) &&
                    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
-                       busn_start = 128;
+                       vmd->busn_start = 128;
        }
 
        res = &vmd->dev->resource[VMD_CFGBAR];
        vmd->resources[0] = (struct resource) {
                .name  = "VMD CFGBAR",
-               .start = busn_start,
-               .end   = busn_start + (resource_size(res) >> 20) - 1,
+               .start = vmd->busn_start,
+               .end   = vmd->busn_start + (resource_size(res) >> 20) - 1,
                .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
        };
 
@@ -705,8 +710,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
        pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
        pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
 
-       vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
-                                      sd, &resources);
+       vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
+                                       &vmd_ops, sd, &resources);
        if (!vmd->bus) {
                pci_free_resource_list(&resources);
                irq_domain_remove(vmd->irq_domain);
index 95b2fe47e3b06be6ecd210e8d834610021a22470..36cf33cf975cc242baf3aa98728186192dc7cfc2 100644 (file)
@@ -5,4 +5,5 @@
 
 obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS)    += pci-ep-cfs.o
 obj-$(CONFIG_PCI_ENDPOINT)             += pci-epc-core.o pci-epf-core.o\
-                                          pci-epc-mem.o functions/
+                                          pci-epc-mem.o pci-epf-bus.o \
+                                          functions/
index 8820d0f7ec77961e654bfeab2ed5a0e892ea76ee..55ac7bb2d46926bbd1ca1a2d75b3d95620e1178d 100644 (file)
@@ -12,3 +12,15 @@ config PCI_EPF_TEST
           for PCI Endpoint.
 
           If in doubt, say "N" to disable Endpoint test driver.
+
+config PCI_EPF_NTB
+       tristate "PCI Endpoint NTB driver"
+       depends on PCI_ENDPOINT
+       help
+          Select this configuration option to enable the NTB driver
+          for PCI Endpoint. NTB driver implements NTB controller
+          functionality using multiple PCIe endpoint instances. It
+          can support NTB endpoint function devices created using
+          device tree.
+
+          If in doubt, say "N" to disable Endpoint NTB driver.
index d6fafff080e2a7869c2ad535503815305d061979..96ab932a537a2480ee6af6b2cbf0b28e0f2e6e43 100644 (file)
@@ -4,3 +4,4 @@
 #
 
 obj-$(CONFIG_PCI_EPF_TEST)             += pci-epf-test.o
+obj-$(CONFIG_PCI_EPF_NTB)              += pci-epf-ntb.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
new file mode 100644 (file)
index 0000000..307b81d
--- /dev/null
@@ -0,0 +1,1176 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Endpoint Function Driver to implement Non-Transparent Bridge functionality
+ *
+ * Copyright (C) 2019 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+static struct workqueue_struct *kpcintb_workqueue;
+
+#define COMMAND_CONFIGURE_DOORBELL     1
+#define COMMAND_CONFIGURE_MW           2
+#define COMMAND_LINK_UP                        3
+
+#define COMMAND_STATUS_OK              BIT(0)
+#define COMMAND_STATUS_ERROR           BIT(1)
+#define LINK_STATUS_UP                 BIT(2)
+
+#define SPAD_COUNT                     64
+#define DB_COUNT                       4
+#define NTB_MW_OFFSET                  2
+#define DB_COUNT_MASK                  GENMASK(15, 0)
+#define MSIX_ENABLE                    BIT(16)
+#define MAX_DB_COUNT                   32
+#define MAX_MW                         4
+
+enum epf_ntb_bar {
+       BAR_CONFIG,
+       BAR_PEER_SPAD,
+       BAR_DB_MW1,
+       BAR_MW2,
+       BAR_MW3,
+       BAR_MW4,
+};
+
+struct epf_ntb {
+       u32 num_mws;
+       u32 *mws_size;
+       u32 db_count;
+       u32 spad_count;
+       struct pci_epf *epf;
+       struct epf_ntb_epc *epc[2];
+};
+
+struct epf_ntb_epc {
+       u8 func_no;
+       u8 vfunc_no;
+       bool linkup;
+       u32 spad_size;
+       struct pci_epc *epc;
+       struct epf_ntb *epf_ntb;
+       void __iomem *mw_addr[6];
+       struct epf_ntb_ctrl *reg;
+       struct pci_epf_bar *epf_bar;
+       enum pci_barno epf_ntb_bar[6];
+       struct delayed_work cmd_handler;
+       enum pci_epc_interface_type type;
+       const struct pci_epc_features *epc_features;
+};
+
+struct epf_ntb_ctrl {
+       u32     command;
+       u32     argument;
+       u32     status;
+       u32     topology;
+       u64     addr;
+       u32     size;
+       u32     mw1_offset;
+       u32     num_mws;
+       u32     spad_offset;
+       u32     spad_count;
+       u32     db_entry_size;
+       u32     db_data[MAX_DB_COUNT];
+} __packed;
+
+static struct pci_epf_header epf_ntb_header = {
+       .vendorid       = PCI_ANY_ID,
+       .deviceid       = PCI_ANY_ID,
+       .baseclass_code = PCI_BASE_CLASS_MEMORY,
+       .interrupt_pin  = PCI_INTERRUPT_INTA,
+};
+
+static int epf_ntb_link_up(struct epf_ntb *ntb)
+{
+       enum pci_epc_interface_type type;
+       struct epf_ntb_epc *ntb_epc;
+       struct epf_ntb_ctrl *ctrl;
+       u8 vfunc_no, func_no;
+       int ret;
+
+       for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+               ntb_epc = ntb->epc[type];
+               func_no = ntb_epc->func_no;
+               vfunc_no = ntb_epc->vfunc_no;
+               ctrl = ntb_epc->reg;
+               ctrl->status |= LINK_STATUS_UP;
+               ret = pci_epc_raise_irq(ntb_epc->epc, func_no, vfunc_no,
+                                       PCI_EPC_IRQ_MSI, 1);
+               if (ret < 0) {
+                       WARN(1, "%s intf: Failed to raise Link Up IRQ\n",
+                            pci_epc_interface_string(type));
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int
+epf_ntb_configure_mw(struct epf_ntb *ntb, enum pci_epc_interface_type type,
+                    u32 mw)
+{
+       struct epf_ntb_epc *peer_ntb_epc;
+       struct pci_epf_bar *peer_epf_bar;
+       struct epf_ntb_epc *ntb_epc;
+       enum pci_barno peer_barno;
+       struct epf_ntb_ctrl *ctrl;
+       phys_addr_t phys_addr;
+       u8 vfunc_no, func_no;
+       struct pci_epc *epc;
+       u64 addr;
+       u32 size;
+       int ret;
+
+       ntb_epc = ntb->epc[type];
+       epc = ntb_epc->epc;
+
+       peer_ntb_epc = ntb->epc[!type];
+       peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
+       peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+       phys_addr = peer_epf_bar->phys_addr;
+       ctrl = ntb_epc->reg;
+       addr = ctrl->addr;
+       size = ctrl->size;
+       if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
+               phys_addr += ctrl->mw1_offset;
+
+       func_no = ntb_epc->func_no;
+       vfunc_no = ntb_epc->vfunc_no;
+
+       ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, addr, size);
+       WARN(ret < 0, "%s intf: Failed to map memory window %d address\n",
+            pci_epc_interface_string(type), mw);
+
+       return ret;
+}
+
+static int
+epf_ntb_configure_db(struct epf_ntb *ntb, enum pci_epc_interface_type type,
+                    u16 db_count, bool msix)
+{
+       struct epf_ntb_epc *peer_ntb_epc;
+       struct pci_epf_bar *peer_epf_bar;
+       struct epf_ntb_ctrl *peer_ctrl;
+       struct epf_ntb_epc *ntb_epc;
+       enum pci_barno peer_barno;
+       phys_addr_t phys_addr;
+       u8 vfunc_no, func_no;
+       struct pci_epc *epc;
+       u32 db_entry_size;
+       u32 db_data;
+       int ret, i;
+
+       if (db_count > MAX_DB_COUNT)
+               return -EINVAL;
+
+       ntb_epc = ntb->epc[type];
+       epc = ntb_epc->epc;
+
+       peer_ntb_epc = ntb->epc[!type];
+       peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+       peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+       peer_ctrl = peer_ntb_epc->reg;
+       db_entry_size = peer_ctrl->db_entry_size;
+
+       phys_addr = peer_epf_bar->phys_addr;
+       func_no = ntb_epc->func_no;
+       vfunc_no = ntb_epc->vfunc_no;
+
+       ret = pci_epc_map_msi_irq(epc, func_no, vfunc_no, phys_addr, db_count,
+                                 db_entry_size, &db_data);
+       if (ret < 0) {
+               WARN(1, "%s intf: Failed to map MSI IRQ\n",
+                    pci_epc_interface_string(type));
+               return ret;
+       }
+
+       for (i = 0; i < db_count; i++)
+               peer_ctrl->db_data[i] = db_data | i;
+
+       return 0;
+}
+
+static void epf_ntb_cmd_handler(struct work_struct *work)
+{
+       enum pci_epc_interface_type type;
+       struct epf_ntb_epc *ntb_epc;
+       struct epf_ntb_ctrl *ctrl;
+       u32 command, argument;
+       struct epf_ntb *ntb;
+       struct device *dev;
+       u16 db_count;
+       bool is_msix;
+       int ret;
+
+       ntb_epc = container_of(work, struct epf_ntb_epc, cmd_handler.work);
+       ctrl = ntb_epc->reg;
+       command = ctrl->command;
+       if (!command)
+               goto reset_handler;
+       argument = ctrl->argument;
+
+       ctrl->command = 0;
+       ctrl->argument = 0;
+
+       ctrl = ntb_epc->reg;
+       type = ntb_epc->type;
+       ntb = ntb_epc->epf_ntb;
+       dev = &ntb->epf->dev;
+
+       switch (command) {
+       case COMMAND_CONFIGURE_DOORBELL:
+               db_count = argument & DB_COUNT_MASK;
+               is_msix = argument & MSIX_ENABLE;
+               ret = epf_ntb_configure_db(ntb, type, db_count, is_msix);
+               if (ret < 0)
+                       ctrl->status |= COMMAND_STATUS_ERROR;
+               else
+                       ctrl->status |= COMMAND_STATUS_OK;
+               break;
+       case COMMAND_CONFIGURE_MW:
+               ret = epf_ntb_configure_mw(ntb, type, argument);
+               if (ret < 0)
+                       ctrl->status |= COMMAND_STATUS_ERROR;
+               else
+                       ctrl->status |= COMMAND_STATUS_OK;
+               break;
+       case COMMAND_LINK_UP:
+               ntb_epc->linkup = true;
+               if (ntb->epc[PRIMARY_INTERFACE]->linkup &&
+                   ntb->epc[SECONDARY_INTERFACE]->linkup) {
+                       ret = epf_ntb_link_up(ntb);
+                       if (ret < 0)
+                               ctrl->status |= COMMAND_STATUS_ERROR;
+                       else
+                               ctrl->status |= COMMAND_STATUS_OK;
+                       goto reset_handler;
+               }
+               ctrl->status |= COMMAND_STATUS_OK;
+               break;
+       default:
+               dev_err(dev, "UNKNOWN command: %d\n", command);
+               break;
+       }
+
+reset_handler:
+       queue_delayed_work(kpcintb_workqueue, &ntb_epc->cmd_handler,
+                          msecs_to_jiffies(5));
+}
+
+static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+       struct pci_epf_bar *epf_bar;
+       enum pci_barno barno;
+       u8 vfunc_no, func_no;
+       struct pci_epc *epc;
+
+       epc = ntb_epc->epc;
+       func_no = ntb_epc->func_no;
+       vfunc_no = ntb_epc->vfunc_no;
+       barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+       epf_bar = &ntb_epc->epf_bar[barno];
+       pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
+}
+
+static int
+epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb, enum pci_epc_interface_type type)
+{
+       struct epf_ntb_epc *peer_ntb_epc;
+       struct pci_epf_bar *peer_epf_bar;
+       struct epf_ntb_epc *ntb_epc;
+       struct pci_epf_bar *epf_bar;
+       enum pci_barno peer_barno;
+       u32 peer_spad_offset;
+       enum pci_barno barno;
+       u8 vfunc_no, func_no;
+       struct pci_epc *epc;
+       struct device *dev;
+       int ret;
+
+       dev = &ntb->epf->dev;
+
+       peer_ntb_epc = ntb->epc[!type];
+       peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_CONFIG];
+       peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+       ntb_epc = ntb->epc[type];
+       barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+       epf_bar = &ntb_epc->epf_bar[barno];
+       func_no = ntb_epc->func_no;
+       vfunc_no = ntb_epc->vfunc_no;
+       epc = ntb_epc->epc;
+
+       peer_spad_offset = peer_ntb_epc->reg->spad_offset;
+       epf_bar->phys_addr = peer_epf_bar->phys_addr + peer_spad_offset;
+       epf_bar->size = peer_ntb_epc->spad_size;
+       epf_bar->barno = barno;
+       epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+       ret = pci_epc_set_bar(ntb_epc->epc, func_no, vfunc_no, epf_bar);
+       if (ret) {
+               dev_err(dev, "%s intf: peer SPAD BAR set failed\n",
+                       pci_epc_interface_string(type));
+               return ret;
+       }
+
+       return 0;
+}
+
+static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+       struct pci_epf_bar *epf_bar;
+       u8 vfunc_no, func_no;
+       enum pci_barno barno;
+       struct pci_epc *epc;
+
+       epc = ntb_epc->epc;
+       func_no = ntb_epc->func_no;
+       vfunc_no = ntb_epc->vfunc_no;
+       barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+       epf_bar = &ntb_epc->epf_bar[barno];
+       pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
+}
+
+static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
+{
+       struct pci_epf_bar *epf_bar;
+       enum pci_barno barno;
+       u8 vfunc_no, func_no;
+       struct epf_ntb *ntb;
+       struct pci_epc *epc;
+       struct device *dev;
+       int ret;
+
+       ntb = ntb_epc->epf_ntb;
+       dev = &ntb->epf->dev;
+
+       epc = ntb_epc->epc;
+       func_no = ntb_epc->func_no;
+       vfunc_no = ntb_epc->vfunc_no;
+       barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+       epf_bar = &ntb_epc->epf_bar[barno];
+
+       ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
+       if (ret) {
+               dev_err(dev, "%s inft: Config/Status/SPAD BAR set failed\n",
+                       pci_epc_interface_string(ntb_epc->type));
+               return ret;
+       }
+
+       return 0;
+}
+
+static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+{
+       enum pci_epc_interface_type type;
+       struct epf_ntb_epc *ntb_epc;
+       enum pci_barno barno;
+       struct pci_epf *epf;
+
+       epf = ntb->epf;
+       for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+               ntb_epc = ntb->epc[type];
+               barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+               if (ntb_epc->reg)
+                       pci_epf_free_space(epf, ntb_epc->reg, barno, type);
+       }
+}
+
+static int
+epf_ntb_config_spad_bar_alloc_interface(struct epf_ntb *ntb,
+                                       enum pci_epc_interface_type type)
+{
+       const struct pci_epc_features *peer_epc_features;
+       const struct pci_epc_features *epc_features;
+       struct epf_ntb_epc *peer_ntb_epc;
+       struct epf_ntb_epc *ntb_epc;
+       struct epf_ntb_ctrl *ctrl;
+       enum pci_barno peer_barno;
+       struct device_node *node;
+       u32 spad_size, ctrl_size;
+       enum pci_barno barno;
+       u64 size, peer_size;
+       struct pci_epc *epc;
+       struct pci_epf *epf;
+       struct device *dev;
+       u32 spad_count;
+       size_t align;
+       void *base;
+
+       epf = ntb->epf;
+       node = epf->node;
+       dev = &epf->dev;
+       ntb_epc = ntb->epc[type];
+       epc = ntb_epc->epc;
+
+       epc_features = ntb_epc->epc_features;
+       barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+       size = epc_features->bar_fixed_size[barno];
+       align = epc_features->align;
+
+       peer_ntb_epc = ntb->epc[!type];
+       peer_epc_features = peer_ntb_epc->epc_features;
+       peer_barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+       peer_size = peer_epc_features->bar_fixed_size[barno];
+
+       /* Check if epc_features is populated incorrectly */
+       if ((!IS_ALIGNED(size, align)))
+               return -EINVAL;
+
+       spad_count = SPAD_COUNT;
+       of_property_read_u32(node, "spad-count", &spad_count);
+
+       ctrl_size = sizeof(struct epf_ntb_ctrl);
+       spad_size = spad_count * 4;
+
+       if (!align) {
+               ctrl_size = roundup_pow_of_two(ctrl_size);
+               spad_size = roundup_pow_of_two(spad_size);
+       } else {
+               ctrl_size = ALIGN(ctrl_size, align);
+               spad_size = ALIGN(spad_size, align);
+       }
+
+       if (peer_size) {
+               if (peer_size < spad_size)
+                       spad_count = peer_size / 4;
+               spad_size = peer_size;
+       }
+
+       /*
+        * In order to make sure SPAD offset is aligned to its size,
+        * expand control region size to the size of SPAD if SPAD size
+        * is greater than control region size.
+        */
+       if (spad_size > ctrl_size)
+               ctrl_size = spad_size;
+
+       if (!size)
+               size = ctrl_size + spad_size;
+       else if (size < ctrl_size + spad_size)
+               return -EINVAL;
+
+       base = pci_epf_alloc_space(epf, size, barno, align, type);
+       if (!base) {
+               dev_err(dev, "%s intf: Config/Status/SPAD alloc region fail\n",
+                       pci_epc_interface_string(type));
+               return -ENOMEM;
+       }
+
+       ntb_epc->reg = base;
+
+       ctrl = ntb_epc->reg;
+       ctrl->spad_offset = ctrl_size;
+       ctrl->spad_count = spad_count;
+       ctrl->num_mws = ntb->num_mws;
+       ctrl->db_entry_size = align ? align : 4;
+       ntb_epc->spad_size = spad_size;
+
+       return 0;
+}
+
+static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
+{
+       enum pci_epc_interface_type type;
+       struct device *dev;
+       int ret;
+
+       dev = &ntb->epf->dev;
+
+       for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+               ret = epf_ntb_config_spad_bar_alloc_interface(ntb, type);
+               if (ret) {
+                       dev_err(dev, "%s intf: Config/SPAD BAR alloc failed\n",
+                               pci_epc_interface_string(type));
+                       goto err_config_spad_bar_alloc;
+               }
+       }
+
+       return 0;
+
+err_config_spad_bar_alloc:
+       epf_ntb_config_spad_bar_free(ntb);
+
+       return ret;
+}
+
+static void epf_ntb_free_peer_mem(struct epf_ntb_epc *ntb_epc)
+{
+       struct pci_epf_bar *epf_bar;
+       void __iomem *mw_addr;
+       phys_addr_t phys_addr;
+       enum epf_ntb_bar bar;
+       enum pci_barno barno;
+       struct pci_epc *epc;
+       size_t size;
+
+       epc = ntb_epc->epc;
+
+       for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) {
+               barno = ntb_epc->epf_ntb_bar[bar];
+               mw_addr = ntb_epc->mw_addr[barno];
+               epf_bar = &ntb_epc->epf_bar[barno];
+               phys_addr = epf_bar->phys_addr;
+               size = epf_bar->size;
+               if (mw_addr) {
+                       pci_epc_mem_free_addr(epc, phys_addr, mw_addr, size);
+                       ntb_epc->mw_addr[barno] = NULL;
+               }
+       }
+}
+
+static void epf_ntb_db_mw_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+       struct pci_epf_bar *epf_bar;
+       enum epf_ntb_bar bar;
+       enum pci_barno barno;
+       u8 vfunc_no, func_no;
+       struct pci_epc *epc;
+
+       epc = ntb_epc->epc;
+
+       func_no = ntb_epc->func_no;
+       vfunc_no = ntb_epc->vfunc_no;
+
+       for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) {
+               barno = ntb_epc->epf_ntb_bar[bar];
+               epf_bar = &ntb_epc->epf_bar[barno];
+               pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
+       }
+}
+
/* Tear down doorbell/memory-window state for one interface pair: clear this
 * side's DB/MW BARs, then free the outbound memory that was carved out of
 * the peer EPC's address space to back them.
 */
static void epf_ntb_db_mw_bar_cleanup(struct epf_ntb_epc *ntb_epc,
				      struct epf_ntb_epc *peer_ntb_epc)
{
	epf_ntb_db_mw_bar_clear(ntb_epc);
	epf_ntb_free_peer_mem(peer_ntb_epc);
}
+
+static int
+epf_ntb_alloc_peer_mem(struct device *dev, struct epf_ntb_epc *ntb_epc,
+                      enum epf_ntb_bar bar, struct epf_ntb_epc *peer_ntb_epc,
+                      size_t size)
+{
+       const struct pci_epc_features *epc_features;
+       struct pci_epf_bar *epf_bar;
+       struct pci_epc *peer_epc;
+       phys_addr_t phys_addr;
+       void __iomem *mw_addr;
+       enum pci_barno barno;
+       size_t align;
+
+       epc_features = ntb_epc->epc_features;
+       align = epc_features->align;
+
+       if (size < 128)
+               size = 128;
+
+       if (align)
+               size = ALIGN(size, align);
+       else
+               size = roundup_pow_of_two(size);
+
+       peer_epc = peer_ntb_epc->epc;
+       mw_addr = pci_epc_mem_alloc_addr(peer_epc, &phys_addr, size);
+       if (!mw_addr) {
+               dev_err(dev, "%s intf: Failed to allocate OB address\n",
+                       pci_epc_interface_string(peer_ntb_epc->type));
+               return -ENOMEM;
+       }
+
+       barno = ntb_epc->epf_ntb_bar[bar];
+       epf_bar = &ntb_epc->epf_bar[barno];
+       ntb_epc->mw_addr[barno] = mw_addr;
+
+       epf_bar->phys_addr = phys_addr;
+       epf_bar->size = size;
+       epf_bar->barno = barno;
+       epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+       return 0;
+}
+
+static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
+                                      enum pci_epc_interface_type type)
+{
+       const struct pci_epc_features *epc_features;
+       bool msix_capable, msi_capable;
+       struct epf_ntb_epc *ntb_epc;
+       struct device_node *node;
+       u8 vfunc_no, func_no;
+       struct pci_epc *epc;
+       struct device *dev;
+       u32 db_count;
+       int ret;
+
+       ntb_epc = ntb->epc[type];
+       dev = &ntb->epf->dev;
+       node = ntb->epf->node;
+
+       epc_features = ntb_epc->epc_features;
+       msix_capable = epc_features->msix_capable;
+       msi_capable = epc_features->msi_capable;
+
+       if (!(msix_capable || msi_capable)) {
+               dev_err(dev, "MSI or MSI-X is required for doorbell\n");
+               return -EINVAL;
+       }
+
+       func_no = ntb_epc->func_no;
+       vfunc_no = ntb_epc->vfunc_no;
+
+       db_count = DB_COUNT;
+       of_property_read_u32(node, "db-count", &db_count);
+       if (db_count > MAX_DB_COUNT) {
+               dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
+               return -EINVAL;
+       }
+
+       ntb->db_count = db_count;
+       epc = ntb_epc->epc;
+
+       if (msi_capable) {
+               ret = pci_epc_set_msi(epc, func_no, vfunc_no, db_count);
+               if (ret) {
+                       dev_err(dev, "%s intf: MSI configuration failed\n",
+                               pci_epc_interface_string(type));
+                       return ret;
+               }
+       }
+
+       if (msix_capable) {
+               ret = pci_epc_set_msix(epc, func_no, vfunc_no, db_count);
+               if (ret) {
+                       dev_err(dev, "MSI configuration failed\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
/*
 * epf_ntb_db_mw_bar_init() - Configure doorbell and memory-window BARs
 * @ntb: NTB device that facilitates communication between the two hosts
 * @type: PRIMARY_INTERFACE or SECONDARY_INTERFACE
 *
 * For every memory window, allocate outbound memory from the *peer* EPC
 * (epf_ntb_alloc_peer_mem()) and commit the corresponding BAR of this
 * interface. BAR_DB_MW1 is special: it carries the doorbell entries in
 * front of the MW1 data, and the MW1 data offset inside that BAR is
 * published to the host via ctrl->mw1_offset.
 */
static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
				  enum pci_epc_interface_type type)
{
	const struct pci_epc_features *epc_features;
	struct epf_ntb_epc *peer_ntb_epc;
	struct epf_ntb_epc *ntb_epc;
	struct pci_epf_bar *epf_bar;
	struct epf_ntb_ctrl *ctrl;
	enum epf_ntb_bar bar;
	u8 vfunc_no, func_no;
	enum pci_barno barno;
	struct pci_epc *epc;
	struct device *dev;
	u32 num_mws, size;
	u32 db_count;
	size_t align;
	int ret;
	int i;

	ntb_epc = ntb->epc[type];
	peer_ntb_epc = ntb->epc[!type];

	dev = &ntb->epf->dev;
	epc_features = ntb_epc->epc_features;
	align = epc_features->align;
	func_no = ntb_epc->func_no;
	vfunc_no = ntb_epc->vfunc_no;
	epc = ntb_epc->epc;
	num_mws = ntb->num_mws;
	db_count = ntb->db_count;

	for (bar = BAR_DB_MW1, i = 0; i < num_mws; bar++, i++) {
		if (bar == BAR_DB_MW1) {
			/* One doorbell entry of 'align' bytes (4 if the EPC
			 * reports no alignment constraint) per doorbell.
			 */
			align = align ? align : 4;
			size = db_count * align;
			/* assumes mws_size[0] is a power of two, as ALIGN()
			 * requires - TODO confirm against DT validation
			 */
			size = ALIGN(size, ntb->mws_size[i]);
			ctrl = ntb_epc->reg;
			ctrl->mw1_offset = size;
			size += ntb->mws_size[i];
		} else {
			size = ntb->mws_size[i];
		}

		ret = epf_ntb_alloc_peer_mem(dev, ntb_epc, bar,
					     peer_ntb_epc, size);
		if (ret)
			goto err_alloc_peer_mem;

		barno = ntb_epc->epf_ntb_bar[bar];
		epf_bar = &ntb_epc->epf_bar[barno];

		ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
		if (ret) {
			dev_err(dev, "%s intf: DoorBell BAR set failed\n",
				pci_epc_interface_string(type));
			goto err_alloc_peer_mem;
		}
	}

	return 0;

err_alloc_peer_mem:
	/* Frees whatever windows were allocated so far and clears the BARs. */
	epf_ntb_db_mw_bar_cleanup(ntb_epc, peer_ntb_epc);

	return ret;
}
+
+static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
+{
+       enum pci_epc_interface_type type;
+       struct epf_ntb_epc *ntb_epc;
+       struct pci_epc *epc;
+       struct pci_epf *epf;
+
+       epf = ntb->epf;
+       for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+               ntb_epc = ntb->epc[type];
+               if (!ntb_epc)
+                       return;
+               epc = ntb_epc->epc;
+               pci_epc_remove_epf(epc, epf, type);
+               pci_epc_put(epc);
+       }
+}
+
+static int
+epf_ntb_epc_create_interface(struct epf_ntb *ntb, struct pci_epc *epc,
+                            enum pci_epc_interface_type type)
+{
+       const struct pci_epc_features *epc_features;
+       struct pci_epf_bar *epf_bar;
+       struct epf_ntb_epc *ntb_epc;
+       u8 vfunc_no, func_no;
+       struct pci_epf *epf;
+       struct device *dev;
+
+       dev = &ntb->epf->dev;
+
+       ntb_epc = devm_kzalloc(dev, sizeof(*ntb_epc), GFP_KERNEL);
+       if (!ntb_epc)
+               return -ENOMEM;
+
+       epf = ntb->epf;
+       if (type == PRIMARY_INTERFACE) {
+               func_no = epf->func_no;
+               vfunc_no = epf->vfunc_no;
+               epf_bar = epf->bar;
+       } else {
+               func_no = epf->sec_epc_func_no;
+               vfunc_no = epf->sec_epc_vfunc_no;
+               epf_bar = epf->sec_epc_bar;
+       }
+
+       ntb_epc->linkup = false;
+       ntb_epc->epc = epc;
+       ntb_epc->func_no = func_no;
+       ntb_epc->vfunc_no = vfunc_no;
+       ntb_epc->type = type;
+       ntb_epc->epf_bar = epf_bar;
+       ntb_epc->epf_ntb = ntb;
+
+       epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
+       ntb_epc->epc_features = epc_features;
+
+       ntb->epc[type] = ntb_epc;
+
+       return 0;
+}
+
+static int epf_ntb_epc_create(struct epf_ntb *ntb)
+{
+       enum pci_epc_interface_type type;
+       struct device_node *node;
+       const char *epc_name;
+       struct pci_epc *epc;
+       struct pci_epf *epf;
+       struct device *dev;
+       int ret;
+
+       epf = ntb->epf;
+       node = epf->node;
+       dev = &epf->dev;
+
+       for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+               epc_name = pci_epc_interface_string(type);
+
+               epc = of_pci_epc_get_by_name(node, epc_name);
+               if (IS_ERR(epc)) {
+                       if (PTR_ERR(epc) != -EPROBE_DEFER)
+                               dev_err(dev, "%s intf: Failed to get EPC\n",
+                                       epc_name);
+                       ret = PTR_ERR(epc);
+                       goto err_epc_get;
+               }
+
+               ret = pci_epc_add_epf(epc, epf, type);
+               if (ret) {
+                       dev_err(dev, "%s intf: Fail to add EPF to EPC\n",
+                               epc_name);
+                       goto err_epc_get;
+               }
+
+               ret = epf_ntb_epc_create_interface(ntb, epc, type);
+               if (ret) {
+                       dev_err(dev, "%s intf: Fail to create NTB EPC\n",
+                               epc_name);
+                       goto err_epc_get;
+               }
+       }
+
+       return 0;
+
+err_epc_get:
+       epf_ntb_epc_destroy(ntb);
+
+       return ret;
+}
+
+static int epf_ntb_init_epc_bar_interface(struct epf_ntb *ntb,
+                                         enum pci_epc_interface_type type)
+{
+       const struct pci_epc_features *epc_features;
+       struct epf_ntb_epc *ntb_epc;
+       enum pci_barno barno;
+       enum epf_ntb_bar bar;
+       struct device *dev;
+       u32 num_mws;
+       int i;
+
+       barno = BAR_0;
+       ntb_epc = ntb->epc[type];
+       num_mws = ntb->num_mws;
+       dev = &ntb->epf->dev;
+       epc_features = ntb_epc->epc_features;
+
+       /* These are required BARs which are mandatory for NTB functionality */
+       for (bar = BAR_CONFIG; bar <= BAR_DB_MW1; bar++, barno++) {
+               barno = pci_epc_get_next_free_bar(epc_features, barno);
+               if (barno < 0) {
+                       dev_err(dev, "%s intf: Fail to get NTB function BAR\n",
+                               pci_epc_interface_string(type));
+                       return barno;
+               }
+               ntb_epc->epf_ntb_bar[bar] = barno;
+       }
+
+       /* These are optional BARs which doesn't impact NTB functionality */
+       for (bar = BAR_MW2, i = 1; i < num_mws; bar++, barno++, i++) {
+               barno = pci_epc_get_next_free_bar(epc_features, barno);
+               if (barno < 0) {
+                       ntb->num_mws = i;
+                       dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
+               }
+               ntb_epc->epf_ntb_bar[bar] = barno;
+       }
+
+       return 0;
+}
+
+static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+{
+       enum pci_epc_interface_type type;
+       struct device *dev;
+       int ret;
+
+       dev = &ntb->epf->dev;
+       for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+               ret = epf_ntb_init_epc_bar_interface(ntb, type);
+               if (ret) {
+                       dev_err(dev, "Fail to init EPC bar for %s interface\n",
+                               pci_epc_interface_string(type));
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
/*
 * epf_ntb_epc_init_interface() - Bring up one interface of the NTB function
 * @ntb: NTB device that facilitates communication between the two hosts
 * @type: PRIMARY_INTERFACE or SECONDARY_INTERFACE
 *
 * Performs the full per-interface bring-up in order: self config/SPAD BAR,
 * peer SPAD BAR, MSI/MSI-X doorbell configuration, doorbell/memory-window
 * BARs, configuration-space header, and finally starts the command handler
 * work item that polls the host's command register. On failure each step's
 * predecessors are unwound in reverse order via the cascaded labels.
 */
static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
				      enum pci_epc_interface_type type)
{
	struct epf_ntb_epc *ntb_epc;
	u8 vfunc_no, func_no;
	struct pci_epc *epc;
	struct pci_epf *epf;
	struct device *dev;
	int ret;

	ntb_epc = ntb->epc[type];
	epf = ntb->epf;
	dev = &epf->dev;
	epc = ntb_epc->epc;
	func_no = ntb_epc->func_no;
	vfunc_no = ntb_epc->vfunc_no;

	ret = epf_ntb_config_sspad_bar_set(ntb->epc[type]);
	if (ret) {
		dev_err(dev, "%s intf: Config/self SPAD BAR init failed\n",
			pci_epc_interface_string(type));
		return ret;
	}

	ret = epf_ntb_peer_spad_bar_set(ntb, type);
	if (ret) {
		dev_err(dev, "%s intf: Peer SPAD BAR init failed\n",
			pci_epc_interface_string(type));
		goto err_peer_spad_bar_init;
	}

	ret = epf_ntb_configure_interrupt(ntb, type);
	if (ret) {
		dev_err(dev, "%s intf: Interrupt configuration failed\n",
			pci_epc_interface_string(type));
		goto err_peer_spad_bar_init;
	}

	ret = epf_ntb_db_mw_bar_init(ntb, type);
	if (ret) {
		dev_err(dev, "%s intf: DB/MW BAR init failed\n",
			pci_epc_interface_string(type));
		goto err_db_mw_bar_init;
	}

	ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
	if (ret) {
		dev_err(dev, "%s intf: Configuration header write failed\n",
			pci_epc_interface_string(type));
		goto err_write_header;
	}

	/* Kick off the polling work that services host commands (and
	 * re-queues itself from epf_ntb_cmd_handler()).
	 */
	INIT_DELAYED_WORK(&ntb->epc[type]->cmd_handler, epf_ntb_cmd_handler);
	queue_work(kpcintb_workqueue, &ntb->epc[type]->cmd_handler.work);

	return 0;

err_write_header:
	epf_ntb_db_mw_bar_cleanup(ntb->epc[type], ntb->epc[!type]);

err_db_mw_bar_init:
	epf_ntb_peer_spad_bar_clear(ntb->epc[type]);

err_peer_spad_bar_init:
	epf_ntb_config_sspad_bar_clear(ntb->epc[type]);

	return ret;
}
+
+static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+{
+       enum pci_epc_interface_type type;
+       struct epf_ntb_epc *peer_ntb_epc;
+       struct epf_ntb_epc *ntb_epc;
+
+       for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+               ntb_epc = ntb->epc[type];
+               peer_ntb_epc = ntb->epc[!type];
+               cancel_delayed_work(&ntb_epc->cmd_handler);
+               epf_ntb_db_mw_bar_cleanup(ntb_epc, peer_ntb_epc);
+               epf_ntb_peer_spad_bar_clear(ntb_epc);
+               epf_ntb_config_sspad_bar_clear(ntb_epc);
+       }
+}
+
+static int epf_ntb_epc_init(struct epf_ntb *ntb)
+{
+       enum pci_epc_interface_type type;
+       struct device *dev;
+       int ret;
+
+       dev = &ntb->epf->dev;
+
+       for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+               ret = epf_ntb_epc_init_interface(ntb, type);
+               if (ret) {
+                       dev_err(dev, "%s intf: Failed to initialize\n",
+                               pci_epc_interface_string(type));
+                       goto err_init_type;
+               }
+       }
+
+       return 0;
+
+err_init_type:
+       epf_ntb_epc_cleanup(ntb);
+
+       return ret;
+}
+
+static int epf_ntb_of_parse_mw(struct epf_ntb *ntb, struct device_node *node)
+{
+       struct device *dev;
+       u32 *mws_size;
+       u32 num_mws;
+       int ret;
+
+       dev = &ntb->epf->dev;
+       ret = of_property_read_u32(node, "num-mws", &num_mws);
+       if (ret) {
+               dev_err(dev, "Failed to get num-mws dt property\n");
+               return ret;
+       }
+
+       if (num_mws > MAX_MW) {
+               dev_err(dev, "Cannot support more than 4 memory window\n");
+               return ret;
+       }
+
+       mws_size = devm_kzalloc(dev, sizeof(*mws_size) * num_mws, GFP_KERNEL);
+       if (!mws_size)
+               return -ENOMEM;
+
+       ret = of_property_read_u32_array(node, "mws-size", mws_size,
+                                        num_mws);
+       if (ret) {
+               dev_err(dev, "Failed to get mws-size dt property\n");
+               return ret;
+       }
+
+       ntb->num_mws = num_mws;
+       ntb->mws_size = mws_size;
+
+       return 0;
+}
+
+static int pci_epf_ntb_of_parse(struct epf_ntb *ntb)
+{
+       struct device_node *node;
+       struct pci_epf *epf;
+       struct device *dev;
+       int ret;
+
+       epf = ntb->epf;
+       node = epf->node;
+       dev = &epf->dev;
+
+       epf->header = &epf_ntb_header;
+       pci_epc_of_parse_header(node, epf->header);
+
+       ret = epf_ntb_of_parse_mw(ntb, node);
+       if (ret) {
+               dev_err(dev, "Invalid memory window configuration in DT\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int pci_epf_ntb_probe(struct pci_epf *epf)
+{
+       struct epf_ntb *ntb;
+       struct device *dev;
+       int ret;
+
+       dev = &epf->dev;
+
+       ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
+       if (!ntb)
+               return -ENOMEM;
+
+       ntb->epf = epf;
+
+       ret = pci_epf_ntb_of_parse(ntb);
+       if (ret) {
+               dev_err(dev, "Failed to parse NTB DT node\n");
+               return ret;
+       }
+
+       ret = epf_ntb_epc_create(ntb);
+       if (ret) {
+               dev_err(dev, "Failed to create NTB EPC\n");
+               return ret;
+       }
+
+       ret = epf_ntb_init_epc_bar(ntb);
+       if (ret) {
+               dev_err(dev, "Failed to create NTB EPC\n");
+               goto err_bar_init;
+       }
+
+       ret = epf_ntb_config_spad_bar_alloc(ntb);
+       if (ret) {
+               dev_err(dev, "Failed to allocate BAR memory\n");
+               goto err_bar_init;
+       }
+
+       ret = epf_ntb_epc_init(ntb);
+       if (ret) {
+               dev_err(dev, "Failed to initialize EPC\n");
+               goto err_epc_init;
+       }
+
+       epf_set_drvdata(epf, ntb);
+
+       return 0;
+
+err_epc_init:
+       epf_ntb_config_spad_bar_free(ntb);
+
+err_bar_init:
+       epf_ntb_epc_destroy(ntb);
+
+       return ret;
+}
+
/* Undo probe in reverse order: stop handlers and clear BARs, free the
 * config/SPAD regions, then detach from and release both EPCs.
 */
static int pci_epf_ntb_remove(struct pci_epf *epf)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);

	epf_ntb_epc_cleanup(ntb);
	epf_ntb_config_spad_bar_free(ntb);
	epf_ntb_epc_destroy(ntb);

	return 0;
}
+
/* Match table: binds endpoint functions created with the name "pci-epf-ntb". */
static const struct pci_epf_device_id pci_epf_ntb_ids[] = {
	{
		.name = "pci-epf-ntb",
	},
	{},
};

/* Endpoint-function driver glue; probe/remove are defined above. */
static struct pci_epf_driver epf_ntb_driver = {
	.driver.name    = "pci_epf_ntb",
	.probe          = pci_epf_ntb_probe,
	.remove         = pci_epf_ntb_remove,
	.id_table       = pci_epf_ntb_ids,
	.owner          = THIS_MODULE,
};
+
+static int __init pci_epf_ntb_init(void)
+{
+       int ret;
+
+       kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
+                                           WQ_HIGHPRI, 0);
+       ret = pci_epf_register_driver(&epf_ntb_driver);
+       if (ret) {
+               pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+module_init(pci_epf_ntb_init);
+
+static void __exit pci_epf_ntb_exit(void)
+{
+       pci_epf_unregister_driver(&epf_ntb_driver);
+}
+module_exit(pci_epf_ntb_exit);
+
+MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
index 608545aaf7c8b8273dc9a97e2804ca1b9a8e037d..c7003c2dbbb2199070d9af9638d0c54419093fa3 100644 (file)
@@ -484,7 +484,8 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
                epf_bar = &epf->bar[bar];
 
                if (epf_test->reg[bar]) {
-                       pci_epf_free_space(epf, epf_test->reg[bar], bar);
+                       pci_epf_free_space(epf, epf_test->reg[bar], bar,
+                                          PRIMARY_INTERFACE);
                        pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
                                          epf_bar);
                }
@@ -514,7 +515,8 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
                ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
                                      epf_bar);
                if (ret) {
-                       pci_epf_free_space(epf, epf_test->reg[bar], bar);
+                       pci_epf_free_space(epf, epf_test->reg[bar], bar,
+                                          PRIMARY_INTERFACE);
                        dev_err(dev, "Failed to set BAR%d\n", bar);
                        if (bar == test_reg_bar)
                                return ret;
@@ -544,7 +546,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
        epc_features = epf_test->epc_features;
 
        base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
-                                  test_reg_bar, epc_features->align);
+                                  test_reg_bar, epc_features->align,
+                                  PRIMARY_INTERFACE);
        if (!base) {
                dev_err(dev, "Failed to allocated register space\n");
                return -ENOMEM;
@@ -560,7 +563,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
                        continue;
 
                base = pci_epf_alloc_space(epf, bar_size[bar], bar,
-                                          epc_features->align);
+                                          epc_features->align,
+                                          PRIMARY_INTERFACE);
                if (!base)
                        dev_err(dev, "Failed to allocate space for BAR%d\n",
                                bar);
index c18ef626ada54e1abeb42190479d048490bae984..f274a5b6ee10568845defce16f25fe5bcd9bf924 100644 (file)
@@ -93,13 +93,13 @@ static int pci_epc_epf_link(struct config_item *epc_item,
        struct pci_epc *epc = epc_group->epc;
        struct pci_epf *epf = epf_group->epf;
 
-       ret = pci_epc_add_epf(epc, epf);
+       ret = pci_epc_add_epf(epc, epf, PRIMARY_INTERFACE);
        if (ret)
                return ret;
 
        ret = pci_epf_bind(epf);
        if (ret) {
-               pci_epc_remove_epf(epc, epf);
+               pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
                return ret;
        }
 
@@ -119,7 +119,7 @@ static void pci_epc_epf_unlink(struct config_item *epc_item,
        epc = epc_group->epc;
        epf = epf_group->epf;
        pci_epf_unbind(epf);
-       pci_epc_remove_epf(epc, epf);
+       pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
 }
 
 static struct configfs_item_operations pci_epc_item_ops = {
index 5bc094093a479ddbe4c80b6cffbca2c0fbe46a0e..a93c78488bcad49834c48177ab65a6dbf5da4235 100644 (file)
@@ -31,6 +31,29 @@ static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
        return *epc == match_data;
 }
 
/**
 * pci_epc_of_parse_header() - parse the device tree to get PCI config space
 *                             header
 * @node: The device tree node (of endpoint function) which has the PCI config
 *        space header values
 * @header: standard configuration space header fields that has to be populated
 *
 * Invoke to populate *header* with the PCI configuration space values populated
 * in device tree.
 */
void pci_epc_of_parse_header(struct device_node *node,
			     struct pci_epf_header *header)
{
	/* of_property_read_*() does not write the output argument when the
	 * property is absent, so any field missing from DT keeps whatever
	 * default the caller pre-loaded into *header*.
	 */
	of_property_read_u16(node, "vendor-id", &header->vendorid);
	of_property_read_u16(node, "device-id", &header->deviceid);
	of_property_read_u8(node, "baseclass-code", &header->baseclass_code);
	of_property_read_u8(node, "subclass-code", &header->subclass_code);
	of_property_read_u16(node, "subsys-vendor-id",
			     &header->subsys_vendor_id);
	of_property_read_u16(node, "subsys-id", &header->subsys_id);
}
EXPORT_SYMBOL_GPL(pci_epc_of_parse_header);
+
 /**
  * pci_epc_put() - release the PCI endpoint controller
  * @epc: epc returned by pci_epc_get()
@@ -83,28 +106,113 @@ err:
 }
 EXPORT_SYMBOL_GPL(pci_epc_get);
 
+/**
+ * of_pci_epc_get() - get PCI endpoint controller from device node and index
+ * @node: device node which contains the phandle to endpoint controller
+ * @index: index of the endpoint controller in "epcs" property
+ *
+ * Returns the EPC corresponding to the _index_ entry in "epcs" property
+ * present in device node, after getting a refcount to it, or -ENODEV if
+ * there is no such EPC, or -EPROBE_DEFER if there is a phandle to the EPC,
+ * but the device is not yet loaded.
+ */
+struct pci_epc *of_pci_epc_get(struct device_node *node, int index)
+{
+       struct device_node *epc_node;
+       struct class_dev_iter iter;
+       struct pci_epc *epc;
+       struct device *dev;
+
+       epc_node = of_parse_phandle(node, "epcs", index);
+       if (!epc_node)
+               return ERR_PTR(-ENODEV);
+
+       class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
+       while ((dev = class_dev_iter_next(&iter))) {
+               epc = to_pci_epc(dev);
+               if (epc_node != epc->dev.of_node)
+                       continue;
+
+               of_node_put(epc_node);
+               class_dev_iter_exit(&iter);
+               get_device(&epc->dev);
+               return epc;
+       }
+
+       of_node_put(epc_node);
+       class_dev_iter_exit(&iter);
+       return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL_GPL(of_pci_epc_get);
+
+/**
+ * of_pci_epc_get_by_name() - get PCI endpoint controller from device node
+ *                            and string
+ * @node: device node which contains the phandle to endpoint controller
+ * @epc_name: name of endpoint controller as present in "epc-names" property
+ *
+ * Returns the EPC corresponding to the epc_name in "epc-names" property
+ * present in device node.
+ */
+struct pci_epc *of_pci_epc_get_by_name(struct device_node *node,
+                                      const char *epc_name)
+{
+       int index = 0;
+
+       if (epc_name)
+               index = of_property_match_string(node, "epc-names", epc_name);
+
+       return of_pci_epc_get(node, index);
+}
+EXPORT_SYMBOL_GPL(of_pci_epc_get_by_name);
+
 /**
  * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
  * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
  *
- * Invoke to get the first unreserved BAR that can be used for endpoint
+ * Invoke to get the first unreserved BAR that can be used by the endpoint
  * function. For any incorrect value in reserved_bar return '0'.
  */
-unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
-                                       *epc_features)
+int pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
+{
+       return pci_epc_get_next_free_bar(epc_features, BAR_0);
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
+
+/**
+ * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
+ * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
+ * @bar: the starting BAR number from where unreserved BAR should be searched
+ *
+ * Invoke to get the next unreserved BAR starting from @bar that can be used
+ * for endpoint function. Returns -EINVAL if no unreserved BAR is available.
+ */
+int pci_epc_get_next_free_bar(const struct pci_epc_features
+                             *epc_features, enum pci_barno bar)
 {
-       int free_bar;
+       unsigned long free_bar;
 
        if (!epc_features)
                return 0;
 
-       free_bar = ffz(epc_features->reserved_bar);
+       /* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
+       if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
+               bar++;
+
+       /* Find if the reserved BAR is also a 64-bit BAR */
+       free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;
+
+       /* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
+       free_bar <<= 1;
+       free_bar |= epc_features->reserved_bar;
+
+       free_bar = find_next_zero_bit(&free_bar, 6, bar);
        if (free_bar > 5)
-               return 0;
+               return -EINVAL;
 
        return free_bar;
 }
-EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
+EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
 
 /**
  * pci_epc_get_features() - get the features supported by EPC
@@ -254,6 +362,46 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 }
 EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
 
+/**
+ * pci_epc_map_msi_irq() - Map physical address to MSI address and return
+ *                         MSI data
+ * @epc: the EPC device which has the MSI capability
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @phys_addr: the physical address of the outbound region
+ * @interrupt_num: the MSI interrupt number
+ * @entry_size: Size of Outbound address region for each interrupt
+ * @msi_data: the data that should be written in order to raise MSI interrupt
+ *            with interrupt number as 'interrupt num'
+ *
+ * Invoke to map physical address to MSI address and return MSI data. The
+ * physical address should be an address in the outbound region. This is
+ * required to implement doorbell functionality of NTB wherein EPC on either
+ * side of the interface (primary and secondary) can directly write to the
+ * physical address (in outbound region) of the other interface to ring
+ * doorbell.
+ */
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+                       phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
+                       u32 *msi_data)
+{
+       int ret;
+
+       if (IS_ERR_OR_NULL(epc))
+               return -EINVAL;
+
+       if (!epc->ops->map_msi_irq)
+               return -EINVAL;
+
+       mutex_lock(&epc->lock);
+       ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
+                                   interrupt_num, entry_size, msi_data);
+       mutex_unlock(&epc->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
+
 /**
  * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
  * @epc: the EPC device to which MSI interrupts was requested
@@ -525,17 +673,22 @@ EXPORT_SYMBOL_GPL(pci_epc_write_header);
  * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
  * @epc: the EPC device to which the endpoint function should be added
  * @epf: the endpoint function to be added
+ * @type: Identifies if the EPC is connected to the primary or secondary
+ *        interface of EPF
  *
  * A PCI endpoint device can have one or more functions. In the case of PCIe,
  * the specification allows up to 8 PCIe endpoint functions. Invoke
  * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
  */
-int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
+                   enum pci_epc_interface_type type)
 {
+       struct list_head *list;
        u32 func_no = 0;
+       int ret = 0;
 
-       if (epf->epc || epf->is_vf)
-               return -EBUSY;
+       if (epf->is_vf)
+               return -EINVAL;
 
        if (IS_ERR(epc))
                return -EINVAL;
@@ -543,20 +696,37 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
        if (epf->func_no > epc->max_functions - 1)
                return -EINVAL;
 
+       if (type == PRIMARY_INTERFACE && epf->epc)
+               return -EBUSY;
+
+       if (type == SECONDARY_INTERFACE && epf->sec_epc)
+               return -EBUSY;
+
        mutex_lock(&epc->lock);
        func_no = find_first_zero_bit(&epc->function_num_map,
                                      BITS_PER_LONG);
-       if (func_no >= BITS_PER_LONG)
-               return -EINVAL;
+       if (func_no >= BITS_PER_LONG) {
+               ret = -EINVAL;
+               goto err;
+       }
 
        set_bit(func_no, &epc->function_num_map);
-       epf->func_no = func_no;
-       epf->epc = epc;
+       if (type == PRIMARY_INTERFACE) {
+               epf->func_no = func_no;
+               epf->epc = epc;
+               list = &epf->list;
+       } else {
+               epf->sec_epc_func_no = func_no;
+               epf->sec_epc = epc;
+               list = &epf->sec_epc_list;
+       }
+
+       list_add_tail(list, &epc->pci_epf);
 
-       list_add_tail(&epf->list, &epc->pci_epf);
+err:
        mutex_unlock(&epc->lock);
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(pci_epc_add_epf);
 
@@ -567,14 +737,26 @@ EXPORT_SYMBOL_GPL(pci_epc_add_epf);
  *
  * Invoke to remove PCI endpoint function from the endpoint controller.
  */
-void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
+                       enum pci_epc_interface_type type)
 {
+       struct list_head *list;
+       u32 func_no = 0;
+
        if (!epc || IS_ERR(epc))
                return;
 
+       if (type == PRIMARY_INTERFACE) {
+               func_no = epf->func_no;
+               list = &epf->list;
+       } else {
+               func_no = epf->sec_epc_func_no;
+               list = &epf->sec_epc_list;
+       }
+
        mutex_lock(&epc->lock);
-       clear_bit(epf->func_no, &epc->function_num_map);
-       list_del(&epf->list);
+       clear_bit(func_no, &epc->function_num_map);
+       list_del(list);
        mutex_unlock(&epc->lock);
 }
 EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
@@ -661,6 +843,7 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
        device_initialize(&epc->dev);
        epc->dev.class = pci_epc_class;
        epc->dev.parent = dev;
+       epc->dev.of_node = dev->of_node;
        epc->ops = ops;
 
        ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
diff --git a/drivers/pci/endpoint/pci-epf-bus.c b/drivers/pci/endpoint/pci-epf-bus.c
new file mode 100644 (file)
index 0000000..c47eeae
--- /dev/null
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Endpoint *Function* Bus Driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci-epf.h>
+#include <linux/platform_device.h>
+
+static int pci_epf_bus_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = of_node_get(dev->of_node);
+       struct device_node *child;
+       struct pci_epf *epf;
+
+       for_each_child_of_node(node, child) {
+               epf = devm_pci_epf_of_create(dev, child);
+               if (IS_ERR(epf)) {
+                       dev_err(dev, "Failed to create PCI EPF device %s\n",
+                               child->full_name);
+                       of_node_put(child);
+                       break;
+               }
+       }
+       of_node_put(node);
+
+       return 0;
+}
+
+static const struct of_device_id pci_epf_bus_id_table[] = {
+       { .compatible = "pci-epf-bus" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, pci_epf_bus_id_table);
+
+static struct platform_driver pci_epf_bus_driver = {
+       .probe          = pci_epf_bus_probe,
+       .driver         = {
+               .name   = "pci-epf-bus",
+               .of_match_table = of_match_ptr(pci_epf_bus_id_table),
+       },
+};
+
+module_platform_driver(pci_epf_bus_driver);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("PCI EPF Bus Driver");
+MODULE_LICENSE("GPL v2");
index 16feff8cad50006898b8dd08caedb12c6e518264..9fa4d8d4d8297b7ca6bbe89127cde33d6b01af07 100644 (file)
@@ -320,23 +320,36 @@ EXPORT_SYMBOL_GPL(pci_epf_remove_vepf);
  * pci_epf_free_space() - free the allocated PCI EPF register space
  * @addr: the virtual address of the PCI EPF register space
  * @bar: the BAR number corresponding to the register space
+ * @type: Identifies if the allocated space is for primary EPC or secondary EPC
  *
  * Invoke to free the allocated PCI EPF register space.
  */
-void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
+                       enum pci_epc_interface_type type)
 {
        struct device *dev = epf->epc->dev.parent;
+       struct pci_epf_bar *epf_bar;
+       struct pci_epc *epc;
 
        if (!addr)
                return;
 
-       dma_free_coherent(dev, epf->bar[bar].size, addr,
-                         epf->bar[bar].phys_addr);
+       if (type == PRIMARY_INTERFACE) {
+               epc = epf->epc;
+               epf_bar = epf->bar;
+       } else {
+               epc = epf->sec_epc;
+               epf_bar = epf->sec_epc_bar;
+       }
+
+       dev = epc->dev.parent;
+       dma_free_coherent(dev, epf_bar[bar].size, addr,
+                         epf_bar[bar].phys_addr);
 
-       epf->bar[bar].phys_addr = 0;
-       epf->bar[bar].size = 0;
-       epf->bar[bar].barno = 0;
-       epf->bar[bar].flags = 0;
+       epf_bar[bar].phys_addr = 0;
+       epf_bar[bar].size = 0;
+       epf_bar[bar].barno = 0;
+       epf_bar[bar].flags = 0;
 }
 EXPORT_SYMBOL_GPL(pci_epf_free_space);
 
@@ -345,15 +358,18 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
  * @size: the size of the memory that has to be allocated
  * @bar: the BAR number corresponding to the allocated register space
  * @align: alignment size for the allocation region
+ * @type: Identifies if the allocation is for primary EPC or secondary EPC
  *
  * Invoke to allocate memory for the PCI EPF register space.
  */
 void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
-                         size_t align)
+                         size_t align, enum pci_epc_interface_type type)
 {
-       void *space;
-       struct device *dev = epf->epc->dev.parent;
+       struct pci_epf_bar *epf_bar;
        dma_addr_t phys_addr;
+       struct pci_epc *epc;
+       struct device *dev;
+       void *space;
 
        if (size < 128)
                size = 128;
@@ -363,16 +379,25 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
        else
                size = roundup_pow_of_two(size);
 
+       if (type == PRIMARY_INTERFACE) {
+               epc = epf->epc;
+               epf_bar = epf->bar;
+       } else {
+               epc = epf->sec_epc;
+               epf_bar = epf->sec_epc_bar;
+       }
+
+       dev = epc->dev.parent;
        space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
        if (!space) {
                dev_err(dev, "failed to allocate mem space\n");
                return NULL;
        }
 
-       epf->bar[bar].phys_addr = phys_addr;
-       epf->bar[bar].size = size;
-       epf->bar[bar].barno = bar;
-       epf->bar[bar].flags |= upper_32_bits(size) ?
+       epf_bar[bar].phys_addr = phys_addr;
+       epf_bar[bar].size = size;
+       epf_bar[bar].barno = bar;
+       epf_bar[bar].flags |= upper_32_bits(size) ?
                                PCI_BASE_ADDRESS_MEM_TYPE_64 :
                                PCI_BASE_ADDRESS_MEM_TYPE_32;
 
@@ -446,11 +471,9 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
 {
        int ret;
 
-       if (!driver->ops)
-               return -EINVAL;
-
-       if (!driver->ops->bind || !driver->ops->unbind)
-               return -EINVAL;
+       if (!driver->ops || !driver->ops->bind || !driver->ops->unbind)
+               pr_debug("%s: Supports only pci_epf device created using DT\n",
+                        driver->driver.name);
 
        driver->driver.bus = &pci_epf_bus_type;
        driver->driver.owner = owner;
@@ -529,26 +552,78 @@ struct pci_epf *pci_epf_create(const char *name)
 }
 EXPORT_SYMBOL_GPL(pci_epf_create);
 
-const struct pci_epf_device_id *
-pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf)
+/**
+ * pci_epf_of_create() - create a new PCI EPF device from device tree node
+ * @node: the device node of the PCI EPF device.
+ *
+ * Invoke to create a new PCI EPF device by providing a device tree node
+ * with compatible property.
+ */
+struct pci_epf *pci_epf_of_create(struct device_node *node)
 {
-       if (!id || !epf)
-               return NULL;
+       struct pci_epf *epf;
+       const char *compat;
+       int ret;
 
-       while (*id->name) {
-               if (strcmp(epf->name, id->name) == 0)
-                       return id;
-               id++;
+       of_node_get(node);
+
+       ret = of_property_read_string(node, "compatible", &compat);
+       if (ret) {
+               of_node_put(node);
+               return ERR_PTR(ret);
+       }
+
+       epf = pci_epf_create(compat);
+       if (IS_ERR(epf))
+               of_node_put(node);
+       else
+               epf->node = node;
+
+       return epf;
+}
+EXPORT_SYMBOL_GPL(pci_epf_of_create);
+
+static void devm_epf_release(struct device *dev, void *res)
+{
+       struct pci_epf *epf = *(struct pci_epf **)res;
+
+       pci_epf_destroy(epf);
+}
+
+/**
+ * devm_pci_epf_of_create() - create a new PCI EPF device from device tree node
+ * @dev: device that is creating the new EPF
+ * @node: the device node of the PCI EPF device.
+ *
+ * Invoke to create a new PCI EPF device by providing a device tree node with
+ * compatible property. While at that, it also associates the device with the
+ * EPF using devres. On driver detach, release function is invoked on the devres
+ * data, where devres data is freed.
+ */
+struct pci_epf *devm_pci_epf_of_create(struct device *dev,
+                                      struct device_node *node)
+{
+       struct pci_epf **ptr, *epf;
+
+       ptr = devres_alloc(devm_epf_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return ERR_PTR(-ENOMEM);
+
+       epf = pci_epf_of_create(node);
+       if (!IS_ERR(epf)) {
+               *ptr = epf;
+               devres_add(dev, ptr);
+       } else {
+               devres_free(ptr);
        }
 
-       return NULL;
+       return epf;
 }
-EXPORT_SYMBOL_GPL(pci_epf_match_device);
+EXPORT_SYMBOL_GPL(devm_pci_epf_of_create);
 
 static void pci_epf_dev_release(struct device *dev)
 {
        struct pci_epf *epf = to_pci_epf(dev);
 
+       of_node_put(epf->node);
        kfree(epf->name);
        kfree(epf);
 }
index 857c358b727b839aa1bca6f67b11c16bee282cb1..cc860c5f7d26f4c64939fc970295d114f5842447 100644 (file)
@@ -230,7 +230,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
        struct of_drc_info drc;
        const __be32 *value;
        char cell_drc_name[MAX_DRC_NAME_LEN];
-       int j, fndit;
+       int j;
 
        info = of_find_property(dn->parent, "ibm,drc-info", NULL);
        if (info == NULL)
@@ -245,17 +245,13 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
 
                /* Should now know end of current entry */
 
-               if (my_index > drc.last_drc_index)
-                       continue;
-
-               fndit = 1;
-               break;
+               /* Found it */
+               if (my_index <= drc.last_drc_index) {
+                       sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
+                               my_index);
+                       break;
+               }
        }
-       /* Found it */
-
-       if (fndit)
-               sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix, 
-                       my_index);
 
        if (((drc_name == NULL) ||
             (drc_name && !strcmp(drc_name, cell_drc_name))) &&
index 33f3f475e5c6bc7bd7288cd17df61af5ff1446ce..ec317bcb1bcabb2f8ea1bc9ebcb775eaa1ed0ed7 100644 (file)
@@ -399,7 +399,8 @@ void __weak pcibios_free_irq(struct pci_dev *dev)
 #ifdef CONFIG_PCI_IOV
 static inline bool pci_device_can_probe(struct pci_dev *pdev)
 {
-       return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
+       return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
+               pdev->driver_override);
 }
 #else
 static inline bool pci_device_can_probe(struct pci_dev *pdev)
@@ -414,6 +415,9 @@ static int pci_device_probe(struct device *dev)
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct pci_driver *drv = to_pci_driver(dev->driver);
 
+       if (!pci_device_can_probe(pci_dev))
+               return -ENODEV;
+
        pci_assign_irq(pci_dev);
 
        error = pcibios_alloc_irq(pci_dev);
@@ -421,12 +425,10 @@ static int pci_device_probe(struct device *dev)
                return error;
 
        pci_dev_get(pci_dev);
-       if (pci_device_can_probe(pci_dev)) {
-               error = __pci_device_probe(drv, pci_dev);
-               if (error) {
-                       pcibios_free_irq(pci_dev);
-                       pci_dev_put(pci_dev);
-               }
+       error = __pci_device_probe(drv, pci_dev);
+       if (error) {
+               pcibios_free_irq(pci_dev);
+               pci_dev_put(pci_dev);
        }
 
        return error;
index 9ecfe13157c09660ec627076b470e118e794c405..1edf5a1836ea92880b74a13c421eadf891dc3404 100644 (file)
@@ -478,7 +478,7 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
                pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
        return count;
 }
-static struct device_attribute dev_remove_attr = __ATTR(remove,
+static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove,
                                                        (S_IWUSR|S_IWGRP),
                                                        NULL, remove_store);
 
index 61f2ef28ea1c73ed154ab0593918828a817e5f25..2baf1f82f89333e22cdfd2d24087f884c1ab5fb3 100644 (file)
@@ -925,19 +925,6 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
        }
 }
 
-/**
- * pci_power_up - Put the given device into D0 forcibly
- * @dev: PCI device to power up
- */
-void pci_power_up(struct pci_dev *dev)
-{
-       if (platform_pci_power_manageable(dev))
-               platform_pci_set_power_state(dev, PCI_D0);
-
-       pci_raw_set_power_state(dev, PCI_D0);
-       pci_update_current_state(dev, PCI_D0);
-}
-
 /**
  * pci_platform_power_transition - Use platform to change device power state
  * @dev: PCI device to handle.
@@ -1116,6 +1103,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 }
 EXPORT_SYMBOL(pci_set_power_state);
 
+/**
+ * pci_power_up - Put the given device into D0 forcibly
+ * @dev: PCI device to power up
+ */
+void pci_power_up(struct pci_dev *dev)
+{
+       __pci_start_power_transition(dev, PCI_D0);
+       pci_raw_set_power_state(dev, PCI_D0);
+       pci_update_current_state(dev, PCI_D0);
+}
+
 /**
  * pci_choose_state - Choose the power state of a PCI device
  * @dev: PCI device to be suspended
@@ -1366,7 +1364,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
                pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
                bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
                res = pdev->resource + bar_idx;
-               size = order_base_2((resource_size(res) >> 20) | 1) - 1;
+               size = ilog2(resource_size(res)) - 20;
                ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
                ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
                pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
@@ -2004,6 +2002,13 @@ static void pci_pme_list_scan(struct work_struct *work)
                         */
                        if (bridge && bridge->current_state != PCI_D0)
                                continue;
+                       /*
+                        * If the device is in D3cold it should not be
+                        * polled either.
+                        */
+                       if (pme_dev->dev->current_state == PCI_D3cold)
+                               continue;
+
                        pci_pme_wakeup(pme_dev->dev, NULL);
                } else {
                        list_del(&pme_dev->list);
index 28c64f84bfe72b1b301a99c5ac2ba3773480ec70..06be52912dcdb201347737dbefb44e6aaa2db910 100644 (file)
@@ -5082,59 +5082,95 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
        pci_iounmap(pdev, mmio);
        pci_disable_device(pdev);
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8531,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8532,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8533,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8534,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8535,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8536,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8543,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8544,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8545,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8546,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8551,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8552,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8553,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8554,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8555,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8556,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8561,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8562,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8563,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8564,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8565,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8566,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8571,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8572,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8573,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8574,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8575,
-                       quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8576,
-                       quirk_switchtec_ntb_dma_alias);
+#define SWITCHTEC_QUIRK(vid) \
+       DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
+                               quirk_switchtec_ntb_dma_alias)
+
+SWITCHTEC_QUIRK(0x8531);  /* PFX 24xG3 */
+SWITCHTEC_QUIRK(0x8532);  /* PFX 32xG3 */
+SWITCHTEC_QUIRK(0x8533);  /* PFX 48xG3 */
+SWITCHTEC_QUIRK(0x8534);  /* PFX 64xG3 */
+SWITCHTEC_QUIRK(0x8535);  /* PFX 80xG3 */
+SWITCHTEC_QUIRK(0x8536);  /* PFX 96xG3 */
+SWITCHTEC_QUIRK(0x8541);  /* PSX 24xG3 */
+SWITCHTEC_QUIRK(0x8542);  /* PSX 32xG3 */
+SWITCHTEC_QUIRK(0x8543);  /* PSX 48xG3 */
+SWITCHTEC_QUIRK(0x8544);  /* PSX 64xG3 */
+SWITCHTEC_QUIRK(0x8545);  /* PSX 80xG3 */
+SWITCHTEC_QUIRK(0x8546);  /* PSX 96xG3 */
+SWITCHTEC_QUIRK(0x8551);  /* PAX 24XG3 */
+SWITCHTEC_QUIRK(0x8552);  /* PAX 32XG3 */
+SWITCHTEC_QUIRK(0x8553);  /* PAX 48XG3 */
+SWITCHTEC_QUIRK(0x8554);  /* PAX 64XG3 */
+SWITCHTEC_QUIRK(0x8555);  /* PAX 80XG3 */
+SWITCHTEC_QUIRK(0x8556);  /* PAX 96XG3 */
+SWITCHTEC_QUIRK(0x8561);  /* PFXL 24XG3 */
+SWITCHTEC_QUIRK(0x8562);  /* PFXL 32XG3 */
+SWITCHTEC_QUIRK(0x8563);  /* PFXL 48XG3 */
+SWITCHTEC_QUIRK(0x8564);  /* PFXL 64XG3 */
+SWITCHTEC_QUIRK(0x8565);  /* PFXL 80XG3 */
+SWITCHTEC_QUIRK(0x8566);  /* PFXL 96XG3 */
+SWITCHTEC_QUIRK(0x8571);  /* PFXI 24XG3 */
+SWITCHTEC_QUIRK(0x8572);  /* PFXI 32XG3 */
+SWITCHTEC_QUIRK(0x8573);  /* PFXI 48XG3 */
+SWITCHTEC_QUIRK(0x8574);  /* PFXI 64XG3 */
+SWITCHTEC_QUIRK(0x8575);  /* PFXI 80XG3 */
+SWITCHTEC_QUIRK(0x8576);  /* PFXI 96XG3 */
+
+/*
+ * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does
+ * not always reset the secondary Nvidia GPU between reboots if the system
+ * is configured to use Hybrid Graphics mode.  This results in the GPU
+ * being left in whatever state it was in during the *previous* boot, which
+ * causes spurious interrupts from the GPU, which in turn causes us to
+ * disable the wrong IRQ and end up breaking the touchpad.  Unsurprisingly,
+ * this also completely breaks nouveau.
+ *
+ * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a
+ * clean state and fixes all these issues.
+ *
+ * When the machine is configured in Dedicated display mode, the issue
+ * doesn't occur.  Fortunately the GPU advertises NoReset+ when in this
+ * mode, so we can detect that and avoid resetting it.
+ */
+static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
+{
+       void __iomem *map;
+       int ret;
+
+       if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
+           pdev->subsystem_device != 0x222e ||
+           !pdev->reset_fn)
+               return;
+
+       if (pci_enable_device_mem(pdev))
+               return;
+
+       /*
+        * Based on nvkm_device_ctor() in
+        * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+        */
+       map = pci_iomap(pdev, 0, 0x23000);
+       if (!map) {
+               pci_err(pdev, "Can't map MMIO space\n");
+               goto out_disable;
+       }
+
+       /*
+        * Make sure the GPU looks like it's been POSTed before resetting
+        * it.
+        */
+       if (ioread32(map + 0x2240c) & 0x2) {
+               pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
+               ret = pci_reset_bus(pdev);
+               if (ret < 0)
+                       pci_err(pdev, "Failed to reset GPU: %d\n", ret);
+       }
+
+       iounmap(map);
+out_disable:
+       pci_disable_device(pdev);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
+                             PCI_CLASS_DISPLAY_VGA, 8,
+                             quirk_reset_lenovo_thinkpad_p50_nvgpu);
index d0b7dd8fb184b041446707dceccda87588ba45a2..77995df7fe547f9e3adc00a148bebf1e65c974a1 100644 (file)
@@ -730,8 +730,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
                cpu_pm_pmu_setup(armpmu, cmd);
                break;
        case CPU_PM_EXIT:
-               cpu_pm_pmu_setup(armpmu, cmd);
        case CPU_PM_ENTER_FAILED:
+               cpu_pm_pmu_setup(armpmu, cmd);
                armpmu->start(armpmu);
                break;
        default:
index 2b8c0851ff33945992de937b1d2091ac73664640..c97e454a9ec74536589679a80b796925ee2bd248 100644 (file)
@@ -1,17 +1,17 @@
 #
 # Phy drivers for Cadence PHYs
 #
-config PHY_CADENCE_DP
-       tristate "Cadence MHDP DisplayPort PHY driver"
+config PHY_CADENCE_TORRENT
+       tristate "Cadence Torrent PHY driver"
        depends on OF
        depends on HAS_IOMEM
        select GENERIC_PHY
        help
-         Support for Cadence MHDP DisplayPort PHY.
+         Support for Cadence Torrent PHY.
 
 config PHY_CADENCE_SIERRA
        tristate "Cadence Sierra PHY Driver"
        depends on OF && HAS_IOMEM && RESET_CONTROLLER
        select GENERIC_PHY
        help
-         Enable this to support the Cadence Sierra PHY driver
\ No newline at end of file
+         Enable this to support the Cadence Sierra PHY driver
index 412349af0492b62a684b216fe90b24be95769cff..636fc1602d56279e0de2a499a5196f5ea145163e 100644 (file)
@@ -1,2 +1,2 @@
-obj-$(CONFIG_PHY_CADENCE_DP)   += phy-cadence-dp.o
+obj-$(CONFIG_PHY_CADENCE_TORRENT)      += phy-cadence-torrent.o
 obj-$(CONFIG_PHY_CADENCE_SIERRA)       += phy-cadence-sierra.o
similarity index 80%
rename from drivers/phy/cadence/phy-cadence-dp.c
rename to drivers/phy/cadence/phy-cadence-torrent.c
index 77fa0266e2e5efc891fecfa6fe186fd6fbfe5ee7..33962d97cc043d0d55b2ebb9a93f6e4f5207bb8a 100644 (file)
@@ -1,11 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Cadence MHDP DisplayPort SD0801 PHY driver.
+ * Cadence Torrent SD0801 PHY driver.
  *
  * Copyright 2018 Cadence Design Systems, Inc.
  *
  */
 
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 
-#define REF_CLK_19_2MHz
+#define REF_CLK_19_2MHz                19200000
+#define REF_CLK_25MHz          25000000
 
 #define DEFAULT_NUM_LANES      4
 #define MAX_NUM_LANES          4
 #define DEFAULT_MAX_BIT_RATE   8100 /* in Mbps */
 
-#define POLL_TIMEOUT_US                2000
+#define POLL_TIMEOUT_US                5000
 #define LANE_MASK              0x7
 
 /*
@@ -45,6 +47,7 @@
 #define PHY_POWER_STATE_LN_1   0x0008
 #define PHY_POWER_STATE_LN_2   0x0010
 #define PHY_POWER_STATE_LN_3   0x0018
+#define PMA_XCVR_POWER_STATE_REQ_LN_MASK       0x3FU
 #define PHY_PMA_XCVR_POWER_STATE_ACK   0x30
 #define PHY_PMA_CMN_READY              0x34
 #define PHY_PMA_XCVR_TX_VMARGIN                0x38
 #define RX_REE_GCSM2_CTRL(j)                   (0x8000 + 0x0220 + (j) * 0x400)
 #define RX_REE_PERGCSM_CTRL(j)                 (0x8000 + 0x0230 + (j) * 0x400)
 
-struct cdns_dp_phy {
+struct cdns_torrent_phy {
        void __iomem *base;     /* DPTX registers base */
        void __iomem *sd_base;  /* SD0801 registers base */
        u32 num_lanes; /* Number of lanes to use */
        u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
        struct reset_control *phy_rst;
        struct device *dev;
+       struct clk *clk;
+       unsigned long ref_clk_rate;
 };
 
-static int cdns_dp_phy_init(struct phy *phy);
-static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy);
-static int cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy);
-static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy);
-#ifdef REF_CLK_19_2MHz
-static void cdns_dp_phy_pma_cmn_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy);
-static void cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy, u32 rate, bool ssc);
-#else
-static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy);
-static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy, u32 rate, bool ssc);
-#endif
-static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
+enum phy_powerstate {
+       POWERSTATE_A0 = 0,
+       // Powerstate A1 is unused
+       POWERSTATE_A2 = 2,
+       POWERSTATE_A3 = 3,
+};
+
+static int cdns_torrent_dp_init(struct phy *phy);
+static int cdns_torrent_dp_exit(struct phy *phy);
+static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy);
+static int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy);
+static void cdns_torrent_dp_pma_cfg(struct cdns_torrent_phy *cdns_phy);
+static void cdns_torrent_dp_pma_cmn_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy);
+static void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy, u32 rate, bool ssc);
+static void cdns_torrent_dp_pma_cmn_cfg_25mhz(struct cdns_torrent_phy *cdns_phy);
+static void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy, u32 rate, bool ssc);
+static void cdns_torrent_dp_pma_lane_cfg(struct cdns_torrent_phy *cdns_phy,
                                     unsigned int lane);
-static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy,
+static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
                                     u32 rate, u32 lanes);
-static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
+static void cdns_dp_phy_write_field(struct cdns_torrent_phy *cdns_phy,
                                    unsigned int offset,
                                    unsigned char start_bit,
                                    unsigned char num_bits,
                                    unsigned int val);
-static int cdns_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts);
-
-static int cdns_dp_phy_on(struct phy *gphy);
-static int cdns_dp_phy_off(struct phy *gphy);
-
-static const struct phy_ops cdns_dp_phy_ops = {
-       .init           = cdns_dp_phy_init,
-       .configure      = cdns_dp_phy_configure,
-       .power_on       = cdns_dp_phy_on,
-       .power_off      = cdns_dp_phy_off,
+static int cdns_torrent_dp_configure(struct phy *phy, union phy_configure_opts *opts);
+static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy, u32 num_lanes);
+static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
+                                      u32 num_lanes,
+                                      enum phy_powerstate powerstate);
+
+static int cdns_torrent_phy_on(struct phy *gphy);
+static int cdns_torrent_phy_off(struct phy *gphy);
+
+static const struct phy_ops cdns_torrent_phy_ops = {
+       .init           = cdns_torrent_dp_init,
+       .exit           = cdns_torrent_dp_exit,
+       .configure      = cdns_torrent_dp_configure,
+       .power_on       = cdns_torrent_phy_on,
+       .power_off      = cdns_torrent_phy_off,
        .owner          = THIS_MODULE,
 };
 
@@ -213,18 +228,18 @@ static const struct phy_ops cdns_dp_phy_ops = {
        _cdns_dp_phy_write_phy(cdns_phy, offset, val); \
 })
 
-static void _cdns_dp_phy_write_phy(struct cdns_dp_phy *cdns_phy, u32 offset, u16 val)
+static void _cdns_dp_phy_write_phy(struct cdns_torrent_phy *cdns_phy, u32 offset, u16 val)
 {
        writew(val, cdns_phy->sd_base + offset);
 }
 #else
-static void cdns_dp_phy_write_phy(struct cdns_dp_phy *cdns_phy, u32 offset, u16 val)
+static void cdns_dp_phy_write_phy(struct cdns_torrent_phy *cdns_phy, u32 offset, u16 val)
 {
        writew(val, cdns_phy->sd_base + offset);
 }
 #endif
 
-static u16 cdns_dp_phy_read_phy(struct cdns_dp_phy *cdns_phy, u32 offset)
+static u16 cdns_dp_phy_read_phy(struct cdns_torrent_phy *cdns_phy, u32 offset)
 {
        return readw(cdns_phy->sd_base + offset);
 }
@@ -242,18 +257,18 @@ static u16 cdns_dp_phy_read_phy(struct cdns_dp_phy *cdns_phy, u32 offset)
        _cdns_dp_phy_write_dp(cdns_phy, offset, val); \
 })
 
-static void _cdns_dp_phy_write_dp(struct cdns_dp_phy *cdns_phy, u32 offset, u16 val)
+static void _cdns_dp_phy_write_dp(struct cdns_torrent_phy *cdns_phy, u32 offset, u16 val)
 {
        writel(val, cdns_phy->base + offset);
 }
 #else
-static void cdns_dp_phy_write_dp(struct cdns_dp_phy *cdns_phy, u32 offset, u16 val)
+static void cdns_dp_phy_write_dp(struct cdns_torrent_phy *cdns_phy, u32 offset, u16 val)
 {
        writel(val, cdns_phy->base + offset);
 }
 #endif
 
-static u32 cdns_dp_phy_read_dp(struct cdns_dp_phy *cdns_phy, u32 offset)
+static u32 cdns_dp_phy_read_dp(struct cdns_torrent_phy *cdns_phy, u32 offset)
 {
        return readl(cdns_phy->base + offset);
 }
@@ -310,63 +325,46 @@ static const struct coefficients voltage_coeffs[4][4] = {
        }
 };
 
-enum phy_powerstate {
-       POWERSTATE_A0 = 0,
-       // Powerstate A1 is unused
-       POWERSTATE_A2 = 2,
-       POWERSTATE_A3 = 3,
-};
-
-static int cdns_dp_phy_init(struct phy *phy)
+static int cdns_torrent_dp_init(struct phy *phy)
 {
        unsigned char lane_bits;
        int r;
 
-       struct cdns_dp_phy *cdns_phy = phy_get_drvdata(phy);
+       struct cdns_torrent_phy *cdns_phy = phy_get_drvdata(phy);
+
+       r = clk_prepare_enable(cdns_phy->clk);
+       if (r) {
+               dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
+               return r;
+       }
+
+       cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk);
+       if (!(cdns_phy->ref_clk_rate)) {
+               dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
+               clk_disable_unprepare(cdns_phy->clk);
+               return -EINVAL;
+       }
+
+       switch (cdns_phy->ref_clk_rate) {
+       case REF_CLK_19_2MHz:
+       case REF_CLK_25MHz:
+               /* Valid Ref Clock Rate */
+               break;
+       default:
+               dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n");
+               return -EINVAL;
+       }
 
        cdns_dp_phy_write_dp(cdns_phy, PHY_AUX_CTRL, 0x0003); /* enable AUX */
 
        /* PHY PMA registers configuration function */
-       cdns_dp_phy_pma_cfg(cdns_phy);
+       cdns_torrent_dp_pma_cfg(cdns_phy);
 
        /*
         * Set lines power state to A0
         * Set lines pll clk enable to 0
         */
-
-       cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ,
-                               PHY_POWER_STATE_LN_0, 6, 0x0000);
-
-       if (cdns_phy->num_lanes >= 2) {
-               cdns_dp_phy_write_field(cdns_phy,
-                                       PHY_PMA_XCVR_POWER_STATE_REQ,
-                                       PHY_POWER_STATE_LN_1, 6, 0x0000);
-
-               if (cdns_phy->num_lanes == 4) {
-                       cdns_dp_phy_write_field(cdns_phy,
-                                               PHY_PMA_XCVR_POWER_STATE_REQ,
-                                               PHY_POWER_STATE_LN_2, 6, 0);
-                       cdns_dp_phy_write_field(cdns_phy,
-                                               PHY_PMA_XCVR_POWER_STATE_REQ,
-                                               PHY_POWER_STATE_LN_3, 6, 0);
-               }
-       }
-
-       cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN,
-                               0, 1, 0x0000);
-
-       if (cdns_phy->num_lanes >= 2) {
-               cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN,
-                                       1, 1, 0x0000);
-               if (cdns_phy->num_lanes == 4) {
-                       cdns_dp_phy_write_field(cdns_phy,
-                                               PHY_PMA_XCVR_PLLCLK_EN,
-                                               2, 1, 0x0000);
-                       cdns_dp_phy_write_field(cdns_phy,
-                                               PHY_PMA_XCVR_PLLCLK_EN,
-                                               3, 1, 0x0000);
-               }
-       }
+       cdns_torrent_dp_set_a0_pll(cdns_phy, cdns_phy->num_lanes);
 
        /*
         * release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on
@@ -380,32 +378,39 @@ static int cdns_dp_phy_init(struct phy *phy)
 
        /* PHY PMA registers configuration functions */
        /* Initialize PHY with max supported link rate, without SSC. */
-#ifdef REF_CLK_19_2MHz
-       cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(cdns_phy, cdns_phy->max_bit_rate, false);
-#else
-       cdns_dp_phy_pma_cmn_vco_cfg_25mhz(cdns_phy, cdns_phy->max_bit_rate, false);
-#endif
-       cdns_dp_phy_pma_cmn_rate(cdns_phy, cdns_phy->max_bit_rate, cdns_phy->num_lanes);
+       if (cdns_phy->ref_clk_rate ==  REF_CLK_19_2MHz)
+               cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, cdns_phy->max_bit_rate, false);
+       else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz)
+               cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, cdns_phy->max_bit_rate, false);
+       cdns_torrent_dp_pma_cmn_rate(cdns_phy, cdns_phy->max_bit_rate, cdns_phy->num_lanes);
 
        /* take out of reset */
        cdns_dp_phy_write_field(cdns_phy, PHY_RESET, 8, 1, 1);
-       cdns_dp_phy_on(phy);
-       r = cdns_dp_phy_wait_pma_cmn_ready(cdns_phy);
+       cdns_torrent_phy_on(phy);
+       r = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy);
        if (r)
                return r;
 
-       cdns_dp_phy_run(cdns_phy);
+       r = cdns_torrent_dp_run(cdns_phy);
+
+       return r;
+}
+
+static int cdns_torrent_dp_exit(struct phy *phy)
+{
+       struct cdns_torrent_phy *cdns_phy = phy_get_drvdata(phy);
 
+       clk_disable_unprepare(cdns_phy->clk);
        return 0;
 }
 
-static int cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy)
+static int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy)
 {
        unsigned int reg;
        int ret;
 
        ret = cdns_phy_read_dp_poll_timeout(cdns_phy, PHY_PMA_CMN_READY, reg,
-                                           reg & 1, 0, 5000);
+                                           reg & 1, 0, POLL_TIMEOUT_US);
        if (ret == -ETIMEDOUT) {
                dev_err(cdns_phy->dev,
                        "timeout waiting for PMA common ready\n");
@@ -415,26 +420,23 @@ static int cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy)
        return 0;
 }
 
-static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy)
+static void cdns_torrent_dp_pma_cfg(struct cdns_torrent_phy *cdns_phy)
 {
        unsigned int i;
 
-#ifdef REF_CLK_19_2MHz
-       /* PMA common configuration 19.2MHz */
-       cdns_dp_phy_pma_cmn_cfg_19_2mhz(cdns_phy);
-#else
-       /* PMA common configuration 25MHz */
-       cdns_dp_phy_pma_cmn_cfg_25mhz(cdns_phy);
-#endif
+       if (cdns_phy->ref_clk_rate ==  REF_CLK_19_2MHz)
+               /* PMA common configuration 19.2MHz */
+               cdns_torrent_dp_pma_cmn_cfg_19_2mhz(cdns_phy);
+       else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz)
+               /* PMA common configuration 25MHz */
+               cdns_torrent_dp_pma_cmn_cfg_25mhz(cdns_phy);
 
        /* PMA lane configuration to deal with multi-link operation */
        for (i = 0; i < cdns_phy->num_lanes; i++)
-               cdns_dp_phy_pma_lane_cfg(cdns_phy, i);
+               cdns_torrent_dp_pma_lane_cfg(cdns_phy, i);
 }
 
-#ifdef REF_CLK_19_2MHz
-
-static void cdns_dp_phy_pma_cmn_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy)
+static void cdns_torrent_dp_pma_cmn_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy)
 {
        /* refclock registers - assumes 19.2 MHz refclock */
        cdns_dp_phy_write_phy(cdns_phy, CMN_SSM_BIAS_TMR, 0x0014);
@@ -479,7 +481,7 @@ static void cdns_dp_phy_pma_cmn_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy)
  * Set registers responsible for enabling and configuring SSC, with second and
  * third register values provided by parameters.
  */
-static void cdns_dp_phy_enable_ssc_19_2mhz(struct cdns_dp_phy *cdns_phy,
+static void cdns_torrent_dp_enable_ssc_19_2mhz(struct cdns_torrent_phy *cdns_phy,
                                           u32 ctrl2_val, u32 ctrl3_val)
 {
        cdns_dp_phy_write_phy(cdns_phy, CMN_PLL0_SS_CTRL1_M0, 0x0001);
@@ -492,7 +494,7 @@ static void cdns_dp_phy_enable_ssc_19_2mhz(struct cdns_dp_phy *cdns_phy,
        cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_SS_CTRL4_M0, 0x0003);
 }
 
-static void cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy,
+static void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy,
                                                u32 rate, bool ssc)
 {
 
@@ -512,7 +514,7 @@ static void cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy,
                cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_HIGH_THR_M0, 0x00BC);
                cdns_dp_phy_write_phy(cdns_phy, CMN_PDIAG_PLL1_CTRL_M0, 0x0012);
                if (ssc)
-                       cdns_dp_phy_enable_ssc_19_2mhz(cdns_phy, 0x033A, 0x006A);
+                       cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x033A, 0x006A);
                break;
        /* Setting VCO for 9.72GHz */
        case 1620:
@@ -529,7 +531,7 @@ static void cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy,
                cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_HIGH_THR_M0, 0x0152);
                cdns_dp_phy_write_phy(cdns_phy, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
                if (ssc)
-                       cdns_dp_phy_enable_ssc_19_2mhz(cdns_phy, 0x05DD, 0x0069);
+                       cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x05DD, 0x0069);
                break;
        /* Setting VCO for 8.64GHz */
        case 2160:
@@ -545,7 +547,7 @@ static void cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy,
                cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_HIGH_THR_M0, 0x012C);
                cdns_dp_phy_write_phy(cdns_phy, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
                if (ssc)
-                       cdns_dp_phy_enable_ssc_19_2mhz(cdns_phy, 0x0536, 0x0069);
+                       cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x0536, 0x0069);
                break;
        /* Setting VCO for 8.1GHz */
        case 8100:
@@ -560,7 +562,7 @@ static void cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy,
                cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_HIGH_THR_M0, 0x011A);
                cdns_dp_phy_write_phy(cdns_phy, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
                if (ssc)
-                       cdns_dp_phy_enable_ssc_19_2mhz(cdns_phy, 0x04D7, 0x006A);
+                       cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x04D7, 0x006A);
                break;
        }
 
@@ -591,9 +593,8 @@ static void cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy,
        cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_LOCK_PLLCNT_START, 0x0099);
 }
 
-#else
 
-static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy)
+static void cdns_torrent_dp_pma_cmn_cfg_25mhz(struct cdns_torrent_phy *cdns_phy)
 {
        /* refclock registers - assumes 25 MHz refclock */
        cdns_dp_phy_write_phy(cdns_phy, CMN_SSM_BIAS_TMR, 0x0019);
@@ -637,7 +638,7 @@ static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy)
  * Set registers responsible for enabling and configuring SSC, with second
  * register value provided by a parameter.
  */
-static void cdns_dp_phy_enable_ssc_25mhz(struct cdns_dp_phy *cdns_phy, u32 ctrl2_val)
+static void cdns_torrent_dp_enable_ssc_25mhz(struct cdns_torrent_phy *cdns_phy, u32 ctrl2_val)
 {
        cdns_dp_phy_write_phy(cdns_phy, CMN_PLL0_SS_CTRL1_M0, 0x0001);
        cdns_dp_phy_write_phy(cdns_phy, CMN_PLL0_SS_CTRL1_M0, ctrl2_val);
@@ -649,7 +650,7 @@ static void cdns_dp_phy_enable_ssc_25mhz(struct cdns_dp_phy *cdns_phy, u32 ctrl2
        cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_SS_CTRL4_M0, 0x0003);
 }
 
-static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy,
+static void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy,
                                              u32 rate, bool ssc)
 {
        /* Assumes 25 MHz refclock */
@@ -666,7 +667,7 @@ static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy,
                cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_FRACDIVH_M0, 0x0002);
                cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_HIGH_THR_M0, 0x0120);
                if (ssc)
-                       cdns_dp_phy_enable_ssc_25mhz(cdns_phy, 0x0423);
+                       cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x0423);
                break;
        /* Setting VCO for 9.72GHz */
        case 1620:
@@ -681,7 +682,7 @@ static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy,
                cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_FRACDIVH_M0, 0x0002);
                cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_HIGH_THR_M0, 0x0104);
                if (ssc)
-                       cdns_dp_phy_enable_ssc_25mhz(cdns_phy, 0x03B9);
+                       cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x03B9);
                break;
        /* Setting VCO for 8.64GHz */
        case 2160:
@@ -695,7 +696,7 @@ static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy,
                cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_FRACDIVH_M0, 0x0002);
                cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_HIGH_THR_M0, 0x00E7);
                if (ssc)
-                       cdns_dp_phy_enable_ssc_25mhz(cdns_phy, 0x034F);
+                       cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x034F);
                break;
        /* Setting VCO for 8.1GHz */
        case 8100:
@@ -708,7 +709,7 @@ static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy,
                cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_FRACDIVH_M0, 0x0002);
                cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_HIGH_THR_M0, 0x00D8);
                if (ssc)
-                       cdns_dp_phy_enable_ssc_25mhz(cdns_phy, 0x031A);
+                       cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x031A);
                break;
        }
 
@@ -742,9 +743,8 @@ static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy,
        cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_LOCK_PLLCNT_START, 0x00C7);
 }
 
-#endif
 
-static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy,
+static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
                                     u32 rate, u32 lanes)
 {
        unsigned int clk_sel_val = 0;
@@ -789,15 +789,14 @@ static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy,
                                      hsclk_div_val);
 }
 
-static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
+static void cdns_torrent_dp_pma_lane_cfg(struct cdns_torrent_phy *cdns_phy,
                                     unsigned int lane)
 {
        /* Per lane, refclock-dependent receiver detection setting */
-#ifdef REF_CLK_19_2MHz
-       cdns_dp_phy_write_phy(cdns_phy, TX_RCVDET_ST_TMR(lane), 0x0780);
-#else
-       cdns_dp_phy_write_phy(cdns_phy, TX_RCVDET_ST_TMR(lane), 0x09C4);
-#endif
+       if (cdns_phy->ref_clk_rate ==  REF_CLK_19_2MHz)
+               cdns_dp_phy_write_phy(cdns_phy, TX_RCVDET_ST_TMR(lane), 0x0780);
+       else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz)
+               cdns_dp_phy_write_phy(cdns_phy, TX_RCVDET_ST_TMR(lane), 0x09C4);
 
        /* Writing Tx/Rx Power State Controllers registers */
        cdns_dp_phy_write_phy(cdns_phy, TX_PSC_A0(lane), 0x00FB);
@@ -818,12 +817,9 @@ static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
        cdns_dp_phy_write_phy(cdns_phy, XCVR_DIAG_HSCLK_SEL(lane), 0x0000);
 }
 
-static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy)
+static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy)
 {
        unsigned int read_val;
-       u32 write_val1 = 0;
-       u32 write_val2 = 0;
-       u32 mask = 0;
        int ret;
 
        /*
@@ -832,57 +828,24 @@ static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy)
         */
        ret = cdns_phy_read_dp_poll_timeout(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN_ACK,
                                            read_val, read_val & 1, 0, POLL_TIMEOUT_US);
-       if (ret == -ETIMEDOUT)
+       if (ret == -ETIMEDOUT) {
                dev_err(cdns_phy->dev,
                        "timeout waiting for link PLL clock enable ack\n");
-
-       ndelay(100);
-
-       switch (cdns_phy->num_lanes) {
-
-       case 1: /* lane 0 */
-               write_val1 = 0x00000004;
-               write_val2 = 0x00000001;
-               mask = 0x0000003f;
-               break;
-       case 2: /* lane 0-1 */
-               write_val1 = 0x00000404;
-               write_val2 = 0x00000101;
-               mask = 0x00003f3f;
-               break;
-       case 4: /* lane 0-3 */
-               write_val1 = 0x04040404;
-               write_val2 = 0x01010101;
-               mask = 0x3f3f3f3f;
-               break;
+               return ret;
        }
 
-       cdns_dp_phy_write_dp(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ, write_val1);
-
-       ret = cdns_phy_read_dp_poll_timeout(cdns_phy, PHY_PMA_XCVR_POWER_STATE_ACK,
-                                           read_val, (read_val & mask) == write_val1, 0,
-                                           POLL_TIMEOUT_US);
-       if (ret == -ETIMEDOUT)
-               dev_err(cdns_phy->dev,
-                       "timeout waiting for link power state ack\n");
-
-       cdns_dp_phy_write_dp(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ, 0);
        ndelay(100);
 
-       cdns_dp_phy_write_dp(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ, write_val2);
+       ret = cdns_torrent_dp_set_power_state(cdns_phy, cdns_phy->num_lanes, POWERSTATE_A2);
+       if (ret)
+               return ret;
 
-       ret = cdns_phy_read_dp_poll_timeout(cdns_phy, PHY_PMA_XCVR_POWER_STATE_ACK,
-                                           read_val, (read_val & mask) == write_val2, 0,
-                                           POLL_TIMEOUT_US);
-       if (ret == -ETIMEDOUT)
-               dev_err(cdns_phy->dev,
-                       "timeout waiting for link power state ack\n");
+       ret = cdns_torrent_dp_set_power_state(cdns_phy, cdns_phy->num_lanes, POWERSTATE_A0);
 
-       cdns_dp_phy_write_dp(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ, 0);
-       ndelay(100);
+       return ret;
 }
 
-static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
+static void cdns_dp_phy_write_field(struct cdns_torrent_phy *cdns_phy,
                                    unsigned int offset,
                                    unsigned char start_bit,
                                    unsigned char num_bits,
@@ -896,25 +859,25 @@ static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
                                                                 start_bit))));
 }
 
-static int cdns_dp_phy_on(struct phy *phy)
+static int cdns_torrent_phy_on(struct phy *phy)
 {
-       struct cdns_dp_phy *cdns_phy = phy_get_drvdata(phy);
+       struct cdns_torrent_phy *cdns_phy = phy_get_drvdata(phy);
 
        /* Take the PHY lane group out of reset */
        return reset_control_deassert(cdns_phy->phy_rst);
 }
 
-static int cdns_dp_phy_off(struct phy *phy)
+static int cdns_torrent_phy_off(struct phy *phy)
 {
-       struct cdns_dp_phy *cdns_phy = phy_get_drvdata(phy);
+       struct cdns_torrent_phy *cdns_phy = phy_get_drvdata(phy);
 
        return reset_control_assert(cdns_phy->phy_rst);
 }
 
-static int cdns_dp_phy_probe(struct platform_device *pdev)
+static int cdns_torrent_phy_probe(struct platform_device *pdev)
 {
        struct resource *regs;
-       struct cdns_dp_phy *cdns_phy;
+       struct cdns_torrent_phy *cdns_phy;
        struct device *dev = &pdev->dev;
        struct phy_provider *phy_provider;
        struct phy *phy;
@@ -926,9 +889,9 @@ static int cdns_dp_phy_probe(struct platform_device *pdev)
 
        cdns_phy->dev = &pdev->dev;
 
-       phy = devm_phy_create(dev, NULL, &cdns_dp_phy_ops);
+       phy = devm_phy_create(dev, NULL, &cdns_torrent_phy_ops);
        if (IS_ERR(phy)) {
-               dev_err(dev, "failed to create DisplayPort PHY\n");
+               dev_err(dev, "failed to create Torrent PHY\n");
                return PTR_ERR(phy);
        }
 
@@ -983,6 +946,12 @@ static int cdns_dp_phy_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       cdns_phy->clk = devm_clk_get(dev, "refclk");
+       if (IS_ERR(cdns_phy->clk)) {
+               dev_err(dev, "phy ref clock not found\n");
+               return PTR_ERR(cdns_phy->clk);
+       }
+
        phy_set_drvdata(phy, cdns_phy);
 
        phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
@@ -995,13 +964,12 @@ static int cdns_dp_phy_probe(struct platform_device *pdev)
        return PTR_ERR_OR_ZERO(phy_provider);
 }
 
-static int cdns_dp_phy_set_power_state(struct cdns_dp_phy *cdns_phy,
-                                      struct phy_configure_opts_dp *dp,
+static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
+                                      u32 num_lanes,
                                       enum phy_powerstate powerstate)
 {
        /* Register value for power state for a single byte. */
        u32 value_part;
-
        u32 value;
        u32 mask;
        u32 read_val;
@@ -1021,7 +989,7 @@ static int cdns_dp_phy_set_power_state(struct cdns_dp_phy *cdns_phy,
        }
 
        /* Select values of registers and mask, depending on enabled lane count. */
-       switch (dp->lanes) {
+       switch (num_lanes) {
        // lane 0
        case (1):
                value = value_part;
@@ -1058,7 +1026,7 @@ static int cdns_dp_phy_set_power_state(struct cdns_dp_phy *cdns_phy,
 /*
  * Enable or disable PLL for selected lanes.
  */
-static int cdns_dp_phy_set_pll_en(struct cdns_dp_phy *cdns_phy,
+static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy,
                                  struct phy_configure_opts_dp *dp,
                                  bool enable)
 {
@@ -1110,7 +1078,7 @@ static int cdns_dp_phy_set_pll_en(struct cdns_dp_phy *cdns_phy,
  * Perform register operations related to setting link rate, once powerstate is
  * set and PLL disable request was processed.
  */
-static int cdns_dp_phy_configure_rate(struct cdns_dp_phy *cdns_phy,
+static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy,
                                      struct phy_configure_opts_dp *dp)
 {
        u32 ret;
@@ -1130,16 +1098,16 @@ static int cdns_dp_phy_configure_rate(struct cdns_dp_phy *cdns_phy,
        ndelay(200);
 
        /* DP Rate Change - VCO Output settings. */
-#ifdef REF_CLK_19_2MHz
-       /* PMA common configuration 19.2MHz */
-       cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate, dp->ssc);
-       cdns_dp_phy_pma_cmn_cfg_19_2mhz(cdns_phy);
-#else
-       /* PMA common configuration 25MHz */
-       cdns_dp_phy_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate, dp->ssc);
-       cdns_dp_phy_pma_cmn_cfg_25mhz(cdns_phy);
-#endif
-       cdns_dp_phy_pma_cmn_rate(cdns_phy, dp->link_rate, dp->lanes);
+       if (cdns_phy->ref_clk_rate ==  REF_CLK_19_2MHz) {
+               /* PMA common configuration 19.2MHz */
+               cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate, dp->ssc);
+               cdns_torrent_dp_pma_cmn_cfg_19_2mhz(cdns_phy);
+       } else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) {
+               /* PMA common configuration 25MHz */
+               cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate, dp->ssc);
+               cdns_torrent_dp_pma_cmn_cfg_25mhz(cdns_phy);
+       }
+       cdns_torrent_dp_pma_cmn_rate(cdns_phy, dp->link_rate, dp->lanes);
 
        /* Enable the cmn_pll0_en. */
        cdns_dp_phy_write_phy(cdns_phy, PHY_PMA_PLL_RAW_CTRL, 0x3);
@@ -1156,7 +1124,7 @@ static int cdns_dp_phy_configure_rate(struct cdns_dp_phy *cdns_phy,
 /*
  * Verify, that parameters to configure PHY with are correct.
  */
-static int cdns_dp_phy_verify_config(struct cdns_dp_phy *cdns_phy,
+static int cdns_torrent_dp_verify_config(struct cdns_torrent_phy *cdns_phy,
                                     struct phy_configure_opts_dp *dp)
 {
        u8 i;
@@ -1214,24 +1182,26 @@ static int cdns_dp_phy_verify_config(struct cdns_dp_phy *cdns_phy,
 }
 
 /* Set power state A0 and PLL clock enable to 0 on enabled lanes. */
-static void cdns_dp_phy_set_a0_pll(struct cdns_dp_phy *cdns_phy,
-                                  struct phy_configure_opts_dp *dp)
+static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
+                                  u32 num_lanes)
 {
        u32 pwr_state = cdns_dp_phy_read_dp(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ);
        u32 pll_clk_en = cdns_dp_phy_read_dp(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN);
 
        /* Lane 0 is always enabled. */
-       pwr_state &= ~0x1FU;
+       pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN_0);
        pll_clk_en &= ~0x01U;
 
-       if (dp->lanes > 1) {
-               pwr_state &= ~(0x1FU << 8);
+       if (num_lanes > 1) {
+               /* lane 1 */
+               pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN_1);
                pll_clk_en &= ~(0x01U << 1);
        }
 
-       if (dp->lanes > 2) {
-               pwr_state &= ~(0x1FU << 16);
-               pwr_state &= ~(0x1FU << 24);
+       if (num_lanes > 2) {
+               /* lanes 2 and 3 */
+               pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN_2);
+               pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN_3);
                pll_clk_en &= ~(0x01U << 2);
                pll_clk_en &= ~(0x01U << 3);
        }
@@ -1241,7 +1211,7 @@ static void cdns_dp_phy_set_a0_pll(struct cdns_dp_phy *cdns_phy,
 }
 
 /* Configure lane count as required. */
-static int cdns_dp_phy_set_lanes(struct cdns_dp_phy *cdns_phy,
+static int cdns_torrent_dp_set_lanes(struct cdns_torrent_phy *cdns_phy,
                                 struct phy_configure_opts_dp *dp)
 {
        u32 value;
@@ -1265,15 +1235,14 @@ static int cdns_dp_phy_set_lanes(struct cdns_dp_phy *cdns_phy,
        value = (value & 0x0000FFF0) | (0x0000000E & lane_mask);
        cdns_dp_phy_write_dp(cdns_phy, PHY_RESET, value);
 
-       cdns_dp_phy_set_a0_pll(cdns_phy, dp);
+       cdns_torrent_dp_set_a0_pll(cdns_phy, dp->lanes);
 
        /* release phy_l0*_reset_n based on used laneCount */
        value = (value & 0x0000FFF0) | (0x0000000F & lane_mask);
        cdns_dp_phy_write_dp(cdns_phy, PHY_RESET, value);
 
        /* Wait, until PHY gets ready after releasing PHY reset signal. */
-       ret = cdns_phy_read_dp_poll_timeout(cdns_phy, PHY_PMA_CMN_READY, value,
-                                           (value & 0x01) != 0, 0, POLL_TIMEOUT_US);
+       ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy);
        if (ret)
                return ret;
 
@@ -1282,48 +1251,37 @@ static int cdns_dp_phy_set_lanes(struct cdns_dp_phy *cdns_phy,
        /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
        cdns_dp_phy_write_dp(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN, 0x0001);
 
-       /* waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the master lane */
-       ret = cdns_phy_read_dp_poll_timeout(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN_ACK,
-                                           value, (value & 0x01) != 0, 0, POLL_TIMEOUT_US);
-       if (ret)
-               return ret;
-
-       ndelay(100);
-
-       ret = cdns_dp_phy_set_power_state(cdns_phy, dp, POWERSTATE_A2);
-       if (ret)
-               return ret;
-       ret = cdns_dp_phy_set_power_state(cdns_phy, dp, POWERSTATE_A0);
+       ret = cdns_torrent_dp_run(cdns_phy);
 
        return ret;
 }
 
 /* Configure link rate as required. */
-static int cdns_dp_phy_set_rate(struct cdns_dp_phy *cdns_phy,
+static int cdns_torrent_dp_set_rate(struct cdns_torrent_phy *cdns_phy,
                                struct phy_configure_opts_dp *dp)
 {
        u32 ret;
 
-       ret = cdns_dp_phy_set_power_state(cdns_phy, dp, POWERSTATE_A3);
+       ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes, POWERSTATE_A3);
        if (ret)
                return ret;
-       ret = cdns_dp_phy_set_pll_en(cdns_phy, dp, false);
+       ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, false);
        if (ret)
                return ret;
        ndelay(200);
 
-       ret = cdns_dp_phy_configure_rate(cdns_phy, dp);
+       ret = cdns_torrent_dp_configure_rate(cdns_phy, dp);
        if (ret)
                return ret;
        ndelay(200);
 
-       ret = cdns_dp_phy_set_pll_en(cdns_phy, dp, true);
+       ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, true);
        if (ret)
                return ret;
-       ret = cdns_dp_phy_set_power_state(cdns_phy, dp, POWERSTATE_A2);
+       ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes, POWERSTATE_A2);
        if (ret)
                return ret;
-       ret = cdns_dp_phy_set_power_state(cdns_phy, dp, POWERSTATE_A0);
+       ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes, POWERSTATE_A0);
        if (ret)
                return ret;
        ndelay(900);
@@ -1332,7 +1290,7 @@ static int cdns_dp_phy_set_rate(struct cdns_dp_phy *cdns_phy,
 }
 
 /* Configure voltage swing and pre-emphasis for all enabled lanes. */
-static void cdns_dp_phy_set_voltages(struct cdns_dp_phy *cdns_phy,
+static void cdns_torrent_dp_set_voltages(struct cdns_torrent_phy *cdns_phy,
                                     struct phy_configure_opts_dp *dp)
 {
        u8 lane;
@@ -1365,9 +1323,9 @@ static void cdns_dp_phy_set_voltages(struct cdns_dp_phy *cdns_phy,
        }
 };
 
-static int cdns_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+static int cdns_torrent_dp_configure(struct phy *phy, union phy_configure_opts *opts)
 {
-       struct cdns_dp_phy *cdns_phy = phy_get_drvdata(phy);
+       struct cdns_torrent_phy *cdns_phy = phy_get_drvdata(phy);
        int ret;
 
        dev_dbg(&phy->dev,
@@ -1384,51 +1342,51 @@ static int cdns_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts
                (opts->dp.set_voltages && opts->dp.lanes > 3)   ? opts->dp.pre[3]       : -1,
                opts->dp.ssc);
 
-       ret = cdns_dp_phy_verify_config(cdns_phy, &opts->dp);
+       ret = cdns_torrent_dp_verify_config(cdns_phy, &opts->dp);
        if (ret) {
                dev_err(&phy->dev, "invalid params for phy configure\n");
                return ret;
        }
 
        if (opts->dp.set_lanes) {
-               ret = cdns_dp_phy_set_lanes(cdns_phy, &opts->dp);
+               ret = cdns_torrent_dp_set_lanes(cdns_phy, &opts->dp);
                if (ret) {
-                       dev_err(&phy->dev, "cdns_dp_phy_set_lanes failed\n");
+                       dev_err(&phy->dev, "cdns_torrent_dp_set_lanes failed\n");
                        return ret;
                }
        }
 
        if (opts->dp.set_rate) {
-               ret = cdns_dp_phy_set_rate(cdns_phy, &opts->dp);
+               ret = cdns_torrent_dp_set_rate(cdns_phy, &opts->dp);
                if (ret) {
-                       dev_err(&phy->dev, "cdns_dp_phy_set_rate failed\n");
+                       dev_err(&phy->dev, "cdns_torrent_dp_set_rate failed\n");
                        return ret;
                }
        }
 
        if (opts->dp.set_voltages)
-               cdns_dp_phy_set_voltages(cdns_phy, &opts->dp);
+               cdns_torrent_dp_set_voltages(cdns_phy, &opts->dp);
 
        return ret;
 }
 
-static const struct of_device_id cdns_dp_phy_of_match[] = {
+static const struct of_device_id cdns_torrent_phy_of_match[] = {
        {
-               .compatible = "cdns,dp-phy"
+               .compatible = "cdns,torrent-phy"
        },
        {}
 };
-MODULE_DEVICE_TABLE(of, cdns_dp_phy_of_match);
+MODULE_DEVICE_TABLE(of, cdns_torrent_phy_of_match);
 
-static struct platform_driver cdns_dp_phy_driver = {
-       .probe  = cdns_dp_phy_probe,
+static struct platform_driver cdns_torrent_phy_driver = {
+       .probe  = cdns_torrent_phy_probe,
        .driver = {
-               .name   = "cdns-dp-phy",
-               .of_match_table = cdns_dp_phy_of_match,
+               .name   = "cdns-torrent-phy",
+               .of_match_table = cdns_torrent_phy_of_match,
        }
 };
-module_platform_driver(cdns_dp_phy_driver);
+module_platform_driver(cdns_torrent_phy_driver);
 
 MODULE_AUTHOR("Cadence Design Systems, Inc.");
-MODULE_DESCRIPTION("Cadence MHDP PHY driver");
+MODULE_DESCRIPTION("Cadence Torrent PHY driver");
 MODULE_LICENSE("GPL v2");
index 97d4dd6ea9247d8fc3ec36ce4021ed17af1d0ac3..aa02b19b7e0e92ea4b42e40bd570d31a70f13071 100644 (file)
@@ -288,6 +288,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
                error = of_property_read_u32(np, "reg", &channel_num);
                if (error || channel_num > 2) {
                        dev_err(dev, "Invalid \"reg\" property\n");
+                       of_node_put(np);
                        return error;
                }
                channel->select_mask = select_mask[channel_num];
@@ -303,6 +304,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
                                                   &rcar_gen2_phy_ops);
                        if (IS_ERR(phy->phy)) {
                                dev_err(dev, "Failed to create PHY\n");
+                               of_node_put(np);
                                return PTR_ERR(phy->phy);
                        }
                        phy_set_drvdata(phy->phy, phy);
index fb8f05e39cf7f359f5c04c5c12db300517102d92..6fb2b696959053f78f6581d55d841900fd0a4e67 100644 (file)
@@ -66,6 +66,7 @@
                                         USB2_OBINT_IDDIGCHG)
 
 /* VBCTRL */
+#define USB2_VBCTRL_OCCLREN            BIT(16)
 #define USB2_VBCTRL_DRVVBUSSEL         BIT(8)
 
 /* LINECTRL1 */
@@ -289,6 +290,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
        u32 val;
 
        val = readl(usb2_base + USB2_VBCTRL);
+       val &= ~USB2_VBCTRL_OCCLREN;
        writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
        writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
        val = readl(usb2_base + USB2_OBINTEN);
index b6b1eb3164c14ec96748c786cd04a48b73af6da3..5677860dc913589b4d5dcd99c5617e1401cc937f 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/reset-controller.h>
+#include <dt-bindings/phy/phy.h>
 
 #define WIZ_SERDES_CTRL                0x404
 #define WIZ_SERDES_TOP_CTRL    0x408
@@ -29,7 +30,8 @@
 
 #define WIZ_MAX_LANES          4
 #define WIZ_MUX_NUM_CLOCKS     3
-#define WIZ_DIV_NUM_CLOCKS     2
+#define WIZ_DIV_NUM_CLOCKS_16G 2
+#define WIZ_DIV_NUM_CLOCKS_10G 1
 
 #define WIZ_SERDES_TYPEC_LN10_SWAP     BIT(30)
 
@@ -57,8 +59,10 @@ static const struct reg_field pll1_refclk_mux_sel =
                                        REG_FIELD(WIZ_SERDES_RST, 29, 29);
 static const struct reg_field pll0_refclk_mux_sel =
                                        REG_FIELD(WIZ_SERDES_RST, 28, 28);
-static const struct reg_field refclk_dig_sel =
+static const struct reg_field refclk_dig_sel_16g =
                                        REG_FIELD(WIZ_SERDES_RST, 24, 25);
+static const struct reg_field refclk_dig_sel_10g =
+                                       REG_FIELD(WIZ_SERDES_RST, 24, 24);
 static const struct reg_field pma_cmn_refclk_int_mode =
                                        REG_FIELD(WIZ_SERDES_TOP_CTRL, 28, 29);
 static const struct reg_field pma_cmn_refclk_mode =
@@ -75,6 +79,8 @@ static const struct reg_field p_enable[WIZ_MAX_LANES] = {
        REG_FIELD(WIZ_LANECTL(3), 30, 31),
 };
 
+enum p_enable { P_ENABLE = 2, P_ENABLE_FORCE = 1, P_ENABLE_DISABLE = 0 };
+
 static const struct reg_field p_align[WIZ_MAX_LANES] = {
        REG_FIELD(WIZ_LANECTL(0), 29, 29),
        REG_FIELD(WIZ_LANECTL(1), 29, 29),
@@ -126,7 +132,7 @@ struct wiz_clk_div_sel {
        const char              *node_name;
 };
 
-static struct wiz_clk_mux_sel clk_mux_sel[] = {
+static struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
        {
                /*
                 * Mux value to be configured for each of the input clocks
@@ -145,6 +151,25 @@ static struct wiz_clk_mux_sel clk_mux_sel[] = {
        },
 };
 
+static struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
+       {
+               /*
+                * Mux value to be configured for each of the input clocks
+                * in the order populated in device tree
+                */
+               .table = { 1, 0 },
+               .node_name = "pll0_refclk",
+       },
+       {
+               .table = { 1, 0 },
+               .node_name = "pll1_refclk",
+       },
+       {
+               .table = { 1, 0 },
+               .node_name = "refclk_dig",
+       },
+};
+
 static struct clk_div_table clk_div_table[] = {
        { .val = 0, .div = 1, },
        { .val = 1, .div = 2, },
@@ -163,10 +188,16 @@ static struct wiz_clk_div_sel clk_div_sel[] = {
        },
 };
 
+enum wiz_type {
+       J721E_WIZ_16G,
+       J721E_WIZ_10G,
+};
+
 struct wiz {
        struct regmap           *regmap;
        struct wiz_clk_mux_sel  *clk_mux_sel;
        struct wiz_clk_div_sel  *clk_div_sel;
+       unsigned int            clk_div_sel_num;
        struct regmap_field     *por_en;
        struct regmap_field     *phy_reset_n;
        struct regmap_field     *p_enable[WIZ_MAX_LANES];
@@ -183,8 +214,10 @@ struct wiz {
        struct platform_device  *serdes_pdev;
        struct reset_controller_dev wiz_phy_reset_dev;
        struct gpio_desc        *gpio_typec_dir;
+       int                     typec_dir_delay;
 
-       bool used_for_dp;
+       enum wiz_type type;
+       u32 lane_modes[WIZ_MAX_LANES];
 };
 
 static int wiz_reset(struct wiz *wiz)
@@ -207,12 +240,17 @@ static int wiz_reset(struct wiz *wiz)
 static int wiz_mode_select(struct wiz *wiz)
 {
        u32 num_lanes = wiz->num_lanes;
+       enum wiz_lane_standard_mode mode;
        int ret;
        int i;
 
        for (i = 0; i < num_lanes; i++) {
-               ret = regmap_field_write(wiz->p_standard_mode[i],
-                                        LANE_MODE_GEN4);
+               if (wiz->lane_modes[i] == PHY_TYPE_DP)
+                       mode = LANE_MODE_GEN1;
+               else
+                       mode = LANE_MODE_GEN4;
+
+               ret = regmap_field_write(wiz->p_standard_mode[i], mode);
                if (ret)
                        return ret;
        }
@@ -262,21 +300,6 @@ static int wiz_init(struct wiz *wiz)
                return ret;
        }
 
-       /* INIT HACK to get DP working. Values from Brian */
-       if (wiz->used_for_dp) {
-               regmap_write(wiz->regmap, 0x408, 0x30000000);
-               regmap_write(wiz->regmap, 0x40c, 0x39000000);
-               regmap_write(wiz->regmap, 0x480, 0x70000000);
-               regmap_write(wiz->regmap, 0x4c0, 0x80000000);
-               regmap_write(wiz->regmap, 0x500, 0x80000000);
-               regmap_write(wiz->regmap, 0x540, 0x80000000);
-               regmap_write(wiz->regmap, 0x484, 0x10001);
-               regmap_write(wiz->regmap, 0x4c4, 0x10001);
-               regmap_write(wiz->regmap, 0x504, 0x10001);
-               regmap_write(wiz->regmap, 0x544, 0x10001);
-               regmap_write(wiz->regmap, 0x5FC, 0x00000);
-       }
-
        return 0;
 }
 
@@ -324,12 +347,14 @@ static int wiz_regfield_init(struct wiz *wiz)
                return PTR_ERR(clk_div_sel->field);
        }
 
-       clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK1];
-       clk_div_sel->field = devm_regmap_field_alloc(dev, regmap,
-                                                    pma_cmn_refclk1_dig_div);
-       if (IS_ERR(clk_div_sel->field)) {
-               dev_err(dev, "PMA_CMN_REFCLK1_DIG_DIV reg field init failed\n");
-               return PTR_ERR(clk_div_sel->field);
+       if (wiz->type == J721E_WIZ_16G) {
+               clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK1];
+               clk_div_sel->field = devm_regmap_field_alloc(dev, regmap,
+                                                            pma_cmn_refclk1_dig_div);
+               if (IS_ERR(clk_div_sel->field)) {
+                       dev_err(dev, "PMA_CMN_REFCLK1_DIG_DIV reg field init failed\n");
+                       return PTR_ERR(clk_div_sel->field);
+               }
        }
 
        clk_mux_sel = &wiz->clk_mux_sel[PLL0_REFCLK];
@@ -349,8 +374,15 @@ static int wiz_regfield_init(struct wiz *wiz)
        }
 
        clk_mux_sel = &wiz->clk_mux_sel[REFCLK_DIG];
-       clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
-                                                    refclk_dig_sel);
+       if (wiz->type == J721E_WIZ_10G)
+               clk_mux_sel->field =
+                       devm_regmap_field_alloc(dev, regmap,
+                                               refclk_dig_sel_10g);
+       else
+               clk_mux_sel->field =
+                       devm_regmap_field_alloc(dev, regmap,
+                                               refclk_dig_sel_16g);
+
        if (IS_ERR(clk_mux_sel->field)) {
                dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n");
                return PTR_ERR(clk_mux_sel->field);
@@ -636,7 +668,7 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
                of_node_put(clk_node);
        }
 
-       for (i = 0; i < WIZ_DIV_NUM_CLOCKS; i++) {
+       for (i = 0; i < wiz->clk_div_sel_num; i++) {
                node_name = clk_div_sel[i].node_name;
                clk_node = of_get_child_by_name(node, node_name);
                if (!clk_node) {
@@ -676,7 +708,7 @@ static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
                return ret;
        }
 
-       ret = regmap_field_write(wiz->p_enable[id - 1], false);
+       ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE_DISABLE);
        return ret;
 }
 
@@ -689,6 +721,9 @@ static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
 
        /* if typec-dir gpio was specified, set LN10 SWAP bit based on that */
        if (id == 0 && wiz->gpio_typec_dir) {
+               if (wiz->typec_dir_delay)
+                       msleep_interruptible(wiz->typec_dir_delay);
+
                if (gpiod_get_value_cansleep(wiz->gpio_typec_dir)) {
                        regmap_update_bits(wiz->regmap, WIZ_SERDES_TYPEC,
                                           WIZ_SERDES_TYPEC_LN10_SWAP,
@@ -704,7 +739,11 @@ static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
                return ret;
        }
 
-       ret = regmap_field_write(wiz->p_enable[id - 1], true);
+       if (wiz->lane_modes[id - 1] == PHY_TYPE_DP)
+               ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE);
+       else
+               ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE_FORCE);
+
        return ret;
 }
 
@@ -722,18 +761,40 @@ static struct regmap_config wiz_regmap_config = {
 
 static const struct of_device_id wiz_id_table[] = {
        {
-               .compatible = "ti,j721e-wiz",
+               .compatible = "ti,j721e-wiz-16g", .data = (void *) J721E_WIZ_16G
+       },
+       {
+               .compatible = "ti,j721e-wiz-10g", .data = (void *) J721E_WIZ_10G
        },
        {}
 };
 MODULE_DEVICE_TABLE(of, wiz_id_table);
 
-static void wiz_check_dp_usage(struct wiz *wiz, struct device_node *child_node)
+static int wiz_get_lane_mode(struct device *dev, int lane_number,
+                            u32 *lane_mode)
 {
-       const char *compat;
+       char property_name[11]; /* 11 is length of "lane0-mode\0" */
+       int ret;
+
+       ret = snprintf(property_name, sizeof(property_name), "lane%u-mode",
+                      lane_number);
 
-       if (of_property_read_string(child_node, "compatible", &compat) == 0)
-               wiz->used_for_dp = !strcmp("cdns,dp-phy", compat);
+       if (ret != 10) { /* 10 is length of "lane0-mode" */
+               dev_err(dev, "%s: bad lane number %d (ret = %d)\n",
+                       __func__, lane_number, ret);
+               return -ENOTSUPP;
+       }
+
+       ret = of_property_read_u32(dev->of_node, property_name, lane_mode);
+       if (ret == -EINVAL) {
+               *lane_mode = PHY_NONE;
+               return 0;
+       } else if (ret) {
+               dev_err(dev, "Getting \"%s\" property failed: %d\n",
+                       property_name, ret);
+       }
+
+       return ret;
 }
 
 static int wiz_probe(struct platform_device *pdev)
@@ -749,11 +810,14 @@ static int wiz_probe(struct platform_device *pdev)
        struct wiz *wiz;
        u32 num_lanes;
        int ret;
+       int i;
 
        wiz = devm_kzalloc(dev, sizeof(*wiz), GFP_KERNEL);
        if (!wiz)
                return -ENOMEM;
 
+       wiz->type = (enum wiz_type) of_device_get_match_data(dev);
+
        child_node = of_get_child_by_name(node, "serdes");
        if (!child_node) {
                dev_err(dev, "Failed to get SERDES child DT node\n");
@@ -792,16 +856,41 @@ static int wiz_probe(struct platform_device *pdev)
                                                      GPIOD_IN);
        if (IS_ERR(wiz->gpio_typec_dir)) {
                ret = PTR_ERR(wiz->gpio_typec_dir);
-               dev_err(dev, "Failed to request typec-dir gpio: %d\n", ret);
-               return ret;
+               if (ret != -EPROBE_DEFER)
+                       dev_err(dev, "Failed to request typec-dir gpio: %d\n", ret);
+               goto err_addr_to_resource;
+       }
+
+       if (wiz->gpio_typec_dir) {
+               ret = of_property_read_u32(node, "typec-dir-debounce",
+                                          &wiz->typec_dir_delay);
+               if (ret && ret != -EINVAL) {
+                       dev_err(dev, "Invalid typec-dir-debounce property\n");
+                       goto err_addr_to_resource;
+               }
+       }
+
+       for (i = 0; i < num_lanes; i++) {
+               ret = wiz_get_lane_mode(dev, i, &wiz->lane_modes[i]);
+               if (ret)
+                       return ret;
        }
 
        wiz->dev = dev;
        wiz->regmap = regmap;
        wiz->num_lanes = num_lanes;
-       wiz->clk_mux_sel = clk_mux_sel;
+       if (wiz->type == J721E_WIZ_10G)
+               wiz->clk_mux_sel = clk_mux_sel_10g;
+       else
+               wiz->clk_mux_sel = clk_mux_sel_16g;
+
        wiz->clk_div_sel = clk_div_sel;
 
+       if (wiz->type == J721E_WIZ_10G)
+               wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G;
+       else
+               wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G;
+
        platform_set_drvdata(pdev, wiz);
 
        ret = wiz_regfield_init(wiz);
@@ -844,8 +933,6 @@ static int wiz_probe(struct platform_device *pdev)
        }
        wiz->serdes_pdev = serdes_pdev;
 
-       wiz_check_dp_usage(wiz, child_node);
-
        ret = wiz_init(wiz);
        if (ret) {
                dev_err(dev, "WIZ initialization failed\n");
index b7e272d6ae812df865a49e019cdf1e919a2219b4..227646eb817c8c82f2dbdea0cd592f04b1367f8d 100644 (file)
@@ -1524,7 +1524,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1532,7 +1531,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1540,7 +1538,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1548,7 +1545,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {}
index a613e546717a8d52d42d0b9e9872a9d565dc9e96..564cfaee129d2a54f673642e0e406ec90bd089d6 100644 (file)
@@ -113,6 +113,8 @@ static void mtk_eint_mask(struct irq_data *d)
        void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
                                                eint->regs->mask_set);
 
+       eint->cur_mask[d->hwirq >> 5] &= ~mask;
+
        writel(mask, reg);
 }
 
@@ -123,6 +125,8 @@ static void mtk_eint_unmask(struct irq_data *d)
        void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
                                                eint->regs->mask_clr);
 
+       eint->cur_mask[d->hwirq >> 5] |= mask;
+
        writel(mask, reg);
 
        if (eint->dual_edge[d->hwirq])
@@ -217,19 +221,6 @@ static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
        }
 }
 
-static void mtk_eint_chip_read_mask(const struct mtk_eint *eint,
-                                   void __iomem *base, u32 *buf)
-{
-       int port;
-       void __iomem *reg;
-
-       for (port = 0; port < eint->hw->ports; port++) {
-               reg = base + eint->regs->mask + (port << 2);
-               buf[port] = ~readl_relaxed(reg);
-               /* Mask is 0 when irq is enabled, and 1 when disabled. */
-       }
-}
-
 static int mtk_eint_irq_request_resources(struct irq_data *d)
 {
        struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
@@ -318,7 +309,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct mtk_eint *eint = irq_desc_get_handler_data(desc);
        unsigned int status, eint_num;
-       int offset, index, virq;
+       int offset, mask_offset, index, virq;
        void __iomem *reg =  mtk_eint_get_offset(eint, 0, eint->regs->stat);
        int dual_edge, start_level, curr_level;
 
@@ -328,10 +319,24 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
                status = readl(reg);
                while (status) {
                        offset = __ffs(status);
+                       mask_offset = eint_num >> 5;
                        index = eint_num + offset;
                        virq = irq_find_mapping(eint->domain, index);
                        status &= ~BIT(offset);
 
+                       /*
+                        * If we get an interrupt on pin that was only required
+                        * for wake (but no real interrupt requested), mask the
+                        * interrupt (as would mtk_eint_resume do anyway later
+                        * in the resume sequence).
+                        */
+                       if (eint->wake_mask[mask_offset] & BIT(offset) &&
+                           !(eint->cur_mask[mask_offset] & BIT(offset))) {
+                               writel_relaxed(BIT(offset), reg -
+                                       eint->regs->stat +
+                                       eint->regs->mask_set);
+                       }
+
                        dual_edge = eint->dual_edge[index];
                        if (dual_edge) {
                                /*
@@ -370,7 +375,6 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
 
 int mtk_eint_do_suspend(struct mtk_eint *eint)
 {
-       mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask);
        mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);
 
        return 0;
index 4edeb4cae72aa28ba251558a499ce5a13436d237..c4c70dc57dbeec99813a9cfd5542146a6ade1883 100644 (file)
@@ -198,8 +198,8 @@ static const unsigned int uart_rts_b_pins[] = { GPIODV_27 };
 
 static const unsigned int uart_tx_c_pins[]     = { GPIOY_13 };
 static const unsigned int uart_rx_c_pins[]     = { GPIOY_14 };
-static const unsigned int uart_cts_c_pins[]    = { GPIOX_11 };
-static const unsigned int uart_rts_c_pins[]    = { GPIOX_12 };
+static const unsigned int uart_cts_c_pins[]    = { GPIOY_11 };
+static const unsigned int uart_rts_c_pins[]    = { GPIOY_12 };
 
 static const unsigned int i2c_sck_a_pins[]     = { GPIODV_25 };
 static const unsigned int i2c_sda_a_pins[]     = { GPIODV_24 };
@@ -445,10 +445,10 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
        GROUP(pwm_f_x,          3,      18),
 
        /* Bank Y */
-       GROUP(uart_cts_c,       1,      19),
-       GROUP(uart_rts_c,       1,      18),
-       GROUP(uart_tx_c,        1,      17),
-       GROUP(uart_rx_c,        1,      16),
+       GROUP(uart_cts_c,       1,      17),
+       GROUP(uart_rts_c,       1,      16),
+       GROUP(uart_tx_c,        1,      19),
+       GROUP(uart_rx_c,        1,      18),
        GROUP(pwm_a_y,          1,      21),
        GROUP(pwm_f_y,          1,      20),
        GROUP(i2s_out_ch23_y,   1,      5),
index aa48b3f23c7fd199138a6896328679569d2b8a75..3aac640596ad6e06a3fb359bfa8c3019be019def 100644 (file)
@@ -183,10 +183,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
        PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
                      BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
                      18, 2, "gpio", "uart"),
-       PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
-       PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
-       PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
-       PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
+       PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
+       PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
+       PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
+       PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
 
 };
 
@@ -218,11 +218,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = {
 };
 
 static inline void armada_37xx_update_reg(unsigned int *reg,
-                                         unsigned int offset)
+                                         unsigned int *offset)
 {
        /* We never have more than 2 registers */
-       if (offset >= GPIO_PER_REG) {
-               offset -= GPIO_PER_REG;
+       if (*offset >= GPIO_PER_REG) {
+               *offset -= GPIO_PER_REG;
                *reg += sizeof(u32);
        }
 }
@@ -373,7 +373,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg,
 {
        int offset = irqd_to_hwirq(d);
 
-       armada_37xx_update_reg(reg, offset);
+       armada_37xx_update_reg(reg, &offset);
 }
 
 static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
@@ -383,7 +383,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
        unsigned int reg = OUTPUT_EN;
        unsigned int mask;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
 
        return regmap_update_bits(info->regmap, reg, mask, 0);
@@ -396,7 +396,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
        unsigned int reg = OUTPUT_EN;
        unsigned int val, mask;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
        regmap_read(info->regmap, reg, &val);
 
@@ -410,7 +410,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
        unsigned int reg = OUTPUT_EN;
        unsigned int mask, val, ret;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
 
        ret = regmap_update_bits(info->regmap, reg, mask, mask);
@@ -431,7 +431,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
        unsigned int reg = INPUT_VAL;
        unsigned int val, mask;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
 
        regmap_read(info->regmap, reg, &val);
@@ -446,7 +446,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
        unsigned int reg = OUTPUT_VAL;
        unsigned int mask, val;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
        val = value ? mask : 0;
 
index 1425c2874d4028b5140cc74933733a2bf4f8f22b..cd7a5d95b499a1d7d9b70948577d91c840c2806d 100644 (file)
@@ -569,15 +569,25 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
                            !(regval & BIT(INTERRUPT_MASK_OFF)))
                                continue;
                        irq = irq_find_mapping(gc->irq.domain, irqnr + i);
-                       generic_handle_irq(irq);
+                       if (irq != 0)
+                               generic_handle_irq(irq);
 
                        /* Clear interrupt.
                         * We must read the pin register again, in case the
                         * value was changed while executing
                         * generic_handle_irq() above.
+                        * If we didn't find a mapping for the interrupt,
+                        * disable it in order to avoid a system hang caused
+                        * by an interrupt storm.
                         */
                        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
                        regval = readl(regs + i);
+                       if (irq == 0) {
+                               regval &= ~BIT(INTERRUPT_ENABLE_OFF);
+                               dev_dbg(&gpio_dev->pdev->dev,
+                                       "Disabling spurious GPIO IRQ %d\n",
+                                       irqnr + i);
+                       }
                        writel(regval, regs + i);
                        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
                        ret = IRQ_HANDLED;
index cecbce21d01f754ad8b5471d49ad70db19bf35b7..33c3eca0ece97f1c9aec1901554bb4dd5682b15b 100644 (file)
@@ -889,6 +889,10 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
        if (ret < 0)
                goto fail;
 
+       ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
+       if (ret < 0)
+               goto fail;
+
        mcp->irq_controller =
                device_property_read_bool(dev, "interrupt-controller");
        if (mcp->irq && mcp->irq_controller) {
@@ -930,10 +934,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
                        goto fail;
        }
 
-       ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
-       if (ret < 0)
-               goto fail;
-
        if (one_regmap_config) {
                mcp->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL,
                                "mcp23xxx-pinctrl.%d", raw_chip_address);
index f4a61429e06e7bd28c20a9ff0e54cfbf41cee4fe..8d83817935dae789d8e81dacbb29e0e9f4b89c4c 100644 (file)
@@ -3172,6 +3172,7 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
                                                    base,
                                                    &rockchip_regmap_config);
                }
+               of_node_put(node);
        }
 
        bank->irq = irq_of_parse_and_map(bank->of_node, 0);
index 78c2f548b25f1ef5dc3cbe702f2ef8db10416523..8f3468d9f848dda6309a1416892b24bafc0f3eac 100644 (file)
@@ -159,10 +159,8 @@ struct sprd_pinctrl {
        struct sprd_pinctrl_soc_info *info;
 };
 
-enum sprd_pinconf_params {
-       SPRD_PIN_CONFIG_CONTROL = PIN_CONFIG_END + 1,
-       SPRD_PIN_CONFIG_SLEEP_MODE = PIN_CONFIG_END + 2,
-};
+#define SPRD_PIN_CONFIG_CONTROL                (PIN_CONFIG_END + 1)
+#define SPRD_PIN_CONFIG_SLEEP_MODE     (PIN_CONFIG_END + 2)
 
 static int sprd_pinctrl_get_id_by_name(struct sprd_pinctrl *sprd_pctl,
                                       const char *name)
index 1aba75897d1476a3828bad0ffccd19c29f513ac3..26a3f1eb9c6bfdd654174b25ea906b89f1e80fe8 100644 (file)
@@ -40,7 +40,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
 
 static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
 {
-       writel(val, pmx->regs[bank] + reg);
+       writel_relaxed(val, pmx->regs[bank] + reg);
+       /* make sure pinmux register write completed */
+       pmx_readl(pmx, bank, reg);
 }
 
 static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
index 088d1c2047e6b3b097229210d819ef5d33cca541..36bd2545afb62d6af03a004d56ce71951a929493 100644 (file)
@@ -685,10 +685,14 @@ static int __init pmc_core_probe(void)
        if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
                pmcdev->map = &cnp_reg_map;
 
-       if (lpit_read_residency_count_address(&slp_s0_addr))
+       if (lpit_read_residency_count_address(&slp_s0_addr)) {
                pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;
-       else
+
+               if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
+                       return -ENODEV;
+       } else {
                pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
+       }
 
        pmcdev->regbase = ioremap(pmcdev->base_addr,
                                  pmcdev->map->regmap_length);
index b1d8043762371cc94df320a3c714e09c073d70b0..6a61028cbb3c670f0cfca839f1250fc4d2caaef6 100644 (file)
@@ -421,6 +421,14 @@ static const struct dmi_system_id critclk_systems[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
                },
        },
+       {
+               /* pmc_plt_clk* - are used for ethernet controllers */
+               .ident = "Beckhoff CB4063",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+                       DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
+               },
+       },
        {
                /* pmc_plt_clk* - are used for ethernet controllers */
                .ident = "Beckhoff CB6263",
index 5a2757a7f40885a4c14dd7b6e008ad6136598c6b..5358a80d854f99e0157a38bea979eb1b0912f46b 100644 (file)
@@ -131,7 +131,8 @@ static ssize_t power_supply_show_property(struct device *dev,
                                dev_dbg(dev, "driver has no data for `%s' property\n",
                                        attr->attr.name);
                        else if (ret != -ENODEV && ret != -EAGAIN)
-                               dev_err(dev, "driver failed to report `%s' property: %zd\n",
+                               dev_err_ratelimited(dev,
+                                       "driver failed to report `%s' property: %zd\n",
                                        attr->attr.name, ret);
                        return ret;
                }
index 8ba6abf584de2b6ca5a66573dd8cf2b873f023f7..3958ee03eec1450d3ef1ae32542af835c00c9618 100644 (file)
@@ -323,17 +323,22 @@ static int sbs_get_battery_presence_and_health(
 {
        int ret;
 
-       if (psp == POWER_SUPPLY_PROP_PRESENT) {
-               /* Dummy command; if it succeeds, battery is present. */
-               ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
-               if (ret < 0)
-                       val->intval = 0; /* battery disconnected */
-               else
-                       val->intval = 1; /* battery present */
-       } else { /* POWER_SUPPLY_PROP_HEALTH */
+       /* Dummy command; if it succeeds, battery is present. */
+       ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
+
+       if (ret < 0) { /* battery not present*/
+               if (psp == POWER_SUPPLY_PROP_PRESENT) {
+                       val->intval = 0;
+                       return 0;
+               }
+               return ret;
+       }
+
+       if (psp == POWER_SUPPLY_PROP_PRESENT)
+               val->intval = 1; /* battery present */
+       else /* POWER_SUPPLY_PROP_HEALTH */
                /* SBS spec doesn't have a general health command. */
                val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
-       }
 
        return 0;
 }
@@ -629,12 +634,14 @@ static int sbs_get_property(struct power_supply *psy,
        switch (psp) {
        case POWER_SUPPLY_PROP_PRESENT:
        case POWER_SUPPLY_PROP_HEALTH:
-               if (client->flags & SBS_FLAGS_TI_BQ20Z75)
+               if (chip->flags & SBS_FLAGS_TI_BQ20Z75)
                        ret = sbs_get_ti_battery_presence_and_health(client,
                                                                     psp, val);
                else
                        ret = sbs_get_battery_presence_and_health(client, psp,
                                                                  val);
+
+               /* this can only be true if no gpio is used */
                if (psp == POWER_SUPPLY_PROP_PRESENT)
                        return 0;
                break;
index 8febacb8fc54df3965cf53b64d040ea705bb68cf..0951564b6830a7bed1f4ebe1b64617f180428c49 100644 (file)
@@ -166,6 +166,14 @@ static long pps_cdev_ioctl(struct file *file,
                        pps->params.mode |= PPS_CANWAIT;
                pps->params.api_version = PPS_API_VERS;
 
+               /*
+                * Clear unused fields of pps_kparams to avoid leaking
+                * uninitialized data of the PPS_SETPARAMS caller via
+                * PPS_GETPARAMS
+                */
+               pps->params.assert_off_tu.flags = 0;
+               pps->params.clear_off_tu.flags = 0;
+
                spin_unlock_irq(&pps->lock);
 
                break;
index 5b1b480c329d748ec83fc09eed559182aaf13710..30d13a28f0b78ee5681d4ca7af3e6ff095070ccf 100644 (file)
@@ -340,7 +340,7 @@ static int prupwm_init_prufw(struct device_node *np, struct pru_pwm *pp)
                return ret;
        }
 
-       pp->pruss = pruss_get(pp->pru);
+       pp->pruss = pruss_get(pp->pru, NULL);
        if (IS_ERR(pp->pruss)) {
                ret = PTR_ERR(pp->pruss);
                dev_err(dev, "failed to get pruss handle (%d)\n", ret);
index 0059b24cfdc3c39579b9e70b11b9f0271399db1a..28e1f64134763486d698183d892a50d2121fc1dd 100644 (file)
@@ -58,6 +58,12 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
        /* Calculate the period and prescaler value */
        div = (unsigned long long)clk_get_rate(priv->clk) * state->period;
        do_div(div, NSEC_PER_SEC);
+       if (!div) {
+               /* Clock is too slow to achieve requested period. */
+               dev_dbg(priv->chip.dev, "Can't reach %u ns\n",  state->period);
+               return -EINVAL;
+       }
+
        prd = div;
        while (div > STM32_LPTIM_MAX_ARR) {
                presc++;
index cbe467ff1aba9d8ca72588f51856f2239dd4d0d9..fa0bbda4b3f2e5fad5e95572fe174ffe631a46cb 100644 (file)
@@ -1688,6 +1688,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
 
        if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
                return -EFAULT;
+       dev_info.name[sizeof(dev_info.name) - 1] = '\0';
 
        rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
                   dev_info.comptag, dev_info.destid, dev_info.hopcount);
@@ -1819,6 +1820,7 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
 
        if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
                return -EFAULT;
+       dev_info.name[sizeof(dev_info.name) - 1] = '\0';
 
        mport = priv->md->mport;
 
index f85d6b7a19848503d4ea8c28f7c2feec64ac3038..5d2b2c02cbbeca68e70bbc5f82665a9288ecc809 100644 (file)
@@ -369,7 +369,9 @@ static int pfn_set(void *data, u64 val)
 {
        *(u64 *)data = val;
 
-       return cec_add_elem(val);
+       cec_add_elem(val);
+
+       return 0;
 }
 
 DEFINE_DEBUGFS_ATTRIBUTE(pfn_ops, u64_get, pfn_set, "0x%llx\n");
index 9577d89418468a06f1030ff69d2699f71b5710bc..f312764660e662e6508191e3e990ba2e2a94127f 100644 (file)
@@ -4789,7 +4789,7 @@ static int __init regulator_init(void)
 /* init early to allow our consumers to complete system booting */
 core_initcall(regulator_init);
 
-static int __init regulator_late_cleanup(struct device *dev, void *data)
+static int regulator_late_cleanup(struct device *dev, void *data)
 {
        struct regulator_dev *rdev = dev_to_rdev(dev);
        const struct regulator_ops *ops = rdev->desc->ops;
@@ -4838,17 +4838,8 @@ unlock:
        return 0;
 }
 
-static int __init regulator_init_complete(void)
+static void regulator_init_complete_work_function(struct work_struct *work)
 {
-       /*
-        * Since DT doesn't provide an idiomatic mechanism for
-        * enabling full constraints and since it's much more natural
-        * with DT to provide them just assume that a DT enabled
-        * system has full constraints.
-        */
-       if (of_have_populated_dt())
-               has_full_constraints = true;
-
        /*
         * Regulators may had failed to resolve their input supplies
         * when were registered, either because the input supply was
@@ -4866,6 +4857,35 @@ static int __init regulator_init_complete(void)
         */
        class_for_each_device(&regulator_class, NULL, NULL,
                              regulator_late_cleanup);
+}
+
+static DECLARE_DELAYED_WORK(regulator_init_complete_work,
+                           regulator_init_complete_work_function);
+
+static int __init regulator_init_complete(void)
+{
+       /*
+        * Since DT doesn't provide an idiomatic mechanism for
+        * enabling full constraints and since it's much more natural
+        * with DT to provide them just assume that a DT enabled
+        * system has full constraints.
+        */
+       if (of_have_populated_dt())
+               has_full_constraints = true;
+
+       /*
+        * We punt completion for an arbitrary amount of time since
+        * systems like distros will load many drivers from userspace
+        * so consumers might not always be ready yet, this is
+        * particularly an issue with laptops where this might bounce
+        * the display off then on.  Ideally we'd get a notification
+        * from userspace when this happens but we don't so just wait
+        * a bit and hope we waited long enough.  It'd be better if
+        * we'd only do this on systems that need it, and a kernel
+        * command line option might be useful.
+        */
+       schedule_delayed_work(&regulator_init_complete_work,
+                             msecs_to_jiffies(30000));
 
        class_for_each_device(&regulator_class, NULL, NULL,
                              regulator_register_fill_coupling_array);
index b615a413ca9f6ff3f2313349be5edd38dabb0855..27c0a67cfd0e290ad2d3a9ac65605e7ea88112b1 100644 (file)
@@ -33,7 +33,7 @@
 
 /* LM3632 */
 #define LM3632_BOOST_VSEL_MAX          0x26
-#define LM3632_LDO_VSEL_MAX            0x29
+#define LM3632_LDO_VSEL_MAX            0x28
 #define LM3632_VBOOST_MIN              4500000
 #define LM3632_VLDO_MIN                        4000000
 
index c584bd1ffa9c1e2654b9e3bf137393d756ad5291..7c598c156d9e17eac95bf06e8e0d9dff2ab33cb3 100644 (file)
@@ -373,8 +373,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
        regulator_desc_s2mps11_buck1_4(4),
        regulator_desc_s2mps11_buck5,
        regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
-       regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
+       regulator_desc_s2mps11_buck67810(7, MIN_750_MV, STEP_12_5_MV),
+       regulator_desc_s2mps11_buck67810(8, MIN_750_MV, STEP_12_5_MV),
        regulator_desc_s2mps11_buck9,
        regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
 };
index aa325c8c5fae83d7b066ef933a2f190191d7d2ee..731986d026c40840ed71447a7bd5a880a353b04f 100644 (file)
@@ -573,6 +573,10 @@ static int omap_rproc_start(struct rproc *rproc)
        int ret;
        struct mbox_client *client = &oproc->client;
 
+       /*
+        * We set boot address irrespective of the value of the late attach flag
+        * as boot address takes effect only on a deassert of remoteproc reset.
+        */
        if (oproc->boot_data) {
                ret = omap_rproc_write_dsp_boot_addr(rproc);
                if (ret)
@@ -612,10 +616,12 @@ static int omap_rproc_start(struct rproc *rproc)
                goto put_mbox;
        }
 
-       ret = pdata->device_enable(pdev);
-       if (ret) {
-               dev_err(dev, "omap_device_enable failed: %d\n", ret);
-               goto reset_timers;
+       if (!rproc->late_attach) {
+               ret = pdata->device_enable(pdev);
+               if (ret) {
+                       dev_err(dev, "omap_device_enable failed: %d\n", ret);
+                       goto reset_timers;
+               }
        }
 
        /*
@@ -671,6 +677,16 @@ static int omap_rproc_stop(struct rproc *rproc)
        if (ret)
                goto enable_device;
 
+       /*
+        * During late attach, we use non-zeroing dma ops to prevent the kernel
+        * from overwriting already loaded code and data segments. When
+        * shutting down the processor, we restore the normal zeroing dma ops.
+        * This allows the kernel to clear memory when loading a new remoteproc
+        * binary or during error recovery with the current remoteproc binary.
+        */
+       if (rproc->late_attach)
+               set_dma_ops(dev, &arm_dma_ops);
+
        mbox_free_channel(oproc->mbox);
 
        /*
@@ -1310,6 +1326,11 @@ static int omap_rproc_probe(struct platform_device *pdev)
        if (!rproc)
                return -ENOMEM;
 
+       if (pdata->device_is_enabled && pdata->device_is_enabled(pdev)) {
+               rproc->late_attach = 1;
+               set_dma_ops(&pdev->dev, &arm_dma_m_ops);
+       }
+
        oproc = rproc->priv;
        oproc->rproc = rproc;
        /* All existing OMAP IPU and DSP processors have an MMU */
@@ -1398,6 +1419,8 @@ static int omap_rproc_probe(struct platform_device *pdev)
 release_mem:
        of_reserved_mem_device_release(&pdev->dev);
 free_rproc:
+       if (rproc->late_attach)
+               set_dma_ops(&pdev->dev, &arm_dma_ops);
        rproc_free(rproc);
        return ret;
 }
index e9ab90c19304fdb29d9c223865d53c6a14ec613b..602af839421deeb5d93d9818984635b9ffe840eb 100644 (file)
@@ -188,6 +188,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
        init_completion(&q6v5->stop_done);
 
        q6v5->wdog_irq = platform_get_irq_byname(pdev, "wdog");
+       if (q6v5->wdog_irq < 0) {
+               if (q6v5->wdog_irq != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "failed to retrieve wdog IRQ: %d\n",
+                               q6v5->wdog_irq);
+               return q6v5->wdog_irq;
+       }
+
        ret = devm_request_threaded_irq(&pdev->dev, q6v5->wdog_irq,
                                        NULL, q6v5_wdog_interrupt,
                                        IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -198,8 +206,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
        }
 
        q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal");
-       if (q6v5->fatal_irq == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
+       if (q6v5->fatal_irq < 0) {
+               if (q6v5->fatal_irq != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "failed to retrieve fatal IRQ: %d\n",
+                               q6v5->fatal_irq);
+               return q6v5->fatal_irq;
+       }
 
        ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq,
                                        NULL, q6v5_fatal_interrupt,
@@ -211,8 +224,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
        }
 
        q6v5->ready_irq = platform_get_irq_byname(pdev, "ready");
-       if (q6v5->ready_irq == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
+       if (q6v5->ready_irq < 0) {
+               if (q6v5->ready_irq != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "failed to retrieve ready IRQ: %d\n",
+                               q6v5->ready_irq);
+               return q6v5->ready_irq;
+       }
 
        ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq,
                                        NULL, q6v5_ready_interrupt,
@@ -224,8 +242,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
        }
 
        q6v5->handover_irq = platform_get_irq_byname(pdev, "handover");
-       if (q6v5->handover_irq == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
+       if (q6v5->handover_irq < 0) {
+               if (q6v5->handover_irq != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "failed to retrieve handover IRQ: %d\n",
+                               q6v5->handover_irq);
+               return q6v5->handover_irq;
+       }
 
        ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq,
                                        NULL, q6v5_handover_interrupt,
@@ -238,8 +261,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
        disable_irq(q6v5->handover_irq);
 
        q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack");
-       if (q6v5->stop_irq == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
+       if (q6v5->stop_irq < 0) {
+               if (q6v5->stop_irq != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "failed to retrieve stop-ack IRQ: %d\n",
+                               q6v5->stop_irq);
+               return q6v5->stop_irq;
+       }
 
        ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq,
                                        NULL, q6v5_stop_interrupt,
index beeaa5b3b4873a77170cd8f306715ba02d086a57..3d8126dacd451c010804811e76295cbab8b9be7b 100644 (file)
@@ -1132,6 +1132,9 @@ static int q6v5_probe(struct platform_device *pdev)
        if (!desc)
                return -EINVAL;
 
+       if (desc->need_mem_protection && !qcom_scm_is_available())
+               return -EPROBE_DEFER;
+
        rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
                            desc->hexagon_mba_image, sizeof(*qproc));
        if (!rproc) {
index 383baf194e5463fc9807a616f692b05e9cf5ffca..f71e8d83ac27f392325fcb5b719dec5e52d296bf 100644 (file)
@@ -742,10 +742,13 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
        if (!mapping)
                return -ENOMEM;
 
-       ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
-       if (ret) {
-               dev_err(dev, "failed to map devmem: %d\n", ret);
-               goto out;
+       if (!rproc->late_attach) {
+               ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len,
+                               rsc->flags);
+               if (ret) {
+                       dev_err(dev, "failed to map devmem: %d\n", ret);
+                       goto out;
+               }
        }
 
        /*
@@ -760,8 +763,12 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
        mapping->len = rsc->len;
        list_add_tail(&mapping->node, &rproc->mappings);
 
-       dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
-               rsc->pa, rsc->da, rsc->len);
+       if (!rproc->late_attach)
+               dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
+                       rsc->pa, rsc->da, rsc->len);
+       else
+               dev_dbg(dev, "late-attach: processed devmem pa 0x%x, da 0x%x, len 0x%x\n",
+                       rsc->pa, rsc->da, rsc->len);
 
        return 0;
 
@@ -817,7 +824,13 @@ static int rproc_handle_carveout(struct rproc *rproc,
        if (!carveout)
                return -ENOMEM;
 
-       va = dma_alloc_coherent(dev->parent, rsc->len, &dma, GFP_KERNEL);
+       if (rproc->late_attach) {
+               va = dma_malloc_coherent(dev->parent, rsc->len, &dma,
+                                        GFP_KERNEL);
+       } else {
+               va = dma_alloc_coherent(dev->parent, rsc->len, &dma,
+                                       GFP_KERNEL);
+       }
        if (!va) {
                dev_err(dev->parent,
                        "failed to allocate dma memory: len 0x%x\n", rsc->len);
@@ -852,11 +865,13 @@ static int rproc_handle_carveout(struct rproc *rproc,
                        goto dma_free;
                }
 
-               ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
-                               rsc->flags);
-               if (ret) {
-                       dev_err(dev, "iommu_map failed: %d\n", ret);
-                       goto free_mapping;
+               if (!rproc->late_attach) {
+                       ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
+                                       rsc->flags);
+                       if (ret) {
+                               dev_err(dev, "iommu_map failed: %d\n", ret);
+                               goto free_mapping;
+                       }
                }
 
                /*
@@ -870,8 +885,13 @@ static int rproc_handle_carveout(struct rproc *rproc,
                mapping->len = rsc->len;
                list_add_tail(&mapping->node, &rproc->mappings);
 
-               dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
-                       rsc->da, &dma);
+               if (!rproc->late_attach)
+                       dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
+                               rsc->da, &dma);
+               else
+                       dev_dbg(dev, "late-attach: carveout processed 0x%x to %pad\n",
+                               rsc->da, &dma);
+
        }
 
        /*
@@ -1145,11 +1165,14 @@ static void rproc_resource_cleanup(struct rproc *rproc)
        list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
                size_t unmapped;
 
-               unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
-               if (unmapped != entry->len) {
-                       /* nothing much to do besides complaining */
-                       dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
-                               unmapped);
+               if (!rproc->late_attach) {
+                       unmapped = iommu_unmap(rproc->domain, entry->da,
+                                              entry->len);
+                       if (unmapped != entry->len) {
+                               /* nothing much to do besides complaining */
+                               dev_err(dev, "failed to unmap %u/%zu\n",
+                                       entry->len, unmapped);
+                       }
                }
 
                list_del(&entry->node);
@@ -1228,7 +1251,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
                goto clean_up_resources;
        }
 
-       if (!rproc->skip_load) {
+       if (!rproc->skip_load && !rproc->late_attach) {
                /* load the ELF segments to memory */
                ret = rproc_load_segments(rproc, fw);
                if (ret) {
@@ -1236,6 +1259,8 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
                                ret);
                        goto clean_up_resources;
                }
+       } else {
+               dev_dbg(dev, "Skipped program segments load for pre-booted rproc\n");
        }
 
        /*
@@ -1687,6 +1712,7 @@ void rproc_shutdown(struct rproc *rproc)
                complete_all(&rproc->crash_comp);
 
        rproc->state = RPROC_OFFLINE;
+       rproc->late_attach = 0;
 
        dev_info(dev, "stopped remote processor %s\n", rproc->name);
 
index bd4fa1f9c92a7d653b9a80c373e2606247b366b1..52f73f1c7c4dc81b4865f670ec6920fc46879ffe 100644 (file)
@@ -6,6 +6,7 @@
  *     Suman Anna <s-anna@ti.com>
  */
 
+#include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/of_reserved_mem.h>
@@ -51,6 +52,7 @@ struct k3_dsp_rproc_mem {
  * @ti_sci_id: TI-SCI device identifier
  * @mbox: mailbox channel handle
  * @client: mailbox client to request the mailbox channel
+ * @uses_lreset: flag to denote the need for local reset management
  * @ipc_only: flag to indicate IPC-only mode
  */
 struct k3_dsp_rproc {
@@ -66,6 +68,7 @@ struct k3_dsp_rproc {
        u32 ti_sci_id;
        struct mbox_chan *mbox;
        struct mbox_client client;
+       unsigned int uses_lreset : 1;
        unsigned int ipc_only : 1;
 };
 
@@ -160,6 +163,9 @@ static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
                return ret;
        }
 
+       if (kproc->uses_lreset)
+               return ret;
+
        ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
                                                    kproc->ti_sci_id);
        if (ret) {
@@ -177,6 +183,9 @@ static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
        struct device *dev = kproc->dev;
        int ret;
 
+       if (kproc->uses_lreset)
+               goto lreset;
+
        ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
                                                   kproc->ti_sci_id);
        if (ret) {
@@ -184,6 +193,7 @@ static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
                return ret;
        }
 
+lreset:
        ret = reset_control_deassert(kproc->reset);
        if (ret) {
                dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
@@ -195,6 +205,71 @@ static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
        return ret;
 }
 
+/*
+ * The C66x DSP cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the DSP internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on C66x DSPs to allow loading into the DSP
+ * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
+ * firmware loading, and is followed by the .start() ops after loading to
+ * actually let the C66x DSP cores run. The local reset on C71x cores is a
+ * no-op and the global reset cannot be released on C71x cores until after
+ * the firmware images are loaded, so this function does nothing for C71x cores.
+ */
+static int k3_dsp_rproc_prepare(struct rproc *rproc)
+{
+       struct k3_dsp_rproc *kproc = rproc->priv;
+       struct device *dev = kproc->dev;
+       int ret;
+
+       /* IPC-only mode does not require the core to be released from reset */
+       if (kproc->ipc_only)
+               return 0;
+
+       /* local reset is no-op on C71x processors */
+       if (!kproc->uses_lreset)
+               return 0;
+
+       ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+                                                   kproc->ti_sci_id);
+       if (ret)
+               dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
+                       ret);
+
+       return ret;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complimentary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable C66x cores. This completes the second portion of
+ * powering down the C66x DSP cores. The cores themselves are only halted in the
+ * .stop() callback through the local reset, and the .unprepare() ops is invoked
+ * by the remoteproc core after the remoteproc is stopped to balance the global
+ * reset.
+ */
+static int k3_dsp_rproc_unprepare(struct rproc *rproc)
+{
+       struct k3_dsp_rproc *kproc = rproc->priv;
+       struct device *dev = kproc->dev;
+       int ret;
+
+       /* do not put back the cores into reset in IPC-only mode */
+       if (kproc->ipc_only)
+               return 0;
+
+       /* local reset is no-op on C71x processors */
+       if (!kproc->uses_lreset)
+               return 0;
+
+       ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+                                                   kproc->ti_sci_id);
+       if (ret)
+               dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+
+       return ret;
+}
+
 /*
  * Power up the DSP remote processor.
  *
@@ -357,6 +432,8 @@ static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, int len,
 }
 
 static const struct rproc_ops k3_dsp_rproc_ops = {
+       .prepare        = k3_dsp_rproc_prepare,
+       .unprepare      = k3_dsp_rproc_unprepare,
        .start          = k3_dsp_rproc_start,
        .stop           = k3_dsp_rproc_stop,
        .kick           = k3_dsp_rproc_kick,
@@ -406,10 +483,23 @@ static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
 
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                   mem_names[i]);
-               kproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
-               if (IS_ERR(kproc->mem[i].cpu_addr)) {
-                       dev_err(dev, "failed to parse and map %s memory\n",
+               if (!res) {
+                       dev_err(dev, "found no memory resource for %s\n",
                                mem_names[i]);
+                       return -EINVAL;
+               }
+               if (!devm_request_mem_region(dev, res->start,
+                                            resource_size(res),
+                                            dev_name(dev))) {
+                       dev_err(dev, "could not request %s region for resource\n",
+                               mem_names[i]);
+                       return -EBUSY;
+               }
+
+               kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+                                                        resource_size(res));
+               if (IS_ERR(kproc->mem[i].cpu_addr)) {
+                       dev_err(dev, "failed to map %s memory\n", mem_names[i]);
                        return PTR_ERR(kproc->mem[i].cpu_addr);
                }
                kproc->mem[i].bus_addr = res->start;
@@ -583,6 +673,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
        kproc = rproc->priv;
        kproc->rproc = rproc;
        kproc->dev = dev;
+       kproc->uses_lreset = 1;
 
        /* C71x is a 64-bit processor, so customize rproc elf loader ops */
        if (of_device_is_compatible(np, "ti,j721e-c71-dsp")) {
@@ -593,6 +684,8 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
                                rproc_elf64_find_loaded_rsc_table;
                rproc->ops->get_boot_addr = rproc_elf64_get_boot_addr;
                rproc->ops->load = rproc_elf64_load_segments;
+
+               kproc->uses_lreset = 0;
        }
 
        kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
@@ -667,6 +760,22 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
                kproc->ipc_only = 1;
        } else {
                dev_err(dev, "configured DSP for remoteproc mode\n");
+               /*
+                * ensure the DSP local reset is asserted so that the DSP
+                * doesn't execute bogus code in .prepare() when the module
+                * reset is released.
+                */
+               if (kproc->uses_lreset) {
+                       ret = reset_control_status(kproc->reset);
+                       if (ret < 0) {
+                               dev_err(dev, "failed to get reset status, status = %d\n",
+                                       ret);
+                               goto release_mem;
+                       } else if (ret == 0) {
+                               dev_warn(dev, "local reset is deasserted for device\n");
+                               k3_dsp_rproc_reset(kproc);
+                       }
+               }
        }
 
        ret = rproc_add(rproc);
index f9c2e474c4ea580dd84046b3767a94a84b193520..860617828cf2130266beffc0ec220d417bd3a7f8 100644 (file)
@@ -385,6 +385,17 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
                dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
                        ret);
 
+       /*
+        * Zero out both TCMs unconditionally (access from v8 Arm core is not
+        * affected by ATCM & BTCM enable configuration values) so that ECC
+        * can be effective on all TCM addresses.
+        */
+       dev_dbg(dev, "zeroing out ATCM memory\n");
+       memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
+
+       dev_dbg(dev, "zeroing out BTCM memory\n");
+       memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
+
        return ret;
 }
 
@@ -596,22 +607,21 @@ static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, int len,
        if (len <= 0)
                return NULL;
 
-       /* handle R5-view of ATCM addresses first using address 0 */
-       size = core->mem[0].size;
-       if (da >= 0 && ((da + len) <= size)) {
-               offset = da;
-               va = core->mem[0].cpu_addr + offset;
-               return (__force void *)va;
-       }
-
-       /* handle SoC-view addresses for ATCM and BTCM */
+       /* handle both R5 and SoC views of ATCM and BTCM */
        for (i = 0; i < core->num_mems; i++) {
                bus_addr = core->mem[i].bus_addr;
                dev_addr = core->mem[i].dev_addr;
                size = core->mem[i].size;
 
-               if (da >= bus_addr &&
-                   ((da + len) <= (bus_addr + size))) {
+               /* handle R5-view addresses of TCMs */
+               if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+                       offset = da - dev_addr;
+                       va = core->mem[i].cpu_addr + offset;
+                       return (__force void *)va;
+               }
+
+               /* handle SoC-view addresses of TCMs */
+               if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
                        offset = da - bus_addr;
                        va = core->mem[i].cpu_addr + offset;
                        return (__force void *)va;
@@ -1090,11 +1100,28 @@ static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
        for (i = 0; i < num_mems; i++) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                   mem_names[i]);
-               core->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
-               if (IS_ERR(core->mem[i].cpu_addr)) {
-                       dev_err(dev, "failed to parse and map %s memory\n",
+               if (!res) {
+                       dev_err(dev, "found no memory resource for %s\n",
+                               mem_names[i]);
+                       ret = -EINVAL;
+                       goto fail;
+               }
+               if (!devm_request_mem_region(dev, res->start,
+                                            resource_size(res),
+                                            dev_name(dev))) {
+                       dev_err(dev, "could not request %s region for resource\n",
                                mem_names[i]);
+                       ret = -EBUSY;
+                       goto fail;
+               }
+
+               core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+                                                       resource_size(res));
+               if (IS_ERR(core->mem[i].cpu_addr)) {
+                       dev_err(dev, "failed to map %s memory\n", mem_names[i]);
                        ret = PTR_ERR(core->mem[i].cpu_addr);
+                       devm_release_mem_region(dev, res->start,
+                                               resource_size(res));
                        goto fail;
                }
                core->mem[i].bus_addr = res->start;
@@ -1179,8 +1206,8 @@ static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
                core->sram[i].bus_addr = res.start;
                core->sram[i].dev_addr = res.start;
                core->sram[i].size = resource_size(&res);
-               core->sram[i].cpu_addr = ioremap(res.start,
-                                                resource_size(&res));
+               core->sram[i].cpu_addr = ioremap_wc(res.start,
+                                                   resource_size(&res));
                if (!core->sram[i].cpu_addr) {
                        dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
                                i, &res.start);
index 85471b7dc53301b5723768f18a230b24d181ad0a..97ddd9b896e561bd854210b0916e9a40f91f6a8f 100644 (file)
@@ -690,7 +690,7 @@ static int rpmsg_kdrv_switch_c2s_dbg_dump_stats(struct rpmsg_remotedev *rdev)
 }
 
 static void rpmsg_kdrv_switch_get_fw_ver(struct rpmsg_remotedev *rdev,
-                                        char *buf, size_t size)
+                                        char *buf, size_t size)
 {
        struct rpmsg_kdrv_switch_private *priv =
                container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
index c04a1edcd571630f49ccfd26b558e2aba06516f9..c3702684b342678d00f86831503720de5c1ab680 100644 (file)
@@ -169,7 +169,12 @@ static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm)
        buf[DT_YEARS] = bin2bcd(tm->tm_year % 100);
 
        ret = regmap_bulk_write(pcf85363->regmap, CTRL_STOP_EN,
-                               tmp, sizeof(tmp));
+                               tmp, 2);
+       if (ret)
+               return ret;
+
+       ret = regmap_bulk_write(pcf85363->regmap, DT_100THS,
+                               buf, sizeof(tmp) - 2);
        if (ret)
                return ret;
 
index b2483a749ac45fa58c4f9a7dd507ca19775222ca..3cf011e1205301d6123f1c237809237951369f66 100644 (file)
@@ -273,6 +273,10 @@ static int snvs_rtc_probe(struct platform_device *pdev)
        if (!data)
                return -ENOMEM;
 
+       data->rtc = devm_rtc_allocate_device(&pdev->dev);
+       if (IS_ERR(data->rtc))
+               return PTR_ERR(data->rtc);
+
        data->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap");
 
        if (IS_ERR(data->regmap)) {
@@ -335,10 +339,9 @@ static int snvs_rtc_probe(struct platform_device *pdev)
                goto error_rtc_device_register;
        }
 
-       data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
-                                       &snvs_rtc_ops, THIS_MODULE);
-       if (IS_ERR(data->rtc)) {
-               ret = PTR_ERR(data->rtc);
+       data->rtc->ops = &snvs_rtc_ops;
+       ret = rtc_register_device(data->rtc);
+       if (ret) {
                dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
                goto error_rtc_device_register;
        }
index b9ce93e9df89295eb72132fcfc81d0257aaa1723..99f86612f7751ad6d47b7abee8661e8deec2b0b2 100644 (file)
@@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
        char msg_format;
        char msg_no;
 
+       /*
+        * intrc values ENODEV, ENOLINK and EPERM
+        * will be obtained from sleep_on to indicate that no
+        * IO operation can be started
+        */
+       if (cqr->intrc == -ENODEV)
+               return 1;
+
+       if (cqr->intrc == -ENOLINK)
+               return 1;
+
+       if (cqr->intrc == -EPERM)
+               return 1;
+
        sense = dasd_get_sense(&cqr->irb);
        if (!sense)
                return 0;
@@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
        lcu->flags &= ~NEED_UAC_UPDATE;
        spin_unlock_irqrestore(&lcu->lock, flags);
 
-       do {
-               rc = dasd_sleep_on(cqr);
-               if (rc && suborder_not_supported(cqr))
-                       return -EOPNOTSUPP;
-       } while (rc && (cqr->retries > 0));
-       if (rc) {
+       rc = dasd_sleep_on(cqr);
+       if (rc && !suborder_not_supported(cqr)) {
                spin_lock_irqsave(&lcu->lock, flags);
                lcu->flags |= NEED_UAC_UPDATE;
                spin_unlock_irqrestore(&lcu->lock, flags);
index 93b2862bd3faecbc5702759855db207c43c49f17..674d848e377c8e0e5876c2b663980168950e122f 100644 (file)
@@ -372,7 +372,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
                goto error;
        }
        /* Check for trailing stuff. */
-       if (i == num_devices && strlen(buf) > 0) {
+       if (i == num_devices && buf && strlen(buf) > 0) {
                rc = -EINVAL;
                goto error;
        }
index aea50292264629e63f131dfd2dfd87d3ff108799..df09ed53ab45909a0ba3aebcc3c245325bcf38f3 100644 (file)
@@ -1213,6 +1213,8 @@ device_initcall(cio_settle_init);
 
 int sch_is_pseudo_sch(struct subchannel *sch)
 {
+       if (!sch->dev.parent)
+               return 0;
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
 }
 
index 9c7d9da42ba0829692d0d8dadbbd1f42935962f3..4b7cc8d425b1c64c6b27e4f3308d4152362a0e21 100644 (file)
@@ -749,6 +749,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 
        switch (state) {
        case SLSB_P_OUTPUT_EMPTY:
+       case SLSB_P_OUTPUT_PENDING:
                /* the adapter got it */
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
                        "out empty:%1d %02x", q->nr, count);
@@ -1568,13 +1569,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
                rc = qdio_kick_outbound_q(q, phys_aob);
        } else if (need_siga_sync(q)) {
                rc = qdio_siga_sync_q(q);
+       } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
+                  get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
+                  state == SLSB_CU_OUTPUT_PRIMED) {
+               /* The previous buffer is not processed yet, tack on. */
+               qperf_inc(q, fast_requeue);
        } else {
-               /* try to fast requeue buffers */
-               get_buf_state(q, prev_buf(bufnr), &state, 0);
-               if (state != SLSB_CU_OUTPUT_PRIMED)
-                       rc = qdio_kick_outbound_q(q, 0);
-               else
-                       qperf_inc(q, fast_requeue);
+               rc = qdio_kick_outbound_q(q, 0);
        }
 
        /* in case of SIGA errors we must process the error immediately */
index 78f1be41b05e3fb2cc5c91b31e0ec00877735851..034528a5453ec4b058b86c65f513037371a4e37d 100644 (file)
@@ -151,6 +151,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
                        return -ENOMEM;
                }
                irq_ptr_qs[i] = q;
+               INIT_LIST_HEAD(&q->entry);
        }
        return 0;
 }
@@ -179,6 +180,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
        q->mask = 1 << (31 - i);
        q->nr = i;
        q->handler = handler;
+       INIT_LIST_HEAD(&q->entry);
 }
 
 static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
index 07dea602205bdf2a18bfe85fad5888cf2f91b4dd..6628e0c9e70e3596cb8d95abbd9e985bae1ad5e7 100644 (file)
@@ -79,7 +79,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
        mutex_lock(&tiq_list_lock);
        list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
        mutex_unlock(&tiq_list_lock);
-       xchg(irq_ptr->dsci, 1 << 7);
 }
 
 void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
@@ -87,14 +86,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
        struct qdio_q *q;
 
        q = irq_ptr->input_qs[0];
-       /* if establish triggered an error */
-       if (!q || !q->entry.prev || !q->entry.next)
+       if (!q)
                return;
 
        mutex_lock(&tiq_list_lock);
        list_del_rcu(&q->entry);
        mutex_unlock(&tiq_list_lock);
        synchronize_rcu();
+       INIT_LIST_HEAD(&q->entry);
 }
 
 static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
index 70a006ba4d050d2d3063f6653e610372e7d2d074..4fe06ff7b2c8bcb680a464504d7e85696497645d 100644 (file)
@@ -89,8 +89,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                                  sizeof(*pa->pa_iova_pfn) +
                                  sizeof(*pa->pa_pfn),
                                  GFP_KERNEL);
-       if (unlikely(!pa->pa_iova_pfn))
+       if (unlikely(!pa->pa_iova_pfn)) {
+               pa->pa_nr = 0;
                return -ENOMEM;
+       }
        pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
 
        pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
index a57b969b897338564601d412d01f5b5cbd1abade..3be54651698a33babdb3797d54a273128eeabd06 100644 (file)
@@ -777,6 +777,8 @@ static int ap_device_probe(struct device *dev)
                drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
                if (!!devres != !!drvres)
                        return -ENODEV;
+               /* (re-)init queue's state machine */
+               ap_queue_reinit_state(to_ap_queue(dev));
        }
 
        /* Add queue/card to list of active queues/cards */
@@ -809,6 +811,8 @@ static int ap_device_remove(struct device *dev)
        struct ap_device *ap_dev = to_ap_dev(dev);
        struct ap_driver *ap_drv = ap_dev->drv;
 
+       if (is_queue_dev(dev))
+               ap_queue_remove(to_ap_queue(dev));
        if (ap_drv->remove)
                ap_drv->remove(ap_dev);
 
@@ -1446,10 +1450,6 @@ static void ap_scan_bus(struct work_struct *unused)
                        aq->ap_dev.device.parent = &ac->ap_dev.device;
                        dev_set_name(&aq->ap_dev.device,
                                     "%02x.%04x", id, dom);
-                       /* Start with a device reset */
-                       spin_lock_bh(&aq->lock);
-                       ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
-                       spin_unlock_bh(&aq->lock);
                        /* Register device */
                        rc = device_register(&aq->ap_dev.device);
                        if (rc) {
index 5246cd8c16a605f6748884b47bdc59d963c3b578..7e85d238767ba1d42938a24b67381221f7cc0593 100644 (file)
@@ -253,6 +253,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
 void ap_queue_remove(struct ap_queue *aq);
 void ap_queue_suspend(struct ap_device *ap_dev);
 void ap_queue_resume(struct ap_device *ap_dev);
+void ap_queue_reinit_state(struct ap_queue *aq);
 
 struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
                               int comp_device_type, unsigned int functions);
index 66f7334bcb03214307fb4f3fecebf91dbb3ba341..0aa4b3ccc948c10cbbd9ac904d6da353e341fd78 100644 (file)
@@ -718,5 +718,20 @@ void ap_queue_remove(struct ap_queue *aq)
 {
        ap_flush_queue(aq);
        del_timer_sync(&aq->timeout);
+
+       /* reset with zero, also clears irq registration */
+       spin_lock_bh(&aq->lock);
+       ap_zapq(aq->qid);
+       aq->state = AP_STATE_BORKED;
+       spin_unlock_bh(&aq->lock);
 }
 EXPORT_SYMBOL(ap_queue_remove);
+
+void ap_queue_reinit_state(struct ap_queue *aq)
+{
+       spin_lock_bh(&aq->lock);
+       aq->state = AP_STATE_RESET_START;
+       ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+       spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_reinit_state);
index f4ae5fa30ec970e99a39387b2b0a7a4b13903889..ff17a00273f77d60f63bf3e20c67b3408d96915b 100644 (file)
@@ -198,7 +198,6 @@ static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
        struct ap_queue *aq = to_ap_queue(&ap_dev->device);
        struct zcrypt_queue *zq = aq->private;
 
-       ap_queue_remove(aq);
        if (zq)
                zcrypt_queue_unregister(zq);
 }
index 35d58dbbc4da3dcc5fa47f5d23a8dfcbe08ff9ae..2a42e5962317a1cf797720219ccac09aa2807638 100644 (file)
@@ -273,7 +273,6 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
        struct ap_queue *aq = to_ap_queue(&ap_dev->device);
        struct zcrypt_queue *zq = aq->private;
 
-       ap_queue_remove(aq);
        if (zq)
                zcrypt_queue_unregister(zq);
 }
index 94d9f7224aea3acbb394c424a88ed2e6ba867c04..baa683c3f5d302f8be3fb7f6d7e6dfd9434f7eb1 100644 (file)
@@ -276,7 +276,6 @@ static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
        struct ap_queue *aq = to_ap_queue(&ap_dev->device);
        struct zcrypt_queue *zq = aq->private;
 
-       ap_queue_remove(aq);
        if (zq)
                zcrypt_queue_unregister(zq);
 }
index ebdbc457003fe50e09db6afe554eeaa47f974363..332701db7379dc16e7143deb608463be500c7ca4 100644 (file)
@@ -11,6 +11,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kthread.h>
+#include <linux/bug.h>
 #include "zfcp_ext.h"
 #include "zfcp_reqlist.h"
 
@@ -238,6 +239,12 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
        struct zfcp_erp_action *erp_action;
        struct zfcp_scsi_dev *zfcp_sdev;
 
+       if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
+                        need != ZFCP_ERP_ACTION_REOPEN_PORT &&
+                        need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
+                        need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
+               return NULL;
+
        switch (need) {
        case ZFCP_ERP_ACTION_REOPEN_LUN:
                zfcp_sdev = sdev_to_zfcp(sdev);
index 3c86e27f094deb3fefb423a2d046a7b08d69eb46..df888506e363e475df608a4cc87fc7811c89d302 100644 (file)
 
 struct kmem_cache *zfcp_fsf_qtcb_cache;
 
+static bool ber_stop = true;
+module_param(ber_stop, bool, 0600);
+MODULE_PARM_DESC(ber_stop,
+                "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
+
 static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
 {
        struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
@@ -230,10 +235,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
        case FSF_STATUS_READ_SENSE_DATA_AVAIL:
                break;
        case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
-               dev_warn(&adapter->ccw_device->dev,
-                        "The error threshold for checksum statistics "
-                        "has been exceeded\n");
                zfcp_dbf_hba_bit_err("fssrh_3", req);
+               if (ber_stop) {
+                       dev_warn(&adapter->ccw_device->dev,
+                                "All paths over this FCP device are disused because of excessive bit errors\n");
+                       zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
+               } else {
+                       dev_warn(&adapter->ccw_device->dev,
+                                "The error threshold for checksum statistics has been exceeded\n");
+               }
                break;
        case FSF_STATUS_READ_LINK_DOWN:
                zfcp_fsf_status_read_link_down(req);
@@ -1594,6 +1604,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
        struct zfcp_qdio *qdio = wka_port->adapter->qdio;
        struct zfcp_fsf_req *req;
+       unsigned long req_id = 0;
        int retval = -EIO;
 
        spin_lock_irq(&qdio->req_q_lock);
@@ -1616,6 +1627,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
        hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
        req->data = wka_port;
 
+       req_id = req->req_id;
+
        zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
        retval = zfcp_fsf_req_send(req);
        if (retval)
@@ -1623,7 +1636,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 out:
        spin_unlock_irq(&qdio->req_q_lock);
        if (!retval)
-               zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+               zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
        return retval;
 }
 
@@ -1649,6 +1662,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
        struct zfcp_qdio *qdio = wka_port->adapter->qdio;
        struct zfcp_fsf_req *req;
+       unsigned long req_id = 0;
        int retval = -EIO;
 
        spin_lock_irq(&qdio->req_q_lock);
@@ -1671,6 +1685,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
        req->data = wka_port;
        req->qtcb->header.port_handle = wka_port->handle;
 
+       req_id = req->req_id;
+
        zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
        retval = zfcp_fsf_req_send(req);
        if (retval)
@@ -1678,7 +1694,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 out:
        spin_unlock_irq(&qdio->req_q_lock);
        if (!retval)
-               zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+               zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
        return retval;
 }
 
index ec54538f7ae1c818dff22a82c1c6e3e8e6b5a933..67efdf25657f33e83e15aafaafe5b2aafc3e5ede 100644 (file)
@@ -132,6 +132,7 @@ struct airq_info {
        struct airq_iv *aiv;
 };
 static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
+static DEFINE_MUTEX(airq_areas_lock);
 
 #define CCW_CMD_SET_VQ 0x13
 #define CCW_CMD_VDEV_RESET 0x33
@@ -244,9 +245,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
        unsigned long bit, flags;
 
        for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
+               mutex_lock(&airq_areas_lock);
                if (!airq_areas[i])
                        airq_areas[i] = new_airq_info();
                info = airq_areas[i];
+               mutex_unlock(&airq_areas_lock);
                if (!info)
                        return 0;
                write_lock_irqsave(&info->lock, flags);
index 90ea0f5d9bdbbfc78da0b474a1490b184f3bde25..5160d6214a36b040aaf5f6b134899350baaea89d 100644 (file)
@@ -710,6 +710,8 @@ static void NCR5380_main(struct work_struct *work)
                        NCR5380_information_transfer(instance);
                        done = 0;
                }
+               if (!hostdata->connected)
+                       NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
                spin_unlock_irq(&hostdata->lock);
                if (!done)
                        cond_resched();
@@ -984,7 +986,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
        if (!hostdata->selecting) {
                /* Command was aborted */
                NCR5380_write(MODE_REG, MR_BASE);
-               goto out;
+               return NULL;
        }
        if (err < 0) {
                NCR5380_write(MODE_REG, MR_BASE);
@@ -1033,7 +1035,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
        if (!hostdata->selecting) {
                NCR5380_write(MODE_REG, MR_BASE);
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-               goto out;
+               return NULL;
        }
 
        dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
@@ -1106,8 +1108,6 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
                spin_lock_irq(&hostdata->lock);
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
                NCR5380_reselect(instance);
-               if (!hostdata->connected)
-                       NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
                shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n");
                goto out;
        }
@@ -1115,14 +1115,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
        if (err < 0) {
                spin_lock_irq(&hostdata->lock);
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-               NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
                /* Can't touch cmd if it has been reclaimed by the scsi ML */
-               if (hostdata->selecting) {
-                       cmd->result = DID_BAD_TARGET << 16;
-                       complete_cmd(instance, cmd);
-                       dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
-                       cmd = NULL;
-               }
+               if (!hostdata->selecting)
+                       return NULL;
+
+               cmd->result = DID_BAD_TARGET << 16;
+               complete_cmd(instance, cmd);
+               dsprintk(NDEBUG_SELECTION, instance,
+                       "target did not respond within 250ms\n");
+               cmd = NULL;
                goto out;
        }
 
@@ -1150,12 +1152,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
        if (err < 0) {
                shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-               NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
                goto out;
        }
        if (!hostdata->selecting) {
                do_abort(instance);
-               goto out;
+               return NULL;
        }
 
        dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
@@ -1817,9 +1818,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                         */
                                        NCR5380_write(TARGET_COMMAND_REG, 0);
 
-                                       /* Enable reselect interrupts */
-                                       NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-
                                        maybe_release_dma_irq(instance);
                                        return;
                                case MESSAGE_REJECT:
@@ -1851,8 +1849,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                         */
                                        NCR5380_write(TARGET_COMMAND_REG, 0);
 
-                                       /* Enable reselect interrupts */
-                                       NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 #ifdef SUN3_SCSI_VME
                                        dregs->csr |= CSR_DMA_ENABLE;
 #endif
@@ -1954,7 +1950,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                        cmd->result = DID_ERROR << 16;
                                        complete_cmd(instance, cmd);
                                        maybe_release_dma_irq(instance);
-                                       NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
                                        return;
                                }
                                msgout = NOP;
index 31096a0b0fdd30202068b0376396eaf6b783bf29..8a6d002e67894011183d33de402ec3897acc7e69 100644 (file)
@@ -235,7 +235,7 @@ struct NCR5380_cmd {
 #define NCR5380_PIO_CHUNK_SIZE         256
 
 /* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
-#define NCR5380_REG_POLL_TIME          15
+#define NCR5380_REG_POLL_TIME          10
 
 static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
 {
index 1c5051b1c1253901ed984d6264ffb40bbd8607d2..9e287927b7f988c8c13e06914670b30b42bc2a70 100644 (file)
@@ -578,7 +578,6 @@ ch_release(struct inode *inode, struct file *file)
        scsi_changer *ch = file->private_data;
 
        scsi_device_put(ch->device);
-       ch->device = NULL;
        file->private_data = NULL;
        kref_put(&ch->ref, ch_destroy);
        return 0;
index d1154baa9436a03dac8548153bbacffd503645bf..9c21938ed67ed4e0cadfa4c4a77c4f3c053358c0 100644 (file)
@@ -54,6 +54,7 @@
 #define ALUA_FAILOVER_TIMEOUT          60
 #define ALUA_FAILOVER_RETRIES          5
 #define ALUA_RTPG_DELAY_MSECS          5
+#define ALUA_RTPG_RETRY_DELAY          2
 
 /* device handler flags */
 #define ALUA_OPTIMIZE_STPG             0x01
@@ -696,7 +697,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
        case SCSI_ACCESS_STATE_TRANSITIONING:
                if (time_before(jiffies, pg->expiry)) {
                        /* State transition, retry */
-                       pg->interval = 2;
+                       pg->interval = ALUA_RTPG_RETRY_DELAY;
                        err = SCSI_DH_RETRY;
                } else {
                        struct alua_dh_data *h;
@@ -821,6 +822,8 @@ static void alua_rtpg_work(struct work_struct *work)
                                spin_lock_irqsave(&pg->lock, flags);
                                pg->flags &= ~ALUA_PG_RUNNING;
                                pg->flags |= ALUA_PG_RUN_RTPG;
+                               if (!pg->interval)
+                                       pg->interval = ALUA_RTPG_RETRY_DELAY;
                                spin_unlock_irqrestore(&pg->lock, flags);
                                queue_delayed_work(kaluad_wq, &pg->rtpg_work,
                                                   pg->interval * HZ);
@@ -832,6 +835,8 @@ static void alua_rtpg_work(struct work_struct *work)
                spin_lock_irqsave(&pg->lock, flags);
                if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
                        pg->flags &= ~ALUA_PG_RUNNING;
+                       if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
+                               pg->interval = ALUA_RTPG_RETRY_DELAY;
                        pg->flags |= ALUA_PG_RUN_RTPG;
                        spin_unlock_irqrestore(&pg->lock, flags);
                        queue_delayed_work(kaluad_wq, &pg->rtpg_work,
index d27fabae8ddd9c2e045203ada3d3673799441ce8..6c629ef1bc4e328fbee9f18b1102380e82a5cdfb 100644 (file)
@@ -546,6 +546,8 @@ static void send_mode_select(struct work_struct *work)
        spin_unlock(&ctlr->ms_lock);
 
  retry:
+       memset(cdb, 0, sizeof(cdb));
+
        data_size = rdac_failover_get(ctlr, &list, cdb);
 
        RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
index 7dc4ffa244304ada7bab9b8718af9ebc7066d6ab..24cbd0a2cc69facf325be96b5bc150cb2e882389 100644 (file)
@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
  */
 static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
 {
-       return (struct fcoe_rport *)(rdata + 1);
+       return container_of(rdata, struct fcoe_rport, rdata);
 }
 
 /**
@@ -2281,7 +2281,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
  */
 static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
                              struct sk_buff *skb,
-                             struct fc_rport_priv *rdata)
+                             struct fcoe_rport *frport)
 {
        struct fip_header *fiph;
        struct fip_desc *desc = NULL;
@@ -2289,16 +2289,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
        struct fip_wwn_desc *wwn = NULL;
        struct fip_vn_desc *vn = NULL;
        struct fip_size_desc *size = NULL;
-       struct fcoe_rport *frport;
        size_t rlen;
        size_t dlen;
        u32 desc_mask = 0;
        u32 dtype;
        u8 sub;
 
-       memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
-       frport = fcoe_ctlr_rport(rdata);
-
        fiph = (struct fip_header *)skb->data;
        frport->flags = ntohs(fiph->fip_flags);
 
@@ -2361,15 +2357,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
                        if (dlen != sizeof(struct fip_wwn_desc))
                                goto len_err;
                        wwn = (struct fip_wwn_desc *)desc;
-                       rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+                       frport->rdata.ids.node_name =
+                               get_unaligned_be64(&wwn->fd_wwn);
                        break;
                case FIP_DT_VN_ID:
                        if (dlen != sizeof(struct fip_vn_desc))
                                goto len_err;
                        vn = (struct fip_vn_desc *)desc;
                        memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
-                       rdata->ids.port_id = ntoh24(vn->fd_fc_id);
-                       rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
+                       frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
+                       frport->rdata.ids.port_name =
+                               get_unaligned_be64(&vn->fd_wwpn);
                        break;
                case FIP_DT_FC4F:
                        if (dlen != sizeof(struct fip_fc4_feat))
@@ -2750,10 +2748,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
        struct fip_header *fiph;
        enum fip_vn2vn_subcode sub;
-       struct {
-               struct fc_rport_priv rdata;
-               struct fcoe_rport frport;
-       } buf;
+       struct fcoe_rport frport = { };
        int rc, vlan_id = 0;
 
        fiph = (struct fip_header *)skb->data;
@@ -2769,7 +2764,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
                goto drop;
        }
 
-       rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
+       rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
        if (rc) {
                LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
                goto drop;
@@ -2778,19 +2773,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
        mutex_lock(&fip->ctlr_mutex);
        switch (sub) {
        case FIP_SC_VN_PROBE_REQ:
-               fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
+               fcoe_ctlr_vn_probe_req(fip, &frport.rdata);
                break;
        case FIP_SC_VN_PROBE_REP:
-               fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
+               fcoe_ctlr_vn_probe_reply(fip, &frport.rdata);
                break;
        case FIP_SC_VN_CLAIM_NOTIFY:
-               fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
+               fcoe_ctlr_vn_claim_notify(fip, &frport.rdata);
                break;
        case FIP_SC_VN_CLAIM_REP:
-               fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
+               fcoe_ctlr_vn_claim_resp(fip, &frport.rdata);
                break;
        case FIP_SC_VN_BEACON:
-               fcoe_ctlr_vn_beacon(fip, &buf.rdata);
+               fcoe_ctlr_vn_beacon(fip, &frport.rdata);
                break;
        default:
                LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
@@ -2814,22 +2809,18 @@ drop:
  */
 static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
                              struct sk_buff *skb,
-                             struct fc_rport_priv *rdata)
+                             struct fcoe_rport *frport)
 {
        struct fip_header *fiph;
        struct fip_desc *desc = NULL;
        struct fip_mac_desc *macd = NULL;
        struct fip_wwn_desc *wwn = NULL;
-       struct fcoe_rport *frport;
        size_t rlen;
        size_t dlen;
        u32 desc_mask = 0;
        u32 dtype;
        u8 sub;
 
-       memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
-       frport = fcoe_ctlr_rport(rdata);
-
        fiph = (struct fip_header *)skb->data;
        frport->flags = ntohs(fiph->fip_flags);
 
@@ -2883,7 +2874,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
                        if (dlen != sizeof(struct fip_wwn_desc))
                                goto len_err;
                        wwn = (struct fip_wwn_desc *)desc;
-                       rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+                       frport->rdata.ids.node_name =
+                               get_unaligned_be64(&wwn->fd_wwn);
                        break;
                default:
                        LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
@@ -2994,22 +2986,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
        struct fip_header *fiph;
        enum fip_vlan_subcode sub;
-       struct {
-               struct fc_rport_priv rdata;
-               struct fcoe_rport frport;
-       } buf;
+       struct fcoe_rport frport = { };
        int rc;
 
        fiph = (struct fip_header *)skb->data;
        sub = fiph->fip_subcode;
-       rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
+       rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
        if (rc) {
                LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
                goto drop;
        }
        mutex_lock(&fip->ctlr_mutex);
        if (sub == FIP_SC_VL_REQ)
-               fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
+               fcoe_ctlr_vlan_disc_reply(fip, &frport.rdata);
        mutex_unlock(&fip->ctlr_mutex);
 
 drop:
index c43eccdea65d2d2c8231265b2abd4cee720889b6..f570b8c5d857cce6e8d3d53e4d9cc861b8214558 100644 (file)
@@ -2320,6 +2320,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
        case IOACCEL2_SERV_RESPONSE_COMPLETE:
                switch (c2->error_data.status) {
                case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+                       if (cmd)
+                               cmd->result = 0;
                        break;
                case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
                        cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2479,8 +2481,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
 
        /* check for good status */
        if (likely(c2->error_data.serv_response == 0 &&
-                       c2->error_data.status == 0))
+                       c2->error_data.status == 0)) {
+               cmd->result = 0;
                return hpsa_cmd_free_and_done(h, c, cmd);
+       }
 
        /*
         * Any RAID offload error results in retry which will use
@@ -5617,6 +5621,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
        }
        c = cmd_tagged_alloc(h, cmd);
 
+       /*
+        * This is necessary because the SML doesn't zero out this field during
+        * error recovery.
+        */
+       cmd->result = 0;
+
        /*
         * Call alternate submit routine for I/O accelerated commands.
         * Retries always go down the normal I/O path.
index b64ca977825df32111b9dbfcc1c0b13d21f7c317..71d53bb239e25d2eb30b91b7b80d45d70d8dbcd2 100644 (file)
@@ -4874,8 +4874,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
 
        spin_lock_irqsave(vhost->host->host_lock, flags);
        ibmvfc_purge_requests(vhost, DID_ERROR);
-       ibmvfc_free_event_pool(vhost);
        spin_unlock_irqrestore(vhost->host->host_lock, flags);
+       ibmvfc_free_event_pool(vhost);
 
        ibmvfc_free_mem(vhost);
        spin_lock(&ibmvfc_driver_lock);
index 3d51a936f6d547597daa94f819829e3291f9caaf..90a748551ede5d98f26ba52ff892f7bc57c2109f 100644 (file)
@@ -140,6 +140,7 @@ EXPORT_SYMBOL(fc_rport_lookup);
 struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
 {
        struct fc_rport_priv *rdata;
+       size_t rport_priv_size = sizeof(*rdata);
 
        lockdep_assert_held(&lport->disc.disc_mutex);
 
@@ -147,7 +148,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
        if (rdata)
                return rdata;
 
-       rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
+       if (lport->rport_priv_size > 0)
+               rport_priv_size = lport->rport_priv_size;
+       rdata = kzalloc(rport_priv_size, GFP_KERNEL);
        if (!rdata)
                return NULL;
 
index dd6057359d7c6e681544b9f7b2be5393b39c7f1a..643321fc152dd6b1df74fa1c38d05c54b6d9197b 100644 (file)
@@ -3,6 +3,8 @@
  *
  * Copyright 1998, Michael Schmitz <mschmitz@lbl.gov>
  *
+ * Copyright 2019 Finn Thain
+ *
  * derived in part from:
  */
 /*
@@ -11,6 +13,7 @@
  * Copyright 1995, Russell King
  */
 
+#include <linux/delay.h>
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/ioport.h>
@@ -52,7 +55,7 @@ static int setup_cmd_per_lun = -1;
 module_param(setup_cmd_per_lun, int, 0);
 static int setup_sg_tablesize = -1;
 module_param(setup_sg_tablesize, int, 0);
-static int setup_use_pdma = -1;
+static int setup_use_pdma = 512;
 module_param(setup_use_pdma, int, 0);
 static int setup_hostid = -1;
 module_param(setup_hostid, int, 0);
@@ -89,101 +92,217 @@ static int __init mac_scsi_setup(char *str)
 __setup("mac5380=", mac_scsi_setup);
 #endif /* !MODULE */
 
-/* Pseudo DMA asm originally by Ove Edlund */
-
-#define CP_IO_TO_MEM(s,d,n)                            \
-__asm__ __volatile__                                   \
-    ("    cmp.w  #4,%2\n"                              \
-     "    bls    8f\n"                                 \
-     "    move.w %1,%%d0\n"                            \
-     "    neg.b  %%d0\n"                               \
-     "    and.w  #3,%%d0\n"                            \
-     "    sub.w  %%d0,%2\n"                            \
-     "    bra    2f\n"                                 \
-     " 1: move.b (%0),(%1)+\n"                         \
-     " 2: dbf    %%d0,1b\n"                            \
-     "    move.w %2,%%d0\n"                            \
-     "    lsr.w  #5,%%d0\n"                            \
-     "    bra    4f\n"                                 \
-     " 3: move.l (%0),(%1)+\n"                         \
-     "31: move.l (%0),(%1)+\n"                         \
-     "32: move.l (%0),(%1)+\n"                         \
-     "33: move.l (%0),(%1)+\n"                         \
-     "34: move.l (%0),(%1)+\n"                         \
-     "35: move.l (%0),(%1)+\n"                         \
-     "36: move.l (%0),(%1)+\n"                         \
-     "37: move.l (%0),(%1)+\n"                         \
-     " 4: dbf    %%d0,3b\n"                            \
-     "    move.w %2,%%d0\n"                            \
-     "    lsr.w  #2,%%d0\n"                            \
-     "    and.w  #7,%%d0\n"                            \
-     "    bra    6f\n"                                 \
-     " 5: move.l (%0),(%1)+\n"                         \
-     " 6: dbf    %%d0,5b\n"                            \
-     "    and.w  #3,%2\n"                              \
-     "    bra    8f\n"                                 \
-     " 7: move.b (%0),(%1)+\n"                         \
-     " 8: dbf    %2,7b\n"                              \
-     "    moveq.l #0, %2\n"                            \
-     " 9: \n"                                          \
-     ".section .fixup,\"ax\"\n"                                \
-     "    .even\n"                                     \
-     "91: moveq.l #1, %2\n"                            \
-     "    jra 9b\n"                                    \
-     "94: moveq.l #4, %2\n"                            \
-     "    jra 9b\n"                                    \
-     ".previous\n"                                     \
-     ".section __ex_table,\"a\"\n"                     \
-     "   .align 4\n"                                   \
-     "   .long  1b,91b\n"                              \
-     "   .long  3b,94b\n"                              \
-     "   .long 31b,94b\n"                              \
-     "   .long 32b,94b\n"                              \
-     "   .long 33b,94b\n"                              \
-     "   .long 34b,94b\n"                              \
-     "   .long 35b,94b\n"                              \
-     "   .long 36b,94b\n"                              \
-     "   .long 37b,94b\n"                              \
-     "   .long  5b,94b\n"                              \
-     "   .long  7b,91b\n"                              \
-     ".previous"                                       \
-     : "=a"(s), "=a"(d), "=d"(n)                       \
-     : "0"(s), "1"(d), "2"(n)                          \
-     : "d0")
+/*
+ * According to "Inside Macintosh: Devices", Mac OS requires disk drivers to
+ * specify the number of bytes between the delays expected from a SCSI target.
+ * This allows the operating system to "prevent bus errors when a target fails
+ * to deliver the next byte within the processor bus error timeout period."
+ * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets
+ * so bus errors are unavoidable.
+ *
+ * If a MOVE.B instruction faults, we assume that zero bytes were transferred
+ * and simply retry. That assumption probably depends on target behaviour but
+ * seems to hold up okay. The NOP provides synchronization: without it the
+ * fault can sometimes occur after the program counter has moved past the
+ * offending instruction. Post-increment addressing can't be used.
+ */
+
+#define MOVE_BYTE(operands) \
+       asm volatile ( \
+               "1:     moveb " operands "     \n" \
+               "11:    nop                    \n" \
+               "       addq #1,%0             \n" \
+               "       subq #1,%1             \n" \
+               "40:                           \n" \
+               "                              \n" \
+               ".section .fixup,\"ax\"        \n" \
+               ".even                         \n" \
+               "90:    movel #1, %2           \n" \
+               "       jra 40b                \n" \
+               ".previous                     \n" \
+               "                              \n" \
+               ".section __ex_table,\"a\"     \n" \
+               ".align  4                     \n" \
+               ".long   1b,90b                \n" \
+               ".long  11b,90b                \n" \
+               ".previous                     \n" \
+               : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+/*
+ * If a MOVE.W (or MOVE.L) instruction faults, it cannot be retried because
+ * the residual byte count would be uncertain. In that situation the MOVE_WORD
+ * macro clears n in the fixup section to abort the transfer.
+ */
+
+#define MOVE_WORD(operands) \
+       asm volatile ( \
+               "1:     movew " operands "     \n" \
+               "11:    nop                    \n" \
+               "       subq #2,%1             \n" \
+               "40:                           \n" \
+               "                              \n" \
+               ".section .fixup,\"ax\"        \n" \
+               ".even                         \n" \
+               "90:    movel #0, %1           \n" \
+               "       movel #2, %2           \n" \
+               "       jra 40b                \n" \
+               ".previous                     \n" \
+               "                              \n" \
+               ".section __ex_table,\"a\"     \n" \
+               ".align  4                     \n" \
+               ".long   1b,90b                \n" \
+               ".long  11b,90b                \n" \
+               ".previous                     \n" \
+               : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+#define MOVE_16_WORDS(operands) \
+       asm volatile ( \
+               "1:     movew " operands "     \n" \
+               "2:     movew " operands "     \n" \
+               "3:     movew " operands "     \n" \
+               "4:     movew " operands "     \n" \
+               "5:     movew " operands "     \n" \
+               "6:     movew " operands "     \n" \
+               "7:     movew " operands "     \n" \
+               "8:     movew " operands "     \n" \
+               "9:     movew " operands "     \n" \
+               "10:    movew " operands "     \n" \
+               "11:    movew " operands "     \n" \
+               "12:    movew " operands "     \n" \
+               "13:    movew " operands "     \n" \
+               "14:    movew " operands "     \n" \
+               "15:    movew " operands "     \n" \
+               "16:    movew " operands "     \n" \
+               "17:    nop                    \n" \
+               "       subl  #32,%1           \n" \
+               "40:                           \n" \
+               "                              \n" \
+               ".section .fixup,\"ax\"        \n" \
+               ".even                         \n" \
+               "90:    movel #0, %1           \n" \
+               "       movel #2, %2           \n" \
+               "       jra 40b                \n" \
+               ".previous                     \n" \
+               "                              \n" \
+               ".section __ex_table,\"a\"     \n" \
+               ".align  4                     \n" \
+               ".long   1b,90b                \n" \
+               ".long   2b,90b                \n" \
+               ".long   3b,90b                \n" \
+               ".long   4b,90b                \n" \
+               ".long   5b,90b                \n" \
+               ".long   6b,90b                \n" \
+               ".long   7b,90b                \n" \
+               ".long   8b,90b                \n" \
+               ".long   9b,90b                \n" \
+               ".long  10b,90b                \n" \
+               ".long  11b,90b                \n" \
+               ".long  12b,90b                \n" \
+               ".long  13b,90b                \n" \
+               ".long  14b,90b                \n" \
+               ".long  15b,90b                \n" \
+               ".long  16b,90b                \n" \
+               ".long  17b,90b                \n" \
+               ".previous                     \n" \
+               : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+#define MAC_PDMA_DELAY         32
+
+static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n)
+{
+       unsigned char *addr = start;
+       int result = 0;
+
+       if (n >= 1) {
+               MOVE_BYTE("%3@,%0@");
+               if (result)
+                       goto out;
+       }
+       if (n >= 1 && ((unsigned long)addr & 1)) {
+               MOVE_BYTE("%3@,%0@");
+               if (result)
+                       goto out;
+       }
+       while (n >= 32)
+               MOVE_16_WORDS("%3@,%0@+");
+       while (n >= 2)
+               MOVE_WORD("%3@,%0@+");
+       if (result)
+               return start - addr; /* Negated to indicate uncertain length */
+       if (n == 1)
+               MOVE_BYTE("%3@,%0@");
+out:
+       return addr - start;
+}
+
+static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n)
+{
+       unsigned char *addr = start;
+       int result = 0;
+
+       if (n >= 1) {
+               MOVE_BYTE("%0@,%3@");
+               if (result)
+                       goto out;
+       }
+       if (n >= 1 && ((unsigned long)addr & 1)) {
+               MOVE_BYTE("%0@,%3@");
+               if (result)
+                       goto out;
+       }
+       while (n >= 32)
+               MOVE_16_WORDS("%0@+,%3@");
+       while (n >= 2)
+               MOVE_WORD("%0@+,%3@");
+       if (result)
+               return start - addr; /* Negated to indicate uncertain length */
+       if (n == 1)
+               MOVE_BYTE("%0@,%3@");
+out:
+       return addr - start;
+}
 
 static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
                                 unsigned char *dst, int len)
 {
        u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
        unsigned char *d = dst;
-       int n = len;
-       int transferred;
+
+       hostdata->pdma_residual = len;
 
        while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
                                      BASR_DRQ | BASR_PHASE_MATCH,
                                      BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
-               CP_IO_TO_MEM(s, d, n);
+               int bytes;
 
-               transferred = d - dst - n;
-               hostdata->pdma_residual = len - transferred;
+               bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512));
 
-               /* No bus error. */
-               if (n == 0)
+               if (bytes > 0) {
+                       d += bytes;
+                       hostdata->pdma_residual -= bytes;
+               }
+
+               if (hostdata->pdma_residual == 0)
                        return 0;
 
-               /* Target changed phase early? */
                if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
-                                          BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
-                       scmd_printk(KERN_ERR, hostdata->connected,
+                                          BUS_AND_STATUS_REG, BASR_ACK,
+                                          BASR_ACK, HZ / 64) < 0)
+                       scmd_printk(KERN_DEBUG, hostdata->connected,
                                    "%s: !REQ and !ACK\n", __func__);
                if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
                        return 0;
 
+               if (bytes == 0)
+                       udelay(MAC_PDMA_DELAY);
+
+               if (bytes >= 0)
+                       continue;
+
                dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
-                        "%s: bus error (%d/%d)\n", __func__, transferred, len);
+                        "%s: bus error (%d/%d)\n", __func__, d - dst, len);
                NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-               d = dst + transferred;
-               n = len - transferred;
+               return -1;
        }
 
        scmd_printk(KERN_ERR, hostdata->connected,
@@ -192,93 +311,27 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
        return -1;
 }
 
-
-#define CP_MEM_TO_IO(s,d,n)                            \
-__asm__ __volatile__                                   \
-    ("    cmp.w  #4,%2\n"                              \
-     "    bls    8f\n"                                 \
-     "    move.w %0,%%d0\n"                            \
-     "    neg.b  %%d0\n"                               \
-     "    and.w  #3,%%d0\n"                            \
-     "    sub.w  %%d0,%2\n"                            \
-     "    bra    2f\n"                                 \
-     " 1: move.b (%0)+,(%1)\n"                         \
-     " 2: dbf    %%d0,1b\n"                            \
-     "    move.w %2,%%d0\n"                            \
-     "    lsr.w  #5,%%d0\n"                            \
-     "    bra    4f\n"                                 \
-     " 3: move.l (%0)+,(%1)\n"                         \
-     "31: move.l (%0)+,(%1)\n"                         \
-     "32: move.l (%0)+,(%1)\n"                         \
-     "33: move.l (%0)+,(%1)\n"                         \
-     "34: move.l (%0)+,(%1)\n"                         \
-     "35: move.l (%0)+,(%1)\n"                         \
-     "36: move.l (%0)+,(%1)\n"                         \
-     "37: move.l (%0)+,(%1)\n"                         \
-     " 4: dbf    %%d0,3b\n"                            \
-     "    move.w %2,%%d0\n"                            \
-     "    lsr.w  #2,%%d0\n"                            \
-     "    and.w  #7,%%d0\n"                            \
-     "    bra    6f\n"                                 \
-     " 5: move.l (%0)+,(%1)\n"                         \
-     " 6: dbf    %%d0,5b\n"                            \
-     "    and.w  #3,%2\n"                              \
-     "    bra    8f\n"                                 \
-     " 7: move.b (%0)+,(%1)\n"                         \
-     " 8: dbf    %2,7b\n"                              \
-     "    moveq.l #0, %2\n"                            \
-     " 9: \n"                                          \
-     ".section .fixup,\"ax\"\n"                                \
-     "    .even\n"                                     \
-     "91: moveq.l #1, %2\n"                            \
-     "    jra 9b\n"                                    \
-     "94: moveq.l #4, %2\n"                            \
-     "    jra 9b\n"                                    \
-     ".previous\n"                                     \
-     ".section __ex_table,\"a\"\n"                     \
-     "   .align 4\n"                                   \
-     "   .long  1b,91b\n"                              \
-     "   .long  3b,94b\n"                              \
-     "   .long 31b,94b\n"                              \
-     "   .long 32b,94b\n"                              \
-     "   .long 33b,94b\n"                              \
-     "   .long 34b,94b\n"                              \
-     "   .long 35b,94b\n"                              \
-     "   .long 36b,94b\n"                              \
-     "   .long 37b,94b\n"                              \
-     "   .long  5b,94b\n"                              \
-     "   .long  7b,91b\n"                              \
-     ".previous"                                       \
-     : "=a"(s), "=a"(d), "=d"(n)                       \
-     : "0"(s), "1"(d), "2"(n)                          \
-     : "d0")
-
 static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
                                  unsigned char *src, int len)
 {
        unsigned char *s = src;
        u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
-       int n = len;
-       int transferred;
+
+       hostdata->pdma_residual = len;
 
        while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
                                      BASR_DRQ | BASR_PHASE_MATCH,
                                      BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
-               CP_MEM_TO_IO(s, d, n);
+               int bytes;
 
-               transferred = s - src - n;
-               hostdata->pdma_residual = len - transferred;
+               bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512));
 
-               /* Target changed phase early? */
-               if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
-                                          BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
-                       scmd_printk(KERN_ERR, hostdata->connected,
-                                   "%s: !REQ and !ACK\n", __func__);
-               if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
-                       return 0;
+               if (bytes > 0) {
+                       s += bytes;
+                       hostdata->pdma_residual -= bytes;
+               }
 
-               /* No bus error. */
-               if (n == 0) {
+               if (hostdata->pdma_residual == 0) {
                        if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
                                                  TCR_LAST_BYTE_SENT,
                                                  TCR_LAST_BYTE_SENT, HZ / 64) < 0)
@@ -287,17 +340,29 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
                        return 0;
                }
 
+               if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+                                          BUS_AND_STATUS_REG, BASR_ACK,
+                                          BASR_ACK, HZ / 64) < 0)
+                       scmd_printk(KERN_DEBUG, hostdata->connected,
+                                   "%s: !REQ and !ACK\n", __func__);
+               if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+                       return 0;
+
+               if (bytes == 0)
+                       udelay(MAC_PDMA_DELAY);
+
+               if (bytes >= 0)
+                       continue;
+
                dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
-                        "%s: bus error (%d/%d)\n", __func__, transferred, len);
+                        "%s: bus error (%d/%d)\n", __func__, s - src, len);
                NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-               s = src + transferred;
-               n = len - transferred;
+               return -1;
        }
 
        scmd_printk(KERN_ERR, hostdata->connected,
                    "%s: phase mismatch or !DRQ\n", __func__);
        NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-
        return -1;
 }
 
@@ -305,7 +370,7 @@ static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
                                 struct scsi_cmnd *cmd)
 {
        if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
-           cmd->SCp.this_residual < 16)
+           cmd->SCp.this_residual < setup_use_pdma)
                return 0;
 
        return cmd->SCp.this_residual;
index 8c7154143a4eb1f3db016cc4787b227ade95c531..a84878fbf45d23619e0747bf512acc05819747b4 100644 (file)
@@ -4189,11 +4189,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                 */
                if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
                    pdev->subsystem_device == 0xC000)
-                       return -ENODEV;
+                       goto out_disable_device;
                /* Now check the magic signature byte */
                pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
                if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
-                       return -ENODEV;
+                       goto out_disable_device;
                /* Ok it is probably a megaraid */
        }
 
index acb503ea8f0c4145bf28cb6abe5334e0b787990f..bc37666f998e64e970ab8077baebb6c18c245c1b 100644 (file)
@@ -3025,6 +3025,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
        u32 size;
        unsigned long buff_addr;
        unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+       unsigned long chunk_left_bytes;
        unsigned long src_addr;
        unsigned long flags;
        u32 buff_offset;
@@ -3050,6 +3051,8 @@ megasas_fw_crash_buffer_show(struct device *cdev,
        }
 
        size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+       chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
+       size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
        size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
 
        src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
@@ -5215,7 +5218,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 {
        u32 max_sectors_1;
        u32 max_sectors_2, tmp_sectors, msix_enable;
-       u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
+       u32 scratch_pad_2, scratch_pad_3, scratch_pad_4, status_reg;
        resource_size_t base_addr;
        struct megasas_register_set __iomem *reg_set;
        struct megasas_ctrl_info *ctrl_info = NULL;
@@ -5223,6 +5226,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
        int i, j, loop, fw_msix_count = 0;
        struct IOV_111 *iovPtr;
        struct fusion_context *fusion;
+       bool do_adp_reset = true;
 
        fusion = instance->ctrl_context;
 
@@ -5271,19 +5275,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
        }
 
        if (megasas_transition_to_ready(instance, 0)) {
-               atomic_set(&instance->fw_reset_no_pci_access, 1);
-               instance->instancet->adp_reset
-                       (instance, instance->reg_set);
-               atomic_set(&instance->fw_reset_no_pci_access, 0);
-               dev_info(&instance->pdev->dev,
-                       "FW restarted successfully from %s!\n",
-                       __func__);
+               if (instance->adapter_type >= INVADER_SERIES) {
+                       status_reg = instance->instancet->read_fw_status_reg(
+                                       instance->reg_set);
+                       do_adp_reset = status_reg & MFI_RESET_ADAPTER;
+               }
 
-               /*waitting for about 30 second before retry*/
-               ssleep(30);
+               if (do_adp_reset) {
+                       atomic_set(&instance->fw_reset_no_pci_access, 1);
+                       instance->instancet->adp_reset
+                               (instance, instance->reg_set);
+                       atomic_set(&instance->fw_reset_no_pci_access, 0);
+                       dev_info(&instance->pdev->dev,
+                                "FW restarted successfully from %s!\n",
+                                __func__);
 
-               if (megasas_transition_to_ready(instance, 0))
+                       /*waiting for about 30 second before retry*/
+                       ssleep(30);
+
+                       if (megasas_transition_to_ready(instance, 0))
+                               goto fail_ready_state;
+               } else {
                        goto fail_ready_state;
+               }
        }
 
        megasas_init_ctrl_params(instance);
@@ -5322,12 +5336,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
                                instance->msix_vectors = (scratch_pad_2
                                        & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
                                fw_msix_count = instance->msix_vectors;
-                       } else { /* Invader series supports more than 8 MSI-x vectors*/
+                       } else {
                                instance->msix_vectors = ((scratch_pad_2
                                        & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
                                        >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
-                               if (instance->msix_vectors > 16)
-                                       instance->msix_combined = true;
+
+                               /*
+                                * For Invader series, > 8 MSI-x vectors
+                                * supported by FW/HW implies combined
+                                * reply queue mode is enabled.
+                                * For Ventura series, > 16 MSI-x vectors
+                                * supported by FW/HW implies combined
+                                * reply queue mode is enabled.
+                                */
+                               switch (instance->adapter_type) {
+                               case INVADER_SERIES:
+                                       if (instance->msix_vectors > 8)
+                                               instance->msix_combined = true;
+                                       break;
+                               case VENTURA_SERIES:
+                                       if (instance->msix_vectors > 16)
+                                               instance->msix_combined = true;
+                                       break;
+                               }
 
                                if (rdpq_enable)
                                        instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
@@ -5862,7 +5893,8 @@ megasas_get_target_prop(struct megasas_instance *instance,
        int ret;
        struct megasas_cmd *cmd;
        struct megasas_dcmd_frame *dcmd;
-       u16 targetId = (sdev->channel % 2) + sdev->id;
+       u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
+                       sdev->id;
 
        cmd = megasas_get_cmd(instance);
 
@@ -6024,13 +6056,13 @@ static int megasas_io_attach(struct megasas_instance *instance)
  * @instance:          Adapter soft state
  * Description:
  *
- * For Ventura, driver/FW will operate in 64bit DMA addresses.
+ * For Ventura, driver/FW will operate in 63bit DMA addresses.
  *
  * For invader-
  *     By default, driver/FW will operate in 32bit DMA addresses
  *     for consistent DMA mapping but if 32 bit consistent
- *     DMA mask fails, driver will try with 64 bit consistent
- *     mask provided FW is true 64bit DMA capable
+ *     DMA mask fails, driver will try with 63 bit consistent
+ *     mask provided FW is true 63bit DMA capable
  *
  * For older controllers(Thunderbolt and MFI based adapters)-
  *     driver/FW will operate in 32 bit consistent DMA addresses.
@@ -6043,15 +6075,15 @@ megasas_set_dma_mask(struct megasas_instance *instance)
        u32 scratch_pad_2;
 
        pdev = instance->pdev;
-       consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
-                               DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+       consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
+                               DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
 
        if (IS_DMA64) {
-               if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+               if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
                    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                        goto fail_set_dma_mask;
 
-               if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
+               if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
                    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
                     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
                        /*
@@ -6064,7 +6096,7 @@ megasas_set_dma_mask(struct megasas_instance *instance)
                        if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
                                goto fail_set_dma_mask;
                        else if (dma_set_mask_and_coherent(&pdev->dev,
-                                                          DMA_BIT_MASK(64)))
+                                                          DMA_BIT_MASK(63)))
                                goto fail_set_dma_mask;
                }
        } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
@@ -6076,8 +6108,8 @@ megasas_set_dma_mask(struct megasas_instance *instance)
                instance->consistent_mask_64bit = true;
 
        dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
-                ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
-                (instance->consistent_mask_64bit ? "64" : "32"));
+                ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"),
+                (instance->consistent_mask_64bit ? "63" : "32"));
 
        return 0;
 
index 8776330175e343c3a620005e7105140209910922..d2ab52026014f99934989c3a0514f259fd16ceeb 100644 (file)
@@ -2565,12 +2565,14 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 {
        struct sysinfo s;
        u64 consistent_dma_mask;
+       /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+       int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
 
        if (ioc->is_mcpu_endpoint)
                goto try_32bit;
 
        if (ioc->dma_mask)
-               consistent_dma_mask = DMA_BIT_MASK(64);
+               consistent_dma_mask = DMA_BIT_MASK(dma_mask);
        else
                consistent_dma_mask = DMA_BIT_MASK(32);
 
@@ -2578,11 +2580,11 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
                const uint64_t required_mask =
                    dma_get_required_mask(&pdev->dev);
                if ((required_mask > DMA_BIT_MASK(32)) &&
-                   !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+                   !pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask)) &&
                    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
                        ioc->base_add_sg_single = &_base_add_sg_single_64;
                        ioc->sge_size = sizeof(Mpi2SGESimple64_t);
-                       ioc->dma_mask = 64;
+                       ioc->dma_mask = dma_mask;
                        goto out;
                }
        }
@@ -2609,7 +2611,7 @@ static int
 _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
                                      struct pci_dev *pdev)
 {
-       if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
                        return -ENODEV;
        }
@@ -4545,7 +4547,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                total_sz += sz;
        } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
 
-       if (ioc->dma_mask == 64) {
+       if (ioc->dma_mask > 32) {
                if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
                        pr_warn(MPT3SAS_FMT
                            "no suitable consistent DMA mask for %s\n",
index f8f4d3ea67f3f82097c78a953ec802994a76fb61..15d493f30810fc6556ec79a32d14e81e2d7cf0de 100644 (file)
@@ -2191,6 +2191,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
        dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
            vha->gnl.ldma);
 
+       vha->gnl.l = NULL;
+
        vfree(vha->scan.l);
 
        if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
index 1f1a05a90d3d7f9a29d0da663af27cc8fc2d7d69..34ff4bbc8de10e96d332037df0981ccc31fbecee 100644 (file)
@@ -3360,15 +3360,15 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
        sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
        sp->done = qla24xx_async_gpsc_sp_done;
 
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
-
        ql_dbg(ql_dbg_disc, vha, 0x205e,
            "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
            sp->name, fcport->port_name, sp->handle,
            fcport->loop_id, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
        return rval;
 
 done_free_sp:
@@ -3729,13 +3729,14 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
        sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
        sp->done = qla2x00_async_gpnid_sp_done;
 
+       ql_dbg(ql_dbg_disc, vha, 0x2067,
+           "Async-%s hdl=%x ID %3phC.\n", sp->name,
+           sp->handle, ct_req->req.port_id.port_id);
+
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS)
                goto done_free_sp;
 
-       ql_dbg(ql_dbg_disc, vha, 0x2067,
-           "Async-%s hdl=%x ID %3phC.\n", sp->name,
-           sp->handle, ct_req->req.port_id.port_id);
        return rval;
 
 done_free_sp:
@@ -4044,6 +4045,41 @@ out:
        }
 }
 
+static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
+    srb_t *sp, int cmd)
+{
+       struct qla_work_evt *e;
+
+       if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
+               return QLA_PARAMETER_ERROR;
+
+       e = qla2x00_alloc_work(vha, cmd);
+       if (!e)
+               return QLA_FUNCTION_FAILED;
+
+       e->u.iosb.sp = sp;
+
+       return qla2x00_post_work(vha, e);
+}
+
+static int qla2x00_post_nvme_gpnft_done_work(struct scsi_qla_host *vha,
+    srb_t *sp, int cmd)
+{
+       struct qla_work_evt *e;
+
+       if (cmd != QLA_EVT_GPNFT)
+               return QLA_PARAMETER_ERROR;
+
+       e = qla2x00_alloc_work(vha, cmd);
+       if (!e)
+               return QLA_FUNCTION_FAILED;
+
+       e->u.gpnft.fc4_type = FC4_TYPE_NVME;
+       e->u.gpnft.sp = sp;
+
+       return qla2x00_post_work(vha, e);
+}
+
 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
        struct srb *sp)
 {
@@ -4144,22 +4180,36 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
 {
        struct srb *sp = s;
        struct scsi_qla_host *vha = sp->vha;
-       struct qla_work_evt *e;
        struct ct_sns_req *ct_req =
                (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
        u16 cmd = be16_to_cpu(ct_req->command);
        u8 fc4_type = sp->gen2;
        unsigned long flags;
+       int rc;
 
        /* gen2 field is holding the fc4type */
        ql_dbg(ql_dbg_disc, vha, 0xffff,
            "Async done-%s res %x FC4Type %x\n",
            sp->name, res, sp->gen2);
 
+       sp->rc = res;
        if (res) {
                unsigned long flags;
+               const char *name = sp->name;
+
+               /*
+                * We are in an Interrupt context, queue up this
+                * sp for GNNFT_DONE work. This will allow all
+                * the resource to get freed up.
+                */
+               rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+                   QLA_EVT_GNNFT_DONE);
+               if (rc) {
+                       /* Cleanup here to prevent memory leak */
+                       qla24xx_sp_unmap(vha, sp);
+                       sp->free(sp);
+               }
 
-               sp->free(sp);
                spin_lock_irqsave(&vha->work_lock, flags);
                vha->scan.scan_flags &= ~SF_SCANNING;
                vha->scan.scan_retry++;
@@ -4170,9 +4220,9 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
                        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                        qla2xxx_wake_dpc(vha);
                } else {
-                       ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
-                           "Async done-%s rescan failed on all retries\n",
-                           sp->name);
+                       ql_dbg(ql_dbg_disc, vha, 0xffff,
+                           "Async done-%s rescan failed on all retries.\n",
+                           name);
                }
                return;
        }
@@ -4187,77 +4237,31 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
                vha->scan.scan_flags &= ~SF_SCANNING;
                spin_unlock_irqrestore(&vha->work_lock, flags);
 
-               e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT);
-               if (!e) {
-                       /*
-                        * please ignore kernel warning. Otherwise,
-                        * we have mem leak.
-                        */
-                       if (sp->u.iocb_cmd.u.ctarg.req) {
-                               dma_free_coherent(&vha->hw->pdev->dev,
-                                   sp->u.iocb_cmd.u.ctarg.req_allocated_size,
-                                   sp->u.iocb_cmd.u.ctarg.req,
-                                   sp->u.iocb_cmd.u.ctarg.req_dma);
-                               sp->u.iocb_cmd.u.ctarg.req = NULL;
-                       }
-                       if (sp->u.iocb_cmd.u.ctarg.rsp) {
-                               dma_free_coherent(&vha->hw->pdev->dev,
-                                   sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
-                                   sp->u.iocb_cmd.u.ctarg.rsp,
-                                   sp->u.iocb_cmd.u.ctarg.rsp_dma);
-                               sp->u.iocb_cmd.u.ctarg.rsp = NULL;
-                       }
-
-                       ql_dbg(ql_dbg_disc, vha, 0xffff,
-                           "Async done-%s unable to alloc work element\n",
-                           sp->name);
-                       sp->free(sp);
+               sp->rc = res;
+               rc = qla2x00_post_nvme_gpnft_done_work(vha, sp, QLA_EVT_GPNFT);
+               if (!rc) {
+                       qla24xx_sp_unmap(vha, sp);
                        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
                        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                        return;
                }
-               e->u.gpnft.fc4_type = FC4_TYPE_NVME;
-               sp->rc = res;
-               e->u.gpnft.sp = sp;
-
-               qla2x00_post_work(vha, e);
-               return;
        }
 
-       if (cmd == GPN_FT_CMD)
-               e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
-       else
-               e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
-       if (!e) {
-               /* please ignore kernel warning. Otherwise, we have mem leak. */
-               if (sp->u.iocb_cmd.u.ctarg.req) {
-                       dma_free_coherent(&vha->hw->pdev->dev,
-                           sp->u.iocb_cmd.u.ctarg.req_allocated_size,
-                           sp->u.iocb_cmd.u.ctarg.req,
-                           sp->u.iocb_cmd.u.ctarg.req_dma);
-                       sp->u.iocb_cmd.u.ctarg.req = NULL;
-               }
-               if (sp->u.iocb_cmd.u.ctarg.rsp) {
-                       dma_free_coherent(&vha->hw->pdev->dev,
-                           sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
-                           sp->u.iocb_cmd.u.ctarg.rsp,
-                           sp->u.iocb_cmd.u.ctarg.rsp_dma);
-                       sp->u.iocb_cmd.u.ctarg.rsp = NULL;
-               }
+       if (cmd == GPN_FT_CMD) {
+               del_timer(&sp->u.iocb_cmd.timer);
+               rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+                   QLA_EVT_GPNFT_DONE);
+       } else {
+               rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+                   QLA_EVT_GNNFT_DONE);
+       }
 
-               ql_dbg(ql_dbg_disc, vha, 0xffff,
-                   "Async done-%s unable to alloc work element\n",
-                   sp->name);
-               sp->free(sp);
+       if (rc) {
+               qla24xx_sp_unmap(vha, sp);
                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                return;
        }
-
-       sp->rc = res;
-       e->u.iosb.sp = sp;
-
-       qla2x00_post_work(vha, e);
 }
 
 /*
@@ -4356,7 +4360,6 @@ void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
 {
        ql_dbg(ql_dbg_disc, vha, 0xffff,
            "%s enter\n", __func__);
-       del_timer(&sp->u.iocb_cmd.timer);
        qla24xx_async_gnnft(vha, sp, sp->gen2);
 }
 
index f84f9bf15027817f89c7d5e4c6695b17a8f90ac2..bee9cfb29152983e965ab744b70bf79ea3389463 100644 (file)
@@ -54,7 +54,7 @@ qla2x00_sp_timeout(struct timer_list *t)
        unsigned long flags;
        struct qla_hw_data *ha = sp->vha->hw;
 
-       WARN_ON(irqs_disabled());
+       WARN_ON_ONCE(irqs_disabled());
        spin_lock_irqsave(&ha->hardware_lock, flags);
        req = sp->qpair->req;
        req->outstanding_cmds[sp->handle] = NULL;
@@ -216,8 +216,13 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
        struct srb_iocb *lio;
        int rval = QLA_FUNCTION_FAILED;
 
-       if (!vha->flags.online)
-               goto done;
+       if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
+           fcport->loop_id == FC_NO_LOOP_ID) {
+               ql_log(ql_log_warn, vha, 0xffff,
+                   "%s: %8phC - not sending command.\n",
+                   __func__, fcport->port_name);
+               return rval;
+       }
 
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
@@ -247,6 +252,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 
        }
 
+       ql_dbg(ql_dbg_disc, vha, 0x2072,
+           "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
+               "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
+           fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+           fcport->login_retry);
+
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                fcport->flags |= FCF_LOGIN_NEEDED;
@@ -254,11 +265,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
                goto done_free_sp;
        }
 
-       ql_dbg(ql_dbg_disc, vha, 0x2072,
-           "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
-               "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
-           fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
-           fcport->login_retry);
        return rval;
 
 done_free_sp:
@@ -303,15 +309,16 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 
        sp->done = qla2x00_async_logout_sp_done;
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
 
        ql_dbg(ql_dbg_disc, vha, 0x2070,
            "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
            sp->handle, fcport->loop_id, fcport->d_id.b.domain,
                fcport->d_id.b.area, fcport->d_id.b.al_pa,
                fcport->port_name);
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
        return rval;
 
 done_free_sp:
@@ -489,13 +496,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
        sp->done = qla2x00_async_adisc_sp_done;
        if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
                lio->u.logio.flags |= SRB_LOGIN_RETRIED;
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
 
        ql_dbg(ql_dbg_disc, vha, 0x206f,
            "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
            sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
+
        return rval;
 
 done_free_sp:
@@ -792,6 +801,9 @@ qla24xx_async_gnl_sp_done(void *s, int res)
            sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
            sp->u.iocb_cmd.u.mbx.in_mb[2]);
 
+       if (res == QLA_FUNCTION_TIMEOUT)
+               return;
+
        memset(&ea, 0, sizeof(ea));
        ea.sp = sp;
        ea.rc = res;
@@ -975,17 +987,13 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
            "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
            sp->name, res, fcport->port_name, mb[1], mb[2]);
 
-       fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-
-       if (res == QLA_FUNCTION_TIMEOUT)
-               return;
-
        if (res == QLA_FUNCTION_TIMEOUT) {
                dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
                        sp->u.iocb_cmd.u.mbx.in_dma);
                return;
        }
 
+       fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
        memset(&ea, 0, sizeof(ea));
        ea.event = FCME_GPDB_DONE;
        ea.fcport = fcport;
@@ -1120,8 +1128,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
        struct port_database_24xx *pd;
        struct qla_hw_data *ha = vha->hw;
 
-       if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+       if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
+           fcport->loop_id == FC_NO_LOOP_ID) {
+               ql_log(ql_log_warn, vha, 0xffff,
+                   "%s: %8phC - not sending command.\n",
+                   __func__, fcport->port_name);
                return rval;
+       }
 
        fcport->disc_state = DSC_GPDB;
 
@@ -1161,14 +1174,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 
        sp->done = qla24xx_async_gpdb_sp_done;
 
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
-
        ql_dbg(ql_dbg_disc, vha, 0x20dc,
            "Async-%s %8phC hndl %x opt %x\n",
            sp->name, fcport->port_name, sp->handle, opt);
 
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
        return rval;
 
 done_free_sp:
@@ -1698,15 +1710,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
        tm_iocb->u.tmf.data = tag;
        sp->done = qla2x00_tmf_sp_done;
 
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
-
        ql_dbg(ql_dbg_taskm, vha, 0x802f,
            "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
            sp->handle, fcport->loop_id, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa);
 
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
        wait_for_completion(&tm_iocb->u.tmf.comp);
 
        rval = tm_iocb->u.tmf.data;
@@ -1790,14 +1801,14 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
 
        sp->done = qla24xx_abort_sp_done;
 
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
-
        ql_dbg(ql_dbg_async, vha, 0x507c,
            "Abort command issued - hdl=%x, target_id=%x\n",
            cmd_sp->handle, fcport->tgt_id);
 
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
+
        if (wait) {
                wait_for_completion(&abt_iocb->u.abt.comp);
                rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
@@ -1903,8 +1914,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                return;
        }
 
-       if (fcport->disc_state == DSC_DELETE_PEND)
+       if ((fcport->disc_state == DSC_DELETE_PEND) ||
+           (fcport->disc_state == DSC_DELETED)) {
+               set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                return;
+       }
 
        if (ea->sp->gen2 != fcport->login_gen) {
                /* target side must have changed it. */
@@ -4732,7 +4746,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
                ql_log(ql_log_warn, vha, 0xd049,
                    "Failed to allocate ct_sns request.\n");
                kfree(fcport);
-               fcport = NULL;
+               return NULL;
        }
        INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
        INIT_LIST_HEAD(&fcport->gnl_entry);
@@ -6556,8 +6570,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
        }
 
        /* Clear all async request states across all VPs. */
-       list_for_each_entry(fcport, &vha->vp_fcports, list)
+       list_for_each_entry(fcport, &vha->vp_fcports, list) {
                fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+               fcport->scan_state = 0;
+       }
        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vp, &ha->vp_list, list) {
                atomic_inc(&vp->vref_count);
index 42b8f0d3e580da932237826d07c2c36e49bfa7f3..60b6019a2fcaee23974a34997b8a1de2a3d27957 100644 (file)
@@ -3395,6 +3395,12 @@ skip_dpc:
        return 0;
 
 probe_failed:
+       if (base_vha->gnl.l) {
+               dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+                               base_vha->gnl.l, base_vha->gnl.ldma);
+               base_vha->gnl.l = NULL;
+       }
+
        if (base_vha->timer_active)
                qla2x00_stop_timer(base_vha);
        base_vha->flags.online = 0;
@@ -3624,7 +3630,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
        if (!atomic_read(&pdev->enable_cnt)) {
                dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
                    base_vha->gnl.l, base_vha->gnl.ldma);
-
+               base_vha->gnl.l = NULL;
                scsi_host_put(base_vha->host);
                kfree(ha);
                pci_set_drvdata(pdev, NULL);
@@ -3663,6 +3669,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
        dma_free_coherent(&ha->pdev->dev,
                base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
 
+       base_vha->gnl.l = NULL;
+
        vfree(base_vha->scan.l);
 
        if (IS_QLAFX00(ha))
@@ -4602,6 +4610,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
                    "Alloc failed for scan database.\n");
                dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
                    vha->gnl.l, vha->gnl.ldma);
+               vha->gnl.l = NULL;
                scsi_remove_host(vha->host);
                return NULL;
        }
@@ -4855,6 +4864,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
        if (fcport) {
                fcport->id_changed = 1;
                fcport->scan_state = QLA_FCPORT_FOUND;
+               fcport->chip_reset = vha->hw->base_qpair->chip_reset;
                memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
 
                if (pla) {
index 9d7feb005acfdccf9b6e08ebdc17d650df58b67b..d6dc320f81a7a0281ca61ccd9d7ecafb1064c50e 100644 (file)
@@ -1023,6 +1023,7 @@ void qlt_free_session_done(struct work_struct *work)
 
        if (logout_started) {
                bool traced = false;
+               u16 cnt = 0;
 
                while (!READ_ONCE(sess->logout_completed)) {
                        if (!traced) {
@@ -1032,6 +1033,9 @@ void qlt_free_session_done(struct work_struct *work)
                                traced = true;
                        }
                        msleep(100);
+                       cnt++;
+                       if (cnt > 200)
+                               break;
                }
 
                ql_dbg(ql_dbg_disc, vha, 0xf087,
@@ -1216,7 +1220,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
                sess->logout_on_delete = 0;
                sess->logo_ack_needed = 0;
                sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
-               sess->scan_state = 0;
        }
 }
 
index b7a8fdfeb2f47babeb014a55c9357b8b4f87fb34..e731af504f0724c1ec7848501aa9ea99c30dc1e2 100644 (file)
@@ -970,6 +970,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
        ses->sdb = scmd->sdb;
        ses->next_rq = scmd->request->next_rq;
        ses->result = scmd->result;
+       ses->resid_len = scmd->req.resid_len;
        ses->underflow = scmd->underflow;
        ses->prot_op = scmd->prot_op;
        ses->eh_eflags = scmd->eh_eflags;
@@ -981,6 +982,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
        memset(&scmd->sdb, 0, sizeof(scmd->sdb));
        scmd->request->next_rq = NULL;
        scmd->result = 0;
+       scmd->req.resid_len = 0;
 
        if (sense_bytes) {
                scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1034,6 +1036,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
        scmd->sdb = ses->sdb;
        scmd->request->next_rq = ses->next_rq;
        scmd->result = ses->result;
+       scmd->req.resid_len = ses->resid_len;
        scmd->underflow = ses->underflow;
        scmd->prot_op = ses->prot_op;
        scmd->eh_eflags = ses->eh_eflags;
index 940b16a80ff1cb38f29c4d475f9433670161123d..595d69a96e6f82412ca1734c1ad9f7df304874fc 100644 (file)
@@ -71,11 +71,11 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
        struct kmem_cache *cache;
        int ret = 0;
 
+       mutex_lock(&scsi_sense_cache_mutex);
        cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
        if (cache)
-               return 0;
+               goto exit;
 
-       mutex_lock(&scsi_sense_cache_mutex);
        if (shost->unchecked_isa_dma) {
                scsi_sense_isadma_cache =
                        kmem_cache_create("scsi_sense_cache(DMA)",
@@ -91,7 +91,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
                if (!scsi_sense_cache)
                        ret = -ENOMEM;
        }
-
+ exit:
        mutex_unlock(&scsi_sense_cache_mutex);
        return ret;
 }
@@ -1252,6 +1252,18 @@ static void scsi_initialize_rq(struct request *rq)
        cmd->retries = 0;
 }
 
+/*
+ * Only called when the request isn't completed by SCSI, and not freed by
+ * SCSI
+ */
+static void scsi_cleanup_rq(struct request *rq)
+{
+       if (rq->rq_flags & RQF_DONTPREP) {
+               scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
+               rq->rq_flags &= ~RQF_DONTPREP;
+       }
+}
+
 /* Add a command to the list used by the aacraid and dpt_i2o drivers */
 void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
 {
@@ -2353,6 +2365,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
        .init_request   = scsi_mq_init_request,
        .exit_request   = scsi_mq_exit_request,
        .initialize_rq_fn = scsi_initialize_rq,
+       .cleanup_rq     = scsi_cleanup_rq,
        .map_queues     = scsi_map_queues,
 };
 
@@ -3073,11 +3086,14 @@ scsi_device_quiesce(struct scsi_device *sdev)
         */
        WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
 
-       blk_set_preempt_only(q);
+       if (sdev->quiesced_by == current)
+               return 0;
+
+       blk_set_pm_only(q);
 
        blk_mq_freeze_queue(q);
        /*
-        * Ensure that the effect of blk_set_preempt_only() will be visible
+        * Ensure that the effect of blk_set_pm_only() will be visible
         * for percpu_ref_tryget() callers that occur after the queue
         * unfreeze even if the queue was already frozen before this function
         * was called. See also https://lwn.net/Articles/573497/.
@@ -3090,7 +3106,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
        if (err == 0)
                sdev->quiesced_by = current;
        else
-               blk_clear_preempt_only(q);
+               blk_clear_pm_only(q);
        mutex_unlock(&sdev->state_mutex);
 
        return err;
@@ -3113,8 +3129,10 @@ void scsi_device_resume(struct scsi_device *sdev)
         * device deleted during suspend)
         */
        mutex_lock(&sdev->state_mutex);
-       sdev->quiesced_by = NULL;
-       blk_clear_preempt_only(sdev->request_queue);
+       if (sdev->quiesced_by) {
+               sdev->quiesced_by = NULL;
+               blk_clear_pm_only(sdev->request_queue);
+       }
        if (sdev->sdev_state == SDEV_QUIESCE)
                scsi_device_set_state(sdev, SDEV_RUNNING);
        mutex_unlock(&sdev->state_mutex);
index bd70339c1242eb1f2515a7a947b007472823a3c8..03d9855a6afd71ef5893b37623dc27c39db1cdbc 100644 (file)
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_dbg.h>
 
-#define SCSI_LOG_SPOOLSIZE 4096
-
-#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
-#warning SCSI logging bitmask too large
-#endif
-
-struct scsi_log_buf {
-       char buffer[SCSI_LOG_SPOOLSIZE];
-       unsigned long map;
-};
-
-static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
-
 static char *scsi_log_reserve_buffer(size_t *len)
 {
-       struct scsi_log_buf *buf;
-       unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
-       unsigned long idx = 0;
-
-       preempt_disable();
-       buf = this_cpu_ptr(&scsi_format_log);
-       idx = find_first_zero_bit(&buf->map, map_bits);
-       if (likely(idx < map_bits)) {
-               while (test_and_set_bit(idx, &buf->map)) {
-                       idx = find_next_zero_bit(&buf->map, map_bits, idx);
-                       if (idx >= map_bits)
-                               break;
-               }
-       }
-       if (WARN_ON(idx >= map_bits)) {
-               preempt_enable();
-               return NULL;
-       }
-       *len = SCSI_LOG_BUFSIZE;
-       return buf->buffer + idx * SCSI_LOG_BUFSIZE;
+       *len = 128;
+       return kmalloc(*len, GFP_ATOMIC);
 }
 
 static void scsi_log_release_buffer(char *bufptr)
 {
-       struct scsi_log_buf *buf;
-       unsigned long idx;
-       int ret;
-
-       buf = this_cpu_ptr(&scsi_format_log);
-       if (bufptr >= buf->buffer &&
-           bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
-               idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
-               ret = test_and_clear_bit(idx, &buf->map);
-               WARN_ON(!ret);
-       }
-       preempt_enable();
+       kfree(bufptr);
 }
 
 static inline const char *scmd_name(const struct scsi_cmnd *scmd)
index 3aee9464a7bfab94a56d2db0463e3a7c40e0692a..186f779fa60c4d9d9ea9621cbc601769a939bc06 100644 (file)
@@ -723,6 +723,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
                  const char *buf, size_t count)
 {
        struct kernfs_node *kn;
+       struct scsi_device *sdev = to_scsi_device(dev);
+
+       /*
+        * We need to take a reference on the module, to avoid the module
+        * being removed during the delete.
+        */
+       if (scsi_device_get(sdev))
+               return -ENODEV;
 
        kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
        WARN_ON_ONCE(!kn);
@@ -737,9 +745,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
         * state into SDEV_DEL.
         */
        device_remove_file(dev, attr);
-       scsi_remove_device(to_scsi_device(dev));
+       scsi_remove_device(sdev);
        if (kn)
                sysfs_unbreak_active_protection(kn);
+       scsi_device_put(sdev);
        return count;
 };
 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
index 77cb45ef55fc577cc6fa537d988f28aef4e8f739..f8a09e6678d4bbc850e68cb1f6ebd5c65061b460 100644 (file)
@@ -1646,7 +1646,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
                /* we need to evaluate the error return  */
                if (scsi_sense_valid(sshdr) &&
                        (sshdr->asc == 0x3a ||  /* medium not present */
-                        sshdr->asc == 0x20))   /* invalid command */
+                        sshdr->asc == 0x20 ||  /* invalid command */
+                        (sshdr->asc == 0x74 && sshdr->ascq == 0x71)))  /* drive is password locked */
                                /* this is no error here */
                                return 0;
 
index a3e5574ffaea400fd3969a85e7cda6abec8b8fd5..c8850d3cbca68d5b3aa544f8badc9df0aa124e84 100644 (file)
@@ -8125,6 +8125,9 @@ int ufshcd_shutdown(struct ufs_hba *hba)
 {
        int ret = 0;
 
+       if (!hba->is_powered)
+               goto out;
+
        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
                goto out;
 
index 8457f507c2a93500970be0b856c3a79dbcc9dd5f..853313db0413cbfe44a21ba2bfd70609f2825a56 100644 (file)
@@ -40,18 +40,23 @@ struct pruss_match_private_data {
 /**
  * pruss_get() - get the pruss for a given PRU remoteproc
  * @rproc: remoteproc handle of a PRU instance
+ * @pruss_id: integer pointer to fill in the pruss instance id
  *
  * Finds the parent pruss device for a PRU given the @rproc handle of the
- * PRU remote processor. This function increments the pruss device's refcount,
- * so always use pruss_put() to decrement it back once pruss isn't needed
- * anymore.
+ * PRU remote processor. The function will also return the PRUSS instance id
+ * to requestors if @pruss_id is provided. This can be used by PRU client
+ * drivers to distinguish between multiple PRUSS instances, and build some
+ * customization around a specific PRUSS instance.
+ *
+ * This function increments the pruss device's refcount, so always use
+ * pruss_put() to decrement it back once pruss isn't needed anymore.
  *
  * Returns the pruss handle on success, and an ERR_PTR on failure using one
  * of the following error values
  *    -EINVAL if invalid parameter
  *    -ENODEV if PRU device or PRUSS device is not found
  */
-struct pruss *pruss_get(struct rproc *rproc)
+struct pruss *pruss_get(struct rproc *rproc, int *pruss_id)
 {
        struct pruss *pruss;
        struct device *dev;
@@ -75,6 +80,8 @@ struct pruss *pruss_get(struct rproc *rproc)
                return ERR_PTR(-ENODEV);
 
        get_device(pruss->dev);
+       if (pruss_id)
+               *pruss_id = pruss->id;
 
        return pruss;
 }
@@ -216,6 +223,29 @@ int pruss_cfg_update(struct pruss *pruss, unsigned int reg,
 }
 EXPORT_SYMBOL_GPL(pruss_cfg_update);
 
+static int pruss_set_id(struct pruss *pruss)
+{
+       int i;
+       int ret = -EINVAL;
+       static const phys_addr_t addrs[] = { 0x4a300000,
+                                            0x54400000, 0x54440000,
+                                            0x4b200000, 0x4b280000,
+                                            0x20a80000, 0x20ac0000,
+                                            0x0b000000, 0x0b100000,
+                                            0x0b200000, };
+       static const int ids[] = { 0, 1, 0, 1, 2, 0, 1, 0, 1, 2 };
+
+       for (i = 0; i < ARRAY_SIZE(addrs); i++) {
+               if (pruss->mem_regions[0].pa == addrs[i]) {
+                       pruss->id = ids[i];
+                       ret = 0;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
 static const
 struct pruss_private_data *pruss_get_private_data(struct platform_device *pdev)
 {
@@ -430,6 +460,10 @@ skip_mux:
        }
        of_node_put(np);
 
+       ret = pruss_set_id(pruss);
+       if (ret < 0)
+               return ret;
+
        platform_set_drvdata(pdev, pruss);
 
        dev_dbg(&pdev->dev, "creating PRU cores and other child platform devices\n");
index 19c8efb9a5ee786b523a96ae9e41226553757963..1ba1556f1987827fb4099e8e60f6ad41446a8699 100644 (file)
@@ -3,8 +3,8 @@
 #
 
 menuconfig SOUNDWIRE
-       bool "SoundWire support"
-       ---help---
+       tristate "SoundWire support"
+       help
          SoundWire is a 2-Pin interface with data and clock line ratified
          by the MIPI Alliance. SoundWire is used for transporting data
          typically related to audio functions. SoundWire interface is
@@ -16,17 +16,12 @@ if SOUNDWIRE
 
 comment "SoundWire Devices"
 
-config SOUNDWIRE_BUS
-       tristate
-       select REGMAP_SOUNDWIRE
-
 config SOUNDWIRE_CADENCE
        tristate
 
 config SOUNDWIRE_INTEL
        tristate "Intel SoundWire Master driver"
        select SOUNDWIRE_CADENCE
-       select SOUNDWIRE_BUS
        depends on X86 && ACPI && SND_SOC
        ---help---
          SoundWire Intel Master driver.
index 5817beaca0e1f22db0bf753cad12c250e8200363..1e2c00163142ef0ffffc88a62b0f5d0fae1750b4 100644 (file)
@@ -4,7 +4,7 @@
 
 #Bus Objs
 soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
-obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o
+obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
 
 #Cadence Objs
 soundwire-cadence-objs := cadence_master.o
index cb6a331f448ab681ed4ef0bd24f8243debf8c232..70f78eda037e805604edbc7b051ee966fc1e5bc6 100644 (file)
@@ -81,8 +81,8 @@
 
 #define CDNS_MCP_INTSET                                0x4C
 
-#define CDNS_SDW_SLAVE_STAT                    0x50
-#define CDNS_MCP_SLAVE_STAT_MASK               BIT(1, 0)
+#define CDNS_MCP_SLAVE_STAT                    0x50
+#define CDNS_MCP_SLAVE_STAT_MASK               GENMASK(1, 0)
 
 #define CDNS_MCP_SLAVE_INTSTAT0                        0x54
 #define CDNS_MCP_SLAVE_INTSTAT1                        0x58
@@ -96,8 +96,8 @@
 #define CDNS_MCP_SLAVE_INTMASK0                        0x5C
 #define CDNS_MCP_SLAVE_INTMASK1                        0x60
 
-#define CDNS_MCP_SLAVE_INTMASK0_MASK           GENMASK(30, 0)
-#define CDNS_MCP_SLAVE_INTMASK1_MASK           GENMASK(16, 0)
+#define CDNS_MCP_SLAVE_INTMASK0_MASK           GENMASK(31, 0)
+#define CDNS_MCP_SLAVE_INTMASK1_MASK           GENMASK(15, 0)
 
 #define CDNS_MCP_PORT_INTSTAT                  0x64
 #define CDNS_MCP_PDI_STAT                      0x6C
index a6e2581ada7038c9631fac412ebb9f9854a6fb9d..29bc99c4a7b664f241bc4dd2f6c38f2c95cfc75c 100644 (file)
@@ -282,6 +282,16 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
 
        if (pcm) {
                count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
+
+               /*
+                * WORKAROUND: on all existing Intel controllers, pdi
+                * number 2 reports channel count as 1 even though it
+                * supports 8 channels. Performing hardcoding for pdi
+                * number 2.
+                */
+               if (pdi_num == 2)
+                       count = 7;
+
        } else {
                count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
                count = ((count & SDW_SHIM_PDMSCAP_CPSS) >>
index 25abf2d1732a0b69e48a89bc8e9250db0fe82299..eab27d41ba83f3ae713b0fccecc02aacb226834d 100644 (file)
@@ -554,7 +554,8 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
        bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
 
        /* handle all the 3-wire mode */
-       if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
+       if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
+           tfr->rx_buf != master->dummy_rx)
                cs |= BCM2835_SPI_CS_REN;
        else
                cs &= ~BCM2835_SPI_CS_REN;
index 3094d818cf06d4751122611bc2eb807e71d965fd..12c1fa5b06c5b6e3da0028e6ab4f72d5b2f5447c 100644 (file)
@@ -178,24 +178,14 @@ static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
                      BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
 }
 
-static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
 {
-       struct spi_master *master = dev_id;
-       struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
-       irqreturn_t ret = IRQ_NONE;
-
-       /* IRQ may be shared, so return if our interrupts are disabled */
-       if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
-             (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
-               return ret;
+       u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
 
        /* check if we have data to read */
-       while (bs->rx_len &&
-              (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
-                 BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
+       for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL);
+            stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT))
                bcm2835aux_rd_fifo(bs);
-               ret = IRQ_HANDLED;
-       }
 
        /* check if we have data to write */
        while (bs->tx_len &&
@@ -203,16 +193,21 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
               (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
                  BCM2835_AUX_SPI_STAT_TX_FULL))) {
                bcm2835aux_wr_fifo(bs);
-               ret = IRQ_HANDLED;
        }
+}
 
-       /* and check if we have reached "done" */
-       while (bs->rx_len &&
-              (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
-                 BCM2835_AUX_SPI_STAT_BUSY))) {
-               bcm2835aux_rd_fifo(bs);
-               ret = IRQ_HANDLED;
-       }
+static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+{
+       struct spi_master *master = dev_id;
+       struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+       /* IRQ may be shared, so return if our interrupts are disabled */
+       if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
+             (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
+               return IRQ_NONE;
+
+       /* do common fifo handling */
+       bcm2835aux_spi_transfer_helper(bs);
 
        if (!bs->tx_len) {
                /* disable tx fifo empty interrupt */
@@ -226,8 +221,7 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
                complete(&master->xfer_completion);
        }
 
-       /* and return */
-       return ret;
+       return IRQ_HANDLED;
 }
 
 static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
@@ -273,7 +267,6 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
 {
        struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
        unsigned long timeout;
-       u32 stat;
 
        /* configure spi */
        bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
@@ -284,24 +277,9 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
 
        /* loop until finished the transfer */
        while (bs->rx_len) {
-               /* read status */
-               stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
-
-               /* fill in tx fifo with remaining data */
-               if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
-                       bcm2835aux_wr_fifo(bs);
-                       continue;
-               }
 
-               /* read data from fifo for both cases */
-               if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
-                       bcm2835aux_rd_fifo(bs);
-                       continue;
-               }
-               if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
-                       bcm2835aux_rd_fifo(bs);
-                       continue;
-               }
+               /* do common fifo handling */
+               bcm2835aux_spi_transfer_helper(bs);
 
                /* there is still data pending to read check the timeout */
                if (bs->rx_len && time_after(jiffies, timeout)) {
index 088772ebef9bdd0b719fc361ac22dafa380ddceb..77838d8fd9bb602d56c6f2900abfd27ff8a6bb7d 100644 (file)
@@ -410,7 +410,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
                return status;
 
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
-       master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL;
+       master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL | SPI_CS_HIGH;
        master->flags = master_flags;
        master->bus_num = pdev->id;
        /* The master needs to think there is a chipselect even if not connected */
@@ -437,7 +437,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
                spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
        }
        spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
-       spi_gpio->bitbang.flags = SPI_CS_HIGH;
 
        status = spi_bitbang_start(&spi_gpio->bitbang);
        if (status)
index 5d8faeefb0749919a2c13b09511f1871607cb907..64d19896bda3e7ecacaeaee2f19406b4b69a8185 100644 (file)
@@ -8,11 +8,14 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
+#include <linux/sched/signal.h>
 
 #include "ion.h"
 
 static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 {
+       if (fatal_signal_pending(current))
+               return NULL;
        return alloc_pages(pool->gfp_mask, pool->order);
 }
 
index 2edf3ee91300007c4dff503774bca1f2fe04b57b..caf4d4df4bd3044f68a6e26b0bc5145b20e150ef 100644 (file)
@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
 static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
                            unsigned int flags)
 {
-       int divider, base, prescale;
+       unsigned int divider, base, prescale;
 
-       /* This function needs improvment */
+       /* This function needs improvement */
        /* Don't know if divider==0 works. */
 
        for (prescale = 0; prescale < 16; prescale++) {
@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
                        divider = (*nanosec) / base;
                        break;
                case CMDF_ROUND_UP:
-                       divider = (*nanosec) / base;
+                       divider = DIV_ROUND_UP(*nanosec, base);
                        break;
                }
                if (divider < 65536) {
@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
        }
 
        prescale = 15;
-       base = timer_base * (1 << prescale);
+       base = timer_base * (prescale + 1);
        divider = 65535;
        *nanosec = divider * base;
        return (prescale << 16) | (divider);
index 0a089cf5c78f78ef6dd8fcdd23b39862ab3519ba..fe6683effd0570054a5d16813fef9ce813a90a3b 100644 (file)
@@ -100,8 +100,15 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
                unsigned nameoff, maxsize;
 
                dentry_page = read_mapping_page(mapping, i, NULL);
-               if (IS_ERR(dentry_page))
-                       continue;
+               if (dentry_page == ERR_PTR(-ENOMEM)) {
+                       err = -ENOMEM;
+                       break;
+               } else if (IS_ERR(dentry_page)) {
+                       errln("fail to readdir of logical block %u of nid %llu",
+                             i, EROFS_V(dir)->nid);
+                       err = PTR_ERR(dentry_page);
+                       break;
+               }
 
                lock_page(dentry_page);
                de = (struct erofs_dirent *)kmap(dentry_page);
index ad6fe6d9d00abe33e816e9efded5eb0fda0818e6..0f1558c6747efdd9782226b4631c3fa0507b3230 100644 (file)
@@ -311,7 +311,11 @@ z_erofs_vle_work_lookup(struct super_block *sb,
        /* if multiref is disabled, `primary' is always true */
        primary = true;
 
-       DBG_BUGON(work->pageofs != pageofs);
+       if (work->pageofs != pageofs) {
+               DBG_BUGON(1);
+               erofs_workgroup_put(egrp);
+               return ERR_PTR(-EIO);
+       }
 
        /*
         * lock must be taken first to avoid grp->next == NIL between
@@ -853,6 +857,7 @@ repeat:
        for (i = 0; i < nr_pages; ++i)
                pages[i] = NULL;
 
+       err = 0;
        z_erofs_pagevec_ctor_init(&ctor,
                Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
 
@@ -874,8 +879,17 @@ repeat:
                        pagenr = z_erofs_onlinepage_index(page);
 
                DBG_BUGON(pagenr >= nr_pages);
-               DBG_BUGON(pages[pagenr]);
 
+               /*
+                * currently EROFS doesn't support multiref(dedup),
+                * so here erroring out one multiref page.
+                */
+               if (pages[pagenr]) {
+                       DBG_BUGON(1);
+                       SetPageError(pages[pagenr]);
+                       z_erofs_onlinepage_endio(pages[pagenr]);
+                       err = -EIO;
+               }
                pages[pagenr] = page;
        }
        sparsemem_pages = i;
@@ -885,7 +899,6 @@ repeat:
        overlapped = false;
        compressed_pages = grp->compressed_pages;
 
-       err = 0;
        for (i = 0; i < clusterpages; ++i) {
                unsigned pagenr;
 
@@ -911,7 +924,12 @@ repeat:
                        pagenr = z_erofs_onlinepage_index(page);
 
                        DBG_BUGON(pagenr >= nr_pages);
-                       DBG_BUGON(pages[pagenr]);
+                       if (pages[pagenr]) {
+                               DBG_BUGON(1);
+                               SetPageError(pages[pagenr]);
+                               z_erofs_onlinepage_endio(pages[pagenr]);
+                               err = -EIO;
+                       }
                        ++sparsemem_pages;
                        pages[pagenr] = page;
 
@@ -1335,19 +1353,18 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
        err = z_erofs_do_read_page(&f, page, &pagepool);
        (void)z_erofs_vle_work_iter_end(&f.builder);
 
-       if (err) {
+       /* if some compressed cluster ready, need submit them anyway */
+       z_erofs_submit_and_unzip(&f, &pagepool, true);
+
+       if (err)
                errln("%s, failed to read, err [%d]", __func__, err);
-               goto out;
-       }
 
-       z_erofs_submit_and_unzip(&f, &pagepool, true);
-out:
        if (f.m_iter.mpage != NULL)
                put_page(f.m_iter.mpage);
 
        /* clean up the remaining free pages */
        put_pages_list(&pagepool);
-       return 0;
+       return err;
 }
 
 static inline int __z_erofs_vle_normalaccess_readpages(
index a2df02d97a8ebc31896925c3beb978bb3a25c4bd..16fcf633e60f895621b4fa83ecc8ba2b5a01e579 100644 (file)
@@ -819,7 +819,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
        if (par->gamma.curves && gamma) {
                if (fbtft_gamma_parse_str(par, par->gamma.curves, gamma,
                                          strlen(gamma)))
-                       goto alloc_fail;
+                       goto release_framebuf;
        }
 
        /* Transmit buffer */
@@ -836,7 +836,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
        if (txbuflen > 0) {
                txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
                if (!txbuf)
-                       goto alloc_fail;
+                       goto release_framebuf;
                par->txbuf.buf = txbuf;
                par->txbuf.len = txbuflen;
        }
@@ -872,6 +872,9 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
 
        return info;
 
+release_framebuf:
+       framebuffer_release(info);
+
 alloc_fail:
        vfree(vmem);
 
index c747e9ca451860309dab8440bad5705492d014a4..0cef1d6d2e2b0c9963621e8a476c6c1fcc23c0b3 100644 (file)
@@ -538,7 +538,7 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
                break;
        case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
                ret = scnprintf(buf, PAGE_SIZE, "%u\n",
-                               gasket_page_table_num_entries(
+                               gasket_page_table_num_simple_entries(
                                        gasket_dev->page_table[0]));
                break;
        case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
index 1269a983455e57c5434b71dc145f07f1a088623c..13b890b9ef187f959c1decfae7f60169102cb31a 100644 (file)
@@ -422,6 +422,9 @@ static int vpfe_open(struct file *file)
        /* If decoder is not initialized. initialize it */
        if (!video->initialized && vpfe_update_pipe_state(video)) {
                mutex_unlock(&video->lock);
+               v4l2_fh_del(&handle->vfh);
+               v4l2_fh_exit(&handle->vfh);
+               kfree(handle);
                return -ENODEV;
        }
        /* Increment device users counter */
index ceeeb3069a0248d361753050fd595e889c25c58d..212fa06f7c57c0b6949297f6bf6f90db5d36048a 100644 (file)
@@ -247,7 +247,7 @@ static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2)
 }
 
 /* Waits for low-power LP-11 state on data and clock lanes. */
-static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
+static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
 {
        u32 mask, reg;
        int ret;
@@ -258,11 +258,9 @@ static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
        ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
                                 (reg & mask) == mask, 0, 500000);
        if (ret) {
-               v4l2_err(&csi2->sd, "LP-11 timeout, phy_state = 0x%08x\n", reg);
-               return ret;
+               v4l2_warn(&csi2->sd, "LP-11 wait timeout, likely a sensor driver bug, expect capture failures.\n");
+               v4l2_warn(&csi2->sd, "phy_state = 0x%08x\n", reg);
        }
-
-       return 0;
 }
 
 /* Wait for active clock on the clock lane. */
@@ -320,9 +318,7 @@ static int csi2_start(struct csi2_dev *csi2)
        csi2_enable(csi2, true);
 
        /* Step 5 */
-       ret = csi2_dphy_wait_stopstate(csi2);
-       if (ret)
-               goto err_assert_reset;
+       csi2_dphy_wait_stopstate(csi2);
 
        /* Step 6 */
        ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
index 607804aa560d3f54c613277e502250deb0450aff..76f434c1c088a52226a88bfd92c9a89fe62f71ff 100644 (file)
@@ -1755,8 +1755,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
 
        priv->hw->max_signal = 100;
 
-       if (vnt_init(priv))
+       if (vnt_init(priv)) {
+               device_free_info(priv);
                return -ENODEV;
+       }
 
        device_print_info(priv);
        pci_set_drvdata(pcid, priv);
index ccafcc2c87ac980da5f70e52cfa89ad41a0cdf45..70433f756d8e1ff10b8f0e113d167f058019ccbf 100644 (file)
@@ -402,16 +402,19 @@ static void vnt_free_int_bufs(struct vnt_private *priv)
        kfree(priv->int_buf.data_buf);
 }
 
-static bool vnt_alloc_bufs(struct vnt_private *priv)
+static int vnt_alloc_bufs(struct vnt_private *priv)
 {
+       int ret = 0;
        struct vnt_usb_send_context *tx_context;
        struct vnt_rcb *rcb;
        int ii;
 
        for (ii = 0; ii < priv->num_tx_context; ii++) {
                tx_context = kmalloc(sizeof(*tx_context), GFP_KERNEL);
-               if (!tx_context)
+               if (!tx_context) {
+                       ret = -ENOMEM;
                        goto free_tx;
+               }
 
                priv->tx_context[ii] = tx_context;
                tx_context->priv = priv;
@@ -419,16 +422,20 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
 
                /* allocate URBs */
                tx_context->urb = usb_alloc_urb(0, GFP_KERNEL);
-               if (!tx_context->urb)
+               if (!tx_context->urb) {
+                       ret = -ENOMEM;
                        goto free_tx;
+               }
 
                tx_context->in_use = false;
        }
 
        for (ii = 0; ii < priv->num_rcb; ii++) {
                priv->rcb[ii] = kzalloc(sizeof(*priv->rcb[ii]), GFP_KERNEL);
-               if (!priv->rcb[ii])
+               if (!priv->rcb[ii]) {
+                       ret = -ENOMEM;
                        goto free_rx_tx;
+               }
 
                rcb = priv->rcb[ii];
 
@@ -436,39 +443,46 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
 
                /* allocate URBs */
                rcb->urb = usb_alloc_urb(0, GFP_KERNEL);
-               if (!rcb->urb)
+               if (!rcb->urb) {
+                       ret = -ENOMEM;
                        goto free_rx_tx;
+               }
 
                rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
-               if (!rcb->skb)
+               if (!rcb->skb) {
+                       ret = -ENOMEM;
                        goto free_rx_tx;
+               }
 
                rcb->in_use = false;
 
                /* submit rx urb */
-               if (vnt_submit_rx_urb(priv, rcb))
+               ret = vnt_submit_rx_urb(priv, rcb);
+               if (ret)
                        goto free_rx_tx;
        }
 
        priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL);
-       if (!priv->interrupt_urb)
+       if (!priv->interrupt_urb) {
+               ret = -ENOMEM;
                goto free_rx_tx;
+       }
 
        priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
        if (!priv->int_buf.data_buf) {
-               usb_free_urb(priv->interrupt_urb);
-               goto free_rx_tx;
+               ret = -ENOMEM;
+               goto free_rx_tx_urb;
        }
 
-       return true;
+       return 0;
 
+free_rx_tx_urb:
+       usb_free_urb(priv->interrupt_urb);
 free_rx_tx:
        vnt_free_rx_bufs(priv);
-
 free_tx:
        vnt_free_tx_bufs(priv);
-
-       return false;
+       return ret;
 }
 
 static void vnt_tx_80211(struct ieee80211_hw *hw,
index 649caae2b60337bfa8f339c70bad87a401d498dd..25798119426b383505d056be2a3915c6163d6d02 100644 (file)
@@ -649,17 +649,17 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
                        goto fail_locks;
                }
 
-               if (wl->gpio_irq && init_irq(dev)) {
-                       ret = -EIO;
-                       goto fail_locks;
-               }
-
                ret = wlan_initialize_threads(dev);
                if (ret < 0) {
                        ret = -EIO;
                        goto fail_wilc_wlan;
                }
 
+               if (wl->gpio_irq && init_irq(dev)) {
+                       ret = -EIO;
+                       goto fail_threads;
+               }
+
                if (!wl->dev_irq_num &&
                    wl->hif_func->enable_interrupt &&
                    wl->hif_func->enable_interrupt(wl)) {
@@ -715,7 +715,7 @@ fail_irq_enable:
 fail_irq_init:
                if (wl->dev_irq_num)
                        deinit_irq(dev);
-
+fail_threads:
                wlan_deinitialize_threads(dev);
 fail_wilc_wlan:
                wilc_wlan_cleanup(dev);
index d4cf09b11e3324503e1323bf6eb07191b78926f1..095df245ced5ab9b3b2b30df3cf81a8ce784b489 100644 (file)
@@ -476,10 +476,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
        /* Set the encryption - we only support wep */
        if (is_wep) {
                if (sme->key) {
-                       if (sme->key_idx >= NUM_WEPKEYS) {
-                               err = -EINVAL;
-                               goto exit;
-                       }
+                       if (sme->key_idx >= NUM_WEPKEYS)
+                               return -EINVAL;
 
                        result = prism2_domibset_uint32(wlandev,
                                DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
index 4e680d753941f71ea299d87b6c0cd18fc307b50f..e2fa3a3bc81dffff1af904a6b9c612ee37fe2d3a 100644 (file)
@@ -89,6 +89,12 @@ out:
        return CHAP_DIGEST_UNKNOWN;
 }
 
+static void chap_close(struct iscsi_conn *conn)
+{
+       kfree(conn->auth_protocol);
+       conn->auth_protocol = NULL;
+}
+
 static struct iscsi_chap *chap_server_open(
        struct iscsi_conn *conn,
        struct iscsi_node_auth *auth,
@@ -126,7 +132,7 @@ static struct iscsi_chap *chap_server_open(
        case CHAP_DIGEST_UNKNOWN:
        default:
                pr_err("Unsupported CHAP_A value\n");
-               kfree(conn->auth_protocol);
+               chap_close(conn);
                return NULL;
        }
 
@@ -141,19 +147,13 @@ static struct iscsi_chap *chap_server_open(
         * Generate Challenge.
         */
        if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) {
-               kfree(conn->auth_protocol);
+               chap_close(conn);
                return NULL;
        }
 
        return chap;
 }
 
-static void chap_close(struct iscsi_conn *conn)
-{
-       kfree(conn->auth_protocol);
-       conn->auth_protocol = NULL;
-}
-
 static int chap_server_compute_md5(
        struct iscsi_conn *conn,
        struct iscsi_node_auth *auth,
index ce1321a5cb7bfe28320baf0fe64641eae1477f6a..854b2bcca7c1a46f213d9d8bd31aa4fb68028626 100644 (file)
@@ -514,8 +514,8 @@ iblock_execute_write_same(struct se_cmd *cmd)
                }
 
                /* Always in 512 byte units for Linux/Block */
-               block_lba += sg->length >> IBLOCK_LBA_SHIFT;
-               sectors -= 1;
+               block_lba += sg->length >> SECTOR_SHIFT;
+               sectors -= sg->length >> SECTOR_SHIFT;
        }
 
        iblock_submit_bios(&list);
@@ -757,7 +757,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                }
 
                /* Always in 512 byte units for Linux/Block */
-               block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+               block_lba += sg->length >> SECTOR_SHIFT;
                sg_num--;
        }
 
index 9cc3843404d44cf08a60d1e6e12fc006061ca743..cefc641145b3be44f6aaa76e2627820bb9a61208 100644 (file)
@@ -9,7 +9,6 @@
 #define IBLOCK_VERSION         "4.0"
 
 #define IBLOCK_MAX_CDBS                16
-#define IBLOCK_LBA_SHIFT       9
 
 struct iblock_req {
        refcount_t pending;
index c46efa47d68a537edbc5a4a7ac9acb9e006730dc..7159e8363b83b0ef624542237474e7330703b415 100644 (file)
@@ -1143,14 +1143,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
        struct se_cmd *se_cmd = cmd->se_cmd;
        struct tcmu_dev *udev = cmd->tcmu_dev;
        bool read_len_valid = false;
-       uint32_t read_len = se_cmd->data_length;
+       uint32_t read_len;
 
        /*
         * cmd has been completed already from timeout, just reclaim
         * data area space and free cmd
         */
-       if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+       if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+               WARN_ON_ONCE(se_cmd);
                goto out;
+       }
 
        list_del_init(&cmd->queue_entry);
 
@@ -1163,6 +1165,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                goto done;
        }
 
+       read_len = se_cmd->data_length;
        if (se_cmd->data_direction == DMA_FROM_DEVICE &&
            (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
                read_len_valid = true;
@@ -1318,6 +1321,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
                 */
                scsi_status = SAM_STAT_CHECK_CONDITION;
                list_del_init(&cmd->queue_entry);
+               cmd->se_cmd = NULL;
        } else {
                list_del_init(&cmd->queue_entry);
                idr_remove(&udev->commands, id);
@@ -2036,6 +2040,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
 
                idr_remove(&udev->commands, i);
                if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+                       WARN_ON(!cmd->se_cmd);
                        list_del_init(&cmd->queue_entry);
                        if (err_level == 1) {
                                /*
index bf9721fc2824ed1edd7494ad989fab4637862622..be3eafc7682ba81daf95e62e7891f56df05a9c8c 100644 (file)
@@ -296,7 +296,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
                mod_delayed_work(system_freezable_wq, &tz->poll_queue,
                                 msecs_to_jiffies(delay));
        else
-               cancel_delayed_work(&tz->poll_queue);
+               cancel_delayed_work_sync(&tz->poll_queue);
 }
 
 static void monitor_thermal_zone(struct thermal_zone_device *tz)
index 40c69a533b240787c2dfa7a9328aecb05ed24bbf..dd5d8ee3792870fccbff4c5ab0b7fb251d007612 100644 (file)
@@ -87,13 +87,17 @@ static struct thermal_hwmon_device *
 thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
 {
        struct thermal_hwmon_device *hwmon;
+       char type[THERMAL_NAME_LENGTH];
 
        mutex_lock(&thermal_hwmon_list_lock);
-       list_for_each_entry(hwmon, &thermal_hwmon_list, node)
-               if (!strcmp(hwmon->type, tz->type)) {
+       list_for_each_entry(hwmon, &thermal_hwmon_list, node) {
+               strcpy(type, tz->type);
+               strreplace(type, '-', '_');
+               if (!strcmp(hwmon->type, type)) {
                        mutex_unlock(&thermal_hwmon_list_lock);
                        return hwmon;
                }
+       }
        mutex_unlock(&thermal_hwmon_list_lock);
 
        return NULL;
index e26d87b6ffc53e4d83a69be455272046b05a0562..aa4de6907f771a14d067358a1578523f29bd7dbc 100644 (file)
@@ -1874,7 +1874,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
                        status = serial8250_rx_chars(up, status);
        }
        serial8250_modem_status(up);
-       if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE))
+       if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE) &&
+               (up->ier & UART_IER_THRI))
                serial8250_tx_chars(up);
 
        spin_unlock_irqrestore(&port->lock, flags);
index 1cb80fe5f95c060540c4ffe030a1fced78ee2ac0..dd8949e8fcd7aa7a07d1d59273bcb962148baae5 100644 (file)
@@ -1270,7 +1270,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
 
                        atmel_port->hd_start_rx = false;
                        atmel_start_rx(port);
-                       return;
                }
 
                atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
index e5389591bb4f1f83a207ee5be7e3577648972599..ad40c75bb58f84d5aceba4ca5340f673b16e0a75 100644 (file)
@@ -407,7 +407,16 @@ static int cpm_uart_startup(struct uart_port *port)
                        clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX);
                }
                cpm_uart_initbd(pinfo);
-               cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+               if (IS_SMC(pinfo)) {
+                       out_be32(&pinfo->smcup->smc_rstate, 0);
+                       out_be32(&pinfo->smcup->smc_tstate, 0);
+                       out_be16(&pinfo->smcup->smc_rbptr,
+                                in_be16(&pinfo->smcup->smc_rbase));
+                       out_be16(&pinfo->smcup->smc_tbptr,
+                                in_be16(&pinfo->smcup->smc_tbase));
+               } else {
+                       cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+               }
        }
        /* Install interrupt handler. */
        retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port);
@@ -861,16 +870,14 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
                 (u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE);
 
 /*
- *  In case SMC1 is being relocated...
+ *  In case SMC is being relocated...
  */
-#if defined (CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
        out_be16(&up->smc_rbptr, in_be16(&pinfo->smcup->smc_rbase));
        out_be16(&up->smc_tbptr, in_be16(&pinfo->smcup->smc_tbase));
        out_be32(&up->smc_rstate, 0);
        out_be32(&up->smc_tstate, 0);
        out_be16(&up->smc_brkcr, 1);              /* number of break chars */
        out_be16(&up->smc_brkec, 0);
-#endif
 
        /* Set up the uart parameters in the
         * parameter ram.
@@ -884,8 +891,6 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
        out_be16(&up->smc_brkec, 0);
        out_be16(&up->smc_brkcr, 1);
 
-       cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
-
        /* Set UART mode, 8 bit, no parity, one stop.
         * Enable receive and transmit.
         */
index f460cca139e239c066b9aacaaf8f0c22500d7ade..13ac36e2da4f0f2ea36e48c7619f369dde3239bd 100644 (file)
@@ -541,7 +541,11 @@ static int __init digicolor_uart_init(void)
        if (ret)
                return ret;
 
-       return platform_driver_register(&digicolor_uart_platform);
+       ret = platform_driver_register(&digicolor_uart_platform);
+       if (ret)
+               uart_unregister_driver(&digicolor_uart);
+
+       return ret;
 }
 module_init(digicolor_uart_init);
 
index 0f67197a3783ffdd62eccd3b41a813a80d66b6df..105de92b0b3bfa5f48b04b773ecaa1154a51a5c4 100644 (file)
@@ -382,6 +382,7 @@ static void imx_uart_ucrs_restore(struct imx_port *sport,
 }
 #endif
 
+/* called with port.lock taken and irqs caller dependent */
 static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
 {
        *ucr2 &= ~(UCR2_CTSC | UCR2_CTS);
@@ -390,6 +391,7 @@ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
        mctrl_gpio_set(sport->gpios, sport->port.mctrl);
 }
 
+/* called with port.lock taken and irqs caller dependent */
 static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
 {
        *ucr2 &= ~UCR2_CTSC;
@@ -399,6 +401,7 @@ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
        mctrl_gpio_set(sport->gpios, sport->port.mctrl);
 }
 
+/* called with port.lock taken and irqs caller dependent */
 static void imx_uart_rts_auto(struct imx_port *sport, u32 *ucr2)
 {
        *ucr2 |= UCR2_CTSC;
@@ -1554,6 +1557,16 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
                old_csize = CS8;
        }
 
+       del_timer_sync(&sport->timer);
+
+       /*
+        * Ask the core to calculate the divisor for us.
+        */
+       baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+       quot = uart_get_divisor(port, baud);
+
+       spin_lock_irqsave(&sport->port.lock, flags);
+
        if ((termios->c_cflag & CSIZE) == CS8)
                ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
        else
@@ -1597,16 +1610,6 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
                        ucr2 |= UCR2_PROE;
        }
 
-       del_timer_sync(&sport->timer);
-
-       /*
-        * Ask the core to calculate the divisor for us.
-        */
-       baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
-       quot = uart_get_divisor(port, baud);
-
-       spin_lock_irqsave(&sport->port.lock, flags);
-
        sport->port.read_status_mask = 0;
        if (termios->c_iflag & INPCK)
                sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
index 38c48a02b9206eb8748a954ef2962154e5750f7b..bd3e6cf81af5cfb61d82d70e13dd59015a623aee 100644 (file)
@@ -491,37 +491,48 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
 
 static int max310x_set_baud(struct uart_port *port, int baud)
 {
-       unsigned int mode = 0, clk = port->uartclk, div = clk / baud;
+       unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0;
 
-       /* Check for minimal value for divider */
-       if (div < 16)
-               div = 16;
-
-       if (clk % baud && (div / 16) < 0x8000) {
+       /*
+        * Calculate the integer divisor first. Select a proper mode
+        * in case if the requested baud is too high for the pre-defined
+        * clocks frequency.
+        */
+       div = port->uartclk / baud;
+       if (div < 8) {
+               /* Mode x4 */
+               c = 4;
+               mode = MAX310X_BRGCFG_4XMODE_BIT;
+       } else if (div < 16) {
                /* Mode x2 */
+               c = 8;
                mode = MAX310X_BRGCFG_2XMODE_BIT;
-               clk = port->uartclk * 2;
-               div = clk / baud;
-
-               if (clk % baud && (div / 16) < 0x8000) {
-                       /* Mode x4 */
-                       mode = MAX310X_BRGCFG_4XMODE_BIT;
-                       clk = port->uartclk * 4;
-                       div = clk / baud;
-               }
+       } else {
+               c = 16;
        }
 
-       max310x_port_write(port, MAX310X_BRGDIVMSB_REG, (div / 16) >> 8);
-       max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div / 16);
-       max310x_port_write(port, MAX310X_BRGCFG_REG, (div % 16) | mode);
+       /* Calculate the divisor in accordance with the fraction coefficient */
+       div /= c;
+       F = c*baud;
+
+       /* Calculate the baud rate fraction */
+       if (div > 0)
+               frac = (16*(port->uartclk % F)) / F;
+       else
+               div = 1;
+
+       max310x_port_write(port, MAX310X_BRGDIVMSB_REG, div >> 8);
+       max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div);
+       max310x_port_write(port, MAX310X_BRGCFG_REG, frac | mode);
 
-       return DIV_ROUND_CLOSEST(clk, div);
+       /* Return the actual baud rate we just programmed */
+       return (16*port->uartclk) / (c*(16*div + frac));
 }
 
 static int max310x_update_best_err(unsigned long f, long *besterr)
 {
        /* Use baudrate 115200 for calculate error */
-       long err = f % (115200 * 16);
+       long err = f % (460800 * 16);
 
        if ((*besterr < 0) || (*besterr > err)) {
                *besterr = err;
index 0f41b936da03202cefdec84117dda5662cf5c0a3..310bbae515b048ce574397798dabc32a2f119318 100644 (file)
@@ -383,10 +383,14 @@ no_rx:
 
 static inline void msm_wait_for_xmitr(struct uart_port *port)
 {
+       unsigned int timeout = 500000;
+
        while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
                if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
                        break;
                udelay(1);
+               if (!timeout--)
+                       break;
        }
        msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
 }
index f9bcc2f0e244cd7d721b8fea9f799e4d17f599b4..8c9f1f054a77c7bcf9ea8b9a8c87acc26f0a45d6 100644 (file)
@@ -580,7 +580,7 @@ static int psuart_init_pruss(struct device_node *np, struct pru_suart *pu)
                return ret;
        }
 
-       pu->pruss = pruss_get(pu->pru);
+       pu->pruss = pruss_get(pu->pru, NULL);
        if (IS_ERR(pu->pruss)) {
                ret = PTR_ERR(pu->pruss);
                dev_err(pu->dev, "failed to get pruss handle (%d)\n", ret);
index 8dbeb14a1e3ab8d82f09e637b864be43681d4c96..fe9261ffe3dbcfd96703530d6f90e2210b3a7554 100644 (file)
@@ -1738,6 +1738,7 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
 {
        struct uart_state *state = container_of(port, struct uart_state, port);
        struct uart_port *uport;
+       int ret;
 
        uport = uart_port_check(state);
        if (!uport || uport->flags & UPF_DEAD)
@@ -1748,7 +1749,11 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
        /*
         * Start up the serial port.
         */
-       return uart_startup(tty, state, 0);
+       ret = uart_startup(tty, state, 0);
+       if (ret > 0)
+               tty_port_set_active(port, 1);
+
+       return ret;
 }
 
 static const char *uart_type(struct uart_port *port)
index 1c06325beacaeb3a6e19011385fcc379ef16ce8e..07f318603e7401c1b0420fc937a1f66f08afc448 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/termios.h>
 #include <linux/serial_core.h>
 #include <linux/module.h>
+#include <linux/property.h>
 
 #include "serial_mctrl_gpio.h"
 
@@ -115,6 +116,19 @@ struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx)
 
        for (i = 0; i < UART_GPIO_MAX; i++) {
                enum gpiod_flags flags;
+               char *gpio_str;
+               bool present;
+
+               /* Check if GPIO property exists and continue if not */
+               gpio_str = kasprintf(GFP_KERNEL, "%s-gpios",
+                                    mctrl_gpios_desc[i].name);
+               if (!gpio_str)
+                       continue;
+
+               present = device_property_present(dev, gpio_str);
+               kfree(gpio_str);
+               if (!present)
+                       continue;
 
                if (mctrl_gpios_desc[i].dir_out)
                        flags = GPIOD_OUT_LOW;
index 040832635a64904e98ec2960e99b074760c487b8..5550289e6678b87e71cdb8bca2930c719c27f94e 100644 (file)
@@ -1376,6 +1376,7 @@ static void work_fn_tx(struct work_struct *work)
        struct circ_buf *xmit = &port->state->xmit;
        unsigned long flags;
        dma_addr_t buf;
+       int head, tail;
 
        /*
         * DMA is idle now.
@@ -1385,16 +1386,23 @@ static void work_fn_tx(struct work_struct *work)
         * consistent xmit buffer state.
         */
        spin_lock_irq(&port->lock);
-       buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
+       head = xmit->head;
+       tail = xmit->tail;
+       buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
        s->tx_dma_len = min_t(unsigned int,
-               CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
-               CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
-       spin_unlock_irq(&port->lock);
+               CIRC_CNT(head, tail, UART_XMIT_SIZE),
+               CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
+       if (!s->tx_dma_len) {
+               /* Transmit buffer has been flushed */
+               spin_unlock_irq(&port->lock);
+               return;
+       }
 
        desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
                                           DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
+               spin_unlock_irq(&port->lock);
                dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
                goto switch_to_pio;
        }
@@ -1402,18 +1410,18 @@ static void work_fn_tx(struct work_struct *work)
        dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
                                   DMA_TO_DEVICE);
 
-       spin_lock_irq(&port->lock);
        desc->callback = sci_dma_tx_complete;
        desc->callback_param = s;
-       spin_unlock_irq(&port->lock);
        s->cookie_tx = dmaengine_submit(desc);
        if (dma_submit_error(s->cookie_tx)) {
+               spin_unlock_irq(&port->lock);
                dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
                goto switch_to_pio;
        }
 
+       spin_unlock_irq(&port->lock);
        dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
-               __func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
+               __func__, xmit->buf, tail, head, s->cookie_tx);
 
        dma_async_issue_pending(chan);
        return;
@@ -1633,11 +1641,18 @@ static void sci_free_dma(struct uart_port *port)
 
 static void sci_flush_buffer(struct uart_port *port)
 {
+       struct sci_port *s = to_sci_port(port);
+
        /*
         * In uart_flush_buffer(), the xmit circular buffer has just been
-        * cleared, so we have to reset tx_dma_len accordingly.
+        * cleared, so we have to reset tx_dma_len accordingly, and stop any
+        * pending transfers
         */
-       to_sci_port(port)->tx_dma_len = 0;
+       s->tx_dma_len = 0;
+       if (s->chan_tx) {
+               dmaengine_terminate_async(s->chan_tx);
+               s->cookie_tx = -EINVAL;
+       }
 }
 #else /* !CONFIG_SERIAL_SH_SCI_DMA */
 static inline void sci_request_dma(struct uart_port *port)
index 828f1143859c8010f9916fada4958e229ce4e555..2774af86763e05c8ff217f82334e6e20548b31e0 100644 (file)
@@ -232,7 +232,7 @@ static inline void sprd_rx(struct uart_port *port)
 
                if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
                        SPRD_LSR_FE | SPRD_LSR_OE))
-                       if (handle_lsr_errors(port, &lsr, &flag))
+                       if (handle_lsr_errors(port, &flag, &lsr))
                                continue;
                if (uart_handle_sysrq_char(port, ch))
                        continue;
index 98d3eadd2fd0380e467b90f3340020ca1f3ccdd6..8df30582266877106c9fc61b93c031598214d92c 100644 (file)
@@ -837,7 +837,8 @@ err_uart:
 static void __exit ulite_exit(void)
 {
        platform_driver_unregister(&ulite_platform_driver);
-       uart_unregister_driver(&ulite_uart_driver);
+       if (ulite_uart_driver.state)
+               uart_unregister_driver(&ulite_uart_driver);
 }
 
 module_init(ulite_init);
index b989ca26fc78858ea27e3b1c17b4a39c0dab23d5..2f0372976459eb771c830442efe94d24c70ba489 100644 (file)
@@ -116,8 +116,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
 
        list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
                tsk = waiter->task;
-               smp_mb();
-               waiter->task = NULL;
+               smp_store_release(&waiter->task, NULL);
                wake_up_process(tsk);
                put_task_struct(tsk);
        }
@@ -217,7 +216,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
 
-               if (!waiter.task)
+               if (!smp_load_acquire(&waiter.task))
                        break;
                if (!timeout)
                        break;
index 64076343e91859dae75236a86f7db15ffab0690c..3f139b9326afcaba90561daf8f3bf461a5106d92 100644 (file)
@@ -2668,6 +2668,13 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
        struct cdns3_device *priv_dev;
        int ret = 0;
 
+       /* Restore 32-bit DMA Mask in case we switched from Host mode */
+       ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
+       if (ret) {
+               dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
+               return ret;
+       }
+
        cdns3_drd_switch_gadget(cdns, 1);
        pm_runtime_get_sync(cdns->dev);
 
index cc7c856126df5a792d39804d7f13ff93593c65c4..169ccfacfc7550b145d49adcd97bf121bb093258 100644 (file)
@@ -708,12 +708,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
        struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
        unsigned long flags;
 
-       spin_lock_irqsave(&ci->lock, flags);
-       ci->gadget.speed = USB_SPEED_UNKNOWN;
-       ci->remote_wakeup = 0;
-       ci->suspended = 0;
-       spin_unlock_irqrestore(&ci->lock, flags);
-
        /* flush all endpoints */
        gadget_for_each_ep(ep, gadget) {
                usb_ep_fifo_flush(ep);
@@ -731,6 +725,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
                ci->status = NULL;
        }
 
+       spin_lock_irqsave(&ci->lock, flags);
+       ci->gadget.speed = USB_SPEED_UNKNOWN;
+       ci->remote_wakeup = 0;
+       ci->suspended = 0;
+       spin_unlock_irqrestore(&ci->lock, flags);
+
        return 0;
 }
 
@@ -1302,6 +1302,10 @@ static int ep_disable(struct usb_ep *ep)
                return -EBUSY;
 
        spin_lock_irqsave(hwep->lock, flags);
+       if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+               spin_unlock_irqrestore(hwep->lock, flags);
+               return 0;
+       }
 
        /* only internal SW should disable ctrl endpts */
 
@@ -1391,6 +1395,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
                return -EINVAL;
 
        spin_lock_irqsave(hwep->lock, flags);
+       if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+               spin_unlock_irqrestore(hwep->lock, flags);
+               return 0;
+       }
        retval = _ep_queue(ep, req, gfp_flags);
        spin_unlock_irqrestore(hwep->lock, flags);
        return retval;
@@ -1414,8 +1422,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
                return -EINVAL;
 
        spin_lock_irqsave(hwep->lock, flags);
-
-       hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+       if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
+               hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
        list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
                dma_pool_free(hwep->td_pool, node->ptr, node->dma);
@@ -1486,6 +1494,10 @@ static void ep_fifo_flush(struct usb_ep *ep)
        }
 
        spin_lock_irqsave(hwep->lock, flags);
+       if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+               spin_unlock_irqrestore(hwep->lock, flags);
+               return;
+       }
 
        hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
@@ -1558,6 +1570,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget)
        int ret = 0;
 
        spin_lock_irqsave(&ci->lock, flags);
+       if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
+               spin_unlock_irqrestore(&ci->lock, flags);
+               return 0;
+       }
        if (!ci->remote_wakeup) {
                ret = -EOPNOTSUPP;
                goto out;
index 5b442bc68a767736438e273507925d5be2140276..59675cc7aa017e0bcce341a5367f2d551608ccc0 100644 (file)
@@ -1333,10 +1333,6 @@ made_compressed_probe:
        tty_port_init(&acm->port);
        acm->port.ops = &acm_port_ops;
 
-       minor = acm_alloc_minor(acm);
-       if (minor < 0)
-               goto alloc_fail1;
-
        ctrlsize = usb_endpoint_maxp(epctrl);
        readsize = usb_endpoint_maxp(epread) *
                                (quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1344,6 +1340,13 @@ made_compressed_probe:
        acm->writesize = usb_endpoint_maxp(epwrite) * 20;
        acm->control = control_interface;
        acm->data = data_interface;
+
+       usb_get_intf(acm->control); /* undone in destruct() */
+
+       minor = acm_alloc_minor(acm);
+       if (minor < 0)
+               goto alloc_fail1;
+
        acm->minor = minor;
        acm->dev = usb_dev;
        if (h.usb_cdc_acm_descriptor)
@@ -1490,7 +1493,6 @@ skip_countries:
        usb_driver_claim_interface(&acm_driver, data_interface, acm);
        usb_set_intfdata(data_interface, acm);
 
-       usb_get_intf(control_interface);
        tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
                        &control_interface->dev);
        if (IS_ERR(tty_dev)) {
index bec581fb7c6361891a81a3a0aa88d3863b686f58..b8a1fdefb515039f2f9dd2f63616900eff5d34c6 100644 (file)
@@ -587,10 +587,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
 {
        struct wdm_device *desc = file->private_data;
 
-       wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
+       wait_event(desc->wait,
+                       /*
+                        * needs both flags. We cannot do with one
+                        * because resetting it would cause a race
+                        * with write() yet we need to signal
+                        * a disconnect
+                        */
+                       !test_bit(WDM_IN_USE, &desc->flags) ||
+                       test_bit(WDM_DISCONNECTING, &desc->flags));
 
        /* cannot dereference desc->intf if WDM_DISCONNECTING */
-       if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
+       if (test_bit(WDM_DISCONNECTING, &desc->flags))
+               return -ENODEV;
+       if (desc->werr < 0)
                dev_err(&desc->intf->dev, "Error in flush path: %d\n",
                        desc->werr);
 
@@ -974,8 +984,6 @@ static void wdm_disconnect(struct usb_interface *intf)
        spin_lock_irqsave(&desc->iuspin, flags);
        set_bit(WDM_DISCONNECTING, &desc->flags);
        set_bit(WDM_READ, &desc->flags);
-       /* to terminate pending flushes */
-       clear_bit(WDM_IN_USE, &desc->flags);
        spin_unlock_irqrestore(&desc->iuspin, flags);
        wake_up_all(&desc->wait);
        mutex_lock(&desc->rlock);
index 407a7a6198a25ba3bb3d078bf53adab23b5e3a34..4a80103675d5961f9607a0516fc2a76a4cf8c91f 100644 (file)
@@ -445,6 +445,7 @@ static void usblp_cleanup(struct usblp *usblp)
        kfree(usblp->readbuf);
        kfree(usblp->device_id_string);
        kfree(usblp->statusbuf);
+       usb_put_intf(usblp->intf);
        kfree(usblp);
 }
 
@@ -461,10 +462,12 @@ static int usblp_release(struct inode *inode, struct file *file)
 
        mutex_lock(&usblp_mutex);
        usblp->used = 0;
-       if (usblp->present) {
+       if (usblp->present)
                usblp_unlink_urbs(usblp);
-               usb_autopm_put_interface(usblp->intf);
-       } else          /* finish cleanup from disconnect */
+
+       usb_autopm_put_interface(usblp->intf);
+
+       if (!usblp->present)            /* finish cleanup from disconnect */
                usblp_cleanup(usblp);
        mutex_unlock(&usblp_mutex);
        return 0;
@@ -1105,7 +1108,7 @@ static int usblp_probe(struct usb_interface *intf,
        init_waitqueue_head(&usblp->wwait);
        init_usb_anchor(&usblp->urbs);
        usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-       usblp->intf = intf;
+       usblp->intf = usb_get_intf(intf);
 
        /* Malloc device ID string buffer to the largest expected length,
         * since we can re-query it on an ioctl and a dynamic string
@@ -1194,6 +1197,7 @@ abort:
        kfree(usblp->readbuf);
        kfree(usblp->statusbuf);
        kfree(usblp->device_id_string);
+       usb_put_intf(usblp->intf);
        kfree(usblp);
 abort_ret:
        return retval;
index e723ddd79bcf96519538b1e8e0c315d0715bf08e..921ad6998dec73cd47f8400ca7fd3d825441147c 100644 (file)
@@ -925,7 +925,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
        struct usb_bos_descriptor *bos;
        struct usb_dev_cap_header *cap;
        struct usb_ssp_cap_descriptor *ssp_cap;
-       unsigned char *buffer;
+       unsigned char *buffer, *buffer0;
        int length, total_len, num, i, ssac;
        __u8 cap_type;
        int ret;
@@ -970,10 +970,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                        ret = -ENOMSG;
                goto err;
        }
+
+       buffer0 = buffer;
        total_len -= length;
+       buffer += length;
 
        for (i = 0; i < num; i++) {
-               buffer += length;
                cap = (struct usb_dev_cap_header *)buffer;
 
                if (total_len < sizeof(*cap) || total_len < cap->bLength) {
@@ -987,8 +989,6 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                        break;
                }
 
-               total_len -= length;
-
                if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
                        dev_warn(ddev, "descriptor type invalid, skip\n");
                        continue;
@@ -1023,7 +1023,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
                default:
                        break;
                }
+
+               total_len -= length;
+               buffer += length;
        }
+       dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
 
        return 0;
 
index ffccd40ea67da4c5d70a96ec057d4cc32cb04ed4..29c6414f48f1399f9af1e5c5b1cef045adae34d8 100644 (file)
@@ -1792,8 +1792,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        return 0;
 
  error:
-       if (as && as->usbm)
-               dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
        kfree(isopkt);
        kfree(dr);
        if (as)
index 65de6f73b6725200d8ec5cb2301b7a557cdf8a9c..558890ada0e5bdcf8c573af0cc5be59c52c99ef1 100644 (file)
@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
                intf->minor = minor;
                break;
        }
-       up_write(&minor_rwsem);
-       if (intf->minor < 0)
+       if (intf->minor < 0) {
+               up_write(&minor_rwsem);
                return -EXFULL;
+       }
 
        /* create a usb class device for this usb interface */
        snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
                                      MKDEV(USB_MAJOR, minor), class_driver,
                                      "%s", kbasename(name));
        if (IS_ERR(intf->usb_dev)) {
-               down_write(&minor_rwsem);
                usb_minors[minor] = NULL;
                intf->minor = -1;
-               up_write(&minor_rwsem);
                retval = PTR_ERR(intf->usb_dev);
        }
+       up_write(&minor_rwsem);
        return retval;
 }
 EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
                return;
 
        dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
+       device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
 
        down_write(&minor_rwsem);
        usb_minors[intf->minor] = NULL;
        up_write(&minor_rwsem);
 
-       device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
        intf->usb_dev = NULL;
        intf->minor = -1;
        destroy_usb_class();
index 03432467b05fb12810d7cac77d954a11ed0a002a..7537681355f6799c00b73fa6062565257e37d8e3 100644 (file)
@@ -216,17 +216,18 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                /* EHCI, OHCI */
                hcd->rsrc_start = pci_resource_start(dev, 0);
                hcd->rsrc_len = pci_resource_len(dev, 0);
-               if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
-                               driver->description)) {
+               if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start,
+                               hcd->rsrc_len, driver->description)) {
                        dev_dbg(&dev->dev, "controller already in use\n");
                        retval = -EBUSY;
                        goto put_hcd;
                }
-               hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+               hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
+                               hcd->rsrc_len);
                if (hcd->regs == NULL) {
                        dev_dbg(&dev->dev, "error mapping memory\n");
                        retval = -EFAULT;
-                       goto release_mem_region;
+                       goto put_hcd;
                }
 
        } else {
@@ -240,8 +241,8 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
                        hcd->rsrc_start = pci_resource_start(dev, region);
                        hcd->rsrc_len = pci_resource_len(dev, region);
-                       if (request_region(hcd->rsrc_start, hcd->rsrc_len,
-                                       driver->description))
+                       if (devm_request_region(&dev->dev, hcd->rsrc_start,
+                                       hcd->rsrc_len, driver->description))
                                break;
                }
                if (region == PCI_ROM_RESOURCE) {
@@ -275,20 +276,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
        }
 
        if (retval != 0)
-               goto unmap_registers;
+               goto put_hcd;
        device_wakeup_enable(hcd->self.controller);
 
        if (pci_dev_run_wake(dev))
                pm_runtime_put_noidle(&dev->dev);
        return retval;
 
-unmap_registers:
-       if (driver->flags & HCD_MEMORY) {
-               iounmap(hcd->regs);
-release_mem_region:
-               release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-       } else
-               release_region(hcd->rsrc_start, hcd->rsrc_len);
 put_hcd:
        usb_put_hcd(hcd);
 disable_pci:
@@ -347,14 +341,6 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
                dev_set_drvdata(&dev->dev, NULL);
                up_read(&companions_rwsem);
        }
-
-       if (hcd->driver->flags & HCD_MEMORY) {
-               iounmap(hcd->regs);
-               release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-       } else {
-               release_region(hcd->rsrc_start, hcd->rsrc_len);
-       }
-
        usb_put_hcd(hcd);
        pci_disable_device(dev);
 }
index eb24ec0e160d40de2802be4e31f3dd03cd449753..8018f813972e0c75d480f4c43275bd84fb6071f6 100644 (file)
@@ -3575,6 +3575,7 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
        struct usb_device *hdev;
        struct usb_device *udev;
        int connect_change = 0;
+       u16 link_state;
        int ret;
 
        hdev = hub->hdev;
@@ -3584,9 +3585,11 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
                        return 0;
                usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND);
        } else {
+               link_state = portstatus & USB_PORT_STAT_LINK_STATE;
                if (!udev || udev->state != USB_STATE_SUSPENDED ||
-                                (portstatus & USB_PORT_STAT_LINK_STATE) !=
-                                USB_SS_PORT_LS_U0)
+                               (link_state != USB_SS_PORT_LS_U0 &&
+                                link_state != USB_SS_PORT_LS_U1 &&
+                                link_state != USB_SS_PORT_LS_U2))
                        return 0;
        }
 
@@ -3958,6 +3961,9 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
  * control transfers to set the hub timeout or enable device-initiated U1/U2
  * will be successful.
  *
+ * If the control transfer to enable device-initiated U1/U2 entry fails, then
+ * hub-initiated U1/U2 will be disabled.
+ *
  * If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI
  * driver know about it.  If that call fails, it should be harmless, and just
  * take up more slightly more bus bandwidth for unnecessary U1/U2 exit latency.
@@ -4012,23 +4018,24 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
                 * host know that this link state won't be enabled.
                 */
                hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
-       } else {
-               /* Only a configured device will accept the Set Feature
-                * U1/U2_ENABLE
-                */
-               if (udev->actconfig)
-                       usb_set_device_initiated_lpm(udev, state, true);
+               return;
+       }
 
-               /* As soon as usb_set_lpm_timeout(timeout) returns 0, the
-                * hub-initiated LPM is enabled. Thus, LPM is enabled no
-                * matter the result of usb_set_device_initiated_lpm().
-                * The only difference is whether device is able to initiate
-                * LPM.
-                */
+       /* Only a configured device will accept the Set Feature
+        * U1/U2_ENABLE
+        */
+       if (udev->actconfig &&
+           usb_set_device_initiated_lpm(udev, state, true) == 0) {
                if (state == USB3_LPM_U1)
                        udev->usb3_lpm_u1_enabled = 1;
                else if (state == USB3_LPM_U2)
                        udev->usb3_lpm_u2_enabled = 1;
+       } else {
+               /* Don't request U1/U2 entry if the device
+                * cannot transition to U1/U2.
+                */
+               usb_set_lpm_timeout(udev, state, 0);
+               hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
        }
 }
 
index 4020ce8db6ce56866b0ab4f4b4ddcf8caef10051..0d3fd208316569bb28095881f2830480592f54e3 100644 (file)
@@ -2211,14 +2211,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
                                (struct usb_cdc_dmm_desc *)buffer;
                        break;
                case USB_CDC_MDLM_TYPE:
-                       if (elength < sizeof(struct usb_cdc_mdlm_desc *))
+                       if (elength < sizeof(struct usb_cdc_mdlm_desc))
                                goto next_desc;
                        if (desc)
                                return -EINVAL;
                        desc = (struct usb_cdc_mdlm_desc *)buffer;
                        break;
                case USB_CDC_MDLM_DETAIL_TYPE:
-                       if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
+                       if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
                                goto next_desc;
                        if (detail)
                                return -EINVAL;
index 03614ef64ca47948b09589b9423752365477e2bd..3f68edde0f03a613fee93cbbcba9c15c3ab7261b 100644 (file)
@@ -3125,6 +3125,7 @@ void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
        hsotg->connected = 0;
        hsotg->test_mode = 0;
 
+       /* all endpoints should be shutdown */
        for (ep = 0; ep < hsotg->num_of_eps; ep++) {
                if (hsotg->eps_in[ep])
                        kill_all_requests(hsotg, hsotg->eps_in[ep],
@@ -3175,6 +3176,7 @@ static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
                        GINTSTS_PTXFEMP |  \
                        GINTSTS_RXFLVL)
 
+static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
 /**
  * dwc2_hsotg_core_init - issue softreset to the core
  * @hsotg: The device state
@@ -3189,13 +3191,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
        u32 val;
        u32 usbcfg;
        u32 dcfg = 0;
+       int ep;
 
        /* Kill any ep0 requests as controller will be reinitialized */
        kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
 
-       if (!is_usb_reset)
+       if (!is_usb_reset) {
                if (dwc2_core_reset(hsotg, true))
                        return;
+       } else {
+               /* all endpoints should be shutdown */
+               for (ep = 1; ep < hsotg->num_of_eps; ep++) {
+                       if (hsotg->eps_in[ep])
+                               dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
+                       if (hsotg->eps_out[ep])
+                               dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
+               }
+       }
 
        /*
         * we must now enable ep0 ready for host detection and then
@@ -3993,7 +4005,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
        struct dwc2_hsotg *hsotg = hs_ep->parent;
        int dir_in = hs_ep->dir_in;
        int index = hs_ep->index;
-       unsigned long flags;
        u32 epctrl_reg;
        u32 ctrl;
 
@@ -4011,8 +4022,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
 
        epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
 
-       spin_lock_irqsave(&hsotg->lock, flags);
-
        ctrl = dwc2_readl(hsotg, epctrl_reg);
 
        if (ctrl & DXEPCTL_EPENA)
@@ -4035,10 +4044,22 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
        hs_ep->fifo_index = 0;
        hs_ep->fifo_size = 0;
 
-       spin_unlock_irqrestore(&hsotg->lock, flags);
        return 0;
 }
 
+static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep)
+{
+       struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+       struct dwc2_hsotg *hsotg = hs_ep->parent;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&hsotg->lock, flags);
+       ret = dwc2_hsotg_ep_disable(ep);
+       spin_unlock_irqrestore(&hsotg->lock, flags);
+       return ret;
+}
+
 /**
  * on_list - check request is on the given endpoint
  * @ep: The endpoint to check.
@@ -4186,7 +4207,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
 
 static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
        .enable         = dwc2_hsotg_ep_enable,
-       .disable        = dwc2_hsotg_ep_disable,
+       .disable        = dwc2_hsotg_ep_disable_lock,
        .alloc_request  = dwc2_hsotg_ep_alloc_request,
        .free_request   = dwc2_hsotg_ep_free_request,
        .queue          = dwc2_hsotg_ep_queue_lock,
@@ -4326,9 +4347,9 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
        /* all endpoints should be shutdown */
        for (ep = 1; ep < hsotg->num_of_eps; ep++) {
                if (hsotg->eps_in[ep])
-                       dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
+                       dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
                if (hsotg->eps_out[ep])
-                       dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
+                       dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
        }
 
        spin_lock_irqsave(&hsotg->lock, flags);
@@ -4776,9 +4797,9 @@ int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
 
                for (ep = 0; ep < hsotg->num_of_eps; ep++) {
                        if (hsotg->eps_in[ep])
-                               dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
+                               dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
                        if (hsotg->eps_out[ep])
-                               dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
+                               dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
                }
        }
 
index b8a15840b4ffd574430cdc5d0e57e74a3c116242..dfcabadeed01bccfb86c546631d79e8fcc4ce253 100644 (file)
@@ -1976,6 +1976,7 @@ void composite_disconnect(struct usb_gadget *gadget)
         * disconnect callbacks?
         */
        spin_lock_irqsave(&cdev->lock, flags);
+       cdev->suspended = 0;
        if (cdev->config)
                reset_config(cdev);
        if (cdev->driver->disconnect)
index aa15593a3ac47696966ec4c6d41e761fcbfae1ec..2050993fb58b7448b1d5c7d9ac0cad5d93f0ac34 100644 (file)
@@ -1101,11 +1101,12 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
        ENTER();
 
        if (!is_sync_kiocb(kiocb)) {
-               p = kmalloc(sizeof(io_data), GFP_KERNEL);
+               p = kzalloc(sizeof(io_data), GFP_KERNEL);
                if (unlikely(!p))
                        return -ENOMEM;
                p->aio = true;
        } else {
+               memset(p, 0, sizeof(*p));
                p->aio = false;
        }
 
@@ -1137,11 +1138,12 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
        ENTER();
 
        if (!is_sync_kiocb(kiocb)) {
-               p = kmalloc(sizeof(io_data), GFP_KERNEL);
+               p = kzalloc(sizeof(io_data), GFP_KERNEL);
                if (unlikely(!p))
                        return -ENOMEM;
                p->aio = true;
        } else {
+               memset(p, 0, sizeof(*p));
                p->aio = false;
        }
 
index 1074cb82ec172d2ac464d72d9e52c9461715c868..0b7b4d09785b6e22be0d3a22d111bf9402e003ff 100644 (file)
@@ -261,7 +261,7 @@ struct fsg_common;
 struct fsg_common {
        struct usb_gadget       *gadget;
        struct usb_composite_dev *cdev;
-       struct fsg_dev          *fsg, *new_fsg;
+       struct fsg_dev          *fsg;
        wait_queue_head_t       io_wait;
        wait_queue_head_t       fsg_wait;
 
@@ -290,6 +290,7 @@ struct fsg_common {
        unsigned int            bulk_out_maxpacket;
        enum fsg_state          state;          /* For exception handling */
        unsigned int            exception_req_tag;
+       void                    *exception_arg;
 
        enum data_direction     data_dir;
        u32                     data_size;
@@ -391,7 +392,8 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
 
 /* These routines may be called in process context or in_irq */
 
-static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
+                             void *arg)
 {
        unsigned long           flags;
 
@@ -404,6 +406,7 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
        if (common->state <= new_state) {
                common->exception_req_tag = common->ep0_req_tag;
                common->state = new_state;
+               common->exception_arg = arg;
                if (common->thread_task)
                        send_sig_info(SIGUSR1, SEND_SIG_FORCED,
                                      common->thread_task);
@@ -411,6 +414,10 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
        spin_unlock_irqrestore(&common->lock, flags);
 }
 
+static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+{
+       __raise_exception(common, new_state, NULL);
+}
 
 /*-------------------------------------------------------------------------*/
 
@@ -2285,16 +2292,16 @@ reset:
 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 {
        struct fsg_dev *fsg = fsg_from_func(f);
-       fsg->common->new_fsg = fsg;
-       raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+       __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg);
        return USB_GADGET_DELAYED_STATUS;
 }
 
 static void fsg_disable(struct usb_function *f)
 {
        struct fsg_dev *fsg = fsg_from_func(f);
-       fsg->common->new_fsg = NULL;
-       raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+       __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
 }
 
 
@@ -2307,6 +2314,7 @@ static void handle_exception(struct fsg_common *common)
        enum fsg_state          old_state;
        struct fsg_lun          *curlun;
        unsigned int            exception_req_tag;
+       struct fsg_dev          *new_fsg;
 
        /*
         * Clear the existing signals.  Anything but SIGUSR1 is converted
@@ -2360,6 +2368,7 @@ static void handle_exception(struct fsg_common *common)
        common->next_buffhd_to_fill = &common->buffhds[0];
        common->next_buffhd_to_drain = &common->buffhds[0];
        exception_req_tag = common->exception_req_tag;
+       new_fsg = common->exception_arg;
        old_state = common->state;
        common->state = FSG_STATE_NORMAL;
 
@@ -2413,8 +2422,8 @@ static void handle_exception(struct fsg_common *common)
                break;
 
        case FSG_STATE_CONFIG_CHANGE:
-               do_set_interface(common, common->new_fsg);
-               if (common->new_fsg)
+               do_set_interface(common, new_fsg);
+               if (new_fsg)
                        usb_composite_setup_continue(common->cdev);
                break;
 
@@ -2989,8 +2998,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
 
        DBG(fsg, "unbind\n");
        if (fsg->common->fsg == fsg) {
-               fsg->common->new_fsg = NULL;
-               raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+               __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
                /* FIXME: make interruptible or killable somehow? */
                wait_event(common->fsg_wait, common->fsg != fsg);
        }
index 213b52508621eb591935869b00ac443e62d7b5b4..1505e554d24555506ca560153e80e91ed15c5c0c 100644 (file)
@@ -48,6 +48,7 @@
 #define DRIVER_VERSION "02 May 2005"
 
 #define POWER_BUDGET   500     /* in mA; use 8 for low-power port testing */
+#define POWER_BUDGET_3 900     /* in mA */
 
 static const char      driver_name[] = "dummy_hcd";
 static const char      driver_desc[] = "USB Host+Gadget Emulator";
@@ -2446,7 +2447,7 @@ static int dummy_start_ss(struct dummy_hcd *dum_hcd)
        dum_hcd->rh_state = DUMMY_RH_RUNNING;
        dum_hcd->stream_en_ep = 0;
        INIT_LIST_HEAD(&dum_hcd->urbp_list);
-       dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET;
+       dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3;
        dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
        dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
 #ifdef CONFIG_USB_OTG
index eafc2a00c96a013060621fe073fad6ec925ba389..21921db068f6d80ae6d0fb6fcea8cbd9a8b018e3 100644 (file)
@@ -1165,11 +1165,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
                        tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
 
                        bl = bytes - n;
-                       if (bl > 3)
-                               bl = 3;
+                       if (bl > 4)
+                               bl = 4;
 
                        for (i = 0; i < bl; i++)
-                               data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
+                               data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
                }
                break;
 
index fea02c7ad4f4328901be479a4c2205a69d961d80..a5254e82d628232b8d518668effb89933b0a51e7 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/sys_soc.h>
 #include <linux/uaccess.h>
 #include <linux/usb/ch9.h>
@@ -2378,9 +2379,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
        if (usb3->forced_b_device)
                return -EBUSY;
 
-       if (!strncmp(buf, "host", strlen("host")))
+       if (sysfs_streq(buf, "host"))
                new_mode_is_host = true;
-       else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+       else if (sysfs_streq(buf, "peripheral"))
                new_mode_is_host = false;
        else
                return -EINVAL;
index e64eb47770c8bb0f2b2c89d4abf9d6e3abf85140..2d5a72c15069e4caee454080045ecae6c8afd8d4 100644 (file)
@@ -1627,6 +1627,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                        /* see what we found out */
                        temp = check_reset_complete(fotg210, wIndex, status_reg,
                                        fotg210_readl(fotg210, status_reg));
+
+                       /* restart schedule */
+                       fotg210->command |= CMD_RUN;
+                       fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
                }
 
                if (!(temp & (PORT_RESUME|PORT_RESET))) {
index 09a8ebd955888d6d375d7cee3d0fc53fd1cf0d9c..6968b9f2b76b5865bd70e7366b2701f6b2c50f6f 100644 (file)
@@ -159,7 +159,7 @@ out:
        return result;
 
 error_set_cluster_id:
-       wusb_cluster_id_put(wusbhc->cluster_id);
+       wusb_cluster_id_put(addr);
 error_cluster_id_get:
        goto out;
 
index 210181fd98d2e9d6e850662becf6ec08f7b901c2..af11887f5f9e4b9619534b431d684f50099b2664 100644 (file)
@@ -418,8 +418,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
  * other cases where the next software may expect clean state from the
  * "firmware".  this is bus-neutral, unlike shutdown() methods.
  */
-static void
-ohci_shutdown (struct usb_hcd *hcd)
+static void _ohci_shutdown(struct usb_hcd *hcd)
 {
        struct ohci_hcd *ohci;
 
@@ -435,6 +434,16 @@ ohci_shutdown (struct usb_hcd *hcd)
        ohci->rh_state = OHCI_RH_HALTED;
 }
 
+static void ohci_shutdown(struct usb_hcd *hcd)
+{
+       struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ohci->lock, flags);
+       _ohci_shutdown(hcd);
+       spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
 /*-------------------------------------------------------------------------*
  * HC functions
  *-------------------------------------------------------------------------*/
@@ -752,7 +761,7 @@ static void io_watchdog_func(struct timer_list *t)
  died:
                        usb_hc_died(ohci_to_hcd(ohci));
                        ohci_dump(ohci);
-                       ohci_shutdown(ohci_to_hcd(ohci));
+                       _ohci_shutdown(ohci_to_hcd(ohci));
                        goto done;
                } else {
                        /* No write back because the done queue was empty */
index 3625a5c1a41b81c88176bcc6a0b9e5c94bf4d59a..070c66f86e67d9ccce855378bdcfd5e9ef2e4204 100644 (file)
@@ -205,7 +205,7 @@ int usb_amd_find_chipset_info(void)
 {
        unsigned long flags;
        struct amd_chipset_info info;
-       int ret;
+       int need_pll_quirk = 0;
 
        spin_lock_irqsave(&amd_lock, flags);
 
@@ -219,21 +219,28 @@ int usb_amd_find_chipset_info(void)
        spin_unlock_irqrestore(&amd_lock, flags);
 
        if (!amd_chipset_sb_type_init(&info)) {
-               ret = 0;
                goto commit;
        }
 
-       /* Below chipset generations needn't enable AMD PLL quirk */
-       if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
-                       info.sb_type.gen == AMD_CHIPSET_SB600 ||
-                       info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
-                       (info.sb_type.gen == AMD_CHIPSET_SB700 &&
-                       info.sb_type.rev > 0x3b)) {
+       switch (info.sb_type.gen) {
+       case AMD_CHIPSET_SB700:
+               need_pll_quirk = info.sb_type.rev <= 0x3B;
+               break;
+       case AMD_CHIPSET_SB800:
+       case AMD_CHIPSET_HUDSON2:
+       case AMD_CHIPSET_BOLTON:
+               need_pll_quirk = 1;
+               break;
+       default:
+               need_pll_quirk = 0;
+               break;
+       }
+
+       if (!need_pll_quirk) {
                if (info.smbus_dev) {
                        pci_dev_put(info.smbus_dev);
                        info.smbus_dev = NULL;
                }
-               ret = 0;
                goto commit;
        }
 
@@ -252,7 +259,7 @@ int usb_amd_find_chipset_info(void)
                }
        }
 
-       ret = info.probe_result = 1;
+       need_pll_quirk = info.probe_result = 1;
        printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
 
 commit:
@@ -263,7 +270,7 @@ commit:
 
                /* Mark that we where here */
                amd_chipset.probe_count++;
-               ret = amd_chipset.probe_result;
+               need_pll_quirk = amd_chipset.probe_result;
 
                spin_unlock_irqrestore(&amd_lock, flags);
 
@@ -277,7 +284,7 @@ commit:
                spin_unlock_irqrestore(&amd_lock, flags);
        }
 
-       return ret;
+       return need_pll_quirk;
 }
 EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
 
index 671bce18782c5a788ad1af896ab9066fd4078839..2b0ccd150209fe3a1f55bb73d29a965fe1cd6f26 100644 (file)
@@ -104,7 +104,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
        return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
                of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
                of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
-               of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
+               of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
 }
 
 static int xhci_rcar_is_gen3(struct device *dev)
@@ -238,10 +238,15 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
         * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
         * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
         * xhci_gen_setup().
+        *
+        * And, since the firmware/internal CPU control the USBSTS.STS_HALT
+        * and the process speed is down when the roothub port enters U3,
+        * long delay for the handshake of STS_HALT is neeed in xhci_suspend().
         */
        if (xhci_rcar_is_gen2(hcd->self.controller) ||
-                       xhci_rcar_is_gen3(hcd->self.controller))
-               xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+                       xhci_rcar_is_gen3(hcd->self.controller)) {
+               xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND;
+       }
 
        if (!xhci_rcar_wait_for_pll_active(hcd))
                return -ETIMEDOUT;
index f896a00662efe31a00b0e0bc2c13efe9ed4a55e3..9d79824ab5b61d0fd71ad253ce8d1e536e88f699 100644 (file)
@@ -3154,10 +3154,10 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
        if (usb_urb_dir_out(urb)) {
                len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
                                   seg->bounce_buf, new_buff_len, enqd_len);
-               if (len != seg->bounce_len)
+               if (len != new_buff_len)
                        xhci_warn(xhci,
                                "WARN Wrong bounce buffer write length: %zu != %d\n",
-                               len, seg->bounce_len);
+                               len, new_buff_len);
                seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
                                                 max_pkt, DMA_TO_DEVICE);
        } else {
index b1cce989bd12320b763b9445f70920ef80ca55c7..fe37dacc695fcb0dfb15f47078c8847902feaeb1 100644 (file)
@@ -1148,6 +1148,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
 
        tegra_xusb_ipfs_config(tegra, regs);
 
+       /*
+        * The XUSB Falcon microcontroller can only address 40 bits, so set
+        * the DMA mask accordingly.
+        */
+       err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+               goto put_rpm;
+       }
+
        err = tegra_xusb_load_firmware(tegra);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
index 4ffadca2c71a620c9e0979193054cfb46871cfd8..36865d50171ff912a4b7fc4bf817729e02445d6b 100644 (file)
@@ -1022,7 +1022,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
        writel(command, &xhci->op_regs->command);
        xhci->broken_suspend = 0;
        if (xhci_handshake(&xhci->op_regs->status,
-                               STS_SAVE, 0, 10 * 1000)) {
+                               STS_SAVE, 0, 20 * 1000)) {
        /*
         * AMD SNPS xHC 3.0 occasionally does not clear the
         * SSS bit of USBSTS and when driver tries to poll
@@ -1098,6 +1098,18 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                hibernated = true;
 
        if (!hibernated) {
+               /*
+                * Some controllers might lose power during suspend, so wait
+                * for controller not ready bit to clear, just as in xHC init.
+                */
+               retval = xhci_handshake(&xhci->op_regs->status,
+                                       STS_CNR, 0, 10 * 1000 * 1000);
+               if (retval) {
+                       xhci_warn(xhci, "Controller not ready at resume %d\n",
+                                 retval);
+                       spin_unlock_irq(&xhci->lock);
+                       return retval;
+               }
                /* step 1: restore register */
                xhci_restore_registers(xhci);
                /* step 2: initialize command ring buffer */
@@ -3053,6 +3065,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
        unsigned int ep_index;
        unsigned long flags;
        u32 ep_flag;
+       int err;
 
        xhci = hcd_to_xhci(hcd);
        if (!host_ep->hcpriv)
@@ -3102,7 +3115,17 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
                xhci_free_command(xhci, cfg_cmd);
                goto cleanup;
        }
-       xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
+
+       err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
+                                       ep_index, 0);
+       if (err < 0) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_free_command(xhci, cfg_cmd);
+               xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
+                               __func__, err);
+               goto cleanup;
+       }
+
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -3116,8 +3139,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
                                           ctrl_ctx, ep_flag, ep_flag);
        xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
 
-       xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
+       err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
                                      udev->slot_id, false);
+       if (err < 0) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_free_command(xhci, cfg_cmd);
+               xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
+                               __func__, err);
+               goto cleanup;
+       }
+
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -4631,12 +4662,12 @@ static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
        alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
                desc, state, timeout);
 
-       /* If we found we can't enable hub-initiated LPM, or
+       /* If we found we can't enable hub-initiated LPM, and
         * the U1 or U2 exit latency was too high to allow
-        * device-initiated LPM as well, just stop searching.
+        * device-initiated LPM as well, then we will disable LPM
+        * for this device, so stop searching any further.
         */
-       if (alt_timeout == USB3_LPM_DISABLED ||
-                       alt_timeout == USB3_LPM_DEVICE_INITIATED) {
+       if (alt_timeout == USB3_LPM_DISABLED) {
                *timeout = alt_timeout;
                return -E2BIG;
        }
@@ -4747,10 +4778,12 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
                if (intf->dev.driver) {
                        driver = to_usb_driver(intf->dev.driver);
                        if (driver && driver->disable_hub_initiated_lpm) {
-                               dev_dbg(&udev->dev, "Hub-initiated %s disabled "
-                                               "at request of driver %s\n",
-                                               state_name, driver->name);
-                               return xhci_get_timeout_no_hub_lpm(udev, state);
+                               dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
+                                       state_name, driver->name);
+                               timeout = xhci_get_timeout_no_hub_lpm(udev,
+                                                                     state);
+                               if (timeout == USB3_LPM_DISABLED)
+                                       return timeout;
                        }
                }
 
@@ -5034,11 +5067,18 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                hcd->has_tt = 1;
        } else {
                /*
-                * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
-                * minor revision instead of sbrn. Minor revision is a two digit
-                * BCD containing minor and sub-minor numbers, only show minor.
+                * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
+                * should return 0x31 for sbrn, or that the minor revision
+                * is a two digit BCD containing minor and sub-minor numbers.
+                * This was later clarified in xHCI 1.2.
+                *
+                * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
+                * minor revision set to 0x1 instead of 0x10.
                 */
-               minor_rev = xhci->usb3_rhub.min_rev / 0x10;
+               if (xhci->usb3_rhub.min_rev == 0x1)
+                       minor_rev = 1;
+               else
+                       minor_rev = xhci->usb3_rhub.min_rev / 0x10;
 
                switch (minor_rev) {
                case 2:
index 9f2f563c82ed161912683091a93d2752219c714a..addbb47a50333afc4d48c45c67d645d8699d7ef9 100644 (file)
@@ -721,6 +721,10 @@ static int mts_usb_probe(struct usb_interface *intf,
 
        }
 
+       if (ep_in_current != &ep_in_set[2]) {
+               MTS_WARNING("couldn't find two input bulk endpoints. Bailing out.\n");
+               return -ENODEV;
+       }
 
        if ( ep_out == -1 ) {
                MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" );
index 68d2f2cd17dda953d2efcca2a99c5083a2213c89..2e3fc63619b7f8b93a126281aac9e59eede88adb 100644 (file)
@@ -46,16 +46,6 @@ config USB_SEVSEG
          To compile this driver as a module, choose M here: the
          module will be called usbsevseg.
 
-config USB_RIO500
-       tristate "USB Diamond Rio500 support"
-       help
-         Say Y here if you want to connect a USB Rio500 mp3 player to your
-         computer's USB port. Please read <file:Documentation/usb/rio.txt>
-         for more information.
-
-         To compile this driver as a module, choose M here: the
-         module will be called rio500.
-
 config USB_LEGOTOWER
        tristate "USB Lego Infrared Tower support"
        help
index 109f54f5b9aa4b8ecc69c9f301e6ff3d668cfd93..0d416eb624bbe12247493afa40fe60242086e0d7 100644 (file)
@@ -17,7 +17,6 @@ obj-$(CONFIG_USB_ISIGHTFW)            += isight_firmware.o
 obj-$(CONFIG_USB_LCD)                  += usblcd.o
 obj-$(CONFIG_USB_LD)                   += ldusb.o
 obj-$(CONFIG_USB_LEGOTOWER)            += legousbtower.o
-obj-$(CONFIG_USB_RIO500)               += rio500.o
 obj-$(CONFIG_USB_TEST)                 += usbtest.o
 obj-$(CONFIG_USB_EHSET_TEST_FIXTURE)    += ehset.o
 obj-$(CONFIG_USB_TRANCEVIBRATOR)       += trancevibrator.o
index 9465fb95d70af83dd2e06a42a4248b5198f6587d..9a51760df0266a293522c2ba84e1776f1b308117 100644 (file)
@@ -75,6 +75,7 @@ struct adu_device {
        char                    serial_number[8];
 
        int                     open_count; /* number of times this port has been opened */
+       unsigned long           disconnected:1;
 
        char            *read_buffer_primary;
        int                     read_buffer_length;
@@ -116,7 +117,7 @@ static void adu_abort_transfers(struct adu_device *dev)
 {
        unsigned long flags;
 
-       if (dev->udev == NULL)
+       if (dev->disconnected)
                return;
 
        /* shutdown transfer */
@@ -148,6 +149,7 @@ static void adu_delete(struct adu_device *dev)
        kfree(dev->read_buffer_secondary);
        kfree(dev->interrupt_in_buffer);
        kfree(dev->interrupt_out_buffer);
+       usb_put_dev(dev->udev);
        kfree(dev);
 }
 
@@ -243,7 +245,7 @@ static int adu_open(struct inode *inode, struct file *file)
        }
 
        dev = usb_get_intfdata(interface);
-       if (!dev || !dev->udev) {
+       if (!dev) {
                retval = -ENODEV;
                goto exit_no_device;
        }
@@ -326,7 +328,7 @@ static int adu_release(struct inode *inode, struct file *file)
        }
 
        adu_release_internal(dev);
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                /* the device was unplugged before the file was released */
                if (!dev->open_count)   /* ... and we're the last user */
                        adu_delete(dev);
@@ -355,7 +357,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
                return -ERESTARTSYS;
 
        /* verify that the device wasn't unplugged */
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                pr_err("No device or device unplugged %d\n", retval);
                goto exit;
@@ -520,7 +522,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
                goto exit_nolock;
 
        /* verify that the device wasn't unplugged */
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                pr_err("No device or device unplugged %d\n", retval);
                goto exit;
@@ -665,7 +667,7 @@ static int adu_probe(struct usb_interface *interface,
 
        mutex_init(&dev->mtx);
        spin_lock_init(&dev->buflock);
-       dev->udev = udev;
+       dev->udev = usb_get_dev(udev);
        init_waitqueue_head(&dev->read_wait);
        init_waitqueue_head(&dev->write_wait);
 
@@ -764,14 +766,18 @@ static void adu_disconnect(struct usb_interface *interface)
 
        dev = usb_get_intfdata(interface);
 
-       mutex_lock(&dev->mtx);  /* not interruptible */
-       dev->udev = NULL;       /* poison */
        usb_deregister_dev(interface, &adu_class);
-       mutex_unlock(&dev->mtx);
+
+       usb_poison_urb(dev->interrupt_in_urb);
+       usb_poison_urb(dev->interrupt_out_urb);
 
        mutex_lock(&adutux_mutex);
        usb_set_intfdata(interface, NULL);
 
+       mutex_lock(&dev->mtx);  /* not interruptible */
+       dev->disconnected = 1;
+       mutex_unlock(&dev->mtx);
+
        /* if the device is not opened, then we clean up right now */
        if (!dev->open_count)
                adu_delete(dev);
index cf5828ce927a835af30de0dd739d0ce7e455a2db..34e6cd6f40d3005fbda6dc428be6c7e19fccf6bb 100644 (file)
@@ -98,6 +98,7 @@ static void chaoskey_free(struct chaoskey *dev)
                usb_free_urb(dev->urb);
                kfree(dev->name);
                kfree(dev->buf);
+               usb_put_intf(dev->interface);
                kfree(dev);
        }
 }
@@ -145,6 +146,8 @@ static int chaoskey_probe(struct usb_interface *interface,
        if (dev == NULL)
                goto out;
 
+       dev->interface = usb_get_intf(interface);
+
        dev->buf = kmalloc(size, GFP_KERNEL);
 
        if (dev->buf == NULL)
@@ -174,8 +177,6 @@ static int chaoskey_probe(struct usb_interface *interface,
                        goto out;
        }
 
-       dev->interface = interface;
-
        dev->in_ep = in_ep;
 
        if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
index c2991b8a65ce455b9e35c15fbb758108b1012071..2d9d9490cdd4759cbf190c5a2be8653add77b3e0 100644 (file)
@@ -87,6 +87,7 @@ struct iowarrior {
        char chip_serial[9];            /* the serial number string of the chip connected */
        int report_size;                /* number of bytes in a report */
        u16 product_id;
+       struct usb_anchor submitted;
 };
 
 /*--------------*/
@@ -243,6 +244,7 @@ static inline void iowarrior_delete(struct iowarrior *dev)
        kfree(dev->int_in_buffer);
        usb_free_urb(dev->int_in_urb);
        kfree(dev->read_queue);
+       usb_put_intf(dev->interface);
        kfree(dev);
 }
 
@@ -424,11 +426,13 @@ static ssize_t iowarrior_write(struct file *file,
                        retval = -EFAULT;
                        goto error;
                }
+               usb_anchor_urb(int_out_urb, &dev->submitted);
                retval = usb_submit_urb(int_out_urb, GFP_KERNEL);
                if (retval) {
                        dev_dbg(&dev->interface->dev,
                                "submit error %d for urb nr.%d\n",
                                retval, atomic_read(&dev->write_busy));
+                       usb_unanchor_urb(int_out_urb);
                        goto error;
                }
                /* submit was ok */
@@ -764,11 +768,13 @@ static int iowarrior_probe(struct usb_interface *interface,
        init_waitqueue_head(&dev->write_wait);
 
        dev->udev = udev;
-       dev->interface = interface;
+       dev->interface = usb_get_intf(interface);
 
        iface_desc = interface->cur_altsetting;
        dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
 
+       init_usb_anchor(&dev->submitted);
+
        res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint);
        if (res) {
                dev_err(&interface->dev, "no interrupt-in endpoint found\n");
@@ -868,8 +874,9 @@ static void iowarrior_disconnect(struct usb_interface *interface)
        usb_set_intfdata(interface, NULL);
 
        minor = dev->minor;
+       mutex_unlock(&iowarrior_open_disc_lock);
+       /* give back our minor - this will call close(); locks need to be dropped at this point */
 
-       /* give back our minor */
        usb_deregister_dev(interface, &iowarrior_class);
 
        mutex_lock(&dev->mutex);
@@ -877,19 +884,19 @@ static void iowarrior_disconnect(struct usb_interface *interface)
        /* prevent device read, write and ioctl */
        dev->present = 0;
 
-       mutex_unlock(&dev->mutex);
-       mutex_unlock(&iowarrior_open_disc_lock);
-
        if (dev->opened) {
                /* There is a process that holds a filedescriptor to the device ,
                   so we only shutdown read-/write-ops going on.
                   Deleting the device is postponed until close() was called.
                 */
                usb_kill_urb(dev->int_in_urb);
+               usb_kill_anchored_urbs(&dev->submitted);
                wake_up_interruptible(&dev->read_wait);
                wake_up_interruptible(&dev->write_wait);
+               mutex_unlock(&dev->mutex);
        } else {
                /* no process is using the device, cleanup now */
+               mutex_unlock(&dev->mutex);
                iowarrior_delete(dev);
        }
 
index 006762b72ff54211f05fed030170bc7f1a70448d..6b3a6fd7d271dd021fb3cac3dccfe7938c3c604f 100644 (file)
@@ -153,6 +153,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in
 struct ld_usb {
        struct mutex            mutex;          /* locks this structure */
        struct usb_interface    *intf;          /* save off the usb interface pointer */
+       unsigned long           disconnected:1;
 
        int                     open_count;     /* number of times this port has been opened */
 
@@ -192,12 +193,10 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
        /* shutdown transfer */
        if (dev->interrupt_in_running) {
                dev->interrupt_in_running = 0;
-               if (dev->intf)
-                       usb_kill_urb(dev->interrupt_in_urb);
+               usb_kill_urb(dev->interrupt_in_urb);
        }
        if (dev->interrupt_out_busy)
-               if (dev->intf)
-                       usb_kill_urb(dev->interrupt_out_urb);
+               usb_kill_urb(dev->interrupt_out_urb);
 }
 
 /**
@@ -205,8 +204,6 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
  */
 static void ld_usb_delete(struct ld_usb *dev)
 {
-       ld_usb_abort_transfers(dev);
-
        /* free data structures */
        usb_free_urb(dev->interrupt_in_urb);
        usb_free_urb(dev->interrupt_out_urb);
@@ -263,7 +260,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
 
 resubmit:
        /* resubmit if we're still running */
-       if (dev->interrupt_in_running && !dev->buffer_overflow && dev->intf) {
+       if (dev->interrupt_in_running && !dev->buffer_overflow) {
                retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
                if (retval) {
                        dev_err(&dev->intf->dev,
@@ -383,16 +380,13 @@ static int ld_usb_release(struct inode *inode, struct file *file)
                goto exit;
        }
 
-       if (mutex_lock_interruptible(&dev->mutex)) {
-               retval = -ERESTARTSYS;
-               goto exit;
-       }
+       mutex_lock(&dev->mutex);
 
        if (dev->open_count != 1) {
                retval = -ENODEV;
                goto unlock_exit;
        }
-       if (dev->intf == NULL) {
+       if (dev->disconnected) {
                /* the device was unplugged before the file was released */
                mutex_unlock(&dev->mutex);
                /* unlock here as ld_usb_delete frees dev */
@@ -423,7 +417,7 @@ static __poll_t ld_usb_poll(struct file *file, poll_table *wait)
 
        dev = file->private_data;
 
-       if (!dev->intf)
+       if (dev->disconnected)
                return EPOLLERR | EPOLLHUP;
 
        poll_wait(file, &dev->read_wait, wait);
@@ -462,7 +456,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
        }
 
        /* verify that the device wasn't unplugged */
-       if (dev->intf == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
                goto unlock_exit;
@@ -470,7 +464,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
 
        /* wait for data */
        spin_lock_irq(&dev->rbsl);
-       if (dev->ring_head == dev->ring_tail) {
+       while (dev->ring_head == dev->ring_tail) {
                dev->interrupt_in_done = 0;
                spin_unlock_irq(&dev->rbsl);
                if (file->f_flags & O_NONBLOCK) {
@@ -480,12 +474,17 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
                retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
                if (retval < 0)
                        goto unlock_exit;
-       } else {
-               spin_unlock_irq(&dev->rbsl);
+
+               spin_lock_irq(&dev->rbsl);
        }
+       spin_unlock_irq(&dev->rbsl);
 
        /* actual_buffer contains actual_length + interrupt_in_buffer */
        actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
+       if (*actual_buffer > dev->interrupt_in_endpoint_size) {
+               retval = -EIO;
+               goto unlock_exit;
+       }
        bytes_to_read = min(count, *actual_buffer);
        if (bytes_to_read < *actual_buffer)
                dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
@@ -542,7 +541,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
        }
 
        /* verify that the device wasn't unplugged */
-       if (dev->intf == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
                goto unlock_exit;
@@ -696,10 +695,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
                dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
 
        dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
-       dev->ring_buffer =
-               kmalloc_array(ring_buffer_size,
-                             sizeof(size_t) + dev->interrupt_in_endpoint_size,
-                             GFP_KERNEL);
+       dev->ring_buffer = kcalloc(ring_buffer_size,
+                       sizeof(size_t) + dev->interrupt_in_endpoint_size,
+                       GFP_KERNEL);
        if (!dev->ring_buffer)
                goto error;
        dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
@@ -764,6 +762,9 @@ static void ld_usb_disconnect(struct usb_interface *intf)
        /* give back our minor */
        usb_deregister_dev(intf, &ld_usb_class);
 
+       usb_poison_urb(dev->interrupt_in_urb);
+       usb_poison_urb(dev->interrupt_out_urb);
+
        mutex_lock(&dev->mutex);
 
        /* if the device is not opened, then we clean up right now */
@@ -771,7 +772,7 @@ static void ld_usb_disconnect(struct usb_interface *intf)
                mutex_unlock(&dev->mutex);
                ld_usb_delete(dev);
        } else {
-               dev->intf = NULL;
+               dev->disconnected = 1;
                /* wake up pollers */
                wake_up_interruptible_all(&dev->read_wait);
                wake_up_interruptible_all(&dev->write_wait);
index 006cf13b21999f58e512579fa62ee399907dba6b..62dab2441ec4fcb9b005070d20e335d0ed84b8df 100644 (file)
@@ -179,7 +179,6 @@ static const struct usb_device_id tower_table[] = {
 };
 
 MODULE_DEVICE_TABLE (usb, tower_table);
-static DEFINE_MUTEX(open_disc_mutex);
 
 #define LEGO_USB_TOWER_MINOR_BASE      160
 
@@ -191,6 +190,7 @@ struct lego_usb_tower {
        unsigned char           minor;          /* the starting minor number for this device */
 
        int                     open_count;     /* number of times this port has been opened */
+       unsigned long           disconnected:1;
 
        char*                   read_buffer;
        size_t                  read_buffer_length; /* this much came in */
@@ -290,14 +290,13 @@ static inline void lego_usb_tower_debug_data(struct device *dev,
  */
 static inline void tower_delete (struct lego_usb_tower *dev)
 {
-       tower_abort_transfers (dev);
-
        /* free data structures */
        usb_free_urb(dev->interrupt_in_urb);
        usb_free_urb(dev->interrupt_out_urb);
        kfree (dev->read_buffer);
        kfree (dev->interrupt_in_buffer);
        kfree (dev->interrupt_out_buffer);
+       usb_put_dev(dev->udev);
        kfree (dev);
 }
 
@@ -332,18 +331,14 @@ static int tower_open (struct inode *inode, struct file *file)
                goto exit;
        }
 
-       mutex_lock(&open_disc_mutex);
        dev = usb_get_intfdata(interface);
-
        if (!dev) {
-               mutex_unlock(&open_disc_mutex);
                retval = -ENODEV;
                goto exit;
        }
 
        /* lock this device */
        if (mutex_lock_interruptible(&dev->lock)) {
-               mutex_unlock(&open_disc_mutex);
                retval = -ERESTARTSYS;
                goto exit;
        }
@@ -351,12 +346,9 @@ static int tower_open (struct inode *inode, struct file *file)
 
        /* allow opening only once */
        if (dev->open_count) {
-               mutex_unlock(&open_disc_mutex);
                retval = -EBUSY;
                goto unlock_exit;
        }
-       dev->open_count = 1;
-       mutex_unlock(&open_disc_mutex);
 
        /* reset the tower */
        result = usb_control_msg (dev->udev,
@@ -396,13 +388,14 @@ static int tower_open (struct inode *inode, struct file *file)
                dev_err(&dev->udev->dev,
                        "Couldn't submit interrupt_in_urb %d\n", retval);
                dev->interrupt_in_running = 0;
-               dev->open_count = 0;
                goto unlock_exit;
        }
 
        /* save device in the file's private structure */
        file->private_data = dev;
 
+       dev->open_count = 1;
+
 unlock_exit:
        mutex_unlock(&dev->lock);
 
@@ -423,22 +416,19 @@ static int tower_release (struct inode *inode, struct file *file)
 
        if (dev == NULL) {
                retval = -ENODEV;
-               goto exit_nolock;
-       }
-
-       mutex_lock(&open_disc_mutex);
-       if (mutex_lock_interruptible(&dev->lock)) {
-               retval = -ERESTARTSYS;
                goto exit;
        }
 
+       mutex_lock(&dev->lock);
+
        if (dev->open_count != 1) {
                dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
                        __func__);
                retval = -ENODEV;
                goto unlock_exit;
        }
-       if (dev->udev == NULL) {
+
+       if (dev->disconnected) {
                /* the device was unplugged before the file was released */
 
                /* unlock here as tower_delete frees dev */
@@ -456,10 +446,7 @@ static int tower_release (struct inode *inode, struct file *file)
 
 unlock_exit:
        mutex_unlock(&dev->lock);
-
 exit:
-       mutex_unlock(&open_disc_mutex);
-exit_nolock:
        return retval;
 }
 
@@ -477,10 +464,9 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
        if (dev->interrupt_in_running) {
                dev->interrupt_in_running = 0;
                mb();
-               if (dev->udev)
-                       usb_kill_urb (dev->interrupt_in_urb);
+               usb_kill_urb(dev->interrupt_in_urb);
        }
-       if (dev->interrupt_out_busy && dev->udev)
+       if (dev->interrupt_out_busy)
                usb_kill_urb(dev->interrupt_out_urb);
 }
 
@@ -516,7 +502,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
 
        dev = file->private_data;
 
-       if (!dev->udev)
+       if (dev->disconnected)
                return EPOLLERR | EPOLLHUP;
 
        poll_wait(file, &dev->read_wait, wait);
@@ -563,7 +549,7 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
        }
 
        /* verify that the device wasn't unplugged */
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                pr_err("No device or device unplugged %d\n", retval);
                goto unlock_exit;
@@ -649,7 +635,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
        }
 
        /* verify that the device wasn't unplugged */
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                pr_err("No device or device unplugged %d\n", retval);
                goto unlock_exit;
@@ -759,7 +745,7 @@ static void tower_interrupt_in_callback (struct urb *urb)
 
 resubmit:
        /* resubmit if we're still running */
-       if (dev->interrupt_in_running && dev->udev) {
+       if (dev->interrupt_in_running) {
                retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
                if (retval)
                        dev_err(&dev->udev->dev,
@@ -822,8 +808,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 
        mutex_init(&dev->lock);
 
-       dev->udev = udev;
+       dev->udev = usb_get_dev(udev);
        dev->open_count = 0;
+       dev->disconnected = 0;
 
        dev->read_buffer = NULL;
        dev->read_buffer_length = 0;
@@ -891,8 +878,10 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
                                  get_version_reply,
                                  sizeof(*get_version_reply),
                                  1000);
-       if (result < 0) {
-               dev_err(idev, "LEGO USB Tower get version control request failed\n");
+       if (result < sizeof(*get_version_reply)) {
+               if (result >= 0)
+                       result = -EIO;
+               dev_err(idev, "get version request failed: %d\n", result);
                retval = result;
                goto error;
        }
@@ -910,7 +899,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
        if (retval) {
                /* something prevented us from registering this driver */
                dev_err(idev, "Not able to get a minor for this device.\n");
-               usb_set_intfdata (interface, NULL);
                goto error;
        }
        dev->minor = interface->minor;
@@ -942,23 +930,24 @@ static void tower_disconnect (struct usb_interface *interface)
        int minor;
 
        dev = usb_get_intfdata (interface);
-       mutex_lock(&open_disc_mutex);
-       usb_set_intfdata (interface, NULL);
 
        minor = dev->minor;
 
-       /* give back our minor */
+       /* give back our minor and prevent further open() */
        usb_deregister_dev (interface, &tower_class);
 
+       /* stop I/O */
+       usb_poison_urb(dev->interrupt_in_urb);
+       usb_poison_urb(dev->interrupt_out_urb);
+
        mutex_lock(&dev->lock);
-       mutex_unlock(&open_disc_mutex);
 
        /* if the device is not opened, then we clean up right now */
        if (!dev->open_count) {
                mutex_unlock(&dev->lock);
                tower_delete (dev);
        } else {
-               dev->udev = NULL;
+               dev->disconnected = 1;
                /* wake up pollers */
                wake_up_interruptible_all(&dev->read_wait);
                wake_up_interruptible_all(&dev->write_wait);
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
deleted file mode 100644 (file)
index a32d61a..0000000
+++ /dev/null
@@ -1,561 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* -*- linux-c -*- */
-
-/* 
- * Driver for USB Rio 500
- *
- * Cesar Miquel (miquel@df.uba.ar)
- * 
- * based on hp_scanner.c by David E. Nelson (dnelson@jump.net)
- *
- * Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee).
- *
- * Changelog:
- * 30/05/2003  replaced lock/unlock kernel with up/down
- *             Daniele Bellucci  bellucda@tiscali.it
- * */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched/signal.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/random.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/usb.h>
-#include <linux/wait.h>
-
-#include "rio500_usb.h"
-
-#define DRIVER_AUTHOR "Cesar Miquel <miquel@df.uba.ar>"
-#define DRIVER_DESC "USB Rio 500 driver"
-
-#define RIO_MINOR      64
-
-/* stall/wait timeout for rio */
-#define NAK_TIMEOUT (HZ)
-
-#define IBUF_SIZE 0x1000
-
-/* Size of the rio buffer */
-#define OBUF_SIZE 0x10000
-
-struct rio_usb_data {
-        struct usb_device *rio_dev;     /* init: probe_rio */
-        unsigned int ifnum;             /* Interface number of the USB device */
-        int isopen;                     /* nz if open */
-        int present;                    /* Device is present on the bus */
-        char *obuf, *ibuf;              /* transfer buffers */
-        char bulk_in_ep, bulk_out_ep;   /* Endpoint assignments */
-        wait_queue_head_t wait_q;       /* for timeouts */
-       struct mutex lock;          /* general race avoidance */
-};
-
-static DEFINE_MUTEX(rio500_mutex);
-static struct rio_usb_data rio_instance;
-
-static int open_rio(struct inode *inode, struct file *file)
-{
-       struct rio_usb_data *rio = &rio_instance;
-
-       /* against disconnect() */
-       mutex_lock(&rio500_mutex);
-       mutex_lock(&(rio->lock));
-
-       if (rio->isopen || !rio->present) {
-               mutex_unlock(&(rio->lock));
-               mutex_unlock(&rio500_mutex);
-               return -EBUSY;
-       }
-       rio->isopen = 1;
-
-       init_waitqueue_head(&rio->wait_q);
-
-       mutex_unlock(&(rio->lock));
-
-       dev_info(&rio->rio_dev->dev, "Rio opened.\n");
-       mutex_unlock(&rio500_mutex);
-
-       return 0;
-}
-
-static int close_rio(struct inode *inode, struct file *file)
-{
-       struct rio_usb_data *rio = &rio_instance;
-
-       /* against disconnect() */
-       mutex_lock(&rio500_mutex);
-       mutex_lock(&(rio->lock));
-
-       rio->isopen = 0;
-       if (!rio->present) {
-               /* cleanup has been delayed */
-               kfree(rio->ibuf);
-               kfree(rio->obuf);
-               rio->ibuf = NULL;
-               rio->obuf = NULL;
-       } else {
-               dev_info(&rio->rio_dev->dev, "Rio closed.\n");
-       }
-       mutex_unlock(&(rio->lock));
-       mutex_unlock(&rio500_mutex);
-       return 0;
-}
-
-static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct RioCommand rio_cmd;
-       struct rio_usb_data *rio = &rio_instance;
-       void __user *data;
-       unsigned char *buffer;
-       int result, requesttype;
-       int retries;
-       int retval=0;
-
-       mutex_lock(&(rio->lock));
-        /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-               retval = -ENODEV;
-               goto err_out;
-       }
-
-       switch (cmd) {
-       case RIO_RECV_COMMAND:
-               data = (void __user *) arg;
-               if (data == NULL)
-                       break;
-               if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
-                       retval = -EFAULT;
-                       goto err_out;
-               }
-               if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
-                       retval = -EINVAL;
-                       goto err_out;
-               }
-               buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
-               if (buffer == NULL) {
-                       retval = -ENOMEM;
-                       goto err_out;
-               }
-               if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
-                       retval = -EFAULT;
-                       free_page((unsigned long) buffer);
-                       goto err_out;
-               }
-
-               requesttype = rio_cmd.requesttype | USB_DIR_IN |
-                   USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-               dev_dbg(&rio->rio_dev->dev,
-                       "sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
-                       requesttype, rio_cmd.request, rio_cmd.value,
-                       rio_cmd.index, rio_cmd.length);
-               /* Send rio control message */
-               retries = 3;
-               while (retries) {
-                       result = usb_control_msg(rio->rio_dev,
-                                                usb_rcvctrlpipe(rio-> rio_dev, 0),
-                                                rio_cmd.request,
-                                                requesttype,
-                                                rio_cmd.value,
-                                                rio_cmd.index, buffer,
-                                                rio_cmd.length,
-                                                jiffies_to_msecs(rio_cmd.timeout));
-                       if (result == -ETIMEDOUT)
-                               retries--;
-                       else if (result < 0) {
-                               dev_err(&rio->rio_dev->dev,
-                                       "Error executing ioctrl. code = %d\n",
-                                       result);
-                               retries = 0;
-                       } else {
-                               dev_dbg(&rio->rio_dev->dev,
-                                       "Executed ioctl. Result = %d (data=%02x)\n",
-                                       result, buffer[0]);
-                               if (copy_to_user(rio_cmd.buffer, buffer,
-                                                rio_cmd.length)) {
-                                       free_page((unsigned long) buffer);
-                                       retval = -EFAULT;
-                                       goto err_out;
-                               }
-                               retries = 0;
-                       }
-
-                       /* rio_cmd.buffer contains a raw stream of single byte
-                          data which has been returned from rio.  Data is
-                          interpreted at application level.  For data that
-                          will be cast to data types longer than 1 byte, data
-                          will be little_endian and will potentially need to
-                          be swapped at the app level */
-
-               }
-               free_page((unsigned long) buffer);
-               break;
-
-       case RIO_SEND_COMMAND:
-               data = (void __user *) arg;
-               if (data == NULL)
-                       break;
-               if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
-                       retval = -EFAULT;
-                       goto err_out;
-               }
-               if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
-                       retval = -EINVAL;
-                       goto err_out;
-               }
-               buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
-               if (buffer == NULL) {
-                       retval = -ENOMEM;
-                       goto err_out;
-               }
-               if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
-                       free_page((unsigned long)buffer);
-                       retval = -EFAULT;
-                       goto err_out;
-               }
-
-               requesttype = rio_cmd.requesttype | USB_DIR_OUT |
-                   USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-               dev_dbg(&rio->rio_dev->dev,
-                       "sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
-                       requesttype, rio_cmd.request, rio_cmd.value,
-                       rio_cmd.index, rio_cmd.length);
-               /* Send rio control message */
-               retries = 3;
-               while (retries) {
-                       result = usb_control_msg(rio->rio_dev,
-                                                usb_sndctrlpipe(rio-> rio_dev, 0),
-                                                rio_cmd.request,
-                                                requesttype,
-                                                rio_cmd.value,
-                                                rio_cmd.index, buffer,
-                                                rio_cmd.length,
-                                                jiffies_to_msecs(rio_cmd.timeout));
-                       if (result == -ETIMEDOUT)
-                               retries--;
-                       else if (result < 0) {
-                               dev_err(&rio->rio_dev->dev,
-                                       "Error executing ioctrl. code = %d\n",
-                                       result);
-                               retries = 0;
-                       } else {
-                               dev_dbg(&rio->rio_dev->dev,
-                                       "Executed ioctl. Result = %d\n", result);
-                               retries = 0;
-
-                       }
-
-               }
-               free_page((unsigned long) buffer);
-               break;
-
-       default:
-               retval = -ENOTTY;
-               break;
-       }
-
-
-err_out:
-       mutex_unlock(&(rio->lock));
-       return retval;
-}
-
-static ssize_t
-write_rio(struct file *file, const char __user *buffer,
-         size_t count, loff_t * ppos)
-{
-       DEFINE_WAIT(wait);
-       struct rio_usb_data *rio = &rio_instance;
-
-       unsigned long copy_size;
-       unsigned long bytes_written = 0;
-       unsigned int partial;
-
-       int result = 0;
-       int maxretry;
-       int errn = 0;
-       int intr;
-
-       intr = mutex_lock_interruptible(&(rio->lock));
-       if (intr)
-               return -EINTR;
-        /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-               mutex_unlock(&(rio->lock));
-               return -ENODEV;
-       }
-
-
-
-       do {
-               unsigned long thistime;
-               char *obuf = rio->obuf;
-
-               thistime = copy_size =
-                   (count >= OBUF_SIZE) ? OBUF_SIZE : count;
-               if (copy_from_user(rio->obuf, buffer, copy_size)) {
-                       errn = -EFAULT;
-                       goto error;
-               }
-               maxretry = 5;
-               while (thistime) {
-                       if (!rio->rio_dev) {
-                               errn = -ENODEV;
-                               goto error;
-                       }
-                       if (signal_pending(current)) {
-                               mutex_unlock(&(rio->lock));
-                               return bytes_written ? bytes_written : -EINTR;
-                       }
-
-                       result = usb_bulk_msg(rio->rio_dev,
-                                        usb_sndbulkpipe(rio->rio_dev, 2),
-                                        obuf, thistime, &partial, 5000);
-
-                       dev_dbg(&rio->rio_dev->dev,
-                               "write stats: result:%d thistime:%lu partial:%u\n",
-                               result, thistime, partial);
-
-                       if (result == -ETIMEDOUT) {     /* NAK - so hold for a while */
-                               if (!maxretry--) {
-                                       errn = -ETIME;
-                                       goto error;
-                               }
-                               prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
-                               schedule_timeout(NAK_TIMEOUT);
-                               finish_wait(&rio->wait_q, &wait);
-                               continue;
-                       } else if (!result && partial) {
-                               obuf += partial;
-                               thistime -= partial;
-                       } else
-                               break;
-               }
-               if (result) {
-                       dev_err(&rio->rio_dev->dev, "Write Whoops - %x\n",
-                               result);
-                       errn = -EIO;
-                       goto error;
-               }
-               bytes_written += copy_size;
-               count -= copy_size;
-               buffer += copy_size;
-       } while (count > 0);
-
-       mutex_unlock(&(rio->lock));
-
-       return bytes_written ? bytes_written : -EIO;
-
-error:
-       mutex_unlock(&(rio->lock));
-       return errn;
-}
-
-static ssize_t
-read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
-{
-       DEFINE_WAIT(wait);
-       struct rio_usb_data *rio = &rio_instance;
-       ssize_t read_count;
-       unsigned int partial;
-       int this_read;
-       int result;
-       int maxretry = 10;
-       char *ibuf;
-       int intr;
-
-       intr = mutex_lock_interruptible(&(rio->lock));
-       if (intr)
-               return -EINTR;
-       /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-               mutex_unlock(&(rio->lock));
-               return -ENODEV;
-       }
-
-       ibuf = rio->ibuf;
-
-       read_count = 0;
-
-
-       while (count > 0) {
-               if (signal_pending(current)) {
-                       mutex_unlock(&(rio->lock));
-                       return read_count ? read_count : -EINTR;
-               }
-               if (!rio->rio_dev) {
-                       mutex_unlock(&(rio->lock));
-                       return -ENODEV;
-               }
-               this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
-
-               result = usb_bulk_msg(rio->rio_dev,
-                                     usb_rcvbulkpipe(rio->rio_dev, 1),
-                                     ibuf, this_read, &partial,
-                                     8000);
-
-               dev_dbg(&rio->rio_dev->dev,
-                       "read stats: result:%d this_read:%u partial:%u\n",
-                       result, this_read, partial);
-
-               if (partial) {
-                       count = this_read = partial;
-               } else if (result == -ETIMEDOUT || result == 15) {      /* FIXME: 15 ??? */
-                       if (!maxretry--) {
-                               mutex_unlock(&(rio->lock));
-                               dev_err(&rio->rio_dev->dev,
-                                       "read_rio: maxretry timeout\n");
-                               return -ETIME;
-                       }
-                       prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
-                       schedule_timeout(NAK_TIMEOUT);
-                       finish_wait(&rio->wait_q, &wait);
-                       continue;
-               } else if (result != -EREMOTEIO) {
-                       mutex_unlock(&(rio->lock));
-                       dev_err(&rio->rio_dev->dev,
-                               "Read Whoops - result:%d partial:%u this_read:%u\n",
-                               result, partial, this_read);
-                       return -EIO;
-               } else {
-                       mutex_unlock(&(rio->lock));
-                       return (0);
-               }
-
-               if (this_read) {
-                       if (copy_to_user(buffer, ibuf, this_read)) {
-                               mutex_unlock(&(rio->lock));
-                               return -EFAULT;
-                       }
-                       count -= this_read;
-                       read_count += this_read;
-                       buffer += this_read;
-               }
-       }
-       mutex_unlock(&(rio->lock));
-       return read_count;
-}
-
-static const struct file_operations usb_rio_fops = {
-       .owner =        THIS_MODULE,
-       .read =         read_rio,
-       .write =        write_rio,
-       .unlocked_ioctl = ioctl_rio,
-       .open =         open_rio,
-       .release =      close_rio,
-       .llseek =       noop_llseek,
-};
-
-static struct usb_class_driver usb_rio_class = {
-       .name =         "rio500%d",
-       .fops =         &usb_rio_fops,
-       .minor_base =   RIO_MINOR,
-};
-
-static int probe_rio(struct usb_interface *intf,
-                    const struct usb_device_id *id)
-{
-       struct usb_device *dev = interface_to_usbdev(intf);
-       struct rio_usb_data *rio = &rio_instance;
-       int retval = 0;
-
-       mutex_lock(&rio500_mutex);
-       if (rio->present) {
-               dev_info(&intf->dev, "Second USB Rio at address %d refused\n", dev->devnum);
-               retval = -EBUSY;
-               goto bail_out;
-       } else {
-               dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum);
-       }
-
-       retval = usb_register_dev(intf, &usb_rio_class);
-       if (retval) {
-               dev_err(&dev->dev,
-                       "Not able to get a minor for this device.\n");
-               retval = -ENOMEM;
-               goto bail_out;
-       }
-
-       rio->rio_dev = dev;
-
-       if (!(rio->obuf = kmalloc(OBUF_SIZE, GFP_KERNEL))) {
-               dev_err(&dev->dev,
-                       "probe_rio: Not enough memory for the output buffer\n");
-               usb_deregister_dev(intf, &usb_rio_class);
-               retval = -ENOMEM;
-               goto bail_out;
-       }
-       dev_dbg(&intf->dev, "obuf address:%p\n", rio->obuf);
-
-       if (!(rio->ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL))) {
-               dev_err(&dev->dev,
-                       "probe_rio: Not enough memory for the input buffer\n");
-               usb_deregister_dev(intf, &usb_rio_class);
-               kfree(rio->obuf);
-               retval = -ENOMEM;
-               goto bail_out;
-       }
-       dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf);
-
-       mutex_init(&(rio->lock));
-
-       usb_set_intfdata (intf, rio);
-       rio->present = 1;
-bail_out:
-       mutex_unlock(&rio500_mutex);
-
-       return retval;
-}
-
-static void disconnect_rio(struct usb_interface *intf)
-{
-       struct rio_usb_data *rio = usb_get_intfdata (intf);
-
-       usb_set_intfdata (intf, NULL);
-       mutex_lock(&rio500_mutex);
-       if (rio) {
-               usb_deregister_dev(intf, &usb_rio_class);
-
-               mutex_lock(&(rio->lock));
-               if (rio->isopen) {
-                       rio->isopen = 0;
-                       /* better let it finish - the release will do whats needed */
-                       rio->rio_dev = NULL;
-                       mutex_unlock(&(rio->lock));
-                       mutex_unlock(&rio500_mutex);
-                       return;
-               }
-               kfree(rio->ibuf);
-               kfree(rio->obuf);
-
-               dev_info(&intf->dev, "USB Rio disconnected.\n");
-
-               rio->present = 0;
-               mutex_unlock(&(rio->lock));
-       }
-       mutex_unlock(&rio500_mutex);
-}
-
-static const struct usb_device_id rio_table[] = {
-       { USB_DEVICE(0x0841, 1) },              /* Rio 500 */
-       { }                                     /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE (usb, rio_table);
-
-static struct usb_driver rio_driver = {
-       .name =         "rio500",
-       .probe =        probe_rio,
-       .disconnect =   disconnect_rio,
-       .id_table =     rio_table,
-};
-
-module_usb_driver(rio_driver);
-
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h
deleted file mode 100644 (file)
index 6db7a58..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*  ----------------------------------------------------------------------
-    Copyright (C) 2000  Cesar Miquel  (miquel@df.uba.ar)
-    ---------------------------------------------------------------------- */
-
-#define RIO_SEND_COMMAND                       0x1
-#define RIO_RECV_COMMAND                       0x2
-
-#define RIO_DIR_OUT                            0x0
-#define RIO_DIR_IN                             0x1
-
-struct RioCommand {
-       short length;
-       int request;
-       int requesttype;
-       int value;
-       int index;
-       void __user *buffer;
-       int timeout;
-};
index 9ba4a4e68d9140f450d4e58c51b96068bc716100..aa982d3ca36bebebccb9bda4e61fbd3cc85f73bd 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/uaccess.h>
 #include <linux/usb.h>
 
@@ -57,6 +58,8 @@ struct usb_lcd {
                                                           using up all RAM */
        struct usb_anchor       submitted;              /* URBs to wait for
                                                           before suspend */
+       struct rw_semaphore     io_rwsem;
+       unsigned long           disconnected:1;
 };
 #define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
 
@@ -142,6 +145,13 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
 
        dev = file->private_data;
 
+       down_read(&dev->io_rwsem);
+
+       if (dev->disconnected) {
+               retval = -ENODEV;
+               goto out_up_io;
+       }
+
        /* do a blocking bulk read to get data from the device */
        retval = usb_bulk_msg(dev->udev,
                              usb_rcvbulkpipe(dev->udev,
@@ -158,6 +168,9 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
                        retval = bytes_read;
        }
 
+out_up_io:
+       up_read(&dev->io_rwsem);
+
        return retval;
 }
 
@@ -237,11 +250,18 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
        if (r < 0)
                return -EINTR;
 
+       down_read(&dev->io_rwsem);
+
+       if (dev->disconnected) {
+               retval = -ENODEV;
+               goto err_up_io;
+       }
+
        /* create a urb, and a buffer for it, and copy the data to the urb */
        urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!urb) {
                retval = -ENOMEM;
-               goto err_no_buf;
+               goto err_up_io;
        }
 
        buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL,
@@ -278,6 +298,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
           the USB core will eventually free it entirely */
        usb_free_urb(urb);
 
+       up_read(&dev->io_rwsem);
 exit:
        return count;
 error_unanchor:
@@ -285,7 +306,8 @@ error_unanchor:
 error:
        usb_free_coherent(dev->udev, count, buf, urb->transfer_dma);
        usb_free_urb(urb);
-err_no_buf:
+err_up_io:
+       up_read(&dev->io_rwsem);
        up(&dev->limit_sem);
        return retval;
 }
@@ -325,6 +347,7 @@ static int lcd_probe(struct usb_interface *interface,
 
        kref_init(&dev->kref);
        sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
+       init_rwsem(&dev->io_rwsem);
        init_usb_anchor(&dev->submitted);
 
        dev->udev = usb_get_dev(interface_to_usbdev(interface));
@@ -422,6 +445,12 @@ static void lcd_disconnect(struct usb_interface *interface)
        /* give back our minor */
        usb_deregister_dev(interface, &lcd_class);
 
+       down_write(&dev->io_rwsem);
+       dev->disconnected = 1;
+       up_write(&dev->io_rwsem);
+
+       usb_kill_anchored_urbs(&dev->submitted);
+
        /* decrement our usage count */
        kref_put(&dev->kref, lcd_delete);
 
index 7b306aa22d2589518d696111cb2750bdc3bed4c0..be0505b8b5d4e5515996faf25ef7485e978c9248 100644 (file)
@@ -60,6 +60,7 @@ struct usb_yurex {
 
        struct kref             kref;
        struct mutex            io_mutex;
+       unsigned long           disconnected:1;
        struct fasync_struct    *async_queue;
        wait_queue_head_t       waitq;
 
@@ -92,7 +93,6 @@ static void yurex_delete(struct kref *kref)
 
        dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
-       usb_put_dev(dev->udev);
        if (dev->cntl_urb) {
                usb_kill_urb(dev->cntl_urb);
                kfree(dev->cntl_req);
@@ -108,6 +108,8 @@ static void yurex_delete(struct kref *kref)
                                dev->int_buffer, dev->urb->transfer_dma);
                usb_free_urb(dev->urb);
        }
+       usb_put_intf(dev->interface);
+       usb_put_dev(dev->udev);
        kfree(dev);
 }
 
@@ -132,6 +134,7 @@ static void yurex_interrupt(struct urb *urb)
        switch (status) {
        case 0: /*success*/
                break;
+       /* The device is terminated or messed up, give up */
        case -EOVERFLOW:
                dev_err(&dev->interface->dev,
                        "%s - overflow with length %d, actual length is %d\n",
@@ -140,12 +143,13 @@ static void yurex_interrupt(struct urb *urb)
        case -ENOENT:
        case -ESHUTDOWN:
        case -EILSEQ:
-               /* The device is terminated, clean up */
+       case -EPROTO:
+       case -ETIME:
                return;
        default:
                dev_err(&dev->interface->dev,
                        "%s - unknown status received: %d\n", __func__, status);
-               goto exit;
+               return;
        }
 
        /* handle received message */
@@ -177,7 +181,6 @@ static void yurex_interrupt(struct urb *urb)
                break;
        }
 
-exit:
        retval = usb_submit_urb(dev->urb, GFP_ATOMIC);
        if (retval) {
                dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n",
@@ -204,7 +207,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
        init_waitqueue_head(&dev->waitq);
 
        dev->udev = usb_get_dev(interface_to_usbdev(interface));
-       dev->interface = interface;
+       dev->interface = usb_get_intf(interface);
 
        /* set up the endpoint information */
        iface_desc = interface->cur_altsetting;
@@ -315,8 +318,9 @@ static void yurex_disconnect(struct usb_interface *interface)
 
        /* prevent more I/O from starting */
        usb_poison_urb(dev->urb);
+       usb_poison_urb(dev->cntl_urb);
        mutex_lock(&dev->io_mutex);
-       dev->interface = NULL;
+       dev->disconnected = 1;
        mutex_unlock(&dev->io_mutex);
 
        /* wakeup waiters */
@@ -404,7 +408,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
        dev = file->private_data;
 
        mutex_lock(&dev->io_mutex);
-       if (!dev->interface) {          /* already disconnected */
+       if (dev->disconnected) {                /* already disconnected */
                mutex_unlock(&dev->io_mutex);
                return -ENODEV;
        }
@@ -439,7 +443,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
                goto error;
 
        mutex_lock(&dev->io_mutex);
-       if (!dev->interface) {          /* already disconnected */
+       if (dev->disconnected) {                /* already disconnected */
                mutex_unlock(&dev->io_mutex);
                retval = -ENODEV;
                goto error;
index 6137f7942c05cabc1423771c21a0855aaec3be70..c47b721b8bca4f0821feee7777ea68ec4437b11e 100644 (file)
@@ -207,6 +207,7 @@ struct usbhs_priv;
 /* DCPCTR */
 #define BSTS           (1 << 15)       /* Buffer Status */
 #define SUREQ          (1 << 14)       /* Sending SETUP Token */
+#define INBUFM         (1 << 14)       /* (PIPEnCTR) Transfer Buffer Monitor */
 #define CSSTS          (1 << 12)       /* CSSTS Status */
 #define        ACLRM           (1 << 9)        /* Buffer Auto-Clear Mode */
 #define SQCLR          (1 << 8)        /* Toggle Bit Clear */
index 6036cbae8c78d2b76234878961748d43b64bdbc2..aeb53ec5cc6ab409dddc2cc2dbca38ecc86eb274 100644 (file)
@@ -89,7 +89,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
        list_del_init(&pkt->node);
 }
 
-static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
 {
        return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
 }
index 88d1816bcda22c8d989c7665cfe1fc8478ef1ae9..c3d3cc35cee0f640784d48b5f3a471a9608ac6cf 100644 (file)
@@ -97,5 +97,6 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
                    void *buf, int len, int zero, int sequence);
 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
 void usbhs_pkt_start(struct usbhs_pipe *pipe);
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe);
 
 #endif /* RENESAS_USB_FIFO_H */
index 59cac40aafcc13be4623a9f6c3158d97d6cbe1a3..7feac4128a2d453d38191aaba019550d1eac08fb 100644 (file)
@@ -721,8 +721,7 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
        struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
        struct device *dev = usbhsg_gpriv_to_dev(gpriv);
        unsigned long flags;
-
-       usbhsg_pipe_disable(uep);
+       int ret = 0;
 
        dev_dbg(dev, "set halt %d (pipe %d)\n",
                halt, usbhs_pipe_number(pipe));
@@ -730,6 +729,18 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
        /********************  spin lock ********************/
        usbhs_lock(priv, flags);
 
+       /*
+        * According to usb_ep_set_halt()'s description, this function should
+        * return -EAGAIN if the IN endpoint has any queue or data. Note
+        * that the usbhs_pipe_is_dir_in() returns false if the pipe is an
+        * IN endpoint in the gadget mode.
+        */
+       if (!usbhs_pipe_is_dir_in(pipe) && (__usbhsf_pkt_get(pipe) ||
+           usbhs_pipe_contains_transmittable_data(pipe))) {
+               ret = -EAGAIN;
+               goto out;
+       }
+
        if (halt)
                usbhs_pipe_stall(pipe);
        else
@@ -740,10 +751,11 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
        else
                usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
 
+out:
        usbhs_unlock(priv, flags);
        /********************  spin unlock ******************/
 
-       return 0;
+       return ret;
 }
 
 static int usbhsg_ep_set_halt(struct usb_ep *ep, int value)
index c4922b96c93bcec16e3b60010eed556306914d6c..9e5afdde1adbf8263e9a72bc67b8b7dea034085b 100644 (file)
@@ -277,6 +277,21 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe)
        return -EBUSY;
 }
 
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe)
+{
+       u16 val;
+
+       /* Do not support for DCP pipe */
+       if (usbhs_pipe_is_dcp(pipe))
+               return false;
+
+       val = usbhsp_pipectrl_get(pipe);
+       if (val & INBUFM)
+               return true;
+
+       return false;
+}
+
 /*
  *             PID ctrl
  */
index 3080423e600c7ec6777a950fde20823a72bfe17b..3b130529408ba9991aa4ef5d1f83c4040323eac1 100644 (file)
@@ -83,6 +83,7 @@ void usbhs_pipe_clear(struct usbhs_pipe *pipe);
 void usbhs_pipe_clear_without_sequence(struct usbhs_pipe *pipe,
                                       int needs_bfre, int bfre_enable);
 int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe);
 void usbhs_pipe_enable(struct usbhs_pipe *pipe);
 void usbhs_pipe_disable(struct usbhs_pipe *pipe);
 void usbhs_pipe_stall(struct usbhs_pipe *pipe);
index e18735e004634015fc29f3e874844e7935cdfe64..f06706efb7f14d0cdb741821bdf30a7d69c3ce2e 100644 (file)
@@ -1020,6 +1020,9 @@ static const struct usb_device_id id_table_combined[] = {
        /* EZPrototypes devices */
        { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
        { USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) },
+       /* Sienna devices */
+       { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
+       { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
        { }                                     /* Terminating entry */
 };
 
index f12d806220b4a9c1d32abeaa350f4c705f98a509..22d66217cb412b99689ecc18f517066433fc337b 100644 (file)
@@ -39,6 +39,9 @@
 
 #define FTDI_LUMEL_PD12_PID    0x6002
 
+/* Sienna Serial Interface by Secyourit GmbH */
+#define FTDI_SIENNA_PID                0x8348
+
 /* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
 #define CYBER_CORTEX_AV_PID    0x8698
 
 #define BANDB_TTL3USB9M_PID    0xAC50
 #define BANDB_ZZ_PROG1_USB_PID 0xBA02
 
+/*
+ * Echelon USB Serial Interface
+ */
+#define ECHELON_VID            0x0920
+#define ECHELON_U20_PID                0x7500
+
 /*
  * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
  */
index d34779fe4a8d046e616cdb2e32c3258cda0c218f..e66a59ef43a1cb59cfa0d41b8af8c7316ededa69 100644 (file)
@@ -1741,8 +1741,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint,
 
        ep_desc = find_ep(serial, endpoint);
        if (!ep_desc) {
-               /* leak the urb, something's wrong and the callers don't care */
-               return urb;
+               usb_free_urb(urb);
+               return NULL;
        }
        if (usb_endpoint_xfer_int(ep_desc)) {
                ep_type_name = "INT";
index e0a4749ba565e0227c27cacba3dbdaf0b8372f07..3cc659a62782e46b87ac183a9073ef4ed139aa9a 100644 (file)
@@ -419,6 +419,7 @@ static void option_instat_callback(struct urb *urb);
 #define CINTERION_PRODUCT_PH8_AUDIO            0x0083
 #define CINTERION_PRODUCT_AHXX_2RMNET          0x0084
 #define CINTERION_PRODUCT_AHXX_AUDIO           0x0085
+#define CINTERION_PRODUCT_CLS8                 0x00b0
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID                     0x0b3c
@@ -968,6 +969,11 @@ static const struct usb_device_id option_ids[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
 
+       /* Motorola devices */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) },    /* mdm6600 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) },    /* mdm9600 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) },    /* mdm ram dl */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) },    /* mdm qc dl */
 
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1149,6 +1155,14 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
          .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff),    /* Telit FN980 (rmnet) */
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff),    /* Telit FN980 (MBIM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff),    /* Telit FN980 (RNDIS) */
+         .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff),    /* Telit FN980 (ECM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1549,6 +1563,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
          .driver_info = RSVD(2) },
        { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },    /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1841,6 +1856,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
        { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+       { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
+         .driver_info = RSVD(0) | RSVD(4) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
@@ -1952,11 +1969,15 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),                     /* D-Link DWM-222 */
          .driver_info = RSVD(4) },
+       { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff),                     /* D-Link DWM-222 A2 */
+         .driver_info = RSVD(4) },
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) },    /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) },    /* D-Link DWM-156/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) },    /* D-Link DWM-156/A3 */
        { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),                     /* Olicard 600 */
          .driver_info = RSVD(4) },
+       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff),                     /* BroadMobi BM818 */
+         .driver_info = RSVD(4) },
        { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                   /* OLICARD300 - MT6225 */
        { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
index e3c5832337e0bdbb83df78285a697ac5f5a8af3f..c9201e0a82417d08981006db6db3481705ae6a4f 100644 (file)
@@ -776,7 +776,6 @@ static void ti_close(struct usb_serial_port *port)
        struct ti_port *tport;
        int port_number;
        int status;
-       int do_unlock;
        unsigned long flags;
 
        tdev = usb_get_serial_data(port->serial);
@@ -800,16 +799,13 @@ static void ti_close(struct usb_serial_port *port)
                        "%s - cannot send close port command, %d\n"
                                                        , __func__, status);
 
-       /* if mutex_lock is interrupted, continue anyway */
-       do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
+       mutex_lock(&tdev->td_open_close_lock);
        --tport->tp_tdev->td_open_port_count;
-       if (tport->tp_tdev->td_open_port_count <= 0) {
+       if (tport->tp_tdev->td_open_port_count == 0) {
                /* last port is closed, shut down interrupt urb */
                usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
-               tport->tp_tdev->td_open_port_count = 0;
        }
-       if (do_unlock)
-               mutex_unlock(&tdev->td_open_close_lock);
+       mutex_unlock(&tdev->td_open_close_lock);
 }
 
 
index f7aaa7f079e1a041f666cc0b0fefbc4da0474972..434153790982191b693716c527177b0fadaa7f52 100644 (file)
@@ -311,10 +311,7 @@ static void serial_cleanup(struct tty_struct *tty)
        serial = port->serial;
        owner = serial->type->driver.owner;
 
-       mutex_lock(&serial->disc_mutex);
-       if (!serial->disconnected)
-               usb_autopm_put_interface(serial->interface);
-       mutex_unlock(&serial->disc_mutex);
+       usb_autopm_put_interface(serial->interface);
 
        usb_serial_put(serial);
        module_put(owner);
index cc794e25a0b6ed043149685eb1400492a977b2c3..1d9ce9cbc831d1035a6ad086b10f1f99e3188a63 100644 (file)
@@ -38,7 +38,7 @@ MODULE_LICENSE("GPL");
 
 static int auto_delink_en = 1;
 module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
 
 #ifdef CONFIG_REALTEK_AUTOPM
 static int ss_en = 1;
@@ -996,12 +996,15 @@ static int init_realtek_cr(struct us_data *us)
                        goto INIT_FAIL;
        }
 
-       if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
-           CHECK_FW_VER(chip, 0x5901))
-               SET_AUTO_DELINK(chip);
-       if (STATUS_LEN(chip) == 16) {
-               if (SUPPORT_AUTO_DELINK(chip))
+       if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
+           CHECK_PID(chip, 0x0159)) {
+               if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
+                               CHECK_FW_VER(chip, 0x5901))
                        SET_AUTO_DELINK(chip);
+               if (STATUS_LEN(chip) == 16) {
+                       if (SUPPORT_AUTO_DELINK(chip))
+                               SET_AUTO_DELINK(chip);
+               }
        }
 #ifdef CONFIG_REALTEK_AUTOPM
        if (ss_en)
index ea0d27a94afe058b3671ad6c66989ecbbdb98568..1cd9b6305b06042fecf75942c4f79af41747feea 100644 (file)
@@ -2100,7 +2100,7 @@ UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
                US_FL_IGNORE_RESIDUE ),
 
 /* Reported by Michael Büsch <m@bues.ch> */
-UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0116,
+UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0117,
                "JMicron",
                "USB to ATA/ATAPI Bridge",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
index 3457c1fdebd1b2191a97206ab6e92dd35a89e40d..819ae3b2bd7e881248fcf3e9f8501d30eb0c3950 100644 (file)
@@ -37,6 +37,7 @@
        S(SRC_ATTACHED),                        \
        S(SRC_STARTUP),                         \
        S(SRC_SEND_CAPABILITIES),               \
+       S(SRC_SEND_CAPABILITIES_TIMEOUT),       \
        S(SRC_NEGOTIATE_CAPABILITIES),          \
        S(SRC_TRANSITION_SUPPLY),               \
        S(SRC_READY),                           \
@@ -378,7 +379,8 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
                        return SNK_UNATTACHED;
                else if (port->try_role == TYPEC_SOURCE)
                        return SRC_UNATTACHED;
-               else if (port->tcpc->config->default_role == TYPEC_SINK)
+               else if (port->tcpc->config &&
+                        port->tcpc->config->default_role == TYPEC_SINK)
                        return SNK_UNATTACHED;
                /* Fall through to return SRC_UNATTACHED */
        } else if (port->port_type == TYPEC_PORT_SNK) {
@@ -585,7 +587,20 @@ static void tcpm_debugfs_init(struct tcpm_port *port)
 
 static void tcpm_debugfs_exit(struct tcpm_port *port)
 {
+       int i;
+
+       mutex_lock(&port->logbuffer_lock);
+       for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
+               kfree(port->logbuffer[i]);
+               port->logbuffer[i] = NULL;
+       }
+       mutex_unlock(&port->logbuffer_lock);
+
        debugfs_remove(port->dentry);
+       if (list_empty(&rootdir->d_subdirs)) {
+               debugfs_remove(rootdir);
+               rootdir = NULL;
+       }
 }
 
 #else
@@ -1094,7 +1109,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
                        break;
                case CMD_ATTENTION:
                        /* Attention command does not have response */
-                       typec_altmode_attention(adev, p[1]);
+                       if (adev)
+                               typec_altmode_attention(adev, p[1]);
                        return 0;
                default:
                        break;
@@ -1146,20 +1162,26 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
                        }
                        break;
                case CMD_ENTER_MODE:
-                       typec_altmode_update_active(pdev, true);
-
-                       if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
-                               response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
-                               response[0] |= VDO_OPOS(adev->mode);
-                               return 1;
+                       if (adev && pdev) {
+                               typec_altmode_update_active(pdev, true);
+
+                               if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
+                                       response[0] = VDO(adev->svid, 1,
+                                                         CMD_EXIT_MODE);
+                                       response[0] |= VDO_OPOS(adev->mode);
+                                       return 1;
+                               }
                        }
                        return 0;
                case CMD_EXIT_MODE:
-                       typec_altmode_update_active(pdev, false);
+                       if (adev && pdev) {
+                               typec_altmode_update_active(pdev, false);
 
-                       /* Back to USB Operation */
-                       WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
-                                                    NULL));
+                               /* Back to USB Operation */
+                               WARN_ON(typec_altmode_notify(adev,
+                                                            TYPEC_STATE_USB,
+                                                            NULL));
+                       }
                        break;
                default:
                        break;
@@ -1169,8 +1191,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
                switch (cmd) {
                case CMD_ENTER_MODE:
                        /* Back to USB Operation */
-                       WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
-                                                    NULL));
+                       if (adev)
+                               WARN_ON(typec_altmode_notify(adev,
+                                                            TYPEC_STATE_USB,
+                                                            NULL));
                        break;
                default:
                        break;
@@ -1181,7 +1205,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
        }
 
        /* Informing the alternate mode drivers about everything */
-       typec_altmode_vdm(adev, p[0], &p[1], cnt);
+       if (adev)
+               typec_altmode_vdm(adev, p[0], &p[1], cnt);
 
        return rlen;
 }
@@ -1421,7 +1446,7 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
                                else if ((pdo_min_voltage(pdo[i]) ==
                                          pdo_min_voltage(pdo[i - 1])) &&
                                         (pdo_max_voltage(pdo[i]) ==
-                                         pdo_min_voltage(pdo[i - 1])))
+                                         pdo_max_voltage(pdo[i - 1])))
                                        return PDO_ERR_DUPE_PDO;
                                break;
                        /*
@@ -2963,10 +2988,34 @@ static void run_state_machine(struct tcpm_port *port)
                        /* port->hard_reset_count = 0; */
                        port->caps_count = 0;
                        port->pd_capable = true;
-                       tcpm_set_state_cond(port, hard_reset_state(port),
+                       tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
                                            PD_T_SEND_SOURCE_CAP);
                }
                break;
+       case SRC_SEND_CAPABILITIES_TIMEOUT:
+               /*
+                * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
+                *
+                * PD 2.0 sinks are supposed to accept src-capabilities with a
+                * 3.0 header and simply ignore any src PDOs which the sink does
+                * not understand such as PPS but some 2.0 sinks instead ignore
+                * the entire PD_DATA_SOURCE_CAP message, causing contract
+                * negotiation to fail.
+                *
+                * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
+                * sending src-capabilities with a lower PD revision to
+                * make these broken sinks work.
+                */
+               if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
+                       tcpm_set_state(port, HARD_RESET_SEND, 0);
+               } else if (port->negotiated_rev > PD_REV20) {
+                       port->negotiated_rev--;
+                       port->hard_reset_count = 0;
+                       tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+               } else {
+                       tcpm_set_state(port, hard_reset_state(port), 0);
+               }
+               break;
        case SRC_NEGOTIATE_CAPABILITIES:
                ret = tcpm_pd_check_request(port);
                if (ret < 0) {
@@ -4083,7 +4132,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role)
        mutex_lock(&port->lock);
        if (tcpc->try_role)
                ret = tcpc->try_role(tcpc, role);
-       if (!ret && !tcpc->config->try_role_hw)
+       if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
                port->try_role = role;
        port->try_src_count = 0;
        port->try_snk_count = 0;
@@ -4730,7 +4779,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
        port->typec_caps.prefer_role = tcfg->default_role;
        port->typec_caps.type = tcfg->type;
        port->typec_caps.data = tcfg->data;
-       port->self_powered = port->tcpc->config->self_powered;
+       port->self_powered = tcfg->self_powered;
 
        return 0;
 }
index f101347e3ea351cdbefa359a910167001dd6fc7d..e0cf11f798c54c0423409a91901dbac98760a7ad 100644 (file)
@@ -59,6 +59,7 @@ struct usb_skel {
        spinlock_t              err_lock;               /* lock for errors */
        struct kref             kref;
        struct mutex            io_mutex;               /* synchronize I/O with disconnect */
+       unsigned long           disconnected:1;
        wait_queue_head_t       bulk_in_wait;           /* to wait for an ongoing read */
 };
 #define to_skel_dev(d) container_of(d, struct usb_skel, kref)
@@ -71,6 +72,7 @@ static void skel_delete(struct kref *kref)
        struct usb_skel *dev = to_skel_dev(kref);
 
        usb_free_urb(dev->bulk_in_urb);
+       usb_put_intf(dev->interface);
        usb_put_dev(dev->udev);
        kfree(dev->bulk_in_buffer);
        kfree(dev);
@@ -122,10 +124,7 @@ static int skel_release(struct inode *inode, struct file *file)
                return -ENODEV;
 
        /* allow the device to be autosuspended */
-       mutex_lock(&dev->io_mutex);
-       if (dev->interface)
-               usb_autopm_put_interface(dev->interface);
-       mutex_unlock(&dev->io_mutex);
+       usb_autopm_put_interface(dev->interface);
 
        /* decrement the count on our device */
        kref_put(&dev->kref, skel_delete);
@@ -238,7 +237,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
        if (rv < 0)
                return rv;
 
-       if (!dev->interface) {          /* disconnect() was called */
+       if (dev->disconnected) {                /* disconnect() was called */
                rv = -ENODEV;
                goto exit;
        }
@@ -420,7 +419,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
 
        /* this lock makes sure we don't submit URBs to gone devices */
        mutex_lock(&dev->io_mutex);
-       if (!dev->interface) {          /* disconnect() was called */
+       if (dev->disconnected) {                /* disconnect() was called */
                mutex_unlock(&dev->io_mutex);
                retval = -ENODEV;
                goto error;
@@ -505,7 +504,7 @@ static int skel_probe(struct usb_interface *interface,
        init_waitqueue_head(&dev->bulk_in_wait);
 
        dev->udev = usb_get_dev(interface_to_usbdev(interface));
-       dev->interface = interface;
+       dev->interface = usb_get_intf(interface);
 
        /* set up the endpoint information */
        /* use only the first bulk-in and bulk-out endpoints */
@@ -571,7 +570,7 @@ static void skel_disconnect(struct usb_interface *interface)
 
        /* prevent more I/O from starting */
        mutex_lock(&dev->io_mutex);
-       dev->interface = NULL;
+       dev->disconnected = 1;
        mutex_unlock(&dev->io_mutex);
 
        usb_kill_anchored_urbs(&dev->submitted);
index 6cf00d9f512b7a214ff00988be49e12b05e1e533..a92c2868d902146758797d11d020ff936ad9516b 100644 (file)
@@ -373,11 +373,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
        pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
 
        /*
-        * Try to reset the device.  The success of this is dependent on
-        * being able to lock the device, which is not always possible.
+        * Try to get the locks ourselves to prevent a deadlock. The
+        * success of this is dependent on being able to lock the device,
+        * which is not always possible.
+        * We can not use the "try" reset interface here, which will
+        * overwrite the previously restored configuration information.
         */
-       if (vdev->reset_works && !pci_try_reset_function(pdev))
-               vdev->needs_reset = false;
+       if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
+               if (device_trylock(&pdev->dev)) {
+                       if (!__pci_reset_function_locked(pdev))
+                               vdev->needs_reset = false;
+                       device_unlock(&pdev->dev);
+               }
+               pci_cfg_access_unlock(pdev);
+       }
 
        pci_restore_state(pdev);
 out:
index 39155d7cc8946ead49d2a55ebec37006ac0d95a1..124356dc39e14589e6cd94e7e7a078073836d667 100644 (file)
@@ -36,7 +36,7 @@
 
 #include "vhost.h"
 
-static int experimental_zcopytx = 1;
+static int experimental_zcopytx = 0;
 module_param(experimental_zcopytx, int, 0444);
 MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
                                       " 1 -Enable; 0 - Disable");
@@ -497,12 +497,6 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
        return iov_iter_count(iter);
 }
 
-static bool vhost_exceeds_weight(int pkts, int total_len)
-{
-       return total_len >= VHOST_NET_WEIGHT ||
-              pkts >= VHOST_NET_PKT_WEIGHT;
-}
-
 static int get_tx_bufs(struct vhost_net *net,
                       struct vhost_net_virtqueue *nvq,
                       struct msghdr *msg,
@@ -557,7 +551,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
        int err;
        int sent_pkts = 0;
 
-       for (;;) {
+       do {
                bool busyloop_intr = false;
 
                head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
@@ -598,11 +592,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
                                 err, len);
                if (++nvq->done_idx >= VHOST_NET_BATCH)
                        vhost_net_signal_used(nvq);
-               if (vhost_exceeds_weight(++sent_pkts, total_len)) {
-                       vhost_poll_queue(&vq->poll);
-                       break;
-               }
-       }
+       } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
 
        vhost_net_signal_used(nvq);
 }
@@ -626,7 +616,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
        bool zcopy_used;
        int sent_pkts = 0;
 
-       for (;;) {
+       do {
                bool busyloop_intr;
 
                /* Release DMAs done buffers first */
@@ -701,11 +691,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
                else
                        vhost_zerocopy_signal_used(net, vq);
                vhost_net_tx_packet(net);
-               if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
-                       vhost_poll_queue(&vq->poll);
-                       break;
-               }
-       }
+       } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -941,8 +927,11 @@ static void handle_rx(struct vhost_net *net)
                vq->log : NULL;
        mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
-       while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
-                                                     &busyloop_intr))) {
+       do {
+               sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+                                                     &busyloop_intr);
+               if (!sock_len)
+                       break;
                sock_len += sock_hlen;
                vhost_len = sock_len + vhost_hlen;
                headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -1027,14 +1016,11 @@ static void handle_rx(struct vhost_net *net)
                        vhost_log_write(vq, vq_log, log, vhost_len,
                                        vq->iov, in);
                total_len += vhost_len;
-               if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
-                       vhost_poll_queue(&vq->poll);
-                       goto out;
-               }
-       }
+       } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
+
        if (unlikely(busyloop_intr))
                vhost_poll_queue(&vq->poll);
-       else
+       else if (!sock_len)
                vhost_net_enable_vq(net, vq);
 out:
        vhost_net_signal_used(nvq);
@@ -1115,7 +1101,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
                vhost_net_buf_init(&n->vqs[i].rxq);
        }
        vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
-                      UIO_MAXIOV + VHOST_NET_BATCH);
+                      UIO_MAXIOV + VHOST_NET_BATCH,
+                      VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
 
        vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
        vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
index 0cfa925be4ec62df05d07222a95dad558f76ea90..5e298d9287f1c6dece8471eb38d13daf574ec894 100644 (file)
 #define VHOST_SCSI_PREALLOC_UPAGES 2048
 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
 
+/* Max number of requests before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * request.
+ */
+#define VHOST_SCSI_WEIGHT 256
+
 struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
        struct completion comp;
@@ -811,7 +817,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
        u64 tag;
        u32 exp_data_len, data_direction;
        unsigned int out = 0, in = 0;
-       int head, ret, prot_bytes;
+       int head, ret, prot_bytes, c = 0;
        size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
        size_t out_size, in_size;
        u16 lun;
@@ -830,7 +836,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
        vhost_disable_notify(&vs->dev, vq);
 
-       for (;;) {
+       do {
                head = vhost_get_vq_desc(vq, vq->iov,
                                         ARRAY_SIZE(vq->iov), &out, &in,
                                         NULL, NULL);
@@ -1045,7 +1051,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                 */
                INIT_WORK(&cmd->work, vhost_scsi_submission_work);
                queue_work(vhost_scsi_workqueue, &cmd->work);
-       }
+       } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
 out:
        mutex_unlock(&vq->mutex);
 }
@@ -1398,7 +1404,8 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
                vqs[i] = &vs->vqs[i].vq;
                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
        }
-       vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
+       vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
+                      VHOST_SCSI_WEIGHT, 0);
 
        vhost_scsi_init_inflight(vs, NULL);
 
index 40589850eb33c83c06d9211fa86aa95af655dd9e..a9be2d8e98df74c021c90bd358fa0a08fc2371c9 100644 (file)
  * Using this limit prevents one virtqueue from starving others. */
 #define VHOST_TEST_WEIGHT 0x80000
 
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * pkts.
+ */
+#define VHOST_TEST_PKT_WEIGHT 256
+
 enum {
        VHOST_TEST_VQ = 0,
        VHOST_TEST_VQ_MAX = 1,
@@ -81,10 +87,8 @@ static void handle_vq(struct vhost_test *n)
                }
                vhost_add_used_and_signal(&n->dev, vq, head, 0);
                total_len += len;
-               if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
-                       vhost_poll_queue(&vq->poll);
+               if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
                        break;
-               }
        }
 
        mutex_unlock(&vq->mutex);
@@ -116,7 +120,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
        dev = &n->dev;
        vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
        n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
-       vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
+       vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
+                      VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
 
        f->private_data = n;
 
index c163bc15976abd8e0de6380d1fcc446a6a8988ee..98b6eb902df9ecdacb6dfc8ecfc19571818b9003 100644 (file)
@@ -413,8 +413,24 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
                vhost_vq_free_iovecs(dev->vqs[i]);
 }
 
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
+                         int pkts, int total_len)
+{
+       struct vhost_dev *dev = vq->dev;
+
+       if ((dev->byte_weight && total_len >= dev->byte_weight) ||
+           pkts >= dev->weight) {
+               vhost_poll_queue(&vq->poll);
+               return true;
+       }
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
+
 void vhost_dev_init(struct vhost_dev *dev,
-                   struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
+                   struct vhost_virtqueue **vqs, int nvqs,
+                   int iov_limit, int weight, int byte_weight)
 {
        struct vhost_virtqueue *vq;
        int i;
@@ -428,6 +444,8 @@ void vhost_dev_init(struct vhost_dev *dev,
        dev->mm = NULL;
        dev->worker = NULL;
        dev->iov_limit = iov_limit;
+       dev->weight = weight;
+       dev->byte_weight = byte_weight;
        init_llist_head(&dev->work_list);
        init_waitqueue_head(&dev->wait);
        INIT_LIST_HEAD(&dev->read_list);
@@ -2055,7 +2073,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
                /* If this is an input descriptor, increment that count. */
                if (access == VHOST_ACCESS_WO) {
                        *in_num += ret;
-                       if (unlikely(log)) {
+                       if (unlikely(log && ret)) {
                                log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
                                log[*log_num].len = vhost32_to_cpu(vq, desc.len);
                                ++*log_num;
@@ -2198,7 +2216,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
                        /* If this is an input descriptor,
                         * increment that count. */
                        *in_num += ret;
-                       if (unlikely(log)) {
+                       if (unlikely(log && ret)) {
                                log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
                                log[*log_num].len = vhost32_to_cpu(vq, desc.len);
                                ++*log_num;
index 9490e7ddb3404891515908cb8e67d74563640ebd..27a78a9b8cc7dc6e21626f1134d34055ca71dfe2 100644 (file)
@@ -171,10 +171,13 @@ struct vhost_dev {
        struct list_head pending_list;
        wait_queue_head_t wait;
        int iov_limit;
+       int weight;
+       int byte_weight;
 };
 
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
 void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
-                   int nvqs, int iov_limit);
+                   int nvqs, int iov_limit, int weight, int byte_weight);
 long vhost_dev_set_owner(struct vhost_dev *dev);
 bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
index e440f87ae1d60630ae195d23e6d0b2fac67cbd05..bab495d73195f985ef30b26c36f5ddb0b33a5ec3 100644 (file)
 #include "vhost.h"
 
 #define VHOST_VSOCK_DEFAULT_HOST_CID   2
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_VSOCK_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * small pkts.
+ */
+#define VHOST_VSOCK_PKT_WEIGHT 256
 
 enum {
        VHOST_VSOCK_FEATURES = VHOST_FEATURES,
@@ -78,6 +86,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                            struct vhost_virtqueue *vq)
 {
        struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+       int pkts = 0, total_len = 0;
        bool added = false;
        bool restart_tx = false;
 
@@ -89,7 +98,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
        /* Avoid further vmexits, we're already processing the virtqueue */
        vhost_disable_notify(&vsock->dev, vq);
 
-       for (;;) {
+       do {
                struct virtio_vsock_pkt *pkt;
                struct iov_iter iov_iter;
                unsigned out, in;
@@ -174,8 +183,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                 */
                virtio_transport_deliver_tap_pkt(pkt);
 
+               total_len += pkt->len;
                virtio_transport_free_pkt(pkt);
-       }
+       } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
        if (added)
                vhost_signal(&vsock->dev, vq);
 
@@ -350,7 +360,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);
        struct virtio_vsock_pkt *pkt;
-       int head;
+       int head, pkts = 0, total_len = 0;
        unsigned int out, in;
        bool added = false;
 
@@ -360,7 +370,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
                goto out;
 
        vhost_disable_notify(&vsock->dev, vq);
-       for (;;) {
+       do {
                u32 len;
 
                if (!vhost_vsock_more_replies(vsock)) {
@@ -401,9 +411,11 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
                else
                        virtio_transport_free_pkt(pkt);
 
-               vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
+               len += sizeof(pkt->hdr);
+               vhost_add_used(vq, head, len);
+               total_len += len;
                added = true;
-       }
+       } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 
 no_more_replies:
        if (added)
@@ -531,7 +543,9 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
        vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
        vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
 
-       vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
+       vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
+                      UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
+                      VHOST_VSOCK_WEIGHT);
 
        file->private_data = vsock;
        spin_lock_init(&vsock->send_pkt_list_lock);
index 9f39f0c360e0c00affa5837213b3db76c7ef9742..cc1006375cacb6289116cdf2975ac932e09c47b9 100644 (file)
@@ -122,28 +122,13 @@ static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si)
  */
 static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
 {
-       static const int default_resolutions[][2] = {
-               {  800,  600 },
-               { 1024,  768 },
-               { 1280, 1024 },
-       };
-       u32 i, right_margin;
-
-       for (i = 0; i < ARRAY_SIZE(default_resolutions); i++) {
-               if (default_resolutions[i][0] == si->lfb_width &&
-                   default_resolutions[i][1] == si->lfb_height)
-                       break;
-       }
-       /* If not a default resolution used for textmode, this should be fine */
-       if (i >= ARRAY_SIZE(default_resolutions))
-               return true;
-
-       /* If the right margin is 5 times smaller then the left one, reject */
-       right_margin = si->lfb_width - (bgrt_tab.image_offset_x + bmp_width);
-       if (right_margin < (bgrt_tab.image_offset_x / 5))
-               return false;
+       /*
+        * All x86 firmwares horizontally center the image (the yoffset
+        * calculations differ between boards, but xoffset is predictable).
+        */
+       u32 expected_xoffset = (si->lfb_width - bmp_width) / 2;
 
-       return true;
+       return bgrt_tab.image_offset_x == expected_xoffset;
 }
 #else
 static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
index 6439231f2db22ec13a227ed009598d58cadd737f..da565f39c9b066aa38bcf21d1aabb9fa042d0c1d 100644 (file)
@@ -433,7 +433,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
        if (ret < 0)
                return ret;
 
-       ret = ssd1307fb_write_cmd(par->client, 0x0);
+       ret = ssd1307fb_write_cmd(par->client, par->page_offset);
        if (ret < 0)
                return ret;
 
index b165c46aca741b6d10c036f0f761e0faad2fbbd6..001bfee950e92430dd9d85a9afb52ed2cd89de0a 100644 (file)
@@ -481,6 +481,14 @@ config DAVINCI_WATCHDOG
          NOTE: once enabled, this timer cannot be disabled.
          Say N if you are unsure.
 
+config K3_RTI_WATCHDOG
+       tristate "Texas Instruments K3 RTI watchdog"
+       depends on ARCH_K3 || COMPILE_TEST
+       select WATCHDOG_CORE
+       help
+         Say Y here if you want to include support for the K3 watchdog
+         timer (RTI module) available in the K3 generation of processors.
+
 config ORION_WATCHDOG
        tristate "Orion watchdog"
        depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || (COMPILE_TEST && !ARCH_EBSA110)
index bf92e7bf9ce082200e159953d0ca8eb4403a3a71..f3fe8f43a00a4de6609dc0ef67a9258c31c5e22c 100644 (file)
@@ -57,6 +57,7 @@ obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
 obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
 obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
 obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
+obj-$(CONFIG_K3_RTI_WATCHDOG) += rti_wdt.o
 obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
 obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o
 obj-$(CONFIG_RN5T618_WATCHDOG) += rn5t618_wdt.o
index 1abe4d021fd27171bae9fb7a289c72916f041486..ffde179a9bb2c54792d2e1894a0f4abb66e43c52 100644 (file)
@@ -38,6 +38,7 @@ static const struct aspeed_wdt_config ast2500_config = {
 static const struct of_device_id aspeed_wdt_of_table[] = {
        { .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
        { .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
+       { .compatible = "aspeed,ast2600-wdt", .data = &ast2500_config },
        { },
 };
 MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
@@ -264,7 +265,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
                set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
        }
 
-       if (of_device_is_compatible(np, "aspeed,ast2500-wdt")) {
+       if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
+               (of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
                u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
 
                reg &= config->ext_pulse_width_mask;
index ed05514cc2dce77f2ce46cd8d941b6b20f5f1be3..e6c27b71b136d9e6852afb6238d5f6d04692a07f 100644 (file)
@@ -249,6 +249,7 @@ module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
                                __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
+MODULE_ALIAS("platform:bcm2835-wdt");
 MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
 MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer");
 MODULE_LICENSE("GPL");
index 7e7bdcbbc741b395da7a5ecc97a5d5878dfe521e..9f3123b045364ccd491cd05d4e3e453bc976cc28 100644 (file)
@@ -55,7 +55,7 @@
 
 #define IMX2_WDT_WMCR          0x08            /* Misc Register */
 
-#define IMX2_WDT_MAX_TIME      128
+#define IMX2_WDT_MAX_TIME      128U
 #define IMX2_WDT_DEFAULT_TIME  60              /* in seconds */
 
 #define WDOG_SEC_TO_COUNT(s)   ((s * 2 - 1) << 8)
@@ -180,7 +180,7 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
 {
        unsigned int actual;
 
-       actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
+       actual = min(new_timeout, IMX2_WDT_MAX_TIME);
        __imx2_wdt_set_timeout(wdog, actual);
        wdog->timeout = new_timeout;
        return 0;
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
new file mode 100644 (file)
index 0000000..d868d28
--- /dev/null
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Watchdog driver for the K3 RTI module
+ *
+ * (c) Copyright 2019 Texas Instruments Inc.
+ * All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+
+#define MODULE_NAME "rti-wdt"
+#define DEFAULT_HEARTBEAT 60
+#define MAX_HEARTBEAT     6000 /* really the max margin is 264/27MHz*/
+
+/* Timer register set definition */
+#define RTIDWDCTRL     0x90
+#define RTIDWDPRLD     0x94
+#define RTIWDSTATUS    0x98
+#define RTIWDKEY       0x9c
+#define RTIDWDCNTR     0xa0
+#define RTIWWDRXCTRL   0xa4
+#define RTIWWDSIZECTRL 0xa8
+
+#define RTIWWDRX_NMI   0xa
+
+#define RTIWWDSIZE_50P 0x50
+
+#define WDENABLE_KEY   0xa98559da
+
+#define WDKEY_SEQ0             0xe51a
+#define WDKEY_SEQ1             0xa35c
+
+#define WDT_PRELOAD_SHIFT      13
+
+#define WDT_PRELOAD_MAX                0xfff
+
+#define DWDST                  BIT(1)
+
+static int heartbeat;
+
+/*
+ * struct to hold data for each WDT device
+ * @base - base io address of WD device
+ * @clk - source clock of WDT
+ * @wdd - hold watchdog device as is in WDT core
+ */
+struct rti_wdt_device {
+       void __iomem            *base;
+       struct clk              *clk;
+       struct watchdog_device  wdd;
+};
+
+static int rti_wdt_start(struct watchdog_device *wdd)
+{
+       u32 timer_margin;
+       unsigned long freq;
+       struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
+
+       freq = clk_get_rate(wdt->clk);
+
+       /* set timeout period */
+       timer_margin = (u64)wdd->timeout * freq;
+       timer_margin >>= WDT_PRELOAD_SHIFT;
+       if (timer_margin > WDT_PRELOAD_MAX)
+               timer_margin = WDT_PRELOAD_MAX;
+       writel_relaxed(timer_margin, wdt->base + RTIDWDPRLD);
+
+       /* Set min heartbeat to 1.1x window size */
+       wdd->min_hw_heartbeat_ms = 11 * wdd->timeout * 1000 / 20;
+
+       /* Generate NMI when wdt expires */
+       writel_relaxed(RTIWWDRX_NMI, wdt->base + RTIWWDRXCTRL);
+
+       /* Window size 50% */
+       writel_relaxed(RTIWWDSIZE_50P, wdt->base + RTIWWDSIZECTRL);
+
+       readl_relaxed(wdt->base + RTIWWDSIZECTRL);
+
+       /* enable watchdog */
+       writel_relaxed(WDENABLE_KEY, wdt->base + RTIDWDCTRL);
+       return 0;
+}
+
+static int rti_wdt_ping(struct watchdog_device *wdd)
+{
+       struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
+
+       /* put watchdog in service state */
+       writel_relaxed(WDKEY_SEQ0, wdt->base + RTIWDKEY);
+       /* put watchdog in active state */
+       writel_relaxed(WDKEY_SEQ1, wdt->base + RTIWDKEY);
+
+       if (readl_relaxed(wdt->base + RTIWDSTATUS))
+               WARN_ON_ONCE(1);
+
+       return 0;
+}
+
+static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+       u64 timer_counter;
+       unsigned long freq;
+       u32 val;
+       struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
+
+       /* if timeout has occurred then return 0 */
+       val = readl_relaxed(wdt->base + RTIWDSTATUS);
+       if (val & DWDST)
+               return 0;
+
+       freq = clk_get_rate(wdt->clk);
+       if (!freq)
+               return 0;
+
+       timer_counter = readl_relaxed(wdt->base + RTIDWDCNTR);
+
+       do_div(timer_counter, freq);
+
+       return timer_counter;
+}
+
+static const struct watchdog_info rti_wdt_info = {
+       .options = WDIOF_KEEPALIVEPING,
+       .identity = "K3 RTI Watchdog",
+};
+
+static const struct watchdog_ops rti_wdt_ops = {
+       .owner          = THIS_MODULE,
+       .start          = rti_wdt_start,
+       .ping           = rti_wdt_ping,
+       .get_timeleft   = rti_wdt_get_timeleft,
+};
+
+static int rti_wdt_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+       struct device *dev = &pdev->dev;
+       struct resource *wdt_mem;
+       struct watchdog_device *wdd;
+       struct rti_wdt_device *wdt;
+
+       wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+       if (!wdt)
+               return -ENOMEM;
+
+       wdt->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(wdt->clk)) {
+               if (PTR_ERR(wdt->clk) != -EPROBE_DEFER)
+                       dev_err(dev, "failed to get clock\n");
+               return PTR_ERR(wdt->clk);
+       }
+
+       pm_runtime_enable(dev);
+       ret = pm_runtime_get_sync(dev);
+       if (ret) {
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "runtime pm failed\n");
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, wdt);
+
+       wdd = &wdt->wdd;
+       wdd->info = &rti_wdt_info;
+       wdd->ops = &rti_wdt_ops;
+       wdd->min_timeout = 1;
+       /* Set min heartbeat to 1.1x window size */
+       wdd->min_hw_heartbeat_ms = 11 * DEFAULT_HEARTBEAT * 1000 / 20;
+       wdd->max_hw_heartbeat_ms = MAX_HEARTBEAT;
+       wdd->timeout = DEFAULT_HEARTBEAT;
+       wdd->parent = dev;
+
+       set_bit(WDOG_RESET_KEEPALIVE, &wdd->status);
+
+       watchdog_init_timeout(wdd, heartbeat, dev);
+
+       dev_info(dev, "heartbeat %d sec\n", wdd->timeout);
+
+       watchdog_set_drvdata(wdd, wdt);
+       watchdog_set_nowayout(wdd, 1);
+       watchdog_set_restart_priority(wdd, 128);
+
+       wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       wdt->base = devm_ioremap_resource(dev, wdt_mem);
+       if (IS_ERR(wdt->base)) {
+               ret = PTR_ERR(wdt->base);
+               goto err_iomap;
+       }
+
+       ret = watchdog_register_device(wdd);
+       if (ret) {
+               dev_err(dev, "cannot register watchdog device\n");
+               goto err_iomap;
+       }
+
+       return 0;
+
+err_iomap:
+       pm_runtime_put_sync(&pdev->dev);
+
+       return ret;
+}
+
+static int rti_wdt_remove(struct platform_device *pdev)
+{
+       struct rti_wdt_device *wdt = platform_get_drvdata(pdev);
+
+       watchdog_unregister_device(&wdt->wdd);
+       pm_runtime_put(&pdev->dev);
+
+       return 0;
+}
+
+static const struct of_device_id rti_wdt_of_match[] = {
+       { .compatible = "ti,rti-wdt", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, rti_wdt_of_match);
+
+static struct platform_driver rti_wdt_driver = {
+       .driver = {
+               .name = "rti-wdt",
+               .of_match_table = rti_wdt_of_match,
+       },
+       .probe = rti_wdt_probe,
+       .remove = rti_wdt_remove,
+};
+
+module_platform_driver(rti_wdt_driver);
+
+MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>");
+MODULE_DESCRIPTION("K3 RTI Watchdog Driver");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+                "Watchdog heartbeat period in seconds from 1 to "
+                __MODULE_STRING(MAX_HEARTBEAT) ", default "
+                __MODULE_STRING(DEFAULT_HEARTBEAT));
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rti-wdt");
index ffbdc4642ea55cbe3dd6e6105e4b47c0d35bb163..42e25a0792b8a594a71ecce176f3927130ff83a1 100644 (file)
@@ -267,6 +267,8 @@ static int watchdog_start(struct watchdog_device *wdd)
                set_bit(WDOG_ACTIVE, &wdd->status);
                wd_data->last_keepalive = started_at;
                watchdog_update_worker(wdd);
+               if (test_bit(WDOG_RESET_KEEPALIVE, &wdd->status))
+                       wd_data->last_hw_keepalive = started_at;
        }
 
        return err;
index 7ab6caef599c59c57e1de6e25b03a073652472c7..d4e8b717ce2b2d1b9068ee686023b4de4fb9741e 100644 (file)
@@ -527,8 +527,15 @@ static void balloon_process(struct work_struct *work)
                                state = reserve_additional_memory();
                }
 
-               if (credit < 0)
-                       state = decrease_reservation(-credit, GFP_BALLOON);
+               if (credit < 0) {
+                       long n_pages;
+
+                       n_pages = min(-credit, si_mem_available());
+                       state = decrease_reservation(n_pages, GFP_BALLOON);
+                       if (state == BP_DONE && n_pages != -credit &&
+                           n_pages < totalreserve_pages)
+                               state = BP_EAGAIN;
+               }
 
                state = update_schedule(state);
 
@@ -567,6 +574,9 @@ static int add_ballooned_pages(int nr_pages)
                }
        }
 
+       if (si_mem_available() < nr_pages)
+               return -ENOMEM;
+
        st = decrease_reservation(nr_pages, GFP_USER);
        if (st != BP_DONE)
                return -ENOMEM;
@@ -696,7 +706,7 @@ static int __init balloon_init(void)
        balloon_stats.schedule_delay = 1;
        balloon_stats.max_schedule_delay = 32;
        balloon_stats.retry_count = 1;
-       balloon_stats.max_retry_count = RETRY_UNLIMITED;
+       balloon_stats.max_retry_count = 4;
 
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
        set_online_page_callback(&xen_online_page);
index fe1f16351f9411455b15576cdc83cb6db7d98281..8d49b91d92cd33a343eac0cb9810412cfeaab99b 100644 (file)
@@ -1293,7 +1293,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
 }
 
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
-int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
+static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
 {
        struct evtchn_bind_vcpu bind_vcpu;
        int masked;
@@ -1327,7 +1327,6 @@ int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(xen_rebind_evtchn_to_cpu);
 
 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
                            bool force)
@@ -1341,6 +1340,15 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
        return ret;
 }
 
+/* To be called with desc->lock held. */
+int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
+{
+       struct irq_data *d = irq_desc_get_irq_data(desc);
+
+       return set_affinity_irq(d, cpumask_of(tcpu), false);
+}
+EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
+
 static void enable_dynirq(struct irq_data *data)
 {
        int evtchn = evtchn_from_irq(data->irq);
index 6d1a5e58968ffdfb42a71e5f984a52475ea0ca9c..47c70b826a6abf227960030cb09dd3a45fec4746 100644 (file)
@@ -447,7 +447,7 @@ static void evtchn_bind_interdom_next_vcpu(int evtchn)
        this_cpu_write(bind_last_selected_cpu, selected_cpu);
 
        /* unmask expects irqs to be disabled */
-       xen_rebind_evtchn_to_cpu(evtchn, selected_cpu);
+       xen_set_affinity_evtchn(desc, selected_cpu);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
index 7494dbeb4409c0a6c10fc615f275e552fe3e0807..db58aaa4dc5983ac41b2c6c228440749790f7412 100644 (file)
@@ -29,6 +29,8 @@
 #include "../pci/pci.h"
 #ifdef CONFIG_PCI_MMCONFIG
 #include <asm/pci_x86.h>
+
+static int xen_mcfg_late(void);
 #endif
 
 static bool __read_mostly pci_seg_supported = true;
@@ -40,7 +42,18 @@ static int xen_add_device(struct device *dev)
 #ifdef CONFIG_PCI_IOV
        struct pci_dev *physfn = pci_dev->physfn;
 #endif
-
+#ifdef CONFIG_PCI_MMCONFIG
+       static bool pci_mcfg_reserved = false;
+       /*
+        * Reserve MCFG areas in Xen on first invocation due to this being
+        * potentially called from inside of acpi_init immediately after
+        * MCFG table has been finally parsed.
+        */
+       if (!pci_mcfg_reserved) {
+               xen_mcfg_late();
+               pci_mcfg_reserved = true;
+       }
+#endif
        if (pci_seg_supported) {
                struct {
                        struct physdev_pci_device_add add;
@@ -213,7 +226,7 @@ static int __init register_xen_pci_notifier(void)
 arch_initcall(register_xen_pci_notifier);
 
 #ifdef CONFIG_PCI_MMCONFIG
-static int __init xen_mcfg_late(void)
+static int xen_mcfg_late(void)
 {
        struct pci_mmcfg_region *cfg;
        int rc;
@@ -252,8 +265,4 @@ static int __init xen_mcfg_late(void)
        }
        return 0;
 }
-/*
- * Needs to be done after acpi_init which are subsys_initcall.
- */
-subsys_initcall_sync(xen_mcfg_late);
 #endif
index aa081f8067283bece0384cfb01d4bf5d1d2aa6cc..3d9997595d900fb2eaeaf6325494f8ed2d02a39b 100644 (file)
@@ -357,8 +357,8 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
        /* Convert the size to actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);
 
-       if (((dev_addr + size - 1 <= dma_mask)) ||
-           range_straddles_page_boundary(phys, size))
+       if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+                    range_straddles_page_boundary(phys, size)))
                xen_destroy_contiguous_region(phys, order);
 
        xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
index 73427d8e01161ae55fa561599927bb68536bdbd0..e5694133ebe57f37950b134c26c3fbc2a231a221 100644 (file)
@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
 {
        int err;
        u16 old_value;
-       pci_power_t new_state, old_state;
+       pci_power_t new_state;
 
        err = pci_read_config_word(dev, offset, &old_value);
        if (err)
                goto out;
 
-       old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
        new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
 
        new_value &= PM_OK_BITS;
index 39c63152a35800665ca5deadb9656b13fe90088c..454c6826abdb417eb9e47534f808987a0f54cc57 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
+#include <linux/workqueue.h>
 
 #include <xen/xenbus.h>
 #include <xen/xen.h>
@@ -116,6 +117,8 @@ struct xenbus_file_priv {
        wait_queue_head_t read_waitq;
 
        struct kref kref;
+
+       struct work_struct wq;
 };
 
 /* Read out any raw xenbus messages queued up. */
@@ -300,14 +303,14 @@ static void watch_fired(struct xenbus_watch *watch,
        mutex_unlock(&adap->dev_data->reply_mutex);
 }
 
-static void xenbus_file_free(struct kref *kref)
+static void xenbus_worker(struct work_struct *wq)
 {
        struct xenbus_file_priv *u;
        struct xenbus_transaction_holder *trans, *tmp;
        struct watch_adapter *watch, *tmp_watch;
        struct read_buffer *rb, *tmp_rb;
 
-       u = container_of(kref, struct xenbus_file_priv, kref);
+       u = container_of(wq, struct xenbus_file_priv, wq);
 
        /*
         * No need for locking here because there are no other users,
@@ -333,6 +336,18 @@ static void xenbus_file_free(struct kref *kref)
        kfree(u);
 }
 
+static void xenbus_file_free(struct kref *kref)
+{
+       struct xenbus_file_priv *u;
+
+       /*
+        * We might be called in xenbus_thread().
+        * Use workqueue to avoid deadlock.
+        */
+       u = container_of(kref, struct xenbus_file_priv, kref);
+       schedule_work(&u->wq);
+}
+
 static struct xenbus_transaction_holder *xenbus_get_transaction(
        struct xenbus_file_priv *u, uint32_t tx_id)
 {
@@ -652,6 +667,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
        INIT_LIST_HEAD(&u->watches);
        INIT_LIST_HEAD(&u->read_buffers);
        init_waitqueue_head(&u->read_waitq);
+       INIT_WORK(&u->wq, xenbus_worker);
 
        mutex_init(&u->reply_mutex);
        mutex_init(&u->msgbuffer_mutex);
index 9eb34701a566c84cd05b4923427c5ca3f73ba12c..a43a8d2436db5de30bf9d852256a4806e894bee2 100644 (file)
@@ -66,6 +66,8 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
        if (!v9ses->cachetag) {
                if (v9fs_random_cachetag(v9ses) < 0) {
                        v9ses->fscache = NULL;
+                       kfree(v9ses->cachetag);
+                       v9ses->cachetag = NULL;
                        return;
                }
        }
index e1cbdfdb7c684fd24fdb6f25ee03f4e253e9ef58..1970693035109f9b20341209954a41f5b4d4496c 100644 (file)
@@ -50,8 +50,9 @@
  * @page: structure to page
  *
  */
-static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
+static int v9fs_fid_readpage(void *data, struct page *page)
 {
+       struct p9_fid *fid = data;
        struct inode *inode = page->mapping->host;
        struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
        struct iov_iter to;
@@ -122,7 +123,8 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
        if (ret == 0)
                return ret;
 
-       ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
+       ret = read_cache_pages(mapping, pages, v9fs_fid_readpage,
+                       filp->private_data);
        p9_debug(P9_DEBUG_VFS, "  = %d\n", ret);
        return ret;
 }
index 05454a7e22dc2e60d1547af3776067d4856a297f..550d0b169d7c289e31275d89a9cd32478f8f54fb 100644 (file)
@@ -528,6 +528,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
        v9inode = V9FS_I(inode);
        mutex_lock(&v9inode->v_mutex);
        if (!v9inode->writeback_fid &&
+           (vma->vm_flags & VM_SHARED) &&
            (vma->vm_flags & VM_WRITE)) {
                /*
                 * clone a fid and add it to writeback_fid
@@ -629,6 +630,8 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
                        (vma->vm_end - vma->vm_start - 1),
        };
 
+       if (!(vma->vm_flags & VM_SHARED))
+               return;
 
        p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
 
index 7e099a7a4eb1e50bc9914e73432ecc751d0b21ed..4dc15b26348945cf308301a1c190056b976630ba 100644 (file)
@@ -369,6 +369,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
        struct buffer_head *bh;
        struct object_info root_obj;
        unsigned char *b_data;
+       unsigned int blocksize;
        struct adfs_sb_info *asb;
        struct inode *root;
        int ret = -EINVAL;
@@ -420,8 +421,10 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
                goto error_free_bh;
        }
 
+       blocksize = 1 << dr->log2secsize;
        brelse(bh);
-       if (sb_set_blocksize(sb, 1 << dr->log2secsize)) {
+
+       if (sb_set_blocksize(sb, blocksize)) {
                bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
                if (!bh) {
                        adfs_error(sb, "couldn't read superblock on "
index 5f261fbf2182b22a47fc93b7c6fee35f113e0097..4ad70125029988ae29ad81dc7bf0022f57a8c2b8 100644 (file)
@@ -276,9 +276,9 @@ static void afs_break_one_callback(struct afs_server *server,
                        struct afs_super_info *as = AFS_FS_S(cbi->sb);
                        struct afs_volume *volume = as->volume;
 
-                       write_lock(&volume->cb_break_lock);
+                       write_lock(&volume->cb_v_break_lock);
                        volume->cb_v_break++;
-                       write_unlock(&volume->cb_break_lock);
+                       write_unlock(&volume->cb_v_break_lock);
                } else {
                        data.volume = NULL;
                        data.fid = *fid;
index 6127f0fcd62c4e376bd2554c1003aedb40aab471..ee07162d35c7a2b260dacbf6e6897e5230335dd9 100644 (file)
@@ -76,6 +76,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
                        cell = rcu_dereference_raw(net->ws_cell);
                        if (cell) {
                                afs_get_cell(cell);
+                               ret = 0;
                                break;
                        }
                        ret = -EDESTADDRREQ;
@@ -110,6 +111,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
 
        done_seqretry(&net->cells_lock, seq);
 
+       if (ret != 0 && cell)
+               afs_put_cell(net, cell);
+
        return ret == 0 ? cell : ERR_PTR(ret);
 }
 
index 9e51d6fe7e8f975f34f877217a28a8e99bcfa5e4..40c6860d4c63226304cb633c9181970ec389ac23 100644 (file)
@@ -423,18 +423,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
        struct afs_call *call = container_of(work, struct afs_call, work);
        struct afs_uuid *r = call->request;
 
-       struct {
-               __be32  match;
-       } reply;
-
        _enter("");
 
        if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
-               reply.match = htonl(0);
+               afs_send_empty_reply(call);
        else
-               reply.match = htonl(1);
+               rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+                                       1, 1, "K-1");
 
-       afs_send_simple_reply(call, &reply, sizeof(reply));
        afs_put_call(call);
        _leave("");
 }
index 855bf2b79fed4117559f6f011cacd3b43f74b927..54e7f6f1405e298db38aafae45a499b41519dfd4 100644 (file)
@@ -937,7 +937,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        dir_version = (long)dir->status.data_version;
        de_version = (long)dentry->d_fsdata;
        if (de_version == dir_version)
-               goto out_valid;
+               goto out_valid_noupdate;
 
        dir_version = (long)dir->invalid_before;
        if (de_version - dir_version >= 0)
@@ -1001,6 +1001,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 
 out_valid:
        dentry->d_fsdata = (void *)dir_version;
+out_valid_noupdate:
        dput(parent);
        key_put(key);
        _leave(" = 1 [valid]");
index 7d4f26198573d7f6a4dffb7ff4a82ee0f8fbb573..843d3b970b845050701c02febcc585f00321fd95 100644 (file)
@@ -193,11 +193,13 @@ void afs_put_read(struct afs_read *req)
        int i;
 
        if (refcount_dec_and_test(&req->usage)) {
-               for (i = 0; i < req->nr_pages; i++)
-                       if (req->pages[i])
-                               put_page(req->pages[i]);
-               if (req->pages != req->array)
-                       kfree(req->pages);
+               if (req->pages) {
+                       for (i = 0; i < req->nr_pages; i++)
+                               if (req->pages[i])
+                                       put_page(req->pages[i]);
+                       if (req->pages != req->array)
+                               kfree(req->pages);
+               }
                kfree(req);
        }
 }
index 34c02fdcc25f107ccceca1ca26a304eb37f6e247..aea19614c08222f787705a8d60cdcbbd515ef4ff 100644 (file)
@@ -477,7 +477,7 @@ struct afs_volume {
        unsigned int            servers_seq;    /* Incremented each time ->servers changes */
 
        unsigned                cb_v_break;     /* Break-everything counter. */
-       rwlock_t                cb_break_lock;
+       rwlock_t                cb_v_break_lock;
 
        afs_voltype_t           type;           /* type of volume */
        short                   error;
index c3b740813fc719850ca188f892d4f653352e8600..c7dd47eaff29d5f4dfb0be09ccd69289a9fdc7bb 100644 (file)
@@ -60,23 +60,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
                struct afs_uuid__xdr *xdr;
                struct afs_uuid *uuid;
                int j;
+               int n = entry->nr_servers;
 
                tmp = ntohl(uvldb->serverFlags[i]);
                if (tmp & AFS_VLSF_DONTUSE ||
                    (new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
                        continue;
                if (tmp & AFS_VLSF_RWVOL) {
-                       entry->fs_mask[i] |= AFS_VOL_VTM_RW;
+                       entry->fs_mask[n] |= AFS_VOL_VTM_RW;
                        if (vlflags & AFS_VLF_BACKEXISTS)
-                               entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
+                               entry->fs_mask[n] |= AFS_VOL_VTM_BAK;
                }
                if (tmp & AFS_VLSF_ROVOL)
-                       entry->fs_mask[i] |= AFS_VOL_VTM_RO;
-               if (!entry->fs_mask[i])
+                       entry->fs_mask[n] |= AFS_VOL_VTM_RO;
+               if (!entry->fs_mask[n])
                        continue;
 
                xdr = &uvldb->serverNumber[i];
-               uuid = (struct afs_uuid *)&entry->fs_server[i];
+               uuid = (struct afs_uuid *)&entry->fs_server[n];
                uuid->time_low                  = xdr->time_low;
                uuid->time_mid                  = htons(ntohl(xdr->time_mid));
                uuid->time_hi_and_version       = htons(ntohl(xdr->time_hi_and_version));
index 3037bd01f617d13b1589d823cb6bdc112014bdca..5ec186ec56519ce677e0cddda50f8f59014c3fd6 100644 (file)
@@ -47,6 +47,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_mount_params *params,
        atomic_set(&volume->usage, 1);
        INIT_LIST_HEAD(&volume->proc_link);
        rwlock_init(&volume->servers_lock);
+       rwlock_init(&volume->cb_v_break_lock);
        memcpy(volume->name, vldb->name, vldb->name_len + 1);
 
        slist = afs_alloc_server_list(params->cell, params->key, vldb, type_mask);
index efae2fb0930aaa96c10595a462616d8caf418a9e..e7fd0b5b9234e64fb7d2e3b93289be83d7fd3d10 100644 (file)
@@ -1137,6 +1137,18 @@ static int load_elf_binary(struct linux_binprm *bprm)
        current->mm->start_stack = bprm->p;
 
        if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
+               /*
+                * For architectures with ELF randomization, when executing
+                * a loader directly (i.e. no interpreter listed in ELF
+                * headers), move the brk area out of the mmap region
+                * (since it grows up, and may collide early with the stack
+                * growing down), and into the unused ELF_ET_DYN_BASE region.
+                */
+               if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
+                   loc->elf_ex.e_type == ET_DYN && !interpreter)
+                       current->mm->brk = current->mm->start_brk =
+                               ELF_ET_DYN_BASE;
+
                current->mm->brk = current->mm->start_brk =
                        arch_randomize_brk(current->mm);
 #ifdef compat_brk_randomized
index ac6c383d63140bc395f4e6beef7037d803036259..19855659f65030d57d1e56fb5f93341ab4efe477 100644 (file)
@@ -1485,7 +1485,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
                goto out;
        }
 
-       trans = btrfs_attach_transaction(root);
+       trans = btrfs_join_transaction_nostart(root);
        if (IS_ERR(trans)) {
                if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
                        ret = PTR_ERR(trans);
index 9bfa66592aa7b2a16722858ce3887848c19e5256..c71e534ca7ef6fbdff743400a4b06146e9774058 100644 (file)
@@ -42,6 +42,22 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type)
        return NULL;
 }
 
+bool btrfs_compress_is_valid_type(const char *str, size_t len)
+{
+       int i;
+
+       for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
+               size_t comp_len = strlen(btrfs_compress_types[i]);
+
+               if (len < comp_len)
+                       continue;
+
+               if (!strncmp(btrfs_compress_types[i], str, comp_len))
+                       return true;
+       }
+       return false;
+}
+
 static int btrfs_decompress_bio(struct compressed_bio *cb);
 
 static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
index ddda9b80bf2044edc3ae210bb401ce8e728fdbe2..f97d90a1fa53124f2d50377e128c1037ddcd3d7c 100644 (file)
@@ -127,6 +127,7 @@ extern const struct btrfs_compress_op btrfs_lzo_compress;
 extern const struct btrfs_compress_op btrfs_zstd_compress;
 
 const char* btrfs_compress_type2str(enum btrfs_compression_type type);
+bool btrfs_compress_is_valid_type(const char *str, size_t len);
 
 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
 
index 79ac1ebabaf78488da2b812455a064f9ea035479..9fd383285f0eab9b6f3334aa7ffc635880be11a9 100644 (file)
@@ -1374,6 +1374,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
        struct tree_mod_elem *tm;
        struct extent_buffer *eb = NULL;
        struct extent_buffer *eb_root;
+       u64 eb_root_owner = 0;
        struct extent_buffer *old;
        struct tree_mod_root *old_root = NULL;
        u64 old_generation = 0;
@@ -1411,6 +1412,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
                        free_extent_buffer(old);
                }
        } else if (old_root) {
+               eb_root_owner = btrfs_header_owner(eb_root);
                btrfs_tree_read_unlock(eb_root);
                free_extent_buffer(eb_root);
                eb = alloc_dummy_extent_buffer(fs_info, logical);
@@ -1428,7 +1430,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
        if (old_root) {
                btrfs_set_header_bytenr(eb, eb->start);
                btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
-               btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
+               btrfs_set_header_owner(eb, eb_root_owner);
                btrfs_set_header_level(eb, old_root->level);
                btrfs_set_header_generation(eb, old_generation);
        }
@@ -5514,6 +5516,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
        advance_left = advance_right = 0;
 
        while (1) {
+               cond_resched();
                if (advance_left && !left_end_reached) {
                        ret = tree_advance(fs_info, left_path, &left_level,
                                        left_root_level,
index 82682da5a40dd71f1a74d7f5c55cf89c77c05306..faca485ccd8f479f04718202fd16194afe751877 100644 (file)
@@ -39,6 +39,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep;
 extern struct kmem_cache *btrfs_bit_radix_cachep;
 extern struct kmem_cache *btrfs_path_cachep;
 extern struct kmem_cache *btrfs_free_space_cachep;
+extern struct kmem_cache *btrfs_free_space_bitmap_cachep;
 struct btrfs_ordered_sum;
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
@@ -3200,6 +3201,9 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
                                    struct btrfs_trans_handle *trans, int mode,
                                    u64 start, u64 num_bytes, u64 min_size,
                                    loff_t actual_len, u64 *alloc_hint);
+int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
+               u64 start, u64 end, int *page_started, unsigned long *nr_written,
+               struct writeback_control *wbc);
 extern const struct dentry_operations btrfs_dentry_operations;
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 void btrfs_test_inode_set_ops(struct inode *inode);
index 0cc800d22a0814d7b41c1d3f2b2eb6666747878e..72c745682996f0146d45356d913f14bf05245fb1 100644 (file)
@@ -7367,6 +7367,14 @@ search:
                         */
                        if ((flags & extra) && !(block_group->flags & extra))
                                goto loop;
+
+                       /*
+                        * This block group has different flags than we want.
+                        * It's possible that we have MIXED_GROUP flag but no
+                        * block group is mixed.  Just skip such block group.
+                        */
+                       btrfs_release_block_group(block_group, delalloc);
+                       continue;
                }
 
 have_block_group:
@@ -9992,6 +10000,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
                        btrfs_err(info,
 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
                                  cache->key.objectid);
+                       btrfs_put_block_group(cache);
                        ret = -EINVAL;
                        goto error;
                }
@@ -10478,22 +10487,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        }
        spin_unlock(&block_group->lock);
 
-       if (remove_em) {
-               struct extent_map_tree *em_tree;
-
-               em_tree = &fs_info->mapping_tree.map_tree;
-               write_lock(&em_tree->lock);
-               /*
-                * The em might be in the pending_chunks list, so make sure the
-                * chunk mutex is locked, since remove_extent_mapping() will
-                * delete us from that list.
-                */
-               remove_extent_mapping(em_tree, em);
-               write_unlock(&em_tree->lock);
-               /* once for the tree */
-               free_extent_map(em);
-       }
-
        mutex_unlock(&fs_info->chunk_mutex);
 
        ret = remove_block_group_free_space(trans, block_group);
@@ -10510,6 +10503,24 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                goto out;
 
        ret = btrfs_del_item(trans, root, path);
+       if (ret)
+               goto out;
+
+       if (remove_em) {
+               struct extent_map_tree *em_tree;
+
+               em_tree = &fs_info->mapping_tree.map_tree;
+               write_lock(&em_tree->lock);
+               /*
+                * The em might be in the pending_chunks list, so make sure the
+                * chunk mutex is locked, since remove_extent_mapping() will
+                * delete us from that list.
+                */
+               remove_extent_mapping(em_tree, em);
+               write_unlock(&em_tree->lock);
+               /* once for the tree */
+               free_extent_map(em);
+       }
 out:
        btrfs_free_path(path);
        return ret;
index 90b0a6eff535067ddcb4a469a09e592014d8d46f..cb598eb4f3bd1ce7854cd24c0a755592cb15590b 100644 (file)
@@ -3199,7 +3199,7 @@ static void update_nr_written(struct writeback_control *wbc,
 /*
  * helper for __extent_writepage, doing all of the delayed allocation setup.
  *
- * This returns 1 if our fill_delalloc function did all the work required
+ * This returns 1 if btrfs_run_delalloc_range function did all the work required
  * to write the page (copy into inline extent).  In this case the IO has
  * been started and the page is already unlocked.
  *
@@ -3220,7 +3220,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
        int ret;
        int page_started = 0;
 
-       if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
+       if (epd->extent_locked)
                return 0;
 
        while (delalloc_end < page_end) {
@@ -3233,18 +3233,16 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
                        delalloc_start = delalloc_end + 1;
                        continue;
                }
-               ret = tree->ops->fill_delalloc(inode, page,
-                                              delalloc_start,
-                                              delalloc_end,
-                                              &page_started,
-                                              nr_written, wbc);
+               ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
+                               delalloc_end, &page_started, nr_written, wbc);
                /* File system has been set read-only */
                if (ret) {
                        SetPageError(page);
-                       /* fill_delalloc should be return < 0 for error
-                        * but just in case, we use > 0 here meaning the
-                        * IO is started, so we don't want to return > 0
-                        * unless things are going well.
+                       /*
+                        * btrfs_run_delalloc_range should return < 0 for error
+                        * but just in case, we use > 0 here meaning the IO is
+                        * started, so we don't want to return > 0 unless
+                        * things are going well.
                         */
                        ret = ret < 0 ? ret : -EIO;
                        goto done;
index b4d03e677e1d79885c339fcd1c9480e6066d3ef1..ed27becd963c587a1d5849c4918d3958cd3822df 100644 (file)
@@ -106,11 +106,6 @@ struct extent_io_ops {
        /*
         * Optional hooks, called if the pointer is not NULL
         */
-       int (*fill_delalloc)(void *private_data, struct page *locked_page,
-                            u64 start, u64 end, int *page_started,
-                            unsigned long *nr_written,
-                            struct writeback_control *wbc);
-
        int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
        void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
                                      struct extent_state *state, int uptodate);
index e24c0a69ff5d43c72ee3e183c230b2adad639331..4870440d6424a826e4e376123b155b641270a60e 100644 (file)
@@ -2056,25 +2056,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        struct btrfs_trans_handle *trans;
        struct btrfs_log_ctx ctx;
        int ret = 0, err;
-       u64 len;
 
-       /*
-        * If the inode needs a full sync, make sure we use a full range to
-        * avoid log tree corruption, due to hole detection racing with ordered
-        * extent completion for adjacent ranges, and assertion failures during
-        * hole detection.
-        */
-       if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-                    &BTRFS_I(inode)->runtime_flags)) {
-               start = 0;
-               end = LLONG_MAX;
-       }
-
-       /*
-        * The range length can be represented by u64, we have to do the typecasts
-        * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
-        */
-       len = (u64)end - (u64)start + 1;
        trace_btrfs_sync_file(file, datasync);
 
        btrfs_init_log_ctx(&ctx, inode);
@@ -2100,6 +2082,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        atomic_inc(&root->log_batch);
 
+       /*
+        * If the inode needs a full sync, make sure we use a full range to
+        * avoid log tree corruption, due to hole detection racing with ordered
+        * extent completion for adjacent ranges, and assertion failures during
+        * hole detection. Do this while holding the inode lock, to avoid races
+        * with other tasks.
+        */
+       if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                    &BTRFS_I(inode)->runtime_flags)) {
+               start = 0;
+               end = LLONG_MAX;
+       }
+
        /*
         * Before we acquired the inode's lock, someone may have dirtied more
         * pages in the target range. We need to make sure that writeback for
@@ -2127,8 +2122,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        /*
         * We have to do this here to avoid the priority inversion of waiting on
         * IO of a lower priority task while holding a transaciton open.
+        *
+        * Also, the range length can be represented by u64, we have to do the
+        * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
         */
-       ret = btrfs_wait_ordered_range(inode, start, len);
+       ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
        if (ret) {
                up_write(&BTRFS_I(inode)->dio_sem);
                inode_unlock(inode);
@@ -2732,6 +2730,11 @@ out_only_mutex:
                 * for detecting, at fsync time, if the inode isn't yet in the
                 * log tree or it's there but not up to date.
                 */
+               struct timespec64 now = current_time(inode);
+
+               inode_inc_iversion(inode);
+               inode->i_mtime = now;
+               inode->i_ctime = now;
                trans = btrfs_start_transaction(root, 1);
                if (IS_ERR(trans)) {
                        err = PTR_ERR(trans);
index 8ecf8c0e5fe65063ce368ccf9eb5de8ba1ebc463..4381e0aba8c015a4c8d525e711fb095351b6c6a9 100644 (file)
@@ -763,7 +763,8 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                } else {
                        ASSERT(num_bitmaps);
                        num_bitmaps--;
-                       e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
+                       e->bitmap = kmem_cache_zalloc(
+                                       btrfs_free_space_bitmap_cachep, GFP_NOFS);
                        if (!e->bitmap) {
                                kmem_cache_free(
                                        btrfs_free_space_cachep, e);
@@ -1864,7 +1865,7 @@ static void free_bitmap(struct btrfs_free_space_ctl *ctl,
                        struct btrfs_free_space *bitmap_info)
 {
        unlink_free_space(ctl, bitmap_info);
-       kfree(bitmap_info->bitmap);
+       kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
        kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
        ctl->total_bitmaps--;
        ctl->op->recalc_thresholds(ctl);
@@ -2118,7 +2119,8 @@ new_bitmap:
                }
 
                /* allocate the bitmap */
-               info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
+               info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
+                                                GFP_NOFS);
                spin_lock(&ctl->tree_lock);
                if (!info->bitmap) {
                        ret = -ENOMEM;
@@ -2130,7 +2132,8 @@ new_bitmap:
 out:
        if (info) {
                if (info->bitmap)
-                       kfree(info->bitmap);
+                       kmem_cache_free(btrfs_free_space_bitmap_cachep,
+                                       info->bitmap);
                kmem_cache_free(btrfs_free_space_cachep, info);
        }
 
@@ -2786,7 +2789,8 @@ out:
        if (entry->bytes == 0) {
                ctl->free_extents--;
                if (entry->bitmap) {
-                       kfree(entry->bitmap);
+                       kmem_cache_free(btrfs_free_space_bitmap_cachep,
+                                       entry->bitmap);
                        ctl->total_bitmaps--;
                        ctl->op->recalc_thresholds(ctl);
                }
@@ -3594,7 +3598,7 @@ again:
        }
 
        if (!map) {
-               map = kzalloc(PAGE_SIZE, GFP_NOFS);
+               map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
                if (!map) {
                        kmem_cache_free(btrfs_free_space_cachep, info);
                        return -ENOMEM;
@@ -3624,7 +3628,7 @@ again:
        if (info)
                kmem_cache_free(btrfs_free_space_cachep, info);
        if (map)
-               kfree(map);
+               kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
        return 0;
 }
 
index c1cd3fe2b29534938cf2784721616c84e2c6926f..37332f83a3a9650dafc79ac01752c537be0ba87c 100644 (file)
@@ -72,6 +72,7 @@ static struct kmem_cache *btrfs_inode_cachep;
 struct kmem_cache *btrfs_trans_handle_cachep;
 struct kmem_cache *btrfs_path_cachep;
 struct kmem_cache *btrfs_free_space_cachep;
+struct kmem_cache *btrfs_free_space_bitmap_cachep;
 
 #define S_SHIFT 12
 static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
@@ -110,17 +111,17 @@ static void __endio_write_update_ordered(struct inode *inode,
  * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
  * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
  * to be released, which we want to happen only when finishing the ordered
- * extent (btrfs_finish_ordered_io()). Also note that the caller of the
- * fill_delalloc() callback already does proper cleanup for the first page of
- * the range, that is, it invokes the callback writepage_end_io_hook() for the
- * range of the first page.
+ * extent (btrfs_finish_ordered_io()).
  */
 static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
-                                                const u64 offset,
-                                                const u64 bytes)
+                                                struct page *locked_page,
+                                                u64 offset, u64 bytes)
 {
        unsigned long index = offset >> PAGE_SHIFT;
        unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
+       u64 page_start = page_offset(locked_page);
+       u64 page_end = page_start + PAGE_SIZE - 1;
+
        struct page *page;
 
        while (index <= end_index) {
@@ -131,8 +132,18 @@ static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
                ClearPagePrivate2(page);
                put_page(page);
        }
-       return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
-                                           bytes - PAGE_SIZE, false);
+
+       /*
+        * In case this page belongs to the delalloc range being instantiated
+        * then skip it, since the first page of a range is going to be
+        * properly cleaned up by the caller of run_delalloc_range
+        */
+       if (page_start >= offset && page_end <= (offset + bytes - 1)) {
+               offset += PAGE_SIZE;
+               bytes -= PAGE_SIZE;
+       }
+
+       return __endio_write_update_ordered(inode, offset, bytes, false);
 }
 
 static int btrfs_dirty_inode(struct inode *inode);
@@ -388,10 +399,31 @@ static noinline int add_async_extent(struct async_cow *cow,
        return 0;
 }
 
+/*
+ * Check if the inode has flags compatible with compression
+ */
+static inline bool inode_can_compress(struct inode *inode)
+{
+       if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
+           BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
+               return false;
+       return true;
+}
+
+/*
+ * Check if the inode needs to be submitted to compression, based on mount
+ * options, defragmentation, properties or heuristics.
+ */
 static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 
+       if (!inode_can_compress(inode)) {
+               WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
+                       KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
+                       btrfs_ino(BTRFS_I(inode)));
+               return 0;
+       }
        /* force compress */
        if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
                return 1;
@@ -1578,12 +1610,12 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
 }
 
 /*
- * extent_io.c call back to do delayed allocation processing
+ * Function to process delayed allocation (create CoW) for ranges which are
+ * being touched for the first time.
  */
-static int run_delalloc_range(void *private_data, struct page *locked_page,
-                             u64 start, u64 end, int *page_started,
-                             unsigned long *nr_written,
-                             struct writeback_control *wbc)
+int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
+               u64 start, u64 end, int *page_started, unsigned long *nr_written,
+               struct writeback_control *wbc)
 {
        struct inode *inode = private_data;
        int ret;
@@ -1596,7 +1628,8 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
        } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 0, nr_written);
-       } else if (!inode_need_compress(inode, start, end)) {
+       } else if (!inode_can_compress(inode) ||
+                  !inode_need_compress(inode, start, end)) {
                ret = cow_file_range(inode, locked_page, start, end, end,
                                      page_started, nr_written, 1, NULL);
        } else {
@@ -1607,7 +1640,8 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
                                           write_flags);
        }
        if (ret)
-               btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
+               btrfs_cleanup_ordered_extents(inode, locked_page, start,
+                                             end - start + 1);
        return ret;
 }
 
@@ -9328,6 +9362,7 @@ void __cold btrfs_destroy_cachep(void)
        kmem_cache_destroy(btrfs_trans_handle_cachep);
        kmem_cache_destroy(btrfs_path_cachep);
        kmem_cache_destroy(btrfs_free_space_cachep);
+       kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
 }
 
 int __init btrfs_init_cachep(void)
@@ -9357,6 +9392,12 @@ int __init btrfs_init_cachep(void)
        if (!btrfs_free_space_cachep)
                goto fail;
 
+       btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
+                                                       PAGE_SIZE, PAGE_SIZE,
+                                                       SLAB_RED_ZONE, NULL);
+       if (!btrfs_free_space_bitmap_cachep)
+               goto fail;
+
        return 0;
 fail:
        btrfs_destroy_cachep();
@@ -10576,7 +10617,6 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
        .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
 
        /* optional callbacks */
-       .fill_delalloc = run_delalloc_range,
        .writepage_end_io_hook = btrfs_writepage_end_io_hook,
        .writepage_start_hook = btrfs_writepage_start_hook,
        .set_bit_hook = btrfs_set_bit_hook,
index 61d22a56c0ba4e7d43f0552854f4ac4e82443218..6980a0e13f18eab5eda2f227fc9cfa45c2fcfa1b 100644 (file)
@@ -366,11 +366,7 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
 
 static int prop_compression_validate(const char *value, size_t len)
 {
-       if (!strncmp("lzo", value, 3))
-               return 0;
-       else if (!strncmp("zlib", value, 4))
-               return 0;
-       else if (!strncmp("zstd", value, 4))
+       if (btrfs_compress_is_valid_type(value, len))
                return 0;
 
        return -EINVAL;
index e46e83e876001c5f5547228afd6ebfc62373abd1..3ea2008dcde3eaa6e94a20cf4ba21f1a6035dbdb 100644 (file)
@@ -2249,6 +2249,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
        int ret = 0;
        int i;
        u64 *i_qgroups;
+       bool committing = false;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *srcgroup;
@@ -2256,7 +2257,25 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
        u32 level_size = 0;
        u64 nums;
 
-       mutex_lock(&fs_info->qgroup_ioctl_lock);
+       /*
+        * There are only two callers of this function.
+        *
+        * One in create_subvol() in the ioctl context, which needs to hold
+        * the qgroup_ioctl_lock.
+        *
+        * The other one in create_pending_snapshot() where no other qgroup
+        * code can modify the fs as they all need to either start a new trans
+        * or hold a trans handler, thus we don't need to hold
+        * qgroup_ioctl_lock.
+        * This would avoid long and complex lock chain and make lockdep happy.
+        */
+       spin_lock(&fs_info->trans_lock);
+       if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
+               committing = true;
+       spin_unlock(&fs_info->trans_lock);
+
+       if (!committing)
+               mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                goto out;
 
@@ -2420,7 +2439,8 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
 unlock:
        spin_unlock(&fs_info->qgroup_lock);
 out:
-       mutex_unlock(&fs_info->qgroup_ioctl_lock);
+       if (!committing)
+               mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
 }
 
@@ -2776,9 +2796,6 @@ out:
        btrfs_free_path(path);
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
-       if (!btrfs_fs_closing(fs_info))
-               fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
-
        if (err > 0 &&
            fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -2794,16 +2811,30 @@ out:
        trans = btrfs_start_transaction(fs_info->quota_root, 1);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
+               trans = NULL;
                btrfs_err(fs_info,
                          "fail to start transaction for status update: %d",
                          err);
-               goto done;
        }
-       ret = update_qgroup_status_item(trans);
-       if (ret < 0) {
-               err = ret;
-               btrfs_err(fs_info, "fail to update qgroup status: %d", err);
+
+       mutex_lock(&fs_info->qgroup_rescan_lock);
+       if (!btrfs_fs_closing(fs_info))
+               fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+       if (trans) {
+               ret = update_qgroup_status_item(trans);
+               if (ret < 0) {
+                       err = ret;
+                       btrfs_err(fs_info, "fail to update qgroup status: %d",
+                                 err);
+               }
        }
+       fs_info->qgroup_rescan_running = false;
+       complete_all(&fs_info->qgroup_rescan_completion);
+       mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+       if (!trans)
+               return;
+
        btrfs_end_transaction(trans);
 
        if (btrfs_fs_closing(fs_info)) {
@@ -2814,12 +2845,6 @@ out:
        } else {
                btrfs_err(fs_info, "qgroup scan failed with %d", err);
        }
-
-done:
-       mutex_lock(&fs_info->qgroup_rescan_lock);
-       fs_info->qgroup_rescan_running = false;
-       mutex_unlock(&fs_info->qgroup_rescan_lock);
-       complete_all(&fs_info->qgroup_rescan_completion);
 }
 
 /*
@@ -3047,6 +3072,9 @@ cleanup:
        while ((unode = ulist_next(&reserved->range_changed, &uiter)))
                clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
                                 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
+       /* Also free data bytes of already reserved one */
+       btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
+                                 orig_reserved, BTRFS_QGROUP_RSV_DATA);
        extent_changeset_release(reserved);
        return ret;
 }
@@ -3091,7 +3119,7 @@ static int qgroup_free_reserved_data(struct inode *inode,
                 * EXTENT_QGROUP_RESERVED, we won't double free.
                 * So not need to rush.
                 */
-               ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree,
+               ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
                                free_start, free_start + free_len - 1,
                                EXTENT_QGROUP_RESERVED, &changeset);
                if (ret < 0)
index e5b9e596bb9216f5b7516f5e883a7c45eb06754b..cd2a5864e103cabed50efdc03ba50ebe100c7a5b 100644 (file)
@@ -511,7 +511,7 @@ static int process_leaf(struct btrfs_root *root,
        struct btrfs_extent_data_ref *dref;
        struct btrfs_shared_data_ref *sref;
        u32 count;
-       int i = 0, tree_block_level = 0, ret;
+       int i = 0, tree_block_level = 0, ret = 0;
        struct btrfs_key key;
        int nritems = btrfs_header_nritems(leaf);
 
index 5d57ed62934555acf88d7c99b5ba340dfed6f571..bccd9dede2af4b0b8c99cf2151e04beb66b6154d 100644 (file)
@@ -3187,6 +3187,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        if (!page) {
                                btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                        PAGE_SIZE, true);
+                               btrfs_delalloc_release_extents(BTRFS_I(inode),
+                                                       PAGE_SIZE, true);
                                ret = -ENOMEM;
                                goto out;
                        }
index 3be1456b5116b3e80dca1f466f374c4eb46a28cd..916c397704679a77de7052eb3984d2582057d06e 100644 (file)
@@ -322,6 +322,7 @@ static struct full_stripe_lock *insert_full_stripe_lock(
        struct rb_node *parent = NULL;
        struct full_stripe_lock *entry;
        struct full_stripe_lock *ret;
+       unsigned int nofs_flag;
 
        lockdep_assert_held(&locks_root->lock);
 
@@ -339,8 +340,17 @@ static struct full_stripe_lock *insert_full_stripe_lock(
                }
        }
 
-       /* Insert new lock */
+       /*
+        * Insert new lock.
+        *
+        * We must use GFP_NOFS because the scrub task might be waiting for a
+        * worker task executing this function and in turn a transaction commit
+        * might be waiting for the scrub task to pause (which needs to wait for all
+        * the worker tasks to complete before pausing).
+        */
+       nofs_flag = memalloc_nofs_save();
        ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+       memalloc_nofs_restore(nofs_flag);
        if (!ret)
                return ERR_PTR(-ENOMEM);
        ret->logical = fstripe_logical;
@@ -568,12 +578,11 @@ static void scrub_put_ctx(struct scrub_ctx *sctx)
                scrub_free_ctx(sctx);
 }
 
-static noinline_for_stack
-struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
+static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
+               struct btrfs_fs_info *fs_info, int is_dev_replace)
 {
        struct scrub_ctx *sctx;
        int             i;
-       struct btrfs_fs_info *fs_info = dev->fs_info;
 
        sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
        if (!sctx)
@@ -582,7 +591,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
        sctx->is_dev_replace = is_dev_replace;
        sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
        sctx->curr = -1;
-       sctx->fs_info = dev->fs_info;
+       sctx->fs_info = fs_info;
+       INIT_LIST_HEAD(&sctx->csum_list);
        for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
                struct scrub_bio *sbio;
 
@@ -607,7 +617,6 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
        atomic_set(&sctx->workers_pending, 0);
        atomic_set(&sctx->cancel_req, 0);
        sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
-       INIT_LIST_HEAD(&sctx->csum_list);
 
        spin_lock_init(&sctx->list_lock);
        spin_lock_init(&sctx->stat_lock);
@@ -1622,8 +1631,19 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
        mutex_lock(&sctx->wr_lock);
 again:
        if (!sctx->wr_curr_bio) {
+               unsigned int nofs_flag;
+
+               /*
+                * We must use GFP_NOFS because the scrub task might be waiting
+                * for a worker task executing this function and in turn a
+                * transaction commit might be waiting for the scrub task to pause
+                * (which needs to wait for all the worker tasks to complete
+                * before pausing).
+                */
+               nofs_flag = memalloc_nofs_save();
                sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
                                              GFP_KERNEL);
+               memalloc_nofs_restore(nofs_flag);
                if (!sctx->wr_curr_bio) {
                        mutex_unlock(&sctx->wr_lock);
                        return -ENOMEM;
@@ -3022,8 +3042,7 @@ out:
 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
                                           struct map_lookup *map,
                                           struct btrfs_device *scrub_dev,
-                                          int num, u64 base, u64 length,
-                                          int is_dev_replace)
+                                          int num, u64 base, u64 length)
 {
        struct btrfs_path *path, *ppath;
        struct btrfs_fs_info *fs_info = sctx->fs_info;
@@ -3299,7 +3318,7 @@ again:
                        extent_physical = extent_logical - logical + physical;
                        extent_dev = scrub_dev;
                        extent_mirror_num = mirror_num;
-                       if (is_dev_replace)
+                       if (sctx->is_dev_replace)
                                scrub_remap_extent(fs_info, extent_logical,
                                                   extent_len, &extent_physical,
                                                   &extent_dev,
@@ -3397,8 +3416,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
                                          struct btrfs_device *scrub_dev,
                                          u64 chunk_offset, u64 length,
                                          u64 dev_offset,
-                                         struct btrfs_block_group_cache *cache,
-                                         int is_dev_replace)
+                                         struct btrfs_block_group_cache *cache)
 {
        struct btrfs_fs_info *fs_info = sctx->fs_info;
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
@@ -3435,8 +3453,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
                if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
                    map->stripes[i].physical == dev_offset) {
                        ret = scrub_stripe(sctx, map, scrub_dev, i,
-                                          chunk_offset, length,
-                                          is_dev_replace);
+                                          chunk_offset, length);
                        if (ret)
                                goto out;
                }
@@ -3449,8 +3466,7 @@ out:
 
 static noinline_for_stack
 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
-                          struct btrfs_device *scrub_dev, u64 start, u64 end,
-                          int is_dev_replace)
+                          struct btrfs_device *scrub_dev, u64 start, u64 end)
 {
        struct btrfs_dev_extent *dev_extent = NULL;
        struct btrfs_path *path;
@@ -3544,7 +3560,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                 */
                scrub_pause_on(fs_info);
                ret = btrfs_inc_block_group_ro(cache);
-               if (!ret && is_dev_replace) {
+               if (!ret && sctx->is_dev_replace) {
                        /*
                         * If we are doing a device replace wait for any tasks
                         * that started dellaloc right before we set the block
@@ -3609,7 +3625,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                dev_replace->item_needs_writeback = 1;
                btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
                ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
-                                 found_key.offset, cache, is_dev_replace);
+                                 found_key.offset, cache);
 
                /*
                 * flush, submit all pending read and write bios, afterwards
@@ -3670,7 +3686,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                btrfs_put_block_group(cache);
                if (ret)
                        break;
-               if (is_dev_replace &&
+               if (sctx->is_dev_replace &&
                    atomic64_read(&dev_replace->num_write_errors) > 0) {
                        ret = -EIO;
                        break;
@@ -3762,16 +3778,6 @@ fail_scrub_workers:
        return -ENOMEM;
 }
 
-static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
-{
-       if (--fs_info->scrub_workers_refcnt == 0) {
-               btrfs_destroy_workqueue(fs_info->scrub_workers);
-               btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
-               btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
-       }
-       WARN_ON(fs_info->scrub_workers_refcnt < 0);
-}
-
 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
                    u64 end, struct btrfs_scrub_progress *progress,
                    int readonly, int is_dev_replace)
@@ -3779,6 +3785,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
        struct scrub_ctx *sctx;
        int ret;
        struct btrfs_device *dev;
+       unsigned int nofs_flag;
+       struct btrfs_workqueue *scrub_workers = NULL;
+       struct btrfs_workqueue *scrub_wr_comp = NULL;
+       struct btrfs_workqueue *scrub_parity = NULL;
 
        if (btrfs_fs_closing(fs_info))
                return -EINVAL;
@@ -3820,13 +3830,18 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
                return -EINVAL;
        }
 
+       /* Allocate outside of device_list_mutex */
+       sctx = scrub_setup_ctx(fs_info, is_dev_replace);
+       if (IS_ERR(sctx))
+               return PTR_ERR(sctx);
 
        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        dev = btrfs_find_device(fs_info, devid, NULL, NULL);
        if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
                     !is_dev_replace)) {
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto out_free_ctx;
        }
 
        if (!is_dev_replace && !readonly &&
@@ -3834,7 +3849,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
                btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
                                rcu_str_deref(dev->name));
-               return -EROFS;
+               ret = -EROFS;
+               goto out_free_ctx;
        }
 
        mutex_lock(&fs_info->scrub_lock);
@@ -3842,7 +3858,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
            test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-               return -EIO;
+               ret = -EIO;
+               goto out_free_ctx;
        }
 
        btrfs_dev_replace_read_lock(&fs_info->dev_replace);
@@ -3852,7 +3869,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
                btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-               return -EINPROGRESS;
+               ret = -EINPROGRESS;
+               goto out_free_ctx;
        }
        btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
 
@@ -3860,16 +3878,9 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
        if (ret) {
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-               return ret;
+               goto out_free_ctx;
        }
 
-       sctx = scrub_setup_ctx(dev, is_dev_replace);
-       if (IS_ERR(sctx)) {
-               mutex_unlock(&fs_info->scrub_lock);
-               mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-               scrub_workers_put(fs_info);
-               return PTR_ERR(sctx);
-       }
        sctx->readonly = readonly;
        dev->scrub_ctx = sctx;
        mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -3882,6 +3893,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
        atomic_inc(&fs_info->scrubs_running);
        mutex_unlock(&fs_info->scrub_lock);
 
+       /*
+        * In order to avoid deadlock with reclaim when there is a transaction
+        * trying to pause scrub, make sure we use GFP_NOFS for all the
+        * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
+        * invoked by our callees. The pausing request is done when the
+        * transaction commit starts, and it blocks the transaction until scrub
+        * is paused (done at specific points at scrub_stripe() or right above
+        * before incrementing fs_info->scrubs_running).
+        */
+       nofs_flag = memalloc_nofs_save();
        if (!is_dev_replace) {
                /*
                 * by holding device list mutex, we can
@@ -3893,8 +3914,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
        }
 
        if (!ret)
-               ret = scrub_enumerate_chunks(sctx, dev, start, end,
-                                            is_dev_replace);
+               ret = scrub_enumerate_chunks(sctx, dev, start, end);
+       memalloc_nofs_restore(nofs_flag);
 
        wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
        atomic_dec(&fs_info->scrubs_running);
@@ -3907,11 +3928,23 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 
        mutex_lock(&fs_info->scrub_lock);
        dev->scrub_ctx = NULL;
-       scrub_workers_put(fs_info);
+       if (--fs_info->scrub_workers_refcnt == 0) {
+               scrub_workers = fs_info->scrub_workers;
+               scrub_wr_comp = fs_info->scrub_wr_completion_workers;
+               scrub_parity = fs_info->scrub_parity_workers;
+       }
        mutex_unlock(&fs_info->scrub_lock);
 
+       btrfs_destroy_workqueue(scrub_workers);
+       btrfs_destroy_workqueue(scrub_wr_comp);
+       btrfs_destroy_workqueue(scrub_parity);
        scrub_put_ctx(sctx);
 
+       return ret;
+
+out_free_ctx:
+       scrub_free_ctx(sctx);
+
        return ret;
 }
 
index 258392b75048e5fa22723b7cd1f74c3cd0c00b5d..48ddbc187e58be308a9000a950a631fc72d0fa82 100644 (file)
@@ -6272,68 +6272,21 @@ static int changed_extent(struct send_ctx *sctx,
 {
        int ret = 0;
 
-       if (sctx->cur_ino != sctx->cmp_key->objectid) {
-
-               if (result == BTRFS_COMPARE_TREE_CHANGED) {
-                       struct extent_buffer *leaf_l;
-                       struct extent_buffer *leaf_r;
-                       struct btrfs_file_extent_item *ei_l;
-                       struct btrfs_file_extent_item *ei_r;
-
-                       leaf_l = sctx->left_path->nodes[0];
-                       leaf_r = sctx->right_path->nodes[0];
-                       ei_l = btrfs_item_ptr(leaf_l,
-                                             sctx->left_path->slots[0],
-                                             struct btrfs_file_extent_item);
-                       ei_r = btrfs_item_ptr(leaf_r,
-                                             sctx->right_path->slots[0],
-                                             struct btrfs_file_extent_item);
-
-                       /*
-                        * We may have found an extent item that has changed
-                        * only its disk_bytenr field and the corresponding
-                        * inode item was not updated. This case happens due to
-                        * very specific timings during relocation when a leaf
-                        * that contains file extent items is COWed while
-                        * relocation is ongoing and its in the stage where it
-                        * updates data pointers. So when this happens we can
-                        * safely ignore it since we know it's the same extent,
-                        * but just at different logical and physical locations
-                        * (when an extent is fully replaced with a new one, we
-                        * know the generation number must have changed too,
-                        * since snapshot creation implies committing the current
-                        * transaction, and the inode item must have been updated
-                        * as well).
-                        * This replacement of the disk_bytenr happens at
-                        * relocation.c:replace_file_extents() through
-                        * relocation.c:btrfs_reloc_cow_block().
-                        */
-                       if (btrfs_file_extent_generation(leaf_l, ei_l) ==
-                           btrfs_file_extent_generation(leaf_r, ei_r) &&
-                           btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
-                           btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
-                           btrfs_file_extent_compression(leaf_l, ei_l) ==
-                           btrfs_file_extent_compression(leaf_r, ei_r) &&
-                           btrfs_file_extent_encryption(leaf_l, ei_l) ==
-                           btrfs_file_extent_encryption(leaf_r, ei_r) &&
-                           btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
-                           btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
-                           btrfs_file_extent_type(leaf_l, ei_l) ==
-                           btrfs_file_extent_type(leaf_r, ei_r) &&
-                           btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
-                           btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
-                           btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
-                           btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
-                           btrfs_file_extent_offset(leaf_l, ei_l) ==
-                           btrfs_file_extent_offset(leaf_r, ei_r) &&
-                           btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
-                           btrfs_file_extent_num_bytes(leaf_r, ei_r))
-                               return 0;
-               }
-
-               inconsistent_snapshot_error(sctx, result, "extent");
-               return -EIO;
-       }
+       /*
+        * We have found an extent item that changed without the inode item
+        * having changed. This can happen either after relocation (where the
+        * disk_bytenr of an extent item is replaced at
+        * relocation.c:replace_file_extents()) or after deduplication into a
+        * file in both the parent and send snapshots (where an extent item can
+        * get modified or replaced with a new one). Note that deduplication
+        * updates the inode item, but it only changes the iversion (sequence
+        * field in the inode item) of the inode, so if a file is deduplicated
+        * the same amount of times in both the parent and send snapshots, its
+        * iversion becomes the same in both snapshots, whence the inode item is
+        * the same on both snapshots.
+        */
+       if (sctx->cur_ino != sctx->cmp_key->objectid)
+               return 0;
 
        if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
                if (result != BTRFS_COMPARE_TREE_DELETED)
index bb8f6c020d227c0a66329188e08fb5425d1c86ba..26317bca56499a406492dd84e4a602e2ee00f1eb 100644 (file)
@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_COMMIT_START]      = (__TRANS_START | __TRANS_ATTACH),
        [TRANS_STATE_COMMIT_DOING]      = (__TRANS_START |
                                           __TRANS_ATTACH |
-                                          __TRANS_JOIN),
+                                          __TRANS_JOIN |
+                                          __TRANS_JOIN_NOSTART),
        [TRANS_STATE_UNBLOCKED]         = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
-                                          __TRANS_JOIN_NOLOCK),
+                                          __TRANS_JOIN_NOLOCK |
+                                          __TRANS_JOIN_NOSTART),
        [TRANS_STATE_COMPLETED]         = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
-                                          __TRANS_JOIN_NOLOCK),
+                                          __TRANS_JOIN_NOLOCK |
+                                          __TRANS_JOIN_NOSTART),
 };
 
 void btrfs_put_transaction(struct btrfs_transaction *transaction)
@@ -531,7 +534,8 @@ again:
                ret = join_transaction(fs_info, type);
                if (ret == -EBUSY) {
                        wait_current_trans(fs_info);
-                       if (unlikely(type == TRANS_ATTACH))
+                       if (unlikely(type == TRANS_ATTACH ||
+                                    type == TRANS_JOIN_NOSTART))
                                ret = -ENOENT;
                }
        } while (ret == -EBUSY);
@@ -647,6 +651,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
                                 BTRFS_RESERVE_NO_FLUSH, true);
 }
 
+/*
+ * Similar to regular join but it never starts a transaction when none is
+ * running or after waiting for the current one to finish.
+ */
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
+{
+       return start_transaction(root, 0, TRANS_JOIN_NOSTART,
+                                BTRFS_RESERVE_NO_FLUSH, true);
+}
+
 /*
  * btrfs_attach_transaction() - catch the running transaction
  *
@@ -2027,6 +2041,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
                }
        } else {
                spin_unlock(&fs_info->trans_lock);
+               /*
+                * The previous transaction was aborted and was already removed
+                * from the list of transactions at fs_info->trans_list. So we
+                * abort to prevent writing a new superblock that reflects a
+                * corrupt state (pointing to trees with unwritten nodes/leafs).
+                */
+               if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
+                       ret = -EROFS;
+                       goto cleanup_transaction;
+               }
        }
 
        extwriter_counter_dec(cur_trans, trans->type);
index 4cbb1b55387dc801894624f939804e253e17f414..c1d34cc70472223a81ca22de9bde27ccfc9276d5 100644 (file)
@@ -97,11 +97,13 @@ struct btrfs_transaction {
 #define __TRANS_JOIN           (1U << 11)
 #define __TRANS_JOIN_NOLOCK    (1U << 12)
 #define __TRANS_DUMMY          (1U << 13)
+#define __TRANS_JOIN_NOSTART   (1U << 14)
 
 #define TRANS_START            (__TRANS_START | __TRANS_FREEZABLE)
 #define TRANS_ATTACH           (__TRANS_ATTACH)
 #define TRANS_JOIN             (__TRANS_JOIN | __TRANS_FREEZABLE)
 #define TRANS_JOIN_NOLOCK      (__TRANS_JOIN_NOLOCK)
+#define TRANS_JOIN_NOSTART     (__TRANS_JOIN_NOSTART)
 
 #define TRANS_EXTWRITERS       (__TRANS_START | __TRANS_ATTACH)
 
@@ -187,6 +189,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
                                        int min_factor);
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
                                        struct btrfs_root *root);
index 0d5840d20efcb435edfeaf30892984fba4feaf4f..4d4f57f0f08c864020268a489ff69f99edf4d437 100644 (file)
@@ -2860,7 +2860,8 @@ out:
  * in the tree of log roots
  */
 static int update_log_root(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *log)
+                          struct btrfs_root *log,
+                          struct btrfs_root_item *root_item)
 {
        struct btrfs_fs_info *fs_info = log->fs_info;
        int ret;
@@ -2868,10 +2869,10 @@ static int update_log_root(struct btrfs_trans_handle *trans,
        if (log->log_transid == 1) {
                /* insert root item on the first sync */
                ret = btrfs_insert_root(trans, fs_info->log_root_tree,
-                               &log->root_key, &log->root_item);
+                               &log->root_key, root_item);
        } else {
                ret = btrfs_update_root(trans, fs_info->log_root_tree,
-                               &log->root_key, &log->root_item);
+                               &log->root_key, root_item);
        }
        return ret;
 }
@@ -2969,6 +2970,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *log = root->log_root;
        struct btrfs_root *log_root_tree = fs_info->log_root_tree;
+       struct btrfs_root_item new_root_item;
        int log_transid = 0;
        struct btrfs_log_ctx root_log_ctx;
        struct blk_plug plug;
@@ -3032,17 +3034,25 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                goto out;
        }
 
+       /*
+        * We _must_ update under the root->log_mutex in order to make sure we
+        * have a consistent view of the log root we are trying to commit at
+        * this moment.
+        *
+        * We _must_ copy this into a local copy, because we are not holding the
+        * log_root_tree->log_mutex yet.  This is important because when we
+        * commit the log_root_tree we must have a consistent view of the
+        * log_root_tree when we update the super block to point at the
+        * log_root_tree bytenr.  If we update the log_root_tree here we'll race
+        * with the commit and possibly point at the new block which we may not
+        * have written out.
+        */
        btrfs_set_root_node(&log->root_item, log->node);
+       memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
 
        root->log_transid++;
        log->log_transid = root->log_transid;
        root->log_start_pid = 0;
-       /*
-        * Update or create log root item under the root's log_mutex to prevent
-        * races with concurrent log syncs that can lead to failure to update
-        * log root item because it was not created yet.
-        */
-       ret = update_log_root(trans, log);
        /*
         * IO has been started, blocks of the log tree have WRITTEN flag set
         * in their headers. new modifications of the log will be written to
@@ -3063,6 +3073,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        mutex_unlock(&log_root_tree->log_mutex);
 
        mutex_lock(&log_root_tree->log_mutex);
+
+       /*
+        * Now we are safe to update the log_root_tree because we're under the
+        * log_mutex, and we're a current writer so we're holding the commit
+        * open until we drop the log_mutex.
+        */
+       ret = update_log_root(trans, log, &new_root_item);
+
        if (atomic_dec_and_test(&log_root_tree->log_writers)) {
                /* atomic_dec_and_test implies a barrier */
                cond_wake_up_nomb(&log_root_tree->log_writer_wait);
@@ -3262,6 +3280,30 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+/*
+ * Check if an inode was logged in the current transaction. We can't always rely
+ * on an inode's logged_trans value, because it's an in-memory only field and
+ * therefore not persisted. This means that its value is lost if the inode gets
+ * evicted and loaded again from disk (in which case it has a value of 0, and
+ * certainly it is smaller than any possible transaction ID), when that happens
+ * the full_sync flag is set in the inode's runtime flags, so on that case we
+ * assume eviction happened and ignore the logged_trans value, assuming the
+ * worst case, that the inode was logged before in the current transaction.
+ */
+static bool inode_logged(struct btrfs_trans_handle *trans,
+                        struct btrfs_inode *inode)
+{
+       if (inode->logged_trans == trans->transid)
+               return true;
+
+       if (inode->last_trans == trans->transid &&
+           test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
+           !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
+               return true;
+
+       return false;
+}
+
 /*
  * If both a file and directory are logged, and unlinks or renames are
  * mixed in, we have a few interesting corners:
@@ -3296,7 +3338,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
        int bytes_del = 0;
        u64 dir_ino = btrfs_ino(dir);
 
-       if (dir->logged_trans < trans->transid)
+       if (!inode_logged(trans, dir))
                return 0;
 
        ret = join_running_log_trans(root);
@@ -3401,7 +3443,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
        u64 index;
        int ret;
 
-       if (inode->logged_trans < trans->transid)
+       if (!inode_logged(trans, inode))
                return 0;
 
        ret = join_running_log_trans(root);
@@ -5083,7 +5125,7 @@ again:
                                                BTRFS_I(other_inode),
                                                LOG_OTHER_INODE, 0, LLONG_MAX,
                                                ctx);
-                               iput(other_inode);
+                               btrfs_add_delayed_iput(other_inode);
                                if (err)
                                        goto out_unlock;
                                else
@@ -5250,9 +5292,19 @@ log_extents:
                }
        }
 
+       /*
+        * Don't update last_log_commit if we logged that an inode exists after
+        * it was loaded to memory (full_sync bit set).
+        * This is to prevent data loss when we do a write to the inode, then
+        * the inode gets evicted after all delalloc was flushed, then we log
+        * it exists (due to a rename for example) and then fsync it. This last
+        * fsync would do nothing (not logging the extents previously written).
+        */
        spin_lock(&inode->lock);
        inode->logged_trans = trans->transid;
-       inode->last_log_commit = inode->last_sub_trans;
+       if (inode_only != LOG_INODE_EXISTS ||
+           !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
+               inode->last_log_commit = inode->last_sub_trans;
        spin_unlock(&inode->lock);
 out_unlock:
        mutex_unlock(&inode->log_mutex);
@@ -5485,7 +5537,7 @@ process_leaf:
                        }
 
                        if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
-                               iput(di_inode);
+                               btrfs_add_delayed_iput(di_inode);
                                break;
                        }
 
@@ -5497,7 +5549,7 @@ process_leaf:
                        if (!ret &&
                            btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
                                ret = 1;
-                       iput(di_inode);
+                       btrfs_add_delayed_iput(di_inode);
                        if (ret)
                                goto next_dir_inode;
                        if (ctx->log_new_dentries) {
@@ -5644,7 +5696,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
                        if (!ret && ctx && ctx->log_new_dentries)
                                ret = log_new_dir_dentries(trans, root,
                                                   BTRFS_I(dir_inode), ctx);
-                       iput(dir_inode);
+                       btrfs_add_delayed_iput(dir_inode);
                        if (ret)
                                goto out;
                }
index 2fd000308be763958aab22c11a12bd7995f771e7..a8297e7489d987364beaaddf46beb75e5ac23fcf 100644 (file)
@@ -5040,8 +5040,7 @@ static inline int btrfs_chunk_max_errors(struct map_lookup *map)
 
        if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
                         BTRFS_BLOCK_GROUP_RAID10 |
-                        BTRFS_BLOCK_GROUP_RAID5 |
-                        BTRFS_BLOCK_GROUP_DUP)) {
+                        BTRFS_BLOCK_GROUP_RAID5)) {
                max_errors = 1;
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
                max_errors = 2;
@@ -7412,6 +7411,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
        struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
        struct extent_map *em;
        struct map_lookup *map;
+       struct btrfs_device *dev;
        u64 stripe_len;
        bool found = false;
        int ret = 0;
@@ -7461,6 +7461,34 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
                        physical_offset, devid);
                ret = -EUCLEAN;
        }
+
+       /* Make sure no dev extent is beyond device bondary */
+       dev = btrfs_find_device(fs_info, devid, NULL, NULL);
+       if (!dev) {
+               btrfs_err(fs_info, "failed to find devid %llu", devid);
+               ret = -EUCLEAN;
+               goto out;
+       }
+
+       /* It's possible this device is a dummy for seed device */
+       if (dev->disk_total_bytes == 0) {
+               dev = find_device(fs_info->fs_devices->seed, devid, NULL);
+               if (!dev) {
+                       btrfs_err(fs_info, "failed to find seed devid %llu",
+                                 devid);
+                       ret = -EUCLEAN;
+                       goto out;
+               }
+       }
+
+       if (physical_offset + physical_len > dev->disk_total_bytes) {
+               btrfs_err(fs_info,
+"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
+                         devid, physical_offset, physical_len,
+                         dev->disk_total_bytes);
+               ret = -EUCLEAN;
+               goto out;
+       }
 out:
        free_extent_map(em);
        return ret;
index 9c332a6f66678a04cfe4b6cf05a33253c85f30f5..476728bdae8c668d3e3eb3eed90f3a892edae9bf 100644 (file)
@@ -913,8 +913,9 @@ get_more_pages:
                        if (page_offset(page) >= ceph_wbc.i_size) {
                                dout("%p page eof %llu\n",
                                     page, ceph_wbc.i_size);
-                               if (ceph_wbc.size_stable ||
-                                   page_offset(page) >= i_size_read(inode))
+                               if ((ceph_wbc.size_stable ||
+                                   page_offset(page) >= i_size_read(inode)) &&
+                                   clear_page_dirty_for_io(page))
                                        mapping->a_ops->invalidatepage(page,
                                                                0, PAGE_SIZE);
                                unlock_page(page);
index c7542e8dd096c52ff4c6f0111530a0ad45adb99a..db547af01b593243e79d24a25bded16b0c703632 100644 (file)
@@ -1237,20 +1237,23 @@ static int send_cap_msg(struct cap_msg_args *arg)
 }
 
 /*
- * Queue cap releases when an inode is dropped from our cache.  Since
- * inode is about to be destroyed, there is no need for i_ceph_lock.
+ * Queue cap releases when an inode is dropped from our cache.
  */
 void ceph_queue_caps_release(struct inode *inode)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct rb_node *p;
 
+       /* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
+        * may call __ceph_caps_issued_mask() on a freeing inode. */
+       spin_lock(&ci->i_ceph_lock);
        p = rb_first(&ci->i_caps);
        while (p) {
                struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
                p = rb_next(p);
                __ceph_remove_cap(cap, true);
        }
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -1277,6 +1280,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 {
        struct ceph_inode_info *ci = cap->ci;
        struct inode *inode = &ci->vfs_inode;
+       struct ceph_buffer *old_blob = NULL;
        struct cap_msg_args arg;
        int held, revoking;
        int wake = 0;
@@ -1341,7 +1345,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        ci->i_requested_max_size = arg.max_size;
 
        if (flushing & CEPH_CAP_XATTR_EXCL) {
-               __ceph_build_xattrs_blob(ci);
+               old_blob = __ceph_build_xattrs_blob(ci);
                arg.xattr_version = ci->i_xattrs.version;
                arg.xattr_buf = ci->i_xattrs.blob;
        } else {
@@ -1376,6 +1380,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 
        spin_unlock(&ci->i_ceph_lock);
 
+       ceph_buffer_put(old_blob);
+
        ret = send_cap_msg(&arg);
        if (ret < 0) {
                dout("error sending cap msg, must requeue %p\n", inode);
index 3e518c2ae2bf962450925d9871431d3b0e05a876..8196c21d8623c3430eff56114e6b632983301648 100644 (file)
@@ -528,13 +528,16 @@ static void ceph_i_callback(struct rcu_head *head)
        kmem_cache_free(ceph_inode_cachep, ci);
 }
 
-void ceph_destroy_inode(struct inode *inode)
+void ceph_evict_inode(struct inode *inode)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        struct rb_node *n;
 
-       dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
+       dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
+
+       truncate_inode_pages_final(&inode->i_data);
+       clear_inode(inode);
 
        ceph_fscache_unregister_inode_cookie(ci);
 
@@ -576,7 +579,10 @@ void ceph_destroy_inode(struct inode *inode)
                ceph_buffer_put(ci->i_xattrs.prealloc_blob);
 
        ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
+}
 
+void ceph_destroy_inode(struct inode *inode)
+{
        call_rcu(&inode->i_rcu, ceph_i_callback);
 }
 
@@ -742,6 +748,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
        int issued, new_issued, info_caps;
        struct timespec64 mtime, atime, ctime;
        struct ceph_buffer *xattr_blob = NULL;
+       struct ceph_buffer *old_blob = NULL;
        struct ceph_string *pool_ns = NULL;
        struct ceph_cap *new_cap = NULL;
        int err = 0;
@@ -800,7 +807,12 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 
        /* update inode */
        inode->i_rdev = le32_to_cpu(info->rdev);
-       inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+       /* directories have fl_stripe_unit set to zero */
+       if (le32_to_cpu(info->layout.fl_stripe_unit))
+               inode->i_blkbits =
+                       fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+       else
+               inode->i_blkbits = CEPH_BLOCK_SHIFT;
 
        __ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
 
@@ -878,7 +890,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
        if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
            le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
                if (ci->i_xattrs.blob)
-                       ceph_buffer_put(ci->i_xattrs.blob);
+                       old_blob = ci->i_xattrs.blob;
                ci->i_xattrs.blob = xattr_blob;
                if (xattr_blob)
                        memcpy(ci->i_xattrs.blob->vec.iov_base,
@@ -1017,8 +1029,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 out:
        if (new_cap)
                ceph_put_cap(mdsc, new_cap);
-       if (xattr_blob)
-               ceph_buffer_put(xattr_blob);
+       ceph_buffer_put(old_blob);
+       ceph_buffer_put(xattr_blob);
        ceph_put_string(pool_ns);
        return err;
 }
index 9dae2ec7e1fa89705f649b1c3349520b8bb23543..6a8f4a99582e57d7f1d08a97d81f49808176fcc3 100644 (file)
@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
                req->r_wait_for_completion = ceph_lock_wait_for_completion;
 
        err = ceph_mdsc_do_request(mdsc, inode, req);
-
-       if (operation == CEPH_MDS_OP_GETFILELOCK) {
+       if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
                fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
                if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
                        fl->fl_type = F_RDLCK;
index bfcf11c70bfad60816ead0fc305d37b3759f50e0..09db6d08614d235797745951980c685e282cdf75 100644 (file)
@@ -3640,7 +3640,9 @@ static void delayed_work(struct work_struct *work)
                                pr_info("mds%d hung\n", s->s_mds);
                        }
                }
-               if (s->s_state < CEPH_MDS_SESSION_OPEN) {
+               if (s->s_state == CEPH_MDS_SESSION_NEW ||
+                   s->s_state == CEPH_MDS_SESSION_RESTARTING ||
+                   s->s_state == CEPH_MDS_SESSION_REJECTED) {
                        /* this mds is failed or recovering, just wait */
                        ceph_put_mds_session(s);
                        continue;
index 1f46b02f7314974a58ad7c290dc810f68629f6b3..5cf7b5f4db947375c499b5aeb5b7db6772ba8a7b 100644 (file)
@@ -460,6 +460,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap_snap *capsnap;
        struct ceph_snap_context *old_snapc, *new_snapc;
+       struct ceph_buffer *old_blob = NULL;
        int used, dirty;
 
        capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
@@ -536,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
        capsnap->gid = inode->i_gid;
 
        if (dirty & CEPH_CAP_XATTR_EXCL) {
-               __ceph_build_xattrs_blob(ci);
+               old_blob = __ceph_build_xattrs_blob(ci);
                capsnap->xattr_blob =
                        ceph_buffer_get(ci->i_xattrs.blob);
                capsnap->xattr_version = ci->i_xattrs.version;
@@ -579,6 +580,7 @@ update_snapc:
        }
        spin_unlock(&ci->i_ceph_lock);
 
+       ceph_buffer_put(old_blob);
        kfree(capsnap);
        ceph_put_snap_context(old_snapc);
 }
index c5cf46e43f2e70b8fdbc9cd2e1c2c2e6acae98d3..ccab249a37f6aa7143b27a3680fb4011fc1121d7 100644 (file)
@@ -830,6 +830,7 @@ static const struct super_operations ceph_super_ops = {
        .destroy_inode  = ceph_destroy_inode,
        .write_inode    = ceph_write_inode,
        .drop_inode     = ceph_drop_inode,
+       .evict_inode    = ceph_evict_inode,
        .sync_fs        = ceph_sync_fs,
        .put_super      = ceph_put_super,
        .remount_fs     = ceph_remount,
index 582e28fd1b7bf1c6bda93b4796eb92cc4883fdd2..8d3eabf06d66aeb7f5fe273f726b27075f9a5441 100644 (file)
@@ -526,7 +526,12 @@ static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
                                           long long release_count,
                                           long long ordered_count)
 {
-       smp_mb__before_atomic();
+       /*
+        * Makes sure operations that setup readdir cache (update page
+        * cache and i_size) are strongly ordered w.r.t. the following
+        * atomic64_set() operations.
+        */
+       smp_mb();
        atomic64_set(&ci->i_complete_seq[0], release_count);
        atomic64_set(&ci->i_complete_seq[1], ordered_count);
 }
@@ -849,6 +854,7 @@ static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
 extern const struct inode_operations ceph_file_iops;
 
 extern struct inode *ceph_alloc_inode(struct super_block *sb);
+extern void ceph_evict_inode(struct inode *inode);
 extern void ceph_destroy_inode(struct inode *inode);
 extern int ceph_drop_inode(struct inode *inode);
 
@@ -891,7 +897,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
 int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
 ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
 extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
-extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
+extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
 extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
 extern void __init ceph_xattr_init(void);
 extern void ceph_xattr_exit(void);
index 5cc8b94f8206972ce6ce988b92b5103504b335b1..5e4f3f833e85e3ab8002dcd79fe4c92de4292d4a 100644 (file)
@@ -79,7 +79,7 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
        const char *ns_field = " pool_namespace=";
        char buf[128];
        size_t len, total_len = 0;
-       int ret;
+       ssize_t ret;
 
        pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
 
@@ -103,11 +103,8 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
        if (pool_ns)
                total_len += strlen(ns_field) + pool_ns->len;
 
-       if (!size) {
-               ret = total_len;
-       } else if (total_len > size) {
-               ret = -ERANGE;
-       } else {
+       ret = total_len;
+       if (size >= total_len) {
                memcpy(val, buf, len);
                ret = len;
                if (pool_name) {
@@ -737,12 +734,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
 
 /*
  * If there are dirty xattrs, reencode xattrs into the prealloc_blob
- * and swap into place.
+ * and swap into place.  It returns the old i_xattrs.blob (or NULL) so
+ * that it can be freed by the caller as the i_ceph_lock is likely to be
+ * held.
  */
-void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
 {
        struct rb_node *p;
        struct ceph_inode_xattr *xattr = NULL;
+       struct ceph_buffer *old_blob = NULL;
        void *dest;
 
        dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
@@ -773,12 +773,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
                        dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
 
                if (ci->i_xattrs.blob)
-                       ceph_buffer_put(ci->i_xattrs.blob);
+                       old_blob = ci->i_xattrs.blob;
                ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
                ci->i_xattrs.prealloc_blob = NULL;
                ci->i_xattrs.dirty = false;
                ci->i_xattrs.version++;
        }
+
+       return old_blob;
 }
 
 static inline int __get_request_mask(struct inode *in) {
@@ -817,8 +819,11 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
                if (err)
                        return err;
                err = -ENODATA;
-               if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
+               if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
                        err = vxattr->getxattr_cb(ci, value, size);
+                       if (size && size < err)
+                               err = -ERANGE;
+               }
                return err;
        }
 
@@ -1011,6 +1016,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_cap_flush *prealloc_cf = NULL;
+       struct ceph_buffer *old_blob = NULL;
        int issued;
        int err;
        int dirty = 0;
@@ -1084,13 +1090,15 @@ retry:
                struct ceph_buffer *blob;
 
                spin_unlock(&ci->i_ceph_lock);
-               dout(" preaallocating new blob size=%d\n", required_blob_size);
+               ceph_buffer_put(old_blob); /* Shouldn't be required */
+               dout(" pre-allocating new blob size=%d\n", required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
                if (!blob)
                        goto do_sync_unlocked;
                spin_lock(&ci->i_ceph_lock);
+               /* prealloc_blob can't be released while holding i_ceph_lock */
                if (ci->i_xattrs.prealloc_blob)
-                       ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+                       old_blob = ci->i_xattrs.prealloc_blob;
                ci->i_xattrs.prealloc_blob = blob;
                goto retry;
        }
@@ -1106,6 +1114,7 @@ retry:
        }
 
        spin_unlock(&ci->i_ceph_lock);
+       ceph_buffer_put(old_blob);
        if (lock_snap_rwsem)
                up_read(&mdsc->snap_rwsem);
        if (dirty)
index 9731d0d891e7e030e3d4c4c692d2ce653d4b8029..aba2b48d4da1a2ba40111dbdef4d78323ea9fab1 100644 (file)
@@ -72,5 +72,10 @@ struct cifs_sb_info {
        struct delayed_work prune_tlinks;
        struct rcu_head rcu;
        char *prepath;
+       /*
+        * Indicate whether serverino option was turned off later
+        * (cifs_autodisable_serverino) in order to match new mounts.
+        */
+       bool mnt_cifs_serverino_autodisabled;
 };
 #endif                         /* _CIFS_FS_SB_H */
index fb32f3d6925e82b59290f5afb7a09dbed5372c52..d5457015801d8be935353826fcb1b4dbf99c56c8 100644 (file)
@@ -292,6 +292,7 @@ cifs_alloc_inode(struct super_block *sb)
        cifs_inode->uniqueid = 0;
        cifs_inode->createtime = 0;
        cifs_inode->epoch = 0;
+       spin_lock_init(&cifs_inode->open_file_lock);
        generate_random_uuid(cifs_inode->lease_key);
 
        /*
@@ -427,6 +428,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
        cifs_show_security(s, tcon->ses);
        cifs_show_cache_flavor(s, cifs_sb);
 
+       if (tcon->no_lease)
+               seq_puts(s, ",nolease");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
                seq_puts(s, ",multiuser");
        else if (tcon->ses->user_name)
index 6f227cc781e5d10be04cbb5f2c5222d2ed27e939..4dbae6e268d6ad126c37d229b71109c9a9f839bd 100644 (file)
@@ -543,6 +543,7 @@ struct smb_vol {
        bool noblocksnd:1;
        bool noautotune:1;
        bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
+       bool no_lease:1;     /* disable requesting leases */
        bool fsc:1;     /* enable fscache */
        bool mfsymlinks:1; /* use Minshall+French Symlinks */
        bool multiuser:1;
@@ -1004,6 +1005,7 @@ struct cifs_tcon {
        bool need_reopen_files:1; /* need to reopen tcon file handles */
        bool use_resilient:1; /* use resilient instead of durable handles */
        bool use_persistent:1; /* use persistent instead of durable handles */
+       bool no_lease:1;    /* Do not request leases on files or directories */
        __le32 capabilities;
        __u32 share_flags;
        __u32 maximal_access;
@@ -1287,6 +1289,7 @@ struct cifsInodeInfo {
        struct rw_semaphore lock_sem;   /* protect the fields above */
        /* BB add in lists for dirty pages i.e. write caching info for oplock */
        struct list_head openFileList;
+       spinlock_t      open_file_lock; /* protects openFileList */
        __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
        unsigned int oplock;            /* oplock/lease level we have */
        unsigned int epoch;             /* used to track lease state changes */
@@ -1563,6 +1566,25 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
        kfree(param);
 }
 
+static inline bool is_interrupt_error(int error)
+{
+       switch (error) {
+       case -EINTR:
+       case -ERESTARTSYS:
+       case -ERESTARTNOHAND:
+       case -ERESTARTNOINTR:
+               return true;
+       }
+       return false;
+}
+
+static inline bool is_retryable_error(int error)
+{
+       if (is_interrupt_error(error) || error == -EAGAIN)
+               return true;
+       return false;
+}
+
 #define   MID_FREE 0
 #define   MID_REQUEST_ALLOCATED 1
 #define   MID_REQUEST_SUBMITTED 2
@@ -1668,10 +1690,14 @@ require use of the stronger protocol */
  *  tcp_ses_lock protects:
  *     list operations on tcp and SMB session lists
  *  tcon->open_file_lock protects the list of open files hanging off the tcon
+ *  inode->open_file_lock protects the openFileList hanging off the inode
  *  cfile->file_info_lock protects counters and fields in cifs file struct
  *  f_owner.lock protects certain per file struct operations
  *  mapping->page_lock protects certain per page operations
  *
+ *  Note that the cifs_tcon.open_file_lock should be taken before
+ *  not after the cifsInodeInfo.open_file_lock
+ *
  *  Semaphores
  *  ----------
  *  sesSem     operations on smb session
index 269471c8f42bfd6214eac6745aaa328173bcbb02..86a54b809c4843b842d30a8aa8a19058fef6285b 100644 (file)
@@ -2033,16 +2033,17 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
 
                wdata2->cfile = find_writable_file(CIFS_I(inode), false);
                if (!wdata2->cfile) {
-                       cifs_dbg(VFS, "No writable handles for inode\n");
+                       cifs_dbg(VFS, "No writable handle to retry writepages\n");
                        rc = -EBADF;
-                       break;
+               } else {
+                       wdata2->pid = wdata2->cfile->pid;
+                       rc = server->ops->async_writev(wdata2,
+                                                      cifs_writedata_release);
                }
-               wdata2->pid = wdata2->cfile->pid;
-               rc = server->ops->async_writev(wdata2, cifs_writedata_release);
 
                for (j = 0; j < nr_pages; j++) {
                        unlock_page(wdata2->pages[j]);
-                       if (rc != 0 && rc != -EAGAIN) {
+                       if (rc != 0 && !is_retryable_error(rc)) {
                                SetPageError(wdata2->pages[j]);
                                end_page_writeback(wdata2->pages[j]);
                                put_page(wdata2->pages[j]);
@@ -2051,8 +2052,9 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
 
                if (rc) {
                        kref_put(&wdata2->refcount, cifs_writedata_release);
-                       if (rc == -EAGAIN)
+                       if (is_retryable_error(rc))
                                continue;
+                       i += nr_pages;
                        break;
                }
 
@@ -2060,7 +2062,15 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
                i += nr_pages;
        } while (i < wdata->nr_pages);
 
-       mapping_set_error(inode->i_mapping, rc);
+       /* cleanup remaining pages from the original wdata */
+       for (; i < wdata->nr_pages; i++) {
+               SetPageError(wdata->pages[i]);
+               end_page_writeback(wdata->pages[i]);
+               put_page(wdata->pages[i]);
+       }
+
+       if (rc != 0 && !is_retryable_error(rc))
+               mapping_set_error(inode->i_mapping, rc);
        kref_put(&wdata->refcount, cifs_writedata_release);
 }
 
index f31339db45fdb1603dae12137bd322e45e8386a8..966e493c82e5734671fea040fb450a3c1ed6a978 100644 (file)
@@ -70,7 +70,7 @@ enum {
        Opt_user_xattr, Opt_nouser_xattr,
        Opt_forceuid, Opt_noforceuid,
        Opt_forcegid, Opt_noforcegid,
-       Opt_noblocksend, Opt_noautotune,
+       Opt_noblocksend, Opt_noautotune, Opt_nolease,
        Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
        Opt_mapposix, Opt_nomapposix,
        Opt_mapchars, Opt_nomapchars, Opt_sfu,
@@ -129,6 +129,7 @@ static const match_table_t cifs_mount_option_tokens = {
        { Opt_noforcegid, "noforcegid" },
        { Opt_noblocksend, "noblocksend" },
        { Opt_noautotune, "noautotune" },
+       { Opt_nolease, "nolease" },
        { Opt_hard, "hard" },
        { Opt_soft, "soft" },
        { Opt_perm, "perm" },
@@ -563,10 +564,10 @@ static bool
 server_unresponsive(struct TCP_Server_Info *server)
 {
        /*
-        * We need to wait 2 echo intervals to make sure we handle such
+        * We need to wait 3 echo intervals to make sure we handle such
         * situations right:
         * 1s  client sends a normal SMB request
-        * 2s  client gets a response
+        * 3s  client gets a response
         * 30s echo workqueue job pops, and decides we got a response recently
         *     and don't need to send another
         * ...
@@ -575,9 +576,9 @@ server_unresponsive(struct TCP_Server_Info *server)
         */
        if ((server->tcpStatus == CifsGood ||
            server->tcpStatus == CifsNeedNegotiate) &&
-           time_after(jiffies, server->lstrp + 2 * server->echo_interval)) {
+           time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
                cifs_dbg(VFS, "Server %s has not responded in %lu seconds. Reconnecting...\n",
-                        server->hostname, (2 * server->echo_interval) / HZ);
+                        server->hostname, (3 * server->echo_interval) / HZ);
                cifs_reconnect(server);
                wake_up(&server->response_q);
                return true;
@@ -1542,6 +1543,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                case Opt_noautotune:
                        vol->noautotune = 1;
                        break;
+               case Opt_nolease:
+                       vol->no_lease = 1;
+                       break;
                case Opt_hard:
                        vol->retry = 1;
                        break;
@@ -2756,6 +2760,7 @@ static int
 cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
 {
        int rc = 0;
+       int is_domain = 0;
        const char *delim, *payload;
        char *desc;
        ssize_t len;
@@ -2803,6 +2808,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
                        rc = PTR_ERR(key);
                        goto out_err;
                }
+               is_domain = 1;
        }
 
        down_read(&key->sem);
@@ -2860,6 +2866,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
                goto out_key_put;
        }
 
+       /*
+        * If we have a domain key then we must set the domainName in the
+        * for the request.
+        */
+       if (is_domain && ses->domainName) {
+               vol->domainname = kstrndup(ses->domainName,
+                                          strlen(ses->domainName),
+                                          GFP_KERNEL);
+               if (!vol->domainname) {
+                       cifs_dbg(FYI, "Unable to allocate %zd bytes for "
+                                "domain\n", len);
+                       rc = -ENOMEM;
+                       kfree(vol->username);
+                       vol->username = NULL;
+                       kzfree(vol->password);
+                       vol->password = NULL;
+                       goto out_key_put;
+               }
+       }
+
 out_key_put:
        up_read(&key->sem);
        key_put(key);
@@ -3001,6 +3027,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
                return 0;
        if (tcon->snapshot_time != volume_info->snapshot_time)
                return 0;
+       if (tcon->no_lease != volume_info->no_lease)
+               return 0;
        return 1;
 }
 
@@ -3209,6 +3237,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
        tcon->nocase = volume_info->nocase;
        tcon->nohandlecache = volume_info->nohandlecache;
        tcon->local_lease = volume_info->local_lease;
+       tcon->no_lease = volume_info->no_lease;
        INIT_LIST_HEAD(&tcon->pending_opens);
 
        spin_lock(&cifs_tcp_ses_lock);
@@ -3247,12 +3276,16 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
 {
        struct cifs_sb_info *old = CIFS_SB(sb);
        struct cifs_sb_info *new = mnt_data->cifs_sb;
+       unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
+       unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
 
        if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
                return 0;
 
-       if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
-           (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
+       if (old->mnt_cifs_serverino_autodisabled)
+               newflags &= ~CIFS_MOUNT_SERVER_INUM;
+
+       if (oldflags != newflags)
                return 0;
 
        /*
index 907e85d65bb4e09b5fdc8f7c1e6c35ef56b519ad..2fb6fa51fd3ceab398cce84829318a3e79c9dadc 100644 (file)
@@ -840,10 +840,16 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
 static int
 cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
 {
+       struct inode *inode;
+
        if (flags & LOOKUP_RCU)
                return -ECHILD;
 
        if (d_really_is_positive(direntry)) {
+               inode = d_inode(direntry);
+               if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
+                       CIFS_I(inode)->time = 0; /* force reval */
+
                if (cifs_revalidate_dentry(direntry))
                        return 0;
                else {
@@ -854,7 +860,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
                         * attributes will have been updated by
                         * cifs_revalidate_dentry().
                         */
-                       if (IS_AUTOMOUNT(d_inode(direntry)) &&
+                       if (IS_AUTOMOUNT(inode) &&
                           !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
                                spin_lock(&direntry->d_lock);
                                direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
index 23cee91ed442e6063041c5425dc2719d898bd73f..b4e33ef2ff31509125d8d9dc4b1c53a9f5aba003 100644 (file)
@@ -252,6 +252,12 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, fid);
 
+       if (rc) {
+               server->ops->close(xid, tcon, fid);
+               if (rc == -ESTALE)
+                       rc = -EOPENSTALE;
+       }
+
 out:
        kfree(buf);
        return rc;
@@ -336,10 +342,12 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
        list_add(&cfile->tlist, &tcon->openFileList);
 
        /* if readable file instance put first in list*/
+       spin_lock(&cinode->open_file_lock);
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
+       spin_unlock(&cinode->open_file_lock);
        spin_unlock(&tcon->open_file_lock);
 
        if (fid->purge_cache)
@@ -395,10 +403,11 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
        bool oplock_break_cancelled;
 
        spin_lock(&tcon->open_file_lock);
-
+       spin_lock(&cifsi->open_file_lock);
        spin_lock(&cifs_file->file_info_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file->file_info_lock);
+               spin_unlock(&cifsi->open_file_lock);
                spin_unlock(&tcon->open_file_lock);
                return;
        }
@@ -427,6 +436,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
                cifs_set_oplock_level(cifsi, 0);
        }
 
+       spin_unlock(&cifsi->open_file_lock);
        spin_unlock(&tcon->open_file_lock);
 
        oplock_break_cancelled = wait_oplock_handler ?
@@ -749,7 +759,8 @@ reopen_success:
 
        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
-               mapping_set_error(inode->i_mapping, rc);
+               if (!is_interrupt_error(rc))
+                       mapping_set_error(inode->i_mapping, rc);
 
                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode, full_path,
@@ -1830,13 +1841,12 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 {
        struct cifsFileInfo *open_file = NULL;
        struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
-       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 
        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                fsuid_only = false;
 
-       spin_lock(&tcon->open_file_lock);
+       spin_lock(&cifs_inode->open_file_lock);
        /* we could simply get the first_list_entry since write-only entries
           are always at the end of the list but since the first entry might
           have a close pending, we go through the whole list */
@@ -1848,7 +1858,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
                                /* found a good file */
                                /* lock it so it will not be closed on us */
                                cifsFileInfo_get(open_file);
-                               spin_unlock(&tcon->open_file_lock);
+                               spin_unlock(&cifs_inode->open_file_lock);
                                return open_file;
                        } /* else might as well continue, and look for
                             another, or simply have the caller reopen it
@@ -1856,7 +1866,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
                } else /* write only file */
                        break; /* write only files are last so must be done */
        }
-       spin_unlock(&tcon->open_file_lock);
+       spin_unlock(&cifs_inode->open_file_lock);
        return NULL;
 }
 
@@ -1865,7 +1875,6 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 {
        struct cifsFileInfo *open_file, *inv_file = NULL;
        struct cifs_sb_info *cifs_sb;
-       struct cifs_tcon *tcon;
        bool any_available = false;
        int rc;
        unsigned int refind = 0;
@@ -1881,16 +1890,15 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
        }
 
        cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
-       tcon = cifs_sb_master_tcon(cifs_sb);
 
        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                fsuid_only = false;
 
-       spin_lock(&tcon->open_file_lock);
+       spin_lock(&cifs_inode->open_file_lock);
 refind_writable:
        if (refind > MAX_REOPEN_ATT) {
-               spin_unlock(&tcon->open_file_lock);
+               spin_unlock(&cifs_inode->open_file_lock);
                return NULL;
        }
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
@@ -1902,7 +1910,7 @@ refind_writable:
                        if (!open_file->invalidHandle) {
                                /* found a good writable file */
                                cifsFileInfo_get(open_file);
-                               spin_unlock(&tcon->open_file_lock);
+                               spin_unlock(&cifs_inode->open_file_lock);
                                return open_file;
                        } else {
                                if (!inv_file)
@@ -1921,21 +1929,21 @@ refind_writable:
                cifsFileInfo_get(inv_file);
        }
 
-       spin_unlock(&tcon->open_file_lock);
+       spin_unlock(&cifs_inode->open_file_lock);
 
        if (inv_file) {
                rc = cifs_reopen_file(inv_file, false);
                if (!rc)
                        return inv_file;
                else {
-                       spin_lock(&tcon->open_file_lock);
+                       spin_lock(&cifs_inode->open_file_lock);
                        list_move_tail(&inv_file->flist,
                                        &cifs_inode->openFileList);
-                       spin_unlock(&tcon->open_file_lock);
+                       spin_unlock(&cifs_inode->open_file_lock);
                        cifsFileInfo_put(inv_file);
                        ++refind;
                        inv_file = NULL;
-                       spin_lock(&tcon->open_file_lock);
+                       spin_lock(&cifs_inode->open_file_lock);
                        goto refind_writable;
                }
        }
@@ -2137,6 +2145,7 @@ static int cifs_writepages(struct address_space *mapping,
        pgoff_t end, index;
        struct cifs_writedata *wdata;
        int rc = 0;
+       int saved_rc = 0;
 
        /*
         * If wsize is smaller than the page cache size, default to writing
@@ -2163,8 +2172,10 @@ retry:
 
                rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
                                                   &wsize, &credits);
-               if (rc)
+               if (rc != 0) {
+                       done = true;
                        break;
+               }
 
                tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
 
@@ -2172,6 +2183,7 @@ retry:
                                                  &found_pages);
                if (!wdata) {
                        rc = -ENOMEM;
+                       done = true;
                        add_credits_and_wake_if(server, credits, 0);
                        break;
                }
@@ -2200,7 +2212,7 @@ retry:
                if (rc != 0) {
                        add_credits_and_wake_if(server, wdata->credits, 0);
                        for (i = 0; i < nr_pages; ++i) {
-                               if (rc == -EAGAIN)
+                               if (is_retryable_error(rc))
                                        redirty_page_for_writepage(wbc,
                                                           wdata->pages[i]);
                                else
@@ -2208,7 +2220,7 @@ retry:
                                end_page_writeback(wdata->pages[i]);
                                put_page(wdata->pages[i]);
                        }
-                       if (rc != -EAGAIN)
+                       if (!is_retryable_error(rc))
                                mapping_set_error(mapping, rc);
                }
                kref_put(&wdata->refcount, cifs_writedata_release);
@@ -2218,6 +2230,15 @@ retry:
                        continue;
                }
 
+               /* Return immediately if we received a signal during writing */
+               if (is_interrupt_error(rc)) {
+                       done = true;
+                       break;
+               }
+
+               if (rc != 0 && saved_rc == 0)
+                       saved_rc = rc;
+
                wbc->nr_to_write -= nr_pages;
                if (wbc->nr_to_write <= 0)
                        done = true;
@@ -2235,6 +2256,9 @@ retry:
                goto retry;
        }
 
+       if (saved_rc != 0)
+               rc = saved_rc;
+
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
 
@@ -2266,8 +2290,8 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
        set_page_writeback(page);
 retry_write:
        rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
-       if (rc == -EAGAIN) {
-               if (wbc->sync_mode == WB_SYNC_ALL)
+       if (is_retryable_error(rc)) {
+               if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
                        goto retry_write;
                redirty_page_for_writepage(wbc, page);
        } else if (rc != 0) {
@@ -3980,17 +4004,15 @@ static int cifs_readpage(struct file *file, struct page *page)
 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
 {
        struct cifsFileInfo *open_file;
-       struct cifs_tcon *tcon =
-               cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
 
-       spin_lock(&tcon->open_file_lock);
+       spin_lock(&cifs_inode->open_file_lock);
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
-                       spin_unlock(&tcon->open_file_lock);
+                       spin_unlock(&cifs_inode->open_file_lock);
                        return 1;
                }
        }
-       spin_unlock(&tcon->open_file_lock);
+       spin_unlock(&cifs_inode->open_file_lock);
        return 0;
 }
 
index 1fadd314ae7f955ef501937c92f62c5c5cd1f13c..26154db6c87f1625366e39f09afa9a7bdc5fe1dc 100644 (file)
@@ -410,6 +410,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
                /* if uniqueid is different, return error */
                if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
                    CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
+                       CIFS_I(*pinode)->time = 0; /* force reval */
                        rc = -ESTALE;
                        goto cgiiu_exit;
                }
@@ -417,6 +418,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
                /* if filetype is different, return error */
                if (unlikely(((*pinode)->i_mode & S_IFMT) !=
                    (fattr.cf_mode & S_IFMT))) {
+                       CIFS_I(*pinode)->time = 0; /* force reval */
                        rc = -ESTALE;
                        goto cgiiu_exit;
                }
@@ -926,6 +928,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                /* if uniqueid is different, return error */
                if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
                    CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
+                       CIFS_I(*inode)->time = 0; /* force reval */
                        rc = -ESTALE;
                        goto cgii_exit;
                }
@@ -933,6 +936,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                /* if filetype is different, return error */
                if (unlikely(((*inode)->i_mode & S_IFMT) !=
                    (fattr.cf_mode & S_IFMT))) {
+                       CIFS_I(*inode)->time = 0; /* force reval */
                        rc = -ESTALE;
                        goto cgii_exit;
                }
@@ -2261,6 +2265,11 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
         * the flush returns error?
         */
        rc = filemap_write_and_wait(inode->i_mapping);
+       if (is_interrupt_error(rc)) {
+               rc = -ERESTARTSYS;
+               goto out;
+       }
+
        mapping_set_error(inode->i_mapping, rc);
        rc = 0;
 
@@ -2404,6 +2413,11 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
         * the flush returns error?
         */
        rc = filemap_write_and_wait(inode->i_mapping);
+       if (is_interrupt_error(rc)) {
+               rc = -ERESTARTSYS;
+               goto cifs_setattr_exit;
+       }
+
        mapping_set_error(inode->i_mapping, rc);
        rc = 0;
 
index facc94e159a1672775a9127a8f9bfe8677b4e6f6..e45f8e321371c2253a871bdcf12f963edbfe4439 100644 (file)
@@ -523,6 +523,7 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
 {
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
                cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
+               cifs_sb->mnt_cifs_serverino_autodisabled = true;
                cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
                         cifs_sb_master_tcon(cifs_sb)->treeName);
        }
index 47db8eb6cbcf463f4dedf05b8eb28ec4f951c6ce..c7f0c8566442592a9aeece43eaaf0fa312e7f84e 100644 (file)
@@ -183,6 +183,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
        /* we do not want to loop forever */
        last_mid = cur_mid;
        cur_mid++;
+       /* avoid 0xFFFF MID */
+       if (cur_mid == 0xffff)
+               cur_mid++;
 
        /*
         * This nested loop looks more expensive than it is.
index 0ccf8f9b63a2e78336a31e4b8b8b0ec931b17c1c..f0d966da7f378e9b29c999275b38294e0f2ababc 100644 (file)
@@ -553,7 +553,50 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
        oparams.fid = pfid;
        oparams.reconnect = false;
 
+       /*
+        * We do not hold the lock for the open because in case
+        * SMB2_open needs to reconnect, it will end up calling
+        * cifs_mark_open_files_invalid() which takes the lock again
+        * thus causing a deadlock
+        */
+       mutex_unlock(&tcon->crfid.fid_mutex);
        rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
+       mutex_lock(&tcon->crfid.fid_mutex);
+
+       /*
+        * Now we need to check again as the cached root might have
+        * been successfully re-opened from a concurrent process
+        */
+
+       if (tcon->crfid.is_valid) {
+               /* work was already done */
+
+               /* stash fids for close() later */
+               struct cifs_fid fid = {
+                       .persistent_fid = pfid->persistent_fid,
+                       .volatile_fid = pfid->volatile_fid,
+               };
+
+               /*
+                * Caller expects this func to set pfid to a valid
+                * cached root, so we copy the existing one and get a
+                * reference
+                */
+               memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
+               kref_get(&tcon->crfid.refcount);
+
+               mutex_unlock(&tcon->crfid.fid_mutex);
+
+               if (rc == 0) {
+                       /* close extra handle outside of critical section */
+                       SMB2_close(xid, tcon, fid.persistent_fid,
+                                  fid.volatile_fid);
+               }
+               return 0;
+       }
+
+       /* Cached root is still invalid, continue normally */
+
        if (rc == 0) {
                memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
                tcon->crfid.tcon = tcon;
@@ -561,6 +604,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
                kref_init(&tcon->crfid.refcount);
                kref_get(&tcon->crfid.refcount);
        }
+
        mutex_unlock(&tcon->crfid.fid_mutex);
        return rc;
 }
@@ -2354,6 +2398,11 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
        if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
                return;
 
+       /* Check if the server granted an oplock rather than a lease */
+       if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
+               return smb2_set_oplock_level(cinode, oplock, epoch,
+                                            purge_cache);
+
        if (oplock & SMB2_LEASE_READ_CACHING_HE) {
                new_oplock |= CIFS_CACHE_READ_FLG;
                strcat(message, "R");
@@ -2545,7 +2594,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
 static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
                                   unsigned int buflen)
 {
-       sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
+       void *addr;
+       /*
+        * VMAP_STACK (at least) puts stack into the vmalloc address space
+        */
+       if (is_vmalloc_addr(buf))
+               addr = vmalloc_to_page(buf);
+       else
+               addr = virt_to_page(buf);
+       sg_set_page(sg, addr, buflen, offset_in_page(buf));
 }
 
 /* Assumes the first rqst has a transform header as the first iov.
@@ -3121,7 +3178,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
 {
        int ret, length;
        char *buf = server->smallbuf;
-       char *tmpbuf;
        struct smb2_sync_hdr *shdr;
        unsigned int pdu_length = server->pdu_size;
        unsigned int buf_size;
@@ -3151,18 +3207,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
                return length;
 
        next_is_large = server->large_buf;
- one_more:
+one_more:
        shdr = (struct smb2_sync_hdr *)buf;
        if (shdr->NextCommand) {
-               if (next_is_large) {
-                       tmpbuf = server->bigbuf;
+               if (next_is_large)
                        next_buffer = (char *)cifs_buf_get();
-               } else {
-                       tmpbuf = server->smallbuf;
+               else
                        next_buffer = (char *)cifs_small_buf_get();
-               }
                memcpy(next_buffer,
-                      tmpbuf + le32_to_cpu(shdr->NextCommand),
+                      buf + le32_to_cpu(shdr->NextCommand),
                       pdu_length - le32_to_cpu(shdr->NextCommand));
        }
 
@@ -3191,12 +3244,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
                pdu_length -= le32_to_cpu(shdr->NextCommand);
                server->large_buf = next_is_large;
                if (next_is_large)
-                       server->bigbuf = next_buffer;
+                       server->bigbuf = buf = next_buffer;
                else
-                       server->smallbuf = next_buffer;
-
-               buf += le32_to_cpu(shdr->NextCommand);
+                       server->smallbuf = buf = next_buffer;
                goto one_more;
+       } else if (ret != 0) {
+               /*
+                * ret != 0 here means that we didn't get to handle_mid() thus
+                * server->smallbuf and server->bigbuf are still valid. We need
+                * to free next_buffer because it is not going to be used
+                * anywhere.
+                */
+               if (next_is_large)
+                       free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
+               else
+                       free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
        }
 
        return ret;
index c181f1621e1af09c62685ea023e2f57935fd2293..b1f5d0d28335a1bc35ae87ec41e75a3d43ee6c95 100644 (file)
@@ -168,7 +168,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
        if (tcon == NULL)
                return 0;
 
-       if (smb2_command == SMB2_TREE_CONNECT)
+       if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
                return 0;
 
        if (tcon->tidStatus == CifsExiting) {
@@ -712,6 +712,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
                } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
                        /* ops set to 3.0 by default for default so update */
                        ses->server->ops = &smb21_operations;
+                       ses->server->vals = &smb21_values;
                }
        } else if (le16_to_cpu(rsp->DialectRevision) !=
                                ses->server->vals->protocol_id) {
@@ -1006,7 +1007,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
        else
                req->SecurityMode = 0;
 
+#ifdef CONFIG_CIFS_DFS_UPCALL
+       req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
+#else
        req->Capabilities = 0;
+#endif /* DFS_UPCALL */
+
        req->Channel = 0; /* MBZ */
 
        sess_data->iov[0].iov_base = (char *)req;
@@ -2186,7 +2192,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
        iov[1].iov_len = uni_path_len;
        iov[1].iov_base = path;
 
-       if (!server->oplocks)
+       if ((!server->oplocks) || (tcon->no_lease))
                *oplock = SMB2_OPLOCK_LEVEL_NONE;
 
        if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
index 5fdb9a509a97f13fddba3099dc9d9720a3b78d51..1959931e14c1ea53c68fc3dd96aa4e41683985ba 100644 (file)
@@ -2090,7 +2090,8 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
  * rqst: the data to write
  * return value: 0 if successfully write, otherwise error code
  */
-int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+int smbd_send(struct TCP_Server_Info *server,
+       int num_rqst, struct smb_rqst *rqst_array)
 {
        struct smbd_connection *info = server->smbd_conn;
        struct kvec vec;
@@ -2102,6 +2103,8 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
                info->max_send_size - sizeof(struct smbd_data_transfer);
        struct kvec *iov;
        int rc;
+       struct smb_rqst *rqst;
+       int rqst_idx;
 
        info->smbd_send_pending++;
        if (info->transport_status != SMBD_CONNECTED) {
@@ -2109,47 +2112,41 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
                goto done;
        }
 
-       /*
-        * Skip the RFC1002 length defined in MS-SMB2 section 2.1
-        * It is used only for TCP transport in the iov[0]
-        * In future we may want to add a transport layer under protocol
-        * layer so this will only be issued to TCP transport
-        */
-
-       if (rqst->rq_iov[0].iov_len != 4) {
-               log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
-               return -EINVAL;
-       }
-
        /*
         * Add in the page array if there is one. The caller needs to set
         * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
         * ends at page boundary
         */
-       buflen = smb_rqst_len(server, rqst);
+       remaining_data_length = 0;
+       for (i = 0; i < num_rqst; i++)
+               remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
 
-       if (buflen + sizeof(struct smbd_data_transfer) >
+       if (remaining_data_length + sizeof(struct smbd_data_transfer) >
                info->max_fragmented_send_size) {
                log_write(ERR, "payload size %d > max size %d\n",
-                       buflen, info->max_fragmented_send_size);
+                       remaining_data_length, info->max_fragmented_send_size);
                rc = -EINVAL;
                goto done;
        }
 
-       iov = &rqst->rq_iov[1];
+       rqst_idx = 0;
+
+next_rqst:
+       rqst = &rqst_array[rqst_idx];
+       iov = rqst->rq_iov;
 
-       cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
-       for (i = 0; i < rqst->rq_nvec-1; i++)
+       cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
+               rqst_idx, smb_rqst_len(server, rqst));
+       for (i = 0; i < rqst->rq_nvec; i++)
                dump_smb(iov[i].iov_base, iov[i].iov_len);
 
-       remaining_data_length = buflen;
 
-       log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
-               "rq_tailsz=%d buflen=%d\n",
-               rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
-               rqst->rq_tailsz, buflen);
+       log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
+               "rq_tailsz=%d buflen=%lu\n",
+               rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
+               rqst->rq_tailsz, smb_rqst_len(server, rqst));
 
-       start = i = iov[0].iov_len ? 0 : 1;
+       start = i = 0;
        buflen = 0;
        while (true) {
                buflen += iov[i].iov_len;
@@ -2197,14 +2194,14 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
                                                goto done;
                                }
                                i++;
-                               if (i == rqst->rq_nvec-1)
+                               if (i == rqst->rq_nvec)
                                        break;
                        }
                        start = i;
                        buflen = 0;
                } else {
                        i++;
-                       if (i == rqst->rq_nvec-1) {
+                       if (i == rqst->rq_nvec) {
                                /* send out all remaining vecs */
                                remaining_data_length -= buflen;
                                log_write(INFO,
@@ -2248,6 +2245,10 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
                }
        }
 
+       rqst_idx++;
+       if (rqst_idx < num_rqst)
+               goto next_rqst;
+
 done:
        /*
         * As an optimization, we don't wait for individual I/O to finish
index a11096254f2965d02478132af55e9ccf6613c578..b5c240ff21919b0d09563159fc3c2236741ac799 100644 (file)
@@ -292,7 +292,8 @@ void smbd_destroy(struct smbd_connection *info);
 
 /* Interface for carrying upper layer I/O through send/recv */
 int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
-int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
+int smbd_send(struct TCP_Server_Info *server,
+       int num_rqst, struct smb_rqst *rqst);
 
 enum mr_state {
        MR_READY,
@@ -332,7 +333,7 @@ static inline void *smbd_get_connection(
 static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
 static inline void smbd_destroy(struct smbd_connection *info) {}
 static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
-static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; }
+static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
 #endif
 
 #endif
index f2938bd95c40e076a90f0bec50bae24d1a830547..fe77f41bff9f2438094542207a32e9af7928f97c 100644 (file)
@@ -287,7 +287,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
        __be32 rfc1002_marker;
 
        if (cifs_rdma_enabled(server) && server->smbd_conn) {
-               rc = smbd_send(server, rqst);
+               rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }
        if (ssocket == NULL)
index 50ddb795aaeb3b49586d8cf3454a197145db9488..a2db401a58edc405a86781539041b386ed08659f 100644 (file)
@@ -31,7 +31,7 @@
 #include "cifs_fs_sb.h"
 #include "cifs_unicode.h"
 
-#define MAX_EA_VALUE_SIZE 65535
+#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
 #define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
 #define CIFS_XATTR_ATTRIB "cifs.dosattrib"  /* full name: user.cifs.dosattrib */
 #define CIFS_XATTR_CREATETIME "cifs.creationtime"  /* user.cifs.creationtime */
index 1cbc1f2298ee480f283320fe2b9d6ca24ad3f9de..43d371551d2b1f571e0a1a87f9ac340d45e0d189 100644 (file)
 #include "coda_linux.h"
 #include "coda_int.h"
 
+struct coda_vm_ops {
+       atomic_t refcnt;
+       struct file *coda_file;
+       const struct vm_operations_struct *host_vm_ops;
+       struct vm_operations_struct vm_ops;
+};
+
 static ssize_t
 coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
@@ -61,6 +68,34 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
        return ret;
 }
 
+static void
+coda_vm_open(struct vm_area_struct *vma)
+{
+       struct coda_vm_ops *cvm_ops =
+               container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
+
+       atomic_inc(&cvm_ops->refcnt);
+
+       if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
+               cvm_ops->host_vm_ops->open(vma);
+}
+
+static void
+coda_vm_close(struct vm_area_struct *vma)
+{
+       struct coda_vm_ops *cvm_ops =
+               container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
+
+       if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
+               cvm_ops->host_vm_ops->close(vma);
+
+       if (atomic_dec_and_test(&cvm_ops->refcnt)) {
+               vma->vm_ops = cvm_ops->host_vm_ops;
+               fput(cvm_ops->coda_file);
+               kfree(cvm_ops);
+       }
+}
+
 static int
 coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
 {
@@ -68,6 +103,8 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
        struct coda_inode_info *cii;
        struct file *host_file;
        struct inode *coda_inode, *host_inode;
+       struct coda_vm_ops *cvm_ops;
+       int ret;
 
        cfi = CODA_FTOC(coda_file);
        BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
@@ -76,6 +113,13 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
        if (!host_file->f_op->mmap)
                return -ENODEV;
 
+       if (WARN_ON(coda_file != vma->vm_file))
+               return -EIO;
+
+       cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
+       if (!cvm_ops)
+               return -ENOMEM;
+
        coda_inode = file_inode(coda_file);
        host_inode = file_inode(host_file);
 
@@ -89,6 +133,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
         * the container file on us! */
        else if (coda_inode->i_mapping != host_inode->i_mapping) {
                spin_unlock(&cii->c_lock);
+               kfree(cvm_ops);
                return -EBUSY;
        }
 
@@ -97,7 +142,29 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
        cfi->cfi_mapcount++;
        spin_unlock(&cii->c_lock);
 
-       return call_mmap(host_file, vma);
+       vma->vm_file = get_file(host_file);
+       ret = call_mmap(vma->vm_file, vma);
+
+       if (ret) {
+               /* if call_mmap fails, our caller will put coda_file so we
+                * should drop the reference to the host_file that we got.
+                */
+               fput(host_file);
+               kfree(cvm_ops);
+       } else {
+               /* here we add redirects for the open/close vm_operations */
+               cvm_ops->host_vm_ops = vma->vm_ops;
+               if (vma->vm_ops)
+                       cvm_ops->vm_ops = *vma->vm_ops;
+
+               cvm_ops->vm_ops.open = coda_vm_open;
+               cvm_ops->vm_ops.close = coda_vm_close;
+               cvm_ops->coda_file = coda_file;
+               atomic_set(&cvm_ops->refcnt, 1);
+
+               vma->vm_ops = &cvm_ops->vm_ops;
+       }
+       return ret;
 }
 
 int coda_open(struct inode *coda_inode, struct file *coda_file)
@@ -207,4 +274,3 @@ const struct file_operations coda_file_operations = {
        .fsync          = coda_fsync,
        .splice_read    = generic_file_splice_read,
 };
-
index c5234c21b539405a452417ce4d9cb9e977186637..55824cba324538aad639b6eda68863ad115054c6 100644 (file)
@@ -187,8 +187,11 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
        if (req->uc_opcode == CODA_OPEN_BY_FD) {
                struct coda_open_by_fd_out *outp =
                        (struct coda_open_by_fd_out *)req->uc_data;
-               if (!outp->oh.result)
+               if (!outp->oh.result) {
                        outp->fh = fget(outp->fd);
+                       if (!outp->fh)
+                               return -EBADF;
+               }
        }
 
         wake_up(&req->uc_sleep);
index a9b00942e87d767fac5bea23999d954e29b0440f..8f08095ee54e98961a04980db41aebf9122514a5 100644 (file)
@@ -894,9 +894,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
 COMPATIBLE_IOCTL(PPPIOCATTCHAN)
 COMPATIBLE_IOCTL(PPPIOCGCHAN)
 COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
-/* PPPOX */
-COMPATIBLE_IOCTL(PPPOEIOCSFWD)
-COMPATIBLE_IOCTL(PPPOEIOCDFWD)
 /* Big A */
 /* sparc only */
 /* Big Q for sound/OSS */
index 0f46cf550907fcc4a0675b85df9e3331f010e744..c83ddff3ff4ac4a647fb1f4c29ee34dc8f5fb8c1 100644 (file)
@@ -149,7 +149,10 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
        struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
 
-       BUG_ON(len == 0);
+       if (WARN_ON_ONCE(len <= 0))
+               return -EINVAL;
+       if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))
+               return -EINVAL;
 
        BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
        BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
@@ -241,8 +244,6 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
        struct page *ciphertext_page = page;
        int err;
 
-       BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);
-
        if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
                /* with inplace-encryption we just encrypt the page */
                err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
@@ -254,7 +255,8 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
                return ciphertext_page;
        }
 
-       BUG_ON(!PageLocked(page));
+       if (WARN_ON_ONCE(!PageLocked(page)))
+               return ERR_PTR(-EINVAL);
 
        ctx = fscrypt_get_ctx(inode, gfp_flags);
        if (IS_ERR(ctx))
@@ -302,8 +304,9 @@ EXPORT_SYMBOL(fscrypt_encrypt_page);
 int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
                        unsigned int len, unsigned int offs, u64 lblk_num)
 {
-       if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
-               BUG_ON(!PageLocked(page));
+       if (WARN_ON_ONCE(!PageLocked(page) &&
+                        !(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES)))
+               return -EINVAL;
 
        return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
                                      len, offs, GFP_NOFS);
index 75a289c31c7e5dabb4e39c53b2b3cdd26ea7931f..f0d932fa39c20db0367e7db9129d520719fd2660 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -659,7 +659,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
         * guaranteed to either see new references or prevent new
         * references from being established.
         */
-       unmap_mapping_range(mapping, 0, 0, 1);
+       unmap_mapping_range(mapping, 0, 0, 0);
 
        while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
index a5e4a221435c04bdf97e1219ca3bd022ca864e6c..a93ebffe84b38e0700324d5eb39b5e5babedf8c4 100644 (file)
@@ -1630,8 +1630,10 @@ static void clean_writequeues(void)
 
 static void work_stop(void)
 {
-       destroy_workqueue(recv_workqueue);
-       destroy_workqueue(send_workqueue);
+       if (recv_workqueue)
+               destroy_workqueue(recv_workqueue);
+       if (send_workqueue)
+               destroy_workqueue(send_workqueue);
 }
 
 static int work_start(void)
@@ -1691,13 +1693,17 @@ static void work_flush(void)
        struct hlist_node *n;
        struct connection *con;
 
-       flush_workqueue(recv_workqueue);
-       flush_workqueue(send_workqueue);
+       if (recv_workqueue)
+               flush_workqueue(recv_workqueue);
+       if (send_workqueue)
+               flush_workqueue(send_workqueue);
        do {
                ok = 1;
                foreach_conn(stop_conn);
-               flush_workqueue(recv_workqueue);
-               flush_workqueue(send_workqueue);
+               if (recv_workqueue)
+                       flush_workqueue(recv_workqueue);
+               if (send_workqueue)
+                       flush_workqueue(send_workqueue);
                for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
                        hlist_for_each_entry_safe(con, n,
                                                  &connection_hash[i], list) {
index 4dd842f728465591cc7982d635f544c0084b064a..708f931c36f14adace91af82229a6043386c9baf 100644 (file)
@@ -1018,8 +1018,10 @@ int ecryptfs_read_and_validate_header_region(struct inode *inode)
 
        rc = ecryptfs_read_lower(file_size, 0, ECRYPTFS_SIZE_AND_MARKER_BYTES,
                                 inode);
-       if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
-               return rc >= 0 ? -EINVAL : rc;
+       if (rc < 0)
+               return rc;
+       else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
+               return -EINVAL;
        rc = ecryptfs_validate_marker(marker);
        if (!rc)
                ecryptfs_i_size_init(file_size, inode);
@@ -1381,8 +1383,10 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
                                     ecryptfs_inode_to_lower(inode),
                                     ECRYPTFS_XATTR_NAME, file_size,
                                     ECRYPTFS_SIZE_AND_MARKER_BYTES);
-       if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
-               return rc >= 0 ? -EINVAL : rc;
+       if (rc < 0)
+               return rc;
+       else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
+               return -EINVAL;
        rc = ecryptfs_validate_marker(marker);
        if (!rc)
                ecryptfs_i_size_init(file_size, inode);
index 433b1257694ab7eec831f2d762f980ca95183956..561ea64829eceaf2d72a6146cb073bcde97fcdc5 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1826,7 +1826,7 @@ static int __do_execve_file(int fd, struct filename *filename,
        membarrier_execve(current);
        rseq_execve(current);
        acct_update_integrals(current);
-       task_numa_free(current);
+       task_numa_free(current, false);
        free_bprm(bprm);
        kfree(pathbuf);
        if (filename)
index 913061c0de1b35f358f2dcbe8ffd5baad7c3ed04..7edc8172c53ad0cb8cb0badaa782b327c6d062de 100644 (file)
@@ -38,6 +38,7 @@ int __init ext4_init_system_zone(void)
 
 void ext4_exit_system_zone(void)
 {
+       rcu_barrier();
        kmem_cache_destroy(ext4_system_zone_cachep);
 }
 
@@ -49,17 +50,26 @@ static inline int can_merge(struct ext4_system_zone *entry1,
        return 0;
 }
 
+static void release_system_zone(struct ext4_system_blocks *system_blks)
+{
+       struct ext4_system_zone *entry, *n;
+
+       rbtree_postorder_for_each_entry_safe(entry, n,
+                               &system_blks->root, node)
+               kmem_cache_free(ext4_system_zone_cachep, entry);
+}
+
 /*
  * Mark a range of blocks as belonging to the "system zone" --- that
  * is, filesystem metadata blocks which should never be used by
  * inodes.
  */
-static int add_system_zone(struct ext4_sb_info *sbi,
+static int add_system_zone(struct ext4_system_blocks *system_blks,
                           ext4_fsblk_t start_blk,
                           unsigned int count)
 {
        struct ext4_system_zone *new_entry = NULL, *entry;
-       struct rb_node **n = &sbi->system_blks.rb_node, *node;
+       struct rb_node **n = &system_blks->root.rb_node, *node;
        struct rb_node *parent = NULL, *new_node = NULL;
 
        while (*n) {
@@ -91,7 +101,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
                new_node = &new_entry->node;
 
                rb_link_node(new_node, parent, n);
-               rb_insert_color(new_node, &sbi->system_blks);
+               rb_insert_color(new_node, &system_blks->root);
        }
 
        /* Can we merge to the left? */
@@ -101,7 +111,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
                if (can_merge(entry, new_entry)) {
                        new_entry->start_blk = entry->start_blk;
                        new_entry->count += entry->count;
-                       rb_erase(node, &sbi->system_blks);
+                       rb_erase(node, &system_blks->root);
                        kmem_cache_free(ext4_system_zone_cachep, entry);
                }
        }
@@ -112,7 +122,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
                entry = rb_entry(node, struct ext4_system_zone, node);
                if (can_merge(new_entry, entry)) {
                        new_entry->count += entry->count;
-                       rb_erase(node, &sbi->system_blks);
+                       rb_erase(node, &system_blks->root);
                        kmem_cache_free(ext4_system_zone_cachep, entry);
                }
        }
@@ -126,7 +136,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
        int first = 1;
 
        printk(KERN_INFO "System zones: ");
-       node = rb_first(&sbi->system_blks);
+       node = rb_first(&sbi->system_blks->root);
        while (node) {
                entry = rb_entry(node, struct ext4_system_zone, node);
                printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
@@ -137,87 +147,213 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
        printk(KERN_CONT "\n");
 }
 
+/*
+ * Returns 1 if the passed-in block region (start_blk,
+ * start_blk+count) is valid; 0 if some part of the block region
+ * overlaps with filesystem metadata blocks.
+ */
+static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
+                                    struct ext4_system_blocks *system_blks,
+                                    ext4_fsblk_t start_blk,
+                                    unsigned int count)
+{
+       struct ext4_system_zone *entry;
+       struct rb_node *n;
+
+       if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+           (start_blk + count < start_blk) ||
+           (start_blk + count > ext4_blocks_count(sbi->s_es))) {
+               sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
+               return 0;
+       }
+
+       if (system_blks == NULL)
+               return 1;
+
+       n = system_blks->root.rb_node;
+       while (n) {
+               entry = rb_entry(n, struct ext4_system_zone, node);
+               if (start_blk + count - 1 < entry->start_blk)
+                       n = n->rb_left;
+               else if (start_blk >= (entry->start_blk + entry->count))
+                       n = n->rb_right;
+               else {
+                       sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
+                       return 0;
+               }
+       }
+       return 1;
+}
+
+static int ext4_protect_reserved_inode(struct super_block *sb,
+                                      struct ext4_system_blocks *system_blks,
+                                      u32 ino)
+{
+       struct inode *inode;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_map_blocks map;
+       u32 i = 0, num;
+       int err = 0, n;
+
+       if ((ino < EXT4_ROOT_INO) ||
+           (ino > le32_to_cpu(sbi->s_es->s_inodes_count)))
+               return -EINVAL;
+       inode = ext4_iget(sb, ino, EXT4_IGET_SPECIAL);
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
+       num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+       while (i < num) {
+               map.m_lblk = i;
+               map.m_len = num - i;
+               n = ext4_map_blocks(NULL, inode, &map, 0);
+               if (n < 0) {
+                       err = n;
+                       break;
+               }
+               if (n == 0) {
+                       i++;
+               } else {
+                       if (!ext4_data_block_valid_rcu(sbi, system_blks,
+                                               map.m_pblk, n)) {
+                               ext4_error(sb, "blocks %llu-%llu from inode %u "
+                                          "overlap system zone", map.m_pblk,
+                                          map.m_pblk + map.m_len - 1, ino);
+                               err = -EFSCORRUPTED;
+                               break;
+                       }
+                       err = add_system_zone(system_blks, map.m_pblk, n);
+                       if (err < 0)
+                               break;
+                       i += n;
+               }
+       }
+       iput(inode);
+       return err;
+}
+
+static void ext4_destroy_system_zone(struct rcu_head *rcu)
+{
+       struct ext4_system_blocks *system_blks;
+
+       system_blks = container_of(rcu, struct ext4_system_blocks, rcu);
+       release_system_zone(system_blks);
+       kfree(system_blks);
+}
+
+/*
+ * Build system zone rbtree which is used for block validity checking.
+ *
+ * The update of system_blks pointer in this function is protected by
+ * sb->s_umount semaphore. However we have to be careful as we can be
+ * racing with ext4_data_block_valid() calls reading system_blks rbtree
+ * protected only by RCU. That's why we first build the rbtree and then
+ * swap it in place.
+ */
 int ext4_setup_system_zone(struct super_block *sb)
 {
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_system_blocks *system_blks;
        struct ext4_group_desc *gdp;
        ext4_group_t i;
        int flex_size = ext4_flex_bg_size(sbi);
        int ret;
 
        if (!test_opt(sb, BLOCK_VALIDITY)) {
-               if (sbi->system_blks.rb_node)
+               if (sbi->system_blks)
                        ext4_release_system_zone(sb);
                return 0;
        }
-       if (sbi->system_blks.rb_node)
+       if (sbi->system_blks)
                return 0;
 
+       system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
+       if (!system_blks)
+               return -ENOMEM;
+
        for (i=0; i < ngroups; i++) {
                if (ext4_bg_has_super(sb, i) &&
                    ((i < 5) || ((i % flex_size) == 0)))
-                       add_system_zone(sbi, ext4_group_first_block_no(sb, i),
+                       add_system_zone(system_blks,
+                                       ext4_group_first_block_no(sb, i),
                                        ext4_bg_num_gdb(sb, i) + 1);
                gdp = ext4_get_group_desc(sb, i, NULL);
-               ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1);
+               ret = add_system_zone(system_blks,
+                               ext4_block_bitmap(sb, gdp), 1);
                if (ret)
-                       return ret;
-               ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1);
+                       goto err;
+               ret = add_system_zone(system_blks,
+                               ext4_inode_bitmap(sb, gdp), 1);
                if (ret)
-                       return ret;
-               ret = add_system_zone(sbi, ext4_inode_table(sb, gdp),
+                       goto err;
+               ret = add_system_zone(system_blks,
+                               ext4_inode_table(sb, gdp),
                                sbi->s_itb_per_group);
                if (ret)
-                       return ret;
+                       goto err;
+       }
+       if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
+               ret = ext4_protect_reserved_inode(sb, system_blks,
+                               le32_to_cpu(sbi->s_es->s_journal_inum));
+               if (ret)
+                       goto err;
        }
 
+       /*
+        * System blks rbtree complete, announce it once to prevent racing
+        * with ext4_data_block_valid() accessing the rbtree at the same
+        * time.
+        */
+       rcu_assign_pointer(sbi->system_blks, system_blks);
+
        if (test_opt(sb, DEBUG))
                debug_print_tree(sbi);
        return 0;
+err:
+       release_system_zone(system_blks);
+       kfree(system_blks);
+       return ret;
 }
 
-/* Called when the filesystem is unmounted */
+/*
+ * Called when the filesystem is unmounted or when remounting it with
+ * noblock_validity specified.
+ *
+ * The update of system_blks pointer in this function is protected by
+ * sb->s_umount semaphore. However we have to be careful as we can be
+ * racing with ext4_data_block_valid() calls reading system_blks rbtree
+ * protected only by RCU. So we first clear the system_blks pointer and
+ * then free the rbtree only after RCU grace period expires.
+ */
 void ext4_release_system_zone(struct super_block *sb)
 {
-       struct ext4_system_zone *entry, *n;
+       struct ext4_system_blocks *system_blks;
 
-       rbtree_postorder_for_each_entry_safe(entry, n,
-                       &EXT4_SB(sb)->system_blks, node)
-               kmem_cache_free(ext4_system_zone_cachep, entry);
+       system_blks = rcu_dereference_protected(EXT4_SB(sb)->system_blks,
+                                       lockdep_is_held(&sb->s_umount));
+       rcu_assign_pointer(EXT4_SB(sb)->system_blks, NULL);
 
-       EXT4_SB(sb)->system_blks = RB_ROOT;
+       if (system_blks)
+               call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
 }
 
-/*
- * Returns 1 if the passed-in block region (start_blk,
- * start_blk+count) is valid; 0 if some part of the block region
- * overlaps with filesystem metadata blocks.
- */
 int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
                          unsigned int count)
 {
-       struct ext4_system_zone *entry;
-       struct rb_node *n = sbi->system_blks.rb_node;
+       struct ext4_system_blocks *system_blks;
+       int ret;
 
-       if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
-           (start_blk + count < start_blk) ||
-           (start_blk + count > ext4_blocks_count(sbi->s_es))) {
-               sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
-               return 0;
-       }
-       while (n) {
-               entry = rb_entry(n, struct ext4_system_zone, node);
-               if (start_blk + count - 1 < entry->start_blk)
-                       n = n->rb_left;
-               else if (start_blk >= (entry->start_blk + entry->count))
-                       n = n->rb_right;
-               else {
-                       sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
-                       return 0;
-               }
-       }
-       return 1;
+       /*
+        * Lock the system zone to prevent it being released concurrently
+        * when doing a remount which inverse current "[no]block_validity"
+        * mount option.
+        */
+       rcu_read_lock();
+       system_blks = rcu_dereference(sbi->system_blks);
+       ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
+                                       count);
+       rcu_read_unlock();
+       return ret;
 }
 
 int ext4_check_blockref(const char *function, unsigned int line,
@@ -227,6 +363,11 @@ int ext4_check_blockref(const char *function, unsigned int line,
        __le32 *bref = p;
        unsigned int blk;
 
+       if (ext4_has_feature_journal(inode->i_sb) &&
+           (inode->i_ino ==
+            le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+               return 0;
+
        while (bref < p+max) {
                blk = le32_to_cpu(*bref++);
                if (blk &&
index f93f9881ec184c351e69af715a849d1a01562b95..46d5c40f2835fec84ec26620113231fb619644b7 100644 (file)
@@ -108,7 +108,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
        struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh = NULL;
-       int dir_has_error = 0;
        struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
 
        if (ext4_encrypted_inode(inode)) {
@@ -144,8 +143,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
                        return err;
        }
 
-       offset = ctx->pos & (sb->s_blocksize - 1);
-
        while (ctx->pos < inode->i_size) {
                struct ext4_map_blocks map;
 
@@ -154,9 +151,18 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
                        goto errout;
                }
                cond_resched();
+               offset = ctx->pos & (sb->s_blocksize - 1);
                map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
                map.m_len = 1;
                err = ext4_map_blocks(NULL, inode, &map, 0);
+               if (err == 0) {
+                       /* m_len should never be zero but let's avoid
+                        * an infinite loop if it somehow is */
+                       if (map.m_len == 0)
+                               map.m_len = 1;
+                       ctx->pos += map.m_len * sb->s_blocksize;
+                       continue;
+               }
                if (err > 0) {
                        pgoff_t index = map.m_pblk >>
                                        (PAGE_SHIFT - inode->i_blkbits);
@@ -175,13 +181,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
                }
 
                if (!bh) {
-                       if (!dir_has_error) {
-                               EXT4_ERROR_FILE(file, 0,
-                                               "directory contains a "
-                                               "hole at offset %llu",
-                                          (unsigned long long) ctx->pos);
-                               dir_has_error = 1;
-                       }
                        /* corrupt size?  Maybe no more blocks to read */
                        if (ctx->pos > inode->i_blocks << 9)
                                break;
index 1ee51d3a978ad63505f1e375b9fc4f64efa83ca4..f8456a423c4ea7405e0870216250b491077cd1e0 100644 (file)
@@ -194,6 +194,14 @@ struct ext4_map_blocks {
        unsigned int m_flags;
 };
 
+/*
+ * Block validity checking, system zone rbtree.
+ */
+struct ext4_system_blocks {
+       struct rb_root root;
+       struct rcu_head rcu;
+};
+
 /*
  * Flags for ext4_io_end->flags
  */
@@ -1409,7 +1417,7 @@ struct ext4_sb_info {
        int s_jquota_fmt;                       /* Format of quota to use */
 #endif
        unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
-       struct rb_root system_blks;
+       struct ext4_system_blocks __rcu *system_blks;
 
 #ifdef EXTENTS_STATS
        /* ext4 extents stats */
index df908ef79ccea1ac594d26f8489799d9912afdb1..402dc366117eaa0275a2f9ba902ca2b93be3f08d 100644 (file)
@@ -361,20 +361,20 @@ static inline int ext4_journal_force_commit(journal_t *journal)
 }
 
 static inline int ext4_jbd2_inode_add_write(handle_t *handle,
-                                           struct inode *inode)
+               struct inode *inode, loff_t start_byte, loff_t length)
 {
        if (ext4_handle_valid(handle))
-               return jbd2_journal_inode_add_write(handle,
-                                                   EXT4_I(inode)->jinode);
+               return jbd2_journal_inode_ranged_write(handle,
+                               EXT4_I(inode)->jinode, start_byte, length);
        return 0;
 }
 
 static inline int ext4_jbd2_inode_add_wait(handle_t *handle,
-                                          struct inode *inode)
+               struct inode *inode, loff_t start_byte, loff_t length)
 {
        if (ext4_handle_valid(handle))
-               return jbd2_journal_inode_add_wait(handle,
-                                                  EXT4_I(inode)->jinode);
+               return jbd2_journal_inode_ranged_wait(handle,
+                               EXT4_I(inode)->jinode, start_byte, length);
        return 0;
 }
 
index 45aea792d22a04617852b5d37e6dcd7d8dafc494..f81eb1785af2be017b48fbaade6cedda1aa5c453 100644 (file)
@@ -518,10 +518,14 @@ __read_extent_tree_block(const char *function, unsigned int line,
        }
        if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
                return bh;
-       err = __ext4_ext_check(function, line, inode,
-                              ext_block_hdr(bh), depth, pblk);
-       if (err)
-               goto errout;
+       if (!ext4_has_feature_journal(inode->i_sb) ||
+           (inode->i_ino !=
+            le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
+               err = __ext4_ext_check(function, line, inode,
+                                      ext_block_hdr(bh), depth, pblk);
+               if (err)
+                       goto errout;
+       }
        set_buffer_verified(bh);
        /*
         * If this is a leaf block, cache all of its entries
@@ -3744,8 +3748,8 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
         * illegal.
         */
        if (ee_block != map->m_lblk || ee_len > map->m_len) {
-#ifdef EXT4_DEBUG
-               ext4_warning("Inode (%ld) finished: extent logical block %llu,"
+#ifdef CONFIG_EXT4_DEBUG
+               ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
                             " len %u; IO logical block %llu, len %u",
                             inode->i_ino, (unsigned long long)ee_block, ee_len,
                             (unsigned long long)map->m_lblk, map->m_len);
index 2c5baa5e8291165e07d5609b650c38c13eda587f..f4a24a46245eacebf90e771bf34defc8019bc792 100644 (file)
@@ -165,6 +165,10 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;
+
+       if (unlikely(IS_IMMUTABLE(inode)))
+               return -EPERM;
+
        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
index 05dc5a4ba481b85677e9f6ca3cb8da19a7f6e46c..a0c94c365a4c0a72ea0155a007ca0377019d47a9 100644 (file)
@@ -399,6 +399,10 @@ static int __check_block_validity(struct inode *inode, const char *func,
                                unsigned int line,
                                struct ext4_map_blocks *map)
 {
+       if (ext4_has_feature_journal(inode->i_sb) &&
+           (inode->i_ino ==
+            le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+               return 0;
        if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
                                   map->m_len)) {
                ext4_error_inode(inode, func, line, map->m_pblk,
@@ -729,10 +733,16 @@ out_sem:
                    !(flags & EXT4_GET_BLOCKS_ZERO) &&
                    !ext4_is_quota_file(inode) &&
                    ext4_should_order_data(inode)) {
+                       loff_t start_byte =
+                               (loff_t)map->m_lblk << inode->i_blkbits;
+                       loff_t length = (loff_t)map->m_len << inode->i_blkbits;
+
                        if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
-                               ret = ext4_jbd2_inode_add_wait(handle, inode);
+                               ret = ext4_jbd2_inode_add_wait(handle, inode,
+                                               start_byte, length);
                        else
-                               ret = ext4_jbd2_inode_add_write(handle, inode);
+                               ret = ext4_jbd2_inode_add_write(handle, inode,
+                                               start_byte, length);
                        if (ret)
                                return ret;
                }
@@ -4058,7 +4068,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
                err = 0;
                mark_buffer_dirty(bh);
                if (ext4_should_order_data(inode))
-                       err = ext4_jbd2_inode_add_write(handle, inode);
+                       err = ext4_jbd2_inode_add_write(handle, inode, from,
+                                       length);
        }
 
 unlock:
@@ -4254,6 +4265,15 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 
        trace_ext4_punch_hole(inode, offset, length, 0);
 
+       ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+       if (ext4_has_inline_data(inode)) {
+               down_write(&EXT4_I(inode)->i_mmap_sem);
+               ret = ext4_convert_inline_data(inode);
+               up_write(&EXT4_I(inode)->i_mmap_sem);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * Write out all dirty pages to avoid race conditions
         * Then release them.
@@ -5491,6 +5511,14 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;
 
+       if (unlikely(IS_IMMUTABLE(inode)))
+               return -EPERM;
+
+       if (unlikely(IS_APPEND(inode) &&
+                    (ia_valid & (ATTR_MODE | ATTR_UID |
+                                 ATTR_GID | ATTR_TIMES_SET))))
+               return -EPERM;
+
        error = setattr_prepare(dentry, attr);
        if (error)
                return error;
@@ -6190,6 +6218,9 @@ int ext4_page_mkwrite(struct vm_fault *vmf)
        get_block_t *get_block;
        int retries = 0;
 
+       if (unlikely(IS_IMMUTABLE(inode)))
+               return VM_FAULT_SIGBUS;
+
        sb_start_pagefault(inode->i_sb);
        file_update_time(vma->vm_file);
 
index 53d57cdf3c4d8876f10095b703f80f799aece343..abb6fcff0a1d3b8bd7b2c1a4cc9c69cc9f500d3a 100644 (file)
@@ -268,6 +268,29 @@ static int uuid_is_zero(__u8 u[16])
 }
 #endif
 
+/*
+ * If immutable is set and we are not clearing it, we're not allowed to change
+ * anything else in the inode.  Don't error out if we're only trying to set
+ * immutable on an immutable file.
+ */
+static int ext4_ioctl_check_immutable(struct inode *inode, __u32 new_projid,
+                                     unsigned int flags)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       unsigned int oldflags = ei->i_flags;
+
+       if (!(oldflags & EXT4_IMMUTABLE_FL) || !(flags & EXT4_IMMUTABLE_FL))
+               return 0;
+
+       if ((oldflags & ~EXT4_IMMUTABLE_FL) != (flags & ~EXT4_IMMUTABLE_FL))
+               return -EPERM;
+       if (ext4_has_feature_project(inode->i_sb) &&
+           __kprojid_val(ei->i_projid) != new_projid)
+               return -EPERM;
+
+       return 0;
+}
+
 static int ext4_ioctl_setflags(struct inode *inode,
                               unsigned int flags)
 {
@@ -321,6 +344,20 @@ static int ext4_ioctl_setflags(struct inode *inode,
                        goto flags_out;
        }
 
+       /*
+        * Wait for all pending directio and then flush all the dirty pages
+        * for this file.  The flush marks all the pages readonly, so any
+        * subsequent attempt to write to the file (particularly mmap pages)
+        * will come through the filesystem and fail.
+        */
+       if (S_ISREG(inode->i_mode) && !IS_IMMUTABLE(inode) &&
+           (flags & EXT4_IMMUTABLE_FL)) {
+               inode_dio_wait(inode);
+               err = filemap_write_and_wait(inode->i_mapping);
+               if (err)
+                       goto flags_out;
+       }
+
        handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
@@ -750,7 +787,11 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        return err;
 
                inode_lock(inode);
-               err = ext4_ioctl_setflags(inode, flags);
+               err = ext4_ioctl_check_immutable(inode,
+                               from_kprojid(&init_user_ns, ei->i_projid),
+                               flags);
+               if (!err)
+                       err = ext4_ioctl_setflags(inode, flags);
                inode_unlock(inode);
                mnt_drop_write_file(filp);
                return err;
@@ -1120,6 +1161,9 @@ resizefs_out:
                        goto out;
                flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) |
                         (flags & EXT4_FL_XFLAG_VISIBLE);
+               err = ext4_ioctl_check_immutable(inode, fa.fsx_projid, flags);
+               if (err)
+                       goto out;
                err = ext4_ioctl_setflags(inode, flags);
                if (err)
                        goto out;
index 2f5be02fc6f6a2194f45adb5429f658c2d0a184a..287631bb09e753ae8c6a008742279bce568dd553 100644 (file)
@@ -390,7 +390,8 @@ data_copy:
 
        /* Even in case of data=writeback it is reasonable to pin
         * inode to transaction, to prevent unexpected data loss */
-       *err = ext4_jbd2_inode_add_write(handle, orig_inode);
+       *err = ext4_jbd2_inode_add_write(handle, orig_inode,
+                       (loff_t)orig_page_offset << PAGE_SHIFT, replaced_size);
 
 unlock_pages:
        unlock_page(pagep[0]);
index 4c5aa5df657314f763a659ea5284d7c2a51b9df3..61dc1b0e4465d3e44e3dcf61190f7f1a747320e7 100644 (file)
@@ -81,8 +81,18 @@ static struct buffer_head *ext4_append(handle_t *handle,
 static int ext4_dx_csum_verify(struct inode *inode,
                               struct ext4_dir_entry *dirent);
 
+/*
+ * Hints to ext4_read_dirblock regarding whether we expect a directory
+ * block being read to be an index block, or a block containing
+ * directory entries (and if the latter, whether it was found via a
+ * logical block in an htree index block).  This is used to control
+ * what sort of sanity checkinig ext4_read_dirblock() will do on the
+ * directory block read from the storage device.  EITHER will means
+ * the caller doesn't know what kind of directory block will be read,
+ * so no specific verification will be done.
+ */
 typedef enum {
-       EITHER, INDEX, DIRENT
+       EITHER, INDEX, DIRENT, DIRENT_HTREE
 } dirblock_type_t;
 
 #define ext4_read_dirblock(inode, block, type) \
@@ -108,11 +118,14 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
 
                return bh;
        }
-       if (!bh) {
+       if (!bh && (type == INDEX || type == DIRENT_HTREE)) {
                ext4_error_inode(inode, func, line, block,
-                                "Directory hole found");
+                                "Directory hole found for htree %s block",
+                                (type == INDEX) ? "index" : "leaf");
                return ERR_PTR(-EFSCORRUPTED);
        }
+       if (!bh)
+               return NULL;
        dirent = (struct ext4_dir_entry *) bh->b_data;
        /* Determine whether or not we have an index block */
        if (is_dx(inode)) {
@@ -979,7 +992,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
 
        dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
                                                        (unsigned long)block));
-       bh = ext4_read_dirblock(dir, block, DIRENT);
+       bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
        if (IS_ERR(bh))
                return PTR_ERR(bh);
 
@@ -1509,7 +1522,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
                return (struct buffer_head *) frame;
        do {
                block = dx_get_block(frame->at);
-               bh = ext4_read_dirblock(dir, block, DIRENT);
+               bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
                if (IS_ERR(bh))
                        goto errout;
 
@@ -2079,6 +2092,11 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
        blocks = dir->i_size >> sb->s_blocksize_bits;
        for (block = 0; block < blocks; block++) {
                bh = ext4_read_dirblock(dir, block, DIRENT);
+               if (bh == NULL) {
+                       bh = ext4_bread(handle, dir, block,
+                                       EXT4_GET_BLOCKS_CREATE);
+                       goto add_to_new_block;
+               }
                if (IS_ERR(bh)) {
                        retval = PTR_ERR(bh);
                        bh = NULL;
@@ -2099,6 +2117,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
                brelse(bh);
        }
        bh = ext4_append(handle, dir, &block);
+add_to_new_block:
        if (IS_ERR(bh)) {
                retval = PTR_ERR(bh);
                bh = NULL;
@@ -2143,7 +2162,7 @@ again:
                return PTR_ERR(frame);
        entries = frame->entries;
        at = frame->at;
-       bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
+       bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT_HTREE);
        if (IS_ERR(bh)) {
                err = PTR_ERR(bh);
                bh = NULL;
@@ -2691,7 +2710,10 @@ bool ext4_empty_dir(struct inode *inode)
                EXT4_ERROR_INODE(inode, "invalid size");
                return true;
        }
-       bh = ext4_read_dirblock(inode, 0, EITHER);
+       /* The first directory block must not be a hole,
+        * so treat it as DIRENT_HTREE
+        */
+       bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
        if (IS_ERR(bh))
                return true;
 
@@ -2713,6 +2735,10 @@ bool ext4_empty_dir(struct inode *inode)
                        brelse(bh);
                        lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
                        bh = ext4_read_dirblock(inode, lblock, EITHER);
+                       if (bh == NULL) {
+                               offset += sb->s_blocksize;
+                               continue;
+                       }
                        if (IS_ERR(bh))
                                return true;
                        de = (struct ext4_dir_entry_2 *) bh->b_data;
@@ -3256,7 +3282,10 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
        struct buffer_head *bh;
 
        if (!ext4_has_inline_data(inode)) {
-               bh = ext4_read_dirblock(inode, 0, EITHER);
+               /* The first directory block must not be a hole, so
+                * treat it as DIRENT_HTREE
+                */
+               bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
                if (IS_ERR(bh)) {
                        *retval = PTR_ERR(bh);
                        return NULL;
index 59d0472013f437616f06bba75b346dc036c3fa57..388500eec72912ccd336f7241f1402d50778d292 100644 (file)
@@ -849,6 +849,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
        unsigned int cp_blks = 1 + __cp_payload(sbi);
        block_t cp_blk_no;
        int i;
+       int err;
 
        sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
                                 GFP_KERNEL);
@@ -876,6 +877,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
        } else if (cp2) {
                cur_page = cp2;
        } else {
+               err = -EFSCORRUPTED;
                goto fail_no_cp;
        }
 
@@ -888,8 +890,10 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
                sbi->cur_cp_pack = 2;
 
        /* Sanity checking of checkpoint */
-       if (f2fs_sanity_check_ckpt(sbi))
+       if (f2fs_sanity_check_ckpt(sbi)) {
+               err = -EFSCORRUPTED;
                goto free_fail_no_cp;
+       }
 
        if (cp_blks <= 1)
                goto done;
@@ -903,8 +907,10 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
                unsigned char *ckpt = (unsigned char *)sbi->ckpt;
 
                cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
-               if (IS_ERR(cur_page))
+               if (IS_ERR(cur_page)) {
+                       err = PTR_ERR(cur_page);
                        goto free_fail_no_cp;
+               }
                sit_bitmap_ptr = page_address(cur_page);
                memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
                f2fs_put_page(cur_page, 1);
@@ -919,7 +925,7 @@ free_fail_no_cp:
        f2fs_put_page(cp2, 1);
 fail_no_cp:
        kfree(sbi->ckpt);
-       return -EINVAL;
+       return err;
 }
 
 static void __add_dirty_inode(struct inode *inode, enum inode_type type)
index 4d02e76b648a21496796fd0949b09d3ab6f315f8..9511466bc78574690be213e6ee22facf49b84419 100644 (file)
@@ -449,7 +449,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
 
        if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
                        __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
-               return -EFAULT;
+               return -EFSCORRUPTED;
 
        trace_f2fs_submit_page_bio(page, fio);
        f2fs_trace_ios(fio, 0);
@@ -1071,7 +1071,7 @@ next_block:
 
        if (__is_valid_data_blkaddr(blkaddr) &&
                !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
-               err = -EFAULT;
+               err = -EFSCORRUPTED;
                goto sync_out;
        }
 
@@ -1755,7 +1755,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 
                if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
                                                        DATA_GENERIC))
-                       return -EFAULT;
+                       return -EFSCORRUPTED;
 
                ipu_force = true;
                fio->need_lock = LOCK_DONE;
@@ -1781,7 +1781,7 @@ got_it:
        if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
                !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
                                                        DATA_GENERIC)) {
-               err = -EFAULT;
+               err = -EFSCORRUPTED;
                goto out_writepage;
        }
        /*
index 44ea7ac69ef48bffeed3eaa45f3b3ac5625397ce..fb216488d67a99494bdef34f67320e0c3e7abc99 100644 (file)
@@ -3487,3 +3487,7 @@ extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
 #endif
 
 #endif
+
+#define EFSBADCRC      EBADMSG         /* Bad CRC detected */
+#define EFSCORRUPTED   EUCLEAN         /* Filesystem is corrupted */
+
index d44b57a363ff11b824dfd520f43c6d39871c3a0e..dd29a49143f54930444865e4c9dffeda35ffb600 100644 (file)
@@ -636,7 +636,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
 
        if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
                                                DATA_GENERIC))) {
-               err = -EFAULT;
+               err = -EFSCORRUPTED;
                goto put_page;
        }
 got_it:
index 92703efde36e2ef18c5529c5dbc74bee5c1cb4e5..6bbb5f6801e26f4c013030f95b23e36c05e2ff9c 100644 (file)
@@ -146,7 +146,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
                        "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
                        "run fsck to fix.",
                        __func__, dn->inode->i_ino, dn->data_blkaddr);
-               return -EINVAL;
+               return -EFSCORRUPTED;
        }
 
        f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
@@ -389,7 +389,7 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
                        "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
                        "run fsck to fix.",
                        __func__, dir->i_ino, dn.data_blkaddr);
-               err = -EINVAL;
+               err = -EFSCORRUPTED;
                goto out;
        }
 
index 0f31df01e36c604df05a6231742e0c7e62569203..540d45759621afdd37af834cfeab724c09ca2b79 100644 (file)
@@ -76,7 +76,7 @@ static int __written_first_block(struct f2fs_sb_info *sbi,
        if (!__is_valid_data_blkaddr(addr))
                return 1;
        if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
-               return -EFAULT;
+               return -EFSCORRUPTED;
        return 0;
 }
 
@@ -361,7 +361,7 @@ static int do_read_inode(struct inode *inode)
 
        if (!sanity_check_inode(inode, node_page)) {
                f2fs_put_page(node_page, 1);
-               return -EINVAL;
+               return -EFSCORRUPTED;
        }
 
        /* check data exist */
index e2d9edad758cdfb5e5ca7b209b140d8ae3ef014c..aa8f19e1bdb3d855a37955ce84d38df64ead3e94 100644 (file)
@@ -40,7 +40,7 @@ int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
                f2fs_msg(sbi->sb, KERN_WARNING,
                                "%s: out-of-range nid=%x, run fsck to fix.",
                                __func__, nid);
-               return -EINVAL;
+               return -EFSCORRUPTED;
        }
        return 0;
 }
@@ -1284,7 +1284,7 @@ static int read_node_page(struct page *page, int op_flags)
        if (PageUptodate(page)) {
                if (!f2fs_inode_chksum_verify(sbi, page)) {
                        ClearPageUptodate(page);
-                       return -EBADMSG;
+                       return -EFSBADCRC;
                }
                return LOCKED_PAGE;
        }
@@ -1370,7 +1370,7 @@ repeat:
        }
 
        if (!f2fs_inode_chksum_verify(sbi, page)) {
-               err = -EBADMSG;
+               err = -EFSBADCRC;
                goto out_err;
        }
 page_hit:
index bf5c5f4fa77ea26af535eff7d50fa3b3ac955d86..0b224f4a4a65652efc6b952603beae14d8ef7e77 100644 (file)
@@ -491,7 +491,7 @@ retry_dn:
                        "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
                        inode->i_ino, ofs_of_node(dn.node_page),
                        ofs_of_node(page));
-               err = -EFAULT;
+               err = -EFSCORRUPTED;
                goto err;
        }
 
index 8fc3edb6760c2fe97ca1b09c1199b61e9be7cf8e..10d5dcdb34be6ec19b5f35cf64e64f902d90c9a8 100644 (file)
@@ -2657,7 +2657,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
        if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
                f2fs_msg(sbi->sb, KERN_WARNING,
                        "Found FS corruption, run fsck to fix.");
-               return -EIO;
+               return -EFSCORRUPTED;
        }
 
        /* start/end segment number in main_area */
@@ -3079,7 +3079,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
 
        if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               return -EFAULT;
+               return -EFSCORRUPTED;
        }
 
        stat_inc_inplace_blocks(fio->sbi);
@@ -3966,7 +3966,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
                                        "Wrong journal entry on segno %u",
                                        start);
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
-                       err = -EINVAL;
+                       err = -EFSCORRUPTED;
                        break;
                }
 
@@ -4007,7 +4007,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
                        "SIT is corrupted node# %u vs %u",
                        total_node_blocks, valid_node_count(sbi));
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               err = -EINVAL;
+               err = -EFSCORRUPTED;
        }
 
        return err;
@@ -4098,6 +4098,41 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
        return init_victim_secmap(sbi);
 }
 
+static int sanity_check_curseg(struct f2fs_sb_info *sbi)
+{
+       int i;
+
+       /*
+        * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
+        * In LFS curseg, all blkaddr after .next_blkoff should be unused.
+        */
+       for (i = 0; i < NO_CHECK_TYPE; i++) {
+               struct curseg_info *curseg = CURSEG_I(sbi, i);
+               struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
+               unsigned int blkofs = curseg->next_blkoff;
+
+               if (f2fs_test_bit(blkofs, se->cur_valid_map))
+                       goto out;
+
+               if (curseg->alloc_type == SSR)
+                       continue;
+
+               for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
+                       if (!f2fs_test_bit(blkofs, se->cur_valid_map))
+                               continue;
+out:
+                       f2fs_msg(sbi->sb, KERN_ERR,
+                               "Current segment's next free block offset is "
+                               "inconsistent with bitmap, logtype:%u, "
+                               "segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
+                               i, curseg->segno, curseg->alloc_type,
+                               curseg->next_blkoff, blkofs);
+                       return -EFSCORRUPTED;
+               }
+       }
+       return 0;
+}
+
 /*
  * Update min, max modified time for cost-benefit GC algorithm
  */
@@ -4193,6 +4228,10 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
        if (err)
                return err;
 
+       err = sanity_check_curseg(sbi);
+       if (err)
+               return err;
+
        init_min_max_mtime(sbi);
        return 0;
 }
index 5079532cb176b79fa38168a91c1e38aafda33c38..9c2a55ad61bc581ce717a30a9a438820c5f97929 100644 (file)
@@ -684,7 +684,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
                                "Mismatch valid blocks %d vs. %d",
                                        GET_SIT_VBLOCKS(raw_sit), valid_blocks);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               return -EINVAL;
+               return -EFSCORRUPTED;
        }
 
        /* check segment usage, and check boundary of a given segment number */
@@ -694,7 +694,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
                                "Wrong valid blocks %d or segno %u",
                                        GET_SIT_VBLOCKS(raw_sit), segno);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               return -EINVAL;
+               return -EFSCORRUPTED;
        }
        return 0;
 }
index 1871031e2d5eb78d6b7da9041b57cf2fc33c65cb..6851afc3bf805f8a951b6b59d9b418cea594f053 100644 (file)
@@ -2196,11 +2196,11 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
        struct super_block *sb = sbi->sb;
        unsigned int blocksize;
 
-       if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
+       if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
                f2fs_msg(sb, KERN_INFO,
                        "Magic Mismatch, valid(0x%x) - read(0x%x)",
                        F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
-               return 1;
+               return -EINVAL;
        }
 
        /* Currently, support only 4KB page cache size */
@@ -2208,7 +2208,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                f2fs_msg(sb, KERN_INFO,
                        "Invalid page_cache_size (%lu), supports only 4KB\n",
                        PAGE_SIZE);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* Currently, support only 4KB block size */
@@ -2217,7 +2217,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                f2fs_msg(sb, KERN_INFO,
                        "Invalid blocksize (%u), supports only 4KB\n",
                        blocksize);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* check log blocks per segment */
@@ -2225,7 +2225,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                f2fs_msg(sb, KERN_INFO,
                        "Invalid log blocks per segment (%u)\n",
                        le32_to_cpu(raw_super->log_blocks_per_seg));
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* Currently, support 512/1024/2048/4096 bytes sector size */
@@ -2235,7 +2235,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                                F2FS_MIN_LOG_SECTOR_SIZE) {
                f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
                        le32_to_cpu(raw_super->log_sectorsize));
-               return 1;
+               return -EFSCORRUPTED;
        }
        if (le32_to_cpu(raw_super->log_sectors_per_block) +
                le32_to_cpu(raw_super->log_sectorsize) !=
@@ -2244,7 +2244,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                        "Invalid log sectors per block(%u) log sectorsize(%u)",
                        le32_to_cpu(raw_super->log_sectors_per_block),
                        le32_to_cpu(raw_super->log_sectorsize));
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        segment_count = le32_to_cpu(raw_super->segment_count);
@@ -2260,7 +2260,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                f2fs_msg(sb, KERN_INFO,
                        "Invalid segment count (%u)",
                        segment_count);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if (total_sections > segment_count ||
@@ -2269,28 +2269,28 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                f2fs_msg(sb, KERN_INFO,
                        "Invalid segment/section count (%u, %u x %u)",
                        segment_count, total_sections, segs_per_sec);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if ((segment_count / segs_per_sec) < total_sections) {
                f2fs_msg(sb, KERN_INFO,
                        "Small segment_count (%u < %u * %u)",
                        segment_count, segs_per_sec, total_sections);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
                f2fs_msg(sb, KERN_INFO,
                        "Wrong segment_count / block_count (%u > %llu)",
                        segment_count, le64_to_cpu(raw_super->block_count));
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if (secs_per_zone > total_sections || !secs_per_zone) {
                f2fs_msg(sb, KERN_INFO,
                        "Wrong secs_per_zone / total_sections (%u, %u)",
                        secs_per_zone, total_sections);
-               return 1;
+               return -EFSCORRUPTED;
        }
        if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
                        raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
@@ -2301,7 +2301,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                        le32_to_cpu(raw_super->extension_count),
                        raw_super->hot_ext_count,
                        F2FS_MAX_EXTENSION);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if (le32_to_cpu(raw_super->cp_payload) >
@@ -2310,7 +2310,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                        "Insane cp_payload (%u > %u)",
                        le32_to_cpu(raw_super->cp_payload),
                        blocks_per_seg - F2FS_CP_PACKS);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* check reserved ino info */
@@ -2322,12 +2322,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                        le32_to_cpu(raw_super->node_ino),
                        le32_to_cpu(raw_super->meta_ino),
                        le32_to_cpu(raw_super->root_ino));
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
        if (sanity_check_area_boundary(sbi, bh))
-               return 1;
+               return -EFSCORRUPTED;
 
        return 0;
 }
@@ -2413,11 +2413,11 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
                }
        }
        for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
-               for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
+               for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
                        if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
                                le32_to_cpu(ckpt->cur_data_segno[j])) {
                                f2fs_msg(sbi->sb, KERN_ERR,
-                                       "Data segment (%u) and Data segment (%u)"
+                                       "Node segment (%u) and Data segment (%u)"
                                        " has the same segno: %u", i, j,
                                        le32_to_cpu(ckpt->cur_node_segno[i]));
                                return 1;
@@ -2612,11 +2612,11 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
                }
 
                /* sanity checking of raw super */
-               if (sanity_check_raw_super(sbi, bh)) {
+               err = sanity_check_raw_super(sbi, bh);
+               if (err) {
                        f2fs_msg(sb, KERN_ERR,
                                "Can't find valid F2FS filesystem in %dth superblock",
                                block + 1);
-                       err = -EINVAL;
                        brelse(bh);
                        continue;
                }
index 88e30f7cf9e14e7616ffaace315dc7c27d8ce887..1dae74f7cccac4bb063f34ee51fa90857e2bca0a 100644 (file)
@@ -349,7 +349,7 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 
        *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
        if (!*xe) {
-               err = -EFAULT;
+               err = -EFSCORRUPTED;
                goto out;
        }
 check:
@@ -625,7 +625,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
        /* find entry with wanted name. */
        here = __find_xattr(base_addr, last_base_addr, index, len, name);
        if (!here) {
-               error = -EFAULT;
+               error = -EFSCORRUPTED;
                goto exit;
        }
 
index 7f5f3699fc6c086cb169c0f0dd9782971408480f..de60c05c0ca1d955e69d5d58cd3c27dd5063b7af 100644 (file)
@@ -1097,8 +1097,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
                        err = -ENOMEM;
                        goto error;
                }
+               /* Avoid race with userspace read via bdev */
+               lock_buffer(bhs[n]);
                memset(bhs[n]->b_data, 0, sb->s_blocksize);
                set_buffer_uptodate(bhs[n]);
+               unlock_buffer(bhs[n]);
                mark_buffer_dirty_inode(bhs[n], dir);
 
                n++;
@@ -1155,6 +1158,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
        fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
 
        de = (struct msdos_dir_entry *)bhs[0]->b_data;
+       /* Avoid race with userspace read via bdev */
+       lock_buffer(bhs[0]);
        /* filling the new directory slots ("." and ".." entries) */
        memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
        memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
@@ -1177,6 +1182,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
        de[0].size = de[1].size = 0;
        memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
        set_buffer_uptodate(bhs[0]);
+       unlock_buffer(bhs[0]);
        mark_buffer_dirty_inode(bhs[0], dir);
 
        err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
@@ -1234,11 +1240,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
 
                        /* fill the directory entry */
                        copy = min(size, sb->s_blocksize);
+                       /* Avoid race with userspace read via bdev */
+                       lock_buffer(bhs[n]);
                        memcpy(bhs[n]->b_data, slots, copy);
-                       slots += copy;
-                       size -= copy;
                        set_buffer_uptodate(bhs[n]);
+                       unlock_buffer(bhs[n]);
                        mark_buffer_dirty_inode(bhs[n], dir);
+                       slots += copy;
+                       size -= copy;
                        if (!size)
                                break;
                        n++;
index f58c0cacc531df1c6c439325cb6c574cd704486d..4c6c635bc8aaac76c9df5391bb1b0cf58ef8ad6f 100644 (file)
@@ -390,8 +390,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
                                err = -ENOMEM;
                                goto error;
                        }
+                       /* Avoid race with userspace read via bdev */
+                       lock_buffer(c_bh);
                        memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
                        set_buffer_uptodate(c_bh);
+                       unlock_buffer(c_bh);
                        mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
                        if (sb->s_flags & SB_SYNCHRONOUS)
                                err = sync_dirty_buffer(c_bh);
index 9544e2f8b79ff814302d05486b3b878ef3f38839..7ee86d8f313d0feb5db522d4de7c491ace364850 100644 (file)
@@ -721,6 +721,7 @@ void wbc_detach_inode(struct writeback_control *wbc)
 void wbc_account_io(struct writeback_control *wbc, struct page *page,
                    size_t bytes)
 {
+       struct cgroup_subsys_state *css;
        int id;
 
        /*
@@ -732,7 +733,12 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
        if (!wbc->wb)
                return;
 
-       id = mem_cgroup_css_from_page(page)->id;
+       css = mem_cgroup_css_from_page(page);
+       /* dead cgroups shouldn't contribute to inode ownership arbitration */
+       if (!(css->flags & CSS_ONLINE))
+               return;
+
+       id = css->id;
 
        if (id == wbc->wb_id) {
                wbc->wb_bytes += bytes;
index 8f68181256c00bf78df1668dbb95b133488caf11..f057c213c453a8bb2e755d17b5778beb15aab474 100644 (file)
@@ -518,6 +518,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
        rc = cuse_send_init(cc);
        if (rc) {
                fuse_dev_free(fud);
+               fuse_conn_put(&cc->fc);
                return rc;
        }
        file->private_data = fud;
index 6ee471b72a34da2b9057bf3a3d993bab458115f5..6d39143cfa09460c9a3ca18c1036dc022225d7d7 100644 (file)
@@ -331,7 +331,7 @@ static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        list_add_tail(&req->list, &fiq->pending);
-       wake_up_locked(&fiq->waitq);
+       wake_up(&fiq->waitq);
        kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
@@ -343,16 +343,16 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
        forget->forget_one.nodeid = nodeid;
        forget->forget_one.nlookup = nlookup;
 
-       spin_lock(&fiq->waitq.lock);
+       spin_lock(&fiq->lock);
        if (fiq->connected) {
                fiq->forget_list_tail->next = forget;
                fiq->forget_list_tail = forget;
-               wake_up_locked(&fiq->waitq);
+               wake_up(&fiq->waitq);
                kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
        } else {
                kfree(forget);
        }
-       spin_unlock(&fiq->waitq.lock);
+       spin_unlock(&fiq->lock);
 }
 
 static void flush_bg_queue(struct fuse_conn *fc)
@@ -365,10 +365,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
                req = list_entry(fc->bg_queue.next, struct fuse_req, list);
                list_del(&req->list);
                fc->active_background++;
-               spin_lock(&fiq->waitq.lock);
+               spin_lock(&fiq->lock);
                req->in.h.unique = fuse_get_unique(fiq);
                queue_request(fiq, req);
-               spin_unlock(&fiq->waitq.lock);
+               spin_unlock(&fiq->lock);
        }
 }
 
@@ -387,9 +387,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
        if (test_and_set_bit(FR_FINISHED, &req->flags))
                goto put_request;
 
-       spin_lock(&fiq->waitq.lock);
+       spin_lock(&fiq->lock);
        list_del_init(&req->intr_entry);
-       spin_unlock(&fiq->waitq.lock);
+       spin_unlock(&fiq->lock);
        WARN_ON(test_bit(FR_PENDING, &req->flags));
        WARN_ON(test_bit(FR_SENT, &req->flags));
        if (test_bit(FR_BACKGROUND, &req->flags)) {
@@ -427,16 +427,16 @@ put_request:
 
 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
-       spin_lock(&fiq->waitq.lock);
+       spin_lock(&fiq->lock);
        if (test_bit(FR_FINISHED, &req->flags)) {
-               spin_unlock(&fiq->waitq.lock);
+               spin_unlock(&fiq->lock);
                return;
        }
        if (list_empty(&req->intr_entry)) {
                list_add_tail(&req->intr_entry, &fiq->interrupts);
-               wake_up_locked(&fiq->waitq);
+               wake_up(&fiq->waitq);
        }
-       spin_unlock(&fiq->waitq.lock);
+       spin_unlock(&fiq->lock);
        kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
@@ -466,16 +466,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
                if (!err)
                        return;
 
-               spin_lock(&fiq->waitq.lock);
+               spin_lock(&fiq->lock);
                /* Request is not yet in userspace, bail out */
                if (test_bit(FR_PENDING, &req->flags)) {
                        list_del(&req->list);
-                       spin_unlock(&fiq->waitq.lock);
+                       spin_unlock(&fiq->lock);
                        __fuse_put_request(req);
                        req->out.h.error = -EINTR;
                        return;
                }
-               spin_unlock(&fiq->waitq.lock);
+               spin_unlock(&fiq->lock);
        }
 
        /*
@@ -490,9 +490,9 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
        struct fuse_iqueue *fiq = &fc->iq;
 
        BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
-       spin_lock(&fiq->waitq.lock);
+       spin_lock(&fiq->lock);
        if (!fiq->connected) {
-               spin_unlock(&fiq->waitq.lock);
+               spin_unlock(&fiq->lock);
                req->out.h.error = -ENOTCONN;
        } else {
                req->in.h.unique = fuse_get_unique(fiq);
@@ -500,7 +500,7 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);
-               spin_unlock(&fiq->waitq.lock);
+               spin_unlock(&fiq->lock);
 
                request_wait_answer(fc, req);
                /* Pairs with smp_wmb() in request_end() */
@@ -633,12 +633,12 @@ static int fuse_request_send_notify_reply(struct fuse_conn *fc,
 
        __clear_bit(FR_ISREPLY, &req->flags);
        req->in.h.unique = unique;
-       spin_lock(&fiq->waitq.lock);
+       spin_lock(&fiq->lock);
        if (fiq->connected) {
                queue_request(fiq, req);
                err = 0;
        }
-       spin_unlock(&fiq->waitq.lock);
+       spin_unlock(&fiq->lock);
 
        return err;
 }
@@ -1082,12 +1082,12 @@ static int request_pending(struct fuse_iqueue *fiq)
  * Unlike other requests this is assembled on demand, without a need
  * to allocate a separate fuse_req structure.
  *
- * Called with fiq->waitq.lock held, releases it
+ * Called with fiq->lock held, releases it
  */
 static int fuse_read_interrupt(struct fuse_iqueue *fiq,
                               struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
 {
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
@@ -1103,7 +1103,7 @@ __releases(fiq->waitq.lock)
        ih.unique = req->intr_unique;
        arg.unique = req->in.h.unique;
 
-       spin_unlock(&fiq->waitq.lock);
+       spin_unlock(&fiq->lock);
        if (nbytes < reqsize)
                return -EINVAL;
 
@@ -1140,7 +1140,7 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
 static int fuse_read_single_forget(struct fuse_iqueue *fiq,
                                   struct fuse_copy_state *cs,
                                   size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
 {
        int err;
        struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
@@ -1154,7 +1154,7 @@ __releases(fiq->waitq.lock)
                .len = sizeof(ih) + sizeof(arg),
        };
 
-       spin_unlock(&fiq->waitq.lock);
+       spin_unlock(&fiq->lock);
        kfree(forget);
        if (nbytes < ih.len)
                return -EINVAL;
@@ -1172,7 +1172,7 @@ __releases(fiq->waitq.lock)
 
 static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
                                   struct fuse_copy_state *cs, size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
 {
        int err;
        unsigned max_forgets;
@@ -1186,13 +1186,13 @@ __releases(fiq->waitq.lock)
        };
 
        if (nbytes < ih.len) {
-               spin_unlock(&fiq->waitq.lock);
+               spin_unlock(&fiq->lock);
                return -EINVAL;
        }
 
        max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
        head = dequeue_forget(fiq, max_forgets, &count);
-       spin_unlock(&fiq->waitq.lock);
+       spin_unlock(&fiq->lock);
 
        arg.count = count;
        ih.len += count * sizeof(struct fuse_forget_one);
@@ -1222,7 +1222,7 @@ __releases(fiq->waitq.lock)
 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
                            struct fuse_copy_state *cs,
                            size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
 {
        if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
                return fuse_read_single_forget(fiq, cs, nbytes);
@@ -1251,16 +1251,19 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
        unsigned reqsize;
 
  restart:
-       spin_lock(&fiq->waitq.lock);
-       err = -EAGAIN;
-       if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
-           !request_pending(fiq))
-               goto err_unlock;
+       for (;;) {
+               spin_lock(&fiq->lock);
+               if (!fiq->connected || request_pending(fiq))
+                       break;
+               spin_unlock(&fiq->lock);
 
-       err = wait_event_interruptible_exclusive_locked(fiq->waitq,
+               if (file->f_flags & O_NONBLOCK)
+                       return -EAGAIN;
+               err = wait_event_interruptible_exclusive(fiq->waitq,
                                !fiq->connected || request_pending(fiq));
-       if (err)
-               goto err_unlock;
+               if (err)
+                       return err;
+       }
 
        if (!fiq->connected) {
                err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
@@ -1284,7 +1287,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
        req = list_entry(fiq->pending.next, struct fuse_req, list);
        clear_bit(FR_PENDING, &req->flags);
        list_del_init(&req->list);
-       spin_unlock(&fiq->waitq.lock);
+       spin_unlock(&fiq->lock);
 
        in = &req->in;
        reqsize = in->h.len;
@@ -1341,7 +1344,7 @@ out_end:
        return err;
 
  err_unlock:
-       spin_unlock(&fiq->waitq.lock);
+       spin_unlock(&fiq->lock);
        return err;
 }
 
@@ -2054,12 +2057,12 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
        fiq = &fud->fc->iq;
        poll_wait(file, &fiq->waitq, wait);
 
-       spin_lock(&fiq->waitq.lock);
+       spin_lock(&fiq->lock);
        if (!fiq->connected)
                mask = EPOLLERR;
        else if (request_pending(fiq))
                mask |= EPOLLIN | EPOLLRDNORM;
-       spin_unlock(&fiq->waitq.lock);
+       spin_unlock(&fiq->lock);
 
        return mask;
 }
@@ -2150,15 +2153,15 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
                fc->max_background = UINT_MAX;
                flush_bg_queue(fc);
 
-               spin_lock(&fiq->waitq.lock);
+               spin_lock(&fiq->lock);
                fiq->connected = 0;
                list_for_each_entry(req, &fiq->pending, list)
                        clear_bit(FR_PENDING, &req->flags);
                list_splice_tail_init(&fiq->pending, &to_end);
                while (forget_pending(fiq))
                        kfree(dequeue_forget(fiq, 1, NULL));
-               wake_up_all_locked(&fiq->waitq);
-               spin_unlock(&fiq->waitq.lock);
+               wake_up_all(&fiq->waitq);
+               spin_unlock(&fiq->lock);
                kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
                end_polls(fc);
                wake_up_all(&fc->blocked_waitq);
index 9a22aa580fe7a5f56149efd2c35ab829000d09bb..96d46b3ad23585f79c6bd464a157623d00044284 100644 (file)
@@ -1700,6 +1700,7 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc)
                WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
 
                redirty_page_for_writepage(wbc, page);
+               unlock_page(page);
                return 0;
        }
 
index cec8b8e749695789b59c0f4e30d5a96972136045..900bdcf79bfc09c3b4ac7bf1bb8c907b404c8db1 100644 (file)
@@ -388,6 +388,9 @@ struct fuse_iqueue {
        /** Connection established */
        unsigned connected;
 
+       /** Lock protecting accesses to members of this structure */
+       spinlock_t lock;
+
        /** Readers of the connection are waiting on this */
        wait_queue_head_t waitq;
 
index db9e60b7eb691bc4f0814c694577f60d363f32a1..cb018315ecaf54e7b9c6b02e33228a7e683fd5d0 100644 (file)
@@ -585,6 +585,7 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
 static void fuse_iqueue_init(struct fuse_iqueue *fiq)
 {
        memset(fiq, 0, sizeof(struct fuse_iqueue));
+       spin_lock_init(&fiq->lock);
        init_waitqueue_head(&fiq->waitq);
        INIT_LIST_HEAD(&fiq->pending);
        INIT_LIST_HEAD(&fiq->interrupts);
index 7f8bb0868c0f8bef5c41c491d2e0f79e826d1b2f..52feccedd7a441a0a423789f73ebd997f7a023a0 100644 (file)
@@ -392,6 +392,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
        return mp->mp_aheight - x - 1;
 }
 
+static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
+{
+       sector_t factor = 1, block = 0;
+       int hgt;
+
+       for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
+               if (hgt < mp->mp_aheight)
+                       block += mp->mp_list[hgt] * factor;
+               factor *= sdp->sd_inptrs;
+       }
+       return block;
+}
+
 static void release_metapath(struct metapath *mp)
 {
        int i;
@@ -432,60 +445,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt
        return ptr - first;
 }
 
-typedef const __be64 *(*gfs2_metadata_walker)(
-               struct metapath *mp,
-               const __be64 *start, const __be64 *end,
-               u64 factor, void *data);
+enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
 
-#define WALK_STOP ((__be64 *)0)
-#define WALK_NEXT ((__be64 *)1)
+/*
+ * gfs2_metadata_walker - walk an indirect block
+ * @mp: Metapath to indirect block
+ * @ptrs: Number of pointers to look at
+ *
+ * When returning WALK_FOLLOW, the walker must update @mp to point at the right
+ * indirect block to follow.
+ */
+typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
+                                                  unsigned int ptrs);
+
+/*
+ * gfs2_walk_metadata - walk a tree of indirect blocks
+ * @inode: The inode
+ * @mp: Starting point of walk
+ * @max_len: Maximum number of blocks to walk
+ * @walker: Called during the walk
+ *
+ * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
+ * past the end of metadata, and a negative error code otherwise.
+ */
 
-static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
-               u64 len, struct metapath *mp, gfs2_metadata_walker walker,
-               void *data)
+static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
+               u64 max_len, gfs2_metadata_walker walker)
 {
-       struct metapath clone;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
-       const __be64 *start, *end, *ptr;
        u64 factor = 1;
        unsigned int hgt;
-       int ret = 0;
+       int ret;
 
-       for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
+       /*
+        * The walk starts in the lowest allocated indirect block, which may be
+        * before the position indicated by @mp.  Adjust @max_len accordingly
+        * to avoid a short walk.
+        */
+       for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
+               max_len += mp->mp_list[hgt] * factor;
+               mp->mp_list[hgt] = 0;
                factor *= sdp->sd_inptrs;
+       }
 
        for (;;) {
-               u64 step;
+               u16 start = mp->mp_list[hgt];
+               enum walker_status status;
+               unsigned int ptrs;
+               u64 len;
 
                /* Walk indirect block. */
-               start = metapointer(hgt, mp);
-               end = metaend(hgt, mp);
-
-               step = (end - start) * factor;
-               if (step > len)
-                       end = start + DIV_ROUND_UP_ULL(len, factor);
-
-               ptr = walker(mp, start, end, factor, data);
-               if (ptr == WALK_STOP)
+               ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
+               len = ptrs * factor;
+               if (len > max_len)
+                       ptrs = DIV_ROUND_UP_ULL(max_len, factor);
+               status = walker(mp, ptrs);
+               switch (status) {
+               case WALK_STOP:
+                       return 1;
+               case WALK_FOLLOW:
+                       BUG_ON(mp->mp_aheight == mp->mp_fheight);
+                       ptrs = mp->mp_list[hgt] - start;
+                       len = ptrs * factor;
                        break;
-               if (step >= len)
+               case WALK_CONTINUE:
                        break;
-               len -= step;
-               if (ptr != WALK_NEXT) {
-                       BUG_ON(!*ptr);
-                       mp->mp_list[hgt] += ptr - start;
-                       goto fill_up_metapath;
                }
+               if (len >= max_len)
+                       break;
+               max_len -= len;
+               if (status == WALK_FOLLOW)
+                       goto fill_up_metapath;
 
 lower_metapath:
                /* Decrease height of metapath. */
-               if (mp != &clone) {
-                       clone_metapath(&clone, mp);
-                       mp = &clone;
-               }
                brelse(mp->mp_bh[hgt]);
                mp->mp_bh[hgt] = NULL;
+               mp->mp_list[hgt] = 0;
                if (!hgt)
                        break;
                hgt--;
@@ -493,10 +530,7 @@ lower_metapath:
 
                /* Advance in metadata tree. */
                (mp->mp_list[hgt])++;
-               start = metapointer(hgt, mp);
-               end = metaend(hgt, mp);
-               if (start >= end) {
-                       mp->mp_list[hgt] = 0;
+               if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
                        if (!hgt)
                                break;
                        goto lower_metapath;
@@ -504,44 +538,36 @@ lower_metapath:
 
 fill_up_metapath:
                /* Increase height of metapath. */
-               if (mp != &clone) {
-                       clone_metapath(&clone, mp);
-                       mp = &clone;
-               }
                ret = fillup_metapath(ip, mp, ip->i_height - 1);
                if (ret < 0)
-                       break;
+                       return ret;
                hgt += ret;
                for (; ret; ret--)
                        do_div(factor, sdp->sd_inptrs);
                mp->mp_aheight = hgt + 1;
        }
-       if (mp == &clone)
-               release_metapath(mp);
-       return ret;
+       return 0;
 }
 
-struct gfs2_hole_walker_args {
-       u64 blocks;
-};
-
-static const __be64 *gfs2_hole_walker(struct metapath *mp,
-               const __be64 *start, const __be64 *end,
-               u64 factor, void *data)
+static enum walker_status gfs2_hole_walker(struct metapath *mp,
+                                          unsigned int ptrs)
 {
-       struct gfs2_hole_walker_args *args = data;
-       const __be64 *ptr;
+       const __be64 *start, *ptr, *end;
+       unsigned int hgt;
+
+       hgt = mp->mp_aheight - 1;
+       start = metapointer(hgt, mp);
+       end = start + ptrs;
 
        for (ptr = start; ptr < end; ptr++) {
                if (*ptr) {
-                       args->blocks += (ptr - start) * factor;
+                       mp->mp_list[hgt] += ptr - start;
                        if (mp->mp_aheight == mp->mp_fheight)
                                return WALK_STOP;
-                       return ptr;  /* increase height */
+                       return WALK_FOLLOW;
                }
        }
-       args->blocks += (end - start) * factor;
-       return WALK_NEXT;
+       return WALK_CONTINUE;
 }
 
 /**
@@ -559,12 +585,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp,
 static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
                          struct metapath *mp, struct iomap *iomap)
 {
-       struct gfs2_hole_walker_args args = { };
-       int ret = 0;
+       struct metapath clone;
+       u64 hole_size;
+       int ret;
 
-       ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
-       if (!ret)
-               iomap->length = args.blocks << inode->i_blkbits;
+       clone_metapath(&clone, mp);
+       ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
+       if (ret < 0)
+               goto out;
+
+       if (ret == 1)
+               hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
+       else
+               hole_size = len;
+       iomap->length = hole_size << inode->i_blkbits;
+       ret = 0;
+
+out:
+       release_metapath(&clone);
        return ret;
 }
 
@@ -1592,6 +1630,7 @@ out_unlock:
                        brelse(dibh);
                        up_write(&ip->i_rw_mutex);
                        gfs2_trans_end(sdp);
+                       buf_in_tr = false;
                }
                gfs2_glock_dq_uninit(rd_gh);
                cond_resched();
index 65ea0355a4f64fe88db9aa7ff22d3a0bb9e5fd2e..24f86ffe11d7477dd2939d4e66ae9d1a1dc22532 100644 (file)
@@ -187,14 +187,15 @@ static int journal_wait_on_commit_record(journal_t *journal,
  * use writepages() because with dealyed allocation we may be doing
  * block allocation in writepages().
  */
-static int journal_submit_inode_data_buffers(struct address_space *mapping)
+static int journal_submit_inode_data_buffers(struct address_space *mapping,
+               loff_t dirty_start, loff_t dirty_end)
 {
        int ret;
        struct writeback_control wbc = {
                .sync_mode =  WB_SYNC_ALL,
                .nr_to_write = mapping->nrpages * 2,
-               .range_start = 0,
-               .range_end = i_size_read(mapping->host),
+               .range_start = dirty_start,
+               .range_end = dirty_end,
        };
 
        ret = generic_writepages(mapping, &wbc);
@@ -218,6 +219,9 @@ static int journal_submit_data_buffers(journal_t *journal,
 
        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
+               loff_t dirty_start = jinode->i_dirty_start;
+               loff_t dirty_end = jinode->i_dirty_end;
+
                if (!(jinode->i_flags & JI_WRITE_DATA))
                        continue;
                mapping = jinode->i_vfs_inode->i_mapping;
@@ -230,7 +234,8 @@ static int journal_submit_data_buffers(journal_t *journal,
                 * only allocated blocks here.
                 */
                trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
-               err = journal_submit_inode_data_buffers(mapping);
+               err = journal_submit_inode_data_buffers(mapping, dirty_start,
+                               dirty_end);
                if (!ret)
                        ret = err;
                spin_lock(&journal->j_list_lock);
@@ -257,12 +262,16 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
        /* For locking, see the comment in journal_submit_data_buffers() */
        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
+               loff_t dirty_start = jinode->i_dirty_start;
+               loff_t dirty_end = jinode->i_dirty_end;
+
                if (!(jinode->i_flags & JI_WAIT_DATA))
                        continue;
                jinode->i_flags |= JI_COMMIT_RUNNING;
                spin_unlock(&journal->j_list_lock);
-               err = filemap_fdatawait_keep_errors(
-                               jinode->i_vfs_inode->i_mapping);
+               err = filemap_fdatawait_range_keep_errors(
+                               jinode->i_vfs_inode->i_mapping, dirty_start,
+                               dirty_end);
                if (!ret)
                        ret = err;
                spin_lock(&journal->j_list_lock);
@@ -282,6 +291,8 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
                                &jinode->i_transaction->t_inode_list);
                } else {
                        jinode->i_transaction = NULL;
+                       jinode->i_dirty_start = 0;
+                       jinode->i_dirty_end = 0;
                }
        }
        spin_unlock(&journal->j_list_lock);
index e9cf88f0bc291769fd90f7de98b310acccfcf426..df390a69c49a8cdd94401b6fd1e448d71c881ec2 100644 (file)
@@ -94,6 +94,8 @@ EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
 EXPORT_SYMBOL(jbd2_journal_force_commit);
 EXPORT_SYMBOL(jbd2_journal_inode_add_write);
 EXPORT_SYMBOL(jbd2_journal_inode_add_wait);
+EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
+EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait);
 EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
@@ -2588,6 +2590,8 @@ void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode)
        jinode->i_next_transaction = NULL;
        jinode->i_vfs_inode = inode;
        jinode->i_flags = 0;
+       jinode->i_dirty_start = 0;
+       jinode->i_dirty_end = 0;
        INIT_LIST_HEAD(&jinode->i_list);
 }
 
index e20a6703531f41168ed15fd6fa2ad7ca9865cdbe..911ff18249b75615e2151a50c4f6d3fa9436ccdc 100644 (file)
@@ -2500,7 +2500,7 @@ void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
  * File inode in the inode list of the handle's transaction
  */
 static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
-                                  unsigned long flags)
+               unsigned long flags, loff_t start_byte, loff_t end_byte)
 {
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
@@ -2512,26 +2512,17 @@ static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
        jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
                        transaction->t_tid);
 
-       /*
-        * First check whether inode isn't already on the transaction's
-        * lists without taking the lock. Note that this check is safe
-        * without the lock as we cannot race with somebody removing inode
-        * from the transaction. The reason is that we remove inode from the
-        * transaction only in journal_release_jbd_inode() and when we commit
-        * the transaction. We are guarded from the first case by holding
-        * a reference to the inode. We are safe against the second case
-        * because if jinode->i_transaction == transaction, commit code
-        * cannot touch the transaction because we hold reference to it,
-        * and if jinode->i_next_transaction == transaction, commit code
-        * will only file the inode where we want it.
-        */
-       if ((jinode->i_transaction == transaction ||
-           jinode->i_next_transaction == transaction) &&
-           (jinode->i_flags & flags) == flags)
-               return 0;
-
        spin_lock(&journal->j_list_lock);
        jinode->i_flags |= flags;
+
+       if (jinode->i_dirty_end) {
+               jinode->i_dirty_start = min(jinode->i_dirty_start, start_byte);
+               jinode->i_dirty_end = max(jinode->i_dirty_end, end_byte);
+       } else {
+               jinode->i_dirty_start = start_byte;
+               jinode->i_dirty_end = end_byte;
+       }
+
        /* Is inode already attached where we need it? */
        if (jinode->i_transaction == transaction ||
            jinode->i_next_transaction == transaction)
@@ -2566,12 +2557,28 @@ done:
 int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *jinode)
 {
        return jbd2_journal_file_inode(handle, jinode,
-                                      JI_WRITE_DATA | JI_WAIT_DATA);
+                       JI_WRITE_DATA | JI_WAIT_DATA, 0, LLONG_MAX);
 }
 
 int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *jinode)
 {
-       return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA);
+       return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA, 0,
+                       LLONG_MAX);
+}
+
+int jbd2_journal_inode_ranged_write(handle_t *handle,
+               struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
+{
+       return jbd2_journal_file_inode(handle, jinode,
+                       JI_WRITE_DATA | JI_WAIT_DATA, start_byte,
+                       start_byte + length - 1);
+}
+
+int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *jinode,
+               loff_t start_byte, loff_t length)
+{
+       return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA,
+                       start_byte, start_byte + length - 1);
 }
 
 /*
index 0fb590d79f30ed78d0626fd79f24cacf9e1a72b8..bd2d193d0a2a11d516dd78d5d6910603526fe7d7 100644 (file)
@@ -86,58 +86,47 @@ int dcache_dir_close(struct inode *inode, struct file *file)
 EXPORT_SYMBOL(dcache_dir_close);
 
 /* parent is locked at least shared */
-static struct dentry *next_positive(struct dentry *parent,
-                                   struct list_head *from,
-                                   int count)
+/*
+ * Returns an element of siblings' list.
+ * We are looking for <count>th positive after <p>; if
+ * found, dentry is grabbed and passed to caller via *<res>.
+ * If no such element exists, the anchor of list is returned
+ * and *<res> is set to NULL.
+ */
+static struct list_head *scan_positives(struct dentry *cursor,
+                                       struct list_head *p,
+                                       loff_t count,
+                                       struct dentry **res)
 {
-       unsigned *seq = &parent->d_inode->i_dir_seq, n;
-       struct dentry *res;
-       struct list_head *p;
-       bool skipped;
-       int i;
+       struct dentry *dentry = cursor->d_parent, *found = NULL;
 
-retry:
-       i = count;
-       skipped = false;
-       n = smp_load_acquire(seq) & ~1;
-       res = NULL;
-       rcu_read_lock();
-       for (p = from->next; p != &parent->d_subdirs; p = p->next) {
+       spin_lock(&dentry->d_lock);
+       while ((p = p->next) != &dentry->d_subdirs) {
                struct dentry *d = list_entry(p, struct dentry, d_child);
-               if (!simple_positive(d)) {
-                       skipped = true;
-               } else if (!--i) {
-                       res = d;
-                       break;
+               // we must at least skip cursors, to avoid livelocks
+               if (d->d_flags & DCACHE_DENTRY_CURSOR)
+                       continue;
+               if (simple_positive(d) && !--count) {
+                       spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+                       if (simple_positive(d))
+                               found = dget_dlock(d);
+                       spin_unlock(&d->d_lock);
+                       if (likely(found))
+                               break;
+                       count = 1;
+               }
+               if (need_resched()) {
+                       list_move(&cursor->d_child, p);
+                       p = &cursor->d_child;
+                       spin_unlock(&dentry->d_lock);
+                       cond_resched();
+                       spin_lock(&dentry->d_lock);
                }
        }
-       rcu_read_unlock();
-       if (skipped) {
-               smp_rmb();
-               if (unlikely(*seq != n))
-                       goto retry;
-       }
-       return res;
-}
-
-static void move_cursor(struct dentry *cursor, struct list_head *after)
-{
-       struct dentry *parent = cursor->d_parent;
-       unsigned n, *seq = &parent->d_inode->i_dir_seq;
-       spin_lock(&parent->d_lock);
-       for (;;) {
-               n = *seq;
-               if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
-                       break;
-               cpu_relax();
-       }
-       __list_del(cursor->d_child.prev, cursor->d_child.next);
-       if (after)
-               list_add(&cursor->d_child, after);
-       else
-               list_add_tail(&cursor->d_child, &parent->d_subdirs);
-       smp_store_release(seq, n + 2);
-       spin_unlock(&parent->d_lock);
+       spin_unlock(&dentry->d_lock);
+       dput(*res);
+       *res = found;
+       return p;
 }
 
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
@@ -153,17 +142,28 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
                        return -EINVAL;
        }
        if (offset != file->f_pos) {
+               struct dentry *cursor = file->private_data;
+               struct dentry *to = NULL;
+               struct list_head *p;
+
                file->f_pos = offset;
-               if (file->f_pos >= 2) {
-                       struct dentry *cursor = file->private_data;
-                       struct dentry *to;
-                       loff_t n = file->f_pos - 2;
-
-                       inode_lock_shared(dentry->d_inode);
-                       to = next_positive(dentry, &dentry->d_subdirs, n);
-                       move_cursor(cursor, to ? &to->d_child : NULL);
-                       inode_unlock_shared(dentry->d_inode);
+               inode_lock_shared(dentry->d_inode);
+
+               if (file->f_pos > 2) {
+                       p = scan_positives(cursor, &dentry->d_subdirs,
+                                          file->f_pos - 2, &to);
+                       spin_lock(&dentry->d_lock);
+                       list_move(&cursor->d_child, p);
+                       spin_unlock(&dentry->d_lock);
+               } else {
+                       spin_lock(&dentry->d_lock);
+                       list_del_init(&cursor->d_child);
+                       spin_unlock(&dentry->d_lock);
                }
+
+               dput(to);
+
+               inode_unlock_shared(dentry->d_inode);
        }
        return offset;
 }
@@ -185,25 +185,29 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
 {
        struct dentry *dentry = file->f_path.dentry;
        struct dentry *cursor = file->private_data;
-       struct list_head *p = &cursor->d_child;
-       struct dentry *next;
-       bool moved = false;
+       struct list_head *anchor = &dentry->d_subdirs;
+       struct dentry *next = NULL;
+       struct list_head *p;
 
        if (!dir_emit_dots(file, ctx))
                return 0;
 
        if (ctx->pos == 2)
-               p = &dentry->d_subdirs;
-       while ((next = next_positive(dentry, p, 1)) != NULL) {
+               p = anchor;
+       else
+               p = &cursor->d_child;
+
+       while ((p = scan_positives(cursor, p, 1, &next)) != anchor) {
                if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
                              d_inode(next)->i_ino, dt_type(d_inode(next))))
                        break;
-               moved = true;
-               p = &next->d_child;
                ctx->pos++;
        }
-       if (moved)
-               move_cursor(cursor, p);
+       spin_lock(&dentry->d_lock);
+       list_move_tail(&cursor->d_child, p);
+       spin_unlock(&dentry->d_lock);
+       dput(next);
+
        return 0;
 }
 EXPORT_SYMBOL(dcache_readdir);
index c092661147b3054ee81f85205f61ed674cd1a674..0a2b59c1ecb3d066bbdf8b35bf8cea2676a56bfb 100644 (file)
@@ -416,10 +416,10 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
                clp = nfs_match_client(cl_init);
                if (clp) {
                        spin_unlock(&nn->nfs_client_lock);
-                       if (IS_ERR(clp))
-                               return clp;
                        if (new)
                                new->rpc_ops->free_client(new);
+                       if (IS_ERR(clp))
+                               return clp;
                        return nfs_found_client(cl_init, clp);
                }
                if (new) {
index 75fe92eaa68188800ec2a00f9fc4a2fa972bfd08..1624618c2bc72a456070e77311b8b070d063a605 100644 (file)
@@ -153,7 +153,7 @@ again:
                /* Block nfs4_proc_unlck */
                mutex_lock(&sp->so_delegreturn_mutex);
                seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
-               err = nfs4_open_delegation_recall(ctx, state, stateid, type);
+               err = nfs4_open_delegation_recall(ctx, state, stateid);
                if (!err)
                        err = nfs_delegation_claim_locks(ctx, state, stateid);
                if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
index bb1ef8c37af42706c909d9f6880ca44fd33b5d00..c95477823fa6b0eff0acc5651f5f84ae049475a2 100644 (file)
@@ -61,7 +61,7 @@ void nfs_reap_expired_delegations(struct nfs_client *clp);
 
 /* NFSv4 delegation-related procedures */
 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
-int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
+int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
 bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, struct rpc_cred **cred);
 bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
index 8bfaa658b2c190ddfa61f8a52acb4895b9f63b1d..b8d6860879528662561853fd89a681a2ecb5fbfa 100644 (file)
@@ -1072,6 +1072,100 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
        return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU);
 }
 
+static int
+nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
+                          struct inode *inode, int error)
+{
+       switch (error) {
+       case 1:
+               dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
+                       __func__, dentry);
+               return 1;
+       case 0:
+               nfs_mark_for_revalidate(dir);
+               if (inode && S_ISDIR(inode->i_mode)) {
+                       /* Purge readdir caches. */
+                       nfs_zap_caches(inode);
+                       /*
+                        * We can't d_drop the root of a disconnected tree:
+                        * its d_hash is on the s_anon list and d_drop() would hide
+                        * it from shrink_dcache_for_unmount(), leading to busy
+                        * inodes on unmount and further oopses.
+                        */
+                       if (IS_ROOT(dentry))
+                               return 1;
+               }
+               dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
+                               __func__, dentry);
+               return 0;
+       }
+       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
+                               __func__, dentry, error);
+       return error;
+}
+
+static int
+nfs_lookup_revalidate_negative(struct inode *dir, struct dentry *dentry,
+                              unsigned int flags)
+{
+       int ret = 1;
+       if (nfs_neg_need_reval(dir, dentry, flags)) {
+               if (flags & LOOKUP_RCU)
+                       return -ECHILD;
+               ret = 0;
+       }
+       return nfs_lookup_revalidate_done(dir, dentry, NULL, ret);
+}
+
+static int
+nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry,
+                               struct inode *inode)
+{
+       nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+       return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
+}
+
+static int
+nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
+                            struct inode *inode)
+{
+       struct nfs_fh *fhandle;
+       struct nfs_fattr *fattr;
+       struct nfs4_label *label;
+       int ret;
+
+       ret = -ENOMEM;
+       fhandle = nfs_alloc_fhandle();
+       fattr = nfs_alloc_fattr();
+       label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
+       if (fhandle == NULL || fattr == NULL || IS_ERR(label))
+               goto out;
+
+       ret = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+       if (ret < 0) {
+               if (ret == -ESTALE || ret == -ENOENT)
+                       ret = 0;
+               goto out;
+       }
+       ret = 0;
+       if (nfs_compare_fh(NFS_FH(inode), fhandle))
+               goto out;
+       if (nfs_refresh_inode(inode, fattr) < 0)
+               goto out;
+
+       nfs_setsecurity(inode, fattr, label);
+       nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+
+       /* set a readdirplus hint that we had a cache miss */
+       nfs_force_use_readdirplus(dir);
+       ret = 1;
+out:
+       nfs_free_fattr(fattr);
+       nfs_free_fhandle(fhandle);
+       nfs4_label_free(label);
+       return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
+}
+
 /*
  * This is called every time the dcache has a lookup hit,
  * and we should check whether we can really trust that
@@ -1083,58 +1177,36 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
  * If the parent directory is seen to have changed, we throw out the
  * cached dentry and do a new lookup.
  */
-static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int
+nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+                        unsigned int flags)
 {
-       struct inode *dir;
        struct inode *inode;
-       struct dentry *parent;
-       struct nfs_fh *fhandle = NULL;
-       struct nfs_fattr *fattr = NULL;
-       struct nfs4_label *label = NULL;
        int error;
 
-       if (flags & LOOKUP_RCU) {
-               parent = READ_ONCE(dentry->d_parent);
-               dir = d_inode_rcu(parent);
-               if (!dir)
-                       return -ECHILD;
-       } else {
-               parent = dget_parent(dentry);
-               dir = d_inode(parent);
-       }
        nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
        inode = d_inode(dentry);
 
-       if (!inode) {
-               if (nfs_neg_need_reval(dir, dentry, flags)) {
-                       if (flags & LOOKUP_RCU)
-                               return -ECHILD;
-                       goto out_bad;
-               }
-               goto out_valid;
-       }
+       if (!inode)
+               return nfs_lookup_revalidate_negative(dir, dentry, flags);
 
        if (is_bad_inode(inode)) {
-               if (flags & LOOKUP_RCU)
-                       return -ECHILD;
                dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
                                __func__, dentry);
                goto out_bad;
        }
 
        if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
-               goto out_set_verifier;
+               return nfs_lookup_revalidate_delegated(dir, dentry, inode);
 
        /* Force a full look up iff the parent directory has changed */
        if (!(flags & (LOOKUP_EXCL | LOOKUP_REVAL)) &&
            nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
                error = nfs_lookup_verify_inode(inode, flags);
                if (error) {
-                       if (flags & LOOKUP_RCU)
-                               return -ECHILD;
                        if (error == -ESTALE)
-                               goto out_zap_parent;
-                       goto out_error;
+                               nfs_zap_caches(dir);
+                       goto out_bad;
                }
                nfs_advise_use_readdirplus(dir);
                goto out_valid;
@@ -1146,81 +1218,45 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
        if (NFS_STALE(inode))
                goto out_bad;
 
-       error = -ENOMEM;
-       fhandle = nfs_alloc_fhandle();
-       fattr = nfs_alloc_fattr();
-       if (fhandle == NULL || fattr == NULL)
-               goto out_error;
-
-       label = nfs4_label_alloc(NFS_SERVER(inode), GFP_NOWAIT);
-       if (IS_ERR(label))
-               goto out_error;
-
        trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
-       error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+       error = nfs_lookup_revalidate_dentry(dir, dentry, inode);
        trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
-       if (error == -ESTALE || error == -ENOENT)
-               goto out_bad;
-       if (error)
-               goto out_error;
-       if (nfs_compare_fh(NFS_FH(inode), fhandle))
-               goto out_bad;
-       if ((error = nfs_refresh_inode(inode, fattr)) != 0)
-               goto out_bad;
-
-       nfs_setsecurity(inode, fattr, label);
-
-       nfs_free_fattr(fattr);
-       nfs_free_fhandle(fhandle);
-       nfs4_label_free(label);
+       return error;
+out_valid:
+       return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
+out_bad:
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+       return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
+}
 
-       /* set a readdirplus hint that we had a cache miss */
-       nfs_force_use_readdirplus(dir);
+static int
+__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
+                       int (*reval)(struct inode *, struct dentry *, unsigned int))
+{
+       struct dentry *parent;
+       struct inode *dir;
+       int ret;
 
-out_set_verifier:
-       nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- out_valid:
        if (flags & LOOKUP_RCU) {
+               parent = READ_ONCE(dentry->d_parent);
+               dir = d_inode_rcu(parent);
+               if (!dir)
+                       return -ECHILD;
+               ret = reval(dir, dentry, flags);
                if (parent != READ_ONCE(dentry->d_parent))
                        return -ECHILD;
-       } else
+       } else {
+               parent = dget_parent(dentry);
+               ret = reval(d_inode(parent), dentry, flags);
                dput(parent);
-       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
-                       __func__, dentry);
-       return 1;
-out_zap_parent:
-       nfs_zap_caches(dir);
- out_bad:
-       WARN_ON(flags & LOOKUP_RCU);
-       nfs_free_fattr(fattr);
-       nfs_free_fhandle(fhandle);
-       nfs4_label_free(label);
-       nfs_mark_for_revalidate(dir);
-       if (inode && S_ISDIR(inode->i_mode)) {
-               /* Purge readdir caches. */
-               nfs_zap_caches(inode);
-               /*
-                * We can't d_drop the root of a disconnected tree:
-                * its d_hash is on the s_anon list and d_drop() would hide
-                * it from shrink_dcache_for_unmount(), leading to busy
-                * inodes on unmount and further oopses.
-                */
-               if (IS_ROOT(dentry))
-                       goto out_valid;
        }
-       dput(parent);
-       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
-                       __func__, dentry);
-       return 0;
-out_error:
-       WARN_ON(flags & LOOKUP_RCU);
-       nfs_free_fattr(fattr);
-       nfs_free_fhandle(fhandle);
-       nfs4_label_free(label);
-       dput(parent);
-       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
-                       __func__, dentry, error);
-       return error;
+       return ret;
+}
+
+static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
 }
 
 /*
@@ -1450,7 +1486,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
        if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
                nfs_file_set_open_context(file, ctx);
        else
-               err = -ESTALE;
+               err = -EOPENSTALE;
 out:
        return err;
 }
@@ -1579,62 +1615,55 @@ no_open:
 }
 EXPORT_SYMBOL_GPL(nfs_atomic_open);
 
-static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int
+nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+                         unsigned int flags)
 {
        struct inode *inode;
-       int ret = 0;
 
        if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
-               goto no_open;
+               goto full_reval;
        if (d_mountpoint(dentry))
-               goto no_open;
-       if (NFS_SB(dentry->d_sb)->caps & NFS_CAP_ATOMIC_OPEN_V1)
-               goto no_open;
+               goto full_reval;
 
        inode = d_inode(dentry);
 
        /* We can't create new files in nfs_open_revalidate(), so we
         * optimize away revalidation of negative dentries.
         */
-       if (inode == NULL) {
-               struct dentry *parent;
-               struct inode *dir;
-
-               if (flags & LOOKUP_RCU) {
-                       parent = READ_ONCE(dentry->d_parent);
-                       dir = d_inode_rcu(parent);
-                       if (!dir)
-                               return -ECHILD;
-               } else {
-                       parent = dget_parent(dentry);
-                       dir = d_inode(parent);
-               }
-               if (!nfs_neg_need_reval(dir, dentry, flags))
-                       ret = 1;
-               else if (flags & LOOKUP_RCU)
-                       ret = -ECHILD;
-               if (!(flags & LOOKUP_RCU))
-                       dput(parent);
-               else if (parent != READ_ONCE(dentry->d_parent))
-                       return -ECHILD;
-               goto out;
-       }
+       if (inode == NULL)
+               goto full_reval;
+
+       if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
+               return nfs_lookup_revalidate_delegated(dir, dentry, inode);
 
        /* NFS only supports OPEN on regular files */
        if (!S_ISREG(inode->i_mode))
-               goto no_open;
+               goto full_reval;
+
        /* We cannot do exclusive creation on a positive dentry */
-       if (flags & LOOKUP_EXCL)
-               goto no_open;
+       if (flags & (LOOKUP_EXCL | LOOKUP_REVAL))
+               goto reval_dentry;
+
+       /* Check if the directory changed */
+       if (!nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU))
+               goto reval_dentry;
 
        /* Let f_op->open() actually open (and revalidate) the file */
-       ret = 1;
+       return 1;
+reval_dentry:
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+       return nfs_lookup_revalidate_dentry(dir, dentry, inode);;
 
-out:
-       return ret;
+full_reval:
+       return nfs_do_lookup_revalidate(dir, dentry, flags);
+}
 
-no_open:
-       return nfs_lookup_revalidate(dentry, flags);
+static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       return __nfs_lookup_revalidate(dentry, flags,
+                       nfs4_do_lookup_revalidate);
 }
 
 #endif /* CONFIG_NFSV4 */
index 33824a0a57bfe5de9e31f4d13e4d2eebc3b7b2df..29b70337dcd9fd14a9217195a6787b3bb377aba4 100644 (file)
@@ -122,32 +122,49 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
 }
 
 static void
-nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
+nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
+                           const struct nfs_pgio_header *hdr,
+                           ssize_t dreq_len)
 {
-       int i;
-       ssize_t count;
+       struct nfs_direct_mirror *mirror = &dreq->mirrors[hdr->pgio_mirror_idx];
+
+       if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
+             test_bit(NFS_IOHDR_EOF, &hdr->flags)))
+               return;
+       if (dreq->max_count >= dreq_len) {
+               dreq->max_count = dreq_len;
+               if (dreq->count > dreq_len)
+                       dreq->count = dreq_len;
+
+               if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+                       dreq->error = hdr->error;
+               else /* Clear outstanding error if this is EOF */
+                       dreq->error = 0;
+       }
+       if (mirror->count > dreq_len)
+               mirror->count = dreq_len;
+}
+
+static void
+nfs_direct_count_bytes(struct nfs_direct_req *dreq,
+                      const struct nfs_pgio_header *hdr)
+{
+       struct nfs_direct_mirror *mirror = &dreq->mirrors[hdr->pgio_mirror_idx];
+       loff_t hdr_end = hdr->io_start + hdr->good_bytes;
+       ssize_t dreq_len = 0;
 
-       WARN_ON_ONCE(dreq->count >= dreq->max_count);
+       if (hdr_end > dreq->io_start)
+               dreq_len = hdr_end - dreq->io_start;
 
-       if (dreq->mirror_count == 1) {
-               dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
-               dreq->count += hdr->good_bytes;
-       } else {
-               /* mirrored writes */
-               count = dreq->mirrors[hdr->pgio_mirror_idx].count;
-               if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
-                       count = hdr->io_start + hdr->good_bytes - dreq->io_start;
-                       dreq->mirrors[hdr->pgio_mirror_idx].count = count;
-               }
-               /* update the dreq->count by finding the minimum agreed count from all
-                * mirrors */
-               count = dreq->mirrors[0].count;
+       nfs_direct_handle_truncated(dreq, hdr, dreq_len);
 
-               for (i = 1; i < dreq->mirror_count; i++)
-                       count = min(count, dreq->mirrors[i].count);
+       if (dreq_len > dreq->max_count)
+               dreq_len = dreq->max_count;
 
-               dreq->count = count;
-       }
+       if (mirror->count < dreq_len)
+               mirror->count = dreq_len;
+       if (dreq->count < dreq_len)
+               dreq->count = dreq_len;
 }
 
 /*
@@ -400,15 +417,13 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
        unsigned long bytes = 0;
        struct nfs_direct_req *dreq = hdr->dreq;
 
-       if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
-               goto out_put;
-
        spin_lock(&dreq->lock);
-       if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
-               dreq->error = hdr->error;
-       else
-               nfs_direct_good_bytes(dreq, hdr);
+       if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+               spin_unlock(&dreq->lock);
+               goto out_put;
+       }
 
+       nfs_direct_count_bytes(dreq, hdr);
        spin_unlock(&dreq->lock);
 
        while (!list_empty(&hdr->pages)) {
@@ -428,7 +443,7 @@ out_put:
        hdr->release(hdr);
 }
 
-static void nfs_read_sync_pgio_error(struct list_head *head)
+static void nfs_read_sync_pgio_error(struct list_head *head, int error)
 {
        struct nfs_page *req;
 
@@ -645,6 +660,9 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 
        dreq->count = 0;
+       dreq->max_count = 0;
+       list_for_each_entry(req, &reqs, wb_list)
+               dreq->max_count += req->wb_bytes;
        dreq->verf.committed = NFS_INVALID_STABLE_HOW;
        nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
        for (i = 0; i < dreq->mirror_count; i++)
@@ -664,8 +682,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 
        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                if (!nfs_pageio_add_request(&desc, req)) {
-                       nfs_list_remove_request(req);
-                       nfs_list_add_request(req, &failed);
+                       nfs_list_move_request(req, &failed);
                        spin_lock(&cinfo.inode->i_lock);
                        dreq->flags = 0;
                        if (desc.pg_error < 0)
@@ -775,17 +792,16 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
        bool request_commit = false;
        struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 
-       if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
-               goto out_put;
-
        nfs_init_cinfo_from_dreq(&cinfo, dreq);
 
        spin_lock(&dreq->lock);
+       if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+               spin_unlock(&dreq->lock);
+               goto out_put;
+       }
 
-       if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
-               dreq->error = hdr->error;
-       if (dreq->error == 0) {
-               nfs_direct_good_bytes(dreq, hdr);
+       nfs_direct_count_bytes(dreq, hdr);
+       if (hdr->good_bytes != 0) {
                if (nfs_write_need_commit(hdr)) {
                        if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
                                request_commit = true;
@@ -821,7 +837,7 @@ out_put:
        hdr->release(hdr);
 }
 
-static void nfs_write_sync_pgio_error(struct list_head *head)
+static void nfs_write_sync_pgio_error(struct list_head *head, int error)
 {
        struct nfs_page *req;
 
index 364028c710a8bf605a8a420ecbfd869b9d2c50a5..8da239b6cc16ffb7790015debe6eca4ae505ecd6 100644 (file)
@@ -307,7 +307,7 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
        if (status == 0)
                return 0;
 
-       if (mirror->mirror_ds == NULL)
+       if (IS_ERR_OR_NULL(mirror->mirror_ds))
                return -EINVAL;
 
        dserr = kmalloc(sizeof(*dserr), gfp_flags);
index 4dc887813c71d312fe09df36ddbbd78a0b27024a..a7bc4e0494f92f61ea7989310ffd8ae6ebadc21b 100644 (file)
@@ -118,6 +118,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
        struct rb_node **p, *parent;
        int diff;
 
+       nfss->fscache_key = NULL;
+       nfss->fscache = NULL;
+       if (!(nfss->options & NFS_OPTION_FSCACHE))
+               return;
        if (!uniq) {
                uniq = "";
                ulen = 1;
@@ -230,10 +234,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
 void nfs_fscache_init_inode(struct inode *inode)
 {
        struct nfs_fscache_inode_auxdata auxdata;
+       struct nfs_server *nfss = NFS_SERVER(inode);
        struct nfs_inode *nfsi = NFS_I(inode);
 
        nfsi->fscache = NULL;
-       if (!S_ISREG(inode->i_mode))
+       if (!(nfss->fscache && S_ISREG(inode->i_mode)))
                return;
 
        memset(&auxdata, 0, sizeof(auxdata));
index 161ba2edb9d0410f7722cc4b7ba62f8d4614cc80..6363ea956858124a503a1ae8c14604dfd2726999 100644 (file)
@@ -186,7 +186,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
  */
 static inline const char *nfs_server_fscache_state(struct nfs_server *server)
 {
-       if (server->fscache && (server->options & NFS_OPTION_FSCACHE))
+       if (server->fscache)
                return "yes";
        return "no ";
 }
index b65aee481d131d00734c057cd16f4532f2898211..e4cd3a2fe6989cfd9a1c0fa50e26e3683cd04927 100644 (file)
@@ -1100,6 +1100,7 @@ int nfs_open(struct inode *inode, struct file *filp)
        nfs_fscache_open_file(inode, filp);
        return 0;
 }
+EXPORT_SYMBOL_GPL(nfs_open);
 
 /*
  * This function is called whenever some part of NFS notices that
index 63287d911c0882d86508c56d75ac835960a6eee7..5b61520dce8884230cb91092bf522aa321c4815f 100644 (file)
@@ -469,7 +469,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
 
 extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
 extern void nfs4_put_state_owner(struct nfs4_state_owner *);
-extern void nfs4_purge_state_owners(struct nfs_server *);
+extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
+extern void nfs4_free_state_owners(struct list_head *head);
 extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
 extern void nfs4_put_open_state(struct nfs4_state *);
 extern void nfs4_close_state(struct nfs4_state *, fmode_t);
index 8f53455c476530998396023edabf72bf3692d5ce..86991bcfbeb129d58c11f3d48cced99669337ca9 100644 (file)
@@ -754,9 +754,12 @@ out:
 
 static void nfs4_destroy_server(struct nfs_server *server)
 {
+       LIST_HEAD(freeme);
+
        nfs_server_return_all_delegations(server);
        unset_pnfs_layoutdriver(server);
-       nfs4_purge_state_owners(server);
+       nfs4_purge_state_owners(server, &freeme);
+       nfs4_free_state_owners(&freeme);
 }
 
 /*
index 134858507268f15a352f5df2c90762bf41e4b30b..75d3cf86f17231038da8d42aa9f267e151411de3 100644 (file)
@@ -49,7 +49,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
                return err;
 
        if ((openflags & O_ACCMODE) == 3)
-               openflags--;
+               return nfs_open(inode, filp);
 
        /* We can't create new files here */
        openflags &= ~(O_CREAT|O_EXCL);
@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                switch (err) {
-               case -EPERM:
-               case -EACCES:
-               case -EDQUOT:
-               case -ENOSPC:
-               case -EROFS:
-                       goto out_put_ctx;
                default:
+                       goto out_put_ctx;
+               case -ENOENT:
+               case -ESTALE:
+               case -EISDIR:
+               case -ENOTDIR:
+               case -ELOOP:
                        goto out_drop;
                }
        }
index 1de855e0ae611f2f51596d64d7e5a01b57f77925..621e3cf90f4eb9faf67711feef1da9cfa30f9229 100644 (file)
@@ -1355,12 +1355,20 @@ static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
        return false;
 }
 
-static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
+static int can_open_cached(struct nfs4_state *state, fmode_t mode,
+               int open_mode, enum open_claim_type4 claim)
 {
        int ret = 0;
 
        if (open_mode & (O_EXCL|O_TRUNC))
                goto out;
+       switch (claim) {
+       case NFS4_OPEN_CLAIM_NULL:
+       case NFS4_OPEN_CLAIM_FH:
+               goto out;
+       default:
+               break;
+       }
        switch (mode & (FMODE_READ|FMODE_WRITE)) {
                case FMODE_READ:
                        ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
@@ -1753,7 +1761,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
 
        for (;;) {
                spin_lock(&state->owner->so_lock);
-               if (can_open_cached(state, fmode, open_mode)) {
+               if (can_open_cached(state, fmode, open_mode, claim)) {
                        update_open_stateflags(state, fmode);
                        spin_unlock(&state->owner->so_lock);
                        goto out_return_state;
@@ -2105,12 +2113,10 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
                case -NFS4ERR_BAD_HIGH_SLOT:
                case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                case -NFS4ERR_DEADSESSION:
-                       set_bit(NFS_DELEGATED_STATE, &state->flags);
                        nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
                        return -EAGAIN;
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_STALE_STATEID:
-                       set_bit(NFS_DELEGATED_STATE, &state->flags);
                        /* Don't recall a delegation if it was lost */
                        nfs4_schedule_lease_recovery(server->nfs_client);
                        return -EAGAIN;
@@ -2131,7 +2137,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
                        return -EAGAIN;
                case -NFS4ERR_DELAY:
                case -NFS4ERR_GRACE:
-                       set_bit(NFS_DELEGATED_STATE, &state->flags);
                        ssleep(1);
                        return -EAGAIN;
                case -ENOMEM:
@@ -2147,8 +2152,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
 }
 
 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
-               struct nfs4_state *state, const nfs4_stateid *stateid,
-               fmode_t type)
+               struct nfs4_state *state, const nfs4_stateid *stateid)
 {
        struct nfs_server *server = NFS_SERVER(state->inode);
        struct nfs4_opendata *opendata;
@@ -2159,20 +2163,23 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
        if (IS_ERR(opendata))
                return PTR_ERR(opendata);
        nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
-       nfs_state_clear_delegation(state);
-       switch (type & (FMODE_READ|FMODE_WRITE)) {
-       case FMODE_READ|FMODE_WRITE:
-       case FMODE_WRITE:
+       if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
                err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
                if (err)
-                       break;
+                       goto out;
+       }
+       if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
                err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
                if (err)
-                       break;
-               /* Fall through */
-       case FMODE_READ:
+                       goto out;
+       }
+       if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
                err = nfs4_open_recover_helper(opendata, FMODE_READ);
+               if (err)
+                       goto out;
        }
+       nfs_state_clear_delegation(state);
+out:
        nfs4_opendata_put(opendata);
        return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
 }
@@ -2282,7 +2289,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
        if (data->state != NULL) {
                struct nfs_delegation *delegation;
 
-               if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
+               if (can_open_cached(data->state, data->o_arg.fmode,
+                                       data->o_arg.open_flags, claim))
                        goto out_no_action;
                rcu_read_lock();
                delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
@@ -3124,7 +3132,7 @@ static int _nfs4_do_setattr(struct inode *inode,
 
        if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
                /* Use that stateid */
-       } else if (ctx != NULL) {
+       } else if (ctx != NULL && ctx->state) {
                struct nfs_lock_context *l_ctx;
                if (!nfs4_valid_open_stateid(ctx->state))
                        return -EBADF;
index 3ba2087469ac88226ee33599870221293b63efaf..c36ef75f2054bb3803f8764c1e884745be6b055b 100644 (file)
@@ -628,24 +628,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 /**
  * nfs4_purge_state_owners - Release all cached state owners
  * @server: nfs_server with cached state owners to release
+ * @head: resulting list of state owners
  *
  * Called at umount time.  Remaining state owners will be on
  * the LRU with ref count of zero.
+ * Note that the state owners are not freed, but are added
+ * to the list @head, which can later be used as an argument
+ * to nfs4_free_state_owners.
  */
-void nfs4_purge_state_owners(struct nfs_server *server)
+void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
 {
        struct nfs_client *clp = server->nfs_client;
        struct nfs4_state_owner *sp, *tmp;
-       LIST_HEAD(doomed);
 
        spin_lock(&clp->cl_lock);
        list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
-               list_move(&sp->so_lru, &doomed);
+               list_move(&sp->so_lru, head);
                nfs4_remove_state_owner_locked(sp);
        }
        spin_unlock(&clp->cl_lock);
+}
 
-       list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+/**
+ * nfs4_purge_state_owners - Release all cached state owners
+ * @head: resulting list of state owners
+ *
+ * Frees a list of state owners that was generated by
+ * nfs4_purge_state_owners
+ */
+void nfs4_free_state_owners(struct list_head *head)
+{
+       struct nfs4_state_owner *sp, *tmp;
+
+       list_for_each_entry_safe(sp, tmp, head, so_lru) {
                list_del(&sp->so_lru);
                nfs4_free_state_owner(sp);
        }
@@ -1843,12 +1858,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
        struct nfs4_state_owner *sp;
        struct nfs_server *server;
        struct rb_node *pos;
+       LIST_HEAD(freeme);
        int status = 0;
 
 restart:
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
-               nfs4_purge_state_owners(server);
+               nfs4_purge_state_owners(server, &freeme);
                spin_lock(&clp->cl_lock);
                for (pos = rb_first(&server->state_owners);
                     pos != NULL;
@@ -1877,6 +1893,7 @@ restart:
                spin_unlock(&clp->cl_lock);
        }
        rcu_read_unlock();
+       nfs4_free_state_owners(&freeme);
        return 0;
 }
 
index b7bde12d8cd518ce7b8da11d7ae3b6239ca3292f..1c0227c78a7bc4a9b0f57549c94e297deb1b9875 100644 (file)
@@ -1171,7 +1171,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
                } else
                        *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
        }
-       if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
+       if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) {
                *p++ = cpu_to_be32(label->lfs);
                *p++ = cpu_to_be32(label->pi);
                *p++ = cpu_to_be32(label->len);
index 0ec6bce3dd692969ac8fb5681a8cdc1af0617fc9..9cf59e2622f8e284d6cc85742eb0dc2ecd929ef5 100644 (file)
@@ -567,7 +567,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
        }
 
        hdr->res.fattr   = &hdr->fattr;
-       hdr->res.count   = count;
+       hdr->res.count   = 0;
        hdr->res.eof     = 0;
        hdr->res.verf    = &hdr->verf;
        nfs_fattr_init(&hdr->fattr);
@@ -769,8 +769,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
        pageused = 0;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
-               nfs_list_remove_request(req);
-               nfs_list_add_request(req, &hdr->pages);
+               nfs_list_move_request(req, &hdr->pages);
 
                if (!last_page || last_page != req->wb_page) {
                        pageused++;
@@ -962,8 +961,7 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
        }
        if (!nfs_can_coalesce_requests(prev, req, desc))
                return 0;
-       nfs_list_remove_request(req);
-       nfs_list_add_request(req, &mirror->pg_list);
+       nfs_list_move_request(req, &mirror->pg_list);
        mirror->pg_count += req->wb_bytes;
        return 1;
 }
@@ -995,9 +993,8 @@ nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
 {
        LIST_HEAD(head);
 
-       nfs_list_remove_request(req);
-       nfs_list_add_request(req, &head);
-       desc->pg_completion_ops->error_cleanup(&head);
+       nfs_list_move_request(req, &head);
+       desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
 }
 
 /**
@@ -1133,7 +1130,8 @@ static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
 
        for (midx = 0; midx < desc->pg_mirror_count; midx++) {
                mirror = &desc->pg_mirrors[midx];
-               desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
+               desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
+                               desc->pg_error);
        }
 }
 
@@ -1235,21 +1233,23 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
 int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
                      struct nfs_pgio_header *hdr)
 {
-       LIST_HEAD(failed);
+       LIST_HEAD(pages);
 
        desc->pg_io_completion = hdr->io_completion;
        desc->pg_dreq = hdr->dreq;
-       while (!list_empty(&hdr->pages)) {
-               struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+       list_splice_init(&hdr->pages, &pages);
+       while (!list_empty(&pages)) {
+               struct nfs_page *req = nfs_list_entry(pages.next);
 
-               nfs_list_remove_request(req);
                if (!nfs_pageio_add_request(desc, req))
-                       nfs_list_add_request(req, &failed);
+                       break;
        }
        nfs_pageio_complete(desc);
-       if (!list_empty(&failed)) {
-               list_move(&failed, &hdr->pages);
-               return desc->pg_error < 0 ? desc->pg_error : -EIO;
+       if (!list_empty(&pages)) {
+               int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
+               hdr->completion_ops->error_cleanup(&pages, err);
+               nfs_set_pgio_error(hdr, err, hdr->io_start);
+               return err;
        }
        return 0;
 }
index 7d9a51e6b847c65df159d6632a98ac891370f80f..c818f9886f61888a07d15d630a479725b0014e50 100644 (file)
@@ -1426,10 +1426,15 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
        const nfs4_stateid *res_stateid = NULL;
        struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
 
-       if (ret == 0) {
-               arg_stateid = &args->stateid;
+       switch (ret) {
+       case -NFS4ERR_NOMATCHING_LAYOUT:
+               break;
+       case 0:
                if (res->lrs_present)
                        res_stateid = &res->stateid;
+               /* Fallthrough */
+       default:
+               arg_stateid = &args->stateid;
        }
        pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
                        res_stateid);
@@ -1866,8 +1871,8 @@ lookup_again:
            atomic_read(&lo->plh_outstanding) != 0) {
                spin_unlock(&ino->i_lock);
                lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
-                                       atomic_read(&lo->plh_outstanding)));
-               if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
+                                       !atomic_read(&lo->plh_outstanding)));
+               if (IS_ERR(lseg))
                        goto out_put_layout_hdr;
                pnfs_put_layout_hdr(lo);
                goto lookup_again;
index e0c257bd62b938372f957b58177458fe37c9f0da..0e0335e77ce68c1044fc4cef4397f2e6fc4f817f 100644 (file)
@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
                /* Emulate the eof flag, which isn't normally needed in NFSv2
                 * as it is guaranteed to always return the file attributes
                 */
-               if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
+               if ((hdr->res.count == 0 && hdr->args.count > 0) ||
+                   hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
                        hdr->res.eof = 1;
        }
        return 0;
@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
 
 static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
 {
-       if (task->tk_status >= 0)
+       if (task->tk_status >= 0) {
+               hdr->res.count = hdr->args.count;
                nfs_writeback_update_inode(hdr);
+       }
        return 0;
 }
 
index 48d7277c60a9793b3684e9587a69d26af410a8fb..09d5c282f50e92ef7c9735162e7cd9f63a922deb 100644 (file)
@@ -205,7 +205,7 @@ static void nfs_initiate_read(struct nfs_pgio_header *hdr,
 }
 
 static void
-nfs_async_read_error(struct list_head *head)
+nfs_async_read_error(struct list_head *head, int error)
 {
        struct nfs_page *req;
 
index 6df9b85caf20560b8bafd83777ccabb7ec2f8760..d90efdea9fbd6fba6058d38c1e6221368eac6007 100644 (file)
@@ -2239,6 +2239,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
            data->acdirmin != nfss->acdirmin / HZ ||
            data->acdirmax != nfss->acdirmax / HZ ||
            data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
+           (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
            data->nfs_server.port != nfss->port ||
            data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
            !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
index 51d0b7913c04cbdde0d1567e70c02345ad0180a7..5ab997912d8d5c45984fdb0a6c68e55aa29e14fe 100644 (file)
@@ -1394,20 +1394,27 @@ static void nfs_redirty_request(struct nfs_page *req)
        nfs_release_request(req);
 }
 
-static void nfs_async_write_error(struct list_head *head)
+static void nfs_async_write_error(struct list_head *head, int error)
 {
        struct nfs_page *req;
 
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
+               if (nfs_error_is_fatal(error)) {
+                       nfs_context_set_write_error(req->wb_context, error);
+                       if (nfs_error_is_fatal_on_server(error)) {
+                               nfs_write_error_remove_page(req);
+                               continue;
+                       }
+               }
                nfs_redirty_request(req);
        }
 }
 
 static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
 {
-       nfs_async_write_error(&hdr->pages);
+       nfs_async_write_error(&hdr->pages, 0);
        filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
                        hdr->args.offset + hdr->args.count - 1);
 }
index 29dee9630eeccfce780719f1faf62c437eb52e7d..a18b8d7a30759b468f0afb33322cdd97703ff63c 100644 (file)
@@ -148,10 +148,13 @@ struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
        /*
         * For queues with unlimited length lost events are not expected and
         * can possibly have security implications. Avoid losing events when
-        * memory is short.
+        * memory is short. For the limited size queues, avoid OOM killer in the
+        * target monitoring memcg as it may have security repercussion.
         */
        if (group->max_events == UINT_MAX)
                gfp |= __GFP_NOFAIL;
+       else
+               gfp |= __GFP_RETRY_MAYFAIL;
 
        /* Whoever is interested in the event, pays for the allocation. */
        memalloc_use_memcg(group->memcg);
index f4184b4f38154443816a61e89d461f352e83d76f..16b8702af0e7f443cd81465a555b3f64ec0878d3 100644 (file)
@@ -99,9 +99,13 @@ int inotify_handle_event(struct fsnotify_group *group,
        i_mark = container_of(inode_mark, struct inotify_inode_mark,
                              fsn_mark);
 
-       /* Whoever is interested in the event, pays for the allocation. */
+       /*
+        * Whoever is interested in the event, pays for the allocation. Do not
+        * trigger OOM killer in the target monitoring memcg as it may have
+        * security repercussion.
+        */
        memalloc_use_memcg(group->memcg);
-       event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT);
+       event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
        memalloc_unuse_memcg();
 
        if (unlikely(!event)) {
index 63d701cd1e2e70a1d5962c253ca5625d418d7992..c8e9b7031d9ad50d3d9d5ce8362cbe94405f77ca 100644 (file)
@@ -105,7 +105,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
        enum dlm_status status;
        int actions = 0;
        int in_use;
-        u8 owner;
+       u8 owner;
+       int recovery_wait = 0;
 
        mlog(0, "master_node = %d, valblk = %d\n", master_node,
             flags & LKM_VALBLK);
@@ -208,9 +209,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                }
                if (flags & LKM_CANCEL)
                        lock->cancel_pending = 0;
-               else
-                       lock->unlock_pending = 0;
-
+               else {
+                       if (!lock->unlock_pending)
+                               recovery_wait = 1;
+                       else
+                               lock->unlock_pending = 0;
+               }
        }
 
        /* get an extra ref on lock.  if we are just switching
@@ -244,6 +248,17 @@ leave:
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);
 
+       if (recovery_wait) {
+               spin_lock(&res->spinlock);
+               /* Unlock request will directly succeed after owner dies,
+                * and the lock is already removed from grant list. We have to
+                * wait for RECOVERING done or we miss the chance to purge it
+                * since the removement is much faster than RECOVERING proc.
+                */
+               __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
+               spin_unlock(&res->spinlock);
+       }
+
        /* let the caller's final dlm_lock_put handle the actual kfree */
        if (actions & DLM_UNLOCK_FREE_LOCK) {
                /* this should always be coupled with list removal */
index bd3475694e83a06501a055e73fd1403f81123eef..c492cbb2410f6f583ad1a94c3bd4dddc20ef5b5f 100644 (file)
@@ -231,7 +231,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
        /* At this point, we know that no more recovery threads can be
         * launched, so wait for any recovery completion work to
         * complete. */
-       flush_workqueue(osb->ocfs2_wq);
+       if (osb->ocfs2_wq)
+               flush_workqueue(osb->ocfs2_wq);
 
        /*
         * Now that recovery is shut down, and the osb is about to be
index 30208233f65ba44b1e4241d27326fe0b7b4d5fa8..a46aff7135d35e5845ccdd635d68616eccd77761 100644 (file)
@@ -391,7 +391,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
        struct ocfs2_dinode *alloc = NULL;
 
        cancel_delayed_work(&osb->la_enable_wq);
-       flush_workqueue(osb->ocfs2_wq);
+       if (osb->ocfs2_wq)
+               flush_workqueue(osb->ocfs2_wq);
 
        if (osb->local_alloc_state == OCFS2_LA_UNUSED)
                goto out;
index 3a24ce3deb01306d3027cdec670a7f852164f5fe..c146e12a8601fe0b11e1478997a04dc8c5708781 100644 (file)
@@ -3833,7 +3833,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
        u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
        int low_bucket = 0, bucket, high_bucket;
        struct ocfs2_xattr_bucket *search;
-       u32 last_hash;
        u64 blkno, lower_blkno = 0;
 
        search = ocfs2_xattr_bucket_new(inode);
@@ -3877,8 +3876,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
                if (xh->xh_count)
                        xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
 
-               last_hash = le32_to_cpu(xe->xe_name_hash);
-
                /* record lower_blkno which may be the insert place. */
                lower_blkno = blkno;
 
index a00350018a4792e758d5749e6294902cf32f2174..878478745924b8ac6e324ab0bb7b2624a4d6684d 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -373,6 +373,25 @@ long do_faccessat(int dfd, const char __user *filename, int mode)
                                override_cred->cap_permitted;
        }
 
+       /*
+        * The new set of credentials can *only* be used in
+        * task-synchronous circumstances, and does not need
+        * RCU freeing, unless somebody then takes a separate
+        * reference to it.
+        *
+        * NOTE! This is _only_ true because this credential
+        * is used purely for override_creds() that installs
+        * it as the subjective cred. Other threads will be
+        * accessing ->real_cred, not the subjective cred.
+        *
+        * If somebody _does_ make a copy of this (using the
+        * 'get_current_cred()' function), that will clear the
+        * non_rcu field, because now that other user may be
+        * expecting RCU freeing. But normal thread-synchronous
+        * cred accesses will keep things non-RCY.
+        */
+       override_cred->non_rcu = 1;
+
        old_cred = override_creds(override_cred);
 retry:
        res = user_path_at(dfd, filename, lookup_flags, &path);
index 54e5d17d7f3e5c91488b6ec4cab54c8177850fd3..6fe303850c9e3390c3a1c440d072a0a21b329231 100644 (file)
@@ -230,9 +230,8 @@ static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
        /* Encode an upper or lower file handle */
        fh = ovl_encode_real_fh(enc_lower ? ovl_dentry_lower(dentry) :
                                ovl_dentry_upper(dentry), !enc_lower);
-       err = PTR_ERR(fh);
        if (IS_ERR(fh))
-               goto fail;
+               return PTR_ERR(fh);
 
        err = -EOVERFLOW;
        if (fh->len > buflen)
index f0389849fd807879cfb61dd2f185dff4d097e0c7..4f4964eeb0861eccf20f90ca67d2b3059e908ee8 100644 (file)
@@ -386,7 +386,8 @@ static bool ovl_can_list(const char *s)
                return true;
 
        /* Never list trusted.overlay, list other trusted for superuser only */
-       return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+       return !ovl_is_private_xattr(s) &&
+              ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
 }
 
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
index 6ed1ace8f8b30092e9455de3ace45461d53a2eeb..1a1adc697c55326b062e7ebc1ea1a64be0b1d72a 100644 (file)
@@ -69,6 +69,7 @@ struct ovl_fs {
        bool workdir_locked;
        /* Traps in ovl inode cache */
        struct inode *upperdir_trap;
+       struct inode *workbasedir_trap;
        struct inode *workdir_trap;
        struct inode *indexdir_trap;
        /* Inode numbers in all layers do not use the high xino_bits */
index 2d028c02621fa82ea7bb819d3aa5886014d8e7cc..127df4a85c8a5859f785953b4625f99a93853db2 100644 (file)
@@ -217,6 +217,7 @@ static void ovl_free_fs(struct ovl_fs *ofs)
 {
        unsigned i;
 
+       iput(ofs->workbasedir_trap);
        iput(ofs->indexdir_trap);
        iput(ofs->workdir_trap);
        iput(ofs->upperdir_trap);
@@ -1007,6 +1008,25 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
        return 0;
 }
 
+/*
+ * Determine how we treat concurrent use of upperdir/workdir based on the
+ * index feature. This is papering over mount leaks of container runtimes,
+ * for example, an old overlay mount is leaked and now its upperdir is
+ * attempted to be used as a lower layer in a new overlay mount.
+ */
+static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
+{
+       if (ofs->config.index) {
+               pr_err("overlayfs: %s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
+                      name);
+               return -EBUSY;
+       } else {
+               pr_warn("overlayfs: %s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
+                       name);
+               return 0;
+       }
+}
+
 static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
                         struct path *upperpath)
 {
@@ -1044,14 +1064,12 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
        upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
        ofs->upper_mnt = upper_mnt;
 
-       err = -EBUSY;
        if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
                ofs->upperdir_locked = true;
-       } else if (ofs->config.index) {
-               pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
-               goto out;
        } else {
-               pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+               err = ovl_report_in_use(ofs, "upperdir");
+               if (err)
+                       goto out;
        }
 
        err = 0;
@@ -1161,16 +1179,19 @@ static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
 
        ofs->workbasedir = dget(workpath.dentry);
 
-       err = -EBUSY;
        if (ovl_inuse_trylock(ofs->workbasedir)) {
                ofs->workdir_locked = true;
-       } else if (ofs->config.index) {
-               pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
-               goto out;
        } else {
-               pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+               err = ovl_report_in_use(ofs, "workdir");
+               if (err)
+                       goto out;
        }
 
+       err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap,
+                            "workdir");
+       if (err)
+               goto out;
+
        err = ovl_make_workdir(sb, ofs, &workpath);
 
 out:
@@ -1289,16 +1310,16 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
                if (err < 0)
                        goto out;
 
-               err = -EBUSY;
-               if (ovl_is_inuse(stack[i].dentry)) {
-                       pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n");
-                       goto out;
-               }
-
                err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir");
                if (err)
                        goto out;
 
+               if (ovl_is_inuse(stack[i].dentry)) {
+                       err = ovl_report_in_use(ofs, "lowerdir");
+                       if (err)
+                               goto out;
+               }
+
                mnt = clone_private_mount(&stack[i]);
                err = PTR_ERR(mnt);
                if (IS_ERR(mnt)) {
@@ -1445,8 +1466,8 @@ out_err:
  * - another layer of this overlayfs instance
  * - upper/work dir of any overlayfs instance
  */
-static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
-                          const char *name)
+static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
+                          struct dentry *dentry, const char *name)
 {
        struct dentry *next = dentry, *parent;
        int err = 0;
@@ -1458,13 +1479,11 @@ static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
 
        /* Walk back ancestors to root (inclusive) looking for traps */
        while (!err && parent != next) {
-               if (ovl_is_inuse(parent)) {
-                       err = -EBUSY;
-                       pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n",
-                              name);
-               } else if (ovl_lookup_trap_inode(sb, parent)) {
+               if (ovl_lookup_trap_inode(sb, parent)) {
                        err = -ELOOP;
                        pr_err("overlayfs: overlapping %s path\n", name);
+               } else if (ovl_is_inuse(parent)) {
+                       err = ovl_report_in_use(ofs, name);
                }
                next = parent;
                parent = dget_parent(next);
@@ -1485,7 +1504,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
        int i, err;
 
        if (ofs->upper_mnt) {
-               err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir");
+               err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root,
+                                     "upperdir");
                if (err)
                        return err;
 
@@ -1496,13 +1516,14 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
                 * workbasedir.  In that case, we already have their traps in
                 * inode cache and we will catch that case on lookup.
                 */
-               err = ovl_check_layer(sb, ofs->workbasedir, "workdir");
+               err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
                if (err)
                        return err;
        }
 
        for (i = 0; i < ofs->numlower; i++) {
-               err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root,
+               err = ovl_check_layer(sb, ofs,
+                                     ofs->lower_layers[i].mnt->mnt_root,
                                      "lowerdir");
                if (err)
                        return err;
index f999e8bd3771c911f08ac8a137ff41a1bfa25bed..3b9b726b1a6ca1810675f4dd25b88c9c08d56e77 100644 (file)
@@ -205,12 +205,53 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
        return result;
 }
 
+/*
+ * If the user used setproctitle(), we just get the string from
+ * user space at arg_start, and limit it to a maximum of one page.
+ */
+static ssize_t get_mm_proctitle(struct mm_struct *mm, char __user *buf,
+                               size_t count, unsigned long pos,
+                               unsigned long arg_start)
+{
+       char *page;
+       int ret, got;
+
+       if (pos >= PAGE_SIZE)
+               return 0;
+
+       page = (char *)__get_free_page(GFP_KERNEL);
+       if (!page)
+               return -ENOMEM;
+
+       ret = 0;
+       got = access_remote_vm(mm, arg_start, page, PAGE_SIZE, FOLL_ANON);
+       if (got > 0) {
+               int len = strnlen(page, got);
+
+               /* Include the NUL character if it was found */
+               if (len < got)
+                       len++;
+
+               if (len > pos) {
+                       len -= pos;
+                       if (len > count)
+                               len = count;
+                       len -= copy_to_user(buf, page+pos, len);
+                       if (!len)
+                               len = -EFAULT;
+                       ret = len;
+               }
+       }
+       free_page((unsigned long)page);
+       return ret;
+}
+
 static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
                              size_t count, loff_t *ppos)
 {
        unsigned long arg_start, arg_end, env_start, env_end;
        unsigned long pos, len;
-       char *page;
+       char *page, c;
 
        /* Check if process spawned far enough to have cmdline. */
        if (!mm->env_end)
@@ -227,28 +268,42 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
                return 0;
 
        /*
-        * We have traditionally allowed the user to re-write
-        * the argument strings and overflow the end result
-        * into the environment section. But only do that if
-        * the environment area is contiguous to the arguments.
+        * We allow setproctitle() to overwrite the argument
+        * strings, and overflow past the original end. But
+        * only when it overflows into the environment area.
         */
-       if (env_start != arg_end || env_start >= env_end)
+       if (env_start != arg_end || env_end < env_start)
                env_start = env_end = arg_end;
-
-       /* .. and limit it to a maximum of one page of slop */
-       if (env_end >= arg_end + PAGE_SIZE)
-               env_end = arg_end + PAGE_SIZE - 1;
+       len = env_end - arg_start;
 
        /* We're not going to care if "*ppos" has high bits set */
-       pos = arg_start + *ppos;
-
-       /* .. but we do check the result is in the proper range */
-       if (pos < arg_start || pos >= env_end)
+       pos = *ppos;
+       if (pos >= len)
+               return 0;
+       if (count > len - pos)
+               count = len - pos;
+       if (!count)
                return 0;
 
-       /* .. and we never go past env_end */
-       if (env_end - pos < count)
-               count = env_end - pos;
+       /*
+        * Magical special case: if the argv[] end byte is not
+        * zero, the user has overwritten it with setproctitle(3).
+        *
+        * Possible future enhancement: do this only once when
+        * pos is 0, and set a flag in the 'struct file'.
+        */
+       if (access_remote_vm(mm, arg_end-1, &c, 1, FOLL_ANON) == 1 && c)
+               return get_mm_proctitle(mm, buf, count, pos, arg_start);
+
+       /*
+        * For the non-setproctitle() case we limit things strictly
+        * to the [arg_start, arg_end[ range.
+        */
+       pos += arg_start;
+       if (pos < arg_start || pos >= arg_end)
+               return 0;
+       if (count > arg_end - pos)
+               count = arg_end - pos;
 
        page = (char *)__get_free_page(GFP_KERNEL);
        if (!page)
@@ -258,48 +313,11 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
        while (count) {
                int got;
                size_t size = min_t(size_t, PAGE_SIZE, count);
-               long offset;
 
-               /*
-                * Are we already starting past the official end?
-                * We always include the last byte that is *supposed*
-                * to be NUL
-                */
-               offset = (pos >= arg_end) ? pos - arg_end + 1 : 0;
-
-               got = access_remote_vm(mm, pos - offset, page, size + offset, FOLL_ANON);
-               if (got <= offset)
+               got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
+               if (got <= 0)
                        break;
-               got -= offset;
-
-               /* Don't walk past a NUL character once you hit arg_end */
-               if (pos + got >= arg_end) {
-                       int n = 0;
-
-                       /*
-                        * If we started before 'arg_end' but ended up
-                        * at or after it, we start the NUL character
-                        * check at arg_end-1 (where we expect the normal
-                        * EOF to be).
-                        *
-                        * NOTE! This is smaller than 'got', because
-                        * pos + got >= arg_end
-                        */
-                       if (pos < arg_end)
-                               n = arg_end - pos - 1;
-
-                       /* Cut off at first NUL after 'n' */
-                       got = n + strnlen(page+n, offset+got-n);
-                       if (got < offset)
-                               break;
-                       got -= offset;
-
-                       /* Include the NUL if it existed */
-                       if (got < size)
-                               got++;
-               }
-
-               got -= copy_to_user(buf, page+offset, got);
+               got -= copy_to_user(buf, page, got);
                if (unlikely(!got)) {
                        if (!len)
                                len = -EFAULT;
@@ -1960,9 +1978,12 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
                goto out;
 
        if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
-               down_read(&mm->mmap_sem);
-               exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
-               up_read(&mm->mmap_sem);
+               status = down_read_killable(&mm->mmap_sem);
+               if (!status) {
+                       exact_vma_exists = !!find_exact_vma(mm, vm_start,
+                                                           vm_end);
+                       up_read(&mm->mmap_sem);
+               }
        }
 
        mmput(mm);
@@ -2008,8 +2029,11 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
        if (rc)
                goto out_mmput;
 
+       rc = down_read_killable(&mm->mmap_sem);
+       if (rc)
+               goto out_mmput;
+
        rc = -ENOENT;
-       down_read(&mm->mmap_sem);
        vma = find_exact_vma(mm, vm_start, vm_end);
        if (vma && vma->vm_file) {
                *path = vma->vm_file->f_path;
@@ -2105,7 +2129,11 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
        if (!mm)
                goto out_put_task;
 
-       down_read(&mm->mmap_sem);
+       result = ERR_PTR(-EINTR);
+       if (down_read_killable(&mm->mmap_sem))
+               goto out_put_mm;
+
+       result = ERR_PTR(-ENOENT);
        vma = find_exact_vma(mm, vm_start, vm_end);
        if (!vma)
                goto out_no_vma;
@@ -2116,6 +2144,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
 
 out_no_vma:
        up_read(&mm->mmap_sem);
+out_put_mm:
        mmput(mm);
 out_put_task:
        put_task_struct(task);
@@ -2157,7 +2186,12 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
        mm = get_task_mm(task);
        if (!mm)
                goto out_put_task;
-       down_read(&mm->mmap_sem);
+
+       ret = down_read_killable(&mm->mmap_sem);
+       if (ret) {
+               mmput(mm);
+               goto out_put_task;
+       }
 
        nr_files = 0;
 
index 792c78a491747e248d99dc613c88e0beb8cffd0c..64293df0faa3bcdb57f1680bc9cd3fdd53aea951 100644 (file)
@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
                return -EINVAL;
 
        while (count > 0) {
-               if (pfn_valid(pfn))
-                       ppage = pfn_to_page(pfn);
-               else
-                       ppage = NULL;
+               /*
+                * TODO: ZONE_DEVICE support requires to identify
+                * memmaps that were actually initialized.
+                */
+               ppage = pfn_to_online_page(pfn);
+
                if (!ppage || PageSlab(ppage))
                        pcount = 0;
                else
@@ -216,10 +218,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
                return -EINVAL;
 
        while (count > 0) {
-               if (pfn_valid(pfn))
-                       ppage = pfn_to_page(pfn);
-               else
-                       ppage = NULL;
+               /*
+                * TODO: ZONE_DEVICE support requires to identify
+                * memmaps that were actually initialized.
+                */
+               ppage = pfn_to_online_page(pfn);
 
                if (put_user(stable_page_flags(ppage), out)) {
                        ret = -EFAULT;
@@ -261,10 +264,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
                return -EINVAL;
 
        while (count > 0) {
-               if (pfn_valid(pfn))
-                       ppage = pfn_to_page(pfn);
-               else
-                       ppage = NULL;
+               /*
+                * TODO: ZONE_DEVICE support requires to identify
+                * memmaps that were actually initialized.
+                */
+               ppage = pfn_to_online_page(pfn);
 
                if (ppage)
                        ino = page_cgroup_ino(ppage);
index 7325baa8f9d474f166c1bbef54b584a028b287fb..c95f32b83a942c3b39350bc76bde54800c4d53fe 100644 (file)
@@ -498,6 +498,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
 
        if (root->set_ownership)
                root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
+       else {
+               inode->i_uid = GLOBAL_ROOT_UID;
+               inode->i_gid = GLOBAL_ROOT_GID;
+       }
 
        return inode;
 }
index c5819baee35c0c7f73ea91a45afcdbd378f74e24..71aba44c4fa6ddacb6cf42f709ea5d14da80ec96 100644 (file)
@@ -166,7 +166,11 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
        if (!mm || !mmget_not_zero(mm))
                return NULL;
 
-       down_read(&mm->mmap_sem);
+       if (down_read_killable(&mm->mmap_sem)) {
+               mmput(mm);
+               return ERR_PTR(-EINTR);
+       }
+
        hold_task_mempolicy(priv);
        priv->tail_vma = get_gate_vma(mm);
 
@@ -826,7 +830,10 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 
        memset(&mss, 0, sizeof(mss));
 
-       down_read(&mm->mmap_sem);
+       ret = down_read_killable(&mm->mmap_sem);
+       if (ret)
+               goto out_put_mm;
+
        hold_task_mempolicy(priv);
 
        for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
@@ -843,8 +850,9 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 
        release_task_mempolicy(priv);
        up_read(&mm->mmap_sem);
-       mmput(mm);
 
+out_put_mm:
+       mmput(mm);
 out_put_task:
        put_task_struct(priv->task);
        priv->task = NULL;
@@ -1127,7 +1135,10 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                        goto out_mm;
                }
 
-               down_read(&mm->mmap_sem);
+               if (down_read_killable(&mm->mmap_sem)) {
+                       count = -EINTR;
+                       goto out_mm;
+               }
                tlb_gather_mmu(&tlb, mm, 0, -1);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -1531,7 +1542,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
                /* overflow ? */
                if (end < start_vaddr || end > end_vaddr)
                        end = end_vaddr;
-               down_read(&mm->mmap_sem);
+               ret = down_read_killable(&mm->mmap_sem);
+               if (ret)
+                       goto out_free;
                ret = walk_page_range(start_vaddr, end, &pagemap_walk);
                up_read(&mm->mmap_sem);
                start_vaddr = end;
index 0b63d68dedb2a018e716aa4cb4e93663a8b46992..5161894a6d623370b2b9bb2275f572cdd50dd08a 100644 (file)
@@ -211,7 +211,11 @@ static void *m_start(struct seq_file *m, loff_t *pos)
        if (!mm || !mmget_not_zero(mm))
                return NULL;
 
-       down_read(&mm->mmap_sem);
+       if (down_read_killable(&mm->mmap_sem)) {
+               mmput(mm);
+               return ERR_PTR(-EINTR);
+       }
+
        /* start from the Nth VMA */
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
                if (n-- == 0)
index 8cf2218b46a759229b26025bd159ab83fe125cf5..6f90d91a8733ad68aa94d71b4caba623cdee0409 100644 (file)
@@ -330,10 +330,6 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
                goto fail;
        inode->i_mode = S_IFREG | 0444;
        inode->i_fop = &pstore_file_operations;
-       private = kzalloc(sizeof(*private), GFP_KERNEL);
-       if (!private)
-               goto fail_alloc;
-       private->record = record;
 
        switch (record->type) {
        case PSTORE_TYPE_DMESG:
@@ -383,12 +379,16 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
                break;
        }
 
+       private = kzalloc(sizeof(*private), GFP_KERNEL);
+       if (!private)
+               goto fail_inode;
+
        dentry = d_alloc_name(root, name);
        if (!dentry)
                goto fail_private;
 
+       private->record = record;
        inode->i_size = private->total_size = size;
-
        inode->i_private = private;
 
        if (record->time.tv_sec)
@@ -404,7 +404,7 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
 
 fail_private:
        free_pstore_private(private);
-fail_alloc:
+fail_inode:
        iput(inode);
 
 fail:
index 316c16463b20fe1d9985ee839e54c125f629fb2d..015d74ee31a035255f1d32c5d3c4077b1b367142 100644 (file)
@@ -162,6 +162,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
        if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
                   (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
                   &header_length) == 3) {
+               time->tv_nsec *= 1000;
                if (data_type == 'C')
                        *compressed = true;
                else
@@ -169,6 +170,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
        } else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
                          (time64_t *)&time->tv_sec, &time->tv_nsec,
                          &header_length) == 2) {
+               time->tv_nsec *= 1000;
                *compressed = false;
        } else {
                time->tv_sec = 0;
index 85fd7a8ee29eb4ff19b141c5f527dbe4a5019812..5fb5ee5b8cd7095a7aee1843072cbf9b7f178d22 100644 (file)
@@ -1888,10 +1888,7 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
 }
 EXPORT_SYMBOL(vfs_clone_file_range);
 
-/*
- * Read a page's worth of file data into the page cache.  Return the page
- * locked.
- */
+/* Read a page's worth of file data into the page cache. */
 static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
 {
        struct address_space *mapping;
@@ -1907,10 +1904,32 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
                put_page(page);
                return ERR_PTR(-EIO);
        }
-       lock_page(page);
        return page;
 }
 
+/*
+ * Lock two pages, ensuring that we lock in offset order if the pages are from
+ * the same file.
+ */
+static void vfs_lock_two_pages(struct page *page1, struct page *page2)
+{
+       /* Always lock in order of increasing index. */
+       if (page1->index > page2->index)
+               swap(page1, page2);
+
+       lock_page(page1);
+       if (page1 != page2)
+               lock_page(page2);
+}
+
+/* Unlock two pages, being careful not to unlock the same page twice. */
+static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
+{
+       unlock_page(page1);
+       if (page1 != page2)
+               unlock_page(page2);
+}
+
 /*
  * Compare extents of two files to see if they are the same.
  * Caller must have locked both inodes to prevent write races.
@@ -1948,10 +1967,24 @@ int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
                dest_page = vfs_dedupe_get_page(dest, destoff);
                if (IS_ERR(dest_page)) {
                        error = PTR_ERR(dest_page);
-                       unlock_page(src_page);
                        put_page(src_page);
                        goto out_error;
                }
+
+               vfs_lock_two_pages(src_page, dest_page);
+
+               /*
+                * Now that we've locked both pages, make sure they're still
+                * mapped to the file data we're interested in.  If not,
+                * someone is invalidating pages on us and we lose.
+                */
+               if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
+                   src_page->mapping != src->i_mapping ||
+                   dest_page->mapping != dest->i_mapping) {
+                       same = false;
+                       goto unlock;
+               }
+
                src_addr = kmap_atomic(src_page);
                dest_addr = kmap_atomic(dest_page);
 
@@ -1963,8 +1996,8 @@ int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
 
                kunmap_atomic(dest_addr);
                kunmap_atomic(src_addr);
-               unlock_page(dest_page);
-               unlock_page(src_page);
+unlock:
+               vfs_unlock_two_pages(src_page, dest_page);
                put_page(dest_page);
                put_page(src_page);
 
index 1dea7a8a52550e234ce3c05565ad8cfa56eeb1f2..05e58b56f6202f6f2297b269f81d79e6be453e00 100644 (file)
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
                }
                if (seq_has_overflowed(m))
                        goto Eoverflow;
+               p = m->op->next(m, p, &m->index);
                if (pos + m->count > offset) {
                        m->from = offset - pos;
                        m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
                }
                pos += m->count;
                m->count = 0;
-               p = m->op->next(m, p, &m->index);
                if (pos == offset)
                        break;
        }
index f0216629621d6b52f21f19f9b4010181cfe45d4f..56f655f757ffb36cb53c9f60f35695652d929529 100644 (file)
@@ -304,19 +304,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
 static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
 {
        struct compat_statfs64 buf;
-       if (sizeof(ubuf->f_bsize) == 4) {
-               if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
-                    kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
-                       return -EOVERFLOW;
-               /* f_files and f_ffree may be -1; it's okay
-                * to stuff that into 32 bits */
-               if (kbuf->f_files != 0xffffffffffffffffULL
-                && (kbuf->f_files & 0xffffffff00000000ULL))
-                       return -EOVERFLOW;
-               if (kbuf->f_ffree != 0xffffffffffffffffULL
-                && (kbuf->f_ffree & 0xffffffff00000000ULL))
-                       return -EOVERFLOW;
-       }
+
+       if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
+               return -EOVERFLOW;
+
        memset(&buf, 0, sizeof(struct compat_statfs64));
        buf.f_type = kbuf->f_type;
        buf.f_bsize = kbuf->f_bsize;
index bf416e5127431aae03a6211bbd89ab069bb7ceda..f15ac37956e7a2f1f9e9606f7f9736d7acbeaab8 100644 (file)
@@ -1165,8 +1165,8 @@ static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
  *   o exact match, i.e. the found zero-level znode contains key @key, then %1
  *     is returned and slot number of the matched branch is stored in @n;
  *   o not exact match, which means that zero-level znode does not contain
- *     @key, then %0 is returned and slot number of the closest branch is stored
- *     in @n;
+ *     @key, then %0 is returned and slot number of the closest branch or %-1
+ *     is stored in @n; In this case calling tnc_next() is mandatory.
  *   o @key is so small that it is even less than the lowest key of the
  *     leftmost zero-level node, then %0 is returned and %0 is stored in @n.
  *
@@ -1883,13 +1883,19 @@ int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
 
 static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
                            struct ubifs_dent_node *dent, uint32_t cookie,
-                           struct ubifs_znode **zn, int *n)
+                           struct ubifs_znode **zn, int *n, int exact)
 {
        int err;
        struct ubifs_znode *znode = *zn;
        struct ubifs_zbranch *zbr;
        union ubifs_key *dkey;
 
+       if (!exact) {
+               err = tnc_next(c, &znode, n);
+               if (err)
+                       return err;
+       }
+
        for (;;) {
                zbr = &znode->zbranch[*n];
                dkey = &zbr->key;
@@ -1931,7 +1937,7 @@ static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
        if (unlikely(err < 0))
                goto out_unlock;
 
-       err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
+       err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
 
 out_unlock:
        mutex_unlock(&c->tnc_mutex);
@@ -2718,7 +2724,7 @@ int ubifs_tnc_remove_dh(struct ubifs_info *c, const union ubifs_key *key,
                if (unlikely(err < 0))
                        goto out_free;
 
-               err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
+               err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
                if (err)
                        goto out_free;
        }
index e1ebdbe40032e35e1d1dbd0bb05a33687aa161ba..9c2955f67f708af5922a9f6421576b67f8a43f48 100644 (file)
@@ -881,6 +881,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
        /* len == 0 means wake all */
        struct userfaultfd_wake_range range = { .len = 0, };
        unsigned long new_flags;
+       bool still_valid;
 
        WRITE_ONCE(ctx->released, true);
 
@@ -896,8 +897,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
         * taking the mmap_sem for writing.
         */
        down_write(&mm->mmap_sem);
-       if (!mmget_still_valid(mm))
-               goto skip_mm;
+       still_valid = mmget_still_valid(mm);
        prev = NULL;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                cond_resched();
@@ -908,19 +908,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                        continue;
                }
                new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
-               prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
-                                new_flags, vma->anon_vma,
-                                vma->vm_file, vma->vm_pgoff,
-                                vma_policy(vma),
-                                NULL_VM_UFFD_CTX);
-               if (prev)
-                       vma = prev;
-               else
-                       prev = vma;
+               if (still_valid) {
+                       prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+                                        new_flags, vma->anon_vma,
+                                        vma->vm_file, vma->vm_pgoff,
+                                        vma_policy(vma),
+                                        NULL_VM_UFFD_CTX);
+                       if (prev)
+                               vma = prev;
+                       else
+                               prev = vma;
+               }
                vma->vm_flags = new_flags;
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
-skip_mm:
        up_write(&mm->mmap_sem);
        mmput(mm);
 wakeup:
index e701ebc36c069f5696c5b6287474bf37fad4b05c..e2ba2a3b63b20a6378283e35e1c58c939f1d2476 100644 (file)
@@ -281,7 +281,7 @@ xfs_ag_resv_init(
                         */
                        ask = used = 0;
 
-                       mp->m_inotbt_nores = true;
+                       mp->m_finobt_nores = true;
 
                        error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask,
                                        &used);
index c6299f82a6e496ac00b1ef27953a5ca5313cb9f8..6410d3e00ce07dfcf1f3422a04ce977c2b3446dd 100644 (file)
@@ -191,6 +191,121 @@ xfs_attr_calc_size(
        return nblks;
 }
 
+STATIC int
+xfs_attr_try_sf_addname(
+       struct xfs_inode        *dp,
+       struct xfs_da_args      *args)
+{
+
+       struct xfs_mount        *mp = dp->i_mount;
+       int                     error, error2;
+
+       error = xfs_attr_shortform_addname(args);
+       if (error == -ENOSPC)
+               return error;
+
+       /*
+        * Commit the shortform mods, and we're done.
+        * NOTE: this is also the error path (EEXIST, etc).
+        */
+       if (!error && (args->flags & ATTR_KERNOTIME) == 0)
+               xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
+
+       if (mp->m_flags & XFS_MOUNT_WSYNC)
+               xfs_trans_set_sync(args->trans);
+
+       error2 = xfs_trans_commit(args->trans);
+       args->trans = NULL;
+       return error ? error : error2;
+}
+
+/*
+ * Set the attribute specified in @args.
+ */
+int
+xfs_attr_set_args(
+       struct xfs_da_args      *args)
+{
+       struct xfs_inode        *dp = args->dp;
+       struct xfs_buf          *leaf_bp = NULL;
+       int                     error;
+
+       /*
+        * If the attribute list is non-existent or a shortform list,
+        * upgrade it to a single-leaf-block attribute list.
+        */
+       if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
+           (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+            dp->i_d.di_anextents == 0)) {
+
+               /*
+                * Build initial attribute list (if required).
+                */
+               if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
+                       xfs_attr_shortform_create(args);
+
+               /*
+                * Try to add the attr to the attribute list in the inode.
+                */
+               error = xfs_attr_try_sf_addname(dp, args);
+               if (error != -ENOSPC)
+                       return error;
+
+               /*
+                * It won't fit in the shortform, transform to a leaf block.
+                * GROT: another possible req'mt for a double-split btree op.
+                */
+               error = xfs_attr_shortform_to_leaf(args, &leaf_bp);
+               if (error)
+                       return error;
+
+               /*
+                * Prevent the leaf buffer from being unlocked so that a
+                * concurrent AIL push cannot grab the half-baked leaf
+                * buffer and run into problems with the write verifier.
+                * Once we're done rolling the transaction we can release
+                * the hold and add the attr to the leaf.
+                */
+               xfs_trans_bhold(args->trans, leaf_bp);
+               error = xfs_defer_finish(&args->trans);
+               xfs_trans_bhold_release(args->trans, leaf_bp);
+               if (error) {
+                       xfs_trans_brelse(args->trans, leaf_bp);
+                       return error;
+               }
+       }
+
+       if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
+               error = xfs_attr_leaf_addname(args);
+       else
+               error = xfs_attr_node_addname(args);
+       return error;
+}
+
+/*
+ * Remove the attribute specified in @args.
+ */
+int
+xfs_attr_remove_args(
+       struct xfs_da_args      *args)
+{
+       struct xfs_inode        *dp = args->dp;
+       int                     error;
+
+       if (!xfs_inode_hasattr(dp)) {
+               error = -ENOATTR;
+       } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+               ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
+               error = xfs_attr_shortform_remove(args);
+       } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+               error = xfs_attr_leaf_removename(args);
+       } else {
+               error = xfs_attr_node_removename(args);
+       }
+
+       return error;
+}
+
 int
 xfs_attr_set(
        struct xfs_inode        *dp,
@@ -200,11 +315,10 @@ xfs_attr_set(
        int                     flags)
 {
        struct xfs_mount        *mp = dp->i_mount;
-       struct xfs_buf          *leaf_bp = NULL;
        struct xfs_da_args      args;
        struct xfs_trans_res    tres;
        int                     rsvd = (flags & ATTR_ROOT) != 0;
-       int                     error, err2, local;
+       int                     error, local;
 
        XFS_STATS_INC(mp, xs_attr_set);
 
@@ -255,93 +369,17 @@ xfs_attr_set(
        error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
                                rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
                                       XFS_QMOPT_RES_REGBLKS);
-       if (error) {
-               xfs_iunlock(dp, XFS_ILOCK_EXCL);
-               xfs_trans_cancel(args.trans);
-               return error;
-       }
+       if (error)
+               goto out_trans_cancel;
 
        xfs_trans_ijoin(args.trans, dp, 0);
-
-       /*
-        * If the attribute list is non-existent or a shortform list,
-        * upgrade it to a single-leaf-block attribute list.
-        */
-       if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
-           (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
-            dp->i_d.di_anextents == 0)) {
-
-               /*
-                * Build initial attribute list (if required).
-                */
-               if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
-                       xfs_attr_shortform_create(&args);
-
-               /*
-                * Try to add the attr to the attribute list in
-                * the inode.
-                */
-               error = xfs_attr_shortform_addname(&args);
-               if (error != -ENOSPC) {
-                       /*
-                        * Commit the shortform mods, and we're done.
-                        * NOTE: this is also the error path (EEXIST, etc).
-                        */
-                       ASSERT(args.trans != NULL);
-
-                       /*
-                        * If this is a synchronous mount, make sure that
-                        * the transaction goes to disk before returning
-                        * to the user.
-                        */
-                       if (mp->m_flags & XFS_MOUNT_WSYNC)
-                               xfs_trans_set_sync(args.trans);
-
-                       if (!error && (flags & ATTR_KERNOTIME) == 0) {
-                               xfs_trans_ichgtime(args.trans, dp,
-                                                       XFS_ICHGTIME_CHG);
-                       }
-                       err2 = xfs_trans_commit(args.trans);
-                       xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
-                       return error ? error : err2;
-               }
-
-               /*
-                * It won't fit in the shortform, transform to a leaf block.
-                * GROT: another possible req'mt for a double-split btree op.
-                */
-               error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
-               if (error)
-                       goto out;
-               /*
-                * Prevent the leaf buffer from being unlocked so that a
-                * concurrent AIL push cannot grab the half-baked leaf
-                * buffer and run into problems with the write verifier.
-                */
-               xfs_trans_bhold(args.trans, leaf_bp);
-               error = xfs_defer_finish(&args.trans);
-               if (error)
-                       goto out;
-
-               /*
-                * Commit the leaf transformation.  We'll need another (linked)
-                * transaction to add the new attribute to the leaf, which
-                * means that we have to hold & join the leaf buffer here too.
-                */
-               error = xfs_trans_roll_inode(&args.trans, dp);
-               if (error)
-                       goto out;
-               xfs_trans_bjoin(args.trans, leaf_bp);
-               leaf_bp = NULL;
-       }
-
-       if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
-               error = xfs_attr_leaf_addname(&args);
-       else
-               error = xfs_attr_node_addname(&args);
+       error = xfs_attr_set_args(&args);
        if (error)
-               goto out;
+               goto out_trans_cancel;
+       if (!args.trans) {
+               /* shortform attribute has already been committed */
+               goto out_unlock;
+       }
 
        /*
         * If this is a synchronous mount, make sure that the
@@ -358,17 +396,14 @@ xfs_attr_set(
         */
        xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
        error = xfs_trans_commit(args.trans);
+out_unlock:
        xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
        return error;
 
-out:
-       if (leaf_bp)
-               xfs_trans_brelse(args.trans, leaf_bp);
+out_trans_cancel:
        if (args.trans)
                xfs_trans_cancel(args.trans);
-       xfs_iunlock(dp, XFS_ILOCK_EXCL);
-       return error;
+       goto out_unlock;
 }
 
 /*
@@ -423,17 +458,7 @@ xfs_attr_remove(
         */
        xfs_trans_ijoin(args.trans, dp, 0);
 
-       if (!xfs_inode_hasattr(dp)) {
-               error = -ENOATTR;
-       } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-               ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
-               error = xfs_attr_shortform_remove(&args);
-       } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
-               error = xfs_attr_leaf_removename(&args);
-       } else {
-               error = xfs_attr_node_removename(&args);
-       }
-
+       error = xfs_attr_remove_args(&args);
        if (error)
                goto out;
 
similarity index 98%
rename from fs/xfs/xfs_attr.h
rename to fs/xfs/libxfs/xfs_attr.h
index 033ff8c478e2e5d1af9b4656422f02bb278c6dcc..cc04ee0aacfbea408923a6db8fad74aa08e8a84e 100644 (file)
@@ -140,7 +140,9 @@ int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
                 unsigned char *value, int *valuelenp, int flags);
 int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
                 unsigned char *value, int valuelen, int flags);
+int xfs_attr_set_args(struct xfs_da_args *args);
 int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
+int xfs_attr_remove_args(struct xfs_da_args *args);
 int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
                  int flags, struct attrlist_cursor_kern *cursor);
 
index 3a496ffe6551c727ad883387fb1b2a08ec386c9d..38dc0b43c36651ce35199d2b745315bc6d9dd00f 100644 (file)
@@ -1019,6 +1019,34 @@ xfs_bmap_add_attrfork_local(
        return -EFSCORRUPTED;
 }
 
+/* Set an inode attr fork off based on the format */
+int
+xfs_bmap_set_attrforkoff(
+       struct xfs_inode        *ip,
+       int                     size,
+       int                     *version)
+{
+       switch (ip->i_d.di_format) {
+       case XFS_DINODE_FMT_DEV:
+               ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
+               break;
+       case XFS_DINODE_FMT_LOCAL:
+       case XFS_DINODE_FMT_EXTENTS:
+       case XFS_DINODE_FMT_BTREE:
+               ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
+               if (!ip->i_d.di_forkoff)
+                       ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
+               else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
+                       *version = 2;
+               break;
+       default:
+               ASSERT(0);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /*
  * Convert inode from non-attributed to attributed.
  * Must not be in a transaction, ip must not be locked.
@@ -1070,26 +1098,9 @@ xfs_bmap_add_attrfork(
 
        xfs_trans_ijoin(tp, ip, 0);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
-       switch (ip->i_d.di_format) {
-       case XFS_DINODE_FMT_DEV:
-               ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
-               break;
-       case XFS_DINODE_FMT_LOCAL:
-       case XFS_DINODE_FMT_EXTENTS:
-       case XFS_DINODE_FMT_BTREE:
-               ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
-               if (!ip->i_d.di_forkoff)
-                       ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
-               else if (mp->m_flags & XFS_MOUNT_ATTR2)
-                       version = 2;
-               break;
-       default:
-               ASSERT(0);
-               error = -EINVAL;
+       error = xfs_bmap_set_attrforkoff(ip, size, &version);
+       if (error)
                goto trans_cancel;
-       }
-
        ASSERT(ip->i_afp == NULL);
        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
        ip->i_afp->if_flags = XFS_IFEXTENTS;
@@ -1178,7 +1189,10 @@ xfs_iread_extents(
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        level = be16_to_cpu(block->bb_level);
-       ASSERT(level > 0);
+       if (unlikely(level == 0)) {
+               XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+               return -EFSCORRUPTED;
+       }
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);
 
@@ -3827,15 +3841,28 @@ xfs_bmapi_read(
        XFS_STATS_INC(mp, xs_blk_mapr);
 
        ifp = XFS_IFORK_PTR(ip, whichfork);
+       if (!ifp) {
+               /* No CoW fork?  Return a hole. */
+               if (whichfork == XFS_COW_FORK) {
+                       mval->br_startoff = bno;
+                       mval->br_startblock = HOLESTARTBLOCK;
+                       mval->br_blockcount = len;
+                       mval->br_state = XFS_EXT_NORM;
+                       *nmap = 1;
+                       return 0;
+               }
 
-       /* No CoW fork?  Return a hole. */
-       if (whichfork == XFS_COW_FORK && !ifp) {
-               mval->br_startoff = bno;
-               mval->br_startblock = HOLESTARTBLOCK;
-               mval->br_blockcount = len;
-               mval->br_state = XFS_EXT_NORM;
-               *nmap = 1;
-               return 0;
+               /*
+                * A missing attr ifork implies that the inode says we're in
+                * extents or btree format but failed to pass the inode fork
+                * verifier while trying to load it.  Treat that as a file
+                * corruption too.
+                */
+#ifdef DEBUG
+               xfs_alert(mp, "%s: inode %llu missing fork %d",
+                               __func__, ip->i_ino, whichfork);
+#endif /* DEBUG */
+               return -EFSCORRUPTED;
        }
 
        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
index b6e9b639e731a1fafd1116b9decb22872dc765af..488dc8860fd7c551b02e21eeae3462b96e268eb8 100644 (file)
@@ -183,6 +183,7 @@ void        xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
                xfs_filblks_t len);
 void   xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
 int    xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
+int    xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
 void   xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
 void   __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
                xfs_filblks_t len, struct xfs_owner_info *oinfo,
index e792b167150a025ec67dc9c231264792bfdd0923..c52beee31836ac794ecb482524dd27251ef9c5de 100644 (file)
@@ -266,13 +266,15 @@ xfs_defer_trans_roll(
 
        trace_xfs_defer_trans_roll(tp, _RET_IP_);
 
-       /* Roll the transaction. */
+       /*
+        * Roll the transaction.  Rolling always given a new transaction (even
+        * if committing the old one fails!) to hand back to the caller, so we
+        * join the held resources to the new transaction so that we always
+        * return with the held resources joined to @tpp, no matter what
+        * happened.
+        */
        error = xfs_trans_roll(tpp);
        tp = *tpp;
-       if (error) {
-               trace_xfs_defer_trans_roll_error(tp, error);
-               return error;
-       }
 
        /* Rejoin the joined inodes. */
        for (i = 0; i < ipcount; i++)
@@ -284,6 +286,8 @@ xfs_defer_trans_roll(
                xfs_trans_bhold(tp, bplist[i]);
        }
 
+       if (error)
+               trace_xfs_defer_trans_roll_error(tp, error);
        return error;
 }
 
index 86c50208a14374e2a1588b5686df8d30dc677c57..adb2f6df5a1141c0a41d6328959668e751da366a 100644 (file)
@@ -124,7 +124,7 @@ xfs_finobt_alloc_block(
        union xfs_btree_ptr     *new,
        int                     *stat)
 {
-       if (cur->bc_mp->m_inotbt_nores)
+       if (cur->bc_mp->m_finobt_nores)
                return xfs_inobt_alloc_block(cur, start, new, stat);
        return __xfs_inobt_alloc_block(cur, start, new, stat,
                        XFS_AG_RESV_METADATA);
@@ -157,7 +157,7 @@ xfs_finobt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
 {
-       if (cur->bc_mp->m_inotbt_nores)
+       if (cur->bc_mp->m_finobt_nores)
                return xfs_inobt_free_block(cur, bp);
        return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
 }
index a58034049995b4c6a7db190164ea886ec5113dff..3d213a7394c5b747dfb5cffc17dfb3d44d66cf03 100644 (file)
@@ -555,6 +555,7 @@ xfs_attr_put_listent(
        attrlist_ent_t *aep;
        int arraytop;
 
+       ASSERT(!context->seen_enough);
        ASSERT(!(context->flags & ATTR_KERNOVAL));
        ASSERT(context->count >= 0);
        ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
index 211b06e4702e77cdb58162c0098b48e09e870a93..41ad9eaab6ce9d8284bcd58c8e2e94963d52c5d5 100644 (file)
@@ -1080,7 +1080,7 @@ xfs_adjust_extent_unmap_boundaries(
        return 0;
 }
 
-static int
+int
 xfs_flush_unmap_range(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
index 87363d136bb618145c396223da5b4755bdd572c8..9c73d012f56ab91053bbbe9ea687bd2136568d18 100644 (file)
@@ -76,6 +76,8 @@ int   xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
 xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);
 
 xfs_extnum_t xfs_bmap_count_leaves(struct xfs_ifork *ifp, xfs_filblks_t *count);
+int   xfs_flush_unmap_range(struct xfs_inode *ip, xfs_off_t offset,
+                           xfs_off_t len);
 int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
                          int whichfork, xfs_extnum_t *nextents,
                          xfs_filblks_t *count);
index 87e6dd5326d5daf8bfa3b883b129b350d150877e..a1af984e4913e94e88b0eac261c5479728d8b24e 100644 (file)
@@ -277,7 +277,8 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
 
 /*
  * Ensure that the given in-core dquot has a buffer on disk backing it, and
- * return the buffer. This is called when the bmapi finds a hole.
+ * return the buffer locked and held. This is called when the bmapi finds a
+ * hole.
  */
 STATIC int
 xfs_dquot_disk_alloc(
@@ -355,13 +356,14 @@ xfs_dquot_disk_alloc(
         * If everything succeeds, the caller of this function is returned a
         * buffer that is locked and held to the transaction.  The caller
         * is responsible for unlocking any buffer passed back, either
-        * manually or by committing the transaction.
+        * manually or by committing the transaction.  On error, the buffer is
+        * released and not passed back.
         */
        xfs_trans_bhold(tp, bp);
        error = xfs_defer_finish(tpp);
-       tp = *tpp;
        if (error) {
-               xfs_buf_relse(bp);
+               xfs_trans_bhold_release(*tpp, bp);
+               xfs_trans_brelse(*tpp, bp);
                return error;
        }
        *bpp = bp;
@@ -521,7 +523,6 @@ xfs_qm_dqread_alloc(
        struct xfs_buf          **bpp)
 {
        struct xfs_trans        *tp;
-       struct xfs_buf          *bp;
        int                     error;
 
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
@@ -529,7 +530,7 @@ xfs_qm_dqread_alloc(
        if (error)
                goto err;
 
-       error = xfs_dquot_disk_alloc(&tp, dqp, &bp);
+       error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
        if (error)
                goto err_cancel;
 
@@ -539,10 +540,10 @@ xfs_qm_dqread_alloc(
                 * Buffer was held to the transaction, so we have to unlock it
                 * manually here because we're not passing it back.
                 */
-               xfs_buf_relse(bp);
+               xfs_buf_relse(*bpp);
+               *bpp = NULL;
                goto err;
        }
-       *bpp = bp;
        return 0;
 
 err_cancel:
index 61a5ad2600e865a6b11a8345f956eba10203abd2..259549698ba7e607b105360f36ececb7e92fff51 100644 (file)
@@ -517,6 +517,9 @@ xfs_file_dio_aio_write(
        }
 
        if (iocb->ki_flags & IOCB_NOWAIT) {
+               /* unaligned dio always waits, bail */
+               if (unaligned_io)
+                       return -EAGAIN;
                if (!xfs_ilock_nowait(ip, iolock))
                        return -EAGAIN;
        } else {
@@ -529,18 +532,14 @@ xfs_file_dio_aio_write(
        count = iov_iter_count(from);
 
        /*
-        * If we are doing unaligned IO, wait for all other IO to drain,
-        * otherwise demote the lock if we had to take the exclusive lock
-        * for other reasons in xfs_file_aio_write_checks.
+        * If we are doing unaligned IO, we can't allow any other overlapping IO
+        * in-flight at the same time or we risk data corruption. Wait for all
+        * other IO to drain before we submit. If the IO is aligned, demote the
+        * iolock if we had to take the exclusive lock in
+        * xfs_file_aio_write_checks() for other reasons.
         */
        if (unaligned_io) {
-               /* If we are going to wait for other DIO to finish, bail */
-               if (iocb->ki_flags & IOCB_NOWAIT) {
-                       if (atomic_read(&inode->i_dio_count))
-                               return -EAGAIN;
-               } else {
-                       inode_dio_wait(inode);
-               }
+               inode_dio_wait(inode);
        } else if (iolock == XFS_IOLOCK_EXCL) {
                xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
                iolock = XFS_IOLOCK_SHARED;
@@ -548,6 +547,14 @@ xfs_file_dio_aio_write(
 
        trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
        ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
+
+       /*
+        * If unaligned, this is the only IO in-flight. If it has not yet
+        * completed, wait on it before we release the iolock to prevent
+        * subsequent overlapping IO.
+        */
+       if (ret == -EIOCBQUEUED && unaligned_io)
+               inode_dio_wait(inode);
 out:
        xfs_iunlock(ip, iolock);
 
index 7c00b8bedfe358ae508f8e10ff25771618529a1d..09fd602507effcddf57492e0a410691880d4acba 100644 (file)
@@ -534,6 +534,7 @@ xfs_fs_reserve_ag_blocks(
        int                     error = 0;
        int                     err2;
 
+       mp->m_finobt_nores = false;
        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
                pag = xfs_perag_get(mp, agno);
                err2 = xfs_ag_resv_init(pag, NULL);
index 05db9540e4597536211446475d301eac834e972e..5ed84d6c70597f02e0accd3bc907e1773d1e2293 100644 (file)
@@ -1332,7 +1332,7 @@ xfs_create_tmpfile(
        if (error)
                goto out_trans_cancel;
 
-       error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip);
+       error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
        if (error)
                goto out_trans_cancel;
 
@@ -1754,7 +1754,7 @@ xfs_inactive_ifree(
         * now remains allocated and sits on the unlinked list until the fs is
         * repaired.
         */
-       if (unlikely(mp->m_inotbt_nores)) {
+       if (unlikely(mp->m_finobt_nores)) {
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
                                XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
                                &tp);
@@ -1907,11 +1907,8 @@ xfs_inactive(
 }
 
 /*
- * This is called when the inode's link count goes to 0 or we are creating a
- * tmpfile via O_TMPFILE. In the case of a tmpfile, @ignore_linkcount will be
- * set to true as the link count is dropped to zero by the VFS after we've
- * created the file successfully, so we have to add it to the unlinked list
- * while the link count is non-zero.
+ * This is called when the inode's link count has gone to 0 or we are creating
+ * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
  *
  * We place the on-disk inode on a list in the AGI.  It will be pulled from this
  * list when the inode is freed.
@@ -1931,6 +1928,7 @@ xfs_iunlink(
        int             offset;
        int             error;
 
+       ASSERT(VFS_I(ip)->i_nlink == 0);
        ASSERT(VFS_I(ip)->i_mode != 0);
 
        /*
@@ -2837,11 +2835,9 @@ xfs_rename_alloc_whiteout(
 
        /*
         * Prepare the tmpfile inode as if it were created through the VFS.
-        * Otherwise, the link increment paths will complain about nlink 0->1.
-        * Drop the link count as done by d_tmpfile(), complete the inode setup
-        * and flag it as linkable.
+        * Complete the inode setup and flag it as linkable.  nlink is already
+        * zero, so we can skip the drop_nlink.
         */
-       drop_nlink(VFS_I(tmpfile));
        xfs_setup_iops(tmpfile);
        xfs_finish_inode_setup(tmpfile);
        VFS_I(tmpfile)->i_state |= I_LINKABLE;
index f48ffd7a8d3e491d76defe66961194a635276115..e427ad097e2eeb7d14bc381309efcd94c838f72d 100644 (file)
@@ -191,9 +191,18 @@ xfs_generic_create(
 
        xfs_setup_iops(ip);
 
-       if (tmpfile)
+       if (tmpfile) {
+               /*
+                * The VFS requires that any inode fed to d_tmpfile must have
+                * nlink == 1 so that it can decrement the nlink in d_tmpfile.
+                * However, we created the temp file with nlink == 0 because
+                * we're not allowed to put an inode with nlink > 0 on the
+                * unlinked list.  Therefore we have to set nlink to 1 so that
+                * d_tmpfile can immediately set it back to zero.
+                */
+               set_nlink(inode, 1);
                d_tmpfile(dentry, inode);
-       else
+       } else
                d_instantiate(dentry, inode);
 
        xfs_finish_inode_setup(ip);
@@ -522,6 +531,10 @@ xfs_vn_getattr(
                }
        }
 
+       /*
+        * Note: If you add another clause to set an attribute flag, please
+        * update attributes_mask below.
+        */
        if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
        if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
@@ -529,6 +542,10 @@ xfs_vn_getattr(
        if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
                stat->attributes |= STATX_ATTR_NODUMP;
 
+       stat->attributes_mask |= (STATX_ATTR_IMMUTABLE |
+                                 STATX_ATTR_APPEND |
+                                 STATX_ATTR_NODUMP);
+
        switch (inode->i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
@@ -786,6 +803,7 @@ xfs_setattr_nonsize(
 
 out_cancel:
        xfs_trans_cancel(tp);
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_dqrele:
        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
index 7964513c3128159744a6edc486155c8ef41364a2..7e0bf952e087d9ce8ae854081190c9bf55a1bc51 100644 (file)
@@ -127,7 +127,7 @@ typedef struct xfs_mount {
        struct mutex            m_growlock;     /* growfs mutex */
        int                     m_fixedfsid[2]; /* unchanged for life of FS */
        uint64_t                m_flags;        /* global mount flags */
-       bool                    m_inotbt_nores; /* no per-AG finobt resv. */
+       bool                    m_finobt_nores; /* no per-AG finobt resv. */
        int                     m_ialloc_inos;  /* inodes in inode allocation */
        int                     m_ialloc_blks;  /* blocks in inode allocation */
        int                     m_ialloc_min_blks;/* min blocks in sparse inode
index 7088f44c0c5947d4fccf5df4315d56158e8da5c3..f3c393f309e19c8b99e6f2c683c84766efa6c1a8 100644 (file)
@@ -1368,9 +1368,19 @@ xfs_reflink_remap_prep(
        if (ret)
                goto out_unlock;
 
-       /* Zap any page cache for the destination file's range. */
-       truncate_inode_pages_range(&inode_out->i_data, pos_out,
-                                  PAGE_ALIGN(pos_out + *len) - 1);
+       /*
+        * If pos_out > EOF, we may have dirtied blocks between EOF and
+        * pos_out. In that case, we need to extend the flush and unmap to cover
+        * from EOF to the end of the copy length.
+        */
+       if (pos_out > XFS_ISIZE(dest)) {
+               loff_t  flen = *len + (pos_out - XFS_ISIZE(dest));
+               ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
+       } else {
+               ret = xfs_flush_unmap_range(dest, pos_out, *len);
+       }
+       if (ret)
+               goto out_unlock;
 
        /* If we're altering the file contents... */
        if (!is_dedupe) {
index 207ee302b1bb9f4039b8963b7ae1a68ce8ac8487..dce8114e3198c349b2f36b312a71adc92236cf48 100644 (file)
@@ -1561,6 +1561,13 @@ xfs_mount_alloc(
        INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
        INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
        mp->m_kobj.kobject.kset = xfs_kset;
+       /*
+        * We don't create the finobt per-ag space reservation until after log
+        * recovery, so we must set this to true so that an ifree transaction
+        * started during log recovery will not depend on space reservations
+        * for finobt expansion.
+        */
+       mp->m_finobt_nores = true;
        return mp;
 }
 
index 63ee1d5bf1d77a33d7f760f0f10d722266488cb1..9a63016009a1394f41beaff8323a5568b6ceab22 100644 (file)
@@ -129,6 +129,9 @@ __xfs_xattr_put_listent(
        char *offset;
        int arraytop;
 
+       if (context->count < 0 || context->seen_enough)
+               return;
+
        if (!context->alist)
                goto compute_size;
 
index c50ef7e6b94252bab51e09a5db207f15df6b8157..1d4ef0621174defba5b05aae44fa2ef057183ebb 100644 (file)
@@ -1472,8 +1472,11 @@ struct acpi_pptt_processor {
 
 /* Flags */
 
-#define ACPI_PPTT_PHYSICAL_PACKAGE          (1)        /* Physical package */
-#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID   (2)        /* ACPI Processor ID valid */
+#define ACPI_PPTT_PHYSICAL_PACKAGE          (1)
+#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID   (1<<1)
+#define ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD  (1<<2)     /* ACPI 6.3 */
+#define ACPI_PPTT_ACPI_LEAF_NODE            (1<<3)     /* ACPI 6.3 */
+#define ACPI_PPTT_ACPI_IDENTICAL            (1<<4)     /* ACPI 6.3 */
 
 /* 1: Cache Type Structure */
 
index 20561a60db9c4f077c68ee1a182eac38ff96d4dd..d4fb510a4fbe9e4a6dbe6d7ef85eec45dec65647 100644 (file)
@@ -104,8 +104,10 @@ extern void warn_slowpath_null(const char *file, const int line);
        warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
 #else
 extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
-#define __WARN()               __WARN_TAINT(TAINT_WARN)
-#define __WARN_printf(arg...)  do { __warn_printk(arg); __WARN(); } while (0)
+#define __WARN() do { \
+       printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
+} while (0)
+#define __WARN_printf(arg...)  __WARN_printf_taint(TAINT_WARN, arg)
 #define __WARN_printf_taint(taint, arg...)                             \
        do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
 #endif
index c64bea7a52bebd5c7332203e1e3a78bfa69b84c3..e9f20b813a699a3fb7630fe76aba3d1ab60c892e 100644 (file)
@@ -7,24 +7,6 @@
 #include <linux/compiler.h>
 #include <linux/log2.h>
 
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
-       int order;
-
-       size--;
-       size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
-       order = fls(size);
-#else
-       order = fls64(size);
-#endif
-       return order;
-}
-
 /**
  * get_order - Determine the allocation order of a memory size
  * @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
  * to hold an object of the specified size.
  *
  * The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
  */
-#define get_order(n)                                           \
-(                                                              \
-       __builtin_constant_p(n) ? (                             \
-               ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT :     \
-               (((n) < (1UL << PAGE_SHIFT)) ? 0 :              \
-                ilog2((n) - 1) - PAGE_SHIFT + 1)               \
-       ) :                                                     \
-       __get_order(n)                                          \
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+       if (__builtin_constant_p(size)) {
+               if (!size)
+                       return BITS_PER_LONG - PAGE_SHIFT;
+
+               if (size < (1UL << PAGE_SHIFT))
+                       return 0;
+
+               return ilog2((size) - 1) - PAGE_SHIFT + 1;
+       }
+
+       size--;
+       size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+       return fls(size);
+#else
+       return fls64(size);
+#endif
+}
 
 #endif /* __ASSEMBLY__ */
 
index 7d9598dc578d3d039a5fc2bd821024e425f71ed6..a2654f4f3cbcc2201ed56d20771286f532890d0f 100644 (file)
@@ -116,6 +116,7 @@ struct omap_dm_timer {
        u32 errata;
        struct platform_device *pdev;
        struct list_head node;
+       u32 late_attach;
 };
 
 int omap_dm_timer_reserve_systimer(int id);
index f9c6e0e3aec7d049ab63315108ba816c15e47a18..fa117e11458aef8e5e06013b6fcbe880351fdfdd 100644 (file)
@@ -174,7 +174,13 @@ struct drm_device {
         * races and imprecision over longer time periods, hence exposing a
         * hardware vblank counter is always recommended.
         *
-        * If non-zeor, &drm_crtc_funcs.get_vblank_counter must be set.
+        * This is the statically configured device wide maximum. The driver
+        * can instead choose to use a runtime configurable per-crtc value
+        * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count
+        * must be left at zero. See drm_crtc_set_max_vblank_count() on how
+        * to use the per-crtc value.
+        *
+        * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
         */
        u32 max_vblank_count;           /**< size of vblank counter register */
 
index c0d4df6a606fc00bbc0809321777c154738f574b..9d3b745c3107763a84269eb5a48484a698bb4bdd 100644 (file)
@@ -40,6 +40,7 @@
 #define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
 #define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
 #define DATA_BLOCK_TILED_DISPLAY 0x12
+#define DATA_BLOCK_CTA 0x81
 
 #define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
 
@@ -90,4 +91,13 @@ struct displayid_detailed_timing_block {
        struct displayid_block base;
        struct displayid_detailed_timings_1 timings[0];
 };
+
+#define for_each_displayid_db(displayid, block, idx, length) \
+       for ((block) = (struct displayid_block *)&(displayid)[idx]; \
+            (idx) + sizeof(struct displayid_block) <= (length) && \
+            (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
+            (block)->num_bytes > 0; \
+            (idx) += (block)->num_bytes + sizeof(struct displayid_block), \
+            (block) = (struct displayid_block *)&(displayid)[idx])
+
 #endif
index d25a9603ab570518eb38945a920f459dae343397..e9c676381fd4f204c255e3c59ff49ffd3d59e43a 100644 (file)
@@ -128,6 +128,26 @@ struct drm_vblank_crtc {
         * @last: Protected by &drm_device.vbl_lock, used for wraparound handling.
         */
        u32 last;
+       /**
+        * @max_vblank_count:
+        *
+        * Maximum value of the vblank registers for this crtc. This value +1
+        * will result in a wrap-around of the vblank register. It is used
+        * by the vblank core to handle wrap-arounds.
+        *
+        * If set to zero the vblank core will try to guess the elapsed vblanks
+        * between times when the vblank interrupt is disabled through
+        * high-precision timestamps. That approach is suffering from small
+        * races and imprecision over longer time periods, hence exposing a
+        * hardware vblank counter is always recommended.
+        *
+        * This is the runtime configurable per-crtc maximum set through
+        * drm_crtc_set_max_vblank_count(). If this is used the driver
+        * must leave the device wide &drm_device.max_vblank_count at zero.
+        *
+        * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
+        */
+       u32 max_vblank_count;
        /**
         * @inmodeset: Tracks whether the vblank is disabled due to a modeset.
         * For legacy driver bit 2 additionally tracks whether an additional
@@ -206,4 +226,6 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
 void drm_calc_timestamping_constants(struct drm_crtc *crtc,
                                     const struct drm_display_mode *mode);
 wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc);
+void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
+                                  u32 max_vblank_count);
 #endif
index fbf5cfc9b352f7a005071909479624e46253f516..fd965ffbb92e33fcef415033b1a78a09849bda05 100644 (file)
        INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
        INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
        INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+       INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
        INTEL_VGA_DEVICE(0x3E9A, info)  /* SRV GT2 */
 
 /* CFL H */
index d16e8755f6a9111c9a50af1d6351335547ad45fa..ef571fc037f1800abece7f8ca070c53f3e6ae18a 100644 (file)
@@ -16,5 +16,6 @@
 #define PHY_TYPE_USB2          3
 #define PHY_TYPE_USB3          4
 #define PHY_TYPE_UFS           5
+#define PHY_TYPE_DP            6
 
 #endif /* _DT_BINDINGS_PHY */
index 90ac450745f1847f9bb9e5b36ba6a0753d0eca05..561fefc2a9801856b71c23725c369ad0f37961e7 100644 (file)
@@ -361,6 +361,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu);
 void kvm_vgic_put(struct kvm_vcpu *vcpu);
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
 
 #define irqchip_in_kernel(k)   (!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)    ((k)->arch.vgic.initialized)
index de8d3d3fa6512e3e9382e23cf4fcd03c36b77097..59a416dfcaaa2ac0798657a502eb2590026771a0 100644 (file)
@@ -326,7 +326,10 @@ void acpi_set_irq_model(enum acpi_irq_model_id model,
 #ifdef CONFIG_X86_IO_APIC
 extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
 #else
-#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
+static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
+{
+       return -1;
+}
 #endif
 /*
  * This function undoes the effect of one call to acpi_register_gsi().
@@ -1288,10 +1291,15 @@ static inline int lpit_read_residency_count_address(u64 *address)
 #endif
 
 #ifdef CONFIG_ACPI_PPTT
+int acpi_pptt_cpu_is_thread(unsigned int cpu);
 int find_acpi_cpu_topology(unsigned int cpu, int level);
 int find_acpi_cpu_topology_package(unsigned int cpu);
 int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
 #else
+static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+       return -EINVAL;
+}
 static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
 {
        return -EINVAL;
index 1da59c16f6377e6f77f5cd1f774a00d374757a57..2885dce1ad496368e9f931ebba86ef69b90df05c 100644 (file)
@@ -114,6 +114,7 @@ typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
 typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
 typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
+typedef void (cleanup_rq_fn)(struct request *);
 
 
 struct blk_mq_ops {
@@ -165,6 +166,12 @@ struct blk_mq_ops {
        /* Called from inside blk_get_request() */
        void (*initialize_rq_fn)(struct request *rq);
 
+       /*
+        * Called before freeing one request which isn't completed yet,
+        * and usually for freeing the driver private data
+        */
+       cleanup_rq_fn           *cleanup_rq;
+
        map_queues_fn           *map_queues;
 
 #ifdef CONFIG_BLK_DEBUG_FS
@@ -324,4 +331,10 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
        for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
             ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
 
+static inline void blk_mq_cleanup_rq(struct request *rq)
+{
+       if (rq->q->mq_ops->cleanup_rq)
+               rq->q->mq_ops->cleanup_rq(rq);
+}
+
 #endif
index 6980014357d477793079164ce67393659def6052..d51e10f50e7551944adea98100499bdb5aac9218 100644 (file)
@@ -504,6 +504,12 @@ struct request_queue {
         * various queue flags, see QUEUE_* below
         */
        unsigned long           queue_flags;
+       /*
+        * Number of contexts that have called blk_set_pm_only(). If this
+        * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
+        * processed.
+        */
+       atomic_t                pm_only;
 
        /*
         * ida allocated id for this queue.  Used to index queues from
@@ -698,7 +704,6 @@ struct request_queue {
 #define QUEUE_FLAG_REGISTERED  26      /* queue has been registered to a disk */
 #define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
 #define QUEUE_FLAG_QUIESCED    28      /* queue has been quiesced */
-#define QUEUE_FLAG_PREEMPT_ONLY        29      /* only process REQ_PREEMPT requests */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_SAME_COMP)    |       \
@@ -736,12 +741,11 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
                             REQ_FAILFAST_DRIVER))
 #define blk_queue_quiesced(q)  test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
-#define blk_queue_preempt_only(q)                              \
-       test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+#define blk_queue_pm_only(q)   atomic_read(&(q)->pm_only)
 #define blk_queue_fua(q)       test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
 
-extern int blk_set_preempt_only(struct request_queue *q);
-extern void blk_clear_preempt_only(struct request_queue *q);
+extern void blk_set_pm_only(struct request_queue *q);
+extern void blk_clear_pm_only(struct request_queue *q);
 
 static inline int queue_in_flight(struct request_queue *q)
 {
index fe5916550da8c5c4da102dd91385eef8123167f6..f639bd0122f3958b28daf0724e5e1a889d7e4a66 100644 (file)
@@ -47,6 +47,11 @@ void generic_bug_clear_once(void);
 
 #else  /* !CONFIG_GENERIC_BUG */
 
+static inline void *find_bug(unsigned long bugaddr)
+{
+       return NULL;
+}
+
 static inline enum bug_trap_type report_bug(unsigned long bug_addr,
                                            struct pt_regs *regs)
 {
index 7e9c991c95e03fc1df55553461d95f9153d37cd7..43ed9e77cf81a6dec3fac4a8de3648d63f4d454d 100644 (file)
@@ -173,6 +173,8 @@ struct ccp_aes_engine {
        enum ccp_aes_mode mode;
        enum ccp_aes_action action;
 
+       u32 authsize;
+
        struct scatterlist *key;
        u32 key_len;            /* In bytes */
 
index 5e58bb29b1a36b55e86bf8951cd04eaf149853e5..11cdc7c60480f503aa510b5b54368ac66df45a86 100644 (file)
@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
 
 static inline void ceph_buffer_put(struct ceph_buffer *b)
 {
-       kref_put(&b->kref, ceph_buffer_release);
+       if (b)
+               kref_put(&b->kref, ceph_buffer_release);
 }
 
 extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
index a6090154b2ab75a32b0932708508eb69715af82e..a01ebb630abc82ef4bfcb8aaf263dbae8f0816c5 100644 (file)
@@ -207,6 +207,7 @@ struct css_set {
         */
        struct list_head tasks;
        struct list_head mg_tasks;
+       struct list_head dying_tasks;
 
        /* all css_task_iters currently walking this cset */
        struct list_head task_iters;
index 8937d48a5389d1c9eb3d2da2676559760a4ba323..b4854b48a4f3ddbfbf115b716ce60feb70a80ad2 100644 (file)
@@ -43,6 +43,9 @@
 /* walk all threaded css_sets in the domain */
 #define CSS_TASK_ITER_THREADED         (1U << 1)
 
+/* internal flags */
+#define CSS_TASK_ITER_SKIPPED          (1U << 16)
+
 /* a css_task_iter should be treated as an opaque object */
 struct css_task_iter {
        struct cgroup_subsys            *ss;
@@ -57,6 +60,7 @@ struct css_task_iter {
        struct list_head                *task_pos;
        struct list_head                *tasks_head;
        struct list_head                *mg_tasks_head;
+       struct list_head                *dying_tasks_head;
 
        struct css_set                  *cur_cset;
        struct css_set                  *cur_dcset;
index d30209b9cef81773b26e62f79b67de402e56ff68..0ca0c83fdb1c4864037d3162939d4a38b655581c 100644 (file)
@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
 #ifndef _CODA_HEADER_
 #define _CODA_HEADER_
 
-#if defined(__linux__)
 typedef unsigned long long u_quad_t;
-#endif
+
 #include <uapi/linux/coda.h>
 #endif 
index 15170954aa2b3d35b1f817c782e30febee6349f0..57d2b2faf6a3e13fce381dc963d57fba2d6e4f71 100644 (file)
@@ -19,6 +19,17 @@ struct venus_comm {
        struct mutex        vc_mutex;
 };
 
+/* messages between coda filesystem in kernel and Venus */
+struct upc_req {
+       struct list_head        uc_chain;
+       caddr_t                 uc_data;
+       u_short                 uc_flags;
+       u_short                 uc_inSize;  /* Size is at most 5000 bytes */
+       u_short                 uc_outSize;
+       u_short                 uc_opcode;  /* copied from data to save lookup */
+       int                     uc_unique;
+       wait_queue_head_t       uc_sleep;   /* process' wait queue */
+};
 
 static inline struct venus_comm *coda_vcp(struct super_block *sb)
 {
index d64d8c2bbdabc5be1fd29d9f91247b654d3f86b0..d67c0035165c2a90273d8fbc90aa7529ec729846 100644 (file)
@@ -116,10 +116,10 @@ enum cpuhp_state {
        CPUHP_AP_PERF_ARM_ACPI_STARTING,
        CPUHP_AP_PERF_ARM_STARTING,
        CPUHP_AP_ARM_L2X0_STARTING,
+       CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
        CPUHP_AP_ARM_ARCH_TIMER_STARTING,
        CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
        CPUHP_AP_JCORE_TIMER_STARTING,
-       CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
        CPUHP_AP_ARM_TWD_STARTING,
        CPUHP_AP_QCOM_TIMER_STARTING,
        CPUHP_AP_ARMADA_TIMER_STARTING,
@@ -170,6 +170,7 @@ enum cpuhp_state {
        CPUHP_AP_WATCHDOG_ONLINE,
        CPUHP_AP_WORKQUEUE_ONLINE,
        CPUHP_AP_RCUTREE_ONLINE,
+       CPUHP_AP_BASE_CACHEINFO_ONLINE,
        CPUHP_AP_ONLINE_DYN,
        CPUHP_AP_ONLINE_DYN_END         = CPUHP_AP_ONLINE_DYN + 30,
        CPUHP_AP_X86_HPET_ONLINE,
index 7eed6101c7914a63dcec50a7f287a6618eb2cd08..1dc351d8548bfaa9f6afa4024910277fb84d4f2f 100644 (file)
@@ -150,7 +150,11 @@ struct cred {
        struct user_struct *user;       /* real user ID subscription */
        struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
        struct group_info *group_info;  /* supplementary groups for euid/fsgid */
-       struct rcu_head rcu;            /* RCU deletion hook */
+       /* RCU deletion */
+       union {
+               int non_rcu;                    /* Can we skip RCU deletion? */
+               struct rcu_head rcu;            /* RCU deletion hook */
+       };
 } __randomize_layout;
 
 extern void __put_cred(struct cred *);
@@ -248,6 +252,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
 {
        struct cred *nonconst_cred = (struct cred *) cred;
        validate_creds(cred);
+       nonconst_cred->non_rcu = 0;
        return get_new_cred(nonconst_cred);
 }
 
index bef2e36c01b4b9769227f45d4960e46aef234d31..91f9f95ad5066703b412f9c5d5da3a46e7f16fb1 100644 (file)
@@ -62,7 +62,8 @@ typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
                                            struct request *rq,
                                            union map_info *map_context,
                                            struct request **clone);
-typedef void (*dm_release_clone_request_fn) (struct request *clone);
+typedef void (*dm_release_clone_request_fn) (struct request *clone,
+                                            union map_info *map_context);
 
 /*
  * Returns:
index 3f1066a9e1c3a6f87e9f19ba3f3949d8b81e0c20..19dd8852602c4b1a8453a8f5673c60a5b17dc7bd 100644 (file)
@@ -1332,6 +1332,7 @@ extern int (*platform_notify_remove)(struct device *dev);
  */
 extern struct device *get_device(struct device *dev);
 extern void put_device(struct device *dev);
+extern bool kill_device(struct device *dev);
 
 #ifdef CONFIG_DEVTMPFS
 extern int devtmpfs_create_node(struct device *dev);
index 1db6a6b46d0d3dbdb10dbc74cb9e481345f0a9ef..dcc707dafc7a86ba7070dbdf913a5b5069c2eaca 100644 (file)
@@ -161,7 +161,8 @@ static inline int is_device_dma_capable(struct device *dev)
  * Don't use them in device drivers.
  */
 int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
-                                      dma_addr_t *dma_handle, void **ret);
+                               dma_addr_t *dma_handle, void **ret,
+                               bool zero);
 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
 
 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
@@ -173,7 +174,7 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
                                  size_t size, int *ret);
 
 #else
-#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
+#define dma_alloc_from_dev_coherent(dev, size, handle, ret, zero) (0)
 #define dma_release_from_dev_coherent(dev, order, vaddr) (0)
 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
 
@@ -505,9 +506,9 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
 #define arch_dma_alloc_attrs(dev)      (true)
 #endif
 
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-                                      dma_addr_t *dma_handle, gfp_t flag,
-                                      unsigned long attrs)
+static inline void *dma_malloc_attrs(struct device *dev, size_t size,
+                                    dma_addr_t *dma_handle, gfp_t flag,
+                                    unsigned long attrs, bool zero)
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;
@@ -515,7 +516,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
        BUG_ON(!ops);
        WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
 
-       if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
+       if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr, zero))
                return cpu_addr;
 
        /* let the implementation decide on the zone to allocate from: */
@@ -531,6 +532,13 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
        return cpu_addr;
 }
 
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                                   dma_addr_t *dma_handle, gfp_t flag,
+                                   unsigned long attrs)
+{
+       return dma_malloc_attrs(dev, size, dma_handle, flag, attrs, true);
+}
+
 static inline void dma_free_attrs(struct device *dev, size_t size,
                                     void *cpu_addr, dma_addr_t dma_handle,
                                     unsigned long attrs)
@@ -563,6 +571,12 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
        return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
 }
 
+static inline void *dma_malloc_coherent(struct device *dev, size_t size,
+                                       dma_addr_t *dma_handle, gfp_t flag)
+{
+       return dma_malloc_attrs(dev, size, dma_handle, flag, 0, false);
+}
+
 static inline void dma_free_coherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle)
 {
index d4e1b43a53c37b2da32052391e6bd2f8f6234ff0..92420009b9bcbffbb904d9261e958190562f1eb7 100644 (file)
@@ -2651,6 +2651,8 @@ extern int filemap_flush(struct address_space *);
 extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
 extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
                                   loff_t lend);
+extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
+               loff_t start_byte, loff_t end_byte);
 
 static inline int filemap_fdatawait(struct address_space *mapping)
 {
index 39745b8bdd65d7893d184bbfa7bae221cd0cc0f0..b3115d1a7d494e684f41ed2b00a2b6792a487f78 100644 (file)
@@ -240,30 +240,6 @@ static inline int irq_to_gpio(unsigned irq)
        return -EINVAL;
 }
 
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
-                      unsigned int gpio_offset, unsigned int pin_offset,
-                      unsigned int npins)
-{
-       WARN_ON(1);
-       return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
-                       struct pinctrl_dev *pctldev,
-                       unsigned int gpio_offset, const char *pin_group)
-{
-       WARN_ON(1);
-       return -EINVAL;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
-       WARN_ON(1);
-}
-
 static inline int devm_gpio_request(struct device *dev, unsigned gpio,
                                    const char *label)
 {
index acc4279ad5e3f3950cf4ea3d9ac65396527fe5d0..412098b24f58ba06e74c2d6af8234bd56680faec 100644 (file)
@@ -222,7 +222,7 @@ static inline void gpiod_put(struct gpio_desc *desc)
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 
 static inline void gpiod_put_array(struct gpio_descs *descs)
@@ -230,7 +230,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(descs);
 }
 
 static inline struct gpio_desc *__must_check
@@ -283,7 +283,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 
 static inline void devm_gpiod_put_array(struct device *dev,
@@ -292,32 +292,32 @@ static inline void devm_gpiod_put_array(struct device *dev,
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(descs);
 }
 
 
 static inline int gpiod_get_direction(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 static inline int gpiod_direction_input(struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 
@@ -325,7 +325,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
 static inline int gpiod_get_value(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_get_array_value(unsigned int array_size,
@@ -333,25 +333,25 @@ static inline int gpiod_get_array_value(unsigned int array_size,
                                        int *value_array)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline void gpiod_set_value(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 static inline void gpiod_set_array_value(unsigned int array_size,
                                         struct gpio_desc **desc_array,
                                         int *value_array)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
 }
 static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_get_raw_array_value(unsigned int array_size,
@@ -359,27 +359,27 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
                                            int *value_array)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 static inline int gpiod_set_raw_array_value(unsigned int array_size,
                                             struct gpio_desc **desc_array,
                                             int *value_array)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 
 static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
@@ -387,25 +387,25 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
                                     int *value_array)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
                                            struct gpio_desc **desc_array,
                                            int *value_array)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
 }
 static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
@@ -413,55 +413,55 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
                                               int *value_array)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
                                                int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
                                                struct gpio_desc **desc_array,
                                                int *value_array)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 
 static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 
 static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 
 static inline int gpiod_is_active_low(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_cansleep(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 
 static inline int gpiod_to_irq(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -EINVAL;
 }
 
@@ -469,7 +469,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
                                          const char *name)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -EINVAL;
 }
 
@@ -481,7 +481,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
 static inline int desc_to_gpio(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -EINVAL;
 }
 
index 89110d896d72d7573df5140b7613f48d9b9ea5e4..aef6e2f738027a03d81bb08769f9f5951aeba7de 100644 (file)
@@ -310,6 +310,8 @@ struct host1x_device {
        struct list_head clients;
 
        bool registered;
+
+       struct device_dma_parameters dma_parms;
 };
 
 static inline struct host1x_device *to_host1x_device(struct device *dev)
index 9493d4a388dbb9a3ac71b0fe9e56566eb90c8327..8fde789f2eff37be221886c980b01e685b3eb893 100644 (file)
@@ -225,7 +225,7 @@ enum hwmon_power_attributes {
 #define HWMON_P_LABEL                  BIT(hwmon_power_label)
 #define HWMON_P_ALARM                  BIT(hwmon_power_alarm)
 #define HWMON_P_CAP_ALARM              BIT(hwmon_power_cap_alarm)
-#define HWMON_P_MIN_ALARM              BIT(hwmon_power_max_alarm)
+#define HWMON_P_MIN_ALARM              BIT(hwmon_power_min_alarm)
 #define HWMON_P_MAX_ALARM              BIT(hwmon_power_max_alarm)
 #define HWMON_P_LCRIT_ALARM            BIT(hwmon_power_lcrit_alarm)
 #define HWMON_P_CRIT_ALARM             BIT(hwmon_power_crit_alarm)
index 9c03a7d5e400df9f2853c60c9816c150c447d937..c83478271c2e03788f5d5e731923966f29131a5f 100644 (file)
@@ -3185,4 +3185,57 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
        return true;
 }
 
+struct element {
+       u8 id;
+       u8 datalen;
+       u8 data[];
+} __packed;
+
+/* element iteration helpers */
+#define for_each_element(_elem, _data, _datalen)                       \
+       for (_elem = (const struct element *)(_data);                   \
+            (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >=    \
+               (int)sizeof(*_elem) &&                                  \
+            (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >=    \
+               (int)sizeof(*_elem) + _elem->datalen;                   \
+            _elem = (const struct element *)(_elem->data + _elem->datalen))
+
+#define for_each_element_id(element, _id, data, datalen)               \
+       for_each_element(element, data, datalen)                        \
+               if (element->id == (_id))
+
+#define for_each_element_extid(element, extid, data, datalen)          \
+       for_each_element(element, data, datalen)                        \
+               if (element->id == WLAN_EID_EXTENSION &&                \
+                   element->datalen > 0 &&                             \
+                   element->data[0] == (extid))
+
+#define for_each_subelement(sub, element)                              \
+       for_each_element(sub, (element)->data, (element)->datalen)
+
+#define for_each_subelement_id(sub, id, element)                       \
+       for_each_element_id(sub, id, (element)->data, (element)->datalen)
+
+#define for_each_subelement_extid(sub, extid, element)                 \
+       for_each_element_extid(sub, extid, (element)->data, (element)->datalen)
+
+/**
+ * for_each_element_completed - determine if element parsing consumed all data
+ * @element: element pointer after for_each_element() or friends
+ * @data: same data pointer as passed to for_each_element() or friends
+ * @datalen: same data length as passed to for_each_element() or friends
+ *
+ * This function returns %true if all the data was parsed or considered
+ * while walking the elements. Only use this if your for_each_element()
+ * loop cannot be broken out of, otherwise it always returns %false.
+ *
+ * If some data was malformed, this returns %false since the last parsed
+ * element will not fill the whole remaining data.
+ */
+static inline bool for_each_element_completed(const struct element *element,
+                                             const void *data, size_t datalen)
+{
+       return (const u8 *)element == (const u8 *)data + datalen;
+}
+
 #endif /* LINUX_IEEE80211_H */
index ba7a9b0c7c57e64ef794a55f62330705adbe7267..24e9b360da659f66746eca60091078ace1e894e5 100644 (file)
@@ -84,6 +84,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
 extern void unregister_pppox_proto(int proto_num);
 extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
 extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+#define PPPOEIOCSFWD32    _IOW(0xB1 ,0, compat_size_t)
 
 /* PPPoX socket states */
 enum {
index 928442dda565f147b501dca93601731a92581b2d..84fbe73d2ec08adabd746c2918733ecb7c1ccba3 100644 (file)
@@ -156,6 +156,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        unsigned long start_pfn);
+bool has_iova_flush_queue(struct iova_domain *iovad);
 int init_iova_flush_queue(struct iova_domain *iovad,
                          iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -236,6 +237,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
 {
 }
 
+static inline bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+       return false;
+}
+
 static inline int init_iova_flush_queue(struct iova_domain *iovad,
                                        iova_flush_cb flush_cb,
                                        iova_entry_dtor entry_dtor)
index 583b82b5a1e98e0558154acba7e1dbcb726468da..1cf1b9b8e97548af5122bc586feaaeddea3c03aa 100644 (file)
@@ -454,6 +454,22 @@ struct jbd2_inode {
         * @i_flags: Flags of inode [j_list_lock]
         */
        unsigned long i_flags;
+
+       /**
+        * @i_dirty_start:
+        *
+        * Offset in bytes where the dirty range for this inode starts.
+        * [j_list_lock]
+        */
+       loff_t i_dirty_start;
+
+       /**
+        * @i_dirty_end:
+        *
+        * Inclusive offset in bytes where the dirty range for this inode
+        * ends. [j_list_lock]
+        */
+       loff_t i_dirty_end;
 };
 
 struct jbd2_revoke_table_s;
@@ -1399,6 +1415,12 @@ extern int          jbd2_journal_force_commit(journal_t *);
 extern int        jbd2_journal_force_commit_nested(journal_t *);
 extern int        jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode);
 extern int        jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode);
+extern int        jbd2_journal_inode_ranged_write(handle_t *handle,
+                       struct jbd2_inode *inode, loff_t start_byte,
+                       loff_t length);
+extern int        jbd2_journal_inode_ranged_wait(handle_t *handle,
+                       struct jbd2_inode *inode, loff_t start_byte,
+                       loff_t length);
 extern int        jbd2_journal_begin_ordered_truncate(journal_t *journal,
                                struct jbd2_inode *inode, loff_t new_size);
 extern void       jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
index 4631008a022ff878636e318d6d91ccfb0778531f..3dfb098b7706566df21f6124cf54171d2aef77cf 100644 (file)
 #define DIV_ROUND_DOWN_ULL(ll, d) \
        ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
 
-#define DIV_ROUND_UP_ULL(ll, d)                DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d))
+#define DIV_ROUND_UP_ULL(ll, d) \
+       DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))
 
 #if BITS_PER_LONG == 32
 # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
index 30efb3663892346d3d323d74c6443f7ce6f09fd8..d42a36e4e6c24ed97f7ceebbc844b382128b1276 100644 (file)
@@ -818,6 +818,7 @@ void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
 /*
index cbd9d8495690212fc689d3ce3a7caf0812d45523..88e1e6304a7193bca45a14500f7ade4632dbd335 100644 (file)
@@ -117,6 +117,7 @@ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
 unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
                        resource_size_t hw_addr, resource_size_t size);
 int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
 resource_size_t logic_pio_to_hwaddr(unsigned long pio);
 unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
 
index 804516e4f483286e973ec2127f6980a0b98533de..3386399feadc82483bd44bab1b09285e05bb8c07 100644 (file)
@@ -188,6 +188,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
 struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
 struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
 void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
                          u64 *bytes, u64 *packets, u64 *lastuse);
 int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
index f043d65b9bac2d65b0b8c8053e22a9051d03769f..177f11c96187b7ff8f06af56589bfe59060e0eae 100644 (file)
@@ -5623,7 +5623,12 @@ struct mlx5_ifc_modify_cq_in_bits {
 
        struct mlx5_ifc_cqc_bits cq_context;
 
-       u8         reserved_at_280[0x600];
+       u8         reserved_at_280[0x60];
+
+       u8         cq_umem_valid[0x1];
+       u8         reserved_at_2e1[0x1f];
+
+       u8         reserved_at_300[0x580];
 
        u8         pas[0][0x40];
 };
index f6513714d92ad3544c0fec0679db9bff6ca248d3..22dfd0ae2e003aa1130bfcb25d69ac543883c5d3 100644 (file)
@@ -490,6 +490,15 @@ void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
 
 void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq);
 
+/*
+ * May be called from host driver's system/runtime suspend/resume callbacks,
+ * to know if SDIO IRQs has been claimed.
+ */
+static inline bool sdio_irq_claimed(struct mmc_host *host)
+{
+       return host->sdio_irqs > 0;
+}
+
 static inline void mmc_signal_sdio_irq(struct mmc_host *host)
 {
        host->ops->enable_sdio_irq(host, 0);
index e27572d30d97751ba70a9c5d753997faaac49d51..ad69430fd0eb5a9123727e2054682971de3feca3 100644 (file)
@@ -164,6 +164,16 @@ nfs_list_add_request(struct nfs_page *req, struct list_head *head)
        list_add_tail(&req->wb_list, head);
 }
 
+/**
+ * nfs_list_move_request - Move a request to a new list
+ * @req: request
+ * @head: head of list into which to insert the request.
+ */
+static inline void
+nfs_list_move_request(struct nfs_page *req, struct list_head *head)
+{
+       list_move_tail(&req->wb_list, head);
+}
 
 /**
  * nfs_list_remove_request - Remove a request from its wb_list
index bd1c889a9ed956c14543117a8835eb4ae4474a9a..cab24a127feb34bab337102fcb69500ca18a1d54 100644 (file)
@@ -1539,7 +1539,7 @@ struct nfs_commit_data {
 };
 
 struct nfs_pgio_completion_ops {
-       void    (*error_cleanup)(struct list_head *head);
+       void    (*error_cleanup)(struct list_head *head, int);
        void    (*init_hdr)(struct nfs_pgio_header *hdr);
        void    (*completion)(struct nfs_pgio_header *hdr);
        void    (*reschedule_io)(struct nfs_pgio_header *hdr);
index 0fff52675a6b29d642ed9b932a60b64a6da46491..ad8021b0efb7185efdb005ff5a7fdb8b315adacd 100644 (file)
 
 struct pci_epc;
 
+enum pci_epc_interface_type {
+       PRIMARY_INTERFACE,
+       SECONDARY_INTERFACE,
+};
+
 enum pci_epc_irq_type {
        PCI_EPC_IRQ_UNKNOWN,
        PCI_EPC_IRQ_LEGACY,
@@ -20,6 +25,19 @@ enum pci_epc_irq_type {
        PCI_EPC_IRQ_MSIX,
 };
 
+static inline const char *
+pci_epc_interface_string(enum pci_epc_interface_type type)
+{
+       switch (type) {
+       case PRIMARY_INTERFACE:
+               return "primary";
+       case SECONDARY_INTERFACE:
+               return "secondary";
+       default:
+               return "UNKNOWN interface";
+       }
+}
+
 /**
  * struct pci_epc_ops - set of function pointers for performing EPC operations
  * @epf_init: ops to perform EPC specific initialization
@@ -39,6 +57,7 @@ enum pci_epc_irq_type {
  * @get_msix: ops to get the number of MSI-X interrupts allocated by the RC
  *          from the MSI-X capability register
  * @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
+ * @map_msi_irq: ops to map physical address to MSI address and return MSI data
  * @start: ops to start the PCI link
  * @stop: ops to stop the PCI link
  * @owner: the module owner containing the ops
@@ -67,6 +86,9 @@ struct pci_epc_ops {
        int     (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
        int     (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
                             enum pci_epc_irq_type type, u16 interrupt_num);
+       int     (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+                              phys_addr_t phys_addr, u8 interrupt_num,
+                              u32 entry_size, u32 *msi_data);
        int     (*start)(struct pci_epc *epc);
        void    (*stop)(struct pci_epc *epc);
        const struct pci_epc_features* (*get_features)(struct pci_epc *epc,
@@ -172,9 +194,11 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
                 struct module *owner);
 void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
 void pci_epc_destroy(struct pci_epc *epc);
-int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
+                   enum pci_epc_interface_type type);
 void pci_epc_linkup(struct pci_epc *epc);
-void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
+                       enum pci_epc_interface_type type);
 int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
                         struct pci_epf_header *hdr);
 int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
@@ -192,17 +216,25 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
 int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
                     u16 interrupts);
 int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+                       phys_addr_t phys_addr, u8 interrupt_num,
+                       u32 entry_size, u32 *msi_data);
 int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
                      enum pci_epc_irq_type type, u16 interrupt_num);
 int pci_epc_start(struct pci_epc *epc);
 void pci_epc_stop(struct pci_epc *epc);
+void pci_epc_of_parse_header(struct device_node *node,
+                            struct pci_epf_header *header);
 const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
                                                    u8 func_no, u8 vfunc_no);
-unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
-                                       *epc_features);
+int pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features);
+int pci_epc_get_next_free_bar(const struct pci_epc_features
+                             *epc_features, enum pci_barno bar);
 struct pci_epc *pci_epc_get(const char *epc_name);
 void pci_epc_put(struct pci_epc *epc);
-
+struct pci_epc *of_pci_epc_get(struct device_node *node, int index);
+struct pci_epc *of_pci_epc_get_by_name(struct device_node *node,
+                                      const char *epc_name);
 int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size,
                       size_t page_size);
 void pci_epc_mem_exit(struct pci_epc *epc);
index 02090eb41563ed82f719285c0d4b0aa2d1a577ae..80505106c7707a6fac610dd22f95a49790852257 100644 (file)
 
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
+#include <linux/of.h>
 #include <linux/pci.h>
 
 struct pci_epf;
+enum pci_epc_interface_type;
 
 enum pci_barno {
        BAR_0,
@@ -101,6 +103,7 @@ struct pci_epf_bar {
 /**
  * struct pci_epf - represents the PCI EPF device
  * @dev: the PCI EPF device
+ * @node: the device tree node of the PCI EPF device
  * @name: the name of the PCI EPF device
  * @header: represents standard configuration header
  * @bar: represents the BAR of EPF device
@@ -122,9 +125,22 @@ struct pci_epf_bar {
  * @is_vf: true - virtual function, false - physical function
  * @vfunction_num_map: bitmap to manage virtual function number
  * @pci_vepf: list of virtual endpoint function associated with this function
+ * @sec_epc: the secondary EPC device to which this EPF device is bound
+ * @sec_epc_list: to add pci_epf as list of PCI endpoint functions to secondary
+ *   EPC device
+ * @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC
+ * @sec_epc_func_no: unique (physical) function number within the secondary EPC
+ * @sec_epc_vfunc_no: unique virtual function number within a physical function
+ *   associated with secondary EPC
+ * @sec_epc_vfunction_num_map: bitmap to manage virtual function number
+ *   associated with the physical function of the
+ *   secondary EPC
+ * @sec_epc_pci_vepf: list of virtual endpoint function associated with the
+ *   physical function of the secondary EPC
  */
 struct pci_epf {
        struct device           dev;
+       struct device_node      *node;
        const char              *name;
        struct pci_epf_header   *header;
        struct pci_epf_bar      bar[6];
@@ -147,6 +163,15 @@ struct pci_epf {
        unsigned int            is_vf;
        unsigned long           vfunction_num_map;
        struct list_head        pci_vepf;
+
+       /* Below members are to attach secondary EPC to an endpoint function */
+       struct pci_epc          *sec_epc;
+       struct list_head        sec_epc_list;
+       struct pci_epf_bar      sec_epc_bar[6];
+       u8                      sec_epc_func_no;
+       u8                      sec_epc_vfunc_no;
+       unsigned long           sec_epc_vfunction_num_map;
+       struct list_head        sec_epc_pci_vepf;
 };
 
 #define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev)
@@ -164,16 +189,18 @@ static inline void *epf_get_drvdata(struct pci_epf *epf)
        return dev_get_drvdata(&epf->dev);
 }
 
-const struct pci_epf_device_id *
-pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf);
 struct pci_epf *pci_epf_create(const char *name);
+struct pci_epf *pci_epf_of_create(struct device_node *node);
+struct pci_epf *devm_pci_epf_of_create(struct device *dev,
+                                      struct device_node *node);
 void pci_epf_destroy(struct pci_epf *epf);
 int __pci_epf_register_driver(struct pci_epf_driver *driver,
                              struct module *owner);
 void pci_epf_unregister_driver(struct pci_epf_driver *driver);
 void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
-                         size_t align);
-void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
+                         size_t align, enum pci_epc_interface_type type);
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
+                       enum pci_epc_interface_type type);
 int pci_epf_bind(struct pci_epf *epf);
 void pci_epf_unbind(struct pci_epf *epf);
 int pci_epf_init_dma_chan(struct pci_epf *epf);
index d157983b84cf9258fa43b639accc8ee3904a080c..b2820a834a5eb2d5e0077fc66b0bd4ee8d7db5f7 100644 (file)
 #define PCI_DEVICE_ID_TI_X620          0xac8d
 #define PCI_DEVICE_ID_TI_X420          0xac8e
 #define PCI_DEVICE_ID_TI_XX20_FM       0xac8f
+#define PCI_DEVICE_ID_TI_J721E         0xb00d
 #define PCI_DEVICE_ID_TI_DRA74x                0xb500
 #define PCI_DEVICE_ID_TI_DRA72x                0xb501
 
index 42fc852bf51237f07a909d61dec66f792060fd14..b22bc81f3669ad0d56f21ff8280d4c143fc09766 100644 (file)
@@ -1030,6 +1030,11 @@ static inline int in_software_context(struct perf_event *event)
        return event->ctx->pmu->task_ctx_nr == perf_sw_context;
 }
 
+static inline int is_exclusive_pmu(struct pmu *pmu)
+{
+       return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
+}
+
 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
index 757a0f9e26f9a7b6c4b1da910fac2efcee9c658a..1f6d3b038ae0e07a4319f310338d934c7a657965 100644 (file)
@@ -28,6 +28,7 @@ struct omap_dm_timer_ops {
        int     (*free)(struct omap_dm_timer *timer);
 
        void    (*enable)(struct omap_dm_timer *timer);
+       int     (*is_enabled)(struct omap_dm_timer *timer);
        void    (*disable)(struct omap_dm_timer *timer);
 
        int     (*get_irq)(struct omap_dm_timer *timer);
index 6669dc9394daafd33547d2fcc4e4a750687630ac..1ce96b23c55d271292466f831649801fe24917d4 100644 (file)
@@ -17,6 +17,7 @@ struct iommu_platform_data {
        int (*assert_reset)(struct platform_device *pdev, const char *name);
        int (*deassert_reset)(struct platform_device *pdev, const char *name);
        int (*device_enable)(struct platform_device *pdev);
+       bool (*device_is_enabled)(struct platform_device *pdev);
        int (*device_idle)(struct platform_device *pdev);
        int (*set_pwrdm_constraint)(struct platform_device *pdev, bool request,
                                    u8 *pwrst);
index 55e3c007e264f2f3591283be422e6c54594e070a..e1b255ca350361e4d01d86b46e6067289b602f0e 100644 (file)
@@ -15,10 +15,12 @@ struct platform_device;
  * struct omap_rproc_pdata - omap remoteproc's platform data
  * @device_enable: omap-specific handler for enabling a device
  * @device_shutdown: omap-specific handler for shutting down a device
+ * @device_is_enabled: omap-specific handler to check if device is booted
  */
 struct omap_rproc_pdata {
        int (*device_enable)(struct platform_device *pdev);
        int (*device_shutdown)(struct platform_device *pdev);
+       bool (*device_is_enabled)(struct platform_device *pdev);
 };
 
 #endif /* _PLAT_REMOTEPROC_H */
index ba639a684e304cba70b148c56f890e9621d76f3d..3e38dddbb7c50b6ade237fd498985f23b6905ca2 100644 (file)
@@ -154,7 +154,7 @@ struct pruss;
 
 #if IS_ENABLED(CONFIG_TI_PRUSS)
 
-struct pruss *pruss_get(struct rproc *rproc);
+struct pruss *pruss_get(struct rproc *rproc, int *pruss_id);
 void pruss_put(struct pruss *pruss);
 int pruss_request_mem_region(struct pruss *pruss, enum pruss_mem mem_id,
                             struct pruss_mem_region *region);
@@ -167,7 +167,7 @@ int pruss_intc_trigger(unsigned int irq);
 
 #else
 
-static inline struct pruss *pruss_get(struct rproc *rproc)
+static inline struct pruss *pruss_get(struct rproc *rproc, int *pruss_id)
 {
        return ERR_PTR(-ENOTSUPP);
 }
index 60985d89395f704817bb7550e39b4601410f5694..14526b02b2e70a8eca5108baab37b79bc4ae9425 100644 (file)
@@ -39,6 +39,7 @@ struct pruss_intc_config {
 /**
  * struct pruss - PRUSS parent structure
  * @dev: pruss device pointer
+ * @id: pruss instance id
  * @cfg_base: base iomap for CFG region
  * @cfg: regmap for config region
  * @mem_regions: data for each of the PRUSS memory regions
@@ -47,6 +48,7 @@ struct pruss_intc_config {
  */
 struct pruss {
        struct device *dev;
+       u32 id;
        void __iomem *cfg_base;
        struct regmap *cfg;
        struct pruss_mem_region mem_regions[PRUSS_MEM_MAX];
index dc905a4ff8d712294b2211b76ff43aced5799758..185d948297011fe6bd540e4847da2ff5ffa63761 100644 (file)
@@ -22,7 +22,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
 /* i_mutex must being held */
 static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
 {
-       return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
+       return (ia->ia_valid & ATTR_SIZE) ||
                (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
                (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
 }
index e102c5bccbb9fd2f323bc7cc9c4c288d98845897..68cbe111420bc05b698eb192c5f6c5191325829e 100644 (file)
@@ -620,7 +620,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  * read-side critical sections may be preempted and they may also block, but
  * only when acquiring spinlocks that are subject to priority inheritance.
  */
-static inline void rcu_read_lock(void)
+static __always_inline void rcu_read_lock(void)
 {
        __rcu_read_lock();
        __acquire(RCU);
index ddb311a6d197c018613cde5125776b8ec40f9f61..fa28f68fe3f498d9086b4cd70eda12d98f93e04f 100644 (file)
@@ -525,6 +525,7 @@ struct rproc_dump_segment {
  * @deny_sysfs_ops: flag to not permit sysfs operations on state and firmware
  * @skip_firmware_request: flag to skip requesting the firmware
  * @skip_load: flag to skip the loading of firmware segments
+ * @late_attach: flag indicating remote core has been externally pre-booted
  * @dump_segments: list of segments in the firmware
  */
 struct rproc {
@@ -563,6 +564,7 @@ struct rproc {
        unsigned int deny_sysfs_ops             : 1;
        unsigned int skip_firmware_request      : 1;
        unsigned int skip_load                  : 1;
+       unsigned int late_attach                : 1;
        struct list_head dump_segments;
 };
 
index cdaeb19e02f8b13f5327102ee69f2547379dcac5..075fa7411139f9fb18574d67dee9a87c5c7b58c6 100644 (file)
@@ -157,8 +157,8 @@ struct rpmsg_remotedev {
        enum rpmsg_remotedev_type type;
        union {
                struct {
-                       struct rpmsg_remotedev_display_ops *ops;
-                       struct rpmsg_remotedev_display_cb *cb_ops;
+                       const struct rpmsg_remotedev_display_ops *ops;
+                       const struct rpmsg_remotedev_display_cb *cb_ops;
                } display;
 
                struct {
index 5dc024e283979e79a76a383e61c084b5a27d1818..20f5ba262cc0d1c0a7b2c929151cf67e4e38183d 100644 (file)
@@ -1023,7 +1023,15 @@ struct task_struct {
        u64                             last_sum_exec_runtime;
        struct callback_head            numa_work;
 
-       struct numa_group               *numa_group;
+       /*
+        * This pointer is only modified for current in syscall and
+        * pagefault context (and for tasks being destroyed), so it can be read
+        * from any of the following contexts:
+        *  - RCU read-side critical section
+        *  - current->numa_group from everywhere
+        *  - task's runqueue locked, task not running
+        */
+       struct numa_group __rcu         *numa_group;
 
        /*
         * numa_faults is an array split into four regions:
index 0d10b7ce0da744608dd87ceaa472af63420641cc..e9d4e389aed937074bb0ff4a399528fc0a269ed4 100644 (file)
@@ -330,6 +330,8 @@ enum {
 
 static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
 {
+       if (current->mm != mm)
+               return;
        if (likely(!(atomic_read(&mm->membarrier_state) &
                     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
                return;
index e7dd04a84ba89d79507d461cee5b839269ff0b8a..3988762efe15c0e5a80602e2c9acb6a5820a740e 100644 (file)
@@ -19,7 +19,7 @@
 extern void task_numa_fault(int last_node, int node, int pages, int flags);
 extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
-extern void task_numa_free(struct task_struct *p);
+extern void task_numa_free(struct task_struct *p, bool final);
 extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
                                        int src_nid, int dst_cpu);
 #else
@@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
 static inline void set_numabalancing_state(bool enabled)
 {
 }
-static inline void task_numa_free(struct task_struct *p)
+static inline void task_numa_free(struct task_struct *p, bool final)
 {
 }
 static inline bool should_numa_migrate_memory(struct task_struct *p,
index 44985c4a1e86214dca6579b42bda3c6d666bcdc9..bebcb4f196dcf6287d7139a3effbb1d4b38a6c8a 100644 (file)
@@ -119,6 +119,7 @@ struct watchdog_device {
 #define WDOG_STOP_ON_REBOOT    2       /* Should be stopped on reboot */
 #define WDOG_HW_RUNNING                3       /* True if HW watchdog running */
 #define WDOG_STOP_ON_UNREGISTER        4       /* Should be stopped on unregister */
+#define WDOG_RESET_KEEPALIVE   5       /* Reset keepalive timers at start */
        struct list_head deferred;
 };
 
index dc4b412e8fa1e2feb6676eb4eec9394241a2035c..59bf280e9715c901024349565e04551437239403 100644 (file)
@@ -333,67 +333,6 @@ void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
 u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
                           unsigned int *offset);
 
-/**
- * cec_set_edid_phys_addr() - find and set the physical address
- *
- * @edid:      pointer to the EDID data
- * @size:      size in bytes of the EDID data
- * @phys_addr: the new physical address
- *
- * This function finds the location of the physical address in the EDID
- * and fills in the given physical address and updates the checksum
- * at the end of the EDID block. It does nothing if the EDID doesn't
- * contain a physical address.
- */
-void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
-
-/**
- * cec_phys_addr_for_input() - calculate the PA for an input
- *
- * @phys_addr: the physical address of the parent
- * @input:     the number of the input port, must be between 1 and 15
- *
- * This function calculates a new physical address based on the input
- * port number. For example:
- *
- * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
- *
- * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
- *
- * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
- *
- * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
- *
- * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
- */
-u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
-
-/**
- * cec_phys_addr_validate() - validate a physical address from an EDID
- *
- * @phys_addr: the physical address to validate
- * @parent:    if not %NULL, then this is filled with the parents PA.
- * @port:      if not %NULL, then this is filled with the input port.
- *
- * This validates a physical address as read from an EDID. If the
- * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
- * then it will return -EINVAL.
- *
- * The parent PA is passed into %parent and the input port is passed into
- * %port. For example:
- *
- * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
- *
- * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
- *
- * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
- *
- * PA = f.f.f.f: has parent f.f.f.f and input port 0.
- *
- * Return: 0 if the PA is valid, -EINVAL if not.
- */
-int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
-
 #else
 
 static inline int cec_register_adapter(struct cec_adapter *adap,
@@ -428,25 +367,6 @@ static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
        return CEC_PHYS_ADDR_INVALID;
 }
 
-static inline void cec_set_edid_phys_addr(u8 *edid, unsigned int size,
-                                         u16 phys_addr)
-{
-}
-
-static inline u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
-{
-       return CEC_PHYS_ADDR_INVALID;
-}
-
-static inline int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
-{
-       if (parent)
-               *parent = phys_addr;
-       if (port)
-               *port = 0;
-       return 0;
-}
-
 #endif
 
 /**
index 17cb27df1b813412146c7503c3429de0224c99d2..4e7732d3908c45240c79a6ef4f19ebef4b486f12 100644 (file)
@@ -234,4 +234,10 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
                         const struct hdmi_vendor_infoframe *hdmi,
                         unsigned int height);
 
+u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
+                           unsigned int *offset);
+void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
+u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input);
+int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
+
 #endif
index 970303448c9029e2aaab37e3735bc83e0490de04..0c82d7ea6ee14cab4ec95c6c5b0bf04bb69f01bc 100644 (file)
@@ -15,6 +15,7 @@
 struct tcf_idrinfo {
        spinlock_t      lock;
        struct idr      action_idr;
+       struct net      *net;
 };
 
 struct tc_action_ops;
@@ -107,7 +108,7 @@ struct tc_action_net {
 };
 
 static inline
-int tc_action_net_init(struct tc_action_net *tn,
+int tc_action_net_init(struct net *net, struct tc_action_net *tn,
                       const struct tc_action_ops *ops)
 {
        int err = 0;
@@ -116,6 +117,7 @@ int tc_action_net_init(struct tc_action_net *tn,
        if (!tn->idrinfo)
                return -ENOMEM;
        tn->ops = ops;
+       tn->idrinfo->net = net;
        spin_lock_init(&tn->idrinfo->lock);
        idr_init(&tn->idrinfo->action_idr);
        return err;
index 67e0a990144a6e0572e442b6a637541b84f1625c..468deae5d603efaa20f72e7e01c174a8dd2d7d66 100644 (file)
@@ -6562,6 +6562,21 @@ int cfg80211_external_auth_request(struct net_device *netdev,
                                   struct cfg80211_external_auth_params *params,
                                   gfp_t gfp);
 
+/**
+ * cfg80211_iftype_allowed - check whether the interface can be allowed
+ * @wiphy: the wiphy
+ * @iftype: interface type
+ * @is_4addr: use_4addr flag, must be '0' when check_swif is '1'
+ * @check_swif: check iftype against software interfaces
+ *
+ * Check whether the interface is allowed to operate; additionally, this API
+ * can be used to check iftype against the software interfaces when
+ * check_swif is '1'.
+ */
+bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
+                            bool is_4addr, u8 check_swif);
+
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
index 6cf0870414c783720026a151ba94307a2bb4ad30..ffc8ee0ea5e5b19afdb507681c322056b176c563 100644 (file)
@@ -313,8 +313,9 @@ static inline bool dst_hold_safe(struct dst_entry *dst)
  * @skb: buffer
  *
  * If dst is not yet refcounted and not destroyed, grab a ref on it.
+ * Returns true if dst is refcounted.
  */
-static inline void skb_dst_force(struct sk_buff *skb)
+static inline bool skb_dst_force(struct sk_buff *skb)
 {
        if (skb_dst_is_noref(skb)) {
                struct dst_entry *dst = skb_dst(skb);
@@ -325,6 +326,8 @@ static inline void skb_dst_force(struct sk_buff *skb)
 
                skb->_skb_refdst = (unsigned long)dst;
        }
+
+       return skb->_skb_refdst != 0UL;
 }
 
 
index a0d2e0bb9a94b90da24b22a327253dc6e9c0e8ff..0e3c0d83bd9913a22ac0b0ea201d14c6af63ec02 100644 (file)
@@ -806,11 +806,12 @@ struct ipvs_master_sync_state {
        struct ip_vs_sync_buff  *sync_buff;
        unsigned long           sync_queue_len;
        unsigned int            sync_queue_delay;
-       struct task_struct      *master_thread;
        struct delayed_work     master_wakeup_work;
        struct netns_ipvs       *ipvs;
 };
 
+struct ip_vs_sync_thread_data;
+
 /* How much time to keep dests in trash */
 #define IP_VS_DEST_TRASH_PERIOD                (120 * HZ)
 
@@ -941,7 +942,8 @@ struct netns_ipvs {
        spinlock_t              sync_lock;
        struct ipvs_master_sync_state *ms;
        spinlock_t              sync_buff_lock;
-       struct task_struct      **backup_threads;
+       struct ip_vs_sync_thread_data *master_tinfo;
+       struct ip_vs_sync_thread_data *backup_tinfo;
        int                     threads_mask;
        volatile int            sync_state;
        struct mutex            sync_mutex;
index f2be5d041ba3ad53e6411fb4b0c42d4e757cf26e..7685cbda9f28b5b123336b089312eb638a68785f 100644 (file)
@@ -418,8 +418,7 @@ struct nft_set {
        unsigned char                   *udata;
        /* runtime data below here */
        const struct nft_set_ops        *ops ____cacheline_aligned;
-       u16                             flags:13,
-                                       bound:1,
+       u16                             flags:14,
                                        genmask:2;
        u8                              klen;
        u8                              dlen;
@@ -1337,12 +1336,15 @@ struct nft_trans_rule {
 struct nft_trans_set {
        struct nft_set                  *set;
        u32                             set_id;
+       bool                            bound;
 };
 
 #define nft_trans_set(trans)   \
        (((struct nft_trans_set *)trans->data)->set)
 #define nft_trans_set_id(trans)        \
        (((struct nft_trans_set *)trans->data)->set_id)
+#define nft_trans_set_bound(trans)     \
+       (((struct nft_trans_set *)trans->data)->bound)
 
 struct nft_trans_chain {
        bool                            update;
@@ -1373,12 +1375,15 @@ struct nft_trans_table {
 struct nft_trans_elem {
        struct nft_set                  *set;
        struct nft_set_elem             elem;
+       bool                            bound;
 };
 
 #define nft_trans_elem_set(trans)      \
        (((struct nft_trans_elem *)trans->data)->set)
 #define nft_trans_elem(trans)  \
        (((struct nft_trans_elem *)trans->data)->elem)
+#define nft_trans_elem_set_bound(trans)        \
+       (((struct nft_trans_elem *)trans->data)->bound)
 
 struct nft_trans_obj {
        struct nft_object               *obj;
index 9b80f814ab043b733ec3efbcb0c30f9e1f690815..94cb37a7bf7569c78bbad36f225fa1f90c782fd5 100644 (file)
@@ -12,6 +12,7 @@ struct psample_group {
        u32 group_num;
        u32 refcount;
        u32 seq;
+       struct rcu_head rcu;
 };
 
 struct psample_group *psample_group_get(struct net *net, u32 group_num);
index 8a5f70c7cdf24b08d863b79afe5bdc7710e6d88f..5e69fba181bc6b9d84130ea85c1787907ebd33b8 100644 (file)
@@ -21,7 +21,8 @@ struct sock_reuseport {
        unsigned int            synq_overflow_ts;
        /* ID stays the same even after the size of socks[] grows. */
        unsigned int            reuseport_id;
-       bool                    bind_inany;
+       unsigned int            bind_inany:1;
+       unsigned int            has_conns:1;
        struct bpf_prog __rcu   *prog;          /* optional BPF sock selector */
        struct sock             *socks[0];      /* array of sock pointers */
 };
@@ -35,6 +36,24 @@ extern struct sock *reuseport_select_sock(struct sock *sk,
                                          struct sk_buff *skb,
                                          int hdr_len);
 extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
+
+static inline bool reuseport_has_conns(struct sock *sk, bool set)
+{
+       struct sock_reuseport *reuse;
+       bool ret = false;
+
+       rcu_read_lock();
+       reuse = rcu_dereference(sk->sk_reuseport_cb);
+       if (reuse) {
+               if (set)
+                       reuse->has_conns = 1;
+               ret = reuse->has_conns;
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 int reuseport_get_id(struct sock_reuseport *reuse);
 
 #endif  /* _SOCK_REUSEPORT_H */
index e75661f92daaaa861a5c416d73674e38dccd1f9e..abcf53a6db045974a7a130b4e4e8035854709b2e 100644 (file)
@@ -1054,7 +1054,8 @@ void tcp_get_default_congestion_control(struct net *net, char *name);
 void tcp_get_available_congestion_control(char *buf, size_t len);
 void tcp_get_allowed_congestion_control(char *buf, size_t len);
 int tcp_set_allowed_congestion_control(char *allowed);
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
+                              bool reinit, bool cap_net_admin);
 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
 
@@ -1646,6 +1647,11 @@ static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
        return skb_rb_first(&sk->tcp_rtx_queue);
 }
 
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+       return skb_rb_last(&sk->tcp_rtx_queue);
+}
+
 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
 {
        return skb_peek(&sk->sk_write_queue);
index 95411057589146c810f56b65c3c2d148c672055c..98f5ad0319a2b538d0a76cc90f9480e4ef2f700d 100644 (file)
@@ -234,6 +234,7 @@ struct tls_offload_context_rx {
        (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
         TLS_DRIVER_STATE_SIZE)
 
+void tls_ctx_free(struct tls_context *ctx);
 int wait_on_pending_writer(struct sock *sk, long *timeo);
 int tls_sk_query(struct sock *sk, int optname, char __user *optval,
                int __user *optlen);
index ec299fcf55f7aff42e5e0fcc330865a664ceb28e..b7d63c3970d1821e519100d28986dbe5d601d0cd 100644 (file)
@@ -71,6 +71,7 @@
 
 extern struct workqueue_struct *ib_wq;
 extern struct workqueue_struct *ib_comp_wq;
+extern struct workqueue_struct *ib_comp_unbound_wq;
 
 union ib_gid {
        u8      raw[16];
@@ -290,8 +291,8 @@ struct ib_rss_caps {
 };
 
 enum ib_tm_cap_flags {
-       /*  Support tag matching on RC transport */
-       IB_TM_CAP_RC                = 1 << 0,
+       /*  Support tag matching with rendezvous offload for RC transport */
+       IB_TM_CAP_RNDV_RC = 1 << 0,
 };
 
 struct ib_tm_caps {
@@ -1576,9 +1577,10 @@ struct ib_ah {
 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
 
 enum ib_poll_context {
-       IB_POLL_DIRECT,         /* caller context, no hw completions */
-       IB_POLL_SOFTIRQ,        /* poll from softirq context */
-       IB_POLL_WORKQUEUE,      /* poll from workqueue */
+       IB_POLL_DIRECT,            /* caller context, no hw completions */
+       IB_POLL_SOFTIRQ,           /* poll from softirq context */
+       IB_POLL_WORKQUEUE,         /* poll from workqueue */
+       IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
 };
 
 struct ib_cq {
@@ -1595,6 +1597,7 @@ struct ib_cq {
                struct irq_poll         iop;
                struct work_struct      work;
        };
+       struct workqueue_struct *comp_wq;
        /*
         * Implementation details of the RDMA core, don't use in drivers:
         */
index bb8092fa1e364114fc60458e86c7bc73f3beeb93..58507c7783cf01cf64ea2987af9472fd75e44428 100644 (file)
@@ -241,6 +241,7 @@ struct fcoe_fcf {
  * @vn_mac:    VN_Node assigned MAC address for data
  */
 struct fcoe_rport {
+       struct fc_rport_priv rdata;
        unsigned long time;
        u16 fcoe_len;
        u16 flags;
index e03bd9d41fa8fd693fa9bcf88620bc2db887980d..7b196d23462646364e01d896b2c43cae2d755b35 100644 (file)
@@ -6,8 +6,6 @@ struct scsi_cmnd;
 struct scsi_device;
 struct scsi_sense_hdr;
 
-#define SCSI_LOG_BUFSIZE 128
-
 extern void scsi_print_command(struct scsi_cmnd *);
 extern size_t __scsi_format_command(char *, size_t,
                                   const unsigned char *, size_t);
index 2b7e227960e1602123aecf88b11f9be00e975655..91f403341dd7a2cbafe2b8a6d3159687e538efcb 100644 (file)
@@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
 struct scsi_eh_save {
        /* saved state */
        int result;
+       unsigned int resid_len;
        int eh_eflags;
        enum dma_data_direction data_direction;
        unsigned underflow;
index e87f2d5b3cc656c852d0892ff37f3d723dd3b46d..127c2713b543a9652b25201e30426b4058ae25b4 100644 (file)
@@ -171,10 +171,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
        if (snd_BUG_ON(!stream))
                return;
 
-       if (stream->direction == SND_COMPRESS_PLAYBACK)
-               stream->runtime->state = SNDRV_PCM_STATE_SETUP;
-       else
-               stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+       stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 
        wake_up(&stream->runtime->sleep);
 }
index fdaaafdc7a0039a0c4a6e93b6d2b7ed88ea5b74a..5165e3b308990c56d29d524a09ab033a82a2fcb5 100644 (file)
@@ -353,6 +353,8 @@ struct device;
 #define SND_SOC_DAPM_WILL_PMD   0x80    /* called at start of sequence */
 #define SND_SOC_DAPM_PRE_POST_PMD \
                                (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
+#define SND_SOC_DAPM_PRE_POST_PMU \
+                               (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
 
 /* convenience event type detection */
 #define SND_SOC_DAPM_EVENT_ON(e)       \
index b401c4e3639459e6e4e876cd1fae17e42251fec5..eb3f668b8bce43da19ed345c36524b4fd920bc26 100644 (file)
@@ -1655,6 +1655,7 @@ TRACE_EVENT(qgroup_update_reserve,
                __entry->qgid           = qgroup->qgroupid;
                __entry->cur_reserved   = qgroup->rsv.values[type];
                __entry->diff           = diff;
+               __entry->type           = type;
        ),
 
        TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
@@ -1677,6 +1678,7 @@ TRACE_EVENT(qgroup_meta_reserve,
        TP_fast_assign_btrfs(root->fs_info,
                __entry->refroot        = root->objectid;
                __entry->diff           = diff;
+               __entry->type           = type;
        ),
 
        TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
@@ -1693,7 +1695,6 @@ TRACE_EVENT(qgroup_meta_convert,
        TP_STRUCT__entry_btrfs(
                __field(        u64,    refroot                 )
                __field(        s64,    diff                    )
-               __field(        int,    type                    )
        ),
 
        TP_fast_assign_btrfs(root->fs_info,
index 6d182746afab36a486183b9a378f51f114f344fc..0fe169c6afd84634f8242467b516e1db2fe60002 100644 (file)
@@ -500,10 +500,10 @@ rxrpc_tx_points;
 #define E_(a, b)       { a, b }
 
 TRACE_EVENT(rxrpc_local,
-           TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
+           TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
                     int usage, const void *where),
 
-           TP_ARGS(local, op, usage, where),
+           TP_ARGS(local_debug_id, op, usage, where),
 
            TP_STRUCT__entry(
                    __field(unsigned int,       local           )
@@ -513,7 +513,7 @@ TRACE_EVENT(rxrpc_local,
                             ),
 
            TP_fast_assign(
-                   __entry->local = local->debug_id;
+                   __entry->local = local_debug_id;
                    __entry->op = op;
                    __entry->usage = usage;
                    __entry->where = where;
@@ -1073,7 +1073,7 @@ TRACE_EVENT(rxrpc_recvmsg,
                             ),
 
            TP_fast_assign(
-                   __entry->call = call->debug_id;
+                   __entry->call = call ? call->debug_id : 0;
                    __entry->why = why;
                    __entry->seq = seq;
                    __entry->offset = offset;
@@ -1381,7 +1381,7 @@ TRACE_EVENT(rxrpc_rx_eproto,
                             ),
 
            TP_fast_assign(
-                   __entry->call = call->debug_id;
+                   __entry->call = call ? call->debug_id : 0;
                    __entry->serial = serial;
                    __entry->why = why;
                           ),
index 2932600ce271cfd1908ac123b435e419a3e04e4e..d143e277cdaf25739e69ef718b123f2a8bdd653a 100644 (file)
@@ -2486,6 +2486,7 @@ struct bpf_prog_info {
        char name[BPF_OBJ_NAME_LEN];
        __u32 ifindex;
        __u32 gpl_compatible:1;
+       __u32 :31; /* alignment pad */
        __u64 netns_dev;
        __u64 netns_ino;
        __u32 nr_jited_ksyms;
index aa6623efd2dd085e50b899285e1bc2c9a732a03e..d50d51a57fe4ec27ae11846548c7dd739014fa70 100644 (file)
@@ -7,19 +7,6 @@
 #define CODA_PSDEV_MAJOR 67
 #define MAX_CODADEVS  5           /* how many do we allow */
 
-
-/* messages between coda filesystem in kernel and Venus */
-struct upc_req {
-       struct list_head    uc_chain;
-       caddr_t             uc_data;
-       u_short             uc_flags;
-       u_short             uc_inSize;  /* Size is at most 5000 bytes */
-       u_short             uc_outSize;
-       u_short             uc_opcode;  /* copied from data to save lookup */
-       int                 uc_unique;
-       wait_queue_head_t   uc_sleep;   /* process' wait queue */
-};
-
 #define CODA_REQ_ASYNC  0x1
 #define CODA_REQ_READ   0x2
 #define CODA_REQ_WRITE  0x4
index 4941628a4fb9b919720d3fd9a949828f2c397fc0..5ec88e7548a9f55e19c1b90f990f2f617ca3c723 100644 (file)
@@ -16,6 +16,7 @@
 #define CAPI_MSG_BASELEN               8
 #define CAPI_DATA_B3_REQ_LEN           (CAPI_MSG_BASELEN+4+4+2+2+2)
 #define CAPI_DATA_B3_RESP_LEN          (CAPI_MSG_BASELEN+4+2)
+#define CAPI_DISCONNECT_B3_RESP_LEN    (CAPI_MSG_BASELEN+4)
 
 /*----- CAPI commands -----*/
 #define CAPI_ALERT                 0x01
index 7b8c9e19bad1c2bf72c21dcf6b358559d5e4b4ea..0f3cb13db8e93efe9d6319395f21849e896f7a02 100644 (file)
 
 /* keyctl structures */
 struct keyctl_dh_params {
-       __s32 private;
+       union {
+#ifndef __cplusplus
+               __s32 private;
+#endif
+               __s32 priv;
+       };
        __s32 prime;
        __s32 base;
 };
index 5c8a4d760ee3443c0292112f18852a807b037184..b5123ab8d54a8c7fd4e75d6ee00fad938c1ded81 100644 (file)
@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
        struct nf_acct  *nfacct;
 };
 
+struct xt_nfacct_match_info_v1 {
+       char            name[NFACCT_NAME_MAX];
+       struct nf_acct  *nfacct __attribute__((aligned(8)));
+};
+
 #endif /* _XT_NFACCT_MATCH_H */
index a7e66ab11d1d656d8fb9f861f4b46949a7ac124b..c23f91ae5fe8b17c6176c91027b63840dd1c2a0b 100644 (file)
@@ -29,7 +29,7 @@
 
 #include <linux/types.h>
 #include <linux/magic.h>
-
+#include <asm/byteorder.h>
 
 #define NILFS_INODE_BMAP_SIZE  7
 
@@ -533,19 +533,19 @@ enum {
 static inline void                                                     \
 nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp)               \
 {                                                                      \
-       cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) |          \
-                                  (1UL << NILFS_CHECKPOINT_##flag));   \
+       cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) |      \
+                                    (1UL << NILFS_CHECKPOINT_##flag)); \
 }                                                                      \
 static inline void                                                     \
 nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp)             \
 {                                                                      \
-       cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) &          \
+       cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) &      \
                                   ~(1UL << NILFS_CHECKPOINT_##flag));  \
 }                                                                      \
 static inline int                                                      \
 nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp)             \
 {                                                                      \
-       return !!(le32_to_cpu(cp->cp_flags) &                           \
+       return !!(__le32_to_cpu(cp->cp_flags) &                         \
                  (1UL << NILFS_CHECKPOINT_##flag));                    \
 }
 
@@ -595,20 +595,20 @@ enum {
 static inline void                                                     \
 nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su)         \
 {                                                                      \
-       su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) |          \
+       su->su_flags = __cpu_to_le32(__le32_to_cpu(su->su_flags) |      \
                                   (1UL << NILFS_SEGMENT_USAGE_##flag));\
 }                                                                      \
 static inline void                                                     \
 nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su)       \
 {                                                                      \
        su->su_flags =                                                  \
-               cpu_to_le32(le32_to_cpu(su->su_flags) &                 \
+               __cpu_to_le32(__le32_to_cpu(su->su_flags) &             \
                            ~(1UL << NILFS_SEGMENT_USAGE_##flag));      \
 }                                                                      \
 static inline int                                                      \
 nilfs_segment_usage_##name(const struct nilfs_segment_usage *su)       \
 {                                                                      \
-       return !!(le32_to_cpu(su->su_flags) &                           \
+       return !!(__le32_to_cpu(su->su_flags) &                         \
                  (1UL << NILFS_SEGMENT_USAGE_##flag));                 \
 }
 
@@ -619,15 +619,15 @@ NILFS_SEGMENT_USAGE_FNS(ERROR, error)
 static inline void
 nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
 {
-       su->su_lastmod = cpu_to_le64(0);
-       su->su_nblocks = cpu_to_le32(0);
-       su->su_flags = cpu_to_le32(0);
+       su->su_lastmod = __cpu_to_le64(0);
+       su->su_nblocks = __cpu_to_le32(0);
+       su->su_flags = __cpu_to_le32(0);
 }
 
 static inline int
 nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
 {
-       return !le32_to_cpu(su->su_flags);
+       return !__le32_to_cpu(su->su_flags);
 }
 
 /**
index 7acc16f349427a772f507e6a25b66b5b95b210d7..fa43dd5a7b3dcc84d29ecf441e28986e2a6bcec4 100644 (file)
@@ -2732,7 +2732,7 @@ enum nl80211_attrs {
 #define NL80211_HT_CAPABILITY_LEN              26
 #define NL80211_VHT_CAPABILITY_LEN             12
 #define NL80211_HE_MIN_CAPABILITY_LEN           16
-#define NL80211_HE_MAX_CAPABILITY_LEN           51
+#define NL80211_HE_MAX_CAPABILITY_LEN           54
 #define NL80211_MAX_NR_CIPHER_SUITES           5
 #define NL80211_MAX_NR_AKM_SUITES              2
 
index c3e6bc643a7bde4bf770f1c191a472d84c4af945..1650d39decaec96869b194bfdf4a0a910316d6f4 100644 (file)
@@ -3,6 +3,7 @@
 #define _XEN_EVENTS_H
 
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #ifdef CONFIG_PCI_MSI
 #include <linux/msi.h>
 #endif
@@ -59,7 +60,7 @@ void evtchn_put(unsigned int evtchn);
 
 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
 void rebind_evtchn_irq(int evtchn, int irq);
-int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu);
+int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
 
 static inline void notify_remote_via_evtchn(int port)
 {
index cd5fb00fcb549645a646bef2c3d72132cb2550cd..dab8d63459f63958bc303aed7c398b61e418b68e 100644 (file)
@@ -524,7 +524,7 @@ static void __init free_initrd(void)
        unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
        unsigned long crashk_end   = (unsigned long)__va(crashk_res.end);
 #endif
-       if (do_retain_initrd)
+       if (do_retain_initrd || !initrd_start)
                goto skip;
 
 #ifdef CONFIG_KEXEC_CORE
index bce7af1546d9c5b07532388b72ab8e4e599f523f..de4070d5472f2d6ce2dd52b0f1a08fda5b89a255 100644 (file)
@@ -389,7 +389,6 @@ static void mqueue_evict_inode(struct inode *inode)
 {
        struct mqueue_inode_info *info;
        struct user_struct *user;
-       unsigned long mq_bytes, mq_treesize;
        struct ipc_namespace *ipc_ns;
        struct msg_msg *msg, *nmsg;
        LIST_HEAD(tmp_msg);
@@ -412,16 +411,18 @@ static void mqueue_evict_inode(struct inode *inode)
                free_msg(msg);
        }
 
-       /* Total amount of bytes accounted for the mqueue */
-       mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
-               min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
-               sizeof(struct posix_msg_tree_node);
-
-       mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
-                                 info->attr.mq_msgsize);
-
        user = info->user;
        if (user) {
+               unsigned long mq_bytes, mq_treesize;
+
+               /* Total amount of bytes accounted for the mqueue */
+               mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+                       min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+                       sizeof(struct posix_msg_tree_node);
+
+               mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+                                         info->attr.mq_msgsize);
+
                spin_lock(&mq_lock);
                user->mq_bytes -= mq_bytes;
                /*
index 0488b8258321257aac395f9ece7bef5e04004918..ffc39a7e028d7e62705e5beb8e966201bc869014 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-y := core.o
+CFLAGS_core.o += $(call cc-disable-warning, override-init)
 
 obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
 obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
index 118e3a8fc764692d5093371b1bba71817fbb86e0..6e544e364821e92846df0ac7f175363b4af51a5a 100644 (file)
@@ -1454,19 +1454,25 @@ static int bpf_prog_load(union bpf_attr *attr)
        if (err)
                goto free_used_maps;
 
+       /* Upon success of bpf_prog_alloc_id(), the BPF prog is
+        * effectively publicly exposed. However, retrieving via
+        * bpf_prog_get_fd_by_id() will take another reference,
+        * therefore it cannot be gone underneath us.
+        *
+        * Only for the time /after/ successful bpf_prog_new_fd()
+        * and before returning to userspace, we might just hold
+        * one reference and any parallel close on that fd could
+        * rip everything out. Hence, below notifications must
+        * happen before bpf_prog_new_fd().
+        *
+        * Also, any failure handling from this point onwards must
+        * be using bpf_prog_put() given the program is exposed.
+        */
+       bpf_prog_kallsyms_add(prog);
+
        err = bpf_prog_new_fd(prog);
-       if (err < 0) {
-               /* failed to allocate fd.
-                * bpf_prog_put() is needed because the above
-                * bpf_prog_alloc_id() has published the prog
-                * to the userspace and the userspace may
-                * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
-                */
+       if (err < 0)
                bpf_prog_put(prog);
-               return err;
-       }
-
-       bpf_prog_kallsyms_add(prog);
        return err;
 
 free_used_maps:
index 81441117f6114ecc9b89823cb032cdfd9a570eed..78ef274b036ed13524019ab336cbded16858864e 100644 (file)
@@ -212,7 +212,8 @@ static struct cftype cgroup_base_files[];
 
 static int cgroup_apply_control(struct cgroup *cgrp);
 static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
-static void css_task_iter_advance(struct css_task_iter *it);
+static void css_task_iter_skip(struct css_task_iter *it,
+                              struct task_struct *task);
 static int cgroup_destroy_locked(struct cgroup *cgrp);
 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
                                              struct cgroup_subsys *ss);
@@ -672,6 +673,7 @@ struct css_set init_css_set = {
        .dom_cset               = &init_css_set,
        .tasks                  = LIST_HEAD_INIT(init_css_set.tasks),
        .mg_tasks               = LIST_HEAD_INIT(init_css_set.mg_tasks),
+       .dying_tasks            = LIST_HEAD_INIT(init_css_set.dying_tasks),
        .task_iters             = LIST_HEAD_INIT(init_css_set.task_iters),
        .threaded_csets         = LIST_HEAD_INIT(init_css_set.threaded_csets),
        .cgrp_links             = LIST_HEAD_INIT(init_css_set.cgrp_links),
@@ -775,6 +777,21 @@ static void css_set_update_populated(struct css_set *cset, bool populated)
                cgroup_update_populated(link->cgrp, populated);
 }
 
+/*
+ * @task is leaving, advance task iterators which are pointing to it so
+ * that they can resume at the next position.  Advancing an iterator might
+ * remove it from the list, use safe walk.  See css_task_iter_skip() for
+ * details.
+ */
+static void css_set_skip_task_iters(struct css_set *cset,
+                                   struct task_struct *task)
+{
+       struct css_task_iter *it, *pos;
+
+       list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
+               css_task_iter_skip(it, task);
+}
+
 /**
  * css_set_move_task - move a task from one css_set to another
  * @task: task being moved
@@ -800,22 +817,9 @@ static void css_set_move_task(struct task_struct *task,
                css_set_update_populated(to_cset, true);
 
        if (from_cset) {
-               struct css_task_iter *it, *pos;
-
                WARN_ON_ONCE(list_empty(&task->cg_list));
 
-               /*
-                * @task is leaving, advance task iterators which are
-                * pointing to it so that they can resume at the next
-                * position.  Advancing an iterator might remove it from
-                * the list, use safe walk.  See css_task_iter_advance*()
-                * for details.
-                */
-               list_for_each_entry_safe(it, pos, &from_cset->task_iters,
-                                        iters_node)
-                       if (it->task_pos == &task->cg_list)
-                               css_task_iter_advance(it);
-
+               css_set_skip_task_iters(from_cset, task);
                list_del_init(&task->cg_list);
                if (!css_set_populated(from_cset))
                        css_set_update_populated(from_cset, false);
@@ -1142,6 +1146,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
        cset->dom_cset = cset;
        INIT_LIST_HEAD(&cset->tasks);
        INIT_LIST_HEAD(&cset->mg_tasks);
+       INIT_LIST_HEAD(&cset->dying_tasks);
        INIT_LIST_HEAD(&cset->task_iters);
        INIT_LIST_HEAD(&cset->threaded_csets);
        INIT_HLIST_NODE(&cset->hlist);
@@ -4149,15 +4154,18 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
                        it->task_pos = NULL;
                        return;
                }
-       } while (!css_set_populated(cset));
+       } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
 
        if (!list_empty(&cset->tasks))
                it->task_pos = cset->tasks.next;
-       else
+       else if (!list_empty(&cset->mg_tasks))
                it->task_pos = cset->mg_tasks.next;
+       else
+               it->task_pos = cset->dying_tasks.next;
 
        it->tasks_head = &cset->tasks;
        it->mg_tasks_head = &cset->mg_tasks;
+       it->dying_tasks_head = &cset->dying_tasks;
 
        /*
         * We don't keep css_sets locked across iteration steps and thus
@@ -4183,9 +4191,20 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
        list_add(&it->iters_node, &cset->task_iters);
 }
 
+static void css_task_iter_skip(struct css_task_iter *it,
+                              struct task_struct *task)
+{
+       lockdep_assert_held(&css_set_lock);
+
+       if (it->task_pos == &task->cg_list) {
+               it->task_pos = it->task_pos->next;
+               it->flags |= CSS_TASK_ITER_SKIPPED;
+       }
+}
+
 static void css_task_iter_advance(struct css_task_iter *it)
 {
-       struct list_head *next;
+       struct task_struct *task;
 
        lockdep_assert_held(&css_set_lock);
 repeat:
@@ -4195,25 +4214,40 @@ repeat:
                 * consumed first and then ->mg_tasks.  After ->mg_tasks,
                 * we move onto the next cset.
                 */
-               next = it->task_pos->next;
-
-               if (next == it->tasks_head)
-                       next = it->mg_tasks_head->next;
+               if (it->flags & CSS_TASK_ITER_SKIPPED)
+                       it->flags &= ~CSS_TASK_ITER_SKIPPED;
+               else
+                       it->task_pos = it->task_pos->next;
 
-               if (next == it->mg_tasks_head)
+               if (it->task_pos == it->tasks_head)
+                       it->task_pos = it->mg_tasks_head->next;
+               if (it->task_pos == it->mg_tasks_head)
+                       it->task_pos = it->dying_tasks_head->next;
+               if (it->task_pos == it->dying_tasks_head)
                        css_task_iter_advance_css_set(it);
-               else
-                       it->task_pos = next;
        } else {
                /* called from start, proceed to the first cset */
                css_task_iter_advance_css_set(it);
        }
 
-       /* if PROCS, skip over tasks which aren't group leaders */
-       if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
-           !thread_group_leader(list_entry(it->task_pos, struct task_struct,
-                                           cg_list)))
-               goto repeat;
+       if (!it->task_pos)
+               return;
+
+       task = list_entry(it->task_pos, struct task_struct, cg_list);
+
+       if (it->flags & CSS_TASK_ITER_PROCS) {
+               /* if PROCS, skip over tasks which aren't group leaders */
+               if (!thread_group_leader(task))
+                       goto repeat;
+
+               /* and dying leaders w/o live member threads */
+               if (!atomic_read(&task->signal->live))
+                       goto repeat;
+       } else {
+               /* skip all dying ones */
+               if (task->flags & PF_EXITING)
+                       goto repeat;
+       }
 }
 
 /**
@@ -4269,6 +4303,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
 
        spin_lock_irq(&css_set_lock);
 
+       /* @it may be half-advanced by skips, finish advancing */
+       if (it->flags & CSS_TASK_ITER_SKIPPED)
+               css_task_iter_advance(it);
+
        if (it->task_pos) {
                it->cur_task = list_entry(it->task_pos, struct task_struct,
                                          cg_list);
@@ -5670,6 +5708,7 @@ void cgroup_exit(struct task_struct *tsk)
        if (!list_empty(&tsk->cg_list)) {
                spin_lock_irq(&css_set_lock);
                css_set_move_task(tsk, cset, NULL, false);
+               list_add_tail(&tsk->cg_list, &cset->dying_tasks);
                cset->nr_tasks--;
                spin_unlock_irq(&css_set_lock);
        } else {
@@ -5690,6 +5729,13 @@ void cgroup_release(struct task_struct *task)
        do_each_subsys_mask(ss, ssid, have_release_callback) {
                ss->release(task);
        } while_each_subsys_mask();
+
+       if (use_task_css_set_links) {
+               spin_lock_irq(&css_set_lock);
+               css_set_skip_task_iters(task_css_set(task), task);
+               list_del_init(&task->cg_list);
+               spin_unlock_irq(&css_set_lock);
+       }
 }
 
 void cgroup_free(struct task_struct *task)
index 46aefe5c0e35ef2869c3e324b711ed54b3f5b1c1..d9f855cb9f6f97e6ffeb061135649b662b2a3f34 100644 (file)
@@ -1925,6 +1925,9 @@ static ssize_t write_cpuhp_fail(struct device *dev,
        if (ret)
                return ret;
 
+       if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
+               return -EINVAL;
+
        /*
         * Cannot fail STARTING/DYING callbacks.
         */
index efd04b2ec84c2c4675940a688cf7c084541efa9f..5ab1f7ec946e13a5848ced77d0ecec559ea4d53e 100644 (file)
@@ -147,7 +147,10 @@ void __put_cred(struct cred *cred)
        BUG_ON(cred == current->cred);
        BUG_ON(cred == current->real_cred);
 
-       call_rcu(&cred->rcu, put_cred_rcu);
+       if (cred->non_rcu)
+               put_cred_rcu(&cred->rcu);
+       else
+               call_rcu(&cred->rcu, put_cred_rcu);
 }
 EXPORT_SYMBOL(__put_cred);
 
@@ -258,6 +261,7 @@ struct cred *prepare_creds(void)
        old = task->cred;
        memcpy(new, old, sizeof(struct cred));
 
+       new->non_rcu = 0;
        atomic_set(&new->usage, 1);
        set_cred_subscribers(new, 0);
        get_group_info(new->group_info);
@@ -537,7 +541,19 @@ const struct cred *override_creds(const struct cred *new)
 
        validate_creds(old);
        validate_creds(new);
-       get_cred(new);
+
+       /*
+        * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
+        *
+        * That means that we do not clear the 'non_rcu' flag, since
+        * we are only installing the cred into the thread-synchronous
+        * '->cred' pointer, not the '->real_cred' pointer that is
+        * visible to other threads under RCU.
+        *
+        * Also note that we did validate_creds() manually, not depending
+        * on the validation in 'get_cred()'.
+        */
+       get_new_cred((struct cred *)new);
        alter_cred_subscribers(new, 1);
        rcu_assign_pointer(current->cred, new);
        alter_cred_subscribers(old, -1);
@@ -620,6 +636,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
        validate_creds(old);
 
        *new = *old;
+       new->non_rcu = 0;
        atomic_set(&new->usage, 1);
        set_cred_subscribers(new, 0);
        get_uid(new->user);
index 597d40893862696ed76457c7071c8d5fd074f612..39128ff5821e950e2ed101b456ff247c49ce6f5d 100644 (file)
@@ -161,7 +161,7 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
 static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
-               ssize_t size, dma_addr_t *dma_handle)
+               ssize_t size, dma_addr_t *dma_handle, bool zero)
 {
        int order = get_order(size);
        unsigned long flags;
@@ -183,7 +183,8 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
        ret = mem->virt_base + (pageno << PAGE_SHIFT);
        spin_unlock_irqrestore(&mem->spinlock, flags);
-       memset(ret, 0, size);
+       if (zero)
+               memset(ret, 0, size);
        return ret;
 err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
@@ -205,14 +206,14 @@ err:
  * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
  */
 int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
-               dma_addr_t *dma_handle, void **ret)
+               dma_addr_t *dma_handle, void **ret, bool zero)
 {
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 
        if (!mem)
                return 0;
 
-       *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+       *ret = __dma_alloc_from_coherent(mem, size, dma_handle, zero);
        if (*ret)
                return 1;
 
@@ -231,7 +232,7 @@ void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
                return NULL;
 
        return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
-                       dma_handle);
+                       dma_handle, true);
 }
 
 static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
index fc482c8e0bd880711ef61d654dbd0bfaa6dc784c..57fb4dcff4349546109fbff0953e2b6f3f90a3aa 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/binfmts.h>
+#include <linux/elfcore.h>
 
 Elf_Half __weak elf_core_extra_phdrs(void)
 {
index 171b83ebed4a371a6878f6e54b86e831016b5087..625ba462e5bbd0657420f7be5eb5f352414f2ab0 100644 (file)
@@ -2541,6 +2541,9 @@ unlock:
        return ret;
 }
 
+static bool exclusive_event_installable(struct perf_event *event,
+                                       struct perf_event_context *ctx);
+
 /*
  * Attach a performance event to a context.
  *
@@ -2555,6 +2558,8 @@ perf_install_in_context(struct perf_event_context *ctx,
 
        lockdep_assert_held(&ctx->mutex);
 
+       WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
+
        if (event->cpu != -1)
                event->cpu = cpu;
 
@@ -4341,7 +4346,7 @@ static int exclusive_event_init(struct perf_event *event)
 {
        struct pmu *pmu = event->pmu;
 
-       if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+       if (!is_exclusive_pmu(pmu))
                return 0;
 
        /*
@@ -4372,7 +4377,7 @@ static void exclusive_event_destroy(struct perf_event *event)
 {
        struct pmu *pmu = event->pmu;
 
-       if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+       if (!is_exclusive_pmu(pmu))
                return;
 
        /* see comment in exclusive_event_init() */
@@ -4392,14 +4397,15 @@ static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
        return false;
 }
 
-/* Called under the same ctx::mutex as perf_install_in_context() */
 static bool exclusive_event_installable(struct perf_event *event,
                                        struct perf_event_context *ctx)
 {
        struct perf_event *iter_event;
        struct pmu *pmu = event->pmu;
 
-       if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+       lockdep_assert_held(&ctx->mutex);
+
+       if (!is_exclusive_pmu(pmu))
                return true;
 
        list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
@@ -4446,12 +4452,20 @@ static void _free_event(struct perf_event *event)
        if (event->destroy)
                event->destroy(event);
 
-       if (event->ctx)
-               put_ctx(event->ctx);
-
+       /*
+        * Must be after ->destroy(), due to uprobe_perf_close() using
+        * hw.target.
+        */
        if (event->hw.target)
                put_task_struct(event->hw.target);
 
+       /*
+        * perf_event_free_task() relies on put_ctx() being 'last', in particular
+        * all task references must be cleaned up.
+        */
+       if (event->ctx)
+               put_ctx(event->ctx);
+
        exclusive_event_destroy(event);
        module_put(event->pmu->module);
 
@@ -4631,8 +4645,17 @@ again:
        mutex_unlock(&event->child_mutex);
 
        list_for_each_entry_safe(child, tmp, &free_list, child_list) {
+               void *var = &child->ctx->refcount;
+
                list_del(&child->child_list);
                free_event(child);
+
+               /*
+                * Wake any perf_event_free_task() waiting for this event to be
+                * freed.
+                */
+               smp_mb(); /* pairs with wait_var_event() */
+               wake_up_var(var);
        }
 
 no_ctx:
@@ -5906,7 +5929,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user,
        if (user_mode(regs)) {
                regs_user->abi = perf_reg_abi(current);
                regs_user->regs = regs;
-       } else if (current->mm) {
+       } else if (!(current->flags & PF_KTHREAD)) {
                perf_get_regs_user(regs_user, regs, regs_user_copy);
        } else {
                regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
@@ -6790,7 +6813,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 static int __perf_pmu_output_stop(void *info)
 {
        struct perf_event *event = info;
-       struct pmu *pmu = event->pmu;
+       struct pmu *pmu = event->ctx->pmu;
        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
        struct remote_output ro = {
                .rb     = event->rb,
@@ -10613,11 +10636,6 @@ SYSCALL_DEFINE5(perf_event_open,
                goto err_alloc;
        }
 
-       if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
-               err = -EBUSY;
-               goto err_context;
-       }
-
        /*
         * Look up the group leader (we will attach this event to it):
         */
@@ -10705,6 +10723,18 @@ SYSCALL_DEFINE5(perf_event_open,
                                move_group = 0;
                        }
                }
+
+               /*
+                * Failure to create exclusive events returns -EBUSY.
+                */
+               err = -EBUSY;
+               if (!exclusive_event_installable(group_leader, ctx))
+                       goto err_locked;
+
+               for_each_sibling_event(sibling, group_leader) {
+                       if (!exclusive_event_installable(sibling, ctx))
+                               goto err_locked;
+               }
        } else {
                mutex_lock(&ctx->mutex);
        }
@@ -10741,9 +10771,6 @@ SYSCALL_DEFINE5(perf_event_open,
         * because we need to serialize with concurrent event creation.
         */
        if (!exclusive_event_installable(event, ctx)) {
-               /* exclusive and group stuff are assumed mutually exclusive */
-               WARN_ON_ONCE(move_group);
-
                err = -EBUSY;
                goto err_locked;
        }
@@ -10930,7 +10957,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
                goto err_unlock;
        }
 
-       perf_install_in_context(ctx, event, cpu);
+       perf_install_in_context(ctx, event, event->cpu);
        perf_unpin_context(ctx);
        mutex_unlock(&ctx->mutex);
 
@@ -11210,11 +11237,11 @@ static void perf_free_event(struct perf_event *event,
 }
 
 /*
- * Free an unexposed, unused context as created by inheritance by
- * perf_event_init_task below, used by fork() in case of fail.
+ * Free a context as created by inheritance by perf_event_init_task() below,
+ * used by fork() in case of fail.
  *
- * Not all locks are strictly required, but take them anyway to be nice and
- * help out with the lockdep assertions.
+ * Even though the task has never lived, the context and events have been
+ * exposed through the child_list, so we must take care tearing it all down.
  */
 void perf_event_free_task(struct task_struct *task)
 {
@@ -11244,7 +11271,23 @@ void perf_event_free_task(struct task_struct *task)
                        perf_free_event(event, ctx);
 
                mutex_unlock(&ctx->mutex);
-               put_ctx(ctx);
+
+               /*
+                * perf_event_release_kernel() could've stolen some of our
+                * child events and still have them on its free_list. In that
+                * case we must wait for these events to have been freed (in
+                * particular all their references to this task must've been
+                * dropped).
+                *
+                * Without this copy_process() will unconditionally free this
+                * task (irrespective of its reference count) and
+                * _free_event()'s put_task_struct(event->hw.target) will be a
+                * use-after-free.
+                *
+                * Wait for all events to drop their context reference.
+                */
+               wait_var_event(&ctx->refcount, atomic_read(&ctx->refcount) == 1);
+               put_ctx(ctx); /* must be last */
        }
 }
 
index d6b56180827c73f29fa21eef238c73c1a01540ad..bf3f2d325faaf0b936f131e46f72ca9a63c39bba 100644 (file)
@@ -426,7 +426,7 @@ static int hw_breakpoint_parse(struct perf_event *bp,
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
-       struct arch_hw_breakpoint hw;
+       struct arch_hw_breakpoint hw = { };
        int err;
 
        err = reserve_bp_slot(bp);
@@ -474,7 +474,7 @@ int
 modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
                                bool check)
 {
-       struct arch_hw_breakpoint hw;
+       struct arch_hw_breakpoint hw = { };
        int err;
 
        err = hw_breakpoint_parse(bp, attr, &hw);
index 5c0964dc805ac7bf7683fc128ec002dc82ef77c4..e10de9836dd77ec9a2a08c2991f79f6748a110ea 100644 (file)
@@ -194,6 +194,7 @@ repeat:
        rcu_read_unlock();
 
        proc_flush_task(p);
+       cgroup_release(p);
 
        write_lock_irq(&tasklist_lock);
        ptrace_release_task(p);
@@ -219,7 +220,6 @@ repeat:
        }
 
        write_unlock_irq(&tasklist_lock);
-       cgroup_release(p);
        release_thread(p);
        call_rcu(&p->rcu, delayed_put_task_struct);
 
index 69874db3fba8319289a6a5725d81a30fa00d635a..aef1430bdce0b0b4eb78f4939637441baacab5fa 100644 (file)
@@ -679,7 +679,7 @@ void __put_task_struct(struct task_struct *tsk)
        WARN_ON(tsk == current);
 
        cgroup_free(tsk);
-       task_numa_free(tsk);
+       task_numa_free(tsk, true);
        security_task_free(tsk);
        exit_creds(tsk);
        delayacct_tsk_free(tsk);
@@ -2623,7 +2623,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
        struct ctl_table t;
        int ret;
        int threads = max_threads;
-       int min = MIN_THREADS;
+       int min = 1;
        int max = MAX_THREADS;
 
        t = *table;
@@ -2635,7 +2635,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
        if (ret || !write)
                return ret;
 
-       set_max_threads(threads);
+       max_threads = threads;
 
        return 0;
 }
index 16cbf6beb276844a532f9fc886cf6dd0f5c8f91e..ae60cae24e9aa3db1a6ac950c474e83854f73b37 100644 (file)
@@ -90,7 +90,7 @@ unsigned long probe_irq_on(void)
                        /* It triggered already - consider it spurious. */
                        if (!(desc->istate & IRQS_WAITING)) {
                                desc->istate &= ~IRQS_AUTODETECT;
-                               irq_shutdown(desc);
+                               irq_shutdown_and_deactivate(desc);
                        } else
                                if (i < 32)
                                        mask |= 1 << i;
@@ -127,7 +127,7 @@ unsigned int probe_irq_mask(unsigned long val)
                                mask |= 1 << i;
 
                        desc->istate &= ~IRQS_AUTODETECT;
-                       irq_shutdown(desc);
+                       irq_shutdown_and_deactivate(desc);
                }
                raw_spin_unlock_irq(&desc->lock);
        }
@@ -169,7 +169,7 @@ int probe_irq_off(unsigned long val)
                                nr_of_irqs++;
                        }
                        desc->istate &= ~IRQS_AUTODETECT;
-                       irq_shutdown(desc);
+                       irq_shutdown_and_deactivate(desc);
                }
                raw_spin_unlock_irq(&desc->lock);
        }
index 379e89c706c9afbae8ee220051bed22092b04d48..09d914e486a2d32d84b0e002560b834da64427fa 100644 (file)
@@ -314,6 +314,12 @@ void irq_shutdown(struct irq_desc *desc)
                }
                irq_state_clr_started(desc);
        }
+}
+
+
+void irq_shutdown_and_deactivate(struct irq_desc *desc)
+{
+       irq_shutdown(desc);
        /*
         * This must be called even if the interrupt was never started up,
         * because the activation can happen before the interrupt is
index 5b1072e394b26d069bcbd44c55b68dc642bd30c9..6c7ca2e983a595ff561396e3d3b9400072426c56 100644 (file)
@@ -116,7 +116,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
                 */
                if (irqd_affinity_is_managed(d)) {
                        irqd_set_managed_shutdown(d);
-                       irq_shutdown(desc);
+                       irq_shutdown_and_deactivate(desc);
                        return false;
                }
                affinity = cpu_online_mask;
index e74e7eea76cf1996e11bfd31bf71dc59903dce7c..ea57f3d397fe35fcaacb227b0be9387f062c8c1f 100644 (file)
@@ -80,6 +80,7 @@ extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
 extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
 
 extern void irq_shutdown(struct irq_desc *desc);
+extern void irq_shutdown_and_deactivate(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
 extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
@@ -94,6 +95,10 @@ static inline void irq_mark_irq(unsigned int irq) { }
 extern void irq_mark_irq(unsigned int irq);
 #endif
 
+extern int __irq_get_irqchip_state(struct irq_data *data,
+                                  enum irqchip_irq_state which,
+                                  bool *state);
+
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 
 irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
index 8e009cee651742946174c1872f83b60b4dba21f8..26814a14013cbf5826c5a94b981a9d0202f8d92b 100644 (file)
@@ -294,6 +294,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
        }
 }
 
+static void irq_sysfs_del(struct irq_desc *desc)
+{
+       /*
+        * If irq_sysfs_init() has not yet been invoked (early boot), then
+        * irq_kobj_base is NULL and the descriptor was never added.
+        * kobject_del() complains about a object with no parent, so make
+        * it conditional.
+        */
+       if (irq_kobj_base)
+               kobject_del(&desc->kobj);
+}
+
 static int __init irq_sysfs_init(void)
 {
        struct irq_desc *desc;
@@ -324,6 +336,7 @@ static struct kobj_type irq_kobj_type = {
 };
 
 static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
+static void irq_sysfs_del(struct irq_desc *desc) {}
 
 #endif /* CONFIG_SYSFS */
 
@@ -437,7 +450,7 @@ static void free_desc(unsigned int irq)
         * The sysfs entry must be serialized against a concurrent
         * irq_sysfs_init() as well.
         */
-       kobject_del(&desc->kobj);
+       irq_sysfs_del(desc);
        delete_irq_desc(irq);
 
        /*
index cd4f9f3e8345ccb6025940da949405ced55437d6..23bcfa71077fe30f9733a2a3134295cd20d61fec 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
@@ -34,8 +35,9 @@ static int __init setup_forced_irqthreads(char *arg)
 early_param("threadirqs", setup_forced_irqthreads);
 #endif
 
-static void __synchronize_hardirq(struct irq_desc *desc)
+static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
 {
+       struct irq_data *irqd = irq_desc_get_irq_data(desc);
        bool inprogress;
 
        do {
@@ -51,6 +53,20 @@ static void __synchronize_hardirq(struct irq_desc *desc)
                /* Ok, that indicated we're done: double-check carefully. */
                raw_spin_lock_irqsave(&desc->lock, flags);
                inprogress = irqd_irq_inprogress(&desc->irq_data);
+
+               /*
+                * If requested and supported, check at the chip whether it
+                * is in flight at the hardware level, i.e. already pending
+                * in a CPU and waiting for service and acknowledge.
+                */
+               if (!inprogress && sync_chip) {
+                       /*
+                        * Ignore the return code. inprogress is only updated
+                        * when the chip supports it.
+                        */
+                       __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
+                                               &inprogress);
+               }
                raw_spin_unlock_irqrestore(&desc->lock, flags);
 
                /* Oops, that failed? */
@@ -73,13 +89,18 @@ static void __synchronize_hardirq(struct irq_desc *desc)
  *     Returns: false if a threaded handler is active.
  *
  *     This function may be called - with care - from IRQ context.
+ *
+ *     It does not check whether there is an interrupt in flight at the
+ *     hardware level, but not serviced yet, as this might deadlock when
+ *     called with interrupts disabled and the target CPU of the interrupt
+ *     is the current CPU.
  */
 bool synchronize_hardirq(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
 
        if (desc) {
-               __synchronize_hardirq(desc);
+               __synchronize_hardirq(desc, false);
                return !atomic_read(&desc->threads_active);
        }
 
@@ -95,14 +116,19 @@ EXPORT_SYMBOL(synchronize_hardirq);
  *     to complete before returning. If you use this function while
  *     holding a resource the IRQ handler may need you will deadlock.
  *
- *     This function may be called - with care - from IRQ context.
+ *     Can only be called from preemptible code as it might sleep when
+ *     an interrupt thread is associated to @irq.
+ *
+ *     It optionally makes sure (when the irq chip supports that method)
+ *     that the interrupt is not pending in any CPU and waiting for
+ *     service.
  */
 void synchronize_irq(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
 
        if (desc) {
-               __synchronize_hardirq(desc);
+               __synchronize_hardirq(desc, true);
                /*
                 * We made sure that no hardirq handler is
                 * running. Now verify that no threaded handlers are
@@ -1619,6 +1645,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
        /* If this was the last handler, shut down the IRQ line: */
        if (!desc->action) {
                irq_settings_clr_disable_unlazy(desc);
+               /* Only shutdown. Deactivate after synchronize_hardirq() */
                irq_shutdown(desc);
        }
 
@@ -1647,8 +1674,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
 
        unregister_handler_proc(irq, action);
 
-       /* Make sure it's not being used on another CPU: */
-       synchronize_hardirq(irq);
+       /*
+        * Make sure it's not being used on another CPU and if the chip
+        * supports it also make sure that there is no (not yet serviced)
+        * interrupt in flight at the hardware level.
+        */
+       __synchronize_hardirq(desc, true);
 
 #ifdef CONFIG_DEBUG_SHIRQ
        /*
@@ -1688,6 +1719,14 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
                 * require it to deallocate resources over the slow bus.
                 */
                chip_bus_lock(desc);
+               /*
+                * There is no interrupt on the fly anymore. Deactivate it
+                * completely.
+                */
+               raw_spin_lock_irqsave(&desc->lock, flags);
+               irq_domain_deactivate_irq(&desc->irq_data);
+               raw_spin_unlock_irqrestore(&desc->lock, flags);
+
                irq_release_resources(desc);
                chip_bus_sync_unlock(desc);
                irq_remove_timings(desc);
@@ -2173,6 +2212,28 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
 }
 EXPORT_SYMBOL_GPL(__request_percpu_irq);
 
+int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
+                           bool *state)
+{
+       struct irq_chip *chip;
+       int err = -EINVAL;
+
+       do {
+               chip = irq_data_get_irq_chip(data);
+               if (chip->irq_get_irqchip_state)
+                       break;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+               data = data->parent_data;
+#else
+               data = NULL;
+#endif
+       } while (data);
+
+       if (data)
+               err = chip->irq_get_irqchip_state(data, which, state);
+       return err;
+}
+
 /**
  *     irq_get_irqchip_state - returns the irqchip state of a interrupt.
  *     @irq: Interrupt line that is forwarded to a VM
@@ -2191,7 +2252,6 @@ int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
 {
        struct irq_desc *desc;
        struct irq_data *data;
-       struct irq_chip *chip;
        unsigned long flags;
        int err = -EINVAL;
 
@@ -2201,19 +2261,7 @@ int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
 
        data = irq_desc_get_irq_data(desc);
 
-       do {
-               chip = irq_data_get_irq_chip(data);
-               if (chip->irq_get_irqchip_state)
-                       break;
-#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-               data = data->parent_data;
-#else
-               data = NULL;
-#endif
-       } while (data);
-
-       if (data)
-               err = chip->irq_get_irqchip_state(data, which, state);
+       err = __irq_get_irqchip_state(data, which, state);
 
        irq_put_desc_busunlock(desc, flags);
        return err;
index 95414ad3506a919e21561d057e54ba27b8eb53c2..98c04ca5fa43d6a4515fff52a2d956c250777083 100644 (file)
@@ -36,6 +36,8 @@ static void resend_irqs(unsigned long arg)
                irq = find_first_bit(irqs_resend, nr_irqs);
                clear_bit(irq, irqs_resend);
                desc = irq_to_desc(irq);
+               if (!desc)
+                       continue;
                local_irq_disable();
                desc->handle_irq(desc);
                local_irq_enable();
index 02a0b01380d8ef678bf544054cacf586292fb0e1..ed87dac8378cced392e7ef41ca414392fd8e43f0 100644 (file)
@@ -262,8 +262,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
 {
        char namebuf[KSYM_NAME_LEN];
 
-       if (is_ksym_addr(addr))
-               return !!get_symbol_pos(addr, symbolsize, offset);
+       if (is_ksym_addr(addr)) {
+               get_symbol_pos(addr, symbolsize, offset);
+               return 1;
+       }
        return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
               !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
 }
index 23a83a4da38a1dffc5203c828a384531b6cce9b2..f50b90d0d1c284a6702e852fc125be4b124d6f17 100644 (file)
@@ -301,6 +301,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
        struct page *pages;
 
+       if (fatal_signal_pending(current))
+               return NULL;
        pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
        if (pages) {
                unsigned int count, i;
index 29ff6635d259745f8268e30ddf86ddf4022dde3d..b8efca9dc2cbbefb32257e6c4956bb5bc95e3ec8 100644 (file)
@@ -483,6 +483,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  */
 static void do_optimize_kprobes(void)
 {
+       lockdep_assert_held(&text_mutex);
        /*
         * The optimization/unoptimization refers online_cpus via
         * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -500,9 +501,7 @@ static void do_optimize_kprobes(void)
            list_empty(&optimizing_list))
                return;
 
-       mutex_lock(&text_mutex);
        arch_optimize_kprobes(&optimizing_list);
-       mutex_unlock(&text_mutex);
 }
 
 /*
@@ -513,6 +512,7 @@ static void do_unoptimize_kprobes(void)
 {
        struct optimized_kprobe *op, *tmp;
 
+       lockdep_assert_held(&text_mutex);
        /* See comment in do_optimize_kprobes() */
        lockdep_assert_cpus_held();
 
@@ -520,7 +520,6 @@ static void do_unoptimize_kprobes(void)
        if (list_empty(&unoptimizing_list))
                return;
 
-       mutex_lock(&text_mutex);
        arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
        /* Loop free_list for disarming */
        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -537,7 +536,6 @@ static void do_unoptimize_kprobes(void)
                } else
                        list_del_init(&op->list);
        }
-       mutex_unlock(&text_mutex);
 }
 
 /* Reclaim all kprobes on the free_list */
@@ -563,6 +561,7 @@ static void kprobe_optimizer(struct work_struct *work)
 {
        mutex_lock(&kprobe_mutex);
        cpus_read_lock();
+       mutex_lock(&text_mutex);
        /* Lock modules while optimizing kprobes */
        mutex_lock(&module_mutex);
 
@@ -590,6 +589,7 @@ static void kprobe_optimizer(struct work_struct *work)
        do_free_cleaned_kprobes();
 
        mutex_unlock(&module_mutex);
+       mutex_unlock(&text_mutex);
        cpus_read_unlock();
        mutex_unlock(&kprobe_mutex);
 
@@ -1505,7 +1505,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
        /* Ensure it is not in reserved area nor out of text */
        if (!kernel_text_address((unsigned long) p->addr) ||
            within_kprobe_blacklist((unsigned long) p->addr) ||
-           jump_label_text_reserved(p->addr, p->addr)) {
+           jump_label_text_reserved(p->addr, p->addr) ||
+           find_bug((unsigned long)p->addr)) {
                ret = -EINVAL;
                goto out;
        }
index 722c27c40e5b3b10b16517e0d4f2f7937574b4b6..a1250ad591c1d853641c77b1bbb729889977e5ba 100644 (file)
@@ -1027,6 +1027,7 @@ err:
        pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
                patch->mod->name, obj->mod->name, obj->mod->name);
        mod->klp_alive = false;
+       obj->mod = NULL;
        klp_cleanup_module_patches_limited(mod, patch);
        mutex_unlock(&klp_mutex);
 
index 26b57e24476f19ee9409ee4f90463b9afa7036b0..1e272f6a01e73e0359a874ef580a623c1af59b3d 100644 (file)
@@ -3326,17 +3326,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (depth) {
                hlock = curr->held_locks + depth - 1;
                if (hlock->class_idx == class_idx && nest_lock) {
-                       if (hlock->references) {
-                               /*
-                                * Check: unsigned int references:12, overflow.
-                                */
-                               if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
-                                       return 0;
+                       if (!references)
+                               references++;
 
+                       if (!hlock->references)
                                hlock->references++;
-                       } else {
-                               hlock->references = 2;
-                       }
+
+                       hlock->references += references;
+
+                       /* Overflow */
+                       if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
+                               return 0;
 
                        return 1;
                }
@@ -3605,6 +3605,9 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
        unsigned int depth;
        int i;
 
+       if (unlikely(!debug_locks))
+               return 0;
+
        depth = curr->lockdep_depth;
        /*
         * This function is about (re)setting the class of a held lock,
index 3dd980dfba2de3cefba6562e9301c1951a238907..6fcc4650f0c489bb51ee97caf8aab44ea819f7e8 100644 (file)
@@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
 
 static int lockdep_stats_show(struct seq_file *m, void *v)
 {
-       struct lock_class *class;
        unsigned long nr_unused = 0, nr_uncategorized = 0,
                      nr_irq_safe = 0, nr_irq_unsafe = 0,
                      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
@@ -210,6 +209,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
                      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
                      sum_forward_deps = 0;
 
+#ifdef CONFIG_PROVE_LOCKING
+       struct lock_class *class;
+
        list_for_each_entry(class, &all_lock_classes, lock_entry) {
 
                if (class->usage_mask == 0)
@@ -241,12 +243,12 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
                        nr_hardirq_read_unsafe++;
 
-#ifdef CONFIG_PROVE_LOCKING
                sum_forward_deps += lockdep_count_forward_deps(class);
-#endif
        }
 #ifdef CONFIG_DEBUG_LOCKDEP
        DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
+#endif
+
 #endif
        seq_printf(m, " lock-classes:                  %11lu [max: %lu]\n",
                        nr_lock_classes, MAX_LOCKDEP_KEYS);
index 5a0cf5f9008cafd0a4efb4dea2cc28116d724cc1..82104d3dd18e4641064f29baac5a8353fd0330e2 100644 (file)
@@ -271,7 +271,7 @@ pv_wait_early(struct pv_node *prev, int loop)
        if ((loop & PV_PREV_CHECK_MASK) != 0)
                return false;
 
-       return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);
+       return READ_ONCE(prev->state) != vcpu_running;
 }
 
 /*
index b8f37376856bdfaddd0d367a4478b4c2726d238f..8257110bf599cf3fe0a39d21ff285da5d3483d21 100644 (file)
 
 /*
  * Modules' sections will be aligned on page boundaries
- * to ensure complete separation of code and data, but
- * only when CONFIG_STRICT_MODULE_RWX=y
+ * to ensure complete separation of code and data
  */
-#ifdef CONFIG_STRICT_MODULE_RWX
 # define debug_align(X) ALIGN(X, PAGE_SIZE)
-#else
-# define debug_align(X) (X)
-#endif
 
 /* If this is set, the section belongs in the init part of the module */
 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
@@ -1699,6 +1694,8 @@ static int add_usage_links(struct module *mod)
        return ret;
 }
 
+static void module_remove_modinfo_attrs(struct module *mod, int end);
+
 static int module_add_modinfo_attrs(struct module *mod)
 {
        struct module_attribute *attr;
@@ -1713,24 +1710,34 @@ static int module_add_modinfo_attrs(struct module *mod)
                return -ENOMEM;
 
        temp_attr = mod->modinfo_attrs;
-       for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
+       for (i = 0; (attr = modinfo_attrs[i]); i++) {
                if (!attr->test || attr->test(mod)) {
                        memcpy(temp_attr, attr, sizeof(*temp_attr));
                        sysfs_attr_init(&temp_attr->attr);
                        error = sysfs_create_file(&mod->mkobj.kobj,
                                        &temp_attr->attr);
+                       if (error)
+                               goto error_out;
                        ++temp_attr;
                }
        }
+
+       return 0;
+
+error_out:
+       if (i > 0)
+               module_remove_modinfo_attrs(mod, --i);
        return error;
 }
 
-static void module_remove_modinfo_attrs(struct module *mod)
+static void module_remove_modinfo_attrs(struct module *mod, int end)
 {
        struct module_attribute *attr;
        int i;
 
        for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
+               if (end >= 0 && i > end)
+                       break;
                /* pick a field to test for end of list */
                if (!attr->attr.name)
                        break;
@@ -1818,7 +1825,7 @@ static int mod_sysfs_setup(struct module *mod,
        return 0;
 
 out_unreg_modinfo_attrs:
-       module_remove_modinfo_attrs(mod);
+       module_remove_modinfo_attrs(mod, -1);
 out_unreg_param:
        module_param_sysfs_remove(mod);
 out_unreg_holders:
@@ -1854,7 +1861,7 @@ static void mod_sysfs_fini(struct module *mod)
 {
 }
 
-static void module_remove_modinfo_attrs(struct module *mod)
+static void module_remove_modinfo_attrs(struct module *mod, int end)
 {
 }
 
@@ -1870,14 +1877,14 @@ static void init_param_lock(struct module *mod)
 static void mod_sysfs_teardown(struct module *mod)
 {
        del_usage_links(mod);
-       module_remove_modinfo_attrs(mod);
+       module_remove_modinfo_attrs(mod, -1);
        module_param_sysfs_remove(mod);
        kobject_put(mod->mkobj.drivers_dir);
        kobject_put(mod->holders_dir);
        mod_sysfs_fini(mod);
 }
 
-#ifdef CONFIG_STRICT_MODULE_RWX
+#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
 /*
  * LKM RO/NX protection: protect module's text/ro-data
  * from modification and any data from execution.
@@ -1900,6 +1907,7 @@ static void frob_text(const struct module_layout *layout,
                   layout->text_size >> PAGE_SHIFT);
 }
 
+#ifdef CONFIG_STRICT_MODULE_RWX
 static void frob_rodata(const struct module_layout *layout,
                        int (*set_memory)(unsigned long start, int num_pages))
 {
@@ -1949,13 +1957,9 @@ void module_enable_ro(const struct module *mod, bool after_init)
                return;
 
        frob_text(&mod->core_layout, set_memory_ro);
-       frob_text(&mod->core_layout, set_memory_x);
 
        frob_rodata(&mod->core_layout, set_memory_ro);
-
        frob_text(&mod->init_layout, set_memory_ro);
-       frob_text(&mod->init_layout, set_memory_x);
-
        frob_rodata(&mod->init_layout, set_memory_ro);
 
        if (after_init)
@@ -2036,11 +2040,23 @@ static void disable_ro_nx(const struct module_layout *layout)
        frob_writable_data(layout, set_memory_x);
 }
 
-#else
+#else /* !CONFIG_STRICT_MODULE_RWX */
 static void disable_ro_nx(const struct module_layout *layout) { }
 static void module_enable_nx(const struct module *mod) { }
 static void module_disable_nx(const struct module *mod) { }
-#endif
+#endif /*  CONFIG_STRICT_MODULE_RWX */
+
+static void module_enable_x(const struct module *mod)
+{
+       frob_text(&mod->core_layout, set_memory_x);
+       frob_text(&mod->init_layout, set_memory_x);
+}
+#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
+static void disable_ro_nx(const struct module_layout *layout) { }
+static void module_enable_nx(const struct module *mod) { }
+static void module_disable_nx(const struct module *mod) { }
+static void module_enable_x(const struct module *mod) { }
+#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
 
 #ifdef CONFIG_LIVEPATCH
 /*
@@ -3388,8 +3404,7 @@ static bool finished_loading(const char *name)
        sched_annotate_sleep();
        mutex_lock(&module_mutex);
        mod = find_module_all(name, strlen(name), true);
-       ret = !mod || mod->state == MODULE_STATE_LIVE
-               || mod->state == MODULE_STATE_GOING;
+       ret = !mod || mod->state == MODULE_STATE_LIVE;
        mutex_unlock(&module_mutex);
 
        return ret;
@@ -3559,8 +3574,7 @@ again:
        mutex_lock(&module_mutex);
        old = find_module_all(mod->name, strlen(mod->name), true);
        if (old != NULL) {
-               if (old->state == MODULE_STATE_COMING
-                   || old->state == MODULE_STATE_UNFORMED) {
+               if (old->state != MODULE_STATE_LIVE) {
                        /* Wait in case it fails to load. */
                        mutex_unlock(&module_mutex);
                        err = wait_event_interruptible(module_wq,
@@ -3599,6 +3613,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
 
        module_enable_ro(mod, false);
        module_enable_nx(mod);
+       module_enable_x(mod);
 
        /* Mark state as coming so strong_try_module_get() ignores us,
         * but kallsyms etc. can see us. */
index d568cc56405f8835d499df7b6555a406e99c9e96..6c06b3039faed01d992965f73c95bc218880e286 100644 (file)
@@ -267,7 +267,12 @@ static void padata_reorder(struct parallel_data *pd)
         * The next object that needs serialization might have arrived to
         * the reorder queues in the meantime, we will be called again
         * from the timer function if no one else cares for it.
+        *
+        * Ensure reorder_objects is read after pd->lock is dropped so we see
+        * an increment from another task in padata_do_serial.  Pairs with
+        * smp_mb__after_atomic in padata_do_serial.
         */
+       smp_mb();
        if (atomic_read(&pd->reorder_objects)
                        && !(pinst->flags & PADATA_RESET))
                mod_timer(&pd->timer, jiffies + HZ);
@@ -387,6 +392,13 @@ void padata_do_serial(struct padata_priv *padata)
        list_add_tail(&padata->list, &pqueue->reorder.list);
        spin_unlock(&pqueue->reorder.lock);
 
+       /*
+        * Ensure the atomic_inc of reorder_objects above is ordered correctly
+        * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
+        * in padata_reorder.
+        */
+       smp_mb__after_atomic();
+
        put_cpu();
 
        /* If we're running on the wrong CPU, call padata_reorder() via a
index 6a6df23acd1a5144ecf307d848813ea6618d8f19..72e001e3753e34484cc6ff5c6afec2d6162528e2 100644 (file)
@@ -149,6 +149,7 @@ void panic(const char *fmt, ...)
         * after setting panic_cpu) from invoking panic() again.
         */
        local_irq_disable();
+       preempt_disable_notrace();
 
        /*
         * It's possible to come here directly from a panic-assertion and
index 2a2ac53d8b8bb845f25581b549a1ea4136564692..95271f180687e0367943aac2077fba2ff6cf7a61 100644 (file)
@@ -325,7 +325,7 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
        }
 
        read_lock(&tasklist_lock);
-       force_sig(SIGKILL, pid_ns->child_reaper);
+       send_sig(SIGKILL, pid_ns->child_reaper, 1);
        read_unlock(&tasklist_lock);
 
        do_exit(0);
index 06045abd188774565ca688a2a13135cf5241240b..d0d03223b45b1c30f24cb40f11c2c098dc0ca78c 100644 (file)
@@ -3210,7 +3210,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
        /* move first record forward until length fits into the buffer */
        seq = dumper->cur_seq;
        idx = dumper->cur_idx;
-       while (l > size && seq < dumper->next_seq) {
+       while (l >= size && seq < dumper->next_seq) {
                struct printk_log *msg = log_from_idx(idx);
 
                l -= msg_print_text(msg, true, NULL, 0);
index 30e1bc68503b5f5f3453d1c4a217f271d50ff59c..bce773cc5e4169022d8a1f5fbc6380771ae41fe2 100644 (file)
@@ -318,24 +318,27 @@ int release_resource(struct resource *old)
 
 EXPORT_SYMBOL(release_resource);
 
-/*
- * Finds the lowest iomem resource existing within [res->start.res->end).
- * The caller must specify res->start, res->end, res->flags, and optionally
- * desc.  If found, returns 0, res is overwritten, if not found, returns -1.
- * This function walks the whole tree and not just first level children until
- * and unless first_level_children_only is true.
+/**
+ * Finds the lowest iomem resource that covers part of [start..end].  The
+ * caller must specify start, end, flags, and desc (which may be
+ * IORES_DESC_NONE).
+ *
+ * If a resource is found, returns 0 and *res is overwritten with the part
+ * of the resource that's within [start..end]; if none is found, returns
+ * -ENODEV.  Returns -EINVAL for invalid parameters.
+ *
+ * This function walks the whole tree and not just first level children
+ * unless @first_level_children_only is true.
  */
-static int find_next_iomem_res(struct resource *res, unsigned long desc,
-                              bool first_level_children_only)
+static int find_next_iomem_res(resource_size_t start, resource_size_t end,
+                              unsigned long flags, unsigned long desc,
+                              bool first_level_children_only,
+                              struct resource *res)
 {
-       resource_size_t start, end;
        struct resource *p;
        bool sibling_only = false;
 
        BUG_ON(!res);
-
-       start = res->start;
-       end = res->end;
        BUG_ON(start >= end);
 
        if (first_level_children_only)
@@ -344,7 +347,7 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
        read_lock(&resource_lock);
 
        for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
-               if ((p->flags & res->flags) != res->flags)
+               if ((p->flags & flags) != flags)
                        continue;
                if ((desc != IORES_DESC_NONE) && (desc != p->desc))
                        continue;
@@ -352,39 +355,38 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
                        p = NULL;
                        break;
                }
-               if ((p->end >= start) && (p->start < end))
+               if ((p->end >= start) && (p->start <= end))
                        break;
        }
 
+       if (p) {
+               /* copy data */
+               res->start = max(start, p->start);
+               res->end = min(end, p->end);
+               res->flags = p->flags;
+               res->desc = p->desc;
+       }
+
        read_unlock(&resource_lock);
-       if (!p)
-               return -1;
-       /* copy data */
-       if (res->start < p->start)
-               res->start = p->start;
-       if (res->end > p->end)
-               res->end = p->end;
-       res->flags = p->flags;
-       res->desc = p->desc;
-       return 0;
+       return p ? 0 : -ENODEV;
 }
 
-static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
-                                bool first_level_children_only,
-                                void *arg,
+static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
+                                unsigned long flags, unsigned long desc,
+                                bool first_level_children_only, void *arg,
                                 int (*func)(struct resource *, void *))
 {
-       u64 orig_end = res->end;
+       struct resource res;
        int ret = -1;
 
-       while ((res->start < res->end) &&
-              !find_next_iomem_res(res, desc, first_level_children_only)) {
-               ret = (*func)(res, arg);
+       while (start < end &&
+              !find_next_iomem_res(start, end, flags, desc,
+                                   first_level_children_only, &res)) {
+               ret = (*func)(&res, arg);
                if (ret)
                        break;
 
-               res->start = res->end + 1;
-               res->end = orig_end;
+               start = res.end + 1;
        }
 
        return ret;
@@ -407,13 +409,7 @@ static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
 int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
                u64 end, void *arg, int (*func)(struct resource *, void *))
 {
-       struct resource res;
-
-       res.start = start;
-       res.end = end;
-       res.flags = flags;
-
-       return __walk_iomem_res_desc(&res, desc, false, arg, func);
+       return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func);
 }
 EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
 
@@ -427,13 +423,9 @@ EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
 int walk_system_ram_res(u64 start, u64 end, void *arg,
                                int (*func)(struct resource *, void *))
 {
-       struct resource res;
+       unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
-       res.start = start;
-       res.end = end;
-       res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
-
-       return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
                                     arg, func);
 }
 
@@ -444,13 +436,9 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
 int walk_mem_res(u64 start, u64 end, void *arg,
                 int (*func)(struct resource *, void *))
 {
-       struct resource res;
+       unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 
-       res.start = start;
-       res.end = end;
-       res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-
-       return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
                                     arg, func);
 }
 
@@ -464,25 +452,25 @@ int walk_mem_res(u64 start, u64 end, void *arg,
 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
+       resource_size_t start, end;
+       unsigned long flags;
        struct resource res;
        unsigned long pfn, end_pfn;
-       u64 orig_end;
        int ret = -1;
 
-       res.start = (u64) start_pfn << PAGE_SHIFT;
-       res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
-       res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
-       orig_end = res.end;
-       while ((res.start < res.end) &&
-               (find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
+       start = (u64) start_pfn << PAGE_SHIFT;
+       end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
+       flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+       while (start < end &&
+              !find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
+                                   true, &res)) {
                pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
                end_pfn = (res.end + 1) >> PAGE_SHIFT;
                if (end_pfn > pfn)
                        ret = (*func)(pfn, end_pfn - pfn, arg);
                if (ret)
                        break;
-               res.start = res.end + 1;
-               res.end = orig_end;
+               start = res.end + 1;
        }
        return ret;
 }
index 6859ea1d5c0492b33e647da9c642c1fd6d726485..78ecdfae25b692c63b84c1ae0b88f5cd2fc61f78 100644 (file)
@@ -1077,7 +1077,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
        if (cpumask_equal(&p->cpus_allowed, new_mask))
                goto out;
 
-       if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
+       dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+       if (dest_cpu >= nr_cpu_ids) {
                ret = -EINVAL;
                goto out;
        }
@@ -1098,7 +1099,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
        if (cpumask_test_cpu(task_cpu(p), new_mask))
                goto out;
 
-       dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
        if (task_running(rq, p) || p->state == TASK_WAKING) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
@@ -3066,8 +3066,36 @@ void scheduler_tick(void)
 
 struct tick_work {
        int                     cpu;
+       atomic_t                state;
        struct delayed_work     work;
 };
+/* Values for ->state, see diagram below. */
+#define TICK_SCHED_REMOTE_OFFLINE      0
+#define TICK_SCHED_REMOTE_OFFLINING    1
+#define TICK_SCHED_REMOTE_RUNNING      2
+
+/*
+ * State diagram for ->state:
+ *
+ *
+ *          TICK_SCHED_REMOTE_OFFLINE
+ *                    |   ^
+ *                    |   |
+ *                    |   | sched_tick_remote()
+ *                    |   |
+ *                    |   |
+ *                    +--TICK_SCHED_REMOTE_OFFLINING
+ *                    |   ^
+ *                    |   |
+ * sched_tick_start() |   | sched_tick_stop()
+ *                    |   |
+ *                    V   |
+ *          TICK_SCHED_REMOTE_RUNNING
+ *
+ *
+ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
+ * and sched_tick_start() are happy to leave the state in RUNNING.
+ */
 
 static struct tick_work __percpu *tick_work_cpu;
 
@@ -3080,6 +3108,7 @@ static void sched_tick_remote(struct work_struct *work)
        struct task_struct *curr;
        struct rq_flags rf;
        u64 delta;
+       int os;
 
        /*
         * Handle the tick only if it appears the remote CPU is running in full
@@ -3093,7 +3122,7 @@ static void sched_tick_remote(struct work_struct *work)
 
        rq_lock_irq(rq, &rf);
        curr = rq->curr;
-       if (is_idle_task(curr))
+       if (is_idle_task(curr) || cpu_is_offline(cpu))
                goto out_unlock;
 
        update_rq_clock(rq);
@@ -3113,13 +3142,18 @@ out_requeue:
        /*
         * Run the remote tick once per second (1Hz). This arbitrary
         * frequency is large enough to avoid overload but short enough
-        * to keep scheduler internal stats reasonably up to date.
+        * to keep scheduler internal stats reasonably up to date.  But
+        * first update state to reflect hotplug activity if required.
         */
-       queue_delayed_work(system_unbound_wq, dwork, HZ);
+       os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
+       WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
+       if (os == TICK_SCHED_REMOTE_RUNNING)
+               queue_delayed_work(system_unbound_wq, dwork, HZ);
 }
 
 static void sched_tick_start(int cpu)
 {
+       int os;
        struct tick_work *twork;
 
        if (housekeeping_cpu(cpu, HK_FLAG_TICK))
@@ -3128,15 +3162,20 @@ static void sched_tick_start(int cpu)
        WARN_ON_ONCE(!tick_work_cpu);
 
        twork = per_cpu_ptr(tick_work_cpu, cpu);
-       twork->cpu = cpu;
-       INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
-       queue_delayed_work(system_unbound_wq, &twork->work, HZ);
+       os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
+       WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
+       if (os == TICK_SCHED_REMOTE_OFFLINE) {
+               twork->cpu = cpu;
+               INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
+               queue_delayed_work(system_unbound_wq, &twork->work, HZ);
+       }
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 static void sched_tick_stop(int cpu)
 {
        struct tick_work *twork;
+       int os;
 
        if (housekeeping_cpu(cpu, HK_FLAG_TICK))
                return;
@@ -3144,7 +3183,10 @@ static void sched_tick_stop(int cpu)
        WARN_ON_ONCE(!tick_work_cpu);
 
        twork = per_cpu_ptr(tick_work_cpu, cpu);
-       cancel_delayed_work_sync(&twork->work);
+       /* There cannot be competing actions, but don't rely on stop-machine. */
+       os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
+       WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
+       /* Don't cancel, as this would mess up the state machine. */
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -3152,7 +3194,6 @@ int __init sched_tick_offload_init(void)
 {
        tick_work_cpu = alloc_percpu(struct tick_work);
        BUG_ON(!tick_work_cpu);
-
        return 0;
 }
 
@@ -5133,7 +5174,7 @@ long __sched io_schedule_timeout(long timeout)
 }
 EXPORT_SYMBOL(io_schedule_timeout);
 
-void io_schedule(void)
+void __sched io_schedule(void)
 {
        int token;
 
@@ -6453,10 +6494,6 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 #ifdef CONFIG_RT_GROUP_SCHED
                if (!sched_rt_can_attach(css_tg(css), task))
                        return -EINVAL;
-#else
-               /* We don't support RT-tasks being in separate groups */
-               if (task->sched_class != &fair_sched_class)
-                       return -EINVAL;
 #endif
                /*
                 * Serialize against wake_up_new_task() such that if its
index 4e3625109b28d8db76c8912248fd145c3af118bf..54fcff656ecd7f250f5770ae8973bc0c3d48cd8d 100644 (file)
@@ -40,6 +40,7 @@ struct sugov_policy {
        struct task_struct      *thread;
        bool                    work_in_progress;
 
+       bool                    limits_changed;
        bool                    need_freq_update;
 };
 
@@ -90,8 +91,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
            !cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;
 
-       if (unlikely(sg_policy->need_freq_update))
+       if (unlikely(sg_policy->limits_changed)) {
+               sg_policy->limits_changed = false;
+               sg_policy->need_freq_update = true;
                return true;
+       }
 
        delta_ns = time - sg_policy->last_freq_update_time;
 
@@ -114,6 +118,7 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
                              unsigned int next_freq)
 {
        struct cpufreq_policy *policy = sg_policy->policy;
+       int cpu;
 
        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;
@@ -123,7 +128,11 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
                return;
 
        policy->cur = next_freq;
-       trace_cpu_frequency(next_freq, smp_processor_id());
+
+       if (trace_cpu_frequency_enabled()) {
+               for_each_cpu(cpu, policy->cpus)
+                       trace_cpu_frequency(next_freq, cpu);
+       }
 }
 
 static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
@@ -405,7 +414,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
 {
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
-               sg_policy->need_freq_update = true;
+               sg_policy->limits_changed = true;
 }
 
 static void sugov_update_single(struct update_util_data *hook, u64 time,
@@ -425,7 +434,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
        if (!sugov_should_update_freq(sg_policy, time))
                return;
 
-       busy = sugov_cpu_is_busy(sg_cpu);
+       /* Limits may have changed, don't skip frequency update */
+       busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
 
        util = sugov_get_util(sg_cpu);
        max = sg_cpu->max;
@@ -798,6 +808,7 @@ static int sugov_start(struct cpufreq_policy *policy)
        sg_policy->last_freq_update_time        = 0;
        sg_policy->next_freq                    = 0;
        sg_policy->work_in_progress             = false;
+       sg_policy->limits_changed               = false;
        sg_policy->need_freq_update             = false;
        sg_policy->cached_raw_freq              = 0;
 
@@ -849,7 +860,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
                mutex_unlock(&sg_policy->work_lock);
        }
 
-       sg_policy->need_freq_update = true;
+       sg_policy->limits_changed = true;
 }
 
 static struct cpufreq_governor schedutil_gov = {
index 72c07059ef3717c1e8b2b4bae7a1311ea0d74088..ebec37cb3be9a865b7f90adacb3adaef4bea997d 100644 (file)
@@ -529,6 +529,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 {
        struct rq *later_rq = NULL;
+       struct dl_bw *dl_b;
 
        later_rq = find_lock_later_rq(p, rq);
        if (!later_rq) {
@@ -557,6 +558,38 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
                double_lock_balance(rq, later_rq);
        }
 
+       if (p->dl.dl_non_contending || p->dl.dl_throttled) {
+               /*
+                * Inactive timer is armed (or callback is running, but
+                * waiting for us to release rq locks). In any case, when it
+                * will fire (or continue), it will see running_bw of this
+                * task migrated to later_rq (and correctly handle it).
+                */
+               sub_running_bw(&p->dl, &rq->dl);
+               sub_rq_bw(&p->dl, &rq->dl);
+
+               add_rq_bw(&p->dl, &later_rq->dl);
+               add_running_bw(&p->dl, &later_rq->dl);
+       } else {
+               sub_rq_bw(&p->dl, &rq->dl);
+               add_rq_bw(&p->dl, &later_rq->dl);
+       }
+
+       /*
+        * And we finally need to fixup root_domain(s) bandwidth accounting,
+        * since p is still hanging out in the old (now moved to default) root
+        * domain.
+        */
+       dl_b = &rq->rd->dl_bw;
+       raw_spin_lock(&dl_b->lock);
+       __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
+       raw_spin_unlock(&dl_b->lock);
+
+       dl_b = &later_rq->rd->dl_bw;
+       raw_spin_lock(&dl_b->lock);
+       __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
+       raw_spin_unlock(&dl_b->lock);
+
        set_task_cpu(p, later_rq->cpu);
        double_unlock_balance(later_rq, rq);
 
index 4a433608ba74af0fae9226a76a92e039b0c8932e..32d2dac680a7043d023f98204e8fa47994aff8e7 100644 (file)
@@ -1053,6 +1053,21 @@ struct numa_group {
        unsigned long faults[0];
 };
 
+/*
+ * For functions that can be called in multiple contexts that permit reading
+ * ->numa_group (see struct task_struct for locking rules).
+ */
+static struct numa_group *deref_task_numa_group(struct task_struct *p)
+{
+       return rcu_dereference_check(p->numa_group, p == current ||
+               (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
+}
+
+static struct numa_group *deref_curr_numa_group(struct task_struct *p)
+{
+       return rcu_dereference_protected(p->numa_group, p == current);
+}
+
 static inline unsigned long group_faults_priv(struct numa_group *ng);
 static inline unsigned long group_faults_shared(struct numa_group *ng);
 
@@ -1096,10 +1111,12 @@ static unsigned int task_scan_start(struct task_struct *p)
 {
        unsigned long smin = task_scan_min(p);
        unsigned long period = smin;
+       struct numa_group *ng;
 
        /* Scale the maximum scan period with the amount of shared memory. */
-       if (p->numa_group) {
-               struct numa_group *ng = p->numa_group;
+       rcu_read_lock();
+       ng = rcu_dereference(p->numa_group);
+       if (ng) {
                unsigned long shared = group_faults_shared(ng);
                unsigned long private = group_faults_priv(ng);
 
@@ -1107,6 +1124,7 @@ static unsigned int task_scan_start(struct task_struct *p)
                period *= shared + 1;
                period /= private + shared + 1;
        }
+       rcu_read_unlock();
 
        return max(smin, period);
 }
@@ -1115,13 +1133,14 @@ static unsigned int task_scan_max(struct task_struct *p)
 {
        unsigned long smin = task_scan_min(p);
        unsigned long smax;
+       struct numa_group *ng;
 
        /* Watch for min being lower than max due to floor calculations */
        smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
 
        /* Scale the maximum scan period with the amount of shared memory. */
-       if (p->numa_group) {
-               struct numa_group *ng = p->numa_group;
+       ng = deref_curr_numa_group(p);
+       if (ng) {
                unsigned long shared = group_faults_shared(ng);
                unsigned long private = group_faults_priv(ng);
                unsigned long period = smax;
@@ -1153,7 +1172,7 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
        p->numa_scan_period             = sysctl_numa_balancing_scan_delay;
        p->numa_work.next               = &p->numa_work;
        p->numa_faults                  = NULL;
-       p->numa_group                   = NULL;
+       RCU_INIT_POINTER(p->numa_group, NULL);
        p->last_task_numa_placement     = 0;
        p->last_sum_exec_runtime        = 0;
 
@@ -1200,7 +1219,16 @@ static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
 
 pid_t task_numa_group_id(struct task_struct *p)
 {
-       return p->numa_group ? p->numa_group->gid : 0;
+       struct numa_group *ng;
+       pid_t gid = 0;
+
+       rcu_read_lock();
+       ng = rcu_dereference(p->numa_group);
+       if (ng)
+               gid = ng->gid;
+       rcu_read_unlock();
+
+       return gid;
 }
 
 /*
@@ -1225,11 +1253,13 @@ static inline unsigned long task_faults(struct task_struct *p, int nid)
 
 static inline unsigned long group_faults(struct task_struct *p, int nid)
 {
-       if (!p->numa_group)
+       struct numa_group *ng = deref_task_numa_group(p);
+
+       if (!ng)
                return 0;
 
-       return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
-               p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
+       return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
+               ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
 }
 
 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
@@ -1367,12 +1397,13 @@ static inline unsigned long task_weight(struct task_struct *p, int nid,
 static inline unsigned long group_weight(struct task_struct *p, int nid,
                                         int dist)
 {
+       struct numa_group *ng = deref_task_numa_group(p);
        unsigned long faults, total_faults;
 
-       if (!p->numa_group)
+       if (!ng)
                return 0;
 
-       total_faults = p->numa_group->total_faults;
+       total_faults = ng->total_faults;
 
        if (!total_faults)
                return 0;
@@ -1386,7 +1417,7 @@ static inline unsigned long group_weight(struct task_struct *p, int nid,
 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
                                int src_nid, int dst_cpu)
 {
-       struct numa_group *ng = p->numa_group;
+       struct numa_group *ng = deref_curr_numa_group(p);
        int dst_nid = cpu_to_node(dst_cpu);
        int last_cpupid, this_cpupid;
 
@@ -1592,13 +1623,14 @@ static bool load_too_imbalanced(long src_load, long dst_load,
 static void task_numa_compare(struct task_numa_env *env,
                              long taskimp, long groupimp, bool maymove)
 {
+       struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
        struct rq *dst_rq = cpu_rq(env->dst_cpu);
+       long imp = p_ng ? groupimp : taskimp;
        struct task_struct *cur;
        long src_load, dst_load;
-       long load;
-       long imp = env->p->numa_group ? groupimp : taskimp;
-       long moveimp = imp;
        int dist = env->dist;
+       long moveimp = imp;
+       long load;
 
        if (READ_ONCE(dst_rq->numa_migrate_on))
                return;
@@ -1637,21 +1669,22 @@ static void task_numa_compare(struct task_numa_env *env,
         * If dst and source tasks are in the same NUMA group, or not
         * in any group then look only at task weights.
         */
-       if (cur->numa_group == env->p->numa_group) {
+       cur_ng = rcu_dereference(cur->numa_group);
+       if (cur_ng == p_ng) {
                imp = taskimp + task_weight(cur, env->src_nid, dist) -
                      task_weight(cur, env->dst_nid, dist);
                /*
                 * Add some hysteresis to prevent swapping the
                 * tasks within a group over tiny differences.
                 */
-               if (cur->numa_group)
+               if (cur_ng)
                        imp -= imp / 16;
        } else {
                /*
                 * Compare the group weights. If a task is all by itself
                 * (not part of a group), use the task weight instead.
                 */
-               if (cur->numa_group && env->p->numa_group)
+               if (cur_ng && p_ng)
                        imp += group_weight(cur, env->src_nid, dist) -
                               group_weight(cur, env->dst_nid, dist);
                else
@@ -1749,11 +1782,12 @@ static int task_numa_migrate(struct task_struct *p)
                .best_imp = 0,
                .best_cpu = -1,
        };
+       unsigned long taskweight, groupweight;
        struct sched_domain *sd;
+       long taskimp, groupimp;
+       struct numa_group *ng;
        struct rq *best_rq;
-       unsigned long taskweight, groupweight;
        int nid, ret, dist;
-       long taskimp, groupimp;
 
        /*
         * Pick the lowest SD_NUMA domain, as that would have the smallest
@@ -1799,7 +1833,8 @@ static int task_numa_migrate(struct task_struct *p)
         *   multiple NUMA nodes; in order to better consolidate the group,
         *   we need to check other locations.
         */
-       if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
+       ng = deref_curr_numa_group(p);
+       if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
                for_each_online_node(nid) {
                        if (nid == env.src_nid || nid == p->numa_preferred_nid)
                                continue;
@@ -1832,7 +1867,7 @@ static int task_numa_migrate(struct task_struct *p)
         * A task that migrated to a second choice node will be better off
         * trying for a better one later. Do not set the preferred node here.
         */
-       if (p->numa_group) {
+       if (ng) {
                if (env.best_cpu == -1)
                        nid = env.src_nid;
                else
@@ -2127,6 +2162,7 @@ static void task_numa_placement(struct task_struct *p)
        unsigned long total_faults;
        u64 runtime, period;
        spinlock_t *group_lock = NULL;
+       struct numa_group *ng;
 
        /*
         * The p->mm->numa_scan_seq field gets updated without
@@ -2144,8 +2180,9 @@ static void task_numa_placement(struct task_struct *p)
        runtime = numa_get_avg_runtime(p, &period);
 
        /* If the task is part of a group prevent parallel updates to group stats */
-       if (p->numa_group) {
-               group_lock = &p->numa_group->lock;
+       ng = deref_curr_numa_group(p);
+       if (ng) {
+               group_lock = &ng->lock;
                spin_lock_irq(group_lock);
        }
 
@@ -2186,7 +2223,7 @@ static void task_numa_placement(struct task_struct *p)
                        p->numa_faults[cpu_idx] += f_diff;
                        faults += p->numa_faults[mem_idx];
                        p->total_numa_faults += diff;
-                       if (p->numa_group) {
+                       if (ng) {
                                /*
                                 * safe because we can only change our own group
                                 *
@@ -2194,14 +2231,14 @@ static void task_numa_placement(struct task_struct *p)
                                 * nid and priv in a specific region because it
                                 * is at the beginning of the numa_faults array.
                                 */
-                               p->numa_group->faults[mem_idx] += diff;
-                               p->numa_group->faults_cpu[mem_idx] += f_diff;
-                               p->numa_group->total_faults += diff;
-                               group_faults += p->numa_group->faults[mem_idx];
+                               ng->faults[mem_idx] += diff;
+                               ng->faults_cpu[mem_idx] += f_diff;
+                               ng->total_faults += diff;
+                               group_faults += ng->faults[mem_idx];
                        }
                }
 
-               if (!p->numa_group) {
+               if (!ng) {
                        if (faults > max_faults) {
                                max_faults = faults;
                                max_nid = nid;
@@ -2212,8 +2249,8 @@ static void task_numa_placement(struct task_struct *p)
                }
        }
 
-       if (p->numa_group) {
-               numa_group_count_active_nodes(p->numa_group);
+       if (ng) {
+               numa_group_count_active_nodes(ng);
                spin_unlock_irq(group_lock);
                max_nid = preferred_group_nid(p, max_nid);
        }
@@ -2247,7 +2284,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
        int cpu = cpupid_to_cpu(cpupid);
        int i;
 
-       if (unlikely(!p->numa_group)) {
+       if (unlikely(!deref_curr_numa_group(p))) {
                unsigned int size = sizeof(struct numa_group) +
                                    4*nr_node_ids*sizeof(unsigned long);
 
@@ -2283,7 +2320,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
        if (!grp)
                goto no_join;
 
-       my_grp = p->numa_group;
+       my_grp = deref_curr_numa_group(p);
        if (grp == my_grp)
                goto no_join;
 
@@ -2345,13 +2382,24 @@ no_join:
        return;
 }
 
-void task_numa_free(struct task_struct *p)
+/*
+ * Get rid of NUMA staticstics associated with a task (either current or dead).
+ * If @final is set, the task is dead and has reached refcount zero, so we can
+ * safely free all relevant data structures. Otherwise, there might be
+ * concurrent reads from places like load balancing and procfs, and we should
+ * reset the data back to default state without freeing ->numa_faults.
+ */
+void task_numa_free(struct task_struct *p, bool final)
 {
-       struct numa_group *grp = p->numa_group;
-       void *numa_faults = p->numa_faults;
+       /* safe: p either is current or is being freed by current */
+       struct numa_group *grp = rcu_dereference_raw(p->numa_group);
+       unsigned long *numa_faults = p->numa_faults;
        unsigned long flags;
        int i;
 
+       if (!numa_faults)
+               return;
+
        if (grp) {
                spin_lock_irqsave(&grp->lock, flags);
                for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
@@ -2364,8 +2412,14 @@ void task_numa_free(struct task_struct *p)
                put_numa_group(grp);
        }
 
-       p->numa_faults = NULL;
-       kfree(numa_faults);
+       if (final) {
+               p->numa_faults = NULL;
+               kfree(numa_faults);
+       } else {
+               p->total_numa_faults = 0;
+               for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+                       numa_faults[i] = 0;
+       }
 }
 
 /*
@@ -2418,7 +2472,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
         * actively using should be counted as local. This allows the
         * scan rate to slow down when a workload has settled down.
         */
-       ng = p->numa_group;
+       ng = deref_curr_numa_group(p);
        if (!priv && !local && ng && ng->active_nodes > 1 &&
                                numa_is_active_node(cpu_node, ng) &&
                                numa_is_active_node(mem_node, ng))
@@ -4366,6 +4420,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
        if (likely(cfs_rq->runtime_remaining > 0))
                return;
 
+       if (cfs_rq->throttled)
+               return;
        /*
         * if we're unable to extend our runtime we resched so that the active
         * hierarchy can be throttled
@@ -4561,6 +4617,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
                if (!cfs_rq_throttled(cfs_rq))
                        goto next;
 
+               /* By the above check, this should never be true */
+               SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
+
                runtime = -cfs_rq->runtime_remaining + 1;
                if (runtime > remaining)
                        runtime = remaining;
@@ -8804,9 +8863,10 @@ more_balance:
 out_balanced:
        /*
         * We reach balance although we may have faced some affinity
-        * constraints. Clear the imbalance flag if it was set.
+        * constraints. Clear the imbalance flag only if other tasks got
+        * a chance to move and fix the imbalance.
         */
-       if (sd_parent) {
+       if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
                int *group_imbalance = &sd_parent->groups->sgc->imbalance;
 
                if (*group_imbalance)
@@ -10019,18 +10079,18 @@ err:
 void online_fair_sched_group(struct task_group *tg)
 {
        struct sched_entity *se;
+       struct rq_flags rf;
        struct rq *rq;
        int i;
 
        for_each_possible_cpu(i) {
                rq = cpu_rq(i);
                se = tg->se[i];
-
-               raw_spin_lock_irq(&rq->lock);
+               rq_lock_irq(rq, &rf);
                update_rq_clock(rq);
                attach_entity_cfs_rq(se);
                sync_throttle(tg, i);
-               raw_spin_unlock_irq(&rq->lock);
+               rq_unlock_irq(rq, &rf);
        }
 }
 
@@ -10218,18 +10278,22 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
 {
        int node;
        unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
+       struct numa_group *ng;
 
+       rcu_read_lock();
+       ng = rcu_dereference(p->numa_group);
        for_each_online_node(node) {
                if (p->numa_faults) {
                        tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
                        tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
                }
-               if (p->numa_group) {
-                       gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
-                       gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
+               if (ng) {
+                       gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
+                       gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
                }
                print_numa_stats(m, node, tsf, tpf, gsf, gpf);
        }
+       rcu_read_unlock();
 }
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_SCHED_DEBUG */
index bf1b52f2cdfad6e7c8f58122cd1db10226a33a9d..f1bf7b6ec93576ec4c13d3fe9b9847ad8fd6476a 100644 (file)
@@ -241,13 +241,14 @@ static void do_idle(void)
                check_pgt_cache();
                rmb();
 
+               local_irq_disable();
+
                if (cpu_is_offline(cpu)) {
-                       tick_nohz_idle_stop_tick_protected();
+                       tick_nohz_idle_stop_tick();
                        cpuhp_report_idle_dead();
                        arch_cpu_idle_dead();
                }
 
-               local_irq_disable();
                arch_cpu_idle_enter();
 
                /*
index 76e0eaf4654e07ec99fb5cfa8c356ab3e136f4e7..dd27e632b1babf76e58276489b58e88a787bcf3c 100644 (file)
@@ -235,7 +235,7 @@ static int membarrier_register_private_expedited(int flags)
         * groups, which use the same mm. (CLONE_VM but not
         * CLONE_THREAD).
         */
-       if (atomic_read(&mm->membarrier_state) & state)
+       if ((atomic_read(&mm->membarrier_state) & state) == state)
                return 0;
        atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED, &mm->membarrier_state);
        if (flags & MEMBARRIER_FLAG_SYNC_CORE)
index a26473674fb797411b7c3b1e8790c64dcfd7ed51..c529706bed11531eea6e4301f126a0aaadefce15 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Generated by Documentation/scheduler/sched-pelt; do not modify. */
 
-static const u32 runnable_avg_yN_inv[] = {
+static const u32 runnable_avg_yN_inv[] __maybe_unused = {
        0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
        0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
        0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
index fdeb9bc6affb484398d9d366e358e44c79cd6df2..f4255a65c44b2347bd4559137e53c2bf7636cd0c 100644 (file)
@@ -676,7 +676,7 @@ static int alarm_timer_create(struct k_itimer *new_timer)
        enum  alarmtimer_type type;
 
        if (!alarmtimer_get_rtcdev())
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        if (!capable(CAP_WAKE_ALARM))
                return -EPERM;
@@ -794,7 +794,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
        int ret = 0;
 
        if (!alarmtimer_get_rtcdev())
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        if (flags & ~TIMER_ABSTIME)
                return -EINVAL;
index 6b23cd584295f57787e7689191550a252140dd7e..e1110a7bd3e64afefcd20eacb45df112d16ac69e 100644 (file)
@@ -43,6 +43,7 @@ static u64                    tick_length_base;
 #define MAX_TICKADJ            500LL           /* usecs */
 #define MAX_TICKADJ_SCALED \
        (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
+#define MAX_TAI_OFFSET         100000
 
 /*
  * phase-lock loop variables
@@ -698,7 +699,8 @@ static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai
                time_constant = max(time_constant, 0l);
        }
 
-       if (txc->modes & ADJ_TAI && txc->constant >= 0)
+       if (txc->modes & ADJ_TAI &&
+                       txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET)
                *time_tai = txc->constant;
 
        if (txc->modes & ADJ_OFFSET)
index 76801b9b481eb6d4c19155204cfcdd4123c7611f..d62d7ae5201c5c5a525c48aa2a3197d3e4126041 100644 (file)
@@ -375,7 +375,8 @@ static int posix_cpu_timer_del(struct k_itimer *timer)
        struct sighand_struct *sighand;
        struct task_struct *p = timer->it.cpu.task;
 
-       WARN_ON_ONCE(p == NULL);
+       if (WARN_ON_ONCE(!p))
+               return -EINVAL;
 
        /*
         * Protect against sighand release/switch in exit/exec and process/
@@ -580,7 +581,8 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
        u64 old_expires, new_expires, old_incr, val;
        int ret;
 
-       WARN_ON_ONCE(p == NULL);
+       if (WARN_ON_ONCE(!p))
+               return -EINVAL;
 
        /*
         * Use the to_ktime conversion because that clamps the maximum
@@ -716,10 +718,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 
 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
 {
-       u64 now;
        struct task_struct *p = timer->it.cpu.task;
+       u64 now;
 
-       WARN_ON_ONCE(p == NULL);
+       if (WARN_ON_ONCE(!p))
+               return;
 
        /*
         * Easy part: convert the reload time.
@@ -1004,12 +1007,13 @@ static void check_process_timers(struct task_struct *tsk,
  */
 static void posix_cpu_timer_rearm(struct k_itimer *timer)
 {
+       struct task_struct *p = timer->it.cpu.task;
        struct sighand_struct *sighand;
        unsigned long flags;
-       struct task_struct *p = timer->it.cpu.task;
        u64 now;
 
-       WARN_ON_ONCE(p == NULL);
+       if (WARN_ON_ONCE(!p))
+               return;
 
        /*
         * Fetch the current sample and update the timer's expiry time.
@@ -1206,7 +1210,9 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
        u64 now;
        int ret;
 
-       WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
+       if (WARN_ON_ONCE(clock_idx >= CPUCLOCK_SCHED))
+               return;
+
        ret = cpu_timer_sample_group(clock_idx, tsk, &now);
 
        if (oldval && ret != -EINVAL) {
index a59641fb88b6963ac837a3b4cf4657c24e05098b..a836efd34589595f12087f11c9798e9ebb23293b 100644 (file)
@@ -44,34 +44,39 @@ static int bc_shutdown(struct clock_event_device *evt)
  */
 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 {
-       int bc_moved;
        /*
-        * We try to cancel the timer first. If the callback is on
-        * flight on some other cpu then we let it handle it. If we
-        * were able to cancel the timer nothing can rearm it as we
-        * own broadcast_lock.
+        * This is called either from enter/exit idle code or from the
+        * broadcast handler. In all cases tick_broadcast_lock is held.
         *
-        * However we can also be called from the event handler of
-        * ce_broadcast_hrtimer itself when it expires. We cannot
-        * restart the timer because we are in the callback, but we
-        * can set the expiry time and let the callback return
-        * HRTIMER_RESTART.
+        * hrtimer_cancel() cannot be called here neither from the
+        * broadcast handler nor from the enter/exit idle code. The idle
+        * code can run into the problem described in bc_shutdown() and the
+        * broadcast handler cannot wait for itself to complete for obvious
+        * reasons.
         *
-        * Since we are in the idle loop at this point and because
-        * hrtimer_{start/cancel} functions call into tracing,
-        * calls to these functions must be bound within RCU_NONIDLE.
+        * Each caller tries to arm the hrtimer on its own CPU, but if the
+        * hrtimer callback function is currently running, then
+        * hrtimer_start() cannot move it and the timer stays on the CPU on
+        * which it is assigned at the moment.
+        *
+        * As this can be called from idle code, the hrtimer_start()
+        * invocation has to be wrapped with RCU_NONIDLE() as
+        * hrtimer_start() can call into tracing.
         */
-       RCU_NONIDLE({
-                       bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
-                       if (bc_moved)
-                               hrtimer_start(&bctimer, expires,
-                                             HRTIMER_MODE_ABS_PINNED);});
-       if (bc_moved) {
-               /* Bind the "device" to the cpu */
-               bc->bound_on = smp_processor_id();
-       } else if (bc->bound_on == smp_processor_id()) {
-               hrtimer_set_expires(&bctimer, expires);
-       }
+       RCU_NONIDLE( {
+               hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+               /*
+                * The core tick broadcast mode expects bc->bound_on to be set
+                * correctly to prevent a CPU which has the broadcast hrtimer
+                * armed from going deep idle.
+                *
+                * As tick_broadcast_lock is held, nothing can change the cpu
+                * base which was just established in hrtimer_start() above. So
+                * the below access is safe even without holding the hrtimer
+                * base lock.
+                */
+               bc->bound_on = bctimer.base->cpu_base->cpu;
+       } );
        return 0;
 }
 
@@ -97,10 +102,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 {
        ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-       if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
-               if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
-                       return HRTIMER_RESTART;
-
        return HRTIMER_NORESTART;
 }
 
index 443edcddac8ab2a468a63ec599c2756032fae14a..c2708e1f0c69f9785991c06448766bc08bf7f32d 100644 (file)
@@ -823,7 +823,7 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
 
        } while (read_seqcount_retry(&tk_core.seq, seq));
 
-       return base + nsecs;
+       return ktime_add_ns(base, nsecs);
 }
 EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
 
index fa49cd753dea74ff442e49a637895ca71993703d..ae64cb819a9a85621b0ace2e986e9970239dc9cf 100644 (file)
@@ -1590,24 +1590,26 @@ void timer_clear_idle(void)
 static int collect_expired_timers(struct timer_base *base,
                                  struct hlist_head *heads)
 {
+       unsigned long now = READ_ONCE(jiffies);
+
        /*
         * NOHZ optimization. After a long idle sleep we need to forward the
         * base to current jiffies. Avoid a loop by searching the bitfield for
         * the next expiring timer.
         */
-       if ((long)(jiffies - base->clk) > 2) {
+       if ((long)(now - base->clk) > 2) {
                unsigned long next = __next_timer_interrupt(base);
 
                /*
                 * If the next timer is ahead of time forward to current
                 * jiffies, otherwise forward to the next expiry time:
                 */
-               if (time_after(next, jiffies)) {
+               if (time_after(next, now)) {
                        /*
                         * The call site will increment base->clk and then
                         * terminate the expiry loop immediately.
                         */
-                       base->clk = jiffies;
+                       base->clk = now;
                        return 0;
                }
                base->clk = next;
index d647dabdac97a3b592b25cdc929909df92b04b8a..07afcfe2a61b7c02ec6d6e43687fae1344775373 100644 (file)
@@ -287,23 +287,6 @@ static inline void timer_list_header(struct seq_file *m, u64 now)
        SEQ_printf(m, "\n");
 }
 
-static int timer_list_show(struct seq_file *m, void *v)
-{
-       struct timer_list_iter *iter = v;
-
-       if (iter->cpu == -1 && !iter->second_pass)
-               timer_list_header(m, iter->now);
-       else if (!iter->second_pass)
-               print_cpu(m, iter->cpu, iter->now);
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-       else if (iter->cpu == -1 && iter->second_pass)
-               timer_list_show_tickdevices_header(m);
-       else
-               print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
-#endif
-       return 0;
-}
-
 void sysrq_timer_list_show(void)
 {
        u64 now = ktime_to_ns(ktime_get());
@@ -322,6 +305,24 @@ void sysrq_timer_list_show(void)
        return;
 }
 
+#ifdef CONFIG_PROC_FS
+static int timer_list_show(struct seq_file *m, void *v)
+{
+       struct timer_list_iter *iter = v;
+
+       if (iter->cpu == -1 && !iter->second_pass)
+               timer_list_header(m, iter->now);
+       else if (!iter->second_pass)
+               print_cpu(m, iter->cpu, iter->now);
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+       else if (iter->cpu == -1 && iter->second_pass)
+               timer_list_show_tickdevices_header(m);
+       else
+               print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
+#endif
+       return 0;
+}
+
 static void *move_iter(struct timer_list_iter *iter, loff_t offset)
 {
        for (; offset; offset--) {
@@ -381,3 +382,4 @@ static int __init init_timer_list_procfs(void)
        return 0;
 }
 __initcall(init_timer_list_procfs);
+#endif
index 118ecce143866bf8034093ce15baaef38124a709..b84eb9659e95eee6deea151ff909fe7d77c141c4 100644 (file)
@@ -1647,6 +1647,11 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
        return  keep_regs;
 }
 
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+static struct ftrace_ops *
+ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
+
 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
@@ -1775,15 +1780,17 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                        }
 
                        /*
-                        * If the rec had TRAMP enabled, then it needs to
-                        * be cleared. As TRAMP can only be enabled iff
-                        * there is only a single ops attached to it.
-                        * In otherwords, always disable it on decrementing.
-                        * In the future, we may set it if rec count is
-                        * decremented to one, and the ops that is left
-                        * has a trampoline.
+                        * The TRAMP needs to be set only if rec count
+                        * is decremented to one, and the ops that is
+                        * left has a trampoline. As TRAMP can only be
+                        * enabled if there is only a single ops attached
+                        * to it.
                         */
-                       rec->flags &= ~FTRACE_FL_TRAMP;
+                       if (ftrace_rec_count(rec) == 1 &&
+                           ftrace_find_tramp_ops_any(rec))
+                               rec->flags |= FTRACE_FL_TRAMP;
+                       else
+                               rec->flags &= ~FTRACE_FL_TRAMP;
 
                        /*
                         * flags will be cleared in ftrace_check_record()
@@ -1976,11 +1983,6 @@ static void print_ip_ins(const char *fmt, const unsigned char *p)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
 }
 
-static struct ftrace_ops *
-ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
-static struct ftrace_ops *
-ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
-
 enum ftrace_bug_type ftrace_bug_type;
 const void *ftrace_expected;
 
@@ -3110,6 +3112,14 @@ t_probe_next(struct seq_file *m, loff_t *pos)
                hnd = &iter->probe_entry->hlist;
 
        hash = iter->probe->ops.func_hash->filter_hash;
+
+       /*
+        * A probe being registered may temporarily have an empty hash
+        * and it's at the end of the func_probes list.
+        */
+       if (!hash || hash == EMPTY_HASH)
+               return NULL;
+
        size = 1 << hash->size_bits;
 
  retry:
@@ -3547,21 +3557,22 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
        struct ftrace_hash *hash;
        struct list_head *mod_head;
        struct trace_array *tr = ops->private;
-       int ret = 0;
+       int ret = -ENOMEM;
 
        ftrace_ops_init(ops);
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       if (tr && trace_array_get(tr) < 0)
+               return -ENODEV;
+
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
-               return -ENOMEM;
+               goto out;
 
-       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
-               kfree(iter);
-               return -ENOMEM;
-       }
+       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
+               goto out;
 
        iter->ops = ops;
        iter->flags = flag;
@@ -3591,13 +3602,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
-                       kfree(iter);
-                       ret = -ENOMEM;
                        goto out_unlock;
                }
        } else
                iter->hash = hash;
 
+       ret = 0;
+
        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
 
@@ -3609,7 +3620,6 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                        /* Failed */
                        free_ftrace_hash(iter->hash);
                        trace_parser_put(&iter->parser);
-                       kfree(iter);
                }
        } else
                file->private_data = iter;
@@ -3617,6 +3627,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
  out_unlock:
        mutex_unlock(&ops->func_hash->regex_lock);
 
+ out:
+       if (ret) {
+               kfree(iter);
+               if (tr)
+                       trace_array_put(tr);
+       }
+
        return ret;
 }
 
@@ -4305,12 +4322,21 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
 
        mutex_unlock(&ftrace_lock);
 
+       /*
+        * Note, there's a small window here that the func_hash->filter_hash
+        * may be NULL or empty. Need to be careful when reading the loop.
+        */
        mutex_lock(&probe->ops.func_hash->regex_lock);
 
        orig_hash = &probe->ops.func_hash->filter_hash;
        old_hash = *orig_hash;
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
 
+       if (!hash) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
        ret = ftrace_match_records(hash, glob, strlen(glob));
 
        /* Nothing found? */
@@ -5005,6 +5031,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        mutex_unlock(&iter->ops->func_hash->regex_lock);
        free_ftrace_hash(iter->hash);
+       if (iter->tr)
+               trace_array_put(iter->tr);
        kfree(iter);
 
        return 0;
index 3b0de19b9ed75d86cbd078c308057c0dbef5d5ec..27b17ea44f745f230ae9c099bcc5ca5844e4ffcb 100644 (file)
@@ -4154,9 +4154,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
        if (tracing_disabled)
                return -ENODEV;
 
+       if (trace_array_get(tr) < 0)
+               return -ENODEV;
+
        ret = seq_open(file, &show_traces_seq_ops);
-       if (ret)
+       if (ret) {
+               trace_array_put(tr);
                return ret;
+       }
 
        m = file->private_data;
        m->private = tr;
@@ -4164,6 +4169,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
        return 0;
 }
 
+static int show_traces_release(struct inode *inode, struct file *file)
+{
+       struct trace_array *tr = inode->i_private;
+
+       trace_array_put(tr);
+       return seq_release(inode, file);
+}
+
 static ssize_t
 tracing_write_stub(struct file *filp, const char __user *ubuf,
                   size_t count, loff_t *ppos)
@@ -4194,8 +4207,8 @@ static const struct file_operations tracing_fops = {
 static const struct file_operations show_traces_fops = {
        .open           = show_traces_open,
        .read           = seq_read,
-       .release        = seq_release,
        .llseek         = seq_lseek,
+       .release        = show_traces_release,
 };
 
 static ssize_t
index e6945b55c688e9e230c49522b9c43350a356c8af..f5b3bf0e69f615770a0050cbd02da2fe4424e995 100644 (file)
@@ -272,9 +272,11 @@ int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
                goto out;
        }
 
+       mutex_lock(&event_mutex);
        ret = perf_trace_event_init(tp_event, p_event);
        if (ret)
                destroy_local_trace_kprobe(tp_event);
+       mutex_unlock(&event_mutex);
 out:
        kfree(func);
        return ret;
@@ -282,8 +284,10 @@ out:
 
 void perf_kprobe_destroy(struct perf_event *p_event)
 {
+       mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
+       mutex_unlock(&event_mutex);
 
        destroy_local_trace_kprobe(p_event->tp_event);
 }
index 3f34cfb66a85f2742d368854ab5a4bf666a766f3..bdf104596d122150b3ae9d8aa849dece9fb4e519 100644 (file)
@@ -2526,6 +2526,8 @@ static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
                return NULL;
        }
 
+       alias->var_ref_idx = var_ref->var_ref_idx;
+
        return alias;
 }
 
index 1e6db9cbe4dc518b0f0a482cf1c7e5eeffa0a3c4..8030e24dbf1481bed0452d5cf63114917fbf7ba9 100644 (file)
@@ -150,7 +150,7 @@ void trace_hwlat_callback(bool enter)
                if (enter)
                        nmi_ts_start = time_get();
                else
-                       nmi_total_ts = time_get() - nmi_ts_start;
+                       nmi_total_ts += time_get() - nmi_ts_start;
        }
 
        if (enter)
@@ -256,6 +256,8 @@ static int get_sample(void)
                /* Keep a running maximum ever recorded hardware latency */
                if (sample > tr->max_latency)
                        tr->max_latency = sample;
+               if (outer_sample > tr->max_latency)
+                       tr->max_latency = outer_sample;
        }
 
 out:
index 3dea52f7be9c1d3faa324babd86f1360a5c3be68..46a910acce3f06dd294e1c719c27c26dea20a447 100644 (file)
@@ -570,7 +570,7 @@ config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
        int "Maximum kmemleak early log entries"
        depends on DEBUG_KMEMLEAK
        range 200 40000
-       default 400
+       default 16000
        help
          Kmemleak must track all the memory allocations to avoid
          reporting false positives. Since memory may be allocated or
index feea48fd1a0dd6ae7913b6fad19cc52e20664785..905027574e5d8041d1394dc4e5582061ed084e2f 100644 (file)
@@ -35,7 +35,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
        struct logic_pio_hwaddr *range;
        resource_size_t start;
        resource_size_t end;
-       resource_size_t mmio_sz = 0;
+       resource_size_t mmio_end = 0;
        resource_size_t iio_sz = MMIO_UPPER_LIMIT;
        int ret = 0;
 
@@ -46,7 +46,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
        end = new_range->hw_start + new_range->size;
 
        mutex_lock(&io_range_mutex);
-       list_for_each_entry_rcu(range, &io_range_list, list) {
+       list_for_each_entry(range, &io_range_list, list) {
                if (range->fwnode == new_range->fwnode) {
                        /* range already there */
                        goto end_register;
@@ -56,7 +56,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
                        /* for MMIO ranges we need to check for overlap */
                        if (start >= range->hw_start + range->size ||
                            end < range->hw_start) {
-                               mmio_sz += range->size;
+                               mmio_end = range->io_start + range->size;
                        } else {
                                ret = -EFAULT;
                                goto end_register;
@@ -69,16 +69,16 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
 
        /* range not registered yet, check for available space */
        if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
-               if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) {
+               if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
                        /* if it's too big check if 64K space can be reserved */
-                       if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
+                       if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
                                ret = -E2BIG;
                                goto end_register;
                        }
                        new_range->size = SZ_64K;
                        pr_warn("Requested IO range too big, new size set to 64K\n");
                }
-               new_range->io_start = mmio_sz;
+               new_range->io_start = mmio_end;
        } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
                if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
                        ret = -E2BIG;
@@ -98,6 +98,20 @@ end_register:
        return ret;
 }
 
+/**
+ * logic_pio_unregister_range - unregister a logical PIO range for a host
+ * @range: pointer to the IO range which has been already registered.
+ *
+ * Unregister a previously-registered IO range node.
+ */
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
+{
+       mutex_lock(&io_range_mutex);
+       list_del_rcu(&range->list);
+       mutex_unlock(&io_range_mutex);
+       synchronize_rcu();
+}
+
 /**
  * find_io_range_by_fwnode - find logical PIO range for given FW node
  * @fwnode: FW node handle associated with logical PIO range
@@ -108,26 +122,38 @@ end_register:
  */
 struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
 {
-       struct logic_pio_hwaddr *range;
+       struct logic_pio_hwaddr *range, *found_range = NULL;
 
+       rcu_read_lock();
        list_for_each_entry_rcu(range, &io_range_list, list) {
-               if (range->fwnode == fwnode)
-                       return range;
+               if (range->fwnode == fwnode) {
+                       found_range = range;
+                       break;
+               }
        }
-       return NULL;
+       rcu_read_unlock();
+
+       return found_range;
 }
 
 /* Return a registered range given an input PIO token */
 static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
 {
-       struct logic_pio_hwaddr *range;
+       struct logic_pio_hwaddr *range, *found_range = NULL;
 
+       rcu_read_lock();
        list_for_each_entry_rcu(range, &io_range_list, list) {
-               if (in_range(pio, range->io_start, range->size))
-                       return range;
+               if (in_range(pio, range->io_start, range->size)) {
+                       found_range = range;
+                       break;
+               }
        }
-       pr_err("PIO entry token %lx invalid\n", pio);
-       return NULL;
+       rcu_read_unlock();
+
+       if (!found_range)
+               pr_err("PIO entry token 0x%lx invalid\n", pio);
+
+       return found_range;
 }
 
 /**
@@ -180,14 +206,23 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
 {
        struct logic_pio_hwaddr *range;
 
+       rcu_read_lock();
        list_for_each_entry_rcu(range, &io_range_list, list) {
                if (range->flags != LOGIC_PIO_CPU_MMIO)
                        continue;
-               if (in_range(addr, range->hw_start, range->size))
-                       return addr - range->hw_start + range->io_start;
+               if (in_range(addr, range->hw_start, range->size)) {
+                       unsigned long cpuaddr;
+
+                       cpuaddr = addr - range->hw_start + range->io_start;
+
+                       rcu_read_unlock();
+                       return cpuaddr;
+               }
        }
-       pr_err("addr %llx not registered in io_range_list\n",
-              (unsigned long long) addr);
+       rcu_read_unlock();
+
+       pr_err("addr %pa not registered in io_range_list\n", &addr);
+
        return ~0UL;
 }
 
index 1db74eb098d0eb6e8123d07ed4be795a09bf82ce..121beb2f09307d7471aa0ce6238483d3a8d6cb30 100644 (file)
        BUG_ON(pad < 0 || pad >= nn);
 
        /* Does the caller provide the syndrome ? */
-       if (s != NULL)
-               goto decode;
+       if (s != NULL) {
+               for (i = 0; i < nroots; i++) {
+                       /* The syndrome is in index form,
+                        * so nn represents zero
+                        */
+                       if (s[i] != nn)
+                               goto decode;
+               }
+
+               /* syndrome is zero, no errors to correct  */
+               return 0;
+       }
 
        /* form the syndromes; i.e., evaluate data(x) at roots of
         * g(x) */
        if (no_eras > 0) {
                /* Init lambda to be the erasure locator polynomial */
                lambda[1] = alpha_to[rs_modnn(rs,
-                                             prim * (nn - 1 - eras_pos[0]))];
+                                       prim * (nn - 1 - (eras_pos[0] + pad)))];
                for (i = 1; i < no_eras; i++) {
-                       u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i]));
+                       u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad)));
                        for (j = i + 1; j > 0; j--) {
                                tmp = index_of[lambda[j - 1]];
                                if (tmp != nn) {
index 7c6096a7170486449736d82a37fbd50326ac169e..8c3036c37ba0eacdc5989cc49e55bac4ef921f9b 100644 (file)
@@ -652,17 +652,18 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
 {
        if (!miter->__remaining) {
                struct scatterlist *sg;
-               unsigned long pgoffset;
 
                if (!__sg_page_iter_next(&miter->piter))
                        return false;
 
                sg = miter->piter.sg;
-               pgoffset = miter->piter.sg_pgoffset;
 
-               miter->__offset = pgoffset ? 0 : sg->offset;
+               miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
+               miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
+               miter->__offset &= PAGE_SIZE - 1;
                miter->__remaining = sg->offset + sg->length -
-                               (pgoffset << PAGE_SHIFT) - miter->__offset;
+                                    (miter->piter.sg_pgoffset << PAGE_SHIFT) -
+                                    miter->__offset;
                miter->__remaining = min_t(unsigned long, miter->__remaining,
                                           PAGE_SIZE - miter->__offset);
        }
index fd48a15a0710c7c28a0e6fd7b2b7c4ad89a5312d..a74b1aae74618b8a6954d5d531f9a7767a5165b0 100644 (file)
@@ -894,8 +894,11 @@ static int __init test_firmware_init(void)
                return -ENOMEM;
 
        rc = __test_firmware_config_init();
-       if (rc)
+       if (rc) {
+               kfree(test_fw_config);
+               pr_err("could not init firmware test config: %d\n", rc);
                return rc;
+       }
 
        rc = misc_register(&test_fw_misc_device);
        if (rc) {
index fc680562d8b69fc80cf2dbe9fab71d9e65303662..7a4b6f6c5473c7cf43e6f86a21ff795dc2389486 100644 (file)
@@ -486,16 +486,17 @@ static int __init test_overflow_shift(void)
  * Deal with the various forms of allocator arguments. See comments above
  * the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
  */
-#define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL)
-#define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE)
+#define alloc_GFP               (GFP_KERNEL | __GFP_NOWARN)
+#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP)
+#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE)
 #define alloc000(alloc, arg, sz) alloc(sz)
 #define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE)
-#define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL)
+#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP)
 #define free0(free, arg, ptr)   free(ptr)
 #define free1(free, arg, ptr)   free(arg, ptr)
 
-/* Wrap around to 8K */
-#define TEST_SIZE              (9 << PAGE_SHIFT)
+/* Wrap around to 16K */
+#define TEST_SIZE              (5 * 4096)
 
 #define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
 static int __init test_ ## func (void *arg)                            \
index 0fcdb82dca8667604a0a0756803b7c7914fc3aa1..98a787e7a1fd6426cebd738e0e766c0dc8f502bc 100644 (file)
@@ -35,7 +35,7 @@ static __init int memset16_selftest(void)
 fail:
        kfree(p);
        if (i < 256)
-               return (i << 24) | (j << 16) | k;
+               return (i << 24) | (j << 16) | k | 0x8000;
        return 0;
 }
 
@@ -71,7 +71,7 @@ static __init int memset32_selftest(void)
 fail:
        kfree(p);
        if (i < 256)
-               return (i << 24) | (j << 16) | k;
+               return (i << 24) | (j << 16) | k | 0x8000;
        return 0;
 }
 
@@ -107,7 +107,7 @@ static __init int memset64_selftest(void)
 fail:
        kfree(p);
        if (i < 256)
-               return (i << 24) | (j << 16) | k;
+               return (i << 24) | (j << 16) | k | 0x8000;
        return 0;
 }
 
index 5939549c0e7bc19b6e4afb7ccf775e2c64b91922..9135c29add6243eee12d508bb67e3bd84f18b76d 100644 (file)
@@ -93,9 +93,9 @@
  *       goto errout;
  *   }
  *
- *   pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
+ *   pos = textsearch_find_continuous(conf, &state, example, strlen(example));
  *   if (pos != UINT_MAX)
- *       panic("Oh my god, dancing chickens at \%d\n", pos);
+ *       panic("Oh my god, dancing chickens at %d\n", pos);
  *
  *   textsearch_destroy(conf);
  */
index 476dfe13a701f76d4e2d29c30a6ce0e944e12bb8..4c2864270a39b0239f7a38d025763ba7d988a2ee 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -282,6 +282,12 @@ int __init cma_declare_contiguous(phys_addr_t base,
         */
        alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
                          max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
+       if (fixed && base & (alignment - 1)) {
+               ret = -EINVAL;
+               pr_err("Region at %pa must be aligned to %pa bytes\n",
+                       &base, &alignment);
+               goto err;
+       }
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);
@@ -312,6 +318,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
        if (limit == 0 || limit > memblock_end)
                limit = memblock_end;
 
+       if (base + size > limit) {
+               ret = -EINVAL;
+               pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
+                       &size, &base, &limit);
+               goto err;
+       }
+
        /* Reserve memory */
        if (fixed) {
                if (memblock_is_region_reserved(base, size) ||
index faca45ebe62dfbb58099661158334ab9ce0155f8..5079ddbec8f9e2fd23344019f01fcdca6f9a3968 100644 (file)
@@ -1540,6 +1540,17 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
        unsigned long end_pfn = zone_end_pfn(zone);
        const bool sync = cc->mode != MIGRATE_ASYNC;
 
+       /*
+        * These counters track activities during zone compaction.  Initialize
+        * them before compacting a new zone.
+        */
+       cc->total_migrate_scanned = 0;
+       cc->total_free_scanned = 0;
+       cc->nr_migratepages = 0;
+       cc->nr_freepages = 0;
+       INIT_LIST_HEAD(&cc->freepages);
+       INIT_LIST_HEAD(&cc->migratepages);
+
        cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
        ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
                                                        cc->classzone_idx);
@@ -1703,10 +1714,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 {
        enum compact_result ret;
        struct compact_control cc = {
-               .nr_freepages = 0,
-               .nr_migratepages = 0,
-               .total_migrate_scanned = 0,
-               .total_free_scanned = 0,
                .order = order,
                .gfp_mask = gfp_mask,
                .zone = zone,
@@ -1719,8 +1726,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
                .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
                .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
        };
-       INIT_LIST_HEAD(&cc.freepages);
-       INIT_LIST_HEAD(&cc.migratepages);
 
        ret = compact_zone(zone, &cc);
 
@@ -1819,8 +1824,6 @@ static void compact_node(int nid)
        struct zone *zone;
        struct compact_control cc = {
                .order = -1,
-               .total_migrate_scanned = 0,
-               .total_free_scanned = 0,
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
                .whole_zone = true,
@@ -1834,11 +1837,7 @@ static void compact_node(int nid)
                if (!populated_zone(zone))
                        continue;
 
-               cc.nr_freepages = 0;
-               cc.nr_migratepages = 0;
                cc.zone = zone;
-               INIT_LIST_HEAD(&cc.freepages);
-               INIT_LIST_HEAD(&cc.migratepages);
 
                compact_zone(zone, &cc);
 
@@ -1947,8 +1946,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
        struct zone *zone;
        struct compact_control cc = {
                .order = pgdat->kcompactd_max_order,
-               .total_migrate_scanned = 0,
-               .total_free_scanned = 0,
                .classzone_idx = pgdat->kcompactd_classzone_idx,
                .mode = MIGRATE_SYNC_LIGHT,
                .ignore_skip_hint = false,
@@ -1972,16 +1969,10 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                                                        COMPACT_CONTINUE)
                        continue;
 
-               cc.nr_freepages = 0;
-               cc.nr_migratepages = 0;
-               cc.total_migrate_scanned = 0;
-               cc.total_free_scanned = 0;
-               cc.zone = zone;
-               INIT_LIST_HEAD(&cc.freepages);
-               INIT_LIST_HEAD(&cc.migratepages);
-
                if (kthread_should_stop())
                        return;
+
+               cc.zone = zone;
                status = compact_zone(zone, &cc);
 
                if (status == COMPACT_SUCCESS) {
index 52517f28e6f4a69020cfc60867f5a57461c89fb8..287f3fa02e5ee84bcd86847c575d3180da94ac8a 100644 (file)
@@ -561,6 +561,28 @@ int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
 }
 EXPORT_SYMBOL(filemap_fdatawait_range);
 
+/**
+ * filemap_fdatawait_range_keep_errors - wait for writeback to complete
+ * @mapping:           address space structure to wait for
+ * @start_byte:                offset in bytes where the range starts
+ * @end_byte:          offset in bytes where the range ends (inclusive)
+ *
+ * Walk the list of under-writeback pages of the given address space in the
+ * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
+ * this function does not clear error status of the address space.
+ *
+ * Use this function if callers don't handle errors themselves.  Expected
+ * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
+ * fsfreeze(8)
+ */
+int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
+               loff_t start_byte, loff_t end_byte)
+{
+       __filemap_fdatawait_range(mapping, start_byte, end_byte);
+       return filemap_check_and_keep_errors(mapping);
+}
+EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
+
 /**
  * file_fdatawait_range - wait for writeback to complete
  * @file:              file pointing to address space structure to wait for
index caadd31714a544411a915babd954867453576f2b..f3088d25bd926278c439726359567dbd95e3c861 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -458,11 +458,14 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
                pgd = pgd_offset_k(address);
        else
                pgd = pgd_offset_gate(mm, address);
-       BUG_ON(pgd_none(*pgd));
+       if (pgd_none(*pgd))
+               return -EFAULT;
        p4d = p4d_offset(pgd, address);
-       BUG_ON(p4d_none(*p4d));
+       if (p4d_none(*p4d))
+               return -EFAULT;
        pud = pud_offset(p4d, address);
-       BUG_ON(pud_none(*pud));
+       if (pud_none(*pud))
+               return -EFAULT;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return -EFAULT;
@@ -1367,7 +1370,8 @@ static inline pte_t gup_get_pte(pte_t *ptep)
 }
 #endif
 
-static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
+static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
+                                           struct page **pages)
 {
        while ((*nr) - nr_start) {
                struct page *page = pages[--(*nr)];
index 6fad1864ba03bc1f0dd6be5f77107d288d490934..09ce8528bbdd9012420e93de87c28c7cd547c5de 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/page_idle.h>
 #include <linux/shmem_fs.h>
 #include <linux/oom.h>
+#include <linux/page_owner.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -2477,6 +2478,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
        }
 
        ClearPageCompound(head);
+
+       split_page_owner(head, HPAGE_PMD_ORDER);
+
        /* See comment in __split_huge_page_tail() */
        if (PageAnon(head)) {
                /* Additional pin to radix tree of swap cache */
index 57053affaad20d053d615b282dcd41fdbf14c742..6f4ce9547658dd7cb1a136304e4f07e9da4280b0 100644 (file)
@@ -1073,11 +1073,10 @@ static bool pfn_range_valid_gigantic(struct zone *z,
        struct page *page;
 
        for (i = start_pfn; i < end_pfn; i++) {
-               if (!pfn_valid(i))
+               page = pfn_to_online_page(i);
+               if (!page)
                        return false;
 
-               page = pfn_to_page(i);
-
                if (page_zone(page) != z)
                        return false;
 
index 72e3fb3bb03775490be389a40c3d2fa343229f4c..5eeabece0c17899e66850157062c8d925c9425ce 100644 (file)
 /* GFP bitmask for kmemleak internal allocations */
 #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-                                __GFP_NOWARN | __GFP_NOFAIL)
+                                __GFP_NOWARN)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
@@ -576,7 +576,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
        if (in_irq()) {
                object->pid = 0;
                strncpy(object->comm, "hardirq", sizeof(object->comm));
-       } else if (in_softirq()) {
+       } else if (in_serving_softirq()) {
                object->pid = 0;
                strncpy(object->comm, "softirq", sizeof(object->comm));
        } else {
index 7e7cc0cd89fe84fe8567292f17c9634359898fe6..65da189a433b78ba6fb77582b1d5f666b4fc0891 100644 (file)
@@ -1037,26 +1037,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
                css_put(&prev->css);
 }
 
-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
+                                       struct mem_cgroup *dead_memcg)
 {
-       struct mem_cgroup *memcg = dead_memcg;
        struct mem_cgroup_reclaim_iter *iter;
        struct mem_cgroup_per_node *mz;
        int nid;
        int i;
 
-       for (; memcg; memcg = parent_mem_cgroup(memcg)) {
-               for_each_node(nid) {
-                       mz = mem_cgroup_nodeinfo(memcg, nid);
-                       for (i = 0; i <= DEF_PRIORITY; i++) {
-                               iter = &mz->iter[i];
-                               cmpxchg(&iter->position,
-                                       dead_memcg, NULL);
-                       }
+       for_each_node(nid) {
+               mz = mem_cgroup_nodeinfo(from, nid);
+               for (i = 0; i <= DEF_PRIORITY; i++) {
+                       iter = &mz->iter[i];
+                       cmpxchg(&iter->position,
+                               dead_memcg, NULL);
                }
        }
 }
 
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+       struct mem_cgroup *memcg = dead_memcg;
+       struct mem_cgroup *last;
+
+       do {
+               __invalidate_reclaim_iterators(memcg, dead_memcg);
+               last = memcg;
+       } while ((memcg = parent_mem_cgroup(memcg)));
+
+       /*
+        * When cgruop1 non-hierarchy mode is used,
+        * parent_mem_cgroup() does not walk all the way up to the
+        * cgroup root (root_mem_cgroup). So we have to handle
+        * dead_memcg from cgroup root separately.
+        */
+       if (last != root_mem_cgroup)
+               __invalidate_reclaim_iterators(root_mem_cgroup,
+                                               dead_memcg);
+}
+
 /**
  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
  * @memcg: hierarchy root
@@ -2618,6 +2637,16 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
 
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
            !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
+
+               /*
+                * Enforce __GFP_NOFAIL allocation because callers are not
+                * prepared to see failures and likely do not have any failure
+                * handling code.
+                */
+               if (gfp & __GFP_NOFAIL) {
+                       page_counter_charge(&memcg->kmem, nr_pages);
+                       return 0;
+               }
                cancel_charge(memcg, nr_pages);
                return -ENOMEM;
        }
index 2bb5e257080e98a48dc284edcfc82accab949ecc..5859705dafe19f97a9f866ca308df05d2d2f2331 100644 (file)
@@ -34,11 +34,12 @@ static void memfd_tag_pins(struct address_space *mapping)
        void __rcu **slot;
        pgoff_t start;
        struct page *page;
+       unsigned int tagged = 0;
 
        lru_add_drain();
        start = 0;
-       rcu_read_lock();
 
+       xa_lock_irq(&mapping->i_pages);
        radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
                page = radix_tree_deref_slot(slot);
                if (!page || radix_tree_exception(page)) {
@@ -47,18 +48,19 @@ static void memfd_tag_pins(struct address_space *mapping)
                                continue;
                        }
                } else if (page_count(page) - page_mapcount(page) > 1) {
-                       xa_lock_irq(&mapping->i_pages);
                        radix_tree_tag_set(&mapping->i_pages, iter.index,
                                           MEMFD_TAG_PINNED);
-                       xa_unlock_irq(&mapping->i_pages);
                }
 
-               if (need_resched()) {
-                       slot = radix_tree_iter_resume(slot, &iter);
-                       cond_resched_rcu();
-               }
+               if (++tagged % 1024)
+                       continue;
+
+               slot = radix_tree_iter_resume(slot, &iter);
+               xa_unlock_irq(&mapping->i_pages);
+               cond_resched();
+               xa_lock_irq(&mapping->i_pages);
        }
-       rcu_read_unlock();
+       xa_unlock_irq(&mapping->i_pages);
 }
 
 /*
index 2994ceb2e7b0428c902bd2172012171738cbdf97..148fdd929a195fd99e58b62edea0e7d1f06f86bd 100644 (file)
@@ -202,7 +202,6 @@ struct to_kill {
        struct task_struct *tsk;
        unsigned long addr;
        short size_shift;
-       char addr_valid;
 };
 
 /*
@@ -327,22 +326,27 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
                }
        }
        tk->addr = page_address_in_vma(p, vma);
-       tk->addr_valid = 1;
        if (is_zone_device_page(p))
                tk->size_shift = dev_pagemap_mapping_shift(p, vma);
        else
                tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
 
        /*
-        * In theory we don't have to kill when the page was
-        * munmaped. But it could be also a mremap. Since that's
-        * likely very rare kill anyways just out of paranoia, but use
-        * a SIGKILL because the error is not contained anymore.
+        * Send SIGKILL if "tk->addr == -EFAULT". Also, as
+        * "tk->size_shift" is always non-zero for !is_zone_device_page(),
+        * so "tk->size_shift == 0" effectively checks no mapping on
+        * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
+        * to a process' address space, it's possible not all N VMAs
+        * contain mappings for the page, but at least one VMA does.
+        * Only deliver SIGBUS with payload derived from the VMA that
+        * has a mapping for the page.
         */
-       if (tk->addr == -EFAULT || tk->size_shift == 0) {
+       if (tk->addr == -EFAULT) {
                pr_info("Memory failure: Unable to find user space address %lx in %s\n",
                        page_to_pfn(p), tsk->comm);
-               tk->addr_valid = 0;
+       } else if (tk->size_shift == 0) {
+               kfree(tk);
+               return;
        }
        get_task_struct(tsk);
        tk->tsk = tsk;
@@ -369,7 +373,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
                         * make sure the process doesn't catch the
                         * signal and then access the memory. Just kill it.
                         */
-                       if (fail || tk->addr_valid == 0) {
+                       if (fail || tk->addr == -EFAULT) {
                                pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
                                       pfn, tk->tsk->comm, tk->tsk->pid);
                                do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
@@ -1258,17 +1262,19 @@ int memory_failure(unsigned long pfn, int flags)
        if (!sysctl_memory_failure_recovery)
                panic("Memory failure on page %lx", pfn);
 
-       if (!pfn_valid(pfn)) {
+       p = pfn_to_online_page(pfn);
+       if (!p) {
+               if (pfn_valid(pfn)) {
+                       pgmap = get_dev_pagemap(pfn, NULL);
+                       if (pgmap)
+                               return memory_failure_dev_pagemap(pfn, flags,
+                                                                 pgmap);
+               }
                pr_err("Memory failure: %#lx: memory outside kernel control\n",
                        pfn);
                return -ENXIO;
        }
 
-       pgmap = get_dev_pagemap(pfn, NULL);
-       if (pgmap)
-               return memory_failure_dev_pagemap(pfn, flags, pgmap);
-
-       p = pfn_to_page(pfn);
        if (PageHuge(p))
                return memory_failure_hugetlb(pfn, flags);
        if (TestSetPageHWPoison(p)) {
index e0010cb870e050ff40f21ef55d2b9a5651910781..fb5655b518c99cd38cfbc6302ec9ec8220d868a7 100644 (file)
@@ -4491,7 +4491,9 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
        void *old_buf = buf;
        int write = gup_flags & FOLL_WRITE;
 
-       down_read(&mm->mmap_sem);
+       if (down_read_killable(&mm->mmap_sem))
+               return 0;
+
        /* ignore errors, just check how much was successfully transferred */
        while (len) {
                int bytes, ret, offset;
index 62f945ea3e362f109fb0dce4221c73618e33d681..70298b635b59349d816e8a29503d63490e0c1108 100644 (file)
@@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
        },
 };
 
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags);
 
 struct queue_pages {
@@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page,
 }
 
 /*
- * queue_pages_pmd() has three possible return values:
- * 1 - pages are placed on the right node or queued successfully.
- * 0 - THP was split.
- * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
- *        page was already on a node that does not follow the policy.
+ * queue_pages_pmd() has four possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * 2 - THP was split.
+ * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
+ *        existing page was already on a node that does not follow the
+ *        policy.
  */
 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
@@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
        if (is_huge_zero_page(page)) {
                spin_unlock(ptl);
                __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+               ret = 2;
                goto out;
        }
-       if (!queue_pages_required(page, qp)) {
-               ret = 1;
+       if (!queue_pages_required(page, qp))
                goto unlock;
-       }
 
-       ret = 1;
        flags = qp->flags;
        /* go to thp migration */
        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-               if (!vma_migratable(walk->vma)) {
-                       ret = -EIO;
+               if (!vma_migratable(walk->vma) ||
+                   migrate_page_add(page, qp->pagelist, flags)) {
+                       ret = 1;
                        goto unlock;
                }
-
-               migrate_page_add(page, qp->pagelist, flags);
        } else
                ret = -EIO;
 unlock:
@@ -479,6 +479,13 @@ out:
 /*
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
+ *
+ * queue_pages_pte_range() has three possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
+ *        on a node that does not follow the policy.
  */
 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                        unsigned long end, struct mm_walk *walk)
@@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
        struct queue_pages *qp = walk->private;
        unsigned long flags = qp->flags;
        int ret;
+       bool has_unmovable = false;
        pte_t *pte;
        spinlock_t *ptl;
 
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
-               if (ret > 0)
-                       return 0;
-               else if (ret < 0)
+               if (ret != 2)
                        return ret;
        }
+       /* THP was split, fall through to pte walk */
 
        if (pmd_trans_unstable(pmd))
                return 0;
@@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                if (!queue_pages_required(page, qp))
                        continue;
                if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-                       if (!vma_migratable(vma))
+                       /* MPOL_MF_STRICT must be specified if we get here */
+                       if (!vma_migratable(vma)) {
+                               has_unmovable = true;
                                break;
-                       migrate_page_add(page, qp->pagelist, flags);
+                       }
+
+                       /*
+                        * Do not abort immediately since there may be
+                        * temporary off LRU pages in the range.  Still
+                        * need migrate other LRU pages.
+                        */
+                       if (migrate_page_add(page, qp->pagelist, flags))
+                               has_unmovable = true;
                } else
                        break;
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
+
+       if (has_unmovable)
+               return 1;
+
        return addr != end ? -EIO : 0;
 }
 
@@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
  *
  * If pages found in a given range are on a set of nodes (determined by
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
- * passed via @private.)
+ * passed via @private.
+ *
+ * queue_pages_range() has three possible return values:
+ * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * 0 - queue pages successfully or no misplaced page.
+ * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
  */
 static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -926,7 +953,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 /*
  * page migration, thp tail pages can be passed.
  */
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
 {
        struct page *head = compound_head(page);
@@ -939,8 +966,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
                        mod_node_page_state(page_pgdat(head),
                                NR_ISOLATED_ANON + page_is_file_cache(head),
                                hpage_nr_pages(head));
+               } else if (flags & MPOL_MF_STRICT) {
+                       /*
+                        * Non-movable page may reach here.  And, there may be
+                        * temporary off LRU pages or non-LRU movable pages.
+                        * Treat them as unmovable pages since they can't be
+                        * isolated, so they can't be moved at the moment.  It
+                        * should return -EIO for this case too.
+                        */
+                       return -EIO;
                }
        }
+
+       return 0;
 }
 
 /* page allocation callback for NUMA node migration */
@@ -1143,9 +1181,10 @@ static struct page *new_page(struct page *page, unsigned long start)
 }
 #else
 
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
 {
+       return -EIO;
 }
 
 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
@@ -1168,6 +1207,7 @@ static long do_mbind(unsigned long start, unsigned long len,
        struct mempolicy *new;
        unsigned long end;
        int err;
+       int ret;
        LIST_HEAD(pagelist);
 
        if (flags & ~(unsigned long)MPOL_MF_VALID)
@@ -1229,10 +1269,15 @@ static long do_mbind(unsigned long start, unsigned long len,
        if (err)
                goto mpol_out;
 
-       err = queue_pages_range(mm, start, end, nmask,
+       ret = queue_pages_range(mm, start, end, nmask,
                          flags | MPOL_MF_INVERT, &pagelist);
-       if (!err)
-               err = mbind_range(mm, start, end, new);
+
+       if (ret < 0) {
+               err = -EIO;
+               goto up_out;
+       }
+
+       err = mbind_range(mm, start, end, new);
 
        if (!err) {
                int nr_failed = 0;
@@ -1245,13 +1290,14 @@ static long do_mbind(unsigned long start, unsigned long len,
                                putback_movable_pages(&pagelist);
                }
 
-               if (nr_failed && (flags & MPOL_MF_STRICT))
+               if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
                        err = -EIO;
        } else
                putback_movable_pages(&pagelist);
 
+up_out:
        up_write(&mm->mmap_sem);
- mpol_out:
+mpol_out:
        mpol_put(new);
        return err;
 }
index b2ea7d1e6f248b585fea8af61a6c4bb015e7fe7f..0c48191a90368594949353659d5e3307eb0e6414 100644 (file)
@@ -2328,16 +2328,13 @@ next:
  */
 static void migrate_vma_collect(struct migrate_vma *migrate)
 {
-       struct mm_walk mm_walk;
-
-       mm_walk.pmd_entry = migrate_vma_collect_pmd;
-       mm_walk.pte_entry = NULL;
-       mm_walk.pte_hole = migrate_vma_collect_hole;
-       mm_walk.hugetlb_entry = NULL;
-       mm_walk.test_walk = NULL;
-       mm_walk.vma = migrate->vma;
-       mm_walk.mm = migrate->vma->vm_mm;
-       mm_walk.private = migrate;
+       struct mm_walk mm_walk = {
+               .pmd_entry = migrate_vma_collect_pmd,
+               .pte_hole = migrate_vma_collect_hole,
+               .vma = migrate->vma,
+               .mm = migrate->vma->vm_mm,
+               .private = migrate,
+       };
 
        mmu_notifier_invalidate_range_start(mm_walk.mm,
                                            migrate->start,
index 82bb1a939c0e496affc2849c1e1af795fe3d5d96..06dedb1755727fe0bf36ed0be0fb3cca9dbee3ea 100644 (file)
@@ -316,7 +316,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
         * thanks to mm_take_all_locks().
         */
        spin_lock(&mm->mmu_notifier_mm->lock);
-       hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
+       hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
        spin_unlock(&mm->mmu_notifier_mm->lock);
 
        mm_drop_all_locks(mm);
index e4aac33216aec0f05022f17578b6dee03e25a4e4..1d63ecfc98c5d874ed7bb56b7a9c5ab1dac26d71 100644 (file)
@@ -1779,7 +1779,8 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
        struct vm_area_struct *vma;
        int write = gup_flags & FOLL_WRITE;
 
-       down_read(&mm->mmap_sem);
+       if (down_read_killable(&mm->mmap_sem))
+               return 0;
 
        /* the access must start within one of the target process's mappings */
        vma = find_vma(mm, addr);
index dbddb7a409dd8a314725657832dd766ee1fce96e..a581fe2a2f1fe2736022d7855ef8b90ba2c7fd3f 100644 (file)
@@ -1089,9 +1089,10 @@ bool out_of_memory(struct oom_control *oc)
         * The OOM killer does not compensate for IO-less reclaim.
         * pagefault_out_of_memory lost its gfp context so we have to
         * make sure exclude 0 mask - all other users should have at least
-        * ___GFP_DIRECT_RECLAIM to get here.
+        * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
+        * invoke the OOM killer even if it is a GFP_NOFS allocation.
         */
-       if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
+       if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
                return true;
 
        /*
index d80adfe702d3b85f754589135f3695b4e3394c72..63b1053f5b41fbd4afc870c911a8fc696d6b7d16 100644 (file)
@@ -273,7 +273,8 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
-               if (!pfn_valid(pfn)) {
+               page = pfn_to_online_page(pfn);
+               if (!page) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }
@@ -281,13 +282,13 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);
 
-               page = pfn_to_page(pfn);
                pageblock_mt = get_pageblock_migratetype(page);
 
                for (; pfn < block_end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                continue;
 
+                       /* The pageblock is online, no need to recheck. */
                        page = pfn_to_page(pfn);
 
                        if (page_zone(page) != zone)
index f048c2651954b672a5c16dfa85aef544bcdb4c0d..1bd94ea62f7f1fcbb33949a78a31f8555c4f27fd 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1467,7 +1467,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        /*
                         * No need to invalidate here it will synchronize on
                         * against the special swap migration pte.
+                        *
+                        * The assignment to subpage above was computed from a
+                        * swap PTE which results in an invalid pointer.
+                        * Since only PAGE_SIZE pages can currently be
+                        * migrated, just set it to page. This will need to be
+                        * changed when hugepage migrations to device private
+                        * memory are supported.
                         */
+                       subpage = page;
                        goto discard;
                }
 
index 09c0e24a06d81cb94488503195412d28ee65e039..9c3937c5ce384d93868bf70877fe5b3afb8b1719 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4797,7 +4797,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                }
        }
 
-       get_online_mems();
+       /*
+        * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
+        * already held which will conflict with an existing lock order:
+        *
+        * mem_hotplug_lock->slab_mutex->kernfs_mutex
+        *
+        * We don't really need mem_hotplug_lock (to hold off
+        * slab_mem_going_offline_callback) here because slab's memory hot
+        * unplug code doesn't destroy the kmem_cache->node[] data.
+        */
+
 #ifdef CONFIG_SLUB_DEBUG
        if (flags & SO_ALL) {
                struct kmem_cache_node *n;
@@ -4838,7 +4848,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                        x += sprintf(buf + x, " N%d=%lu",
                                        node, nodes[node]);
 #endif
-       put_online_mems();
        kfree(nodes);
        return x + sprintf(buf + x, "\n");
 }
index a3fc028e338e5362f7f9bdea5433d287f1e101e2..45fdbfb6b2a608857165d6efc625c01b4ab92438 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -740,15 +740,20 @@ void release_pages(struct page **pages, int nr)
                if (is_huge_zero_page(page))
                        continue;
 
-               /* Device public page can not be huge page */
-               if (is_device_public_page(page)) {
+               if (is_zone_device_page(page)) {
                        if (locked_pgdat) {
                                spin_unlock_irqrestore(&locked_pgdat->lru_lock,
                                                       flags);
                                locked_pgdat = NULL;
                        }
-                       put_devmap_managed_page(page);
-                       continue;
+                       /*
+                        * ZONE_DEVICE pages that return 'false' from
+                        * put_devmap_managed_page() do not require special
+                        * processing, and instead, expect a call to
+                        * put_page_testzero().
+                        */
+                       if (put_devmap_managed_page(page))
+                               continue;
                }
 
                page = compound_head(page);
index 14faadcedd06cb01611a4c8b9897fecc3109af87..e81d11715d95b34d1e99e57dc0fd398e9a9e0cbe 100644 (file)
@@ -15,6 +15,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
@@ -151,7 +152,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
                                       bool to_user)
 {
        /* Reject if object wraps past end of memory. */
-       if (ptr + n < ptr)
+       if (ptr + (n - 1) < ptr)
                usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
 
        /* Reject if NULL or ZERO-allocation. */
@@ -231,7 +232,12 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
        if (!virt_addr_valid(ptr))
                return;
 
-       page = virt_to_head_page(ptr);
+       /*
+        * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
+        * highmem page or fallback to virt_to_page(). The following
+        * is effectively a highmem-aware virt_to_head_page().
+        */
+       page = compound_head(kmap_to_page((void *)ptr));
 
        if (PageSlab(page)) {
                /* Check slab allocator for flags and size. */
index a46ec261a44e8531088e16877c323342886e00f0..d8e877365f9f524d9149c9edb4bca38e0332683f 100644 (file)
@@ -1751,6 +1751,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
        if (!addr)
                return NULL;
 
+       /*
+        * First make sure the mappings are removed from all page-tables
+        * before they are freed.
+        */
+       vmalloc_sync_all();
+
        /*
         * In this function, newly allocated vm_struct has VM_UNINITIALIZED
         * flag. It means that vm_struct is not fully initialized.
@@ -2296,6 +2302,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
 /*
  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
  * have one.
+ *
+ * The purpose of this function is to make sure the vmalloc area
+ * mappings are identical in all page-tables in the system.
  */
 void __weak vmalloc_sync_all(void)
 {
index 4854584ec436ef06601e3f4146666a9ef6e426dd..39021133cec8604700a30f72f0e63cdce02756f0 100644 (file)
@@ -358,6 +358,9 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
  * "hierarchy" or "local").
  *
  * To be used as memcg event method.
+ *
+ * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could
+ * not be parsed.
  */
 int vmpressure_register_event(struct mem_cgroup *memcg,
                              struct eventfd_ctx *eventfd, const char *args)
@@ -365,7 +368,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
        struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
        struct vmpressure_event *ev;
        enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
-       enum vmpressure_levels level = -1;
+       enum vmpressure_levels level;
        char *spec, *spec_orig;
        char *token;
        int ret = 0;
@@ -378,20 +381,18 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
 
        /* Find required level */
        token = strsep(&spec, ",");
-       level = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
-       if (level < 0) {
-               ret = level;
+       ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
+       if (ret < 0)
                goto out;
-       }
+       level = ret;
 
        /* Find optional mode */
        token = strsep(&spec, ",");
        if (token) {
-               mode = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
-               if (mode < 0) {
-                       ret = mode;
+               ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
+               if (ret < 0)
                        goto out;
-               }
+               mode = ret;
        }
 
        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -407,6 +408,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
        mutex_lock(&vmpr->events_lock);
        list_add(&ev->node, &vmpr->events);
        mutex_unlock(&vmpr->events_lock);
+       ret = 0;
 out:
        kfree(spec_orig);
        return ret;
index e42f44cf7b435dd04d932adef3bca6449aa01ed4..b37610c0eac626158bae78a2b2c202d3dfbe793d 100644 (file)
@@ -670,7 +670,14 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
        unsigned long ret, freed = 0;
        struct shrinker *shrinker;
 
-       if (!mem_cgroup_is_root(memcg))
+       /*
+        * The root memcg might be allocated even though memcg is disabled
+        * via "cgroup_disable=memory" boot parameter.  This could make
+        * mem_cgroup_is_root() return false, then just run memcg slab
+        * shrink, but skip global shrink.  This may result in premature
+        * oom.
+        */
+       if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
                return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
 
        if (!down_read_trylock(&shrinker_rwsem))
@@ -2190,7 +2197,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
  *   10TB     320        32GB
  */
 static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
-                                struct scan_control *sc, bool actual_reclaim)
+                                struct scan_control *sc, bool trace)
 {
        enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@@ -2216,7 +2223,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
         * rid of the stale workingset quickly.
         */
        refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
-       if (file && actual_reclaim && lruvec->refaults != refaults) {
+       if (file && lruvec->refaults != refaults) {
                inactive_ratio = 0;
        } else {
                gb = (inactive + active) >> (30 - PAGE_SHIFT);
@@ -2226,7 +2233,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
                        inactive_ratio = 1;
        }
 
-       if (actual_reclaim)
+       if (trace)
                trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx,
                        lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
                        lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
index 9da65552e7ca72b89397a38c2b7347e3bdfbf030..4b9063d12b932dccb58a1ce6409b8b7b3795fcf2 100644 (file)
@@ -53,6 +53,7 @@
 #include <linux/zpool.h>
 #include <linux/mount.h>
 #include <linux/migrate.h>
+#include <linux/wait.h>
 #include <linux/pagemap.h>
 #include <linux/fs.h>
 
@@ -267,6 +268,10 @@ struct zs_pool {
 #ifdef CONFIG_COMPACTION
        struct inode *inode;
        struct work_struct free_work;
+       /* A wait queue for when migration races with async_free_zspage() */
+       struct wait_queue_head migration_wait;
+       atomic_long_t isolated_pages;
+       bool destroying;
 #endif
 };
 
@@ -1882,6 +1887,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
        zspage->isolated--;
 }
 
+static void putback_zspage_deferred(struct zs_pool *pool,
+                                   struct size_class *class,
+                                   struct zspage *zspage)
+{
+       enum fullness_group fg;
+
+       fg = putback_zspage(class, zspage);
+       if (fg == ZS_EMPTY)
+               schedule_work(&pool->free_work);
+
+}
+
+static inline void zs_pool_dec_isolated(struct zs_pool *pool)
+{
+       VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
+       atomic_long_dec(&pool->isolated_pages);
+       /*
+        * There's no possibility of racing, since wait_for_isolated_drain()
+        * checks the isolated count under &class->lock after enqueuing
+        * on migration_wait.
+        */
+       if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
+               wake_up_all(&pool->migration_wait);
+}
+
 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
                                struct page *newpage, struct page *oldpage)
 {
@@ -1951,6 +1981,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
         */
        if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
                get_zspage_mapping(zspage, &class_idx, &fullness);
+               atomic_long_inc(&pool->isolated_pages);
                remove_zspage(class, zspage, fullness);
        }
 
@@ -2050,8 +2081,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
         * Page migration is done so let's putback isolated zspage to
         * the list if @page is final isolated subpage in the zspage.
         */
-       if (!is_zspage_isolated(zspage))
-               putback_zspage(class, zspage);
+       if (!is_zspage_isolated(zspage)) {
+               /*
+                * We cannot race with zs_destroy_pool() here because we wait
+                * for isolation to hit zero before we start destroying.
+                * Also, we ensure that everyone can see pool->destroying before
+                * we start waiting.
+                */
+               putback_zspage_deferred(pool, class, zspage);
+               zs_pool_dec_isolated(pool);
+       }
 
        reset_page(page);
        put_page(page);
@@ -2097,13 +2136,12 @@ static void zs_page_putback(struct page *page)
        spin_lock(&class->lock);
        dec_zspage_isolation(zspage);
        if (!is_zspage_isolated(zspage)) {
-               fg = putback_zspage(class, zspage);
                /*
                 * Due to page_lock, we cannot free zspage immediately
                 * so let's defer.
                 */
-               if (fg == ZS_EMPTY)
-                       schedule_work(&pool->free_work);
+               putback_zspage_deferred(pool, class, zspage);
+               zs_pool_dec_isolated(pool);
        }
        spin_unlock(&class->lock);
 }
@@ -2127,8 +2165,36 @@ static int zs_register_migration(struct zs_pool *pool)
        return 0;
 }
 
+static bool pool_isolated_are_drained(struct zs_pool *pool)
+{
+       return atomic_long_read(&pool->isolated_pages) == 0;
+}
+
+/* Function for resolving migration */
+static void wait_for_isolated_drain(struct zs_pool *pool)
+{
+
+       /*
+        * We're in the process of destroying the pool, so there are no
+        * active allocations. zs_page_isolate() fails for completely free
+        * zspages, so we need only wait for the zs_pool's isolated
+        * count to hit zero.
+        */
+       wait_event(pool->migration_wait,
+                  pool_isolated_are_drained(pool));
+}
+
 static void zs_unregister_migration(struct zs_pool *pool)
 {
+       pool->destroying = true;
+       /*
+        * We need a memory barrier here to ensure global visibility of
+        * pool->destroying. Thus pool->isolated pages will either be 0 in which
+        * case we don't care, or it will be > 0 and pool->destroying will
+        * ensure that we wake up once isolation hits 0.
+        */
+       smp_mb();
+       wait_for_isolated_drain(pool); /* This can block */
        flush_work(&pool->free_work);
        iput(pool->inode);
 }
@@ -2366,6 +2432,10 @@ struct zs_pool *zs_create_pool(const char *name)
        if (!pool->name)
                goto err;
 
+#ifdef CONFIG_COMPACTION
+       init_waitqueue_head(&pool->migration_wait);
+#endif
+
        if (create_cache(pool))
                goto err;
 
index b615aae5a0f8175852f9fa1abf0f188858b5f308..d62f83f93d7bbf89549d62ec126aa398f965e710 100644 (file)
@@ -296,6 +296,7 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size)
 
        p9pdu_reset(&req->tc);
        p9pdu_reset(&req->rc);
+       req->t_err = 0;
        req->status = REQ_STATUS_ALLOC;
        init_waitqueue_head(&req->wq);
        INIT_LIST_HEAD(&req->req_list);
index eb596c2ed546ca5555a2100426eebb67205d99b1..849336211c79b99c4c6b691becf955e598d80ed5 100644 (file)
@@ -782,10 +782,16 @@ static struct p9_trans_module p9_virtio_trans = {
 /* The standard init function */
 static int __init p9_virtio_init(void)
 {
+       int rc;
+
        INIT_LIST_HEAD(&virtio_chan_list);
 
        v9fs_register_trans(&p9_virtio_trans);
-       return register_virtio_driver(&p9_virtio_drv);
+       rc = register_virtio_driver(&p9_virtio_drv);
+       if (rc)
+               v9fs_unregister_trans(&p9_virtio_trans);
+
+       return rc;
 }
 
 static void __exit p9_virtio_cleanup(void)
index e2fbf3677b9baf3fa99ba98485f73caf0118f249..9daab0dd833b31cbb663b7756970a545cc59f3cf 100644 (file)
@@ -530,13 +530,19 @@ static struct xenbus_driver xen_9pfs_front_driver = {
 
 static int p9_trans_xen_init(void)
 {
+       int rc;
+
        if (!xen_domain())
                return -ENODEV;
 
        pr_info("Initialising Xen transport for 9pfs\n");
 
        v9fs_register_trans(&p9_xen_trans);
-       return xenbus_register_frontend(&xen_9pfs_front_driver);
+       rc = xenbus_register_frontend(&xen_9pfs_front_driver);
+       if (rc)
+               v9fs_unregister_trans(&p9_xen_trans);
+
+       return rc;
 }
 module_init(p9_trans_xen_init);
 
index 795fbc6c06aa7a9e7078aafafad97024373afeb3..9abb18fffbc31028a674d0ae1b302d479e5165e7 100644 (file)
@@ -1028,6 +1028,11 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol,
         */
        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                goto out;
+
+       rc = -EPERM;
+       if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
+               goto out;
+
        rc = -ENOMEM;
        sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern);
        if (!sk)
index 5d01edf8d819e89aec03df70e9f3bcf6340314cb..44ec492f3dc2f42e059939344cab4064ffbca547 100644 (file)
@@ -858,6 +858,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
                break;
 
        case SOCK_RAW:
+               if (!capable(CAP_NET_RAW))
+                       return -EPERM;
                break;
        default:
                return -ESOCKTNOSUPPORT;
index 73bf6a93a3cf1141a34657bf1284893199e04db9..36f244125d24c800d35249af7639d39a516588d4 100644 (file)
@@ -463,17 +463,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
  * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm_packet: potential OGM in buffer
  *
  * Return: true if there is enough space for another OGM, false otherwise.
  */
-static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
-                                     __be16 tvlv_len)
+static bool
+batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+                         const struct batadv_ogm_packet *ogm_packet)
 {
        int next_buff_pos = 0;
 
-       next_buff_pos += buff_pos + BATADV_OGM_HLEN;
-       next_buff_pos += ntohs(tvlv_len);
+       /* check if there is enough space for the header */
+       next_buff_pos += buff_pos + sizeof(*ogm_packet);
+       if (next_buff_pos > packet_len)
+               return false;
+
+       /* check if there is enough space for the optional TVLV */
+       next_buff_pos += ntohs(ogm_packet->tvlv_len);
 
        return (next_buff_pos <= packet_len) &&
               (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -501,7 +507,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
 
        /* adjust all flags and log packets */
        while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
-                                        batadv_ogm_packet->tvlv_len)) {
+                                        batadv_ogm_packet)) {
                /* we might have aggregated direct link packets with an
                 * ordinary base packet
                 */
@@ -1852,7 +1858,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
 
        /* unpack the aggregated packets and process them one by one */
        while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
-                                        ogm_packet->tvlv_len)) {
+                                        ogm_packet)) {
                batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
 
                ogm_offset += BATADV_OGM_HLEN;
@@ -2485,7 +2491,7 @@ batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1,
        return ret;
 }
 
-static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
+static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface)
 {
        /* begin scheduling originator messages on that interface */
        batadv_iv_ogm_schedule(hard_iface);
@@ -2825,8 +2831,8 @@ unlock:
 static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
        .name = "BATMAN_IV",
        .iface = {
-               .activate = batadv_iv_iface_activate,
                .enable = batadv_iv_ogm_iface_enable,
+               .enabled = batadv_iv_iface_enabled,
                .disable = batadv_iv_ogm_iface_disable,
                .update_mac = batadv_iv_ogm_iface_update_mac,
                .primary_set = batadv_iv_ogm_primary_iface_set,
index 2948b41b06d47c0ee32649fa410b323f39c36151..d241ccc0ca0278173853512c8aa4bfb8b041f996 100644 (file)
@@ -643,17 +643,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
  * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm2_packet: potential OGM2 in buffer
  *
  * Return: true if there is enough space for another OGM, false otherwise.
  */
-static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
-                                    __be16 tvlv_len)
+static bool
+batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
+                        const struct batadv_ogm2_packet *ogm2_packet)
 {
        int next_buff_pos = 0;
 
-       next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
-       next_buff_pos += ntohs(tvlv_len);
+       /* check if there is enough space for the header */
+       next_buff_pos += buff_pos + sizeof(*ogm2_packet);
+       if (next_buff_pos > packet_len)
+               return false;
+
+       /* check if there is enough space for the optional TVLV */
+       next_buff_pos += ntohs(ogm2_packet->tvlv_len);
 
        return (next_buff_pos <= packet_len) &&
               (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -830,7 +836,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
        ogm_packet = (struct batadv_ogm2_packet *)skb->data;
 
        while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
-                                       ogm_packet->tvlv_len)) {
+                                       ogm_packet)) {
                batadv_v_ogm_process(skb, ogm_offset, if_incoming);
 
                ogm_offset += BATADV_OGM2_HLEN;
index 08690d06b7be2b25ca3f009394763c7083c70644..36f0962040d16af4f9ed82629ff03ce85c83ed57 100644 (file)
@@ -821,6 +821,9 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
 
        batadv_hardif_recalc_extra_skbroom(soft_iface);
 
+       if (bat_priv->algo_ops->iface.enabled)
+               bat_priv->algo_ops->iface.enabled(hard_iface);
+
 out:
        return 0;
 
index 0d9459b69bdb812b1b68e28e6b68fec8ec95df2d..c32820963b8e706b4cdde10d46ec582bc51ec4eb 100644 (file)
@@ -118,7 +118,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
 {
        struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
 
-       return attr ? nla_get_u32(attr) : 0;
+       return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
 }
 
 /**
index 359ec1a6e822ffd9781edc7897e457f96c50244b..9fa5389ea244c37cf02d57693f90b74d5c741911 100644 (file)
@@ -3821,6 +3821,8 @@ static void batadv_tt_purge(struct work_struct *work)
  */
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
+       batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_ROAM, 1);
+
        batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
        batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
 
index eeee3e61c625df5b38a69290406031e348387fa4..fdba8a144d7373862ac8028dfd3e97cba591320a 100644 (file)
@@ -2130,6 +2130,9 @@ struct batadv_algo_iface_ops {
        /** @enable: init routing info when hard-interface is enabled */
        int (*enable)(struct batadv_hard_iface *hard_iface);
 
+       /** @enabled: notification when hard-interface was enabled (optional) */
+       void (*enabled)(struct batadv_hard_iface *hard_iface);
+
        /** @disable: de-init routing info when hard-interface is disabled */
        void (*disable)(struct batadv_hard_iface *hard_iface);
 
index 4e2576fc0c59932cbb1f3c98c150764b91bb52c1..357475cceec61ba264fd49621e75de624f4afff4 100644 (file)
@@ -187,10 +187,16 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
        }
 
        if (!rt) {
-               nexthop = &lowpan_cb(skb)->gw;
-
-               if (ipv6_addr_any(nexthop))
-                       return NULL;
+               if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
+                       /* There is neither route nor gateway,
+                        * probably the destination is a direct peer.
+                        */
+                       nexthop = daddr;
+               } else {
+                       /* There is a known gateway
+                        */
+                       nexthop = &lowpan_cb(skb)->gw;
+               }
        } else {
                nexthop = rt6_nexthop(rt, daddr);
 
index 879d5432bf7794d4c4183aae2ca2d092281417b8..260ef5426e0ca7cc915011b67c4dfae2d45e5907 100644 (file)
@@ -4384,6 +4384,12 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
 
        l2cap_chan_lock(chan);
 
+       if (chan->state != BT_DISCONN) {
+               l2cap_chan_unlock(chan);
+               mutex_unlock(&conn->chan_lock);
+               return 0;
+       }
+
        l2cap_chan_hold(chan);
        l2cap_chan_del(chan, 0);
 
index a1c1b7e8a45ca6d6c44de507d7a5ff232d776edc..cc2f7ca91ccdd32136ea82f5fc6bbcff87206d1d 100644 (file)
@@ -2580,6 +2580,19 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
                goto distribute;
        }
 
+       /* Drop IRK if peer is using identity address during pairing but is
+        * providing different address as identity information.
+        *
+        * Microsoft Surface Precision Mouse is known to have this bug.
+        */
+       if (hci_is_identity_address(&hcon->dst, hcon->dst_type) &&
+           (bacmp(&info->bdaddr, &hcon->dst) ||
+            info->addr_type != hcon->dst_type)) {
+               bt_dev_err(hcon->hdev,
+                          "ignoring IRK with invalid identity address");
+               goto distribute;
+       }
+
        bacpy(&smp->id_addr, &info->bdaddr);
        smp->id_addr_type = info->addr_type;
 
index fed0ff446abb3ab59159a990f7635cf609dbe18f..2532c1a19645c7b13623df9f0e9581e29774919e 100644 (file)
@@ -79,7 +79,6 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
        struct net_bridge_fdb_entry *dst = NULL;
        struct net_bridge_mdb_entry *mdst;
        bool local_rcv, mcast_hit = false;
-       const unsigned char *dest;
        struct net_bridge *br;
        u16 vid = 0;
 
@@ -97,10 +96,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
                br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
 
        local_rcv = !!(br->dev->flags & IFF_PROMISC);
-       dest = eth_hdr(skb)->h_dest;
-       if (is_multicast_ether_addr(dest)) {
+       if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
                /* by definition the broadcast is also a multicast address */
-               if (is_broadcast_ether_addr(dest)) {
+               if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
                        pkt_type = BR_PKT_BROADCAST;
                        local_rcv = true;
                } else {
@@ -150,7 +148,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
                }
                break;
        case BR_PKT_UNICAST:
-               dst = br_fdb_find_rcu(br, dest, vid);
+               dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
        default:
                break;
        }
index 6d9f48bd374ab28d34d4a846b1b659c36110112b..55198818e3e57d42a74822edadde617b3737f597 100644 (file)
@@ -419,7 +419,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
        struct nlmsghdr *nlh;
        struct nlattr *nest;
 
-       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
        if (!nlh)
                return -EMSGSIZE;
 
index 75901c4641b1ced8308066c5b94ecbe13f268f09..6a362da211e1b45aa5e7b94a8085217ab707023f 100644 (file)
@@ -1147,6 +1147,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
        int type;
        int err = 0;
        __be32 group;
+       u16 nsrcs;
 
        ih = igmpv3_report_hdr(skb);
        num = ntohs(ih->ngrec);
@@ -1160,8 +1161,9 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
                grec = (void *)(skb->data + len - sizeof(*grec));
                group = grec->grec_mca;
                type = grec->grec_type;
+               nsrcs = ntohs(grec->grec_nsrcs);
 
-               len += ntohs(grec->grec_nsrcs) * 4;
+               len += nsrcs * 4;
                if (!pskb_may_pull(skb, len))
                        return -EINVAL;
 
@@ -1182,7 +1184,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
                src = eth_hdr(skb)->h_source;
                if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
                     type == IGMPV3_MODE_IS_INCLUDE) &&
-                   ntohs(grec->grec_nsrcs) == 0) {
+                   nsrcs == 0) {
                        br_ip4_multicast_leave_group(br, port, group, vid, src);
                } else {
                        err = br_ip4_multicast_add_group(br, port, group, vid,
@@ -1217,23 +1219,26 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
        len = skb_transport_offset(skb) + sizeof(*icmp6h);
 
        for (i = 0; i < num; i++) {
-               __be16 *nsrcs, _nsrcs;
-
-               nsrcs = skb_header_pointer(skb,
-                                          len + offsetof(struct mld2_grec,
-                                                         grec_nsrcs),
-                                          sizeof(_nsrcs), &_nsrcs);
-               if (!nsrcs)
+               __be16 *_nsrcs, __nsrcs;
+               u16 nsrcs;
+
+               _nsrcs = skb_header_pointer(skb,
+                                           len + offsetof(struct mld2_grec,
+                                                          grec_nsrcs),
+                                           sizeof(__nsrcs), &__nsrcs);
+               if (!_nsrcs)
                        return -EINVAL;
 
+               nsrcs = ntohs(*_nsrcs);
+
                if (!pskb_may_pull(skb,
                                   len + sizeof(*grec) +
-                                  sizeof(struct in6_addr) * ntohs(*nsrcs)))
+                                  sizeof(struct in6_addr) * nsrcs))
                        return -EINVAL;
 
                grec = (struct mld2_grec *)(skb->data + len);
                len += sizeof(*grec) +
-                      sizeof(struct in6_addr) * ntohs(*nsrcs);
+                      sizeof(struct in6_addr) * nsrcs;
 
                /* We treat these as MLDv1 reports for now. */
                switch (grec->grec_type) {
@@ -1252,7 +1257,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                src = eth_hdr(skb)->h_source;
                if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
                     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
-                   ntohs(*nsrcs) == 0) {
+                   nsrcs == 0) {
                        br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
                                                     vid, src);
                } else {
@@ -1505,7 +1510,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
                                  struct sk_buff *skb,
                                  u16 vid)
 {
-       const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        struct mld_msg *mld;
        struct net_bridge_mdb_entry *mp;
        struct mld2_query *mld2q;
@@ -1549,7 +1553,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 
        if (is_general_query) {
                saddr.proto = htons(ETH_P_IPV6);
-               saddr.u.ip6 = ip6h->saddr;
+               saddr.u.ip6 = ipv6_hdr(skb)->saddr;
 
                br_multicast_query_received(br, port, &br->ip6_other_query,
                                            &saddr, max_delay);
@@ -1617,6 +1621,9 @@ br_multicast_leave_group(struct net_bridge *br,
                        if (!br_port_group_equal(p, port, src))
                                continue;
 
+                       if (p->flags & MDB_PG_FLAGS_PERMANENT)
+                               break;
+
                        rcu_assign_pointer(*pp, p->next);
                        hlist_del_init(&p->mglist);
                        del_timer(&p->timer);
index 1b75d6bf12bd9cbe4411c6dadf85a273bbdcbdbc..37ddcea3fc96bbdb55b5194d910231eab738d8a5 100644 (file)
@@ -147,7 +147,6 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
 void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
                struct net_device *dev)
 {
-       const unsigned char *dest = eth_hdr(skb)->h_dest;
        struct net_bridge_port *p;
        struct net_bridge *br;
        const unsigned char *buf;
@@ -176,7 +175,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
        if (p->state == BR_STATE_DISABLED)
                goto out;
 
-       if (!ether_addr_equal(dest, br->group_addr))
+       if (!ether_addr_equal(eth_hdr(skb)->h_dest, br->group_addr))
                goto out;
 
        if (p->flags & BR_BPDU_GUARD) {
index 7df269092103c3e00dc79ecc7904c4f859f62cce..5f3950f00f73b1395f68c3a66c7ca73dde376b3d 100644 (file)
@@ -677,6 +677,11 @@ void br_vlan_flush(struct net_bridge *br)
 
        ASSERT_RTNL();
 
+       /* delete auto-added default pvid local fdb before flushing vlans
+        * otherwise it will be leaked on bridge device init failure
+        */
+       br_fdb_delete_by_port(br, NULL, 0, 1);
+
        vg = br_vlan_group(br);
        __vlan_flush(vg);
        RCU_INIT_POINTER(br->vlgrp, NULL);
index 0bb4d712b80cbec65cec48cbc3531be4c9c6dea1..7d249afa1466cf9253cef184463f2c7a5e99edfd 100644 (file)
@@ -225,7 +225,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
                        return NF_DROP;
                }
 
-               ADD_COUNTER(*(counter_base + i), 1, skb->len);
+               ADD_COUNTER(*(counter_base + i), skb->len, 1);
 
                /* these should only watch: not modify, nor tell us
                 * what to do with the packet
@@ -963,8 +963,8 @@ static void get_counters(const struct ebt_counter *oldcounters,
                        continue;
                counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
                for (i = 0; i < nentries; i++)
-                       ADD_COUNTER(counters[i], counter_base[i].pcnt,
-                                   counter_base[i].bcnt);
+                       ADD_COUNTER(counters[i], counter_base[i].bcnt,
+                                   counter_base[i].pcnt);
        }
 }
 
@@ -1289,7 +1289,7 @@ static int do_update_counters(struct net *net, const char *name,
 
        /* we add to the counters of the first cpu */
        for (i = 0; i < num_counters; i++)
-               ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt);
+               ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
 
        write_unlock_bh(&t->lock);
        ret = 0;
@@ -1779,20 +1779,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
        return 0;
 }
 
+static int ebt_compat_init_offsets(unsigned int number)
+{
+       if (number > INT_MAX)
+               return -EINVAL;
+
+       /* also count the base chain policies */
+       number += NF_BR_NUMHOOKS;
+
+       return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
+}
 
 static int compat_table_info(const struct ebt_table_info *info,
                             struct compat_ebt_replace *newinfo)
 {
        unsigned int size = info->entries_size;
        const void *entries = info->entries;
+       int ret;
 
        newinfo->entries_size = size;
-       if (info->nentries) {
-               int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
-                                                info->nentries);
-               if (ret)
-                       return ret;
-       }
+       ret = ebt_compat_init_offsets(info->nentries);
+       if (ret)
+               return ret;
 
        return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
                                                        entries, newinfo);
@@ -2241,11 +2249,9 @@ static int compat_do_replace(struct net *net, void __user *user,
 
        xt_compat_lock(NFPROTO_BRIDGE);
 
-       if (tmp.nentries) {
-               ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
-               if (ret < 0)
-                       goto out_unlock;
-       }
+       ret = ebt_compat_init_offsets(tmp.nentries);
+       if (ret < 0)
+               goto out_unlock;
 
        ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
        if (ret < 0)
@@ -2268,8 +2274,10 @@ static int compat_do_replace(struct net *net, void __user *user,
        state.buf_kern_len = size64;
 
        ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
-       if (WARN_ON(ret < 0))
+       if (WARN_ON(ret < 0)) {
+               vfree(entries_tmp);
                goto out_unlock;
+       }
 
        vfree(entries_tmp);
        tmp.entries_size = size64;
index 53859346dc9a92f76daf35e4daf68a15ef1c2c9b..bd2161470e456e185eca2b134ce9c05674fec56f 100644 (file)
@@ -1046,32 +1046,50 @@ static __init int cgw_module_init(void)
        pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
                max_hops);
 
-       register_pernet_subsys(&cangw_pernet_ops);
+       ret = register_pernet_subsys(&cangw_pernet_ops);
+       if (ret)
+               return ret;
+
+       ret = -ENOMEM;
        cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
                                      0, 0, NULL);
-
        if (!cgw_cache)
-               return -ENOMEM;
+               goto out_cache_create;
 
        /* set notifier */
        notifier.notifier_call = cgw_notifier;
-       register_netdevice_notifier(&notifier);
+       ret = register_netdevice_notifier(&notifier);
+       if (ret)
+               goto out_register_notifier;
 
        ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
                                   NULL, cgw_dump_jobs, 0);
-       if (ret) {
-               unregister_netdevice_notifier(&notifier);
-               kmem_cache_destroy(cgw_cache);
-               return -ENOBUFS;
-       }
-
-       /* Only the first call to rtnl_register_module can fail */
-       rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
-                            cgw_create_job, NULL, 0);
-       rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
-                            cgw_remove_job, NULL, 0);
+       if (ret)
+               goto out_rtnl_register1;
+
+       ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
+                                  cgw_create_job, NULL, 0);
+       if (ret)
+               goto out_rtnl_register2;
+       ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
+                                  cgw_remove_job, NULL, 0);
+       if (ret)
+               goto out_rtnl_register3;
 
        return 0;
+
+out_rtnl_register3:
+       rtnl_unregister(PF_CAN, RTM_NEWROUTE);
+out_rtnl_register2:
+       rtnl_unregister(PF_CAN, RTM_GETROUTE);
+out_rtnl_register1:
+       unregister_netdevice_notifier(&notifier);
+out_register_notifier:
+       kmem_cache_destroy(cgw_cache);
+out_cache_create:
+       unregister_pernet_subsys(&cangw_pernet_ops);
+
+       return ret;
 }
 
 static __exit void cgw_module_exit(void)
index 60934bd8796c53c5ec96477e1c29fb9e8234e804..76c41a84550e76459aa6dd5ec8677c8275127fd0 100644 (file)
@@ -1423,7 +1423,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
        struct ceph_osds up, acting;
        bool force_resend = false;
        bool unpaused = false;
-       bool legacy_change;
+       bool legacy_change = false;
        bool split = false;
        bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
        bool recovery_deletes = ceph_osdmap_flag(osdc,
@@ -1511,15 +1511,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
                t->osd = acting.primary;
        }
 
-       if (unpaused || legacy_change || force_resend ||
-           (split && con && CEPH_HAVE_FEATURE(con->peer_features,
-                                              RESEND_ON_SPLIT)))
+       if (unpaused || legacy_change || force_resend || split)
                ct_res = CALC_TARGET_NEED_RESEND;
        else
                ct_res = CALC_TARGET_NO_ACTION;
 
 out:
-       dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
+       dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
+            legacy_change, force_resend, split, ct_res, t->osd);
        return ct_res;
 }
 
index 138951d286432b09a572aff7d745a96d5209acb2..ddd8aab20adf2518d07d8c02ac169a013477c12e 100644 (file)
@@ -8562,6 +8562,8 @@ int register_netdevice(struct net_device *dev)
        ret = notifier_to_errno(ret);
        if (ret) {
                rollback_registered(dev);
+               rcu_barrier();
+
                dev->reg_state = NETREG_UNREGISTERED;
        }
        /*
@@ -9510,6 +9512,8 @@ static void __net_exit default_device_exit(struct net *net)
 
                /* Push remaining network devices to init_net */
                snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+               if (__dev_get_by_name(&init_net, fb_name))
+                       snprintf(fb_name, IFNAMSIZ, "dev%%d");
                err = dev_change_net_namespace(dev, &init_net, fb_name);
                if (err) {
                        pr_emerg("%s: failed to move %s to init_net: %d\n",
index 34ec9324737b399ddff0d619ba1b8f808a994526..e6fa88506c00d7e5969b24ee847b76e1823b0d4f 100644 (file)
@@ -3991,7 +3991,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
                                                    TCP_CA_NAME_MAX-1));
                        name[TCP_CA_NAME_MAX-1] = 0;
                        ret = tcp_set_congestion_control(sk, name, false,
-                                                        reinit);
+                                                        reinit, true);
                } else {
                        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -7234,13 +7234,13 @@ sk_reuseport_is_valid_access(int off, int size,
                return size == size_default;
 
        /* Fields that allow narrowing */
-       case offsetof(struct sk_reuseport_md, eth_protocol):
+       case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
                if (size < FIELD_SIZEOF(struct sk_buff, protocol))
                        return false;
                /* fall through */
-       case offsetof(struct sk_reuseport_md, ip_protocol):
-       case offsetof(struct sk_reuseport_md, bind_inany):
-       case offsetof(struct sk_reuseport_md, len):
+       case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
+       case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
+       case bpf_ctx_range(struct sk_reuseport_md, len):
                bpf_ctx_record_field_size(info, size_default);
                return bpf_ctx_narrow_access_ok(off, size, size_default);
 
index cd9e991f21d734b0e83ed4ab8aa9ab993c499315..c52d6e6b341cfcaf655efeaed8ac27a8509290ce 100644 (file)
@@ -1021,6 +1021,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 
                        atomic_set(&neigh->probes,
                                   NEIGH_VAR(neigh->parms, UCAST_PROBES));
+                       neigh_del_timer(neigh);
                        neigh->nud_state     = NUD_INCOMPLETE;
                        neigh->updated = now;
                        next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
@@ -1037,6 +1038,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
                }
        } else if (neigh->nud_state & NUD_STALE) {
                neigh_dbg(2, "neigh %p is delayed\n", neigh);
+               neigh_del_timer(neigh);
                neigh->nud_state = NUD_DELAY;
                neigh->updated = jiffies;
                neigh_add_timer(neigh, jiffies +
index 3ae899805f8b674b4ffb7d791330f836d38eff7d..a581cf101cd9c799aa0d54839c41500a9ecd2827 100644 (file)
@@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
                txq = netdev_get_tx_queue(dev, q_index);
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (netif_xmit_frozen_or_stopped(txq) ||
-                   netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
+                   !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
                        skb_queue_head(&npinfo->txq, skb);
                        HARD_TX_UNLOCK(dev, txq);
                        local_irq_restore(flags);
@@ -335,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 
                                HARD_TX_UNLOCK(dev, txq);
 
-                               if (status == NETDEV_TX_OK)
+                               if (dev_xmit_complete(status))
                                        break;
 
                        }
@@ -352,7 +352,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 
        }
 
-       if (status != NETDEV_TX_OK) {
+       if (!dev_xmit_complete(status)) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work,0);
        }
index 9b9f696281a9bc270d146bca7940d8a44df1121f..0629ca89ab74f5e089604746480241cd945b7f02 100644 (file)
@@ -3530,6 +3530,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
        int pos;
        int dummy;
 
+       if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
+           (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
+               /* gso_size is untrusted, and we have a frag_list with a linear
+                * non head_frag head.
+                *
+                * (we assume checking the first list_skb member suffices;
+                * i.e if either of the list_skb members have non head_frag
+                * head, then the first one has too).
+                *
+                * If head_skb's headlen does not fit requested gso_size, it
+                * means that the frag_list members do NOT terminate on exact
+                * gso_size boundaries. Hence we cannot perform skb_frag_t page
+                * sharing. Therefore we must fallback to copying the frag_list
+                * skbs; we do so by disabling SG.
+                */
+               if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
+                       features &= ~NETIF_F_SG;
+       }
+
        __skb_push(head_skb, doffset);
        proto = skb_network_protocol(head_skb, &dummy);
        if (unlikely(!proto))
index cc0b21e9bad44164293e158d5b98fc775652aa28..b6170db9b9adb42c75de93470477908ba753f8c0 100644 (file)
@@ -1563,8 +1563,6 @@ static void __sk_destruct(struct rcu_head *head)
                sk_filter_uncharge(sk, filter);
                RCU_INIT_POINTER(sk->sk_filter, NULL);
        }
-       if (rcu_access_pointer(sk->sk_reuseport_cb))
-               reuseport_detach_sock(sk);
 
        sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
 
@@ -1587,7 +1585,14 @@ static void __sk_destruct(struct rcu_head *head)
 
 void sk_destruct(struct sock *sk)
 {
-       if (sock_flag(sk, SOCK_RCU_FREE))
+       bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
+
+       if (rcu_access_pointer(sk->sk_reuseport_cb)) {
+               reuseport_detach_sock(sk);
+               use_call_rcu = true;
+       }
+
+       if (use_call_rcu)
                call_rcu(&sk->sk_rcu, __sk_destruct);
        else
                __sk_destruct(&sk->sk_rcu);
index ba5cba56f5747d0f15061250db8c7fcd7492248e..fd38cf1d2b02205743f8931dd9ccebbc2682729e 100644 (file)
@@ -292,8 +292,19 @@ struct sock *reuseport_select_sock(struct sock *sk,
 
 select_by_hash:
                /* no bpf or invalid bpf result: fall back to hash usage */
-               if (!sk2)
-                       sk2 = reuse->socks[reciprocal_scale(hash, socks)];
+               if (!sk2) {
+                       int i, j;
+
+                       i = j = reciprocal_scale(hash, socks);
+                       while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
+                               i++;
+                               if (i >= reuse->num_socks)
+                                       i = 0;
+                               if (i == j)
+                                       goto out;
+                       }
+                       sk2 = reuse->socks[i];
+               }
        }
 
 out:
index 7d329fb1f553a832e277e12989b8c49ede21ea0e..7f5eaa95a6756073164a36b64e001a31cdfb1e58 100644 (file)
@@ -120,7 +120,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
        int err = 0;
        long vm_wait = 0;
        long current_timeo = *timeo_p;
-       bool noblock = (*timeo_p ? false : true);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
        if (sk_stream_memory_free(sk))
@@ -133,11 +132,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 
                if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                        goto do_error;
-               if (!*timeo_p) {
-                       if (noblock)
-                               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-                       goto do_nonblock;
-               }
+               if (!*timeo_p)
+                       goto do_eagain;
                if (signal_pending(current))
                        goto do_interrupted;
                sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -169,7 +165,13 @@ out:
 do_error:
        err = -EPIPE;
        goto out;
-do_nonblock:
+do_eagain:
+       /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
+        * be generated later.
+        * When TCP receives ACK packets that make room, tcp_check_space()
+        * only calls tcp_new_space() if SOCK_NOSPACE is set.
+        */
+       set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        err = -EAGAIN;
        goto out;
 do_interrupted:
index 142b294d34468508f80457a6e3b5ffc35c680f07..b0b9413fa5bf9c150c0339fe8ca97b581ba52c10 100644 (file)
@@ -127,6 +127,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
 {
        int port;
 
+       if (!ds->ops->port_mdb_add)
+               return;
+
        for_each_set_bit(port, bitmap, ds->num_ports)
                ds->ops->port_mdb_add(ds, port, mdb);
 }
index bc6b912603f164b981e761974e4e64f39b01dc8f..89819745e482c5e54684c673ed45dbeb0ba4e906 100644 (file)
@@ -1018,6 +1018,9 @@ static int ieee802154_create(struct net *net, struct socket *sock,
 
        switch (sock->type) {
        case SOCK_RAW:
+               rc = -EPERM;
+               if (!capable(CAP_NET_RAW))
+                       goto out;
                proto = &ieee802154_raw_prot;
                ops = &ieee802154_raw_ops;
                break;
index f915abff1350a86af8d5bb89725b751c061b0fb5..80107a6a2c4ae1e0cdbd9269dfe2aa61c4930675 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/sock.h>
 #include <net/route.h>
 #include <net/tcp_states.h>
+#include <net/sock_reuseport.h>
 
 int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
@@ -73,6 +74,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        }
        inet->inet_daddr = fl4->daddr;
        inet->inet_dport = usin->sin_port;
+       reuseport_has_conns(sk, true);
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
        inet->inet_id = jiffies;
index ea4bd8a52422e75c98e30cb3ab537e97b2170520..d23746143cd2b0e9a07543c3e9154b92c70f233c 100644 (file)
 #include <net/net_namespace.h>
 #include <net/addrconf.h>
 
+#define IPV6ONLY_FLAGS \
+               (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
+                IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
+                IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
+
 static struct ipv4_devconf ipv4_devconf = {
        .data = {
                [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
@@ -462,6 +467,9 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
        ifa->ifa_flags &= ~IFA_F_SECONDARY;
        last_primary = &in_dev->ifa_list;
 
+       /* Don't set IPv6 only flags to IPv4 addresses */
+       ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
+
        for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
             ifap = &ifa1->ifa_next) {
                if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
index ad75c468ecfb2d0bbf3ed79c53506e49631ba652..0167e23d1c8faf6d404995f124ba754c6d9fb2db 100644 (file)
@@ -587,7 +587,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
 
        if (!rt)
                goto out;
-       net = dev_net(rt->dst.dev);
+
+       if (rt->dst.dev)
+               net = dev_net(rt->dst.dev);
+       else if (skb_in->dev)
+               net = dev_net(skb_in->dev);
+       else
+               goto out;
 
        /*
         *      Find the original header. It is expected to be valid, of course.
index d187ee8156a19f4f8590938c66b4f1424884cec6..b2240b7f225d545e9508abf554fe162257b2a9cc 100644 (file)
@@ -1218,12 +1218,8 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
        if (pmc) {
                im->interface = pmc->interface;
                if (im->sfmode == MCAST_INCLUDE) {
-                       im->tomb = pmc->tomb;
-                       pmc->tomb = NULL;
-
-                       im->sources = pmc->sources;
-                       pmc->sources = NULL;
-
+                       swap(im->tomb, pmc->tomb);
+                       swap(im->sources, pmc->sources);
                        for (psf = im->sources; psf; psf = psf->sf_next)
                                psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
                } else {
index 3c734832bb7cb21e1b397eb078ed7209d88ff464..0b87558f265eb9c81415ec13efddfaac129b4afe 100644 (file)
@@ -1531,6 +1531,7 @@ static void erspan_setup(struct net_device *dev)
        struct ip_tunnel *t = netdev_priv(dev);
 
        ether_setup(dev);
+       dev->max_mtu = 0;
        dev->netdev_ops = &erspan_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
index c248e0dccbe17afa397910b0d68260daf2a9eb3f..67ef9d853d906a6018ca7ccade6ebe9af3cd18ce 100644 (file)
@@ -89,9 +89,12 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
        __ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);
 
        err = ip_local_out(net, sk, skb);
-       if (unlikely(net_xmit_eval(err)))
-               pkt_len = 0;
-       iptunnel_xmit_stats(dev, pkt_len);
+
+       if (dev) {
+               if (unlikely(net_xmit_eval(err)))
+                       pkt_len = 0;
+               iptunnel_xmit_stats(dev, pkt_len);
+       }
 }
 EXPORT_SYMBOL_GPL(iptunnel_xmit);
 
index c891235b4966cc4994d81acf39070d27c3060f13..4368282eb6f8fb38bcd979e8967e3f5d105cc81c 100644 (file)
@@ -281,6 +281,9 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
        const struct iphdr  *tiph = &tunnel->parms.iph;
        u8 ipproto;
 
+       if (!pskb_inet_may_pull(skb))
+               goto tx_error;
+
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                ipproto = IPPROTO_IPIP;
index 12843c9ef1421d204fba6bea42a85615e2e69cc7..74b19a5c572e9fa570e834448eec0152665e2a2a 100644 (file)
@@ -96,6 +96,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
        flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
        flow.flowi4_tos = RT_TOS(iph->tos);
        flow.flowi4_scope = RT_SCOPE_UNIVERSE;
+       flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
 
        return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
 }
index c200065ef9a5e49b022eeb2286d59eaf822136f9..6367ecdf76c42ff85ff102a4eb4e5a1e81af1953 100644 (file)
@@ -23,9 +23,6 @@ raw_get_hashinfo(const struct inet_diag_req_v2 *r)
                return &raw_v6_hashinfo;
 #endif
        } else {
-               pr_warn_once("Unexpected inet family %d\n",
-                            r->sdiag_family);
-               WARN_ON_ONCE(1);
                return ERR_PTR(-EINVAL);
        }
 }
index 232581c140a05ea2e7e9e3011fe0b48849557e4a..69127f6039b2b3308932d9a742f19c18482947c9 100644 (file)
@@ -908,16 +908,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
        if (peer->rate_tokens == 0 ||
            time_after(jiffies,
                       (peer->rate_last +
-                       (ip_rt_redirect_load << peer->rate_tokens)))) {
+                       (ip_rt_redirect_load << peer->n_redirects)))) {
                __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
 
                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
                peer->rate_last = jiffies;
-               ++peer->rate_tokens;
                ++peer->n_redirects;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
                if (log_martians &&
-                   peer->rate_tokens == ip_rt_redirect_number)
+                   peer->n_redirects == ip_rt_redirect_number)
                        net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
                                             &ip_hdr(skb)->saddr, inet_iif(skb),
                                             &ip_hdr(skb)->daddr, &gw);
@@ -1477,7 +1476,7 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
        prev = cmpxchg(p, orig, rt);
        if (prev == orig) {
                if (orig) {
-                       dst_dev_put(&orig->dst);
+                       rt_add_uncached_list(orig);
                        dst_release(&orig->dst);
                }
        } else {
@@ -2382,14 +2381,17 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
        int orig_oif = fl4->flowi4_oif;
        unsigned int flags = 0;
        struct rtable *rth;
-       int err = -ENETUNREACH;
+       int err;
 
        if (fl4->saddr) {
-               rth = ERR_PTR(-EINVAL);
                if (ipv4_is_multicast(fl4->saddr) ||
                    ipv4_is_lbcast(fl4->saddr) ||
-                   ipv4_is_zeronet(fl4->saddr))
+                   ipv4_is_zeronet(fl4->saddr)) {
+                       rth = ERR_PTR(-EINVAL);
                        goto out;
+               }
+
+               rth = ERR_PTR(-ENETUNREACH);
 
                /* I removed check for oif == dev_out->oif here.
                   It was wrong for two reasons:
index 364e6fdaa38f50545fc41d8780bd8de7f7478dce..611ba174265c88fa618e3a19b885847af6fc2296 100644 (file)
@@ -934,6 +934,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
        return mss_now;
 }
 
+/* In some cases, both sendpage() and sendmsg() could have added
+ * an skb to the write queue, but failed adding payload on it.
+ * We need to remove it to consume less memory, but more
+ * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
+ * users.
+ */
+static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
+{
+       if (skb && !skb->len) {
+               tcp_unlink_write_queue(skb, sk);
+               if (tcp_write_queue_empty(sk))
+                       tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
+               sk_wmem_free_skb(sk, skb);
+       }
+}
+
 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
                         size_t size, int flags)
 {
@@ -1056,6 +1072,7 @@ out:
        return copied;
 
 do_error:
+       tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
        if (copied)
                goto out;
 out_err:
@@ -1409,17 +1426,11 @@ out_nopush:
        sock_zerocopy_put(uarg);
        return copied + copied_syn;
 
+do_error:
+       skb = tcp_write_queue_tail(sk);
 do_fault:
-       if (!skb->len) {
-               tcp_unlink_write_queue(skb, sk);
-               /* It is the one place in all of TCP, except connection
-                * reset, where we can be unlinking the send_head.
-                */
-               tcp_check_send_head(sk, skb);
-               sk_wmem_free_skb(sk, skb);
-       }
+       tcp_remove_empty_skb(sk, skb);
 
-do_error:
        if (copied + copied_syn)
                goto out;
 out_err:
@@ -2594,6 +2605,8 @@ int tcp_disconnect(struct sock *sk, int flags)
        tcp_saved_syn_free(tp);
        tp->compressed_ack = 0;
        tp->bytes_sent = 0;
+       tp->bytes_acked = 0;
+       tp->bytes_received = 0;
        tp->bytes_retrans = 0;
        tp->dsack_dups = 0;
        tp->reord_seen = 0;
@@ -2729,7 +2742,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                name[val] = 0;
 
                lock_sock(sk);
-               err = tcp_set_congestion_control(sk, name, true, true);
+               err = tcp_set_congestion_control(sk, name, true, true,
+                                                ns_capable(sock_net(sk)->user_ns,
+                                                           CAP_NET_ADMIN));
                release_sock(sk);
                return err;
        }
index bc6c02f1624383043147101cd4ad2157b3bc9289..48f79db446a02a57db9afc5a9e01fa1cd069316f 100644 (file)
@@ -332,7 +332,8 @@ out:
  * tcp_reinit_congestion_control (if the current congestion control was
  * already initialized.
  */
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit)
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
+                              bool reinit, bool cap_net_admin)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_congestion_ops *ca;
@@ -368,8 +369,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, boo
                } else {
                        err = -EBUSY;
                }
-       } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
-                    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) {
+       } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin)) {
                err = -EPERM;
        } else if (!try_module_get(ca->owner)) {
                err = -EBUSY;
index 4a8869d3966221a7cfc3d4cb15a98b2b7cef90af..14a6a489937c15de80e995f7cae6c8d47b87355e 100644 (file)
@@ -260,7 +260,7 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
 
 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
 {
-       tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+       tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 }
 
 static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
index 221d9b72423b961d948cda2659f7b86b9727438d..2697e4397e46c8acacb498b2c4ed0f345ac2523f 100644 (file)
@@ -1289,6 +1289,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
        int nsize, old_factor;
+       long limit;
        int nlen;
        u8 flags;
 
@@ -1299,8 +1300,16 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
        if (nsize < 0)
                nsize = 0;
 
-       if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf &&
-                    tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) {
+       /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
+        * We need some allowance to not penalize applications setting small
+        * SO_SNDBUF values.
+        * Also allow first and last skb in retransmit queue to be split.
+        */
+       limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
+       if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
+                    tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
+                    skb != tcp_rtx_queue_head(sk) &&
+                    skb != tcp_rtx_queue_tail(sk))) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
                return -ENOMEM;
        }
@@ -2037,7 +2046,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
                if (len <= skb->len)
                        break;
 
-               if (unlikely(TCP_SKB_CB(skb)->eor))
+               if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
                        return false;
 
                len -= skb->len;
@@ -2153,6 +2162,7 @@ static int tcp_mtu_probe(struct sock *sk)
                         * we need to propagate it to the new skb.
                         */
                        TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
+                       tcp_skb_collapse_tstamp(nskb, skb);
                        tcp_unlink_write_queue(skb, sk);
                        sk_wmem_free_skb(sk, skb);
                } else {
index 17335a370e6452b58bd3490d6821e842d706db53..9d775b8df57d4fef4635e2f2fe62832633d86d50 100644 (file)
@@ -219,7 +219,7 @@ static int tcp_write_timeout(struct sock *sk)
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
-       bool expired, do_reset;
+       bool expired = false, do_reset;
        int retry_until;
 
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
@@ -251,9 +251,10 @@ static int tcp_write_timeout(struct sock *sk)
                        if (tcp_out_of_resources(sk, do_reset))
                                return 1;
                }
+       }
+       if (!expired)
                expired = retransmits_timed_out(sk, retry_until,
                                                icsk->icsk_user_timeout);
-       }
        tcp_fastopen_active_detect_blackhole(sk, expired);
 
        if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
index 6ab68b06fa39d5452f7d8a0efea197a64e0c2270..aa59acc8ee0ece77160a34c1d778a41b3308e5f1 100644 (file)
@@ -443,12 +443,13 @@ static struct sock *udp4_lib_lookup2(struct net *net,
                score = compute_score(sk, net, saddr, sport,
                                      daddr, hnum, dif, sdif, exact_dif);
                if (score > badness) {
-                       if (sk->sk_reuseport) {
+                       if (sk->sk_reuseport &&
+                           sk->sk_state != TCP_ESTABLISHED) {
                                hash = udp_ehashfn(net, daddr, hnum,
                                                   saddr, sport);
                                result = reuseport_select_sock(sk, hash, skb,
                                                        sizeof(struct udphdr));
-                               if (result)
+                               if (result && !reuseport_has_conns(sk, false))
                                        return result;
                        }
                        badness = score;
@@ -774,6 +775,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
        int is_udplite = IS_UDPLITE(sk);
        int offset = skb_transport_offset(skb);
        int len = skb->len - offset;
+       int datalen = len - sizeof(*uh);
        __wsum csum = 0;
 
        /*
@@ -807,10 +809,12 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
                        return -EIO;
                }
 
-               skb_shinfo(skb)->gso_size = cork->gso_size;
-               skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
-               skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
-                                                        cork->gso_size);
+               if (datalen > cork->gso_size) {
+                       skb_shinfo(skb)->gso_size = cork->gso_size;
+                       skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+                       skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
+                                                                cork->gso_size);
+               }
                goto csum_partial;
        }
 
index c57efd5c5b387e17c7e99835f47b7fcb84e9acb5..d2968a79abea83f0c8b292b2403d813d44591fb8 100644 (file)
@@ -995,7 +995,8 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
        int err = 0;
 
        if (addr_type == IPV6_ADDR_ANY ||
-           addr_type & IPV6_ADDR_MULTICAST ||
+           (addr_type & IPV6_ADDR_MULTICAST &&
+            !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
            (!(idev->dev->flags & IFF_LOOPBACK) &&
             addr_type & IPV6_ADDR_LOOPBACK))
                return ERR_PTR(-EADDRNOTAVAIL);
@@ -5677,13 +5678,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
        switch (event) {
        case RTM_NEWADDR:
                /*
-                * If the address was optimistic
-                * we inserted the route at the start of
-                * our DAD process, so we don't need
-                * to do it again
+                * If the address was optimistic we inserted the route at the
+                * start of our DAD process, so we don't need to do it again.
+                * If the device was taken down in the middle of the DAD
+                * cycle there is a race where we could get here without a
+                * host route, so nothing to insert. That will be fixed when
+                * the device is brought up.
                 */
-               if (!rcu_access_pointer(ifp->rt->fib6_node))
+               if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
                        ip6_ins_rt(net, ifp->rt);
+               } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
+                       pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
+                               &ifp->addr, ifp->idev->dev->name);
+               }
+
                if (ifp->idev->cnf.forwarding)
                        addrconf_join_anycast(ifp);
                if (!ipv6_addr_any(&ifp->peer_addr))
index cb24850d2c7f87ca65fe604346cc0c1c703ddc27..971a0fdf1fbc3bc13df69d882c49fd5030b35b4f 100644 (file)
@@ -31,6 +31,7 @@
 #include <net/ip6_route.h>
 #include <net/tcp_states.h>
 #include <net/dsfield.h>
+#include <net/sock_reuseport.h>
 
 #include <linux/errqueue.h>
 #include <linux/uaccess.h>
@@ -258,6 +259,7 @@ ipv4_connected:
                goto out;
        }
 
+       reuseport_has_conns(sk, true);
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
 out:
index a6c0479c1d55f8f52b71449895ff56b5903f2ce2..bbb5ffb3397d8b98227c81023cac218603822619 100644 (file)
@@ -1081,8 +1081,24 @@ add:
                err = call_fib6_entry_notifiers(info->nl_net,
                                                FIB_EVENT_ENTRY_ADD,
                                                rt, extack);
-               if (err)
+               if (err) {
+                       struct fib6_info *sibling, *next_sibling;
+
+                       /* If the route has siblings, then it first
+                        * needs to be unlinked from them.
+                        */
+                       if (!rt->fib6_nsiblings)
+                               return err;
+
+                       list_for_each_entry_safe(sibling, next_sibling,
+                                                &rt->fib6_siblings,
+                                                fib6_siblings)
+                               sibling->fib6_nsiblings--;
+                       rt->fib6_nsiblings = 0;
+                       list_del_init(&rt->fib6_siblings);
+                       rt6_multipath_rebalance(next_sibling);
                        return err;
+               }
 
                rcu_assign_pointer(rt->fib6_next, iter);
                atomic_inc(&rt->fib6_ref);
index 01ecd510014f2a03f9e064bf320912450cb6f468..a23516e220569797840ec91d36f5e5d120e06a42 100644 (file)
@@ -680,12 +680,13 @@ static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
                                    struct flowi6 *fl6, __u8 *dsfield,
                                    int *encap_limit)
 {
-       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct ipv6hdr *ipv6h;
        struct ip6_tnl *t = netdev_priv(dev);
        __u16 offset;
 
        offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
        /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+       ipv6h = ipv6_hdr(skb);
 
        if (offset > 0) {
                struct ipv6_tlv_tnl_enc_lim *tel;
@@ -987,7 +988,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                if (unlikely(!tun_info ||
                             !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
                             ip_tunnel_info_af(tun_info) != AF_INET6))
-                       return -EINVAL;
+                       goto tx_err;
 
                key = &tun_info->key;
                memset(&fl6, 0, sizeof(fl6));
index 6b74523fc1c4397bd0a989fa321faf81be8dc1b7..acf0749ee5bbdc2cdbbf5f39791cb163e205a638 100644 (file)
@@ -80,8 +80,10 @@ static void ip6_sublist_rcv_finish(struct list_head *head)
 {
        struct sk_buff *skb, *next;
 
-       list_for_each_entry_safe(skb, next, head, list)
+       list_for_each_entry_safe(skb, next, head, list) {
+               skb_list_del_init(skb);
                dst_input(skb);
+       }
 }
 
 static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
@@ -220,6 +222,16 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
        if (ipv6_addr_is_multicast(&hdr->saddr))
                goto err;
 
+       /* While RFC4291 is not explicit about v4mapped addresses
+        * in IPv6 headers, it seems clear linux dual-stack
+        * model can not deal properly with these.
+        * Security models could be fooled by ::ffff:127.0.0.1 for example.
+        *
+        * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
+        */
+       if (ipv6_addr_v4mapped(&hdr->saddr))
+               goto err;
+
        skb->transport_header = skb->network_header + sizeof(*hdr);
        IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
 
index ade1390c63488a60b405ca70052b3493fecc67d5..d0ad85b8650da7a63956038ad3ea80c6c25625b9 100644 (file)
@@ -1283,12 +1283,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+       dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
 
        if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
                return -1;
 
-       dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
-
        skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
        err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1372,12 +1371,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+       dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
 
        if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
                return -1;
 
-       dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
-
        skb_set_inner_ipproto(skb, IPPROTO_IPV6);
 
        err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
index dbab62e3f0d78ab6ab996cb70627f675bb42e487..2d80e913b82f1067e3bb26a041d41a8dc9fc60f3 100644 (file)
@@ -791,14 +791,15 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
        if (pmc) {
                im->idev = pmc->idev;
                if (im->mca_sfmode == MCAST_INCLUDE) {
-                       im->mca_tomb = pmc->mca_tomb;
-                       im->mca_sources = pmc->mca_sources;
+                       swap(im->mca_tomb, pmc->mca_tomb);
+                       swap(im->mca_sources, pmc->mca_sources);
                        for (psf = im->mca_sources; psf; psf = psf->sf_next)
                                psf->sf_crcount = idev->mc_qrv;
                } else {
                        im->mca_crcount = idev->mc_qrv;
                }
                in6_dev_put(pmc->idev);
+               ip6_mc_clear_src(pmc);
                kfree(pmc);
        }
        spin_unlock_bh(&im->mca_lock);
index c3c6b09acdc4fcb76f923b057a864ae06fa5be13..0f3407f2851ed7fb003451d484c6fe7666d58a60 100644 (file)
@@ -58,7 +58,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
        if (rpfilter_addr_linklocal(&iph->saddr)) {
                lookup_flags |= RT6_LOOKUP_F_IFACE;
                fl6.flowi6_oif = dev->ifindex;
-       } else if ((flags & XT_RPFILTER_LOOSE) == 0)
+       /* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
+       } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
+                 (flags & XT_RPFILTER_LOOSE) == 0)
                fl6.flowi6_oif = dev->ifindex;
 
        rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
@@ -73,7 +75,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
                goto out;
        }
 
-       if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
+       if (rt->rt6i_idev->dev == dev ||
+           l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
+           (flags & XT_RPFILTER_LOOSE))
                ret = true;
  out:
        ip6_rt_put(rt);
index 4c04bccc74171058760b528f30ae917326e3b301..5c9be8594483fb9929e46b9a410db1865ed267ce 100644 (file)
@@ -228,7 +228,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
        return 0;
 }
 
-static void __net_init ping_v6_proc_exit_net(struct net *net)
+static void __net_exit ping_v6_proc_exit_net(struct net *net)
 {
        remove_proc_entry("icmp6", net->proc_net);
 }
index 24f7b2cf504b2dc2ab2f6467f9a5bfd969ed3ba7..c8858638013400a2c1cc38be299af56d7620142c 100644 (file)
@@ -2214,7 +2214,7 @@ static struct dst_entry *rt6_check(struct rt6_info *rt,
 {
        u32 rt_cookie = 0;
 
-       if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) ||
+       if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
            rt_cookie != cookie)
                return NULL;
 
@@ -3109,7 +3109,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
        rt->fib6_metric = cfg->fc_metric;
        rt->fib6_nh.nh_weight = 1;
 
-       rt->fib6_type = cfg->fc_type;
+       rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
 
        /* We cannot add true routes via loopback here,
           they would result in kernel looping; promote them to reject routes
index 164f1d01273c9bc1723951cf4f449952f6d6b3b6..d1c59cb6dceb21ef54c19d3a55c94ddb0f748071 100644 (file)
@@ -177,13 +177,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
                score = compute_score(sk, net, saddr, sport,
                                      daddr, hnum, dif, sdif, exact_dif);
                if (score > badness) {
-                       if (sk->sk_reuseport) {
+                       if (sk->sk_reuseport &&
+                           sk->sk_state != TCP_ESTABLISHED) {
                                hash = udp6_ehashfn(net, daddr, hnum,
                                                    saddr, sport);
 
                                result = reuseport_select_sock(sk, hash, skb,
                                                        sizeof(struct udphdr));
-                               if (result)
+                               if (result && !reuseport_has_conns(sk, false))
                                        return result;
                        }
                        result = sk;
@@ -1046,6 +1047,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
        __wsum csum = 0;
        int offset = skb_transport_offset(skb);
        int len = skb->len - offset;
+       int datalen = len - sizeof(*uh);
 
        /*
         * Create a UDP header
@@ -1078,8 +1080,12 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
                        return -EIO;
                }
 
-               skb_shinfo(skb)->gso_size = cork->gso_size;
-               skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+               if (datalen > cork->gso_size) {
+                       skb_shinfo(skb)->gso_size = cork->gso_size;
+                       skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+                       skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
+                                                                cork->gso_size);
+               }
                goto csum_partial;
        }
 
index 0b79c9aa8eb1f2184afbfe38cb29fa4b66b49ba6..1982f9f31debb26c615a91fffdc45887d01cbcba 100644 (file)
@@ -2442,8 +2442,10 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
                goto out;
        }
        err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(out_skb);
                goto out;
+       }
 
        out_hdr = (struct sadb_msg *) out_skb->data;
        out_hdr->sadb_msg_version = hdr->sadb_msg_version;
@@ -2694,8 +2696,10 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
                return PTR_ERR(out_skb);
 
        err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(out_skb);
                return err;
+       }
 
        out_hdr = (struct sadb_msg *) out_skb->data;
        out_hdr->sadb_msg_version = pfk->dump.msg_version;
index 04d9946dcdba648f8e4dce9c0c0211f7036ae135..c0956781665e154c89a4409d52cacc2b35e0cd36 100644 (file)
@@ -1686,6 +1686,9 @@ static const struct proto_ops pppol2tp_ops = {
        .recvmsg        = pppol2tp_recvmsg,
        .mmap           = sock_no_mmap,
        .ioctl          = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = pppox_compat_ioctl,
+#endif
 };
 
 static const struct pppox_proto pppol2tp_proto = {
index 40c51022346790073a5d328e80d7fd9022c2563d..a48e83b19cfa7b0250bfd38ca6ed0fce4351d0bd 100644 (file)
@@ -1471,6 +1471,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        if (is_multicast_ether_addr(mac))
                return -EINVAL;
 
+       if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
+           sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !sdata->u.mgd.associated)
+               return -EINVAL;
+
        sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
        if (!sta)
                return -ENOMEM;
@@ -1478,10 +1483,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
                sta->sta.tdls = true;
 
-       if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
-           !sdata->u.mgd.associated)
-               return -EINVAL;
-
        err = sta_apply_parameters(local, sta, params);
        if (err) {
                sta_info_free(local, sta);
index d37d4acafebf54f5bd47f8f642f6fa8956c54ec5..316250ae90712e9394138a4ef0a4866d7a9c0fa3 100644 (file)
@@ -490,9 +490,14 @@ static ssize_t ieee80211_if_fmt_aqm(
        const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
 {
        struct ieee80211_local *local = sdata->local;
-       struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+       struct txq_info *txqi;
        int len;
 
+       if (!sdata->vif.txq)
+               return 0;
+
+       txqi = to_txq_info(sdata->vif.txq);
+
        spin_lock_bh(&local->fq.lock);
        rcu_read_lock();
 
@@ -659,7 +664,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
        DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
        DEBUGFS_ADD(hw_queues);
 
-       if (sdata->local->ops->wake_tx_queue)
+       if (sdata->local->ops->wake_tx_queue &&
+           sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+           sdata->vif.type != NL80211_IFTYPE_NAN)
                DEBUGFS_ADD(aqm);
 }
 
index bb886e7db47f1dcf409a4746d7ee47be8c679462..f783d1377d9a8205eea3218354083debc3c5ecdb 100644 (file)
@@ -169,11 +169,16 @@ int drv_conf_tx(struct ieee80211_local *local,
        if (!check_sdata_in_driver(sdata))
                return -EIO;
 
-       if (WARN_ONCE(params->cw_min == 0 ||
-                     params->cw_min > params->cw_max,
-                     "%s: invalid CW_min/CW_max: %d/%d\n",
-                     sdata->name, params->cw_min, params->cw_max))
+       if (params->cw_min == 0 || params->cw_min > params->cw_max) {
+               /*
+                * If we can't configure hardware anyway, don't warn. We may
+                * never have initialized the CW parameters.
+                */
+               WARN_ONCE(local->ops->conf_tx,
+                         "%s: invalid CW_min/CW_max: %d/%d\n",
+                         sdata->name, params->cw_min, params->cw_max);
                return -EINVAL;
+       }
 
        trace_drv_conf_tx(local, sdata, ac, params);
        if (local->ops->conf_tx)
index 1aaa73fa308e655e013970e2abeb0d5da7cb4bb5..5c9dcafbc34244832d7a2e3218ae4a5abe48a215 100644 (file)
@@ -1967,6 +1967,16 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
                ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac);
        }
 
+       /* WMM specification requires all 4 ACIs. */
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               if (params[ac].cw_min == 0) {
+                       sdata_info(sdata,
+                                  "AP has invalid WMM params (missing AC %d), using defaults\n",
+                                  ac);
+                       return false;
+               }
+       }
+
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                mlme_dbg(sdata,
                         "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
@@ -2544,7 +2554,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
 
        rcu_read_lock();
        ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
-       if (WARN_ON_ONCE(ssid == NULL))
+       if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
+                     "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
                ssid_len = 0;
        else
                ssid_len = ssid[1];
@@ -5029,7 +5040,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
        rcu_read_lock();
        ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
-       if (!ssidie) {
+       if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
                rcu_read_unlock();
                kfree(assoc_data);
                return -EINVAL;
index 7523d995ea8abe0d26973f04e181898ca3f5cc7f..b12f23c996f4e5e0584b8046d14a460a0335bdfa 100644 (file)
@@ -2372,11 +2372,13 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
                      skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
                     sdata->control_port_over_nl80211)) {
                struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-               bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
+               bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
 
                cfg80211_rx_control_port(dev, skb, noencrypt);
                dev_kfree_skb(skb);
        } else {
+               memset(skb->cb, 0, sizeof(skb->cb));
+
                /* deliver to local stack */
                if (rx->napi)
                        napi_gro_receive(rx->napi, skb);
@@ -2470,8 +2472,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
 
        if (skb) {
                skb->protocol = eth_type_trans(skb, dev);
-               memset(skb->cb, 0, sizeof(skb->cb));
-
                ieee80211_deliver_skb_to_local_stack(skb, rx);
        }
 
index c59638574cf8b3a5a2ec86f1d799e8c0868eb43d..f101a6460b44b07e09a2328423aed5535d978913 100644 (file)
@@ -3527,9 +3527,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
        }
 
        /* Always allow software iftypes */
-       if (local->hw.wiphy->software_iftypes & BIT(iftype) ||
-           (iftype == NL80211_IFTYPE_AP_VLAN &&
-            local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
+       if (cfg80211_iftype_allowed(local->hw.wiphy, iftype, 0, 1)) {
                if (radar_detect)
                        return -EINVAL;
                return 0;
@@ -3564,7 +3562,8 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
 
                if (sdata_iter == sdata ||
                    !ieee80211_sdata_running(sdata_iter) ||
-                   local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
+                   cfg80211_iftype_allowed(local->hw.wiphy,
+                                           wdev_iter->iftype, 0, 1))
                        continue;
 
                params.iftype_num[wdev_iter->iftype]++;
index 13ade5782847bf8e4fe73eae31bdc0f37f51098b..4f01321e793ce120fa20d827506470c1c21ba4f4 100644 (file)
@@ -230,7 +230,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
 
        e.id = ip_to_id(map, ip);
 
-       if (opt->flags & IPSET_DIM_ONE_SRC)
+       if (opt->flags & IPSET_DIM_TWO_SRC)
                ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
        else
                ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
index 1577f2f76060dcd816f94078412f52943568ce40..e2538c5786714ff097f28530af99da893fb61dad 100644 (file)
@@ -1157,7 +1157,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
                return -ENOENT;
 
        write_lock_bh(&ip_set_ref_lock);
-       if (set->ref != 0) {
+       if (set->ref != 0 || set->ref_netlink != 0) {
                ret = -IPSET_ERR_REFERENCED;
                goto out;
        }
index 8a33dac4e8058b0ec07281814f5c90507421eb4e..ddfe06d7530bae18cabb850f825a1e1b9bf6d72a 100644 (file)
@@ -625,7 +625,7 @@ retry:
                                        goto cleanup;
                                }
                                m->size = AHASH_INIT_SIZE;
-                               extsize = ext_size(AHASH_INIT_SIZE, dsize);
+                               extsize += ext_size(AHASH_INIT_SIZE, dsize);
                                RCU_INIT_POINTER(hbucket(t, key), m);
                        } else if (m->pos >= m->size) {
                                struct hbucket *ht;
index fd87de3ed55b33a1d38035211001ba11b81b808e..16ec822e40447447095b9da3f50541d6da6bf990 100644 (file)
@@ -95,15 +95,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
        struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-        /* MAC can be src only */
-       if (!(opt->flags & IPSET_DIM_TWO_SRC))
-               return 0;
-
        if (skb_mac_header(skb) < skb->head ||
            (skb_mac_header(skb) + ETH_HLEN) > skb->data)
                return -EINVAL;
 
-       if (opt->flags & IPSET_DIM_ONE_SRC)
+       if (opt->flags & IPSET_DIM_TWO_SRC)
                ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
        else
                ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
index 62c0e80dcd7198e668e12e15beed6447027f762c..a71f777d1353abf16509c8389fd3863860ba0562 100644 (file)
@@ -2218,7 +2218,6 @@ static const struct nf_hook_ops ip_vs_ops[] = {
 static int __net_init __ip_vs_init(struct net *net)
 {
        struct netns_ipvs *ipvs;
-       int ret;
 
        ipvs = net_generic(net, ip_vs_net_id);
        if (ipvs == NULL)
@@ -2250,17 +2249,11 @@ static int __net_init __ip_vs_init(struct net *net)
        if (ip_vs_sync_net_init(ipvs) < 0)
                goto sync_fail;
 
-       ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
-       if (ret < 0)
-               goto hook_fail;
-
        return 0;
 /*
  * Error handling
  */
 
-hook_fail:
-       ip_vs_sync_net_cleanup(ipvs);
 sync_fail:
        ip_vs_conn_net_cleanup(ipvs);
 conn_fail:
@@ -2290,6 +2283,19 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
        net->ipvs = NULL;
 }
 
+static int __net_init __ip_vs_dev_init(struct net *net)
+{
+       int ret;
+
+       ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+       if (ret < 0)
+               goto hook_fail;
+       return 0;
+
+hook_fail:
+       return ret;
+}
+
 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
@@ -2309,6 +2315,7 @@ static struct pernet_operations ipvs_core_ops = {
 };
 
 static struct pernet_operations ipvs_core_dev_ops = {
+       .init = __ip_vs_dev_init,
        .exit = __ip_vs_dev_cleanup,
 };
 
index 2d4e048762f6ddb85e7785586ea6c1d4f9d26bba..3df94a49912663cfb2314b3132b96751df088f42 100644 (file)
@@ -2382,9 +2382,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
                        cfg.syncid = dm->syncid;
                        ret = start_sync_thread(ipvs, &cfg, dm->state);
                } else {
-                       mutex_lock(&ipvs->sync_mutex);
                        ret = stop_sync_thread(ipvs, dm->state);
-                       mutex_unlock(&ipvs->sync_mutex);
                }
                goto out_dec;
        }
@@ -3492,10 +3490,8 @@ static int ip_vs_genl_del_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
        if (!attrs[IPVS_DAEMON_ATTR_STATE])
                return -EINVAL;
 
-       mutex_lock(&ipvs->sync_mutex);
        ret = stop_sync_thread(ipvs,
                               nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
-       mutex_unlock(&ipvs->sync_mutex);
        return ret;
 }
 
index d4020c5e831d3020a6e412ead6d1895f81b5a124..ecb71062fcb3cbfe230de9ddd5c4b86dbd41f051 100644 (file)
@@ -195,6 +195,7 @@ union ip_vs_sync_conn {
 #define IPVS_OPT_F_PARAM       (1 << (IPVS_OPT_PARAM-1))
 
 struct ip_vs_sync_thread_data {
+       struct task_struct *task;
        struct netns_ipvs *ipvs;
        struct socket *sock;
        char *buf;
@@ -374,8 +375,11 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs,
                                              max(IPVS_SYNC_SEND_DELAY, 1));
                ms->sync_queue_len++;
                list_add_tail(&sb->list, &ms->sync_queue);
-               if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE)
-                       wake_up_process(ms->master_thread);
+               if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) {
+                       int id = (int)(ms - ipvs->ms);
+
+                       wake_up_process(ipvs->master_tinfo[id].task);
+               }
        } else
                ip_vs_sync_buff_release(sb);
        spin_unlock(&ipvs->sync_lock);
@@ -1636,8 +1640,10 @@ static void master_wakeup_work_handler(struct work_struct *work)
        spin_lock_bh(&ipvs->sync_lock);
        if (ms->sync_queue_len &&
            ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
+               int id = (int)(ms - ipvs->ms);
+
                ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
-               wake_up_process(ms->master_thread);
+               wake_up_process(ipvs->master_tinfo[id].task);
        }
        spin_unlock_bh(&ipvs->sync_lock);
 }
@@ -1703,10 +1709,6 @@ done:
        if (sb)
                ip_vs_sync_buff_release(sb);
 
-       /* release the sending multicast socket */
-       sock_release(tinfo->sock);
-       kfree(tinfo);
-
        return 0;
 }
 
@@ -1740,11 +1742,6 @@ static int sync_thread_backup(void *data)
                }
        }
 
-       /* release the sending multicast socket */
-       sock_release(tinfo->sock);
-       kfree(tinfo->buf);
-       kfree(tinfo);
-
        return 0;
 }
 
@@ -1752,8 +1749,8 @@ static int sync_thread_backup(void *data)
 int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
                      int state)
 {
-       struct ip_vs_sync_thread_data *tinfo = NULL;
-       struct task_struct **array = NULL, *task;
+       struct ip_vs_sync_thread_data *ti = NULL, *tinfo;
+       struct task_struct *task;
        struct net_device *dev;
        char *name;
        int (*threadfn)(void *data);
@@ -1822,7 +1819,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
                threadfn = sync_thread_master;
        } else if (state == IP_VS_STATE_BACKUP) {
                result = -EEXIST;
-               if (ipvs->backup_threads)
+               if (ipvs->backup_tinfo)
                        goto out_early;
 
                ipvs->bcfg = *c;
@@ -1849,28 +1846,22 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
                                          master_wakeup_work_handler);
                        ms->ipvs = ipvs;
                }
-       } else {
-               array = kcalloc(count, sizeof(struct task_struct *),
-                               GFP_KERNEL);
-               result = -ENOMEM;
-               if (!array)
-                       goto out;
        }
+       result = -ENOMEM;
+       ti = kcalloc(count, sizeof(struct ip_vs_sync_thread_data),
+                    GFP_KERNEL);
+       if (!ti)
+               goto out;
 
        for (id = 0; id < count; id++) {
-               result = -ENOMEM;
-               tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
-               if (!tinfo)
-                       goto out;
+               tinfo = &ti[id];
                tinfo->ipvs = ipvs;
-               tinfo->sock = NULL;
                if (state == IP_VS_STATE_BACKUP) {
+                       result = -ENOMEM;
                        tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
                                             GFP_KERNEL);
                        if (!tinfo->buf)
                                goto out;
-               } else {
-                       tinfo->buf = NULL;
                }
                tinfo->id = id;
                if (state == IP_VS_STATE_MASTER)
@@ -1885,17 +1876,15 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
                        result = PTR_ERR(task);
                        goto out;
                }
-               tinfo = NULL;
-               if (state == IP_VS_STATE_MASTER)
-                       ipvs->ms[id].master_thread = task;
-               else
-                       array[id] = task;
+               tinfo->task = task;
        }
 
        /* mark as active */
 
-       if (state == IP_VS_STATE_BACKUP)
-               ipvs->backup_threads = array;
+       if (state == IP_VS_STATE_MASTER)
+               ipvs->master_tinfo = ti;
+       else
+               ipvs->backup_tinfo = ti;
        spin_lock_bh(&ipvs->sync_buff_lock);
        ipvs->sync_state |= state;
        spin_unlock_bh(&ipvs->sync_buff_lock);
@@ -1910,29 +1899,31 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 
 out:
        /* We do not need RTNL lock anymore, release it here so that
-        * sock_release below and in the kthreads can use rtnl_lock
-        * to leave the mcast group.
+        * sock_release below can use rtnl_lock to leave the mcast group.
         */
        rtnl_unlock();
-       count = id;
-       while (count-- > 0) {
-               if (state == IP_VS_STATE_MASTER)
-                       kthread_stop(ipvs->ms[count].master_thread);
-               else
-                       kthread_stop(array[count]);
+       id = min(id, count - 1);
+       if (ti) {
+               for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+                       if (tinfo->task)
+                               kthread_stop(tinfo->task);
+               }
        }
        if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
                kfree(ipvs->ms);
                ipvs->ms = NULL;
        }
        mutex_unlock(&ipvs->sync_mutex);
-       if (tinfo) {
-               if (tinfo->sock)
-                       sock_release(tinfo->sock);
-               kfree(tinfo->buf);
-               kfree(tinfo);
+
+       /* No more mutexes, release socks */
+       if (ti) {
+               for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+                       if (tinfo->sock)
+                               sock_release(tinfo->sock);
+                       kfree(tinfo->buf);
+               }
+               kfree(ti);
        }
-       kfree(array);
        return result;
 
 out_early:
@@ -1944,15 +1935,18 @@ out_early:
 
 int stop_sync_thread(struct netns_ipvs *ipvs, int state)
 {
-       struct task_struct **array;
+       struct ip_vs_sync_thread_data *ti, *tinfo;
        int id;
        int retc = -EINVAL;
 
        IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
 
+       mutex_lock(&ipvs->sync_mutex);
        if (state == IP_VS_STATE_MASTER) {
+               retc = -ESRCH;
                if (!ipvs->ms)
-                       return -ESRCH;
+                       goto err;
+               ti = ipvs->master_tinfo;
 
                /*
                 * The lock synchronizes with sb_queue_tail(), so that we don't
@@ -1971,38 +1965,56 @@ int stop_sync_thread(struct netns_ipvs *ipvs, int state)
                        struct ipvs_master_sync_state *ms = &ipvs->ms[id];
                        int ret;
 
+                       tinfo = &ti[id];
                        pr_info("stopping master sync thread %d ...\n",
-                               task_pid_nr(ms->master_thread));
+                               task_pid_nr(tinfo->task));
                        cancel_delayed_work_sync(&ms->master_wakeup_work);
-                       ret = kthread_stop(ms->master_thread);
+                       ret = kthread_stop(tinfo->task);
                        if (retc >= 0)
                                retc = ret;
                }
                kfree(ipvs->ms);
                ipvs->ms = NULL;
+               ipvs->master_tinfo = NULL;
        } else if (state == IP_VS_STATE_BACKUP) {
-               if (!ipvs->backup_threads)
-                       return -ESRCH;
+               retc = -ESRCH;
+               if (!ipvs->backup_tinfo)
+                       goto err;
+               ti = ipvs->backup_tinfo;
 
                ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
-               array = ipvs->backup_threads;
                retc = 0;
                for (id = ipvs->threads_mask; id >= 0; id--) {
                        int ret;
 
+                       tinfo = &ti[id];
                        pr_info("stopping backup sync thread %d ...\n",
-                               task_pid_nr(array[id]));
-                       ret = kthread_stop(array[id]);
+                               task_pid_nr(tinfo->task));
+                       ret = kthread_stop(tinfo->task);
                        if (retc >= 0)
                                retc = ret;
                }
-               kfree(array);
-               ipvs->backup_threads = NULL;
+               ipvs->backup_tinfo = NULL;
+       } else {
+               goto err;
        }
+       id = ipvs->threads_mask;
+       mutex_unlock(&ipvs->sync_mutex);
+
+       /* No more mutexes, release socks */
+       for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+               if (tinfo->sock)
+                       sock_release(tinfo->sock);
+               kfree(tinfo->buf);
+       }
+       kfree(ti);
 
        /* decrease the module use count */
        ip_vs_use_count_dec();
+       return retc;
 
+err:
+       mutex_unlock(&ipvs->sync_mutex);
        return retc;
 }
 
@@ -2021,7 +2033,6 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
 {
        int retc;
 
-       mutex_lock(&ipvs->sync_mutex);
        retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER);
        if (retc && retc != -ESRCH)
                pr_err("Failed to stop Master Daemon\n");
@@ -2029,5 +2040,4 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
        retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP);
        if (retc && retc != -ESRCH)
                pr_err("Failed to stop Backup Daemon\n");
-       mutex_unlock(&ipvs->sync_mutex);
 }
index 27eff89fad01c3ccc3a8e9593d4aca94c216758d..c6073d17c324430404775a76b9ef9d1bdbe174ab 100644 (file)
@@ -431,13 +431,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
  * table location, we assume id gets exposed to userspace.
  *
  * Following nf_conn items do not change throughout lifetime
- * of the nf_conn after it has been committed to main hash table:
+ * of the nf_conn:
  *
  * 1. nf_conn address
- * 2. nf_conn->ext address
- * 3. nf_conn->master address (normally NULL)
- * 4. tuple
- * 5. the associated net namespace
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
  */
 u32 nf_ct_get_id(const struct nf_conn *ct)
 {
@@ -447,9 +446,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
        net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
 
        a = (unsigned long)ct;
-       b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
-       c = (unsigned long)ct->ext;
-       d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+       b = (unsigned long)ct->master;
+       c = (unsigned long)nf_ct_net(ct);
+       d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+                                  sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
                                   &ct_id_seed);
 #ifdef CONFIG_64BIT
        return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
index a11c304fb7713b36eacd21f421b72e4a4db1b9e7..efc14c7b4f8ef68f24ccce80395465c27cc7b289 100644 (file)
@@ -323,7 +323,7 @@ static int find_pattern(const char *data, size_t dlen,
                i++;
        }
 
-       pr_debug("Skipped up to `%c'!\n", skip);
+       pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
 
        *numoff = i;
        *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
index 842f3f86fb2e7af1a7f75af55cfdd8bad5e92c81..7011ab27c4371bbedcac3c15795173eaa074ab46 100644 (file)
@@ -480,6 +480,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
        struct ip_ct_tcp_state *receiver = &state->seen[!dir];
        const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
        __u32 seq, ack, sack, end, win, swin;
+       u16 win_raw;
        s32 receiver_offset;
        bool res, in_recv_win;
 
@@ -488,7 +489,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
         */
        seq = ntohl(tcph->seq);
        ack = sack = ntohl(tcph->ack_seq);
-       win = ntohs(tcph->window);
+       win_raw = ntohs(tcph->window);
+       win = win_raw;
        end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
 
        if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
@@ -663,14 +665,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
                            && state->last_seq == seq
                            && state->last_ack == ack
                            && state->last_end == end
-                           && state->last_win == win)
+                           && state->last_win == win_raw)
                                state->retrans++;
                        else {
                                state->last_dir = dir;
                                state->last_seq = seq;
                                state->last_ack = ack;
                                state->last_end = end;
-                               state->last_win = win;
+                               state->last_win = win_raw;
                                state->retrans = 0;
                        }
                }
index 5df7486bb4164ec65b346b4ba2ecb642ee73029a..8ade4051294442f88eb50498632fc023ab12d3fe 100644 (file)
@@ -203,7 +203,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
                return err;
        }
 
-       flow->timeout = (u32)jiffies;
+       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
        return 0;
 }
 EXPORT_SYMBOL_GPL(flow_offload_add);
index 7569ba00e732843958f616efc29458de65c65ecf..a96a8c16baf99fda44c5bdd6ddfd8581f360f9a4 100644 (file)
@@ -174,6 +174,11 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                goto err;
        }
 
+       if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) {
+               status = -ENETDOWN;
+               goto err;
+       }
+
        *entry = (struct nf_queue_entry) {
                .skb    = skb,
                .state  = *state,
@@ -182,7 +187,6 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
        };
 
        nf_queue_entry_get_refs(entry);
-       skb_dst_force(skb);
 
        switch (entry->state.pf) {
        case AF_INET:
index 29ff59dd99acec375c4d1e6dd7c5d1eed21320b8..24fddf0322790f360d94a33db5077cdc20522646 100644 (file)
@@ -121,9 +121,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
                return;
 
        list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
-               if (trans->msg_type == NFT_MSG_NEWSET &&
-                   nft_trans_set(trans) == set) {
-                       set->bound = true;
+               switch (trans->msg_type) {
+               case NFT_MSG_NEWSET:
+                       if (nft_trans_set(trans) == set)
+                               nft_trans_set_bound(trans) = true;
+                       break;
+               case NFT_MSG_NEWSETELEM:
+                       if (nft_trans_elem_set(trans) == set)
+                               nft_trans_elem_set_bound(trans) = true;
                        break;
                }
        }
@@ -3424,8 +3429,11 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
                              NFT_SET_OBJECT))
                        return -EINVAL;
                /* Only one of these operations is supported */
-               if ((flags & (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT)) ==
-                            (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT))
+               if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
+                            (NFT_SET_MAP | NFT_SET_OBJECT))
+                       return -EOPNOTSUPP;
+               if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
+                            (NFT_SET_EVAL | NFT_SET_OBJECT))
                        return -EOPNOTSUPP;
        }
 
@@ -6656,7 +6664,7 @@ static int __nf_tables_abort(struct net *net)
                        break;
                case NFT_MSG_NEWSET:
                        trans->ctx.table->use--;
-                       if (nft_trans_set(trans)->bound) {
+                       if (nft_trans_set_bound(trans)) {
                                nft_trans_destroy(trans);
                                break;
                        }
@@ -6668,7 +6676,7 @@ static int __nf_tables_abort(struct net *net)
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWSETELEM:
-                       if (nft_trans_elem_set(trans)->bound) {
+                       if (nft_trans_elem_set_bound(trans)) {
                                nft_trans_destroy(trans);
                                break;
                        }
index 916913454624f2740212c62d1c0ce61bc8ae6f73..7f2c1915763f8caec6e2b2b5a460e6db5767d094 100644 (file)
@@ -575,7 +575,7 @@ static int nfnetlink_bind(struct net *net, int group)
        ss = nfnetlink_get_subsys(type << 8);
        rcu_read_unlock();
        if (!ss)
-               request_module("nfnetlink-subsys-%d", type);
+               request_module_nowait("nfnetlink-subsys-%d", type);
        return 0;
 }
 #endif
index af1497ab9464236b3875aeb536ae32e244e99141..69d6173f91e2b9a5070ac6e98977cc5dd4dc0dfd 100644 (file)
@@ -218,8 +218,13 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
 static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
 {
        struct nft_connlimit *priv = nft_expr_priv(expr);
+       bool ret;
 
-       return nf_conncount_gc_list(net, &priv->list);
+       local_bh_disable();
+       ret = nf_conncount_gc_list(net, &priv->list);
+       local_bh_enable();
+
+       return ret;
 }
 
 static struct nft_expr_type nft_connlimit_type;
index 6e0c26025ab13ca3a4b9dd7095c75ef3849c173c..1ef8cb789c41a668bf02bfa0b6b44d758e360624 100644 (file)
@@ -71,11 +71,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 {
        struct nft_flow_offload *priv = nft_expr_priv(expr);
        struct nf_flowtable *flowtable = &priv->flowtable->data;
+       struct tcphdr _tcph, *tcph = NULL;
        enum ip_conntrack_info ctinfo;
        struct nf_flow_route route;
        struct flow_offload *flow;
        enum ip_conntrack_dir dir;
-       bool is_tcp = false;
        struct nf_conn *ct;
        int ret;
 
@@ -88,7 +88,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 
        switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
        case IPPROTO_TCP:
-               is_tcp = true;
+               tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
+                                         sizeof(_tcph), &_tcph);
+               if (unlikely(!tcph || tcph->fin || tcph->rst))
+                       goto out;
                break;
        case IPPROTO_UDP:
                break;
@@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
        if (!flow)
                goto err_flow_alloc;
 
-       if (is_tcp) {
+       if (tcph) {
                ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
                ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
        }
@@ -146,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
        return nft_chain_validate_hooks(ctx->chain, hook_mask);
 }
 
+static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
+       [NFTA_FLOW_TABLE_NAME]  = { .type = NLA_STRING,
+                                   .len = NFT_NAME_MAXLEN - 1 },
+};
+
 static int nft_flow_offload_init(const struct nft_ctx *ctx,
                                 const struct nft_expr *expr,
                                 const struct nlattr * const tb[])
@@ -204,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = {
 static struct nft_expr_type nft_flow_offload_type __read_mostly = {
        .name           = "flow_offload",
        .ops            = &nft_flow_offload_ops,
+       .policy         = nft_flow_offload_policy,
        .maxattr        = NFTA_FLOW_MAX,
        .owner          = THIS_MODULE,
 };
index c2d237144f747c4e4938ca4807668a400d1018c5..b8f23f75aea6cbe96bf627c030d39e50539596ad 100644 (file)
@@ -196,7 +196,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
        priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
 
        priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
-       if (priv->modulus <= 1)
+       if (priv->modulus < 1)
                return -ERANGE;
 
        if (priv->offset + priv->modulus - 1 < priv->offset)
index 161c3451a747a7632b0906159b3b982ef985a28a..55754d9939b50a3e8e3829c165d9911a75c9e275 100644 (file)
@@ -76,9 +76,6 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
        if (IS_ERR(set))
                return PTR_ERR(set);
 
-       if (set->flags & NFT_SET_EVAL)
-               return -EOPNOTSUPP;
-
        priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
        err = nft_validate_register_load(priv->sreg, set->klen);
        if (err < 0)
index d7f3776dfd719d402ae178890107db8ebd1627f2..637ce3e8c575ce1b95dfcd340347176772cdff2d 100644 (file)
@@ -47,9 +47,6 @@ static void nft_socket_eval(const struct nft_expr *expr,
                return;
        }
 
-       /* So that subsequent socket matching not to require other lookups. */
-       skb->sk = sk;
-
        switch(priv->key) {
        case NFT_SOCKET_TRANSPARENT:
                nft_reg_store8(dest, inet_sk_transparent(sk));
@@ -66,6 +63,9 @@ static void nft_socket_eval(const struct nft_expr *expr,
                WARN_ON(1);
                regs->verdict.code = NFT_BREAK;
        }
+
+       if (sk != skb->sk)
+               sock_gen_put(sk);
 }
 
 static const struct nla_policy nft_socket_policy[NFTA_SOCKET_MAX + 1] = {
index 6b56f4170860c49e79946a4385a9365bb149af3d..3241fee9f2a19585fb180f552a104299e0918dc6 100644 (file)
@@ -57,25 +57,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par)
        nfnl_acct_put(info->nfacct);
 }
 
-static struct xt_match nfacct_mt_reg __read_mostly = {
-       .name       = "nfacct",
-       .family     = NFPROTO_UNSPEC,
-       .checkentry = nfacct_mt_checkentry,
-       .match      = nfacct_mt,
-       .destroy    = nfacct_mt_destroy,
-       .matchsize  = sizeof(struct xt_nfacct_match_info),
-       .usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
-       .me         = THIS_MODULE,
+static struct xt_match nfacct_mt_reg[] __read_mostly = {
+       {
+               .name       = "nfacct",
+               .revision   = 0,
+               .family     = NFPROTO_UNSPEC,
+               .checkentry = nfacct_mt_checkentry,
+               .match      = nfacct_mt,
+               .destroy    = nfacct_mt_destroy,
+               .matchsize  = sizeof(struct xt_nfacct_match_info),
+               .usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
+               .me         = THIS_MODULE,
+       },
+       {
+               .name       = "nfacct",
+               .revision   = 1,
+               .family     = NFPROTO_UNSPEC,
+               .checkentry = nfacct_mt_checkentry,
+               .match      = nfacct_mt,
+               .destroy    = nfacct_mt_destroy,
+               .matchsize  = sizeof(struct xt_nfacct_match_info_v1),
+               .usersize   = offsetof(struct xt_nfacct_match_info_v1, nfacct),
+               .me         = THIS_MODULE,
+       },
 };
 
 static int __init nfacct_mt_init(void)
 {
-       return xt_register_match(&nfacct_mt_reg);
+       return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
 }
 
 static void __exit nfacct_mt_exit(void)
 {
-       xt_unregister_match(&nfacct_mt_reg);
+       xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
 }
 
 module_init(nfacct_mt_init);
index 05f00fb20b047a1fd0aa5fe4c6bc821aa20faef9..cd15ea79e3e2a3cc339c0f4c4da5156e513c3b47 100644 (file)
@@ -104,11 +104,9 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
        if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) &&
            (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
             info->invert & XT_PHYSDEV_OP_BRIDGED) &&
-           par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
-           (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
+           par->hook_mask & (1 << NF_INET_LOCAL_OUT)) {
                pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
-               if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
-                       return -EINVAL;
+               return -EINVAL;
        }
 
        if (!brnf_probed) {
index 71ffd1a6dc7c6063c00f4c82f985fe9fc0d80dc0..43910e50752cb3f1fabb69f61ed124edf7c926ef 100644 (file)
@@ -872,7 +872,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
        unsigned short frametype, flags, window, timeout;
        int ret;
 
-       skb->sk = NULL;         /* Initially we don't know who it's for */
+       skb_orphan(skb);
 
        /*
         *      skb->data points to the netrom frame start
@@ -970,7 +970,9 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
 
        window = skb->data[20];
 
+       sock_hold(make);
        skb->sk             = make;
+       skb->destructor     = sock_efree;
        make->sk_state      = TCP_ESTABLISHED;
 
        /* Fill in his circuit details */
index ae296273ce3db96cdaeafba66a7ff460d8a59794..e0a2cb8a029f8ff1a5322559a281603122dbcfdd 100644 (file)
@@ -119,9 +119,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
        llcp_sock->service_name = kmemdup(llcp_addr.service_name,
                                          llcp_sock->service_name_len,
                                          GFP_KERNEL);
-
+       if (!llcp_sock->service_name) {
+               ret = -ENOMEM;
+               goto put_dev;
+       }
        llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
        if (llcp_sock->ssap == LLCP_SAP_MAX) {
+               kfree(llcp_sock->service_name);
+               llcp_sock->service_name = NULL;
                ret = -EADDRINUSE;
                goto put_dev;
        }
@@ -1011,10 +1016,13 @@ static int llcp_sock_create(struct net *net, struct socket *sock,
            sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;
 
-       if (sock->type == SOCK_RAW)
+       if (sock->type == SOCK_RAW) {
+               if (!capable(CAP_NET_RAW))
+                       return -EPERM;
                sock->ops = &llcp_rawsock_ops;
-       else
+       } else {
                sock->ops = &llcp_sock_ops;
+       }
 
        sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern);
        if (sk == NULL)
index 908f25e3773e5b494342228fc2e94f6ed4195659..5405d073804c6569988069252db5cd099025915e 100644 (file)
@@ -119,7 +119,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
        conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
        if (!conn_info) {
                rc = -EPROTO;
-               goto free_exit;
+               goto exit;
        }
 
        __skb_queue_head_init(&frags_q);
index 9f2875efb4ac9693ce89c636ebf3ed087ce5f0f6..b3662264aa24c38e7747eab3686e9bcdbf1f9dff 100644 (file)
@@ -981,7 +981,8 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
        int rc;
        u32 idx;
 
-       if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+       if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+           !info->attrs[NFC_ATTR_TARGET_INDEX])
                return -EINVAL;
 
        idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -1029,7 +1030,8 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *msg = NULL;
        u32 idx;
 
-       if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+       if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+           !info->attrs[NFC_ATTR_FIRMWARE_NAME])
                return -EINVAL;
 
        idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
index 85ae53d8fd098b80e22a4b3ccc26f83be7b110c5..8211e8e97c96462837bcf96bb707330489e97fb4 100644 (file)
@@ -175,8 +175,7 @@ static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                __be16 diff[] = { ~(hdr->h_proto), ethertype };
 
-               skb->csum = ~csum_partial((char *)diff, sizeof(diff),
-                                       ~skb->csum);
+               skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
        }
 
        hdr->h_proto = ethertype;
@@ -268,8 +267,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                __be32 diff[] = { ~(stack->label_stack_entry), lse };
 
-               skb->csum = ~csum_partial((char *)diff, sizeof(diff),
-                                         ~skb->csum);
+               skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
        }
 
        stack->label_stack_entry = lse;
index 0f5ce77460d44099277e142e37d11620e426e9cb..8e396c7c838946606a863ff1f59c2e2d7443d947 100644 (file)
@@ -2239,7 +2239,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
        [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
-       [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+       [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
        [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
index 93b5a4200585022ece037a8262e3610b40cddced..7204e7bbebb0dd714aee99ef69e4b8e618eaa43a 100644 (file)
@@ -2616,6 +2616,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 
        mutex_lock(&po->pg_vec_lock);
 
+       /* packet_sendmsg() check on tx_ring.pg_vec was lockless,
+        * we need to confirm it under protection of pg_vec_lock.
+        */
+       if (unlikely(!po->tx_ring.pg_vec)) {
+               err = -EBUSY;
+               goto out;
+       }
        if (likely(saddr == NULL)) {
                dev     = packet_cached_dev_get(po);
                proto   = po->num;
index 64f95624f21939c946e0bb256a40d825588e0fd2..4cea353221da7b2919017b2493dc548b0f0fe000 100644 (file)
@@ -156,7 +156,7 @@ static void psample_group_destroy(struct psample_group *group)
 {
        psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
        list_del(&group->list);
-       kfree(group);
+       kfree_rcu(group, rcu);
 }
 
 static struct psample_group *
index 86e1e37eb4e8a68beeecd3bfeeb597951259ea81..5c75118539bb73d170c92cbba28851dbf7d593cf 100644 (file)
@@ -157,6 +157,7 @@ static void __qrtr_node_release(struct kref *kref)
        list_del(&node->item);
        mutex_unlock(&qrtr_node_lock);
 
+       cancel_work_sync(&node->work);
        skb_queue_purge(&node->rx_queue);
        kfree(node);
 }
index 0f4398e7f2a7add7c20b6fdd333c40af4e719c92..93e336535d3b6ee9335b1e06e1a91030f6b1a618 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -239,34 +239,33 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        }
 
-       sock_set_flag(sk, SOCK_RCU_FREE);
-       ret = rds_add_bound(rs, binding_addr, &port, scope_id);
-       if (ret)
-               goto out;
-
-       if (rs->rs_transport) { /* previously bound */
+       /* The transport can be set using SO_RDS_TRANSPORT option before the
+        * socket is bound.
+        */
+       if (rs->rs_transport) {
                trans = rs->rs_transport;
-               if (trans->laddr_check(sock_net(sock->sk),
+               if (!trans->laddr_check ||
+                   trans->laddr_check(sock_net(sock->sk),
                                       binding_addr, scope_id) != 0) {
                        ret = -ENOPROTOOPT;
-                       rds_remove_bound(rs);
-               } else {
-                       ret = 0;
+                       goto out;
                }
-               goto out;
-       }
-       trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr,
-                                       scope_id);
-       if (!trans) {
-               ret = -EADDRNOTAVAIL;
-               rds_remove_bound(rs);
-               pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
-                                   __func__, binding_addr);
-               goto out;
+       } else {
+               trans = rds_trans_get_preferred(sock_net(sock->sk),
+                                               binding_addr, scope_id);
+               if (!trans) {
+                       ret = -EADDRNOTAVAIL;
+                       pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
+                                           __func__, binding_addr);
+                       goto out;
+               }
+               rs->rs_transport = trans;
        }
 
-       rs->rs_transport = trans;
-       ret = 0;
+       sock_set_flag(sk, SOCK_RCU_FREE);
+       ret = rds_add_bound(rs, binding_addr, &port, scope_id);
+       if (ret)
+               rs->rs_transport = NULL;
 
 out:
        release_sock(sk);
index eba75c1ba359446ca7e39e5aaa88287129531055..ba3379085c52d249f9a9b919679910237bcee682 100644 (file)
@@ -143,6 +143,9 @@ static void rds_ib_add_one(struct ib_device *device)
        refcount_set(&rds_ibdev->refcount, 1);
        INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
 
+       INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
+       INIT_LIST_HEAD(&rds_ibdev->conn_list);
+
        rds_ibdev->max_wrs = device->attrs.max_qp_wr;
        rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
 
@@ -203,9 +206,6 @@ static void rds_ib_add_one(struct ib_device *device)
                device->name,
                rds_ibdev->use_fastreg ? "FRMR" : "FMR");
 
-       INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
-       INIT_LIST_HEAD(&rds_ibdev->conn_list);
-
        down_write(&rds_ib_devices_lock);
        list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
        up_write(&rds_ib_devices_lock);
index 504cd6bcc54c5ed4d6e151d176761899d5a25d91..c0b945516cdbce681c09cf2d7377dfcfd9612162 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -803,6 +803,7 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
 
        minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
        minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
+       minfo6.tos = 0;
 
        if (flip) {
                minfo6.laddr = *daddr;
@@ -816,6 +817,8 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
                minfo6.fport = inc->i_hdr.h_dport;
        }
 
+       minfo6.flags = 0;
+
        rds_info_copy(iter, &minfo6, sizeof(minfo6));
 }
 #endif
index 3c39b8805d01f2460df022edfad47f3bae6aaed5..7319d3ca30e9494e31ff2a1202ac22e93ad6f56b 100644 (file)
@@ -195,7 +195,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 
 service_in_use:
        write_unlock(&local->services_lock);
-       rxrpc_put_local(local);
+       rxrpc_unuse_local(local);
        ret = -EADDRINUSE;
 error_unlock:
        release_sock(&rx->sk);
@@ -552,6 +552,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
 
        switch (rx->sk.sk_state) {
        case RXRPC_UNBOUND:
+       case RXRPC_CLIENT_UNBOUND:
                rx->srx.srx_family = AF_RXRPC;
                rx->srx.srx_service = 0;
                rx->srx.transport_type = SOCK_DGRAM;
@@ -576,10 +577,9 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
                }
 
                rx->local = local;
-               rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
+               rx->sk.sk_state = RXRPC_CLIENT_BOUND;
                /* Fall through */
 
-       case RXRPC_CLIENT_UNBOUND:
        case RXRPC_CLIENT_BOUND:
                if (!m->msg_name &&
                    test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
@@ -908,7 +908,7 @@ static int rxrpc_release_sock(struct sock *sk)
        rxrpc_queue_work(&rxnet->service_conn_reaper);
        rxrpc_queue_work(&rxnet->client_conn_reaper);
 
-       rxrpc_put_local(rx->local);
+       rxrpc_unuse_local(rx->local);
        rx->local = NULL;
        key_put(rx->key);
        rx->key = NULL;
index 03e0fc8c183f0f1e1cbe32eee2cc35390b132d77..dfd9eab77cc8a883b55a4e40c2c456c261804efc 100644 (file)
@@ -258,7 +258,8 @@ struct rxrpc_security {
  */
 struct rxrpc_local {
        struct rcu_head         rcu;
-       atomic_t                usage;
+       atomic_t                active_users;   /* Number of users of the local endpoint */
+       atomic_t                usage;          /* Number of references to the structure */
        struct rxrpc_net        *rxnet;         /* The network ns in which this resides */
        struct list_head        link;
        struct socket           *socket;        /* my UDP socket */
@@ -998,6 +999,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
 struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
 struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
 void rxrpc_put_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
+void rxrpc_unuse_local(struct rxrpc_local *);
 void rxrpc_queue_local(struct rxrpc_local *);
 void rxrpc_destroy_all_locals(struct rxrpc_net *);
 
@@ -1057,6 +1060,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
 void rxrpc_put_peer(struct rxrpc_peer *);
+void rxrpc_put_peer_locked(struct rxrpc_peer *);
 
 /*
  * proc.c
index d591f54cb91fb615d67ec96566566dcef39efd18..7965600ee5dec068a8830366e510a7acd9644eb6 100644 (file)
@@ -1106,8 +1106,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
 {
        _enter("%p,%p", local, skb);
 
-       skb_queue_tail(&local->event_queue, skb);
-       rxrpc_queue_local(local);
+       if (rxrpc_get_local_maybe(local)) {
+               skb_queue_tail(&local->event_queue, skb);
+               rxrpc_queue_local(local);
+       } else {
+               rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+       }
 }
 
 /*
@@ -1117,8 +1121,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
 {
        CHECK_SLAB_OKAY(&local->usage);
 
-       skb_queue_tail(&local->reject_queue, skb);
-       rxrpc_queue_local(local);
+       if (rxrpc_get_local_maybe(local)) {
+               skb_queue_tail(&local->reject_queue, skb);
+               rxrpc_queue_local(local);
+       } else {
+               rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+       }
 }
 
 /*
index 10317dbdab5f4ba0ddd4c3a7f3a775764a81a06b..c752ad4870678dcdd7c04e42692ef7c586fc014b 100644 (file)
@@ -83,6 +83,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
        local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
        if (local) {
                atomic_set(&local->usage, 1);
+               atomic_set(&local->active_users, 1);
                local->rxnet = rxnet;
                INIT_LIST_HEAD(&local->link);
                INIT_WORK(&local->processor, rxrpc_local_processor);
@@ -96,7 +97,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
                local->debug_id = atomic_inc_return(&rxrpc_debug_id);
                memcpy(&local->srx, srx, sizeof(*srx));
                local->srx.srx_service = 0;
-               trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
+               trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
        }
 
        _leave(" = %p", local);
@@ -270,11 +271,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
                 * bind the transport socket may still fail if we're attempting
                 * to use a local address that the dying object is still using.
                 */
-               if (!rxrpc_get_local_maybe(local)) {
-                       cursor = cursor->next;
-                       list_del_init(&local->link);
+               if (!rxrpc_use_local(local))
                        break;
-               }
 
                age = "old";
                goto found;
@@ -288,7 +286,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
        if (ret < 0)
                goto sock_error;
 
-       list_add_tail(&local->link, cursor);
+       if (cursor != &rxnet->local_endpoints)
+               list_replace_init(cursor, &local->link);
+       else
+               list_add_tail(&local->link, cursor);
        age = "new";
 
 found:
@@ -324,7 +325,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
        int n;
 
        n = atomic_inc_return(&local->usage);
-       trace_rxrpc_local(local, rxrpc_local_got, n, here);
+       trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
        return local;
 }
 
@@ -338,7 +339,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
        if (local) {
                int n = atomic_fetch_add_unless(&local->usage, 1, 0);
                if (n > 0)
-                       trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
+                       trace_rxrpc_local(local->debug_id, rxrpc_local_got,
+                                         n + 1, here);
                else
                        local = NULL;
        }
@@ -346,24 +348,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 }
 
 /*
- * Queue a local endpoint.
+ * Queue a local endpoint and pass the caller's reference to the work item.
  */
 void rxrpc_queue_local(struct rxrpc_local *local)
 {
        const void *here = __builtin_return_address(0);
+       unsigned int debug_id = local->debug_id;
+       int n = atomic_read(&local->usage);
 
        if (rxrpc_queue_work(&local->processor))
-               trace_rxrpc_local(local, rxrpc_local_queued,
-                                 atomic_read(&local->usage), here);
-}
-
-/*
- * A local endpoint reached its end of life.
- */
-static void __rxrpc_put_local(struct rxrpc_local *local)
-{
-       _enter("%d", local->debug_id);
-       rxrpc_queue_work(&local->processor);
+               trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
+       else
+               rxrpc_put_local(local);
 }
 
 /*
@@ -376,10 +372,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
 
        if (local) {
                n = atomic_dec_return(&local->usage);
-               trace_rxrpc_local(local, rxrpc_local_put, n, here);
+               trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
 
                if (n == 0)
-                       __rxrpc_put_local(local);
+                       call_rcu(&local->rcu, rxrpc_local_rcu);
+       }
+}
+
+/*
+ * Start using a local endpoint.
+ */
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
+{
+       unsigned int au;
+
+       local = rxrpc_get_local_maybe(local);
+       if (!local)
+               return NULL;
+
+       au = atomic_fetch_add_unless(&local->active_users, 1, 0);
+       if (au == 0) {
+               rxrpc_put_local(local);
+               return NULL;
+       }
+
+       return local;
+}
+
+/*
+ * Cease using a local endpoint.  Once the number of active users reaches 0, we
+ * start the closure of the transport in the work processor.
+ */
+void rxrpc_unuse_local(struct rxrpc_local *local)
+{
+       unsigned int au;
+
+       if (local) {
+               au = atomic_dec_return(&local->active_users);
+               if (au == 0)
+                       rxrpc_queue_local(local);
+               else
+                       rxrpc_put_local(local);
        }
 }
 
@@ -397,16 +430,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 
        _enter("%d", local->debug_id);
 
-       /* We can get a race between an incoming call packet queueing the
-        * processor again and the work processor starting the destruction
-        * process which will shut down the UDP socket.
-        */
-       if (local->dead) {
-               _leave(" [already dead]");
-               return;
-       }
-       local->dead = true;
-
        mutex_lock(&rxnet->local_mutex);
        list_del_init(&local->link);
        mutex_unlock(&rxnet->local_mutex);
@@ -426,13 +449,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
         */
        rxrpc_purge_queue(&local->reject_queue);
        rxrpc_purge_queue(&local->event_queue);
-
-       _debug("rcu local %d", local->debug_id);
-       call_rcu(&local->rcu, rxrpc_local_rcu);
 }
 
 /*
- * Process events on an endpoint
+ * Process events on an endpoint.  The work item carries a ref which
+ * we must release.
  */
 static void rxrpc_local_processor(struct work_struct *work)
 {
@@ -440,13 +461,15 @@ static void rxrpc_local_processor(struct work_struct *work)
                container_of(work, struct rxrpc_local, processor);
        bool again;
 
-       trace_rxrpc_local(local, rxrpc_local_processing,
+       trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
                          atomic_read(&local->usage), NULL);
 
        do {
                again = false;
-               if (atomic_read(&local->usage) == 0)
-                       return rxrpc_local_destroyer(local);
+               if (atomic_read(&local->active_users) == 0) {
+                       rxrpc_local_destroyer(local);
+                       break;
+               }
 
                if (!skb_queue_empty(&local->reject_queue)) {
                        rxrpc_reject_packets(local);
@@ -458,6 +481,8 @@ static void rxrpc_local_processor(struct work_struct *work)
                        again = true;
                }
        } while (again);
+
+       rxrpc_put_local(local);
 }
 
 /*
index bd2fa3b7caa7e08ccec66979380f01423aca7f7b..dc7fdaf20445b117237f4a33466f52ec0be346ff 100644 (file)
@@ -375,7 +375,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
                spin_lock_bh(&rxnet->peer_hash_lock);
                list_add_tail(&peer->keepalive_link,
                              &rxnet->peer_keepalive[slot & mask]);
-               rxrpc_put_peer(peer);
+               rxrpc_put_peer_locked(peer);
        }
 
        spin_unlock_bh(&rxnet->peer_hash_lock);
index 5691b7d266ca0aaef3a5b2da30d64891e644f0f5..71547e8673b99d19f40a036c9530336643cad324 100644 (file)
@@ -440,6 +440,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
        }
 }
 
+/*
+ * Drop a ref on a peer record where the caller already holds the
+ * peer_hash_lock.
+ */
+void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
+{
+       const void *here = __builtin_return_address(0);
+       int n;
+
+       n = atomic_dec_return(&peer->usage);
+       trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+       if (n == 0) {
+               hash_del_rcu(&peer->hash_link);
+               list_del_init(&peer->keepalive_link);
+               kfree_rcu(peer, rcu);
+       }
+}
+
 /*
  * Make sure all peer records have been discarded.
  */
index be01f9c5d963ddfc766fac811ace9a381b89a7f7..5d6ab4f6fd7abb86ece09271665d6d8f70583f91 100644 (file)
@@ -230,6 +230,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                        rxrpc_set_call_completion(call,
                                                  RXRPC_CALL_LOCAL_ERROR,
                                                  0, ret);
+                       rxrpc_notify_socket(call);
                        goto out;
                }
                _debug("need instant resend %d", ret);
index 7c4a4b874248116a96508b7d187bf50fca8b2549..f2c4bfc7966331bfca17c1656d08d70e7b22b397 100644 (file)
@@ -1307,11 +1307,16 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
                          struct netlink_ext_ack *extack)
 {
        size_t attr_size = 0;
-       int ret = 0;
+       int loop, ret;
        struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
 
-       ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
-                             &attr_size, true, extack);
+       for (loop = 0; loop < 10; loop++) {
+               ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
+                                     actions, &attr_size, true, extack);
+               if (ret != -EAGAIN)
+                       break;
+       }
+
        if (ret < 0)
                return ret;
        ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
@@ -1361,11 +1366,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
                 */
                if (n->nlmsg_flags & NLM_F_REPLACE)
                        ovr = 1;
-replay:
                ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
                                     extack);
-               if (ret == -EAGAIN)
-                       goto replay;
                break;
        case RTM_DELACTION:
                ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
index 0c68bc9cf0b4df540a223e14dfa8ff569f96a40c..800846d77a56fdf4a023d38444f1ab90a0037238 100644 (file)
@@ -287,6 +287,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
        struct tcf_bpf *prog;
        bool is_bpf, is_ebpf;
        int ret, res = 0;
+       u32 index;
 
        if (!nla)
                return -EINVAL;
@@ -299,13 +300,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                return -EINVAL;
 
        parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
-
-       ret = tcf_idr_check_alloc(tn, &parm->index, act, bind);
+       index = parm->index;
+       ret = tcf_idr_check_alloc(tn, &index, act, bind);
        if (!ret) {
-               ret = tcf_idr_create(tn, parm->index, est, act,
+               ret = tcf_idr_create(tn, index, est, act,
                                     &act_bpf_ops, bind, true);
                if (ret < 0) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        return ret;
                }
 
@@ -412,7 +413,7 @@ static __net_init int bpf_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, bpf_net_id);
 
-       return tc_action_net_init(tn, &act_bpf_ops);
+       return tc_action_net_init(net, tn, &act_bpf_ops);
 }
 
 static void __net_exit bpf_exit_net(struct list_head *net_list)
index 6f0f273f1139f83ef1a45f017c37d200c36fcae1..538dedd84e2101f1eecf0a58660bd19ab03c582d 100644 (file)
@@ -104,6 +104,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
        struct tcf_connmark_info *ci;
        struct tc_connmark *parm;
        int ret = 0;
+       u32 index;
 
        if (!nla)
                return -EINVAL;
@@ -117,13 +118,13 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                return -EINVAL;
 
        parm = nla_data(tb[TCA_CONNMARK_PARMS]);
-
-       ret = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       index = parm->index;
+       ret = tcf_idr_check_alloc(tn, &index, a, bind);
        if (!ret) {
-               ret = tcf_idr_create(tn, parm->index, est, a,
+               ret = tcf_idr_create(tn, index, est, a,
                                     &act_connmark_ops, bind, false);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        return ret;
                }
 
@@ -214,7 +215,7 @@ static __net_init int connmark_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, connmark_net_id);
 
-       return tc_action_net_init(tn, &act_connmark_ops);
+       return tc_action_net_init(net, tn, &act_connmark_ops);
 }
 
 static void __net_exit connmark_exit_net(struct list_head *net_list)
index b8a67ae3105ad10f645bfcb65a503dafcbe5cbb2..1e269441065a6dd72966c48a2b5d84fb17832c8f 100644 (file)
@@ -55,6 +55,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
        struct tc_csum *parm;
        struct tcf_csum *p;
        int ret = 0, err;
+       u32 index;
 
        if (nla == NULL)
                return -EINVAL;
@@ -66,13 +67,13 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_CSUM_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_CSUM_PARMS]);
-
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       index = parm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (!err) {
-               ret = tcf_idr_create(tn, parm->index, est, a,
+               ret = tcf_idr_create(tn, index, est, a,
                                     &act_csum_ops, bind, true);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        return ret;
                }
                ret = ACT_P_CREATED;
@@ -677,7 +678,7 @@ static __net_init int csum_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, csum_net_id);
 
-       return tc_action_net_init(tn, &act_csum_ops);
+       return tc_action_net_init(net, tn, &act_csum_ops);
 }
 
 static void __net_exit csum_exit_net(struct list_head *net_list)
index cd1d9bd32ef9af4c5789e0331b6d1c1b7e6820f3..dfef9621375eb176a6c07aaff5149b83dc63e12a 100644 (file)
@@ -64,6 +64,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
        struct tc_gact *parm;
        struct tcf_gact *gact;
        int ret = 0;
+       u32 index;
        int err;
 #ifdef CONFIG_GACT_PROB
        struct tc_gact_p *p_parm = NULL;
@@ -79,6 +80,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_GACT_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_GACT_PARMS]);
+       index = parm->index;
 
 #ifndef CONFIG_GACT_PROB
        if (tb[TCA_GACT_PROB] != NULL)
@@ -91,12 +93,12 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
        }
 #endif
 
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (!err) {
-               ret = tcf_idr_create(tn, parm->index, est, a,
+               ret = tcf_idr_create(tn, index, est, a,
                                     &act_gact_ops, bind, true);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        return ret;
                }
                ret = ACT_P_CREATED;
@@ -261,7 +263,7 @@ static __net_init int gact_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, gact_net_id);
 
-       return tc_action_net_init(tn, &act_gact_ops);
+       return tc_action_net_init(net, tn, &act_gact_ops);
 }
 
 static void __net_exit gact_exit_net(struct list_head *net_list)
index 06a3d48018782e5d35981fdcfc3208a5f11ad276..bac353bea02f54390ddf7653543e28b19d7bc82e 100644 (file)
@@ -482,8 +482,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        u8 *saddr = NULL;
        bool exists = false;
        int ret = 0;
+       u32 index;
        int err;
 
+       if (!nla) {
+               NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
+               return -EINVAL;
+       }
+
        err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy, NULL);
        if (err < 0)
                return err;
@@ -504,7 +510,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        if (!p)
                return -ENOMEM;
 
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       index = parm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0) {
                kfree(p);
                return err;
@@ -516,10 +523,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        }
 
        if (!exists) {
-               ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
+               ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
                                     bind, true);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        kfree(p);
                        return ret;
                }
@@ -880,7 +887,7 @@ static __net_init int ife_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, ife_net_id);
 
-       return tc_action_net_init(tn, &act_ife_ops);
+       return tc_action_net_init(net, tn, &act_ife_ops);
 }
 
 static void __net_exit ife_exit_net(struct list_head *net_list)
index 334f3a05767139cc9e77bbfe13ec6dab5ee2cceb..01d3669ef4989770d2d64c5f42608595e342e985 100644 (file)
@@ -65,12 +65,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
        return 0;
 }
 
-static void ipt_destroy_target(struct xt_entry_target *t)
+static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
 {
        struct xt_tgdtor_param par = {
                .target   = t->u.kernel.target,
                .targinfo = t->data,
                .family   = NFPROTO_IPV4,
+               .net      = net,
        };
        if (par.target->destroy != NULL)
                par.target->destroy(&par);
@@ -82,7 +83,7 @@ static void tcf_ipt_release(struct tc_action *a)
        struct tcf_ipt *ipt = to_ipt(a);
 
        if (ipt->tcfi_t) {
-               ipt_destroy_target(ipt->tcfi_t);
+               ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
                kfree(ipt->tcfi_t);
        }
        kfree(ipt->tcfi_tname);
@@ -182,7 +183,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
 
        spin_lock_bh(&ipt->tcf_lock);
        if (ret != ACT_P_CREATED) {
-               ipt_destroy_target(ipt->tcfi_t);
+               ipt_destroy_target(ipt->tcfi_t, net);
                kfree(ipt->tcfi_tname);
                kfree(ipt->tcfi_t);
        }
@@ -353,7 +354,7 @@ static __net_init int ipt_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, ipt_net_id);
 
-       return tc_action_net_init(tn, &act_ipt_ops);
+       return tc_action_net_init(net, tn, &act_ipt_ops);
 }
 
 static void __net_exit ipt_exit_net(struct list_head *net_list)
@@ -403,7 +404,7 @@ static __net_init int xt_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, xt_net_id);
 
-       return tc_action_net_init(tn, &act_xt_ops);
+       return tc_action_net_init(net, tn, &act_xt_ops);
 }
 
 static void __net_exit xt_exit_net(struct list_head *net_list)
index f767e78e38c9878add2d3371d7b974005255b81d..399e3beae6cf45a96973b417b6b06a03091b6e8b 100644 (file)
@@ -104,6 +104,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        struct net_device *dev;
        bool exists = false;
        int ret, err;
+       u32 index;
 
        if (!nla) {
                NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
@@ -117,8 +118,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                return -EINVAL;
        }
        parm = nla_data(tb[TCA_MIRRED_PARMS]);
-
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       index = parm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
@@ -135,21 +136,21 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                if (exists)
                        tcf_idr_release(*a, bind);
                else
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
                return -EINVAL;
        }
 
        if (!exists) {
                if (!parm->ifindex) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
                        return -EINVAL;
                }
-               ret = tcf_idr_create(tn, parm->index, est, a,
+               ret = tcf_idr_create(tn, index, est, a,
                                     &act_mirred_ops, bind, true);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        return ret;
                }
                ret = ACT_P_CREATED;
@@ -418,7 +419,7 @@ static __net_init int mirred_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, mirred_net_id);
 
-       return tc_action_net_init(tn, &act_mirred_ops);
+       return tc_action_net_init(net, tn, &act_mirred_ops);
 }
 
 static void __net_exit mirred_exit_net(struct list_head *net_list)
index 4313aa102440e9b55fb0ba200406801217d65284..d1b47a1b145c40f0acd98ca0ab30f9ecce4f364a 100644 (file)
@@ -45,6 +45,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        struct tc_nat *parm;
        int ret = 0, err;
        struct tcf_nat *p;
+       u32 index;
 
        if (nla == NULL)
                return -EINVAL;
@@ -56,13 +57,13 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        if (tb[TCA_NAT_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_NAT_PARMS]);
-
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       index = parm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (!err) {
-               ret = tcf_idr_create(tn, parm->index, est, a,
+               ret = tcf_idr_create(tn, index, est, a,
                                     &act_nat_ops, bind, false);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        return ret;
                }
                ret = ACT_P_CREATED;
@@ -316,7 +317,7 @@ static __net_init int nat_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, nat_net_id);
 
-       return tc_action_net_init(tn, &act_nat_ops);
+       return tc_action_net_init(net, tn, &act_nat_ops);
 }
 
 static void __net_exit nat_exit_net(struct list_head *net_list)
index ca535a8585bc893fb4df0ae1b2dbad85c0fd9052..33c0cc5ef229f3bf71075c0706004cb73d2c2173 100644 (file)
@@ -149,6 +149,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        struct tcf_pedit *p;
        int ret = 0, err;
        int ksize;
+       u32 index;
 
        if (!nla) {
                NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed");
@@ -178,18 +179,19 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        if (IS_ERR(keys_ex))
                return PTR_ERR(keys_ex);
 
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       index = parm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (!err) {
                if (!parm->nkeys) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
                        ret = -EINVAL;
                        goto out_free;
                }
-               ret = tcf_idr_create(tn, parm->index, est, a,
+               ret = tcf_idr_create(tn, index, est, a,
                                     &act_pedit_ops, bind, false);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        goto out_free;
                }
                ret = ACT_P_CREATED;
@@ -486,7 +488,7 @@ static __net_init int pedit_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, pedit_net_id);
 
-       return tc_action_net_init(tn, &act_pedit_ops);
+       return tc_action_net_init(net, tn, &act_pedit_ops);
 }
 
 static void __net_exit pedit_exit_net(struct list_head *net_list)
index 5d8bfa878477e8e738be55d2c9b818423c3c8ccc..4db25959e156285a85c1003e7955b67aa1f637bc 100644 (file)
@@ -85,6 +85,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
        struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
        struct tc_action_net *tn = net_generic(net, police_net_id);
        bool exists = false;
+       u32 index;
        int size;
 
        if (nla == NULL)
@@ -101,7 +102,8 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
                return -EINVAL;
 
        parm = nla_data(tb[TCA_POLICE_TBF]);
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       index = parm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
@@ -109,10 +111,10 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
                return 0;
 
        if (!exists) {
-               ret = tcf_idr_create(tn, parm->index, NULL, a,
+               ret = tcf_idr_create(tn, index, NULL, a,
                                     &act_police_ops, bind, false);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        return ret;
                }
                ret = ACT_P_CREATED;
@@ -340,7 +342,7 @@ static __net_init int police_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, police_net_id);
 
-       return tc_action_net_init(tn, &act_police_ops);
+       return tc_action_net_init(net, tn, &act_police_ops);
 }
 
 static void __net_exit police_exit_net(struct list_head *net_list)
index c7f5d630d97cff5580c4d803cd1eac3f923849f8..ea0738ceb5bb84de228ffede01f654c005591d14 100644 (file)
@@ -43,7 +43,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        struct tc_action_net *tn = net_generic(net, sample_net_id);
        struct nlattr *tb[TCA_SAMPLE_MAX + 1];
        struct psample_group *psample_group;
-       u32 psample_group_num, rate;
+       u32 psample_group_num, rate, index;
        struct tc_sample *parm;
        struct tcf_sample *s;
        bool exists = false;
@@ -59,8 +59,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
                return -EINVAL;
 
        parm = nla_data(tb[TCA_SAMPLE_PARMS]);
-
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       index = parm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
@@ -68,10 +68,10 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
                return 0;
 
        if (!exists) {
-               ret = tcf_idr_create(tn, parm->index, est, a,
+               ret = tcf_idr_create(tn, index, est, a,
                                     &act_sample_ops, bind, true);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        return ret;
                }
                ret = ACT_P_CREATED;
@@ -99,7 +99,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        s->tcf_action = parm->action;
        s->rate = rate;
        s->psample_group_num = psample_group_num;
-       RCU_INIT_POINTER(s->psample_group, psample_group);
+       rcu_swap_protected(s->psample_group, psample_group,
+                          lockdep_is_held(&s->tcf_lock));
 
        if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
                s->truncate = true;
@@ -107,6 +108,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        }
        spin_unlock_bh(&s->tcf_lock);
 
+       if (psample_group)
+               psample_group_put(psample_group);
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
@@ -131,6 +134,7 @@ static bool tcf_sample_dev_ok_push(struct net_device *dev)
        case ARPHRD_TUNNEL6:
        case ARPHRD_SIT:
        case ARPHRD_IPGRE:
+       case ARPHRD_IP6GRE:
        case ARPHRD_VOID:
        case ARPHRD_NONE:
                return false;
@@ -255,7 +259,7 @@ static __net_init int sample_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, sample_net_id);
 
-       return tc_action_net_init(tn, &act_sample_ops);
+       return tc_action_net_init(net, tn, &act_sample_ops);
 }
 
 static void __net_exit sample_exit_net(struct list_head *net_list)
index 52400d49f81f233a572de8cdffff9c94099e614a..b418ef62e0a448a6f67b2ac46ffd88b8d4808b40 100644 (file)
@@ -88,6 +88,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        struct tcf_defact *d;
        bool exists = false;
        int ret = 0, err;
+       u32 index;
 
        if (nla == NULL)
                return -EINVAL;
@@ -100,7 +101,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
                return -EINVAL;
 
        parm = nla_data(tb[TCA_DEF_PARMS]);
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       index = parm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
@@ -111,15 +113,15 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
                if (exists)
                        tcf_idr_release(*a, bind);
                else
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                return -EINVAL;
        }
 
        if (!exists) {
-               ret = tcf_idr_create(tn, parm->index, est, a,
+               ret = tcf_idr_create(tn, index, est, a,
                                     &act_simp_ops, bind, false);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        return ret;
                }
 
@@ -213,7 +215,7 @@ static __net_init int simp_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, simp_net_id);
 
-       return tc_action_net_init(tn, &act_simp_ops);
+       return tc_action_net_init(net, tn, &act_simp_ops);
 }
 
 static void __net_exit simp_exit_net(struct list_head *net_list)
index 86d90fc5e97ea351aad3bbae6c03332163496cbc..a80179c1075f0d0ec6cbb249e68ce314960143a0 100644 (file)
@@ -107,6 +107,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
        u16 *queue_mapping = NULL, *ptype = NULL;
        bool exists = false;
        int ret = 0, err;
+       u32 index;
 
        if (nla == NULL)
                return -EINVAL;
@@ -153,8 +154,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
        }
 
        parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
-
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       index = parm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
@@ -165,15 +166,15 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                if (exists)
                        tcf_idr_release(*a, bind);
                else
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                return -EINVAL;
        }
 
        if (!exists) {
-               ret = tcf_idr_create(tn, parm->index, est, a,
+               ret = tcf_idr_create(tn, index, est, a,
                                     &act_skbedit_ops, bind, true);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        return ret;
                }
 
@@ -315,7 +316,7 @@ static __net_init int skbedit_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);
 
-       return tc_action_net_init(tn, &act_skbedit_ops);
+       return tc_action_net_init(net, tn, &act_skbedit_ops);
 }
 
 static void __net_exit skbedit_exit_net(struct list_head *net_list)
index 588077fafd6cc58473b1d0de85b1aa3103b3bf69..21d19529612174037fecaa5ab7cd39fba9847ffe 100644 (file)
@@ -88,12 +88,12 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
        struct nlattr *tb[TCA_SKBMOD_MAX + 1];
        struct tcf_skbmod_params *p, *p_old;
        struct tc_skbmod *parm;
+       u32 lflags = 0, index;
        struct tcf_skbmod *d;
        bool exists = false;
        u8 *daddr = NULL;
        u8 *saddr = NULL;
        u16 eth_type = 0;
-       u32 lflags = 0;
        int ret = 0, err;
 
        if (!nla)
@@ -122,10 +122,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
        }
 
        parm = nla_data(tb[TCA_SKBMOD_PARMS]);
+       index = parm->index;
        if (parm->flags & SKBMOD_F_SWAPMAC)
                lflags = SKBMOD_F_SWAPMAC;
 
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
@@ -136,15 +137,15 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
                if (exists)
                        tcf_idr_release(*a, bind);
                else
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                return -EINVAL;
        }
 
        if (!exists) {
-               ret = tcf_idr_create(tn, parm->index, est, a,
+               ret = tcf_idr_create(tn, index, est, a,
                                     &act_skbmod_ops, bind, true);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        return ret;
                }
 
@@ -276,7 +277,7 @@ static __net_init int skbmod_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, skbmod_net_id);
 
-       return tc_action_net_init(tn, &act_skbmod_ops);
+       return tc_action_net_init(net, tn, &act_skbmod_ops);
 }
 
 static void __net_exit skbmod_exit_net(struct list_head *net_list)
index 72d9c432e8b42693094bd11d524c86d34fd10bf5..43309ff2b5dc9639e34dec816522ce1824cdc739 100644 (file)
@@ -224,6 +224,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
        __be16 flags;
        u8 tos, ttl;
        int ret = 0;
+       u32 index;
        int err;
 
        if (!nla) {
@@ -244,7 +245,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
        }
 
        parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       index = parm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
@@ -338,7 +340,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
        }
 
        if (!exists) {
-               ret = tcf_idr_create(tn, parm->index, est, a,
+               ret = tcf_idr_create(tn, index, est, a,
                                     &act_tunnel_key_ops, bind, true);
                if (ret) {
                        NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
@@ -384,7 +386,7 @@ err_out:
        if (exists)
                tcf_idr_release(*a, bind);
        else
-               tcf_idr_cleanup(tn, parm->index);
+               tcf_idr_cleanup(tn, index);
        return ret;
 }
 
@@ -577,7 +579,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
 
-       return tc_action_net_init(tn, &act_tunnel_key_ops);
+       return tc_action_net_init(net, tn, &act_tunnel_key_ops);
 }
 
 static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
index 033d273afe50236a090fd4caddd0a8328ce82157..41528b9664404f877e376f4fe9dfa283828dd8ae 100644 (file)
@@ -118,6 +118,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        u8 push_prio = 0;
        bool exists = false;
        int ret = 0, err;
+       u32 index;
 
        if (!nla)
                return -EINVAL;
@@ -129,7 +130,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        if (!tb[TCA_VLAN_PARMS])
                return -EINVAL;
        parm = nla_data(tb[TCA_VLAN_PARMS]);
-       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       index = parm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
@@ -145,7 +147,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                        if (exists)
                                tcf_idr_release(*a, bind);
                        else
-                               tcf_idr_cleanup(tn, parm->index);
+                               tcf_idr_cleanup(tn, index);
                        return -EINVAL;
                }
                push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
@@ -153,7 +155,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                        if (exists)
                                tcf_idr_release(*a, bind);
                        else
-                               tcf_idr_cleanup(tn, parm->index);
+                               tcf_idr_cleanup(tn, index);
                        return -ERANGE;
                }
 
@@ -167,7 +169,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                                if (exists)
                                        tcf_idr_release(*a, bind);
                                else
-                                       tcf_idr_cleanup(tn, parm->index);
+                                       tcf_idr_cleanup(tn, index);
                                return -EPROTONOSUPPORT;
                        }
                } else {
@@ -181,16 +183,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                if (exists)
                        tcf_idr_release(*a, bind);
                else
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                return -EINVAL;
        }
        action = parm->v_action;
 
        if (!exists) {
-               ret = tcf_idr_create(tn, parm->index, est, a,
+               ret = tcf_idr_create(tn, index, est, a,
                                     &act_vlan_ops, bind, true);
                if (ret) {
-                       tcf_idr_cleanup(tn, parm->index);
+                       tcf_idr_cleanup(tn, index);
                        return ret;
                }
 
@@ -296,6 +298,14 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
+{
+       return nla_total_size(sizeof(struct tc_vlan))
+               + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
+               + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
+               + nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
+}
+
 static struct tc_action_ops act_vlan_ops = {
        .kind           =       "vlan",
        .type           =       TCA_ACT_VLAN,
@@ -305,6 +315,7 @@ static struct tc_action_ops act_vlan_ops = {
        .init           =       tcf_vlan_init,
        .cleanup        =       tcf_vlan_cleanup,
        .walk           =       tcf_vlan_walker,
+       .get_fill_size  =       tcf_vlan_get_fill_size,
        .lookup         =       tcf_vlan_search,
        .size           =       sizeof(struct tcf_vlan),
 };
@@ -313,7 +324,7 @@ static __net_init int vlan_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, vlan_net_id);
 
-       return tc_action_net_init(tn, &act_vlan_ops);
+       return tc_action_net_init(net, tn, &act_vlan_ops);
 }
 
 static void __net_exit vlan_exit_net(struct list_head *net_list)
index 2167c6ca55e3e1143aff352c65ec15ca92d5f80d..e217ebc693f8c832362a99c51b6543ee6a72b0c4 100644 (file)
@@ -1325,6 +1325,9 @@ replay:
                        tcf_chain_tp_insert(chain, &chain_info, tp);
                tfilter_notify(net, skb, n, tp, block, q, parent, fh,
                               RTM_NEWTFILTER, false);
+               /* q pointer is NULL for shared blocks */
+               if (q)
+                       q->flags &= ~TCQ_F_CAN_BYPASS;
        } else {
                if (tp_created)
                        tcf_proto_destroy(tp, NULL);
@@ -2035,8 +2038,10 @@ out:
 void tcf_exts_destroy(struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
-       tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
-       kfree(exts->actions);
+       if (exts->actions) {
+               tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
+               kfree(exts->actions);
+       }
        exts->nr_actions = 0;
 #endif
 }
index be7cd140b2a381492889bfab3437d01b72d2e178..84fdc4857771bf62fc66f4c858f75b58db95de7a 100644 (file)
@@ -1308,7 +1308,8 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
 }
 
 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
-       [TCA_KIND]              = { .type = NLA_STRING },
+       [TCA_KIND]              = { .type = NLA_NUL_STRING,
+                                   .len = IFNAMSIZ - 1 },
        [TCA_RATE]              = { .type = NLA_BINARY,
                                    .len = sizeof(struct tc_estimator) },
        [TCA_STAB]              = { .type = NLA_NESTED },
@@ -1831,6 +1832,8 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
        cl = cops->find(q, portid);
        if (!cl)
                return;
+       if (!cops->tcf_block)
+               return;
        block = cops->tcf_block(q, cl, NULL);
        if (!block)
                return;
index f42025d53cfe1e4fcd91931bf14b7328896db1e2..ebc3c8c7e66618db10004690ea4db7326600363f 100644 (file)
@@ -1132,6 +1132,32 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
        [TCA_CBQ_POLICE]        = { .len = sizeof(struct tc_cbq_police) },
 };
 
+static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
+                        struct nlattr *opt,
+                        struct netlink_ext_ack *extack)
+{
+       int err;
+
+       if (!opt) {
+               NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
+               return -EINVAL;
+       }
+
+       err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
+       if (err < 0)
+               return err;
+
+       if (tb[TCA_CBQ_WRROPT]) {
+               const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
+
+               if (wrr->priority > TC_CBQ_MAXPRIO) {
+                       NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
+                       err = -EINVAL;
+               }
+       }
+       return err;
+}
+
 static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
 {
@@ -1144,12 +1170,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
        hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        q->delay_timer.function = cbq_undelay;
 
-       if (!opt) {
-               NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
-               return -EINVAL;
-       }
-
-       err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
+       err = cbq_opt_parse(tb, opt, extack);
        if (err < 0)
                return err;
 
@@ -1466,12 +1487,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        struct cbq_class *parent;
        struct qdisc_rate_table *rtab = NULL;
 
-       if (!opt) {
-               NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
-               return -EINVAL;
-       }
-
-       err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
+       err = cbq_opt_parse(tb, opt, extack);
        if (err < 0)
                return err;
 
index 17cd81f84b5de9d9f53acf2c9078a7618927ddc5..77fae0b7c6ee1c04afd842fe04781c32b87c30f6 100644 (file)
@@ -71,10 +71,10 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
        struct Qdisc *sch = ctx;
        struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
 
-       if (skb)
+       if (skb) {
                sch->qstats.backlog -= qdisc_pkt_len(skb);
-
-       prefetch(&skb->end); /* we'll need skb_shinfo() */
+               prefetch(&skb->end); /* we'll need skb_shinfo() */
+       }
        return skb;
 }
 
index 049714c57075c6acb212e4cfb47d962fda67b2bb..84c948c919142fcb331cc04bc54b0041187cf36b 100644 (file)
@@ -357,6 +357,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
                goto errout;
 
        err = -EINVAL;
+       if (!tb[TCA_DSMARK_INDICES])
+               goto errout;
        indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
 
        if (hweight32(indices) != 1)
index 6c0a9d5dbf9441d00a832915e23d6b82bd8ab313..137692cb8b4f9b40f6d328ce370fe4b62a560e5b 100644 (file)
@@ -600,8 +600,6 @@ static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
 static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
                              u32 classid)
 {
-       /* we cannot bypass queue discipline anymore */
-       sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
 }
 
index 77b289da776361f06cc23f776d6057c49c11e43e..30e32df5f84a74b6d5d3c6335fe55182bc9cd193 100644 (file)
@@ -49,6 +49,8 @@ EXPORT_SYMBOL(default_qdisc_ops);
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
+#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
+
 static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
 {
        const struct netdev_queue *txq = q->dev_queue;
@@ -74,7 +76,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
                                q->q.qlen--;
                        }
                } else {
-                       skb = NULL;
+                       skb = SKB_XOFF_MAGIC;
                }
        }
 
@@ -272,8 +274,11 @@ validate:
                return skb;
 
        skb = qdisc_dequeue_skb_bad_txq(q);
-       if (unlikely(skb))
+       if (unlikely(skb)) {
+               if (skb == SKB_XOFF_MAGIC)
+                       return NULL;
                goto bulk;
+       }
        skb = q->dequeue(q);
        if (skb) {
 bulk:
@@ -942,9 +947,13 @@ void qdisc_free(struct Qdisc *qdisc)
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
-       const struct Qdisc_ops  *ops = qdisc->ops;
+       const struct Qdisc_ops *ops;
        struct sk_buff *skb, *tmp;
 
+       if (!qdisc)
+               return;
+       ops = qdisc->ops;
+
        if (qdisc->flags & TCQ_F_BUILTIN ||
            !refcount_dec_and_test(&qdisc->refcnt))
                return;
index c3a8388dcdf6bcd5e8fc34f16c3104ce5c0de075..a80fe8aa852767c87b7fc5eddd768bf941a1aad6 100644 (file)
@@ -529,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
                new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
 
        non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
-       if (non_hh_quantum > INT_MAX)
+       if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
                return -EINVAL;
 
        sch_tree_lock(sch);
index 4dfe10b9f96c8488283501b13e8950f3f4873856..86350fe5cfc8f73ba93c32b053018c5e3bfdbada 100644 (file)
@@ -749,7 +749,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
        struct disttable *d;
        int i;
 
-       if (n > NETEM_DIST_MAX)
+       if (!n || n > NETEM_DIST_MAX)
                return -EINVAL;
 
        d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
index 2f2678197760ab476c6f817d0429d4b7da1a56d8..650f2146385377944d192f9aa3aac3b26d05c441 100644 (file)
@@ -828,8 +828,6 @@ static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
 static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
                              u32 classid)
 {
-       /* we cannot bypass queue discipline anymore */
-       sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
 }
 
index d97b2b4b7a8bf6eb8396f06c48e34af1a0cc4c05..6d36f74ad29502612695fd099ca4fe08302069ef 100644 (file)
@@ -1350,7 +1350,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
        return status;
 }
 
-static void __net_init sctp_ctrlsock_exit(struct net *net)
+static void __net_exit sctp_ctrlsock_exit(struct net *net)
 {
        /* Free the control endpoint.  */
        inet_ctl_sock_destroy(net->sctp.ctl_sock);
index 3131b4154c74d0d666698a9fa529003bdd0df280..de8a82bc6b42dc486a4799ed568024fde05ac4cb 100644 (file)
@@ -561,8 +561,8 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
         */
        if (net->sctp.pf_enable &&
           (transport->state == SCTP_ACTIVE) &&
-          (asoc->pf_retrans < transport->pathmaxrxt) &&
-          (transport->error_count > asoc->pf_retrans)) {
+          (transport->error_count < transport->pathmaxrxt) &&
+          (transport->error_count > transport->pf_retrans)) {
 
                sctp_assoc_control_transport(asoc, transport,
                                             SCTP_TRANSPORT_PF,
index 8c00a7ef1bcda261018b9025672abd8b50749cd3..227b050cfe45174cd2f9d487430a48bb7522909f 100644 (file)
@@ -4507,34 +4507,18 @@ out_nounlock:
 static int sctp_connect(struct sock *sk, struct sockaddr *addr,
                        int addr_len, int flags)
 {
-       struct inet_sock *inet = inet_sk(sk);
        struct sctp_af *af;
-       int err = 0;
+       int err = -EINVAL;
 
        lock_sock(sk);
 
        pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
                 addr, addr_len);
 
-       /* We may need to bind the socket. */
-       if (!inet->inet_num) {
-               if (sk->sk_prot->get_port(sk, 0)) {
-                       release_sock(sk);
-                       return -EAGAIN;
-               }
-               inet->inet_sport = htons(inet->inet_num);
-       }
-
        /* Validate addr_len before calling common connect/connectx routine. */
        af = sctp_get_af_specific(addr->sa_family);
-       if (!af || addr_len < af->sockaddr_len) {
-               err = -EINVAL;
-       } else {
-               /* Pass correct addr len to common routine (so it knows there
-                * is only one address being passed.
-                */
+       if (af && addr_len >= af->sockaddr_len)
                err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
-       }
 
        release_sock(sk);
        return err;
@@ -8973,7 +8957,7 @@ struct proto sctp_prot = {
        .backlog_rcv =  sctp_backlog_rcv,
        .hash        =  sctp_hash,
        .unhash      =  sctp_unhash,
-       .get_port    =  sctp_get_port,
+       .no_autobind =  true,
        .obj_size    =  sizeof(struct sctp_sock),
        .useroffset  =  offsetof(struct sctp_sock, subscribe),
        .usersize    =  offsetof(struct sctp_sock, initmsg) -
@@ -9015,7 +8999,7 @@ struct proto sctpv6_prot = {
        .backlog_rcv    = sctp_backlog_rcv,
        .hash           = sctp_hash,
        .unhash         = sctp_unhash,
-       .get_port       = sctp_get_port,
+       .no_autobind    = true,
        .obj_size       = sizeof(struct sctp6_sock),
        .useroffset     = offsetof(struct sctp6_sock, sctp.subscribe),
        .usersize       = offsetof(struct sctp6_sock, sctp.initmsg) -
index 3b47457862ccdac668a8947f6191507d1b83ee68..87061a4bb44b6a9d4bf80de1ce4201d64f34192b 100644 (file)
@@ -253,13 +253,20 @@ out:
 int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
 {
        struct sctp_stream_out_ext *soute;
+       int ret;
 
        soute = kzalloc(sizeof(*soute), GFP_KERNEL);
        if (!soute)
                return -ENOMEM;
        SCTP_SO(stream, sid)->ext = soute;
 
-       return sctp_sched_init_sid(stream, sid, GFP_KERNEL);
+       ret = sctp_sched_init_sid(stream, sid, GFP_KERNEL);
+       if (ret) {
+               kfree(SCTP_SO(stream, sid)->ext);
+               SCTP_SO(stream, sid)->ext = NULL;
+       }
+
+       return ret;
 }
 
 void sctp_stream_free(struct sctp_stream *stream)
@@ -409,6 +416,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
                nstr_list[i] = htons(str_list[i]);
 
        if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+               kfree(nstr_list);
                retval = -EAGAIN;
                goto out;
        }
index 9bbab6ba2dab0d9c2d685453371233576ba7e1a9..26dcd02b2d0cefbd2874edb1132f2e0d64dab97c 100644 (file)
@@ -1680,14 +1680,18 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
                }
                break;
        case TCP_NODELAY:
-               if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
+               if (sk->sk_state != SMC_INIT &&
+                   sk->sk_state != SMC_LISTEN &&
+                   sk->sk_state != SMC_CLOSED) {
                        if (val && !smc->use_fallback)
                                mod_delayed_work(system_wq, &smc->conn.tx_work,
                                                 0);
                }
                break;
        case TCP_CORK:
-               if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
+               if (sk->sk_state != SMC_INIT &&
+                   sk->sk_state != SMC_LISTEN &&
+                   sk->sk_state != SMC_CLOSED) {
                        if (!val && !smc->use_fallback)
                                mod_delayed_work(system_wq, &smc->conn.tx_work,
                                                 0);
index d8366ed517576ba7f258ad74f0cf91f7fd92d7e0..28361aef998256a327a73f41fd0218492b205646 100644 (file)
@@ -75,13 +75,11 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct smc_connection *conn = &smc->conn;
        struct sock *sk = &smc->sk;
-       bool noblock;
        long timeo;
        int rc = 0;
 
        /* similar to sk_stream_wait_memory */
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-       noblock = timeo ? false : true;
        add_wait_queue(sk_sleep(sk), &wait);
        while (1) {
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -96,8 +94,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
                        break;
                }
                if (!timeo) {
-                       if (noblock)
-                               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+                       /* ensure EPOLLOUT is subsequently generated */
+                       set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        rc = -EAGAIN;
                        break;
                }
index b88d48d009130985db69993bc115c8cae80a71a9..0f1eaed1bd1b310833443cfb930e57d85dfa51a9 100644 (file)
@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
                tipc_set_node_id(net, node_id);
        }
        tn->trial_addr = addr;
+       tn->addr_trial_end = jiffies;
        pr_info("32-bit node address hash set to %x\n", addr);
 }
 
index 836727e363c46290ab8ef55e9d7b630f1dfac293..6344aca4487b6b3a515de9568ed59dc45bd8b7e1 100644 (file)
@@ -161,6 +161,7 @@ struct tipc_link {
        struct {
                u16 len;
                u16 limit;
+               struct sk_buff *target_bskb;
        } backlog[5];
        u16 snd_nxt;
        u16 last_retransm;
@@ -846,6 +847,7 @@ static void link_prepare_wakeup(struct tipc_link *l)
 void tipc_link_reset(struct tipc_link *l)
 {
        struct sk_buff_head list;
+       u32 imp;
 
        __skb_queue_head_init(&list);
 
@@ -864,11 +866,10 @@ void tipc_link_reset(struct tipc_link *l)
        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
        __skb_queue_purge(&l->backlogq);
-       l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
-       l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
-       l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
-       l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
-       l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
+       for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
+               l->backlog[imp].len = 0;
+               l->backlog[imp].target_bskb = NULL;
+       }
        kfree_skb(l->reasm_buf);
        kfree_skb(l->failover_reasm_skb);
        l->reasm_buf = NULL;
@@ -909,7 +910,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        struct sk_buff_head *transmq = &l->transmq;
        struct sk_buff_head *backlogq = &l->backlogq;
-       struct sk_buff *skb, *_skb, *bskb;
+       struct sk_buff *skb, *_skb, **tskb;
        int pkt_cnt = skb_queue_len(list);
        int rc = 0;
 
@@ -955,19 +956,21 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                        seqno++;
                        continue;
                }
-               if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
+               tskb = &l->backlog[imp].target_bskb;
+               if (tipc_msg_bundle(*tskb, hdr, mtu)) {
                        kfree_skb(__skb_dequeue(list));
                        l->stats.sent_bundled++;
                        continue;
                }
-               if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
+               if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
                        kfree_skb(__skb_dequeue(list));
-                       __skb_queue_tail(backlogq, bskb);
-                       l->backlog[msg_importance(buf_msg(bskb))].len++;
+                       __skb_queue_tail(backlogq, *tskb);
+                       l->backlog[imp].len++;
                        l->stats.sent_bundled++;
                        l->stats.sent_bundles++;
                        continue;
                }
+               l->backlog[imp].target_bskb = NULL;
                l->backlog[imp].len += skb_queue_len(list);
                skb_queue_splice_tail_init(list, backlogq);
        }
@@ -983,6 +986,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
        u16 seqno = l->snd_nxt;
        u16 ack = l->rcv_nxt - 1;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+       u32 imp;
 
        while (skb_queue_len(&l->transmq) < l->window) {
                skb = skb_peek(&l->backlogq);
@@ -993,7 +997,10 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
                        break;
                __skb_dequeue(&l->backlogq);
                hdr = buf_msg(skb);
-               l->backlog[msg_importance(hdr)].len--;
+               imp = msg_importance(hdr);
+               l->backlog[imp].len--;
+               if (unlikely(skb == l->backlog[imp].target_bskb))
+                       l->backlog[imp].target_bskb = NULL;
                __skb_queue_tail(&l->transmq, skb);
                __skb_queue_tail(xmitq, _skb);
                TIPC_SKB_CB(skb)->ackers = l->ackers;
index b61891054709597279d6204885a069b848dc869a..cbccf1791d3c5d97cb31070b178b8423a4c441db 100644 (file)
@@ -484,10 +484,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
        bmsg = buf_msg(_skb);
        tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
                      INT_H_SIZE, dnode);
-       if (msg_isdata(msg))
-               msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
-       else
-               msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
+       msg_set_importance(bmsg, msg_importance(msg));
        msg_set_seqno(bmsg, msg_seqno(msg));
        msg_set_ack(bmsg, msg_ack(msg));
        msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
index 3cfeb9df64b0008b894e7495cb0a00dd23938844..e0a3dd424d8c2fc1b93ace436ed6c4d7c58c0dd1 100644 (file)
@@ -221,7 +221,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
                       publ->key);
        }
 
-       kfree_rcu(p, rcu);
+       if (p)
+               kfree_rcu(p, rcu);
 }
 
 /**
index 85ebb675600c59981e4d847e5c5446e9630a43d6..318c541970ecd3a50e8dc9f7d9e300ce374be410 100644 (file)
@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
        int rep_type;
        int rep_size;
        int req_type;
+       int req_size;
        struct net *net;
        struct sk_buff *rep;
        struct tlv_desc *req;
@@ -257,7 +258,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
        int err;
        struct sk_buff *arg;
 
-       if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+       if (msg->req_type && (!msg->req_size ||
+                             !TLV_CHECK_TYPE(msg->req, msg->req_type)))
                return -EINVAL;
 
        msg->rep = tipc_tlv_alloc(msg->rep_size);
@@ -354,7 +356,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
 {
        int err;
 
-       if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+       if (msg->req_type && (!msg->req_size ||
+                             !TLV_CHECK_TYPE(msg->req, msg->req_type)))
                return -EINVAL;
 
        err = __tipc_nl_compat_doit(cmd, msg);
@@ -1276,8 +1279,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
                goto send;
        }
 
-       len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
-       if (!len || !TLV_OK(msg.req, len)) {
+       msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+       if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
                msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
                err = -EOPNOTSUPP;
                goto send;
index ead29c2aefa762b8aff13eaab72b55c2671e276e..0a613e0ef3bf9908c38575cb1137c700dd61aefd 100644 (file)
@@ -61,7 +61,7 @@ static void tls_device_free_ctx(struct tls_context *ctx)
        if (ctx->rx_conf == TLS_HW)
                kfree(tls_offload_ctx_rx(ctx));
 
-       kfree(ctx);
+       tls_ctx_free(ctx);
 }
 
 static void tls_device_gc_task(struct work_struct *work)
index 25b3fb585777b5aeb0f9c73788da814a709d556c..3288bdff9889457bf9c9caf5de6226c4ecd5a105 100644 (file)
@@ -241,7 +241,7 @@ static void tls_write_space(struct sock *sk)
        ctx->sk_write_space(sk);
 }
 
-static void tls_ctx_free(struct tls_context *ctx)
+void tls_ctx_free(struct tls_context *ctx)
 {
        if (!ctx)
                return;
@@ -301,6 +301,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 #else
        {
 #endif
+               if (sk->sk_write_space == tls_write_space)
+                       sk->sk_write_space = ctx->sk_write_space;
                tls_ctx_free(ctx);
                ctx = NULL;
        }
index 6848a81967118e5c4d905e1959f1f2efc49191f4..bbb2da70e8701a1231374c84532632ff0d3b181d 100644 (file)
@@ -354,7 +354,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
-       int ret = 0;
+       int ret;
        int required_size;
        long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        bool eor = !(msg->msg_flags & MSG_MORE);
@@ -370,7 +370,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
        lock_sock(sk);
 
-       if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
+       ret = tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo);
+       if (ret)
                goto send_end;
 
        if (unlikely(msg->msg_controllen)) {
@@ -505,7 +506,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
-       int ret = 0;
+       int ret;
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
        bool eor;
        size_t orig_size = size;
@@ -525,7 +526,8 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
-       if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
+       ret = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
+       if (ret)
                goto sendpage_end;
 
        /* Call the sk_stream functions to manage the sndbuf mem. */
index ab27a2872935774d41fb1f2c2f9341eb67c8cc0a..2a4613b239e0ef545f73d7da664576aeafd126a6 100644 (file)
@@ -281,7 +281,8 @@ EXPORT_SYMBOL_GPL(vsock_insert_connected);
 void vsock_remove_bound(struct vsock_sock *vsk)
 {
        spin_lock_bh(&vsock_table_lock);
-       __vsock_remove_bound(vsk);
+       if (__vsock_in_bound_table(vsk))
+               __vsock_remove_bound(vsk);
        spin_unlock_bh(&vsock_table_lock);
 }
 EXPORT_SYMBOL_GPL(vsock_remove_bound);
@@ -289,7 +290,8 @@ EXPORT_SYMBOL_GPL(vsock_remove_bound);
 void vsock_remove_connected(struct vsock_sock *vsk)
 {
        spin_lock_bh(&vsock_table_lock);
-       __vsock_remove_connected(vsk);
+       if (__vsock_in_connected_table(vsk))
+               __vsock_remove_connected(vsk);
        spin_unlock_bh(&vsock_table_lock);
 }
 EXPORT_SYMBOL_GPL(vsock_remove_connected);
@@ -325,35 +327,10 @@ struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
 }
 EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
 
-static bool vsock_in_bound_table(struct vsock_sock *vsk)
-{
-       bool ret;
-
-       spin_lock_bh(&vsock_table_lock);
-       ret = __vsock_in_bound_table(vsk);
-       spin_unlock_bh(&vsock_table_lock);
-
-       return ret;
-}
-
-static bool vsock_in_connected_table(struct vsock_sock *vsk)
-{
-       bool ret;
-
-       spin_lock_bh(&vsock_table_lock);
-       ret = __vsock_in_connected_table(vsk);
-       spin_unlock_bh(&vsock_table_lock);
-
-       return ret;
-}
-
 void vsock_remove_sock(struct vsock_sock *vsk)
 {
-       if (vsock_in_bound_table(vsk))
-               vsock_remove_bound(vsk);
-
-       if (vsock_in_connected_table(vsk))
-               vsock_remove_connected(vsk);
+       vsock_remove_bound(vsk);
+       vsock_remove_connected(vsk);
 }
 EXPORT_SYMBOL_GPL(vsock_remove_sock);
 
@@ -484,8 +461,7 @@ static void vsock_pending_work(struct work_struct *work)
         * incoming packets can't find this socket, and to reduce the reference
         * count.
         */
-       if (vsock_in_connected_table(vsk))
-               vsock_remove_connected(vsk);
+       vsock_remove_connected(vsk);
 
        sk->sk_state = TCP_CLOSE;
 
@@ -665,7 +641,7 @@ struct sock *__vsock_create(struct net *net,
 }
 EXPORT_SYMBOL_GPL(__vsock_create);
 
-static void __vsock_release(struct sock *sk)
+static void __vsock_release(struct sock *sk, int level)
 {
        if (sk) {
                struct sk_buff *skb;
@@ -675,9 +651,17 @@ static void __vsock_release(struct sock *sk)
                vsk = vsock_sk(sk);
                pending = NULL; /* Compiler warning. */
 
+               /* The release call is supposed to use lock_sock_nested()
+                * rather than lock_sock(), if a sock lock should be acquired.
+                */
                transport->release(vsk);
 
-               lock_sock(sk);
+               /* When "level" is SINGLE_DEPTH_NESTING, use the nested
+                * version to avoid the warning "possible recursive locking
+                * detected". When "level" is 0, lock_sock_nested(sk, level)
+                * is the same as lock_sock(sk).
+                */
+               lock_sock_nested(sk, level);
                sock_orphan(sk);
                sk->sk_shutdown = SHUTDOWN_MASK;
 
@@ -686,7 +670,7 @@ static void __vsock_release(struct sock *sk)
 
                /* Clean up any sockets that never were accepted. */
                while ((pending = vsock_dequeue_accept(sk)) != NULL) {
-                       __vsock_release(pending);
+                       __vsock_release(pending, SINGLE_DEPTH_NESTING);
                        sock_put(pending);
                }
 
@@ -735,7 +719,7 @@ EXPORT_SYMBOL_GPL(vsock_stream_has_space);
 
 static int vsock_release(struct socket *sock)
 {
-       __vsock_release(sock->sk);
+       __vsock_release(sock->sk, 0);
        sock->sk = NULL;
        sock->state = SS_FREE;
 
index a827547aa102be4f3cf46a267c8c684e815e02d6..70350dc673669291e8ce8938a4d077573d4a941d 100644 (file)
@@ -35,6 +35,9 @@
 /* The MTU is 16KB per the host side's design */
 #define HVS_MTU_SIZE           (1024 * 16)
 
+/* How long to wait for graceful shutdown of a connection */
+#define HVS_CLOSE_TIMEOUT (8 * HZ)
+
 struct vmpipe_proto_header {
        u32 pkt_type;
        u32 data_size;
@@ -217,18 +220,6 @@ static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan)
        set_channel_pending_send_size(chan,
                                      HVS_PKT_LEN(HVS_SEND_BUF_SIZE));
 
-       /* See hvs_stream_has_space(): we must make sure the host has seen
-        * the new pending send size, before we can re-check the writable
-        * bytes.
-        */
-       virt_mb();
-}
-
-static void hvs_clear_channel_pending_send_size(struct vmbus_channel *chan)
-{
-       set_channel_pending_send_size(chan, 0);
-
-       /* Ditto */
        virt_mb();
 }
 
@@ -298,27 +289,42 @@ static void hvs_channel_cb(void *ctx)
        if (hvs_channel_readable(chan))
                sk->sk_data_ready(sk);
 
-       /* See hvs_stream_has_space(): when we reach here, the writable bytes
-        * may be already less than HVS_PKT_LEN(HVS_SEND_BUF_SIZE).
-        */
        if (hv_get_bytes_to_write(&chan->outbound) > 0)
                sk->sk_write_space(sk);
 }
 
-static void hvs_close_connection(struct vmbus_channel *chan)
+static void hvs_do_close_lock_held(struct vsock_sock *vsk,
+                                  bool cancel_timeout)
 {
-       struct sock *sk = get_per_channel_state(chan);
-       struct vsock_sock *vsk = vsock_sk(sk);
-
-       lock_sock(sk);
+       struct sock *sk = sk_vsock(vsk);
 
-       sk->sk_state = TCP_CLOSE;
        sock_set_flag(sk, SOCK_DONE);
-       vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
-
+       vsk->peer_shutdown = SHUTDOWN_MASK;
+       if (vsock_stream_has_data(vsk) <= 0)
+               sk->sk_state = TCP_CLOSING;
        sk->sk_state_change(sk);
+       if (vsk->close_work_scheduled &&
+           (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
+               vsk->close_work_scheduled = false;
+               vsock_remove_sock(vsk);
 
+               /* Release the reference taken while scheduling the timeout */
+               sock_put(sk);
+       }
+}
+
+static void hvs_close_connection(struct vmbus_channel *chan)
+{
+       struct sock *sk = get_per_channel_state(chan);
+
+       lock_sock(sk);
+       hvs_do_close_lock_held(vsock_sk(sk), true);
        release_sock(sk);
+
+       /* Release the refcnt for the channel that's opened in
+        * hvs_open_connection().
+        */
+       sock_put(sk);
 }
 
 static void hvs_open_connection(struct vmbus_channel *chan)
@@ -328,8 +334,9 @@ static void hvs_open_connection(struct vmbus_channel *chan)
 
        struct sockaddr_vm addr;
        struct sock *sk, *new = NULL;
-       struct vsock_sock *vnew;
-       struct hvsock *hvs, *hvs_new;
+       struct vsock_sock *vnew = NULL;
+       struct hvsock *hvs = NULL;
+       struct hvsock *hvs_new = NULL;
        int ret;
 
        if_type = &chan->offermsg.offer.if_type;
@@ -386,8 +393,18 @@ static void hvs_open_connection(struct vmbus_channel *chan)
        }
 
        set_per_channel_state(chan, conn_from_host ? new : sk);
+
+       /* This reference will be dropped by hvs_close_connection(). */
+       sock_hold(conn_from_host ? new : sk);
        vmbus_set_chn_rescind_callback(chan, hvs_close_connection);
 
+       /* Set the pending send size to max packet size to always get
+        * notifications from the host when there is enough writable space.
+        * The host is optimized to send notifications only when the pending
+        * size boundary is crossed, and not always.
+        */
+       hvs_set_channel_pending_send_size(chan);
+
        if (conn_from_host) {
                new->sk_state = TCP_ESTABLISHED;
                sk->sk_ack_backlog++;
@@ -452,50 +469,80 @@ static int hvs_connect(struct vsock_sock *vsk)
        return vmbus_send_tl_connect_request(&h->vm_srv_id, &h->host_srv_id);
 }
 
+static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
+{
+       struct vmpipe_proto_header hdr;
+
+       if (hvs->fin_sent || !hvs->chan)
+               return;
+
+       /* It can't fail: see hvs_channel_writable_bytes(). */
+       (void)hvs_send_data(hvs->chan, (struct hvs_send_buf *)&hdr, 0);
+       hvs->fin_sent = true;
+}
+
 static int hvs_shutdown(struct vsock_sock *vsk, int mode)
 {
        struct sock *sk = sk_vsock(vsk);
-       struct vmpipe_proto_header hdr;
-       struct hvs_send_buf *send_buf;
-       struct hvsock *hvs;
 
        if (!(mode & SEND_SHUTDOWN))
                return 0;
 
        lock_sock(sk);
+       hvs_shutdown_lock_held(vsk->trans, mode);
+       release_sock(sk);
+       return 0;
+}
 
-       hvs = vsk->trans;
-       if (hvs->fin_sent)
-               goto out;
-
-       send_buf = (struct hvs_send_buf *)&hdr;
+static void hvs_close_timeout(struct work_struct *work)
+{
+       struct vsock_sock *vsk =
+               container_of(work, struct vsock_sock, close_work.work);
+       struct sock *sk = sk_vsock(vsk);
 
-       /* It can't fail: see hvs_channel_writable_bytes(). */
-       (void)hvs_send_data(hvs->chan, send_buf, 0);
+       sock_hold(sk);
+       lock_sock(sk);
+       if (!sock_flag(sk, SOCK_DONE))
+               hvs_do_close_lock_held(vsk, false);
 
-       hvs->fin_sent = true;
-out:
+       vsk->close_work_scheduled = false;
        release_sock(sk);
-       return 0;
+       sock_put(sk);
 }
 
-static void hvs_release(struct vsock_sock *vsk)
+/* Returns true, if it is safe to remove socket; false otherwise */
+static bool hvs_close_lock_held(struct vsock_sock *vsk)
 {
        struct sock *sk = sk_vsock(vsk);
-       struct hvsock *hvs = vsk->trans;
-       struct vmbus_channel *chan;
 
-       lock_sock(sk);
+       if (!(sk->sk_state == TCP_ESTABLISHED ||
+             sk->sk_state == TCP_CLOSING))
+               return true;
 
-       sk->sk_state = TCP_CLOSING;
-       vsock_remove_sock(vsk);
+       if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
+               hvs_shutdown_lock_held(vsk->trans, SHUTDOWN_MASK);
 
-       release_sock(sk);
+       if (sock_flag(sk, SOCK_DONE))
+               return true;
 
-       chan = hvs->chan;
-       if (chan)
-               hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN);
+       /* This reference will be dropped by the delayed close routine */
+       sock_hold(sk);
+       INIT_DELAYED_WORK(&vsk->close_work, hvs_close_timeout);
+       vsk->close_work_scheduled = true;
+       schedule_delayed_work(&vsk->close_work, HVS_CLOSE_TIMEOUT);
+       return false;
+}
 
+static void hvs_release(struct vsock_sock *vsk)
+{
+       struct sock *sk = sk_vsock(vsk);
+       bool remove_sock;
+
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+       remove_sock = hvs_close_lock_held(vsk);
+       release_sock(sk);
+       if (remove_sock)
+               vsock_remove_sock(vsk);
 }
 
 static void hvs_destruct(struct vsock_sock *vsk)
@@ -651,23 +698,8 @@ static s64 hvs_stream_has_data(struct vsock_sock *vsk)
 static s64 hvs_stream_has_space(struct vsock_sock *vsk)
 {
        struct hvsock *hvs = vsk->trans;
-       struct vmbus_channel *chan = hvs->chan;
-       s64 ret;
 
-       ret = hvs_channel_writable_bytes(chan);
-       if (ret > 0)  {
-               hvs_clear_channel_pending_send_size(chan);
-       } else {
-               /* See hvs_channel_cb() */
-               hvs_set_channel_pending_send_size(chan);
-
-               /* Re-check the writable bytes to avoid race */
-               ret = hvs_channel_writable_bytes(chan);
-               if (ret > 0)
-                       hvs_clear_channel_pending_send_size(chan);
-       }
-
-       return ret;
+       return hvs_channel_writable_bytes(hvs->chan);
 }
 
 static u64 hvs_stream_rcvhiwat(struct vsock_sock *vsk)
index e30f53728725d1c5e80d22cfccaea4ffb17474a9..3c199f752fd3c926d5a4937a63d6f7513e25bdd2 100644 (file)
@@ -791,7 +791,7 @@ void virtio_transport_release(struct vsock_sock *vsk)
        struct sock *sk = &vsk->sk;
        bool remove_sock = true;
 
-       lock_sock(sk);
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
        if (sk->sk_type == SOCK_STREAM)
                remove_sock = virtio_transport_close(vsk);
 
index 2a46ec3cb72c1584d054a938286b67fbff8a6624..68660781aa51fe32613f90d58f0b13d21698a78d 100644 (file)
@@ -1335,10 +1335,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                }
                break;
        case NETDEV_PRE_UP:
-               if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) &&
-                   !(wdev->iftype == NL80211_IFTYPE_AP_VLAN &&
-                     rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
-                     wdev->use_4addr))
+               if (!cfg80211_iftype_allowed(wdev->wiphy, wdev->iftype,
+                                            wdev->use_4addr, 0))
                        return notifier_from_errno(-EOPNOTSUPP);
 
                if (rfkill_blocked(rdev->rfkill))
index 8e2f03ab4cc9f7f602513f51f00c8f9f9865f89b..a28d6456e93e2cf92261af998dc38ba59aa5f997 100644 (file)
@@ -200,6 +200,38 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
        return __cfg80211_rdev_from_attrs(netns, info->attrs);
 }
 
+static int validate_beacon_head(const struct nlattr *attr,
+                               struct netlink_ext_ack *extack)
+{
+       const u8 *data = nla_data(attr);
+       unsigned int len = nla_len(attr);
+       const struct element *elem;
+       const struct ieee80211_mgmt *mgmt = (void *)data;
+       unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
+                                        u.beacon.variable);
+
+       if (len < fixedlen)
+               goto err;
+
+       if (ieee80211_hdrlen(mgmt->frame_control) !=
+           offsetof(struct ieee80211_mgmt, u.beacon))
+               goto err;
+
+       data += fixedlen;
+       len -= fixedlen;
+
+       for_each_element(elem, data, len) {
+               /* nothing */
+       }
+
+       if (for_each_element_completed(elem, data, len))
+               return 0;
+
+err:
+       NL_SET_ERR_MSG_ATTR(extack, attr, "malformed beacon head");
+       return -EINVAL;
+}
+
 /* policy for the attributes */
 static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -2299,6 +2331,8 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 
        control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
 
+       memset(chandef, 0, sizeof(*chandef));
+
        chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
        chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
        chandef->center_freq1 = control_freq;
@@ -2819,7 +2853,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 
        if (rdev->ops->get_channel) {
                int ret;
-               struct cfg80211_chan_def chandef;
+               struct cfg80211_chan_def chandef = {};
 
                ret = rdev_get_channel(rdev, wdev, &chandef);
                if (ret == 0) {
@@ -3210,9 +3244,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                        return err;
        }
 
-       if (!(rdev->wiphy.interface_modes & (1 << type)) &&
-           !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
-             rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
+       if (!cfg80211_iftype_allowed(&rdev->wiphy, type, params.use_4addr, 0))
                return -EOPNOTSUPP;
 
        err = nl80211_parse_mon_options(rdev, type, info, &params);
@@ -4016,6 +4048,12 @@ static int nl80211_parse_beacon(struct nlattr *attrs[],
        memset(bcn, 0, sizeof(*bcn));
 
        if (attrs[NL80211_ATTR_BEACON_HEAD]) {
+               int ret = validate_beacon_head(attrs[NL80211_ATTR_BEACON_HEAD],
+                                              NULL);
+
+               if (ret)
+                       return ret;
+
                bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]);
                bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]);
                if (!bcn->head_len)
@@ -5805,6 +5843,9 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->del_mpath)
                return -EOPNOTSUPP;
 
+       if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+               return -EOPNOTSUPP;
+
        return rdev_del_mpath(rdev, dev, dst);
 }
 
@@ -10272,9 +10313,11 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
        hyst = wdev->cqm_config->rssi_hyst;
        n = wdev->cqm_config->n_rssi_thresholds;
 
-       for (i = 0; i < n; i++)
+       for (i = 0; i < n; i++) {
+               i = array_index_nospec(i, n);
                if (last < wdev->cqm_config->rssi_thresholds[i])
                        break;
+       }
 
        low_index = i - 1;
        if (low_index >= 0) {
index 8a47297ff206d20385ed95b54abb3ca2532edb33..cccbf845079c8d85b1f080bdf65697955ca4d453 100644 (file)
@@ -2095,7 +2095,7 @@ static void reg_call_notifier(struct wiphy *wiphy,
 
 static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
-       struct cfg80211_chan_def chandef;
+       struct cfg80211_chan_def chandef = {};
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        enum nl80211_iftype iftype;
 
@@ -2777,7 +2777,7 @@ static void reg_process_pending_hints(void)
 
        /* When last_request->processed becomes true this will be rescheduled */
        if (lr && !lr->processed) {
-               reg_process_hint(lr);
+               pr_debug("Pending regulatory request, waiting for it to be processed...\n");
                return;
        }
 
index d0e7472dd9fd4b2a8938334129f24a60ea3fb421..e5d61ba837addd63934d4b3e99a5f56aac31fd1e 100644 (file)
@@ -484,6 +484,8 @@ const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
                                 const u8 *match, int match_len,
                                 int match_offset)
 {
+       const struct element *elem;
+
        /* match_offset can't be smaller than 2, unless match_len is
         * zero, in which case match_offset must be zero as well.
         */
@@ -491,14 +493,10 @@ const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
                    (!match_len && match_offset)))
                return NULL;
 
-       while (len >= 2 && len >= ies[1] + 2) {
-               if ((ies[0] == eid) &&
-                   (ies[1] + 2 >= match_offset + match_len) &&
-                   !memcmp(ies + match_offset, match, match_len))
-                       return ies;
-
-               len -= ies[1] + 2;
-               ies += ies[1] + 2;
+       for_each_element_id(elem, eid, ies, len) {
+               if (elem->datalen >= match_offset - 2 + match_len &&
+                   !memcmp(elem->data + match_offset - 2, match, match_len))
+                       return (void *)elem;
        }
 
        return NULL;
index d57e2f679a3e48dee53cc3785f2bc5265e015917..d641d81da759ed73bf229f154fcac7b6292e9939 100644 (file)
@@ -930,6 +930,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                }
 
                cfg80211_process_rdev_events(rdev);
+               cfg80211_mlme_purge_registrations(dev->ieee80211_ptr);
        }
 
        err = rdev_change_virtual_intf(rdev, dev, ntype, params);
@@ -1670,7 +1671,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
        for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
                num_interfaces += params->iftype_num[iftype];
                if (params->iftype_num[iftype] > 0 &&
-                   !(wiphy->software_iftypes & BIT(iftype)))
+                   !cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
                        used_iftypes |= BIT(iftype);
        }
 
@@ -1692,7 +1693,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
                        return -ENOMEM;
 
                for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
-                       if (wiphy->software_iftypes & BIT(iftype))
+                       if (cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
                                continue;
                        for (j = 0; j < c->n_limits; j++) {
                                all_iftypes |= limits[j].types;
@@ -1895,3 +1896,26 @@ EXPORT_SYMBOL(rfc1042_header);
 const unsigned char bridge_tunnel_header[] __aligned(2) =
        { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
 EXPORT_SYMBOL(bridge_tunnel_header);
+
+bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
+                            bool is_4addr, u8 check_swif)
+
+{
+       bool is_vlan = iftype == NL80211_IFTYPE_AP_VLAN;
+
+       switch (check_swif) {
+       case 0:
+               if (is_vlan && is_4addr)
+                       return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
+               return wiphy->interface_modes & BIT(iftype);
+       case 1:
+               if (!(wiphy->software_iftypes & BIT(iftype)) && is_vlan)
+                       return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
+               return wiphy->software_iftypes & BIT(iftype);
+       default:
+               break;
+       }
+
+       return false;
+}
+EXPORT_SYMBOL(cfg80211_iftype_allowed);
index 06943d9c983522d499395f733f000d8630aa04d0..4f0cfb8cc682656f21c238ae6616ac648af63c59 100644 (file)
@@ -800,7 +800,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-       struct cfg80211_chan_def chandef;
+       struct cfg80211_chan_def chandef = {};
        int ret;
 
        switch (wdev->iftype) {
index c67d7a82ab132a50be6305f69df6576fd158a15e..73fd0eae08caa960eef0b72211c6fc2485cbfb43 100644 (file)
@@ -202,6 +202,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
                               struct iw_point *data, char *ssid)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
+       int ret = 0;
 
        /* call only for station! */
        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
@@ -219,7 +220,10 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
                if (ie) {
                        data->flags = 1;
                        data->length = ie[1];
-                       memcpy(ssid, ie + 2, data->length);
+                       if (data->length > IW_ESSID_MAX_SIZE)
+                               ret = -EINVAL;
+                       else
+                               memcpy(ssid, ie + 2, data->length);
                }
                rcu_read_unlock();
        } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
@@ -229,7 +233,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
        }
        wdev_unlock(wdev);
 
-       return 0;
+       return ret;
 }
 
 int cfg80211_mgd_wext_siwap(struct net_device *dev,
index a3b037fbfecdec9e173556159521e28c9db19267..8cab91c482ff5f09f381cdf93318947a9a426170 100644 (file)
@@ -322,7 +322,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
        umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
        if (!umem->pages) {
                err = -ENOMEM;
-               goto out_account;
+               goto out_pin;
        }
 
        for (i = 0; i < umem->npgs; i++)
@@ -330,6 +330,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
        return 0;
 
+out_pin:
+       xdp_umem_unpin_pages(umem);
 out_account:
        xdp_umem_unaccount_pages(umem);
        return err;
index 8a64b150be546d2d5903bbc62e1b32525e73d992..fe96c0d039f2fae806584c3a389d6d4b7dca0a57 100644 (file)
@@ -239,7 +239,7 @@ static inline void xskq_produce_flush_desc(struct xsk_queue *q)
        /* Order producer and data */
        smp_wmb();
 
-       q->prod_tail = q->prod_head,
+       q->prod_tail = q->prod_head;
        WRITE_ONCE(q->ring->producer, q->prod_tail);
 }
 
index 4a9ee2d83158ba87a4da985af1020faae8c440b7..372c91faa2834d520857e4bd94a52f7ee8547a7a 100644 (file)
@@ -14,6 +14,8 @@ config XFRM_ALGO
        tristate
        select XFRM
        select CRYPTO
+       select CRYPTO_HASH
+       select CRYPTO_BLKCIPHER
 
 config XFRM_USER
        tristate "Transformation user configuration interface"
index 2122f89f615550dc7c38bf5e545b8989340c55f7..1484bc99a53756d2ac306d87445c7ddf2b8c3179 100644 (file)
@@ -150,6 +150,25 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
 
        err = -EINVAL;
        switch (p->family) {
+       case AF_INET:
+               break;
+
+       case AF_INET6:
+#if IS_ENABLED(CONFIG_IPV6)
+               break;
+#else
+               err = -EAFNOSUPPORT;
+               goto out;
+#endif
+
+       default:
+               goto out;
+       }
+
+       switch (p->sel.family) {
+       case AF_UNSPEC:
+               break;
+
        case AF_INET:
                if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
                        goto out;
index dad5583451afba96b2de211309fa898c9e69eb3e..3b2861f47709b46aa6f50218a8cf644e9423bf82 100644 (file)
@@ -20,7 +20,7 @@ success = $(if-success,$(1),y,n)
 
 # $(cc-option,<flag>)
 # Return y if the compiler supports <flag>, n otherwise
-cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null)
+cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
 
 # $(ld-option,<flag>)
 # Return y if the linker supports <flag>, n otherwise
index 7d4af0d0accb34a990098a80062d289168c8fe5e..51884c7b80697911c56fb81ba168d2d2c93a7d8b 100644 (file)
@@ -75,7 +75,7 @@ modpost = scripts/mod/modpost                    \
  $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,)       \
  $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile)   \
  $(if $(KBUILD_EXTMOD),-I $(modulesymfile))      \
- $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
+ $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
  $(if $(KBUILD_EXTMOD),-o $(modulesymfile))      \
  $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S)      \
  $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E)  \
index c4a9ddb174bc5fd312f380d9fe143bea9390e1f4..5aa75a0a1cede6fce64169c5dec9080c89d79197 100755 (executable)
@@ -78,7 +78,7 @@ parse_symbol() {
        fi
 
        # Strip out the base of the path
-       code=${code//^$basepath/""}
+       code=${code#$basepath/}
 
        # In the case of inlines, move everything to same line
        code=${code//$'\n'/' '}
index 6d5bbd31db7f2420fff6beb5ca71fc5dcf1eb493..bd29e4e7a5241b738b2d531c1f7803ca413feb1d 100644 (file)
@@ -443,13 +443,13 @@ static int is_pure_ops_struct(const_tree node)
                if (node == fieldtype)
                        continue;
 
-               if (!is_fptr(fieldtype))
-                       return 0;
-
-               if (code != RECORD_TYPE && code != UNION_TYPE)
+               if (code == RECORD_TYPE || code == UNION_TYPE) {
+                       if (!is_pure_ops_struct(fieldtype))
+                               return 0;
                        continue;
+               }
 
-               if (!is_pure_ops_struct(fieldtype))
+               if (!is_fptr(fieldtype))
                        return 0;
        }
 
index 9f40bcd17d07f5bbeb1ab2fa0793d632116ae06e..f6956aa41366885d9f2ef370a7090433a6ba81fb 100644 (file)
@@ -24,6 +24,10 @@ static struct resword {
        { "__volatile__", VOLATILE_KEYW },
        { "__builtin_va_list", VA_LIST_KEYW },
 
+       { "__int128", BUILTIN_INT_KEYW },
+       { "__int128_t", BUILTIN_INT_KEYW },
+       { "__uint128_t", BUILTIN_INT_KEYW },
+
        // According to rth, c99 defines "_Bool", __restrict", __restrict__", "restrict".  KAO
        { "_Bool", BOOL_KEYW },
        { "_restrict", RESTRICT_KEYW },
index 00a6d7e5497126147dc0d9a564c4a6525fff6079..1ebcf52cd0f9e0402624b748d3818c27d40384ee 100644 (file)
@@ -76,6 +76,7 @@ static void record_compound(struct string_list **keyw,
 %token ATTRIBUTE_KEYW
 %token AUTO_KEYW
 %token BOOL_KEYW
+%token BUILTIN_INT_KEYW
 %token CHAR_KEYW
 %token CONST_KEYW
 %token DOUBLE_KEYW
@@ -263,6 +264,7 @@ simple_type_specifier:
        | VOID_KEYW
        | BOOL_KEYW
        | VA_LIST_KEYW
+       | BUILTIN_INT_KEYW
        | TYPE                  { (*$1)->tag = SYM_TYPEDEF; $$ = $1; }
        ;
 
index 0c9c54b57515e0e6dd1417a251c19ffd79d44cd0..31ed7f3f0e1574c230aca5ccbef9d18a54667272 100644 (file)
@@ -152,6 +152,9 @@ static int read_symbol(FILE *in, struct sym_entry *s)
        /* exclude debugging symbols */
        else if (stype == 'N' || stype == 'n')
                return -1;
+       /* exclude s390 kasan local symbols */
+       else if (!strncmp(sym, ".LASANPC", 8))
+               return -1;
 
        /* include the type field in the symbol name, so that it gets
         * compressed together */
index 91d0a5c014acd3a17cecad8f486ff90f59e1211c..0dde19cf748655b174b89321423eab09463e5e7f 100644 (file)
@@ -784,6 +784,7 @@ int conf_write(const char *name)
        const char *str;
        char dirname[PATH_MAX+1], tmpname[PATH_MAX+22], newname[PATH_MAX+8];
        char *env;
+       int i;
 
        dirname[0] = 0;
        if (name && name[0]) {
@@ -834,11 +835,12 @@ int conf_write(const char *name)
                                     "#\n"
                                     "# %s\n"
                                     "#\n", str);
-               } else if (!(sym->flags & SYMBOL_CHOICE)) {
+               } else if (!(sym->flags & SYMBOL_CHOICE) &&
+                          !(sym->flags & SYMBOL_WRITTEN)) {
                        sym_calc_value(sym);
                        if (!(sym->flags & SYMBOL_WRITE))
                                goto next;
-                       sym->flags &= ~SYMBOL_WRITE;
+                       sym->flags |= SYMBOL_WRITTEN;
 
                        conf_write_symbol(out, sym, &kconfig_printer_cb, NULL);
                }
@@ -859,6 +861,9 @@ next:
        }
        fclose(out);
 
+       for_all_symbols(i, sym)
+               sym->flags &= ~SYMBOL_WRITTEN;
+
        if (*tmpname) {
                strcat(dirname, basename);
                strcat(dirname, ".old");
@@ -1024,8 +1029,6 @@ int conf_write_autoconf(int overwrite)
        if (!overwrite && is_present(autoconf_name))
                return 0;
 
-       sym_clear_all_valid();
-
        conf_write_dep("include/config/auto.conf.cmd");
 
        if (conf_split_config())
index 7c329e179007a1e7cf92f012eee74f9ccb2f85d1..43a87f8ea738cbb90ec68d0a8c541a1c636c687f 100644 (file)
@@ -141,6 +141,7 @@ struct symbol {
 #define SYMBOL_OPTIONAL   0x0100  /* choice is optional - values can be 'n' */
 #define SYMBOL_WRITE      0x0200  /* write symbol to file (KCONFIG_CONFIG) */
 #define SYMBOL_CHANGED    0x0400  /* ? */
+#define SYMBOL_WRITTEN    0x0800  /* track info to avoid double-write to .config */
 #define SYMBOL_NO_WRITE   0x1000  /* Symbol for internal use only; it will not be written */
 #define SYMBOL_CHECKED    0x2000  /* used during dependency checking */
 #define SYMBOL_WARNED     0x8000  /* warning has been issued */
index 6135574a6f3947c37beab1b24ead86d933eda712..1da7bca201a42453fda2a6a09f5d0356a31047cf 100755 (executable)
 use warnings;
 use strict;
 use File::Find;
+use File::Spec;
 
 my $nm = ($ENV{'NM'} || "nm") . " -p";
 my $objdump = ($ENV{'OBJDUMP'} || "objdump") . " -s -j .comment";
-my $srctree = "";
-my $objtree = "";
-$srctree = "$ENV{'srctree'}/" if (exists($ENV{'srctree'}));
-$objtree = "$ENV{'objtree'}/" if (exists($ENV{'objtree'}));
+my $srctree = File::Spec->curdir();
+my $objtree = File::Spec->curdir();
+$srctree = File::Spec->rel2abs($ENV{'srctree'}) if (exists($ENV{'srctree'}));
+$objtree = File::Spec->rel2abs($ENV{'objtree'}) if (exists($ENV{'objtree'}));
 
 if ($#ARGV != -1) {
        print STDERR "usage: $0 takes no parameters\n";
@@ -231,9 +232,9 @@ sub do_nm
        }
        ($source = $basename) =~ s/\.o$//;
        if (-e "$source.c" || -e "$source.S") {
-               $source = "$objtree$File::Find::dir/$source";
+               $source = File::Spec->catfile($objtree, $File::Find::dir, $source)
        } else {
-               $source = "$srctree$File::Find::dir/$source";
+               $source = File::Spec->catfile($srctree, $File::Find::dir, $source)
        }
        if (! -e "$source.c" && ! -e "$source.S") {
                # No obvious source, exclude the object if it is conglomerate
index 2e7793735e145157d5e3cf849679ed70ed2924a7..ccfbfde615563a7e6738c7d5b33fc725f79a1d51 100644 (file)
@@ -326,7 +326,8 @@ static uint_t *sift_rel_mcount(uint_t *mlocp,
                if (!mcountsym)
                        mcountsym = get_mcountsym(sym0, relp, str0);
 
-               if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
+               if (mcountsym && mcountsym == Elf_r_sym(relp) &&
+                               !is_fake_mcount(relp)) {
                        uint_t const addend =
                                _w(_w(relp->r_offset) - recval + mcount_adjust);
                        mrelp->r_offset = _w(offbase
index 067459760a7b048d4b53c28ac476189ac5884c21..3524dbc313163e70d05a5bda0b0a31d41668502d 100755 (executable)
@@ -301,7 +301,7 @@ sub give_redhat_hints()
        #
        # Checks valid for RHEL/CentOS version 7.x.
        #
-       if (! $system_release =~ /Fedora/) {
+       if (!($system_release =~ /Fedora/)) {
                $map{"virtualenv"} = "python-virtualenv";
        }
 
index 088ea2ac857065150822e2d3acef93bd8c1b18ee..612f737cee836177a249a72d6446ef3868cdd494 100644 (file)
@@ -223,16 +223,21 @@ static void *kvmemdup(const void *src, size_t len)
 static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
 {
        size_t size = 0;
+       void *pos = e->pos;
 
        if (!inbounds(e, sizeof(u16)))
-               return 0;
+               goto fail;
        size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
        e->pos += sizeof(__le16);
        if (!inbounds(e, size))
-               return 0;
+               goto fail;
        *chunk = e->pos;
        e->pos += size;
        return size;
+
+fail:
+       e->pos = pos;
+       return 0;
 }
 
 /* unpack control byte */
@@ -294,49 +299,66 @@ fail:
 
 static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
 {
+       void *pos = e->pos;
+
        if (unpack_nameX(e, AA_U32, name)) {
                if (!inbounds(e, sizeof(u32)))
-                       return 0;
+                       goto fail;
                if (data)
                        *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
                e->pos += sizeof(u32);
                return 1;
        }
+
+fail:
+       e->pos = pos;
        return 0;
 }
 
 static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
 {
+       void *pos = e->pos;
+
        if (unpack_nameX(e, AA_U64, name)) {
                if (!inbounds(e, sizeof(u64)))
-                       return 0;
+                       goto fail;
                if (data)
                        *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
                e->pos += sizeof(u64);
                return 1;
        }
+
+fail:
+       e->pos = pos;
        return 0;
 }
 
 static size_t unpack_array(struct aa_ext *e, const char *name)
 {
+       void *pos = e->pos;
+
        if (unpack_nameX(e, AA_ARRAY, name)) {
                int size;
                if (!inbounds(e, sizeof(u16)))
-                       return 0;
+                       goto fail;
                size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
                e->pos += sizeof(u16);
                return size;
        }
+
+fail:
+       e->pos = pos;
        return 0;
 }
 
 static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
 {
+       void *pos = e->pos;
+
        if (unpack_nameX(e, AA_BLOB, name)) {
                u32 size;
                if (!inbounds(e, sizeof(u32)))
-                       return 0;
+                       goto fail;
                size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
                e->pos += sizeof(u32);
                if (inbounds(e, (size_t) size)) {
@@ -345,6 +367,9 @@ static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
                        return size;
                }
        }
+
+fail:
+       e->pos = pos;
        return 0;
 }
 
@@ -361,9 +386,10 @@ static int unpack_str(struct aa_ext *e, const char **string, const char *name)
                        if (src_str[size - 1] != 0)
                                goto fail;
                        *string = src_str;
+
+                       return size;
                }
        }
-       return size;
 
 fail:
        e->pos = pos;
index d9e7728027c6c3348d110a66241e480017c2e50d..f63b4bd45d60ec50195a4170b4e9449c474d76f4 100644 (file)
@@ -271,8 +271,16 @@ static int ima_calc_file_hash_atfm(struct file *file,
                rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
                rc = integrity_kernel_read(file, offset, rbuf[active],
                                           rbuf_len);
-               if (rc != rbuf_len)
+               if (rc != rbuf_len) {
+                       if (rc >= 0)
+                               rc = -EINVAL;
+                       /*
+                        * Forward current rc, do not overwrite with return value
+                        * from ahash_wait()
+                        */
+                       ahash_wait(ahash_rc, &wait);
                        goto out3;
+               }
 
                if (rbuf[1] && offset) {
                        /* Using two buffers, and it is not the first
index 5e515791ccd119d89150f6ede3f0a2a91eccbf9f..1d34b2a5f485e2380e8b092cfb62955434c3f908 100644 (file)
@@ -71,6 +71,9 @@ static void request_key_auth_describe(const struct key *key,
 {
        struct request_key_auth *rka = get_request_key_auth(key);
 
+       if (!rka)
+               return;
+
        seq_puts(m, "key:");
        seq_puts(m, key->description);
        if (key_is_positive(key))
@@ -88,6 +91,9 @@ static long request_key_auth_read(const struct key *key,
        size_t datalen;
        long ret;
 
+       if (!rka)
+               return -EKEYREVOKED;
+
        datalen = rka->callout_len;
        ret = datalen;
 
index a6ddef855f8751972e743bb9a34facf9dbfbaa9d..14ea725b97bc2f922290115c2a292bad55d88528 100644 (file)
@@ -6552,11 +6552,12 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
        } else if (!strcmp(name, "fscreate")) {
                tsec->create_sid = sid;
        } else if (!strcmp(name, "keycreate")) {
-               error = avc_has_perm(&selinux_state,
-                                    mysid, sid, SECCLASS_KEY, KEY__CREATE,
-                                    NULL);
-               if (error)
-                       goto abort_change;
+               if (sid) {
+                       error = avc_has_perm(&selinux_state, mysid, sid,
+                                            SECCLASS_KEY, KEY__CREATE, NULL);
+                       if (error)
+                               goto abort_change;
+               }
                tsec->keycreate_sid = sid;
        } else if (!strcmp(name, "sockcreate")) {
                tsec->sockcreate_sid = sid;
index d31a52e56b9ecaa97b3dfd37de21a8e7728f49df..91d259c87d10c2135869937c58cad361745eda85 100644 (file)
@@ -275,6 +275,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
        return v;
 }
 
+static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
+
 /*
  * Initialize a policy database structure.
  */
@@ -322,8 +324,10 @@ static int policydb_init(struct policydb *p)
 out:
        hashtab_destroy(p->filename_trans);
        hashtab_destroy(p->range_tr);
-       for (i = 0; i < SYM_NUM; i++)
+       for (i = 0; i < SYM_NUM; i++) {
+               hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
                hashtab_destroy(p->symtab[i].table);
+       }
        return rc;
 }
 
index 9a4c0ad46518d12d38564a703fdc51f3b202a5ae..c071c356a963737583a0354d3c5e9ee8a0c105c3 100644 (file)
@@ -469,7 +469,7 @@ char *smk_parse_smack(const char *string, int len)
        if (i == 0 || i >= SMK_LONGLABEL)
                return ERR_PTR(-EINVAL);
 
-       smack = kzalloc(i + 1, GFP_KERNEL);
+       smack = kzalloc(i + 1, GFP_NOFS);
        if (smack == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -504,7 +504,7 @@ int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
                        if ((m & *cp) == 0)
                                continue;
                        rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
-                                                 cat, GFP_KERNEL);
+                                                 cat, GFP_NOFS);
                        if (rc < 0) {
                                netlbl_catmap_free(sap->attr.mls.cat);
                                return rc;
@@ -540,7 +540,7 @@ struct smack_known *smk_import_entry(const char *string, int len)
        if (skp != NULL)
                goto freeout;
 
-       skp = kzalloc(sizeof(*skp), GFP_KERNEL);
+       skp = kzalloc(sizeof(*skp), GFP_NOFS);
        if (skp == NULL) {
                skp = ERR_PTR(-ENOMEM);
                goto freeout;
index 017c47eb795eb47a4e40ed18bf912886b52a73e4..221de4c755c318bf4ae4370f8d0e6520a9716431 100644 (file)
@@ -270,7 +270,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
        if (!(ip->i_opflags & IOP_XATTR))
                return ERR_PTR(-EOPNOTSUPP);
 
-       buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
+       buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
        if (buffer == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -947,7 +947,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
 
                if (rc != 0)
                        return rc;
-       } else if (bprm->unsafe)
+       }
+       if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
                return -EPERM;
 
        bsp->smk_task = isp->smk_task;
@@ -4005,6 +4006,8 @@ access_check:
                        skp = smack_ipv6host_label(&sadd);
                if (skp == NULL)
                        skp = smack_net_ambient;
+               if (skb == NULL)
+                       break;
 #ifdef CONFIG_AUDIT
                smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
                ad.a.u.net->family = family;
index 9cbf6927abe96a735f0e188cfd735710c1d989ad..ca50ff4447964e59ff556650c18d4345f47af2a3 100644 (file)
@@ -125,17 +125,12 @@ static int ac97_codec_add(struct ac97_controller *ac97_ctrl, int idx,
                                                      vendor_id);
 
        ret = device_add(&codec->dev);
-       if (ret)
-               goto err_free_codec;
+       if (ret) {
+               put_device(&codec->dev);
+               return ret;
+       }
 
        return 0;
-err_free_codec:
-       of_node_put(codec->dev.of_node);
-       put_device(&codec->dev);
-       kfree(codec);
-       ac97_ctrl->codecs[idx] = NULL;
-
-       return ret;
 }
 
 unsigned int snd_ac97_bus_scan_one(struct ac97_controller *adrv,
index 8b78ddffa509ab2598f17219c11792307cc50357..516ec35873256e4e598c150473fc5affabe78a20 100644 (file)
@@ -575,10 +575,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
                stream->metadata_set = false;
                stream->next_track = false;
 
-               if (stream->direction == SND_COMPRESS_PLAYBACK)
-                       stream->runtime->state = SNDRV_PCM_STATE_SETUP;
-               else
-                       stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+               stream->runtime->state = SNDRV_PCM_STATE_SETUP;
        } else {
                return -EPERM;
        }
@@ -694,8 +691,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
 {
        int retval;
 
-       if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+       switch (stream->runtime->state) {
+       case SNDRV_PCM_STATE_SETUP:
+               if (stream->direction != SND_COMPRESS_CAPTURE)
+                       return -EPERM;
+               break;
+       case SNDRV_PCM_STATE_PREPARED:
+               break;
+       default:
                return -EPERM;
+       }
+
        retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
        if (!retval)
                stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
@@ -706,9 +712,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
 {
        int retval;
 
-       if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-                       stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+       switch (stream->runtime->state) {
+       case SNDRV_PCM_STATE_OPEN:
+       case SNDRV_PCM_STATE_SETUP:
+       case SNDRV_PCM_STATE_PREPARED:
                return -EPERM;
+       default:
+               break;
+       }
+
        retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
        if (!retval) {
                snd_compr_drain_notify(stream);
@@ -796,9 +808,17 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
 {
        int retval;
 
-       if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-                       stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+       switch (stream->runtime->state) {
+       case SNDRV_PCM_STATE_OPEN:
+       case SNDRV_PCM_STATE_SETUP:
+       case SNDRV_PCM_STATE_PREPARED:
+       case SNDRV_PCM_STATE_PAUSED:
                return -EPERM;
+       case SNDRV_PCM_STATE_XRUN:
+               return -EPIPE;
+       default:
+               break;
+       }
 
        retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
        if (retval) {
@@ -818,6 +838,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
        if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
                return -EPERM;
 
+       /* next track doesn't have any meaning for capture streams */
+       if (stream->direction == SND_COMPRESS_CAPTURE)
+               return -EPERM;
+
        /* you can signal next track if this is intended to be a gapless stream
         * and current track metadata is set
         */
@@ -835,9 +859,23 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
 static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 {
        int retval;
-       if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-                       stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+
+       switch (stream->runtime->state) {
+       case SNDRV_PCM_STATE_OPEN:
+       case SNDRV_PCM_STATE_SETUP:
+       case SNDRV_PCM_STATE_PREPARED:
+       case SNDRV_PCM_STATE_PAUSED:
+               return -EPERM;
+       case SNDRV_PCM_STATE_XRUN:
+               return -EPIPE;
+       default:
+               break;
+       }
+
+       /* partial drain doesn't have any meaning for capture streams */
+       if (stream->direction == SND_COMPRESS_CAPTURE)
                return -EPERM;
+
        /* stream can be drained only when next track has been signalled */
        if (stream->next_track == false)
                return -EPERM;
index f59e13c1d84a8c8ab3ac478087bec2adc66b247e..bd3d68e0489dd9d7ef28643f40d41ddec49c4659 100644 (file)
@@ -1004,7 +1004,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
 {
        struct snd_seq_client *client = file->private_data;
        int written = 0, len;
-       int err;
+       int err, handled;
        struct snd_seq_event event;
 
        if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
@@ -1017,6 +1017,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
        if (!client->accept_output || client->pool == NULL)
                return -ENXIO;
 
+ repeat:
+       handled = 0;
        /* allocate the pool now if the pool is not allocated yet */ 
        mutex_lock(&client->ioctl_mutex);
        if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
@@ -1076,12 +1078,19 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
                                                   0, 0, &client->ioctl_mutex);
                if (err < 0)
                        break;
+               handled++;
 
        __skip_event:
                /* Update pointers and counts */
                count -= len;
                buf += len;
                written += len;
+
+               /* let's have a coffee break if too many events are queued */
+               if (++handled >= 200) {
+                       mutex_unlock(&client->ioctl_mutex);
+                       goto repeat;
+               }
        }
 
  out:
@@ -1809,8 +1818,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
        if (cptr->type == USER_CLIENT) {
                info->input_pool = cptr->data.user.fifo_pool_size;
                info->input_free = info->input_pool;
-               if (cptr->data.user.fifo)
-                       info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
+               info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
        } else {
                info->input_pool = 0;
                info->input_free = 0;
index 72c0302a55d23c05720d6062bef600b40fec6971..6a24732704fcf980b7c580bcf3ceadd3052304e2 100644 (file)
@@ -280,3 +280,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
 
        return 0;
 }
+
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
+{
+       unsigned long flags;
+       int cells;
+
+       if (!f)
+               return 0;
+
+       snd_use_lock_use(&f->use_lock);
+       spin_lock_irqsave(&f->lock, flags);
+       cells = snd_seq_unused_cells(f->pool);
+       spin_unlock_irqrestore(&f->lock, flags);
+       snd_use_lock_free(&f->use_lock);
+       return cells;
+}
index 062c446e786722de5aa1936a4c187c74be660d94..5d38a0d7f0cd673a46becc883261ef25adf6005d 100644 (file)
@@ -68,5 +68,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
 /* resize pool in fifo */
 int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
 
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
 
 #endif
index 218292bdace6fd63bbc5e94060cd20d69146cdca..f5b325263b6747478a0e78783a8ce1c480e515e3 100644 (file)
@@ -15,7 +15,7 @@ alesis_io14_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
 
 static const unsigned int
 alesis_io26_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
-       {10, 10, 8},    /* Tx0 = Analog + S/PDIF. */
+       {10, 10, 4},    /* Tx0 = Analog + S/PDIF. */
        {16, 8, 0},     /* Tx1 = ADAT1 + ADAT2. */
 };
 
index 743015e87a9605628f559f44a6e28ffaa448f1ae..e240fdfcae31d7c99919ee99118463e4f025fbaa 100644 (file)
@@ -255,6 +255,17 @@ static const struct snd_motu_spec motu_audio_express = {
        .analog_out_ports = 4,
 };
 
+static const struct snd_motu_spec motu_4pre = {
+       .name = "4pre",
+       .protocol = &snd_motu_protocol_v3,
+       .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
+                SND_MOTU_SPEC_TX_MICINST_CHUNK |
+                SND_MOTU_SPEC_TX_RETURN_CHUNK |
+                SND_MOTU_SPEC_RX_SEPARETED_MAIN,
+       .analog_in_ports = 2,
+       .analog_out_ports = 2,
+};
+
 #define SND_MOTU_DEV_ENTRY(model, data)                        \
 {                                                      \
        .match_flags    = IEEE1394_MATCH_VENDOR_ID |    \
@@ -272,6 +283,7 @@ static const struct ieee1394_device_id motu_id_table[] = {
        SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3),     /* FireWire only. */
        SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3),     /* Hybrid. */
        SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
+       SND_MOTU_DEV_ENTRY(0x000045, &motu_4pre),
        { }
 };
 MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
index 1ebf00c83409666579b2a35d9b05836dd2f745bf..715cd99f28de8030e693ba0602827d1eadbe40a5 100644 (file)
@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
        packets_per_page = PAGE_SIZE / packet_size;
        if (WARN_ON(!packets_per_page)) {
                err = -EINVAL;
-               goto error;
+               goto err_packets;
        }
        pages = DIV_ROUND_UP(count, packets_per_page);
 
index e4cc8990e1953c2c550ebd591a2cff434097adea..9e58633e2dead2797fbb123296f567db316f2087 100644 (file)
@@ -57,6 +57,9 @@ static int pcm_open(struct snd_pcm_substream *substream)
                goto err_locked;
 
        err = snd_tscm_stream_get_clock(tscm, &clock);
+       if (err < 0)
+               goto err_locked;
+
        if (clock != SND_TSCM_CLOCK_INTERNAL ||
            amdtp_stream_pcm_running(&tscm->rx_stream) ||
            amdtp_stream_pcm_running(&tscm->tx_stream)) {
index f1657a4e0621ef4999349477ce6e71fdbcd7f411..a1308f12a65b099c8b484326a0630087eae79372 100644 (file)
@@ -9,20 +9,37 @@
 #include <linux/delay.h>
 #include "tascam.h"
 
+#define CLOCK_STATUS_MASK      0xffff0000
+#define CLOCK_CONFIG_MASK      0x0000ffff
+
 #define CALLBACK_TIMEOUT 500
 
 static int get_clock(struct snd_tscm *tscm, u32 *data)
 {
+       int trial = 0;
        __be32 reg;
        int err;
 
-       err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
-                                TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
-                                &reg, sizeof(reg), 0);
-       if (err >= 0)
+       while (trial++ < 5) {
+               err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
+                               TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
+                               &reg, sizeof(reg), 0);
+               if (err < 0)
+                       return err;
+
                *data = be32_to_cpu(reg);
+               if (*data & CLOCK_STATUS_MASK)
+                       break;
 
-       return err;
+               // In intermediate state after changing clock status.
+               msleep(50);
+       }
+
+       // Still in the intermediate state.
+       if (trial >= 5)
+               return -EAGAIN;
+
+       return 0;
 }
 
 static int set_clock(struct snd_tscm *tscm, unsigned int rate,
@@ -35,7 +52,7 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
        err = get_clock(tscm, &data);
        if (err < 0)
                return err;
-       data &= 0x0000ffff;
+       data &= CLOCK_CONFIG_MASK;
 
        if (rate > 0) {
                data &= 0x000000ff;
@@ -80,17 +97,14 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
 
 int snd_tscm_stream_get_rate(struct snd_tscm *tscm, unsigned int *rate)
 {
-       u32 data = 0x0;
-       unsigned int trials = 0;
+       u32 data;
        int err;
 
-       while (data == 0x0 || trials++ < 5) {
-               err = get_clock(tscm, &data);
-               if (err < 0)
-                       return err;
+       err = get_clock(tscm, &data);
+       if (err < 0)
+               return err;
 
-               data = (data & 0xff000000) >> 24;
-       }
+       data = (data & 0xff000000) >> 24;
 
        /* Check base rate. */
        if ((data & 0x0f) == 0x01)
index 74244d8e2909090cfb3bfc54c57c9591332decdd..e858b6fa0c3ad626fb9bcd5742b2889a568480b9 100644 (file)
@@ -443,6 +443,8 @@ static void azx_int_disable(struct hdac_bus *bus)
        list_for_each_entry(azx_dev, &bus->stream_list, list)
                snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
 
+       synchronize_irq(bus->irq);
+
        /* disable SIE for all streams */
        snd_hdac_chip_writeb(bus, INTCTL, 0);
 
index 27eb0270a711f4db55511d6924db13656bc67276..3847fe841d33bd85d7c3084d2d58a11bc5bc4329 100644 (file)
@@ -143,10 +143,12 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
        if (!acomp)
                return -ENODEV;
        if (!acomp->ops) {
-               request_module("i915");
-               /* 60s timeout */
-               wait_for_completion_timeout(&bind_complete,
-                                           msecs_to_jiffies(60 * 1000));
+               if (!IS_ENABLED(CONFIG_MODULES) ||
+                   !request_module("i915")) {
+                       /* 60s timeout */
+                       wait_for_completion_timeout(&bind_complete,
+                                                  msecs_to_jiffies(60 * 1000));
+               }
        }
        if (!acomp->ops) {
                dev_info(bus->dev, "couldn't bind with audio component\n");
index 7f2761a2e7c8c6f2ff6f62e5d2f59ac6aac785c3..971197c34fcef5de287c24b4e640f7ec96876900 100644 (file)
@@ -789,11 +789,12 @@ static int build_adc_controls(struct snd_akm4xxx *ak)
                                return err;
 
                        memset(&knew, 0, sizeof(knew));
-                       knew.name = ak->adc_info[mixer_ch].selector_name;
-                       if (!knew.name) {
+                       if (!ak->adc_info ||
+                               !ak->adc_info[mixer_ch].selector_name) {
                                knew.name = "Capture Channel";
                                knew.index = mixer_ch + ak->idx_offset * 2;
-                       }
+                       } else
+                               knew.name = ak->adc_info[mixer_ch].selector_name;
 
                        knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
                        knew.info = ak4xxx_capture_source_info;
index b9a6b66aeb0ef75b2087468c65e1686c0a1cdd52..d8ba3a6d5042d31061f27bef606a07007b22e88c 100644 (file)
@@ -828,6 +828,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
        while (id >= 0) {
                const struct hda_fixup *fix = codec->fixup_list + id;
 
+               if (++depth > 10)
+                       break;
                if (fix->chained_before)
                        apply_fixup(codec, fix->chain_id, action, depth + 1);
 
@@ -867,8 +869,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
                }
                if (!fix->chained || fix->chained_before)
                        break;
-               if (++depth > 10)
-                       break;
                id = fix->chain_id;
        }
 }
index a6233775e779f19a6ca1e75906477f3e0ad9f921..82b0dc9f528f031a4e0f09d315236beee226e869 100644 (file)
@@ -2947,15 +2947,19 @@ static int hda_codec_runtime_resume(struct device *dev)
 #ifdef CONFIG_PM_SLEEP
 static int hda_codec_force_resume(struct device *dev)
 {
+       struct hda_codec *codec = dev_to_hda_codec(dev);
+       bool forced_resume = !codec->relaxed_resume;
        int ret;
 
        /* The get/put pair below enforces the runtime resume even if the
         * device hasn't been used at suspend time.  This trick is needed to
         * update the jack state change during the sleep.
         */
-       pm_runtime_get_noresume(dev);
+       if (forced_resume)
+               pm_runtime_get_noresume(dev);
        ret = pm_runtime_force_resume(dev);
-       pm_runtime_put(dev);
+       if (forced_resume)
+               pm_runtime_put(dev);
        return ret;
 }
 
index acacc19002658c6b25fcbbd722f29afcaaf3a9d0..2003403ce1c82e4297449c59ddb1839dd7e504d5 100644 (file)
@@ -261,6 +261,8 @@ struct hda_codec {
        unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
        unsigned int force_pin_prefix:1; /* Add location prefix */
        unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
+       unsigned int relaxed_resume:1;  /* don't resume forcibly for jack */
+
 #ifdef CONFIG_PM
        unsigned long power_on_acct;
        unsigned long power_off_acct;
index a12e594d4e3b3a23d78cc0b75531b845c7b6e331..8fcb421193e0258fde55a7b04abc367424d17e81 100644 (file)
@@ -609,11 +609,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
        }
        runtime->private_data = azx_dev;
 
-       if (chip->gts_present)
-               azx_pcm_hw.info = azx_pcm_hw.info |
-                       SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
-
        runtime->hw = azx_pcm_hw;
+       if (chip->gts_present)
+               runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
        runtime->hw.channels_min = hinfo->channels_min;
        runtime->hw.channels_max = hinfo->channels_max;
        runtime->hw.formats = hinfo->formats;
@@ -626,6 +624,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
                                     20,
                                     178000000);
 
+       /* by some reason, the playback stream stalls on PulseAudio with
+        * tsched=1 when a capture stream triggers.  Until we figure out the
+        * real cause, disable tsched mode by telling the PCM info flag.
+        */
+       if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
+               runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
+
        if (chip->align_buffer_size)
                /* constrain buffer sizes to be multiple of 128
                   bytes. This is more efficient in terms of memory
@@ -872,10 +877,13 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
         */
        if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
                hbus->response_reset = 1;
+               dev_err(chip->card->dev,
+                       "No response from codec, resetting bus: last cmd=0x%08x\n",
+                       bus->last_cmd[addr]);
                return -EAGAIN; /* give a chance to retry */
        }
 
-       dev_err(chip->card->dev,
+       dev_WARN(chip->card->dev,
                "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
                bus->last_cmd[addr]);
        chip->single_cmd = 1;
index 53c3cd28bc9952be2bc898df3e9faec7b3811171..8a9dd4767b1ecb65c4ab4e801c29e1ff9f188b9e 100644 (file)
@@ -40,7 +40,7 @@
 /* 14 unused */
 #define AZX_DCAPS_CTX_WORKAROUND (1 << 15)     /* X-Fi workaround */
 #define AZX_DCAPS_POSFIX_LPIB  (1 << 16)       /* Use LPIB as default */
-/* 17 unused */
+#define AZX_DCAPS_AMD_WORKAROUND (1 << 17)     /* AMD-specific workaround */
 #define AZX_DCAPS_NO_64BIT     (1 << 18)       /* No 64bit address */
 #define AZX_DCAPS_SYNC_WRITE   (1 << 19)       /* sync each cmd write */
 #define AZX_DCAPS_OLD_SSYNC    (1 << 20)       /* Old SSYNC reg for ICH */
index 579984ecdec301b077512093b01f912261e92a2b..2609161707a4141da8b3b1e62cb4eba50b56b2bc 100644 (file)
@@ -5991,7 +5991,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
        if (spec->init_hook)
                spec->init_hook(codec);
 
-       snd_hda_apply_verbs(codec);
+       if (!spec->skip_verbs)
+               snd_hda_apply_verbs(codec);
 
        init_multi_out(codec);
        init_extra_out(codec);
@@ -6033,6 +6034,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
 }
 EXPORT_SYMBOL_GPL(snd_hda_gen_free);
 
+/**
+ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
+ * @codec: the HDA codec
+ *
+ * This can be put as patch_ops reboot_notify function.
+ */
+void snd_hda_gen_reboot_notify(struct hda_codec *codec)
+{
+       /* Make the codec enter D3 to avoid spurious noises from the internal
+        * speaker during (and after) reboot
+        */
+       snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+       snd_hda_codec_write(codec, codec->core.afg, 0,
+                           AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+       msleep(10);
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
+
 #ifdef CONFIG_PM
 /**
  * snd_hda_gen_check_power_status - check the loopback power save state
@@ -6060,6 +6079,7 @@ static const struct hda_codec_ops generic_patch_ops = {
        .init = snd_hda_gen_init,
        .free = snd_hda_gen_free,
        .unsol_event = snd_hda_jack_unsol_event,
+       .reboot_notify = snd_hda_gen_reboot_notify,
 #ifdef CONFIG_PM
        .check_power_status = snd_hda_gen_check_power_status,
 #endif
@@ -6082,7 +6102,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
 
        err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
        if (err < 0)
-               return err;
+               goto error;
 
        err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
        if (err < 0)
index 10123664fa619af70a3b9060685060d541106d2e..8933c0f64cc4ac027dc497771cb0533b3b50e401 100644 (file)
@@ -247,6 +247,7 @@ struct hda_gen_spec {
        unsigned int indep_hp_enabled:1; /* independent HP enabled */
        unsigned int have_aamix_ctl:1;
        unsigned int hp_mic_jack_modes:1;
+       unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
 
        /* additional mute flags (only effective with auto_mute_via_amp=1) */
        u64 mute_bits;
@@ -336,6 +337,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
                                  struct auto_pin_cfg *cfg);
 int snd_hda_gen_build_controls(struct hda_codec *codec);
 int snd_hda_gen_build_pcms(struct hda_codec *codec);
+void snd_hda_gen_reboot_notify(struct hda_codec *codec);
 
 /* standard jack event callbacks */
 void snd_hda_gen_hp_automute(struct hda_codec *codec,
index 308ce76149ccac014269910a384d1984c53d3b5e..bfc45086cf793af6da5878176333956a2e7a5da4 100644 (file)
@@ -78,6 +78,7 @@ enum {
        POS_FIX_VIACOMBO,
        POS_FIX_COMBO,
        POS_FIX_SKL,
+       POS_FIX_FIFO,
 };
 
 /* Defines for ATI HD Audio support in SB450 south bridge */
@@ -149,7 +150,7 @@ module_param_array(model, charp, NULL, 0444);
 MODULE_PARM_DESC(model, "Use the given board model.");
 module_param_array(position_fix, int, NULL, 0444);
 MODULE_PARM_DESC(position_fix, "DMA pointer read method."
-                "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+).");
+                "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+, 6 = FIFO).");
 module_param_array(bdl_pos_adj, int, NULL, 0644);
 MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
 module_param_array(probe_mask, int, NULL, 0444);
@@ -328,13 +329,11 @@ enum {
 
 #define AZX_DCAPS_INTEL_SKYLAKE \
        (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+        AZX_DCAPS_SYNC_WRITE |\
         AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT |\
         AZX_DCAPS_I915_POWERWELL)
 
-#define AZX_DCAPS_INTEL_BROXTON \
-       (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
-        AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT |\
-        AZX_DCAPS_I915_POWERWELL)
+#define AZX_DCAPS_INTEL_BROXTON                        AZX_DCAPS_INTEL_SKYLAKE
 
 /* quirks for ATI SB / AMD Hudson */
 #define AZX_DCAPS_PRESET_ATI_SB \
@@ -350,6 +349,11 @@ enum {
 #define AZX_DCAPS_PRESET_ATI_HDMI_NS \
        (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
 
+/* quirks for AMD SB */
+#define AZX_DCAPS_PRESET_AMD_SB \
+       (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_AMD_WORKAROUND |\
+        AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
+
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
        (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
@@ -920,6 +924,49 @@ static unsigned int azx_via_get_position(struct azx *chip,
        return bound_pos + mod_dma_pos;
 }
 
+#define AMD_FIFO_SIZE  32
+
+/* get the current DMA position with FIFO size correction */
+static unsigned int azx_get_pos_fifo(struct azx *chip, struct azx_dev *azx_dev)
+{
+       struct snd_pcm_substream *substream = azx_dev->core.substream;
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       unsigned int pos, delay;
+
+       pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
+       if (!runtime)
+               return pos;
+
+       runtime->delay = AMD_FIFO_SIZE;
+       delay = frames_to_bytes(runtime, AMD_FIFO_SIZE);
+       if (azx_dev->insufficient) {
+               if (pos < delay) {
+                       delay = pos;
+                       runtime->delay = bytes_to_frames(runtime, pos);
+               } else {
+                       azx_dev->insufficient = 0;
+               }
+       }
+
+       /* correct the DMA position for capture stream */
+       if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+               if (pos < delay)
+                       pos += azx_dev->core.bufsize;
+               pos -= delay;
+       }
+
+       return pos;
+}
+
+static int azx_get_delay_from_fifo(struct azx *chip, struct azx_dev *azx_dev,
+                                  unsigned int pos)
+{
+       struct snd_pcm_substream *substream = azx_dev->core.substream;
+
+       /* just read back the calculated value in the above */
+       return substream->runtime->delay;
+}
+
 static unsigned int azx_skl_get_dpib_pos(struct azx *chip,
                                         struct azx_dev *azx_dev)
 {
@@ -1408,9 +1455,9 @@ static int azx_free(struct azx *chip)
        }
 
        if (bus->chip_init) {
+               azx_stop_chip(chip);
                azx_clear_irq_pending(chip);
                azx_stop_all_streams(chip);
-               azx_stop_chip(chip);
        }
 
        if (bus->irq >= 0)
@@ -1528,6 +1575,7 @@ static int check_position_fix(struct azx *chip, int fix)
        case POS_FIX_VIACOMBO:
        case POS_FIX_COMBO:
        case POS_FIX_SKL:
+       case POS_FIX_FIFO:
                return fix;
        }
 
@@ -1544,6 +1592,10 @@ static int check_position_fix(struct azx *chip, int fix)
                dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n");
                return POS_FIX_VIACOMBO;
        }
+       if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) {
+               dev_dbg(chip->card->dev, "Using FIFO position fix\n");
+               return POS_FIX_FIFO;
+       }
        if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
                dev_dbg(chip->card->dev, "Using LPIB position fix\n");
                return POS_FIX_LPIB;
@@ -1564,6 +1616,7 @@ static void assign_position_fix(struct azx *chip, int fix)
                [POS_FIX_VIACOMBO] = azx_via_get_position,
                [POS_FIX_COMBO] = azx_get_pos_lpib,
                [POS_FIX_SKL] = azx_get_pos_skl,
+               [POS_FIX_FIFO] = azx_get_pos_fifo,
        };
 
        chip->get_position[0] = chip->get_position[1] = callbacks[fix];
@@ -1578,6 +1631,9 @@ static void assign_position_fix(struct azx *chip, int fix)
                        azx_get_delay_from_lpib;
        }
 
+       if (fix == POS_FIX_FIFO)
+               chip->get_delay[0] = chip->get_delay[1] =
+                       azx_get_delay_from_fifo;
 }
 
 /*
@@ -2594,14 +2650,19 @@ static const struct pci_device_id azx_ids[] = {
        /* AMD Hudson */
        { PCI_DEVICE(0x1022, 0x780d),
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+       /* AMD, X370 & co */
+       { PCI_DEVICE(0x1022, 0x1457),
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+       /* AMD, X570 & co */
+       { PCI_DEVICE(0x1022, 0x1487),
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
        /* AMD Stoney */
        { PCI_DEVICE(0x1022, 0x157a),
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
                         AZX_DCAPS_PM_RUNTIME },
        /* AMD Raven */
        { PCI_DEVICE(0x1022, 0x15e3),
-         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
-                        AZX_DCAPS_PM_RUNTIME },
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
        /* ATI HDMI */
        { PCI_DEVICE(0x1002, 0x0002),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
index fd476fb40e1b458eb0af363b61b76d1b7736cee7..677dcc0aca978f12dd202d3f4ff401dc78e26564 100644 (file)
@@ -370,6 +370,7 @@ static const struct hda_fixup ad1986a_fixups[] = {
 
 static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
+       SND_PCI_QUIRK(0x1043, 0x1153, "ASUS M9V", AD1986A_FIXUP_LAPTOP_IMIC),
        SND_PCI_QUIRK(0x1043, 0x1443, "ASUS Z99He", AD1986A_FIXUP_EAPD),
        SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8JN", AD1986A_FIXUP_EAPD),
        SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
index 3cbd2119e14893064b943afe32c7f5f6af87eb22..ae8fde4c1a1254d7f9899951e0ed4f56879cc5fb 100644 (file)
@@ -176,23 +176,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
 {
        struct conexant_spec *spec = codec->spec;
 
-       switch (codec->core.vendor_id) {
-       case 0x14f12008: /* CX8200 */
-       case 0x14f150f2: /* CX20722 */
-       case 0x14f150f4: /* CX20724 */
-               break;
-       default:
-               return;
-       }
-
        /* Turn the problematic codec into D3 to avoid spurious noises
           from the internal speaker during (and after) reboot */
        cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
-
-       snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
-       snd_hda_codec_write(codec, codec->core.afg, 0,
-                           AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-       msleep(10);
+       snd_hda_gen_reboot_notify(codec);
 }
 
 static void cx_auto_free(struct hda_codec *codec)
@@ -637,18 +624,20 @@ static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
 
 /* update LED status via GPIO */
 static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask,
-                               bool enabled)
+                               bool led_on)
 {
        struct conexant_spec *spec = codec->spec;
        unsigned int oldval = spec->gpio_led;
 
        if (spec->mute_led_polarity)
-               enabled = !enabled;
+               led_on = !led_on;
 
-       if (enabled)
-               spec->gpio_led &= ~mask;
-       else
+       if (led_on)
                spec->gpio_led |= mask;
+       else
+               spec->gpio_led &= ~mask;
+       codec_dbg(codec, "mask:%d enabled:%d gpio_led:%d\n",
+                       mask, led_on, spec->gpio_led);
        if (spec->gpio_led != oldval)
                snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
                                    spec->gpio_led);
@@ -659,8 +648,8 @@ static void cxt_fixup_gpio_mute_hook(void *private_data, int enabled)
 {
        struct hda_codec *codec = private_data;
        struct conexant_spec *spec = codec->spec;
-
-       cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled);
+       /* muted -> LED on */
+       cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, !enabled);
 }
 
 /* turn on/off mic-mute LED via GPIO per capture hook */
@@ -682,7 +671,6 @@ static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
                { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 },
                {}
        };
-       codec_info(codec, "action: %d gpio_led: %d\n", action, spec->gpio_led);
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
                spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook;
@@ -1096,6 +1084,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
  */
 
 static const struct hda_device_id snd_hda_id_conexant[] = {
+       HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
index 35931a18418f301b9b5162d774a6de2240a71601..c827a2a89cc3d772e42662206342ef93efeae17a 100644 (file)
@@ -2293,8 +2293,10 @@ static void generic_hdmi_free(struct hda_codec *codec)
        struct hdmi_spec *spec = codec->spec;
        int pin_idx, pcm_idx;
 
-       if (codec_has_acomp(codec))
+       if (codec_has_acomp(codec)) {
                snd_hdac_acomp_register_notifier(&codec->bus->core, NULL);
+               codec->relaxed_resume = 0;
+       }
 
        for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
                struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
@@ -2550,6 +2552,8 @@ static void register_i915_notifier(struct hda_codec *codec)
        spec->drm_audio_ops.pin_eld_notify = intel_pin_eld_notify;
        snd_hdac_acomp_register_notifier(&codec->bus->core,
                                        &spec->drm_audio_ops);
+       /* no need for forcible resume for jack check thanks to notifier */
+       codec->relaxed_resume = 1;
 }
 
 /* setup_stream ops override for HSW+ */
@@ -2579,6 +2583,8 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
 /* precondition and allocation for Intel codecs */
 static int alloc_intel_hdmi(struct hda_codec *codec)
 {
+       int err;
+
        /* requires i915 binding */
        if (!codec->bus->core.audio_component) {
                codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n");
@@ -2587,7 +2593,12 @@ static int alloc_intel_hdmi(struct hda_codec *codec)
                return -ENODEV;
        }
 
-       return alloc_generic_hdmi(codec);
+       err = alloc_generic_hdmi(codec);
+       if (err < 0)
+               return err;
+       /* no need to handle unsol events */
+       codec->patch_ops.unsol_event = NULL;
+       return 0;
 }
 
 /* parse and post-process for Intel codecs */
@@ -3253,6 +3264,8 @@ static int patch_nvhdmi(struct hda_codec *codec)
                nvhdmi_chmap_cea_alloc_validate_get_type;
        spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
 
+       codec->link_down_at_suspend = 1;
+
        return 0;
 }
 
index 98cfdcfce5b3624387dcf82c615cda3b6f9106ca..dd46354270d0d03ad080005dd5a56652e528e7d5 100644 (file)
@@ -405,6 +405,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0700:
        case 0x10ec0701:
        case 0x10ec0703:
+       case 0x10ec0711:
                alc_update_coef_idx(codec, 0x10, 1<<15, 0);
                break;
        case 0x10ec0662:
@@ -836,9 +837,11 @@ static int alc_init(struct hda_codec *codec)
        if (spec->init_hook)
                spec->init_hook(codec);
 
+       spec->gen.skip_verbs = 1; /* applied in below */
        snd_hda_gen_init(codec);
        alc_fix_pll(codec);
        alc_auto_init_amp(codec, spec->init_amp);
+       snd_hda_apply_verbs(codec); /* apply verbs here after own init */
 
        snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
 
@@ -868,15 +871,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
                alc_shutup(codec);
 }
 
-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
-static void alc_d3_at_reboot(struct hda_codec *codec)
-{
-       snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
-       snd_hda_codec_write(codec, codec->core.afg, 0,
-                           AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-       msleep(10);
-}
-
 #define alc_free       snd_hda_gen_free
 
 #ifdef CONFIG_PM
@@ -1064,6 +1058,9 @@ static const struct snd_pci_quirk beep_white_list[] = {
        SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1),
        SND_PCI_QUIRK(0x1458, 0xa002, "GA-MA790X", 1),
        SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
+       /* blacklist -- no beep available */
+       SND_PCI_QUIRK(0x17aa, 0x309e, "Lenovo ThinkCentre M73", 0),
+       SND_PCI_QUIRK(0x17aa, 0x30a3, "Lenovo ThinkCentre M93", 0),
        {}
 };
 
@@ -5111,7 +5108,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
        struct alc_spec *spec = codec->spec;
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
-               spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
+               spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
                codec->power_save_node = 0; /* avoid click noises */
                snd_hda_apply_pincfgs(codec, pincfgs);
@@ -5680,8 +5677,11 @@ enum {
        ALC225_FIXUP_WYSE_AUTO_MUTE,
        ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
        ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+       ALC256_FIXUP_ASUS_HEADSET_MIC,
        ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
        ALC299_FIXUP_PREDATOR_SPK,
+       ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
+       ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6694,6 +6694,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
        },
+       [ALC256_FIXUP_ASUS_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x03a11020 }, /* headset mic with jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+       },
        [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -6710,6 +6719,26 @@ static const struct hda_fixup alc269_fixups[] = {
                        { }
                }
        },
+       [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
+                       { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+       },
+       [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x04a11040 },
+                       { 0x21, 0x04211020 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6851,6 +6880,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6867,6 +6898,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
        SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -6944,6 +6977,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -6969,6 +7003,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
        SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MBXP", ALC256_FIXUP_HUAWEI_MBXP_PINS),
        SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+       SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
 
 #if 0
        /* Below is a quirk table taken from the old code.
@@ -7133,6 +7168,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
        {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
        {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
+       {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
        {}
 };
 #define ALC225_STANDARD_PINS \
@@ -7518,9 +7554,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60130},
                {0x17, 0x90170110},
                {0x21, 0x03211020}),
-       SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+       SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
                {0x14, 0x90170110},
                {0x21, 0x04211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+               {0x14, 0x90170110},
+               {0x21, 0x04211030}),
        SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC295_STANDARD_PINS,
                {0x17, 0x21014020},
@@ -7725,6 +7764,7 @@ static int patch_alc269(struct hda_codec *codec)
        case 0x10ec0700:
        case 0x10ec0701:
        case 0x10ec0703:
+       case 0x10ec0711:
                spec->codec_variant = ALC269_TYPE_ALC700;
                spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
@@ -8654,6 +8694,11 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
                {0x18, 0x01a19030},
                {0x1a, 0x01813040},
                {0x21, 0x01014020}),
+       SND_HDA_PIN_QUIRK(0x10ec0867, 0x1028, "Dell", ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
+               {0x16, 0x01813030},
+               {0x17, 0x02211010},
+               {0x18, 0x01a19040},
+               {0x21, 0x01014020}),
        SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
                {0x14, 0x01014010},
                {0x18, 0x01a19020},
@@ -8798,6 +8843,7 @@ static int patch_alc680(struct hda_codec *codec)
 static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
@@ -8850,6 +8896,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
        HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
index e97d12d578b00c1a287209e24fed9d29688d777e..9ebe77c3784a8d5e44d4da1d26b58ee309b71e71 100644 (file)
@@ -46,7 +46,10 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9600, 50, 1);
 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_max_gain_tlv, -650, 150, 0);
 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_min_gain_tlv, -1200, 150, 0);
 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_target_tlv, -1650, 150, 0);
-static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(hpmixer_gain_tlv, -1200, 150, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpmixer_gain_tlv,
+       0, 4, TLV_DB_SCALE_ITEM(-1200, 150, 0),
+       8, 11, TLV_DB_SCALE_ITEM(-450, 150, 0),
+);
 
 static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
        0, 0, TLV_DB_SCALE_ITEM(-350, 0, 0),
@@ -84,7 +87,7 @@ static const struct snd_kcontrol_new es8316_snd_controls[] = {
        SOC_DOUBLE_TLV("Headphone Playback Volume", ES8316_CPHP_ICAL_VOL,
                       4, 0, 3, 1, hpout_vol_tlv),
        SOC_DOUBLE_TLV("Headphone Mixer Volume", ES8316_HPMIX_VOL,
-                      0, 4, 7, 0, hpmixer_gain_tlv),
+                      0, 4, 11, 0, hpmixer_gain_tlv),
 
        SOC_ENUM("Playback Polarity", dacpol),
        SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8316_DAC_VOLL,
index 63487240b61e40c1c011e6e0ca6a290a10466083..098196610542a08ba757944cc76f6d3318c97de4 100644 (file)
@@ -1854,6 +1854,12 @@ static void hdmi_codec_remove(struct snd_soc_component *component)
 {
        struct hdac_hdmi_priv *hdmi = snd_soc_component_get_drvdata(component);
        struct hdac_device *hdev = hdmi->hdev;
+       int ret;
+
+       ret = snd_hdac_acomp_register_notifier(hdev->bus, NULL);
+       if (ret < 0)
+               dev_err(&hdev->dev, "notifier unregister failed: err: %d\n",
+                               ret);
 
        pm_runtime_disable(&hdev->dev);
 }
index 122afb7c39a84edfb6b7bc296c367c6ddbf7fe70..0e04c0ed3ce0b6f68b5189ad569590c363135f8e 100644 (file)
@@ -24,8 +24,7 @@
 
 #define PCM3168A_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
                         SNDRV_PCM_FMTBIT_S24_3LE | \
-                        SNDRV_PCM_FMTBIT_S24_LE | \
-                        SNDRV_PCM_FMTBIT_S32_LE)
+                        SNDRV_PCM_FMTBIT_S24_LE)
 
 #define PCM3168A_FMT_I2S               0x0
 #define PCM3168A_FMT_LEFT_J            0x1
index 60764f6201b199a4d7f76fc17c2d8905d5d8d886..64a52d495b1f5483e475a51f41c40a13dab9c0da 100644 (file)
 #define SGTL5000_DAP_REG_OFFSET        0x0100
 #define SGTL5000_MAX_REG_OFFSET        0x013A
 
+/* Delay for the VAG ramp up */
+#define SGTL5000_VAG_POWERUP_DELAY 500 /* ms */
+/* Delay for the VAG ramp down */
+#define SGTL5000_VAG_POWERDOWN_DELAY 500 /* ms */
+
+#define SGTL5000_OUTPUTS_MUTE (SGTL5000_HP_MUTE | SGTL5000_LINE_OUT_MUTE)
+
 /* default value of sgtl5000 registers */
 static const struct reg_default sgtl5000_reg_defaults[] = {
        { SGTL5000_CHIP_DIG_POWER,              0x0000 },
@@ -116,6 +123,13 @@ enum  {
        I2S_LRCLK_STRENGTH_HIGH,
 };
 
+enum {
+       HP_POWER_EVENT,
+       DAC_POWER_EVENT,
+       ADC_POWER_EVENT,
+       LAST_POWER_EVENT = ADC_POWER_EVENT
+};
+
 /* sgtl5000 private structure in codec */
 struct sgtl5000_priv {
        int sysclk;     /* sysclk rate */
@@ -129,8 +143,109 @@ struct sgtl5000_priv {
        u8 micbias_resistor;
        u8 micbias_voltage;
        u8 lrclk_strength;
+       u16 mute_state[LAST_POWER_EVENT + 1];
 };
 
+static inline int hp_sel_input(struct snd_soc_component *component)
+{
+       return (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_CTRL) &
+               SGTL5000_HP_SEL_MASK) >> SGTL5000_HP_SEL_SHIFT;
+}
+
+static inline u16 mute_output(struct snd_soc_component *component,
+                             u16 mute_mask)
+{
+       u16 mute_reg = snd_soc_component_read32(component,
+                                             SGTL5000_CHIP_ANA_CTRL);
+
+       snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
+                           mute_mask, mute_mask);
+       return mute_reg;
+}
+
+static inline void restore_output(struct snd_soc_component *component,
+                                 u16 mute_mask, u16 mute_reg)
+{
+       snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
+               mute_mask, mute_reg);
+}
+
+static void vag_power_on(struct snd_soc_component *component, u32 source)
+{
+       if (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
+           SGTL5000_VAG_POWERUP)
+               return;
+
+       snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
+                           SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
+
+       /* When VAG powering on to get local loop from Line-In, the sleep
+        * is required to avoid loud pop.
+        */
+       if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN &&
+           source == HP_POWER_EVENT)
+               msleep(SGTL5000_VAG_POWERUP_DELAY);
+}
+
+static int vag_power_consumers(struct snd_soc_component *component,
+                              u16 ana_pwr_reg, u32 source)
+{
+       int consumers = 0;
+
+       /* count dac/adc consumers unconditional */
+       if (ana_pwr_reg & SGTL5000_DAC_POWERUP)
+               consumers++;
+       if (ana_pwr_reg & SGTL5000_ADC_POWERUP)
+               consumers++;
+
+       /*
+        * If the event comes from HP and Line-In is selected,
+        * current action is 'DAC to be powered down'.
+        * As HP_POWERUP is not set when HP muxed to line-in,
+        * we need to keep VAG power ON.
+        */
+       if (source == HP_POWER_EVENT) {
+               if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN)
+                       consumers++;
+       } else {
+               if (ana_pwr_reg & SGTL5000_HP_POWERUP)
+                       consumers++;
+       }
+
+       return consumers;
+}
+
+static void vag_power_off(struct snd_soc_component *component, u32 source)
+{
+       u16 ana_pwr = snd_soc_component_read32(component,
+                                            SGTL5000_CHIP_ANA_POWER);
+
+       if (!(ana_pwr & SGTL5000_VAG_POWERUP))
+               return;
+
+       /*
+        * This function calls when any of VAG power consumers is disappearing.
+        * Thus, if there is more than one consumer at the moment, as minimum
+        * one consumer will definitely stay after the end of the current
+        * event.
+        * Don't clear VAG_POWERUP if 2 or more consumers of VAG present:
+        * - LINE_IN (for HP events) / HP (for DAC/ADC events)
+        * - DAC
+        * - ADC
+        * (the current consumer is disappearing right now)
+        */
+       if (vag_power_consumers(component, ana_pwr, source) >= 2)
+               return;
+
+       snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
+               SGTL5000_VAG_POWERUP, 0);
+       /* In power down case, we need wait 400-1000 ms
+        * when VAG fully ramped down.
+        * As longer we wait, as smaller pop we've got.
+        */
+       msleep(SGTL5000_VAG_POWERDOWN_DELAY);
+}
+
 /*
  * mic_bias power on/off share the same register bits with
  * output impedance of mic bias, when power on mic bias, we
@@ -162,36 +277,46 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
-/*
- * As manual described, ADC/DAC only works when VAG powerup,
- * So enabled VAG before ADC/DAC up.
- * In power down case, we need wait 400ms when vag fully ramped down.
- */
-static int power_vag_event(struct snd_soc_dapm_widget *w,
-       struct snd_kcontrol *kcontrol, int event)
+static int vag_and_mute_control(struct snd_soc_component *component,
+                                int event, int event_source)
 {
-       struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
-       const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
+       static const u16 mute_mask[] = {
+               /*
+                * Mask for HP_POWER_EVENT.
+                * Muxing Headphones have to be wrapped with mute/unmute
+                * headphones only.
+                */
+               SGTL5000_HP_MUTE,
+               /*
+                * Masks for DAC_POWER_EVENT/ADC_POWER_EVENT.
+                * Muxing DAC or ADC block have to wrapped with mute/unmute
+                * both headphones and line-out.
+                */
+               SGTL5000_OUTPUTS_MUTE,
+               SGTL5000_OUTPUTS_MUTE
+       };
+
+       struct sgtl5000_priv *sgtl5000 =
+               snd_soc_component_get_drvdata(component);
 
        switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               sgtl5000->mute_state[event_source] =
+                       mute_output(component, mute_mask[event_source]);
+               break;
        case SND_SOC_DAPM_POST_PMU:
-               snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
-                       SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
-               msleep(400);
+               vag_power_on(component, event_source);
+               restore_output(component, mute_mask[event_source],
+                              sgtl5000->mute_state[event_source]);
                break;
-
        case SND_SOC_DAPM_PRE_PMD:
-               /*
-                * Don't clear VAG_POWERUP, when both DAC and ADC are
-                * operational to prevent inadvertently starving the
-                * other one of them.
-                */
-               if ((snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
-                               mask) != mask) {
-                       snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
-                               SGTL5000_VAG_POWERUP, 0);
-                       msleep(400);
-               }
+               sgtl5000->mute_state[event_source] =
+                       mute_output(component, mute_mask[event_source]);
+               vag_power_off(component, event_source);
+               break;
+       case SND_SOC_DAPM_POST_PMD:
+               restore_output(component, mute_mask[event_source],
+                              sgtl5000->mute_state[event_source]);
                break;
        default:
                break;
@@ -200,6 +325,41 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
+/*
+ * Mute Headphone when power it up/down.
+ * Control VAG power on HP power path.
+ */
+static int headphone_pga_event(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_component *component =
+               snd_soc_dapm_to_component(w->dapm);
+
+       return vag_and_mute_control(component, event, HP_POWER_EVENT);
+}
+
+/* As manual describes, ADC/DAC powering up/down requires
+ * to mute outputs to avoid pops.
+ * Control VAG power on ADC/DAC power path.
+ */
+static int adc_updown_depop(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_component *component =
+               snd_soc_dapm_to_component(w->dapm);
+
+       return vag_and_mute_control(component, event, ADC_POWER_EVENT);
+}
+
+static int dac_updown_depop(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_component *component =
+               snd_soc_dapm_to_component(w->dapm);
+
+       return vag_and_mute_control(component, event, DAC_POWER_EVENT);
+}
+
 /* input sources for ADC */
 static const char *adc_mux_text[] = {
        "MIC_IN", "LINE_IN"
@@ -272,7 +432,10 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
                            mic_bias_event,
                            SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
-       SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
+       SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
+                          headphone_pga_event,
+                          SND_SOC_DAPM_PRE_POST_PMU |
+                          SND_SOC_DAPM_PRE_POST_PMD),
        SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
 
        SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
@@ -293,11 +456,12 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
                                0, SGTL5000_CHIP_DIG_POWER,
                                1, 0),
 
-       SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
-       SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
-
-       SND_SOC_DAPM_PRE("VAG_POWER_PRE", power_vag_event),
-       SND_SOC_DAPM_POST("VAG_POWER_POST", power_vag_event),
+       SND_SOC_DAPM_ADC_E("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0,
+                          adc_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
+                          SND_SOC_DAPM_PRE_POST_PMD),
+       SND_SOC_DAPM_DAC_E("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0,
+                          dac_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
+                          SND_SOC_DAPM_PRE_POST_PMD),
 };
 
 /* routes for sgtl5000 */
@@ -1165,12 +1329,17 @@ static int sgtl5000_set_power_regs(struct snd_soc_component *component)
                                        SGTL5000_INT_OSC_EN);
                /* Enable VDDC charge pump */
                ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
-       } else if (vddio >= 3100 && vdda >= 3100) {
+       } else {
                ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
-               /* VDDC use VDDIO rail */
-               lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
-               lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
-                           SGTL5000_VDDC_MAN_ASSN_SHIFT;
+               /*
+                * if vddio == vdda the source of charge pump should be
+                * assigned manually to VDDIO
+                */
+               if (vddio == vdda) {
+                       lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
+                       lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
+                                   SGTL5000_VDDC_MAN_ASSN_SHIFT;
+               }
        }
 
        snd_soc_component_write(component, SGTL5000_CHIP_LINREG_CTRL, lreg_ctrl);
@@ -1280,6 +1449,7 @@ static int sgtl5000_probe(struct snd_soc_component *component)
        int ret;
        u16 reg;
        struct sgtl5000_priv *sgtl5000 = snd_soc_component_get_drvdata(component);
+       unsigned int zcd_mask = SGTL5000_HP_ZCD_EN | SGTL5000_ADC_ZCD_EN;
 
        /* power up sgtl5000 */
        ret = sgtl5000_set_power_regs(component);
@@ -1305,9 +1475,8 @@ static int sgtl5000_probe(struct snd_soc_component *component)
        reg = ((sgtl5000->lrclk_strength) << SGTL5000_PAD_I2S_LRCLK_SHIFT | 0x5f);
        snd_soc_component_write(component, SGTL5000_CHIP_PAD_STRENGTH, reg);
 
-       snd_soc_component_write(component, SGTL5000_CHIP_ANA_CTRL,
-                       SGTL5000_HP_ZCD_EN |
-                       SGTL5000_ADC_ZCD_EN);
+       snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
+               zcd_mask, zcd_mask);
 
        snd_soc_component_update_bits(component, SGTL5000_CHIP_MIC_CTRL,
                        SGTL5000_BIAS_R_MASK,
index 608ad49ad978760cbf9204d4018a2d6a89addcad..e8a3e720ef5dc5a37be2a6ff5e37184de7988c44 100644 (file)
@@ -1506,7 +1506,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
        aic31xx->gpio_reset = devm_gpiod_get_optional(aic31xx->dev, "reset",
                                                      GPIOD_OUT_LOW);
        if (IS_ERR(aic31xx->gpio_reset)) {
-               dev_err(aic31xx->dev, "not able to acquire gpio\n");
+               if (PTR_ERR(aic31xx->gpio_reset) != -EPROBE_DEFER)
+                       dev_err(aic31xx->dev, "not able to acquire gpio\n");
                return PTR_ERR(aic31xx->gpio_reset);
        }
 
@@ -1517,7 +1518,9 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
                                      ARRAY_SIZE(aic31xx->supplies),
                                      aic31xx->supplies);
        if (ret) {
-               dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(aic31xx->dev,
+                               "Failed to request supplies: %d\n", ret);
                return ret;
        }
 
index 0a648229e643052d48fc3be9c716e0da5401311c..d83be26d64467d947b4f8d23cd08002708cc7168 100644 (file)
@@ -799,15 +799,6 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
        u32 wl = SSI_SxCCR_WL(sample_size);
        int ret;
 
-       /*
-        * SSI is properly configured if it is enabled and running in
-        * the synchronous mode; Note that AC97 mode is an exception
-        * that should set separate configurations for STCCR and SRCCR
-        * despite running in the synchronous mode.
-        */
-       if (ssi->streams && ssi->synchronous)
-               return 0;
-
        if (fsl_ssi_is_i2s_master(ssi)) {
                ret = fsl_ssi_set_bclk(substream, dai, hw_params);
                if (ret)
@@ -823,6 +814,15 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
                }
        }
 
+       /*
+        * SSI is properly configured if it is enabled and running in
+        * the synchronous mode; Note that AC97 mode is an exception
+        * that should set separate configurations for STCCR and SRCCR
+        * despite running in the synchronous mode.
+        */
+       if (ssi->streams && ssi->synchronous)
+               return 0;
+
        if (!fsl_ssi_is_ac97(ssi)) {
                /*
                 * Keep the ssi->i2s_net intact while having a local variable
@@ -1439,8 +1439,10 @@ static int fsl_ssi_probe_from_dt(struct fsl_ssi *ssi)
         * different name to register the device.
         */
        if (!ssi->card_name[0] && of_get_property(np, "codec-handle", NULL)) {
-               sprop = of_get_property(of_find_node_by_path("/"),
-                                       "compatible", NULL);
+               struct device_node *root = of_find_node_by_path("/");
+
+               sprop = of_get_property(root, "compatible", NULL);
+               of_node_put(root);
                /* Strip "fsl," in the compatible name if applicable */
                p = strrchr(sprop, ',');
                if (p)
index 08a5152e635ac8b8c38ae9214173c66cd8bae445..e7620017e72562a4e7bcb64a392a9e5fac4917b1 100644 (file)
@@ -42,6 +42,7 @@ struct cht_mc_private {
        struct clk *mclk;
        struct snd_soc_jack jack;
        bool ts3a227e_present;
+       int quirks;
 };
 
 static int platform_clock_control(struct snd_soc_dapm_widget *w,
@@ -53,6 +54,10 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
        struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
        int ret;
 
+       /* See the comment in snd_cht_mc_probe() */
+       if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+               return 0;
+
        codec_dai = snd_soc_card_get_codec_dai(card, CHT_CODEC_DAI);
        if (!codec_dai) {
                dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n");
@@ -222,6 +227,10 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
                        "jack detection gpios not added, error %d\n", ret);
        }
 
+       /* See the comment in snd_cht_mc_probe() */
+       if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+               return 0;
+
        /*
         * The firmware might enable the clock at
         * boot (this information may or may not
@@ -420,16 +429,15 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        int ret_val = 0;
        struct cht_mc_private *drv;
        const char *mclk_name;
-       int quirks = 0;
-
-       dmi_id = dmi_first_match(cht_max98090_quirk_table);
-       if (dmi_id)
-               quirks = (unsigned long)dmi_id->driver_data;
 
        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;
 
+       dmi_id = dmi_first_match(cht_max98090_quirk_table);
+       if (dmi_id)
+               drv->quirks = (unsigned long)dmi_id->driver_data;
+
        drv->ts3a227e_present = acpi_dev_found("104C227E");
        if (!drv->ts3a227e_present) {
                /* no need probe TI jack detection chip */
@@ -446,7 +454,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        snd_soc_card_cht.dev = &pdev->dev;
        snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
 
-       if (quirks & QUIRK_PMC_PLT_CLK_0)
+       if (drv->quirks & QUIRK_PMC_PLT_CLK_0)
                mclk_name = "pmc_plt_clk_0";
        else
                mclk_name = "pmc_plt_clk_3";
@@ -459,6 +467,21 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
                return PTR_ERR(drv->mclk);
        }
 
+       /*
+        * Boards which have the MAX98090's clk connected to clk_0 do not seem
+        * to like it if we muck with the clock. If we disable the clock when
+        * it is unused we get "max98090 i2c-193C9890:00: PLL unlocked" errors
+        * and the PLL never seems to lock again.
+        * So for these boards we enable it here once and leave it at that.
+        */
+       if (drv->quirks & QUIRK_PMC_PLT_CLK_0) {
+               ret_val = clk_prepare_enable(drv->mclk);
+               if (ret_val < 0) {
+                       dev_err(&pdev->dev, "MCLK enable error: %d\n", ret_val);
+                       return ret_val;
+               }
+       }
+
        ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
        if (ret_val) {
                dev_err(&pdev->dev,
@@ -469,11 +492,23 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        return ret_val;
 }
 
+static int snd_cht_mc_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+       struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+       if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+               clk_disable_unprepare(ctx->mclk);
+
+       return 0;
+}
+
 static struct platform_driver snd_cht_mc_driver = {
        .driver = {
                .name = "cht-bsw-max98090",
        },
        .probe = snd_cht_mc_probe,
+       .remove = snd_cht_mc_remove,
 };
 
 module_platform_driver(snd_cht_mc_driver)
index dcff13802c007ece2a87b6bd695c6362613dc67b..771734fd770767c0a468fe69663d348db3a92347 100644 (file)
@@ -231,6 +231,8 @@ struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
 
        if (ipc->ops.reply_msg_match != NULL)
                header = ipc->ops.reply_msg_match(header, &mask);
+       else
+               mask = (u64)-1;
 
        if (list_empty(&ipc->rx_list)) {
                dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
index 5d7ac2ee7a3c7edc6f6b6dc48b65395f8aedfbb2..faf1cba57abbbf7c0cb400acbbb5dd6251259a9a 100644 (file)
@@ -196,7 +196,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
        memset(d->fw_read_buff, 0, FW_REG_BUF);
 
        if (w0_stat_sz > 0)
-               __iowrite32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
+               __ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
 
        for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
                ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
index 01a050cf877537ddab712863e50fe1b523db98de..3cef2ebfd8be563c3d7ad8a961959edb839b63ee 100644 (file)
@@ -231,7 +231,7 @@ int skl_nhlt_update_topology_bin(struct skl *skl)
        struct hdac_bus *bus = skl_to_bus(skl);
        struct device *dev = bus->dev;
 
-       dev_dbg(dev, "oem_id %.6s, oem_table_id %8s oem_revision %d\n",
+       dev_dbg(dev, "oem_id %.6s, oem_table_id %.8s oem_revision %d\n",
                nhlt->header.oem_id, nhlt->header.oem_table_id,
                nhlt->header.oem_revision);
 
index e578b6f40a07b778ccd16486d7dc9882177e25d5..5774ce0916d403a4fb59116a05c661dd7343198b 100644 (file)
@@ -40,7 +40,7 @@ struct axg_tdm_iface {
 
 static inline bool axg_tdm_lrclk_invert(unsigned int fmt)
 {
-       return (fmt & SND_SOC_DAIFMT_I2S) ^
+       return ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S) ^
                !!(fmt & (SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_NB_IF));
 }
 
index 60d43d53a8f5e8fb00166085531eba66aaef1b79..11399f81c92f973b81cd6c2f037bda7143318d30 100644 (file)
@@ -329,7 +329,6 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
                val |= I2S_CHN_4;
                break;
        case 2:
-       case 1:
                val |= I2S_CHN_2;
                break;
        default:
@@ -462,7 +461,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
        },
        .capture = {
                .stream_name = "Capture",
-               .channels_min = 1,
+               .channels_min = 2,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_8000_192000,
                .formats = (SNDRV_PCM_FMTBIT_S8 |
@@ -662,7 +661,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
        }
 
        if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
-               if (val >= 1 && val <= 8)
+               if (val >= 2 && val <= 8)
                        soc_dai->capture.channels_max = val;
        }
 
index 051f96405346b2f495c94ebc409656c98634a3c8..549a137878a657dcfe6505d7e8c5693bc75fdc28 100644 (file)
@@ -30,6 +30,7 @@ struct rsnd_adg {
        struct clk *clkout[CLKOUTMAX];
        struct clk_onecell_data onecell;
        struct rsnd_mod mod;
+       int clk_rate[CLKMAX];
        u32 flags;
        u32 ckr;
        u32 rbga;
@@ -113,9 +114,9 @@ static void __rsnd_adg_get_timesel_ratio(struct rsnd_priv *priv,
        unsigned int val, en;
        unsigned int min, diff;
        unsigned int sel_rate[] = {
-               clk_get_rate(adg->clk[CLKA]),   /* 0000: CLKA */
-               clk_get_rate(adg->clk[CLKB]),   /* 0001: CLKB */
-               clk_get_rate(adg->clk[CLKC]),   /* 0010: CLKC */
+               adg->clk_rate[CLKA],    /* 0000: CLKA */
+               adg->clk_rate[CLKB],    /* 0001: CLKB */
+               adg->clk_rate[CLKC],    /* 0010: CLKC */
                adg->rbga_rate_for_441khz,      /* 0011: RBGA */
                adg->rbgb_rate_for_48khz,       /* 0100: RBGB */
        };
@@ -331,7 +332,7 @@ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
         * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
         */
        for_each_rsnd_clk(clk, adg, i) {
-               if (rate == clk_get_rate(clk))
+               if (rate == adg->clk_rate[i])
                        return sel_table[i];
        }
 
@@ -398,10 +399,18 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
 
        for_each_rsnd_clk(clk, adg, i) {
                ret = 0;
-               if (enable)
+               if (enable) {
                        ret = clk_prepare_enable(clk);
-               else
+
+                       /*
+                        * We shouldn't use clk_get_rate() under
+                        * atomic context. Let's keep it when
+                        * rsnd_adg_clk_enable() was called
+                        */
+                       adg->clk_rate[i] = clk_get_rate(adg->clk[i]);
+               } else {
                        clk_disable_unprepare(clk);
+               }
 
                if (ret < 0)
                        dev_warn(dev, "can't use clk %d\n", i);
index d23c2bbff0cf4e666dcf5d3a4b26fccaf22a05ba..15a31820df1696378730b707267d6492acfe5c75 100644 (file)
@@ -674,6 +674,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        }
 
        /* set format */
+       rdai->bit_clk_inv = 0;
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
                rdai->sys_delay = 0;
index 2257b1b0151c41b9acd6987c53b1a5bc15649fe9..4ce57510b6236cf6398ed0c5829c651e2c8b24f6 100644 (file)
@@ -1145,8 +1145,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
                list_add_tail(&widget->work_list, list);
 
        if (custom_stop_condition && custom_stop_condition(widget, dir)) {
-               widget->endpoints[dir] = 1;
-               return widget->endpoints[dir];
+               list = NULL;
+               custom_stop_condition = NULL;
        }
 
        if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
@@ -1183,8 +1183,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
  *
  * Optionally, can be supplied with a function acting as a stopping condition.
  * This function takes the dapm widget currently being examined and the walk
- * direction as an arguments, it should return true if the walk should be
- * stopped and false otherwise.
+ * direction as an arguments, it should return true if widgets from that point
+ * in the graph onwards should not be added to the widget list.
  */
 static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
        struct list_head *list,
@@ -2139,23 +2139,25 @@ void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
 {
        struct dentry *d;
 
-       if (!parent)
+       if (!parent || IS_ERR(parent))
                return;
 
        dapm->debugfs_dapm = debugfs_create_dir("dapm", parent);
 
-       if (!dapm->debugfs_dapm) {
+       if (IS_ERR(dapm->debugfs_dapm)) {
                dev_warn(dapm->dev,
-                      "ASoC: Failed to create DAPM debugfs directory\n");
+                        "ASoC: Failed to create DAPM debugfs directory %ld\n",
+                        PTR_ERR(dapm->debugfs_dapm));
                return;
        }
 
        d = debugfs_create_file("bias_level", 0444,
                                dapm->debugfs_dapm, dapm,
                                &dapm_bias_fops);
-       if (!d)
+       if (IS_ERR(d))
                dev_warn(dapm->dev,
-                        "ASoC: Failed to create bias level debugfs file\n");
+                        "ASoC: Failed to create bias level debugfs file: %ld\n",
+                        PTR_ERR(d));
 }
 
 static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
@@ -2169,10 +2171,10 @@ static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
        d = debugfs_create_file(w->name, 0444,
                                dapm->debugfs_dapm, w,
                                &dapm_widget_power_fops);
-       if (!d)
+       if (IS_ERR(d))
                dev_warn(w->dapm->dev,
-                       "ASoC: Failed to create %s debugfs file\n",
-                       w->name);
+                        "ASoC: Failed to create %s debugfs file: %ld\n",
+                        w->name, PTR_ERR(d));
 }
 
 static void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
index 30e791a533528042ef5ab42f79064eced53fd681..232df04ca58666a99894b9b75bc3c4daeeb89bdf 100644 (file)
@@ -313,6 +313,12 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
 
                if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
                        pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;
+
+               if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
+                       strncpy(rtd->pcm->streams[i].pcm->name,
+                               rtd->pcm->streams[i].pcm->id,
+                               sizeof(rtd->pcm->streams[i].pcm->name));
+               }
        }
 
        return 0;
index edb7c5290f94369d3a3c2ea200fdb3bff25b6c1b..3cfd7fc8bc8a54ab04dc06b186c7abdc12321156 100644 (file)
@@ -1093,7 +1093,53 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
        return 0;
 }
 
-static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int soc_pcm_trigger_start(struct snd_pcm_substream *substream, int cmd)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_component *component;
+       struct snd_soc_rtdcom_list *rtdcom;
+       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+       struct snd_soc_dai *codec_dai;
+       int i, ret;
+
+       if (rtd->dai_link->ops->trigger) {
+               ret = rtd->dai_link->ops->trigger(substream, cmd);
+               if (ret < 0)
+                       return ret;
+       }
+
+       for_each_rtdcom(rtd, rtdcom) {
+               component = rtdcom->component;
+
+               if (!component->driver->ops ||
+                   !component->driver->ops->trigger)
+                       continue;
+
+               ret = component->driver->ops->trigger(substream, cmd);
+               if (ret < 0)
+                       return ret;
+       }
+
+       if (cpu_dai->driver->ops->trigger) {
+               ret = cpu_dai->driver->ops->trigger(substream, cmd, cpu_dai);
+               if (ret < 0)
+                       return ret;
+       }
+
+       for (i = 0; i < rtd->num_codecs; i++) {
+               codec_dai = rtd->codec_dais[i];
+               if (codec_dai->driver->ops->trigger) {
+                       ret = codec_dai->driver->ops->trigger(substream,
+                                                             cmd, codec_dai);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int soc_pcm_trigger_stop(struct snd_pcm_substream *substream, int cmd)
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct snd_soc_component *component;
@@ -1112,6 +1158,12 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                }
        }
 
+       if (cpu_dai->driver->ops->trigger) {
+               ret = cpu_dai->driver->ops->trigger(substream, cmd, cpu_dai);
+               if (ret < 0)
+                       return ret;
+       }
+
        for_each_rtdcom(rtd, rtdcom) {
                component = rtdcom->component;
 
@@ -1124,12 +1176,6 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                        return ret;
        }
 
-       if (cpu_dai->driver->ops->trigger) {
-               ret = cpu_dai->driver->ops->trigger(substream, cmd, cpu_dai);
-               if (ret < 0)
-                       return ret;
-       }
-
        if (rtd->dai_link->ops->trigger) {
                ret = rtd->dai_link->ops->trigger(substream, cmd);
                if (ret < 0)
@@ -1139,6 +1185,28 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        return 0;
 }
 
+static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+       int ret;
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+       case SNDRV_PCM_TRIGGER_RESUME:
+       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+               ret = soc_pcm_trigger_start(substream, cmd);
+               break;
+       case SNDRV_PCM_TRIGGER_STOP:
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               ret = soc_pcm_trigger_stop(substream, cmd);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
 static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
                                   int cmd)
 {
index 6173dd86c62ce42de928407b9fa91066691cd78d..18cf8404d27ca351c7225b5b1e210d07c28576db 100644 (file)
@@ -223,10 +223,11 @@ static const struct sun4i_i2s_clk_div sun4i_i2s_mclk_div[] = {
 };
 
 static int sun4i_i2s_get_bclk_div(struct sun4i_i2s *i2s,
-                                 unsigned int oversample_rate,
+                                 unsigned long parent_rate,
+                                 unsigned int sampling_rate,
                                  unsigned int word_size)
 {
-       int div = oversample_rate / word_size / 2;
+       int div = parent_rate / sampling_rate / word_size / 2;
        int i;
 
        for (i = 0; i < ARRAY_SIZE(sun4i_i2s_bclk_div); i++) {
@@ -316,8 +317,8 @@ static int sun4i_i2s_set_clk_rate(struct snd_soc_dai *dai,
                return -EINVAL;
        }
 
-       bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
-                                         word_size);
+       bclk_div = sun4i_i2s_get_bclk_div(i2s, i2s->mclk_freq,
+                                         rate, word_size);
        if (bclk_div < 0) {
                dev_err(dai->dev, "Unsupported BCLK divider: %d\n", bclk_div);
                return -EINVAL;
index 0fcc1c9c3ef839e8d3aec3158e624da7fbec1818..bd3d60235e1b99083968a1754a1fba44243b3e58 100644 (file)
@@ -21,6 +21,7 @@
 #define J721E_CLK_PARENT_44100 1
 
 #define J721E_MAX_CLK_HSDIV    128
+#define PCM1368A_MAX_SYSCLK    36864000
 
 #define J721E_DAI_FMT          (SND_SOC_DAIFMT_RIGHT_J | \
                                 SND_SOC_DAIFMT_NB_NF |   \
@@ -42,6 +43,7 @@ struct j721e_priv {
        struct snd_soc_card cpb_card;
        struct snd_soc_dai_link cpb_dai_links[J721E_CPD_DAI_CNT];
        struct snd_soc_codec_conf codec_conf;
+       struct snd_interval rate_range;
 
        struct j721e_audio_clocks audio_refclk2;
        struct j721e_audio_clocks cpb_mcasp;
@@ -115,6 +117,14 @@ static int j721e_configure_refclk(struct j721e_priv *priv, unsigned int rate)
        return ret;
 }
 
+static int j721e_rule_rate(struct snd_pcm_hw_params *params,
+                          struct snd_pcm_hw_rule *rule)
+{
+       struct snd_interval *t = rule->private;
+
+       return snd_interval_refine(hw_param_interval(params, rule->var), t);
+}
+
 static int j721e_audio_startup(struct snd_pcm_substream *substream)
 {
        struct  snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -128,6 +138,11 @@ static int j721e_audio_startup(struct snd_pcm_substream *substream)
                ret = snd_pcm_hw_constraint_single(substream->runtime,
                                                   SNDRV_PCM_HW_PARAM_RATE,
                                                   priv->rate);
+       else
+               ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+                                         SNDRV_PCM_HW_PARAM_RATE,
+                                         j721e_rule_rate, &priv->rate_range,
+                                         SNDRV_PCM_HW_PARAM_RATE, -1);
 
        mutex_unlock(&priv->mutex);
 
@@ -326,6 +341,25 @@ static int j721e_get_clocks(struct platform_device *pdev,
        return 0;
 }
 
+static void j721e_calculate_rate_range(struct j721e_priv *priv)
+{
+       unsigned int min_rate, max_rate, pll_rate;
+
+       pll_rate = priv->pll_rates[J721E_CLK_PARENT_44100];
+       min_rate = pll_rate / J721E_MAX_CLK_HSDIV;
+       min_rate /= ratios_for_pcm3168a[ARRAY_SIZE(ratios_for_pcm3168a) - 1];
+
+       pll_rate = priv->pll_rates[J721E_CLK_PARENT_48000];
+       if (pll_rate > PCM1368A_MAX_SYSCLK)
+               pll_rate = PCM1368A_MAX_SYSCLK;
+
+       max_rate = pll_rate / ratios_for_pcm3168a[0];
+
+       snd_interval_any(&priv->rate_range);
+       priv->rate_range.min = min_rate;
+       priv->rate_range.max = max_rate;
+}
+
 static int j721e_soc_probe(struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
@@ -422,6 +456,8 @@ static int j721e_soc_probe(struct platform_device *pdev)
        card->codec_conf = &priv->codec_conf;
        card->num_configs = 1;
 
+       j721e_calculate_rate_range(priv);
+
        snd_soc_card_set_drvdata(card, priv);
 
        mutex_init(&priv->mutex);
index ee90e6c3937ce31e0dfb025a1c627b695c35a0a3..2ae582a99b636e3637dd6f7c2200403627fadd23 100644 (file)
@@ -424,8 +424,11 @@ int uniphier_aio_dai_suspend(struct snd_soc_dai *dai)
 {
        struct uniphier_aio *aio = uniphier_priv(dai);
 
-       reset_control_assert(aio->chip->rst);
-       clk_disable_unprepare(aio->chip->clk);
+       aio->chip->num_wup_aios--;
+       if (!aio->chip->num_wup_aios) {
+               reset_control_assert(aio->chip->rst);
+               clk_disable_unprepare(aio->chip->clk);
+       }
 
        return 0;
 }
@@ -439,13 +442,15 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
        if (!aio->chip->active)
                return 0;
 
-       ret = clk_prepare_enable(aio->chip->clk);
-       if (ret)
-               return ret;
+       if (!aio->chip->num_wup_aios) {
+               ret = clk_prepare_enable(aio->chip->clk);
+               if (ret)
+                       return ret;
 
-       ret = reset_control_deassert(aio->chip->rst);
-       if (ret)
-               goto err_out_clock;
+               ret = reset_control_deassert(aio->chip->rst);
+               if (ret)
+                       goto err_out_clock;
+       }
 
        aio_iecout_set_enable(aio->chip, true);
        aio_chip_init(aio->chip);
@@ -458,7 +463,7 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
 
                ret = aio_init(sub);
                if (ret)
-                       goto err_out_clock;
+                       goto err_out_reset;
 
                if (!sub->setting)
                        continue;
@@ -466,11 +471,16 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
                aio_port_reset(sub);
                aio_src_reset(sub);
        }
+       aio->chip->num_wup_aios++;
 
        return 0;
 
+err_out_reset:
+       if (!aio->chip->num_wup_aios)
+               reset_control_assert(aio->chip->rst);
 err_out_clock:
-       clk_disable_unprepare(aio->chip->clk);
+       if (!aio->chip->num_wup_aios)
+               clk_disable_unprepare(aio->chip->clk);
 
        return ret;
 }
@@ -619,6 +629,7 @@ int uniphier_aio_probe(struct platform_device *pdev)
                return PTR_ERR(chip->rst);
 
        chip->num_aios = chip->chip_spec->num_dais;
+       chip->num_wup_aios = chip->num_aios;
        chip->aios = devm_kcalloc(dev,
                                  chip->num_aios, sizeof(struct uniphier_aio),
                                  GFP_KERNEL);
index ca6ccbae0ee8c2f6f89437ed1d9c18c8986c6607..a7ff7e556429ba15345f54bfc94f71a1ef549f66 100644 (file)
@@ -285,6 +285,7 @@ struct uniphier_aio_chip {
 
        struct uniphier_aio *aios;
        int num_aios;
+       int num_wup_aios;
        struct uniphier_aio_pll *plls;
        int num_plls;
 
index 40ad000c2e3ca88f1e028a6c0acf84094b05c1a2..dd64c4b19f23935423c15f3151bb2bf84e1e9550 100644 (file)
@@ -280,7 +280,8 @@ retry:
                                goto retry;
                        }
                        spin_unlock(&sound_loader_lock);
-                       return -EBUSY;
+                       r = -EBUSY;
+                       goto fail;
                }
        }
 
index e1fbb9cc9ea7679f93da3b683833678826d54008..a197fc3b9ab08197f1d8344a2f32d9dc47320f9b 100644 (file)
@@ -604,14 +604,13 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
                ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP,
                                    hiface_pcm_out_urb_handler);
                if (ret < 0)
-                       return ret;
+                       goto error;
        }
 
        ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm);
        if (ret < 0) {
-               kfree(rt);
                dev_err(&chip->dev->dev, "Cannot create pcm instance\n");
-               return ret;
+               goto error;
        }
 
        pcm->private_data = rt;
@@ -624,4 +623,10 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
 
        chip->pcm = rt;
        return 0;
+
+error:
+       for (i = 0; i < PCM_N_URBS; i++)
+               kfree(rt->out_urbs[i].buffer);
+       kfree(rt);
+       return ret;
 }
index 78c2d6cab3b52f7341e7f02ee1e7e0d502f8679b..531564269444e9ff9f148eddd6ad466b9dde4c70 100644 (file)
@@ -554,6 +554,15 @@ int line6_init_pcm(struct usb_line6 *line6,
        line6pcm->volume_monitor = 255;
        line6pcm->line6 = line6;
 
+       spin_lock_init(&line6pcm->out.lock);
+       spin_lock_init(&line6pcm->in.lock);
+       line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
+
+       line6->line6pcm = line6pcm;
+
+       pcm->private_data = line6pcm;
+       pcm->private_free = line6_cleanup_pcm;
+
        line6pcm->max_packet_size_in =
                usb_maxpacket(line6->usbdev,
                        usb_rcvisocpipe(line6->usbdev, ep_read), 0);
@@ -566,15 +575,6 @@ int line6_init_pcm(struct usb_line6 *line6,
                return -EINVAL;
        }
 
-       spin_lock_init(&line6pcm->out.lock);
-       spin_lock_init(&line6pcm->in.lock);
-       line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
-
-       line6->line6pcm = line6pcm;
-
-       pcm->private_data = line6pcm;
-       pcm->private_free = line6_cleanup_pcm;
-
        err = line6_create_audio_out_urbs(line6pcm);
        if (err < 0)
                return err;
index 5f3c87264e66776049f436b45ce7e44f368db4e8..da627b015b32b9c9e14a5ab6c615dc46b53b062d 100644 (file)
@@ -417,7 +417,7 @@ static const struct line6_properties podhd_properties_table[] = {
                .name = "POD HD500",
                .capabilities   = LINE6_CAP_PCM
                                | LINE6_CAP_HWMON,
-               .altsetting = 1,
+               .altsetting = 0,
                .ep_ctrl_r = 0x81,
                .ep_ctrl_w = 0x01,
                .ep_audio_r = 0x86,
index 7e1c6c2dc99e80096c006bf3861d8a01cd3a9d30..b0c5d4ef613740c0cc015e2cc96f9f9a8b723b6d 100644 (file)
@@ -83,6 +83,7 @@ struct mixer_build {
        unsigned char *buffer;
        unsigned int buflen;
        DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+       DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
        struct usb_audio_term oterm;
        const struct usbmix_name_map *map;
        const struct usbmix_selector_map *selector_map;
@@ -753,12 +754,13 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
                                       struct uac_mixer_unit_descriptor *desc)
 {
        int mu_channels;
-       void *c;
 
        if (desc->bLength < sizeof(*desc))
                return -EINVAL;
        if (!desc->bNrInPins)
                return -EINVAL;
+       if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
+               return -EINVAL;
 
        switch (state->mixer->protocol) {
        case UAC_VERSION_1:
@@ -774,13 +776,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
                break;
        }
 
-       if (!mu_channels)
-               return 0;
-
-       c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
-       if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
-               return 0; /* no bmControls -> skip */
-
        return mu_channels;
 }
 
@@ -788,16 +783,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
  * parse the source unit recursively until it reaches to a terminal
  * or a branched unit.
  */
-static int check_input_term(struct mixer_build *state, int id,
+static int __check_input_term(struct mixer_build *state, int id,
                            struct usb_audio_term *term)
 {
        int protocol = state->mixer->protocol;
        int err;
        void *p1;
+       unsigned char *hdr;
 
        memset(term, 0, sizeof(*term));
-       while ((p1 = find_audio_control_unit(state, id)) != NULL) {
-               unsigned char *hdr = p1;
+       for (;;) {
+               /* a loop in the terminal chain? */
+               if (test_and_set_bit(id, state->termbitmap))
+                       return -EINVAL;
+
+               p1 = find_audio_control_unit(state, id);
+               if (!p1)
+                       break;
+
+               hdr = p1;
                term->id = id;
 
                if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
@@ -815,7 +819,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
                                        /* call recursively to verify that the
                                         * referenced clock entity is valid */
-                                       err = check_input_term(state, d->bCSourceID, term);
+                                       err = __check_input_term(state, d->bCSourceID, term);
                                        if (err < 0)
                                                return err;
 
@@ -849,7 +853,7 @@ static int check_input_term(struct mixer_build *state, int id,
                        case UAC2_CLOCK_SELECTOR: {
                                struct uac_selector_unit_descriptor *d = p1;
                                /* call recursively to retrieve the channel info */
-                               err = check_input_term(state, d->baSourceID[0], term);
+                               err = __check_input_term(state, d->baSourceID[0], term);
                                if (err < 0)
                                        return err;
                                term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -912,7 +916,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
                                /* call recursively to verify that the
                                 * referenced clock entity is valid */
-                               err = check_input_term(state, d->bCSourceID, term);
+                               err = __check_input_term(state, d->bCSourceID, term);
                                if (err < 0)
                                        return err;
 
@@ -963,7 +967,7 @@ static int check_input_term(struct mixer_build *state, int id,
                        case UAC3_CLOCK_SELECTOR: {
                                struct uac_selector_unit_descriptor *d = p1;
                                /* call recursively to retrieve the channel info */
-                               err = check_input_term(state, d->baSourceID[0], term);
+                               err = __check_input_term(state, d->baSourceID[0], term);
                                if (err < 0)
                                        return err;
                                term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -979,7 +983,7 @@ static int check_input_term(struct mixer_build *state, int id,
                                        return -EINVAL;
 
                                /* call recursively to retrieve the channel info */
-                               err = check_input_term(state, d->baSourceID[0], term);
+                               err = __check_input_term(state, d->baSourceID[0], term);
                                if (err < 0)
                                        return err;
 
@@ -997,6 +1001,15 @@ static int check_input_term(struct mixer_build *state, int id,
        return -ENODEV;
 }
 
+
+static int check_input_term(struct mixer_build *state, int id,
+                           struct usb_audio_term *term)
+{
+       memset(term, 0, sizeof(*term));
+       memset(state->termbitmap, 0, sizeof(state->termbitmap));
+       return __check_input_term(state, id, term);
+}
+
 /*
  * Feature Unit
  */
@@ -2007,6 +2020,31 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
  * Mixer Unit
  */
 
+/* check whether the given in/out overflows bmMixerControls matrix */
+static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc,
+                                 int protocol, int num_ins, int num_outs)
+{
+       u8 *hdr = (u8 *)desc;
+       u8 *c = uac_mixer_unit_bmControls(desc, protocol);
+       size_t rest; /* remaining bytes after bmMixerControls */
+
+       switch (protocol) {
+       case UAC_VERSION_1:
+       default:
+               rest = 1; /* iMixer */
+               break;
+       case UAC_VERSION_2:
+               rest = 2; /* bmControls + iMixer */
+               break;
+       case UAC_VERSION_3:
+               rest = 6; /* bmControls + wMixerDescrStr */
+               break;
+       }
+
+       /* overflow? */
+       return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0];
+}
+
 /*
  * build a mixer unit control
  *
@@ -2135,6 +2173,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
                if (err < 0)
                        return err;
                num_ins += iterm.channels;
+               if (mixer_bitmap_overflow(desc, state->mixer->protocol,
+                                         num_ins, num_outs))
+                       break;
                for (; ich < num_ins; ich++) {
                        int och, ich_has_controls = 0;
 
index 5b342fe30c7516b4053035471130e90ab6f96e1e..10c6971cf4772c931272f37cc165573457f24ea8 100644 (file)
@@ -1167,17 +1167,17 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
 {
        struct usb_mixer_interface *mixer;
        struct usb_mixer_elem_info *cval;
-       int unitid = 12; /* SamleRate ExtensionUnit ID */
+       int unitid = 12; /* SampleRate ExtensionUnit ID */
 
        list_for_each_entry(mixer, &chip->mixer_list, list) {
-               cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
-               if (cval) {
+               if (mixer->id_elems[unitid]) {
+                       cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
                        snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
                                                    cval->control << 8,
                                                    samplerate_id);
                        snd_usb_mixer_notify_id(mixer, unitid);
+                       break;
                }
-               break;
        }
 }
 
index db114f3977e0fb4044e88c028f9dea9bad6382af..1828225ba882a20921028462f0c83276fd1fcd27 100644 (file)
@@ -350,10 +350,14 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
                ep = 0x81;
                ifnum = 2;
                goto add_sync_ep_from_ifnum;
+       case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
        case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
                ep = 0x81;
                ifnum = 1;
                goto add_sync_ep_from_ifnum;
+       case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
+               /* BOSS Katana amplifiers do not need quirks */
+               return 0;
        }
 
        if (attr == USB_ENDPOINT_SYNC_ASYNC &&
@@ -463,6 +467,7 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
        }
        ep = get_endpoint(alts, 1)->bEndpointAddress;
        if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+           get_endpoint(alts, 0)->bSynchAddress != 0 &&
            ((is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
             (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
                dev_err(&dev->dev,
index d71e01954975e494e504d6c7c23d6f8045f20058..60d00091f64b20cfae2b9c2146b962b295e034b7 100644 (file)
@@ -1449,6 +1449,8 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        case 0x152a:  /* Thesycon devices */
        case 0x25ce:  /* Mytek devices */
        case 0x2ab6:  /* T+A devices */
+       case 0x3842:  /* EVGA */
+       case 0xc502:  /* HiBy devices */
                if (fp->dsd_raw)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
index d9e3de495c163a3bbca99164fe25b047c055d882..bc582202bd10199cf465324d84f3a1a132019cb2 100644 (file)
@@ -1053,6 +1053,7 @@ found_clock:
 
                pd = kzalloc(sizeof(*pd), GFP_KERNEL);
                if (!pd) {
+                       kfree(fp->chmap);
                        kfree(fp->rate_table);
                        kfree(fp);
                        return NULL;
index 51cfb993cca11e9f7041106bd480943f6141390e..29e8af8300d1b465985b95a0c3274c6c15bc916d 100644 (file)
@@ -34,7 +34,7 @@ CONFIG_DRM_CDNS_MHDP=m
 CONFIG_DRM_CDNS_DSI=n
 
 CONFIG_PHY_J721E_WIZ=y
-CONFIG_PHY_CADENCE_DP=y
+CONFIG_PHY_CADENCE_TORRENT=y
 
 # SGX driver needs legacy support
 CONFIG_DRM_LEGACY=y
index df3c24c36bfeb250ed52be6088d085df0c9a8537..bdeed43c7b38341f8b7169a1fff506255492dca8 100644 (file)
@@ -129,6 +129,7 @@ CONFIG_PCI_MSI=y
 CONFIG_PCI_ENDPOINT=y
 CONFIG_PCI_ENDPOINT_CONFIGFS=y
 CONFIG_PCI_EPF_TEST=y
+CONFIG_PCI_EPF_NTB=y
 CONFIG_PCI_ENDPOINT_TEST=m
 CONFIG_PCI_DRA7XX=y
 CONFIG_PCI_DRA7XX_HOST=y
@@ -140,6 +141,12 @@ CONFIG_PCIE_CADENCE=y
 CONFIG_PCIE_CADENCE_HOST=y
 CONFIG_PCIE_CADENCE_EP=y
 
+#NTB
+CONFIG_NTB=m
+CONFIG_NTB_EPF=m
+CONFIG_NTB_TRANSPORT=m
+CONFIG_NTB_NETDEV=m
+
 #NVME
 CONFIG_NVME_CORE=m
 CONFIG_BLK_DEV_NVME=m
index ce30f791786f531f81d093b825eb7c158939034d..8ef9caa40008aa583605a2ba0bff7342db80eb98 100644 (file)
@@ -156,3 +156,6 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y
 
 # TI PAT
 CONFIG_TI_PAT=y
+
+# Watchdog support
+CONFIG_K3_RTI_WATCHDOG=m
index fcaf00621102f7df85bd8c3bb58a74770bb18478..be7aebff0c1e595b901ec1b5fa28dc0587eb41a2 100644 (file)
@@ -238,7 +238,7 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
 
        fd = get_fd_by_id(id);
        if (fd < 0) {
-               p_err("can't get prog by id (%u): %s", id, strerror(errno));
+               p_err("can't open object by id (%u): %s", id, strerror(errno));
                return -1;
        }
 
index 87439320ef70e1ce5f17d437ff1d882897ccab78..73d7252729fad65bb2cdbda96ad625156bfd516d 100644 (file)
@@ -10,6 +10,8 @@
  * Licensed under the GNU General Public License, version 2.0 (GPLv2)
  */
 
+#define _GNU_SOURCE
+#include <stdio.h>
 #include <stdarg.h>
 #include <stdint.h>
 #include <stdio.h>
@@ -51,11 +53,13 @@ static int fprintf_json(void *out, const char *fmt, ...)
        char *s;
 
        va_start(ap, fmt);
+       if (vasprintf(&s, fmt, ap) < 0)
+               return -1;
+       va_end(ap);
+
        if (!oper_count) {
                int i;
 
-               s = va_arg(ap, char *);
-
                /* Strip trailing spaces */
                i = strlen(s) - 1;
                while (s[i] == ' ')
@@ -68,11 +72,10 @@ static int fprintf_json(void *out, const char *fmt, ...)
        } else if (!strcmp(fmt, ",")) {
                   /* Skip */
        } else {
-               s = va_arg(ap, char *);
                jsonw_string(json_wtr, s);
                oper_count++;
        }
-       va_end(ap);
+       free(s);
        return 0;
 }
 
index bbba0d61570fedbbdcdb09881717ccf79611a165..4f9611af46422ce783c0c36b18aede179d5f334d 100644 (file)
@@ -381,7 +381,9 @@ static int do_show(int argc, char **argv)
                if (fd < 0)
                        return -1;
 
-               return show_prog(fd);
+               err = show_prog(fd);
+               close(fd);
+               return err;
        }
 
        if (argc)
index d7e06fe0270eefb29343b82ce4971765c2a981d5..ef8a82f29f02485a66cdb4247f4a059f2bb2ec65 100644 (file)
@@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
        int sn_offset = 0;
        int error = 0;
        char *buffer;
-       struct hv_kvp_ipaddr_value *ip_buffer;
+       struct hv_kvp_ipaddr_value *ip_buffer = NULL;
        char cidr_mask[5]; /* /xyz */
        int weight;
        int i;
@@ -1386,6 +1386,8 @@ int main(int argc, char *argv[])
                        daemonize = 0;
                        break;
                case 'h':
+                       print_usage(argv);
+                       exit(0);
                default:
                        print_usage(argv);
                        exit(EXIT_FAILURE);
index b1330017276236b91ca2a5be84a3ce48dc07e4f9..c2bb8a360177724659e3d66619a9a21707b96602 100644 (file)
@@ -229,6 +229,8 @@ int main(int argc, char *argv[])
                        daemonize = 0;
                        break;
                case 'h':
+                       print_usage(argv);
+                       exit(0);
                default:
                        print_usage(argv);
                        exit(EXIT_FAILURE);
index 55e7374bade0d5c581fbee36e3183b77b057858d..099f2c44dbed26e9b6fb8fa9e242fe8ed24ce825 100644 (file)
@@ -4,10 +4,10 @@
 import os
 from optparse import OptionParser
 
+help_msg = "print verbose messages. Try -vv, -vvv for  more verbose messages"
 parser = OptionParser()
-parser.add_option("-v", "--verbose", dest="verbose",
-                  help="print verbose messages. Try -vv, -vvv for \
-                       more verbose messages", action="count")
+parser.add_option(
+       "-v", "--verbose", dest="verbose", help=help_msg, action="count")
 
 (options, args) = parser.parse_args()
 
@@ -21,27 +21,28 @@ if not os.path.isdir(vmbus_sys_path):
        exit(-1)
 
 vmbus_dev_dict = {
-       '{0e0b6031-5213-4934-818b-38d90ced39db}' : '[Operating system shutdown]',
-       '{9527e630-d0ae-497b-adce-e80ab0175caf}' : '[Time Synchronization]',
-       '{57164f39-9115-4e78-ab55-382f3bd5422d}' : '[Heartbeat]',
-       '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}' : '[Data Exchange]',
-       '{35fa2e29-ea23-4236-96ae-3a6ebacba440}' : '[Backup (volume checkpoint)]',
-       '{34d14be3-dee4-41c8-9ae7-6b174977c192}' : '[Guest services]',
-       '{525074dc-8985-46e2-8057-a307dc18a502}' : '[Dynamic Memory]',
-       '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}' : 'Synthetic mouse',
-       '{f912ad6d-2b17-48ea-bd65-f927a61c7684}' : 'Synthetic keyboard',
-       '{da0a7802-e377-4aac-8e77-0558eb1073f8}' : 'Synthetic framebuffer adapter',
-       '{f8615163-df3e-46c5-913f-f2d2f965ed0e}' : 'Synthetic network adapter',
-       '{32412632-86cb-44a2-9b5c-50d1417354f5}' : 'Synthetic IDE Controller',
-       '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller',
-       '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter',
-       '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter',
-       '{44c4f61d-4444-4400-9d52-802e27ede19f}' : 'PCI Express pass-through',
-       '{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]',
-       '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]',
-       '{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]',
+       '{0e0b6031-5213-4934-818b-38d90ced39db}': '[Operating system shutdown]',
+       '{9527e630-d0ae-497b-adce-e80ab0175caf}': '[Time Synchronization]',
+       '{57164f39-9115-4e78-ab55-382f3bd5422d}': '[Heartbeat]',
+       '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}': '[Data Exchange]',
+       '{35fa2e29-ea23-4236-96ae-3a6ebacba440}': '[Backup (volume checkpoint)]',
+       '{34d14be3-dee4-41c8-9ae7-6b174977c192}': '[Guest services]',
+       '{525074dc-8985-46e2-8057-a307dc18a502}': '[Dynamic Memory]',
+       '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}': 'Synthetic mouse',
+       '{f912ad6d-2b17-48ea-bd65-f927a61c7684}': 'Synthetic keyboard',
+       '{da0a7802-e377-4aac-8e77-0558eb1073f8}': 'Synthetic framebuffer adapter',
+       '{f8615163-df3e-46c5-913f-f2d2f965ed0e}': 'Synthetic network adapter',
+       '{32412632-86cb-44a2-9b5c-50d1417354f5}': 'Synthetic IDE Controller',
+       '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}': 'Synthetic SCSI Controller',
+       '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}': 'Synthetic fiber channel adapter',
+       '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}': 'Synthetic RDMA adapter',
+       '{44c4f61d-4444-4400-9d52-802e27ede19f}': 'PCI Express pass-through',
+       '{276aacf4-ac15-426c-98dd-7521ad3f01fe}': '[Reserved system device]',
+       '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}': '[Reserved system device]',
+       '{3375baf4-9e15-4b30-b765-67acb10d607b}': '[Reserved system device]',
 }
 
+
 def get_vmbus_dev_attr(dev_name, attr):
        try:
                f = open('%s/%s/%s' % (vmbus_sys_path, dev_name, attr), 'r')
@@ -52,6 +53,7 @@ def get_vmbus_dev_attr(dev_name, attr):
 
        return lines
 
+
 class VMBus_Dev:
        pass
 
@@ -66,12 +68,13 @@ for f in os.listdir(vmbus_sys_path):
 
        chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping')
        chn_vp_mapping = [c.strip() for c in chn_vp_mapping]
-       chn_vp_mapping = sorted(chn_vp_mapping,
-               key = lambda c : int(c.split(':')[0]))
+       chn_vp_mapping = sorted(
+               chn_vp_mapping, key=lambda c: int(c.split(':')[0]))
 
-       chn_vp_mapping = ['\tRel_ID=%s, target_cpu=%s' %
-                               (c.split(':')[0], c.split(':')[1])
-                                       for c in chn_vp_mapping]
+       chn_vp_mapping = [
+               '\tRel_ID=%s, target_cpu=%s' %
+               (c.split(':')[0], c.split(':')[1]) for c in chn_vp_mapping
+       ]
        d = VMBus_Dev()
        d.sysfs_path = '%s/%s' % (vmbus_sys_path, f)
        d.vmbus_id = vmbus_id
@@ -85,7 +88,7 @@ for f in os.listdir(vmbus_sys_path):
        vmbus_dev_list.append(d)
 
 
-vmbus_dev_list  = sorted(vmbus_dev_list, key = lambda d : int(d.vmbus_id))
+vmbus_dev_list = sorted(vmbus_dev_list, key=lambda d: int(d.vmbus_id))
 
 format0 = '%2s: %s'
 format1 = '%2s: Class_ID = %s - %s\n%s'
@@ -95,9 +98,15 @@ for d in vmbus_dev_list:
        if verbose == 0:
                print(('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc))
        elif verbose == 1:
-               print (('VMBUS ID ' + format1) %        \
-                       (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping))
+               print(
+                       ('VMBUS ID ' + format1) %
+                       (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping)
+               )
        else:
-               print (('VMBUS ID ' + format2) % \
-                       (d.vmbus_id, d.class_id, d.dev_desc, \
-                       d.device_id, d.sysfs_path, d.chn_vp_mapping))
+               print(
+                       ('VMBUS ID ' + format2) %
+                       (
+                               d.vmbus_id, d.class_id, d.dev_desc,
+                               d.device_id, d.sysfs_path, d.chn_vp_mapping
+                       )
+               )
index 7a6d61c6c0121f9c9cab0021602021c1eb44b8e6..55272fef3b508ef5bd016f643b138e41d4c27f67 100644 (file)
@@ -159,9 +159,9 @@ int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
                        *be = (endianchar == 'b');
                        *bytes = padint / 8;
                        if (*bits_used == 64)
-                               *mask = ~0;
+                               *mask = ~(0ULL);
                        else
-                               *mask = (1ULL << *bits_used) - 1;
+                               *mask = (1ULL << *bits_used) - 1ULL;
 
                        *is_signed = (signchar == 's');
                        if (fclose(sysfsfp)) {
index 57aaeaf8e192038ae37eceaa0b60fba82b5f6013..edba4d93e9e6a346572d12e7def7e01f1c88822d 100644 (file)
@@ -1,22 +1,22 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #if defined(__i386__) || defined(__x86_64__)
-#include "../../arch/x86/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/x86/include/uapi/asm/bitsperlong.h"
 #elif defined(__aarch64__)
-#include "../../arch/arm64/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/arm64/include/uapi/asm/bitsperlong.h"
 #elif defined(__powerpc__)
-#include "../../arch/powerpc/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/powerpc/include/uapi/asm/bitsperlong.h"
 #elif defined(__s390__)
-#include "../../arch/s390/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/s390/include/uapi/asm/bitsperlong.h"
 #elif defined(__sparc__)
-#include "../../arch/sparc/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/sparc/include/uapi/asm/bitsperlong.h"
 #elif defined(__mips__)
-#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/mips/include/uapi/asm/bitsperlong.h"
 #elif defined(__ia64__)
-#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/ia64/include/uapi/asm/bitsperlong.h"
 #elif defined(__riscv)
-#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/riscv/include/uapi/asm/bitsperlong.h"
 #elif defined(__alpha__)
-#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
 #else
 #include <asm-generic/bitsperlong.h>
 #endif
index 66917a4eba2716ebdef75a78e854f4e9874c88b9..bf4cd924aed555320859dfebddd1427cd4b2a4a9 100644 (file)
@@ -2484,6 +2484,7 @@ struct bpf_prog_info {
        char name[BPF_OBJ_NAME_LEN];
        __u32 ifindex;
        __u32 gpl_compatible:1;
+       __u32 :31; /* alignment pad */
        __u64 netns_dev;
        __u64 netns_ino;
        __u32 nr_jited_ksyms;
index dd0b68d1f4be09d27a34daaa2e3b9f8d2e766671..482025b728399338eaa8df4ca32900c9b89a01c7 100644 (file)
@@ -75,6 +75,17 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
        return syscall(__NR_bpf, cmd, attr, size);
 }
 
+static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
+{
+       int fd;
+
+       do {
+               fd = sys_bpf(BPF_PROG_LOAD, attr, size);
+       } while (fd < 0 && errno == EAGAIN);
+
+       return fd;
+}
+
 int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
 {
        __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
@@ -218,7 +229,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
        memcpy(attr.prog_name, load_attr->name,
               min(name_len, BPF_OBJ_NAME_LEN - 1));
 
-       fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+       fd = sys_bpf_prog_load(&attr, sizeof(attr));
        if (fd >= 0 || !log_buf || !log_buf_sz)
                return fd;
 
@@ -227,7 +238,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
        attr.log_size = log_buf_sz;
        attr.log_level = 1;
        log_buf[0] = 0;
-       return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+       return sys_bpf_prog_load(&attr, sizeof(attr));
 }
 
 int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
@@ -268,7 +279,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
        attr.kern_version = kern_version;
        attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;
 
-       return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+       return sys_bpf_prog_load(&attr, sizeof(attr));
 }
 
 int bpf_map_update_elem(int fd, const void *key, const void *value,
index bdb94939fd602750f882dd0baac261f22da899a6..a350f97e3a1a4eb571c3355e01b77cc976787577 100644 (file)
@@ -2293,10 +2293,7 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
                        struct bpf_object **pobj, int *prog_fd)
 {
-       struct bpf_object_open_attr open_attr = {
-               .file           = attr->file,
-               .prog_type      = attr->prog_type,
-       };
+       struct bpf_object_open_attr open_attr = {};
        struct bpf_program *prog, *first_prog = NULL;
        enum bpf_attach_type expected_attach_type;
        enum bpf_prog_type prog_type;
@@ -2309,6 +2306,9 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
        if (!attr->file)
                return -EINVAL;
 
+       open_attr.file = attr->file;
+       open_attr.prog_type = attr->prog_type;
+
        obj = bpf_object__open_xattr(&open_attr);
        if (IS_ERR_OR_NULL(obj))
                return -ENOENT;
index 0b4e833088a4dc9653fe065cba012141a815c014..bca0c9e5452c4b7c780523363fa35771cadfc8ca 100644 (file)
@@ -55,15 +55,15 @@ set_plugin_dir := 1
 
 # Set plugin_dir to preffered global plugin location
 # If we install under $HOME directory we go under
-# $(HOME)/.traceevent/plugins
+# $(HOME)/.local/lib/traceevent/plugins
 #
 # We dont set PLUGIN_DIR in case we install under $HOME
 # directory, because by default the code looks under:
-# $(HOME)/.traceevent/plugins by default.
+# $(HOME)/.local/lib/traceevent/plugins by default.
 #
 ifeq ($(plugin_dir),)
 ifeq ($(prefix),$(HOME))
-override plugin_dir = $(HOME)/.traceevent/plugins
+override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
 set_plugin_dir := 0
 else
 override plugin_dir = $(libdir)/traceevent/plugins
@@ -259,8 +259,8 @@ endef
 
 define do_generate_dynamic_list_file
        symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
-       xargs echo "U W w" | tr ' ' '\n' | sort -u | xargs echo`;\
-       if [ "$$symbol_type" = "U W w" ];then                           \
+       xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
+       if [ "$$symbol_type" = "U W" ];then                             \
                (echo '{';                                              \
                $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
                echo '};';                                              \
index 6ccfd13d5cf9c4d081f963dfdf090e1360ef05ca..382e476629fb1dd0fca8dba6e140dcf9a968eefc 100644 (file)
@@ -254,10 +254,10 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
                errno = ENOMEM;
                return -1;
        }
+       pevent->cmdlines = cmdlines;
 
        cmdlines[pevent->cmdline_count].comm = strdup(comm);
        if (!cmdlines[pevent->cmdline_count].comm) {
-               free(cmdlines);
                errno = ENOMEM;
                return -1;
        }
@@ -268,7 +268,6 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
                pevent->cmdline_count++;
 
        qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
-       pevent->cmdlines = cmdlines;
 
        return 0;
 }
index f17e25097e1e2573f218639f601ecb6102f67b8e..52874eb94acefa7e31b4aab72baf23f76cd911ff 100644 (file)
@@ -16,7 +16,7 @@
 #include "event-parse.h"
 #include "event-utils.h"
 
-#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
+#define LOCAL_PLUGIN_DIR ".local/lib/traceevent/plugins/"
 
 static struct registered_plugin_options {
        struct registered_plugin_options        *next;
index 88158239622bce38a7f27fb718123b8a62e03d09..20f67fcf378d540eced6080f41c0c844f7fe8b79 100644 (file)
@@ -35,7 +35,7 @@ INCLUDES := -I$(srctree)/tools/include \
            -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
            -I$(srctree)/tools/objtool/arch/$(ARCH)/include
 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
-CFLAGS   += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
+CFLAGS   := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
 LDFLAGS  += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
 
 # Allow old libelf to be used:
index abed594a9653b0de7a200344a03ac9c92bcfe686..b8f3cca8e58b4ec327876c7fd3173a4a3ae6c31d 100644 (file)
@@ -305,7 +305,7 @@ static int read_symbols(struct elf *elf)
                        if (sym->type != STT_FUNC)
                                continue;
                        sym->pfunc = sym->cfunc = sym;
-                       coldstr = strstr(sym->name, ".cold.");
+                       coldstr = strstr(sym->name, ".cold");
                        if (!coldstr)
                                continue;
 
index 849b3be15bd89daeaf58a9e5bdc233a441341e9c..510caedd73194171d66596a150aaf57f426d7c71 100644 (file)
@@ -837,7 +837,7 @@ ifndef NO_JVMTI
     JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
   else
     ifneq (,$(wildcard /usr/sbin/alternatives))
-      JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
+      JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed -e 's%/jre/bin/java.%%g' -e 's%/bin/java.%%g')
     endif
   endif
   ifndef JDIR
index 2f595cd73da662be982a71f130f045f734c29fce..16af6c3b1365b3af0760e9dda3663a9102d7ba5a 100644 (file)
@@ -32,6 +32,8 @@ struct cs_etm_recording {
        struct auxtrace_record  itr;
        struct perf_pmu         *cs_etm_pmu;
        struct perf_evlist      *evlist;
+       int                     wrapped_cnt;
+       bool                    *wrapped;
        bool                    snapshot_mode;
        size_t                  snapshot_size;
 };
@@ -495,16 +497,131 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
        return 0;
 }
 
-static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
+static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
+{
+       bool *wrapped;
+       int cnt = ptr->wrapped_cnt;
+
+       /* Make @ptr->wrapped as big as @idx */
+       while (cnt <= idx)
+               cnt++;
+
+       /*
+        * Free'ed in cs_etm_recording_free().  Using realloc() to avoid
+        * cross compilation problems where the host's system supports
+        * reallocarray() but not the target.
+        */
+       wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
+       if (!wrapped)
+               return -ENOMEM;
+
+       wrapped[cnt - 1] = false;
+       ptr->wrapped_cnt = cnt;
+       ptr->wrapped = wrapped;
+
+       return 0;
+}
+
+static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
+                                     size_t buffer_size, u64 head)
+{
+       u64 i, watermark;
+       u64 *buf = (u64 *)buffer;
+       size_t buf_size = buffer_size;
+
+       /*
+        * We want to look the very last 512 byte (chosen arbitrarily) in
+        * the ring buffer.
+        */
+       watermark = buf_size - 512;
+
+       /*
+        * @head is continuously increasing - if its value is equal or greater
+        * than the size of the ring buffer, it has wrapped around.
+        */
+       if (head >= buffer_size)
+               return true;
+
+       /*
+        * The value of @head is somewhere within the size of the ring buffer.
+        * This can be that there hasn't been enough data to fill the ring
+        * buffer yet or the trace time was so long that @head has numerically
+        * wrapped around.  To find we need to check if we have data at the very
+        * end of the ring buffer.  We can reliably do this because mmap'ed
+        * pages are zeroed out and there is a fresh mapping with every new
+        * session.
+        */
+
+       /* @head is less than 512 byte from the end of the ring buffer */
+       if (head > watermark)
+               watermark = head;
+
+       /*
+        * Speed things up by using 64 bit transactions (see "u64 *buf" above)
+        */
+       watermark >>= 3;
+       buf_size >>= 3;
+
+       /*
+        * If we find trace data at the end of the ring buffer, @head has
+        * been there and has numerically wrapped around at least once.
+        */
+       for (i = watermark; i < buf_size; i++)
+               if (buf[i])
+                       return true;
+
+       return false;
+}
+
+static int cs_etm_find_snapshot(struct auxtrace_record *itr,
                                int idx, struct auxtrace_mmap *mm,
-                               unsigned char *data __maybe_unused,
+                               unsigned char *data,
                                u64 *head, u64 *old)
 {
+       int err;
+       bool wrapped;
+       struct cs_etm_recording *ptr =
+                       container_of(itr, struct cs_etm_recording, itr);
+
+       /*
+        * Allocate memory to keep track of wrapping if this is the first
+        * time we deal with this *mm.
+        */
+       if (idx >= ptr->wrapped_cnt) {
+               err = cs_etm_alloc_wrapped_array(ptr, idx);
+               if (err)
+                       return err;
+       }
+
+       /*
+        * Check to see if *head has wrapped around.  If it hasn't only the
+        * amount of data between *head and *old is snapshot'ed to avoid
+        * bloating the perf.data file with zeros.  But as soon as *head has
+        * wrapped around the entire size of the AUX ring buffer it taken.
+        */
+       wrapped = ptr->wrapped[idx];
+       if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
+               wrapped = true;
+               ptr->wrapped[idx] = true;
+       }
+
        pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
                  __func__, idx, (size_t)*old, (size_t)*head, mm->len);
 
-       *old = *head;
-       *head += mm->len;
+       /* No wrap has occurred, we can just use *head and *old. */
+       if (!wrapped)
+               return 0;
+
+       /*
+        * *head has wrapped around - adjust *head and *old to pickup the
+        * entire content of the AUX buffer.
+        */
+       if (*head >= mm->len) {
+               *old = *head - mm->len;
+       } else {
+               *head += mm->len;
+               *old = *head - mm->len;
+       }
 
        return 0;
 }
@@ -545,6 +662,8 @@ static void cs_etm_recording_free(struct auxtrace_record *itr)
 {
        struct cs_etm_recording *ptr =
                        container_of(itr, struct cs_etm_recording, itr);
+
+       zfree(&ptr->wrapped);
        free(ptr);
 }
 
index a19690a17291ebb52a9f0bccf97a356f4af6ade0..c8c86a0c9b793d6f5173d9bc2fa7bb60f7d23f6e 100644 (file)
@@ -6,8 +6,9 @@
 #include "machine.h"
 #include "api/fs/fs.h"
 #include "debug.h"
+#include "symbol.h"
 
-int arch__fix_module_text_start(u64 *start, const char *name)
+int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
 {
        u64 m_start = *start;
        char path[PATH_MAX];
@@ -17,7 +18,35 @@ int arch__fix_module_text_start(u64 *start, const char *name)
        if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
                pr_debug2("Using module %s start:%#lx\n", path, m_start);
                *start = m_start;
+       } else {
+               /* Successful read of the modules segment text start address.
+                * Calculate difference between module start address
+                * in memory and module text segment start address.
+                * For example module load address is 0x3ff8011b000
+                * (from /proc/modules) and module text segment start
+                * address is 0x3ff8011b870 (from file above).
+                *
+                * Adjust the module size and subtract the GOT table
+                * size located at the beginning of the module.
+                */
+               *size -= (*start - m_start);
        }
 
        return 0;
 }
+
+/* On s390 kernel text segment start is located at very low memory addresses,
+ * for example 0x10000. Modules are located at very high memory addresses,
+ * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment
+ * and beginning of first module's text segment is very big.
+ * Therefore do not fill this gap and do not assign it to the kernel dso map.
+ */
+void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
+{
+       if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
+               /* Last kernel symbol mapped to end of page */
+               p->end = roundup(p->end, page_size);
+       else
+               p->end = c->start;
+       pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
+}
index 05920e3edf7a7a060f259ff9086d342ab4c39271..47357973b55b208334b651120bd5a832d50861e3 100644 (file)
@@ -1,11 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include <errno.h>
+#include "../../util/debug.h"
 #ifndef REMOTE_UNWIND_LIBUNWIND
 #include <libunwind.h>
 #include "perf_regs.h"
 #include "../../util/unwind.h"
-#include "../../util/debug.h"
 #endif
 
 #ifdef HAVE_ARCH_X86_64_SUPPORT
index fa56fde6e8d8036921e8b012bc69ae9afb70581a..91c0a4434da2767a6eedef5d0b3d0d90d274b368 100644 (file)
@@ -378,8 +378,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
 
        /* Allocate and initialize all memory on CPU#0: */
        if (init_cpu0) {
-               orig_mask = bind_to_node(0);
-               bind_to_memnode(0);
+               int node = numa_node_of_cpu(0);
+
+               orig_mask = bind_to_node(node);
+               bind_to_memnode(node);
        }
 
        bytes = bytes0 + HPSIZE;
index f42f228e88992bd037474ae4c20df6934bfd8674..137955197ba8dd0bb4490b651986c8bc24ffd326 100644 (file)
@@ -174,7 +174,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
        int last_cpu;
 
        last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
-       mask_size = (last_cpu + 3) / 4 + 1;
+       mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
        mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
 
        cpumask = malloc(mask_size);
index 99de91698de1e544a68a607ba11c1bc3fc08ba73..0bdb34fee9d8187c6de233cab830d92f72b60cbc 100644 (file)
@@ -711,6 +711,16 @@ __cmd_probe(int argc, const char **argv)
 
                ret = perf_add_probe_events(params.events, params.nevents);
                if (ret < 0) {
+
+                       /*
+                        * When perf_add_probe_events() fails it calls
+                        * cleanup_perf_probe_events(pevs, npevs), i.e.
+                        * cleanup_perf_probe_events(params.events, params.nevents), which
+                        * will call clear_perf_probe_event(), so set nevents to zero
+                        * to avoid cleanup_params() to call clear_perf_probe_event() again
+                        * on the same pevs.
+                        */
+                       params.nevents = 0;
                        pr_err_with_code("  Error: Failed to add events.", ret);
                        return ret;
                }
index 40720150ccd80670e49285d3a0a49c2d184b45e3..6aae10ff954c72847d9648e25e45e7b1d2a7ea1e 100644 (file)
@@ -2497,8 +2497,8 @@ static int add_default_attributes(void)
                                fprintf(stderr,
                                        "Cannot set up top down events %s: %d\n",
                                        str, err);
-                               free(str);
                                parse_events_print_error(&errinfo, str);
+                               free(str);
                                return -1;
                        }
                } else {
@@ -3090,8 +3090,11 @@ int cmd_stat(int argc, const char **argv)
                        fprintf(output, "[ perf stat: executing run #%d ... ]\n",
                                run_idx + 1);
 
+               if (run_idx != 0)
+                       perf_evlist__reset_prev_raw_counts(evsel_list);
+
                status = run_perf_stat(argc, argv, run_idx);
-               if (forever && status != -1) {
+               if (forever && status != -1 && !interval) {
                        print_counters(NULL, argc, argv);
                        perf_stat__reset_stats();
                }
index 33eefc33e0ea9fc49a7d9b1568c3853a29653f9e..d0733251a386e75ac86981303936e1075bc5c898 100644 (file)
@@ -99,7 +99,7 @@ static void perf_top__resize(struct perf_top *top)
 
 static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
 {
-       struct perf_evsel *evsel = hists_to_evsel(he->hists);
+       struct perf_evsel *evsel;
        struct symbol *sym;
        struct annotation *notes;
        struct map *map;
@@ -108,6 +108,8 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
        if (!he || !he->ms.sym)
                return -1;
 
+       evsel = hists_to_evsel(he->hists);
+
        sym = he->ms.sym;
        map = he->ms.map;
 
@@ -224,7 +226,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
 static void perf_top__show_details(struct perf_top *top)
 {
        struct hist_entry *he = top->sym_filter_entry;
-       struct perf_evsel *evsel = hists_to_evsel(he->hists);
+       struct perf_evsel *evsel;
        struct annotation *notes;
        struct symbol *symbol;
        int more;
@@ -232,6 +234,8 @@ static void perf_top__show_details(struct perf_top *top)
        if (!he)
                return;
 
+       evsel = hists_to_evsel(he->hists);
+
        symbol = he->ms.sym;
        notes = symbol__annotation(symbol);
 
index 50df168be326d84cba4e5cfbc26ea8a119d392cf..b02c961046403ef3763891e5c7f622489637f8e2 100644 (file)
@@ -19,6 +19,7 @@ static struct version version;
 static struct option version_options[] = {
        OPT_BOOLEAN(0, "build-options", &version.build_options,
                    "display the build options"),
+       OPT_END(),
 };
 
 static const char * const version_usage[] = {
index 6add3e9826141346a34a93b9e0a970a22720e361..3361d98a4edd6783a5fef158f6a1c495e28c37fb 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/compiler.h>
+#include <linux/string.h>
 #include <sys/types.h>
 #include <stdio.h>
 #include <string.h>
@@ -150,8 +151,7 @@ copy_class_filename(const char * class_sign, const char * file_name, char * resu
                result[i] = '\0';
        } else {
                /* fallback case */
-               size_t file_name_len = strlen(file_name);
-               strncpy(result, file_name, file_name_len < max_length ? file_name_len : max_length);
+               strlcpy(result, file_name, max_length);
        }
 }
 
index a11cb006f9682ed15300ee6fa1abce0c0125f78e..80f8ae8b13666a6834c9e9b45dd25c6e490c589c 100644 (file)
@@ -439,6 +439,9 @@ int main(int argc, const char **argv)
 
        srandom(time(NULL));
 
+       /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
+       config_exclusive_filename = getenv("PERF_CONFIG");
+
        err = perf_config(perf_default_config, NULL);
        if (err)
                return err;
index 21bf7f5a3cf51a1a42e3169daa738c8e7e0a8d83..19d435a9623b542cc6b53411eb91b92276731e3b 100644 (file)
@@ -26,7 +26,7 @@ static inline unsigned long long rdclock(void)
 }
 
 #ifndef MAX_NR_CPUS
-#define MAX_NR_CPUS                    1024
+#define MAX_NR_CPUS                    2048
 #endif
 
 extern const char *input_name;
index 68c92bb599eef708228287e0d1c6728429a67ced..6b36b7110669569475e32fc7281abbaac7a980ad 100644 (file)
@@ -450,6 +450,7 @@ static struct fixed {
        { "inst_retired.any_p", "event=0xc0" },
        { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
        { "cpu_clk_unhalted.thread", "event=0x3c" },
+       { "cpu_clk_unhalted.core", "event=0x3c" },
        { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
        { NULL, NULL},
 };
index b1af2499a3c972a97ddbe766f0fe272716a9fca0..7a9b123c7bfcfe6376bdda54c5b7ba57e76d5c25 100644 (file)
@@ -52,7 +52,7 @@ static void *thread_fn(void *arg)
 {
        struct thread_data *td = arg;
        ssize_t ret;
-       int go;
+       int go = 0;
 
        if (thread_init(td))
                return NULL;
index 3b97ac018d5aac5d6505924e508a5451888c6cd7..532c95e8fa6b492937618490fa0dd883a2556c9b 100644 (file)
 #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
                             PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
 
+#if defined(__s390x__)
+/* Return true if kvm module is available and loaded. Test this
+ * and retun success when trace point kvm_s390_create_vm
+ * exists. Otherwise this test always fails.
+ */
+static bool kvm_s390_create_vm_valid(void)
+{
+       char *eventfile;
+       bool rc = false;
+
+       eventfile = get_events_file("kvm-s390");
+
+       if (eventfile) {
+               DIR *mydir = opendir(eventfile);
+
+               if (mydir) {
+                       rc = true;
+                       closedir(mydir);
+               }
+               put_events_file(eventfile);
+       }
+
+       return rc;
+}
+#endif
+
 static int test__checkevent_tracepoint(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel = perf_evlist__first(evlist);
@@ -1622,6 +1648,7 @@ static struct evlist_test test__events[] = {
        {
                .name  = "kvm-s390:kvm_s390_create_vm",
                .check = test__checkevent_tracepoint,
+               .valid = kvm_s390_create_vm_valid,
                .id    = 100,
        },
 #endif
index cab7b0aea6eabbec7575db5bcf6a2cffbe93a762..f5837f28f3af4875ecc0bf127f7a99238c1feaad 100755 (executable)
@@ -43,7 +43,7 @@ trace_libc_inet_pton_backtrace() {
                eventattr='max-stack=4'
                echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
                echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
-               echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+               echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
                ;;
        *)
                eventattr='max-stack=3'
index 4ce276efe6b4c1855e904a30a2e9efcb6ed01a4e..fe223fc5c1f858a2774d8258ac9a6696471dab5b 100755 (executable)
@@ -29,6 +29,10 @@ if [ $err -ne 0 ] ; then
        exit $err
 fi
 
+# Do not use whatever ~/.perfconfig file, it may change the output
+# via trace.{show_timestamp,show_prefix,etc}
+export PERF_CONFIG=/dev/null
+
 trace_open_vfs_getname
 err=$?
 rm -f ${file}
index 1be3b4cf082708194ca7c4c8403219eedb924c3d..82346ca06f17118edbbeba12453fb8ade1f8c6c3 100644 (file)
@@ -22,7 +22,7 @@
 static size_t ioctl__scnprintf_tty_cmd(int nr, int dir, char *bf, size_t size)
 {
        static const char *ioctl_tty_cmd[] = {
-       "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
+       [_IOC_NR(TCGETS)] = "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
        "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL", "TIOCSCTTY",
        "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI", "TIOCGWINSZ", "TIOCSWINSZ",
        "TIOCMGET", "TIOCMBIS", "TIOCMBIC", "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR",
index 1d00e5ec7906ebaf23b57a5d9e1471ed1cbab465..a3c255228d62469e485fd17589b651bc0d33e85f 100644 (file)
@@ -96,11 +96,12 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
        struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
        struct annotation *notes = browser__annotation(browser);
        struct annotation_line *al = list_entry(entry, struct annotation_line, node);
+       const bool is_current_entry = ui_browser__is_current_entry(browser, row);
        struct annotation_write_ops ops = {
                .first_line              = row == 0,
-               .current_entry           = ui_browser__is_current_entry(browser, row),
+               .current_entry           = is_current_entry,
                .change_color            = (!notes->options->hide_src_code &&
-                                           (!ops.current_entry ||
+                                           (!is_current_entry ||
                                             (browser->use_navkeypressed &&
                                              !browser->navkeypressed))),
                .width                   = browser->width,
index a96f62ca984ac8077fe4171bc481ec48fcc2ed43..692d2fa31c351b0ea681ffc1b285b05f591bf31c 100644 (file)
@@ -633,7 +633,11 @@ int hist_browser__run(struct hist_browser *browser, const char *help,
                switch (key) {
                case K_TIMER: {
                        u64 nr_entries;
-                       hbt->timer(hbt->arg);
+
+                       WARN_ON_ONCE(!hbt);
+
+                       if (hbt)
+                               hbt->timer(hbt->arg);
 
                        if (hist_browser__has_filter(browser) ||
                            symbol_conf.report_hierarchy)
@@ -2707,7 +2711,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 {
        struct hists *hists = evsel__hists(evsel);
        struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts);
-       struct branch_info *bi;
+       struct branch_info *bi = NULL;
 #define MAX_OPTIONS  16
        char *options[MAX_OPTIONS];
        struct popup_action actions[MAX_OPTIONS];
@@ -2973,7 +2977,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                        goto skip_annotation;
 
                if (sort__mode == SORT_MODE__BRANCH) {
-                       bi = browser->he_selection->branch_info;
+
+                       if (browser->he_selection)
+                               bi = browser->he_selection->branch_info;
 
                        if (bi == NULL)
                                goto skip_annotation;
@@ -3144,7 +3150,8 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
 
                switch (key) {
                case K_TIMER:
-                       hbt->timer(hbt->arg);
+                       if (hbt)
+                               hbt->timer(hbt->arg);
 
                        if (!menu->lost_events_warned &&
                            menu->lost_events &&
index dfee110b3a589d998287b3ec1bdd713dbd65a40c..daea1fdf738566891175747d9b54c7903a9f3f2e 100644 (file)
@@ -911,9 +911,8 @@ static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
        if (sym == NULL)
                return 0;
        src = symbol__hists(sym, evsel->evlist->nr_entries);
-       if (src == NULL)
-               return -ENOMEM;
-       return __symbol__inc_addr_samples(sym, map, src, evsel->idx, addr, sample);
+       return (src) ?  __symbol__inc_addr_samples(sym, map, src, evsel->idx,
+                                                  addr, sample) : 0;
 }
 
 static int symbol__account_cycles(u64 addr, u64 start,
@@ -1080,16 +1079,14 @@ static int disasm_line__parse(char *line, const char **namep, char **rawp)
        *namep = strdup(name);
 
        if (*namep == NULL)
-               goto out_free_name;
+               goto out;
 
        (*rawp)[0] = tmp;
        *rawp = ltrim(*rawp);
 
        return 0;
 
-out_free_name:
-       free((void *)namep);
-       *namep = NULL;
+out:
        return -1;
 }
 
index 383674f448fcd67997afe9f07ae0a7fbb48f2f45..f93846edc1e0d463a685f5114e5b798af02a0608 100644 (file)
@@ -701,7 +701,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
        unsigned char *bitmap;
        int last_cpu = cpu_map__cpu(map, map->nr - 1);
 
-       bitmap = zalloc((last_cpu + 7) / 8);
+       if (buf == NULL)
+               return 0;
+
+       bitmap = zalloc(last_cpu / 8 + 1);
        if (bitmap == NULL) {
                buf[0] = '\0';
                return 0;
index b65ad5a273eb1ff1f2579a926e826ba872b9a747..4fad92213609f3923a12daf74df70caf412765e3 100644 (file)
@@ -590,6 +590,9 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
 {
        char bf[128];
 
+       if (!evsel)
+               goto out_unknown;
+
        if (evsel->name)
                return evsel->name;
 
@@ -626,7 +629,10 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
 
        evsel->name = strdup(bf);
 
-       return evsel->name ?: "unknown";
+       if (evsel->name)
+               return evsel->name;
+out_unknown:
+       return "unknown";
 }
 
 const char *perf_evsel__group_name(struct perf_evsel *evsel)
index b9a82598e2ac3970fd298b3c3c44bb875f1c58a2..3c0d74fc1ff26a7b52c7a627cf87561bcae9c06a 100644 (file)
@@ -1114,7 +1114,7 @@ static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 lev
 
        scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
        if (sysfs__read_str(file, &cache->map, &len)) {
-               free(cache->map);
+               free(cache->size);
                free(cache->type);
                return -1;
        }
@@ -1173,7 +1173,7 @@ static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
        return 0;
 }
 
-#define MAX_CACHES 2000
+#define MAX_CACHES (MAX_NR_CPUS * 4)
 
 static int write_cache(struct feat_fd *ff,
                       struct perf_evlist *evlist __maybe_unused)
@@ -2184,8 +2184,10 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
        /* On s390 the socket_id number is not related to the numbers of cpus.
         * The socket_id number might be higher than the numbers of cpus.
         * This depends on the configuration.
+        * AArch64 is the same.
         */
-       if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
+       if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
+                         || !strncmp(ph->env.arch, "aarch64", 7)))
                do_core_id_test = false;
 
        for (i = 0; i < (u32)cpu_nr; i++) {
@@ -3285,6 +3287,13 @@ int perf_session__read_header(struct perf_session *session)
                           data->file.path);
        }
 
+       if (f_header.attr_size == 0) {
+               pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
+                      "Was the 'perf record' command properly terminated?\n",
+                      data->file.path);
+               return -EINVAL;
+       }
+
        nr_attrs = f_header.attrs.size / f_header.attr_size;
        lseek(fd, f_header.attrs.offset, SEEK_SET);
 
@@ -3365,7 +3374,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
        size += sizeof(struct perf_event_header);
        size += ids * sizeof(u64);
 
-       ev = malloc(size);
+       ev = zalloc(size);
 
        if (ev == NULL)
                return -ENOMEM;
@@ -3472,7 +3481,7 @@ int perf_event__process_feature(struct perf_tool *tool,
                return 0;
 
        ff.buf  = (void *)fe->data;
-       ff.size = event->header.size - sizeof(event->header);
+       ff.size = event->header.size - sizeof(*fe);
        ff.ph = &session->header;
 
        if (feat_ops[feat].process(&ff, NULL))
index a1863000e972dbca938ed0003fb3632bf25e35c0..663e790c26936822105d3fcd8cf6d6bf75b52c09 100644 (file)
@@ -394,7 +394,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
        size_t size;
        u16 idr_size;
        const char *sym;
-       uint32_t count;
+       uint64_t count;
        int ret, csize, usize;
        pid_t pid, tid;
        struct {
@@ -417,7 +417,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
                return -1;
 
        filename = event->mmap2.filename;
-       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so",
+       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
                        jd->dir,
                        pid,
                        count);
@@ -530,7 +530,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
                return -1;
 
        filename = event->mmap2.filename;
-       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64,
+       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
                 jd->dir,
                 pid,
                 jr->move.code_index);
index 19262f98cd4e1252c09ca77e6319a02d72db50c0..2344d86cd77868c89f6d6cdcb0d203330aac33da 100644 (file)
@@ -230,14 +230,14 @@ static int detect_kbuild_dir(char **kbuild_dir)
        const char *prefix_dir = "";
        const char *suffix_dir = "";
 
+       /* _UTSNAME_LENGTH is 65 */
+       char release[128];
+
        char *autoconf_path;
 
        int err;
 
        if (!test_dir) {
-               /* _UTSNAME_LENGTH is 65 */
-               char release[128];
-
                err = fetch_kernel_version(NULL, release,
                                           sizeof(release));
                if (err)
index 076718a7b3eaa20c6e9bfdde07d03b5e77f71d64..003b70daf0bfc9310feff31bfc377e822aa7e25a 100644 (file)
@@ -1295,6 +1295,7 @@ static int machine__set_modules_path(struct machine *machine)
        return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
 }
 int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
+                               u64 *size __maybe_unused,
                                const char *name __maybe_unused)
 {
        return 0;
@@ -1306,7 +1307,7 @@ static int machine__create_module(void *arg, const char *name, u64 start,
        struct machine *machine = arg;
        struct map *map;
 
-       if (arch__fix_module_text_start(&start, name) < 0)
+       if (arch__fix_module_text_start(&start, &size, name) < 0)
                return -1;
 
        map = machine__findnew_module_map(machine, start, name);
index ebde3ea70225b04fc74b20781a1fd6a93ff0d762..6f3767808bd92a81b076f645e97daf646921c7a6 100644 (file)
@@ -219,7 +219,7 @@ struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
 
 struct map *machine__findnew_module_map(struct machine *machine, u64 start,
                                        const char *filename);
-int arch__fix_module_text_start(u64 *start, const char *name);
+int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
 
 int machine__load_kallsyms(struct machine *machine, const char *filename);
 
index a28f9b5cc4ffed9c2a4a1a131e2d258b693284da..8b3dafe3fac3a7fa5b70ab7d075491f6c6e3b2c1 100644 (file)
@@ -94,26 +94,49 @@ struct egroup {
        const char *metric_expr;
 };
 
-static struct perf_evsel *find_evsel(struct perf_evlist *perf_evlist,
-                                    const char **ids,
-                                    int idnum,
-                                    struct perf_evsel **metric_events)
+static bool record_evsel(int *ind, struct perf_evsel **start,
+                        int idnum,
+                        struct perf_evsel **metric_events,
+                        struct perf_evsel *ev)
+{
+       metric_events[*ind] = ev;
+       if (*ind == 0)
+               *start = ev;
+       if (++*ind == idnum) {
+               metric_events[*ind] = NULL;
+               return true;
+       }
+       return false;
+}
+
+static struct perf_evsel *find_evsel_group(struct perf_evlist *perf_evlist,
+                                          const char **ids,
+                                          int idnum,
+                                          struct perf_evsel **metric_events)
 {
        struct perf_evsel *ev, *start = NULL;
        int ind = 0;
 
        evlist__for_each_entry (perf_evlist, ev) {
+               if (ev->collect_stat)
+                       continue;
                if (!strcmp(ev->name, ids[ind])) {
-                       metric_events[ind] = ev;
-                       if (ind == 0)
-                               start = ev;
-                       if (++ind == idnum) {
-                               metric_events[ind] = NULL;
+                       if (record_evsel(&ind, &start, idnum,
+                                        metric_events, ev))
                                return start;
-                       }
                } else {
+                       /*
+                        * We saw some other event that is not
+                        * in our list of events. Discard
+                        * the whole match and start again.
+                        */
                        ind = 0;
                        start = NULL;
+                       if (!strcmp(ev->name, ids[ind])) {
+                               if (record_evsel(&ind, &start, idnum,
+                                                metric_events, ev))
+                                       return start;
+                       }
                }
        }
        /*
@@ -143,8 +166,8 @@ static int metricgroup__setup_events(struct list_head *groups,
                        ret = -ENOMEM;
                        break;
                }
-               evsel = find_evsel(perf_evlist, eg->ids, eg->idnum,
-                                  metric_events);
+               evsel = find_evsel_group(perf_evlist, eg->ids, eg->idnum,
+                                        metric_events);
                if (!evsel) {
                        pr_debug("Cannot resolve %s: %s\n",
                                        eg->metric_name, eg->metric_expr);
index 11086097fc9f88f34ef01f28287b8a687361da4f..f016d1b330e543b875d83f27ed803e30e28d21b2 100644 (file)
@@ -1141,6 +1141,9 @@ static void dump_read(struct perf_evsel *evsel, union perf_event *event)
               evsel ? perf_evsel__name(evsel) : "FAIL",
               event->read.value);
 
+       if (!evsel)
+               return;
+
        read_format = evsel->attr.read_format;
 
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
index 99990f5f2512acbe59b0a51ab450fb92c0c6b446..bbb0e042d8e5802a8de01f50e1c2eebbcf02bfe2 100644 (file)
@@ -303,7 +303,7 @@ static struct perf_evsel *perf_stat__find_event(struct perf_evlist *evsel_list,
        struct perf_evsel *c2;
 
        evlist__for_each_entry (evsel_list, c2) {
-               if (!strcasecmp(c2->name, name))
+               if (!strcasecmp(c2->name, name) && !c2->collect_stat)
                        return c2;
        }
        return NULL;
@@ -342,7 +342,8 @@ void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
                        if (leader) {
                                /* Search in group */
                                for_each_group_member (oc, leader) {
-                                       if (!strcasecmp(oc->name, metric_names[i])) {
+                                       if (!strcasecmp(oc->name, metric_names[i]) &&
+                                               !oc->collect_stat) {
                                                found = true;
                                                break;
                                        }
index a0061e0b0fade70868ac6f9ffc90e4d9b0e7e990..6917ba8a002404e00658f91169bf80bb3edd3cc2 100644 (file)
@@ -154,6 +154,15 @@ static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
        evsel->prev_raw_counts = NULL;
 }
 
+static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
+{
+       if (evsel->prev_raw_counts) {
+               evsel->prev_raw_counts->aggr.val = 0;
+               evsel->prev_raw_counts->aggr.ena = 0;
+               evsel->prev_raw_counts->aggr.run = 0;
+       }
+}
+
 static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
 {
        int ncpus = perf_evsel__nr_cpus(evsel);
@@ -204,6 +213,14 @@ void perf_evlist__reset_stats(struct perf_evlist *evlist)
        }
 }
 
+void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel)
+               perf_evsel__reset_prev_raw_counts(evsel);
+}
+
 static void zero_per_pkg(struct perf_evsel *counter)
 {
        if (counter->per_pkg_mask)
index 36efb986f7fc640f7d109cec664d6f0e208c1edf..e19abb1635c4e8a3887a277c876f66a69c8f47f5 100644 (file)
@@ -158,6 +158,7 @@ void perf_stat__collect_metric_expr(struct perf_evlist *);
 int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
 void perf_evlist__free_stats(struct perf_evlist *evlist);
 void perf_evlist__reset_stats(struct perf_evlist *evlist);
+void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist);
 
 int perf_stat_process_counter(struct perf_stat_config *config,
                              struct perf_evsel *counter);
index 0715f972a275c56a7f6091da5eddd16c9da13275..91404bacc3df81a6faddf606e807265dc7579757 100644 (file)
@@ -86,6 +86,11 @@ static int prefix_underscores_count(const char *str)
        return tail - str;
 }
 
+void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
+{
+       p->end = c->start;
+}
+
 const char * __weak arch__normalize_symbol_name(const char *name)
 {
        return name;
@@ -212,7 +217,7 @@ void symbols__fixup_end(struct rb_root *symbols)
                curr = rb_entry(nd, struct symbol, rb_node);
 
                if (prev->end == prev->start && prev->end != curr->start)
-                       prev->end = curr->start;
+                       arch__symbols__fixup_end(prev, curr);
        }
 
        /* Last entry */
index f25fae4b5743c76bdc50f515110a0fa448fe7fcc..76ef2facd934591826914abc497cf6a7acf63766 100644 (file)
@@ -349,6 +349,7 @@ const char *arch__normalize_symbol_name(const char *name);
 #define SYMBOL_A 0
 #define SYMBOL_B 1
 
+void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
 int arch__compare_symbol_names(const char *namea, const char *nameb);
 int arch__compare_symbol_names_n(const char *namea, const char *nameb,
                                 unsigned int n);
index 56007a7e0b4d7f6aadd8300e2c8c2023354c29bc..2c146d0c217bea33694a128e43b40249976afe90 100644 (file)
@@ -192,14 +192,24 @@ struct comm *thread__comm(const struct thread *thread)
 
 struct comm *thread__exec_comm(const struct thread *thread)
 {
-       struct comm *comm, *last = NULL;
+       struct comm *comm, *last = NULL, *second_last = NULL;
 
        list_for_each_entry(comm, &thread->comm_list, list) {
                if (comm->exec)
                        return comm;
+               second_last = last;
                last = comm;
        }
 
+       /*
+        * 'last' with no start time might be the parent's comm of a synthesized
+        * thread (created by processing a synthesized fork event). For a main
+        * thread, that is very probably wrong. Prefer a later comm to avoid
+        * that case.
+        */
+       if (second_last && !last->start && thread->pid_ == thread->tid)
+               return second_last;
+
        return last;
 }
 
index 7ffe562e7ae7faf5ee45015bb2cb8581b48b5758..2627b038b6f2a2886ea43b279f308a2516ad2d0a 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _PERF_XYARRAY_H_
 #define _PERF_XYARRAY_H_ 1
 
+#include <linux/compiler.h>
 #include <sys/types.h>
 
 struct xyarray {
@@ -10,7 +11,7 @@ struct xyarray {
        size_t entries;
        size_t max_x;
        size_t max_y;
-       char contents[];
+       char contents[] __aligned(8);
 };
 
 struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
index 1eef0aed64239509795229d522cada4e80edf5a2..08a405593a791ed6c52f921824c7f7925a23cf17 100644 (file)
@@ -306,6 +306,8 @@ int cmd_freq_set(int argc, char **argv)
                                bitmask_setbit(cpus_chosen, cpus->cpu);
                                cpus = cpus->next;
                        }
+                       /* Set the last cpu in related cpus list */
+                       bitmask_setbit(cpus_chosen, cpus->cpu);
                        cpufreq_put_related_cpus(cpus);
                }
        }
index fbb53c952b739d2a04531096b2a3686f64826ec2..71cf7e77291ad0814353bd244c6eb6ff335b127d 100644 (file)
@@ -4953,7 +4953,7 @@ int initialize_counters(int cpu_id)
 
 void allocate_output_buffer()
 {
-       output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
+       output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
        outp = output_buffer;
        if (outp == NULL)
                err(-1, "calloc output buffer");
index 65bbe627a425f6361d1249642ad29847abae0575..2aba622d1c5aa611abab9d05d11d5fa3ead6d29e 100644 (file)
@@ -546,7 +546,7 @@ void cmdline(int argc, char **argv)
 
        progname = argv[0];
 
-       while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw",
+       while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:",
                                long_options, &option_index)) != -1) {
                switch (opt) {
                case 'a':
@@ -1260,6 +1260,15 @@ void probe_dev_msr(void)
                if (system("/sbin/modprobe msr > /dev/null 2>&1"))
                        err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
 }
+
+static void get_cpuid_or_exit(unsigned int leaf,
+                            unsigned int *eax, unsigned int *ebx,
+                            unsigned int *ecx, unsigned int *edx)
+{
+       if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
+               errx(1, "Processor not supported\n");
+}
+
 /*
  * early_cpuid()
  * initialize turbo_is_enabled, has_hwp, has_epb
@@ -1267,15 +1276,10 @@ void probe_dev_msr(void)
  */
 void early_cpuid(void)
 {
-       unsigned int eax, ebx, ecx, edx, max_level;
+       unsigned int eax, ebx, ecx, edx;
        unsigned int fms, family, model;
 
-       __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
-
-       if (max_level < 6)
-               errx(1, "Processor not supported\n");
-
-       __get_cpuid(1, &fms, &ebx, &ecx, &edx);
+       get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
        family = (fms >> 8) & 0xf;
        model = (fms >> 4) & 0xf;
        if (family == 6 || family == 0xf)
@@ -1289,7 +1293,7 @@ void early_cpuid(void)
                bdx_highest_ratio = msr & 0xFF;
        }
 
-       __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+       get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
        turbo_is_enabled = (eax >> 1) & 1;
        has_hwp = (eax >> 7) & 1;
        has_epb = (ecx >> 3) & 1;
@@ -1307,7 +1311,7 @@ void parse_cpuid(void)
 
        eax = ebx = ecx = edx = 0;
 
-       __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
+       get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx);
 
        if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
                genuine_intel = 1;
@@ -1316,7 +1320,7 @@ void parse_cpuid(void)
                fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
                        (char *)&ebx, (char *)&edx, (char *)&ecx);
 
-       __get_cpuid(1, &fms, &ebx, &ecx, &edx);
+       get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
        family = (fms >> 8) & 0xf;
        model = (fms >> 4) & 0xf;
        stepping = fms & 0xf;
@@ -1341,7 +1345,7 @@ void parse_cpuid(void)
                errx(1, "CPUID: no MSR");
 
 
-       __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+       get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
        /* turbo_is_enabled already set */
        /* has_hwp already set */
        has_hwp_notify = eax & (1 << 8);
index 33752e06ff8d0719568d3c07bc56596819995419..3de57cc8716b9fd3baee5d479ad46f8813b53b37 100644 (file)
@@ -12,6 +12,7 @@
  */
 #ifndef __NFIT_TEST_H__
 #define __NFIT_TEST_H__
+#include <linux/acpi.h>
 #include <linux/list.h>
 #include <linux/uuid.h>
 #include <linux/ioport.h>
@@ -234,9 +235,6 @@ struct nd_intel_lss {
        __u32 status;
 } __packed;
 
-union acpi_object;
-typedef void *acpi_handle;
-
 typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
 typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
                 const guid_t *guid, u64 rev, u64 func,
index 5aeaa284fc47493decf0fc55b31de7c8a734ac6d..a680628204108b41d0c046a5371cec9f2cfedc0b 100644 (file)
@@ -41,8 +41,7 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
        }
 
        /* Rewrite destination. */
-       if ((ctx->user_ip6[0] & 0xFFFF) == bpf_htons(0xFACE) &&
-            ctx->user_ip6[0] >> 16 == bpf_htons(0xB00C)) {
+       if (ctx->user_ip6[0] == bpf_htonl(0xFACEB00C)) {
                ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
                ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1);
                ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
index 0575751bc1bc4020535a238bda9d4be9a2503eb8..e2f6ed0a583de8ed793b0095eade70827b33507d 100644 (file)
@@ -61,7 +61,7 @@ struct sr6_tlv_t {
        unsigned char value[0];
 } BPF_PACKET_HEADER;
 
-__attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
+static __always_inline struct ip6_srh_t *get_srh(struct __sk_buff *skb)
 {
        void *cursor, *data_end;
        struct ip6_srh_t *srh;
@@ -95,7 +95,7 @@ __attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
        return srh;
 }
 
-__attribute__((always_inline))
+static __always_inline
 int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
                   uint32_t old_pad, uint32_t pad_off)
 {
@@ -125,7 +125,7 @@ int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
        return 0;
 }
 
-__attribute__((always_inline))
+static __always_inline
 int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
                          uint32_t *tlv_off, uint32_t *pad_size,
                          uint32_t *pad_off)
@@ -184,7 +184,7 @@ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
        return 0;
 }
 
-__attribute__((always_inline))
+static __always_inline
 int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
            struct sr6_tlv_t *itlv, uint8_t tlv_size)
 {
@@ -228,7 +228,7 @@ int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
        return update_tlv_pad(skb, new_pad, pad_size, pad_off);
 }
 
-__attribute__((always_inline))
+static __always_inline
 int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
               uint32_t tlv_off)
 {
@@ -266,7 +266,7 @@ int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
        return update_tlv_pad(skb, new_pad, pad_size, pad_off);
 }
 
-__attribute__((always_inline))
+static __always_inline
 int has_egr_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh)
 {
        int tlv_offset = sizeof(struct ip6_t) + sizeof(struct ip6_srh_t) +
index b8ebe2f580741a5cb4aae39e604affc313b31ff4..e9567122070a33fed692fe45a1c3acbce155e705 100644 (file)
@@ -13,6 +13,7 @@
 #include <bpf/bpf.h>
 
 #include "cgroup_helpers.h"
+#include "bpf_endian.h"
 #include "bpf_rlimit.h"
 #include "bpf_util.h"
 
@@ -231,7 +232,8 @@ static struct sock_test tests[] = {
                        /* if (ip == expected && port == expected) */
                        BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                                    offsetof(struct bpf_sock, src_ip6[3])),
-                       BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+                                   __bpf_constant_ntohl(0x00000001), 4),
                        BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                                    offsetof(struct bpf_sock, src_port)),
                        BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
@@ -260,7 +262,8 @@ static struct sock_test tests[] = {
                        /* if (ip == expected && port == expected) */
                        BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                                    offsetof(struct bpf_sock, src_ip4)),
-                       BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+                                   __bpf_constant_ntohl(0x7F000001), 4),
                        BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                                    offsetof(struct bpf_sock, src_port)),
                        BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
index 14c9fe2848062f0c2a8c37004f087d07b1d976f6..075cb0c730149907414f0aa7b1f2957d7e8361e9 100644 (file)
@@ -181,8 +181,7 @@ int cg_find_unified_root(char *root, size_t len)
                strtok(NULL, delim);
                strtok(NULL, delim);
 
-               if (strcmp(fs, "cgroup") == 0 &&
-                   strcmp(type, "cgroup2") == 0) {
+               if (strcmp(type, "cgroup2") == 0) {
                        strncpy(root, mount, len);
                        return 0;
                }
diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
new file mode 100644 (file)
index 0000000..63ed533
--- /dev/null
@@ -0,0 +1,3 @@
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+CONFIG_KVM_AMD=y
index a3122f1949a8e67df2d6cb206ecf353d30f6d015..4d35eba73dc97d45898a1a597a2460f72790ed7c 100644 (file)
@@ -809,9 +809,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
         TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
                 r);
 
-       r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
-        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
-                r);
+       if (kvm_check_cap(KVM_CAP_XCRS)) {
+               r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
+               TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
+                           r);
+       }
 
        r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
@@ -858,9 +860,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
                 r);
 
-       r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
-        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
-                r);
+       if (kvm_check_cap(KVM_CAP_XCRS)) {
+               r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
+               TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
+                           r);
+       }
 
        r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
index 3764e71212650b9f5f4f390f72c0b792d6035185..65db510dddc34b6f86be6c764123e03619f0eaba 100644 (file)
@@ -100,8 +100,8 @@ int main(int argc, char *argv[])
        msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
        vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
                msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
-       test_msr_platform_info_disabled(vm);
        test_msr_platform_info_enabled(vm);
+       test_msr_platform_info_disabled(vm);
        vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
 
        kvm_vm_free(vm);
index 1ba069967fa2b653f905167b5d9fb9282e263f66..ba2d9fab28d0ff225c4fb543355cfda449c2a5f1 100755 (executable)
@@ -15,6 +15,7 @@ GW_IP6=2001:db8:1::2
 SRC_IP6=2001:db8:1::3
 
 DEV_ADDR=192.51.100.1
+DEV_ADDR6=2001:db8:1::1
 DEV=dummy0
 
 log_test()
@@ -55,8 +56,8 @@ setup()
 
        $IP link add dummy0 type dummy
        $IP link set dev dummy0 up
-       $IP address add 192.51.100.1/24 dev dummy0
-       $IP -6 address add 2001:db8:1::1/64 dev dummy0
+       $IP address add $DEV_ADDR/24 dev dummy0
+       $IP -6 address add $DEV_ADDR6/64 dev dummy0
 
        set +e
 }
index cca2baa03fb81b12f393a1df566dbe0494cabe27..a8d8e8b3dc819f3abc90060eaa0d312632c2145f 100755 (executable)
@@ -93,18 +93,10 @@ sw1_create()
        ip route add vrf v$ol1 192.0.2.16/28 \
           nexthop dev g1a \
           nexthop dev g1b
-
-       tc qdisc add dev $ul1 clsact
-       tc filter add dev $ul1 egress pref 111 prot ipv4 \
-          flower dst_ip 192.0.2.66 action pass
-       tc filter add dev $ul1 egress pref 222 prot ipv4 \
-          flower dst_ip 192.0.2.82 action pass
 }
 
 sw1_destroy()
 {
-       tc qdisc del dev $ul1 clsact
-
        ip route del vrf v$ol1 192.0.2.16/28
 
        ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
@@ -139,10 +131,18 @@ sw2_create()
        ip route add vrf v$ol2 192.0.2.0/28 \
           nexthop dev g2a \
           nexthop dev g2b
+
+       tc qdisc add dev $ul2 clsact
+       tc filter add dev $ul2 ingress pref 111 prot 802.1Q \
+          flower vlan_id 111 action pass
+       tc filter add dev $ul2 ingress pref 222 prot 802.1Q \
+          flower vlan_id 222 action pass
 }
 
 sw2_destroy()
 {
+       tc qdisc del dev $ul2 clsact
+
        ip route del vrf v$ol2 192.0.2.0/28
 
        ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
@@ -187,12 +187,16 @@ setup_prepare()
        sw1_create
        sw2_create
        h2_create
+
+       forwarding_enable
 }
 
 cleanup()
 {
        pre_cleanup
 
+       forwarding_restore
+
        h2_destroy
        sw2_destroy
        sw1_destroy
@@ -211,15 +215,15 @@ multipath4_test()
           nexthop dev g1a weight $weight1 \
           nexthop dev g1b weight $weight2
 
-       local t0_111=$(tc_rule_stats_get $ul1 111 egress)
-       local t0_222=$(tc_rule_stats_get $ul1 222 egress)
+       local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
+       local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
 
        ip vrf exec v$h1 \
           $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
               -d 1msec -t udp "sp=1024,dp=0-32768"
 
-       local t1_111=$(tc_rule_stats_get $ul1 111 egress)
-       local t1_222=$(tc_rule_stats_get $ul1 222 egress)
+       local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
+       local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
 
        local d111=$((t1_111 - t0_111))
        local d222=$((t1_222 - t0_222))
index e279051bc6312c132999e00a13b37c2555e6b2cb..270c17ab071e46506f9a705ecdac4c4994c62484 100644 (file)
@@ -90,12 +90,9 @@ struct testcase testcases_v4[] = {
                .tfail = true,
        },
        {
-               /* send a single MSS: will fail with GSO, because the segment
-                * logic in udp4_ufo_fragment demands a gso skb to be > MTU
-                */
+               /* send a single MSS: will fall back to no GSO */
                .tlen = CONST_MSS_V4,
                .gso_len = CONST_MSS_V4,
-               .tfail = true,
                .r_num_mss = 1,
        },
        {
@@ -140,10 +137,9 @@ struct testcase testcases_v4[] = {
                .tfail = true,
        },
        {
-               /* send a single 1B MSS: will fail, see single MSS above */
+               /* send a single 1B MSS: will fall back to no GSO */
                .tlen = 1,
                .gso_len = 1,
-               .tfail = true,
                .r_num_mss = 1,
        },
        {
@@ -197,12 +193,9 @@ struct testcase testcases_v6[] = {
                .tfail = true,
        },
        {
-               /* send a single MSS: will fail with GSO, because the segment
-                * logic in udp4_ufo_fragment demands a gso skb to be > MTU
-                */
+               /* send a single MSS: will fall back to no GSO */
                .tlen = CONST_MSS_V6,
                .gso_len = CONST_MSS_V6,
-               .tfail = true,
                .r_num_mss = 1,
        },
        {
@@ -247,10 +240,9 @@ struct testcase testcases_v6[] = {
                .tfail = true,
        },
        {
-               /* send a single 1B MSS: will fail, see single MSS above */
+               /* send a single 1B MSS: will fall back to no GSO */
                .tlen = 1,
                .gso_len = 1,
-               .tfail = true,
                .r_num_mss = 1,
        },
        {
index 748f6a60bb1e00070dbc61a895ba19e4370b476f..138c18cefb5294ed5c99136bc2297378874266a4 100644 (file)
@@ -11,6 +11,9 @@ datafile_y = initramfs_data.cpio$(suffix_y)
 datafile_d_y = .$(datafile_y).d
 AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
 
+# clean rules do not have CONFIG_INITRAMFS_COMPRESSION.  So clean up after all
+# possible compression formats.
+clean-files += initramfs_data.cpio*
 
 # Generate builtin.o based on initramfs_data.o
 obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
index 02bac8abd206fd5f6ad357306d82fb721efc17d9..d982650deb33fe7a25fcdab08835503cb5154cb4 100644 (file)
@@ -338,6 +338,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
        kvm_timer_schedule(vcpu);
+       /*
+        * If we're about to block (most likely because we've just hit a
+        * WFI), we need to sync back the state of the GIC CPU interface
+        * so that we have the latest PMR and group enables. This ensures
+        * that kvm_arch_vcpu_runnable has up-to-date data to decide
+        * whether we have pending interrupts.
+        */
+       preempt_disable();
+       kvm_vgic_vmcr_sync(vcpu);
+       preempt_enable();
+
        kvm_vgic_v4_enable_doorbell(vcpu);
 }
 
index 08443a15e6be8f2e987bf02038dc43863b3c7275..3caee91bca08907ad7aa18a822e1cb40525ae279 100644 (file)
@@ -98,6 +98,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
        unsigned int len;
        int mask;
 
+       /* Detect an already handled MMIO return */
+       if (unlikely(!vcpu->mmio_needed))
+               return 0;
+
+       vcpu->mmio_needed = 0;
+
        if (!run->mmio.is_write) {
                len = run->mmio.len;
                if (len > sizeof(unsigned long))
@@ -200,6 +206,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
        run->mmio.is_write      = is_write;
        run->mmio.phys_addr     = fault_ipa;
        run->mmio.len           = len;
+       vcpu->mmio_needed       = 1;
 
        if (!ret) {
                /* We handled the access successfully in the kernel. */
index 8196e4f8731fbfbd8d8da5b28a15f9145f504a4c..cd75df25fe14060f6578405ae82b1556bea07765 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/cpu.h>
 #include <linux/kvm_host.h>
 #include <kvm/arm_vgic.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include "vgic.h"
 
@@ -175,12 +176,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
                irq->vcpu = NULL;
                irq->target_vcpu = vcpu0;
                kref_init(&irq->refcount);
-               if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+               switch (dist->vgic_model) {
+               case KVM_DEV_TYPE_ARM_VGIC_V2:
                        irq->targets = 0;
                        irq->group = 0;
-               } else {
+                       break;
+               case KVM_DEV_TYPE_ARM_VGIC_V3:
                        irq->mpidr = 0;
                        irq->group = 1;
+                       break;
+               default:
+                       kfree(dist->spis);
+                       return -EINVAL;
                }
        }
        return 0;
@@ -220,7 +227,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
                irq->intid = i;
                irq->vcpu = NULL;
                irq->target_vcpu = vcpu;
-               irq->targets = 1U << vcpu->vcpu_id;
                kref_init(&irq->refcount);
                if (vgic_irq_is_sgi(i)) {
                        /* SGIs */
@@ -230,11 +236,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
                        /* PPIs */
                        irq->config = VGIC_CONFIG_LEVEL;
                }
-
-               if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
-                       irq->group = 1;
-               else
-                       irq->group = 0;
        }
 
        if (!irqchip_in_kernel(vcpu->kvm))
@@ -297,10 +298,19 @@ int vgic_init(struct kvm *kvm)
 
                for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
                        struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
-                       if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+                       switch (dist->vgic_model) {
+                       case KVM_DEV_TYPE_ARM_VGIC_V3:
                                irq->group = 1;
-                       else
+                               irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+                               break;
+                       case KVM_DEV_TYPE_ARM_VGIC_V2:
                                irq->group = 0;
+                               irq->targets = 1U << idx;
+                               break;
+                       default:
+                               ret = -EINVAL;
+                               goto out;
+                       }
                }
        }
 
index ceeda7e04a4d9aa57932adeb1140e9d9348e2a0d..762f81900529eef4051d42517aa117875927d565 100644 (file)
@@ -203,6 +203,12 @@ static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
        vgic_irq_set_phys_active(irq, true);
 }
 
+static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
+{
+       return (vgic_irq_is_sgi(irq->intid) &&
+               vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
+}
+
 void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
@@ -215,6 +221,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+               /* GICD_ISPENDR0 SGI bits are WI */
+               if (is_vgic_v2_sgi(vcpu, irq)) {
+                       vgic_put_irq(vcpu->kvm, irq);
+                       continue;
+               }
+
                spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw)
                        vgic_hw_irq_spending(vcpu, irq, is_uaccess);
@@ -262,6 +274,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+               /* GICD_ICPENDR0 SGI bits are WI */
+               if (is_vgic_v2_sgi(vcpu, irq)) {
+                       vgic_put_irq(vcpu->kvm, irq);
+                       continue;
+               }
+
                spin_lock_irqsave(&irq->irq_lock, flags);
 
                if (irq->hw)
index 69b892abd7dc6faec17e820fe6f4c0f95800ecb3..91b14dfacd1dd72fe80b12b3c88deaa0434af5c7 100644 (file)
@@ -195,7 +195,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
                if (vgic_irq_is_sgi(irq->intid)) {
                        u32 src = ffs(irq->source);
 
-                       BUG_ON(!src);
+                       if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+                                          irq->intid))
+                               return;
+
                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));
                        if (irq->source) {
@@ -495,10 +498,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
                       kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
 
-void vgic_v2_put(struct kvm_vcpu *vcpu)
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
 {
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 
        cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
+}
+
+void vgic_v2_put(struct kvm_vcpu *vcpu)
+{
+       struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+
+       vgic_v2_vmcr_sync(vcpu);
        cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
index 3f2350a4d4ab8375ad3d768a557c3aaa067785d3..8b958ed05306efbac2dd40f6baef26c793d317e0 100644 (file)
@@ -179,7 +179,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
                    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                        u32 src = ffs(irq->source);
 
-                       BUG_ON(!src);
+                       if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+                                          irq->intid))
+                               return;
+
                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));
                        if (irq->source) {
@@ -674,12 +677,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
                __vgic_v3_activate_traps(vcpu);
 }
 
-void vgic_v3_put(struct kvm_vcpu *vcpu)
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
 {
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
        if (likely(cpu_if->vgic_sre))
                cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
+}
+
+void vgic_v3_put(struct kvm_vcpu *vcpu)
+{
+       vgic_v3_vmcr_sync(vcpu);
 
        kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
 
index c5165e3b80cbea92fe813f2416d3a8b1d84ba39f..4040a33cdc9028482dc83864896d66db3d63ff69 100644 (file)
@@ -244,6 +244,13 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
        bool penda, pendb;
        int ret;
 
+       /*
+        * list_sort may call this function with the same element when
+        * the list is fairly long.
+        */
+       if (unlikely(irqa == irqb))
+               return 0;
+
        spin_lock(&irqa->irq_lock);
        spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
 
@@ -902,6 +909,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
                vgic_v3_put(vcpu);
 }
 
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
+{
+       if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+               return;
+
+       if (kvm_vgic_global_state.type == VGIC_V2)
+               vgic_v2_vmcr_sync(vcpu);
+       else
+               vgic_v3_vmcr_sync(vcpu);
+}
+
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
index a90024718ca44b941e8b4d7aa0f6bf20233c4596..d5e4542799252d35496c7373a209903ff448c59d 100644 (file)
@@ -204,6 +204,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 void vgic_v2_init_lrs(void);
 void vgic_v2_load(struct kvm_vcpu *vcpu);
 void vgic_v2_put(struct kvm_vcpu *vcpu);
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
 
 void vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
@@ -234,6 +235,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
 
 void vgic_v3_load(struct kvm_vcpu *vcpu);
 void vgic_v3_put(struct kvm_vcpu *vcpu);
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
 
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
index 9e65feb6fa58d75988e969954aaa5d0c9c99fed0..b9336693c87e6f07b05b77027c28dab9b3708bec 100644 (file)
@@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
        return 1;
 }
 
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
 {
        struct kvm_coalesced_mmio_ring *ring;
        unsigned avail;
@@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
         * there is always one unused entry in the buffer
         */
        ring = dev->kvm->coalesced_mmio_ring;
-       avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+       avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
        if (avail == 0) {
                /* full */
                return 0;
@@ -67,24 +67,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 {
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+       __u32 insert;
 
        if (!coalesced_mmio_in_range(dev, addr, len))
                return -EOPNOTSUPP;
 
        spin_lock(&dev->kvm->ring_lock);
 
-       if (!coalesced_mmio_has_room(dev)) {
+       insert = READ_ONCE(ring->last);
+       if (!coalesced_mmio_has_room(dev, insert) ||
+           insert >= KVM_COALESCED_MMIO_MAX) {
                spin_unlock(&dev->kvm->ring_lock);
                return -EOPNOTSUPP;
        }
 
        /* copy data in first free entry of the ring */
 
-       ring->coalesced_mmio[ring->last].phys_addr = addr;
-       ring->coalesced_mmio[ring->last].len = len;
-       memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+       ring->coalesced_mmio[insert].phys_addr = addr;
+       ring->coalesced_mmio[insert].len = len;
+       memcpy(ring->coalesced_mmio[insert].data, val, len);
        smp_wmb();
-       ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+       ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
        spin_unlock(&dev->kvm->ring_lock);
        return 0;
 }
index b20b751286fc612214c59c95e787c9fb0fac50b7..757a17f5ebdebd06043fddccae055b9e2090d5b5 100644 (file)
 
 static struct workqueue_struct *irqfd_cleanup_wq;
 
+bool __attribute__((weak))
+kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
+{
+       return true;
+}
+
 static void
 irqfd_inject(struct work_struct *work)
 {
@@ -297,6 +303,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
        if (!kvm_arch_intc_initialized(kvm))
                return -EAGAIN;
 
+       if (!kvm_arch_irqfd_allowed(kvm, args))
+               return -EINVAL;
+
        irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
        if (!irqfd)
                return -ENOMEM;
index 2b36a51afb5764f485ca7918c67675b58ad6883f..4a584a57522161ff6e389f8a55e21b600a50694f 100644 (file)
@@ -2317,6 +2317,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Unlike kvm_arch_vcpu_runnable, this function is called outside
+ * a vcpu_load/vcpu_put pair.  However, for most architectures
+ * kvm_arch_vcpu_runnable does not require vcpu_load.
+ */
+bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+       return kvm_arch_vcpu_runnable(vcpu);
+}
+
+static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
+{
+       if (kvm_arch_dy_runnable(vcpu))
+               return true;
+
+#ifdef CONFIG_KVM_ASYNC_PF
+       if (!list_empty_careful(&vcpu->async_pf.done))
+               return true;
+#endif
+
+       return false;
+}
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 {
        struct kvm *kvm = me->kvm;
@@ -2346,7 +2369,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
                                continue;
                        if (vcpu == me)
                                continue;
-                       if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+                       if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
                                continue;
                        if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
                                continue;