author      LCPD Auto Merger  2017-05-22 13:05:30 -0500
committer   LCPD Auto Merger  2017-05-22 13:05:30 -0500
commit      7c9c2d615893e76c47ded45ed48c3564162a2d96 (patch)
tree        7a2f211e0afb5a83e8544c544baea7ca5589c9bf
parent      1c0a81bb88b2adba0f3ef709ac7f416053d2a5a7 (diff)
parent      33c4c2a6d1e50c546c11643804051da08248dd7d (diff)
download    kernel-audio-7c9c2d615893e76c47ded45ed48c3564162a2d96.tar.gz
            kernel-audio-7c9c2d615893e76c47ded45ed48c3564162a2d96.tar.xz
            kernel-audio-7c9c2d615893e76c47ded45ed48c3564162a2d96.zip
Merge branch 'android-4.9' of https://android.googlesource.com/kernel/common into ti-android-linux-4.9.y
TI-Feature: google_android
TI-Tree: https://android.googlesource.com/kernel/common
TI-Branch: android-4.9

* 'android-4.9' of https://android.googlesource.com/kernel/common: (84 commits)
  Linux 4.9.29
  pstore: Shut down worker when unregistering
  pstore: Fix flags to enable dumps on powerpc
  libnvdimm, pfn: fix 'npfns' vs section alignment
  libnvdimm, pmem: fix a NULL pointer BUG in nd_pmem_notify
  libnvdimm, region: fix flush hint detection crash
  ipmi: Fix kernel panic at ipmi_ssif_thread()
  Bluetooth: hci_intel: add missing tty-device sanity check
  Bluetooth: hci_bcm: add missing tty-device sanity check
  Bluetooth: Fix user channel for 32bit userspace on 64bit kernel
  tty: pty: Fix ldisc flush after userspace become aware of the data already
  serial: omap: suspend device on probe errors
  serial: omap: fix runtime-pm handling on unbind
  serial: samsung: Use right device for DMA-mapping calls
  fscrypt: fix context consistency check when key(s) unavailable
  device-dax: fix cdev leak
  padata: free correct variable
  CIFS: add misssing SFM mapping for doublequote
  cifs: fix CIFS_IOC_GET_MNT_INFO oops
  CIFS: fix oplock break deadlocks
  ...

Signed-off-by: LCPD Auto Merger <lcpd_integration@list.ti.com>
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/kvm/psci.c | 8
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 6
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 8
-rw-r--r--  arch/powerpc/kernel/nvram_64.c | 1
-rw-r--r--  arch/x86/boot/boot.h | 2
-rw-r--r--  arch/x86/events/intel/rapl.c | 2
-rw-r--r--  arch/x86/include/asm/pmem.h | 2
-rw-r--r--  arch/x86/kvm/x86.c | 12
-rw-r--r--  arch/x86/um/ptrace_64.c | 2
-rw-r--r--  arch/x86/xen/mmu.c | 7
-rw-r--r--  block/blk-integrity.c | 3
-rw-r--r--  crypto/algif_aead.c | 157
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 5
-rw-r--r--  drivers/bluetooth/hci_intel.c | 13
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 4
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 120
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 114
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 8
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c | 2
-rw-r--r--  drivers/dax/dax.c | 15
-rw-r--r--  drivers/infiniband/core/sysfs.c | 2
-rw-r--r--  drivers/infiniband/core/verbs.c | 8
-rw-r--r--  drivers/infiniband/hw/hfi1/ruc.c | 26
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.h | 6
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_fs.c | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 44
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 3
-rw-r--r--  drivers/md/dm-era-target.c | 8
-rw-r--r--  drivers/md/dm-rq.c | 6
-rw-r--r--  drivers/md/dm-thin.c | 1
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 6
-rw-r--r--  drivers/nvdimm/pmem.c | 37
-rw-r--r--  drivers/nvdimm/region_devs.c | 11
-rw-r--r--  drivers/staging/android/lowmemorykiller.c | 1
-rw-r--r--  drivers/staging/comedi/drivers/jr3_pci.c | 13
-rw-r--r--  drivers/staging/gdm724x/gdm_mux.c | 3
-rw-r--r--  drivers/staging/vt6656/usbpipe.c | 31
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 1
-rw-r--r--  drivers/target/target_core_file.c | 3
-rw-r--r--  drivers/target/target_core_sbc.c | 5
-rw-r--r--  drivers/tty/pty.c | 7
-rw-r--r--  drivers/tty/serial/omap-serial.c | 9
-rw-r--r--  drivers/tty/serial/samsung.c | 9
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 103
-rw-r--r--  drivers/usb/core/driver.c | 21
-rw-r--r--  drivers/usb/core/file.c | 9
-rw-r--r--  drivers/usb/core/hub.c | 11
-rw-r--r--  drivers/usb/gadget/Kconfig | 1
-rw-r--r--  drivers/usb/gadget/function/f_mtp.c | 18
-rw-r--r--  drivers/usb/host/xhci-mem.c | 13
-rw-r--r--  drivers/usb/host/xhci-pci.c | 3
-rw-r--r--  drivers/usb/host/xhci.h | 1
-rw-r--r--  drivers/usb/misc/usbtest.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 6
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 102
-rw-r--r--  fs/block_dev.c | 11
-rw-r--r--  fs/ceph/xattr.c | 3
-rw-r--r--  fs/cifs/cifs_unicode.c | 6
-rw-r--r--  fs/cifs/cifs_unicode.h | 5
-rw-r--r--  fs/cifs/cifsfs.c | 15
-rw-r--r--  fs/cifs/cifsglob.h | 1
-rw-r--r--  fs/cifs/cifssmb.c | 3
-rw-r--r--  fs/cifs/connect.c | 14
-rw-r--r--  fs/cifs/ioctl.c | 4
-rw-r--r--  fs/cifs/misc.c | 2
-rw-r--r--  fs/cifs/smb2misc.c | 5
-rw-r--r--  fs/cifs/smb2ops.c | 1
-rw-r--r--  fs/cifs/smb2pdu.c | 14
-rw-r--r--  fs/crypto/policy.c | 87
-rw-r--r--  fs/ext4/inode.c | 5
-rw-r--r--  fs/orangefs/inode.c | 3
-rw-r--r--  fs/orangefs/namei.c | 2
-rw-r--r--  fs/orangefs/xattr.c | 26
-rw-r--r--  fs/pstore/platform.c | 10
-rw-r--r--  fs/sdcardfs/dentry.c | 15
-rw-r--r--  fs/sdcardfs/derived_perm.c | 130
-rw-r--r--  fs/sdcardfs/inode.c | 54
-rw-r--r--  fs/sdcardfs/lookup.c | 5
-rw-r--r--  fs/sdcardfs/main.c | 8
-rw-r--r--  fs/sdcardfs/packagelist.c | 2
-rw-r--r--  fs/sdcardfs/sdcardfs.h | 106
-rw-r--r--  fs/sdcardfs/super.c | 49
-rw-r--r--  fs/xattr.c | 2
-rw-r--r--  kernel/padata.c | 2
-rw-r--r--  mm/page_alloc.c | 3
-rw-r--r--  net/bluetooth/hci_sock.c | 3
-rw-r--r--  net/core/datagram.c | 13
-rw-r--r--  tools/perf/util/auxtrace.c | 4
-rw-r--r--  tools/testing/selftests/x86/ldt_gdt.c | 46
97 files changed, 1113 insertions, 583 deletions
diff --git a/Makefile b/Makefile
index 9460a63087b8..c12e501a18b8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 28
+SUBLEVEL = 29
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index c2b131527a64..a08d7a93aebb 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -208,9 +208,10 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
-    int ret = 1;
+    struct kvm *kvm = vcpu->kvm;
     unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
     unsigned long val;
+    int ret = 1;
 
     switch (psci_fn) {
     case PSCI_0_2_FN_PSCI_VERSION:
@@ -230,7 +231,9 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
         break;
     case PSCI_0_2_FN_CPU_ON:
     case PSCI_0_2_FN64_CPU_ON:
+        mutex_lock(&kvm->lock);
         val = kvm_psci_vcpu_on(vcpu);
+        mutex_unlock(&kvm->lock);
         break;
     case PSCI_0_2_FN_AFFINITY_INFO:
     case PSCI_0_2_FN64_AFFINITY_INFO:
@@ -279,6 +282,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
+    struct kvm *kvm = vcpu->kvm;
     unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
     unsigned long val;
 
@@ -288,7 +292,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
         val = PSCI_RET_SUCCESS;
         break;
     case KVM_PSCI_FN_CPU_ON:
+        mutex_lock(&kvm->lock);
         val = kvm_psci_vcpu_on(vcpu);
+        mutex_unlock(&kvm->lock);
         break;
     default:
         val = PSCI_RET_NOT_SUPPORTED;
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f5ea0ba70f07..fe39e6841326 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -240,6 +240,12 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
     return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
+static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+{
+    u32 esr = kvm_vcpu_get_hsr(vcpu);
+    return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+}
+
 static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 {
     return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 87e7e6608cd8..7cee552ce0bf 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1573,8 +1573,8 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 {
     struct sys_reg_params params;
     u32 hsr = kvm_vcpu_get_hsr(vcpu);
-    int Rt = (hsr >> 5) & 0xf;
-    int Rt2 = (hsr >> 10) & 0xf;
+    int Rt = kvm_vcpu_sys_get_rt(vcpu);
+    int Rt2 = (hsr >> 10) & 0x1f;
 
     params.is_aarch32 = true;
     params.is_32bit = false;
@@ -1625,7 +1625,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 {
     struct sys_reg_params params;
     u32 hsr = kvm_vcpu_get_hsr(vcpu);
-    int Rt = (hsr >> 5) & 0xf;
+    int Rt = kvm_vcpu_sys_get_rt(vcpu);
 
     params.is_aarch32 = true;
     params.is_32bit = true;
@@ -1740,7 +1740,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
     struct sys_reg_params params;
     unsigned long esr = kvm_vcpu_get_hsr(vcpu);
-    int Rt = (esr >> 5) & 0x1f;
+    int Rt = kvm_vcpu_sys_get_rt(vcpu);
     int ret;
 
     trace_kvm_handle_sys_reg(esr);
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 34d2c595de23..73622673eee3 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -561,6 +561,7 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
 static struct pstore_info nvram_pstore_info = {
     .owner = THIS_MODULE,
     .name = "nvram",
+    .flags = PSTORE_FLAGS_DMESG,
     .open = nvram_pstore_open,
     .read = nvram_pstore_read,
     .write = nvram_pstore_write,
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index e5612f3e3b57..d7ac721a8a96 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -16,7 +16,7 @@
 #ifndef BOOT_BOOT_H
 #define BOOT_BOOT_H
 
-#define STACK_SIZE 512 /* Minimum number of bytes for stack */
+#define STACK_SIZE 1024 /* Minimum number of bytes for stack */
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 0a535cea8ff3..8b902b67342a 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -759,7 +759,7 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
 
     X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init),
     X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init),
-    X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsw_rapl_init),
+    X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsx_rapl_init),
     X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
 
     X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 529bb4a6487a..e2904373010d 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -103,7 +103,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 
     if (bytes < 8) {
         if (!IS_ALIGNED(dest, 4) || (bytes != 4))
-            arch_wb_cache_pmem(addr, 1);
+            arch_wb_cache_pmem(addr, bytes);
     } else {
         if (!IS_ALIGNED(dest, 8)) {
             dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e5bc139d1ba7..43c152853969 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3051,6 +3051,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
         (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
         return -EINVAL;
 
+    /* INITs are latched while in SMM */
+    if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
+        (events->smi.smm || events->smi.pending) &&
+        vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
+        return -EINVAL;
+
     process_nmi(vcpu);
     vcpu->arch.exception.pending = events->exception.injected;
     vcpu->arch.exception.nr = events->exception.nr;
@@ -7162,6 +7168,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
         mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
         return -EINVAL;
 
+    /* INITs are latched while in SMM */
+    if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
+        (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
+         mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
+        return -EINVAL;
+
     if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
         vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
         set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index e30202b1716e..7c1601798169 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -125,7 +125,7 @@ int poke_user(struct task_struct *child, long addr, long data)
     else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
         (addr <= offsetof(struct user, u_debugreg[7]))) {
         addr -= offsetof(struct user, u_debugreg[0]);
-        addr = addr >> 2;
+        addr = addr >> 3;
         if ((addr == 4) || (addr == 5))
             return -EIO;
         child->thread.arch.debugregs[addr] = data;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 7d5afdb417cc..418f1b8576cf 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2028,7 +2028,8 @@ static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
 
 /*
  * Translate a virtual address to a physical one without relying on mapped
- * page tables.
+ * page tables. Don't rely on big pages being aligned in (guest) physical
+ * space!
  */
 static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
 {
@@ -2049,7 +2050,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
                   sizeof(pud)));
     if (!pud_present(pud))
         return 0;
-    pa = pud_pfn(pud) << PAGE_SHIFT;
+    pa = pud_val(pud) & PTE_PFN_MASK;
     if (pud_large(pud))
         return pa + (vaddr & ~PUD_MASK);
 
@@ -2057,7 +2058,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
                   sizeof(pmd)));
     if (!pmd_present(pmd))
         return 0;
-    pa = pmd_pfn(pmd) << PAGE_SHIFT;
+    pa = pmd_val(pmd) & PTE_PFN_MASK;
     if (pmd_large(pmd))
         return pa + (vaddr & ~PMD_MASK);
 
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 319f2e4f4a8b..478f572cb1e7 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -412,7 +412,8 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
 
     bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
         template->flags;
-    bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
+    bi->interval_exp = template->interval_exp ? :
+        ilog2(queue_logical_block_size(disk->queue));
     bi->profile = template->profile ? template->profile : &nop_profile;
     bi->tuple_size = template->tuple_size;
     bi->tag_size = template->tag_size;
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index fde8d885f7b6..6c11537ca404 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -44,6 +44,11 @@ struct aead_async_req {
     char iv[];
 };
 
+struct aead_tfm {
+    struct crypto_aead *aead;
+    bool has_key;
+};
+
 struct aead_ctx {
     struct aead_sg_list tsgl;
     struct aead_async_rsgl first_rsgl;
@@ -732,24 +737,146 @@ static struct proto_ops algif_aead_ops = {
     .poll = aead_poll,
 };
 
+static int aead_check_key(struct socket *sock)
+{
+    int err = 0;
+    struct sock *psk;
+    struct alg_sock *pask;
+    struct aead_tfm *tfm;
+    struct sock *sk = sock->sk;
+    struct alg_sock *ask = alg_sk(sk);
+
+    lock_sock(sk);
+    if (ask->refcnt)
+        goto unlock_child;
+
+    psk = ask->parent;
+    pask = alg_sk(ask->parent);
+    tfm = pask->private;
+
+    err = -ENOKEY;
+    lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
+    if (!tfm->has_key)
+        goto unlock;
+
+    if (!pask->refcnt++)
+        sock_hold(psk);
+
+    ask->refcnt = 1;
+    sock_put(psk);
+
+    err = 0;
+
+unlock:
+    release_sock(psk);
+unlock_child:
+    release_sock(sk);
+
+    return err;
+}
+
+static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
+                              size_t size)
+{
+    int err;
+
+    err = aead_check_key(sock);
+    if (err)
+        return err;
+
+    return aead_sendmsg(sock, msg, size);
+}
+
+static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
+                                   int offset, size_t size, int flags)
+{
+    int err;
+
+    err = aead_check_key(sock);
+    if (err)
+        return err;
+
+    return aead_sendpage(sock, page, offset, size, flags);
+}
+
+static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
+                              size_t ignored, int flags)
+{
+    int err;
+
+    err = aead_check_key(sock);
+    if (err)
+        return err;
+
+    return aead_recvmsg(sock, msg, ignored, flags);
+}
+
+static struct proto_ops algif_aead_ops_nokey = {
+    .family = PF_ALG,
+
+    .connect = sock_no_connect,
+    .socketpair = sock_no_socketpair,
+    .getname = sock_no_getname,
+    .ioctl = sock_no_ioctl,
+    .listen = sock_no_listen,
+    .shutdown = sock_no_shutdown,
+    .getsockopt = sock_no_getsockopt,
+    .mmap = sock_no_mmap,
+    .bind = sock_no_bind,
+    .accept = sock_no_accept,
+    .setsockopt = sock_no_setsockopt,
+
+    .release = af_alg_release,
+    .sendmsg = aead_sendmsg_nokey,
+    .sendpage = aead_sendpage_nokey,
+    .recvmsg = aead_recvmsg_nokey,
+    .poll = aead_poll,
+};
+
 static void *aead_bind(const char *name, u32 type, u32 mask)
 {
-    return crypto_alloc_aead(name, type, mask);
+    struct aead_tfm *tfm;
+    struct crypto_aead *aead;
+
+    tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+    if (!tfm)
+        return ERR_PTR(-ENOMEM);
+
+    aead = crypto_alloc_aead(name, type, mask);
+    if (IS_ERR(aead)) {
+        kfree(tfm);
+        return ERR_CAST(aead);
+    }
+
+    tfm->aead = aead;
+
+    return tfm;
 }
 
 static void aead_release(void *private)
 {
-    crypto_free_aead(private);
+    struct aead_tfm *tfm = private;
+
+    crypto_free_aead(tfm->aead);
+    kfree(tfm);
 }
 
 static int aead_setauthsize(void *private, unsigned int authsize)
 {
-    return crypto_aead_setauthsize(private, authsize);
+    struct aead_tfm *tfm = private;
+
+    return crypto_aead_setauthsize(tfm->aead, authsize);
 }
 
 static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
 {
-    return crypto_aead_setkey(private, key, keylen);
+    struct aead_tfm *tfm = private;
+    int err;
+
+    err = crypto_aead_setkey(tfm->aead, key, keylen);
+    tfm->has_key = !err;
+
+    return err;
 }
 
 static void aead_sock_destruct(struct sock *sk)
@@ -766,12 +893,14 @@ static void aead_sock_destruct(struct sock *sk)
     af_alg_release_parent(sk);
 }
 
-static int aead_accept_parent(void *private, struct sock *sk)
+static int aead_accept_parent_nokey(void *private, struct sock *sk)
 {
     struct aead_ctx *ctx;
     struct alg_sock *ask = alg_sk(sk);
-    unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
-    unsigned int ivlen = crypto_aead_ivsize(private);
+    struct aead_tfm *tfm = private;
+    struct crypto_aead *aead = tfm->aead;
+    unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
+    unsigned int ivlen = crypto_aead_ivsize(aead);
 
     ctx = sock_kmalloc(sk, len, GFP_KERNEL);
     if (!ctx)
@@ -798,7 +927,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
 
     ask->private = ctx;
 
-    aead_request_set_tfm(&ctx->aead_req, private);
+    aead_request_set_tfm(&ctx->aead_req, aead);
     aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                               af_alg_complete, &ctx->completion);
 
@@ -807,13 +936,25 @@ static int aead_accept_parent(void *private, struct sock *sk)
     return 0;
 }
 
+static int aead_accept_parent(void *private, struct sock *sk)
+{
+    struct aead_tfm *tfm = private;
+
+    if (!tfm->has_key)
+        return -ENOKEY;
+
+    return aead_accept_parent_nokey(private, sk);
+}
+
 static const struct af_alg_type algif_type_aead = {
     .bind = aead_bind,
     .release = aead_release,
     .setkey = aead_setkey,
     .setauthsize = aead_setauthsize,
     .accept = aead_accept_parent,
+    .accept_nokey = aead_accept_parent_nokey,
     .ops = &algif_aead_ops,
+    .ops_nokey = &algif_aead_ops_nokey,
     .name = "aead",
     .owner = THIS_MODULE
 };
diff --git a/drivers/Makefile b/drivers/Makefile
index d1edd6c28926..bd4e4e2d391b 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_USB_PHY) += usb/
 obj-$(CONFIG_USB) += usb/
 obj-$(CONFIG_PCI) += usb/
 obj-$(CONFIG_USB_GADGET) += usb/
+obj-$(CONFIG_OF) += usb/
 obj-$(CONFIG_SERIO) += input/serio/
 obj-$(CONFIG_GAMEPORT) += input/gameport/
 obj-$(CONFIG_INPUT) += input/
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8f6c23c20c52..deed58013555 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -287,6 +287,9 @@ static int bcm_open(struct hci_uart *hu)
 
     hu->priv = bcm;
 
+    if (!hu->tty->dev)
+        goto out;
+
     mutex_lock(&bcm_device_lock);
     list_for_each(p, &bcm_device_list) {
         struct bcm_device *dev = list_entry(p, struct bcm_device, list);
@@ -307,7 +310,7 @@ static int bcm_open(struct hci_uart *hu)
     }
 
     mutex_unlock(&bcm_device_lock);
-
+out:
     return 0;
 }
 
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 9e271286c5e5..73306384af6c 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -307,6 +307,9 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
     struct list_head *p;
     int err = -ENODEV;
 
+    if (!hu->tty->dev)
+        return err;
+
     mutex_lock(&intel_device_list_lock);
 
     list_for_each(p, &intel_device_list) {
@@ -379,6 +382,9 @@ static void intel_busy_work(struct work_struct *work)
     struct intel_data *intel = container_of(work, struct intel_data,
                                             busy_work);
 
+    if (!intel->hu->tty->dev)
+        return;
+
     /* Link is busy, delay the suspend */
     mutex_lock(&intel_device_list_lock);
     list_for_each(p, &intel_device_list) {
@@ -889,6 +895,8 @@ done:
     list_for_each(p, &intel_device_list) {
         struct intel_device *dev = list_entry(p, struct intel_device,
                                               list);
+        if (!hu->tty->dev)
+            break;
         if (hu->tty->dev->parent == dev->pdev->dev.parent) {
             if (device_may_wakeup(&dev->pdev->dev)) {
                 set_bit(STATE_LPM_ENABLED, &intel->flags);
@@ -1056,6 +1064,9 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 
     BT_DBG("hu %p skb %p", hu, skb);
 
+    if (!hu->tty->dev)
+        goto out_enqueue;
+
     /* Be sure our controller is resumed and potential LPM transaction
      * completed before enqueuing any packet.
      */
@@ -1072,7 +1083,7 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
         }
     }
     mutex_unlock(&intel_device_list_lock);
-
+out_enqueue:
     skb_queue_tail(&intel->txq, skb);
 
     return 0;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 5673ffff00be..6958b5ce9145 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -892,6 +892,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
          * for details on the intricacies of this.
          */
         int left;
+        unsigned char *data_to_send;
 
         ssif_inc_stat(ssif_info, sent_messages_parts);
 
@@ -900,6 +901,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
             left = 32;
         /* Length byte. */
         ssif_info->multi_data[ssif_info->multi_pos] = left;
+        data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
         ssif_info->multi_pos += left;
         if (left < 32)
             /*
@@ -913,7 +915,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
         rv = ssif_i2c_send(ssif_info, msg_written_handler,
                            I2C_SMBUS_WRITE,
                            SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
-                           ssif_info->multi_data + ssif_info->multi_pos,
+                           data_to_send,
                            I2C_SMBUS_BLOCK_DATA);
         if (rv < 0) {
             /* request failed, just return the error. */
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 8d2dbacc6161..e68966bbfa58 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op *op)
     return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
 }
 
+static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
+{
+    iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
+{
+    iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_irq_bh(unsigned long data)
+{
+    struct ccp_device *ccp = (struct ccp_device *)data;
+    struct ccp_cmd_queue *cmd_q;
+    u32 q_int, status;
+    unsigned int i;
+
+    status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+    for (i = 0; i < ccp->cmd_q_count; i++) {
+        cmd_q = &ccp->cmd_q[i];
+
+        q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+        if (q_int) {
+            cmd_q->int_status = status;
+            cmd_q->q_status = ioread32(cmd_q->reg_status);
+            cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+            /* On error, only save the first error value */
+            if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+            cmd_q->int_rcvd = 1;
+
+            /* Acknowledge the interrupt and wake the kthread */
+            iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+            wake_up_interruptible(&cmd_q->int_queue);
+        }
+    }
+    ccp_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+    struct device *dev = data;
+    struct ccp_device *ccp = dev_get_drvdata(dev);
+
+    ccp_disable_queue_interrupts(ccp);
+    if (ccp->use_tasklet)
+        tasklet_schedule(&ccp->irq_tasklet);
+    else
+        ccp_irq_bh((unsigned long)ccp);
+
+    return IRQ_HANDLED;
+}
+
 static int ccp_init(struct ccp_device *ccp)
 {
     struct device *dev = ccp->dev;
     struct ccp_cmd_queue *cmd_q;
     struct dma_pool *dma_pool;
     char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
-    unsigned int qmr, qim, i;
+    unsigned int qmr, i;
     int ret;
 
     /* Find available queues */
-    qim = 0;
+    ccp->qim = 0;
     qmr = ioread32(ccp->io_regs + Q_MASK_REG);
     for (i = 0; i < MAX_HW_QUEUES; i++) {
         if (!(qmr & (1 << i)))
@@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *ccp)
         init_waitqueue_head(&cmd_q->int_queue);
 
         /* Build queue interrupt mask (two interrupts per queue) */
-        qim |= cmd_q->int_ok | cmd_q->int_err;
+        ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
 
 #ifdef CONFIG_ARM64
         /* For arm64 set the recommended queue cache settings */
@@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *ccp)
     dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
 
     /* Disable and clear interrupts until ready */
-    iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+    ccp_disable_queue_interrupts(ccp);
     for (i = 0; i < ccp->cmd_q_count; i++) {
         cmd_q = &ccp->cmd_q[i];
 
         ioread32(cmd_q->reg_int_status);
         ioread32(cmd_q->reg_status);
     }
-    iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+    iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
     /* Request an irq */
     ret = ccp->get_irq(ccp);
@@ -408,6 +464,11 @@ static int ccp_init(struct ccp_device *ccp)
     init_waitqueue_head(&ccp->sb_queue);
     init_waitqueue_head(&ccp->suspend_queue);
 
+    /* Initialize the ISR tasklet? */
+    if (ccp->use_tasklet)
+        tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
+                     (unsigned long)ccp);
+
     dev_dbg(dev, "Starting threads...\n");
     /* Create a kthread for each queue */
     for (i = 0; i < ccp->cmd_q_count; i++) {
@@ -430,7 +491,7 @@ static int ccp_init(struct ccp_device *ccp)
 
     dev_dbg(dev, "Enabling interrupts...\n");
     /* Enable interrupts */
-    iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+    ccp_enable_queue_interrupts(ccp);
 
     dev_dbg(dev, "Registering device...\n");
     ccp_add_device(ccp);
@@ -467,7 +528,7 @@ static void ccp_destroy(struct ccp_device *ccp)
 {
     struct ccp_cmd_queue *cmd_q;
     struct ccp_cmd *cmd;
-    unsigned int qim, i;
+    unsigned int i;
 
     /* Unregister the DMA engine */
     ccp_dmaengine_unregister(ccp);
@@ -478,22 +539,15 @@ static void ccp_destroy(struct ccp_device *ccp)
     /* Remove this device from the list of available units */
     ccp_del_device(ccp);
 
-    /* Build queue interrupt mask (two interrupt masks per queue) */
-    qim = 0;
-    for (i = 0; i < ccp->cmd_q_count; i++) {
-        cmd_q = &ccp->cmd_q[i];
-        qim |= cmd_q->int_ok | cmd_q->int_err;
-    }
-
     /* Disable and clear interrupts */
-    iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+    ccp_disable_queue_interrupts(ccp);
     for (i = 0; i < ccp->cmd_q_count; i++) {
         cmd_q = &ccp->cmd_q[i];
 
         ioread32(cmd_q->reg_int_status);
         ioread32(cmd_q->reg_status);
     }
-    iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+    iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
     /* Stop the queue kthreads */
     for (i = 0; i < ccp->cmd_q_count; i++)
@@ -520,40 +574,6 @@ static void ccp_destroy(struct ccp_device *ccp)
     }
 }
 
-static irqreturn_t ccp_irq_handler(int irq, void *data)
-{
-    struct device *dev = data;
-    struct ccp_device *ccp = dev_get_drvdata(dev);
-    struct ccp_cmd_queue *cmd_q;
-    u32 q_int, status;
-    unsigned int i;
-
-    status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
-    for (i = 0; i < ccp->cmd_q_count; i++) {
-        cmd_q = &ccp->cmd_q[i];
-
-        q_int = status & (cmd_q->int_ok | cmd_q->int_err);
-        if (q_int) {
-            cmd_q->int_status = status;
-            cmd_q->q_status = ioread32(cmd_q->reg_status);
-            cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
-            /* On error, only save the first error value */
-            if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
-                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
-            cmd_q->int_rcvd = 1;
-
-            /* Acknowledge the interrupt and wake the kthread */
-            iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
-            wake_up_interruptible(&cmd_q->int_queue);
-        }
-    }
-
-    return IRQ_HANDLED;
-}
-
 static const struct ccp_actions ccp3_actions = {
     .aes = ccp_perform_aes,
     .xts_aes = ccp_perform_xts_aes,
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index a388bf2d67f4..2c0ce5f605b3 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -644,6 +644,65 @@ static int ccp_assign_lsbs(struct ccp_device *ccp)
     return rc;
 }
 
+static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
+{
+    unsigned int i;
+
+    for (i = 0; i < ccp->cmd_q_count; i++)
+        iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
+{
+    unsigned int i;
+
+    for (i = 0; i < ccp->cmd_q_count; i++)
+        iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_irq_bh(unsigned long data)
+{
+    struct ccp_device *ccp = (struct ccp_device *)data;
+    u32 status;
+    unsigned int i;
+
+    for (i = 0; i < ccp->cmd_q_count; i++) {
+        struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+        status = ioread32(cmd_q->reg_interrupt_status);
+
+        if (status) {
+            cmd_q->int_status = status;
+            cmd_q->q_status = ioread32(cmd_q->reg_status);
+            cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+            /* On error, only save the first error value */
+            if ((status & INT_ERROR) && !cmd_q->cmd_error)
+                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+            cmd_q->int_rcvd = 1;
+
+            /* Acknowledge the interrupt and wake the kthread */
+            iowrite32(status, cmd_q->reg_interrupt_status);
+            wake_up_interruptible(&cmd_q->int_queue);
+        }
+    }
+    ccp5_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp5_irq_handler(int irq, void *data)
+{
+    struct device *dev = data;
+    struct ccp_device *ccp = dev_get_drvdata(dev);
+
+    ccp5_disable_queue_interrupts(ccp);
+    if (ccp->use_tasklet)
+        tasklet_schedule(&ccp->irq_tasklet);
+    else
+        ccp5_irq_bh((unsigned long)ccp);
+    return IRQ_HANDLED;
+}
+
 static int ccp5_init(struct ccp_device *ccp)
 {
     struct device *dev = ccp->dev;
@@ -728,19 +787,18 @@ static int ccp5_init(struct ccp_device *ccp)
     dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
 
     /* Turn off the queues and disable interrupts until ready */
+    ccp5_disable_queue_interrupts(ccp);
     for (i = 0; i < ccp->cmd_q_count; i++) {
         cmd_q = &ccp->cmd_q[i];
 
         cmd_q->qcontrol = 0; /* Start with nothing */
         iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
 
-        /* Disable the interrupts */
-        iowrite32(0x00, cmd_q->reg_int_enable);
         ioread32(cmd_q->reg_int_status);
         ioread32(cmd_q->reg_status);
 
-        /* Clear the interrupts */
-        iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
+        /* Clear the interrupt status */
+        iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
     }
 
     dev_dbg(dev, "Requesting an IRQ...\n");
@@ -750,6 +808,10 @@ static int ccp5_init(struct ccp_device *ccp)
         dev_err(dev, "unable to allocate an IRQ\n");
         goto e_pool;
     }
+    /* Initialize the ISR tasklet */
+    if (ccp->use_tasklet)
+        tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
+                     (unsigned long)ccp);
 
     /* Initialize the queue used to suspend */
     init_waitqueue_head(&ccp->suspend_queue);
@@ -821,11 +883,7 @@ static int ccp5_init(struct ccp_device *ccp)
     }
 
     dev_dbg(dev, "Enabling interrupts...\n");
-    /* Enable interrupts */
-    for (i = 0; i < ccp->cmd_q_count; i++) {
-        cmd_q = &ccp->cmd_q[i];
-        iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
-    }
+    ccp5_enable_queue_interrupts(ccp);
 
     dev_dbg(dev, "Registering device...\n");
     /* Put this on the unit list to make it available */
@@ -877,17 +935,15 @@ static void ccp5_destroy(struct ccp_device *ccp)
     ccp_del_device(ccp);
 
     /* Disable and clear interrupts */
+    ccp5_disable_queue_interrupts(ccp);
     for (i = 0; i < ccp->cmd_q_count; i++) {
         cmd_q = &ccp->cmd_q[i];
 
         /* Turn off the run bit */
         iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
 
-        /* Disable the interrupts */
-        iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
-
         /* Clear the interrupt status */
-        iowrite32(0x00, cmd_q->reg_int_enable);
+        iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
         ioread32(cmd_q->reg_int_status);
         ioread32(cmd_q->reg_status);
     }
@@ -920,38 +976,6 @@ static void ccp5_destroy(struct ccp_device *ccp)
     }
 }
 
-static irqreturn_t ccp5_irq_handler(int irq, void *data)
-{
-    struct device *dev = data;
-    struct ccp_device *ccp = dev_get_drvdata(dev);
-    u32 status;
-    unsigned int i;
-
-    for (i = 0; i < ccp->cmd_q_count; i++) {
-        struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
-
-        status = ioread32(cmd_q->reg_interrupt_status);
-
-        if (status) {
-            cmd_q->int_status = status;
-            cmd_q->q_status = ioread32(cmd_q->reg_status);
-            cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
-            /* On error, only save the first error value */
-            if ((status & INT_ERROR) && !cmd_q->cmd_error)
-                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
-            cmd_q->int_rcvd = 1;
-
-            /* Acknowledge the interrupt and wake the kthread */
-            iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
-            wake_up_interruptible(&cmd_q->int_queue);
-        }
-    }
-
-    return IRQ_HANDLED;
-}
-
 static void ccp5_config(struct ccp_device *ccp)
 {
     /* Public side */
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 340aef14d616..8ac7ae17e1f4 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -109,9 +109,8 @@
 #define INT_COMPLETION 0x1
 #define INT_ERROR 0x2
 #define INT_QUEUE_STOPPED 0x4
-#define ALL_INTERRUPTS (INT_COMPLETION| \
-                        INT_ERROR| \
-                        INT_QUEUE_STOPPED)
+#define INT_EMPTY_QUEUE 0x8
+#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR)
 
 #define LSB_REGION_WIDTH 5
 #define MAX_LSB_CNT 8
@@ -333,7 +332,10 @@ struct ccp_device {
     void *dev_specific;
     int (*get_irq)(struct ccp_device *ccp);
     void (*free_irq)(struct ccp_device *ccp);
+    unsigned int qim;
     unsigned int irq;
+    bool use_tasklet;
+    struct tasklet_struct irq_tasklet;
 
     /* I/O area used for device communication. The register mapping
      * starts at an offset into the mapped bar.
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 28a9996c1085..e880d4cf4ada 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
             goto e_irq;
         }
     }
+    ccp->use_tasklet = true;
 
     return 0;
 
@@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
         dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
         goto e_msi;
     }
+    ccp->use_tasklet = true;
 
     return 0;
 
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 193224889e41..586f9543de73 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -553,13 +553,10 @@ static void dax_dev_release(struct device *dev)
     kfree(dax_dev);
 }
 
-static void unregister_dax_dev(void *dev)
+static void kill_dax_dev(struct dax_dev *dax_dev)
 {
-    struct dax_dev *dax_dev = to_dax_dev(dev);
     struct cdev *cdev = &dax_dev->cdev;
 
-    dev_dbg(dev, "%s\n", __func__);
-
     /*
      * Note, rcu is not protecting the liveness of dax_dev, rcu is
      * ensuring that any fault handlers that might have seen
@@ -571,6 +568,15 @@ static void unregister_dax_dev(void *dev)
     synchronize_srcu(&dax_srcu);
     unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
     cdev_del(cdev);
+}
+
+static void unregister_dax_dev(void *dev)
+{
+    struct dax_dev *dax_dev = to_dax_dev(dev);
+
+    dev_dbg(dev, "%s\n", __func__);
+
+    kill_dax_dev(dax_dev);
     device_unregister(dev);
 }
 
@@ -647,6 +653,7 @@ struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
     dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
     rc = device_add(dev);
     if (rc) {
+        kill_dax_dev(dax_dev);
         put_device(dev);
         return ERR_PTR(rc);
     }
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index c1fb545e8d78..42de5f22da93 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1301,7 +1301,7 @@ err_put:
     free_port_list_attributes(device);
 
 err_unregister:
-    device_unregister(class_dev);
+    device_del(class_dev);
 
 err:
     return ret;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 83687646da68..0e64b52af5b2 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1516,7 +1516,9 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 
     if (!qp->device->attach_mcast)
         return -ENOSYS;
-    if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
+    if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
+        lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
+        lid == be16_to_cpu(IB_LID_PERMISSIVE))
         return -EINVAL;
 
     ret = qp->device->attach_mcast(qp, gid, lid);
@@ -1532,7 +1534,9 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 
     if (!qp->device->detach_mcast)
         return -ENOSYS;
-    if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
+    if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
+        lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
+        lid == be16_to_cpu(IB_LID_PERMISSIVE))
         return -EINVAL;
 
     ret = qp->device->detach_mcast(qp, gid, lid);
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index a1576aea4756..9f768b48321f 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -833,23 +833,29 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
 /* when sending, force a reschedule every one of these periods */
 #define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */
 
+void hfi1_do_send_from_rvt(struct rvt_qp *qp)
+{
+    hfi1_do_send(qp, false);
+}
+
 void _hfi1_do_send(struct work_struct *work)
 {
     struct iowait *wait = container_of(work, struct iowait, iowork);
     struct rvt_qp *qp = iowait_to_qp(wait);
 
-    hfi1_do_send(qp);
+    hfi1_do_send(qp, true);
 }
 
 /**
  * hfi1_do_send - perform a send on a QP
  * @work: contains a pointer to the QP
+ * @in_thread: true if in a workqueue thread
  *
  * Process entries in the send work queue until credit or queue is
  * exhausted.  Only allow one CPU to send a packet per QP.
  * Otherwise, two threads could send packets out of order.
  */
-void hfi1_do_send(struct rvt_qp *qp)
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
 {
     struct hfi1_pkt_state ps;
     struct hfi1_qp_priv *priv = qp->priv;
@@ -917,8 +923,10 @@ void hfi1_do_send(struct rvt_qp *qp)
             qp->s_hdrwords = 0;
             /* allow other tasks to run */
             if (unlikely(time_after(jiffies, timeout))) {
-                if (workqueue_congested(cpu,
-                                        ps.ppd->hfi1_wq)) {
+                if (!in_thread ||
+                    workqueue_congested(
+                        cpu,
+                        ps.ppd->hfi1_wq)) {
                     spin_lock_irqsave(
                         &qp->s_lock,
                         ps.flags);
@@ -931,11 +939,9 @@ void hfi1_do_send(struct rvt_qp *qp)
                         *ps.ppd->dd->send_schedule);
                     return;
                 }
-                if (!irqs_disabled()) {
-                    cond_resched();
-                    this_cpu_inc(
-                        *ps.ppd->dd->send_schedule);
-                }
+                cond_resched();
+                this_cpu_inc(
+                    *ps.ppd->dd->send_schedule);
                 timeout = jiffies + (timeout_int) / 8;
             }
             spin_lock_irqsave(&qp->s_lock, ps.flags);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 4b7a16ceb362..01a380efea6b 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -1697,7 +1697,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
     dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
     dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
     dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
-    dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
+    dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
     dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
     dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
     dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 1c3815d89eb7..bac84f820a54 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -372,7 +372,9 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
 
 void _hfi1_do_send(struct work_struct *work);
 
-void hfi1_do_send(struct rvt_qp *qp);
+void hfi1_do_send_from_rvt(struct rvt_qp *qp);
+
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
 
 void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
                         enum ib_wc_status status);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 46ad99595fd2..f2a885eee4bb 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2926,6 +2926,7 @@ err_counter:
2926 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); 2926 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2927 2927
2928err_map: 2928err_map:
2929 mlx4_ib_free_eqs(dev, ibdev);
2929 iounmap(ibdev->uar_map); 2930 iounmap(ibdev->uar_map);
2930 2931
2931err_uar: 2932err_uar:
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index a21d37f02f35..e6ea81c9644a 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1102,7 +1102,8 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
1102 while ((p = rb_first(&ctx->mcg_table)) != NULL) { 1102 while ((p = rb_first(&ctx->mcg_table)) != NULL) {
1103 group = rb_entry(p, struct mcast_group, node); 1103 group = rb_entry(p, struct mcast_group, node);
1104 if (atomic_read(&group->refcount)) 1104 if (atomic_read(&group->refcount))
1105 mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group); 1105 mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n",
1106 atomic_read(&group->refcount), group);
1106 1107
1107 force_clean_group(group); 1108 force_clean_group(group);
1108 } 1109 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 6bd5740e2691..09396bd7b02d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -281,8 +281,11 @@ void ipoib_delete_debug_files(struct net_device *dev)
281{ 281{
282 struct ipoib_dev_priv *priv = netdev_priv(dev); 282 struct ipoib_dev_priv *priv = netdev_priv(dev);
283 283
284 WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
285 WARN_ONCE(!priv->path_dentry, "null path debug file\n");
284 debugfs_remove(priv->mcg_dentry); 286 debugfs_remove(priv->mcg_dentry);
285 debugfs_remove(priv->path_dentry); 287 debugfs_remove(priv->path_dentry);
288 priv->mcg_dentry = priv->path_dentry = NULL;
286} 289}
287 290
288int ipoib_register_debugfs(void) 291int ipoib_register_debugfs(void)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3ef7b8f049c4..08c4b0287304 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,6 +108,33 @@ static struct ib_client ipoib_client = {
108 .get_net_dev_by_params = ipoib_get_net_dev_by_params, 108 .get_net_dev_by_params = ipoib_get_net_dev_by_params,
109}; 109};
110 110
111#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
112static int ipoib_netdev_event(struct notifier_block *this,
113 unsigned long event, void *ptr)
114{
115 struct netdev_notifier_info *ni = ptr;
116 struct net_device *dev = ni->dev;
117
118 if (dev->netdev_ops->ndo_open != ipoib_open)
119 return NOTIFY_DONE;
120
121 switch (event) {
122 case NETDEV_REGISTER:
123 ipoib_create_debug_files(dev);
124 break;
125 case NETDEV_CHANGENAME:
126 ipoib_delete_debug_files(dev);
127 ipoib_create_debug_files(dev);
128 break;
129 case NETDEV_UNREGISTER:
130 ipoib_delete_debug_files(dev);
131 break;
132 }
133
134 return NOTIFY_DONE;
135}
136#endif
137
111int ipoib_open(struct net_device *dev) 138int ipoib_open(struct net_device *dev)
112{ 139{
113 struct ipoib_dev_priv *priv = netdev_priv(dev); 140 struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1655,8 +1682,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
1655 1682
1656 ASSERT_RTNL(); 1683 ASSERT_RTNL();
1657 1684
1658 ipoib_delete_debug_files(dev);
1659
1660 /* Delete any child interfaces first */ 1685 /* Delete any child interfaces first */
1661 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { 1686 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
1662 /* Stop GC on child */ 1687 /* Stop GC on child */
@@ -2074,8 +2099,6 @@ static struct net_device *ipoib_add_port(const char *format,
2074 goto register_failed; 2099 goto register_failed;
2075 } 2100 }
2076 2101
2077 ipoib_create_debug_files(priv->dev);
2078
2079 if (ipoib_cm_add_mode_attr(priv->dev)) 2102 if (ipoib_cm_add_mode_attr(priv->dev))
2080 goto sysfs_failed; 2103 goto sysfs_failed;
2081 if (ipoib_add_pkey_attr(priv->dev)) 2104 if (ipoib_add_pkey_attr(priv->dev))
@@ -2090,7 +2113,6 @@ static struct net_device *ipoib_add_port(const char *format,
2090 return priv->dev; 2113 return priv->dev;
2091 2114
2092sysfs_failed: 2115sysfs_failed:
2093 ipoib_delete_debug_files(priv->dev);
2094 unregister_netdev(priv->dev); 2116 unregister_netdev(priv->dev);
2095 2117
2096register_failed: 2118register_failed:
@@ -2175,6 +2197,12 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
2175 kfree(dev_list); 2197 kfree(dev_list);
2176} 2198}
2177 2199
2200#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2201static struct notifier_block ipoib_netdev_notifier = {
2202 .notifier_call = ipoib_netdev_event,
2203};
2204#endif
2205
2178static int __init ipoib_init_module(void) 2206static int __init ipoib_init_module(void)
2179{ 2207{
2180 int ret; 2208 int ret;
@@ -2227,6 +2255,9 @@ static int __init ipoib_init_module(void)
2227 if (ret) 2255 if (ret)
2228 goto err_client; 2256 goto err_client;
2229 2257
2258#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2259 register_netdevice_notifier(&ipoib_netdev_notifier);
2260#endif
2230 return 0; 2261 return 0;
2231 2262
2232err_client: 2263err_client:
@@ -2244,6 +2275,9 @@ err_fs:
2244 2275
2245static void __exit ipoib_cleanup_module(void) 2276static void __exit ipoib_cleanup_module(void)
2246{ 2277{
2278#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2279 unregister_netdevice_notifier(&ipoib_netdev_notifier);
2280#endif
2247 ipoib_netlink_fini(); 2281 ipoib_netlink_fini();
2248 ib_unregister_client(&ipoib_client); 2282 ib_unregister_client(&ipoib_client);
2249 ib_sa_unregister_client(&ipoib_sa_client); 2283 ib_sa_unregister_client(&ipoib_sa_client);
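The ipoib_main.c hunks above move debugfs file creation and removal out of the device add/cleanup paths and into a netdevice notifier keyed on NETDEV_REGISTER, NETDEV_CHANGENAME and NETDEV_UNREGISTER. A minimal sketch of that notifier pattern follows; it is illustrative only, and example_netdev_event/example_nb are hypothetical names rather than anything in the patch.

#include <linux/module.h>
#include <linux/netdevice.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:		/* device appeared: create per-device state */
	case NETDEV_CHANGENAME:		/* device renamed: recreate name-keyed state */
	case NETDEV_UNREGISTER:		/* device going away: tear the state down */
		pr_info("netdev event %lu on %s\n", event, dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

static int __init example_init(void)
{
	return register_netdevice_notifier(&example_nb);
}

static void __exit example_exit(void)
{
	unregister_netdevice_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

As in the patch, the notifier is registered once at module init and unregistered at module exit, so per-netdev state tracks the device's own lifetime rather than the driver's add/remove paths.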
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index a2f9f29c6ab5..57eadd2b7a71 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -87,8 +87,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
87 goto register_failed; 87 goto register_failed;
88 } 88 }
89 89
90 ipoib_create_debug_files(priv->dev);
91
92 /* RTNL childs don't need proprietary sysfs entries */ 90 /* RTNL childs don't need proprietary sysfs entries */
93 if (type == IPOIB_LEGACY_CHILD) { 91 if (type == IPOIB_LEGACY_CHILD) {
94 if (ipoib_cm_add_mode_attr(priv->dev)) 92 if (ipoib_cm_add_mode_attr(priv->dev))
@@ -109,7 +107,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
109 107
110sysfs_failed: 108sysfs_failed:
111 result = -ENOMEM; 109 result = -ENOMEM;
112 ipoib_delete_debug_files(priv->dev);
113 unregister_netdevice(priv->dev); 110 unregister_netdevice(priv->dev);
114 111
115register_failed: 112register_failed:
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index bf2b2676cb8a..80e3df1f1f7d 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -961,15 +961,15 @@ static int metadata_commit(struct era_metadata *md)
961 } 961 }
962 } 962 }
963 963
964 r = save_sm_root(md); 964 r = dm_tm_pre_commit(md->tm);
965 if (r) { 965 if (r) {
966 DMERR("%s: save_sm_root failed", __func__); 966 DMERR("%s: pre commit failed", __func__);
967 return r; 967 return r;
968 } 968 }
969 969
970 r = dm_tm_pre_commit(md->tm); 970 r = save_sm_root(md);
971 if (r) { 971 if (r) {
972 DMERR("%s: pre commit failed", __func__); 972 DMERR("%s: save_sm_root failed", __func__);
973 return r; 973 return r;
974 } 974 }
975 975
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 2c965424d383..ba7c4c685db3 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -997,10 +997,14 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
997 dm_init_md_queue(md); 997 dm_init_md_queue(md);
998 998
999 /* backfill 'mq' sysfs registration normally done in blk_register_queue */ 999 /* backfill 'mq' sysfs registration normally done in blk_register_queue */
1000 blk_mq_register_dev(disk_to_dev(md->disk), q); 1000 err = blk_mq_register_dev(disk_to_dev(md->disk), q);
1001 if (err)
1002 goto out_cleanup_queue;
1001 1003
1002 return 0; 1004 return 0;
1003 1005
1006out_cleanup_queue:
1007 blk_cleanup_queue(q);
1004out_tag_set: 1008out_tag_set:
1005 blk_mq_free_tag_set(md->tag_set); 1009 blk_mq_free_tag_set(md->tag_set);
1006out_kfree_tag_set: 1010out_kfree_tag_set:
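The dm-rq.c change stops ignoring the return value of blk_mq_register_dev() and adds an out_cleanup_queue label so the queue is torn down before the existing tag-set cleanup runs. Below is a generic, user-space sketch of that unwind-in-reverse error handling; the resource names are hypothetical and it is not the dm code itself.

#include <stdio.h>
#include <stdlib.h>

struct tag_set { int dummy; };
struct queue { int dummy; };

static int bring_up(int fail_register)
{
	struct tag_set *tags;
	struct queue *q;
	int err;

	tags = malloc(sizeof(*tags));
	if (!tags)
		return -1;

	q = malloc(sizeof(*q));
	if (!q) {
		err = -1;
		goto out_free_tags;
	}

	/* The last step can still fail; unwind everything acquired so far. */
	if (fail_register) {
		err = -1;
		goto out_free_queue;
	}

	free(q);		/* demo only: release before returning success */
	free(tags);
	return 0;

out_free_queue:			/* newest resource first ... */
	free(q);
out_free_tags:			/* ... oldest resource last */
	free(tags);
	return err;
}

int main(void)
{
	printf("ok path: %d, failing path: %d\n", bring_up(0), bring_up(1));
	return 0;
}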
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index d1c05c12a9db..be869a990e38 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1070,6 +1070,7 @@ static void passdown_endio(struct bio *bio)
1070 * to unmap (we ignore err). 1070 * to unmap (we ignore err).
1071 */ 1071 */
1072 queue_passdown_pt2(bio->bi_private); 1072 queue_passdown_pt2(bio->bi_private);
1073 bio_put(bio);
1073} 1074}
1074 1075
1075static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) 1076static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 6c033c9a2f06..78cb3e2359bd 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -538,7 +538,8 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
538 nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns); 538 nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
539 altmap = NULL; 539 altmap = NULL;
540 } else if (nd_pfn->mode == PFN_MODE_PMEM) { 540 } else if (nd_pfn->mode == PFN_MODE_PMEM) {
541 nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE; 541 nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
542 - offset) / PAGE_SIZE);
542 if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns) 543 if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
543 dev_info(&nd_pfn->dev, 544 dev_info(&nd_pfn->dev,
544 "number of pfns truncated from %lld to %ld\n", 545 "number of pfns truncated from %lld to %ld\n",
@@ -625,7 +626,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
625 */ 626 */
626 start += start_pad; 627 start += start_pad;
627 size = resource_size(&nsio->res); 628 size = resource_size(&nsio->res);
628 npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K; 629 npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
630 / PAGE_SIZE);
629 if (nd_pfn->mode == PFN_MODE_PMEM) { 631 if (nd_pfn->mode == PFN_MODE_PMEM) {
630 /* 632 /*
631 * vmemmap_populate_hugepages() allocates the memmap array in 633 * vmemmap_populate_hugepages() allocates the memmap array in
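Both pfn_devs.c hunks round the pfn count up to a full memory section (PFN_SECTION_ALIGN_UP) instead of deriving it directly from the byte size, and the second hunk also switches the divisor from SZ_4K to PAGE_SIZE. A small stand-alone sketch of that round-up arithmetic follows; the 32768 pfns-per-section figure assumes x86-64's 128 MiB sections with 4 KiB pages and is only an assumption for the example.

#include <stdio.h>

/* Round x up to the next multiple of a (same idea as PFN_SECTION_ALIGN_UP). */
static unsigned long align_up(unsigned long x, unsigned long a)
{
	return ((x + a - 1) / a) * a;
}

int main(void)
{
	unsigned long npfns = 130000;			/* hypothetical pfn count */
	unsigned long pfns_per_section = 32768;		/* assumed: 128 MiB / 4 KiB */

	printf("%lu pfns -> %lu pfns (section aligned)\n",
	       npfns, align_up(npfns, pfns_per_section));
	return 0;
}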
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 24618431a14b..b4808590870c 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -383,12 +383,12 @@ static void nd_pmem_shutdown(struct device *dev)
383 383
384static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) 384static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
385{ 385{
386 struct pmem_device *pmem = dev_get_drvdata(dev); 386 struct nd_region *nd_region;
387 struct nd_region *nd_region = to_region(pmem);
388 resource_size_t offset = 0, end_trunc = 0; 387 resource_size_t offset = 0, end_trunc = 0;
389 struct nd_namespace_common *ndns; 388 struct nd_namespace_common *ndns;
390 struct nd_namespace_io *nsio; 389 struct nd_namespace_io *nsio;
391 struct resource res; 390 struct resource res;
391 struct badblocks *bb;
392 392
393 if (event != NVDIMM_REVALIDATE_POISON) 393 if (event != NVDIMM_REVALIDATE_POISON)
394 return; 394 return;
@@ -397,20 +397,33 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
397 struct nd_btt *nd_btt = to_nd_btt(dev); 397 struct nd_btt *nd_btt = to_nd_btt(dev);
398 398
399 ndns = nd_btt->ndns; 399 ndns = nd_btt->ndns;
400 } else if (is_nd_pfn(dev)) { 400 nd_region = to_nd_region(ndns->dev.parent);
401 struct nd_pfn *nd_pfn = to_nd_pfn(dev); 401 nsio = to_nd_namespace_io(&ndns->dev);
402 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; 402 bb = &nsio->bb;
403 } else {
404 struct pmem_device *pmem = dev_get_drvdata(dev);
403 405
404 ndns = nd_pfn->ndns; 406 nd_region = to_region(pmem);
405 offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad); 407 bb = &pmem->bb;
406 end_trunc = __le32_to_cpu(pfn_sb->end_trunc); 408
407 } else 409 if (is_nd_pfn(dev)) {
408 ndns = to_ndns(dev); 410 struct nd_pfn *nd_pfn = to_nd_pfn(dev);
411 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
412
413 ndns = nd_pfn->ndns;
414 offset = pmem->data_offset +
415 __le32_to_cpu(pfn_sb->start_pad);
416 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
417 } else {
418 ndns = to_ndns(dev);
419 }
420
421 nsio = to_nd_namespace_io(&ndns->dev);
422 }
409 423
410 nsio = to_nd_namespace_io(&ndns->dev);
411 res.start = nsio->res.start + offset; 424 res.start = nsio->res.start + offset;
412 res.end = nsio->res.end - end_trunc; 425 res.end = nsio->res.end - end_trunc;
413 nvdimm_badblocks_populate(nd_region, &pmem->bb, &res); 426 nvdimm_badblocks_populate(nd_region, bb, &res);
414} 427}
415 428
416MODULE_ALIAS("pmem"); 429MODULE_ALIAS("pmem");
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 9cf6f1a88fce..f62306246ca4 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -968,17 +968,20 @@ EXPORT_SYMBOL_GPL(nvdimm_flush);
968 */ 968 */
969int nvdimm_has_flush(struct nd_region *nd_region) 969int nvdimm_has_flush(struct nd_region *nd_region)
970{ 970{
971 struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
972 int i; 971 int i;
973 972
974 /* no nvdimm == flushing capability unknown */ 973 /* no nvdimm == flushing capability unknown */
975 if (nd_region->ndr_mappings == 0) 974 if (nd_region->ndr_mappings == 0)
976 return -ENXIO; 975 return -ENXIO;
977 976
978 for (i = 0; i < nd_region->ndr_mappings; i++) 977 for (i = 0; i < nd_region->ndr_mappings; i++) {
979 /* flush hints present, flushing required */ 978 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
980 if (ndrd_get_flush_wpq(ndrd, i, 0)) 979 struct nvdimm *nvdimm = nd_mapping->nvdimm;
980
981 /* flush hints present / available */
982 if (nvdimm->num_flush)
981 return 1; 983 return 1;
984 }
982 985
983 /* 986 /*
984 * The platform defines dimm devices without hints, assume 987 * The platform defines dimm devices without hints, assume
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 6dcde85f05dc..2b14acc6bc8a 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -96,6 +96,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
96 int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages; 96 int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
97 int other_file = global_node_page_state(NR_FILE_PAGES) - 97 int other_file = global_node_page_state(NR_FILE_PAGES) -
98 global_node_page_state(NR_SHMEM) - 98 global_node_page_state(NR_SHMEM) -
99 global_node_page_state(NR_UNEVICTABLE) -
99 total_swapcache_pages(); 100 total_swapcache_pages();
100 101
101 if (lowmem_adj_size < array_size) 102 if (lowmem_adj_size < array_size)
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 70390de66e0e..eb0a095efe9c 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -611,7 +611,7 @@ static void jr3_pci_poll_dev(unsigned long data)
611 s = &dev->subdevices[i]; 611 s = &dev->subdevices[i];
612 spriv = s->private; 612 spriv = s->private;
613 613
614 if (now > spriv->next_time_min) { 614 if (time_after_eq(now, spriv->next_time_min)) {
615 struct jr3_pci_poll_delay sub_delay; 615 struct jr3_pci_poll_delay sub_delay;
616 616
617 sub_delay = jr3_pci_poll_subdevice(s); 617 sub_delay = jr3_pci_poll_subdevice(s);
@@ -727,11 +727,12 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
727 s->insn_read = jr3_pci_ai_insn_read; 727 s->insn_read = jr3_pci_ai_insn_read;
728 728
729 spriv = jr3_pci_alloc_spriv(dev, s); 729 spriv = jr3_pci_alloc_spriv(dev, s);
730 if (spriv) { 730 if (!spriv)
731 /* Channel specific range and maxdata */ 731 return -ENOMEM;
732 s->range_table_list = spriv->range_table_list; 732
733 s->maxdata_list = spriv->maxdata_list; 733 /* Channel specific range and maxdata */
734 } 734 s->range_table_list = spriv->range_table_list;
735 s->maxdata_list = spriv->maxdata_list;
735 } 736 }
736 737
737 /* Reset DSP card */ 738 /* Reset DSP card */
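The first jr3_pci hunk replaces a plain 'now > spriv->next_time_min' comparison with time_after_eq(), which stays correct when the jiffies counter wraps around. The stand-alone sketch below shows the signed-difference trick that macro relies on; my_time_after_eq() is an illustrative restatement, not the kernel macro.

#include <stdio.h>

/* Same idea as the kernel's time_after_eq(a, b) for unsigned long counters. */
#define my_time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long now = 5;				/* counter just wrapped past 0 */
	unsigned long deadline = (unsigned long)-10;	/* armed shortly before the wrap */

	/* The naive comparison thinks the deadline is still far away... */
	printf("naive  now > deadline : %d\n", now > deadline);
	/* ...the wrap-safe form correctly reports that it has already passed. */
	printf("safe   time_after_eq  : %d\n", my_time_after_eq(now, deadline));
	return 0;
}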
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 400969170d1c..f03e43b1b5f6 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -664,9 +664,8 @@ static int __init gdm_usb_mux_init(void)
664 664
665static void __exit gdm_usb_mux_exit(void) 665static void __exit gdm_usb_mux_exit(void)
666{ 666{
667 unregister_lte_tty_driver();
668
669 usb_deregister(&gdm_mux_driver); 667 usb_deregister(&gdm_mux_driver);
668 unregister_lte_tty_driver();
670} 669}
671 670
672module_init(gdm_usb_mux_init); 671module_init(gdm_usb_mux_init);
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index e9b6b21f7422..f759aa8a342d 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -47,15 +47,25 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
47 u16 index, u16 length, u8 *buffer) 47 u16 index, u16 length, u8 *buffer)
48{ 48{
49 int status = 0; 49 int status = 0;
50 u8 *usb_buffer;
50 51
51 if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) 52 if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
52 return STATUS_FAILURE; 53 return STATUS_FAILURE;
53 54
54 mutex_lock(&priv->usb_lock); 55 mutex_lock(&priv->usb_lock);
55 56
57 usb_buffer = kmemdup(buffer, length, GFP_KERNEL);
58 if (!usb_buffer) {
59 mutex_unlock(&priv->usb_lock);
60 return -ENOMEM;
61 }
62
56 status = usb_control_msg(priv->usb, 63 status = usb_control_msg(priv->usb,
57 usb_sndctrlpipe(priv->usb, 0), request, 0x40, value, 64 usb_sndctrlpipe(priv->usb, 0),
58 index, buffer, length, USB_CTL_WAIT); 65 request, 0x40, value,
66 index, usb_buffer, length, USB_CTL_WAIT);
67
68 kfree(usb_buffer);
59 69
60 mutex_unlock(&priv->usb_lock); 70 mutex_unlock(&priv->usb_lock);
61 71
@@ -75,15 +85,28 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
75 u16 index, u16 length, u8 *buffer) 85 u16 index, u16 length, u8 *buffer)
76{ 86{
77 int status; 87 int status;
88 u8 *usb_buffer;
78 89
79 if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) 90 if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
80 return STATUS_FAILURE; 91 return STATUS_FAILURE;
81 92
82 mutex_lock(&priv->usb_lock); 93 mutex_lock(&priv->usb_lock);
83 94
95 usb_buffer = kmalloc(length, GFP_KERNEL);
96 if (!usb_buffer) {
97 mutex_unlock(&priv->usb_lock);
98 return -ENOMEM;
99 }
100
84 status = usb_control_msg(priv->usb, 101 status = usb_control_msg(priv->usb,
85 usb_rcvctrlpipe(priv->usb, 0), request, 0xc0, value, 102 usb_rcvctrlpipe(priv->usb, 0),
86 index, buffer, length, USB_CTL_WAIT); 103 request, 0xc0, value,
104 index, usb_buffer, length, USB_CTL_WAIT);
105
106 if (status == length)
107 memcpy(buffer, usb_buffer, length);
108
109 kfree(usb_buffer);
87 110
88 mutex_unlock(&priv->usb_lock); 111 mutex_unlock(&priv->usb_lock);
89 112
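The usbpipe.c hunks stop passing the caller's buffer straight to usb_control_msg() and instead bounce the data through a kmemdup()/kmalloc() allocation, since control-transfer buffers must be DMA-capable and callers may hand in on-stack memory. Below is a sketch of the same out-direction pattern; vnt_vendor_write(), its arguments and the timeout value are hypothetical stand-ins rather than the driver's real helpers.

#include <linux/slab.h>
#include <linux/usb.h>

static int vnt_vendor_write(struct usb_device *udev, u8 request, u16 value,
			    u16 index, const u8 *data, u16 len)
{
	u8 *bounce;
	int ret;

	bounce = kmemdup(data, len, GFP_KERNEL);	/* heap copy is DMA-safe */
	if (!bounce)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_DIR_OUT, value, index,
			      bounce, len, 1000 /* illustrative timeout */);

	kfree(bounce);
	return ret < 0 ? ret : 0;
}

The in-direction variant in the hunk does the same thing with kmalloc() and copies the result back to the caller only when the full length was read.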
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b7d747e92c7a..40e50f2d209d 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4671,6 +4671,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4671 continue; 4671 continue;
4672 } 4672 }
4673 atomic_set(&sess->session_reinstatement, 1); 4673 atomic_set(&sess->session_reinstatement, 1);
4674 atomic_set(&sess->session_fall_back_to_erl0, 1);
4674 spin_unlock(&sess->conn_lock); 4675 spin_unlock(&sess->conn_lock);
4675 4676
4676 list_move_tail(&se_sess->sess_list, &free_list); 4677 list_move_tail(&se_sess->sess_list, &free_list);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index e980e2d0c2db..7e70fe849f0d 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1530,6 +1530,7 @@ static void lio_tpg_close_session(struct se_session *se_sess)
1530 return; 1530 return;
1531 } 1531 }
1532 atomic_set(&sess->session_reinstatement, 1); 1532 atomic_set(&sess->session_reinstatement, 1);
1533 atomic_set(&sess->session_fall_back_to_erl0, 1);
1533 spin_unlock(&sess->conn_lock); 1534 spin_unlock(&sess->conn_lock);
1534 1535
1535 iscsit_stop_time2retain_timer(sess); 1536 iscsit_stop_time2retain_timer(sess);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 15f79a2ca34a..96c55bc10ac9 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -204,6 +204,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
204 initiatorname_param->value) && 204 initiatorname_param->value) &&
205 (sess_p->sess_ops->SessionType == sessiontype))) { 205 (sess_p->sess_ops->SessionType == sessiontype))) {
206 atomic_set(&sess_p->session_reinstatement, 1); 206 atomic_set(&sess_p->session_reinstatement, 1);
207 atomic_set(&sess_p->session_fall_back_to_erl0, 1);
207 spin_unlock(&sess_p->conn_lock); 208 spin_unlock(&sess_p->conn_lock);
208 iscsit_inc_session_usage_count(sess_p); 209 iscsit_inc_session_usage_count(sess_p);
209 iscsit_stop_time2retain_timer(sess_p); 210 iscsit_stop_time2retain_timer(sess_p);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index d545993df18b..29f807b29e74 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -594,8 +594,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
594 if (ret < 0) 594 if (ret < 0)
595 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 595 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
596 596
597 if (ret) 597 target_complete_cmd(cmd, SAM_STAT_GOOD);
598 target_complete_cmd(cmd, SAM_STAT_GOOD);
599 return 0; 598 return 0;
600} 599}
601 600
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index a53fb23a0411..b3b1461ec60d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -506,8 +506,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
506 * been failed with a non-zero SCSI status. 506 * been failed with a non-zero SCSI status.
507 */ 507 */
508 if (cmd->scsi_status) { 508 if (cmd->scsi_status) {
509 pr_err("compare_and_write_callback: non zero scsi_status:" 509 pr_debug("compare_and_write_callback: non zero scsi_status:"
510 " 0x%02x\n", cmd->scsi_status); 510 " 0x%02x\n", cmd->scsi_status);
511 *post_ret = 1;
512 if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
513 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
511 goto out; 514 goto out;
512 } 515 }
513 516
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index a23fa5ed1d67..2b907385b4a8 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -216,16 +216,11 @@ static int pty_signal(struct tty_struct *tty, int sig)
216static void pty_flush_buffer(struct tty_struct *tty) 216static void pty_flush_buffer(struct tty_struct *tty)
217{ 217{
218 struct tty_struct *to = tty->link; 218 struct tty_struct *to = tty->link;
219 struct tty_ldisc *ld;
220 219
221 if (!to) 220 if (!to)
222 return; 221 return;
223 222
224 ld = tty_ldisc_ref(to); 223 tty_buffer_flush(to, NULL);
225 tty_buffer_flush(to, ld);
226 if (ld)
227 tty_ldisc_deref(ld);
228
229 if (to->packet) { 224 if (to->packet) {
230 spin_lock_irq(&tty->ctrl_lock); 225 spin_lock_irq(&tty->ctrl_lock);
231 tty->ctrl_status |= TIOCPKT_FLUSHWRITE; 226 tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index a2a529994ba5..44e5b5bf713b 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1712,7 +1712,8 @@ static int serial_omap_probe(struct platform_device *pdev)
1712 return 0; 1712 return 0;
1713 1713
1714err_add_port: 1714err_add_port:
1715 pm_runtime_put(&pdev->dev); 1715 pm_runtime_dont_use_autosuspend(&pdev->dev);
1716 pm_runtime_put_sync(&pdev->dev);
1716 pm_runtime_disable(&pdev->dev); 1717 pm_runtime_disable(&pdev->dev);
1717 pm_qos_remove_request(&up->pm_qos_request); 1718 pm_qos_remove_request(&up->pm_qos_request);
1718 device_init_wakeup(up->dev, false); 1719 device_init_wakeup(up->dev, false);
@@ -1725,9 +1726,13 @@ static int serial_omap_remove(struct platform_device *dev)
1725{ 1726{
1726 struct uart_omap_port *up = platform_get_drvdata(dev); 1727 struct uart_omap_port *up = platform_get_drvdata(dev);
1727 1728
1729 pm_runtime_get_sync(up->dev);
1730
1731 uart_remove_one_port(&serial_omap_reg, &up->port);
1732
1733 pm_runtime_dont_use_autosuspend(up->dev);
1728 pm_runtime_put_sync(up->dev); 1734 pm_runtime_put_sync(up->dev);
1729 pm_runtime_disable(up->dev); 1735 pm_runtime_disable(up->dev);
1730 uart_remove_one_port(&serial_omap_reg, &up->port);
1731 pm_qos_remove_request(&up->pm_qos_request); 1736 pm_qos_remove_request(&up->pm_qos_request);
1732 device_init_wakeup(&dev->dev, false); 1737 device_init_wakeup(&dev->dev, false);
1733 1738
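The omap-serial hunks rebalance runtime PM: the probe error path now also calls pm_runtime_dont_use_autosuspend() and uses a synchronous put, and remove() resumes the device before unregistering the port. A hedged sketch of that remove() ordering is below; example_remove() is a hypothetical callback, not the driver's.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_remove(struct platform_device *pdev)
{
	/* Make sure the hardware is powered while we tear it down. */
	pm_runtime_get_sync(&pdev->dev);

	/* ... unregister the UART port / quiesce the hardware here ... */

	/* Undo autosuspend and drop our usage count before disabling RPM. */
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}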
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 3e2ef4fd7382..d65f92bcd0f1 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -906,14 +906,13 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
906 return -ENOMEM; 906 return -ENOMEM;
907 } 907 }
908 908
909 dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf, 909 dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf,
910 dma->rx_size, DMA_FROM_DEVICE); 910 dma->rx_size, DMA_FROM_DEVICE);
911 911
912 spin_lock_irqsave(&p->port.lock, flags); 912 spin_lock_irqsave(&p->port.lock, flags);
913 913
914 /* TX buffer */ 914 /* TX buffer */
915 dma->tx_addr = dma_map_single(dma->tx_chan->device->dev, 915 dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf,
916 p->port.state->xmit.buf,
917 UART_XMIT_SIZE, DMA_TO_DEVICE); 916 UART_XMIT_SIZE, DMA_TO_DEVICE);
918 917
919 spin_unlock_irqrestore(&p->port.lock, flags); 918 spin_unlock_irqrestore(&p->port.lock, flags);
@@ -927,7 +926,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
927 926
928 if (dma->rx_chan) { 927 if (dma->rx_chan) {
929 dmaengine_terminate_all(dma->rx_chan); 928 dmaengine_terminate_all(dma->rx_chan);
930 dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr, 929 dma_unmap_single(p->port.dev, dma->rx_addr,
931 dma->rx_size, DMA_FROM_DEVICE); 930 dma->rx_size, DMA_FROM_DEVICE);
932 kfree(dma->rx_buf); 931 kfree(dma->rx_buf);
933 dma_release_channel(dma->rx_chan); 932 dma_release_channel(dma->rx_chan);
@@ -936,7 +935,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
936 935
937 if (dma->tx_chan) { 936 if (dma->tx_chan) {
938 dmaengine_terminate_all(dma->tx_chan); 937 dmaengine_terminate_all(dma->tx_chan);
939 dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr, 938 dma_unmap_single(p->port.dev, dma->tx_addr,
940 UART_XMIT_SIZE, DMA_TO_DEVICE); 939 UART_XMIT_SIZE, DMA_TO_DEVICE);
941 dma_release_channel(dma->tx_chan); 940 dma_release_channel(dma->tx_chan);
942 dma->tx_chan = NULL; 941 dma->tx_chan = NULL;
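The samsung.c hunks make the UART map and unmap its DMA buffers against p->port.dev rather than the DMA channel's device, so both calls agree on the struct device (and on size and direction). A generic sketch of that pairing follows; map_rx_buffer()/unmap_rx_buffer() and their parameters are illustrative only.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int map_rx_buffer(struct device *dev, void **buf, dma_addr_t *addr,
			 size_t size)
{
	*buf = kmalloc(size, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;

	*addr = dma_map_single(dev, *buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *addr)) {
		kfree(*buf);
		return -ENOMEM;
	}
	return 0;
}

static void unmap_rx_buffer(struct device *dev, void *buf, dma_addr_t addr,
			    size_t size)
{
	/* Must use the same device, size and direction as dma_map_single(). */
	dma_unmap_single(dev, addr, size, DMA_FROM_DEVICE);
	kfree(buf);
}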
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 0a6369510f2d..0b845e550fbd 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -58,7 +58,6 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
58#define WDM_SUSPENDING 8 58#define WDM_SUSPENDING 8
59#define WDM_RESETTING 9 59#define WDM_RESETTING 9
60#define WDM_OVERFLOW 10 60#define WDM_OVERFLOW 10
61#define WDM_DRAIN_ON_OPEN 11
62 61
63#define WDM_MAX 16 62#define WDM_MAX 16
64 63
@@ -182,7 +181,7 @@ static void wdm_in_callback(struct urb *urb)
182 "nonzero urb status received: -ESHUTDOWN\n"); 181 "nonzero urb status received: -ESHUTDOWN\n");
183 goto skip_error; 182 goto skip_error;
184 case -EPIPE: 183 case -EPIPE:
185 dev_dbg(&desc->intf->dev, 184 dev_err(&desc->intf->dev,
186 "nonzero urb status received: -EPIPE\n"); 185 "nonzero urb status received: -EPIPE\n");
187 break; 186 break;
188 default: 187 default:
@@ -210,25 +209,6 @@ static void wdm_in_callback(struct urb *urb)
210 desc->reslength = length; 209 desc->reslength = length;
211 } 210 }
212 } 211 }
213
214 /*
215 * Handling devices with the WDM_DRAIN_ON_OPEN flag set:
216 * If desc->resp_count is unset, then the urb was submitted
217 * without a prior notification. If the device returned any
218 * data, then this implies that it had messages queued without
219 * notifying us. Continue reading until that queue is flushed.
220 */
221 if (!desc->resp_count) {
222 if (!length) {
223 /* do not propagate the expected -EPIPE */
224 desc->rerr = 0;
225 goto unlock;
226 }
227 dev_dbg(&desc->intf->dev, "got %d bytes without notification\n", length);
228 set_bit(WDM_RESPONDING, &desc->flags);
229 usb_submit_urb(desc->response, GFP_ATOMIC);
230 }
231
232skip_error: 212skip_error:
233 set_bit(WDM_READ, &desc->flags); 213 set_bit(WDM_READ, &desc->flags);
234 wake_up(&desc->wait); 214 wake_up(&desc->wait);
@@ -243,7 +223,6 @@ skip_error:
243 service_outstanding_interrupt(desc); 223 service_outstanding_interrupt(desc);
244 } 224 }
245 225
246unlock:
247 spin_unlock(&desc->iuspin); 226 spin_unlock(&desc->iuspin);
248} 227}
249 228
@@ -686,17 +665,6 @@ static int wdm_open(struct inode *inode, struct file *file)
686 dev_err(&desc->intf->dev, 665 dev_err(&desc->intf->dev,
687 "Error submitting int urb - %d\n", rv); 666 "Error submitting int urb - %d\n", rv);
688 rv = usb_translate_errors(rv); 667 rv = usb_translate_errors(rv);
689 } else if (test_bit(WDM_DRAIN_ON_OPEN, &desc->flags)) {
690 /*
691 * Some devices keep pending messages queued
692 * without resending notifications. We must
693 * flush the message queue before we can
694 * assume a one-to-one relationship between
695 * notifications and messages in the queue
696 */
697 dev_dbg(&desc->intf->dev, "draining queued data\n");
698 set_bit(WDM_RESPONDING, &desc->flags);
699 rv = usb_submit_urb(desc->response, GFP_KERNEL);
700 } 668 }
701 } else { 669 } else {
702 rv = 0; 670 rv = 0;
@@ -803,8 +771,7 @@ static void wdm_rxwork(struct work_struct *work)
803/* --- hotplug --- */ 771/* --- hotplug --- */
804 772
805static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep, 773static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep,
806 u16 bufsize, int (*manage_power)(struct usb_interface *, int), 774 u16 bufsize, int (*manage_power)(struct usb_interface *, int))
807 bool drain_on_open)
808{ 775{
809 int rv = -ENOMEM; 776 int rv = -ENOMEM;
810 struct wdm_device *desc; 777 struct wdm_device *desc;
@@ -891,68 +858,6 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
891 858
892 desc->manage_power = manage_power; 859 desc->manage_power = manage_power;
893 860
894 /*
895 * "drain_on_open" enables a hack to work around a firmware
896 * issue observed on network functions, in particular MBIM
897 * functions.
898 *
899 * Quoting section 7 of the CDC-WMC r1.1 specification:
900 *
901 * "The firmware shall interpret GetEncapsulatedResponse as a
902 * request to read response bytes. The firmware shall send
903 * the next wLength bytes from the response. The firmware
904 * shall allow the host to retrieve data using any number of
905 * GetEncapsulatedResponse requests. The firmware shall
906 * return a zero- length reply if there are no data bytes
907 * available.
908 *
909 * The firmware shall send ResponseAvailable notifications
910 * periodically, using any appropriate algorithm, to inform
911 * the host that there is data available in the reply
912 * buffer. The firmware is allowed to send ResponseAvailable
913 * notifications even if there is no data available, but
914 * this will obviously reduce overall performance."
915 *
916 * These requirements, although they make sense, are
917 * often not implemented by network functions. Some firmwares
918 * will queue data indefinitely, without ever resending a
919 * notification. The result is that the driver and firmware
920 * lose "synchronization" if the driver ever fails to respond
921 * to a single notification, something which easily can happen
922 * on release(). When this happens, the driver will appear to
923 * never receive notifications for the most current data. Each
924 * notification will only cause a single read, which returns
925 * the oldest data in the firmware's queue.
926 *
927 * The "drain_on_open" hack resolves the situation by draining
928 * data from the firmware until none is returned, without a
929 * prior notification.
930 *
931 * This will inevitably race with the firmware, risking that
932 * we read data from the device before handling the associated
933 * notification. To make things worse, some of the devices
934 * needing the hack do not implement the "return zero if no
935 * data is available" requirement either. Instead they return
936 * an error on the subsequent read in this case. This means
937 * that "winning" the race can cause an unexpected EIO to
938 * userspace.
939 *
940 * "winning" the race is more likely on resume() than on
941 * open(), and the unexpected error is more harmful in the
942 * middle of an open session. The hack is therefore only
943 * applied on open(), and not on resume() where it logically
944 * would be equally necessary. So we define open() as the only
945 * driver <-> device "synchronization point". Should we happen
946 * to lose a notification after open(), then synchronization
947 * will be lost until release()
948 *
949 * The hack should not be enabled for CDC WDM devices
950 * conforming to the CDC-WMC r1.1 specification. This is
951 * ensured by setting drain_on_open to false in wdm_probe().
952 */
953 if (drain_on_open)
954 set_bit(WDM_DRAIN_ON_OPEN, &desc->flags);
955
956 spin_lock(&wdm_device_list_lock); 861 spin_lock(&wdm_device_list_lock);
957 list_add(&desc->device_list, &wdm_device_list); 862 list_add(&desc->device_list, &wdm_device_list);
958 spin_unlock(&wdm_device_list_lock); 863 spin_unlock(&wdm_device_list_lock);
@@ -1006,7 +911,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
1006 goto err; 911 goto err;
1007 ep = &iface->endpoint[0].desc; 912 ep = &iface->endpoint[0].desc;
1008 913
1009 rv = wdm_create(intf, ep, maxcom, &wdm_manage_power, false); 914 rv = wdm_create(intf, ep, maxcom, &wdm_manage_power);
1010 915
1011err: 916err:
1012 return rv; 917 return rv;
@@ -1038,7 +943,7 @@ struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
1038{ 943{
1039 int rv = -EINVAL; 944 int rv = -EINVAL;
1040 945
1041 rv = wdm_create(intf, ep, bufsize, manage_power, true); 946 rv = wdm_create(intf, ep, bufsize, manage_power);
1042 if (rv < 0) 947 if (rv < 0)
1043 goto err; 948 goto err;
1044 949
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index dadd1e8dfe09..0bb380a9fcf7 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1328,6 +1328,24 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1328 */ 1328 */
1329 if (udev->parent && !PMSG_IS_AUTO(msg)) 1329 if (udev->parent && !PMSG_IS_AUTO(msg))
1330 status = 0; 1330 status = 0;
1331
1332 /*
1333 * If the device is inaccessible, don't try to resume
1334 * suspended interfaces and just return the error.
1335 */
1336 if (status && status != -EBUSY) {
1337 int err;
1338 u16 devstat;
1339
1340 err = usb_get_status(udev, USB_RECIP_DEVICE, 0,
1341 &devstat);
1342 if (err) {
1343 dev_err(&udev->dev,
1344 "Failed to suspend device, error %d\n",
1345 status);
1346 goto done;
1347 }
1348 }
1331 } 1349 }
1332 1350
1333 /* If the suspend failed, resume interfaces that did get suspended */ 1351 /* If the suspend failed, resume interfaces that did get suspended */
@@ -1760,6 +1778,9 @@ static int autosuspend_check(struct usb_device *udev)
1760 int w, i; 1778 int w, i;
1761 struct usb_interface *intf; 1779 struct usb_interface *intf;
1762 1780
1781 if (udev->state == USB_STATE_NOTATTACHED)
1782 return -ENODEV;
1783
1763 /* Fail if autosuspend is disabled, or any interfaces are in use, or 1784 /* Fail if autosuspend is disabled, or any interfaces are in use, or
1764 * any interface drivers require remote wakeup but it isn't available. 1785 * any interface drivers require remote wakeup but it isn't available.
1765 */ 1786 */
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 822ced9639aa..422ce7b20d73 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -27,6 +27,7 @@
27#define MAX_USB_MINORS 256 27#define MAX_USB_MINORS 256
28static const struct file_operations *usb_minors[MAX_USB_MINORS]; 28static const struct file_operations *usb_minors[MAX_USB_MINORS];
29static DECLARE_RWSEM(minor_rwsem); 29static DECLARE_RWSEM(minor_rwsem);
30static DEFINE_MUTEX(init_usb_class_mutex);
30 31
31static int usb_open(struct inode *inode, struct file *file) 32static int usb_open(struct inode *inode, struct file *file)
32{ 33{
@@ -109,8 +110,9 @@ static void release_usb_class(struct kref *kref)
109 110
110static void destroy_usb_class(void) 111static void destroy_usb_class(void)
111{ 112{
112 if (usb_class) 113 mutex_lock(&init_usb_class_mutex);
113 kref_put(&usb_class->kref, release_usb_class); 114 kref_put(&usb_class->kref, release_usb_class);
115 mutex_unlock(&init_usb_class_mutex);
114} 116}
115 117
116int usb_major_init(void) 118int usb_major_init(void)
@@ -171,7 +173,10 @@ int usb_register_dev(struct usb_interface *intf,
171 if (intf->minor >= 0) 173 if (intf->minor >= 0)
172 return -EADDRINUSE; 174 return -EADDRINUSE;
173 175
176 mutex_lock(&init_usb_class_mutex);
174 retval = init_usb_class(); 177 retval = init_usb_class();
178 mutex_unlock(&init_usb_class_mutex);
179
175 if (retval) 180 if (retval)
176 return retval; 181 return retval;
177 182
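The file.c hunk serializes init_usb_class() and destroy_usb_class() with a new init_usb_class_mutex so concurrent registrations cannot race the lazy creation (or the final kref_put) of the shared usb_class object. A minimal sketch of mutex-guarded lazy initialization around a kref is below; the struct and function names are hypothetical.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct shared_state {
	struct kref kref;
};

static DEFINE_MUTEX(init_lock);
static struct shared_state *shared;

static void shared_release(struct kref *kref)
{
	struct shared_state *s = container_of(kref, struct shared_state, kref);

	kfree(s);
	shared = NULL;
}

static int get_shared(void)
{
	int ret = 0;

	mutex_lock(&init_lock);
	if (shared) {
		kref_get(&shared->kref);
	} else {
		shared = kzalloc(sizeof(*shared), GFP_KERNEL);
		if (shared)
			kref_init(&shared->kref);
		else
			ret = -ENOMEM;
	}
	mutex_unlock(&init_lock);
	return ret;
}

static void put_shared(void)
{
	mutex_lock(&init_lock);	/* last put frees and clears the pointer */
	if (shared)
		kref_put(&shared->kref, shared_release);
	mutex_unlock(&init_lock);
}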
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 35fb2bef0e45..579900640faa 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1064,6 +1064,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1064 1064
1065 portstatus = portchange = 0; 1065 portstatus = portchange = 0;
1066 status = hub_port_status(hub, port1, &portstatus, &portchange); 1066 status = hub_port_status(hub, port1, &portstatus, &portchange);
1067 if (status)
1068 goto abort;
1069
1067 if (udev || (portstatus & USB_PORT_STAT_CONNECTION)) 1070 if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
1068 dev_dbg(&port_dev->dev, "status %04x change %04x\n", 1071 dev_dbg(&port_dev->dev, "status %04x change %04x\n",
1069 portstatus, portchange); 1072 portstatus, portchange);
@@ -1196,7 +1199,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1196 1199
1197 /* Scan all ports that need attention */ 1200 /* Scan all ports that need attention */
1198 kick_hub_wq(hub); 1201 kick_hub_wq(hub);
1199 1202 abort:
1200 if (type == HUB_INIT2 || type == HUB_INIT3) { 1203 if (type == HUB_INIT2 || type == HUB_INIT3) {
1201 /* Allow autosuspend if it was suppressed */ 1204 /* Allow autosuspend if it was suppressed */
1202 disconnected: 1205 disconnected:
@@ -2082,6 +2085,12 @@ void usb_disconnect(struct usb_device **pdev)
2082 dev_info(&udev->dev, "USB disconnect, device number %d\n", 2085 dev_info(&udev->dev, "USB disconnect, device number %d\n",
2083 udev->devnum); 2086 udev->devnum);
2084 2087
2088 /*
2089 * Ensure that the pm runtime code knows that the USB device
2090 * is in the process of being disconnected.
2091 */
2092 pm_runtime_barrier(&udev->dev);
2093
2085 usb_lock_device(udev); 2094 usb_lock_device(udev);
2086 2095
2087 hub_disconnect_children(udev); 2096 hub_disconnect_children(udev);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 3684775677eb..f6cce5ac69c5 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -510,6 +510,7 @@ config USB_CONFIGFS_F_TCM
510choice 510choice
511 tristate "USB Gadget Drivers" 511 tristate "USB Gadget Drivers"
512 default USB_ETH 512 default USB_ETH
513 optional
513 help 514 help
514 A Linux "Gadget Driver" talks to the USB Peripheral Controller 515 A Linux "Gadget Driver" talks to the USB Peripheral Controller
515 driver through the abstract "gadget" API. Some other operating 516 driver through the abstract "gadget" API. Some other operating
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index d6aa2ea0efc8..9515b2a7d0e0 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -541,14 +541,10 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
541 ssize_t r = count; 541 ssize_t r = count;
542 unsigned xfer; 542 unsigned xfer;
543 int ret = 0; 543 int ret = 0;
544 size_t len; 544 size_t len = 0;
545 545
546 DBG(cdev, "mtp_read(%zu)\n", count); 546 DBG(cdev, "mtp_read(%zu)\n", count);
547 547
548 len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
549 if (len > MTP_BULK_BUFFER_SIZE)
550 return -EINVAL;
551
552 /* we will block until we're online */ 548 /* we will block until we're online */
553 DBG(cdev, "mtp_read: waiting for online state\n"); 549 DBG(cdev, "mtp_read: waiting for online state\n");
554 ret = wait_event_interruptible(dev->read_wq, 550 ret = wait_event_interruptible(dev->read_wq,
@@ -558,6 +554,14 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
558 goto done; 554 goto done;
559 } 555 }
560 spin_lock_irq(&dev->lock); 556 spin_lock_irq(&dev->lock);
557 if (dev->ep_out->desc) {
558 len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
559 if (len > MTP_BULK_BUFFER_SIZE) {
560 spin_unlock_irq(&dev->lock);
561 return -EINVAL;
562 }
563 }
564
561 if (dev->state == STATE_CANCELED) { 565 if (dev->state == STATE_CANCELED) {
562 /* report cancelation to userspace */ 566 /* report cancelation to userspace */
563 dev->state = STATE_READY; 567 dev->state = STATE_READY;
@@ -873,6 +877,10 @@ static void receive_file_work(struct work_struct *data)
873 usb_ep_dequeue(dev->ep_out, read_req); 877 usb_ep_dequeue(dev->ep_out, read_req);
874 break; 878 break;
875 } 879 }
880 if (read_req->status) {
881 r = read_req->status;
882 break;
883 }
876 /* if xfer_file_length is 0xFFFFFFFF, then we read until 884 /* if xfer_file_length is 0xFFFFFFFF, then we read until
877 * we get a zero length packet 885 * we get a zero length packet
878 */ 886 */
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d461560212c1..7767fce64adf 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1494,6 +1494,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1494 */ 1494 */
1495 max_esit_payload = xhci_get_max_esit_payload(udev, ep); 1495 max_esit_payload = xhci_get_max_esit_payload(udev, ep);
1496 interval = xhci_get_endpoint_interval(udev, ep); 1496 interval = xhci_get_endpoint_interval(udev, ep);
1497
1498 /* Periodic endpoint bInterval limit quirk */
1499 if (usb_endpoint_xfer_int(&ep->desc) ||
1500 usb_endpoint_xfer_isoc(&ep->desc)) {
1501 if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
1502 udev->speed >= USB_SPEED_HIGH &&
1503 interval >= 7) {
1504 interval = 6;
1505 }
1506 }
1507
1497 mult = xhci_get_endpoint_mult(udev, ep); 1508 mult = xhci_get_endpoint_mult(udev, ep);
1498 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); 1509 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
1499 max_burst = xhci_get_endpoint_max_burst(udev, ep); 1510 max_burst = xhci_get_endpoint_max_burst(udev, ep);
@@ -2475,7 +2486,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2475 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | 2486 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2476 xhci->cmd_ring->cycle_state; 2487 xhci->cmd_ring->cycle_state;
2477 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2488 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2478 "// Setting command ring address to 0x%x", val); 2489 "// Setting command ring address to 0x%016llx", val_64);
2479 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); 2490 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
2480 xhci_dbg_cmd_ptrs(xhci); 2491 xhci_dbg_cmd_ptrs(xhci);
2481 2492
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 954abfd5014d..93f566cb968b 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -199,6 +199,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
199 pdev->device == 0x1042) 199 pdev->device == 0x1042)
200 xhci->quirks |= XHCI_BROKEN_STREAMS; 200 xhci->quirks |= XHCI_BROKEN_STREAMS;
201 201
202 if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
203 xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
204
202 if (xhci->quirks & XHCI_RESET_ON_RESUME) 205 if (xhci->quirks & XHCI_RESET_ON_RESUME)
203 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 206 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
204 "QUIRK: Resetting on resume"); 207 "QUIRK: Resetting on resume");
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f97b009ffc40..328fe06752ec 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1659,6 +1659,7 @@ struct xhci_hcd {
1659#define XHCI_MISSING_CAS (1 << 24) 1659#define XHCI_MISSING_CAS (1 << 24)
1660/* For controller with a broken Port Disable implementation */ 1660/* For controller with a broken Port Disable implementation */
1661#define XHCI_BROKEN_PORT_PED (1 << 25) 1661#define XHCI_BROKEN_PORT_PED (1 << 25)
1662#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
1662 1663
1663 unsigned int num_active_eps; 1664 unsigned int num_active_eps;
1664 unsigned int limit_active_eps; 1665 unsigned int limit_active_eps;
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 5c8210dc6fd9..d94927e5623b 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -159,6 +159,7 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
159 case USB_ENDPOINT_XFER_INT: 159 case USB_ENDPOINT_XFER_INT:
160 if (dev->info->intr) 160 if (dev->info->intr)
161 goto try_intr; 161 goto try_intr;
162 continue;
162 case USB_ENDPOINT_XFER_ISOC: 163 case USB_ENDPOINT_XFER_ISOC:
163 if (dev->info->iso) 164 if (dev->info->iso)
164 goto try_iso; 165 goto try_iso;
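The usbtest hunk adds a 'continue' after the interrupt-endpoint case so that, when interrupt transfers are not wanted, control no longer falls through into the isochronous case. The short user-space sketch below shows the fall-through behaviour being guarded against; the endpoint type values are made up.

#include <stdio.h>

static void classify(int type, int want_intr)
{
	switch (type) {
	case 1:				/* interrupt endpoint */
		if (want_intr) {
			printf("use as interrupt endpoint\n");
			break;
		}
		/* Without this statement we would fall through into case 2. */
		return;
	case 2:				/* isochronous endpoint */
		printf("use as isochronous endpoint\n");
		break;
	}
}

int main(void)
{
	classify(1, 1);		/* wanted: handled as interrupt */
	classify(1, 0);		/* unwanted: skipped, no iso output */
	classify(2, 0);		/* genuinely isochronous */
	return 0;
}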
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index d8d13eede6d9..1dc75db16cbd 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -873,6 +873,7 @@ static const struct usb_device_id id_table_combined[] = {
873 { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID, 873 { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
874 USB_CLASS_VENDOR_SPEC, 874 USB_CLASS_VENDOR_SPEC,
875 USB_SUBCLASS_VENDOR_SPEC, 0x00) }, 875 USB_SUBCLASS_VENDOR_SPEC, 0x00) },
876 { USB_DEVICE_INTERFACE_NUMBER(ACTEL_VID, MICROSEMI_ARROW_SF2PLUS_BOARD_PID, 2) },
876 { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, 877 { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
877 { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), 878 { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
878 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 879 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 48ee04c94a75..71fb9e59db71 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -873,6 +873,12 @@
873#define FIC_VID 0x1457 873#define FIC_VID 0x1457
874#define FIC_NEO1973_DEBUG_PID 0x5118 874#define FIC_NEO1973_DEBUG_PID 0x5118
875 875
876/*
877 * Actel / Microsemi
878 */
879#define ACTEL_VID 0x1514
880#define MICROSEMI_ARROW_SF2PLUS_BOARD_PID 0x2008
881
876/* Olimex */ 882/* Olimex */
877#define OLIMEX_VID 0x15BA 883#define OLIMEX_VID 0x15BA
878#define OLIMEX_ARM_USB_OCD_PID 0x0003 884#define OLIMEX_ARM_USB_OCD_PID 0x0003
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2ba19424e4a1..1d48e62f4f52 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -130,57 +130,36 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
130 rb_erase(&old->node, &iommu->dma_list); 130 rb_erase(&old->node, &iommu->dma_list);
131} 131}
132 132
133struct vwork { 133static int vfio_lock_acct(long npage, bool *lock_cap)
134 struct mm_struct *mm;
135 long npage;
136 struct work_struct work;
137};
138
139/* delayed decrement/increment for locked_vm */
140static void vfio_lock_acct_bg(struct work_struct *work)
141{ 134{
142 struct vwork *vwork = container_of(work, struct vwork, work); 135 int ret;
143 struct mm_struct *mm;
144
145 mm = vwork->mm;
146 down_write(&mm->mmap_sem);
147 mm->locked_vm += vwork->npage;
148 up_write(&mm->mmap_sem);
149 mmput(mm);
150 kfree(vwork);
151}
152 136
153static void vfio_lock_acct(long npage) 137 if (!npage)
154{ 138 return 0;
155 struct vwork *vwork;
156 struct mm_struct *mm;
157 139
158 if (!current->mm || !npage) 140 if (!current->mm)
159 return; /* process exited or nothing to do */ 141 return -ESRCH; /* process exited */
142
143 ret = down_write_killable(&current->mm->mmap_sem);
144 if (!ret) {
145 if (npage > 0) {
146 if (lock_cap ? !*lock_cap : !capable(CAP_IPC_LOCK)) {
147 unsigned long limit;
148
149 limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
150
151 if (current->mm->locked_vm + npage > limit)
152 ret = -ENOMEM;
153 }
154 }
155
156 if (!ret)
157 current->mm->locked_vm += npage;
160 158
161 if (down_write_trylock(&current->mm->mmap_sem)) {
162 current->mm->locked_vm += npage;
163 up_write(&current->mm->mmap_sem); 159 up_write(&current->mm->mmap_sem);
164 return;
165 } 160 }
166 161
167 /* 162 return ret;
168 * Couldn't get mmap_sem lock, so must setup to update
169 * mm->locked_vm later. If locked_vm were atomic, we
170 * wouldn't need this silliness
171 */
172 vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
173 if (!vwork)
174 return;
175 mm = get_task_mm(current);
176 if (!mm) {
177 kfree(vwork);
178 return;
179 }
180 INIT_WORK(&vwork->work, vfio_lock_acct_bg);
181 vwork->mm = mm;
182 vwork->npage = npage;
183 schedule_work(&vwork->work);
184} 163}
185 164
186/* 165/*
@@ -262,9 +241,9 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
262static long vfio_pin_pages(unsigned long vaddr, long npage, 241static long vfio_pin_pages(unsigned long vaddr, long npage,
263 int prot, unsigned long *pfn_base) 242 int prot, unsigned long *pfn_base)
264{ 243{
265 unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 244 unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
266 bool lock_cap = capable(CAP_IPC_LOCK); 245 bool lock_cap = capable(CAP_IPC_LOCK);
267 long ret, i; 246 long ret, i = 1;
268 bool rsvd; 247 bool rsvd;
269 248
270 if (!current->mm) 249 if (!current->mm)
@@ -283,16 +262,11 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
283 return -ENOMEM; 262 return -ENOMEM;
284 } 263 }
285 264
286 if (unlikely(disable_hugepages)) { 265 if (unlikely(disable_hugepages))
287 if (!rsvd) 266 goto out;
288 vfio_lock_acct(1);
289 return 1;
290 }
291 267
292 /* Lock all the consecutive pages from pfn_base */ 268 /* Lock all the consecutive pages from pfn_base */
293 for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) { 269 for (vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
294 unsigned long pfn = 0;
295
296 ret = vaddr_get_pfn(vaddr, prot, &pfn); 270 ret = vaddr_get_pfn(vaddr, prot, &pfn);
297 if (ret) 271 if (ret)
298 break; 272 break;
@@ -308,12 +282,24 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
308 put_pfn(pfn, prot); 282 put_pfn(pfn, prot);
309 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", 283 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
310 __func__, limit << PAGE_SHIFT); 284 __func__, limit << PAGE_SHIFT);
311 break; 285 ret = -ENOMEM;
286 goto unpin_out;
312 } 287 }
313 } 288 }
314 289
290out:
315 if (!rsvd) 291 if (!rsvd)
316 vfio_lock_acct(i); 292 ret = vfio_lock_acct(i, &lock_cap);
293
294unpin_out:
295 if (ret) {
296 if (!rsvd) {
297 for (pfn = *pfn_base ; i ; pfn++, i--)
298 put_pfn(pfn, prot);
299 }
300
301 return ret;
302 }
317 303
318 return i; 304 return i;
319} 305}
@@ -328,7 +314,7 @@ static long vfio_unpin_pages(unsigned long pfn, long npage,
328 unlocked += put_pfn(pfn++, prot); 314 unlocked += put_pfn(pfn++, prot);
329 315
330 if (do_accounting) 316 if (do_accounting)
331 vfio_lock_acct(-unlocked); 317 vfio_lock_acct(-unlocked, NULL);
332 318
333 return unlocked; 319 return unlocked;
334} 320}
@@ -390,7 +376,7 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
390 cond_resched(); 376 cond_resched();
391 } 377 }
392 378
393 vfio_lock_acct(-unlocked); 379 vfio_lock_acct(-unlocked, NULL);
394} 380}
395 381
396static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) 382static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
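The vfio_iommu_type1.c rewrite drops the deferred-work fallback and makes vfio_lock_acct() take mmap_sem with down_write_killable(), enforce RLIMIT_MEMLOCK unless the caller holds CAP_IPC_LOCK, and report failures so vfio_pin_pages() can unpin what it already pinned. The sketch below restates that accounting step on its own; account_locked_pages() is an illustrative name and a simplification of what the patch adds, not a drop-in replacement.

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/sched.h>

static int account_locked_pages(long npage)
{
	unsigned long limit;
	int ret;

	if (!npage)
		return 0;
	if (!current->mm)
		return -ESRCH;		/* the owning process already exited */

	ret = down_write_killable(&current->mm->mmap_sem);
	if (ret)
		return ret;

	if (npage > 0 && !capable(CAP_IPC_LOCK)) {
		limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (current->mm->locked_vm + npage > limit)
			ret = -ENOMEM;	/* would exceed the memlock limit */
	}
	if (!ret)
		current->mm->locked_vm += npage;

	up_write(&current->mm->mmap_sem);
	return ret;
}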
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 9ad527ff9974..2924bddb4a94 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -102,12 +102,11 @@ void invalidate_bdev(struct block_device *bdev)
102{ 102{
103 struct address_space *mapping = bdev->bd_inode->i_mapping; 103 struct address_space *mapping = bdev->bd_inode->i_mapping;
104 104
105 if (mapping->nrpages == 0) 105 if (mapping->nrpages) {
106 return; 106 invalidate_bh_lrus();
107 107 lru_add_drain_all(); /* make sure all lru add caches are flushed */
108 invalidate_bh_lrus(); 108 invalidate_mapping_pages(mapping, 0, -1);
109 lru_add_drain_all(); /* make sure all lru add caches are flushed */ 109 }
110 invalidate_mapping_pages(mapping, 0, -1);
111 /* 99% of the time, we don't need to flush the cleancache on the bdev. 110 /* 99% of the time, we don't need to flush the cleancache on the bdev.
112 * But, for the strange corners, lets be cautious 111 * But, for the strange corners, lets be cautious
113 */ 112 */
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index febc28f9e2c2..75267cdd5dfd 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -392,6 +392,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
392 392
393 if (update_xattr) { 393 if (update_xattr) {
394 int err = 0; 394 int err = 0;
395
395 if (xattr && (flags & XATTR_CREATE)) 396 if (xattr && (flags & XATTR_CREATE))
396 err = -EEXIST; 397 err = -EEXIST;
397 else if (!xattr && (flags & XATTR_REPLACE)) 398 else if (!xattr && (flags & XATTR_REPLACE))
@@ -399,12 +400,14 @@ static int __set_xattr(struct ceph_inode_info *ci,
399 if (err) { 400 if (err) {
400 kfree(name); 401 kfree(name);
401 kfree(val); 402 kfree(val);
403 kfree(*newxattr);
402 return err; 404 return err;
403 } 405 }
404 if (update_xattr < 0) { 406 if (update_xattr < 0) {
405 if (xattr) 407 if (xattr)
406 __remove_xattr(ci, xattr); 408 __remove_xattr(ci, xattr);
407 kfree(name); 409 kfree(name);
410 kfree(*newxattr);
408 return 0; 411 return 0;
409 } 412 }
410 } 413 }
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 02b071bf3732..a0b3e7d1be48 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -83,6 +83,9 @@ convert_sfm_char(const __u16 src_char, char *target)
83 case SFM_COLON: 83 case SFM_COLON:
84 *target = ':'; 84 *target = ':';
85 break; 85 break;
86 case SFM_DOUBLEQUOTE:
87 *target = '"';
88 break;
86 case SFM_ASTERISK: 89 case SFM_ASTERISK:
87 *target = '*'; 90 *target = '*';
88 break; 91 break;
@@ -418,6 +421,9 @@ static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
418 case ':': 421 case ':':
419 dest_char = cpu_to_le16(SFM_COLON); 422 dest_char = cpu_to_le16(SFM_COLON);
420 break; 423 break;
424 case '"':
425 dest_char = cpu_to_le16(SFM_DOUBLEQUOTE);
426 break;
421 case '*': 427 case '*':
422 dest_char = cpu_to_le16(SFM_ASTERISK); 428 dest_char = cpu_to_le16(SFM_ASTERISK);
423 break; 429 break;
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 479bc0a941f3..07ade707fa60 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -57,6 +57,7 @@
57 * not conflict (although almost does) with the mapping above. 57 * not conflict (although almost does) with the mapping above.
58 */ 58 */
59 59
60#define SFM_DOUBLEQUOTE ((__u16) 0xF020)
60#define SFM_ASTERISK ((__u16) 0xF021) 61#define SFM_ASTERISK ((__u16) 0xF021)
61#define SFM_QUESTION ((__u16) 0xF025) 62#define SFM_QUESTION ((__u16) 0xF025)
62#define SFM_COLON ((__u16) 0xF022) 63#define SFM_COLON ((__u16) 0xF022)
@@ -64,8 +65,8 @@
64#define SFM_LESSTHAN ((__u16) 0xF023) 65#define SFM_LESSTHAN ((__u16) 0xF023)
65#define SFM_PIPE ((__u16) 0xF027) 66#define SFM_PIPE ((__u16) 0xF027)
66#define SFM_SLASH ((__u16) 0xF026) 67#define SFM_SLASH ((__u16) 0xF026)
67#define SFM_PERIOD ((__u16) 0xF028) 68#define SFM_SPACE ((__u16) 0xF028)
68#define SFM_SPACE ((__u16) 0xF029) 69#define SFM_PERIOD ((__u16) 0xF029)
69 70
70/* 71/*
71 * Mapping mechanism to use when one of the seven reserved characters is 72 * Mapping mechanism to use when one of the seven reserved characters is
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 15261ba464c5..c0c253005b76 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -87,6 +87,7 @@ extern mempool_t *cifs_req_poolp;
87extern mempool_t *cifs_mid_poolp; 87extern mempool_t *cifs_mid_poolp;
88 88
89struct workqueue_struct *cifsiod_wq; 89struct workqueue_struct *cifsiod_wq;
90struct workqueue_struct *cifsoplockd_wq;
90__u32 cifs_lock_secret; 91__u32 cifs_lock_secret;
91 92
92/* 93/*
@@ -1283,9 +1284,16 @@ init_cifs(void)
1283 goto out_clean_proc; 1284 goto out_clean_proc;
1284 } 1285 }
1285 1286
1287 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1288 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1289 if (!cifsoplockd_wq) {
1290 rc = -ENOMEM;
1291 goto out_destroy_cifsiod_wq;
1292 }
1293
1286 rc = cifs_fscache_register(); 1294 rc = cifs_fscache_register();
1287 if (rc) 1295 if (rc)
1288 goto out_destroy_wq; 1296 goto out_destroy_cifsoplockd_wq;
1289 1297
1290 rc = cifs_init_inodecache(); 1298 rc = cifs_init_inodecache();
1291 if (rc) 1299 if (rc)
@@ -1333,7 +1341,9 @@ out_destroy_inodecache:
1333 cifs_destroy_inodecache(); 1341 cifs_destroy_inodecache();
1334out_unreg_fscache: 1342out_unreg_fscache:
1335 cifs_fscache_unregister(); 1343 cifs_fscache_unregister();
1336out_destroy_wq: 1344out_destroy_cifsoplockd_wq:
1345 destroy_workqueue(cifsoplockd_wq);
1346out_destroy_cifsiod_wq:
1337 destroy_workqueue(cifsiod_wq); 1347 destroy_workqueue(cifsiod_wq);
1338out_clean_proc: 1348out_clean_proc:
1339 cifs_proc_clean(); 1349 cifs_proc_clean();
@@ -1356,6 +1366,7 @@ exit_cifs(void)
1356 cifs_destroy_mids(); 1366 cifs_destroy_mids();
1357 cifs_destroy_inodecache(); 1367 cifs_destroy_inodecache();
1358 cifs_fscache_unregister(); 1368 cifs_fscache_unregister();
1369 destroy_workqueue(cifsoplockd_wq);
1359 destroy_workqueue(cifsiod_wq); 1370 destroy_workqueue(cifsiod_wq);
1360 cifs_proc_clean(); 1371 cifs_proc_clean();
1361} 1372}
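Editorial note: the cifsfs.c hunks above give oplock-break handling its own freezable, memory-reclaim-safe workqueue and wire its allocation into the existing goto-unwind error path; the misc.c and smb2misc.c hunks further down then queue oplock-break work on that dedicated queue. Below is a minimal sketch of just the allocate/unwind ordering. The module skeleton and variable names are placeholders, not the real init_cifs()/exit_cifs(); only alloc_workqueue()/destroy_workqueue() and the WQ_FREEZABLE | WQ_MEM_RECLAIM flags are taken from the hunk.

/* Sketch of the setup/teardown ordering introduced above. Illustrative
 * module skeleton only; the real init path does much more between steps.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *iod_wq;
static struct workqueue_struct *oplockd_wq;

static int __init example_init(void)
{
        iod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
        if (!iod_wq)
                return -ENOMEM;

        oplockd_wq = alloc_workqueue("cifsoplockd",
                                     WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
        if (!oplockd_wq) {
                destroy_workqueue(iod_wq);      /* unwind in reverse order */
                return -ENOMEM;
        }
        return 0;
}

static void __exit example_exit(void)
{
        /* Teardown mirrors the exit path: newest queue first. */
        destroy_workqueue(oplockd_wq);
        destroy_workqueue(iod_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");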
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index b3830f7ab260..48ef401c3c61 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1651,6 +1651,7 @@ void cifs_oplock_break(struct work_struct *work);
1651 1651
1652extern const struct slow_work_ops cifs_oplock_break_ops; 1652extern const struct slow_work_ops cifs_oplock_break_ops;
1653extern struct workqueue_struct *cifsiod_wq; 1653extern struct workqueue_struct *cifsiod_wq;
1654extern struct workqueue_struct *cifsoplockd_wq;
1654extern __u32 cifs_lock_secret; 1655extern __u32 cifs_lock_secret;
1655 1656
1656extern mempool_t *cifs_mid_poolp; 1657extern mempool_t *cifs_mid_poolp;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 586fdac05ec2..1f91c9dadd5b 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -717,6 +717,9 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
717 if (rc) 717 if (rc)
718 return rc; 718 return rc;
719 719
720 if (server->capabilities & CAP_UNICODE)
721 smb->hdr.Flags2 |= SMBFLG2_UNICODE;
722
720 /* set up echo request */ 723 /* set up echo request */
721 smb->hdr.Tid = 0xffff; 724 smb->hdr.Tid = 0xffff;
722 smb->hdr.WordCount = 1; 725 smb->hdr.WordCount = 1;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index b8015de88e8c..1a545695f547 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2839,16 +2839,14 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
2839{ 2839{
2840 struct cifs_sb_info *old = CIFS_SB(sb); 2840 struct cifs_sb_info *old = CIFS_SB(sb);
2841 struct cifs_sb_info *new = mnt_data->cifs_sb; 2841 struct cifs_sb_info *new = mnt_data->cifs_sb;
2842 bool old_set = old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
2843 bool new_set = new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
2842 2844
2843 if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) { 2845 if (old_set && new_set && !strcmp(new->prepath, old->prepath))
2844 if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
2845 return 0;
2846 /* The prepath should be null terminated strings */
2847 if (strcmp(new->prepath, old->prepath))
2848 return 0;
2849
2850 return 1; 2846 return 1;
2851 } 2847 else if (!old_set && !new_set)
2848 return 1;
2849
2852 return 0; 2850 return 0;
2853} 2851}
2854 2852
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 001528781b6b..bdba9e7a9438 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -264,10 +264,14 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
264 rc = -EOPNOTSUPP; 264 rc = -EOPNOTSUPP;
265 break; 265 break;
266 case CIFS_IOC_GET_MNT_INFO: 266 case CIFS_IOC_GET_MNT_INFO:
267 if (pSMBFile == NULL)
268 break;
267 tcon = tlink_tcon(pSMBFile->tlink); 269 tcon = tlink_tcon(pSMBFile->tlink);
268 rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg); 270 rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
269 break; 271 break;
270 case CIFS_ENUMERATE_SNAPSHOTS: 272 case CIFS_ENUMERATE_SNAPSHOTS:
273 if (pSMBFile == NULL)
274 break;
271 if (arg == 0) { 275 if (arg == 0) {
272 rc = -EINVAL; 276 rc = -EINVAL;
273 goto cifs_ioc_exit; 277 goto cifs_ioc_exit;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c6729156f9a0..5419afea0a36 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -492,7 +492,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
492 CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, 492 CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
493 &pCifsInode->flags); 493 &pCifsInode->flags);
494 494
495 queue_work(cifsiod_wq, 495 queue_work(cifsoplockd_wq,
496 &netfile->oplock_break); 496 &netfile->oplock_break);
497 netfile->oplock_break_cancelled = false; 497 netfile->oplock_break_cancelled = false;
498 498
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 97307808ae42..967dfe656ced 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -494,7 +494,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
494 else 494 else
495 cfile->oplock_break_cancelled = true; 495 cfile->oplock_break_cancelled = true;
496 496
497 queue_work(cifsiod_wq, &cfile->oplock_break); 497 queue_work(cifsoplockd_wq, &cfile->oplock_break);
498 kfree(lw); 498 kfree(lw);
499 return true; 499 return true;
500 } 500 }
@@ -638,7 +638,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
638 CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, 638 CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
639 &cinode->flags); 639 &cinode->flags);
640 spin_unlock(&cfile->file_info_lock); 640 spin_unlock(&cfile->file_info_lock);
641 queue_work(cifsiod_wq, &cfile->oplock_break); 641 queue_work(cifsoplockd_wq,
642 &cfile->oplock_break);
642 643
643 spin_unlock(&tcon->open_file_lock); 644 spin_unlock(&tcon->open_file_lock);
644 spin_unlock(&cifs_tcp_ses_lock); 645 spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 007abf7195af..36334fe3266c 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -924,6 +924,7 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
924 } 924 }
925 if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) { 925 if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
926 rc = -ERANGE; 926 rc = -ERANGE;
927 kfree(retbuf);
927 return rc; 928 return rc;
928 } 929 }
929 930
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 802185386851..7c1c6c39d582 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -569,8 +569,12 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
569 } 569 }
570 570
571 if (rsplen != sizeof(struct validate_negotiate_info_rsp)) { 571 if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
572 cifs_dbg(VFS, "invalid size of protocol negotiate response\n"); 572 cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n",
573 return -EIO; 573 rsplen);
574
575 /* relax check since Mac returns max bufsize allowed on ioctl */
576 if (rsplen > CIFSMaxBufSize)
577 return -EIO;
574 } 578 }
575 579
576 /* check validate negotiate info response matches what we got earlier */ 580 /* check validate negotiate info response matches what we got earlier */
@@ -1670,8 +1674,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1670 * than one credit. Windows typically sets this smaller, but for some 1674 * than one credit. Windows typically sets this smaller, but for some
1671 * ioctls it may be useful to allow server to send more. No point 1675 * ioctls it may be useful to allow server to send more. No point
1672 * limiting what the server can send as long as fits in one credit 1676 * limiting what the server can send as long as fits in one credit
 1677 * Unfortunately, we cannot handle more than CIFS_MAX_MSG_SIZE
 1678 * (by default; note that it can be overridden to make the max larger)
 1679 * in responses, except for read responses, which can be bigger.
 1680 * We may want to bump this limit up.
1673 */ 1681 */
1674 req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */ 1682 req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
1675 1683
1676 if (is_fsctl) 1684 if (is_fsctl)
1677 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); 1685 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index abc18847b98d..bb4e209bd809 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -161,27 +161,61 @@ int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy)
161} 161}
162EXPORT_SYMBOL(fscrypt_get_policy); 162EXPORT_SYMBOL(fscrypt_get_policy);
163 163
164/**
165 * fscrypt_has_permitted_context() - is a file's encryption policy permitted
166 * within its directory?
167 *
168 * @parent: inode for parent directory
169 * @child: inode for file being looked up, opened, or linked into @parent
170 *
171 * Filesystems must call this before permitting access to an inode in a
172 * situation where the parent directory is encrypted (either before allowing
173 * ->lookup() to succeed, or for a regular file before allowing it to be opened)
174 * and before any operation that involves linking an inode into an encrypted
175 * directory, including link, rename, and cross rename. It enforces the
176 * constraint that within a given encrypted directory tree, all files use the
177 * same encryption policy. The pre-access check is needed to detect potentially
178 * malicious offline violations of this constraint, while the link and rename
179 * checks are needed to prevent online violations of this constraint.
180 *
181 * Return: 1 if permitted, 0 if forbidden. If forbidden, the caller must fail
182 * the filesystem operation with EPERM.
183 */
164int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) 184int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
165{ 185{
166 struct fscrypt_info *parent_ci, *child_ci; 186 const struct fscrypt_operations *cops = parent->i_sb->s_cop;
187 const struct fscrypt_info *parent_ci, *child_ci;
188 struct fscrypt_context parent_ctx, child_ctx;
167 int res; 189 int res;
168 190
169 if ((parent == NULL) || (child == NULL)) {
170 printk(KERN_ERR "parent %p child %p\n", parent, child);
171 BUG_ON(1);
172 }
173
174 /* No restrictions on file types which are never encrypted */ 191 /* No restrictions on file types which are never encrypted */
175 if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) && 192 if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
176 !S_ISLNK(child->i_mode)) 193 !S_ISLNK(child->i_mode))
177 return 1; 194 return 1;
178 195
179 /* no restrictions if the parent directory is not encrypted */ 196 /* No restrictions if the parent directory is unencrypted */
180 if (!parent->i_sb->s_cop->is_encrypted(parent)) 197 if (!cops->is_encrypted(parent))
181 return 1; 198 return 1;
182 /* if the child directory is not encrypted, this is always a problem */ 199
183 if (!parent->i_sb->s_cop->is_encrypted(child)) 200 /* Encrypted directories must not contain unencrypted files */
201 if (!cops->is_encrypted(child))
184 return 0; 202 return 0;
203
204 /*
205 * Both parent and child are encrypted, so verify they use the same
206 * encryption policy. Compare the fscrypt_info structs if the keys are
207 * available, otherwise retrieve and compare the fscrypt_contexts.
208 *
209 * Note that the fscrypt_context retrieval will be required frequently
210 * when accessing an encrypted directory tree without the key.
211 * Performance-wise this is not a big deal because we already don't
212 * really optimize for file access without the key (to the extent that
213 * such access is even possible), given that any attempted access
214 * already causes a fscrypt_context retrieval and keyring search.
215 *
216 * In any case, if an unexpected error occurs, fall back to "forbidden".
217 */
218
185 res = fscrypt_get_encryption_info(parent); 219 res = fscrypt_get_encryption_info(parent);
186 if (res) 220 if (res)
187 return 0; 221 return 0;
@@ -190,17 +224,32 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
190 return 0; 224 return 0;
191 parent_ci = parent->i_crypt_info; 225 parent_ci = parent->i_crypt_info;
192 child_ci = child->i_crypt_info; 226 child_ci = child->i_crypt_info;
193 if (!parent_ci && !child_ci) 227
194 return 1; 228 if (parent_ci && child_ci) {
195 if (!parent_ci || !child_ci) 229 return memcmp(parent_ci->ci_master_key, child_ci->ci_master_key,
230 FS_KEY_DESCRIPTOR_SIZE) == 0 &&
231 (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
232 (parent_ci->ci_filename_mode ==
233 child_ci->ci_filename_mode) &&
234 (parent_ci->ci_flags == child_ci->ci_flags);
235 }
236
237 res = cops->get_context(parent, &parent_ctx, sizeof(parent_ctx));
238 if (res != sizeof(parent_ctx))
196 return 0; 239 return 0;
197 240
198 return (memcmp(parent_ci->ci_master_key, 241 res = cops->get_context(child, &child_ctx, sizeof(child_ctx));
199 child_ci->ci_master_key, 242 if (res != sizeof(child_ctx))
200 FS_KEY_DESCRIPTOR_SIZE) == 0 && 243 return 0;
201 (parent_ci->ci_data_mode == child_ci->ci_data_mode) && 244
202 (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) && 245 return memcmp(parent_ctx.master_key_descriptor,
203 (parent_ci->ci_flags == child_ci->ci_flags)); 246 child_ctx.master_key_descriptor,
247 FS_KEY_DESCRIPTOR_SIZE) == 0 &&
248 (parent_ctx.contents_encryption_mode ==
249 child_ctx.contents_encryption_mode) &&
250 (parent_ctx.filenames_encryption_mode ==
251 child_ctx.filenames_encryption_mode) &&
252 (parent_ctx.flags == child_ctx.flags);
204} 253}
205EXPORT_SYMBOL(fscrypt_has_permitted_context); 254EXPORT_SYMBOL(fscrypt_has_permitted_context);
206 255
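Editorial note: the kerneldoc added above pins down the caller contract for fscrypt_has_permitted_context(): 1 means permitted, 0 means forbidden, and a forbidden result must surface to userspace as EPERM. A minimal sketch of a hypothetical caller under that contract follows; the helper name is made up, only fscrypt_has_permitted_context() and the s_cop->is_encrypted() hook come from the hunk, and the header name is an assumption for this kernel version.

/* Hypothetical caller sketch; a lookup or link path would run a check like
 * this before exposing the child inode. Assumes the filesystem sets s_cop,
 * as fscrypt-enabled filesystems do.
 */
#include <linux/fs.h>
#include <linux/fscrypto.h>     /* assumed fscrypt API header on this tree */

static int example_check_context(struct inode *dir, struct inode *inode)
{
        /* Unencrypted parent: nothing to enforce. */
        if (!dir->i_sb->s_cop->is_encrypted(dir))
                return 0;

        /* "If forbidden, the caller must fail the filesystem operation
         * with EPERM."
         */
        if (!fscrypt_has_permitted_context(dir, inode))
                return -EPERM;

        return 0;
}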
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3cb7fa290c18..42723b27d6f2 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5741,6 +5741,11 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5741 file_update_time(vma->vm_file); 5741 file_update_time(vma->vm_file);
5742 5742
5743 down_read(&EXT4_I(inode)->i_mmap_sem); 5743 down_read(&EXT4_I(inode)->i_mmap_sem);
5744
5745 ret = ext4_convert_inline_data(inode);
5746 if (ret)
5747 goto out_ret;
5748
5744 /* Delalloc case is easy... */ 5749 /* Delalloc case is easy... */
5745 if (test_opt(inode->i_sb, DELALLOC) && 5750 if (test_opt(inode->i_sb, DELALLOC) &&
5746 !ext4_should_journal_data(inode) && 5751 !ext4_should_journal_data(inode) &&
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index ef3b4eb54cf2..08ecdeebd6f7 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -223,8 +223,7 @@ int orangefs_setattr(struct dentry *dentry, struct iattr *iattr)
223 if (ret) 223 if (ret)
224 goto out; 224 goto out;
225 225
226 if ((iattr->ia_valid & ATTR_SIZE) && 226 if (iattr->ia_valid & ATTR_SIZE) {
227 iattr->ia_size != i_size_read(inode)) {
228 ret = orangefs_setattr_size(inode, iattr); 227 ret = orangefs_setattr_size(inode, iattr);
229 if (ret) 228 if (ret)
230 goto out; 229 goto out;
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index a290ff6ec756..7c315938e9c2 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -193,8 +193,6 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
193 goto out; 193 goto out;
194 } 194 }
195 195
196 ORANGEFS_I(inode)->getattr_time = jiffies - 1;
197
198 gossip_debug(GOSSIP_NAME_DEBUG, 196 gossip_debug(GOSSIP_NAME_DEBUG,
199 "%s:%s:%d " 197 "%s:%s:%d "
200 "Found good inode [%lu] with count [%d]\n", 198 "Found good inode [%lu] with count [%d]\n",
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index 74a81b1daaac..237c9c04dc3b 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -76,11 +76,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *name,
76 if (S_ISLNK(inode->i_mode)) 76 if (S_ISLNK(inode->i_mode))
77 return -EOPNOTSUPP; 77 return -EOPNOTSUPP;
78 78
79 if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) { 79 if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
80 gossip_err("Invalid key length (%d)\n",
81 (int)strlen(name));
82 return -EINVAL; 80 return -EINVAL;
83 }
84 81
85 fsuid = from_kuid(&init_user_ns, current_fsuid()); 82 fsuid = from_kuid(&init_user_ns, current_fsuid());
86 fsgid = from_kgid(&init_user_ns, current_fsgid()); 83 fsgid = from_kgid(&init_user_ns, current_fsgid());
@@ -172,6 +169,9 @@ static int orangefs_inode_removexattr(struct inode *inode, const char *name,
172 struct orangefs_kernel_op_s *new_op = NULL; 169 struct orangefs_kernel_op_s *new_op = NULL;
173 int ret = -ENOMEM; 170 int ret = -ENOMEM;
174 171
172 if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
173 return -EINVAL;
174
175 down_write(&orangefs_inode->xattr_sem); 175 down_write(&orangefs_inode->xattr_sem);
176 new_op = op_alloc(ORANGEFS_VFS_OP_REMOVEXATTR); 176 new_op = op_alloc(ORANGEFS_VFS_OP_REMOVEXATTR);
177 if (!new_op) 177 if (!new_op)
@@ -231,23 +231,13 @@ int orangefs_inode_setxattr(struct inode *inode, const char *name,
231 "%s: name %s, buffer_size %zd\n", 231 "%s: name %s, buffer_size %zd\n",
232 __func__, name, size); 232 __func__, name, size);
233 233
234 if (size >= ORANGEFS_MAX_XATTR_VALUELEN || 234 if (size > ORANGEFS_MAX_XATTR_VALUELEN)
235 flags < 0) { 235 return -EINVAL;
236 gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n", 236 if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
237 (int)size,
238 flags);
239 return -EINVAL; 237 return -EINVAL;
240 }
241 238
242 internal_flag = convert_to_internal_xattr_flags(flags); 239 internal_flag = convert_to_internal_xattr_flags(flags);
243 240
244 if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
245 gossip_err
246 ("orangefs_inode_setxattr: bogus key size (%d)\n",
247 (int)(strlen(name)));
248 return -EINVAL;
249 }
250
251 /* This is equivalent to a removexattr */ 241 /* This is equivalent to a removexattr */
252 if (size == 0 && value == NULL) { 242 if (size == 0 && value == NULL) {
253 gossip_debug(GOSSIP_XATTR_DEBUG, 243 gossip_debug(GOSSIP_XATTR_DEBUG,
@@ -358,7 +348,7 @@ try_again:
358 348
359 returned_count = new_op->downcall.resp.listxattr.returned_count; 349 returned_count = new_op->downcall.resp.listxattr.returned_count;
360 if (returned_count < 0 || 350 if (returned_count < 0 ||
361 returned_count >= ORANGEFS_MAX_XATTR_LISTLEN) { 351 returned_count > ORANGEFS_MAX_XATTR_LISTLEN) {
362 gossip_err("%s: impossible value for returned_count:%d:\n", 352 gossip_err("%s: impossible value for returned_count:%d:\n",
363 __func__, 353 __func__,
364 returned_count); 354 returned_count);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 14984d902a99..43033a3d66d5 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -704,6 +704,7 @@ int pstore_register(struct pstore_info *psi)
704 if (psi->flags & PSTORE_FLAGS_PMSG) 704 if (psi->flags & PSTORE_FLAGS_PMSG)
705 pstore_register_pmsg(); 705 pstore_register_pmsg();
706 706
707 /* Start watching for new records, if desired. */
707 if (pstore_update_ms >= 0) { 708 if (pstore_update_ms >= 0) {
708 pstore_timer.expires = jiffies + 709 pstore_timer.expires = jiffies +
709 msecs_to_jiffies(pstore_update_ms); 710 msecs_to_jiffies(pstore_update_ms);
@@ -726,6 +727,11 @@ EXPORT_SYMBOL_GPL(pstore_register);
726 727
727void pstore_unregister(struct pstore_info *psi) 728void pstore_unregister(struct pstore_info *psi)
728{ 729{
730 /* Stop timer and make sure all work has finished. */
731 pstore_update_ms = -1;
732 del_timer_sync(&pstore_timer);
733 flush_work(&pstore_work);
734
729 if (psi->flags & PSTORE_FLAGS_PMSG) 735 if (psi->flags & PSTORE_FLAGS_PMSG)
730 pstore_unregister_pmsg(); 736 pstore_unregister_pmsg();
731 if (psi->flags & PSTORE_FLAGS_FTRACE) 737 if (psi->flags & PSTORE_FLAGS_FTRACE)
@@ -825,7 +831,9 @@ static void pstore_timefunc(unsigned long dummy)
825 schedule_work(&pstore_work); 831 schedule_work(&pstore_work);
826 } 832 }
827 833
828 mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms)); 834 if (pstore_update_ms >= 0)
835 mod_timer(&pstore_timer,
836 jiffies + msecs_to_jiffies(pstore_update_ms));
829} 837}
830 838
831module_param(backend, charp, 0444); 839module_param(backend, charp, 0444);
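Editorial note: the unregister path above has to shut down two things that keep each other alive, a self-re-arming timer and the work item it schedules. Below is a generic sketch of that ordering with placeholder names; only the disable-then-del_timer_sync()-then-flush_work() sequence and the guarded re-arm mirror the pstore hunk.

/* Sketch of the teardown ordering added above: disable re-arming first,
 * then synchronously stop the timer, then wait for queued work. Names
 * are generic placeholders, not the pstore implementation itself.
 */
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static int poll_ms = 60000;             /* < 0 means "stop polling" */

static void poll_workfunc(struct work_struct *work)
{
        /* ... scan the backend for new records ... */
}
static DECLARE_WORK(poll_work, poll_workfunc);

static void poll_timefunc(unsigned long dummy);
static DEFINE_TIMER(poll_timer, poll_timefunc, 0, 0);

static void poll_timefunc(unsigned long dummy)
{
        schedule_work(&poll_work);

        /* Re-arm only while polling is still enabled, as the hunk now does. */
        if (poll_ms >= 0)
                mod_timer(&poll_timer,
                          jiffies + msecs_to_jiffies(poll_ms));
}

static void example_unregister(void)
{
        poll_ms = -1;                   /* stop the handler from re-arming */
        del_timer_sync(&poll_timer);    /* wait out a running handler */
        flush_work(&poll_work);         /* ensure queued work has finished */
}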
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index ae2b4babe2e5..a23168179716 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -34,6 +34,8 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
34 struct dentry *parent_lower_dentry = NULL; 34 struct dentry *parent_lower_dentry = NULL;
35 struct dentry *lower_cur_parent_dentry = NULL; 35 struct dentry *lower_cur_parent_dentry = NULL;
36 struct dentry *lower_dentry = NULL; 36 struct dentry *lower_dentry = NULL;
37 struct inode *inode;
38 struct sdcardfs_inode_data *data;
37 39
38 if (flags & LOOKUP_RCU) 40 if (flags & LOOKUP_RCU)
39 return -ECHILD; 41 return -ECHILD;
@@ -103,6 +105,19 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
103 spin_unlock(&dentry->d_lock); 105 spin_unlock(&dentry->d_lock);
104 spin_unlock(&lower_dentry->d_lock); 106 spin_unlock(&lower_dentry->d_lock);
105 } 107 }
108 if (!err)
109 goto out;
110
111 /* If our top's inode is gone, we may be out of date */
112 inode = d_inode(dentry);
113 if (inode) {
114 data = top_data_get(SDCARDFS_I(inode));
115 if (data->abandoned) {
116 d_drop(dentry);
117 err = 0;
118 }
119 data_put(data);
120 }
106 121
107out: 122out:
108 dput(parent_dentry); 123 dput(parent_dentry);
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 5a0ef3889846..1239d1cd208b 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -26,28 +26,28 @@ static void inherit_derived_state(struct inode *parent, struct inode *child)
26 struct sdcardfs_inode_info *pi = SDCARDFS_I(parent); 26 struct sdcardfs_inode_info *pi = SDCARDFS_I(parent);
27 struct sdcardfs_inode_info *ci = SDCARDFS_I(child); 27 struct sdcardfs_inode_info *ci = SDCARDFS_I(child);
28 28
29 ci->perm = PERM_INHERIT; 29 ci->data->perm = PERM_INHERIT;
30 ci->userid = pi->userid; 30 ci->data->userid = pi->data->userid;
31 ci->d_uid = pi->d_uid; 31 ci->data->d_uid = pi->data->d_uid;
32 ci->under_android = pi->under_android; 32 ci->data->under_android = pi->data->under_android;
33 ci->under_cache = pi->under_cache; 33 ci->data->under_cache = pi->data->under_cache;
34 ci->under_obb = pi->under_obb; 34 ci->data->under_obb = pi->data->under_obb;
35 set_top(ci, pi->top); 35 set_top(ci, pi->top_data);
36} 36}
37 37
38/* helper function for derived state */ 38/* helper function for derived state */
39void setup_derived_state(struct inode *inode, perm_t perm, userid_t userid, 39void setup_derived_state(struct inode *inode, perm_t perm, userid_t userid,
40 uid_t uid, bool under_android, 40 uid_t uid, bool under_android,
41 struct inode *top) 41 struct sdcardfs_inode_data *top)
42{ 42{
43 struct sdcardfs_inode_info *info = SDCARDFS_I(inode); 43 struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
44 44
45 info->perm = perm; 45 info->data->perm = perm;
46 info->userid = userid; 46 info->data->userid = userid;
47 info->d_uid = uid; 47 info->data->d_uid = uid;
48 info->under_android = under_android; 48 info->data->under_android = under_android;
49 info->under_cache = false; 49 info->data->under_cache = false;
50 info->under_obb = false; 50 info->data->under_obb = false;
51 set_top(info, top); 51 set_top(info, top);
52} 52}
53 53
@@ -58,7 +58,8 @@ void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
58 const struct qstr *name) 58 const struct qstr *name)
59{ 59{
60 struct sdcardfs_inode_info *info = SDCARDFS_I(d_inode(dentry)); 60 struct sdcardfs_inode_info *info = SDCARDFS_I(d_inode(dentry));
61 struct sdcardfs_inode_info *parent_info = SDCARDFS_I(d_inode(parent)); 61 struct sdcardfs_inode_data *parent_data =
62 SDCARDFS_I(d_inode(parent))->data;
62 appid_t appid; 63 appid_t appid;
63 unsigned long user_num; 64 unsigned long user_num;
64 int err; 65 int err;
@@ -82,60 +83,61 @@ void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
82 if (!S_ISDIR(d_inode(dentry)->i_mode)) 83 if (!S_ISDIR(d_inode(dentry)->i_mode))
83 return; 84 return;
84 /* Derive custom permissions based on parent and current node */ 85 /* Derive custom permissions based on parent and current node */
85 switch (parent_info->perm) { 86 switch (parent_data->perm) {
86 case PERM_INHERIT: 87 case PERM_INHERIT:
87 case PERM_ANDROID_PACKAGE_CACHE: 88 case PERM_ANDROID_PACKAGE_CACHE:
88 /* Already inherited above */ 89 /* Already inherited above */
89 break; 90 break;
90 case PERM_PRE_ROOT: 91 case PERM_PRE_ROOT:
91 /* Legacy internal layout places users at top level */ 92 /* Legacy internal layout places users at top level */
92 info->perm = PERM_ROOT; 93 info->data->perm = PERM_ROOT;
93 err = kstrtoul(name->name, 10, &user_num); 94 err = kstrtoul(name->name, 10, &user_num);
94 if (err) 95 if (err)
95 info->userid = 0; 96 info->data->userid = 0;
96 else 97 else
97 info->userid = user_num; 98 info->data->userid = user_num;
98 set_top(info, &info->vfs_inode); 99 set_top(info, info->data);
99 break; 100 break;
100 case PERM_ROOT: 101 case PERM_ROOT:
101 /* Assume masked off by default. */ 102 /* Assume masked off by default. */
102 if (qstr_case_eq(name, &q_Android)) { 103 if (qstr_case_eq(name, &q_Android)) {
103 /* App-specific directories inside; let anyone traverse */ 104 /* App-specific directories inside; let anyone traverse */
104 info->perm = PERM_ANDROID; 105 info->data->perm = PERM_ANDROID;
105 info->under_android = true; 106 info->data->under_android = true;
106 set_top(info, &info->vfs_inode); 107 set_top(info, info->data);
107 } 108 }
108 break; 109 break;
109 case PERM_ANDROID: 110 case PERM_ANDROID:
110 if (qstr_case_eq(name, &q_data)) { 111 if (qstr_case_eq(name, &q_data)) {
111 /* App-specific directories inside; let anyone traverse */ 112 /* App-specific directories inside; let anyone traverse */
112 info->perm = PERM_ANDROID_DATA; 113 info->data->perm = PERM_ANDROID_DATA;
113 set_top(info, &info->vfs_inode); 114 set_top(info, info->data);
114 } else if (qstr_case_eq(name, &q_obb)) { 115 } else if (qstr_case_eq(name, &q_obb)) {
115 /* App-specific directories inside; let anyone traverse */ 116 /* App-specific directories inside; let anyone traverse */
116 info->perm = PERM_ANDROID_OBB; 117 info->data->perm = PERM_ANDROID_OBB;
117 info->under_obb = true; 118 info->data->under_obb = true;
118 set_top(info, &info->vfs_inode); 119 set_top(info, info->data);
119 /* Single OBB directory is always shared */ 120 /* Single OBB directory is always shared */
120 } else if (qstr_case_eq(name, &q_media)) { 121 } else if (qstr_case_eq(name, &q_media)) {
121 /* App-specific directories inside; let anyone traverse */ 122 /* App-specific directories inside; let anyone traverse */
122 info->perm = PERM_ANDROID_MEDIA; 123 info->data->perm = PERM_ANDROID_MEDIA;
123 set_top(info, &info->vfs_inode); 124 set_top(info, info->data);
124 } 125 }
125 break; 126 break;
126 case PERM_ANDROID_OBB: 127 case PERM_ANDROID_OBB:
127 case PERM_ANDROID_DATA: 128 case PERM_ANDROID_DATA:
128 case PERM_ANDROID_MEDIA: 129 case PERM_ANDROID_MEDIA:
129 info->perm = PERM_ANDROID_PACKAGE; 130 info->data->perm = PERM_ANDROID_PACKAGE;
130 appid = get_appid(name->name); 131 appid = get_appid(name->name);
131 if (appid != 0 && !is_excluded(name->name, parent_info->userid)) 132 if (appid != 0 && !is_excluded(name->name, parent_data->userid))
132 info->d_uid = multiuser_get_uid(parent_info->userid, appid); 133 info->data->d_uid =
133 set_top(info, &info->vfs_inode); 134 multiuser_get_uid(parent_data->userid, appid);
135 set_top(info, info->data);
134 break; 136 break;
135 case PERM_ANDROID_PACKAGE: 137 case PERM_ANDROID_PACKAGE:
136 if (qstr_case_eq(name, &q_cache)) { 138 if (qstr_case_eq(name, &q_cache)) {
137 info->perm = PERM_ANDROID_PACKAGE_CACHE; 139 info->data->perm = PERM_ANDROID_PACKAGE_CACHE;
138 info->under_cache = true; 140 info->data->under_cache = true;
139 } 141 }
140 break; 142 break;
141 } 143 }
@@ -166,7 +168,8 @@ void fixup_lower_ownership(struct dentry *dentry, const char *name)
166 struct inode *delegated_inode = NULL; 168 struct inode *delegated_inode = NULL;
167 int error; 169 int error;
168 struct sdcardfs_inode_info *info; 170 struct sdcardfs_inode_info *info;
169 struct sdcardfs_inode_info *info_top; 171 struct sdcardfs_inode_data *info_d;
172 struct sdcardfs_inode_data *info_top;
170 perm_t perm; 173 perm_t perm;
171 struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb); 174 struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
172 uid_t uid = sbi->options.fs_low_uid; 175 uid_t uid = sbi->options.fs_low_uid;
@@ -174,15 +177,16 @@ void fixup_lower_ownership(struct dentry *dentry, const char *name)
174 struct iattr newattrs; 177 struct iattr newattrs;
175 178
176 info = SDCARDFS_I(d_inode(dentry)); 179 info = SDCARDFS_I(d_inode(dentry));
177 perm = info->perm; 180 info_d = info->data;
178 if (info->under_obb) { 181 perm = info_d->perm;
182 if (info_d->under_obb) {
179 perm = PERM_ANDROID_OBB; 183 perm = PERM_ANDROID_OBB;
180 } else if (info->under_cache) { 184 } else if (info_d->under_cache) {
181 perm = PERM_ANDROID_PACKAGE_CACHE; 185 perm = PERM_ANDROID_PACKAGE_CACHE;
182 } else if (perm == PERM_INHERIT) { 186 } else if (perm == PERM_INHERIT) {
183 info_top = SDCARDFS_I(grab_top(info)); 187 info_top = top_data_get(info);
184 perm = info_top->perm; 188 perm = info_top->perm;
185 release_top(info); 189 data_put(info_top);
186 } 190 }
187 191
188 switch (perm) { 192 switch (perm) {
@@ -192,7 +196,7 @@ void fixup_lower_ownership(struct dentry *dentry, const char *name)
192 case PERM_ANDROID_MEDIA: 196 case PERM_ANDROID_MEDIA:
193 case PERM_ANDROID_PACKAGE: 197 case PERM_ANDROID_PACKAGE:
194 case PERM_ANDROID_PACKAGE_CACHE: 198 case PERM_ANDROID_PACKAGE_CACHE:
195 uid = multiuser_get_uid(info->userid, uid); 199 uid = multiuser_get_uid(info_d->userid, uid);
196 break; 200 break;
197 case PERM_ANDROID_OBB: 201 case PERM_ANDROID_OBB:
198 uid = AID_MEDIA_OBB; 202 uid = AID_MEDIA_OBB;
@@ -207,24 +211,24 @@ void fixup_lower_ownership(struct dentry *dentry, const char *name)
207 case PERM_ANDROID_DATA: 211 case PERM_ANDROID_DATA:
208 case PERM_ANDROID_MEDIA: 212 case PERM_ANDROID_MEDIA:
209 if (S_ISDIR(d_inode(dentry)->i_mode)) 213 if (S_ISDIR(d_inode(dentry)->i_mode))
210 gid = multiuser_get_uid(info->userid, AID_MEDIA_RW); 214 gid = multiuser_get_uid(info_d->userid, AID_MEDIA_RW);
211 else 215 else
212 gid = multiuser_get_uid(info->userid, get_type(name)); 216 gid = multiuser_get_uid(info_d->userid, get_type(name));
213 break; 217 break;
214 case PERM_ANDROID_OBB: 218 case PERM_ANDROID_OBB:
215 gid = AID_MEDIA_OBB; 219 gid = AID_MEDIA_OBB;
216 break; 220 break;
217 case PERM_ANDROID_PACKAGE: 221 case PERM_ANDROID_PACKAGE:
218 if (uid_is_app(info->d_uid)) 222 if (uid_is_app(info_d->d_uid))
219 gid = multiuser_get_ext_gid(info->d_uid); 223 gid = multiuser_get_ext_gid(info_d->d_uid);
220 else 224 else
221 gid = multiuser_get_uid(info->userid, AID_MEDIA_RW); 225 gid = multiuser_get_uid(info_d->userid, AID_MEDIA_RW);
222 break; 226 break;
223 case PERM_ANDROID_PACKAGE_CACHE: 227 case PERM_ANDROID_PACKAGE_CACHE:
224 if (uid_is_app(info->d_uid)) 228 if (uid_is_app(info_d->d_uid))
225 gid = multiuser_get_ext_cache_gid(info->d_uid); 229 gid = multiuser_get_ext_cache_gid(info_d->d_uid);
226 else 230 else
227 gid = multiuser_get_uid(info->userid, AID_MEDIA_RW); 231 gid = multiuser_get_uid(info_d->userid, AID_MEDIA_RW);
228 break; 232 break;
229 case PERM_PRE_ROOT: 233 case PERM_PRE_ROOT:
230 default: 234 default:
@@ -257,11 +261,13 @@ retry_deleg:
257 sdcardfs_put_lower_path(dentry, &path); 261 sdcardfs_put_lower_path(dentry, &path);
258} 262}
259 263
260static int descendant_may_need_fixup(struct sdcardfs_inode_info *info, struct limit_search *limit) 264static int descendant_may_need_fixup(struct sdcardfs_inode_data *data,
265 struct limit_search *limit)
261{ 266{
262 if (info->perm == PERM_ROOT) 267 if (data->perm == PERM_ROOT)
263 return (limit->flags & BY_USERID)?info->userid == limit->userid:1; 268 return (limit->flags & BY_USERID) ?
264 if (info->perm == PERM_PRE_ROOT || info->perm == PERM_ANDROID) 269 data->userid == limit->userid : 1;
270 if (data->perm == PERM_PRE_ROOT || data->perm == PERM_ANDROID)
265 return 1; 271 return 1;
266 return 0; 272 return 0;
267} 273}
@@ -292,7 +298,7 @@ static void __fixup_perms_recursive(struct dentry *dentry, struct limit_search *
292 } 298 }
293 info = SDCARDFS_I(d_inode(dentry)); 299 info = SDCARDFS_I(d_inode(dentry));
294 300
295 if (needs_fixup(info->perm)) { 301 if (needs_fixup(info->data->perm)) {
296 list_for_each_entry(child, &dentry->d_subdirs, d_child) { 302 list_for_each_entry(child, &dentry->d_subdirs, d_child) {
297 spin_lock_nested(&child->d_lock, depth + 1); 303 spin_lock_nested(&child->d_lock, depth + 1);
298 if (!(limit->flags & BY_NAME) || qstr_case_eq(&child->d_name, &limit->name)) { 304 if (!(limit->flags & BY_NAME) || qstr_case_eq(&child->d_name, &limit->name)) {
@@ -305,7 +311,7 @@ static void __fixup_perms_recursive(struct dentry *dentry, struct limit_search *
305 } 311 }
306 spin_unlock(&child->d_lock); 312 spin_unlock(&child->d_lock);
307 } 313 }
308 } else if (descendant_may_need_fixup(info, limit)) { 314 } else if (descendant_may_need_fixup(info->data, limit)) {
309 list_for_each_entry(child, &dentry->d_subdirs, d_child) { 315 list_for_each_entry(child, &dentry->d_subdirs, d_child) {
310 __fixup_perms_recursive(child, limit, depth + 1); 316 __fixup_perms_recursive(child, limit, depth + 1);
311 } 317 }
@@ -349,12 +355,12 @@ int need_graft_path(struct dentry *dentry)
349 struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb); 355 struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
350 struct qstr obb = QSTR_LITERAL("obb"); 356 struct qstr obb = QSTR_LITERAL("obb");
351 357
352 if (parent_info->perm == PERM_ANDROID && 358 if (parent_info->data->perm == PERM_ANDROID &&
353 qstr_case_eq(&dentry->d_name, &obb)) { 359 qstr_case_eq(&dentry->d_name, &obb)) {
354 360
355 /* /Android/obb is the base obbpath of DERIVED_UNIFIED */ 361 /* /Android/obb is the base obbpath of DERIVED_UNIFIED */
356 if (!(sbi->options.multiuser == false 362 if (!(sbi->options.multiuser == false
357 && parent_info->userid == 0)) { 363 && parent_info->data->userid == 0)) {
358 ret = 1; 364 ret = 1;
359 } 365 }
360 } 366 }
@@ -415,11 +421,11 @@ int is_base_obbpath(struct dentry *dentry)
415 421
416 spin_lock(&SDCARDFS_D(dentry)->lock); 422 spin_lock(&SDCARDFS_D(dentry)->lock);
417 if (sbi->options.multiuser) { 423 if (sbi->options.multiuser) {
418 if (parent_info->perm == PERM_PRE_ROOT && 424 if (parent_info->data->perm == PERM_PRE_ROOT &&
419 qstr_case_eq(&dentry->d_name, &q_obb)) { 425 qstr_case_eq(&dentry->d_name, &q_obb)) {
420 ret = 1; 426 ret = 1;
421 } 427 }
422 } else if (parent_info->perm == PERM_ANDROID && 428 } else if (parent_info->data->perm == PERM_ANDROID &&
423 qstr_case_eq(&dentry->d_name, &q_obb)) { 429 qstr_case_eq(&dentry->d_name, &q_obb)) {
424 ret = 1; 430 ret = 1;
425 } 431 }
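Editorial note: the switch in get_derived_permission_new() above derives each node's permission class from its parent's. For orientation, under the multiuser layout the walk from the root works out to roughly the table below; this is only a summary of the cases shown in the hunk, not code from the patch.

/* Descriptive summary of the derived-permission walk (multiuser layout). */
static const struct {
        const char *path;
        const char *perm;
} example_perm_map[] = {
        { "/",                                      "PERM_PRE_ROOT" },
        { "/<userid>",                              "PERM_ROOT (userid parsed from the name)" },
        { "/<userid>/Android",                      "PERM_ANDROID" },
        { "/<userid>/Android/data",                 "PERM_ANDROID_DATA" },
        { "/<userid>/Android/obb",                  "PERM_ANDROID_OBB" },
        { "/<userid>/Android/media",                "PERM_ANDROID_MEDIA" },
        { "/<userid>/Android/{data,obb,media}/<pkg>",
          "PERM_ANDROID_PACKAGE (d_uid from the package appid)" },
        { "/<userid>/Android/data/<pkg>/cache",     "PERM_ANDROID_PACKAGE_CACHE" },
};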
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 4d558b87fe42..d48da4126dac 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -23,7 +23,8 @@
23#include <linux/ratelimit.h> 23#include <linux/ratelimit.h>
24 24
25/* Do not directly use this function. Use OVERRIDE_CRED() instead. */ 25/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
26const struct cred *override_fsids(struct sdcardfs_sb_info *sbi, struct sdcardfs_inode_info *info) 26const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
27 struct sdcardfs_inode_data *data)
27{ 28{
28 struct cred *cred; 29 struct cred *cred;
29 const struct cred *old_cred; 30 const struct cred *old_cred;
@@ -33,10 +34,10 @@ const struct cred *override_fsids(struct sdcardfs_sb_info *sbi, struct sdcardfs_
33 if (!cred) 34 if (!cred)
34 return NULL; 35 return NULL;
35 36
36 if (info->under_obb) 37 if (data->under_obb)
37 uid = AID_MEDIA_OBB; 38 uid = AID_MEDIA_OBB;
38 else 39 else
39 uid = multiuser_get_uid(info->userid, sbi->options.fs_low_uid); 40 uid = multiuser_get_uid(data->userid, sbi->options.fs_low_uid);
40 cred->fsuid = make_kuid(&init_user_ns, uid); 41 cred->fsuid = make_kuid(&init_user_ns, uid);
41 cred->fsgid = make_kgid(&init_user_ns, sbi->options.fs_low_gid); 42 cred->fsgid = make_kgid(&init_user_ns, sbi->options.fs_low_gid);
42 43
@@ -96,7 +97,8 @@ static int sdcardfs_create(struct inode *dir, struct dentry *dentry,
96 if (err) 97 if (err)
97 goto out; 98 goto out;
98 99
99 err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, SDCARDFS_I(dir)->userid); 100 err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path,
101 SDCARDFS_I(dir)->data->userid);
100 if (err) 102 if (err)
101 goto out; 103 goto out;
102 fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir)); 104 fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
@@ -267,7 +269,7 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
267 struct path lower_path; 269 struct path lower_path;
268 struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb); 270 struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
269 const struct cred *saved_cred = NULL; 271 const struct cred *saved_cred = NULL;
270 struct sdcardfs_inode_info *pi = SDCARDFS_I(dir); 272 struct sdcardfs_inode_data *pd = SDCARDFS_I(dir)->data;
271 int touch_err = 0; 273 int touch_err = 0;
272 struct fs_struct *saved_fs; 274 struct fs_struct *saved_fs;
273 struct fs_struct *copied_fs; 275 struct fs_struct *copied_fs;
@@ -336,7 +338,7 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
336 make_nomedia_in_obb = 1; 338 make_nomedia_in_obb = 1;
337 } 339 }
338 340
339 err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, pi->userid); 341 err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, pd->userid);
340 if (err) { 342 if (err) {
341 unlock_dir(lower_parent_dentry); 343 unlock_dir(lower_parent_dentry);
342 goto out; 344 goto out;
@@ -349,12 +351,13 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
349 fixup_lower_ownership(dentry, dentry->d_name.name); 351 fixup_lower_ownership(dentry, dentry->d_name.name);
350 unlock_dir(lower_parent_dentry); 352 unlock_dir(lower_parent_dentry);
351 if ((!sbi->options.multiuser) && (qstr_case_eq(&dentry->d_name, &q_obb)) 353 if ((!sbi->options.multiuser) && (qstr_case_eq(&dentry->d_name, &q_obb))
352 && (pi->perm == PERM_ANDROID) && (pi->userid == 0)) 354 && (pd->perm == PERM_ANDROID) && (pd->userid == 0))
353 make_nomedia_in_obb = 1; 355 make_nomedia_in_obb = 1;
354 356
355 /* When creating /Android/data and /Android/obb, mark them as .nomedia */ 357 /* When creating /Android/data and /Android/obb, mark them as .nomedia */
356 if (make_nomedia_in_obb || 358 if (make_nomedia_in_obb ||
357 ((pi->perm == PERM_ANDROID) && (qstr_case_eq(&dentry->d_name, &q_data)))) { 359 ((pd->perm == PERM_ANDROID)
360 && (qstr_case_eq(&dentry->d_name, &q_data)))) {
358 REVERT_CRED(saved_cred); 361 REVERT_CRED(saved_cred);
359 OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(d_inode(dentry))); 362 OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(d_inode(dentry)));
360 set_fs_pwd(current->fs, &lower_path); 363 set_fs_pwd(current->fs, &lower_path);
@@ -620,7 +623,7 @@ static int sdcardfs_permission(struct vfsmount *mnt, struct inode *inode, int ma
620{ 623{
621 int err; 624 int err;
622 struct inode tmp; 625 struct inode tmp;
623 struct inode *top = grab_top(SDCARDFS_I(inode)); 626 struct sdcardfs_inode_data *top = top_data_get(SDCARDFS_I(inode));
624 627
625 if (!top) 628 if (!top)
626 return -EINVAL; 629 return -EINVAL;
@@ -637,10 +640,11 @@ static int sdcardfs_permission(struct vfsmount *mnt, struct inode *inode, int ma
637 * locks must be dealt with to avoid undefined behavior. 640 * locks must be dealt with to avoid undefined behavior.
638 */ 641 */
639 copy_attrs(&tmp, inode); 642 copy_attrs(&tmp, inode);
640 tmp.i_uid = make_kuid(&init_user_ns, SDCARDFS_I(top)->d_uid); 643 tmp.i_uid = make_kuid(&init_user_ns, top->d_uid);
641 tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, SDCARDFS_I(top))); 644 tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, top));
642 tmp.i_mode = (inode->i_mode & S_IFMT) | get_mode(mnt, SDCARDFS_I(top)); 645 tmp.i_mode = (inode->i_mode & S_IFMT)
643 release_top(SDCARDFS_I(inode)); 646 | get_mode(mnt, SDCARDFS_I(inode), top);
647 data_put(top);
644 tmp.i_sb = inode->i_sb; 648 tmp.i_sb = inode->i_sb;
645 if (IS_POSIXACL(inode)) 649 if (IS_POSIXACL(inode))
646 pr_warn("%s: This may be undefined behavior...\n", __func__); 650 pr_warn("%s: This may be undefined behavior...\n", __func__);
@@ -692,11 +696,12 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
692 struct dentry *parent; 696 struct dentry *parent;
693 struct inode tmp; 697 struct inode tmp;
694 struct dentry tmp_d; 698 struct dentry tmp_d;
695 struct inode *top; 699 struct sdcardfs_inode_data *top;
700
696 const struct cred *saved_cred = NULL; 701 const struct cred *saved_cred = NULL;
697 702
698 inode = d_inode(dentry); 703 inode = d_inode(dentry);
699 top = grab_top(SDCARDFS_I(inode)); 704 top = top_data_get(SDCARDFS_I(inode));
700 705
701 if (!top) 706 if (!top)
702 return -EINVAL; 707 return -EINVAL;
@@ -714,11 +719,12 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
714 * 719 *
715 */ 720 */
716 copy_attrs(&tmp, inode); 721 copy_attrs(&tmp, inode);
717 tmp.i_uid = make_kuid(&init_user_ns, SDCARDFS_I(top)->d_uid); 722 tmp.i_uid = make_kuid(&init_user_ns, top->d_uid);
718 tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, SDCARDFS_I(top))); 723 tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, top));
719 tmp.i_mode = (inode->i_mode & S_IFMT) | get_mode(mnt, SDCARDFS_I(top)); 724 tmp.i_mode = (inode->i_mode & S_IFMT)
725 | get_mode(mnt, SDCARDFS_I(inode), top);
720 tmp.i_size = i_size_read(inode); 726 tmp.i_size = i_size_read(inode);
721 release_top(SDCARDFS_I(inode)); 727 data_put(top);
722 tmp.i_sb = inode->i_sb; 728 tmp.i_sb = inode->i_sb;
723 tmp_d.d_inode = &tmp; 729 tmp_d.d_inode = &tmp;
724 730
@@ -821,17 +827,17 @@ static int sdcardfs_fillattr(struct vfsmount *mnt,
821 struct inode *inode, struct kstat *stat) 827 struct inode *inode, struct kstat *stat)
822{ 828{
823 struct sdcardfs_inode_info *info = SDCARDFS_I(inode); 829 struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
824 struct inode *top = grab_top(info); 830 struct sdcardfs_inode_data *top = top_data_get(info);
825 831
826 if (!top) 832 if (!top)
827 return -EINVAL; 833 return -EINVAL;
828 834
829 stat->dev = inode->i_sb->s_dev; 835 stat->dev = inode->i_sb->s_dev;
830 stat->ino = inode->i_ino; 836 stat->ino = inode->i_ino;
831 stat->mode = (inode->i_mode & S_IFMT) | get_mode(mnt, SDCARDFS_I(top)); 837 stat->mode = (inode->i_mode & S_IFMT) | get_mode(mnt, info, top);
832 stat->nlink = inode->i_nlink; 838 stat->nlink = inode->i_nlink;
833 stat->uid = make_kuid(&init_user_ns, SDCARDFS_I(top)->d_uid); 839 stat->uid = make_kuid(&init_user_ns, top->d_uid);
834 stat->gid = make_kgid(&init_user_ns, get_gid(mnt, SDCARDFS_I(top))); 840 stat->gid = make_kgid(&init_user_ns, get_gid(mnt, top));
835 stat->rdev = inode->i_rdev; 841 stat->rdev = inode->i_rdev;
836 stat->size = i_size_read(inode); 842 stat->size = i_size_read(inode);
837 stat->atime = inode->i_atime; 843 stat->atime = inode->i_atime;
@@ -839,7 +845,7 @@ static int sdcardfs_fillattr(struct vfsmount *mnt,
839 stat->ctime = inode->i_ctime; 845 stat->ctime = inode->i_ctime;
840 stat->blksize = (1 << inode->i_blkbits); 846 stat->blksize = (1 << inode->i_blkbits);
841 stat->blocks = inode->i_blocks; 847 stat->blocks = inode->i_blocks;
842 release_top(info); 848 data_put(top);
843 return 0; 849 return 0;
844} 850}
845 851
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 706329dd1ca3..17761c546617 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -71,7 +71,7 @@ struct inode_data {
71static int sdcardfs_inode_test(struct inode *inode, void *candidate_data/*void *candidate_lower_inode*/) 71static int sdcardfs_inode_test(struct inode *inode, void *candidate_data/*void *candidate_lower_inode*/)
72{ 72{
73 struct inode *current_lower_inode = sdcardfs_lower_inode(inode); 73 struct inode *current_lower_inode = sdcardfs_lower_inode(inode);
74 userid_t current_userid = SDCARDFS_I(inode)->userid; 74 userid_t current_userid = SDCARDFS_I(inode)->data->userid;
75 75
76 if (current_lower_inode == ((struct inode_data *)candidate_data)->lower_inode && 76 if (current_lower_inode == ((struct inode_data *)candidate_data)->lower_inode &&
77 current_userid == ((struct inode_data *)candidate_data)->id) 77 current_userid == ((struct inode_data *)candidate_data)->id)
@@ -438,7 +438,8 @@ struct dentry *sdcardfs_lookup(struct inode *dir, struct dentry *dentry,
438 goto out; 438 goto out;
439 } 439 }
440 440
441 ret = __sdcardfs_lookup(dentry, flags, &lower_parent_path, SDCARDFS_I(dir)->userid); 441 ret = __sdcardfs_lookup(dentry, flags, &lower_parent_path,
442 SDCARDFS_I(dir)->data->userid);
442 if (IS_ERR(ret)) 443 if (IS_ERR(ret))
443 goto out; 444 goto out;
444 if (ret) 445 if (ret)
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 953d2156d2e9..3c5b51d49d21 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -327,13 +327,13 @@ static int sdcardfs_read_super(struct vfsmount *mnt, struct super_block *sb,
327 mutex_lock(&sdcardfs_super_list_lock); 327 mutex_lock(&sdcardfs_super_list_lock);
328 if (sb_info->options.multiuser) { 328 if (sb_info->options.multiuser) {
329 setup_derived_state(d_inode(sb->s_root), PERM_PRE_ROOT, 329 setup_derived_state(d_inode(sb->s_root), PERM_PRE_ROOT,
330 sb_info->options.fs_user_id, AID_ROOT, 330 sb_info->options.fs_user_id, AID_ROOT,
331 false, d_inode(sb->s_root)); 331 false, SDCARDFS_I(d_inode(sb->s_root))->data);
332 snprintf(sb_info->obbpath_s, PATH_MAX, "%s/obb", dev_name); 332 snprintf(sb_info->obbpath_s, PATH_MAX, "%s/obb", dev_name);
333 } else { 333 } else {
334 setup_derived_state(d_inode(sb->s_root), PERM_ROOT, 334 setup_derived_state(d_inode(sb->s_root), PERM_ROOT,
335 sb_info->options.fs_user_id, AID_ROOT, 335 sb_info->options.fs_user_id, AID_ROOT,
336 false, d_inode(sb->s_root)); 336 false, SDCARDFS_I(d_inode(sb->s_root))->data);
337 snprintf(sb_info->obbpath_s, PATH_MAX, "%s/Android/obb", dev_name); 337 snprintf(sb_info->obbpath_s, PATH_MAX, "%s/Android/obb", dev_name);
338 } 338 }
339 fixup_tmp_permissions(d_inode(sb->s_root)); 339 fixup_tmp_permissions(d_inode(sb->s_root));
diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c
index 5ea6469638d8..00a0f656acc7 100644
--- a/fs/sdcardfs/packagelist.c
+++ b/fs/sdcardfs/packagelist.c
@@ -156,7 +156,7 @@ int check_caller_access_to_name(struct inode *parent_node, const struct qstr *na
156 struct qstr q_android_secure = QSTR_LITERAL("android_secure"); 156 struct qstr q_android_secure = QSTR_LITERAL("android_secure");
157 157
158 /* Always block security-sensitive files at root */ 158 /* Always block security-sensitive files at root */
159 if (parent_node && SDCARDFS_I(parent_node)->perm == PERM_ROOT) { 159 if (parent_node && SDCARDFS_I(parent_node)->data->perm == PERM_ROOT) {
160 if (qstr_case_eq(name, &q_autorun) 160 if (qstr_case_eq(name, &q_autorun)
161 || qstr_case_eq(name, &q__android_secure) 161 || qstr_case_eq(name, &q__android_secure)
162 || qstr_case_eq(name, &q_android_secure)) { 162 || qstr_case_eq(name, &q_android_secure)) {
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 380982b4a567..3687b22a2e6b 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -30,6 +30,7 @@
30#include <linux/file.h> 30#include <linux/file.h>
31#include <linux/fs.h> 31#include <linux/fs.h>
32#include <linux/aio.h> 32#include <linux/aio.h>
33#include <linux/kref.h>
33#include <linux/mm.h> 34#include <linux/mm.h>
34#include <linux/mount.h> 35#include <linux/mount.h>
35#include <linux/namei.h> 36#include <linux/namei.h>
@@ -81,7 +82,8 @@
81 */ 82 */
82#define fixup_tmp_permissions(x) \ 83#define fixup_tmp_permissions(x) \
83 do { \ 84 do { \
84 (x)->i_uid = make_kuid(&init_user_ns, SDCARDFS_I(x)->d_uid); \ 85 (x)->i_uid = make_kuid(&init_user_ns, \
86 SDCARDFS_I(x)->data->d_uid); \
85 (x)->i_gid = make_kgid(&init_user_ns, AID_SDCARD_RW); \ 87 (x)->i_gid = make_kgid(&init_user_ns, AID_SDCARD_RW); \
86 (x)->i_mode = ((x)->i_mode & S_IFMT) | 0775;\ 88 (x)->i_mode = ((x)->i_mode & S_IFMT) | 0775;\
87 } while (0) 89 } while (0)
@@ -97,14 +99,14 @@
97 */ 99 */
98#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred, info) \ 100#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred, info) \
99 do { \ 101 do { \
100 saved_cred = override_fsids(sdcardfs_sbi, info); \ 102 saved_cred = override_fsids(sdcardfs_sbi, info->data); \
101 if (!saved_cred) \ 103 if (!saved_cred) \
102 return -ENOMEM; \ 104 return -ENOMEM; \
103 } while (0) 105 } while (0)
104 106
105#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred, info) \ 107#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred, info) \
106 do { \ 108 do { \
107 saved_cred = override_fsids(sdcardfs_sbi, info); \ 109 saved_cred = override_fsids(sdcardfs_sbi, info->data); \
108 if (!saved_cred) \ 110 if (!saved_cred) \
109 return ERR_PTR(-ENOMEM); \ 111 return ERR_PTR(-ENOMEM); \
110 } while (0) 112 } while (0)
@@ -142,9 +144,11 @@ typedef enum {
142struct sdcardfs_sb_info; 144struct sdcardfs_sb_info;
143struct sdcardfs_mount_options; 145struct sdcardfs_mount_options;
144struct sdcardfs_inode_info; 146struct sdcardfs_inode_info;
147struct sdcardfs_inode_data;
145 148
146/* Do not directly use this function. Use OVERRIDE_CRED() instead. */ 149/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
147const struct cred *override_fsids(struct sdcardfs_sb_info *sbi, struct sdcardfs_inode_info *info); 150const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
151 struct sdcardfs_inode_data *data);
148/* Do not directly use this function, use REVERT_CRED() instead. */ 152/* Do not directly use this function, use REVERT_CRED() instead. */
149void revert_fsids(const struct cred *old_cred); 153void revert_fsids(const struct cred *old_cred);
150 154
@@ -178,18 +182,26 @@ struct sdcardfs_file_info {
178 const struct vm_operations_struct *lower_vm_ops; 182 const struct vm_operations_struct *lower_vm_ops;
179}; 183};
180 184
181/* sdcardfs inode data in memory */ 185struct sdcardfs_inode_data {
182struct sdcardfs_inode_info { 186 struct kref refcount;
183 struct inode *lower_inode; 187 bool abandoned;
184 /* state derived based on current position in hierachy */ 188
185 perm_t perm; 189 perm_t perm;
186 userid_t userid; 190 userid_t userid;
187 uid_t d_uid; 191 uid_t d_uid;
188 bool under_android; 192 bool under_android;
189 bool under_cache; 193 bool under_cache;
190 bool under_obb; 194 bool under_obb;
195};
196
197/* sdcardfs inode data in memory */
198struct sdcardfs_inode_info {
199 struct inode *lower_inode;
200 /* state derived based on current position in hierarchy */
201 struct sdcardfs_inode_data *data;
202
191 /* top folder for ownership */ 203 /* top folder for ownership */
192 struct inode *top; 204 struct sdcardfs_inode_data *top_data;
193 205
194 struct inode vfs_inode; 206 struct inode vfs_inode;
195}; 207};
@@ -351,39 +363,56 @@ SDCARDFS_DENT_FUNC(orig_path)
351 363
352static inline bool sbinfo_has_sdcard_magic(struct sdcardfs_sb_info *sbinfo) 364static inline bool sbinfo_has_sdcard_magic(struct sdcardfs_sb_info *sbinfo)
353{ 365{
354 return sbinfo && sbinfo->sb && sbinfo->sb->s_magic == SDCARDFS_SUPER_MAGIC; 366 return sbinfo && sbinfo->sb
367 && sbinfo->sb->s_magic == SDCARDFS_SUPER_MAGIC;
355} 368}
356 369
357/* grab a refererence if we aren't linking to ourself */ 370static inline struct sdcardfs_inode_data *data_get(
358static inline void set_top(struct sdcardfs_inode_info *info, struct inode *top) 371 struct sdcardfs_inode_data *data)
359{ 372{
360 struct inode *old_top = NULL; 373 if (data)
361 374 kref_get(&data->refcount);
362 BUG_ON(IS_ERR_OR_NULL(top)); 375 return data;
363 if (info->top && info->top != &info->vfs_inode)
364 old_top = info->top;
365 if (top != &info->vfs_inode)
366 igrab(top);
367 info->top = top;
368 iput(old_top);
369} 376}
370 377
371static inline struct inode *grab_top(struct sdcardfs_inode_info *info) 378static inline struct sdcardfs_inode_data *top_data_get(
379 struct sdcardfs_inode_info *info)
372{ 380{
373 struct inode *top = info->top; 381 return data_get(info->top_data);
382}
374 383
375 if (top) 384extern void data_release(struct kref *ref);
376 return igrab(top); 385
377 else 386static inline void data_put(struct sdcardfs_inode_data *data)
378 return NULL; 387{
388 kref_put(&data->refcount, data_release);
389}
390
391static inline void release_own_data(struct sdcardfs_inode_info *info)
392{
393 /*
394 * This happens exactly once per inode. At this point, the inode that
395 * originally held this data is about to be freed, and all references
396 * to it are held as a top value, and will likely be released soon.
397 */
398 info->data->abandoned = true;
399 data_put(info->data);
379} 400}
380 401
381static inline void release_top(struct sdcardfs_inode_info *info) 402static inline void set_top(struct sdcardfs_inode_info *info,
403 struct sdcardfs_inode_data *top)
382{ 404{
383 iput(info->top); 405 struct sdcardfs_inode_data *old_top = info->top_data;
406
407 if (top)
408 data_get(top);
409 info->top_data = top;
410 if (old_top)
411 data_put(old_top);
384} 412}
385 413
386static inline int get_gid(struct vfsmount *mnt, struct sdcardfs_inode_info *info) 414static inline int get_gid(struct vfsmount *mnt,
415 struct sdcardfs_inode_data *data)
387{ 416{
388 struct sdcardfs_vfsmount_options *opts = mnt->data; 417 struct sdcardfs_vfsmount_options *opts = mnt->data;
389 418
@@ -396,10 +425,12 @@ static inline int get_gid(struct vfsmount *mnt, struct sdcardfs_inode_info *info
396 */ 425 */
397 return AID_SDCARD_RW; 426 return AID_SDCARD_RW;
398 else 427 else
399 return multiuser_get_uid(info->userid, opts->gid); 428 return multiuser_get_uid(data->userid, opts->gid);
400} 429}
401 430
402static inline int get_mode(struct vfsmount *mnt, struct sdcardfs_inode_info *info) 431static inline int get_mode(struct vfsmount *mnt,
432 struct sdcardfs_inode_info *info,
433 struct sdcardfs_inode_data *data)
403{ 434{
404 int owner_mode; 435 int owner_mode;
405 int filtered_mode; 436 int filtered_mode;
@@ -407,12 +438,12 @@ static inline int get_mode(struct vfsmount *mnt, struct sdcardfs_inode_info *inf
         int visible_mode = 0775 & ~opts->mask;
 
 
-        if (info->perm == PERM_PRE_ROOT) {
+        if (data->perm == PERM_PRE_ROOT) {
                 /* Top of multi-user view should always be visible to ensure
                  * secondary users can traverse inside.
                  */
                 visible_mode = 0711;
-        } else if (info->under_android) {
+        } else if (data->under_android) {
                 /* Block "other" access to Android directories, since only apps
                  * belonging to a specific user should be in there; we still
                  * leave +x open for the default view.
@@ -481,8 +512,9 @@ struct limit_search {
         userid_t userid;
 };
 
-extern void setup_derived_state(struct inode *inode, perm_t perm, userid_t userid,
-                uid_t uid, bool under_android, struct inode *top);
+extern void setup_derived_state(struct inode *inode, perm_t perm,
+                userid_t userid, uid_t uid, bool under_android,
+                struct sdcardfs_inode_data *top);
 extern void get_derived_permission(struct dentry *parent, struct dentry *dentry);
 extern void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, const struct qstr *name);
 extern void fixup_perms_recursive(struct dentry *dentry, struct limit_search *limit);
@@ -601,7 +633,7 @@ static inline void sdcardfs_copy_and_fix_attrs(struct inode *dest, const struct
 {
         dest->i_mode = (src->i_mode & S_IFMT) | S_IRWXU | S_IRWXG |
                         S_IROTH | S_IXOTH; /* 0775 */
-        dest->i_uid = make_kuid(&init_user_ns, SDCARDFS_I(dest)->d_uid);
+        dest->i_uid = make_kuid(&init_user_ns, SDCARDFS_I(dest)->data->d_uid);
         dest->i_gid = make_kgid(&init_user_ns, AID_SDCARD_RW);
         dest->i_rdev = src->i_rdev;
         dest->i_atime = src->i_atime;
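Note on the sdcardfs.h hunks above: the old inode-based top tracking (igrab()/iput() on info->top) is replaced by a separately allocated, kref-counted sdcardfs_inode_data object; data_get()/data_put() bump and drop the refcount, and set_top() takes the new reference before releasing the old one. A minimal sketch of that kref pattern, using a hypothetical struct foo_data rather than the real sdcardfs types:

    /*
     * Kernel-style illustration only, not the sdcardfs sources.
     */
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct foo_data {
            struct kref refcount;
            int payload;
    };

    static void foo_release(struct kref *ref)
    {
            struct foo_data *d = container_of(ref, struct foo_data, refcount);

            kfree(d);
    }

    static struct foo_data *foo_get(struct foo_data *d)
    {
            if (d)
                    kref_get(&d->refcount);
            return d;
    }

    static void foo_put(struct foo_data *d)
    {
            if (d)
                    kref_put(&d->refcount, foo_release);
    }

    /* Replace a "top" pointer: take the new reference before dropping the old. */
    static void foo_set_top(struct foo_data **slot, struct foo_data *new_top)
    {
            struct foo_data *old = *slot;

            *slot = foo_get(new_top);
            if (old)
                    foo_put(old);
    }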
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
index 8a9c9c7adca2..7f4539b4b249 100644
--- a/fs/sdcardfs/super.c
+++ b/fs/sdcardfs/super.c
@@ -26,6 +26,23 @@
  */
 static struct kmem_cache *sdcardfs_inode_cachep;
 
+/*
+ * To support the top references, we must track some data separately.
+ * An sdcardfs_inode_info always has a reference to its data, and once set up,
+ * also has a reference to its top. The top may be itself, in which case it
+ * holds two references to its data. When top is changed, it takes a ref to the
+ * new data and then drops the ref to the old data.
+ */
+static struct kmem_cache *sdcardfs_inode_data_cachep;
+
+void data_release(struct kref *ref)
+{
+        struct sdcardfs_inode_data *data =
+                container_of(ref, struct sdcardfs_inode_data, refcount);
+
+        kmem_cache_free(sdcardfs_inode_data_cachep, data);
+}
+
 /* final actions when unmounting a file system */
 static void sdcardfs_put_super(struct super_block *sb)
 {
@@ -166,6 +183,7 @@ static void sdcardfs_evict_inode(struct inode *inode)
         struct inode *lower_inode;
 
         truncate_inode_pages(&inode->i_data, 0);
+        set_top(SDCARDFS_I(inode), NULL);
         clear_inode(inode);
         /*
          * Decrement a reference to a lower_inode, which was incremented
@@ -173,13 +191,13 @@ static void sdcardfs_evict_inode(struct inode *inode)
          */
         lower_inode = sdcardfs_lower_inode(inode);
         sdcardfs_set_lower_inode(inode, NULL);
-        set_top(SDCARDFS_I(inode), inode);
         iput(lower_inode);
 }
 
 static struct inode *sdcardfs_alloc_inode(struct super_block *sb)
 {
         struct sdcardfs_inode_info *i;
+        struct sdcardfs_inode_data *d;
 
         i = kmem_cache_alloc(sdcardfs_inode_cachep, GFP_KERNEL);
         if (!i)
@@ -188,6 +206,16 @@ static struct inode *sdcardfs_alloc_inode(struct super_block *sb)
         /* memset everything up to the inode to 0 */
         memset(i, 0, offsetof(struct sdcardfs_inode_info, vfs_inode));
 
+        d = kmem_cache_alloc(sdcardfs_inode_data_cachep,
+                                        GFP_KERNEL | __GFP_ZERO);
+        if (!d) {
+                kmem_cache_free(sdcardfs_inode_cachep, i);
+                return NULL;
+        }
+
+        i->data = d;
+        kref_init(&d->refcount);
+
         i->vfs_inode.i_version = 1;
         return &i->vfs_inode;
 }
@@ -196,6 +224,7 @@ static void i_callback(struct rcu_head *head)
 {
         struct inode *inode = container_of(head, struct inode, i_rcu);
 
+        release_own_data(SDCARDFS_I(inode));
         kmem_cache_free(sdcardfs_inode_cachep, SDCARDFS_I(inode));
 }
 
@@ -214,20 +243,30 @@ static void init_once(void *obj)
 
 int sdcardfs_init_inode_cache(void)
 {
-        int err = 0;
-
         sdcardfs_inode_cachep =
                 kmem_cache_create("sdcardfs_inode_cache",
                         sizeof(struct sdcardfs_inode_info), 0,
                         SLAB_RECLAIM_ACCOUNT, init_once);
+
         if (!sdcardfs_inode_cachep)
-                err = -ENOMEM;
-        return err;
+                return -ENOMEM;
+
+        sdcardfs_inode_data_cachep =
+                kmem_cache_create("sdcardfs_inode_data_cache",
+                        sizeof(struct sdcardfs_inode_data), 0,
+                        SLAB_RECLAIM_ACCOUNT, NULL);
+        if (!sdcardfs_inode_data_cachep) {
+                kmem_cache_destroy(sdcardfs_inode_cachep);
+                return -ENOMEM;
+        }
+
+        return 0;
 }
 
 /* sdcardfs inode cache destructor */
 void sdcardfs_destroy_inode_cache(void)
 {
+        kmem_cache_destroy(sdcardfs_inode_data_cachep);
         kmem_cache_destroy(sdcardfs_inode_cachep);
 }
 
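Note on the super.c hunks above: a second kmem_cache backs struct sdcardfs_inode_data, alloc_inode allocates both objects (freeing the inode again if the data allocation fails), evict_inode drops the top reference, and the RCU callback releases the inode's own data. A sketch of the two-cache init/teardown pattern with its unwind order, using made-up cache names and the hypothetical foo types from the earlier sketch:

    #include <linux/slab.h>

    static struct kmem_cache *foo_cachep;
    static struct kmem_cache *foo_data_cachep;

    static int foo_init_caches(void)
    {
            foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
                                           0, SLAB_RECLAIM_ACCOUNT, NULL);
            if (!foo_cachep)
                    return -ENOMEM;

            foo_data_cachep = kmem_cache_create("foo_data_cache",
                                                sizeof(struct foo_data),
                                                0, SLAB_RECLAIM_ACCOUNT, NULL);
            if (!foo_data_cachep) {
                    kmem_cache_destroy(foo_cachep); /* undo the step that succeeded */
                    return -ENOMEM;
            }
            return 0;
    }

    static void foo_destroy_caches(void)
    {
            /* teardown in reverse order of setup */
            kmem_cache_destroy(foo_data_cachep);
            kmem_cache_destroy(foo_cachep);
    }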
diff --git a/fs/xattr.c b/fs/xattr.c
index 2d13b4e62fae..ed8c374570ed 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -530,7 +530,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
                 size = XATTR_SIZE_MAX;
         kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
         if (!kvalue) {
-                kvalue = vmalloc(size);
+                kvalue = vzalloc(size);
                 if (!kvalue)
                         return -ENOMEM;
         }
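The getxattr() fix swaps vmalloc() for vzalloc(): the buffer is copied back to userspace and the kmalloc path already zeroes it (kzalloc), so the fallback must be zeroed as well or short attribute reads could expose stale kernel memory. A sketch of the allocation pattern as a hypothetical helper, not the fs/xattr.c code itself:

    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *alloc_user_visible(size_t size)
    {
            /* both paths return zeroed memory */
            void *buf = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

            if (!buf)
                    buf = vzalloc(size);
            return buf;             /* free with kvfree() */
    }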
diff --git a/kernel/padata.c b/kernel/padata.c
index b4a3c0ae649b..e4a8f8d9b31a 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -358,7 +358,7 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
 
         cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
         if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
-                free_cpumask_var(pd->cpumask.cbcpu);
+                free_cpumask_var(pd->cpumask.pcpu);
                 return -ENOMEM;
         }
 
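The padata fix frees the cpumask that was actually allocated (pcpu) rather than the one whose allocation just failed (cbcpu). The general rule on an error path is to undo only the steps that succeeded; a sketch with hypothetical names:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    static int alloc_two_masks(cpumask_var_t *a, cpumask_var_t *b)
    {
            if (!alloc_cpumask_var(a, GFP_KERNEL))
                    return -ENOMEM;
            if (!alloc_cpumask_var(b, GFP_KERNEL)) {
                    free_cpumask_var(*a);   /* 'a' succeeded, so undo it */
                    return -ENOMEM;
            }
            return 0;
    }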
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e5b159b88e39..5b06fb385dd7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3125,6 +3125,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                 enum compact_priority prio, enum compact_result *compact_result)
 {
         struct page *page;
+        unsigned int noreclaim_flag = current->flags & PF_MEMALLOC;
 
         if (!order)
                 return NULL;
@@ -3132,7 +3133,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
         current->flags |= PF_MEMALLOC;
         *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
                                                                         prio);
-        current->flags &= ~PF_MEMALLOC;
+        current->flags = (current->flags & ~PF_MEMALLOC) | noreclaim_flag;
 
         if (*compact_result <= COMPACT_INACTIVE)
                 return NULL;
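The page allocator fix saves whether PF_MEMALLOC was already set and restores that state afterwards, instead of clearing the bit unconditionally and corrupting a caller that relies on it. A sketch of the save/restore idiom as a hypothetical wrapper (newer kernels package the same idea as memalloc_noreclaim_save()/memalloc_noreclaim_restore()):

    #include <linux/sched.h>

    static void run_without_reclaim_recursion(void (*work)(void))
    {
            /* remember whether the flag was already set */
            unsigned int noreclaim_flag = current->flags & PF_MEMALLOC;

            current->flags |= PF_MEMALLOC;
            work();                 /* must not recurse into direct reclaim */
            /* put the flag back exactly as it was found */
            current->flags = (current->flags & ~PF_MEMALLOC) | noreclaim_flag;
    }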
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 48f9471e7c85..c88a6007e643 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1680,7 +1680,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
         if (msg->msg_flags & MSG_OOB)
                 return -EOPNOTSUPP;
 
-        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
+        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
+                               MSG_CMSG_COMPAT))
                 return -EINVAL;
 
         if (len < 4 || len > HCI_MAX_FRAME_SIZE)
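The HCI socket fix adds MSG_CMSG_COMPAT to the accepted sendmsg flags; that flag is set internally by the kernel's compat path when 32-bit userspace runs on a 64-bit kernel, so rejecting it broke those callers. A sketch of the whitelist-style flag check as a hypothetical helper:

    #include <linux/socket.h>
    #include <linux/errno.h>

    static int check_sendmsg_flags(unsigned int flags)
    {
            const unsigned int allowed = MSG_DONTWAIT | MSG_NOSIGNAL |
                                         MSG_ERRQUEUE | MSG_CMSG_COMPAT;

            /* reject anything outside the understood set */
            return (flags & ~allowed) ? -EINVAL : 0;
    }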
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 963732e775df..58dfa23d12ca 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -740,7 +740,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
 
         if (msg_data_left(msg) < chunk) {
                 if (__skb_checksum_complete(skb))
-                        goto csum_error;
+                        return -EINVAL;
                 if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
                         goto fault;
         } else {
@@ -748,15 +748,16 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
                 if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
                                                chunk, &csum))
                         goto fault;
-                if (csum_fold(csum))
-                        goto csum_error;
+
+                if (csum_fold(csum)) {
+                        iov_iter_revert(&msg->msg_iter, chunk);
+                        return -EINVAL;
+                }
+
                 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                         netdev_rx_csum_fault(skb->dev);
         }
         return 0;
-csum_error:
-        iov_iter_revert(&msg->msg_iter, chunk);
-        return -EINVAL;
 fault:
         return -EFAULT;
 }
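The datagram change drops the shared csum_error label: in the short-read branch the checksum is verified before anything is copied, so the code can return -EINVAL directly, while the copy-and-checksum branch has already consumed one chunk of the iterator and must revert exactly that amount. A sketch of the second branch as a hypothetical helper (illustrative only; skb_copy_and_csum_datagram() is local to net/core/datagram.c):

    #include <linux/skbuff.h>
    #include <linux/uio.h>

    static int copy_chunk_checked(struct sk_buff *skb, int hlen,
                                  struct msghdr *msg, int chunk)
    {
            __wsum csum = 0;

            if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
                                           chunk, &csum))
                    return -EFAULT;
            if (csum_fold(csum)) {
                    /* the chunk was consumed from msg_iter; put it back */
                    iov_iter_revert(&msg->msg_iter, chunk);
                    return -EINVAL;
            }
            return 0;
    }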
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index c5a6e0b12452..78bd632f144d 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1826,7 +1826,7 @@ static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
                 filt->addr = start;
                 if (filt->range && !filt->size && !filt->sym_to) {
                         filt->size = size;
-                        no_size = !!size;
+                        no_size = !size;
                 }
         }
 
@@ -1840,7 +1840,7 @@ static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
                 if (err)
                         return err;
                 filt->size = start + size - filt->addr;
-                no_size = !!size;
+                no_size = !size;
         }
 
         /* The very last symbol in kallsyms does not imply a particular size */
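The auxtrace fix flips !!size to !size: no_size is meant to be true when the resolved symbol has no size, and the double negation produced the opposite. A trivial userspace illustration:

    /* Why !!size was wrong for a flag named no_size. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long size = 4096;              /* a symbol size was found */

            printf("!!size = %d\n", !!size);        /* 1: "has a size" */
            printf("!size  = %d\n", !size);         /* 0: "has no size" */
            return 0;
    }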
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 4af47079cf04..e717fed80219 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -403,6 +403,51 @@ static void *threadproc(void *ctx)
         }
 }
 
+#ifdef __i386__
+
+#ifndef SA_RESTORE
+#define SA_RESTORER 0x04000000
+#endif
+
+/*
+ * The UAPI header calls this 'struct sigaction', which conflicts with
+ * glibc. Sigh.
+ */
+struct fake_ksigaction {
+        void *handler;  /* the real type is nasty */
+        unsigned long sa_flags;
+        void (*sa_restorer)(void);
+        unsigned char sigset[8];
+};
+
+static void fix_sa_restorer(int sig)
+{
+        struct fake_ksigaction ksa;
+
+        if (syscall(SYS_rt_sigaction, sig, NULL, &ksa, 8) == 0) {
+                /*
+                 * glibc has a nasty bug: it sometimes writes garbage to
+                 * sa_restorer. This interacts quite badly with anything
+                 * that fiddles with SS because it can trigger legacy
+                 * stack switching. Patch it up. See:
+                 *
+                 * https://sourceware.org/bugzilla/show_bug.cgi?id=21269
+                 */
+                if (!(ksa.sa_flags & SA_RESTORER) && ksa.sa_restorer) {
+                        ksa.sa_restorer = NULL;
+                        if (syscall(SYS_rt_sigaction, sig, &ksa, NULL,
+                                    sizeof(ksa.sigset)) != 0)
+                                err(1, "rt_sigaction");
+                }
+        }
+}
+#else
+static void fix_sa_restorer(int sig)
+{
+        /* 64-bit glibc works fine. */
+}
+#endif
+
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                        int flags)
 {
@@ -414,6 +459,7 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
         if (sigaction(sig, &sa, 0))
                 err(1, "sigaction");
 
+        fix_sa_restorer(sig);
 }
 
 static jmp_buf jmpbuf;