aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Makefile2
-rw-r--r--arch/arc/kernel/unwind.c37
-rw-r--r--arch/arm/boot/dts/wm8650.dtsi9
-rw-r--r--arch/mips/kvm/kvm_locore.S16
-rw-r--r--arch/mips/kvm/kvm_mips.c5
-rw-r--r--arch/mips/kvm/kvm_mips_emul.c2
-rw-r--r--arch/s390/mm/extable.c8
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c2
-rw-r--r--arch/um/os-Linux/start_up.c2
-rw-r--r--arch/x86/platform/efi/efi.c7
-rw-r--r--arch/x86/platform/efi/efi_32.c11
-rw-r--r--arch/x86/platform/efi/efi_64.c3
-rw-r--r--block/partitions/mac.c10
-rw-r--r--crypto/async_tx/async_memcpy.c2
-rw-r--r--crypto/async_tx/async_pq.c4
-rw-r--r--crypto/async_tx/async_raid6_recov.c4
-rw-r--r--crypto/async_tx/async_xor.c4
-rw-r--r--drivers/ata/libata-sff.c32
-rw-r--r--drivers/ata/sata_sil.c3
-rw-r--r--drivers/clocksource/vt8500_timer.c6
-rw-r--r--drivers/dma/dw/core.c12
-rw-r--r--drivers/edac/edac_device.c11
-rw-r--r--drivers/edac/edac_mc.c14
-rw-r--r--drivers/edac/edac_pci.c9
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h1
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c7
-rw-r--r--drivers/gpu/drm/ast/ast_main.c1
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c20
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c7
-rw-r--r--drivers/gpu/vga/vgaarb.c6
-rw-r--r--drivers/hwmon/ads1015.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs_mcast.c35
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c5
-rw-r--r--drivers/md/bcache/btree.c5
-rw-r--r--drivers/md/bcache/super.c11
-rw-r--r--drivers/md/bcache/writeback.c37
-rw-r--r--drivers/md/bcache/writeback.h3
-rw-r--r--drivers/md/dm-exception-store.h2
-rw-r--r--drivers/md/dm-snap-persistent.c5
-rw-r--r--drivers/md/dm-snap-transient.c4
-rw-r--r--drivers/md/dm-snap.c20
-rw-r--r--drivers/md/dm-thin-metadata.c6
-rw-r--r--drivers/md/dm-thin.c5
-rw-r--r--drivers/md/persistent-data/dm-btree.c16
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c29
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c6
-rw-r--r--drivers/media/dvb-frontends/tda1004x.c9
-rw-r--r--drivers/media/usb/gspca/ov534.c9
-rw-r--r--drivers/media/usb/gspca/topro.c6
-rw-r--r--drivers/mmc/card/block.c11
-rw-r--r--drivers/mmc/core/sdio.c2
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/net/can/sja1000/sja1000.c3
-rw-r--r--drivers/net/can/usb/ems_usb.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c20
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c5
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c4
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c4
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h1
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c2
-rw-r--r--drivers/pci/xen-pcifront.c10
-rw-r--r--drivers/power/wm831x_power.c6
-rw-r--r--drivers/powercap/intel_rapl.c7
-rw-r--r--drivers/s390/block/dasd_alias.c23
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c15
-rw-r--r--drivers/scsi/ses.c30
-rw-r--r--drivers/scsi/storvsc_drv.c3
-rw-r--r--drivers/target/target_core_sbc.c17
-rw-r--r--drivers/target/target_core_transport.c14
-rw-r--r--drivers/usb/class/cdc-acm.c5
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/option.c9
-rw-r--r--fs/btrfs/disk-io.c1
-rw-r--r--fs/btrfs/inode.c21
-rw-r--r--fs/btrfs/send.c16
-rw-r--r--fs/dcache.c155
-rw-r--r--fs/hostfs/hostfs_kern.c4
-rw-r--r--fs/lockd/host.c7
-rw-r--r--fs/lockd/mon.c36
-rw-r--r--fs/lockd/netns.h1
-rw-r--r--fs/lockd/svc.c1
-rw-r--r--fs/lockd/svc4proc.c2
-rw-r--r--fs/lockd/svcproc.c2
-rw-r--r--fs/namei.c4
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/nfs4state.c2
-rw-r--r--fs/proc/task_mmu.c4
-rw-r--r--fs/proc/task_nommu.c2
-rw-r--r--fs/splice.c13
-rw-r--r--include/asm-generic/cputime_nsecs.h5
-rw-r--r--include/linux/enclosure.h4
-rw-r--r--include/linux/lockd/lockd.h9
-rw-r--r--include/linux/nfs_fs.h4
-rw-r--r--include/linux/tracepoint.h6
-rw-r--r--include/net/af_unix.h4
-rw-r--r--include/net/scm.h1
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--kernel/irq/manage.c6
-rw-r--r--kernel/resource.c5
-rw-r--r--kernel/sched/core.c69
-rw-r--r--kernel/sched/deadline.c65
-rw-r--r--kernel/sched/idle_task.c9
-rw-r--r--kernel/sched/rt.c71
-rw-r--r--kernel/sched/sched.h19
-rw-r--r--kernel/time/posix-clock.c4
-rw-r--r--kernel/trace/ring_buffer.c12
-rw-r--r--kernel/trace/trace_events.c3
-rw-r--r--kernel/workqueue.c8
-rw-r--r--lib/devres.c2
-rw-r--r--net/ceph/messenger.c4
-rw-r--r--net/core/scm.c7
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c4
-rw-r--r--net/ipv6/addrconf.c17
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c18
-rw-r--r--net/mac80211/mesh_pathtbl.c8
-rw-r--r--net/netfilter/nf_tables_api.c4
-rw-r--r--net/rds/send.c4
-rw-r--r--net/rfkill/core.c22
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/unix/af_unix.c4
-rw-r--r--net/unix/garbage.c8
-rw-r--r--scripts/recordmcount.c14
-rw-r--r--tools/Makefile9
-rw-r--r--virt/kvm/async_pf.c2
136 files changed, 933 insertions, 518 deletions
diff --git a/Makefile b/Makefile
index b738f644c71..0843ef4cc0a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 14 2PATCHLEVEL = 14
3SUBLEVEL = 62 3SUBLEVEL = 63
4EXTRAVERSION = 4EXTRAVERSION =
5NAME = Remembering Coco 5NAME = Remembering Coco
6 6
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index e550b117ec4..2d6a36ea8aa 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -986,42 +986,13 @@ int arc_unwind(struct unwind_frame_info *frame)
986 (const u8 *)(fde + 986 (const u8 *)(fde +
987 1) + 987 1) +
988 *fde, ptrType); 988 *fde, ptrType);
989 if (pc >= endLoc) 989 if (pc >= endLoc) {
990 fde = NULL; 990 fde = NULL;
991 } else
992 fde = NULL;
993 }
994 if (fde == NULL) {
995 for (fde = table->address, tableSize = table->size;
996 cie = NULL, tableSize > sizeof(*fde)
997 && tableSize - sizeof(*fde) >= *fde;
998 tableSize -= sizeof(*fde) + *fde,
999 fde += 1 + *fde / sizeof(*fde)) {
1000 cie = cie_for_fde(fde, table);
1001 if (cie == &bad_cie) {
1002 cie = NULL; 991 cie = NULL;
1003 break;
1004 } 992 }
1005 if (cie == NULL 993 } else {
1006 || cie == &not_fde 994 fde = NULL;
1007 || (ptrType = fde_pointer_type(cie)) < 0) 995 cie = NULL;
1008 continue;
1009 ptr = (const u8 *)(fde + 2);
1010 startLoc = read_pointer(&ptr,
1011 (const u8 *)(fde + 1) +
1012 *fde, ptrType);
1013 if (!startLoc)
1014 continue;
1015 if (!(ptrType & DW_EH_PE_indirect))
1016 ptrType &=
1017 DW_EH_PE_FORM | DW_EH_PE_signed;
1018 endLoc =
1019 startLoc + read_pointer(&ptr,
1020 (const u8 *)(fde +
1021 1) +
1022 *fde, ptrType);
1023 if (pc >= startLoc && pc < endLoc)
1024 break;
1025 } 996 }
1026 } 997 }
1027 } 998 }
diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
index 7525982262a..2897c1ac47d 100644
--- a/arch/arm/boot/dts/wm8650.dtsi
+++ b/arch/arm/boot/dts/wm8650.dtsi
@@ -187,6 +187,15 @@
187 interrupts = <43>; 187 interrupts = <43>;
188 }; 188 };
189 189
190 sdhc@d800a000 {
191 compatible = "wm,wm8505-sdhc";
192 reg = <0xd800a000 0x400>;
193 interrupts = <20>, <21>;
194 clocks = <&clksdhc>;
195 bus-width = <4>;
196 sdon-inverted;
197 };
198
190 fb: fb@d8050800 { 199 fb: fb@d8050800 {
191 compatible = "wm,wm8505-fb"; 200 compatible = "wm,wm8505-fb";
192 reg = <0xd8050800 0x200>; 201 reg = <0xd8050800 0x200>;
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
index 03a2db58b22..ba5ce99c021 100644
--- a/arch/mips/kvm/kvm_locore.S
+++ b/arch/mips/kvm/kvm_locore.S
@@ -159,9 +159,11 @@ FEXPORT(__kvm_mips_vcpu_run)
159 159
160FEXPORT(__kvm_mips_load_asid) 160FEXPORT(__kvm_mips_load_asid)
161 /* Set the ASID for the Guest Kernel */ 161 /* Set the ASID for the Guest Kernel */
162 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 162 PTR_L t0, VCPU_COP0(k1)
163 /* addresses shift to 0x80000000 */ 163 LONG_L t0, COP0_STATUS(t0)
164 bltz t0, 1f /* If kernel */ 164 andi t0, KSU_USER | ST0_ERL | ST0_EXL
165 xori t0, KSU_USER
166 bnez t0, 1f /* If kernel */
165 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 167 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
166 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 168 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
1671: 1691:
@@ -438,9 +440,11 @@ __kvm_mips_return_to_guest:
438 mtc0 t0, CP0_EPC 440 mtc0 t0, CP0_EPC
439 441
440 /* Set the ASID for the Guest Kernel */ 442 /* Set the ASID for the Guest Kernel */
441 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 443 PTR_L t0, VCPU_COP0(k1)
442 /* addresses shift to 0x80000000 */ 444 LONG_L t0, COP0_STATUS(t0)
443 bltz t0, 1f /* If kernel */ 445 andi t0, KSU_USER | ST0_ERL | ST0_EXL
446 xori t0, KSU_USER
447 bnez t0, 1f /* If kernel */
444 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 448 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
445 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 449 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
4461: 4501:
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index 897c605263f..12d850b6876 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -313,7 +313,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
313 313
314 if (!gebase) { 314 if (!gebase) {
315 err = -ENOMEM; 315 err = -ENOMEM;
316 goto out_free_cpu; 316 goto out_uninit_cpu;
317 } 317 }
318 kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n", 318 kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
319 ALIGN(size, PAGE_SIZE), gebase); 319 ALIGN(size, PAGE_SIZE), gebase);
@@ -373,6 +373,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
373out_free_gebase: 373out_free_gebase:
374 kfree(gebase); 374 kfree(gebase);
375 375
376out_uninit_cpu:
377 kvm_vcpu_uninit(vcpu);
378
376out_free_cpu: 379out_free_cpu:
377 kfree(vcpu); 380 kfree(vcpu);
378 381
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index c76f297b714..33085819cd8 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -935,7 +935,7 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
935 935
936 base = (inst >> 21) & 0x1f; 936 base = (inst >> 21) & 0x1f;
937 op_inst = (inst >> 16) & 0x1f; 937 op_inst = (inst >> 16) & 0x1f;
938 offset = inst & 0xffff; 938 offset = (int16_t)inst;
939 cache = (inst >> 16) & 0x3; 939 cache = (inst >> 16) & 0x3;
940 op = (inst >> 18) & 0x7; 940 op = (inst >> 18) & 0x7;
941 941
diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
index 4d1ee88864e..18c8b819b0a 100644
--- a/arch/s390/mm/extable.c
+++ b/arch/s390/mm/extable.c
@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
52 int i; 52 int i;
53 53
54 /* Normalize entries to being relative to the start of the section */ 54 /* Normalize entries to being relative to the start of the section */
55 for (p = start, i = 0; p < finish; p++, i += 8) 55 for (p = start, i = 0; p < finish; p++, i += 8) {
56 p->insn += i; 56 p->insn += i;
57 p->fixup += i + 4;
58 }
57 sort(start, finish - start, sizeof(*start), cmp_ex, NULL); 59 sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
58 /* Denormalize all entries */ 60 /* Denormalize all entries */
59 for (p = start, i = 0; p < finish; p++, i += 8) 61 for (p = start, i = 0; p < finish; p++, i += 8) {
60 p->insn -= i; 62 p->insn -= i;
63 p->fixup -= i + 4;
64 }
61} 65}
62 66
63#ifdef CONFIG_MODULES 67#ifdef CONFIG_MODULES
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 25db14a33d0..47ae8d75777 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -412,7 +412,7 @@ out:
412 412
413SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) 413SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
414{ 414{
415 int ret; 415 long ret;
416 416
417 if (personality(current->personality) == PER_LINUX32 && 417 if (personality(current->personality) == PER_LINUX32 &&
418 personality(personality) == PER_LINUX) 418 personality(personality) == PER_LINUX)
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 337518c5042..b412c62486f 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -95,6 +95,8 @@ static int start_ptraced_child(void)
95{ 95{
96 int pid, n, status; 96 int pid, n, status;
97 97
98 fflush(stdout);
99
98 pid = fork(); 100 pid = fork();
99 if (pid == 0) 101 if (pid == 0)
100 ptrace_child(); 102 ptrace_child();
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index ae7d543f23e..8894f5bc462 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -248,12 +248,19 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
248 efi_memory_desc_t *virtual_map) 248 efi_memory_desc_t *virtual_map)
249{ 249{
250 efi_status_t status; 250 efi_status_t status;
251 unsigned long flags;
251 252
252 efi_call_phys_prelog(); 253 efi_call_phys_prelog();
254
255 /* Disable interrupts around EFI calls: */
256 local_irq_save(flags);
253 status = efi_call_phys4(efi_phys.set_virtual_address_map, 257 status = efi_call_phys4(efi_phys.set_virtual_address_map,
254 memory_map_size, descriptor_size, 258 memory_map_size, descriptor_size,
255 descriptor_version, virtual_map); 259 descriptor_version, virtual_map);
260 local_irq_restore(flags);
261
256 efi_call_phys_epilog(); 262 efi_call_phys_epilog();
263
257 return status; 264 return status;
258} 265}
259 266
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 9ee3491e31f..be4e7eb4167 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -33,11 +33,10 @@
33 33
34/* 34/*
35 * To make EFI call EFI runtime service in physical addressing mode we need 35 * To make EFI call EFI runtime service in physical addressing mode we need
36 * prelog/epilog before/after the invocation to disable interrupt, to 36 * prolog/epilog before/after the invocation to claim the EFI runtime service
37 * claim EFI runtime service handler exclusively and to duplicate a memory in 37 * handler exclusively and to duplicate a memory mapping in low memory space,
38 * low memory space say 0 - 3G. 38 * say 0 - 3G.
39 */ 39 */
40static unsigned long efi_rt_eflags;
41 40
42void efi_sync_low_kernel_mappings(void) {} 41void efi_sync_low_kernel_mappings(void) {}
43void __init efi_dump_pagetable(void) {} 42void __init efi_dump_pagetable(void) {}
@@ -59,8 +58,6 @@ void efi_call_phys_prelog(void)
59{ 58{
60 struct desc_ptr gdt_descr; 59 struct desc_ptr gdt_descr;
61 60
62 local_irq_save(efi_rt_eflags);
63
64 load_cr3(initial_page_table); 61 load_cr3(initial_page_table);
65 __flush_tlb_all(); 62 __flush_tlb_all();
66 63
@@ -79,8 +76,6 @@ void efi_call_phys_epilog(void)
79 76
80 load_cr3(swapper_pg_dir); 77 load_cr3(swapper_pg_dir);
81 __flush_tlb_all(); 78 __flush_tlb_all();
82
83 local_irq_restore(efi_rt_eflags);
84} 79}
85 80
86void __init efi_runtime_mkexec(void) 81void __init efi_runtime_mkexec(void)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 666b74a0909..b1be0425c68 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -41,7 +41,6 @@
41#include <asm/realmode.h> 41#include <asm/realmode.h>
42 42
43static pgd_t *save_pgd __initdata; 43static pgd_t *save_pgd __initdata;
44static unsigned long efi_flags __initdata;
45 44
46/* 45/*
47 * We allocate runtime services regions bottom-up, starting from -4G, i.e. 46 * We allocate runtime services regions bottom-up, starting from -4G, i.e.
@@ -87,7 +86,6 @@ void __init efi_call_phys_prelog(void)
87 return; 86 return;
88 87
89 early_code_mapping_set_exec(1); 88 early_code_mapping_set_exec(1);
90 local_irq_save(efi_flags);
91 89
92 n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); 90 n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
93 save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL); 91 save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
@@ -115,7 +113,6 @@ void __init efi_call_phys_epilog(void)
115 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); 113 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
116 kfree(save_pgd); 114 kfree(save_pgd);
117 __flush_tlb_all(); 115 __flush_tlb_all();
118 local_irq_restore(efi_flags);
119 early_code_mapping_set_exec(0); 116 early_code_mapping_set_exec(0);
120} 117}
121 118
diff --git a/block/partitions/mac.c b/block/partitions/mac.c
index 76d8ba6379a..bd5b9146523 100644
--- a/block/partitions/mac.c
+++ b/block/partitions/mac.c
@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state)
32 Sector sect; 32 Sector sect;
33 unsigned char *data; 33 unsigned char *data;
34 int slot, blocks_in_map; 34 int slot, blocks_in_map;
35 unsigned secsize; 35 unsigned secsize, datasize, partoffset;
36#ifdef CONFIG_PPC_PMAC 36#ifdef CONFIG_PPC_PMAC
37 int found_root = 0; 37 int found_root = 0;
38 int found_root_goodness = 0; 38 int found_root_goodness = 0;
@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state)
50 } 50 }
51 secsize = be16_to_cpu(md->block_size); 51 secsize = be16_to_cpu(md->block_size);
52 put_dev_sector(sect); 52 put_dev_sector(sect);
53 data = read_part_sector(state, secsize/512, &sect); 53 datasize = round_down(secsize, 512);
54 data = read_part_sector(state, datasize / 512, &sect);
54 if (!data) 55 if (!data)
55 return -1; 56 return -1;
56 part = (struct mac_partition *) (data + secsize%512); 57 partoffset = secsize % 512;
58 if (partoffset + sizeof(*part) > datasize)
59 return -1;
60 part = (struct mac_partition *) (data + partoffset);
57 if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { 61 if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
58 put_dev_sector(sect); 62 put_dev_sector(sect);
59 return 0; /* not a MacOS disk */ 63 return 0; /* not a MacOS disk */
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index f8c0b8dbeb7..88bc8e6b2a5 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
53 struct dmaengine_unmap_data *unmap = NULL; 53 struct dmaengine_unmap_data *unmap = NULL;
54 54
55 if (device) 55 if (device)
56 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); 56 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
57 57
58 if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { 58 if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
59 unsigned long dma_prep_flags = 0; 59 unsigned long dma_prep_flags = 0;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index d05327caf69..7eb264e6526 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -176,7 +176,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
176 BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); 176 BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
177 177
178 if (device) 178 if (device)
179 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); 179 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
180 180
181 if (unmap && 181 if (unmap &&
182 (src_cnt <= dma_maxpq(device, 0) || 182 (src_cnt <= dma_maxpq(device, 0) ||
@@ -294,7 +294,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
294 BUG_ON(disks < 4); 294 BUG_ON(disks < 4);
295 295
296 if (device) 296 if (device)
297 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); 297 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
298 298
299 if (unmap && disks <= dma_maxpq(device, 0) && 299 if (unmap && disks <= dma_maxpq(device, 0) &&
300 is_dma_pq_aligned(device, offset, 0, len)) { 300 is_dma_pq_aligned(device, offset, 0, len)) {
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 934a8498149..8fab6275ea1 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
41 u8 *a, *b, *c; 41 u8 *a, *b, *c;
42 42
43 if (dma) 43 if (dma)
44 unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); 44 unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
45 45
46 if (unmap) { 46 if (unmap) {
47 struct device *dev = dma->dev; 47 struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
105 u8 *d, *s; 105 u8 *d, *s;
106 106
107 if (dma) 107 if (dma)
108 unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); 108 unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
109 109
110 if (unmap) { 110 if (unmap) {
111 dma_addr_t dma_dest[2]; 111 dma_addr_t dma_dest[2];
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index e1bce26cd4f..da75777f2b3 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
182 BUG_ON(src_cnt <= 1); 182 BUG_ON(src_cnt <= 1);
183 183
184 if (device) 184 if (device)
185 unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO); 185 unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
186 186
187 if (unmap && is_dma_xor_aligned(device, offset, 0, len)) { 187 if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
188 struct dma_async_tx_descriptor *tx; 188 struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
278 BUG_ON(src_cnt <= 1); 278 BUG_ON(src_cnt <= 1);
279 279
280 if (device) 280 if (device)
281 unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO); 281 unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
282 282
283 if (unmap && src_cnt <= device->max_xor && 283 if (unmap && src_cnt <= device->max_xor &&
284 is_dma_xor_aligned(device, offset, 0, len)) { 284 is_dma_xor_aligned(device, offset, 0, len)) {
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 136803c47cd..96e5ed18863 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
997static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) 997static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
998{ 998{
999 struct ata_port *ap = qc->ap; 999 struct ata_port *ap = qc->ap;
1000 unsigned long flags;
1001 1000
1002 if (ap->ops->error_handler) { 1001 if (ap->ops->error_handler) {
1003 if (in_wq) { 1002 if (in_wq) {
1004 spin_lock_irqsave(ap->lock, flags);
1005
1006 /* EH might have kicked in while host lock is 1003 /* EH might have kicked in while host lock is
1007 * released. 1004 * released.
1008 */ 1005 */
@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1014 } else 1011 } else
1015 ata_port_freeze(ap); 1012 ata_port_freeze(ap);
1016 } 1013 }
1017
1018 spin_unlock_irqrestore(ap->lock, flags);
1019 } else { 1014 } else {
1020 if (likely(!(qc->err_mask & AC_ERR_HSM))) 1015 if (likely(!(qc->err_mask & AC_ERR_HSM)))
1021 ata_qc_complete(qc); 1016 ata_qc_complete(qc);
@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1024 } 1019 }
1025 } else { 1020 } else {
1026 if (in_wq) { 1021 if (in_wq) {
1027 spin_lock_irqsave(ap->lock, flags);
1028 ata_sff_irq_on(ap); 1022 ata_sff_irq_on(ap);
1029 ata_qc_complete(qc); 1023 ata_qc_complete(qc);
1030 spin_unlock_irqrestore(ap->lock, flags);
1031 } else 1024 } else
1032 ata_qc_complete(qc); 1025 ata_qc_complete(qc);
1033 } 1026 }
@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1048{ 1041{
1049 struct ata_link *link = qc->dev->link; 1042 struct ata_link *link = qc->dev->link;
1050 struct ata_eh_info *ehi = &link->eh_info; 1043 struct ata_eh_info *ehi = &link->eh_info;
1051 unsigned long flags = 0;
1052 int poll_next; 1044 int poll_next;
1053 1045
1046 lockdep_assert_held(ap->lock);
1047
1054 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); 1048 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1055 1049
1056 /* Make sure ata_sff_qc_issue() does not throw things 1050 /* Make sure ata_sff_qc_issue() does not throw things
@@ -1112,14 +1106,6 @@ fsm_start:
1112 } 1106 }
1113 } 1107 }
1114 1108
1115 /* Send the CDB (atapi) or the first data block (ata pio out).
1116 * During the state transition, interrupt handler shouldn't
1117 * be invoked before the data transfer is complete and
1118 * hsm_task_state is changed. Hence, the following locking.
1119 */
1120 if (in_wq)
1121 spin_lock_irqsave(ap->lock, flags);
1122
1123 if (qc->tf.protocol == ATA_PROT_PIO) { 1109 if (qc->tf.protocol == ATA_PROT_PIO) {
1124 /* PIO data out protocol. 1110 /* PIO data out protocol.
1125 * send first data block. 1111 * send first data block.
@@ -1135,9 +1121,6 @@ fsm_start:
1135 /* send CDB */ 1121 /* send CDB */
1136 atapi_send_cdb(ap, qc); 1122 atapi_send_cdb(ap, qc);
1137 1123
1138 if (in_wq)
1139 spin_unlock_irqrestore(ap->lock, flags);
1140
1141 /* if polling, ata_sff_pio_task() handles the rest. 1124 /* if polling, ata_sff_pio_task() handles the rest.
1142 * otherwise, interrupt handler takes over from here. 1125 * otherwise, interrupt handler takes over from here.
1143 */ 1126 */
@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
1361 u8 status; 1344 u8 status;
1362 int poll_next; 1345 int poll_next;
1363 1346
1347 spin_lock_irq(ap->lock);
1348
1364 BUG_ON(ap->sff_pio_task_link == NULL); 1349 BUG_ON(ap->sff_pio_task_link == NULL);
1365 /* qc can be NULL if timeout occurred */ 1350 /* qc can be NULL if timeout occurred */
1366 qc = ata_qc_from_tag(ap, link->active_tag); 1351 qc = ata_qc_from_tag(ap, link->active_tag);
1367 if (!qc) { 1352 if (!qc) {
1368 ap->sff_pio_task_link = NULL; 1353 ap->sff_pio_task_link = NULL;
1369 return; 1354 goto out_unlock;
1370 } 1355 }
1371 1356
1372fsm_start: 1357fsm_start:
@@ -1381,11 +1366,14 @@ fsm_start:
1381 */ 1366 */
1382 status = ata_sff_busy_wait(ap, ATA_BUSY, 5); 1367 status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1383 if (status & ATA_BUSY) { 1368 if (status & ATA_BUSY) {
1369 spin_unlock_irq(ap->lock);
1384 ata_msleep(ap, 2); 1370 ata_msleep(ap, 2);
1371 spin_lock_irq(ap->lock);
1372
1385 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1373 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1386 if (status & ATA_BUSY) { 1374 if (status & ATA_BUSY) {
1387 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); 1375 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1388 return; 1376 goto out_unlock;
1389 } 1377 }
1390 } 1378 }
1391 1379
@@ -1402,6 +1390,8 @@ fsm_start:
1402 */ 1390 */
1403 if (poll_next) 1391 if (poll_next)
1404 goto fsm_start; 1392 goto fsm_start;
1393out_unlock:
1394 spin_unlock_irq(ap->lock);
1405} 1395}
1406 1396
1407/** 1397/**
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3062f8605b2..fe0a3017161 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -630,6 +630,9 @@ static void sil_dev_config(struct ata_device *dev)
630 unsigned int n, quirks = 0; 630 unsigned int n, quirks = 0;
631 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 631 unsigned char model_num[ATA_ID_PROD_LEN + 1];
632 632
633 /* This controller doesn't support trim */
634 dev->horkage |= ATA_HORKAGE_NOTRIM;
635
633 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 636 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
634 637
635 for (n = 0; sil_blacklist[n].product; n++) 638 for (n = 0; sil_blacklist[n].product; n++)
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index 1098ed3b9b8..dc45ddb3611 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -50,6 +50,8 @@
50 50
51#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) 51#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
52 52
53#define MIN_OSCR_DELTA 16
54
53static void __iomem *regbase; 55static void __iomem *regbase;
54 56
55static cycle_t vt8500_timer_read(struct clocksource *cs) 57static cycle_t vt8500_timer_read(struct clocksource *cs)
@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
80 cpu_relax(); 82 cpu_relax();
81 writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL); 83 writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
82 84
83 if ((signed)(alarm - clocksource.read(&clocksource)) <= 16) 85 if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
84 return -ETIME; 86 return -ETIME;
85 87
86 writel(1, regbase + TIMER_IER_VAL); 88 writel(1, regbase + TIMER_IER_VAL);
@@ -160,7 +162,7 @@ static void __init vt8500_timer_init(struct device_node *np)
160 pr_err("%s: setup_irq failed for %s\n", __func__, 162 pr_err("%s: setup_irq failed for %s\n", __func__,
161 clockevent.name); 163 clockevent.name);
162 clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ, 164 clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
163 4, 0xf0000000); 165 MIN_OSCR_DELTA * 2, 0xf0000000);
164} 166}
165 167
166CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init); 168CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 3ae48ee2f48..df79cb0bf04 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -176,7 +176,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
176 176
177/*----------------------------------------------------------------------*/ 177/*----------------------------------------------------------------------*/
178 178
179static inline unsigned int dwc_fast_fls(unsigned long long v) 179static inline unsigned int dwc_fast_ffs(unsigned long long v)
180{ 180{
181 /* 181 /*
182 * We can be a lot more clever here, but this should take care 182 * We can be a lot more clever here, but this should take care
@@ -720,7 +720,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
720 dw->data_width[dwc->dst_master]); 720 dw->data_width[dwc->dst_master]);
721 721
722 src_width = dst_width = min_t(unsigned int, data_width, 722 src_width = dst_width = min_t(unsigned int, data_width,
723 dwc_fast_fls(src | dest | len)); 723 dwc_fast_ffs(src | dest | len));
724 724
725 ctllo = DWC_DEFAULT_CTLLO(chan) 725 ctllo = DWC_DEFAULT_CTLLO(chan)
726 | DWC_CTLL_DST_WIDTH(dst_width) 726 | DWC_CTLL_DST_WIDTH(dst_width)
@@ -799,7 +799,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
799 799
800 switch (direction) { 800 switch (direction) {
801 case DMA_MEM_TO_DEV: 801 case DMA_MEM_TO_DEV:
802 reg_width = __fls(sconfig->dst_addr_width); 802 reg_width = __ffs(sconfig->dst_addr_width);
803 reg = sconfig->dst_addr; 803 reg = sconfig->dst_addr;
804 ctllo = (DWC_DEFAULT_CTLLO(chan) 804 ctllo = (DWC_DEFAULT_CTLLO(chan)
805 | DWC_CTLL_DST_WIDTH(reg_width) 805 | DWC_CTLL_DST_WIDTH(reg_width)
@@ -819,7 +819,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
819 len = sg_dma_len(sg); 819 len = sg_dma_len(sg);
820 820
821 mem_width = min_t(unsigned int, 821 mem_width = min_t(unsigned int,
822 data_width, dwc_fast_fls(mem | len)); 822 data_width, dwc_fast_ffs(mem | len));
823 823
824slave_sg_todev_fill_desc: 824slave_sg_todev_fill_desc:
825 desc = dwc_desc_get(dwc); 825 desc = dwc_desc_get(dwc);
@@ -859,7 +859,7 @@ slave_sg_todev_fill_desc:
859 } 859 }
860 break; 860 break;
861 case DMA_DEV_TO_MEM: 861 case DMA_DEV_TO_MEM:
862 reg_width = __fls(sconfig->src_addr_width); 862 reg_width = __ffs(sconfig->src_addr_width);
863 reg = sconfig->src_addr; 863 reg = sconfig->src_addr;
864 ctllo = (DWC_DEFAULT_CTLLO(chan) 864 ctllo = (DWC_DEFAULT_CTLLO(chan)
865 | DWC_CTLL_SRC_WIDTH(reg_width) 865 | DWC_CTLL_SRC_WIDTH(reg_width)
@@ -879,7 +879,7 @@ slave_sg_todev_fill_desc:
879 len = sg_dma_len(sg); 879 len = sg_dma_len(sg);
880 880
881 mem_width = min_t(unsigned int, 881 mem_width = min_t(unsigned int,
882 data_width, dwc_fast_fls(mem | len)); 882 data_width, dwc_fast_ffs(mem | len));
883 883
884slave_sg_fromdev_fill_desc: 884slave_sg_fromdev_fill_desc:
885 desc = dwc_desc_get(dwc); 885 desc = dwc_desc_get(dwc);
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 592af5f0cf3..53587377e67 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
435 */ 435 */
436void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev) 436void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
437{ 437{
438 int status;
439
440 if (!edac_dev->edac_check) 438 if (!edac_dev->edac_check)
441 return; 439 return;
442 440
443 status = cancel_delayed_work(&edac_dev->work); 441 edac_dev->op_state = OP_OFFLINE;
444 if (status == 0) { 442
445 /* workq instance might be running, wait for it */ 443 cancel_delayed_work_sync(&edac_dev->work);
446 flush_workqueue(edac_workqueue); 444 flush_workqueue(edac_workqueue);
447 }
448} 445}
449 446
450/* 447/*
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 33edd676634..19dc0bc9b13 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -584,18 +584,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
584 */ 584 */
585static void edac_mc_workq_teardown(struct mem_ctl_info *mci) 585static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
586{ 586{
587 int status; 587 mci->op_state = OP_OFFLINE;
588
589 if (mci->op_state != OP_RUNNING_POLL)
590 return;
591
592 status = cancel_delayed_work(&mci->work);
593 if (status == 0) {
594 edac_dbg(0, "not canceled, flush the queue\n");
595 588
596 /* workq instance might be running, wait for it */ 589 cancel_delayed_work_sync(&mci->work);
597 flush_workqueue(edac_workqueue); 590 flush_workqueue(edac_workqueue);
598 }
599} 591}
600 592
601/* 593/*
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 2cf44b4db80..b4b38603b80 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
274 */ 274 */
275static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci) 275static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
276{ 276{
277 int status;
278
279 edac_dbg(0, "\n"); 277 edac_dbg(0, "\n");
280 278
281 status = cancel_delayed_work(&pci->work); 279 pci->op_state = OP_OFFLINE;
282 if (status == 0) 280
283 flush_workqueue(edac_workqueue); 281 cancel_delayed_work_sync(&pci->work);
282 flush_workqueue(edac_workqueue);
284} 283}
285 284
286/* 285/*
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 9833a1b1acc..3fc122306f1 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -296,6 +296,7 @@ int ast_framebuffer_init(struct drm_device *dev,
296int ast_fbdev_init(struct drm_device *dev); 296int ast_fbdev_init(struct drm_device *dev);
297void ast_fbdev_fini(struct drm_device *dev); 297void ast_fbdev_fini(struct drm_device *dev);
298void ast_fbdev_set_suspend(struct drm_device *dev, int state); 298void ast_fbdev_set_suspend(struct drm_device *dev, int state);
299void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr);
299 300
300struct ast_bo { 301struct ast_bo {
301 struct ttm_buffer_object bo; 302 struct ttm_buffer_object bo;
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index a28640f47c2..b55b6b1c9fe 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -367,3 +367,10 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state)
367 367
368 fb_set_suspend(ast->fbdev->helper.fbdev, state); 368 fb_set_suspend(ast->fbdev->helper.fbdev, state);
369} 369}
370
371void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr)
372{
373 ast->fbdev->helper.fbdev->fix.smem_start =
374 ast->fbdev->helper.fbdev->apertures->ranges[0].base + gpu_addr;
375 ast->fbdev->helper.fbdev->fix.smem_len = ast->vram_size - gpu_addr;
376}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index d830b38e54f..c0f284230a3 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -312,6 +312,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
312 dev->mode_config.min_height = 0; 312 dev->mode_config.min_height = 0;
313 dev->mode_config.preferred_depth = 24; 313 dev->mode_config.preferred_depth = 24;
314 dev->mode_config.prefer_shadow = 1; 314 dev->mode_config.prefer_shadow = 1;
315 dev->mode_config.fb_base = pci_resource_start(ast->dev->pdev, 0);
315 316
316 if (ast->chip == AST2100 || 317 if (ast->chip == AST2100 ||
317 ast->chip == AST2200 || 318 ast->chip == AST2200 ||
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index d2e56e95d88..cea916fa164 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -509,6 +509,8 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
509 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); 509 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
510 if (ret) 510 if (ret)
511 DRM_ERROR("failed to kmap fbcon\n"); 511 DRM_ERROR("failed to kmap fbcon\n");
512 else
513 ast_fbdev_set_base(ast, gpu_addr);
512 } 514 }
513 ast_bo_unreserve(bo); 515 ast_bo_unreserve(bo);
514 516
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 958b26dcac8..0a9d1fd3299 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8821,11 +8821,21 @@ connected_sink_compute_bpp(struct intel_connector * connector,
8821 pipe_config->pipe_bpp = connector->base.display_info.bpc*3; 8821 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
8822 } 8822 }
8823 8823
8824 /* Clamp bpp to 8 on screens without EDID 1.4 */ 8824 /* Clamp bpp to default limit on screens without EDID 1.4 */
8825 if (connector->base.display_info.bpc == 0 && bpp > 24) { 8825 if (connector->base.display_info.bpc == 0) {
8826 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", 8826 int type = connector->base.connector_type;
8827 bpp); 8827 int clamp_bpp = 24;
8828 pipe_config->pipe_bpp = 24; 8828
8829 /* Fall back to 18 bpp when DP sink capability is unknown. */
8830 if (type == DRM_MODE_CONNECTOR_DisplayPort ||
8831 type == DRM_MODE_CONNECTOR_eDP)
8832 clamp_bpp = 18;
8833
8834 if (bpp > clamp_bpp) {
8835 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
8836 bpp, clamp_bpp);
8837 pipe_config->pipe_bpp = clamp_bpp;
8838 }
8829 } 8839 }
8830} 8840}
8831 8841
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 56a13a91515..0928c5e2baf 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
168 cmd->command_size)) 168 cmd->command_size))
169 return -EFAULT; 169 return -EFAULT;
170 170
171 reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); 171 reloc_info = kmalloc_array(cmd->relocs_num,
172 sizeof(struct qxl_reloc_info), GFP_KERNEL);
172 if (!reloc_info) 173 if (!reloc_info)
173 return -ENOMEM; 174 return -ENOMEM;
174 175
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 2fa3cf615a6..6a3b5f92219 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -436,7 +436,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
436 } 436 }
437 437
438 /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ 438 /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
439 if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) && 439 if (((dev->pdev->device == 0x9802) ||
440 (dev->pdev->device == 0x9805) ||
441 (dev->pdev->device == 0x9806)) &&
440 (dev->pdev->subsystem_vendor == 0x1734) && 442 (dev->pdev->subsystem_vendor == 0x1734) &&
441 (dev->pdev->subsystem_device == 0x11bd)) { 443 (dev->pdev->subsystem_device == 0x11bd)) {
442 if (*connector_type == DRM_MODE_CONNECTOR_VGA) { 444 if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
@@ -447,14 +449,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
447 } 449 }
448 } 450 }
449 451
450 /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
451 if ((dev->pdev->device == 0x9805) &&
452 (dev->pdev->subsystem_vendor == 0x1734) &&
453 (dev->pdev->subsystem_device == 0x11bd)) {
454 if (*connector_type == DRM_MODE_CONNECTOR_VGA)
455 return false;
456 }
457
458 return true; 452 return true;
459} 453}
460 454
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index f8b20e1c082..614144d34ae 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
79 struct drm_mode_config *mode_config = &dev->mode_config; 79 struct drm_mode_config *mode_config = &dev->mode_config;
80 struct drm_connector *connector; 80 struct drm_connector *connector;
81 81
82 /* we can race here at startup, some boards seem to trigger
83 * hotplug irqs when they shouldn't. */
84 if (!rdev->mode_info.mode_config_initialized)
85 return;
86
82 mutex_lock(&mode_config->mutex); 87 mutex_lock(&mode_config->mutex);
83 if (mode_config->num_connector) { 88 if (mode_config->num_connector) {
84 list_for_each_entry(connector, &mode_config->connector_list, head) 89 list_for_each_entry(connector, &mode_config->connector_list, head)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0b00de55b2a..9a559140e4a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -915,8 +915,6 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
915 915
916 /* update display watermarks based on new power state */ 916 /* update display watermarks based on new power state */
917 radeon_bandwidth_update(rdev); 917 radeon_bandwidth_update(rdev);
918 /* update displays */
919 radeon_dpm_display_configuration_changed(rdev);
920 918
921 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 919 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
922 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 920 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
@@ -936,6 +934,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
936 934
937 radeon_dpm_post_set_power_state(rdev); 935 radeon_dpm_post_set_power_state(rdev);
938 936
937 /* update displays */
938 radeon_dpm_display_configuration_changed(rdev);
939
939 if (rdev->asic->dpm.force_performance_level) { 940 if (rdev->asic->dpm.force_performance_level) {
940 if (rdev->pm.dpm.thermal_active) { 941 if (rdev->pm.dpm.thermal_active) {
941 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 942 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
@@ -1364,8 +1365,7 @@ int radeon_pm_late_init(struct radeon_device *rdev)
1364 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1365 ret = device_create_file(rdev->dev, &dev_attr_power_method);
1365 if (ret) 1366 if (ret)
1366 DRM_ERROR("failed to create device file for power method\n"); 1367 DRM_ERROR("failed to create device file for power method\n");
1367 if (!ret) 1368 rdev->pm.sysfs_initialized = true;
1368 rdev->pm.sysfs_initialized = true;
1369 } 1369 }
1370 1370
1371 mutex_lock(&rdev->pm.mutex); 1371 mutex_lock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index c0625805cdd..a1d68426654 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
349 /* see if we can skip over some allocations */ 349 /* see if we can skip over some allocations */
350 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); 350 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
351 351
352 for (i = 0; i < RADEON_NUM_RINGS; ++i)
353 radeon_fence_ref(fences[i]);
354
352 spin_unlock(&sa_manager->wq.lock); 355 spin_unlock(&sa_manager->wq.lock);
353 r = radeon_fence_wait_any(rdev, fences, false); 356 r = radeon_fence_wait_any(rdev, fences, false);
357 for (i = 0; i < RADEON_NUM_RINGS; ++i)
358 radeon_fence_unref(&fences[i]);
354 spin_lock(&sa_manager->wq.lock); 359 spin_lock(&sa_manager->wq.lock);
355 /* if we have nothing to wait for block */ 360 /* if we have nothing to wait for block */
356 if (r == -ENOENT && block) { 361 if (r == -ENOENT && block) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 553d1b752b7..feaf018f4cf 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -623,7 +623,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
623 0, PAGE_SIZE, 623 0, PAGE_SIZE,
624 PCI_DMA_BIDIRECTIONAL); 624 PCI_DMA_BIDIRECTIONAL);
625 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { 625 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
626 while (--i) { 626 while (i--) {
627 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], 627 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
628 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 628 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
629 gtt->ttm.dma_address[i] = 0; 629 gtt->ttm.dma_address[i] = 0;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 8fcb932a3a5..aaefb10aa09 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -1415,7 +1415,7 @@ int rv770_resume_smc(struct radeon_device *rdev)
1415int rv770_set_sw_state(struct radeon_device *rdev) 1415int rv770_set_sw_state(struct radeon_device *rdev)
1416{ 1416{
1417 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) 1417 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
1418 return -EINVAL; 1418 DRM_ERROR("rv770_set_sw_state failed\n");
1419 return 0; 1419 return 0;
1420} 1420}
1421 1421
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index b68fb182f25..fd33d74c936 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -25,6 +25,7 @@
25 * 25 *
26 **************************************************************************/ 26 **************************************************************************/
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/console.h>
28 29
29#include <drm/drmP.h> 30#include <drm/drmP.h>
30#include "vmwgfx_drv.h" 31#include "vmwgfx_drv.h"
@@ -1383,6 +1384,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1383static int __init vmwgfx_init(void) 1384static int __init vmwgfx_init(void)
1384{ 1385{
1385 int ret; 1386 int ret;
1387
1388#ifdef CONFIG_VGA_CONSOLE
1389 if (vgacon_text_force())
1390 return -EINVAL;
1391#endif
1392
1386 ret = drm_pci_init(&driver, &vmw_pci_driver); 1393 ret = drm_pci_init(&driver, &vmw_pci_driver);
1387 if (ret) 1394 if (ret)
1388 DRM_ERROR("Failed initializing DRM.\n"); 1395 DRM_ERROR("Failed initializing DRM.\n");
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index af025970835..bbb554d586d 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -392,8 +392,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
392 set_current_state(interruptible ? 392 set_current_state(interruptible ?
393 TASK_INTERRUPTIBLE : 393 TASK_INTERRUPTIBLE :
394 TASK_UNINTERRUPTIBLE); 394 TASK_UNINTERRUPTIBLE);
395 if (signal_pending(current)) { 395 if (interruptible && signal_pending(current)) {
396 rc = -EINTR; 396 __set_current_state(TASK_RUNNING);
397 remove_wait_queue(&vga_wait_queue, &wait);
398 rc = -ERESTARTSYS;
397 break; 399 break;
398 } 400 }
399 schedule(); 401 schedule();
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index 126516414c1..44223f5d92d 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
126 struct ads1015_data *data = i2c_get_clientdata(client); 126 struct ads1015_data *data = i2c_get_clientdata(client);
127 unsigned int pga = data->channel_data[channel].pga; 127 unsigned int pga = data->channel_data[channel].pga;
128 int fullscale = fullscale_table[pga]; 128 int fullscale = fullscale_table[pga];
129 const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0; 129 const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
130 130
131 return DIV_ROUND_CLOSEST(reg * fullscale, mask); 131 return DIV_ROUND_CLOSEST(reg * fullscale, mask);
132} 132}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 095bb046e2c..875348699e6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
149 error = l2t_send(tdev, skb, l2e); 149 error = l2t_send(tdev, skb, l2e);
150 if (error < 0) 150 if (error < 0)
151 kfree_skb(skb); 151 kfree_skb(skb);
152 return error; 152 return error < 0 ? error : 0;
153} 153}
154 154
155int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb) 155int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
165 error = cxgb3_ofld_send(tdev, skb); 165 error = cxgb3_ofld_send(tdev, skb);
166 if (error < 0) 166 if (error < 0)
167 kfree_skb(skb); 167 kfree_skb(skb);
168 return error; 168 return error < 0 ? error : 0;
169} 169}
170 170
171static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) 171static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
index dabb697b1c2..48ba1c3e945 100644
--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
286 struct qib_ibdev *dev = to_idev(ibqp->device); 286 struct qib_ibdev *dev = to_idev(ibqp->device);
287 struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num); 287 struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
288 struct qib_mcast *mcast = NULL; 288 struct qib_mcast *mcast = NULL;
289 struct qib_mcast_qp *p, *tmp; 289 struct qib_mcast_qp *p, *tmp, *delp = NULL;
290 struct rb_node *n; 290 struct rb_node *n;
291 int last = 0; 291 int last = 0;
292 int ret; 292 int ret;
293 293
294 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { 294 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
295 ret = -EINVAL; 295 return -EINVAL;
296 goto bail;
297 }
298 296
299 spin_lock_irq(&ibp->lock); 297 spin_lock_irq(&ibp->lock);
300 298
@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
303 while (1) { 301 while (1) {
304 if (n == NULL) { 302 if (n == NULL) {
305 spin_unlock_irq(&ibp->lock); 303 spin_unlock_irq(&ibp->lock);
306 ret = -EINVAL; 304 return -EINVAL;
307 goto bail;
308 } 305 }
309 306
310 mcast = rb_entry(n, struct qib_mcast, rb_node); 307 mcast = rb_entry(n, struct qib_mcast, rb_node);
@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
328 */ 325 */
329 list_del_rcu(&p->list); 326 list_del_rcu(&p->list);
330 mcast->n_attached--; 327 mcast->n_attached--;
328 delp = p;
331 329
332 /* If this was the last attached QP, remove the GID too. */ 330 /* If this was the last attached QP, remove the GID too. */
333 if (list_empty(&mcast->qp_list)) { 331 if (list_empty(&mcast->qp_list)) {
@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
338 } 336 }
339 337
340 spin_unlock_irq(&ibp->lock); 338 spin_unlock_irq(&ibp->lock);
339 /* QP not attached */
340 if (!delp)
341 return -EINVAL;
342 /*
343 * Wait for any list walkers to finish before freeing the
344 * list element.
345 */
346 wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
347 qib_mcast_qp_free(delp);
341 348
342 if (p) {
343 /*
344 * Wait for any list walkers to finish before freeing the
345 * list element.
346 */
347 wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
348 qib_mcast_qp_free(p);
349 }
350 if (last) { 349 if (last) {
351 atomic_dec(&mcast->refcount); 350 atomic_dec(&mcast->refcount);
352 wait_event(mcast->wait, !atomic_read(&mcast->refcount)); 351 wait_event(mcast->wait, !atomic_read(&mcast->refcount));
@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
355 dev->n_mcast_grps_allocated--; 354 dev->n_mcast_grps_allocated--;
356 spin_unlock_irq(&dev->n_mcast_grps_lock); 355 spin_unlock_irq(&dev->n_mcast_grps_lock);
357 } 356 }
358 357 return 0;
359 ret = 0;
360
361bail:
362 return ret;
363} 358}
364 359
365int qib_mcast_tree_empty(struct qib_ibport *ibp) 360int qib_mcast_tree_empty(struct qib_ibport *ibp)
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 3ae2bb8d9cf..21a44b168d4 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -204,7 +204,12 @@ int __init fpga_irq_of_init(struct device_node *node,
204 if (!parent_irq) 204 if (!parent_irq)
205 parent_irq = -1; 205 parent_irq = -1;
206 206
207#ifdef CONFIG_ARCH_VERSATILE
208 fpga_irq_init(base, node->name, IRQ_SIC_START, parent_irq, valid_mask,
209 node);
210#else
207 fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); 211 fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
212#endif
208 213
209 writel(clear_mask, base + IRQ_ENABLE_CLEAR); 214 writel(clear_mask, base + IRQ_ENABLE_CLEAR);
210 writel(clear_mask, base + FIQ_ENABLE_CLEAR); 215 writel(clear_mask, base + FIQ_ENABLE_CLEAR);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index fbcb6225f79..74a5786ddcc 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1641,6 +1641,7 @@ static void bch_btree_gc(struct cache_set *c)
1641 do { 1641 do {
1642 ret = btree_root(gc_root, c, &op, &writes, &stats); 1642 ret = btree_root(gc_root, c, &op, &writes, &stats);
1643 closure_sync(&writes); 1643 closure_sync(&writes);
1644 cond_resched();
1644 1645
1645 if (ret && ret != -EAGAIN) 1646 if (ret && ret != -EAGAIN)
1646 pr_warn("gc failed!"); 1647 pr_warn("gc failed!");
@@ -2037,8 +2038,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2037 rw_lock(true, b, b->level); 2038 rw_lock(true, b, b->level);
2038 2039
2039 if (b->key.ptr[0] != btree_ptr || 2040 if (b->key.ptr[0] != btree_ptr ||
2040 b->seq != seq + 1) 2041 b->seq != seq + 1) {
2042 op->lock = b->level;
2041 goto out; 2043 goto out;
2044 }
2042 } 2045 }
2043 2046
2044 SET_KEY_PTRS(check_key, 1); 2047 SET_KEY_PTRS(check_key, 1);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 24a3a1546ca..1b6beb1e314 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -712,6 +712,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
712 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || 712 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
713 sysfs_create_link(&c->kobj, &d->kobj, d->name), 713 sysfs_create_link(&c->kobj, &d->kobj, d->name),
714 "Couldn't create device <-> cache set symlinks"); 714 "Couldn't create device <-> cache set symlinks");
715
716 clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
715} 717}
716 718
717static void bcache_device_detach(struct bcache_device *d) 719static void bcache_device_detach(struct bcache_device *d)
@@ -882,8 +884,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
882 buf[SB_LABEL_SIZE] = '\0'; 884 buf[SB_LABEL_SIZE] = '\0';
883 env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf); 885 env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
884 886
885 if (atomic_xchg(&dc->running, 1)) 887 if (atomic_xchg(&dc->running, 1)) {
888 kfree(env[1]);
889 kfree(env[2]);
886 return; 890 return;
891 }
887 892
888 if (!d->c && 893 if (!d->c &&
889 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { 894 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
@@ -2081,8 +2086,10 @@ static int __init bcache_init(void)
2081 closure_debug_init(); 2086 closure_debug_init();
2082 2087
2083 bcache_major = register_blkdev(0, "bcache"); 2088 bcache_major = register_blkdev(0, "bcache");
2084 if (bcache_major < 0) 2089 if (bcache_major < 0) {
2090 unregister_reboot_notifier(&reboot);
2085 return bcache_major; 2091 return bcache_major;
2092 }
2086 2093
2087 if (!(bcache_wq = create_workqueue("bcache")) || 2094 if (!(bcache_wq = create_workqueue("bcache")) ||
2088 !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || 2095 !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index f4300e4c011..d6525c12c8d 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
323 323
324static bool dirty_pred(struct keybuf *buf, struct bkey *k) 324static bool dirty_pred(struct keybuf *buf, struct bkey *k)
325{ 325{
326 struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
327
328 BUG_ON(KEY_INODE(k) != dc->disk.id);
329
326 return KEY_DIRTY(k); 330 return KEY_DIRTY(k);
327} 331}
328 332
@@ -372,11 +376,24 @@ next:
372 } 376 }
373} 377}
374 378
379/*
380 * Returns true if we scanned the entire disk
381 */
375static bool refill_dirty(struct cached_dev *dc) 382static bool refill_dirty(struct cached_dev *dc)
376{ 383{
377 struct keybuf *buf = &dc->writeback_keys; 384 struct keybuf *buf = &dc->writeback_keys;
385 struct bkey start = KEY(dc->disk.id, 0, 0);
378 struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0); 386 struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
379 bool searched_from_start = false; 387 struct bkey start_pos;
388
389 /*
390 * make sure keybuf pos is inside the range for this disk - at bringup
391 * we might not be attached yet so this disk's inode nr isn't
392 * initialized then
393 */
394 if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
395 bkey_cmp(&buf->last_scanned, &end) > 0)
396 buf->last_scanned = start;
380 397
381 if (dc->partial_stripes_expensive) { 398 if (dc->partial_stripes_expensive) {
382 refill_full_stripes(dc); 399 refill_full_stripes(dc);
@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
384 return false; 401 return false;
385 } 402 }
386 403
387 if (bkey_cmp(&buf->last_scanned, &end) >= 0) { 404 start_pos = buf->last_scanned;
388 buf->last_scanned = KEY(dc->disk.id, 0, 0);
389 searched_from_start = true;
390 }
391
392 bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred); 405 bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
393 406
394 return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start; 407 if (bkey_cmp(&buf->last_scanned, &end) < 0)
408 return false;
409
410 /*
411 * If we get to the end start scanning again from the beginning, and
412 * only scan up to where we initially started scanning from:
413 */
414 buf->last_scanned = start;
415 bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
416
417 return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
395} 418}
396 419
397static int bch_writeback_thread(void *arg) 420static int bch_writeback_thread(void *arg)
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index e2f8598937a..afe7ecada50 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
63 63
64static inline void bch_writeback_queue(struct cached_dev *dc) 64static inline void bch_writeback_queue(struct cached_dev *dc)
65{ 65{
66 wake_up_process(dc->writeback_thread); 66 if (!IS_ERR_OR_NULL(dc->writeback_thread))
67 wake_up_process(dc->writeback_thread);
67} 68}
68 69
69static inline void bch_writeback_add(struct cached_dev *dc) 70static inline void bch_writeback_add(struct cached_dev *dc)
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 0b2536247cf..84e27708ad9 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -70,7 +70,7 @@ struct dm_exception_store_type {
70 * Update the metadata with this exception. 70 * Update the metadata with this exception.
71 */ 71 */
72 void (*commit_exception) (struct dm_exception_store *store, 72 void (*commit_exception) (struct dm_exception_store *store,
73 struct dm_exception *e, 73 struct dm_exception *e, int valid,
74 void (*callback) (void *, int success), 74 void (*callback) (void *, int success),
75 void *callback_context); 75 void *callback_context);
76 76
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index d6e88178d22..d3272acc0f0 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -700,7 +700,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
700} 700}
701 701
702static void persistent_commit_exception(struct dm_exception_store *store, 702static void persistent_commit_exception(struct dm_exception_store *store,
703 struct dm_exception *e, 703 struct dm_exception *e, int valid,
704 void (*callback) (void *, int success), 704 void (*callback) (void *, int success),
705 void *callback_context) 705 void *callback_context)
706{ 706{
@@ -709,6 +709,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
709 struct core_exception ce; 709 struct core_exception ce;
710 struct commit_callback *cb; 710 struct commit_callback *cb;
711 711
712 if (!valid)
713 ps->valid = 0;
714
712 ce.old_chunk = e->old_chunk; 715 ce.old_chunk = e->old_chunk;
713 ce.new_chunk = e->new_chunk; 716 ce.new_chunk = e->new_chunk;
714 write_exception(ps, ps->current_committed++, &ce); 717 write_exception(ps, ps->current_committed++, &ce);
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
index 1ce9a2586e4..31439d53cf7 100644
--- a/drivers/md/dm-snap-transient.c
+++ b/drivers/md/dm-snap-transient.c
@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
52} 52}
53 53
54static void transient_commit_exception(struct dm_exception_store *store, 54static void transient_commit_exception(struct dm_exception_store *store,
55 struct dm_exception *e, 55 struct dm_exception *e, int valid,
56 void (*callback) (void *, int success), 56 void (*callback) (void *, int success),
57 void *callback_context) 57 void *callback_context)
58{ 58{
59 /* Just succeed */ 59 /* Just succeed */
60 callback(callback_context, 1); 60 callback(callback_context, valid);
61} 61}
62 62
63static void transient_usage(struct dm_exception_store *store, 63static void transient_usage(struct dm_exception_store *store,
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c356a10b9ba..2e9117630db 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1388,8 +1388,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
1388 dm_table_event(s->ti->table); 1388 dm_table_event(s->ti->table);
1389} 1389}
1390 1390
1391static void pending_complete(struct dm_snap_pending_exception *pe, int success) 1391static void pending_complete(void *context, int success)
1392{ 1392{
1393 struct dm_snap_pending_exception *pe = context;
1393 struct dm_exception *e; 1394 struct dm_exception *e;
1394 struct dm_snapshot *s = pe->snap; 1395 struct dm_snapshot *s = pe->snap;
1395 struct bio *origin_bios = NULL; 1396 struct bio *origin_bios = NULL;
@@ -1460,24 +1461,13 @@ out:
1460 free_pending_exception(pe); 1461 free_pending_exception(pe);
1461} 1462}
1462 1463
1463static void commit_callback(void *context, int success)
1464{
1465 struct dm_snap_pending_exception *pe = context;
1466
1467 pending_complete(pe, success);
1468}
1469
1470static void complete_exception(struct dm_snap_pending_exception *pe) 1464static void complete_exception(struct dm_snap_pending_exception *pe)
1471{ 1465{
1472 struct dm_snapshot *s = pe->snap; 1466 struct dm_snapshot *s = pe->snap;
1473 1467
1474 if (unlikely(pe->copy_error)) 1468 /* Update the metadata if we are persistent */
1475 pending_complete(pe, 0); 1469 s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
1476 1470 pending_complete, pe);
1477 else
1478 /* Update the metadata if we are persistent */
1479 s->store->type->commit_exception(s->store, &pe->e,
1480 commit_callback, pe);
1481} 1471}
1482 1472
1483/* 1473/*
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 3412b86e79f..7768de60f69 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1205,6 +1205,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
1205 dm_block_t held_root; 1205 dm_block_t held_root;
1206 1206
1207 /* 1207 /*
1208 * We commit to ensure the btree roots which we increment in a
1209 * moment are up to date.
1210 */
1211 __commit_transaction(pmd);
1212
1213 /*
1208 * Copy the superblock. 1214 * Copy the superblock.
1209 */ 1215 */
1210 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION); 1216 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index b94e4648c19..d633a3921b3 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1619,6 +1619,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1619 case PM_WRITE: 1619 case PM_WRITE:
1620 if (old_mode != new_mode) 1620 if (old_mode != new_mode)
1621 notify_of_pool_mode_change(pool, "write"); 1621 notify_of_pool_mode_change(pool, "write");
1622 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
1622 dm_pool_metadata_read_write(pool->pmd); 1623 dm_pool_metadata_read_write(pool->pmd);
1623 pool->process_bio = process_bio; 1624 pool->process_bio = process_bio;
1624 pool->process_discard = process_discard; 1625 pool->process_discard = process_discard;
@@ -2567,8 +2568,8 @@ static void pool_postsuspend(struct dm_target *ti)
2567 struct pool_c *pt = ti->private; 2568 struct pool_c *pt = ti->private;
2568 struct pool *pool = pt->pool; 2569 struct pool *pool = pt->pool;
2569 2570
2570 cancel_delayed_work(&pool->waker); 2571 cancel_delayed_work_sync(&pool->waker);
2571 cancel_delayed_work(&pool->no_space_timeout); 2572 cancel_delayed_work_sync(&pool->no_space_timeout);
2572 flush_workqueue(pool->wq); 2573 flush_workqueue(pool->wq);
2573 (void) commit(pool); 2574 (void) commit(pool);
2574} 2575}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 7ba85e2b146..7b4bb1f09b0 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -250,6 +250,16 @@ static void pop_frame(struct del_stack *s)
250 dm_tm_unlock(s->tm, f->b); 250 dm_tm_unlock(s->tm, f->b);
251} 251}
252 252
253static void unlock_all_frames(struct del_stack *s)
254{
255 struct frame *f;
256
257 while (unprocessed_frames(s)) {
258 f = s->spine + s->top--;
259 dm_tm_unlock(s->tm, f->b);
260 }
261}
262
253int dm_btree_del(struct dm_btree_info *info, dm_block_t root) 263int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
254{ 264{
255 int r; 265 int r;
@@ -306,9 +316,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
306 pop_frame(s); 316 pop_frame(s);
307 } 317 }
308 } 318 }
309
310out: 319out:
320 if (r) {
321 /* cleanup all frames of del_stack */
322 unlock_all_frames(s);
323 }
311 kfree(s); 324 kfree(s);
325
312 return r; 326 return r;
313} 327}
314EXPORT_SYMBOL_GPL(dm_btree_del); 328EXPORT_SYMBOL_GPL(dm_btree_del);
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 199c9ccd1f5..032ee39a0e9 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -136,7 +136,7 @@ static int brb_push(struct bop_ring_buffer *brb,
136 return 0; 136 return 0;
137} 137}
138 138
139static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result) 139static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
140{ 140{
141 struct block_op *bop; 141 struct block_op *bop;
142 142
@@ -147,6 +147,14 @@ static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
147 result->type = bop->type; 147 result->type = bop->type;
148 result->block = bop->block; 148 result->block = bop->block;
149 149
150 return 0;
151}
152
153static int brb_pop(struct bop_ring_buffer *brb)
154{
155 if (brb_empty(brb))
156 return -ENODATA;
157
150 brb->begin = brb_next(brb, brb->begin); 158 brb->begin = brb_next(brb, brb->begin);
151 159
152 return 0; 160 return 0;
@@ -211,7 +219,7 @@ static int apply_bops(struct sm_metadata *smm)
211 while (!brb_empty(&smm->uncommitted)) { 219 while (!brb_empty(&smm->uncommitted)) {
212 struct block_op bop; 220 struct block_op bop;
213 221
214 r = brb_pop(&smm->uncommitted, &bop); 222 r = brb_peek(&smm->uncommitted, &bop);
215 if (r) { 223 if (r) {
216 DMERR("bug in bop ring buffer"); 224 DMERR("bug in bop ring buffer");
217 break; 225 break;
@@ -220,6 +228,8 @@ static int apply_bops(struct sm_metadata *smm)
220 r = commit_bop(smm, &bop); 228 r = commit_bop(smm, &bop);
221 if (r) 229 if (r)
222 break; 230 break;
231
232 brb_pop(&smm->uncommitted);
223 } 233 }
224 234
225 return r; 235 return r;
@@ -681,7 +691,6 @@ static struct dm_space_map bootstrap_ops = {
681static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) 691static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
682{ 692{
683 int r, i; 693 int r, i;
684 enum allocation_event ev;
685 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 694 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
686 dm_block_t old_len = smm->ll.nr_blocks; 695 dm_block_t old_len = smm->ll.nr_blocks;
687 696
@@ -703,11 +712,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
703 * allocate any new blocks. 712 * allocate any new blocks.
704 */ 713 */
705 do { 714 do {
706 for (i = old_len; !r && i < smm->begin; i++) { 715 for (i = old_len; !r && i < smm->begin; i++)
707 r = sm_ll_inc(&smm->ll, i, &ev); 716 r = add_bop(smm, BOP_INC, i);
708 if (r) 717
709 goto out; 718 if (r)
710 } 719 goto out;
720
711 old_len = smm->begin; 721 old_len = smm->begin;
712 722
713 r = apply_bops(smm); 723 r = apply_bops(smm);
@@ -752,7 +762,6 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
752{ 762{
753 int r; 763 int r;
754 dm_block_t i; 764 dm_block_t i;
755 enum allocation_event ev;
756 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 765 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
757 766
758 smm->begin = superblock + 1; 767 smm->begin = superblock + 1;
@@ -780,7 +789,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
780 * allocated blocks that they were built from. 789 * allocated blocks that they were built from.
781 */ 790 */
782 for (i = superblock; !r && i < smm->begin; i++) 791 for (i = superblock; !r && i < smm->begin; i++)
783 r = sm_ll_inc(&smm->ll, i, &ev); 792 r = add_bop(smm, BOP_INC, i);
784 793
785 if (r) 794 if (r)
786 return r; 795 return r;
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 1f925e85697..46a984291b7 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -2195,9 +2195,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
2195 dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n", 2195 dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
2196 __func__, c->delivery_system, fe->ops.info.type); 2196 __func__, c->delivery_system, fe->ops.info.type);
2197 2197
2198 /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't 2198 /* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */
2199 * do it, it is done for it. */ 2199 if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
2200 info->caps |= FE_CAN_INVERSION_AUTO; 2200 info->caps |= FE_CAN_INVERSION_AUTO;
2201 err = 0; 2201 err = 0;
2202 break; 2202 break;
2203 } 2203 }
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index a2631be7ffa..08e0f0dd872 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
903{ 903{
904 struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache; 904 struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
905 struct tda1004x_state* state = fe->demodulator_priv; 905 struct tda1004x_state* state = fe->demodulator_priv;
906 int status;
906 907
907 dprintk("%s\n", __func__); 908 dprintk("%s\n", __func__);
908 909
910 status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
911 if (status == -1)
912 return -EIO;
913
914 /* Only update the properties cache if device is locked */
915 if (!(status & 8))
916 return 0;
917
909 // inversion status 918 // inversion status
910 fe_params->inversion = INVERSION_OFF; 919 fe_params->inversion = INVERSION_OFF;
911 if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20) 920 if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index 90f0d637cd9..cd05840abc9 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -1490,8 +1490,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
1490 struct v4l2_fract *tpf = &cp->timeperframe; 1490 struct v4l2_fract *tpf = &cp->timeperframe;
1491 struct sd *sd = (struct sd *) gspca_dev; 1491 struct sd *sd = (struct sd *) gspca_dev;
1492 1492
1493 /* Set requested framerate */ 1493 if (tpf->numerator == 0 || tpf->denominator == 0)
1494 sd->frame_rate = tpf->denominator / tpf->numerator; 1494 /* Set default framerate */
1495 sd->frame_rate = 30;
1496 else
1497 /* Set requested framerate */
1498 sd->frame_rate = tpf->denominator / tpf->numerator;
1499
1495 if (gspca_dev->streaming) 1500 if (gspca_dev->streaming)
1496 set_frame_rate(gspca_dev); 1501 set_frame_rate(gspca_dev);
1497 1502
diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
index 640c2fe760b..a6fbb2a0797 100644
--- a/drivers/media/usb/gspca/topro.c
+++ b/drivers/media/usb/gspca/topro.c
@@ -4792,7 +4792,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
4792 struct v4l2_fract *tpf = &cp->timeperframe; 4792 struct v4l2_fract *tpf = &cp->timeperframe;
4793 int fr, i; 4793 int fr, i;
4794 4794
4795 sd->framerate = tpf->denominator / tpf->numerator; 4795 if (tpf->numerator == 0 || tpf->denominator == 0)
4796 sd->framerate = 30;
4797 else
4798 sd->framerate = tpf->denominator / tpf->numerator;
4799
4796 if (gspca_dev->streaming) 4800 if (gspca_dev->streaming)
4797 setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure)); 4801 setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
4798 4802
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index d71f5ef036e..92aeb1d2b41 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -62,8 +62,7 @@ MODULE_ALIAS("mmc:block");
62#define MMC_SANITIZE_REQ_TIMEOUT 240000 62#define MMC_SANITIZE_REQ_TIMEOUT 240000
63#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) 63#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
64 64
65#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ 65#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
66 (req->cmd_flags & REQ_META)) && \
67 (rq_data_dir(req) == WRITE)) 66 (rq_data_dir(req) == WRITE))
68#define PACKED_CMD_VER 0x01 67#define PACKED_CMD_VER 0x01
69#define PACKED_CMD_WR 0x02 68#define PACKED_CMD_WR 0x02
@@ -1328,13 +1327,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1328 1327
1329 /* 1328 /*
1330 * Reliable writes are used to implement Forced Unit Access and 1329 * Reliable writes are used to implement Forced Unit Access and
1331 * REQ_META accesses, and are supported only on MMCs. 1330 * are supported only on MMCs.
1332 *
1333 * XXX: this really needs a good explanation of why REQ_META
1334 * is treated special.
1335 */ 1331 */
1336 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || 1332 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1337 (req->cmd_flags & REQ_META)) &&
1338 (rq_data_dir(req) == WRITE) && 1333 (rq_data_dir(req) == WRITE) &&
1339 (md->flags & MMC_BLK_REL_WR); 1334 (md->flags & MMC_BLK_REL_WR);
1340 1335
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 4d721c6e2af..ae360b3b4fd 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -670,7 +670,7 @@ try_again:
670 */ 670 */
671 if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) { 671 if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
672 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 672 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
673 ocr); 673 ocr_card);
674 if (err == -EAGAIN) { 674 if (err == -EAGAIN) {
675 sdio_reset(host); 675 sdio_reset(host);
676 mmc_go_idle(host); 676 mmc_go_idle(host);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index b9312263653..8103db25db6 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1860,7 +1860,7 @@ static struct amba_id mmci_ids[] = {
1860 { 1860 {
1861 .id = 0x00280180, 1861 .id = 0x00280180,
1862 .mask = 0x00ffffff, 1862 .mask = 0x00ffffff,
1863 .data = &variant_u300, 1863 .data = &variant_nomadik,
1864 }, 1864 },
1865 { 1865 {
1866 .id = 0x00480180, 1866 .id = 0x00480180,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 881bf89acfc..75d3c28940f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2663,7 +2663,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
2663 2663
2664static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 2664static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
2665{ 2665{
2666 if (host->runtime_suspended || host->bus_on) 2666 if (host->bus_on)
2667 return; 2667 return;
2668 host->bus_on = true; 2668 host->bus_on = true;
2669 pm_runtime_get_noresume(host->mmc->parent); 2669 pm_runtime_get_noresume(host->mmc->parent);
@@ -2671,7 +2671,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
2671 2671
2672static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) 2672static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
2673{ 2673{
2674 if (host->runtime_suspended || !host->bus_on) 2674 if (!host->bus_on)
2675 return; 2675 return;
2676 host->bus_on = false; 2676 host->bus_on = false;
2677 pm_runtime_put_noidle(host->mmc->parent); 2677 pm_runtime_put_noidle(host->mmc->parent);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index c2d0559115d..732a8ed571c 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -187,6 +187,9 @@ static void sja1000_start(struct net_device *dev)
187 /* clear interrupt flags */ 187 /* clear interrupt flags */
188 priv->read_reg(priv, SJA1000_IR); 188 priv->read_reg(priv, SJA1000_IR);
189 189
190 /* clear interrupt flags */
191 priv->read_reg(priv, SJA1000_IR);
192
190 /* leave reset mode */ 193 /* leave reset mode */
191 set_normal_mode(dev); 194 set_normal_mode(dev);
192} 195}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 52c42fd4951..a5735a7797f 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
117 */ 117 */
118#define EMS_USB_ARM7_CLOCK 8000000 118#define EMS_USB_ARM7_CLOCK 8000000
119 119
120#define CPC_TX_QUEUE_TRIGGER_LOW 25
121#define CPC_TX_QUEUE_TRIGGER_HIGH 35
122
120/* 123/*
121 * CAN-Message representation in a CPC_MSG. Message object type is 124 * CAN-Message representation in a CPC_MSG. Message object type is
122 * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or 125 * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
278 switch (urb->status) { 281 switch (urb->status) {
279 case 0: 282 case 0:
280 dev->free_slots = dev->intr_in_buffer[1]; 283 dev->free_slots = dev->intr_in_buffer[1];
284 if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
285 if (netif_queue_stopped(netdev)){
286 netif_wake_queue(netdev);
287 }
288 }
281 break; 289 break;
282 290
283 case -ECONNRESET: /* unlink */ 291 case -ECONNRESET: /* unlink */
@@ -529,8 +537,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
529 /* Release context */ 537 /* Release context */
530 context->echo_index = MAX_TX_URBS; 538 context->echo_index = MAX_TX_URBS;
531 539
532 if (netif_queue_stopped(netdev))
533 netif_wake_queue(netdev);
534} 540}
535 541
536/* 542/*
@@ -590,7 +596,7 @@ static int ems_usb_start(struct ems_usb *dev)
590 int err, i; 596 int err, i;
591 597
592 dev->intr_in_buffer[0] = 0; 598 dev->intr_in_buffer[0] = 0;
593 dev->free_slots = 15; /* initial size */ 599 dev->free_slots = 50; /* initial size */
594 600
595 for (i = 0; i < MAX_RX_URBS; i++) { 601 for (i = 0; i < MAX_RX_URBS; i++) {
596 struct urb *urb = NULL; 602 struct urb *urb = NULL;
@@ -841,7 +847,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
841 847
842 /* Slow down tx path */ 848 /* Slow down tx path */
843 if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || 849 if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
844 dev->free_slots < 5) { 850 dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
845 netif_stop_queue(netdev); 851 netif_stop_queue(netdev);
846 } 852 }
847 } 853 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 1fbeaa9dd20..6ef93562c6b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2401,10 +2401,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2401 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ 2401 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
2402 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) 2402 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
2403 2403
2404#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ 2404#define HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD \
2405 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ 2405 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
2406 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ 2406 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
2407 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) 2407 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
2408
2409#define HW_PRTY_ASSERT_SET_3 (HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD | \
2410 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
2408 2411
2409#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ 2412#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
2410 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) 2413 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 242874041ba..e157adb85b2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4631,9 +4631,7 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4631 res |= true; 4631 res |= true;
4632 break; 4632 break;
4633 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4633 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4634 if (print) 4634 (*par_num)++;
4635 _print_next_block((*par_num)++,
4636 "MCP SCPAD");
4637 /* clear latched SCPAD PATIRY from MCP */ 4635 /* clear latched SCPAD PATIRY from MCP */
4638 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 4636 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4639 1UL << 10); 4637 1UL << 10);
@@ -4695,6 +4693,7 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4695 (sig[3] & HW_PRTY_ASSERT_SET_3) || 4693 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4696 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 4694 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4697 int par_num = 0; 4695 int par_num = 0;
4696
4698 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" 4697 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4699 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 4698 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4700 sig[0] & HW_PRTY_ASSERT_SET_0, 4699 sig[0] & HW_PRTY_ASSERT_SET_0,
@@ -4702,9 +4701,18 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4702 sig[2] & HW_PRTY_ASSERT_SET_2, 4701 sig[2] & HW_PRTY_ASSERT_SET_2,
4703 sig[3] & HW_PRTY_ASSERT_SET_3, 4702 sig[3] & HW_PRTY_ASSERT_SET_3,
4704 sig[4] & HW_PRTY_ASSERT_SET_4); 4703 sig[4] & HW_PRTY_ASSERT_SET_4);
4705 if (print) 4704 if (print) {
4706 netdev_err(bp->dev, 4705 if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4707 "Parity errors detected in blocks: "); 4706 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4707 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4708 (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4709 (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4710 netdev_err(bp->dev,
4711 "Parity errors detected in blocks: ");
4712 } else {
4713 print = false;
4714 }
4715 }
4708 res |= bnx2x_check_blocks_with_parity0(bp, 4716 res |= bnx2x_check_blocks_with_parity0(bp,
4709 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); 4717 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4710 res |= bnx2x_check_blocks_with_parity1(bp, 4718 res |= bnx2x_check_blocks_with_parity1(bp,
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 988f9fec0bf..d8c1b69d0f6 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
367 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, 367 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
368 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, 368 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
369 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, 371 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, 372 {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, 373 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
@@ -383,10 +384,10 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
383 {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, 384 {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
384 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, 385 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
385 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, 386 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
386 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, 387 {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
387 {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, 388 {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
388 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, 389 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
389 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, 390 {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
390 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, 391 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
391 {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, 392 {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
392 {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, 393 {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 7c7a388c85a..126f641a958 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -1133,8 +1133,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
1133{ 1133{
1134 pci_lock_rescan_remove(); 1134 pci_lock_rescan_remove();
1135 1135
1136 if (slot->flags & SLOT_IS_GOING_AWAY) 1136 if (slot->flags & SLOT_IS_GOING_AWAY) {
1137 pci_unlock_rescan_remove();
1137 return -ENODEV; 1138 return -ENODEV;
1139 }
1138 1140
1139 mutex_lock(&slot->crit_sect); 1141 mutex_lock(&slot->crit_sect);
1140 /* configure all functions */ 1142 /* configure all functions */
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 0bf82a20a0f..48d21e0edd5 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
262 rpc->rpd = dev; 262 rpc->rpd = dev;
263 INIT_WORK(&rpc->dpc_handler, aer_isr); 263 INIT_WORK(&rpc->dpc_handler, aer_isr);
264 mutex_init(&rpc->rpc_mutex); 264 mutex_init(&rpc->rpc_mutex);
265 init_waitqueue_head(&rpc->wait_release);
266 265
267 /* Use PCIe bus function to store rpc into PCIe device */ 266 /* Use PCIe bus function to store rpc into PCIe device */
268 set_service_data(dev, rpc); 267 set_service_data(dev, rpc);
@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
285 if (rpc->isr) 284 if (rpc->isr)
286 free_irq(dev->irq, dev); 285 free_irq(dev->irq, dev);
287 286
288 wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); 287 flush_work(&rpc->dpc_handler);
289
290 aer_disable_rootport(rpc); 288 aer_disable_rootport(rpc);
291 kfree(rpc); 289 kfree(rpc);
292 set_service_data(dev, NULL); 290 set_service_data(dev, NULL);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 84420b7c945..945c939a86c 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -72,7 +72,6 @@ struct aer_rpc {
72 * recovery on the same 72 * recovery on the same
73 * root port hierarchy 73 * root port hierarchy
74 */ 74 */
75 wait_queue_head_t wait_release;
76}; 75};
77 76
78struct aer_broadcast_data { 77struct aer_broadcast_data {
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index b2c8881da76..777edcc4aab 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -785,8 +785,6 @@ void aer_isr(struct work_struct *work)
785 while (get_e_source(rpc, &e_src)) 785 while (get_e_source(rpc, &e_src))
786 aer_isr_one_error(p_device, &e_src); 786 aer_isr_one_error(p_device, &e_src);
787 mutex_unlock(&rpc->rpc_mutex); 787 mutex_unlock(&rpc->rpc_mutex);
788
789 wake_up(&rpc->wait_release);
790} 788}
791 789
792/** 790/**
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 179b8edc226..318d535e337 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -52,7 +52,7 @@ struct pcifront_device {
52}; 52};
53 53
54struct pcifront_sd { 54struct pcifront_sd {
55 int domain; 55 struct pci_sysdata sd;
56 struct pcifront_device *pdev; 56 struct pcifront_device *pdev;
57}; 57};
58 58
@@ -66,7 +66,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
66 unsigned int domain, unsigned int bus, 66 unsigned int domain, unsigned int bus,
67 struct pcifront_device *pdev) 67 struct pcifront_device *pdev)
68{ 68{
69 sd->domain = domain; 69 /* Because we do not expose that information via XenBus. */
70 sd->sd.node = first_online_node;
71 sd->sd.domain = domain;
70 sd->pdev = pdev; 72 sd->pdev = pdev;
71} 73}
72 74
@@ -464,8 +466,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
464 dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n", 466 dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
465 domain, bus); 467 domain, bus);
466 468
467 bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL); 469 bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
468 sd = kmalloc(sizeof(*sd), GFP_KERNEL); 470 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
469 if (!bus_entry || !sd) { 471 if (!bus_entry || !sd) {
470 err = -ENOMEM; 472 err = -ENOMEM;
471 goto err_out; 473 goto err_out;
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 3bed2f55cf7..3ccadf631d4 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -567,7 +567,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
567 567
568 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO")); 568 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
569 ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq, 569 ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
570 IRQF_TRIGGER_RISING, "System power low", 570 IRQF_TRIGGER_RISING | IRQF_ONESHOT, "System power low",
571 power); 571 power);
572 if (ret != 0) { 572 if (ret != 0) {
573 dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n", 573 dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
@@ -577,7 +577,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
577 577
578 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC")); 578 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
579 ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq, 579 ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
580 IRQF_TRIGGER_RISING, "Power source", 580 IRQF_TRIGGER_RISING | IRQF_ONESHOT, "Power source",
581 power); 581 power);
582 if (ret != 0) { 582 if (ret != 0) {
583 dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n", 583 dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
@@ -590,7 +590,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
590 platform_get_irq_byname(pdev, 590 platform_get_irq_byname(pdev,
591 wm831x_bat_irqs[i])); 591 wm831x_bat_irqs[i]));
592 ret = request_threaded_irq(irq, NULL, wm831x_bat_irq, 592 ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
593 IRQF_TRIGGER_RISING, 593 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
594 wm831x_bat_irqs[i], 594 wm831x_bat_irqs[i],
595 power); 595 power);
596 if (ret != 0) { 596 if (ret != 0) {
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 3c6768378a9..4048d7f5bab 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1194,10 +1194,13 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
1194 1194
1195 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { 1195 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
1196 /* check if the domain is locked by BIOS */ 1196 /* check if the domain is locked by BIOS */
1197 if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) { 1197 ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked);
1198 if (ret)
1199 return ret;
1200 if (locked) {
1198 pr_info("RAPL package %d domain %s locked by BIOS\n", 1201 pr_info("RAPL package %d domain %s locked by BIOS\n",
1199 rp->id, rd->name); 1202 rp->id, rd->name);
1200 rd->state |= DOMAIN_STATE_BIOS_LOCKED; 1203 rd->state |= DOMAIN_STATE_BIOS_LOCKED;
1201 } 1204 }
1202 } 1205 }
1203 1206
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index a2597e683e7..6a64e86e8cc 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
264 spin_unlock_irqrestore(&lcu->lock, flags); 264 spin_unlock_irqrestore(&lcu->lock, flags);
265 cancel_work_sync(&lcu->suc_data.worker); 265 cancel_work_sync(&lcu->suc_data.worker);
266 spin_lock_irqsave(&lcu->lock, flags); 266 spin_lock_irqsave(&lcu->lock, flags);
267 if (device == lcu->suc_data.device) 267 if (device == lcu->suc_data.device) {
268 dasd_put_device(device);
268 lcu->suc_data.device = NULL; 269 lcu->suc_data.device = NULL;
270 }
269 } 271 }
270 was_pending = 0; 272 was_pending = 0;
271 if (device == lcu->ruac_data.device) { 273 if (device == lcu->ruac_data.device) {
@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
273 was_pending = 1; 275 was_pending = 1;
274 cancel_delayed_work_sync(&lcu->ruac_data.dwork); 276 cancel_delayed_work_sync(&lcu->ruac_data.dwork);
275 spin_lock_irqsave(&lcu->lock, flags); 277 spin_lock_irqsave(&lcu->lock, flags);
276 if (device == lcu->ruac_data.device) 278 if (device == lcu->ruac_data.device) {
279 dasd_put_device(device);
277 lcu->ruac_data.device = NULL; 280 lcu->ruac_data.device = NULL;
281 }
278 } 282 }
279 private->lcu = NULL; 283 private->lcu = NULL;
280 spin_unlock_irqrestore(&lcu->lock, flags); 284 spin_unlock_irqrestore(&lcu->lock, flags);
@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
549 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) { 553 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
550 DBF_DEV_EVENT(DBF_WARNING, device, "could not update" 554 DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
551 " alias data in lcu (rc = %d), retry later", rc); 555 " alias data in lcu (rc = %d), retry later", rc);
552 schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); 556 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
557 dasd_put_device(device);
553 } else { 558 } else {
559 dasd_put_device(device);
554 lcu->ruac_data.device = NULL; 560 lcu->ruac_data.device = NULL;
555 lcu->flags &= ~UPDATE_PENDING; 561 lcu->flags &= ~UPDATE_PENDING;
556 } 562 }
@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
593 */ 599 */
594 if (!usedev) 600 if (!usedev)
595 return -EINVAL; 601 return -EINVAL;
602 dasd_get_device(usedev);
596 lcu->ruac_data.device = usedev; 603 lcu->ruac_data.device = usedev;
597 schedule_delayed_work(&lcu->ruac_data.dwork, 0); 604 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
605 dasd_put_device(usedev);
598 return 0; 606 return 0;
599} 607}
600 608
@@ -722,7 +730,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
722 ASCEBC((char *) &cqr->magic, 4); 730 ASCEBC((char *) &cqr->magic, 4);
723 ccw = cqr->cpaddr; 731 ccw = cqr->cpaddr;
724 ccw->cmd_code = DASD_ECKD_CCW_RSCK; 732 ccw->cmd_code = DASD_ECKD_CCW_RSCK;
725 ccw->flags = 0 ; 733 ccw->flags = CCW_FLAG_SLI;
726 ccw->count = 16; 734 ccw->count = 16;
727 ccw->cda = (__u32)(addr_t) cqr->data; 735 ccw->cda = (__u32)(addr_t) cqr->data;
728 ((char *)cqr->data)[0] = reason; 736 ((char *)cqr->data)[0] = reason;
@@ -926,6 +934,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
926 /* 3. read new alias configuration */ 934 /* 3. read new alias configuration */
927 _schedule_lcu_update(lcu, device); 935 _schedule_lcu_update(lcu, device);
928 lcu->suc_data.device = NULL; 936 lcu->suc_data.device = NULL;
937 dasd_put_device(device);
929 spin_unlock_irqrestore(&lcu->lock, flags); 938 spin_unlock_irqrestore(&lcu->lock, flags);
930} 939}
931 940
@@ -985,6 +994,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
985 } 994 }
986 lcu->suc_data.reason = reason; 995 lcu->suc_data.reason = reason;
987 lcu->suc_data.device = device; 996 lcu->suc_data.device = device;
997 dasd_get_device(device);
988 spin_unlock(&lcu->lock); 998 spin_unlock(&lcu->lock);
989 schedule_work(&lcu->suc_data.worker); 999 if (!schedule_work(&lcu->suc_data.worker))
1000 dasd_put_device(device);
990}; 1001};
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 34452ea386a..52636cfbab8 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -334,6 +334,8 @@ enum MR_EVT_ARGS {
334 MR_EVT_ARGS_GENERIC, 334 MR_EVT_ARGS_GENERIC,
335}; 335};
336 336
337
338#define SGE_BUFFER_SIZE 4096
337/* 339/*
338 * define constants for device list query options 340 * define constants for device list query options
339 */ 341 */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index c80afde97e9..9f833f1504c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3821,7 +3821,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
3821 } 3821 }
3822 } 3822 }
3823 instance->max_sectors_per_req = instance->max_num_sge * 3823 instance->max_sectors_per_req = instance->max_num_sge *
3824 PAGE_SIZE / 512; 3824 SGE_BUFFER_SIZE / 512;
3825 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 3825 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
3826 instance->max_sectors_per_req = tmp_sectors; 3826 instance->max_sectors_per_req = tmp_sectors;
3827 3827
@@ -5281,6 +5281,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
5281 int i; 5281 int i;
5282 int error = 0; 5282 int error = 0;
5283 compat_uptr_t ptr; 5283 compat_uptr_t ptr;
5284 unsigned long local_raw_ptr;
5285 u32 local_sense_off;
5286 u32 local_sense_len;
5284 5287
5285 if (clear_user(ioc, sizeof(*ioc))) 5288 if (clear_user(ioc, sizeof(*ioc)))
5286 return -EFAULT; 5289 return -EFAULT;
@@ -5298,9 +5301,15 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
5298 * sense_len is not null, so prepare the 64bit value under 5301 * sense_len is not null, so prepare the 64bit value under
5299 * the same condition. 5302 * the same condition.
5300 */ 5303 */
5301 if (ioc->sense_len) { 5304 if (get_user(local_raw_ptr, ioc->frame.raw) ||
5305 get_user(local_sense_off, &ioc->sense_off) ||
5306 get_user(local_sense_len, &ioc->sense_len))
5307 return -EFAULT;
5308
5309
5310 if (local_sense_len) {
5302 void __user **sense_ioc_ptr = 5311 void __user **sense_ioc_ptr =
5303 (void __user **)(ioc->frame.raw + ioc->sense_off); 5312 (void __user **)((u8*)local_raw_ptr + local_sense_off);
5304 compat_uptr_t *sense_cioc_ptr = 5313 compat_uptr_t *sense_cioc_ptr =
5305 (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off); 5314 (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
5306 if (get_user(ptr, sense_cioc_ptr) || 5315 if (get_user(ptr, sense_cioc_ptr) ||
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index eba183c428c..3643bbf5456 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -70,6 +70,7 @@ static int ses_probe(struct device *dev)
70static int ses_recv_diag(struct scsi_device *sdev, int page_code, 70static int ses_recv_diag(struct scsi_device *sdev, int page_code,
71 void *buf, int bufflen) 71 void *buf, int bufflen)
72{ 72{
73 int ret;
73 unsigned char cmd[] = { 74 unsigned char cmd[] = {
74 RECEIVE_DIAGNOSTIC, 75 RECEIVE_DIAGNOSTIC,
75 1, /* Set PCV bit */ 76 1, /* Set PCV bit */
@@ -78,9 +79,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
78 bufflen & 0xff, 79 bufflen & 0xff,
79 0 80 0
80 }; 81 };
82 unsigned char recv_page_code;
81 83
82 return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 84 ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
83 NULL, SES_TIMEOUT, SES_RETRIES, NULL); 85 NULL, SES_TIMEOUT, SES_RETRIES, NULL);
86 if (unlikely(!ret))
87 return ret;
88
89 recv_page_code = ((unsigned char *)buf)[0];
90
91 if (likely(recv_page_code == page_code))
92 return ret;
93
94 /* successful diagnostic but wrong page code. This happens to some
95 * USB devices, just print a message and pretend there was an error */
96
97 sdev_printk(KERN_ERR, sdev,
98 "Wrong diagnostic page; asked for %d got %u\n",
99 page_code, recv_page_code);
100
101 return -EINVAL;
84} 102}
85 103
86static int ses_send_diag(struct scsi_device *sdev, int page_code, 104static int ses_send_diag(struct scsi_device *sdev, int page_code,
@@ -436,7 +454,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
436 if (desc_ptr) 454 if (desc_ptr)
437 desc_ptr += len; 455 desc_ptr += len;
438 456
439 if (addl_desc_ptr) 457 if (addl_desc_ptr &&
458 /* only find additional descriptions for specific devices */
459 (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
460 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE ||
461 type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER ||
462 /* these elements are optional */
463 type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
464 type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
465 type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
440 addl_desc_ptr += addl_desc_ptr[1] + 2; 466 addl_desc_ptr += addl_desc_ptr[1] + 2;
441 467
442 } 468 }
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3bb6646bb40..f9da66fa850 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1610,8 +1610,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
1610 vm_srb->win8_extension.time_out_value = 60; 1610 vm_srb->win8_extension.time_out_value = 60;
1611 1611
1612 vm_srb->win8_extension.srb_flags |= 1612 vm_srb->win8_extension.srb_flags |=
1613 (SRB_FLAGS_QUEUE_ACTION_ENABLE | 1613 SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
1614 SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
1615 1614
1616 /* Build the SRB */ 1615 /* Build the SRB */
1617 switch (scmnd->sc_data_direction) { 1616 switch (scmnd->sc_data_direction) {
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index f89b24a09b1..231d63caa66 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -314,7 +314,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
314 return 0; 314 return 0;
315} 315}
316 316
317static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) 317static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
318 int *post_ret)
318{ 319{
319 unsigned char *buf, *addr; 320 unsigned char *buf, *addr;
320 struct scatterlist *sg; 321 struct scatterlist *sg;
@@ -378,7 +379,8 @@ sbc_execute_rw(struct se_cmd *cmd)
378 cmd->data_direction); 379 cmd->data_direction);
379} 380}
380 381
381static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) 382static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
383 int *post_ret)
382{ 384{
383 struct se_device *dev = cmd->se_dev; 385 struct se_device *dev = cmd->se_dev;
384 386
@@ -388,8 +390,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
388 * sent to the backend driver. 390 * sent to the backend driver.
389 */ 391 */
390 spin_lock_irq(&cmd->t_state_lock); 392 spin_lock_irq(&cmd->t_state_lock);
391 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) 393 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
392 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 394 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
395 *post_ret = 1;
396 }
393 spin_unlock_irq(&cmd->t_state_lock); 397 spin_unlock_irq(&cmd->t_state_lock);
394 398
395 /* 399 /*
@@ -401,7 +405,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
401 return TCM_NO_SENSE; 405 return TCM_NO_SENSE;
402} 406}
403 407
404static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) 408static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
409 int *post_ret)
405{ 410{
406 struct se_device *dev = cmd->se_dev; 411 struct se_device *dev = cmd->se_dev;
407 struct scatterlist *write_sg = NULL, *sg; 412 struct scatterlist *write_sg = NULL, *sg;
@@ -497,11 +502,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
497 502
498 if (block_size < PAGE_SIZE) { 503 if (block_size < PAGE_SIZE) {
499 sg_set_page(&write_sg[i], m.page, block_size, 504 sg_set_page(&write_sg[i], m.page, block_size,
500 block_size); 505 m.piter.sg->offset + block_size);
501 } else { 506 } else {
502 sg_miter_next(&m); 507 sg_miter_next(&m);
503 sg_set_page(&write_sg[i], m.page, block_size, 508 sg_set_page(&write_sg[i], m.page, block_size,
504 0); 509 m.piter.sg->offset);
505 } 510 }
506 len -= block_size; 511 len -= block_size;
507 i++; 512 i++;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6fc38903046..7afea9b59e2 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1581,7 +1581,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
1581void transport_generic_request_failure(struct se_cmd *cmd, 1581void transport_generic_request_failure(struct se_cmd *cmd,
1582 sense_reason_t sense_reason) 1582 sense_reason_t sense_reason)
1583{ 1583{
1584 int ret = 0; 1584 int ret = 0, post_ret = 0;
1585 1585
1586 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1586 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1587 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1587 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
@@ -1604,7 +1604,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1604 */ 1604 */
1605 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1605 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
1606 cmd->transport_complete_callback) 1606 cmd->transport_complete_callback)
1607 cmd->transport_complete_callback(cmd, false); 1607 cmd->transport_complete_callback(cmd, false, &post_ret);
1608 1608
1609 switch (sense_reason) { 1609 switch (sense_reason) {
1610 case TCM_NON_EXISTENT_LUN: 1610 case TCM_NON_EXISTENT_LUN:
@@ -1940,11 +1940,13 @@ static void target_complete_ok_work(struct work_struct *work)
1940 */ 1940 */
1941 if (cmd->transport_complete_callback) { 1941 if (cmd->transport_complete_callback) {
1942 sense_reason_t rc; 1942 sense_reason_t rc;
1943 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
1944 bool zero_dl = !(cmd->data_length);
1945 int post_ret = 0;
1943 1946
1944 rc = cmd->transport_complete_callback(cmd, true); 1947 rc = cmd->transport_complete_callback(cmd, true, &post_ret);
1945 if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { 1948 if (!rc && !post_ret) {
1946 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1949 if (caw && zero_dl)
1947 !cmd->data_length)
1948 goto queue_rsp; 1950 goto queue_rsp;
1949 1951
1950 return; 1952 return;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 2aca8871563..584514c7ed1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1810,6 +1810,11 @@ static const struct usb_device_id acm_ids[] = {
1810 }, 1810 },
1811#endif 1811#endif
1812 1812
1813 /*Samsung phone in firmware update mode */
1814 { USB_DEVICE(0x04e8, 0x685d),
1815 .driver_info = IGNORE_DEVICE,
1816 },
1817
1813 /* Exclude Infineon Flash Loader utility */ 1818 /* Exclude Infineon Flash Loader utility */
1814 { USB_DEVICE(0x058b, 0x0041), 1819 { USB_DEVICE(0x058b, 0x0041),
1815 .driver_info = IGNORE_DEVICE, 1820 .driver_info = IGNORE_DEVICE,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 02e6fe228a6..21bf168981f 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
162 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 162 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
163 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 163 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
164 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ 164 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
165 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
166 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
165 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ 167 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
166 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ 168 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
167 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ 169 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 81f6a572f01..9bab34cf01d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
315#define TOSHIBA_PRODUCT_G450 0x0d45 315#define TOSHIBA_PRODUCT_G450 0x0d45
316 316
317#define ALINK_VENDOR_ID 0x1e0e 317#define ALINK_VENDOR_ID 0x1e0e
318#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
318#define ALINK_PRODUCT_PH300 0x9100 319#define ALINK_PRODUCT_PH300 0x9100
319#define ALINK_PRODUCT_3GU 0x9200 320#define ALINK_PRODUCT_3GU 0x9200
320 321
@@ -615,6 +616,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
615 .reserved = BIT(3) | BIT(4), 616 .reserved = BIT(3) | BIT(4),
616}; 617};
617 618
619static const struct option_blacklist_info simcom_sim7100e_blacklist = {
620 .reserved = BIT(5) | BIT(6),
621};
622
618static const struct option_blacklist_info telit_le910_blacklist = { 623static const struct option_blacklist_info telit_le910_blacklist = {
619 .sendsetup = BIT(0), 624 .sendsetup = BIT(0),
620 .reserved = BIT(1) | BIT(2), 625 .reserved = BIT(1) | BIT(2),
@@ -1130,6 +1135,8 @@ static const struct usb_device_id option_ids[] = {
1130 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, 1135 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
1131 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 1136 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
1132 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ 1137 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
1138 { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
1139 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1133 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1140 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1134 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1141 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1135 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1142 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = {
1645 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, 1652 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
1646 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, 1653 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
1647 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, 1654 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
1655 { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1656 .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
1648 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), 1657 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1649 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist 1658 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
1650 }, 1659 },
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f48d5fc352a..469051b01fb 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2336,6 +2336,7 @@ int open_ctree(struct super_block *sb,
2336 if (btrfs_check_super_csum(bh->b_data)) { 2336 if (btrfs_check_super_csum(bh->b_data)) {
2337 printk(KERN_ERR "BTRFS: superblock checksum mismatch\n"); 2337 printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
2338 err = -EINVAL; 2338 err = -EINVAL;
2339 brelse(bh);
2339 goto fail_alloc; 2340 goto fail_alloc;
2340 } 2341 }
2341 2342
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 08824fe6ef4..fb37441a592 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7511,15 +7511,28 @@ int btrfs_readpage(struct file *file, struct page *page)
7511static int btrfs_writepage(struct page *page, struct writeback_control *wbc) 7511static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
7512{ 7512{
7513 struct extent_io_tree *tree; 7513 struct extent_io_tree *tree;
7514 7514 struct inode *inode = page->mapping->host;
7515 int ret;
7515 7516
7516 if (current->flags & PF_MEMALLOC) { 7517 if (current->flags & PF_MEMALLOC) {
7517 redirty_page_for_writepage(wbc, page); 7518 redirty_page_for_writepage(wbc, page);
7518 unlock_page(page); 7519 unlock_page(page);
7519 return 0; 7520 return 0;
7520 } 7521 }
7522
7523 /*
7524 * If we are under memory pressure we will call this directly from the
7525 * VM, we need to make sure we have the inode referenced for the ordered
7526 * extent. If not just return like we didn't do anything.
7527 */
7528 if (!igrab(inode)) {
7529 redirty_page_for_writepage(wbc, page);
7530 return AOP_WRITEPAGE_ACTIVATE;
7531 }
7521 tree = &BTRFS_I(page->mapping->host)->io_tree; 7532 tree = &BTRFS_I(page->mapping->host)->io_tree;
7522 return extent_write_full_page(tree, page, btrfs_get_extent, wbc); 7533 ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
7534 btrfs_add_delayed_iput(inode);
7535 return ret;
7523} 7536}
7524 7537
7525static int btrfs_writepages(struct address_space *mapping, 7538static int btrfs_writepages(struct address_space *mapping,
@@ -8612,9 +8625,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
8612 /* 8625 /*
8613 * 2 items for inode item and ref 8626 * 2 items for inode item and ref
8614 * 2 items for dir items 8627 * 2 items for dir items
8628 * 1 item for updating parent inode item
8629 * 1 item for the inline extent item
8615 * 1 item for xattr if selinux is on 8630 * 1 item for xattr if selinux is on
8616 */ 8631 */
8617 trans = btrfs_start_transaction(root, 5); 8632 trans = btrfs_start_transaction(root, 7);
8618 if (IS_ERR(trans)) 8633 if (IS_ERR(trans))
8619 return PTR_ERR(trans); 8634 return PTR_ERR(trans);
8620 8635
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 20d79354209..0fd23ab3b4a 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1377,7 +1377,21 @@ static int read_symlink(struct btrfs_root *root,
1377 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1377 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1378 if (ret < 0) 1378 if (ret < 0)
1379 goto out; 1379 goto out;
1380 BUG_ON(ret); 1380 if (ret) {
1381 /*
1382 * An empty symlink inode. Can happen in rare error paths when
1383 * creating a symlink (transaction committed before the inode
1384 * eviction handler removed the symlink inode items and a crash
1385 * happened in between or the subvol was snapshoted in between).
1386 * Print an informative message to dmesg/syslog so that the user
1387 * can delete the symlink.
1388 */
1389 btrfs_err(root->fs_info,
1390 "Found empty symlink inode %llu at root %llu",
1391 ino, root->root_key.objectid);
1392 ret = -EIO;
1393 goto out;
1394 }
1381 1395
1382 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 1396 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1383 struct btrfs_file_extent_item); 1397 struct btrfs_file_extent_item);
diff --git a/fs/dcache.c b/fs/dcache.c
index e285b6b9cbc..e072fd75869 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -439,42 +439,12 @@ void d_drop(struct dentry *dentry)
439} 439}
440EXPORT_SYMBOL(d_drop); 440EXPORT_SYMBOL(d_drop);
441 441
442/* 442static void __dentry_kill(struct dentry *dentry)
443 * Finish off a dentry we've decided to kill.
444 * dentry->d_lock must be held, returns with it unlocked.
445 * If ref is non-zero, then decrement the refcount too.
446 * Returns dentry requiring refcount drop, or NULL if we're done.
447 */
448static struct dentry *
449dentry_kill(struct dentry *dentry, int unlock_on_failure)
450 __releases(dentry->d_lock)
451{ 443{
452 struct inode *inode;
453 struct dentry *parent = NULL; 444 struct dentry *parent = NULL;
454 bool can_free = true; 445 bool can_free = true;
455
456 if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
457 can_free = dentry->d_flags & DCACHE_MAY_FREE;
458 spin_unlock(&dentry->d_lock);
459 goto out;
460 }
461
462 inode = dentry->d_inode;
463 if (inode && !spin_trylock(&inode->i_lock)) {
464relock:
465 if (unlock_on_failure) {
466 spin_unlock(&dentry->d_lock);
467 cpu_relax();
468 }
469 return dentry; /* try again with same dentry */
470 }
471 if (!IS_ROOT(dentry)) 446 if (!IS_ROOT(dentry))
472 parent = dentry->d_parent; 447 parent = dentry->d_parent;
473 if (parent && !spin_trylock(&parent->d_lock)) {
474 if (inode)
475 spin_unlock(&inode->i_lock);
476 goto relock;
477 }
478 448
479 /* 449 /*
480 * The dentry is now unrecoverably dead to the world. 450 * The dentry is now unrecoverably dead to the world.
@@ -518,9 +488,74 @@ relock:
518 can_free = false; 488 can_free = false;
519 } 489 }
520 spin_unlock(&dentry->d_lock); 490 spin_unlock(&dentry->d_lock);
521out:
522 if (likely(can_free)) 491 if (likely(can_free))
523 dentry_free(dentry); 492 dentry_free(dentry);
493}
494
495/*
496 * Finish off a dentry we've decided to kill.
497 * dentry->d_lock must be held, returns with it unlocked.
498 * If ref is non-zero, then decrement the refcount too.
499 * Returns dentry requiring refcount drop, or NULL if we're done.
500 */
501static struct dentry *dentry_kill(struct dentry *dentry)
502 __releases(dentry->d_lock)
503{
504 struct inode *inode = dentry->d_inode;
505 struct dentry *parent = NULL;
506
507 if (inode && unlikely(!spin_trylock(&inode->i_lock)))
508 goto failed;
509
510 if (!IS_ROOT(dentry)) {
511 parent = dentry->d_parent;
512 if (unlikely(!spin_trylock(&parent->d_lock))) {
513 if (inode)
514 spin_unlock(&inode->i_lock);
515 goto failed;
516 }
517 }
518
519 __dentry_kill(dentry);
520 return parent;
521
522failed:
523 spin_unlock(&dentry->d_lock);
524 cpu_relax();
525 return dentry; /* try again with same dentry */
526}
527
528static inline struct dentry *lock_parent(struct dentry *dentry)
529{
530 struct dentry *parent = dentry->d_parent;
531 if (IS_ROOT(dentry))
532 return NULL;
533 if (unlikely((int)dentry->d_lockref.count < 0))
534 return NULL;
535 if (likely(spin_trylock(&parent->d_lock)))
536 return parent;
537 rcu_read_lock();
538 spin_unlock(&dentry->d_lock);
539again:
540 parent = ACCESS_ONCE(dentry->d_parent);
541 spin_lock(&parent->d_lock);
542 /*
543 * We can't blindly lock dentry until we are sure
544 * that we won't violate the locking order.
545 * Any changes of dentry->d_parent must have
546 * been done with parent->d_lock held, so
547 * spin_lock() above is enough of a barrier
548 * for checking if it's still our child.
549 */
550 if (unlikely(parent != dentry->d_parent)) {
551 spin_unlock(&parent->d_lock);
552 goto again;
553 }
554 rcu_read_unlock();
555 if (parent != dentry)
556 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
557 else
558 parent = NULL;
524 return parent; 559 return parent;
525} 560}
526 561
@@ -580,7 +615,7 @@ repeat:
580 return; 615 return;
581 616
582kill_it: 617kill_it:
583 dentry = dentry_kill(dentry, 1); 618 dentry = dentry_kill(dentry);
584 if (dentry) 619 if (dentry)
585 goto repeat; 620 goto repeat;
586} 621}
@@ -798,8 +833,11 @@ static void shrink_dentry_list(struct list_head *list)
798 struct dentry *dentry, *parent; 833 struct dentry *dentry, *parent;
799 834
800 while (!list_empty(list)) { 835 while (!list_empty(list)) {
836 struct inode *inode;
801 dentry = list_entry(list->prev, struct dentry, d_lru); 837 dentry = list_entry(list->prev, struct dentry, d_lru);
802 spin_lock(&dentry->d_lock); 838 spin_lock(&dentry->d_lock);
839 parent = lock_parent(dentry);
840
803 /* 841 /*
804 * The dispose list is isolated and dentries are not accounted 842 * The dispose list is isolated and dentries are not accounted
805 * to the LRU here, so we can simply remove it from the list 843 * to the LRU here, so we can simply remove it from the list
@@ -813,26 +851,33 @@ static void shrink_dentry_list(struct list_head *list)
813 */ 851 */
814 if ((int)dentry->d_lockref.count > 0) { 852 if ((int)dentry->d_lockref.count > 0) {
815 spin_unlock(&dentry->d_lock); 853 spin_unlock(&dentry->d_lock);
854 if (parent)
855 spin_unlock(&parent->d_lock);
816 continue; 856 continue;
817 } 857 }
818 858
819 parent = dentry_kill(dentry, 0); 859
820 /* 860 if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
821 * If dentry_kill returns NULL, we have nothing more to do. 861 bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
822 */ 862 spin_unlock(&dentry->d_lock);
823 if (!parent) 863 if (parent)
864 spin_unlock(&parent->d_lock);
865 if (can_free)
866 dentry_free(dentry);
824 continue; 867 continue;
868 }
825 869
826 if (unlikely(parent == dentry)) { 870 inode = dentry->d_inode;
827 /* 871 if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
828 * trylocks have failed and d_lock has been held the
829 * whole time, so it could not have been added to any
830 * other lists. Just add it back to the shrink list.
831 */
832 d_shrink_add(dentry, list); 872 d_shrink_add(dentry, list);
833 spin_unlock(&dentry->d_lock); 873 spin_unlock(&dentry->d_lock);
874 if (parent)
875 spin_unlock(&parent->d_lock);
834 continue; 876 continue;
835 } 877 }
878
879 __dentry_kill(dentry);
880
836 /* 881 /*
837 * We need to prune ancestors too. This is necessary to prevent 882 * We need to prune ancestors too. This is necessary to prevent
838 * quadratic behavior of shrink_dcache_parent(), but is also 883 * quadratic behavior of shrink_dcache_parent(), but is also
@@ -840,8 +885,26 @@ static void shrink_dentry_list(struct list_head *list)
840 * fragmentation. 885 * fragmentation.
841 */ 886 */
842 dentry = parent; 887 dentry = parent;
843 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) 888 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
844 dentry = dentry_kill(dentry, 1); 889 parent = lock_parent(dentry);
890 if (dentry->d_lockref.count != 1) {
891 dentry->d_lockref.count--;
892 spin_unlock(&dentry->d_lock);
893 if (parent)
894 spin_unlock(&parent->d_lock);
895 break;
896 }
897 inode = dentry->d_inode; /* can't be NULL */
898 if (unlikely(!spin_trylock(&inode->i_lock))) {
899 spin_unlock(&dentry->d_lock);
900 if (parent)
901 spin_unlock(&parent->d_lock);
902 cpu_relax();
903 continue;
904 }
905 __dentry_kill(dentry);
906 dentry = parent;
907 }
845 } 908 }
846} 909}
847 910
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index fe649d325b1..ce653dfb0ae 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -720,15 +720,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
720 720
721 init_special_inode(inode, mode, dev); 721 init_special_inode(inode, mode, dev);
722 err = do_mknod(name, mode, MAJOR(dev), MINOR(dev)); 722 err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
723 if (!err) 723 if (err)
724 goto out_free; 724 goto out_free;
725 725
726 err = read_name(inode, name); 726 err = read_name(inode, name);
727 __putname(name); 727 __putname(name);
728 if (err) 728 if (err)
729 goto out_put; 729 goto out_put;
730 if (err)
731 goto out_put;
732 730
733 d_instantiate(dentry, inode); 731 d_instantiate(dentry, inode);
734 return 0; 732 return 0;
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 969d589c848..b5f3c3ab0d5 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -116,7 +116,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
116 atomic_inc(&nsm->sm_count); 116 atomic_inc(&nsm->sm_count);
117 else { 117 else {
118 host = NULL; 118 host = NULL;
119 nsm = nsm_get_handle(ni->sap, ni->salen, 119 nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
120 ni->hostname, ni->hostname_len); 120 ni->hostname, ni->hostname_len);
121 if (unlikely(nsm == NULL)) { 121 if (unlikely(nsm == NULL)) {
122 dprintk("lockd: %s failed; no nsm handle\n", 122 dprintk("lockd: %s failed; no nsm handle\n",
@@ -534,17 +534,18 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
534 534
535/** 535/**
536 * nlm_host_rebooted - Release all resources held by rebooted host 536 * nlm_host_rebooted - Release all resources held by rebooted host
537 * @net: network namespace
537 * @info: pointer to decoded results of NLM_SM_NOTIFY call 538 * @info: pointer to decoded results of NLM_SM_NOTIFY call
538 * 539 *
539 * We were notified that the specified host has rebooted. Release 540 * We were notified that the specified host has rebooted. Release
540 * all resources held by that peer. 541 * all resources held by that peer.
541 */ 542 */
542void nlm_host_rebooted(const struct nlm_reboot *info) 543void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info)
543{ 544{
544 struct nsm_handle *nsm; 545 struct nsm_handle *nsm;
545 struct nlm_host *host; 546 struct nlm_host *host;
546 547
547 nsm = nsm_reboot_lookup(info); 548 nsm = nsm_reboot_lookup(net, info);
548 if (unlikely(nsm == NULL)) 549 if (unlikely(nsm == NULL))
549 return; 550 return;
550 551
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 6ae664b489a..13fac49aff7 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -51,7 +51,6 @@ struct nsm_res {
51}; 51};
52 52
53static const struct rpc_program nsm_program; 53static const struct rpc_program nsm_program;
54static LIST_HEAD(nsm_handles);
55static DEFINE_SPINLOCK(nsm_lock); 54static DEFINE_SPINLOCK(nsm_lock);
56 55
57/* 56/*
@@ -259,33 +258,35 @@ void nsm_unmonitor(const struct nlm_host *host)
259 } 258 }
260} 259}
261 260
262static struct nsm_handle *nsm_lookup_hostname(const char *hostname, 261static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles,
263 const size_t len) 262 const char *hostname, const size_t len)
264{ 263{
265 struct nsm_handle *nsm; 264 struct nsm_handle *nsm;
266 265
267 list_for_each_entry(nsm, &nsm_handles, sm_link) 266 list_for_each_entry(nsm, nsm_handles, sm_link)
268 if (strlen(nsm->sm_name) == len && 267 if (strlen(nsm->sm_name) == len &&
269 memcmp(nsm->sm_name, hostname, len) == 0) 268 memcmp(nsm->sm_name, hostname, len) == 0)
270 return nsm; 269 return nsm;
271 return NULL; 270 return NULL;
272} 271}
273 272
274static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap) 273static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles,
274 const struct sockaddr *sap)
275{ 275{
276 struct nsm_handle *nsm; 276 struct nsm_handle *nsm;
277 277
278 list_for_each_entry(nsm, &nsm_handles, sm_link) 278 list_for_each_entry(nsm, nsm_handles, sm_link)
279 if (rpc_cmp_addr(nsm_addr(nsm), sap)) 279 if (rpc_cmp_addr(nsm_addr(nsm), sap))
280 return nsm; 280 return nsm;
281 return NULL; 281 return NULL;
282} 282}
283 283
284static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv) 284static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles,
285 const struct nsm_private *priv)
285{ 286{
286 struct nsm_handle *nsm; 287 struct nsm_handle *nsm;
287 288
288 list_for_each_entry(nsm, &nsm_handles, sm_link) 289 list_for_each_entry(nsm, nsm_handles, sm_link)
289 if (memcmp(nsm->sm_priv.data, priv->data, 290 if (memcmp(nsm->sm_priv.data, priv->data,
290 sizeof(priv->data)) == 0) 291 sizeof(priv->data)) == 0)
291 return nsm; 292 return nsm;
@@ -350,6 +351,7 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
350 351
351/** 352/**
352 * nsm_get_handle - Find or create a cached nsm_handle 353 * nsm_get_handle - Find or create a cached nsm_handle
354 * @net: network namespace
353 * @sap: pointer to socket address of handle to find 355 * @sap: pointer to socket address of handle to find
354 * @salen: length of socket address 356 * @salen: length of socket address
355 * @hostname: pointer to C string containing hostname to find 357 * @hostname: pointer to C string containing hostname to find
@@ -362,11 +364,13 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
362 * @hostname cannot be found in the handle cache. Returns NULL if 364 * @hostname cannot be found in the handle cache. Returns NULL if
363 * an error occurs. 365 * an error occurs.
364 */ 366 */
365struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, 367struct nsm_handle *nsm_get_handle(const struct net *net,
368 const struct sockaddr *sap,
366 const size_t salen, const char *hostname, 369 const size_t salen, const char *hostname,
367 const size_t hostname_len) 370 const size_t hostname_len)
368{ 371{
369 struct nsm_handle *cached, *new = NULL; 372 struct nsm_handle *cached, *new = NULL;
373 struct lockd_net *ln = net_generic(net, lockd_net_id);
370 374
371 if (hostname && memchr(hostname, '/', hostname_len) != NULL) { 375 if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
372 if (printk_ratelimit()) { 376 if (printk_ratelimit()) {
@@ -381,9 +385,10 @@ retry:
381 spin_lock(&nsm_lock); 385 spin_lock(&nsm_lock);
382 386
383 if (nsm_use_hostnames && hostname != NULL) 387 if (nsm_use_hostnames && hostname != NULL)
384 cached = nsm_lookup_hostname(hostname, hostname_len); 388 cached = nsm_lookup_hostname(&ln->nsm_handles,
389 hostname, hostname_len);
385 else 390 else
386 cached = nsm_lookup_addr(sap); 391 cached = nsm_lookup_addr(&ln->nsm_handles, sap);
387 392
388 if (cached != NULL) { 393 if (cached != NULL) {
389 atomic_inc(&cached->sm_count); 394 atomic_inc(&cached->sm_count);
@@ -397,7 +402,7 @@ retry:
397 } 402 }
398 403
399 if (new != NULL) { 404 if (new != NULL) {
400 list_add(&new->sm_link, &nsm_handles); 405 list_add(&new->sm_link, &ln->nsm_handles);
401 spin_unlock(&nsm_lock); 406 spin_unlock(&nsm_lock);
402 dprintk("lockd: created nsm_handle for %s (%s)\n", 407 dprintk("lockd: created nsm_handle for %s (%s)\n",
403 new->sm_name, new->sm_addrbuf); 408 new->sm_name, new->sm_addrbuf);
@@ -414,19 +419,22 @@ retry:
414 419
415/** 420/**
416 * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle 421 * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle
422 * @net: network namespace
417 * @info: pointer to NLMPROC_SM_NOTIFY arguments 423 * @info: pointer to NLMPROC_SM_NOTIFY arguments
418 * 424 *
419 * Returns a matching nsm_handle if found in the nsm cache. The returned 425 * Returns a matching nsm_handle if found in the nsm cache. The returned
420 * nsm_handle's reference count is bumped. Otherwise returns NULL if some 426 * nsm_handle's reference count is bumped. Otherwise returns NULL if some
421 * error occurred. 427 * error occurred.
422 */ 428 */
423struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info) 429struct nsm_handle *nsm_reboot_lookup(const struct net *net,
430 const struct nlm_reboot *info)
424{ 431{
425 struct nsm_handle *cached; 432 struct nsm_handle *cached;
433 struct lockd_net *ln = net_generic(net, lockd_net_id);
426 434
427 spin_lock(&nsm_lock); 435 spin_lock(&nsm_lock);
428 436
429 cached = nsm_lookup_priv(&info->priv); 437 cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv);
430 if (unlikely(cached == NULL)) { 438 if (unlikely(cached == NULL)) {
431 spin_unlock(&nsm_lock); 439 spin_unlock(&nsm_lock);
432 dprintk("lockd: never saw rebooted peer '%.*s' before\n", 440 dprintk("lockd: never saw rebooted peer '%.*s' before\n",
diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
index 5010b55628b..414da99744e 100644
--- a/fs/lockd/netns.h
+++ b/fs/lockd/netns.h
@@ -16,6 +16,7 @@ struct lockd_net {
16 spinlock_t nsm_clnt_lock; 16 spinlock_t nsm_clnt_lock;
17 unsigned int nsm_users; 17 unsigned int nsm_users;
18 struct rpc_clnt *nsm_clnt; 18 struct rpc_clnt *nsm_clnt;
19 struct list_head nsm_handles;
19}; 20};
20 21
21extern int lockd_net_id; 22extern int lockd_net_id;
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 59a53f66400..bb1ad4df024 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -583,6 +583,7 @@ static int lockd_init_net(struct net *net)
583 INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender); 583 INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
584 INIT_LIST_HEAD(&ln->grace_list); 584 INIT_LIST_HEAD(&ln->grace_list);
585 spin_lock_init(&ln->nsm_clnt_lock); 585 spin_lock_init(&ln->nsm_clnt_lock);
586 INIT_LIST_HEAD(&ln->nsm_handles);
586 return 0; 587 return 0;
587} 588}
588 589
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index b147d1ae71f..09c576f26c7 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -421,7 +421,7 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
421 return rpc_system_err; 421 return rpc_system_err;
422 } 422 }
423 423
424 nlm_host_rebooted(argp); 424 nlm_host_rebooted(SVC_NET(rqstp), argp);
425 return rpc_success; 425 return rpc_success;
426} 426}
427 427
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 21171f0c647..fb26b9f522e 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -464,7 +464,7 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
464 return rpc_system_err; 464 return rpc_system_err;
465 } 465 }
466 466
467 nlm_host_rebooted(argp); 467 nlm_host_rebooted(SVC_NET(rqstp), argp);
468 return rpc_success; 468 return rpc_success;
469} 469}
470 470
diff --git a/fs/namei.c b/fs/namei.c
index f4f6460b695..c24781f07cf 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3085,6 +3085,10 @@ opened:
3085 goto exit_fput; 3085 goto exit_fput;
3086 } 3086 }
3087out: 3087out:
3088 if (unlikely(error > 0)) {
3089 WARN_ON(1);
3090 error = -EINVAL;
3091 }
3088 if (got_write) 3092 if (got_write)
3089 mnt_drop_write(nd->path.mnt); 3093 mnt_drop_write(nd->path.mnt);
3090 path_put(&save_parent); 3094 path_put(&save_parent);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 45a7dd36b4a..3b5e86fd280 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2187,9 +2187,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2187 dentry = d_add_unique(dentry, igrab(state->inode)); 2187 dentry = d_add_unique(dentry, igrab(state->inode));
2188 if (dentry == NULL) { 2188 if (dentry == NULL) {
2189 dentry = opendata->dentry; 2189 dentry = opendata->dentry;
2190 } else if (dentry != ctx->dentry) { 2190 } else {
2191 dput(ctx->dentry); 2191 dput(ctx->dentry);
2192 ctx->dentry = dget(dentry); 2192 ctx->dentry = dentry;
2193 } 2193 }
2194 nfs_set_verifier(dentry, 2194 nfs_set_verifier(dentry,
2195 nfs_save_change_attribute(opendata->dir->d_inode)); 2195 nfs_save_change_attribute(opendata->dir->d_inode));
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index c402b672a47..1c02b300dc5 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1482,7 +1482,7 @@ restart:
1482 spin_unlock(&state->state_lock); 1482 spin_unlock(&state->state_lock);
1483 } 1483 }
1484 nfs4_put_open_state(state); 1484 nfs4_put_open_state(state);
1485 clear_bit(NFS4CLNT_RECLAIM_NOGRACE, 1485 clear_bit(NFS_STATE_RECLAIM_NOGRACE,
1486 &state->flags); 1486 &state->flags);
1487 spin_lock(&sp->so_lock); 1487 spin_lock(&sp->so_lock);
1488 goto restart; 1488 goto restart;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index eaa7374305a..6b1d8498d20 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -165,7 +165,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
165 if (!priv->task) 165 if (!priv->task)
166 return ERR_PTR(-ESRCH); 166 return ERR_PTR(-ESRCH);
167 167
168 mm = mm_access(priv->task, PTRACE_MODE_READ); 168 mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
169 if (!mm || IS_ERR(mm)) 169 if (!mm || IS_ERR(mm))
170 return mm; 170 return mm;
171 down_read(&mm->mmap_sem); 171 down_read(&mm->mmap_sem);
@@ -1182,7 +1182,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
1182 if (!pm.buffer) 1182 if (!pm.buffer)
1183 goto out_task; 1183 goto out_task;
1184 1184
1185 mm = mm_access(task, PTRACE_MODE_READ); 1185 mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
1186 ret = PTR_ERR(mm); 1186 ret = PTR_ERR(mm);
1187 if (!mm || IS_ERR(mm)) 1187 if (!mm || IS_ERR(mm))
1188 goto out_free; 1188 goto out_free;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 678455d2d68..f9db7e9f696 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -216,7 +216,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
216 if (!priv->task) 216 if (!priv->task)
217 return ERR_PTR(-ESRCH); 217 return ERR_PTR(-ESRCH);
218 218
219 mm = mm_access(priv->task, PTRACE_MODE_READ); 219 mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
220 if (!mm || IS_ERR(mm)) { 220 if (!mm || IS_ERR(mm)) {
221 put_task_struct(priv->task); 221 put_task_struct(priv->task);
222 priv->task = NULL; 222 priv->task = NULL;
diff --git a/fs/splice.c b/fs/splice.c
index f345d53f94d..e64f59960ec 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -949,6 +949,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
949 949
950 splice_from_pipe_begin(sd); 950 splice_from_pipe_begin(sd);
951 do { 951 do {
952 cond_resched();
952 ret = splice_from_pipe_next(pipe, sd); 953 ret = splice_from_pipe_next(pipe, sd);
953 if (ret > 0) 954 if (ret > 0)
954 ret = splice_from_pipe_feed(pipe, sd, actor); 955 ret = splice_from_pipe_feed(pipe, sd, actor);
@@ -1175,7 +1176,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
1175 long ret, bytes; 1176 long ret, bytes;
1176 umode_t i_mode; 1177 umode_t i_mode;
1177 size_t len; 1178 size_t len;
1178 int i, flags; 1179 int i, flags, more;
1179 1180
1180 /* 1181 /*
1181 * We require the input being a regular file, as we don't want to 1182 * We require the input being a regular file, as we don't want to
@@ -1218,6 +1219,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
1218 * Don't block on output, we have to drain the direct pipe. 1219 * Don't block on output, we have to drain the direct pipe.
1219 */ 1220 */
1220 sd->flags &= ~SPLICE_F_NONBLOCK; 1221 sd->flags &= ~SPLICE_F_NONBLOCK;
1222 more = sd->flags & SPLICE_F_MORE;
1221 1223
1222 while (len) { 1224 while (len) {
1223 size_t read_len; 1225 size_t read_len;
@@ -1231,6 +1233,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
1231 sd->total_len = read_len; 1233 sd->total_len = read_len;
1232 1234
1233 /* 1235 /*
1236 * If more data is pending, set SPLICE_F_MORE
1237 * If this is the last data and SPLICE_F_MORE was not set
1238 * initially, clears it.
1239 */
1240 if (read_len < len)
1241 sd->flags |= SPLICE_F_MORE;
1242 else if (!more)
1243 sd->flags &= ~SPLICE_F_MORE;
1244 /*
1234 * NOTE: nonblocking mode only applies to the input. We 1245 * NOTE: nonblocking mode only applies to the input. We
1235 * must not do the output in nonblocking mode as then we 1246 * must not do the output in nonblocking mode as then we
1236 * could get stuck data in the internal pipe: 1247 * could get stuck data in the internal pipe:
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 2c9e62c2bfd..f55fb04501e 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -70,7 +70,7 @@ typedef u64 __nocast cputime64_t;
70 */ 70 */
71static inline cputime_t timespec_to_cputime(const struct timespec *val) 71static inline cputime_t timespec_to_cputime(const struct timespec *val)
72{ 72{
73 u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec; 73 u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
74 return (__force cputime_t) ret; 74 return (__force cputime_t) ret;
75} 75}
76static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) 76static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
@@ -86,7 +86,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
86 */ 86 */
87static inline cputime_t timeval_to_cputime(const struct timeval *val) 87static inline cputime_t timeval_to_cputime(const struct timeval *val)
88{ 88{
89 u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC; 89 u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
90 val->tv_usec * NSEC_PER_USEC;
90 return (__force cputime_t) ret; 91 return (__force cputime_t) ret;
91} 92}
92static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) 93static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 9a33c5f7e12..f6c229e2bff 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -29,7 +29,11 @@
29/* A few generic types ... taken from ses-2 */ 29/* A few generic types ... taken from ses-2 */
30enum enclosure_component_type { 30enum enclosure_component_type {
31 ENCLOSURE_COMPONENT_DEVICE = 0x01, 31 ENCLOSURE_COMPONENT_DEVICE = 0x01,
32 ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07,
33 ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14,
34 ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15,
32 ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, 35 ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
36 ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18,
33}; 37};
34 38
35/* ses-2 common element status */ 39/* ses-2 common element status */
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index dcaad79f54e..0adf073f13b 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -236,7 +236,8 @@ void nlm_rebind_host(struct nlm_host *);
236struct nlm_host * nlm_get_host(struct nlm_host *); 236struct nlm_host * nlm_get_host(struct nlm_host *);
237void nlm_shutdown_hosts(void); 237void nlm_shutdown_hosts(void);
238void nlm_shutdown_hosts_net(struct net *net); 238void nlm_shutdown_hosts_net(struct net *net);
239void nlm_host_rebooted(const struct nlm_reboot *); 239void nlm_host_rebooted(const struct net *net,
240 const struct nlm_reboot *);
240 241
241/* 242/*
242 * Host monitoring 243 * Host monitoring
@@ -244,11 +245,13 @@ void nlm_host_rebooted(const struct nlm_reboot *);
244int nsm_monitor(const struct nlm_host *host); 245int nsm_monitor(const struct nlm_host *host);
245void nsm_unmonitor(const struct nlm_host *host); 246void nsm_unmonitor(const struct nlm_host *host);
246 247
247struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, 248struct nsm_handle *nsm_get_handle(const struct net *net,
249 const struct sockaddr *sap,
248 const size_t salen, 250 const size_t salen,
249 const char *hostname, 251 const char *hostname,
250 const size_t hostname_len); 252 const size_t hostname_len);
251struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info); 253struct nsm_handle *nsm_reboot_lookup(const struct net *net,
254 const struct nlm_reboot *info);
252void nsm_release(struct nsm_handle *nsm); 255void nsm_release(struct nsm_handle *nsm);
253 256
254/* 257/*
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 0ae5807480f..1e122cc9ea3 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -580,9 +580,7 @@ static inline int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
580 580
581static inline loff_t nfs_size_to_loff_t(__u64 size) 581static inline loff_t nfs_size_to_loff_t(__u64 size)
582{ 582{
583 if (size > (__u64) OFFSET_MAX - 1) 583 return min_t(u64, size, OFFSET_MAX);
584 return OFFSET_MAX - 1;
585 return (loff_t) size;
586} 584}
587 585
588static inline ino_t 586static inline ino_t
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 7159a0a933d..97c8689c7e5 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -14,8 +14,11 @@
14 * See the file COPYING for more details. 14 * See the file COPYING for more details.
15 */ 15 */
16 16
17#include <linux/smp.h>
17#include <linux/errno.h> 18#include <linux/errno.h>
18#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/percpu.h>
21#include <linux/cpumask.h>
19#include <linux/rcupdate.h> 22#include <linux/rcupdate.h>
20#include <linux/static_key.h> 23#include <linux/static_key.h>
21 24
@@ -126,6 +129,9 @@ static inline void tracepoint_synchronize_unregister(void)
126 void *it_func; \ 129 void *it_func; \
127 void *__data; \ 130 void *__data; \
128 \ 131 \
132 if (!cpu_online(raw_smp_processor_id())) \
133 return; \
134 \
129 if (!(cond)) \ 135 if (!(cond)) \
130 return; \ 136 return; \
131 prercu; \ 137 prercu; \
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index e830c3dff61..7bb69c9c3c4 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -6,8 +6,8 @@
6#include <linux/mutex.h> 6#include <linux/mutex.h>
7#include <net/sock.h> 7#include <net/sock.h>
8 8
9void unix_inflight(struct file *fp); 9void unix_inflight(struct user_struct *user, struct file *fp);
10void unix_notinflight(struct file *fp); 10void unix_notinflight(struct user_struct *user, struct file *fp);
11void unix_gc(void); 11void unix_gc(void);
12void wait_for_unix_gc(void); 12void wait_for_unix_gc(void);
13struct sock *unix_get_socket(struct file *filp); 13struct sock *unix_get_socket(struct file *filp);
diff --git a/include/net/scm.h b/include/net/scm.h
index 262532d111f..59fa93c01d2 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -21,6 +21,7 @@ struct scm_creds {
21struct scm_fp_list { 21struct scm_fp_list {
22 short count; 22 short count;
23 short max; 23 short max;
24 struct user_struct *user;
24 struct file *fp[SCM_MAX_FD]; 25 struct file *fp[SCM_MAX_FD];
25}; 26};
26 27
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index e4b9e011d2a..42606764d83 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -513,7 +513,7 @@ struct se_cmd {
513 sense_reason_t (*execute_cmd)(struct se_cmd *); 513 sense_reason_t (*execute_cmd)(struct se_cmd *);
514 sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *, 514 sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
515 u32, enum dma_data_direction); 515 u32, enum dma_data_direction);
516 sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); 516 sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
517 517
518 unsigned char *t_task_cdb; 518 unsigned char *t_task_cdb;
519 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; 519 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ebb8a9e937f..2c2e5e70e4f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1230,6 +1230,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1230 if (!desc) 1230 if (!desc)
1231 return NULL; 1231 return NULL;
1232 1232
1233 chip_bus_lock(desc);
1233 raw_spin_lock_irqsave(&desc->lock, flags); 1234 raw_spin_lock_irqsave(&desc->lock, flags);
1234 1235
1235 /* 1236 /*
@@ -1243,7 +1244,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1243 if (!action) { 1244 if (!action) {
1244 WARN(1, "Trying to free already-free IRQ %d\n", irq); 1245 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1245 raw_spin_unlock_irqrestore(&desc->lock, flags); 1246 raw_spin_unlock_irqrestore(&desc->lock, flags);
1246 1247 chip_bus_sync_unlock(desc);
1247 return NULL; 1248 return NULL;
1248 } 1249 }
1249 1250
@@ -1266,6 +1267,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1266#endif 1267#endif
1267 1268
1268 raw_spin_unlock_irqrestore(&desc->lock, flags); 1269 raw_spin_unlock_irqrestore(&desc->lock, flags);
1270 chip_bus_sync_unlock(desc);
1269 1271
1270 unregister_handler_proc(irq, action); 1272 unregister_handler_proc(irq, action);
1271 1273
@@ -1339,9 +1341,7 @@ void free_irq(unsigned int irq, void *dev_id)
1339 desc->affinity_notify = NULL; 1341 desc->affinity_notify = NULL;
1340#endif 1342#endif
1341 1343
1342 chip_bus_lock(desc);
1343 kfree(__free_irq(irq, dev_id)); 1344 kfree(__free_irq(irq, dev_id));
1344 chip_bus_sync_unlock(desc);
1345} 1345}
1346EXPORT_SYMBOL(free_irq); 1346EXPORT_SYMBOL(free_irq);
1347 1347
diff --git a/kernel/resource.c b/kernel/resource.c
index 3f285dce934..449282e48bb 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -961,9 +961,10 @@ struct resource * __request_region(struct resource *parent,
961 if (!conflict) 961 if (!conflict)
962 break; 962 break;
963 if (conflict != parent) { 963 if (conflict != parent) {
964 parent = conflict; 964 if (!(conflict->flags & IORESOURCE_BUSY)) {
965 if (!(conflict->flags & IORESOURCE_BUSY)) 965 parent = conflict;
966 continue; 966 continue;
967 }
967 } 968 }
968 if (conflict->flags & flags & IORESOURCE_MUXED) { 969 if (conflict->flags & flags & IORESOURCE_MUXED) {
969 add_wait_queue(&muxed_resource_wait, &wait); 970 add_wait_queue(&muxed_resource_wait, &wait);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bbe957762ac..46afc8cd69d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -937,6 +937,13 @@ inline int task_curr(const struct task_struct *p)
937 return cpu_curr(task_cpu(p)) == p; 937 return cpu_curr(task_cpu(p)) == p;
938} 938}
939 939
940/*
941 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
942 * use the balance_callback list if you want balancing.
943 *
944 * this means any call to check_class_changed() must be followed by a call to
945 * balance_callback().
946 */
940static inline void check_class_changed(struct rq *rq, struct task_struct *p, 947static inline void check_class_changed(struct rq *rq, struct task_struct *p,
941 const struct sched_class *prev_class, 948 const struct sched_class *prev_class,
942 int oldprio) 949 int oldprio)
@@ -1423,8 +1430,12 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1423 1430
1424 p->state = TASK_RUNNING; 1431 p->state = TASK_RUNNING;
1425#ifdef CONFIG_SMP 1432#ifdef CONFIG_SMP
1426 if (p->sched_class->task_woken) 1433 if (p->sched_class->task_woken) {
1434 /*
1435 * XXX can drop rq->lock; most likely ok.
1436 */
1427 p->sched_class->task_woken(rq, p); 1437 p->sched_class->task_woken(rq, p);
1438 }
1428 1439
1429 if (rq->idle_stamp) { 1440 if (rq->idle_stamp) {
1430 u64 delta = rq_clock(rq) - rq->idle_stamp; 1441 u64 delta = rq_clock(rq) - rq->idle_stamp;
@@ -1685,7 +1696,6 @@ out:
1685 */ 1696 */
1686int wake_up_process(struct task_struct *p) 1697int wake_up_process(struct task_struct *p)
1687{ 1698{
1688 WARN_ON(task_is_stopped_or_traced(p));
1689 return try_to_wake_up(p, TASK_NORMAL, 0); 1699 return try_to_wake_up(p, TASK_NORMAL, 0);
1690} 1700}
1691EXPORT_SYMBOL(wake_up_process); 1701EXPORT_SYMBOL(wake_up_process);
@@ -2179,18 +2189,30 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2179} 2189}
2180 2190
2181/* rq->lock is NOT held, but preemption is disabled */ 2191/* rq->lock is NOT held, but preemption is disabled */
2182static inline void post_schedule(struct rq *rq) 2192static void __balance_callback(struct rq *rq)
2183{ 2193{
2184 if (rq->post_schedule) { 2194 struct callback_head *head, *next;
2185 unsigned long flags; 2195 void (*func)(struct rq *rq);
2196 unsigned long flags;
2186 2197
2187 raw_spin_lock_irqsave(&rq->lock, flags); 2198 raw_spin_lock_irqsave(&rq->lock, flags);
2188 if (rq->curr->sched_class->post_schedule) 2199 head = rq->balance_callback;
2189 rq->curr->sched_class->post_schedule(rq); 2200 rq->balance_callback = NULL;
2190 raw_spin_unlock_irqrestore(&rq->lock, flags); 2201 while (head) {
2202 func = (void (*)(struct rq *))head->func;
2203 next = head->next;
2204 head->next = NULL;
2205 head = next;
2191 2206
2192 rq->post_schedule = 0; 2207 func(rq);
2193 } 2208 }
2209 raw_spin_unlock_irqrestore(&rq->lock, flags);
2210}
2211
2212static inline void balance_callback(struct rq *rq)
2213{
2214 if (unlikely(rq->balance_callback))
2215 __balance_callback(rq);
2194} 2216}
2195 2217
2196#else 2218#else
@@ -2199,7 +2221,7 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2199{ 2221{
2200} 2222}
2201 2223
2202static inline void post_schedule(struct rq *rq) 2224static inline void balance_callback(struct rq *rq)
2203{ 2225{
2204} 2226}
2205 2227
@@ -2220,7 +2242,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
2220 * FIXME: do we need to worry about rq being invalidated by the 2242 * FIXME: do we need to worry about rq being invalidated by the
2221 * task_switch? 2243 * task_switch?
2222 */ 2244 */
2223 post_schedule(rq); 2245 balance_callback(rq);
2224 2246
2225#ifdef __ARCH_WANT_UNLOCKED_CTXSW 2247#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2226 /* In this case, finish_task_switch does not reenable preemption */ 2248 /* In this case, finish_task_switch does not reenable preemption */
@@ -2732,7 +2754,7 @@ need_resched:
2732 } else 2754 } else
2733 raw_spin_unlock_irq(&rq->lock); 2755 raw_spin_unlock_irq(&rq->lock);
2734 2756
2735 post_schedule(rq); 2757 balance_callback(rq);
2736 2758
2737 sched_preempt_enable_no_resched(); 2759 sched_preempt_enable_no_resched();
2738 if (need_resched()) 2760 if (need_resched())
@@ -2994,7 +3016,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
2994 3016
2995 check_class_changed(rq, p, prev_class, oldprio); 3017 check_class_changed(rq, p, prev_class, oldprio);
2996out_unlock: 3018out_unlock:
3019 preempt_disable(); /* avoid rq from going away on us */
2997 __task_rq_unlock(rq); 3020 __task_rq_unlock(rq);
3021
3022 balance_callback(rq);
3023 preempt_enable();
2998} 3024}
2999#endif 3025#endif
3000 3026
@@ -3500,10 +3526,17 @@ change:
3500 enqueue_task(rq, p, 0); 3526 enqueue_task(rq, p, 0);
3501 3527
3502 check_class_changed(rq, p, prev_class, oldprio); 3528 check_class_changed(rq, p, prev_class, oldprio);
3529 preempt_disable(); /* avoid rq from going away on us */
3503 task_rq_unlock(rq, p, &flags); 3530 task_rq_unlock(rq, p, &flags);
3504 3531
3505 rt_mutex_adjust_pi(p); 3532 rt_mutex_adjust_pi(p);
3506 3533
3534 /*
3535 * Run balance callbacks after we've adjusted the PI chain.
3536 */
3537 balance_callback(rq);
3538 preempt_enable();
3539
3507 return 0; 3540 return 0;
3508} 3541}
3509 3542
@@ -5386,13 +5419,13 @@ static int init_rootdomain(struct root_domain *rd)
5386{ 5419{
5387 memset(rd, 0, sizeof(*rd)); 5420 memset(rd, 0, sizeof(*rd));
5388 5421
5389 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) 5422 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
5390 goto out; 5423 goto out;
5391 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) 5424 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
5392 goto free_span; 5425 goto free_span;
5393 if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 5426 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
5394 goto free_online; 5427 goto free_online;
5395 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 5428 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5396 goto free_dlo_mask; 5429 goto free_dlo_mask;
5397 5430
5398 init_dl_bw(&rd->dl_bw); 5431 init_dl_bw(&rd->dl_bw);
@@ -6902,7 +6935,7 @@ void __init sched_init(void)
6902 rq->sd = NULL; 6935 rq->sd = NULL;
6903 rq->rd = NULL; 6936 rq->rd = NULL;
6904 rq->cpu_power = SCHED_POWER_SCALE; 6937 rq->cpu_power = SCHED_POWER_SCALE;
6905 rq->post_schedule = 0; 6938 rq->balance_callback = NULL;
6906 rq->active_balance = 0; 6939 rq->active_balance = 0;
6907 rq->next_balance = jiffies; 6940 rq->next_balance = jiffies;
6908 rq->push_cpu = 0; 6941 rq->push_cpu = 0;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 8d3c5ddfdfd..6ab59bb2947 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -210,6 +210,25 @@ static inline int has_pushable_dl_tasks(struct rq *rq)
210 210
211static int push_dl_task(struct rq *rq); 211static int push_dl_task(struct rq *rq);
212 212
213static DEFINE_PER_CPU(struct callback_head, dl_push_head);
214static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
215
216static void push_dl_tasks(struct rq *);
217static void pull_dl_task(struct rq *);
218
219static inline void queue_push_tasks(struct rq *rq)
220{
221 if (!has_pushable_dl_tasks(rq))
222 return;
223
224 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
225}
226
227static inline void queue_pull_task(struct rq *rq)
228{
229 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
230}
231
213#else 232#else
214 233
215static inline 234static inline
@@ -232,6 +251,13 @@ void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
232{ 251{
233} 252}
234 253
254static inline void queue_push_tasks(struct rq *rq)
255{
256}
257
258static inline void queue_pull_task(struct rq *rq)
259{
260}
235#endif /* CONFIG_SMP */ 261#endif /* CONFIG_SMP */
236 262
237static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags); 263static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
@@ -1005,7 +1031,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq)
1005#endif 1031#endif
1006 1032
1007#ifdef CONFIG_SMP 1033#ifdef CONFIG_SMP
1008 rq->post_schedule = has_pushable_dl_tasks(rq); 1034 queue_push_tasks(rq);
1009#endif /* CONFIG_SMP */ 1035#endif /* CONFIG_SMP */
1010 1036
1011 return p; 1037 return p;
@@ -1336,15 +1362,16 @@ static void push_dl_tasks(struct rq *rq)
1336 ; 1362 ;
1337} 1363}
1338 1364
1339static int pull_dl_task(struct rq *this_rq) 1365static void pull_dl_task(struct rq *this_rq)
1340{ 1366{
1341 int this_cpu = this_rq->cpu, ret = 0, cpu; 1367 int this_cpu = this_rq->cpu, cpu;
1342 struct task_struct *p; 1368 struct task_struct *p;
1369 bool resched = false;
1343 struct rq *src_rq; 1370 struct rq *src_rq;
1344 u64 dmin = LONG_MAX; 1371 u64 dmin = LONG_MAX;
1345 1372
1346 if (likely(!dl_overloaded(this_rq))) 1373 if (likely(!dl_overloaded(this_rq)))
1347 return 0; 1374 return;
1348 1375
1349 /* 1376 /*
1350 * Match the barrier from dl_set_overloaded; this guarantees that if we 1377 * Match the barrier from dl_set_overloaded; this guarantees that if we
@@ -1399,7 +1426,7 @@ static int pull_dl_task(struct rq *this_rq)
1399 src_rq->curr->dl.deadline)) 1426 src_rq->curr->dl.deadline))
1400 goto skip; 1427 goto skip;
1401 1428
1402 ret = 1; 1429 resched = true;
1403 1430
1404 deactivate_task(src_rq, p, 0); 1431 deactivate_task(src_rq, p, 0);
1405 set_task_cpu(p, this_cpu); 1432 set_task_cpu(p, this_cpu);
@@ -1412,7 +1439,8 @@ skip:
1412 double_unlock_balance(this_rq, src_rq); 1439 double_unlock_balance(this_rq, src_rq);
1413 } 1440 }
1414 1441
1415 return ret; 1442 if (resched)
1443 resched_task(this_rq->curr);
1416} 1444}
1417 1445
1418static void pre_schedule_dl(struct rq *rq, struct task_struct *prev) 1446static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
@@ -1422,11 +1450,6 @@ static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
1422 pull_dl_task(rq); 1450 pull_dl_task(rq);
1423} 1451}
1424 1452
1425static void post_schedule_dl(struct rq *rq)
1426{
1427 push_dl_tasks(rq);
1428}
1429
1430/* 1453/*
1431 * Since the task is not running and a reschedule is not going to happen 1454 * Since the task is not running and a reschedule is not going to happen
1432 * anytime soon on its runqueue, we try pushing it away now. 1455 * anytime soon on its runqueue, we try pushing it away now.
@@ -1529,7 +1552,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
1529 * from an overloaded cpu, if any. 1552 * from an overloaded cpu, if any.
1530 */ 1553 */
1531 if (!rq->dl.dl_nr_running) 1554 if (!rq->dl.dl_nr_running)
1532 pull_dl_task(rq); 1555 queue_pull_task(rq);
1533#endif 1556#endif
1534} 1557}
1535 1558
@@ -1539,8 +1562,6 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
1539 */ 1562 */
1540static void switched_to_dl(struct rq *rq, struct task_struct *p) 1563static void switched_to_dl(struct rq *rq, struct task_struct *p)
1541{ 1564{
1542 int check_resched = 1;
1543
1544 /* 1565 /*
1545 * If p is throttled, don't consider the possibility 1566 * If p is throttled, don't consider the possibility
1546 * of preempting rq->curr, the check will be done right 1567 * of preempting rq->curr, the check will be done right
@@ -1551,12 +1572,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
1551 1572
1552 if (p->on_rq || rq->curr != p) { 1573 if (p->on_rq || rq->curr != p) {
1553#ifdef CONFIG_SMP 1574#ifdef CONFIG_SMP
1554 if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p)) 1575 if (rq->dl.overloaded)
1555 /* Only reschedule if pushing failed */ 1576 queue_push_tasks(rq);
1556 check_resched = 0; 1577#else
1557#endif /* CONFIG_SMP */ 1578 if (task_has_dl_policy(rq->curr))
1558 if (check_resched && task_has_dl_policy(rq->curr))
1559 check_preempt_curr_dl(rq, p, 0); 1579 check_preempt_curr_dl(rq, p, 0);
1580#endif /* CONFIG_SMP */
1560 } 1581 }
1561} 1582}
1562 1583
@@ -1576,15 +1597,14 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1576 * or lowering its prio, so... 1597 * or lowering its prio, so...
1577 */ 1598 */
1578 if (!rq->dl.overloaded) 1599 if (!rq->dl.overloaded)
1579 pull_dl_task(rq); 1600 queue_pull_task(rq);
1580 1601
1581 /* 1602 /*
1582 * If we now have a earlier deadline task than p, 1603 * If we now have a earlier deadline task than p,
1583 * then reschedule, provided p is still on this 1604 * then reschedule, provided p is still on this
1584 * runqueue. 1605 * runqueue.
1585 */ 1606 */
1586 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) && 1607 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1587 rq->curr == p)
1588 resched_task(p); 1608 resched_task(p);
1589#else 1609#else
1590 /* 1610 /*
@@ -1615,7 +1635,6 @@ const struct sched_class dl_sched_class = {
1615 .rq_online = rq_online_dl, 1635 .rq_online = rq_online_dl,
1616 .rq_offline = rq_offline_dl, 1636 .rq_offline = rq_offline_dl,
1617 .pre_schedule = pre_schedule_dl, 1637 .pre_schedule = pre_schedule_dl,
1618 .post_schedule = post_schedule_dl,
1619 .task_woken = task_woken_dl, 1638 .task_woken = task_woken_dl,
1620#endif 1639#endif
1621 1640
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 516c3d9ceea..d08678d38d1 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -19,11 +19,6 @@ static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
19 idle_exit_fair(rq); 19 idle_exit_fair(rq);
20 rq_last_tick_reset(rq); 20 rq_last_tick_reset(rq);
21} 21}
22
23static void post_schedule_idle(struct rq *rq)
24{
25 idle_enter_fair(rq);
26}
27#endif /* CONFIG_SMP */ 22#endif /* CONFIG_SMP */
28/* 23/*
29 * Idle tasks are unconditionally rescheduled: 24 * Idle tasks are unconditionally rescheduled:
@@ -37,8 +32,7 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
37{ 32{
38 schedstat_inc(rq, sched_goidle); 33 schedstat_inc(rq, sched_goidle);
39#ifdef CONFIG_SMP 34#ifdef CONFIG_SMP
40 /* Trigger the post schedule to do an idle_enter for CFS */ 35 idle_enter_fair(rq);
41 rq->post_schedule = 1;
42#endif 36#endif
43 return rq->idle; 37 return rq->idle;
44} 38}
@@ -102,7 +96,6 @@ const struct sched_class idle_sched_class = {
102#ifdef CONFIG_SMP 96#ifdef CONFIG_SMP
103 .select_task_rq = select_task_rq_idle, 97 .select_task_rq = select_task_rq_idle,
104 .pre_schedule = pre_schedule_idle, 98 .pre_schedule = pre_schedule_idle,
105 .post_schedule = post_schedule_idle,
106#endif 99#endif
107 100
108 .set_curr_task = set_curr_task_idle, 101 .set_curr_task = set_curr_task_idle,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 27b8e836307..0fb72ae876e 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -315,6 +315,25 @@ static inline int has_pushable_tasks(struct rq *rq)
315 return !plist_head_empty(&rq->rt.pushable_tasks); 315 return !plist_head_empty(&rq->rt.pushable_tasks);
316} 316}
317 317
318static DEFINE_PER_CPU(struct callback_head, rt_push_head);
319static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
320
321static void push_rt_tasks(struct rq *);
322static void pull_rt_task(struct rq *);
323
324static inline void queue_push_tasks(struct rq *rq)
325{
326 if (!has_pushable_tasks(rq))
327 return;
328
329 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
330}
331
332static inline void queue_pull_task(struct rq *rq)
333{
334 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
335}
336
318static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) 337static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
319{ 338{
320 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); 339 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -359,6 +378,9 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
359{ 378{
360} 379}
361 380
381static inline void queue_push_tasks(struct rq *rq)
382{
383}
362#endif /* CONFIG_SMP */ 384#endif /* CONFIG_SMP */
363 385
364static inline int on_rt_rq(struct sched_rt_entity *rt_se) 386static inline int on_rt_rq(struct sched_rt_entity *rt_se)
@@ -1349,11 +1371,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
1349 dequeue_pushable_task(rq, p); 1371 dequeue_pushable_task(rq, p);
1350 1372
1351#ifdef CONFIG_SMP 1373#ifdef CONFIG_SMP
1352 /* 1374 queue_push_tasks(rq);
1353 * We detect this state here so that we can avoid taking the RQ
1354 * lock again later if there is no need to push
1355 */
1356 rq->post_schedule = has_pushable_tasks(rq);
1357#endif 1375#endif
1358 1376
1359 return p; 1377 return p;
@@ -1641,14 +1659,15 @@ static void push_rt_tasks(struct rq *rq)
1641 ; 1659 ;
1642} 1660}
1643 1661
1644static int pull_rt_task(struct rq *this_rq) 1662static void pull_rt_task(struct rq *this_rq)
1645{ 1663{
1646 int this_cpu = this_rq->cpu, ret = 0, cpu; 1664 int this_cpu = this_rq->cpu, cpu;
1665 bool resched = false;
1647 struct task_struct *p; 1666 struct task_struct *p;
1648 struct rq *src_rq; 1667 struct rq *src_rq;
1649 1668
1650 if (likely(!rt_overloaded(this_rq))) 1669 if (likely(!rt_overloaded(this_rq)))
1651 return 0; 1670 return;
1652 1671
1653 /* 1672 /*
1654 * Match the barrier from rt_set_overloaded; this guarantees that if we 1673 * Match the barrier from rt_set_overloaded; this guarantees that if we
@@ -1705,7 +1724,7 @@ static int pull_rt_task(struct rq *this_rq)
1705 if (p->prio < src_rq->curr->prio) 1724 if (p->prio < src_rq->curr->prio)
1706 goto skip; 1725 goto skip;
1707 1726
1708 ret = 1; 1727 resched = true;
1709 1728
1710 deactivate_task(src_rq, p, 0); 1729 deactivate_task(src_rq, p, 0);
1711 set_task_cpu(p, this_cpu); 1730 set_task_cpu(p, this_cpu);
@@ -1721,7 +1740,8 @@ skip:
1721 double_unlock_balance(this_rq, src_rq); 1740 double_unlock_balance(this_rq, src_rq);
1722 } 1741 }
1723 1742
1724 return ret; 1743 if (resched)
1744 resched_task(this_rq->curr);
1725} 1745}
1726 1746
1727static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) 1747static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
@@ -1731,11 +1751,6 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1731 pull_rt_task(rq); 1751 pull_rt_task(rq);
1732} 1752}
1733 1753
1734static void post_schedule_rt(struct rq *rq)
1735{
1736 push_rt_tasks(rq);
1737}
1738
1739/* 1754/*
1740 * If we are not running and we are not going to reschedule soon, we should 1755 * If we are not running and we are not going to reschedule soon, we should
1741 * try to push tasks away now 1756 * try to push tasks away now
@@ -1829,8 +1844,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
1829 if (!p->on_rq || rq->rt.rt_nr_running) 1844 if (!p->on_rq || rq->rt.rt_nr_running)
1830 return; 1845 return;
1831 1846
1832 if (pull_rt_task(rq)) 1847 queue_pull_task(rq);
1833 resched_task(rq->curr);
1834} 1848}
1835 1849
1836void init_sched_rt_class(void) 1850void init_sched_rt_class(void)
@@ -1851,8 +1865,6 @@ void init_sched_rt_class(void)
1851 */ 1865 */
1852static void switched_to_rt(struct rq *rq, struct task_struct *p) 1866static void switched_to_rt(struct rq *rq, struct task_struct *p)
1853{ 1867{
1854 int check_resched = 1;
1855
1856 /* 1868 /*
1857 * If we are already running, then there's nothing 1869 * If we are already running, then there's nothing
1858 * that needs to be done. But if we are not running 1870 * that needs to be done. But if we are not running
@@ -1862,13 +1874,12 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
1862 */ 1874 */
1863 if (p->on_rq && rq->curr != p) { 1875 if (p->on_rq && rq->curr != p) {
1864#ifdef CONFIG_SMP 1876#ifdef CONFIG_SMP
1865 if (rq->rt.overloaded && push_rt_task(rq) && 1877 if (rq->rt.overloaded)
1866 /* Don't resched if we changed runqueues */ 1878 queue_push_tasks(rq);
1867 rq != task_rq(p)) 1879#else
1868 check_resched = 0; 1880 if (p->prio < rq->curr->prio)
1869#endif /* CONFIG_SMP */
1870 if (check_resched && p->prio < rq->curr->prio)
1871 resched_task(rq->curr); 1881 resched_task(rq->curr);
1882#endif /* CONFIG_SMP */
1872 } 1883 }
1873} 1884}
1874 1885
@@ -1889,14 +1900,13 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1889 * may need to pull tasks to this runqueue. 1900 * may need to pull tasks to this runqueue.
1890 */ 1901 */
1891 if (oldprio < p->prio) 1902 if (oldprio < p->prio)
1892 pull_rt_task(rq); 1903 queue_pull_task(rq);
1904
1893 /* 1905 /*
1894 * If there's a higher priority task waiting to run 1906 * If there's a higher priority task waiting to run
1895 * then reschedule. Note, the above pull_rt_task 1907 * then reschedule.
1896 * can release the rq lock and p could migrate.
1897 * Only reschedule if p is still on the same runqueue.
1898 */ 1908 */
1899 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p) 1909 if (p->prio > rq->rt.highest_prio.curr)
1900 resched_task(p); 1910 resched_task(p);
1901#else 1911#else
1902 /* For UP simply resched on drop of prio */ 1912 /* For UP simply resched on drop of prio */
@@ -2008,7 +2018,6 @@ const struct sched_class rt_sched_class = {
2008 .rq_online = rq_online_rt, 2018 .rq_online = rq_online_rt,
2009 .rq_offline = rq_offline_rt, 2019 .rq_offline = rq_offline_rt,
2010 .pre_schedule = pre_schedule_rt, 2020 .pre_schedule = pre_schedule_rt,
2011 .post_schedule = post_schedule_rt,
2012 .task_woken = task_woken_rt, 2021 .task_woken = task_woken_rt,
2013 .switched_from = switched_from_rt, 2022 .switched_from = switched_from_rt,
2014#endif 2023#endif
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 835b6efa8bd..675e147a86f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -587,9 +587,10 @@ struct rq {
587 587
588 unsigned long cpu_power; 588 unsigned long cpu_power;
589 589
590 struct callback_head *balance_callback;
591
590 unsigned char idle_balance; 592 unsigned char idle_balance;
591 /* For active balancing */ 593 /* For active balancing */
592 int post_schedule;
593 int active_balance; 594 int active_balance;
594 int push_cpu; 595 int push_cpu;
595 struct cpu_stop_work active_balance_work; 596 struct cpu_stop_work active_balance_work;
@@ -690,6 +691,21 @@ extern int migrate_swap(struct task_struct *, struct task_struct *);
690 691
691#ifdef CONFIG_SMP 692#ifdef CONFIG_SMP
692 693
694static inline void
695queue_balance_callback(struct rq *rq,
696 struct callback_head *head,
697 void (*func)(struct rq *rq))
698{
699 lockdep_assert_held(&rq->lock);
700
701 if (unlikely(head->next))
702 return;
703
704 head->func = (void (*)(struct callback_head *))func;
705 head->next = rq->balance_callback;
706 rq->balance_callback = head;
707}
708
693#define rcu_dereference_check_sched_domain(p) \ 709#define rcu_dereference_check_sched_domain(p) \
694 rcu_dereference_check((p), \ 710 rcu_dereference_check((p), \
695 lockdep_is_held(&sched_domains_mutex)) 711 lockdep_is_held(&sched_domains_mutex))
@@ -1131,7 +1147,6 @@ struct sched_class {
1131 void (*migrate_task_rq)(struct task_struct *p, int next_cpu); 1147 void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
1132 1148
1133 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 1149 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1134 void (*post_schedule) (struct rq *this_rq);
1135 void (*task_waking) (struct task_struct *task); 1150 void (*task_waking) (struct task_struct *task);
1136 void (*task_woken) (struct rq *this_rq, struct task_struct *task); 1151 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1137 1152
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index ce033c7aa2e..9cff0ab82b6 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
69static unsigned int posix_clock_poll(struct file *fp, poll_table *wait) 69static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
70{ 70{
71 struct posix_clock *clk = get_posix_clock(fp); 71 struct posix_clock *clk = get_posix_clock(fp);
72 int result = 0; 72 unsigned int result = 0;
73 73
74 if (!clk) 74 if (!clk)
75 return -ENODEV; 75 return POLLERR;
76 76
77 if (clk->ops.poll) 77 if (clk->ops.poll)
78 result = clk->ops.poll(clk, fp, wait); 78 result = clk->ops.poll(clk, fp, wait);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index da41de9dc31..c798ed2fc28 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1949,12 +1949,6 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1949 goto again; 1949 goto again;
1950} 1950}
1951 1951
1952static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1953{
1954 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1955 cpu_buffer->reader_page->read = 0;
1956}
1957
1958static void rb_inc_iter(struct ring_buffer_iter *iter) 1952static void rb_inc_iter(struct ring_buffer_iter *iter)
1959{ 1953{
1960 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1954 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -3592,7 +3586,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3592 3586
3593 /* Finally update the reader page to the new head */ 3587 /* Finally update the reader page to the new head */
3594 cpu_buffer->reader_page = reader; 3588 cpu_buffer->reader_page = reader;
3595 rb_reset_reader_page(cpu_buffer); 3589 cpu_buffer->reader_page->read = 0;
3596 3590
3597 if (overwrite != cpu_buffer->last_overrun) { 3591 if (overwrite != cpu_buffer->last_overrun) {
3598 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 3592 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
@@ -3602,6 +3596,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3602 goto again; 3596 goto again;
3603 3597
3604 out: 3598 out:
3599 /* Update the read_stamp on the first event */
3600 if (reader && reader->read == 0)
3601 cpu_buffer->read_stamp = reader->page->time_stamp;
3602
3605 arch_spin_unlock(&cpu_buffer->lock); 3603 arch_spin_unlock(&cpu_buffer->lock);
3606 local_irq_restore(flags); 3604 local_irq_restore(flags);
3607 3605
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c6646a58d23..bb1ac9cbe30 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -606,7 +606,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
606 * The ftrace subsystem is for showing formats only. 606 * The ftrace subsystem is for showing formats only.
607 * They can not be enabled or disabled via the event files. 607 * They can not be enabled or disabled via the event files.
608 */ 608 */
609 if (call->class && call->class->reg) 609 if (call->class && call->class->reg &&
610 !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
610 return file; 611 return file;
611 } 612 }
612 613
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 646a8b81bee..423c9e37a9e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1475,13 +1475,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1475 timer_stats_timer_set_start_info(&dwork->timer); 1475 timer_stats_timer_set_start_info(&dwork->timer);
1476 1476
1477 dwork->wq = wq; 1477 dwork->wq = wq;
1478 /* timer isn't guaranteed to run in this cpu, record earlier */
1479 if (cpu == WORK_CPU_UNBOUND)
1480 cpu = raw_smp_processor_id();
1481 dwork->cpu = cpu; 1478 dwork->cpu = cpu;
1482 timer->expires = jiffies + delay; 1479 timer->expires = jiffies + delay;
1483 1480
1484 add_timer_on(timer, cpu); 1481 if (unlikely(cpu != WORK_CPU_UNBOUND))
1482 add_timer_on(timer, cpu);
1483 else
1484 add_timer(timer);
1485} 1485}
1486 1486
1487/** 1487/**
diff --git a/lib/devres.c b/lib/devres.c
index 823533138fa..20afaf181b2 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -423,7 +423,7 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
423 if (!iomap) 423 if (!iomap)
424 return; 424 return;
425 425
426 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 426 for (i = 0; i < PCIM_IOMAP_MAX; i++) {
427 if (!(mask & (1 << i))) 427 if (!(mask & (1 << i)))
428 continue; 428 continue;
429 429
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 2e87eecec8f..04dd542697a 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2279,7 +2279,7 @@ static int read_partial_message(struct ceph_connection *con)
2279 con->in_base_pos = -front_len - middle_len - data_len - 2279 con->in_base_pos = -front_len - middle_len - data_len -
2280 sizeof(m->footer); 2280 sizeof(m->footer);
2281 con->in_tag = CEPH_MSGR_TAG_READY; 2281 con->in_tag = CEPH_MSGR_TAG_READY;
2282 return 0; 2282 return 1;
2283 } else if ((s64)seq - (s64)con->in_seq > 1) { 2283 } else if ((s64)seq - (s64)con->in_seq > 1) {
2284 pr_err("read_partial_message bad seq %lld expected %lld\n", 2284 pr_err("read_partial_message bad seq %lld expected %lld\n",
2285 seq, con->in_seq + 1); 2285 seq, con->in_seq + 1);
@@ -2312,7 +2312,7 @@ static int read_partial_message(struct ceph_connection *con)
2312 sizeof(m->footer); 2312 sizeof(m->footer);
2313 con->in_tag = CEPH_MSGR_TAG_READY; 2313 con->in_tag = CEPH_MSGR_TAG_READY;
2314 con->in_seq++; 2314 con->in_seq++;
2315 return 0; 2315 return 1;
2316 } 2316 }
2317 2317
2318 BUG_ON(!con->in_msg); 2318 BUG_ON(!con->in_msg);
diff --git a/net/core/scm.c b/net/core/scm.c
index d30eb057fa7..cad57a1390d 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
87 *fplp = fpl; 87 *fplp = fpl;
88 fpl->count = 0; 88 fpl->count = 0;
89 fpl->max = SCM_MAX_FD; 89 fpl->max = SCM_MAX_FD;
90 fpl->user = NULL;
90 } 91 }
91 fpp = &fpl->fp[fpl->count]; 92 fpp = &fpl->fp[fpl->count];
92 93
@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
107 *fpp++ = file; 108 *fpp++ = file;
108 fpl->count++; 109 fpl->count++;
109 } 110 }
111
112 if (!fpl->user)
113 fpl->user = get_uid(current_user());
114
110 return num; 115 return num;
111} 116}
112 117
@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
119 scm->fp = NULL; 124 scm->fp = NULL;
120 for (i=fpl->count-1; i>=0; i--) 125 for (i=fpl->count-1; i>=0; i--)
121 fput(fpl->fp[i]); 126 fput(fpl->fp[i]);
127 free_uid(fpl->user);
122 kfree(fpl); 128 kfree(fpl);
123 } 129 }
124} 130}
@@ -337,6 +343,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
337 for (i = 0; i < fpl->count; i++) 343 for (i = 0; i < fpl->count; i++)
338 get_file(fpl->fp[i]); 344 get_file(fpl->fp[i]);
339 new_fpl->max = new_fpl->count; 345 new_fpl->max = new_fpl->count;
346 new_fpl->user = get_uid(fpl->user);
340 } 347 }
341 return new_fpl; 348 return new_fpl;
342} 349}
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index c49dcd0284a..56dd8ac6d28 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -61,9 +61,7 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
61 if (FIB_RES_DEV(res) == dev) 61 if (FIB_RES_DEV(res) == dev)
62 dev_match = true; 62 dev_match = true;
63#endif 63#endif
64 if (dev_match || flags & XT_RPFILTER_LOOSE) 64 return dev_match || flags & XT_RPFILTER_LOOSE;
65 return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
66 return dev_match;
67} 65}
68 66
69static bool rpfilter_is_local(const struct sk_buff *skb) 67static bool rpfilter_is_local(const struct sk_buff *skb)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3f0ec063d7f..7b74fca4d85 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4793,6 +4793,21 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
4793 return ret; 4793 return ret;
4794} 4794}
4795 4795
4796static
4797int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
4798 void __user *buffer, size_t *lenp, loff_t *ppos)
4799{
4800 struct inet6_dev *idev = ctl->extra1;
4801 int min_mtu = IPV6_MIN_MTU;
4802 struct ctl_table lctl;
4803
4804 lctl = *ctl;
4805 lctl.extra1 = &min_mtu;
4806 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
4807
4808 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
4809}
4810
4796static void dev_disable_change(struct inet6_dev *idev) 4811static void dev_disable_change(struct inet6_dev *idev)
4797{ 4812{
4798 struct netdev_notifier_info info; 4813 struct netdev_notifier_info info;
@@ -4944,7 +4959,7 @@ static struct addrconf_sysctl_table
4944 .data = &ipv6_devconf.mtu6, 4959 .data = &ipv6_devconf.mtu6,
4945 .maxlen = sizeof(int), 4960 .maxlen = sizeof(int),
4946 .mode = 0644, 4961 .mode = 0644,
4947 .proc_handler = proc_dointvec, 4962 .proc_handler = addrconf_sysctl_mtu,
4948 }, 4963 },
4949 { 4964 {
4950 .procname = "accept_ra", 4965 .procname = "accept_ra",
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index a0d17270117..bd174540eb2 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
37} 37}
38 38
39static void 39static void
40synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb, 40synproxy_send_tcp(const struct synproxy_net *snet,
41 const struct sk_buff *skb, struct sk_buff *nskb,
41 struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, 42 struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
42 struct ipv6hdr *niph, struct tcphdr *nth, 43 struct ipv6hdr *niph, struct tcphdr *nth,
43 unsigned int tcp_hdr_size) 44 unsigned int tcp_hdr_size)
44{ 45{
45 struct net *net = nf_ct_net((struct nf_conn *)nfct); 46 struct net *net = nf_ct_net(snet->tmpl);
46 struct dst_entry *dst; 47 struct dst_entry *dst;
47 struct flowi6 fl6; 48 struct flowi6 fl6;
48 49
@@ -83,7 +84,8 @@ free_nskb:
83} 84}
84 85
85static void 86static void
86synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, 87synproxy_send_client_synack(const struct synproxy_net *snet,
88 const struct sk_buff *skb, const struct tcphdr *th,
87 const struct synproxy_options *opts) 89 const struct synproxy_options *opts)
88{ 90{
89 struct sk_buff *nskb; 91 struct sk_buff *nskb;
@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
119 121
120 synproxy_build_options(nth, opts); 122 synproxy_build_options(nth, opts);
121 123
122 synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 124 synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
123 niph, nth, tcp_hdr_size); 125 niph, nth, tcp_hdr_size);
124} 126}
125 127
@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
163 165
164 synproxy_build_options(nth, opts); 166 synproxy_build_options(nth, opts);
165 167
166 synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, 168 synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
167 niph, nth, tcp_hdr_size); 169 niph, nth, tcp_hdr_size);
168} 170}
169 171
@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
203 205
204 synproxy_build_options(nth, opts); 206 synproxy_build_options(nth, opts);
205 207
206 synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 208 synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
207} 209}
208 210
209static void 211static void
@@ -241,7 +243,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
241 243
242 synproxy_build_options(nth, opts); 244 synproxy_build_options(nth, opts);
243 245
244 synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 246 synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
245} 247}
246 248
247static bool 249static bool
@@ -301,7 +303,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
301 XT_SYNPROXY_OPT_SACK_PERM | 303 XT_SYNPROXY_OPT_SACK_PERM |
302 XT_SYNPROXY_OPT_ECN); 304 XT_SYNPROXY_OPT_ECN);
303 305
304 synproxy_send_client_synack(skb, th, &opts); 306 synproxy_send_client_synack(snet, skb, th, &opts);
305 return NF_DROP; 307 return NF_DROP;
306 308
307 } else if (th->ack && !(th->fin || th->rst || th->syn)) { 309 } else if (th->ack && !(th->fin || th->rst || th->syn)) {
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 7d050ed6fe5..6d28bd434ce 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -746,10 +746,8 @@ void mesh_plink_broken(struct sta_info *sta)
746static void mesh_path_node_reclaim(struct rcu_head *rp) 746static void mesh_path_node_reclaim(struct rcu_head *rp)
747{ 747{
748 struct mpath_node *node = container_of(rp, struct mpath_node, rcu); 748 struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
749 struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
750 749
751 del_timer_sync(&node->mpath->timer); 750 del_timer_sync(&node->mpath->timer);
752 atomic_dec(&sdata->u.mesh.mpaths);
753 kfree(node->mpath); 751 kfree(node->mpath);
754 kfree(node); 752 kfree(node);
755} 753}
@@ -757,8 +755,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
757/* needs to be called with the corresponding hashwlock taken */ 755/* needs to be called with the corresponding hashwlock taken */
758static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) 756static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
759{ 757{
760 struct mesh_path *mpath; 758 struct mesh_path *mpath = node->mpath;
761 mpath = node->mpath; 759 struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
760
762 spin_lock(&mpath->state_lock); 761 spin_lock(&mpath->state_lock);
763 mpath->flags |= MESH_PATH_RESOLVING; 762 mpath->flags |= MESH_PATH_RESOLVING;
764 if (mpath->is_gate) 763 if (mpath->is_gate)
@@ -766,6 +765,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
766 hlist_del_rcu(&node->list); 765 hlist_del_rcu(&node->list);
767 call_rcu(&node->rcu, mesh_path_node_reclaim); 766 call_rcu(&node->rcu, mesh_path_node_reclaim);
768 spin_unlock(&mpath->state_lock); 767 spin_unlock(&mpath->state_lock);
768 atomic_dec(&sdata->u.mesh.mpaths);
769 atomic_dec(&tbl->entries); 769 atomic_dec(&tbl->entries);
770} 770}
771 771
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 99de2409f73..4e8d90b8fc0 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3316,9 +3316,9 @@ EXPORT_SYMBOL_GPL(nft_data_init);
3316 */ 3316 */
3317void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) 3317void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
3318{ 3318{
3319 switch (type) { 3319 if (type < NFT_DATA_VERDICT)
3320 case NFT_DATA_VALUE:
3321 return; 3320 return;
3321 switch (type) {
3322 case NFT_DATA_VERDICT: 3322 case NFT_DATA_VERDICT:
3323 return nft_verdict_uninit(data); 3323 return nft_verdict_uninit(data);
3324 default: 3324 default:
diff --git a/net/rds/send.c b/net/rds/send.c
index a82fb660ec0..44222c0607c 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -955,11 +955,13 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 		release_sock(sk);
 	}
 
-	/* racing with another thread binding seems ok here */
+	lock_sock(sk);
 	if (daddr == 0 || rs->rs_bound_addr == 0) {
+		release_sock(sk);
 		ret = -ENOTCONN; /* XXX not a great errno */
 		goto out;
 	}
+	release_sock(sk);
 
 	/* size of rm including all sgs */
 	ret = rds_rm_size(msg, payload_len);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index ed7e0b4e7f9..4b9dc246077 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -49,7 +49,6 @@
 struct rfkill {
 	spinlock_t		lock;
 
-	const char		*name;
 	enum rfkill_type	type;
 
 	unsigned long		state;
@@ -73,6 +72,7 @@ struct rfkill {
 	struct delayed_work	poll_work;
 	struct work_struct	uevent_work;
 	struct work_struct	sync_work;
+	char			name[];
 };
 #define to_rfkill(d)	container_of(d, struct rfkill, dev)
 
@@ -861,14 +861,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
 	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
 		return NULL;
 
-	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
+	rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
 	if (!rfkill)
 		return NULL;
 
 	spin_lock_init(&rfkill->lock);
 	INIT_LIST_HEAD(&rfkill->node);
 	rfkill->type = type;
-	rfkill->name = name;
+	strcpy(rfkill->name, name);
 	rfkill->ops = ops;
 	rfkill->data = ops_data;
 
@@ -1078,17 +1078,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
 	return res;
 }
 
-static bool rfkill_readable(struct rfkill_data *data)
-{
-	bool r;
-
-	mutex_lock(&data->mtx);
-	r = !list_empty(&data->events);
-	mutex_unlock(&data->mtx);
-
-	return r;
-}
-
 static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
 			       size_t count, loff_t *pos)
 {
@@ -1105,8 +1094,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
 			goto out;
 		}
 		mutex_unlock(&data->mtx);
+		/* since we re-check and it just compares pointers,
+		 * using !list_empty() without locking isn't a problem
+		 */
 		ret = wait_event_interruptible(data->read_wait,
-					       rfkill_readable(data));
+					       !list_empty(&data->events));
 		mutex_lock(&data->mtx);
 
 		if (ret)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 0adc66caae2..07edbcd8697 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1230,7 +1230,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
 	if (bp[0] == '\\' && bp[1] == 'x') {
 		/* HEX STRING */
 		bp += 2;
-		while (len < bufsize) {
+		while (len < bufsize - 1) {
 			int h, l;
 
 			h = hex_to_bin(bp[0]);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0cd18c24025..ab2eeb1cb32 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1469,7 +1469,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 	UNIXCB(skb).fp = NULL;
 
 	for (i = scm->fp->count-1; i >= 0; i--)
-		unix_notinflight(scm->fp->fp[i]);
+		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
 }
 
 static void unix_destruct_scm(struct sk_buff *skb)
@@ -1534,7 +1534,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 		return -ENOMEM;
 
 	for (i = scm->fp->count - 1; i >= 0; i--)
-		unix_inflight(scm->fp->fp[i]);
+		unix_inflight(scm->fp->user, scm->fp->fp[i]);
 	return max_level;
 }
 
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 06730fe6ad9..a72182d6750 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -122,7 +122,7 @@ struct sock *unix_get_socket(struct file *filp)
  * descriptor if it is for an AF_UNIX socket.
  */
 
-void unix_inflight(struct file *fp)
+void unix_inflight(struct user_struct *user, struct file *fp)
 {
 	struct sock *s = unix_get_socket(fp);
 
@@ -139,11 +139,11 @@ void unix_inflight(struct file *fp)
 		}
 		unix_tot_inflight++;
 	}
-	fp->f_cred->user->unix_inflight++;
+	user->unix_inflight++;
 	spin_unlock(&unix_gc_lock);
 }
 
-void unix_notinflight(struct file *fp)
+void unix_notinflight(struct user_struct *user, struct file *fp)
 {
 	struct sock *s = unix_get_socket(fp);
 
@@ -157,7 +157,7 @@ void unix_notinflight(struct file *fp)
 		list_del_init(&u->link);
 		unix_tot_inflight--;
 	}
-	fp->f_cred->user->unix_inflight--;
+	user->unix_inflight--;
 	spin_unlock(&unix_gc_lock);
 }
 
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 9c22317778e..ee625e3a56b 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -189,6 +189,20 @@ static void *mmap_file(char const *fname)
 		addr = umalloc(sb.st_size);
 		uread(fd_map, addr, sb.st_size);
 	}
+	if (sb.st_nlink != 1) {
+		/* file is hard-linked, break the hard link */
+		close(fd_map);
+		if (unlink(fname) < 0) {
+			perror(fname);
+			fail_file();
+		}
+		fd_map = open(fname, O_RDWR | O_CREAT, sb.st_mode);
+		if (fd_map < 0) {
+			perror(fname);
+			fail_file();
+		}
+		uwrite(fd_map, addr, sb.st_size);
+	}
 	return addr;
 }
 
194 208
diff --git a/tools/Makefile b/tools/Makefile
index feec3ad5fd0..6e8ac898214 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -24,6 +24,10 @@ help:
 	@echo '  from the kernel command line to build and install one of'
 	@echo '  the tools above'
 	@echo ''
+	@echo '  $$ make tools/all'
+	@echo ''
+	@echo '  builds all tools.'
+	@echo ''
 	@echo '  $$ make tools/install'
 	@echo ''
 	@echo '  installs all tools.'
@@ -58,6 +62,11 @@ turbostat x86_energy_perf_policy: FORCE
 tmon: FORCE
 	$(call descend,thermal/$@)
 
+all: acpi cgroup cpupower firewire lguest \
+		perf selftests turbostat usb \
+		virtio vm net x86_energy_perf_policy \
+		tmon
+
 acpi_install:
 	$(call descend,power/$(@:_install=),install)
 
63 72
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index f2c80d5451c..91909502952 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -152,7 +152,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 	 * do alloc nowait since if we are going to sleep anyway we
 	 * may as well sleep faulting in page
 	 */
-	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
+	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
 	if (!work)
 		return 0;