author     David S. Miller <davem@davemloft.net>
           Sun, 28 Oct 2012 06:00:41 +0000 (23:00 -0700)
committer  David S. Miller <davem@davemloft.net>
           Sun, 28 Oct 2012 06:00:41 +0000 (23:00 -0700)
parent     270c10e
In atomic backoff and cpu_relax(), use the pause instruction
found on SPARC-T4 and later.
It makes the cpu strand unselectable for the given number of
cycles, unless an intervening disrupting trap occurs.
Signed-off-by: David S. Miller <davem@davemloft.net>
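For context: on T4, %asr27 is the pause register, and writing a cycle count
to it stalls the issuing strand, exactly as the patched code below does with
"wr ..., %asr27".  A minimal sketch of issuing the pause directly, using a
helper name of our own choosing (not part of this commit):

        /* Hypothetical helper, not from this patch: pause the current
         * strand for roughly "cycles" cycles on SPARC-T4 and later.
         * A disrupting trap ends the pause early. */
        static inline void t4_pause(unsigned long cycles)
        {
                __asm__ __volatile__("wr %0, 0, %%asr27"
                                     : : "r" (cycles) : "memory");
        }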
arch/sparc/include/asm/backoff.h
index 64b077b3b13b31e2a36c65c714e29ae72c08acad..20f01df0871b18eac5e7007ca2db3605847e3d18 100644 (file)
#define BACKOFF_LABEL(spin_label, continue_label) \
spin_label
-#define BACKOFF_SPIN(reg, tmp, label) \
- mov reg, tmp; \
-88: rd %ccr, %g0; \
- rd %ccr, %g0; \
- rd %ccr, %g0; \
- brnz,pt tmp, 88b; \
- sub tmp, 1, tmp; \
- set BACKOFF_LIMIT, tmp; \
- cmp reg, tmp; \
- bg,pn %xcc, label; \
- nop; \
- ba,pt %xcc, label; \
- sllx reg, 1, reg;
+#define BACKOFF_SPIN(reg, tmp, label) \
+ mov reg, tmp; \
+88: rd %ccr, %g0; \
+ rd %ccr, %g0; \
+ rd %ccr, %g0; \
+ .section .pause_patch,"ax"; \
+ .word 88b; \
+ sllx tmp, 7, tmp; \
+ wr tmp, 0, %asr27; \
+ clr tmp; \
+ .previous; \
+ brnz,pt tmp, 88b; \
+ sub tmp, 1, tmp; \
+ set BACKOFF_LIMIT, tmp; \
+ cmp reg, tmp; \
+ bg,pn %xcc, label; \
+ nop; \
+ ba,pt %xcc, label; \
+ sllx reg, 1, reg;
#else
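The unpatched spin body burns cycles with three reads of %ccr per loop
iteration, then doubles the backoff count until it passes BACKOFF_LIMIT; on
T4 the patch replaces the reads with a single pause of count*128 cycles (the
sllx by 7) and clears tmp so the loop falls through at once.  A rough C
rendering of the control flow, as a sketch only (BACKOFF_LIMIT and
cpu_relax() as used by this patch):

        /* Sketch of what BACKOFF_SPIN computes: spin for "count"
         * iterations, then return the next (doubled, capped) count. */
        static unsigned long backoff_spin(unsigned long count)
        {
                unsigned long n;

                for (n = count; n != 0; n--)
                        cpu_relax();            /* the 88: loop */
                if (count <= BACKOFF_LIMIT)
                        count <<= 1;            /* exponential backoff */
                return count;
        }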
arch/sparc/include/asm/processor_64.h
index 986563409469bcabd532942808466c84c39e6ddd..9cdf52eec48ab222b551f5f9c94665c21fb0322d 100644 (file)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
-#define cpu_relax() asm volatile("rd %%ccr, %%g0\n\t" \
- "rd %%ccr, %%g0\n\t" \
- "rd %%ccr, %%g0" \
+#define cpu_relax() asm volatile("\n99:\n\t" \
+ "rd %%ccr, %%g0\n\t" \
+ "rd %%ccr, %%g0\n\t" \
+ "rd %%ccr, %%g0\n\t" \
+ ".section .pause_patch,\"ax\"\n\t"\
+ ".word 99b\n\t" \
+ "wr %%g0, 128, %%asr27\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".previous" \
::: "memory")
/* Prefetch support. This is tuned for UltraSPARC-III and later.
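cpu_relax() is what spin-wait loops call between polls; on pre-T4 parts it
remains the three %ccr reads, while on T4 the patched body pauses the strand
for 128 cycles per call.  A usage sketch, where "ready" is a made-up flag
for illustration:

        /* Usage sketch: polling a shared flag politely, so the spin
         * loop does not hog issue slots on the core's other strands. */
        while (!ACCESS_ONCE(ready))
                cpu_relax();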
arch/sparc/kernel/entry.h
index 0c218e4c0881fba70c1748e6a4c295e0db280652..51742df63c757ee3dd017cbae8a42c1100e8f577 100644 (file)
extern struct popc_6insn_patch_entry __popc_6insn_patch,
__popc_6insn_patch_end;
+struct pause_patch_entry {
+ unsigned int addr;
+ unsigned int insns[3];
+};
+extern struct pause_patch_entry __pause_patch,
+ __pause_patch_end;
+
extern void __init per_cpu_patch(void);
extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
struct sun4v_1insn_patch_entry *);
arch/sparc/kernel/setup_64.c
index 0800e71d8a880242083688b385e88d564e4a681a..b45cff408de307e5ef123393f2fdca9ce9e9a8a9 100644 (file)
}
}
+static void __init pause_patch(void)
+{
+ struct pause_patch_entry *p;
+
+ p = &__pause_patch;
+ while (p < &__pause_patch_end) {
+ unsigned long i, addr = p->addr;
+
+ for (i = 0; i < 3; i++) {
+ *(unsigned int *) (addr + (i * 4)) = p->insns[i];
+ wmb();
+ __asm__ __volatile__("flush %0"
+ : : "r" (addr + (i * 4)));
+ }
+
+ p++;
+ }
+}
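pause_patch() rewrites live kernel text, so each patched word is followed by
wmb() to order the store and a "flush" so the strand refetches the updated
instruction rather than a stale I-cache copy.  The per-word step from the
loop above, pulled out into a sketch:

        /* Sketch of the per-word patching step used above: store the
         * new instruction, order the store, then flush the address so
         * the updated word is refetched. */
        static void patch_one_insn(unsigned long addr, unsigned int insn)
        {
                *(unsigned int *) addr = insn;
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr));
        }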
+
#ifdef CONFIG_SMP
void __init boot_cpu_id_too_large(int cpu)
{
if (sparc64_elf_hwcap & AV_SPARC_POPC)
popc_patch();
+ if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
+ pause_patch();
}
void __init setup_arch(char **cmdline_p)
arch/sparc/kernel/vmlinux.lds.S
index 89c2c29f154b4c45114df0dce93feee4ce8330af..847f9f793618f097eec67a558d4adbb713e46a47 100644 (file)
*(.popc_6insn_patch)
__popc_6insn_patch_end = .;
}
+ .pause_patch : {
+ __pause_patch = .;
+ *(.pause_patch)
+ __pause_patch_end = .;
+ }
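The linker gathers every ".section .pause_patch" fragment emitted by
BACKOFF_SPIN and cpu_relax() into a single array bounded by __pause_patch
and __pause_patch_end, which is exactly what pause_patch() walks.  Since
each entry is a fixed four words, the entry count also falls out of the
bounding symbols, as a sketch:

        /* Sketch: the entries between the bounding symbols form a
         * plain array of fixed-size records. */
        size_t nr = ((unsigned long) &__pause_patch_end -
                     (unsigned long) &__pause_patch) /
                    sizeof(struct pause_patch_entry);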
PERCPU_SECTION(SMP_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);