Diffstat (limited to 'arch/sparc/include/asm/mmu_context_64.h')
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h | 46 +++++++++++++---------------------------------
1 file changed, 13 insertions(+), 33 deletions(-)
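
The headline change in the diff below: the secondary MMU context load moves into the TSB switch path. __tsb_context_switch() gains a secondary_ctx parameter, switch_mm() now calls the new tsb_context_switch_ctx() with the mm's hardware context bits, and the old tsb_context_switch() name survives as a macro passing 0. A minimal before/after sketch of the calling convention, using only identifiers that appear in the diff; whether ctx == 0 means "skip the context register write" is an assumption about the tsb.S side, which is not part of this patch:

    /* Before: context register and TSB state were updated in two steps. */
    load_secondary_context(mm);
    tsb_context_switch(mm);

    /* After: one call hands the low-level routine both the TSB
     * configuration and the hardware context bits together.
     */
    tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));

    /* Callers that only want the TSB switch keep the old name;
     * ctx == 0 presumably tells the assembly side to leave the
     * secondary context register alone (assumption, tsb.S not shown).
     */
    tsb_context_switch(mm);  /* expands to tsb_context_switch_ctx(mm, 0) */
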
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index b84be675e507..0cdeb2b483a0 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -17,22 +17,19 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
 void __tsb_context_switch(unsigned long pgd_pa,
                           struct tsb_config *tsb_base,
                           struct tsb_config *tsb_huge,
-                          unsigned long tsb_descr_pa);
+                          unsigned long tsb_descr_pa,
+                          unsigned long secondary_ctx);
 
-static inline void tsb_context_switch(struct mm_struct *mm)
+static inline void tsb_context_switch_ctx(struct mm_struct *mm,
+                                          unsigned long ctx)
 {
         __tsb_context_switch(__pa(mm->pgd),
                              &mm->context.tsb_block[0],
@@ -43,9 +40,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 #else
                              NULL
 #endif
-                             , __pa(&mm->context.tsb_descr[0]));
+                             , __pa(&mm->context.tsb_descr[0]),
+                             ctx);
 }
 
+#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
+
 void tsb_grow(struct mm_struct *mm,
               unsigned long tsb_index,
               unsigned long mm_rss);
@@ -74,8 +74,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
         unsigned long ctx_valid, flags;
-        int cpu;
+        int cpu = smp_processor_id();
 
+        per_cpu(per_cpu_secondary_mm, cpu) = mm;
         if (unlikely(mm == &init_mm))
                 return;
 
@@ -114,14 +115,12 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
          * cpu0 to update it's TSB because at that point the cpu_vm_mask
          * only had cpu1 set in it.
          */
-        load_secondary_context(mm);
-        tsb_context_switch(mm);
+        tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 
         /* Any time a processor runs a context on an address space
          * for the first time, we must flush that context out of the
          * local TLB.
          */
-        cpu = smp_processor_id();
         if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
                 cpumask_set_cpu(cpu, mm_cpumask(mm));
                 __flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -131,26 +130,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 }
 
 #define deactivate_mm(tsk,mm)   do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-        unsigned long flags;
-        int cpu;
-
-        spin_lock_irqsave(&mm->context.lock, flags);
-        if (!CTX_VALID(mm->context))
-                get_new_mmu_context(mm);
-        cpu = smp_processor_id();
-        if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-                cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-        load_secondary_context(mm);
-        __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-        tsb_context_switch(mm);
-        spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
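
Two smaller points round out the patch. First, activate_mm() collapses to switch_mm(active_mm, mm, NULL), which the deleted open-coded version duplicated almost line for line. Second, the new per_cpu_secondary_mm array records, per CPU, the mm whose context was last made secondary; its consumer lives outside this header. A hypothetical sketch of how such a consumer might walk the array follows; the function name and body are invented for illustration, and only per_cpu_secondary_mm, per_cpu(), and for_each_online_cpu() come from the diff or the stock kernel API:

    /* Hypothetical, for illustration only: visit every CPU whose
     * secondary context register was last loaded with @mm, e.g. so
     * a context-ID wrap handler can assign the mm a fresh context.
     * The real consumer of per_cpu_secondary_mm is not in this diff.
     */
    static void sketch_visit_secondary_cpus(struct mm_struct *mm)
    {
            int cpu;

            for_each_online_cpu(cpu) {
                    if (per_cpu(per_cpu_secondary_mm, cpu) == mm) {
                            /* This CPU is running @mm as its secondary
                             * context and would need a reload here.
                             */
                    }
            }
    }

Because switch_mm() stores into per_cpu_secondary_mm before the early return for init_mm, every context switch keeps the per-CPU record current, which is presumably what lets wrap handling avoid broadcasting to CPUs that never loaded the mm.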