[glsdk/meta-ti-glsdk.git] / recipes-kernel / linux / linux-omap / linus / 0004-ARM-fix-cache-xsc3l2-after-stack-based-kmap_atomic.patch
1 From fc077c0fbb09ca255691d05789076d121ae11789 Mon Sep 17 00:00:00 2001
2 From: Nicolas Pitre <nicolas.pitre@linaro.org>
3 Date: Wed, 15 Dec 2010 23:29:04 -0500
4 Subject: [PATCH 04/65] ARM: fix cache-xsc3l2 after stack based kmap_atomic()
6 Since commit 3e4d3af501 "mm: stack based kmap_atomic()", it is actively
7 wrong to rely on fixed kmap type indices (namely KM_L2_CACHE) as
8 kmap_atomic() totally ignores them and a concurrent instance of it may
9 happily reuse any slot for any purpose. Because kmap_atomic() is now
10 able to deal with reentrancy, we can get rid of the ad hoc mapping here,
11 and we don't even have to disable IRQs anymore (highmem case).
13 While the code is made much simpler, there is a needless cache flush
14 introduced by the usage of __kunmap_atomic(). It is not clear whether
15 removing that flush would be worth the cost in code maintenance (I
16 don't think there are that many highmem users on that
17 platform if at all anyway).
19 Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
20 ---
21 arch/arm/mm/cache-xsc3l2.c | 57 ++++++++++++++++---------------------------
22 1 files changed, 21 insertions(+), 36 deletions(-)
24 diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
25 index c315492..5a32020 100644
26 --- a/arch/arm/mm/cache-xsc3l2.c
27 +++ b/arch/arm/mm/cache-xsc3l2.c
28 @@ -17,14 +17,10 @@
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
30 */
31 #include <linux/init.h>
32 +#include <linux/highmem.h>
33 #include <asm/system.h>
34 #include <asm/cputype.h>
35 #include <asm/cacheflush.h>
36 -#include <asm/kmap_types.h>
37 -#include <asm/fixmap.h>
38 -#include <asm/pgtable.h>
39 -#include <asm/tlbflush.h>
40 -#include "mm.h"
42 #define CR_L2 (1 << 26)
44 @@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void)
45 dsb();
46 }
48 +static inline void l2_unmap_va(unsigned long va)
49 +{
50 #ifdef CONFIG_HIGHMEM
51 -#define l2_map_save_flags(x) raw_local_save_flags(x)
52 -#define l2_map_restore_flags(x) raw_local_irq_restore(x)
53 -#else
54 -#define l2_map_save_flags(x) ((x) = 0)
55 -#define l2_map_restore_flags(x) ((void)(x))
56 + if (va != -1)
57 + kunmap_atomic((void *)va);
58 #endif
59 +}
61 -static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
62 - unsigned long flags)
63 +static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
64 {
65 #ifdef CONFIG_HIGHMEM
66 unsigned long va = prev_va & PAGE_MASK;
67 @@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
68 /*
69 * Switching to a new page. Because cache ops are
70 * using virtual addresses only, we must put a mapping
71 - * in place for it. We also enable interrupts for a
72 - * short while and disable them again to protect this
73 - * mapping.
74 + * in place for it.
75 */
76 - unsigned long idx;
77 - raw_local_irq_restore(flags);
78 - idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
79 - va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
80 - raw_local_irq_restore(flags | PSR_I_BIT);
81 - set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
82 - local_flush_tlb_kernel_page(va);
83 + l2_unmap_va(prev_va);
84 + va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
85 }
86 return va + (pa_offset >> (32 - PAGE_SHIFT));
87 #else
88 @@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
90 static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
91 {
92 - unsigned long vaddr, flags;
93 + unsigned long vaddr;
95 if (start == 0 && end == -1ul) {
96 xsc3_l2_inv_all();
97 @@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
98 }
100 vaddr = -1; /* to force the first mapping */
101 - l2_map_save_flags(flags);
103 /*
104 * Clean and invalidate partial first cache line.
105 */
106 if (start & (CACHE_LINE_SIZE - 1)) {
107 - vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
108 + vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
109 xsc3_l2_clean_mva(vaddr);
110 xsc3_l2_inv_mva(vaddr);
111 start = (start | (CACHE_LINE_SIZE - 1)) + 1;
112 @@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
113 * Invalidate all full cache lines between 'start' and 'end'.
114 */
115 while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
116 - vaddr = l2_map_va(start, vaddr, flags);
117 + vaddr = l2_map_va(start, vaddr);
118 xsc3_l2_inv_mva(vaddr);
119 start += CACHE_LINE_SIZE;
120 }
121 @@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
122 * Clean and invalidate partial last cache line.
123 */
124 if (start < end) {
125 - vaddr = l2_map_va(start, vaddr, flags);
126 + vaddr = l2_map_va(start, vaddr);
127 xsc3_l2_clean_mva(vaddr);
128 xsc3_l2_inv_mva(vaddr);
129 }
131 - l2_map_restore_flags(flags);
132 + l2_unmap_va(vaddr);
134 dsb();
135 }
137 static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
138 {
139 - unsigned long vaddr, flags;
140 + unsigned long vaddr;
142 vaddr = -1; /* to force the first mapping */
143 - l2_map_save_flags(flags);
145 start &= ~(CACHE_LINE_SIZE - 1);
146 while (start < end) {
147 - vaddr = l2_map_va(start, vaddr, flags);
148 + vaddr = l2_map_va(start, vaddr);
149 xsc3_l2_clean_mva(vaddr);
150 start += CACHE_LINE_SIZE;
151 }
153 - l2_map_restore_flags(flags);
154 + l2_unmap_va(vaddr);
156 dsb();
157 }
158 @@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void)
160 static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
161 {
162 - unsigned long vaddr, flags;
163 + unsigned long vaddr;
165 if (start == 0 && end == -1ul) {
166 xsc3_l2_flush_all();
167 @@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
168 }
170 vaddr = -1; /* to force the first mapping */
171 - l2_map_save_flags(flags);
173 start &= ~(CACHE_LINE_SIZE - 1);
174 while (start < end) {
175 - vaddr = l2_map_va(start, vaddr, flags);
176 + vaddr = l2_map_va(start, vaddr);
177 xsc3_l2_clean_mva(vaddr);
178 xsc3_l2_inv_mva(vaddr);
179 start += CACHE_LINE_SIZE;
180 }
182 - l2_map_restore_flags(flags);
183 + l2_unmap_va(vaddr);
185 dsb();
186 }
187 --
188 1.6.6.1