Diffstat (limited to 'drivers/misc/cache-2dmanager/cache-2dmanager.c')
-rw-r--r--  drivers/misc/cache-2dmanager/cache-2dmanager.c  208
1 files changed, 208 insertions, 0 deletions
diff --git a/drivers/misc/cache-2dmanager/cache-2dmanager.c b/drivers/misc/cache-2dmanager/cache-2dmanager.c
new file mode 100644
index 00000000000..a1f0839f8e7
--- /dev/null
+++ b/drivers/misc/cache-2dmanager/cache-2dmanager.c
@@ -0,0 +1,208 @@
/*
 * cache-2dmanager.c
 *
 * Copyright (C) 2011-2012 Texas Instruments Corporation.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cache-2dmanager.h>
#include <asm/cacheflush.h>

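/* Flush the entire L1 cache of the CPU this runs on; intended to be
 * invoked on every CPU via on_each_cpu(). */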
static void per_cpu_cache_flush_arm(void *arg)
{
	flush_cache_all();
}

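/*
 * c2dm_l1cache() - perform an L1 (inner) cache maintenance operation,
 * by virtual address, on a set of 2D regions.
 */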
void c2dm_l1cache(int count,			/* number of regions */
		struct c2dmrgn rgns[],		/* array of regions */
		int dir)			/* cache operation */
{
	unsigned long size = 0;
	int rgn;

	for (rgn = 0; rgn < count; rgn++)
		size += rgns[rgn].span * rgns[rgn].lines;

	/* If the total size of the caller's request exceeds the threshold,
	 * it is cheaper to operate on the entire cache instead.
	 *
	 * For a clean larger than the threshold we would prefer a
	 * clean-all, but the L1 cache routines provide no such operation,
	 * so we fall back to flush-all.
	 *
	 * For an invalidate larger than the threshold we would prefer an
	 * invalidate-all. However, unless the caller's buffers cover the
	 * whole cache, invalidating everything would discard dirty data
	 * belonging to other processes, which can be catastrophic. The
	 * cache must therefore be cleaned before it is invalidated;
	 * flush-all performs both in a single operation.
	 */
	if (size >= L1THRESHOLD) {
		switch (dir) {
		case DMA_TO_DEVICE:
			/* Use clean all when available */
			/* Fall through for now */
		case DMA_BIDIRECTIONAL:
			/* Can't invalidate all without cleaning, so fall
			 * through to flush all to do both. */
		case DMA_FROM_DEVICE:
			on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);
			break;
		}
	} else {
		for (rgn = 0; rgn < count; rgn++) {
			int line;
			char *start = rgns[rgn].start;

			for (line = 0; line < rgns[rgn].lines; line++) {
				if (dir == DMA_BIDIRECTIONAL)
					cpu_cache.dma_flush_range(
						start,
						start + rgns[rgn].span);
				else
					cpu_cache.dma_map_area(
						start,
						rgns[rgn].span,
						dir);
				start += rgns[rgn].stride;
			}
		}
	}
}
EXPORT_SYMBOL(c2dm_l1cache);

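/*
 * Walk the current process's page tables to translate a user virtual
 * address into a physical address. Returns 0 if the address is not
 * currently mapped.
 */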
static u32 virt2phys(u32 usr)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	pgd_t *pgd = pgd_offset(current->mm, usr);

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, usr);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, usr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_map(pmd, usr);
	if (ptep) {
		pte_t pte = *ptep;

		/* Release the temporary mapping before using the entry. */
		pte_unmap(ptep);
		if (pte_present(pte))
			return (pte_val(pte) & PAGE_MASK) |
				(usr & ~PAGE_MASK);
	}

	return 0;
}

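/*
 * c2dm_l2cache() - perform an L2 (outer) cache maintenance operation on a
 * set of 2D regions. The outer-cache routines take physical addresses, so
 * each line is translated and processed one page at a time.
 */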
void c2dm_l2cache(int count,			/* number of regions */
		struct c2dmrgn rgns[],		/* array of regions */
		int dir)			/* cache operation */
{
	unsigned long size = 0;
	int rgn;

	for (rgn = 0; rgn < count; rgn++)
		size += rgns[rgn].span * rgns[rgn].lines;

	if (size >= L2THRESHOLD) {
		switch (dir) {
		case DMA_TO_DEVICE:
			/* Use clean all when available */
			/* Fall through for now */
		case DMA_BIDIRECTIONAL:
			/* Can't invalidate all without cleaning, so fall
			 * through to flush all to do both. */
		case DMA_FROM_DEVICE:
			outer_flush_all();
			break;
		}
		return;
	}

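	/* Below the threshold, walk each line of each region. A line may
	 * cross page boundaries, and contiguous virtual pages are not
	 * necessarily contiguous in physical memory, so translate and
	 * operate on one page at a time. */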
	for (rgn = 0; rgn < count; rgn++) {
		int i, j;
		unsigned long linestart, start;
		unsigned long page_begin, end, offset,
			pageremain, lineremain;
		unsigned long phys, opsize;
		int page_num;

		/* beginning virtual address of each line */
		start = (unsigned long)rgns[rgn].start;

		for (i = 0; i < rgns[rgn].lines; i++) {

			linestart = start + (i * rgns[rgn].stride);

			/* beginning of the page for the new line */
			page_begin = linestart & PAGE_MASK;

			/* end of the new line */
			end = (unsigned long)linestart +
				rgns[rgn].span;

			page_num = DIV_ROUND_UP(
				end - page_begin, PAGE_SIZE);

			/* offset of the new line from page begin */
			offset = linestart - page_begin;

			/* track how long it is to the end of
			   the current page */
			pageremain = PAGE_SIZE - offset;

			/* keep track of how much of the line remains
			   to be processed */
			lineremain = rgns[rgn].span;

			for (j = 0; j < page_num; j++) {

				opsize = (lineremain < pageremain) ?
					lineremain : pageremain;

				phys = virt2phys(page_begin);
				if (phys) {
					phys = phys + offset;
					switch (dir) {
					case DMA_TO_DEVICE:
						outer_clean_range(
							phys, phys + opsize);
						break;
					case DMA_FROM_DEVICE:
						outer_inv_range(
							phys, phys + opsize);
						break;
					case DMA_BIDIRECTIONAL:
						outer_flush_range(
							phys, phys + opsize);
						break;
					}
				}

				lineremain -= opsize;
				/* Move to next page */
				page_begin += PAGE_SIZE;

				/* After the first page, the start address
				 * is page aligned, so the offset is 0 */
				offset = 0;

				if (!lineremain)
					break;

				pageremain -= opsize;
				if (!pageremain)
					pageremain = PAGE_SIZE;

			}
		}
	}
}
EXPORT_SYMBOL(c2dm_l2cache);