1 /******************************************************************************
2 * FILE PURPOSE: Main Routines for HPLIB Virtual Memory Allocator
3 ******************************************************************************
4 * FILE NAME: hplib_vm.c
5 *
6 * DESCRIPTION: The virtual memory APIs provide the following functionality:
7 * Maps peripheral registers into user space for LLDs.
8 * QMSS
9 * CPPI
10 * SRIO
*              PASS
12 * Timer64 (Appleton)
13 * Memory allocation routines.
14 * Routines to perform address translations (physical to virtual, virtual to physical)
16 *
17 * REVISION HISTORY:
18 *
19 * Copyright (c) Texas Instruments Incorporated 2010-2012
20 *
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
23 * are met:
24 *
25 * Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 *
28 * Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the
31 * distribution.
32 *
33 * Neither the name of Texas Instruments Incorporated nor the names of
34 * its contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
51 #include <stdint.h>
52 #include <stdio.h>
53 #include <string.h>
54 #include <sys/types.h>
55 #include <sys/stat.h>
56 #include <fcntl.h>
57 #include <sys/mman.h>
58 #include <errno.h>
59 #include <unistd.h>
60 #include <ti/drv/nwal/nwal.h>
61 #include <sys/ioctl.h>
62 #include "hplibmod.h"
63 #include "hplib.h"
64 #include "ti/csl/cslr_device.h"
/***********************RAW MEMORY ALLOCATION & TRANSLATION*************************/
/* Macro to round x up to the next multiple of y (y must be a power of two).
 * BUG FIX: the previous form ((x + y) & (~y)) was incorrect -- it did not
 * round an unaligned x up to a multiple of y (align(9,4) yielded 9) and it
 * could move an already-aligned x (align(20,4) yielded 24).  The canonical
 * round-up is ((x + y - 1) & ~(y - 1)); y == 0 is treated as "no alignment"
 * so the degenerate case keeps its old pass-through behavior. */
#define align(x,y) (((y) == 0) ? (x) : (((x) + ((y) - 1)) & ~((y) - 1)))
/* Per-pool bookkeeping (virtual/physical start and end, allocation cursor,
 * size) for every pool configured through hplib_VM_MemAllocInit(). */
hplib_virtualMemPoolAddr_T memPoolAddr[HPLIB_MAX_MEM_POOLS];

/* Legacy single-region view: these globals mirror pool 0 and are filled in
 * by hplib_VM_MemAllocInit() when it is called with pool id 0. */
uint8_t *hplib_VM_mem_start_phy = (uint8_t*)0;  /* physical base of pool 0 */
uint8_t *hplib_VM_mem_start = (uint8_t*)0;      /* virtual base of pool 0 */
uint8_t *hplib_VM_mem_end = (uint8_t*)0;        /* virtual end (one past last byte) */
uint8_t *hplib_VM_mem_end_phy = (uint8_t*)0;    /* physical end (one past last byte) */
static uint8_t *hplib_VM_mem_alloc_ptr = (uint8_t*)0;  /* next free byte in pool 0 */
static uint32_t hplib_VM_mem_size = 0;                 /* pool 0 size in bytes */

/* Additive offsets for virtual<->physical translation of pool 0 addresses.
 * NOTE(review): held in 32-bit integers -- assumes a 32-bit address space. */
uint32_t hplib_VM_virt_to_phy_mapping;
uint32_t hplib_VM_phy_to_virt_mapping;

/* File descriptor for /dev/mem, opened in hplib_vmInit() */
static int dev_mem_fd;

/* Handle to the hplib kernel module; opened in hplib_vmInit() if needed. */
extern int hplib_mod_fd;

#define HPLIB_USE_MODULE_MMAP //we will mmap through hplib kernel module, not /dev/mem
#ifndef HPLIB_USE_MODULE_MMAP
static int temp_fd;  /* legacy path: second /dev/mem fd for the CMA area mmap */
#endif
91 /**************************************************************************
92 * FUNCTION PURPOSE: Maps the give physical address to virtual memory space
93 **************************************************************************/
94 void *hplib_VM_MemMap
95 (
96 void *addr, /* Physical address */
97 uint32_t size /* Size of block */
98 )
99 {
100 void *map_base,*virt_addr,*tmpAddr;
101 uint32_t page_sz;
102 long retval;
103 uint32_t mask = (size-1);
104 uint32_t offset;
106 retval = sysconf(_SC_PAGE_SIZE);
107 if (retval == -1)
108 {
109 hplib_Log("hplib_VM_MemMap: Failed to get page size err=%s\n",
110 strerror(errno));
111 return (void *)0;
112 }
114 page_sz = (uint32_t)retval;
116 if (size%page_sz)
117 {
118 hplib_Log("hplib_VM_MemMap: error: block size not aligned to page size\n");
119 return (void *)0;
120 }
122 if ((uint32_t)addr%page_sz)
123 {
124 hplib_Log("hplib_VM_MemMap: error: addr not aligned to page size\n");
125 return (void *)0;
126 }
128 map_base = mmap(0, size, (PROT_READ|PROT_WRITE), MAP_SHARED, dev_mem_fd, (off_t)addr);
129 if(map_base == (void *) -1)
130 {
131 hplib_Log("hplib_VM_MemMap: Failed to mmap \"dev/mem\" err=%s\n",
132 strerror(errno));
133 return (void *)0;
134 }
135 virt_addr = map_base;
137 return(virt_addr);
138 }
141 /**************************************************************************
142 * FUNCTION PURPOSE: Initialize the allocated memory pool area
143 **************************************************************************/
/* Initializes memory pool i: maps the described physical region into user
 * space and records its virtual/physical bounds in memPoolAddr[i].
 *   addr != NULL : caller-supplied un-cached region, mapped via /dev/mem.
 *   addr == NULL : cached CMA area reserved by the hplib kernel module;
 *                  its physical address and size are queried from the module.
 * If i == 0 the legacy pool-0 globals and the virt<->phys translation
 * offsets are also populated.
 * Returns HPLIB_TRUE on success, HPLIB_FALSE on a mapping/open failure. */
HPLIB_BOOL_T hplib_VM_MemAllocInit
(
uint8_t *addr, /* Physical address (NULL selects the kernel-module CMA area) */
uint32_t size, /* Size of block in bytes */
uint8_t i /* pool id (index into memPoolAddr) */
)
{
    void *map_base = NULL;

    /* Set up memory mapping for un-cached memory, this requires physical address and size of memory region to map */
    if ((addr != NULL ) && size)
    {
        map_base = hplib_VM_MemMap((void *)addr, size);
        if (!map_base)
        {
            hplib_Log("hplib_VM_MemAllocInit(): Failed to memory map for uncached memory, addr (0x%x)", addr);
            return HPLIB_FALSE;
        }
        /* Record pool bounds; the allocation cursor starts at the base. */
        memPoolAddr[i].memAllocPtr = memPoolAddr[i].memStart = map_base;
        memPoolAddr[i].memSize = size;
        memPoolAddr[i].memEnd = memPoolAddr[i].memStart + size;
        memPoolAddr[i].memStartPhy = addr;
        memPoolAddr[i].memEndPhy = memPoolAddr[i].memStartPhy + memPoolAddr[i].memSize;
        hplib_Log("hplib_VM_MemAllocInit(un-cached): Phy Addr %x Memory (%d bytes) mapped at address %p.\n", addr,size, map_base);
        hplib_Log("hplib_VM_MemAllocInit(un-cached): End_phy 0x%x, start_phy: 0x%x\n", memPoolAddr[i].memEndPhy, memPoolAddr[i].memStartPhy);
    }
    /* Cached path: the kernel module already reserved a CMA area for us. */
    if (addr == NULL)
    {
        addr= ( uint8_t *) hplib_utilGetPhysOfBufferArea(); //get address that was allocated for us by the kernel module
        size = hplib_utilGetSizeOfBufferArea(); //get the size that was allocated
#ifdef HPLIB_USE_MODULE_MMAP
        map_base = (void *) hplib_utilGetVaOfBufferArea(HPLIBMOD_MMAP_DMA_MEM_OFFSET, size); //mmap into our space, return va
#else
        /* Legacy path: map the CMA area through a second /dev/mem descriptor. */
        if( (temp_fd = open("/dev/mem", O_RDWR )) == -1)
        {
            hplib_Log("hplib_VM_MemAllocInit(): failed to open dev/mem again cached err=%d\n",errno);
            return HPLIB_FALSE;
        }
        /* NOTE(review): addr (a uint8_t*) is passed where mmap expects an
         * off_t offset -- relies on an implicit pointer-to-integer
         * conversion; verify on 64-bit builds. */
        map_base = mmap(0,size , PROT_READ | PROT_WRITE, MAP_SHARED, temp_fd, addr);
        if(map_base == (void *) -1)
        {
            hplib_Log("hplib_VM_MemAllocInit: failed to mmap CMA area at phy %x err=%d\n",
                      addr, errno);
            return HPLIB_FALSE;
        }
#endif
        /* NOTE(review): the module-mmap path does not check map_base for
         * failure before recording the pool bounds below. */
        memPoolAddr[i].memAllocPtr =memPoolAddr[i].memStart = map_base;
        memPoolAddr[i].memSize =size;
        memPoolAddr[i].memEnd = memPoolAddr[i].memStart + size;
        memPoolAddr[i].memStartPhy = addr;
        memPoolAddr[i].memEndPhy = memPoolAddr[i].memStartPhy + memPoolAddr[i].memSize;

        hplib_Log("hplib_VM_MemAllocInit(cached): Phy Addr %x Memory (%d bytes) mapped at address %p.\n", addr,size, map_base);
        hplib_Log("hplib_VM_MemAllocInit(cached): End_phy 0x%x, start_phy: 0x%x\n", memPoolAddr[i].memEndPhy, memPoolAddr[i].memStartPhy);
    }
    /* Pool 0 also feeds the legacy single-region globals and the
     * virt<->phys translation offsets.
     * NOTE(review): if addr != NULL but size == 0, neither branch above
     * runs and these globals are set from a NULL map_base -- confirm
     * callers never pass that combination. */
    if (i== 0)
    {
        hplib_VM_mem_alloc_ptr = hplib_VM_mem_start = map_base;
        hplib_VM_mem_size = size;
        hplib_VM_mem_end = hplib_VM_mem_start + hplib_VM_mem_size;
        hplib_VM_mem_start_phy = addr;
        hplib_VM_mem_end_phy = hplib_VM_mem_start_phy + hplib_VM_mem_size;
        hplib_VM_virt_to_phy_mapping = (uint32_t) hplib_VM_mem_start_phy - (uint32_t)hplib_VM_mem_start;
        hplib_VM_phy_to_virt_mapping = (uint32_t)hplib_VM_mem_start - (uint32_t)hplib_VM_mem_start_phy;
    }
    return HPLIB_TRUE;
}
215 /**************************************************************************
216 * FUNCTION PURPOSE: Returns the free amount of buffer/descriptor area for the memory
217 *pool specified.
218 **************************************************************************/
220 int hplib_vmGetMemPoolRemainder(int poolId)
221 {
222 uint32_t key;
224 if (poolId > HPLIB_MAX_MEM_POOLS)
225 return 0;
227 return (int) ((unsigned int) memPoolAddr[poolId].memEnd) - ((unsigned int) memPoolAddr[poolId].memAllocPtr);
228 }
231 /*****************************************************************************
232 * FUNCTION PURPOSE: Allocates memory for Buffer/Descriptors from memory pool specified
233 *****************************************************************************/
234 void* hplib_vmMemAlloc
235 (
236 uint32_t size,
237 uint32_t align,
238 int poolId
239 )
240 {
241 uint32_t key;
242 uint8_t *alloc_ptr;
243 void *p_block =NULL;
245 if (poolId > HPLIB_MAX_MEM_POOLS)
246 return p_block;
248 Osal_hplibCsEnter();
249 alloc_ptr = (uint8_t*)align((uint32_t)memPoolAddr[poolId].memAllocPtr, align);
250 if ((alloc_ptr + size) < memPoolAddr[poolId].memEnd)
251 {
252 p_block =(void *)alloc_ptr;
253 memPoolAddr[poolId].memAllocPtr = alloc_ptr + size;
254 Osal_hplibCsExit(key);
255 memset (p_block, 0, size);
256 return p_block;
257 }
258 Osal_hplibCsExit(key);
259 return p_block;
260 }
263 /********************************************************************
264 *FUNCTION PURPOSE: The function API is used to release/unmap continuous
265 * block of memory allocated via hplib_VM_MemorySetup function,
266 * remove mapping of virtual memory for peripherals.
267 ********************************************************************/
void hplib_vmTeardown(void)
{
    /* Release the hplib kernel module handle. */
    hplib_utilModClose();
    /* Close the /dev/mem descriptor used for peripheral register mappings.
     * NOTE(review): no munmap() is issued here for the regions created in
     * hplib_vmInit()/hplib_VM_MemAllocInit(); only the descriptors are
     * closed -- confirm that is the intended teardown contract. */
    close(dev_mem_fd);
#ifndef HPLIB_USE_MODULE_MMAP
    close(temp_fd);  /* legacy path: second /dev/mem fd used for the CMA mmap */
#endif
}
278 /********************************************************************
279 * FUNCTION PURPOSE: Allocate continuous block of cached memory via CMA and
280 * optionally un-cached memory if specified, maps virtual memory for peripheral registers.
281 ********************************************************************/
/* One-time initialization of hplib virtual memory:
 *  - opens /dev/mem and (if needed) the hplib kernel module,
 *  - initializes each requested memory pool per its attributes,
 *  - maps QMSS CFG/DATA, SRIO and PASS peripheral register blocks and
 *    returns their virtual addresses through *hplib_vmaddr.
 * Returns hplib_OK on success.
 * NOTE(review): the failure returns mix several conventions -- 1 (already
 * called), -1 (bad num_pools), HPLIB_FALSE (open failures),
 * hplib_ERR_NOMEM and hplib_FAILURE -- even though the declared return
 * type is hplib_RetValue; callers should be audited before unifying.
 * NOTE(review): num_pools is only checked against 0, not against
 * HPLIB_MAX_MEM_POOLS, and the first_time guard is not thread-safe. */
hplib_RetValue hplib_vmInit(hplib_virtualAddrInfo_T *hplib_vmaddr, int num_pools, hplib_memPoolAttr_T *mempool_attr)
{
    /* Guard against repeat initialization (process-wide, not thread-safe). */
    static int first_time = HPLIB_TRUE;
    int i;

    if (first_time == HPLIB_TRUE)
        first_time = HPLIB_FALSE;
    else
    {
        hplib_Log("hplib_vmInit(): already called, returning\n");
        return 1;
    }

    if (num_pools == 0)
    {
        hplib_Log("hplib_vmInit(): Error, invalid number of pools\n");
        return -1;
    }

    /* Open dev/mem, since we need for QM, CPPI, etc */
    if((dev_mem_fd = open("/dev/mem", (O_RDWR | O_SYNC))) == -1)
    {
        hplib_Log("hplib_vmInit: Failed to open \"dev/mem\" err=%s\n",
            strerror(errno));
        return HPLIB_FALSE;
    }
    /* Open kernel module since we need it for PA to VA mappings */
    if (hplib_mod_fd == -1)
    {
        hplib_mod_fd=hplib_utilModOpen();
        if (hplib_mod_fd == -1)
        {
            hplib_Log("hplib_vmInit:: failed to open /dev/netapi: '%s'\n", strerror(errno));
            return HPLIB_FALSE;
        }
    }
    /* Set up each pool according to its attribute (cached CMA vs un-cached). */
    for (i=0;i < num_pools; i++)
    {
        /* Initialize memory which was allocated from DDR via hplibmod CMA */
        if (mempool_attr[i].attr == HPLIB_ATTR_KM_CACHED0) {
            /* NULL/0 tells hplib_VM_MemAllocInit to query the kernel module
             * for the CMA area's physical address and size. */
            if (hplib_VM_MemAllocInit( NULL, 0, i) == HPLIB_FALSE)
            {
                hplib_Log("hplib_vmInit: hplib_VM_MemAllocInit from DDR/CMA failed\n");
                return hplib_ERR_NOMEM;
            }
        }
        /* Initialize memory for un-cached memory, make sure size and phys_addr passed in and have valid values */
        if ((mempool_attr[i].attr == HPLIB_ATTR_UN_CACHED) && mempool_attr[i].size && mempool_attr[i].phys_addr)
        {
            if (hplib_VM_MemAllocInit((uint8_t*)mempool_attr[i].phys_addr,
                                      mempool_attr[i].size, i) == HPLIB_FALSE)
            {
                hplib_Log(" hplib_vmInit: hplib_VM_MemAllocInit from un-cached memory failed\n");
                return hplib_ERR_NOMEM;
            }
        }
    }

    /* Create virtual memory maps for peripherals */
    /* QMSS CFG Regs */
    hplib_vmaddr->qmssCfgVaddr = hplib_VM_MemMap((void*)QMSS_CFG_BASE_ADDR, QMSS_CFG_BLK_SZ);
    if (!hplib_vmaddr->qmssCfgVaddr)
    {
        hplib_Log("hplib_vmInit: Failed to map QMSS CFG registers\n");
        return hplib_FAILURE;
    }
    hplib_Log("hplib_vmInit: QMSS_CFG_BASE_ADDR:0x%x Memory mapped at address %p.\n",
        (void*)QMSS_CFG_BASE_ADDR, hplib_vmaddr->qmssCfgVaddr);

    /* QMSS DATA Regs: via the kernel module when HPLIB_USE_MODULE_MMAP is
     * defined, otherwise directly through /dev/mem. */
#ifdef HPLIB_USE_MODULE_MMAP
    hplib_vmaddr->qmssDataVaddr = (void *) hplib_utilGetVaOfBufferArea(HPLIBMOD_MMAP_QM_DATA_REG_MEM_OFFSET, QMSS_DATA_BLK_SZ);
#else
    hplib_vmaddr->qmssDataVaddr = hplib_VM_MemMap((void*)QMSS_DATA_BASE_ADDR,
                                                  QMSS_DATA_BLK_SZ);
#endif

    if (!hplib_vmaddr->qmssDataVaddr)
    {
        hplib_Log("hplib_vmInit(): Failed to map QMSS DATA registers\n");
        return hplib_FAILURE;
    }
    hplib_Log("hplib_vmInit(): QMSS_DATA_BASE_ADDR:0x%x Memory mapped at address %p.\n",
        (void*)QMSS_DATA_BASE_ADDR, hplib_vmaddr->qmssDataVaddr);

    /* SRIO CFG Regs */
    hplib_vmaddr->srioCfgVaddr = hplib_VM_MemMap((void*)SRIO_CFG_BASE_ADDR,
                                                 SRIO_CFG_BLK_SZ);
    if (!hplib_vmaddr->srioCfgVaddr)
    {
        hplib_Log("hplib_vmInit(): Failed to map SRIO CFG registers\n");
        return hplib_FAILURE;
    }
    hplib_Log("hplib_vmInit(): SRIO_CFG_BASE_ADDR:0x%x Memory mapped at address %p.\n",
        (void*)SRIO_CFG_BASE_ADDR, hplib_vmaddr->srioCfgVaddr);

    /* PASS CFG Regs */
    hplib_vmaddr->passCfgVaddr = hplib_VM_MemMap((void*)PASS_CFG_BASE_ADDR,
                                                 PASS_CFG_BLK_SZ);
    if (!hplib_vmaddr->passCfgVaddr)
    {
        hplib_Log("hplib_vmInit(): Failed to map PASS CFG registers\n");
        return hplib_FAILURE;
    }
    hplib_Log("hplib_vmInit(): PASS_CFG_BASE_ADDR:0x%x Memory mapped at address %p.\n",
        (void*)PASS_CFG_BASE_ADDR, hplib_vmaddr->passCfgVaddr);

    /* Timer64 registers are only mapped on non-A15 devices. */
#ifndef CORTEX_A15
    /* (2f) Timer */
    t64_memmap(dev_mem_fd);
#endif

    return hplib_OK;
}