author    Sam Nelson <sam.nelson@ti.com>  2016-04-01 10:11:27 -0500
committer Sam Nelson <sam.nelson@ti.com>  2016-04-01 12:19:20 -0500
commit    4404e26b6185d58049c22cb4cbc836e587cb3909 (patch)
tree      53494e755d9e3f25a875abe517c30c59172778e8
parent    489b669540c5545125ab5d9f3f2cdb5ec5fc09dd (diff)
download  mpm-transport-4404e26b6185d58049c22cb4cbc836e587cb3909.tar.gz
          mpm-transport-4404e26b6185d58049c22cb4cbc836e587cb3909.tar.xz
          mpm-transport-4404e26b6185d58049c22cb4cbc836e587cb3909.zip
shared_mem: Fix issues with internal mmap for read and write
The code now requires the whole region to be mapped instead of just the requested sub-range; the partial mapping was causing undesirable results. Also includes other minor cleanup.

Signed-off-by: Sam Nelson <sam.nelson@ti.com>
-rwxr-xr-x  src/transport/sharedmem/mpm_transport_sharedmem.c | 34
1 file changed, 21 insertions(+), 13 deletions(-)
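Background on the fix: mmap() only accepts page-aligned file offsets, so mapping just a requested sub-range forces the code to round the offset down to a page boundary (pg_offset) and re-add the slack to the pointer handed back to the caller. This patch drops that arithmetic from the read/write path and maps the whole region from its page-aligned base instead, so the user address reduces to base + offset + base_correction. A minimal, self-contained sketch of the alignment arithmetic being removed (simplified names, not the driver code; the driver additionally shifts the mask by UIO_MODULE_DRV_MAP_OFFSET_SHIFT):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long     page_size = sysconf(_SC_PAGESIZE);
        unsigned offset    = 0x1234;   /* hypothetical offset into the region */

        /* mmap() needs a page-aligned file offset: round down... */
        unsigned pg_offset = offset & ~((unsigned)page_size - 1);

        /* ...and compensate in the pointer returned to the caller. */
        unsigned slack = offset - pg_offset;

        printf("page_size=0x%lx pg_offset=0x%x slack=0x%x\n",
               page_size, pg_offset, slack);
        return 0;
    }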
diff --git a/src/transport/sharedmem/mpm_transport_sharedmem.c b/src/transport/sharedmem/mpm_transport_sharedmem.c
index 076cc2d..63d5fb8 100755
--- a/src/transport/sharedmem/mpm_transport_sharedmem.c
+++ b/src/transport/sharedmem/mpm_transport_sharedmem.c
@@ -251,13 +251,11 @@ void *mpm_transport_sharedmem_mmap(mpm_transport_cfg_t *sp, uint32_t addr, uint3
     pg_offset = offset & (~((page_size<< UIO_MODULE_DRV_MAP_OFFSET_SHIFT) - 1));

     mmap_length = length + offset - pg_offset + base_correction;
-// mmap_length = (min_len & ~(page_size - 1));
-// mmap_length = (mmap_length != min_len) ? mmap_length + page_size : mmap_length;

     td->mmap_user[user_index].addr = mmap(NULL, mmap_length,
                                mcfg->mmap_prot, mcfg->mmap_flags,
                                td->fd_mem_block.fd[td->fd_index[index]],
-                               ((pg_offset) + (td->map_index[index] * getpagesize())));
+                               ((pg_offset) + (td->map_index[index] * page_size)));
     if (td->mmap_user[user_index].addr == MAP_FAILED) {
         mpm_printf(1, "can't mmap for the address 0x%x with length"
                 " 0x%x (err: %s)\n",
@@ -303,8 +301,9 @@ int mpm_transport_sharedmem_munmap(mpm_transport_cfg_t *sp, void *va, uint32_t l
 static void *mpm_transport_sharedmem_rw_map (mpm_transport_cfg_t *sp, uint32_t addr, uint32_t length, int *index)
 {
     uint32_t offset, base_address, base_correction;
-    int pg_offset, mmap_length, min_len;
+    int mmap_length, min_len;
     int page_size = getpagesize();
+    int ret_val;
     mpm_transport_sharedmem_t *td = (mpm_transport_sharedmem_t *) sp->td;

     if(mpm_transport_get_mem_details(sp, addr, length, &base_address,
@@ -314,26 +313,30 @@ static void *mpm_transport_sharedmem_rw_map (mpm_transport_cfg_t *sp, uint32_t a
     }

     base_correction = (base_address & (page_size - 1));
-    pg_offset = offset & (~((page_size<< UIO_MODULE_DRV_MAP_OFFSET_SHIFT) - 1));
-    mmap_length = length + offset - pg_offset + base_correction;
+    mmap_length = sp->mmap[*index].length + base_correction;

-    mpm_printf(1, "Debug: Base Address %x, base_correction %x pg_offset %x, mmap_length %x \n",
-               base_address, base_correction, pg_offset, mmap_length);
+    mpm_printf(1, "Debug: Base Address 0x%x, index %d, offset 0x%x, base_correction 0x%x, mmap_length 0x%x \n",
+               base_address, *index, offset, base_correction, mmap_length);
     if (!td->mmap_rw[*index].addr) {
         td->mmap_rw[*index].addr = mmap(NULL, mmap_length, (PROT_READ|PROT_WRITE),
                 MAP_SHARED, td->fd_mem_block.fd[td->fd_index[*index]],
-                ((pg_offset) + (td->map_index[*index] * getpagesize())));
+                (td->map_index[*index] * page_size));
         if (td->mmap_rw[*index].addr == MAP_FAILED) {
+            td->mmap_rw[*index].addr = 0;
             mpm_printf(1, "can't mmap for the address 0x%x with length 0x%x (err: %s)\n",
                     addr, length, strerror(errno));
             return 0;
         }
-        td->mmap_rw[*index].addr_usr = td->mmap_rw[*index].addr
-                + (offset - pg_offset) + base_correction;

         td->mmap_rw[*index].size = sp->mmap[*index].length;
-        pthread_mutex_init(&td->mutex_rw[*index], NULL);
+        ret_val = pthread_mutex_init(&td->mutex_rw[*index], NULL);
+        if (ret_val != 0) {
+            mpm_printf(1, " Unable to init mutex");
+            return 0;
+        }
     }
+    td->mmap_rw[*index].addr_usr = td->mmap_rw[*index].addr
+            + offset + base_correction;

     return (td->mmap_rw[*index].addr_usr);
 }
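Aside on the new mutex handling above: pthread_mutex_init() reports failure through its return value rather than errno, which is why the patch now captures ret_val and bails out on a nonzero result. A standalone illustration of that checked-init pattern (not taken from mpm-transport):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        pthread_mutex_t m;
        int ret_val = pthread_mutex_init(&m, NULL);

        if (ret_val != 0) {   /* errno-style code in the return value, errno untouched */
            fprintf(stderr, "unable to init mutex: %s\n", strerror(ret_val));
            return 1;
        }
        pthread_mutex_lock(&m);
        pthread_mutex_unlock(&m);
        pthread_mutex_destroy(&m);
        return 0;
    }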
@@ -385,17 +388,22 @@ void mpm_transport_sharedmem_close(mpm_transport_cfg_t *sp)
     for (i = 0; i < MPM_MAX_USER_MMAPS; i++) {
         if(td->mmap_user[i].addr) {
             munmap(td->mmap_user[i].addr, td->mmap_user[i].size);
+            td->mmap_user[i].addr = 0;
         }
     }
     for (i = 0; i < MPM_MAX_MMAPS; i++) {
         if(td->mmap_rw[i].addr) {
             munmap(td->mmap_rw[i].addr, td->mmap_rw[i].size);
+            pthread_mutex_destroy(&td->mutex_rw[i]);
+            td->mmap_rw[i].addr = 0;
         }
-        pthread_mutex_destroy(&td->mutex_rw[i]);
     }
     for (i = 0; i < td->fd_mem_block.num_fds; i++) {
+        if (td->fd_mem_block.fd[i]) {
             fsync(td->fd_mem_block.fd[i]);
             close(td->fd_mem_block.fd[i]);
+            td->fd_mem_block.fd[i] = 0;
+        }
     }

     if (td) free(td);
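The close-path changes follow an idempotent-teardown pattern: tear a resource down only while its handle is still live, then clear the handle so a repeated close is harmless. A minimal sketch of the same pattern against an anonymous mapping (illustrative struct and function names, not the mpm-transport API):

    #include <stdio.h>
    #include <sys/mman.h>

    struct region {
        void  *addr;   /* NULL when unmapped */
        size_t size;
    };

    static void region_close(struct region *r)
    {
        if (r->addr) {                 /* only tear down live mappings */
            munmap(r->addr, r->size);
            r->addr = NULL;            /* a second close becomes a no-op */
        }
    }

    int main(void)
    {
        struct region r;
        r.size = 4096;
        r.addr = mmap(NULL, r.size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (r.addr == MAP_FAILED)
            return 1;
        region_close(&r);
        region_close(&r);              /* safe: addr was cleared */
        printf("closed twice without error\n");
        return 0;
    }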