summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: 9be8327)
raw | patch | inline | side by side (parent: 9be8327)
author | David Lide <a0216552@gtudci01.(none)> | |
Mon, 2 Apr 2012 20:08:09 +0000 (16:08 -0400) | ||
committer | David Lide <a0216552@gtudci01.(none)> | |
Mon, 2 Apr 2012 20:08:09 +0000 (16:08 -0400) |
12 files changed:
index d8391e38e96384951969ee4cae9f5266bc01b737..c57a1eda7cad90b4c9f391e634f8aaf39662aa54 100755 (executable)
*/\r
#define NETAPI_ENABLE_SECURITY \r
\r
+\r
+/**\r
+* @def NETAPI_USE_DDR\r
+* (0) define this to enable use of cached DDR for buffers and descriptors \r
+* do not define if USE_MSMC defined below\r
+*/\r
+#define NETAPI_USE_DDR\r
+#ifdef NETAPI_USE_DDR\r
+#define NETAPI_TUNE_USE_CACHE_OPS //for appleton, no cache coherency with netcp & ARM \r
+#endif\r
+/**\r
+* @def NETAPI_USE_MSMC\r
+* (0) define this to enable use of un-cached MSMC for buffers and descriptors \r
+* do not define if USE_DDR defined above \r
+*/\r
+//#define NETAPI_USE_MSMC\r
+\r
/* sanity check: the two buffer/descriptor memory choices are mutually exclusive */
#if defined(NETAPI_USE_MSMC) && defined(NETAPI_USE_DDR)
#error "only define NETAPI_USE_MSMC or NETAPI_USE_DDR"
#endif
+\r
/**\r
* @def TUNE_NETAPI_NUM_CORES\r
* (0) How many cores (threads) 
#define TUNE_NETAPI_MAX_BURST_RCV 32 //max #ok pkts to recv in one poll\r
\r
//(13) netcp interfaces\r
-#define TUNE_NETAPI_MAX_IP_PER_INTERFACE 2 //2 ip's per interface\r
+#define TUNE_NETAPI_MAX_IP 4 //4 ip's can be attached to interfaces\r
#define TUNE_NETAPI_MAX_INTERFACES 2 //2 interfaces\r
\r
//(14) timers\r
index 1dd5b7d019ddab7d511761dd2c761c754c0a5777..5c6e3bb307f446bcb99a7a4765c0e9e9417a9dfe 100755 (executable)
typedef uint32_t NETCP_CFG_IPSEC_POLICY_T; \r
\r
\r
+//use this in AddIp, AddClassifier to indicate any MAC address\r
+#define NETCP_CFG_NO_INTERFACE 0xff\r
+\r
/* del mac i/f */\r
void netcp_cfgDelMac(NETAPI_T h,int iface_no, int *err);\r
\r
\r
} NETCP_CFG_CLASS_L4_T;\r
\r
-//classifier L4 + IPSEC type (L2, L3 (outer), L3 (inner) implied by iface,policy\r
+//classifier L4 + policy (L2, L3 (outer), tunnel, L3 (inner) implied by policy\r
typedef struct NETCP_CFG_CLASS_L4_IPSEC_Tag\r
{\r
//which mac interface pkt is from, \r
int iface;\r
- NETCP_CFG_IP_T ip;\r
+ NETCP_CFG_IPSEC_POLICY_T ip_policy;\r
\r
//L4 (port)\r
nwal_appProtoType_t proto; //L4 proto (-1 for don't care)\r
index 8ee57a8eb5980e52caf0ed5230c9a2548d0417da..c8661809aae97616217205d21d2d9a6b87960e82 100755 (executable)
if (p->createdHeaps[i]) {netapi_closeHeap(h,p->createdHeaps[i]);p->createdHeaps[i]=NULL;}\r
}\r
netapi_cleanup_at_start(); //clear 1st 50 not-specified queues..\r
+ netapi_VM_memory_teardown();\r
} \r
free(p);\r
return;\r
index 4f0082b84a226215b5b78701bd8c3a65ae7d8a4a..0662057c53abc657a2311807a5d6038fdf67ab4d 100755 (executable)
void * nwal_handle; //handle associated with this interface\r
unsigned char mac[6]; // mac address\r
unsigned int vlan; //future\r
- NETCP_INTERFACE_IP_T ips[TUNE_NETAPI_MAX_IP_PER_INTERFACE];\r
} NETCP_INTERFACE_T;\r
\r
/*to keep track of netcp config transactions */\r
NETCP_REGISTERED_FLOWS_T flows[TUNE_NETAPI_MAX_FLOWS]; //flows\r
NETCP_IPSEC_SA_T tunnel[TUNE_NETAPI_MAX_SA]; //tunnels\r
NETCP_IPSEC_POLICY_T policy[TUNE_NETAPI_MAX_POLICY]; //policies\r
+ NETCP_INTERFACE_IP_T ips[TUNE_NETAPI_MAX_IP]; //ips\r
} NETAPI_NWAL_GLOBAL_CONTEXT_T;\r
\r
/* NWAL Local context (per core/thread) */\r
#define NETAPI_GLOBAL_REGION 0\r
#define NETAPI_LOCAL_REGION 1\r
\r
+int netapi_VM_memory_setup(void);\r
+void netapi_VM_memory_teardown(void);\r
\r
//nwal callbacks\r
void netapi_NWALRxPktCallback (uint32_t appCookie,\r
NetapiNwalTransInfo_t * netapip_GetFreeTransInfo(NETAPI_GLOBAL_T *p_global, nwal_TransID_t *pTransId);\r
void *netcp_cfgp_get_policy( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
int policy_slot);\r
-\r
#endif\r
index 9ec00c088ebbb9cc50eb4df4909c174e26a6edaa..3742ec997d50e6c87b28ef1e7c533fdb49e653d5 100644 (file)
void * mac_handle = netcp_cfgp_get_mac_handle(&netapi_get_global()->nwal_context,iface_no);
*perr =0;
- if ((!n) || (!sa_info) || (!mac_handle)|| (!p_data_mode_handle) ) {*perr = NETAPI_ERR_BAD_INPUT; return -1;}
+ if ((!n) || (!sa_info) || (!p_data_mode_handle) ) {*perr = NETAPI_ERR_BAD_INPUT; return -1;}
//reserve a slot
tunnelId = netcp_cfgp_find_sa_slot(&netapi_get_global()->nwal_context,
index dd4b0461da9ea45a59322d577f0934a36cc4a13b..82452437326ea2f23a8ecb9b5ca63b6c1ed47250 100755 (executable)
}\r
#define netapi_timing_stop netapi_timing_start\r
\r
//netapi kernel module access routines
int netapi_utilModInit(void);
void netapi_utilModClose(void);
unsigned long netapi_utilGetPhysOfBufferArea(void);
/* fixed name: was misspelled "netap_utilGetSizeofBufferArea", which does not
   match the definition (netapi_utilGetSizeOfBufferArea) or its callers */
unsigned long netapi_utilGetSizeOfBufferArea(void);
int netapi_utilCacheWbInv(void *ptr, size_t size);
unsigned long netapi_utilGetVaOfBufferArea(void);
+\r
#endif\r
index c979cb22b528b256972e2db5448ccf1cb6ccad91..939e7aba677180d25a0fc92c1009cddc59babfac 100755 (executable)
\r
#include <ti/drv/nwal/nwal.h> \r
#include "netapi_vm.h"\r
-\r
+#include <sys/ioctl.h>\r
+#include "tools/module/netapimod.h"\r
\r
\r
/***********************RAW MEMORY ALLOCATION & TRANSLATION*************************/\r
\r
uint8_t *netapi_VM_mem_start_phy = (uint8_t*)0;\r
uint8_t *netapi_VM_mem_start = (uint8_t*)0;\r
-\r
-static uint8_t *netapi_VM_mem_end = (uint8_t*)0;\r
+uint8_t *netapi_VM_mem_end = (uint8_t*)0;\r
+uint8_t *netapi_VM_mem_end_phy = (uint8_t*)0;\r
static uint8_t *netapi_VM_mem_alloc_ptr = (uint8_t*)0;\r
static uint32_t netapi_VM_mem_size = 0;\r
\r
\r
/* File descriptor for /dev/mem */ \r
static int dev_mem_fd;\r
+static int our_netapi_module_fd;\r
+#ifndef USE_MODULE_MMAP\r
+static int temp_fd;\r
+#endif\r
\r
nwal_Bool_t netapi_VM_memAllocInit\r
(\r
{\r
void *map_base; \r
\r
+ //always open dev/mem, since we need for QM, CPPI, etc\r
if((dev_mem_fd = open("/dev/mem", (O_RDWR | O_SYNC))) == -1)\r
{\r
printf(">netapi_VM_memAllocInit: Failed to open \"dev/mem\" err=%s\n",\r
return nwal_FALSE;\r
}\r
\r
+#ifdef NETAPI_USE_MSMC\r
+ // memory map in addr to addr+size (msmc)\r
map_base = netapi_VM_memMap ((void *)addr, size); \r
\r
if (!map_base)\r
return nwal_FALSE;\r
}\r
\r
- printf(">netapi_VM_memAllocInit: Phy Addr %x Memory (%d bytes) mapped at address %p.\n", addr,size, map_base); \r
+ printf(">netapi_VM_memAllocInit (uncached msmc) Phy Addr %x Memory (%d bytes) mapped at address %p.\n", addr,size, map_base); \r
+#else \r
+ //use cached DDR. This requires NETAPI kernel module\r
+ our_netapi_module_fd=netapi_utilModInit();\r
+\r
+ if (our_netapi_module_fd == -1) {\r
+ printf(">netapi_VM_memAllocInit: failed to open /dev/netapi: '%s'\n", strerror(errno));\r
+ return nwal_FALSE;\r
+ }\r
+ addr= ( uint8_t *) netapi_utilGetPhysOfBufferArea(); //get address that was allocated for us by kernela module */\r
+ size = netapi_utilGetSizeOfBufferArea(); //get the size that was allocated\r
+#ifdef USE_MODULE_MMAP\r
+ map_base = (void *) netapi_utilGetVaOfBufferArea(); //mmap into our space, return va\r
+#else\r
+ if( (temp_fd = open("/dev/mem", O_RDWR )) == -1) {\r
+ printf(">netapi_VM_memAllocInit: failed to open dev/mem again cached err=%d\n",errno);\r
+ return nwal_FALSE; \r
+ }\r
+\r
+ map_base = mmap(0,size , PROT_READ | PROT_WRITE, MAP_SHARED, temp_fd, addr);\r
+ if(map_base == (void *) -1) {\r
+ printf(">netapi_VM_memAllocInit: failed to mmap CMA area at phy %x err=%d\n",\r
+ addr, errno); \r
+ return nwal_FALSE;\r
+ }\r
+#endif\r
+ printf(">netapi_VM_memAllocInit: (cached ddr) Phy Addr %x Memory (%d bytes) mapped at address %p.\n", addr,size, map_base); \r
+#endif\r
\r
netapi_VM_mem_alloc_ptr = netapi_VM_mem_start = map_base;\r
netapi_VM_mem_size = size;\r
netapi_VM_mem_end = netapi_VM_mem_start + netapi_VM_mem_size;\r
netapi_VM_mem_start_phy = addr;\r
+ netapi_VM_mem_end_phy = netapi_VM_mem_start_phy + netapi_VM_mem_size;\r
return nwal_TRUE;\r
}\r
\r
/* finaly SA context area */\r
unsigned char *netapi_VM_SaContextVaddr=NULL;\r
\r
+/************************************************\r
+ * teardown VM memory\r
+ ***********************************************/\r
+void netapi_VM_memory_teardown(void)\r
+{\r
+ netapi_utilModClose();\r
+ close(dev_mem_fd);\r
+#ifndef USE_MODULE_MMAP\r
+ close(temp_fd);\r
+#endif\r
+}\r
/*************************************************\r
* setup VM memory\r
************************************************/\r
int netapi_VM_memory_setup(void)\r
{\r
- /* (1) big chunck of memory out of MSMC -> todo, get from CMA */\r
/* (1) big chunk of memory out of MSMC or DDR via kernel CMA */
+#ifdef NETAPI_USE_DDR\r
+ if (netapi_VM_memAllocInit( NULL, 0) == nwal_FALSE) {\r
+ printf(">netapi ERROR: netapi_V_MmemAllocInit from DDR/CMA failed\n");\r
+ return (-1);\r
+ }\r
+#else //uncached MSMC \r
if (netapi_VM_memAllocInit((uint8_t*)MSMC_SRAM_BASE_ADDR,\r
NETAPI_PERM_MEM_SZ) == nwal_FALSE) {\r
- printf(">netapi ERROR: netapi_V_MmemAllocInit failed\n");\r
+ printf(">netapi ERROR: netapi_V_MmemAllocInit from MSMC failed\n");\r
return (-1);\r
}\r
-\r
+#endif\r
\r
/* (2) Create virtual memory maps for peripherals */\r
/* (2a) QMSS CFG Regs */\r
index 0b63801daa3a1616c5df753bf7d2d1bb0ee883f7..09cc3dec58a3186af5acc499b554bc27fa96e1e4 100755 (executable)
\r
/* Physical address of the [only] memory pool */\r
extern uint8_t *netapi_VM_mem_start_phy;\r
+extern uint8_t *netapi_VM_mem_end_phy;\r
\r
/* virtual address of the [only] memory pool */\r
extern uint8_t *netapi_VM_mem_start;\r
+extern uint8_t *netapi_VM_mem_end;\r
\r
//qm regions: netapi defines two regions, 0,1\r
extern unsigned char *netapi_VM_QMemLocalDescRam;\r
index 1347ee6cf92cd28099a399ca86803dd660a4caf7..e9a7791e4e0e09e4cd8fbb3f0b4f67dced86a892 100755 (executable)
int netcp_cfgp_find_sa_slot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p, int iface)\r
{ \r
int i;\r
+ if (iface != NETCP_CFG_NO_INTERFACE)\r
+ {\r
if ((iface <0 ) || (iface >=TUNE_NETAPI_MAX_INTERFACES)) return -1;\r
-\r
+ }\r
//find a free entry\r
for(i=0;i<TUNE_NETAPI_MAX_SA;i++)\r
{ \r
\r
/*============================IP ADDRESSES==========================*/\r
\r
-//internal: find a free slot for IP rule in interface\r
+//internal: find a free slot for IP rule \r
static int netcp_cfgp_find_ip_slot(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
int iface_no)\r
{\r
int i;\r
- if ((iface_no <0 ) || (iface_no >= TUNE_NETAPI_MAX_INTERFACES))\r
- {\r
- return -1;\r
- }\r
- if (!p->interfaces[iface_no].in_use) return -1;\r
\r
//find a free entry\r
- for(i=0;i<TUNE_NETAPI_MAX_IP_PER_INTERFACE;i++)\r
+ for(i=0;i<TUNE_NETAPI_MAX_IP;i++)\r
{\r
- if (!p->interfaces[iface_no].ips[i].in_use)\r
+ if (!p->ips[i].in_use)\r
{\r
- p->interfaces[iface_no].ips[i].in_use = 2; //pending\r
+ p->ips[i].in_use = 2; //pending\r
return i;\r
}\r
}\r
int ip_slot, //we 'reserved it already'\r
void * handle)\r
{\r
- p->interfaces[iface_no].ips[ip_slot].in_use=1;\r
- memcpy(&p->interfaces[iface_no].ips[ip_slot].ip_addr, ip_addr, sizeof(nwalIpAddr_t));\r
+ p->ips[ip_slot].in_use=1;\r
+ memcpy(&p->ips[ip_slot].ip_addr, ip_addr, sizeof(nwalIpAddr_t));\r
if(ip_qualifiers)\r
- memcpy(&p->interfaces[iface_no].ips[ip_slot].ip_qualifiers, ip_qualifiers, sizeof(nwalIpOpt_t));\r
+ memcpy(&p->ips[ip_slot].ip_qualifiers, ip_qualifiers, sizeof(nwalIpOpt_t));\r
else\r
- memset(&p->interfaces[iface_no].ips[ip_slot].ip_qualifiers, 0, sizeof(nwalIpOpt_t));\r
- p->interfaces[iface_no].ips[ip_slot].ip_type = ipType;\r
- p->interfaces[iface_no].ips[ip_slot].nwal_handle = handle;\r
+ memset(&p->ips[ip_slot].ip_qualifiers, 0, sizeof(nwalIpOpt_t));\r
+ p->ips[ip_slot].ip_type = ipType;\r
+ p->ips[ip_slot].nwal_handle = handle;\r
return;\r
}\r
\r
int iface_no,\r
int ip_slot )\r
{\r
- if ((iface_no <0 ) || (iface_no >= TUNE_NETAPI_MAX_INTERFACES))\r
- {\r
- return ;\r
- }\r
- if (!p->interfaces[iface_no].in_use) return ;\r
- if ((ip_slot <0)||(ip_slot>TUNE_NETAPI_MAX_IP_PER_INTERFACE)) return ;\r
- p->interfaces[iface_no].ips[ip_slot].in_use=0;\r
+ if ((ip_slot <0)||(ip_slot>TUNE_NETAPI_MAX_IP)) return ;\r
+ p->ips[ip_slot].in_use=0;\r
return;\r
}\r
\r
int iface_no,\r
int ip_slot )\r
{\r
- if ((iface_no <0 ) || (iface_no >= TUNE_NETAPI_MAX_INTERFACES))\r
- {\r
- return NULL;\r
- }\r
- if (!p->interfaces[iface_no].in_use) return NULL;\r
- if ((ip_slot <0)||(ip_slot>=TUNE_NETAPI_MAX_IP_PER_INTERFACE)) return NULL;\r
- if (!p->interfaces[iface_no].ips[ip_slot].in_use) return NULL;\r
- return (void *) p->interfaces[iface_no].ips[ip_slot].nwal_handle;\r
+ if ((ip_slot <0)||(ip_slot>=TUNE_NETAPI_MAX_IP)) return NULL;\r
+ if (!p->ips[ip_slot].in_use) return NULL;\r
+ return (void *) p->ips[ip_slot].nwal_handle;\r
}\r
\r
/*==========================MAC INTERFACES======================*/\r
@@ -361,6 +348,7 @@ static void netcp_cfgp_insert_mac(NETAPI_NWAL_GLOBAL_CONTEXT_T *p, unsigned char
//internal: get handle associated with interface\r
void* netcp_cfgp_get_mac_handle(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,int iface_no)\r
{\r
+ if (iface_no == NETCP_CFG_NO_INTERFACE) return NULL;\r
if ((iface_no <0 ) || (iface_no >= TUNE_NETAPI_MAX_INTERFACES))\r
{\r
return NULL;\r
int flag) //TRUE: add IP to iface. False: add IP as part of classifier\r
{\r
NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;\r
-void * n_handle;\r
+void * n_handle=NULL;\r
nwalIpParam_t nwalIpParam= {\r
pa_IPV4, /* IP Type */\r
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* Dest IP */\r
NETCP_CFG_IP_T ip_rule_id;\r
NETCP_CFG_IP_T temp;\r
\r
- //verify that iface has been configurred \r
+ //verify that iface has been configured \r
+ if (iface_no != NETCP_CFG_NO_INTERFACE)\r
+ {\r
if ((iface_no<0) || (iface_no>= TUNE_NETAPI_MAX_INTERFACES)) {*err = NETAPI_ERR_BAD_INPUT; return -1;}\r
+ }\r
\r
+ if (iface_no != NETCP_CFG_NO_INTERFACE)\r
+ {\r
if(netapi_get_global()->nwal_context.interfaces[iface_no].in_use)\r
{\r
n_handle = netapi_get_global()->nwal_context.interfaces[iface_no].nwal_handle;\r
*err = NETAPI_ERR_BAD_INPUT;\r
return -1;\r
}\r
-\r
+ }\r
if (flag) //if adding IP to MAC then reserve a slot to save info\r
{\r
//find free slot for IP & reserve\r
}\r
\r
//verify that iface has been configured \r
+ if (iface_no != NETCP_CFG_NO_INTERFACE)\r
+ {\r
if(!netapi_get_global()->nwal_context.interfaces[iface_no].in_use)\r
{\r
*err = NETAPI_ERR_BAD_INPUT;\r
return -1;\r
}\r
+ }\r
\r
if (p_class->classType== NETCP_CFG_CLASS_TYPE_L4)\r
{\r
index d582b56b350b4b4d97b6847c23672a48616d575b..a09136f7c1c84a3ac7e53f8c729d941425014529 100755 (executable)
#include <stdlib.h>\r
#include <stdio.h>\r
\r
+#include "netapi_tune.h"\r
#include "netapi_vm.h"\r
#include "netapi_timer.h"\r
+#include <unistd.h>\r
+#include <sys/mman.h>\r
+#include <sys/types.h>\r
+#include <sys/stat.h>\r
+#include <sys/ioctl.h>\r
+#include <fcntl.h>\r
+#include <errno.h>\r
+#include <string.h>\r
+#include "netapi_util.h"\r
+#include "tools/module/netapimod.h"\r
#define System_printf printf\r
\r
uint32_t Osal_qmss_MallocCounter =0;\r
uint32_t Osal_cppi_MallocCounter =0;\r
uint32_t Osal_cppi_FreeCounter =0;\r
\r
+void* Osal_saGetSCPhyAddr(void* vaddr);\r
+\r
\r
\r
/* TODO: */\r
\r
#endif\r
\r
/**********USER SPACE ACCESS TO KERNEL MEMORY SERVICES*************/
+static int netapi_fd;\r
+\r
+/***init **/\r
+int netapi_utilModInit(void)\r
+{\r
+ netapi_fd = open("/dev/netapi", O_RDWR);\r
+\r
+ if (netapi_fd == -1) {\r
+ return -1;\r
+ }\r
+ return netapi_fd;\r
+}\r
+\r
+/***close **/\r
+void netapi_utilModClose(void)\r
+{\r
+ close(netapi_fd);\r
+}\r
+\r
+/* return physical address of region kernel module has allocated for us */\r
+unsigned long netapi_utilGetPhysOfBufferArea(void)\r
+{\r
+ unsigned long physp;\r
+\r
+ if (ioctl(netapi_fd, NETAPIMOD_IOCGETPHYS | NETAPIMOD_IOCMAGIC, &physp) == -1) {\r
+ return 0;\r
+ }\r
+ return physp;\r
+}\r
+\r
+/* return the size of that region */\r
+unsigned long netapi_utilGetSizeOfBufferArea(void)\r
+{\r
+ unsigned long size;\r
+\r
+ if (ioctl(netapi_fd, NETAPIMOD_IOCGETSIZE | NETAPIMOD_IOCMAGIC, &size) == -1) {\r
+ return 0;\r
+ }\r
+ return size;\r
+}\r
+\r
+//*****for the actual wb, inv cache ops, call the osal_xxx version, not these directly\r
+// (so make inline)\r
+/** write back operation on block */\r
+static inline int netapi_utilCacheWb(void *ptr, size_t size)\r
+{\r
+ struct netapimod_block block;\r
+\r
+ block.addr = (unsigned long)ptr;\r
+ block.size = size;\r
+\r
+ if (ioctl(netapi_fd, NETAPIMOD_IOCCACHEWB | NETAPIMOD_IOCMAGIC, &block) == -1) {\r
+ return -1;\r
+ }\r
+ return 0;\r
+}\r
+\r
+/** write back & invalidate **/\r
+int netapi_utilCacheWbInv(void *ptr, size_t size)\r
+{\r
+ struct netapimod_block block;\r
+\r
+ block.addr = (unsigned long)ptr;\r
+ block.size = size;\r
+\r
+ if (ioctl(netapi_fd, NETAPIMOD_IOCCACHEWBINV | NETAPIMOD_IOCMAGIC, &block) == -1) {\r
+ return -1;\r
+ }\r
+ return 0;\r
+}\r
+\r
+/** just invalidate **/\r
+static inline int netapi_utilCacheInv(void *ptr, size_t size)\r
+{\r
+ struct netapimod_block block;\r
+\r
+ block.addr = (unsigned long)ptr;\r
+ block.size = size;\r
+\r
+ if (ioctl(netapi_fd, NETAPIMOD_IOCCACHEINV | NETAPIMOD_IOCMAGIC, &block) == -1) {\r
+ return -1;\r
+ }\r
+ return 0;\r
+}\r
+\r
+//***mmap the block into our user space process memory map. */\r
+unsigned long netapi_utilGetVaOfBufferArea(void)\r
+{\r
+ void *userp;\r
+\r
+ /* Map the physical address to user space */\r
+ userp = mmap(0, // Preferred start address\r
+ NETAPIMOD_MEMSZ, // Length to be mapped\r
+ PROT_WRITE | PROT_READ, // Read and write access\r
+ MAP_SHARED, // Shared memory\r
+ netapi_fd, // File descriptor\r
+ 0); // The byte offset from fd\r
+\r
+ if (userp == MAP_FAILED) {\r
+ return 0;\r
+ }\r
+ return (unsigned long)userp;\r
+}\r
+\r
+\r
/*****************************************************************************\r
* FUNCTION PURPOSE: Cache Invalidation Routine\r
***************************************************************************** \r
*****************************************************************************/\r
void Osal_invalidateCache (void *blockPtr, uint32_t size) 
{

#ifdef NETAPI_TUNE_USE_CACHE_OPS
 /* only operate on addresses inside the netapi VM buffer pool.
    NOTE(review): the range check ignores 'size' — a block straddling
    netapi_VM_mem_end would still be processed; confirm intended. */
 if ((blockPtr <netapi_VM_mem_start)||( blockPtr>netapi_VM_mem_end)) return;
 //netapi_utilCacheInv(blockPtr, size);
 /* NOTE(review): the PHYSICAL address (via Osal_saGetSCPhyAddr) is handed to
    the cache-invalidate ioctl, not the virtual one — confirm the netapi
    kernel module expects a physical address here. */
 netapi_utilCacheInv(Osal_saGetSCPhyAddr(blockPtr), size);
#endif
 return;
}
\r
*****************************************************************************/\r
void Osal_writeBackCache (void *blockPtr, uint32_t size) 
{
#ifdef NETAPI_TUNE_USE_CACHE_OPS
 /* only operate on addresses inside the netapi VM buffer pool
    (range check ignores 'size' — see Osal_invalidateCache) */
 if ((blockPtr <netapi_VM_mem_start)||( blockPtr>netapi_VM_mem_end)) return;
 //netapi_utilCacheWbInv(blockPtr, size);
 /* NOTE(review): physical address handed to the wb+inv ioctl — confirm the
    netapi kernel module expects phys, not virtual. */
 netapi_utilCacheWbInv(Osal_saGetSCPhyAddr(blockPtr), size);
#endif
 return;
}
\r
\r
/* Intentionally a no-op: the underlying Osal_invalidateCache call is
 * disabled (commented out) for nwal buffers. */
void Osal_nwalInvalidateCache (void *blockPtr, uint32_t size)
{
    (void)blockPtr;   /* deliberately unused */
    (void)size;
    //Osal_invalidateCache(blockPtr,size);
}
\r
/* Intentionally a no-op: the underlying Osal_writeBackCache call is
 * disabled (commented out) for nwal buffers. */
void Osal_nwalWriteBackCache (void *blockPtr, uint32_t size)
{
    (void)blockPtr;   /* deliberately unused */
    (void)size;
    //Osal_writeBackCache(blockPtr,size);
}
\r
\r
/* Intentionally a no-op: the cache-invalidate call is disabled
 * (commented out) for pktlib memory access. */
void Osal_pktLibBeginMemAccess(void* ptr, uint32_t size)
{
    (void)ptr;    /* deliberately unused */
    (void)size;
    //Osal_invalidateCache(ptr,size);
}
\r
\r
/* Intentionally a no-op: the cache write-back call is disabled
 * (commented out) for pktlib memory access. */
void Osal_pktLibEndMemAccess(void* ptr, uint32_t size)
{
    (void)ptr;    /* deliberately unused */
    (void)size;
    //Osal_writeBackCache(ptr,size);
}
\r
\r
@@ -431,7 +559,7 @@ void Osal_pktLibBeginPktAccess(Pktlib_HeapHandle heapHandle, Ti_Pkt* ptrPkt, uin
* 'create/find' HEAP API & depending upon the comparison take appropriate action. \r
* Just for testing we are always invalidating the cache here. */\r
\r
- Osal_invalidateCache(ptrPkt,size);\r
+ //Osal_invalidateCache(ptrPkt,size);\r
}\r
\r
\r
@@ -442,7 +570,7 @@ void Osal_pktLibEndPktAccess(Pktlib_HeapHandle heapHandle, Ti_Pkt* ptrPkt, uint3
* Just for testing we are always writing back the cache here. */\r
\r
/* Writeback the contents of the cache. */\r
- Osal_writeBackCache(ptrPkt,size);\r
+ //Osal_writeBackCache(ptrPkt,size);\r
}\r
\r
\r
index 4c038e204472485a4ae1af86ddc4a119132dc0b4..0ad2965665364251e6e61b228a4bc4bb3655e2fa 100755 (executable)
#include <string.h>\r
#include "netapi.h"\r
\r
+extern unsigned int vv7p;\r
+extern unsigned int vv8p;\r
+\r
/*--------------------Utilites-----------------*/\r
static PKTIO_HANDLE_T * pktiop_get_free_channel_slot(NETAPI_T n)\r
{\r
{\r
/* tod: meta data for non netcp xfers */\r
/* process meta data */\r
+ vv8p=netapi_timing_stop();\r
Qmss_queuePushDesc (p->q, (void*)pkt);\r
}\r
return 1;\r
n= (p->max_n< PKTIO_MAX_RECV) ? p->max_n : PKTIO_MAX_RECV;\r
for(r=0;r<n;r++)\r
{\r
+ if (r==0) vv7p=netapi_timing_stop();\r
temp=(Ti_Pkt*)(Cppi_HostDesc*)QMSS_DESC_PTR(Qmss_queuePop(p->q));\r
if(!temp) break;\r
/* process meta data */\r
index cbade343e2ff9e043d617db7dad2da3857660ab7..ac13485448786138a47a24865d9917c87708c399 100644 (file)
#include <sys/resource.h>\r
\r
//IPSEC MODE(only choose one rx and one tx)\r
-//#define IPSEC_MODE_RX_INFLOW\r
-//#define IPSEC_MODE_TX_INFLOW\r
-#define IPSEC_MODE_RX_SIDEBAND\r
-#define IPSEC_MODE_TX_SIDEBAND\r
+#define IPSEC_MODE_RX_INFLOW\r
+#define IPSEC_MODE_TX_INFLOW\r
+//#define IPSEC_MODE_RX_SIDEBAND\r
+//#define IPSEC_MODE_TX_SIDEBAND\r
\r
/*************debug********************/\r
void dump_descr(unsigned long *p, int n)\r
{\r
NULL, NULL //* to be filled in\r
};\r
-NETCP_CFG_CLASSIFIER_T class_2_cfg=\r
+NETCP_CFG_CLASSIFIER_T class_2_cfg= \r
{\r
NETCP_CFG_CLASS_TYPE_L3_L4,\r
{\r
b=Pktlib_allocPacket(OurHeap,PKT_LEN);\r
Pktlib_freePacket(b);\r
}\r
+}\r
+\r
+unsigned int vv1;\r
+unsigned int vv2;\r
+unsigned int vv3;\r
+unsigned int vv4;\r
+unsigned int vv5;\r
+unsigned int vv6;\r
+unsigned int vv7p;\r
+unsigned int vv8p;\r
+\r
/*--------------basic pktio send/recv benchmark----------------------*/
/* Sends one packet at a time through 'our_chan' and polls it back, capturing
   cycle timestamps to print a per-stage latency breakdown.  vv1..vv5 are
   sampled here; vv6 is set in recv_cb_bench, vv7p/vv8p in the pktio
   poll/send paths. */
void our_pktio_bench(int ntrials)
{
int i;
#define NBATCH 8    /* batch size for the (not yet implemented) multi-pkt loop below */
Ti_Pkt tip;
unsigned char * pData;
int len;
int n;
int err;
PKTIO_METADATA_T meta[10]={0};
//send single, recv single
for(i=0;i<ntrials;i++)
{
 vv1= netapi_timing_stop();                 /* t0: before alloc */
 tip=Pktlib_allocPacket(OurHeap,PKT_LEN);
 vv2= netapi_timing_stop();                 /* t1: after alloc (NOTE(review): tip not checked for NULL) */
 Pktlib_getDataBuffer(tip,&pData,&len);
 vv3= netapi_timing_stop();                 /* t2: after getDataBuffer */
 pktio_send(our_chan,tip,&meta[0],&err);
 vv4= netapi_timing_stop();                 /* t3: after send */
 n=pktio_poll(our_chan,NULL , &err);
 vv5= netapi_timing_stop();                 /* t4: after poll (rx callback fills vv6) */
 /* NOTE(review): vv6/vv7p are only meaningful if the looped-back packet
    arrived within this poll — confirm loopback latency assumption */
 printf("pktio send. rx=%d (wcb%d) (toqm%d) tx=%d (toqm%d) alloc=%d\n", vv6-vv4,vv5-vv4,vv7p-vv4, vv4-vv3, vv8p-vv4, vv3-vv1);
 
}

//send multiple, rec multiple -- TODO: not implemented yet
for(i=0;i<ntrials;i++)
{
}

}
/*-----------test driver: gen an input pkt------- */\r
//char buffer[sizeof(HEAD_T)+PKT_LEN];\r
//inner ip &udp for ipsec\r
if (flag) \r
{\r
+ //just drop non-udp packet\r
+ if (p_pkt[14+20+8+16+9]!=0x11)\r
+ {\r
+ stats.n_new+=1;Pktlib_freePacket(tip); return;\r
+ }\r
\r
//spi\r
//memset(&p_pkt[14+20],0x88,4); \r
\r
\r
\r
-\r
-\r
/***************************************
 benchmark receive handler
****************************************/
/* pktio receive callback used by our_pktio_bench: records the rx timestamp
   in vv6 (read by the benchmark's printf) and frees every received packet. */
void recv_cb_bench(struct PKTIO_HANDLE_Tag * channel, Ti_Pkt* p_recv[],
                  PKTIO_METADATA_T meta[], int n_pkts,
                  uint64_t ts )
{
  int i;
  vv6= netapi_timing_stop();  /* t-rx: first thing, to minimize measurement skew */
  for (i=0;i<n_pkts; i++) Pktlib_freePacket(p_recv[i]);
}
\r
/****************************************************************************************/\r
/******************SB Accelerator Callback PKT RECEIVE HANDLER *************************/\r
flip_and_send_pkt(tip,p_pkt,len,1);\r
#endif\r
}\r
+ else if ((p_head->ip[2]&0x0000ff00)!=0x00001100)\r
+ {\r
+ stats.n_new+=1;Pktlib_freePacket(tip); continue;\r
+ }\r
else //non ipsec\r
{\r
if (!check_header(p_head,&meta[i])) { \r
{\r
Ti_Pkt * tip;\r
unsigned int len;\r
-nwalTxPktInfo_t meta_tx={0};\r
+nwalTxPktInfo_t meta_tx;\r
PKTIO_METADATA_T meta = {PKTIO_META_TX,{0},0};\r
int err;\r
static int house_pkts_gened=0;\r
unsigned char * pIpHdr,* pData;\r
unsigned int vv1,vv2,vv3;\r
\r
+memset(&meta_tx,0,sizeof(meta_tx));\r
for(p=0;p<TX_BURST;p++) { \r
//reguest stats \r
if ((house_pkts_gened>0) && (! (house_pkts_gened%400)) )\r
\r
\r
/* create a pktio channel */\r
-our_chan=pktio_create(netapi_handle,"our1stq",(PKTIO_CB) recv_cb, &our_chan_cfg,&err);\r
+our_chan=pktio_create(netapi_handle,"our1stq",(PKTIO_CB) recv_cb_bench, &our_chan_cfg,&err);\r
if (!our_chan) {printf("pktio create failed err=%d\n",err); exit(1);}\r
\r
/* open netcp default tx, rx queues */\r
/********************************************\r
* Basic pkt loopback test\r
*********************************************/\r
+printf("...runnining pure push/pop benchmark\n");\r
+our_pktio_bench(100);\r
+\r
+\r
+\r
\r
\r
+/**************unused stuff******************/\r
/* create TRIE */\r
P_trie = trie_new();\r
if (!P_trie) {printf("trie alloc failed\n"); exit(1);}\r