author    David Lide <a0216552@gtudci01.(none)>  Fri, 1 Jun 2012 22:23:15 +0000 (18:23 -0400)
committer David Lide <a0216552@gtudci01.(none)>  Fri, 1 Jun 2012 22:23:15 +0000 (18:23 -0400)
16 files changed:
index fdce74ccc3be2e3220463fbed1cc3a353deb3230..d1dd141bad5cda56fe06e6fc4ba491136285e074 100755 (executable)
\r
#define NETAPI_SYS_MASTER 2 //master for system\r
#define NETAPI_CORE_MASTER 1 //master for core\r
-#define NETAPI_NO_MASTE 0 //data only\r
+#define NETAPI_NO_MASTER 0 //data only\r
+\r
+/***********************************************\r
+*************RUN TIME CONTROLS*****************\r
+***********************************************/\r
+typedef struct NETAPI_CFG_Tag\r
+{\r
+ int def_mem_size; //bytes of CMA memory we have allocated \r
+ int def_flow_pkt_rx_offset; //offset in pkt buffer for hw to start RX\r
+ int def_max_descriptors; //# of descriptors in system (must be power of 2), 2^14 max\r
+ int def_tot_descriptors_for_us; //#of descriptors to create in our region (must be power of 2)\r
+ int def_heap_n_descriptors; //# descriptors+buffers in default heap\r
+ int def_heap_n_zdescriptors; //# zero len descriptors in default heap\r
+ int def_heap_buf_size; //size of buffers in default heap\r
+}NETAPI_CFG_T;\r
+\r
\r
\r
#include "netapi_err.h"\r
**********BUILD TIME CONTROLS *****************\r
***********************************************/\r
/* see netapi_tune.h */\r
- \r
+\r
\r
/*************************************\r
**************NETAPI****************\r
* - cppi\r
* - nwal\r
* @param[in] master mode: NETAPI_SYS_MASTER or NETAPI_NO_MASTER\r
+ * @param[in] p_cfg: pointer to a NETAPI_CFG_T (above) with run-time configuration, or NULL to use the defaults\r
* @retval @ref NETAPI_T: handle to the instance or NULL on error \r
* @pre none \r
*/\r
-NETAPI_T netapi_init(int master);\r
+NETAPI_T netapi_init(int master, NETAPI_CFG_T * p_cfg);\r
\r
\r
/** @ingroup netapi_api_functions */\r
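
A minimal usage sketch of the new two-argument netapi_init(): the added p_cfg parameter selects the run-time configuration, and passing NULL falls back to the compiled-in defaults from netapi_tune.h (the wrapper function name below is illustrative):

static NETAPI_T start_netapi(void)
{
    NETAPI_T h;

    /* NULL => use the built-in defaults; pass a NETAPI_CFG_T* to override them */
    h = netapi_init(NETAPI_SYS_MASTER, NULL);
    if (h == NULL)
        printf(">example: netapi_init failed\n");
    return h;
}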
index f8ed7ba32fae42d2a93086040b24605d30fe3137..033b757bf81c1496175a629117349b0760413aa6 100755 (executable)
* @pre @ref netapi_init \r
*/\r
//return 64 bit timestamp from hw timer\r
-static inline unsigned long long netapi_getTimestamp(void) \r
+volatile static inline unsigned long long netapi_getTimestamp(void) \r
{\r
volatile unsigned long long t1;\r
volatile unsigned long long t2;\r
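
A small sketch of using the 64-bit hardware timestamp to measure a code region, in the same back-to-back style as the timer calibration code later in this commit (the function name is illustrative):

static unsigned long long time_region_ticks(void)
{
    volatile unsigned long long t_start, t_end;

    t_start = netapi_getTimestamp();
    /* ... code under measurement ... */
    t_end   = netapi_getTimestamp();

    return t_end - t_start;   /* elapsed hardware timer ticks */
}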
index 7cea8b1296470050222eb3f5a093ca98bd182c4c..54dcad9ff100c2342280a4b727e9217f7a6e055d 100755 (executable)
/**\r
* @def TUNE_NETAPI_PERM_MEM_SZ\r
* (1) how much contiguous memory to grab. This is used for\r
- * descriptors and buffers. Can't be bigger than MSMC (internal SOC memory area) \r
+ * descriptors and buffers. Can't be bigger than the CMA memory area (or MSMC if\r
+ * we are using uncached). This can be set at netapi_init() via NETAPI_CFG_T\r
*/\r
-#define TUNE_NETAPI_PERM_MEM_SZ (2*1024*1024)\r
+#define TUNE_NETAPI_PERM_MEM_SZ (2*1024*1024) \r
\r
/**\r
* @def TUNE_NETAPI_MAX_PKTIO\r
/**\r
* @def TUNE_NETAPI_DEFAULT_BUFFER_SIZE\r
* (3) size of netapi default pktlib heap buffers\r
+* This can be set at netapi_init() \r
*/\r
#define TUNE_NETAPI_DEFAULT_BUFFER_SIZE 1600 \r
\r
/**\r
* @def TUNE_NETAPI_DEFAULT_NUM_BUFFERS\r
*(4) number of netapi default pktlib heap buffers (and assoc descriptors)\r
+ * this can be set at netapi_init()\r
*/\r
#define TUNE_NETAPI_DEFAULT_NUM_BUFFERS 200 \r
\r
/*\r
* @def TUNE_NETAPI_DEFAULT_NUM_SOLO_DESCRIPTORS\r
* (5) number of netapi default pkt lib heap solo descriptors\r
+ * this can be set at netapi_init\r
*/\r
#define TUNE_NETAPI_DEFAULT_NUM_SOLO_DESCRIPTORS 100 \r
\r
#define NETAPI_INCLUDE_SCHED \r
\r
//(7) # of QM descriptors (total)\r
+// this can be set in netapi_init()\r
+// MUST BE POWER OF 2\r
#define TUNE_NETAPI_QM_CONFIG_MAX_DESC_NUM 1024 /* 16384 is abs max */\r
\r
//(8) Region info\r
\r
/* at least as big as DEFAULT_NUM_SOLO+DEFAULT_NUM_BUFFERS but also a power of 2*/\r
#define TUNE_NETAPI_NUM_GLOBAL_DESC 512 \r
+\r
#define TUNE_NETAPI_DESC_SIZE 128 //don't change!!\r
#ifdef NETAPI_USE_DDR\r
#define TUNE_NETAPI_QM_START_INDEX 0x2000 //WARNING: must reflect what kernel is using for their region, see device tree blob\r
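
These build-time tunables now serve as defaults that a process can override through NETAPI_CFG_T. A sketch of a configuration that mirrors the compiled-in values while enlarging the default-heap buffers (the extra 256 bytes is an illustrative allowance for an RX start-of-packet offset):

static NETAPI_CFG_T my_cfg =
{
    TUNE_NETAPI_PERM_MEM_SZ,                    /* def_mem_size                     */
    0,                                          /* def_flow_pkt_rx_offset           */
    TUNE_NETAPI_QM_CONFIG_MAX_DESC_NUM,         /* def_max_descriptors (power of 2) */
    TUNE_NETAPI_NUM_GLOBAL_DESC,                /* def_tot_descriptors_for_us       */
    TUNE_NETAPI_DEFAULT_NUM_BUFFERS,            /* def_heap_n_descriptors           */
    TUNE_NETAPI_DEFAULT_NUM_SOLO_DESCRIPTORS,   /* def_heap_n_zdescriptors          */
    TUNE_NETAPI_DEFAULT_BUFFER_SIZE + 256       /* def_heap_buf_size                */
};

The system master would then pass &my_cfg as the second argument to netapi_init().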
index 5c6e3bb307f446bcb99a7a4765c0e9e9417a9dfe..073a65fcfde23f2a4207c5db0cbf91bf82f4e689 100755 (executable)
int n, \r
Pktlib_HeapHandle handles[],\r
int sizes[],\r
+ int recv_offset, //bytes to reserve in front of the packet (RX start-of-packet offset)\r
int * err );\r
void netcp_cfgDelFlow(NETAPI_T , NETCP_CFG_FLOW_HANDLE_T , int * err);\r
\r
index 91eb045c06c6e6a6068007d9472f23d7f9fa0ade..ef7bc775a2a3e3534631b0f374c3d36da7cd6adf 100755 (executable)
/* the callback function */\r
struct PKTIO_HANDLE_tag;\r
\r
+//polling control\r
+typedef struct PKTIO_POLL_Tag\r
+{\r
+/* future */\r
+} PKTIO_POLL_T;\r
+\r
#define PKTIO_MAX_RECV (TUNE_NETAPI_MAX_BURST_RCV)\r
typedef void (*PKTIO_CB)(struct PKTIO_HANDLE_tag * channel, Ti_Pkt* p_recv[],\r
PKTIO_METADATA_T p_meta[], int n_pkts,\r
uint64_t ts );\r
\r
+typedef int (*PKTIO_SEND)(struct PKTIO_HANDLE_tag * channel, Ti_Pkt* p_send,\r
+ PKTIO_METADATA_T *p_meta, int * p_err);\r
+\r
+typedef int (*PKTIO_POLL)(struct PKTIO_HANDLE_tag * channel,PKTIO_POLL_T * p_poll_cfg ,\r
+ int * p_err);\r
+\r
/** channel configuration */\r
#define PKTIO_NA 0\r
typedef struct PKTIO_CFG_Tag\r
Qmss_Queue qInfo; /* and its qm#/q# */\r
int max_n; /* max # of pkts to read in one poll */\r
void * cookie; /* app specific */\r
+ PKTIO_SEND _send; /* pktio type specific send function */\r
+ PKTIO_POLL _poll; /* pktio type specific POLL function */\r
char name[PKTIO_MAX_NAME+1];\r
} PKTIO_HANDLE_T;\r
\r
} PKTIO_CONTROL_T;\r
\r
\r
-//polling control\r
-typedef struct PKTIO_POLL_Tag\r
-{\r
-/* future */\r
-} PKTIO_POLL_T;\r
\r
/*---------------------------------------------------*/\r
/*-------------------------API-----------------------*/\r
* @retval int npkts: 1 if packet sent, 0 if error \r
* @pre @ref netapi_init, @ref pktio_create, @ref pktio_open\r
*/\r
-int pktio_send(PKTIO_HANDLE_T * channel, /* the channel */\r
+static inline int pktio_send(PKTIO_HANDLE_T * channel, /* the channel */\r
Ti_Pkt *pkt, /* pointer to packet */\r
PKTIO_METADATA_T *m, /* pointer to meta data */\r
- int * err);\r
+ int * err)\r
+{\r
+ return channel->_send((struct PKTIO_HANDLE_tag *)channel, pkt, m, err);\r
+}\r
\r
/*\r
* @brief API polls a pktio channel for received packets\r
* @retval int npkts: number of packets received by poll \r
* @pre @ref netapi_init, @ref pktio_create, @ref pktio_open\r
*/\r
-int pktio_poll(PKTIO_HANDLE_T * handle, //handle to pktio\r
+static inline int pktio_poll(PKTIO_HANDLE_T * handle, //handle to pktio\r
PKTIO_POLL_T * p_poll_cfg, //polling configuration\r
- int * err);\r
+ int * err)\r
+{\r
+ return handle->_poll((struct PKTIO_HANDLE_tag *) handle, p_poll_cfg, err);\r
+}\r
\r
/*\r
* @brief API polls all pktio channels associated with @ref NETAPI_T instance\r
#define pktio_set_cookie(handle, cookie) (handle)->cookie = cookie\r
#define pktio_get_cookie(handle) (handle)->cookie\r
#define pktio_get_q(handle) (handle)->q\r
+\r
+/*-----------------Extra Fast Path pkt meta data macros--------------------*/\r
+#include "cppi_desc.h"\r
+#include "ti/drv/pa/pa.h"\r
+#include "ti/drv/pa/pasahost.h"\r
+\r
+\r
+//return default packet queue to poll for netcp RX\r
+//these are expensive calls, so call once and save\r
+static inline Qmss_QueueHnd PKTIO_GET_DEFAULT_NETCP_Q(PKTIO_HANDLE_T *h)\r
+{\r
+nwalGlobCxtInfo_t Info;\r
+nwal_getGlobCxtInfo(h->nwalInstanceHandle,&Info);\r
+return Info.rxDefPktQ;\r
+}\r
+\r
+//return L4Queue to poll for netcp RX (L4 classifier queue)\r
+//these are expensive calls, so call once and save\r
+static inline Qmss_QueueHnd PKTIO_GET_DEFAULT_NETCP_L4Q(PKTIO_HANDLE_T *h)\r
+{\r
+nwalLocCxtInfo_t Info;\r
+nwal_getLocCxtInfo(h->nwalInstanceHandle,&Info);\r
+return Info.rxL4PktQ;\r
+}\r
+\r
+\r
+/* find pointer to proto info fields in descriptor */\r
+static inline pasahoLongInfo_t* PKTIO_GET_PROTO_INFO( Ti_Pkt * pkt)\r
+{\r
+pasahoLongInfo_t* pinfo;\r
+uint32_t infoLen;\r
+Cppi_getPSData (Cppi_DescType_HOST, Cppi_PSLoc_PS_IN_DESC,(Cppi_Desc *)pkt, (uint8_t **)&pinfo, &infoLen);\r
+return pinfo;\r
+}\r
+\r
+/** "p" below is return of PKTIO_GET_PROTO_INFO() above**/\r
+\r
+/* offset to L3 header */\r
+#define PKTIO_GET_L3_OFFSET(p) PASAHO_LINFO_READ_L3_OFFSET(p)\r
+\r
+/* offset to L4 header */\r
+#define PKTIO_GET_L4_OFFSET(p) PASAHO_LINFO_READ_L4_OFFSET(p)\r
+\r
+/* next proto header */\r
+#define PKTIO_GET_NEXT_HEADER_TYPE(p) PASAHO_LINFO_READ_NXT_HDR_TYPE(p)\r
+\r
+/* offset to L4 payload */\r
+#define PKTIO_GET_L5_OFFSET(p) PASAHO_LINFO_READ_L5_OFFSET(p)\r
+\r
+/* end of L4 payload */\r
+#define PKTIO_GET_PAYLOAD_END(p) PASAHO_LINFO_READ_END_OFFSET(p)\r
+\r
+/* IPSEC ESP done ? */\r
+#define PKTIO_ESP_DONE(p) PASAHO_LINFO_IS_IPSEC_ESP(p)\r
+\r
+/* IPSEC AH done ? */\r
+#define PKTIO_AH_DONE(p) PASAHO_LINFO_IS_IPSEC_AH(p)\r
+\r
+/* MAC info */\r
+#define PKTIO_IS_MAC_BROADCAST(p) PASAHO_READ_BITFIELD(p)\r
+#define PKTIO_IS_MAC_MULTICAST(p) PASAHO_READ_BITFIELD(p)\r
+#define PKTIO_GET_MAC_TYPE(p) PASAHO_LINFO_READ_MAC_PKTTYPE(p)\r
+\r
+/* read input port */\r
+#define PKTIO_GET_INPUT_PORT(p) PASAHO_LINFO_READ_INPORT(p) \r
+\r
+/* AppId */\r
+static inline unsigned int PKTIO_GET_APPID( Ti_Pkt * pkt) \r
+{\r
+unsigned char * p_swinfo0;\r
+Cppi_getSoftwareInfo (Cppi_DescType_HOST,\r
+ (Cppi_Desc *)pkt,\r
+ &p_swinfo0);\r
+return *((unsigned int *)p_swinfo0);\r
+} \r
+\r
#endif\r
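
A sketch of how the new fast-path accessors might be used inside a PKTIO_CB receive callback to pull the L3/L4 offsets and AppID out of each received descriptor (the callback name and per-packet handling are illustrative):

void example_recv_cb(struct PKTIO_HANDLE_tag *channel, Ti_Pkt *p_recv[],
                     PKTIO_METADATA_T p_meta[], int n_pkts, uint64_t ts)
{
    int i;
    for (i = 0; i < n_pkts; i++)
    {
        /* parse results left by NETCP in the descriptor's protocol-specific data */
        pasahoLongInfo_t *pinfo  = PKTIO_GET_PROTO_INFO(p_recv[i]);
        unsigned int      l3_off = PKTIO_GET_L3_OFFSET(pinfo);
        unsigned int      l4_off = PKTIO_GET_L4_OFFSET(pinfo);
        unsigned int      app_id = PKTIO_GET_APPID(p_recv[i]);

        /* ... hand the packet off using l3_off, l4_off, app_id ... */

        Pktlib_freePacket(p_recv[i]);   /* return descriptor+buffer to its heap */
    }
}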
index 7cf66d5c2ae16cbf8335000a9942f0dd206d44e8..fe785af69fd0ce3f094b7858ad2b09186cd67fa5 100755 (executable)
/*------------globals-----------------*/\r
#define NUM_HOST_DESC (TUNE_NETAPI_NUM_LOCAL_DESC)\r
#define SIZE_LOCAL_DESC (TUNE_NETAPI_DESC_SIZE) \r
-#define NUM_SHARED_DESC (TUNE_NETAPI_NUM_GLOBAL_DESC) \r
#define SIZE_SHARED_DESC (TUNE_NETAPI_DESC_SIZE)\r
\r
#define CONFIG_BUFSIZE_PA_INST 256\r
#define CONFIG_BUFSIZE_L2_TABLE 1000\r
#define CONFIG_BUFSIZE_L3_TABLE 4000\r
\r
+static NETAPI_CFG_T netapi_default_cfg=\r
+{\r
+TUNE_NETAPI_PERM_MEM_SZ,\r
+0, //start of packet offset for hw to place data on rx for default flow\r
+TUNE_NETAPI_QM_CONFIG_MAX_DESC_NUM, //max number of descriptors in system\r
+TUNE_NETAPI_NUM_GLOBAL_DESC, //total we will use\r
+TUNE_NETAPI_DEFAULT_NUM_BUFFERS, //#descriptors+buffers in default heap\r
+TUNE_NETAPI_DEFAULT_NUM_SOLO_DESCRIPTORS, //#descriptors w/o buffers in default heap\r
+TUNE_NETAPI_DEFAULT_BUFFER_SIZE //size of buffers in default heap\r
+\r
+};\r
+\r
static Pktlib_HeapIfTable netapi_pktlib_ifTable;\r
static NETAPI_GLOBAL_T netapi_global;\r
NETAPI_GLOBAL_T * netapi_get_global(){ return &netapi_global;}\r
/*-------------------------------------\r
* initialize NETAPI instance \r
*-------------------------------------*/\r
-NETAPI_T netapi_init(int master)\r
+NETAPI_T netapi_init(int master, NETAPI_CFG_T * p_cfg)\r
{\r
int i;\r
int err;\r
/* global stuff (if master) */\r
if (master==NETAPI_SYS_MASTER)\r
{\r
+ if (p_cfg) memcpy(&netapi_global.cfg,p_cfg, sizeof(NETAPI_CFG_T));\r
+ else memcpy(&netapi_global.cfg,&netapi_default_cfg, sizeof(NETAPI_CFG_T));\r
for(i=0;i<NETAPI_MAX_PKTIO;i++) \r
{ \r
netapi_global.pktios[i].qn.qNum=-1;\r
netapi_init_timer();\r
\r
/* Initialize Queue Manager Sub System */\r
- result = netapi_init_qm (); \r
+ result = netapi_init_qm (netapi_global.cfg.def_max_descriptors); \r
if (result != 1)\r
{\r
return -1;\r
\r
/* Initialize the global descriptor memory region. */\r
result= netapi_qm_setup_mem_region( \r
- NUM_SHARED_DESC,\r
+ netapi_global.cfg.def_tot_descriptors_for_us,\r
SIZE_SHARED_DESC,\r
(unsigned int *) netapi_VM_QMemGlobalDescRam,\r
NETAPI_GLOBAL_REGION);\r
if(result <0) {printf("can't setup local region\n"); return -1;}\r
#endif\r
/* Initialize CPPI CPDMA */\r
+\r
result = netapi_init_cppi ();\r
if (result != 1)\r
{\r
netapi_pktlib_ifTable.data_free = netapiSharedMemoryFree;\r
\r
/* Create Shared Heap with specified configuration. */\r
-#define SHARED_MAX_DATA_SIZE (TUNE_NETAPI_DEFAULT_BUFFER_SIZE) \r
sharedHeapHandle = Pktlib_createHeap("netapi", NETAPI_GLOBAL_REGION, //was 0\r
1,\r
- SHARED_MAX_DATA_SIZE,\r
- TUNE_NETAPI_DEFAULT_NUM_BUFFERS,\r
- TUNE_NETAPI_DEFAULT_NUM_SOLO_DESCRIPTORS,\r
+ netapi_global.cfg.def_heap_buf_size,\r
+ netapi_global.cfg.def_heap_n_descriptors,\r
+ netapi_global.cfg.def_heap_n_zdescriptors,\r
&netapi_pktlib_ifTable);\r
//todo -> cleanup on failure\r
if (!sharedHeapHandle) { printf(">'netapi' heap create failed\n"); return -1;}\r
result = netapi_init_nwal(\r
NETAPI_GLOBAL_REGION,\r
&netapi_pktlib_ifTable, \r
- &netapi_global.nwal_context);\r
+ &netapi_global.nwal_context,\r
+ &netapi_global.cfg);\r
if (result<0) {printf(">netapi init_nwal() failed\n"); return -1; }\r
\r
/* start NWAL */\r
index 59c0b3df26974b3dcea8720a68ad49028e4e9d76..51936b396fabb9f6843196997d8206e06f51e04d 100755 (executable)
//****************************************************\r
// initialize QM (per SOC)\r
//***************************************************\r
-int netapi_init_qm(void)\r
+int netapi_init_qm(int max_descriptors)\r
{\r
Qmss_InitCfg qmssInitConfig;\r
int32_t result;\r
qmssInitConfig.linkingRAM0Base = 0;\r
qmssInitConfig.linkingRAM0Size = 0;\r
qmssInitConfig.linkingRAM1Base = 0;\r
- qmssInitConfig.maxDescNum = TUNE_NETAPI_QM_CONFIG_MAX_DESC_NUM;\r
+ qmssInitConfig.maxDescNum = max_descriptors;\r
qmssInitConfig.qmssHwStatus =QMSS_HW_INIT_COMPLETE; //bypass some of the hw init\r
nwalTest_qmssGblCfgParams = qmssGblCfgParams[0];\r
\r
@@ -393,7 +393,8 @@ uint8_t salldChanHandle[NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE_PER_CHAN * TUNE
int netapi_init_nwal(\r
int region2use, \r
Pktlib_HeapIfTable * p_table,\r
- NETAPI_NWAL_GLOBAL_CONTEXT_T * p_nwal_context )\r
+ NETAPI_NWAL_GLOBAL_CONTEXT_T * p_nwal_context, \r
+ NETAPI_CFG_T*p_cfg )\r
{\r
nwalSizeInfo_t nwalSizeInfo;\r
nwalMemBuf_t nwalMemBuf[nwal_N_BUFS];\r
/* Initialize Buffer Pool for NetCP PA to SA packets */\r
nwalGlobCfg.pa2SaBufPool.numBufPools = 1;\r
nwalGlobCfg.pa2SaBufPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;\r
- nwalGlobCfg.pa2SaBufPool.bufPool[0].bufSize = TUNE_NETAPI_DEFAULT_BUFFER_SIZE;\r
+ nwalGlobCfg.pa2SaBufPool.bufPool[0].bufSize = p_cfg->def_heap_buf_size;\r
nwalGlobCfg.pa2SaBufPool.bufPool[0].heapHandle = p_nwal_context->pa2sa_heap=\r
Pktlib_createHeap("nwal PA2SA",\r
region2use,\r
0,\r
- TUNE_NETAPI_DEFAULT_BUFFER_SIZE,\r
+ p_cfg->def_heap_buf_size,\r
TUNE_NETAPI_CONFIG_MAX_PA_TO_SA_DESC,\r
0,\r
p_table);\r
/* Initialize Buffer Pool for NetCP SA to PA packets */\r
nwalGlobCfg.sa2PaBufPool.numBufPools = 1;\r
nwalGlobCfg.sa2PaBufPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;\r
- nwalGlobCfg.sa2PaBufPool.bufPool[0].bufSize = TUNE_NETAPI_DEFAULT_BUFFER_SIZE;\r
+ nwalGlobCfg.sa2PaBufPool.bufPool[0].bufSize = p_cfg->def_heap_buf_size;\r
\r
nwalGlobCfg.sa2PaBufPool.bufPool[0].heapHandle = p_nwal_context->sa2pa_heap=\r
Pktlib_createHeap("nwal SA2PA",\r
region2use,\r
0,\r
- TUNE_NETAPI_DEFAULT_BUFFER_SIZE,\r
+ p_cfg->def_heap_buf_size,\r
TUNE_NETAPI_CONFIG_MAX_SA_TO_PA_DESC,\r
0,\r
p_table);\r
index cc9029ac21f586c625ff43515f794fa94b288594..b7244aa7760fe9a2eed809df884297ba343b041d 100755 (executable)
#define NETAPI_MAX_PKTIO (TUNE_NETAPI_MAX_PKTIO) \r
PKTIO_ENTRY_T pktios[NETAPI_MAX_PKTIO];\r
\r
-/* pktlib heap */\r
+/* configuration */\r
+NETAPI_CFG_T cfg;\r
\r
/* global timers */\r
\r
\r
\r
//internal initialization routines */\r
-int netapi_init_qm(void);\r
+int netapi_init_qm(int max_descriptors);\r
int netapi_init_cppi(void);\r
int netapi_init_cpsw(void);\r
int netapi_start_qm(void);\r
int netapi_init_nwal(\r
int region2use,\r
Pktlib_HeapIfTable * p_table,\r
- NETAPI_NWAL_GLOBAL_CONTEXT_T * p_nwal_context );\r
+ NETAPI_NWAL_GLOBAL_CONTEXT_T * p_nwal_context,\r
+ NETAPI_CFG_T *p_cfg );\r
int netapi_start_nwal(Pktlib_HeapHandle pkt_heap,\r
Pktlib_HeapHandle cmd_heap,\r
NETAPI_NWAL_LOCAL_CONTEXT_T *p ,\r
index 3742ec997d50e6c87b28ef1e7c533fdb49e653d5..a71882112ce4dcf9af87efa6988108dabc2e379e 100644 (file)
netcp_cfgp_delete_sa(&netapi_get_global()->nwal_context,tunnelId);
return -1;
}
- printf(">netapisec %d Creating sideband mode SA for %d ( mac %d)\n", tunnelId, iface_no);
+ printf(">netapisec. Creating sideband mode SA for %d ( mac %d)\n", tunnelId, iface_no);
*p_data_mode_handle = dm_handle;
}
//save stuff
index 8830112bc029095d9d500cdf258edb0e29d16ec4..b0bae824753fc542c3904b7fcd0f5ff1356db221 100755 (executable)
*****************************************************/\r
#ifndef __NETAPI_UTIL__H\r
#define __NETAPI_UTIL__H\r
+\r
/* timing */\r
static inline unsigned long netapi_timing_start(void)\r
{\r
index 03a80494f79b0ccc998e4569535bbefe90a9bb0e..a7e1756d441f22bc8663e65ba68b9df60a3d9767 100755 (executable)
/* Macro to align x to y */\r
#define align(x,y) ((x + y) & (~y))\r
\r
-#define NETAPI_PERM_MEM_SZ (TUNE_NETAPI_PERM_MEM_SZ) \r
-\r
-/* Physical address map & size for various subsystems */\r
-#define QMSS_CFG_BASE_ADDR CSL_QM_SS_CFG_QUE_PEEK_REGS\r
-#define QMSS_CFG_BLK_SZ (1*1024*1024)\r
-#define QMSS_DATA_BASE_ADDR 0x44020000 \r
-#define QMSS_DATA_BLK_SZ (0x60000)\r
-#define SRIO_CFG_BASE_ADDR CSL_SRIO_CONFIG_REGS\r
-#define SRIO_CFG_BLK_SZ (132*1024)\r
-#define PASS_CFG_BASE_ADDR CSL_PA_SS_CFG_REGS \r
-#define PASS_CFG_BLK_SZ (1*1024*1024)\r
-\r
-#define MSMC_SRAM_BASE_ADDR CSL_MSMC_SRAM_REGS\r
-\r
uint8_t *netapi_VM_mem_start_phy = (uint8_t*)0;\r
uint8_t *netapi_VM_mem_start = (uint8_t*)0;\r
uint8_t *netapi_VM_mem_end = (uint8_t*)0;\r
static uint32_t netapi_VM_mem_size = 0;\r
\r
\r
+\r
/* File descriptor for /dev/mem */ \r
static int dev_mem_fd;\r
static int our_netapi_module_fd;\r
addr= ( uint8_t *) netapi_utilGetPhysOfBufferArea(); //get address that was allocated for us by kernela module */\r
size = netapi_utilGetSizeOfBufferArea(); //get the size that was allocated\r
#ifdef USE_MODULE_MMAP\r
- map_base = (void *) netapi_utilGetVaOfBufferArea(NETAPIMOD_MMAP_DMA_MEM_OFFSET, size); //mmap into our space, return va\r
+ map_base = (void *) netapi_utilGetVaOfBufferArea(0,size); //mmap into our space, return va\r
#else\r
if( (temp_fd = open("/dev/mem", O_RDWR )) == -1) {\r
printf(">netapi_VM_memAllocInit: failed to open dev/mem again cached err=%d\n",errno);\r
/* Total Permanent memory required in NWAL test\r
* for Packet buffers & descriptor buffers\r
*/\r
+#define NETAPI_PERM_MEM_SZ (TUNE_NETAPI_PERM_MEM_SZ) \r
+\r
+/* Physical address map & size for various subsystems */\r
+#define QMSS_CFG_BASE_ADDR CSL_QM_SS_CFG_QUE_PEEK_REGS\r
+#define QMSS_CFG_BLK_SZ (1*1024*1024)\r
+#define QMSS_DATA_BASE_ADDR 0x44020000 \r
+#define QMSS_DATA_BLK_SZ (0x60000)\r
+#define SRIO_CFG_BASE_ADDR CSL_SRIO_CONFIG_REGS\r
+#define SRIO_CFG_BLK_SZ (132*1024)\r
+#define PASS_CFG_BASE_ADDR CSL_PA_SS_CFG_REGS \r
+#define PASS_CFG_BLK_SZ (1*1024*1024)\r
+\r
+#define MSMC_SRAM_BASE_ADDR CSL_MSMC_SRAM_REGS\r
+\r
/* Global variables to hold virtual address of various subsystems */\r
void *netapi_VM_qmssCfgVaddr;\r
void *netapi_VM_qmssDataVaddr;\r
printf(">netapi QMSS_CFG_BASE_ADDR:0x%x Memory mapped at address %p.\n",(void*)QMSS_CFG_BASE_ADDR, netapi_VM_qmssCfgVaddr);\r
\r
/* (2b) QMSS DATA Regs */\r
-#ifdef USE_MODULE_MMAP\r
- netapi_VM_qmssDataVaddr = (void *) netapi_utilGetVaOfBufferArea(NETAPIMOD_MMAP_QM_DATA_REG_MEM_OFFSET, QMSS_DATA_BLK_SZ);\r
-#else\r
netapi_VM_qmssDataVaddr = netapi_VM_memMap((void*)QMSS_DATA_BASE_ADDR,\r
QMSS_DATA_BLK_SZ);\r
-#endif\r
-\r
if (!netapi_VM_qmssDataVaddr)\r
{\r
printf(">netapi ERROR: Failed to map QMSS DATA registers\n");\r
index e9a7791e4e0e09e4cd8fbb3f0b4f67dced86a892..2ae66eb34b75873a7bc1197b2d3486e9a5981f99 100755 (executable)
int n,\r
Pktlib_HeapHandle handles[],\r
int sizes[],\r
+ int byte_offset,\r
int * err )\r
{\r
Cppi_RxFlowCfg rxFlowCfg;\r
rxFlowCfg.flowIdNum = CPPI_PARAM_NOT_SPECIFIED;\r
rxFlowCfg.rx_dest_qnum = 100; //DANGEROUS> TODO PUT VALID Q HERE\r
rxFlowCfg.rx_dest_qmgr = 0;\r
- rxFlowCfg.rx_sop_offset = 0;\r
+ rxFlowCfg.rx_sop_offset = byte_offset;\r
rxFlowCfg.rx_ps_location = Cppi_PSLoc_PS_IN_DESC;\r
rxFlowCfg.rx_desc_type = Cppi_DescType_HOST;\r
rxFlowCfg.rx_error_handling = 0;\r
index 5e6a334ffef18cc50443cfed65164de5237fee61..6ee7c33497390ee1205df449f1670fd2f422b7b0 100755 (executable)
@@ -220,11 +220,11 @@ unsigned long netapi_utilGetVaOfBufferArea(unsigned int offset, unsigned int siz
\r
/* Map the physical address to user space */\r
userp = mmap(0, // Preferred start address\r
- size, // Length to be mapped\r
+ size, // Length to be mapped\r
PROT_WRITE | PROT_READ, // Read and write access\r
MAP_SHARED, // Shared memory\r
netapi_fd, // File descriptor\r
- offset); // The byte offset from fd\r
+ offset); // The byte offset from fd\r
\r
if (userp == MAP_FAILED) {\r
return 0;\r
if ((blockPtr <netapi_VM_mem_start)||( blockPtr>netapi_VM_mem_end)) return;\r
//netapi_utilCacheWbInv(blockPtr, size);\r
//printf("osal> wbiv %x %x %d ..", blockPtr, Osal_saGetSCPhyAddr(blockPtr), size);\r
- _netapi_utilCacheWbInv(blockPtr, size);\r
+ _netapi_utilCacheWbInv(_Osal_qmssVirtToPhy(blockPtr), size);\r
v2= netapi_timing_stop();\r
cache_op_cycles += (v2-v1); \r
n_cache_op_cycles+=1;\r
Cppi_HostDesc *prevBDPtr = 0;\r
while (nextBDPtr)\r
{\r
- void *buffPtr;\r
+ void *buffPtr=NULL;\r
if (nextBDPtr->buffPtr)\r
{\r
buffPtr = (void *)nextBDPtr->buffPtr;\r
nextBDPtr->buffPtr = (uint32_t)_Osal_qmssVirtToPhy((void *)(nextBDPtr->buffPtr));\r
if (!(nextBDPtr->buffPtr)) return (void *)0;\r
- Osal_writeBackCache(buffPtr, nextBDPtr->buffLen);\r
+ Osal_writeBackCache(buffPtr, nextBDPtr->buffLen); \r
}\r
\r
if (nextBDPtr->origBuffPtr)\r
if (!(prevBDPtr->nextBDPtr)) return (void *)0;\r
}\r
\r
+ if (buffPtr) Osal_writeBackCache(buffPtr, prevBDPtr->buffLen);\r
Osal_writeBackCache(prevBDPtr, TUNE_NETAPI_DESC_SIZE);\r
}\r
descAddr = _Osal_qmssVirtToPhy(descAddr);\r
Cppi_HostDesc *nextBDPtr = (Cppi_HostDesc *)QMSS_DESC_PTR(descAddr);\r
while (nextBDPtr)\r
{\r
- //Osal_invalidateCache(nextBDPtr, TUNE_NETAPI_DESC_SIZE);\r
+ //Qmss_osalBeginMemAccess(nextBDPtr, TUNE_NETAPI_DESC_SIZE);\r
if (nextBDPtr->buffPtr)\r
{\r
nextBDPtr->buffPtr = (uint32_t)_Osal_qmssPhyToVirt((void *)(nextBDPtr->buffPtr));\r
if (!(nextBDPtr->buffPtr)) return (void *)0;\r
- //Osal_invalidateCache((void *)(nextBDPtr->buffPtr), nextBDPtr->buffLen);\r
+ //Qmss_osalBeginMemAccess((void *)(nextBDPtr->buffPtr), nextBDPtr->buffLen);\r
}\r
\r
if (nextBDPtr->origBuffPtr)\r
{\r
descAddr = _Osal_qmssPhyToVirt(descAddr);\r
if (!descAddr) return (void *)0;\r
- //Osal_invalidateCache(descAddr, TUNE_NETAPI_DESC_SIZE);\r
}\r
#endif\r
return descAddr;\r
index 0f2b15d1355d18657cb073520b61814eedadc542..c805913fee5f24c56741a9cd1e2f9608a3b21f5c 100755 (executable)
extern unsigned int vv10p;\r
extern unsigned int vv11p;\r
extern unsigned int vv12p;\r
+extern unsigned int vv13p; //rcv path\r
+extern unsigned int vv14p;\r
+extern unsigned int vv15p;\r
+\r
\r
#ifdef DAL_BENCH\r
unsigned int pktio_get_qop_time(){return (BENCH_get_qop_time());}\r
return NULL;\r
}\r
\r
+\r
+/*-----------------------------------------------------*/\r
+/* optimized send/rcv functions */\r
+/*----------------------------------------------------*/\r
+\r
+//********************************************\r
+//send pkt via ipc queue \r
+//********************************************\r
+static int pktio_send_ipc(PKTIO_HANDLE_T * p, Ti_Pkt *pkt, PKTIO_METADATA_T *m, int * err)\r
+{\r
+ *err=0;\r
+#ifdef DO_BENCH\r
+ vv8p=netapi_timing_stop();\r
+#endif\r
+#ifdef DAL_BENCH\r
+ BENCH_Qmss_queuePushDesc (p->q, (void*)pkt);\r
+#else\r
+ Qmss_queuePushDesc (p->q, (void*)pkt);\r
+#endif\r
+#ifdef DO_BENCH\r
+ vv9p=netapi_timing_stop();\r
+#endif\r
+ return 1;\r
+}\r
+//********************************************\r
+//send pkt to NETCP via NWAL\r
+//********************************************\r
+static int pktio_send_nwal(PKTIO_HANDLE_T * p, Ti_Pkt *pkt, PKTIO_METADATA_T *m, int * err)\r
+{\r
+ nwalTxPktInfo_t * pPktInfo=m->u.tx_meta;\r
+ nwal_RetValue res;\r
+ *err=0;\r
+ pPktInfo->pPkt = pkt;\r
+#ifdef DO_BENCH\r
+ vv11p=netapi_timing_stop();\r
+#endif\r
+ res=nwal_send(p->nwalInstanceHandle, m->sa_handle,pPktInfo);\r
+#ifdef DO_BENCH\r
+ vv12p=netapi_timing_stop();\r
+#endif\r
+ if (res != nwal_OK) { *err = NETAPI_ERR_NWAL_TX_ERR -res;}\r
+ return 1;\r
+}\r
+//********************************************\r
+//send to SA via SB queue\r
+//********************************************\r
+static int pktio_send_sb(PKTIO_HANDLE_T * p, Ti_Pkt *pkt, PKTIO_METADATA_T *m, int * err)\r
+{\r
+ nwalDmTxPayloadInfo_t *pPktInfoSB = m->u.tx_sb_meta;\r
+ nwal_RetValue res;\r
+ *err=0;\r
+ pPktInfoSB->pPkt = pkt;\r
+ res=nwal_sendDM(p->nwalInstanceHandle, m->sa_handle,pPktInfoSB);\r
+ if (res != nwal_OK) *err = NETAPI_ERR_NWAL_TX_ERR -res;\r
+ return 1;\r
+}\r
+//********************************************\r
+//dummy. return err\r
+//********************************************\r
+static int pktio_send_dummy(PKTIO_HANDLE_T * p, Ti_Pkt *pkt, PKTIO_METADATA_T *m, int * err)\r
+{\r
+ *err = NETAPI_ERR_BAD_INPUT;\r
+ return -1;\r
+} \r
+//*******************************************\r
+//dummy poll\r
+//*******************************************\r
+static int pktio_poll_dummy(PKTIO_HANDLE_T * p, PKTIO_POLL_T * p_poll_cfg, int * err)\r
+{\r
+ *err= NETAPI_ERR_BAD_INPUT;\r
+ return 0;\r
+}\r
+//********************************************\r
+//poll IPC queue\r
+//********************************************\r
+static int pktio_poll_ipc(PKTIO_HANDLE_T * p, PKTIO_POLL_T * p_poll_cfg, int * err)\r
+{\r
+Ti_Pkt * pkt_list[PKTIO_MAX_RECV];\r
+PKTIO_METADATA_T meta_s[PKTIO_MAX_RECV];\r
+int r=0;\r
+int n;\r
+Ti_Pkt * temp;\r
+*err=0;\r
+n= (p->max_n< PKTIO_MAX_RECV) ? p->max_n : PKTIO_MAX_RECV;\r
+for(r=0;r<n;r++)\r
+{\r
+#ifdef DO_BENCH\r
+ if (r==0) vv7p=netapi_timing_stop();\r
+#endif\r
+#ifdef DAL_BENCH\r
+ temp=(Ti_Pkt*)(Cppi_HostDesc*)QMSS_DESC_PTR(BENCH_Qmss_queuePop(p->q));\r
+#else\r
+ temp=(Ti_Pkt*)(Cppi_HostDesc*)QMSS_DESC_PTR(Qmss_queuePop(p->q));\r
+#endif\r
+#ifdef DO_BENCH\r
+ if (r==0) vv10p=netapi_timing_stop();\r
+#endif\r
+ if(!temp) break;\r
+ /* process meta data */\r
+ pkt_list[r]= temp;\r
+ meta_s[r].flags1=0x1;\r
+ }\r
+ if (r) p->cb((struct PKTIO_HANDLE_tag *)p, pkt_list, &meta_s[0], r, 0LL);\r
+ return r;\r
+}\r
+//********************************************\r
+//poll nwal data queues for pkts from netcp\r
+//********************************************\r
+static int pktio_poll_nwal(PKTIO_HANDLE_T * p, PKTIO_POLL_T * p_poll_cfg, int * err)\r
+{\r
+int r=0;\r
+*err=0;\r
+ /* Poll for common L2/L3 packets and L4 class pkts (todo-> only do L4 if classifiers are\r
+ set.. optimization maybe?) */\r
+#ifdef DO_BENCH\r
+ vv13p=netapi_timing_stop();\r
+#endif\r
+ r=nwal_pollPkt(p->nwalInstanceHandle,\r
+ nwal_POLL_DEFAULT_GLOB_PKT_Q| nwal_POLL_DEFAULT_PER_PROC_PKT_Q,\r
+ (uint32_t) p,\r
+ p->max_n,\r
+ QMSS_PARAM_NOT_SPECIFIED,\r
+ (void*) NULL);\r
+ return r;\r
+}\r
+//********************************************\r
+//poll nwal sideband queues for pkts from SA\r
+//********************************************\r
+static int pktio_poll_sb(PKTIO_HANDLE_T * p, PKTIO_POLL_T * p_poll_cfg, int * err)\r
+{\r
+int r=0;\r
+*err=0;\r
+ r=nwal_pollDm(p->nwalInstanceHandle,\r
+ nwal_POLL_DM_DEF_GLOB_ENCRYPT_Q,\r
+ (uint32_t) p,\r
+ p->max_n,\r
+ QMSS_PARAM_NOT_SPECIFIED,\r
+ (void *) NULL);\r
+ r+=nwal_pollDm(p->nwalInstanceHandle,\r
+ nwal_POLL_DM_DEF_GLOB_DECRYPT_Q,\r
+ (uint32_t) p,\r
+ p->max_n,\r
+ QMSS_PARAM_NOT_SPECIFIED,\r
+ (void *) NULL);\r
+ return r;\r
+}\r
+\r
+//********************************************\r
+//poll app-provided netcp rx queue\r
+//********************************************\r
+static int pktio_poll_nwal_adj(PKTIO_HANDLE_T * p, PKTIO_POLL_T * p_poll_cfg, int * err)\r
+{\r
+int r=0;\r
+*err=0;\r
+ /* Poll for common L2/L3 packets and L4 class pkts (todo-> only do L4 if classifiers are\r
+ set.. optimization maybe?) */\r
+#ifdef DO_BENCH\r
+ vv14p=netapi_timing_stop();\r
+#endif\r
+ r=nwal_pollPkt(p->nwalInstanceHandle,\r
+ nwal_POLL_APP_MANAGED_PKT_Q,\r
+ (uint32_t) p,\r
+ p->max_n,\r
+ p->q,\r
+ (void *) NULL);\r
+ return r;\r
+}\r
+\r
+//********************************************\r
+//poll app-defined sideband queues for pkts from SA\r
+//********************************************\r
+static int pktio_poll_sb_adj(PKTIO_HANDLE_T * p, PKTIO_POLL_T * p_poll_cfg, int * err)\r
+{\r
+int r=0;\r
+*err=0;\r
+ r=nwal_pollDm(p->nwalInstanceHandle,\r
+ nwal_POLL_DM_APP_MANAGED_Q,\r
+ (uint32_t) p,\r
+ p->max_n,\r
+ p->q,\r
+ (void *) NULL);\r
+ return r;\r
+}\r
+\r
/*-----------------------MAIN API----------------------*/\r
/* create a channel */\r
PKTIO_HANDLE_T * pktio_create(NETAPI_T n, char * name,\r
p->back = n;\r
p->cb = cb;\r
p->max_n = p_cfg->max_n;\r
+p->_poll=pktio_poll_dummy;\r
+p->_send=pktio_send_dummy;\r
memcpy((char *)&p->cfg, (char*) p_cfg, sizeof(PKTIO_CFG_T));\r
\r
/* create a general queue (for now). todo: allow qnum to be passed in */\r
if (p_cfg->flags2 & PKTIO_PKT)\r
{\r
p->use_nwal = PKTIO_4_ADJ_NWAL;\r
+ p->_poll=pktio_poll_nwal_adj;\r
p->nwalInstanceHandle = netapi_return_nwal_instance_handle(n);\r
}\r
else if (p_cfg->flags2 & PKTIO_SB)\r
{\r
p->use_nwal = PKTIO_4_ADJ_SB;\r
+ p->_poll=pktio_poll_sb_adj;\r
p->nwalInstanceHandle = netapi_return_nwal_instance_handle(n);\r
}\r
- else p->use_nwal=0;\r
+ else\r
+ {\r
+ p->use_nwal=0;\r
+ if (p_cfg->flags1& PKTIO_W) p->_send=pktio_send_ipc;\r
+ if (p_cfg->flags1& PKTIO_R) p->_poll=pktio_poll_ipc;\r
+ }\r
\r
/* save name */ \r
strncpy(p->name,name,\r
p->back = n;\r
p->cb = cb;\r
p->max_n = p_cfg->max_n;\r
+p->_poll=pktio_poll_dummy;\r
+p->_send=pktio_send_dummy;\r
memcpy((char *)&p->cfg, (char*) p_cfg, sizeof(PKTIO_CFG_T));\r
\r
/* special handling of NETCP_RX, NETCP_TX */\r
p->use_nwal = PKTIO_DEF_NWAL;\r
p->q = 0; \r
p->nwalInstanceHandle = netapi_return_nwal_instance_handle(n); \r
+ if (!strcmp(name,NETCP_RX)) p->_poll=pktio_poll_nwal;\r
+ if (!strcmp(name,NETCP_TX)) p->_send=pktio_send_nwal;\r
}\r
else if( (!strcmp(name, NETCP_SB_RX)) || (!strcmp(name,NETCP_SB_TX)) )\r
{\r
p->use_nwal = PKTIO_DEF_SB;\r
p->q = 0;\r
p->nwalInstanceHandle = netapi_return_nwal_instance_handle(n);\r
+ if (!strcmp(name,NETCP_SB_RX)) p->_poll=pktio_poll_sb;\r
+ if (!strcmp(name,NETCP_SB_TX)) p->_send=pktio_send_sb;\r
}\r
else\r
{\r
if (p_cfg->flags2 & PKTIO_PKT)\r
{\r
p->use_nwal = PKTIO_4_ADJ_NWAL; //additional RX q for nwal\r
+ p->_poll = pktio_poll_nwal_adj;\r
p->nwalInstanceHandle = netapi_return_nwal_instance_handle(n); \r
} \r
else if (p_cfg->flags2 & PKTIO_SB)\r
{\r
p->use_nwal = PKTIO_4_ADJ_SB; //additional RX q for sideband with NWAL\r
+ p->_poll = pktio_poll_sb_adj;\r
p->nwalInstanceHandle = netapi_return_nwal_instance_handle(n);\r
} \r
-\r
- else p->use_nwal=0; //not handled by nwal\r
+ else \r
+ {\r
+ p->use_nwal=0; //not handled by nwal\r
+ if (p_cfg->flags1& PKTIO_W) p->_send=pktio_send_ipc;\r
+ if (p_cfg->flags1& PKTIO_R) p->_poll=pktio_poll_ipc;\r
+ }\r
}\r
\r
/* save name */\r
/***********************************************************/\r
/*****************send *************************/\r
/***********************************************************/\r
-int pktio_send(PKTIO_HANDLE_T * p, Ti_Pkt *pkt, PKTIO_METADATA_T *m, int * err)\r
+int pktio_send_genric(PKTIO_HANDLE_T * p, Ti_Pkt *pkt, PKTIO_METADATA_T *m, int * err)\r
{\r
nwalTxPktInfo_t * pPktInfo=m->u.tx_meta;\r
nwal_RetValue res;\r
vv11p=netapi_timing_stop();\r
res=nwal_send(p->nwalInstanceHandle, m->sa_handle,pPktInfo);\r
vv12p=netapi_timing_stop();\r
- if (res != nwal_OK) *err = NETAPI_ERR_NWAL_TX_ERR -res;\r
+ if (res != nwal_OK) { printf("nwal send err= %d (%x)\n", res, res); *err = NETAPI_ERR_NWAL_TX_ERR -res;}\r
}\r
else if ((p->use_nwal== PKTIO_DEF_SB) )\r
{\r
@@ -397,14 +606,15 @@ int pktio_sendMulti(PKTIO_HANDLE_T * p, Ti_Pkt * pkt[], PKTIO_METADATA_T * m[],
/***********************************************************/\r
\r
/* poll a particular channel */\r
-int pktio_poll(PKTIO_HANDLE_T * p, PKTIO_POLL_T * p_poll_cfg, int * err)\r
+int pktio_poll_generic(PKTIO_HANDLE_T * p, PKTIO_POLL_T * p_poll_cfg, int * err)\r
{\r
int r=0;\r
int n;\r
Ti_Pkt * temp;\r
Ti_Pkt * pkt_list[PKTIO_MAX_RECV];\r
PKTIO_METADATA_T meta_s[PKTIO_MAX_RECV];\r
-uint64_t ts= netapi_getTimestamp(); //get_ts\r
+//uint64_t ts= netapi_getTimestamp(); //get_ts\r
+uint64_t ts=0LL;\r
\r
if(! p->cfg.flags1&PKTIO_R) return 0;\r
\r
{\r
/* Poll for common L2/L3 packets and L4 class pkts (todo-> only do L4 if classifiers are\r
 set.. optimization maybe?) */\r
+ vv13p=netapi_timing_stop();\r
r=nwal_pollPkt(p->nwalInstanceHandle,\r
nwal_POLL_DEFAULT_GLOB_PKT_Q| nwal_POLL_DEFAULT_PER_PROC_PKT_Q,\r
(uint32_t) p,\r
else if (p->use_nwal==PKTIO_4_ADJ_NWAL)\r
{\r
/* Poll an additional NETCP RX queue */\r
+ vv14p=netapi_timing_stop();\r
r=nwal_pollPkt(p->nwalInstanceHandle,\r
nwal_POLL_APP_MANAGED_PKT_Q,\r
(uint32_t) p,\r
int n;\r
Ti_Pkt * pkt_list[PKTIO_MAX_RECV];\r
PKTIO_METADATA_T meta_s[PKTIO_MAX_RECV];\r
+ vv15p=netapi_timing_stop();\r
for(r=0;r<numPkts;r++)\r
{\r
pkt_list[r] = pPktInfo[r].pPkt;\r
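
With send and poll now dispatching through the per-channel _send/_poll hooks, an application's transmit path is unchanged. A sketch of sending one packet on an already-opened NETCP_TX channel (checksum-offload flags and header offsets are omitted here and would be filled in as in the test code; the function name is illustrative):

static int example_send_one(PKTIO_HANDLE_T *netcp_tx_chan, Ti_Pkt *tip)
{
    PKTIO_METADATA_T meta;
    nwalTxPktInfo_t  meta_tx;
    int err = 0;

    memset(&meta_tx, 0, sizeof(meta_tx));
    meta_tx.txFlag1     = NWAL_TX_FLAG1_META_DATA_VALID;
    meta_tx.startOffset = 0;

    meta.sa_handle = nwal_HANDLE_INVALID;   /* no IPSEC SA for this packet  */
    meta.u.tx_meta = &meta_tx;              /* attach the NWAL TX meta data */

    pktio_send(netcp_tx_chan, tip, &meta, &err);  /* routed to pktio_send_nwal() via the _send hook */
    return err;
}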
index ad17c5a8f29a796c46f330fc7a04cd98532ee542..00c1a80d19c5f64bf4b1ef5b0a0cb17116e6cf13 100755 (executable)
printf(">netapi: tim64 calibration - n=%d t2-t1=%lu t264-t164=%llu ccpt=%ld tps=%ld\n",\r
n, t2-t1, t264-t164, t64_cpu_cycle_per_tick, t64_ticks_sec());\r
\r
+t1=timing_stop();\r
+t164=netapi_getTimestamp();\r
+t264=netapi_getTimestamp();\r
+t2=timing_stop();\r
+printf(">netapi: tim64 cycle cost= %d(cpu ticks), back2back= %lld\n", (t2-t1)/2, t264-t164);\r
+\r
}\r
\r
\r
/*********************************\r
* memory map t64 into user space\r
+t264=read_t64();\r
+t264=read_t64();\r
* input: pass in fd for /dev/mem\r
**********************************/\r
int t64_memmap(int fd)\r
index 3877e14ed088661a89696f691fe30b93cd659bde..311fc3a39d5b87114561093ffc93beab9471e8cf 100644 (file)
//#define IPSEC_MODE_RX_SIDEBAND\r
//#define IPSEC_MODE_TX_SIDEBAND\r
\r
+//#define TEST_TIMERS\r
+\r
/*************debug********************/\r
void dump_descr(unsigned long *p, int n)\r
{\r
//************for multi pkt burst xfer test in loopback mode\r
#define TX_BURST 800 \r
int pktloopback=TUNE_NETAPI_NWAL_ENABLE_PASS_LOOPBACK;\r
+nwalTxFlowCookie_t flowCookie;\r
\r
//this device: 10.0.0.100, mac 0x,01,02,03,04,05 and .. 0x6\r
\r
typedef struct stats_t\r
{\r
long itx; //initially generated\r
+ long itx2;\r
long rx;\r
long tx;\r
long n_bad;\r
/*******************************************\r
*************NETAPI OBJECTS***************\r
*****************************************/\r
+static NETAPI_CFG_T our_netapi_default_cfg=\r
+{\r
+TUNE_NETAPI_PERM_MEM_SZ,\r
+256, //start of packet offset for hw to place data on rx for default flow\r
+TUNE_NETAPI_QM_CONFIG_MAX_DESC_NUM, //max number of descriptors in system\r
+TUNE_NETAPI_NUM_GLOBAL_DESC, //total we will use\r
+TUNE_NETAPI_DEFAULT_NUM_BUFFERS, //#descriptors+buffers in default heap\r
+0, //#descriptors w/o buffers in default heap\r
+TUNE_NETAPI_DEFAULT_BUFFER_SIZE+256 //size of buffers in default heap\r
+};\r
+\r
Pktlib_HeapHandle OurHeap;\r
Pktlib_HeapHandle specialSmall;\r
Pktlib_HeapHandle specialLarge;\r
NETAPI_T netapi_handle;\r
NETAPI_SCHED_HANDLE_T * our_sched;\r
NETAPI_SCHED_CONFIG_T our_sched_cfg={\r
- NETAPI_SCHED_DURATION|NETAPI_SCHED_CBV, 0, house, 500 //every 500 poll loops\r
+ NETAPI_SCHED_DURATION|NETAPI_SCHED_CBV, 0, house, 50000 //every 50000 poll loops\r
};\r
void our_stats_cb(NETAPI_T h, paSysStats_t* pPaStats);\r
NETAPI_TIMER_GROUP_HANDLE_T ourTimerBlock; \r
\r
unsigned int vv11;\r
\r
+unsigned int vv13p; //rcv path\r
+unsigned int vv14p;\r
+unsigned int vv15p;\r
+\r
+extern unsigned int nwal_prof1,nwal_prof2,nwal_prof3,nwal_prof4,nwal_prof5,nwal_prof6;\r
+\r
+//#define REASSEMBLE_BENCH\r
+#ifdef REASSEMBLE_BENCH\r
+#include <ti/drv/pa/example/reassemLib/reassemLib.h>\r
+/*--------------reassembly benchmark--------------------------------*/\r
+void our_reassemble_bench(int nfrags)\r
+{\r
+paIPReassemblyConfig_t Config={5,128,10000 };\r
+int i,j;\r
+int len;\r
+Ti_Pkt tip;\r
+char *buffer;\r
+unsigned long v1;\r
+unsigned long v2;\r
+unsigned long sum1=0;\r
+unsigned long sum2=0;\r
+paEx_reassemLibInit(&Config);\r
+for(j=0;j<200/nfrags;j++)\r
+{\r
+ for(i=0;i<nfrags;i++)\r
+ {\r
+ short temp;\r
+ tip=Pktlib_allocPacket(OurHeap,PKT_LEN);\r
+ Pktlib_getDataBuffer(tip,(uint8_t**)&buffer,&len);\r
+ memcpy(&buffer[0],&testPkt[14],20); //IP header\r
+ if (i < (nfrags-1)) buffer[6] = 0x20;\r
+ temp = i*40; \r
+ buffer[6]|= (temp&0x1f00)>>8;\r
+ buffer[7]= (temp&0xff);\r
+ temp = 20+40*8; \r
+ buffer[2]= (temp&0xff00)>>8;\r
+ buffer[3]= (temp&0xff);\r
+ Pktlib_setPacketLen(tip, temp);\r
+ v1= netapi_timing_stop();\r
+ paEx_reassemLibProc(tip, 0xffff);\r
+ v2= netapi_timing_stop();\r
+ sum1+= v2-v1;\r
+ }\r
+ sum2 += v2-v1;\r
+}\r
+printf("reasssembly test: %d trials, %d frags/pkt %d cycles/frag %d cycles/last frag\n",j,nfrags, sum1/(j*nfrags), sum2/(j));\r
+}\r
+#endif\r
+\r
/*--------------basic pktio send/recv benchmark----------------------*/\r
unsigned int timings[10];\r
void our_pktio_bench(int ntrials)\r
\r
//get pointer to buffer area of packet\r
Pktlib_getDataBuffer(b,(uint8_t**)&buffer,&len);\r
+ if (!buffer) \r
+ {printf("net_test: get_pkt() heap returned empty buffer %d \n", n); return NULL;};\r
\r
#if 0 \r
if (pktloopback==0)\r
{\r
int i;\r
vv6= netapi_timing_stop();\r
- for (i=0;i<n_pkts; i++) Pktlib_freePacket(p_recv[i]);\r
+ for (i=0;i<n_pkts; i++) \r
+ {\r
+ Pktlib_freePacket(p_recv[i]);\r
+ }\r
vv11 = netapi_timing_stop();\r
}\r
\r
unsigned int sum_vv4=0;\r
unsigned int sum_vv5=0;\r
\r
+unsigned int nwal_flow_vv1,nwal_flow_vv2;\r
+unsigned int nwal_sum_vv1=0;\r
+unsigned int nwal_sum_vv2=0;\r
+unsigned int nwal_sum_vv3=0;\r
+unsigned int nwal_sum_vv4=0;\r
+unsigned int nwal_sum_vv5=0;\r
+unsigned int nwal_sum_vv6=0;\r
+\r
+unsigned int nwal_sum_flow_vv1=0;\r
+unsigned int nwal_sum_flow_vv2=0;\r
+static int first =0;\r
+\r
Osal_cache_op_measure_reset();\r
memset(&meta_tx,0,sizeof(meta_tx));\r
for(p=0;p<TX_BURST;p++) { \r
\r
/* set up meta data */\r
meta.sa_handle=nwal_HANDLE_INVALID;\r
- meta_tx.txFlag1 = (NWAL_TX_FLAG1_DO_IPV4_CHKSUM | NWAL_TX_FLAG1_DO_UDP_CHKSUM| NWAL_TX_FLAG1_META_DATA_VALID);\r
+ meta_tx.txFlag1 = (NWAL_TX_FLAG1_DO_IPV4_CHKSUM | NWAL_TX_FLAG1_DO_UDP_CHKSUM| NWAL_TX_FLAG1_META_DATA_VALID|NWAL_TX_FLAG1_COPY_TX_FLOW_COOKIE);\r
meta_tx.startOffset = 0;\r
//GONE in V2 meta_tx.pktLen = len;\r
meta_tx.ipOffBytes = TEST_PKT_IP_OFFSET_BYTES;\r
meta_tx.l4HdrLen = TEST_PKT_UDP_HDR_LEN;\r
//GONE in V2 meta_tx.ploadOffBytes = TEST_PKT_PLOAD_OFFSET_BYTES;\r
meta_tx.ploadLen = TEST_PAYLOAD_LEN;\r
+ meta_tx.pTxFlowCookie = &flowCookie;\r
\r
Pktlib_getDataBuffer(tip,&pData,&len);\r
pIpHdr = pData + meta_tx.ipOffBytes;\r
if (house_pkts_gened<16) dump_descr((long *) tip, house_pkts_gened);\r
else if (house_pkts_gened>99) dump_descr((long *) tip,house_pkts_gened);\r
#endif\r
- vv2= netapi_timing_stop();\r
- pktio_send(netcp_tx_chan,tip,&meta,&err);\r
- vv3= netapi_timing_stop();\r
- sum_vv1 += (vv3-vv1);\r
- sum_vv2 += (vv2-vv1);\r
- sum_vv3 += (vv3-vv2);\r
- sum_vv4 += (vv11p-vv2);\r
- sum_vv5 += (vv12p-vv11p);\r
+\r
+ if(first < TX_BURST)\r
+ {\r
+ first++;\r
+ vv2= netapi_timing_stop();\r
+ meta_tx.txFlag1 |= NWAL_TX_FLAG1_COPY_TX_FLOW_COOKIE;\r
+ pktio_send(netcp_tx_chan,tip,&meta,&err);\r
+ vv3= netapi_timing_stop();\r
+ if(err != 0)\r
+ {\r
+ printf("pktIO Error %d \n",err);\r
+ }\r
+ sum_vv1 += (vv3-vv1);\r
+ sum_vv2 += (vv2-vv1);\r
+ sum_vv3 += (vv3-vv2);\r
+ sum_vv4 += (vv11p-vv2);\r
+ sum_vv5 += (vv12p-vv11p);\r
+\r
+ nwal_sum_vv1+= (nwal_prof1-vv11p);\r
+ nwal_sum_vv2+= (nwal_prof2-nwal_prof1);\r
+ nwal_sum_vv3+= (nwal_prof3-nwal_prof2);\r
+ nwal_sum_vv4+= (nwal_prof4-nwal_prof3);\r
+ nwal_sum_vv5+= (nwal_prof5-nwal_prof4);\r
+ nwal_sum_vv6+= (nwal_prof6-nwal_prof5);\r
+ if (err == 0) stats.itx +=1;\r
+ }\r
+ else\r
+ {\r
+ nwal_RetValue retVal;\r
+ nwal_flow_vv1= netapi_timing_stop();\r
+ meta_tx.pPkt = tip;\r
+ retVal = nwal_flowSend(&meta_tx);\r
+ nwal_flow_vv2= netapi_timing_stop();\r
+ if(retVal != nwal_OK)\r
+ { \r
+ printf("nwal_flowSend Error %d \n",retVal);\r
+ }\r
+\r
+ nwal_sum_flow_vv1 += (nwal_flow_vv1-vv1); \r
+ nwal_sum_flow_vv2 += (nwal_flow_vv2-nwal_flow_vv1); \r
+\r
+ if(retVal == nwal_OK)stats.itx2 +=1;\r
+ }\r
+ \r
// printf("pktio send. full=%d metadata=%d pktio_send=%d\n", vv3-vv1, vv2-vv1, vv3-vv2);\r
- if (err == 0) stats.itx +=1;\r
+ \r
\r
house_pkts_gened +=1;\r
}\r
int n_c_ops;\r
ccycles =Osal_cache_op_measure(&n_c_ops);\r
if (sum_vv1) \r
+ {\r
printf("BURST pktio send %d pkts. full=%d metadata=%d pktio_send=%d to_nwal=%d nwal_send= %d n_c_ops=%d cache_op_time=%d (pp-> %d)\n", stats.itx,\r
sum_vv1/stats.itx, sum_vv2/stats.itx, sum_vv3/stats.itx, \r
sum_vv4/stats.itx, sum_vv5/stats.itx, n_c_ops, ccycles, \r
n_c_ops? (ccycles/(n_c_ops/2)) : 0);\r
+\r
+ printf("NWAL Profile Cycles: Prof1= %d,Prof2=%d,Prof3=%d,Prof4=%d,Prof5=%d ,Prof6=%d \n",\r
+ nwal_sum_vv1/stats.itx,nwal_sum_vv2/stats.itx,nwal_sum_vv3/stats.itx,\r
+ nwal_sum_vv4/stats.itx,nwal_sum_vv5/stats.itx,nwal_sum_vv6/stats.itx);\r
+ \r
+\r
+ if(stats.itx2)\r
+ {\r
+ printf("nwal_flowSend Profile Cycles: Prof1= %d,Prof2=%d \n",\r
+ nwal_sum_flow_vv1/stats.itx2,nwal_sum_flow_vv2/stats.itx2);\r
+ }\r
+\r
+ }\r
}\r
}\r
\r
/*******************************************/\r
\r
/* create netapi */\r
-netapi_handle = netapi_init(NETAPI_SYS_MASTER);\r
+netapi_handle = netapi_init(NETAPI_SYS_MASTER, &our_netapi_default_cfg);\r
\r
/* open the main heap */\r
OurHeap = Pktlib_findHeapByName("netapi");\r
netapi_registerHeap(netapi_handle, specialSmall);\r
netapi_registerHeap(netapi_handle, specialLarge);\r
\r
+#ifdef REASSEMBLE_BENCH\r
+our_reassemble_bench(2);\r
+exit(1);\r
+#endif\r
\r
/* create a pktio channel */\r
our_chan=pktio_create(netapi_handle,"our1stq",(PKTIO_CB) recv_cb_bench, &our_chan_cfg,&err);\r
/********************************************\r
* Basic pkt loopback test\r
*********************************************/\r
-printf("...runnining pure push/pop benchmark\n");\r
+printf("...running pure push/pop benchmark\n");\r
our_pktio_bench(1000);\r
our_pktio_bench(1000);\r
our_pktio_bench(1000);\r
&err\r
);\r
if (err) {printf("addip0 failed %d\n",err); exit(1); } \r
-#if 1\r
+\r
//create a 2nd mac instance\r
netcp_cfgCreateMacInterface(\r
netapi_handle,\r
2,\r
heaps,\r
sizes,\r
+ 0, //offset to start rx is 0\r
&err);\r
if (err) {printf("add flow failed\n", err); exit(1);}\r
}\r
+#if 0\r
//special route for this classifier: different flow + destination q\r
class2_route.p_dest_q = netcp_rx_chan2;\r
class2_route.p_flow = specialFlow;\r
if (err) {printf("addTxSa failed %d\n",err); exit(1);}\r
\r
\r
-\r
+#ifdef TEST_TIMERS\r
//timers\r
ourTimerBlock = netapi_TimerGroupCreate(\r
netapi_handle,\r
300LL, //timer group ticks\r
&err);\r
if (err) {printf("timerstart failed %d\n");}\r
-\r
+#endif\r
netcp_cfgReqStats(netapi_handle, our_stats_cb, 1,&err);\r
if (err!=0) {printf("stats req failed\n");}\r
\r
our_pktio_bench(100);\r
\r
\r
-\r
-\r
-\r
/**************unused stuff******************/\r
/* create TRIE */\r
P_trie = trie_new();\r
/* done */\r
our_stats_cb(netapi_handle, NULL);\r
\r
+\r
+#define DO_FAST_POLL\r
+#ifdef DO_FAST_POLL\r
+example_fast_poll(netcp_rx_chan);\r
+#endif\r
+\r
/*************************************************\r
************CLEAN UP****************************\r
************************************************/\r
//delete Classifiers\r
netcp_cfgDelClass(netapi_handle, class_0, &err); \r
netcp_cfgDelClass(netapi_handle, class_1, &err); \r
-netcp_cfgDelClass(netapi_handle, class_2, &err); \r
+//netcp_cfgDelClass(netapi_handle, class_2, &err); \r
\r
//delete flow \r
netcp_cfgDelFlow(netapi_handle, specialFlow, &err);\r
netapi_shutdown(netapi_handle);\r
\r
}\r
+\r
+//EXAMPLE FAST POLL\r
+/* PLD */\r
+void netapi_pld(void * x)\r
+{ \r
+ asm volatile("pld [r0]");\r
+}\r
+\r
+#define M 1008\r
+static int l3_off[M], l4_off[M], L3_chk_ok[M], L4_chk_ok[M], appid[M], len[M] ;\r
+static unsigned char * buf[M];\r
+#define N2POLL 8\r
+void example_fast_poll( PKTIO_HANDLE_T * p_pktio)\r
+{\r
+int j=0;\r
+int jj=0;\r
+int i,k,l=0,ltot=0;\r
+int n= N2POLL; //max # of pkts to poll\r
+Ti_Pkt * pHd[N2POLL];\r
+Ti_Pkt * tempVA;\r
+pasahoLongInfo_t* pinfo;\r
+unsigned long t1;\r
+unsigned long t2;\r
+unsigned long t11;\r
+unsigned long t12;\r
+unsigned long np;\r
+unsigned long sumt=0;\r
+unsigned long sumf=0;\r
+unsigned long sump=0;\r
+unsigned long totlen=0;\r
+int max_batch=0;\r
+\r
+//this should be done once and saved\r
+Qmss_QueueHnd rxQ= PKTIO_GET_DEFAULT_NETCP_Q(p_pktio);\r
+Qmss_QueueHnd freeQ;\r
+//loop forever\r
+for(;;)\r
+{\r
+ t1= netapi_timing_stop();\r
+ pHd[0] = (Ti_Pkt *)QMSS_DESC_PTR(Qmss_queuePopRAW (rxQ));\r
+ if (!pHd[0]) continue;\r
+ //got pkt\r
+ for(i=1;(i<n) && (pHd[i-1]);i++)\r
+ {\r
+ //convert previous descriptor PA -> VA\r
+ tempVA = Osal_qmssPhyToVirt(pHd[i-1]); \r
+ Cppi_getData (Cppi_DescType_HOST, (Cppi_Desc*)tempVA, &buf[jj], &len[jj]);\r
+\r
+ //try and preload descriptor\r
+ //__builtin_prefetch(tempVA);\r
+ //netapi_pld(tempVA);\r
+\r
+ //read next descriptor from queue \r
+ pHd[i] = (Ti_Pkt *)QMSS_DESC_PTR(Qmss_queuePopRAW (rxQ));\r
+ \r
+ /* extract some meta data */\r
+ pinfo = PKTIO_GET_PROTO_INFO(tempVA);\r
+ l3_off[jj]= PKTIO_GET_L3_OFFSET(pinfo);\r
+ l4_off[jj]= PKTIO_GET_L4_OFFSET(pinfo);\r
+ appid[jj]= PKTIO_GET_APPID(tempVA);\r
+ //get ptr (physical address) and length of the associated buffer\r
+ //Cppi_getData (Cppi_DescType_HOST, (Cppi_Desc*)tempVA, &buf[jj], &len[jj]);\r
+ jj+=1;\r
+ }\r
+ //finish last pkt in burst\r
+ if(pHd[i-1])\r
+ {\r
+ //convert previous descriptor PA -> VA\r
+ tempVA = Osal_qmssPhyToVirt(pHd[i-1]); \r
+\r
+ /* extract some meta data */\r
+ pinfo = PKTIO_GET_PROTO_INFO(tempVA);\r
+ l3_off[jj]= PKTIO_GET_L3_OFFSET(pinfo);\r
+ l4_off[jj]= PKTIO_GET_L4_OFFSET(pinfo);\r
+ appid[jj]= PKTIO_GET_APPID(tempVA);\r
+ //get ptr (physical address) and length of the associated buffer\r
+ Cppi_getData (Cppi_DescType_HOST, (Cppi_Desc*)tempVA, &buf[jj], &len[jj]);\r
+ jj+=1;\r
+ }\r
+ t2= netapi_timing_stop();\r
+ j+=(pHd[i-1]? i: (i-1)) ;\r
+ if (jj>(M-n)) jj=0;\r
+ l+=1; //n batches\r
+ ltot+=1;\r
+ if(pHd[i-1])\r
+ {\r
+ if (i>max_batch) max_batch= i;\r
+ }\r
+ else\r
+ {\r
+ if( (i-1) >max_batch) max_batch = i-1;\r
+ }\r
+\r
+ //cleanup\r
+ //printf("cleanup %d\n",i);\r
+ for(k=0;k<i;k++)\r
+ {\r
+ //cleanup. need to convert the whole descriptor to VA so that we can use freePacket()\r
+ //alternative would be to just do cache ops plus descriptor raw push to pktlib\r
+ // heap free queue\r
+ if(pHd[k])\r
+ {\r
+\r
+ //tempVA=Qmss_osalConvertDescPhyToVirt(pHd[k]);\r
+ tempVA = Osal_qmssPhyToVirt(pHd[k]);\r
+ freeQ=Qmss_getQueueHandle(Cppi_getReturnQueue (Cppi_DescType_HOST, tempVA));\r
+ netapi_utilCacheWbInv(pHd[k],128);\r
+ //would need to wbInv buffer also in practice. Also need to walk\r
+ // descriptor chain \r
+ t11= netapi_timing_stop();\r
+ // Pktlib_freePacket(tempVA);\r
+ Qmss_queuePushDescSizeRaw (freeQ,\r
+ (void *) pHd[k],\r
+ 128);\r
+\r
+ t12= netapi_timing_stop();\r
+ sumf += (t12-t11); \r
+ }\r
+ }\r
+ sumt += (t2-t1);\r
+ sump +=(pHd[i-1]? i: (i-1));\r
+ //printf("end cleanup %d %d %d\n",sumt,sumf,sump );\r
+ if (sump > 10000) {\r
+ printf("pkts rx %d batches=%d appid=%x l3_off=%d l4_off=%d len=%d buf=0x%x rxcycle= %d pkts/batchx1000=%d maxbatch=%d cycles per rawpush = %d\n", \r
+ j,ltot, appid[j%M], \r
+ l3_off[j%M],l4_off[j%M], \r
+ len[j%M],buf[j%M],\r
+ sumt/sump, (sump*1000)/l, max_batch,\r
+ sumf/sump);\r
+ sumt=sump=sumf=0;\r
+ l=0;\r
+ }\r
+}\r
+\r
+\r
+}\r
+\r
+\r