1 /******************************************\r
2 * File: nt_bench.c \r
3 * Purpose: benchmarks for NT.\r
4 **************************************************************\r
5 * FILE: nt_bench.c\r
6 * \r
7 * DESCRIPTION: netapi user space transport\r
8 * library test application : benchmarks\r
9 * \r
10 * REVISION HISTORY: rev 0.0.1 \r
11 *\r
12 * Copyright (c) Texas Instruments Incorporated 2010-2011\r
13 * \r
14 * Redistribution and use in source and binary forms, with or without \r
15 * modification, are permitted provided that the following conditions \r
16 * are met:\r
17 *\r
18 * Redistributions of source code must retain the above copyright \r
19 * notice, this list of conditions and the following disclaimer.\r
20 *\r
21 * Redistributions in binary form must reproduce the above copyright\r
22 * notice, this list of conditions and the following disclaimer in the \r
23 * documentation and/or other materials provided with the \r
24 * distribution.\r
25 *\r
26 * Neither the name of Texas Instruments Incorporated nor the names of\r
27 * its contributors may be used to endorse or promote products derived\r
28 * from this software without specific prior written permission.\r
29 *\r
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \r
31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT \r
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
33 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT \r
34 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, \r
35 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT \r
36 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
37 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
38 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \r
39 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \r
40 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
41 \r
42 *****************************************/\r
43 \r
44 #include <stdio.h>\r
45 #include <stdlib.h>\r
46 #include <unistd.h>\r
47 #include <string.h>\r
48 #include <signal.h>\r
49 #include <pthread.h>\r
50 #include <sched.h>\r
51 \r
52 //#include "trie.h"\r
53 #include <ti/runtime/netapi/netapi.h>\r
54 #include <ti/runtime/hplib/hplib.h>\r
55 #include "ti/runtime/netapi/pktio.h"\r
56 #include "transport_dpi_demo.h"\r
57 #include "navl_wrapper.h"\r
58 //#include "ti/runtime/netapi/test/net_test.h"\r
59 #include <ti/drv/sa/salld.h>\r
60 \r
61 #include <ti/drv/qmss/device/k2e/src/qmss_device.c>\r
62 #include <ti/drv/cppi/device/k2e/src/cppi_device.c>\r
63 \r
/* Resource Manager client service handle, defined by the RM setup module */
extern Rm_ServiceHandle *rmClientServiceHandle;
/* AppID tag used by NETCP for exception packets (defined elsewhere) */
extern NETCP_CFG_EXCEPTION_PKT_T expPkt_appid;


/* Read the ARM PMU cycle counter; used for fine-grained timing below */
#define netapi_timing_start hplib_mUtilGetPmuCCNT

/* Shared-memory resident DPI wrapper state (set up in main) */
navl_wrapper_cfg_info_t *pNavlCfg;     /* wrapper config header in shm entry 1 */
navl_wrapper_pkt_stat_t *pStats1;      /* per-protocol stats, fast-path thread 1 (query mode) */
navl_wrapper_pkt_stat_t *pStats2;      /* per-protocol stats, fast-path thread 2 (query mode) */
navl_global_dpi_stats *pGlobDpiStats;  /* global DPI counters in shm entry 2 */
void* pTemp;                           /* scratch pointer for shm entry lookups */


STATS_T stats;              /* application packet/cycle counters */
paSysStats_t netcp_stats;   /* last NETCP PA stats snapshot (filled by stats callback) */
//struct dpi_stats dpis;

/* VDPI enables the NAVL deep-packet-inspection hooks throughout this file */
#define VDPI
#ifdef VDPI
static int DPI=0; //1 to enable
static int DUMP_DPI_CONN=0;   /* toggled from the console menu ('p') */
#endif


/* base of the hplib shared-memory region and current entry pointer */
void* pShmBase;
void *pShmEntry;



static int scnt=0;            /* count of SIGINT receptions */
volatile static int QUIT=0;   /* set by signal handler / 'q' to request shutdown */
static int XMIT=0;
static int CAP=0;             /* capture toggle: core whose packets are snapshotted */
volatile int RESET=0; //to reset stats
static int NTH=1;
volatile static int PKTGEN=0;
int pkt_len=64;               /* generated packet length for gen_pkts */



NETCP_CFG_MACIF_T mac[NUM_PROCS];
NETCP_CFG_MACIF_T mac0;   /* MAC rule for port 1 */
NETCP_CFG_MACIF_T mac1;   /* MAC rule for port 2 */

/* serializes netapi_init/navl init in the fast-path thread startup */
hplib_spinLock_T dpi_demo_thread_lock;


static char usage[] = "usage: %s -s \n";




//int procs =2;

#define HPLIB_THREADID 0 // for main: HPLIB THREAD INSTANCE
//__thread int our_core;
/* canned broadcast ethernet header used by the packet generator */
static unsigned char dummy_mac[]={0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x01,0x02,0x03,0x04,0x05,0x08,0x00};

void house(NETAPI_SCHED_HANDLE_T *s);
void our_stats_cb_mt(NETAPI_T h, paSysStats_t* pPaStats);
void our_stats_cb(NETAPI_T h, paSysStats_t* pPaStats);
125 \r
126 //sig handler\r
127 void netTest_utilMySig(int x)\r
128 {\r
129 QUIT=1;\r
130 scnt+=1;\r
131 printf(">net_test_dpi: recv'd signal %d cnt=%d\n",x,scnt);\r
132 if (scnt > 10) {printf(">dpi-demo: WARNING EXITING WITH PROPER SHUTDOWN LUTS LEFT ACTIVE\n");exit(1);}\r
133 \r
134 }\r
135 \r
136 \r
137 void recv_cb_bridge(struct PKTIO_HANDLE_Tag * channel, Ti_Pkt* p_recv[],\r
138 PKTIO_METADATA_T meta[], int n_pkts,\r
139 uint64_t ts );\r
140 \r
141 \r
/*************debug********************/
/* Dump the first 16 words of a (cached copy of a) CPPI descriptor.
 * p - pointer to at least 16 unsigned longs; n - caller-supplied tag.
 * BUG FIX: originally printed unsigned long values with %x and the pointer
 * via (int) cast with %x — both are undefined behavior and truncate on
 * LP64; now uses %lx and %p. */
void netTest_utilDumpDescr(unsigned long *p, int n)
{
    printf("--------dump of descriptor %d %p\n", n, (void *)p);
    printf("> %lx %lx %lx %lx %lx %lx %lx %lx\n",
           p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
    printf("> %lx %lx %lx %lx %lx %lx %lx %lx\n",
           p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
    printf("-----------------------------\n");
}
/* Dump the first 8 words of a packet header in network byte order.
 * p - header words; n - tag; a - appID; r - flag word.
 * BUG FIX: pointer was printed through an (int) cast with %x (truncates on
 * LP64, UB) — now %p; "%0x" (zero flag with no width) normalized to %08x.
 * NOTE: ntohl() converts only the low 32 bits of each unsigned long, as in
 * the original. */
void netTest_utilDumpHeader(unsigned long *p, int n, int a, int r)
{
    printf("--------dump of header %d %p appID=%x flag1=%x\n", n, (void *)p, a, r);
    printf("> %08x %08x %08x %08x %08x %08x %08x %08x\n",
           ntohl(p[0]), ntohl(p[1]), ntohl(p[2]), ntohl(p[3]),
           ntohl(p[4]), ntohl(p[5]), ntohl(p[6]), ntohl(p[7]));
#if 0
    printf("> %x %x %x %x %x %x %x %x\n",p[8],p[9],p[10],p[11],p[12],p[13],p[14],p[15]);
    printf("> %x %x %x %x %x %x %x %x\n",p[16],p[17],p[18],p[19],p[20],p[21],p[22],p[23]);
    printf("> %x %x %x %x %x %x %x %x\n",p[24],p[25],p[26],p[27],p[28],p[29],p[30],p[31]);
#endif
    printf("-----------------------------\n");
}
163 /*****************************************/\r
164 \r
165 void house(NETAPI_SCHED_HANDLE_T * s)\r
166 {\r
167 int err;\r
168 NETAPI_SCHED_SHUTDOWN_T sched_shutdown;\r
169 int coreid; //who we are\r
170 NETAPI_T nh= netapi_schedGetHandle(s);\r
171 coreid=(int) netapi_getCookie(nh);\r
172 \r
173 if (QUIT)\r
174 {\r
175 sched_shutdown.shutdown_type = NETAPI_SCHED_SHUTDOWN_NOW;\r
176 netapi_schedClose(s,&sched_shutdown,&err); \r
177 return;\r
178 }\r
179 \r
180 \r
181 #ifdef VDPI\r
182 if (DUMP_DPI_CONN )\r
183 navl_dump_conn_info();\r
184 #endif\r
185 \r
186 \r
187 /* only slow path threads get netcp stats, this needs to be set in cookie\r
188 during slow path thread creation*/\r
189 if (coreid & SP_THREAD_MASK)\r
190 {\r
191 netapi_netcpCfgReqStats(nh, our_stats_cb_mt, 0,&err);\r
192 }\r
193 \r
194 }\r
195 \r
unsigned long long CALIB=0;  /* PMU cycles per timestamp tick, set in main */

/* Calibrate the ARM PMU cycle counter against the 64-bit hplib timestamp:
 * busy-spin for ~100000 PMU cycles and return the cycles-per-tick ratio.
 * Returns 0 if the timestamp did not advance (avoids division by zero).
 * BUG FIX: printf specifiers previously mismatched the argument types
 * (%d for unsigned long, %lld for unsigned values) — undefined behavior. */
unsigned long long calibrate_idle(void)
{
    volatile unsigned long long at1;
    volatile unsigned long long at2;
    volatile unsigned long pt1;
    volatile unsigned long pt2;
    unsigned long long calib;

    at1 = hplib_mUtilGetTimestamp();
    pt1 = netapi_timing_start();
    for (;;)
    {
        pt2 = netapi_timing_start();
        if ((pt2 - pt1) >= 100000)
            break;
    }
    at2 = hplib_mUtilGetTimestamp();

    if (at2 == at1)  /* timestamp too coarse to measure; avoid div-by-zero */
        return 0;

    calib = ((unsigned long long)(pt2 - pt1)) / (at2 - at1);
    printf("calibrate: arm time=%llu -> arm cycles=%lu calib=%llu\n",
           (unsigned long long)(at2 - at1), (unsigned long)(pt2 - pt1), calib);

    return calib;
}
218 \r
219 /*******************************************\r
220 *************NETAPI OBJECTS***************\r
221 *****************************************/\r
222 static NETAPI_CFG_T our_netapi_default_cfg=\r
223 {\r
224 TUNE_NETAPI_PERM_MEM_SZ,\r
225 128, //start of packet offset for hw to place data on rx for default flow\r
226 TUNE_NETAPI_QM_CONFIG_MAX_DESC_NUM, //max number of descriptors in system\r
227 TUNE_NETAPI_NUM_GLOBAL_DESC, //total we will use\r
228 TUNE_NETAPI_DEFAULT_NUM_BUFFERS, //#descriptors+buffers in default heap\r
229 64, //#descriptors w/o buffers in default heap\r
230 TUNE_NETAPI_DEFAULT_BUFFER_SIZE+128+128, //size of buffers in default heap\r
231 128, //tail room\r
232 256, //extra room\r
233 0,\r
234 NULL\r
235 };\r
236 \r
Pktlib_HeapHandle OurHeap; //default heap, used by producer
PKTIO_HANDLE_T * netcp_rx_chan;   /* default NETCP RX channel (main/bridge) */
PKTIO_HANDLE_T * netcp_rx_chan2;  /* unused here */
PKTIO_HANDLE_T * netcp_tx_chan;   /* NETCP TX channel used by send_it */

/* channel configurations: {direction, locality, queue, burst} */
PKTIO_CFG_T our_chan_cfg={PKTIO_RX_TX, PKTIO_LOCAL, PKTIO_Q_ANY, 8};
PKTIO_CFG_T netcp_rx_cfg={PKTIO_RX, PKTIO_NA, PKTIO_NA, 8};
PKTIO_CFG_T netcp_rx_cfg2={PKTIO_RX, (PKTIO_GLOBAL|PKTIO_PKT), PKTIO_Q_ANY, 8};
PKTIO_CFG_T netcp_tx_cfg={PKTIO_TX, PKTIO_NA, PKTIO_NA, 8};
NETAPI_T netapi_handle;                           /* system-master instance (main) */
NETAPI_SCHED_HANDLE_T * our_sched;
NETAPI_SCHED_HANDLE_T * scheduler[TUNE_NETAPI_NUM_CORES];  /* per-thread schedulers */
/* call house() as the callback every 5000000 poll loops */
NETAPI_SCHED_CONFIG_T our_sched_cfg={
    NETAPI_SCHED_DURATION|NETAPI_SCHED_CBV, 0, house, 5000000 //every 5000000 poll loops
};

NETCP_CFG_IP_T ip_rule0;
NETCP_CFG_IP_T ip_rule1;


/* direct CPSW TX channel: queue 648, not opened in this file */
PKTIO_CFG_T direct_to_cpsw_cfg={PKTIO_TX, PKTIO_GLOBAL, 648, 8};
PKTIO_HANDLE_T * cpsw_tx_chan;

PKTIO_CONTROL_T zap_channel_control={PKTIO_CLEAR, NULL};
/* NOTE: "cannel" typo kept — identifier may be referenced elsewhere */
PKTIO_CONTROL_T poll_cannel_control={PKTIO_SET_POLL_FLAGS, NULL, nwal_POLL_DEFAULT_GLOB_PKT_Q};
262 \r
//template for fast path: per-packet TX metadata installed into the NETCP TX
//channel in main (PKTIO_UPDATE_FAST_PATH); offsets assume plain MAC/IP/UDP.
nwalTxPktInfo_t txPktInfoNoCrypto =
{
    NULL,                           /* p_pkt */
    NWAL_TX_FLAG1_META_DATA_VALID,  /* txFlags */
    0,                              /* lpbackPass */
    0,                              /* enetport */
    0,                              /* msuSize */
    0,                              /* startOffset */
    0,                              /* saOffBytes */
    0,                              /* saPayLoadLen */
    0,                              /* saAhIcvOffBytes */
    0,                              /* saAhMacSize */
    0,                              /* etherLenOffBytes */
    MAC_HEADER_LEN,                 /* ipOffBytes */
    MAC_HEADER_LEN + IP_HEADER_LEN, /* l4OffBytes */
    UDP_HEADER_LEN,                 /* l4HdrLen */
    0,                              /* pseudoHdrChecksum */
    0                               /* pLoadLen */
};


/* skeleton route; final field filled in at runtime where used */
NETCP_CFG_ROUTE_T test_route=
{
    0,
    NULL,
    NULL,
    0//* to be filled in
};

NETCP_CFG_FLOW_HANDLE_T kernelFlow22;
NETCP_CFG_FLOW_HANDLE_T kernelFlow23;

/*************************END NETAPI OBJECTS***********************/

/* wildcard MAC/IP used when installing catch-all classification rules */
static unsigned char all_mac[]={0,0,0,0,0,0};
nwalIpAddr_t all_ip={0,0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
300 \r
301 \r
/* broadcast destination MAC (unused in the visible code paths) */
static unsigned char all_dest[]={0xff,0xff,0xff,0xff,0xff,0xff};




/* snapshot buffers for the console 'r' command: last captured packet
   header (32 bytes) and descriptor (64 bytes) */
static unsigned long last_header[32/sizeof(unsigned long)];
static unsigned long last_desc[64/sizeof(unsigned long)];

//stats
#define MAX_CORE 4
/* per-core packet and cycle counters, indexed by Osal_nwalGetProcId() */
int pkt_rx[MAX_CORE];
int pkt_tx[MAX_CORE];
unsigned long long pkt_rx_cycles[MAX_CORE]={0L};
unsigned long long pkt_tx_cycles[MAX_CORE]={0L};
unsigned long long pkt_cb_cycles[MAX_CORE]={0L};
unsigned long long idle_cycles[MAX_CORE]={0L};
volatile unsigned long long start_time[MAX_CORE];
unsigned long long end_time[MAX_CORE];
unsigned long long pkt_stall[MAX_CORE]={0L};  /* alloc failures in gen_pkts */
//*********************************
// packet generator
//*********************************
void gen_pkts(int np, int out_port);
325 \r
326 /******************************************************\r
327 * stats callback\r
328 *******************************************************/\r
329 void our_stats_cb_mt(NETAPI_T h, paSysStats_t* pPaStats)\r
330 {\r
331 stats.n_stats_cb +=1;\r
332 if(pPaStats) memcpy(&netcp_stats,pPaStats, sizeof(paSysStats_t));\r
333 }\r
334 \r
335 void our_stats_cb(NETAPI_T h, paSysStats_t* pPaStats)\r
336 {\r
337 uint32_t numFreeDataPackets;\r
338 uint32_t numZeroBufferPackets;\r
339 uint32_t numPacketsinGarbage;\r
340 Pktlib_HeapStats pktLibHeapStats;\r
341 int i,j;\r
342 unsigned long long bcpp;\r
343 unsigned long long bcpp_noc;\r
344 unsigned long long bcpp_app;\r
345 unsigned long long bcpp_tx;\r
346 unsigned long long npL;\r
347 unsigned long long cyclesL;\r
348 unsigned long long ccyclesL; //cache cycles\r
349 unsigned long long tmp_npL[TUNE_NETAPI_NUM_CORES];\r
350 unsigned long long tmp_cyclesL[TUNE_NETAPI_NUM_CORES];\r
351 unsigned long long tmp_ccyclesL[TUNE_NETAPI_NUM_CORES]; //cache cycles\r
352 NETAPI_SA_STATS_T netapi_sa_stats;\r
353 \r
354 printf(">*****stats @ %lld (#cbs%d) \n", hplib_mUtilGetTimestamp(),stats.n_stats_cb);\r
355 //printf("netcp_tx_handle check %x\n", netcp_tx_chan->back);\r
356 printf(">itx=%d rx=%d tx=%d bad=%d slow=%d \n>rx_class0=%d rx_class1=%d rx_class2=%d secRx=%d secPRX=%d sb_rx=%d sb_tx=%d auth_ok=%d sec_tx=%d min_rx=%d min_tx=%d ip=%d\n",\r
357 stats.itx, stats.rx, stats.tx, stats.n_bad, stats.n_new, \r
358 stats.n_class0_rx, stats.n_class1_rx, \r
359 stats.n_class2_rx, stats.sec_rx, stats.secp_rx, stats.sb_rx, stats.sb_tx, stats.n_auth_ok,\r
360 stats.sec_tx, stats.rx_min, stats.tx_min, stats.ip);\r
361 printf(">if rx stats: %d %d %d\n",stats.if_rx[0],stats.if_rx[1],stats.if_rx[2]);\r
362 \r
363 \r
364 printf(">core rx stats: %d %d %d\n",stats.core_rx[1],stats.core_rx[2],stats.core_rx[3]);\r
365 \r
366 \r
367 for (j= 1;j < NUM_PROCS;j++)\r
368 {\r
369 tmp_npL[j]=0LL; tmp_cyclesL[j]=0LL; tmp_ccyclesL[j]=0LL;\r
370 netapi_schedGetStats(scheduler[j],&tmp_npL[j],&tmp_cyclesL[j],&tmp_ccyclesL[j]);\r
371 npL += tmp_npL[j];\r
372 cyclesL += tmp_cyclesL[j];\r
373 ccyclesL += tmp_ccyclesL[j];\r
374 }\r
375 \r
376 if (npL && stats.rx)\r
377 {\r
378 bcpp = cyclesL/npL; \r
379 bcpp_noc = (cyclesL-ccyclesL)/npL; \r
380 bcpp_app = (stats.app_cycles-stats.tx_cache_cycles)/stats.rx;\r
381 }\r
382 else {bcpp = bcpp_noc=bcpp_app=0L;}\r
383 if (stats.tx)\r
384 {\r
385 bcpp_tx = (stats.send_cycles-stats.tx_cache_cycles)/stats.tx;\r
386 }\r
387 else\r
388 {\r
389 bcpp_tx = 0L;\r
390 }\r
391 printf("> ++ busy cycles pp=%lld (%lld wo cache ops) (app+tx= %lld) (tx= %lld) ++\n",\r
392 bcpp,bcpp_noc,bcpp_app, bcpp_tx);\r
393 \r
394 \r
395 #ifdef VDPI\r
396 navl_return_stats(\r
397 &pGlobDpiStats->n_ops,\r
398 &pGlobDpiStats->n_class,\r
399 &pGlobDpiStats->min_time,\r
400 &pGlobDpiStats->max_time,\r
401 &pGlobDpiStats->tot,\r
402 &pGlobDpiStats->m_op,\r
403 &pGlobDpiStats->m_bytes,\r
404 &pGlobDpiStats->n_err,\r
405 &pGlobDpiStats->f_op,\r
406 &pGlobDpiStats->m_cycles,\r
407 &pGlobDpiStats->f_cycles);\r
408 \r
409 printf("dpi stats: nops=%d nclass=%d min cycle=%d max cycle=%d ave cycle=%lld #mallocs=%d #mbytes=%d n_err=%d fops=%d mCycles=%d fCycles=%d\n",\r
410 pGlobDpiStats->n_ops,\r
411 pGlobDpiStats->n_class,\r
412 pGlobDpiStats->min_time,\r
413 pGlobDpiStats->max_time,\r
414 pGlobDpiStats->n_ops? pGlobDpiStats->tot/pGlobDpiStats->n_ops : 0,\r
415 pGlobDpiStats->m_op,\r
416 pGlobDpiStats->m_bytes,\r
417 pGlobDpiStats->n_err,\r
418 pGlobDpiStats->f_op, pGlobDpiStats->m_cycles, pGlobDpiStats->f_cycles);\r
419 for(i=0; i< NUM_FP_PROCS;i++)\r
420 {\r
421 navl_results(i);\r
422 }\r
423 #endif\r
424 if(pPaStats)\r
425 {\r
426 printf("C1 number of packets: %d\n", pPaStats->classify1.nPackets);\r
427 printf("C1 number IPv4 packets: %d\n", pPaStats->classify1.nIpv4Packets);\r
428 printf("C1 number IPv6 packets: %d\n", pPaStats->classify1.nIpv6Packets);\r
429 printf("C1 number Custom packets: %d\n", pPaStats->classify1.nCustomPackets);\r
430 printf("C1 number SRIO packets: %d\n", pPaStats->classify1.nSrioPackets);\r
431 printf("C1 number LLC/SNAP Fail packets: %d\n", pPaStats->classify1.nLlcSnapFail);\r
432 printf("C1 number table matched: %d\n", pPaStats->classify1.nTableMatch);\r
433 printf("C1 number failed table matched: %d\n", pPaStats->classify1.nNoTableMatch);\r
434 printf("C1 number IP Fragmented packets: %d\n", pPaStats->classify1.nIpFrag);\r
435 printf("C1 number IP Depth Overflow: %d\n", pPaStats->classify1.nIpDepthOverflow);\r
436 printf("C1 number VLAN Depth Overflow: %d\n", pPaStats->classify1.nVlanDepthOverflow);\r
437 printf("C1 number GRE Depth Overflow: %d\n", pPaStats->classify1.nGreDepthOverflow);\r
438 printf("C1 number MPLS Packets: %d\n", pPaStats->classify1.nMplsPackets);\r
439 printf("C1 number of parse fail: %d\n",pPaStats->classify1.nParseFail);\r
440 printf("C1 number of Invalid IPv6 Opt: %d\n", pPaStats->classify1.nInvalidIPv6Opt);\r
441 printf("C1 number of TX IP Fragments: %d\n", pPaStats->classify1.nTxIpFrag);\r
442 printf("C1 number of silent discard: %d\n",pPaStats->classify1.nSilentDiscard);\r
443 printf("C1 number of invalid control: %d\n", pPaStats->classify1.nInvalidControl);\r
444 printf("C1 number of invalid states: %d\n",pPaStats->classify1.nInvalidState);\r
445 printf("C1 number of system fails: %d\n",pPaStats->classify1.nSystemFail);\r
446 printf("C2 number Packets : %d\n",pPaStats->classify2.nPackets);\r
447 printf("C2 number udp : %d\n",pPaStats->classify2.nUdp);\r
448 printf("C2 number tcp : %d\n",pPaStats->classify2.nTcp);\r
449 printf("C2 number Custom : %d\n",pPaStats->classify2.nCustom);\r
450 printf("C2 number silent drop : %d\n",pPaStats->classify2.nSilentDiscard);\r
451 printf("C2 number invalid cntrl : %d\n\n",pPaStats->classify2.nInvalidControl);\r
452 printf("C2 number Modify Stats Cmd Fail : %d\n\n",pPaStats->modify.nCommandFail);\r
453 }\r
454 Pktlib_getHeapStats(OurHeap, &pktLibHeapStats);\r
455 \r
456 printf("main heap stats> #free=%d #zb=%d #garbage=%d\n", pktLibHeapStats.numFreeDataPackets,\r
457 pktLibHeapStats.numZeroBufferPackets, pktLibHeapStats.numPacketsinGarbage);\r
458 printf(" > #dataBufThreshStatus=%d #dataBufStarvCounter=%d #zBufThreshStatus=%d #zBufStarvCounter=%d \n", \r
459 pktLibHeapStats.dataBufferThresholdStatus,pktLibHeapStats.dataBufferStarvationCounter,\r
460 pktLibHeapStats.zeroDataBufferThresholdStatus, pktLibHeapStats.zeroDataBufferStarvationCounter);\r
461 }\r
462 \r
463 NETAPI_T worker_nh[MAX_NUM_CORES];\r
464 \r
465 void slow_path_thread(uint32_t index)\r
466 {\r
467 int err, i;;\r
468 uint32_t thread_num;\r
469 cpu_set_t cpu_set;\r
470 \r
471 /* index being passed in is the core we want to run the thread on */\r
472 thread_num = index;\r
473 printf("slow_path_thread, mypid: %d, core_id %d\n", gettid(), thread_num);\r
474 \r
475 CPU_ZERO( &cpu_set);\r
476 for (i = 0; i < 1;i++)\r
477 {\r
478 CPU_SET( i, &cpu_set);\r
479 }\r
480 hplib_utilSetupThread(thread_num, &cpu_set, hplib_spinLock_Type_LOL);\r
481 worker_nh[thread_num] = netapi_init(NETAPI_CORE_MASTER,NULL);\r
482 \r
483 if (worker_nh[thread_num] == NULL)\r
484 {\r
485 printf("slow_path_thread: netapi_init failure, exiting\n");\r
486 exit(1);\r
487 }\r
488 netapi_setCookie(worker_nh[thread_num],(void*)(thread_num | SP_THREAD_MASK));\r
489 \r
490 scheduler[thread_num] =netapi_schedOpen(worker_nh[thread_num],&our_sched_cfg, &err);\r
491 if (!scheduler[thread_num]) \r
492 {\r
493 printf("sched create failed for core%d\n",thread_num);\r
494 goto ERR_slow_path_thread;\r
495 }\r
496 scheduler[thread_num]->config.yield = TRUE;\r
497 scheduler[thread_num]->config.pollGarbageQ = TRUE;\r
498 scheduler[thread_num]->config.pollCtrlQ = TRUE;\r
499 printf("Slow Path thread: %d setup complete, running on ARM CORE: %d\n",\r
500 index,index);\r
501 \r
502 \r
503 netapi_schedRun(scheduler[thread_num], &err);\r
504 \r
505 ERR_slow_path_thread:\r
506 printf("slow_path_thread: calling netapi_shutdown\n");\r
507 netapi_shutdown(worker_nh[thread_num]);\r
508 }\r
509 \r
510 \r
511 void fast_path_thread(uint32_t index)\r
512 {\r
513 int err, i;\r
514 PKTIO_HANDLE_T *rx_chan;\r
515 PKTIO_HANDLE_T *sb_rx_chan;\r
516 uint32_t thread_num;\r
517 int navlHandle;\r
518 \r
519 \r
520 cpu_set_t cpu_set;\r
521 \r
522 CPU_ZERO( &cpu_set);\r
523 thread_num = index;\r
524 printf("fast_path_thread: core %d\n", index);\r
525 \r
526 \r
527 CPU_SET( thread_num, &cpu_set);\r
528 hplib_utilSetupThread(thread_num, &cpu_set, hplib_spinLock_Type_LOL);\r
529 \r
530 \r
531 hplib_mSpinLockLock(&dpi_demo_thread_lock);\r
532 worker_nh[thread_num]=netapi_init(NETAPI_CORE_MASTER,NULL);\r
533 \r
534 if (worker_nh[thread_num] == NULL)\r
535 {\r
536 printf("fast_path_thread: netapi_init failure, exiting\n");\r
537 hplib_mSpinLockUnlock(&dpi_demo_thread_lock);\r
538 exit(1);\r
539 }\r
540 else\r
541 {\r
542 #ifdef VDPI\r
543 navlHandle = navl_per_thread_init(thread_num);\r
544 #endif\r
545 }\r
546 hplib_mSpinLockUnlock(&dpi_demo_thread_lock);\r
547 \r
548 \r
549 if (worker_nh[thread_num] == NULL)\r
550 {\r
551 printf("fast_path_thread: netapi_init failure, exiting\n");\r
552 exit(1);\r
553 }\r
554 \r
555 /* open netcp default RX channels*/\r
556 rx_chan = netapi_pktioOpen(worker_nh[thread_num], NETCP_RX, (PKTIO_CB) recv_cb_bridge, &netcp_rx_cfg, &err);\r
557 \r
558 \r
559 netapi_setCookie(worker_nh[thread_num],(void*)thread_num);\r
560 \r
561 scheduler[thread_num] =netapi_schedOpen(worker_nh[thread_num],\r
562 &our_sched_cfg,\r
563 &err);\r
564 if (!scheduler[thread_num]) \r
565 {\r
566 printf("sched create failed for core%d\n",thread_num);\r
567 goto ERR_fast_path_thread;\r
568 //exit(1);\r
569 }\r
570 \r
571 \r
572 scheduler[thread_num]->config.yield = FALSE;\r
573 scheduler[thread_num]->config.pollGarbageQ = FALSE;\r
574 scheduler[thread_num]->config.pollCtrlQ = FALSE;\r
575 /* Entry point to scheduler */\r
576 \r
577 \r
578 printf("Fast Path thread: %d setup complete, running on ARM CORE: %d\n",\r
579 index,index);\r
580 netapi_schedRun(scheduler[thread_num], &err);\r
581 \r
582 ERR_fast_path_thread:\r
583 #ifdef VDPI\r
584 navl_fini(navlHandle);\r
585 #endif\r
586 netapi_pktioClose(rx_chan, &err);\r
587 \r
588 printf("fast_path_thread: calling netapi_shutdown\n");\r
589 netapi_shutdown(worker_nh[thread_num]);\r
590 }\r
591 \r
592 \r
593 /******************************\r
594 * main program\r
595 *****************************/\r
596 int main(int argc, char **argv)\r
597 {\r
598 int err,i;\r
599 int j;\r
600 int32_t errCode;\r
601 Pktlib_HeapIfTable* pPktifTable;\r
602 Pktlib_HeapCfg heapCfg;\r
603 long t1, t2 ;\r
604 cpu_set_t cpu_set;\r
605 int c;\r
606 int statsQueryRequest = 0;\r
607 pthread_t *thrs;\r
608 int p;\r
609 \r
610 #if 0\r
611 if (initRm())\r
612 {\r
613 printf("main: initRm() returned error\n");\r
614 exit(1);\r
615 }\r
616 #endif\r
617 \r
618 \r
619 #if 1\r
620 \r
621 if (argc == 2)\r
622 {\r
623 printf("main: argument %s\n", argv[1]);\r
624 if(!(strcmp(argv[1], "stats")))\r
625 {\r
626 statsQueryRequest =1;\r
627 printf("querying for stats\n");\r
628 }\r
629 }\r
630 printf("statsQueryReqeust: %d\n", statsQueryRequest);\r
631 \r
632 \r
633 #endif\r
634 \r
635 \r
636 if (!statsQueryRequest)\r
637 {\r
638 signal(SIGINT,netTest_utilMySig);\r
639 CPU_ZERO( &cpu_set);\r
640 CPU_SET( 0, &cpu_set);\r
641 hplib_utilSetupThread(HPLIB_THREADID, &cpu_set, hplib_spinLock_Type_LOL);\r
642 \r
643 /* create netapi */\r
644 our_netapi_default_cfg.rmHandle = rmClientServiceHandle;\r
645 netapi_handle = netapi_init(NETAPI_SYS_MASTER,\r
646 &our_netapi_default_cfg);\r
647 if (netapi_handle == NULL)\r
648 {\r
649 printf("main: netapi_init failure, exiting\n");\r
650 exit(1);\r
651 }\r
652 /* allocate segment for shared memory for packet stats */\r
653 /* allocate packet statistics */\r
654 pShmBase = hplib_shmOpen();\r
655 if (pShmBase)\r
656 {\r
657 if (hplib_shmAddEntry(pShmBase,\r
658 sizeof(navl_wrapper_pkt_stat_t)\r
659 * MAX_PROTOCOLS *NUM_FP_PROCS + sizeof(navl_wrapper_cfg_info_t),\r
660 APP_ENTRY_1) != hplib_OK)\r
661 {\r
662 printf("main: hplib_shmAddEntry failure\n");\r
663 return -1;\r
664 }\r
665 else\r
666 {\r
667 pShmEntry = hplib_shmGetEntry(pShmBase, APP_ENTRY_1);\r
668 pNavlCfg = (navl_wrapper_cfg_info_t*)pShmEntry;\r
669 memset(pNavlCfg,\r
670 0,\r
671 sizeof(navl_wrapper_pkt_stat_t) * MAX_PROTOCOLS *NUM_FP_PROCS+ sizeof(navl_wrapper_cfg_info_t));\r
672 pNavlCfg = (navl_wrapper_cfg_info_t*)pShmEntry;\r
673 pNavlCfg->enable_dpi = 0; /* disable DPI by default */\r
674 }\r
675 if (hplib_shmAddEntry(pShmBase, sizeof(navl_global_dpi_stats), APP_ENTRY_2) != hplib_OK)\r
676 {\r
677 printf("main: hplib_shmAddEntry failure\n");\r
678 return -1;\r
679 }\r
680 else\r
681 {\r
682 pShmEntry = hplib_shmGetEntry(pShmBase, APP_ENTRY_2);\r
683 pGlobDpiStats = (navl_global_dpi_stats*) pShmEntry;\r
684 memset(pGlobDpiStats, 0, sizeof(navl_global_dpi_stats));\r
685 pGlobDpiStats->min_time=100000000;\r
686 }\r
687 }\r
688 else\r
689 {\r
690 printf("main: hplib_shmOpen failure, exiting\n");\r
691 exit(1);\r
692 }\r
693 /* open the main heap */\r
694 OurHeap = Pktlib_findHeapByName("netapi");\r
695 if (!OurHeap)\r
696 {\r
697 printf("findheapbyname fail\n");\r
698 exit(1);\r
699 }\r
700 \r
701 //if we want to relay network packets, we create a handle to the \r
702 //default netcp receive queue here\r
703 netcp_rx_chan= netapi_pktioOpen(netapi_handle, NETCP_RX, (PKTIO_CB) recv_cb_bridge, &netcp_rx_cfg, &err);\r
704 if (!netcp_rx_chan)\r
705 {\r
706 printf("pktio open RX failed err=%d\n",err);\r
707 exit(1);\r
708 }\r
709 \r
710 netcp_tx_chan= netapi_pktioOpen(netapi_handle, NETCP_TX, (PKTIO_CB) NULL, &netcp_tx_cfg, &err);\r
711 if (!netcp_tx_chan)\r
712 {\r
713 printf("pktio open TX failed err=%d\n",err);\r
714 exit(1);\r
715 }\r
716 else //install a fast path template into the NETCP TX channel\r
717 {\r
718 PKTIO_CONTROL_T control2;\r
719 control2.op = PKTIO_UPDATE_FAST_PATH;\r
720 PKTIO_CFG_T cfg2;\r
721 memset(&cfg2, 0, sizeof(PKTIO_CFG_T));\r
722 cfg2.fast_path_cfg.fp_send_option = PKTIO_FP_NO_CRYPTO_NO_CKSUM_PORT;\r
723 cfg2.fast_path_cfg.txPktInfo= &txPktInfoNoCrypto;\r
724 netapi_pktioControl(netcp_tx_chan, NULL, &cfg2, &control2, &err);\r
725 }\r
726 \r
727 \r
728 if (navl_setup() < 0)\r
729 {\r
730 printf("main: navl_setup failure, exiting\n");\r
731 exit(1);\r
732 }\r
733 \r
734 /*********************************************/\r
735 /*****************end NETAPI STARTUP**********/\r
736 /*********************************************/\r
737 \r
738 //now creaate a simple netcp rule\r
739 //to get a lot of packets\r
740 mac0 = netapi_netcpCfgCreateMacInterface(\r
741 netapi_handle,\r
742 &all_mac[0],\r
743 NULL,\r
744 0,\r
745 1,\r
746 (NETCP_CFG_ROUTE_HANDLE_T) NULL,\r
747 (NETCP_CFG_VLAN_T ) NULL , //future\r
748 0x0800,\r
749 1,\r
750 &err);\r
751 if (err) {printf("addmac0 failed %d\n",err); exit(1); }\r
752 else printf("addmac0 sucess\n");\r
753 \r
754 mac1 = netapi_netcpCfgCreateMacInterface(\r
755 netapi_handle,\r
756 &all_mac[0],\r
757 NULL,\r
758 1,\r
759 2,\r
760 (NETCP_CFG_ROUTE_HANDLE_T) NULL,\r
761 (NETCP_CFG_VLAN_T ) NULL , //future\r
762 0x0800,\r
763 1,\r
764 &err);\r
765 if (err) {printf("addmac1 failed %d\n",err); exit(1); }\r
766 else printf("addmac1 sucess\n");\r
767 \r
768 //calibrate idle\r
769 CALIB = calibrate_idle();\r
770 \r
771 //**************************************\r
772 //Create a slow path thread\r
773 //***************************************\r
774 thrs = malloc( sizeof( pthread_t ) * NUM_PROCS );\r
775 if (thrs == NULL)\r
776 {\r
777 perror( "malloc" );\r
778 return -1;\r
779 }\r
780 printf( "dpi-demo: Starting slow_path_thread on core 0\n");\r
781 \r
782 if (pthread_create( &thrs[0], NULL, (void*)slow_path_thread,\r
783 (void *)0 )) //start at core 0\r
784 {\r
785 perror( "pthread_create" );\r
786 exit(1);\r
787 }\r
788 \r
789 \r
790 for (j= 1;j < NUM_PROCS;j++)\r
791 {\r
792 printf( "dpi-demo: Starting fast_path_thread on core 1\n");\r
793 if (pthread_create( &thrs[j], NULL, (void*)fast_path_thread,\r
794 (void *)j )) //start at core 1\r
795 {\r
796 perror( "pthread_create" );\r
797 exit(1);\r
798 }\r
799 }\r
800 }\r
801 else\r
802 {\r
803 \r
804 pShmBase = hplib_shmOpen();\r
805 if (pShmBase)\r
806 {\r
807 pTemp = hplib_shmGetEntry(pShmBase, APP_ENTRY_1);\r
808 pNavlCfg = (navl_wrapper_cfg_info_t*)pTemp;\r
809 \r
810 pStats1 = pTemp + sizeof(navl_wrapper_cfg_info_t);\r
811 \r
812 \r
813 pStats2 = pTemp + sizeof(navl_wrapper_cfg_info_t) +\r
814 (sizeof(navl_wrapper_pkt_stat_t)*pNavlCfg->num_protocols);\r
815 \r
816 pTemp = hplib_shmGetEntry(pShmBase, APP_ENTRY_2);\r
817 pGlobDpiStats = (struct dpi_stats*) pTemp;\r
818 \r
819 printf("dpi stats: nops=%d nclass=%d min cycle=%d max cycle=%d ave cycle=%lld #mallocs=%d #mbytes=%d n_err=%d fops=%d mCycles=%d fCycles=%d\n",\r
820 pGlobDpiStats->n_ops,\r
821 pGlobDpiStats->n_class,\r
822 pGlobDpiStats->min_time,\r
823 pGlobDpiStats->max_time,\r
824 pGlobDpiStats->n_ops? pGlobDpiStats->tot/pGlobDpiStats->n_ops : 0,\r
825 pGlobDpiStats->m_op,\r
826 pGlobDpiStats->m_bytes,\r
827 pGlobDpiStats->n_err,\r
828 pGlobDpiStats->f_op, pGlobDpiStats->m_cycles, pGlobDpiStats->f_cycles);\r
829 for(i=0;i < NUM_FP_PROCS; i++)\r
830 {\r
831 navl_results2(i);\r
832 }\r
833 exit(1);\r
834 }\r
835 }\r
836 \r
837 \r
838 //this thread of execution (main) now just waits on user input\r
839 for(;;)\r
840 {\r
841 printf(">");\r
842 c=getchar();\r
843 if (c=='C')\r
844 {\r
845 CAP=!CAP; \r
846 printf("CAPTURE= %d\n", CAP);\r
847 }\r
848 else if (c=='q') {QUIT=1;break;}\r
849 else if (c=='s')\r
850 our_stats_cb(netapi_handle, &netcp_stats);\r
851 #ifdef VDPI\r
852 else if (c=='c') \r
853 {navl_clear_stats();printf("> Clearing DPI stats\n");}\r
854 else if (c=='v') navl_set_verbose();\r
855 else if (c=='p') \r
856 {DUMP_DPI_CONN = !DUMP_DPI_CONN;printf("> **DPI CONN DUMP is %s ** \n", DUMP_DPI_CONN ?"enabled":"disabled");}\r
857 else if (c=='d')\r
858 {\r
859 pNavlCfg->enable_dpi = !pNavlCfg->enable_dpi;\r
860 printf("enable_dpi flag: %d\n", pNavlCfg->enable_dpi);\r
861 printf("> **DPI is %s ** \n", pNavlCfg->enable_dpi?"enabled":"disabled");\r
862 }\r
863 #endif\r
864 else if (c=='!') {system("sh");}\r
865 \r
866 else if ((c=='h')||(c=='?'))\r
867 {\r
868 printf("> 'q' to quit, 's' for stats,'d' to dump capture\n,> 'h' for help\n ");\r
869 }\r
870 #if 1\r
871 else if (c=='r')\r
872 {\r
873 netTest_utilDumpHeader(&last_header[0], 0,0,0);\r
874 netTest_utilDumpDescr(&last_desc[0], 0);\r
875 }\r
876 #endif\r
877 }\r
878 \r
879 \r
880 \r
881 #ifdef VDPI\r
882 navl_done();\r
883 #endif\r
884 \r
885 //wait for completion \r
886 printf("main task now pending on thread completion\n");\r
887 for (i = 0; i < NUM_PROCS; i++)\r
888 pthread_join( thrs[i], NULL );\r
889 \r
890 free( thrs );\r
891 \r
892 /*************************************************\r
893 ************CLEAN UP****************************\r
894 ************************************************/\r
895 //get rid of rule, in the case that we are relaying packets\r
896 //also close our netcp rx channel\r
897 netapi_netcpCfgDelMac(netapi_handle,0,&err);\r
898 netapi_netcpCfgDelMac(netapi_handle,1,&err);\r
899 \r
900 netapi_pktioClose(netcp_rx_chan,&err);\r
901 netapi_pktioClose(netcp_tx_chan,&err);\r
902 \r
903 \r
904 //done\r
905 netapi_shutdown(netapi_handle);\r
906 \r
907 \r
908 }\r
909 #if 1\r
910 static inline void send_it(Ti_Pkt *tip, int len, int out_port)\r
911 {\r
912 int err=0;\r
913 PKTIO_METADATA_T meta2 = {PKTIO_META_TX,{0},0};\r
914 nwalTxPktInfo_t meta_tx2={0};\r
915 int coreid=Osal_nwalGetProcId();\r
916 if (len<60)\r
917 {\r
918 unsigned int templen;\r
919 char * p_pkt;\r
920 len=60;\r
921 Pktlib_getDataBuffer(tip,(uint8_t**)&p_pkt,&templen);//ignore templen\r
922 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *) tip, p_pkt,len);\r
923 }\r
924 Pktlib_setPacketLen(tip,len);\r
925 meta_tx2.txFlag1 = NWAL_TX_FLAG1_META_DATA_VALID;\r
926 meta_tx2.ploadLen = len ;\r
927 meta_tx2.enetPort=out_port;\r
928 meta2.u.tx_meta=&meta_tx2;\r
929 stats.tx+=1;\r
930 if(coreid<MAX_NUM_CORES)\r
931 pkt_tx[coreid]+=1;\r
932 netapi_pktioSend(netcp_tx_chan,tip,&meta2,&err);\r
933 }\r
934 #endif\r
935 void recv_cb_bridge(struct PKTIO_HANDLE_Tag * channel, Ti_Pkt* p_recv[],\r
936 PKTIO_METADATA_T meta[], int n_pkts,\r
937 uint64_t ts )\r
938 {\r
939 int i;\r
940 int len;\r
941 int p;\r
942 Ti_Pkt * tip;\r
943 unsigned int appid;\r
944 unsigned int templen;\r
945 char * p_pkt;\r
946 unsigned long t1;\r
947 unsigned long t2;\r
948 unsigned long long ct1;\r
949 unsigned long long ct2;\r
950 unsigned short ip_pl;\r
951 unsigned long long n_c_ops;\r
952 int ifno;\r
953 int out_port;\r
954 \r
955 int coreid=Osal_nwalGetProcId();\r
956 \r
957 \r
958 pasahoLongInfo_t* protoInfo;\r
959 \r
960 t1=netapi_timing_start();\r
961 ct1 =Osal_cache_op_measure(&n_c_ops);\r
962 for(i=0;i<n_pkts;i++)\r
963 {\r
964 \r
965 tip = p_recv[i];\r
966 appid = ((unsigned int)meta[i].u.rx_meta->appId)&0xff000000;\r
967 if (appid == NETAPI_NETCP_MATCH_GENERIC_IP) \r
968 {\r
969 stats.ip+=1;\r
970 }\r
971 \r
972 protoInfo=nwal_mGetProtoInfo(tip);\r
973 ifno = nwal_mGetRxEmacPort( protoInfo);\r
974 if (ifno ==1) out_port=2; else out_port=1;\r
975 if(coreid<MAX_NUM_CORES) stats.core_rx[coreid]+=1;\r
976 if (ifno < MAX_NUM_INTERFACES) stats.if_rx[ifno]+=1;\r
977 Pktlib_getDataBuffer(tip,(uint8_t**)&p_pkt,&templen);//ignore templen\r
978 if (CAP==coreid)\r
979 {\r
980 memcpy((unsigned char *)&last_header[0],p_pkt,32);\r
981 memcpy((unsigned char*)&last_desc[0],tip,64);\r
982 }\r
983 len = Pktlib_getPacketLen(tip)-4;//real length, subtract mac trailer\r
984 stats.rx+=1;\r
985 //printf("recv_cb_bridge: appId: 0x%x, out_port: %d\n", appid, out_port);\r
986 if (appid == NETAPI_NETCP_MATCH_GENERIC_MAC)\r
987 {\r
988 #ifdef VDPI\r
989 {\r
990 if (pNavlCfg->enable_dpi)\r
991 navl_process_pkt(p_pkt, len);\r
992 }\r
993 #endif\r
994 }\r
995 \r
996 \r
997 //printf("recv_cb_bridge: coreId: %d, outPort %d\n", coreid, out_port);\r
998 //Pktlib_freePacket(tip);\r
999 send_it(tip,len+4,out_port);\r
1000 }\r
1001 t2=netapi_timing_start();\r
1002 ct2 =Osal_cache_op_measure(&n_c_ops);\r
1003 stats.app_cycles += (unsigned long long) (t2-t1);\r
1004 stats.tx_cache_cycles += (unsigned long long) (ct2-ct1);\r
1005 return;\r
1006 }\r
1007 \r
1008 #define NTOPOP 150\r
1009 volatile Ti_Pkt * pHd[NTOPOP];\r
1010 \r
1011 #define PKTGEN_PKT_LEN pkt_len\r
1012 #define MAXP 4 //max ports \r
1013 void gen_pkts(int np, int out_port)\r
1014 {\r
1015 int i;\r
1016 int p=0;\r
1017 unsigned long * pI ;\r
1018 Ti_Pkt * tip;\r
1019 int len;\r
1020 unsigned char * pData;\r
1021 int cstall=0;\r
1022 int coreid = Osal_nwalGetProcId();\r
1023 for(i=0;i<np;)\r
1024 {\r
1025 //set out output port\r
1026 if (out_port)\r
1027 {\r
1028 p=out_port;\r
1029 }\r
1030 else //flip flop\r
1031 {\r
1032 p+=1;\r
1033 if(p>MAXP) p=1;\r
1034 }\r
1035 //get a packet\r
1036 tip=Pktlib_allocPacket(OurHeap,PKTGEN_PKT_LEN);\r
1037 pI = (unsigned long *) tip;\r
1038 if (!tip)\r
1039 {\r
1040 pkt_stall[coreid]+=1;\r
1041 cstall+=1;\r
1042 if (cstall >= 100000) \r
1043 {\r
1044 printf("worker core %d, max stall hit,, exiting.\n",coreid); \r
1045 return;\r
1046 }\r
1047 continue;\r
1048 }\r
1049 cstall=0;\r
1050 Pktlib_getDataBuffer(tip,&pData,&len);\r
1051 memcpy(pData,&dummy_mac,14);\r
1052 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *) tip, pData,PKTGEN_PKT_LEN);\r
1053 Pktlib_setPacketLen(tip,PKTGEN_PKT_LEN);\r
1054 pI[1]=0x80000000;\r
1055 //pI[2] &= 0xfff0ffff ;move to pktio send function\r
1056 \r
1057 //capture packet just in case\r
1058 if (CAP==coreid)\r
1059 {\r
1060 unsigned int templen;\r
1061 char * p_pkt;\r
1062 Pktlib_getDataBuffer(tip,(uint8_t**)&p_pkt,&templen);//ignore templen\r
1063 memcpy((unsigned char *)&last_header[0],p_pkt,32);\r
1064 memcpy((unsigned char*)&last_desc[0],tip,64);\r
1065 }\r
1066 \r
1067 //send packet\r
1068 send_it(tip, PKTGEN_PKT_LEN, p);\r
1069 pkt_tx[coreid]+=1;\r
1070 i+=1;\r
1071 }\r
1072 \r
1073 return;\r
1074 }\r