summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEric Ruei2015-11-24 17:36:36 -0600
committerEric Ruei2015-11-24 17:36:36 -0600
commit6fb24c2ab712e2b14f07df291d11e18b2698b616 (patch)
tree34992abc81ee280fca24630efedee95622f62f28
parentee1bcf5c062dc4452e6846707101556b02b491b1 (diff)
parentc7f2b91691ee571238c752c6dcc3203826ad53cb (diff)
downloadpa-lld-6fb24c2ab712e2b14f07df291d11e18b2698b616.tar.gz
pa-lld-6fb24c2ab712e2b14f07df291d11e18b2698b616.tar.xz
pa-lld-6fb24c2ab712e2b14f07df291d11e18b2698b616.zip
Merge branch 'master' of gtgit01.gt.design.ti.com:git/projects/pa-lld
-rw-r--r--example/emacExample/k2h/armv7/bios/PA_emacExample_K2HArmBiosExampleProject.txt8
-rwxr-xr-xtest/PAPktCapTest/src/armv7/bios/framework.c1636
-rwxr-xr-xtest/PAPktCapTest/src/armv7/bios/fw_main.c143
-rwxr-xr-xtest/PAPktCapTest/src/armv7/bios/pcap_linker.cmd7
-rwxr-xr-xtest/PAUnitTest/src/armv7/bios/framework.c2672
-rwxr-xr-xtest/PAUnitTest/src/armv7/bios/testMain.c297
-rwxr-xr-xtest/PAUnitTest/src/armv7/bios/testmem.c71
7 files changed, 4833 insertions, 1 deletions
diff --git a/example/emacExample/k2h/armv7/bios/PA_emacExample_K2HArmBiosExampleProject.txt b/example/emacExample/k2h/armv7/bios/PA_emacExample_K2HArmBiosExampleProject.txt
index 39c7db0..1cda8de 100644
--- a/example/emacExample/k2h/armv7/bios/PA_emacExample_K2HArmBiosExampleProject.txt
+++ b/example/emacExample/k2h/armv7/bios/PA_emacExample_K2HArmBiosExampleProject.txt
@@ -12,7 +12,13 @@
12-ccs.linkFile "PDK_INSTALL_PATH/ti/drv/rm/device/k2h/global-resource-list.c" 12-ccs.linkFile "PDK_INSTALL_PATH/ti/drv/rm/device/k2h/global-resource-list.c"
13-ccs.linkFile "PDK_INSTALL_PATH/ti/drv/cppi/device/k2h/src/cppi_device.c" 13-ccs.linkFile "PDK_INSTALL_PATH/ti/drv/cppi/device/k2h/src/cppi_device.c"
14-ccs.linkFile "PDK_INSTALL_PATH/ti/drv/qmss/device/k2h/src/qmss_device.c" 14-ccs.linkFile "PDK_INSTALL_PATH/ti/drv/qmss/device/k2h/src/qmss_device.c"
15-ccs.linkFile "PDK_INSTALL_PATH/ti/drv/pa/device/k2h/src/nss_device.c" 15-ccs.linkFile "PDK_INSTALL_PATH/ti/drv/pa/device/k2h/src/nss_device.c"
16-ccs.linkFile "PDK_INSTALL_PATH/ti/csl/src/ip/sgmii/V0/csl_cpsgmii.c"
17-ccs.linkFile "PDK_INSTALL_PATH/ti/csl/src/ip/cpsw/V0/csl_cpsw_5gf.c"
18-ccs.linkFile "PDK_INSTALL_PATH/ti/csl/src/ip/serdes_sb/V0/csl_wiz8_sb_refclk125MHz_10bit_5Gbps.c"
19-ccs.linkFile "PDK_INSTALL_PATH/ti/csl/src/ip/serdes_sb/V0/csl_wiz8_sb_refclk125MHz_20bit_6p25Gbps.c"
20-ccs.linkFile "PDK_INSTALL_PATH/ti/csl/src/ip/serdes_sb/V0/csl_wiz8_sb_refclk156p25MHz_10bit_5Gbps.c"
21-ccs.linkFile "PDK_INSTALL_PATH/ti/csl/src/ip/serdes_sb/V0/csl_wiz8_sb_refclk156p25MHz_20bit_6p25Gbps.c"
16-ccs.linkFile "PASS_INSTALL_PATH/ti/drv/pa/example/emacExample/k2h/armv7/bios/cpsw_example_k2h.cfg" 22-ccs.linkFile "PASS_INSTALL_PATH/ti/drv/pa/example/emacExample/k2h/armv7/bios/cpsw_example_k2h.cfg"
17-ccs.setCompilerOptions "-c -mcpu=cortex-a15 -mtune=cortex-a15 -marm -mfloat-abi=hard -DSOC_K2H -DNUM_PORTS=5 -D_LITTLE_ENDIAN=1 -g -gstrict-dwarf -Wall -MMD -MP -I${PDK_INSTALL_PATH} -I${PDK_INSTALL_PATH}/ti/drv/pa/example/emacExample/src -I${PASS_INSTALL_PATH}/ti/drv/pa/example/emacExample/src/armv7/bios -I${PDK_INSTALL_PATH}/ti/drv/cppi -I${PDK_INSTALL_PATH}/ti/drv/qmss" -rtsc.enableRtsc 23-ccs.setCompilerOptions "-c -mcpu=cortex-a15 -mtune=cortex-a15 -marm -mfloat-abi=hard -DSOC_K2H -DNUM_PORTS=5 -D_LITTLE_ENDIAN=1 -g -gstrict-dwarf -Wall -MMD -MP -I${PDK_INSTALL_PATH} -I${PDK_INSTALL_PATH}/ti/drv/pa/example/emacExample/src -I${PASS_INSTALL_PATH}/ti/drv/pa/example/emacExample/src/armv7/bios -I${PDK_INSTALL_PATH}/ti/drv/cppi -I${PDK_INSTALL_PATH}/ti/drv/qmss" -rtsc.enableRtsc
18-ccs.setLinkerOptions " -lrdimon -lgcc -lm -lnosys -nostartfiles -static -Wl,--gc-sections -L$(XDCTOOLS)/packages/gnu/targets/arm/libs/install-native/arm-none-eabi/lib/fpu " 24-ccs.setLinkerOptions " -lrdimon -lgcc -lm -lnosys -nostartfiles -static -Wl,--gc-sections -L$(XDCTOOLS)/packages/gnu/targets/arm/libs/install-native/arm-none-eabi/lib/fpu "
diff --git a/test/PAPktCapTest/src/armv7/bios/framework.c b/test/PAPktCapTest/src/armv7/bios/framework.c
new file mode 100755
index 0000000..398a9b5
--- /dev/null
+++ b/test/PAPktCapTest/src/armv7/bios/framework.c
@@ -0,0 +1,1636 @@
1/**
2 * @file framework.c
3 *
4 * @brief
5 * This file holds all the platform specific framework
6 * initialization and setup code.
7 *
8 * \par
9 * ============================================================================
10 * @n (C) Copyright 2009-2013, Texas Instruments, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the
22 * distribution.
23 *
24 * Neither the name of Texas Instruments Incorporated nor the names of
25 * its contributors may be used to endorse or promote products derived
26 * from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40*/
41#include "pcap_singlecore.h"
42#include "ti/drv/pa/pa.h"
43#include "ti/drv/pa/pasahost.h"
44/* Firmware images */
45#include <ti/drv/pa/fw/pafw.h>
46
47#include <ti/csl/cslr_device.h>
48#include <ti/csl/csl_psc.h>
49#include <ti/csl/csl_pscAux.h>
50
51#define PASS_TEST_TX_CMD
52/* High Priority Accumulation Interrupt Service Handler for this application */
53void Cpsw_RxISR (void);
54
/* Constructed UDP/IPv4 test packet that the PA is configured to MATCH:
 * dest MAC 10:11:12:13:14:15, src MAC 00:01:02:03:04:05, followed by an
 * 80-byte payload of incrementing bytes (0x32..0x81); 108 bytes total.
 * 16-byte aligned so the buffer can be handed directly to CPPI/DMA.
 * NOTE(review): IP/UDP checksum fields are fixed constants — presumably
 * valid for this exact payload; confirm if the payload is ever changed. */
#ifdef _TMS320C6X
#pragma DATA_ALIGN(pktMatch, 16)
uint8_t pktMatch[] = {
#else
uint8_t pktMatch[] __attribute__ ((aligned (16))) = {
#endif
    0x10, 0x11, 0x12, 0x13, 0x14, 0x15,             /* Dest MAC */
    0x00, 0x01, 0x02, 0x03, 0x04, 0x05,             /* Src MAC */
    0x08, 0x00,                                     /* Ethertype = IPv4 */
    0x45, 0x00, 0x00, 0x6c,                         /* IP version, services, total length */
    0x00, 0x00, 0x00, 0x00,                         /* IP ID, flags, fragment offset */
    0x05, 0x11, 0x32, 0x26,                         /* IP ttl, protocol (UDP), header checksum */
    0xc0, 0xa8, 0x01, 0x01,                         /* Source IP address */
    0xc0, 0xa8, 0x01, 0x0a,                         /* Destination IP address */
    0x12, 0x34, 0x56, 0x78,                         /* UDP source port, dest port */
    0x00, 0x58, 0x1d, 0x18,                         /* UDP len, UDP checksum */
    0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, /* 80 bytes of payload data */
    0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41,
    0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
    0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
    0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
    0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61,
    0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
    0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71,
    0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
    0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81 };
82
/* Constructed UDP/IPv4 test packet that the PA should NOT match (and hence
 * drop): identical to pktMatch except for the last byte of the dest MAC
 * (…:16) and src MAC (…:06), so no configured lookup entry matches it.
 * Same size and alignment requirements as pktMatch. */
#ifdef _TMS320C6X
#pragma DATA_ALIGN(pktDrop, 16)
uint8_t pktDrop[] = {
#else
uint8_t pktDrop[] __attribute__ ((aligned (16))) = {
#endif
    0x10, 0x11, 0x12, 0x13, 0x14, 0x16,             /* Dest MAC (differs from pktMatch in last byte) */
    0x00, 0x01, 0x02, 0x03, 0x04, 0x06,             /* Src MAC (differs from pktMatch in last byte) */
    0x08, 0x00,                                     /* Ethertype = IPv4 */
    0x45, 0x00, 0x00, 0x6c,                         /* IP version, services, total length */
    0x00, 0x00, 0x00, 0x00,                         /* IP ID, flags, fragment offset */
    0x05, 0x11, 0x32, 0x26,                         /* IP ttl, protocol (UDP), header checksum */
    0xc0, 0xa8, 0x01, 0x01,                         /* Source IP address */
    0xc0, 0xa8, 0x01, 0x0a,                         /* Destination IP address */
    0x12, 0x34, 0x56, 0x78,                         /* UDP source port, dest port */
    0x00, 0x58, 0x1d, 0x18,                         /* UDP len, UDP checksum */
    0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, /* 80 bytes of payload data */
    0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41,
    0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
    0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
    0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
    0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61,
    0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
    0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71,
    0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
    0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81 };
110
111
112extern int expectedTxCounter[NUM_MAC_PORTS], expectedRxCounter[NUM_MAC_PORTS];
113extern int actialTxCounter[NUM_MAC_PORTS], actualRxCounter[NUM_MAC_PORTS];
114
/* Per-EMAC-port configuration for the packet-capture test: one entry per
 * possible switch port (up to 8), tagging whether that port participates.
 * NOTE(review): pa_EMAC_PORT_1 and pa_EMAC_PORT_2 are excluded — presumably
 * not usable on this board/test setup; confirm against the test plan. */
uTestEmacport_t ports[8]={
    /* Port 1 or pa_EMAC_PORT_0 */
    {
        pa_EMAC_PORT_0,
        AVAILABLE_FOR_PCAP_TEST
    },
    /* Port 2 or pa_EMAC_PORT_1 */
    {
        pa_EMAC_PORT_1,
        NOT_AVAILABLE_FOR_PCAP_TEST
    },
    /* Port 3 or pa_EMAC_PORT_2 */
    {
        pa_EMAC_PORT_2,
        NOT_AVAILABLE_FOR_PCAP_TEST
    },
    /* Port 4 or pa_EMAC_PORT_3 */
    {
        pa_EMAC_PORT_3,
        AVAILABLE_FOR_PCAP_TEST
    },
    /* Port 5 or pa_EMAC_PORT_4 */
    {
        pa_EMAC_PORT_4,
        AVAILABLE_FOR_PCAP_TEST
    },
    /* Port 6 or pa_EMAC_PORT_5 */
    {
        pa_EMAC_PORT_5,
        AVAILABLE_FOR_PCAP_TEST
    },
    /* Port 7 or pa_EMAC_PORT_6 */
    {
        pa_EMAC_PORT_6,
        AVAILABLE_FOR_PCAP_TEST
    },
    /* Port 8 or pa_EMAC_PORT_7 */
    {
        pa_EMAC_PORT_7,
        AVAILABLE_FOR_PCAP_TEST
    },

};
158
159uint8_t * DataBufAlloc(void)
160{
161 uint8_t* pDataBuffer = NULL;
162 if ((pDataBuffer = (Ptr) Memory_alloc(NULL, PA_EMAC_EX_RXBUF_SIZE, 0, NULL)) == NULL)
163 {
164 System_printf ("Error allocating memory for Rx data buffer \n");
165 }
166 return (pDataBuffer);
167}
168
169/* Free Attached Buffers */
170void DataBufFree(void* pDataBuffer, uint32_t size)
171{
172 Memory_free(NULL, pDataBuffer, size);
173}
174
/** ============================================================================
 *  @n@b Convert_CoreLocal2GlobalAddr
 *
 *  @b Description
 *  @n Converts a core-local L2 address to its globally visible address.
 *     On C66x DSP cores the global alias is formed as
 *     bit28 | (coreNum << 24) | (addr & 0x00ffffff); on ARM the address
 *     map is flat, so the input is returned unchanged.
 *
 *  @param[in]
 *  @n addr   Core-local L2 address to be converted.
 *
 *  @return uint32_t
 *  @n >0    Global L2 address
 * =============================================================================
 */
uint32_t Convert_CoreLocal2GlobalAddr (uint32_t addr)
{
#ifdef _TMS320C6X
    /* Identify which DSP core we are running on. */
    uint32_t coreNum = CSL_chipReadReg(CSL_CHIP_DNUM);

    /* Fold the core number into the 0x1n000000 global L2 alias window. */
    return ((1 << 28) | (coreNum << 24) | (addr & 0x00ffffff));
#else
    /* ARM: all addresses are already global. */
    return addr;
#endif
}
202
/** ============================================================================
 *  @n@b Convert_CoreGlobal2L2Addr
 *
 *  @b Description
 *  @n This API converts a global L2 address back to a core-local L2 address
 *     (the inverse of Convert_CoreLocal2GlobalAddr). On ARM the address map
 *     is flat, so the input is returned unchanged.
 *
 *  @param[in]
 *  @n addr   Global L2 address to be converted to core local.
 *
 *  @return uint32_t
 *  @n >0   Core-local L2 address
 *  =============================================================================
 */
uint32_t Convert_CoreGlobal2L2Addr (uint32_t addr)
{
#ifdef _TMS320C6X
    /* Strip the global-alias/core-number bits (31:24), leaving the
     * local L2 offset. */
    return (addr & 0x00ffffff);
#else
    /* ARM: addresses are used as-is. */
    return (addr);
#endif
}
225
/** ============================================================================
 *  @n@b get_qmssGblCfgParamsRegsPhy2Virt
 *
 *  @b Description
 *  @n Platform hook: translate the register addresses held in the QMSS
 *     global configuration from physical to virtual where the platform
 *     requires it (e.g. when running under an OS with an MMU mapping).
 *
 *  @param[in,out]
 *  @n fw_qmssGblCfgParams   QMSS global configuration to update in place.
 *
 *  @return
 *  @n None
 *  =============================================================================
 */
void get_qmssGblCfgParamsRegsPhy2Virt(Qmss_GlobalConfigParams *fw_qmssGblCfgParams)
{
    /* Physical memory is directly addressable in this build (no MMU
     * translation of device registers), so no conversion is needed. */
    return;
}
245
/** ============================================================================
 *  @n@b get_cppiGblCfgParamsRegsPhy2Virt
 *
 *  @b Description
 *  @n Platform hook: translate the register addresses held in the CPPI
 *     global configuration from physical to virtual where the platform
 *     requires it (e.g. when running under an OS with an MMU mapping).
 *
 *  @param[in,out]
 *  @n fw_cppiGblCfgParams   CPPI global configuration to update in place.
 *
 *  @return
 *  @n None
 *  =============================================================================
 */
void get_cppiGblCfgParamsRegsPhy2Virt(Cppi_GlobalConfigParams *fw_cppiGblCfgParams)
{
    /* Physical memory is directly addressable in this build (no MMU
     * translation of device registers), so no conversion is needed. */
    return;
}
265
/* Tracks which half of the accumulator list to service next:
 * 0 = service the Ping half, 1 = service the Pong half.
 * Toggled by Cpsw_RxISR() at the end of each service pass. */
Bool gIsPingListUsed = 0;
/* High-priority accumulator channel in use; programmed in setup_rx_queue()
 * as PA_ACC_CHANNEL_NUM + core number, and needed again in the ISR to
 * acknowledge/EOI the interrupt. */
uint8_t accChannelNum;
/* High Priority Accumulator List - [((Interrupt Threshold + 1) * 2)]
 *
 * MUST be 16 byte aligned.
 *
 * The High priority accumulator list consists of 2 buffers Ping and
 * Pong each consisting of the following entries:
 *
 * (1) Entry count - specifies number of packets accumulated in
 *     the list.
 * (2) Descriptors - an array of Rx packet descriptors accumulated
 *     in this list.
 *
 * Hence the size of high priority accumulator list is calculated as
 * follows:
 *
 * (1) Get the interrupt threshold, i.e., maximum number of Rx
 *     packets to accumulate before an interrupt is generated.
 * (2) Add an extra entry to the threshold to track
 *     entry count of the list.
 * (3) Double this to accommodate space for Ping/Pong lists.
 * (4) Each accumulator entry is 4 bytes wide.
 *
 * size = ((interrupt threshold + 1) * 2) * 4 bytes
 *
 * Lets allocate here assuming that interrupt threshold is 1, i.e.,
 * interrupt on every Rxed packet.
 */
#ifdef _TMS320C6X
#pragma DATA_ALIGN (gHiPriAccumList, 16)
uint32_t gHiPriAccumList[(RX_INT_THRESHOLD + 1) * 2];
#else
uint32_t gHiPriAccumList[(RX_INT_THRESHOLD + 1) * 2] __attribute__ ((aligned (16)));
#endif
301
/** ============================================================================
 *  @n@b setup_rx_queue
 *
 *  @b Description
 *  @n Opens the Rx queue used to receive packets from PASS/CPSW, programs a
 *     QMSS high-priority accumulator channel over it (Ping/Pong list in
 *     gHiPriAccumList, interrupt on every packet), and hooks Cpsw_RxISR as
 *     the interrupt handler (event combiner on DSP, GIC Hwi on ARM).
 *
 *  @param[out]
 *  @n rxQInfo   Filled with the queue manager/number of the opened Rx queue.
 *
 *  @return int32_t
 *      -1 - Error (queue open, accumulator programming, or Hwi alloc failed)
 *       0 - Success
 *  =============================================================================
 */
int32_t setup_rx_queue(Qmss_Queue *rxQInfo)
{
    Qmss_AccCmdCfg accCfg;
    uint16_t numAccEntries, intThreshold;
    uint8_t isAllocated;
    Qmss_Result result;
#ifdef _TMS320C6X
    int32_t eventId, vectId;
    uint8_t coreNum = (uint8_t) CSL_chipReadReg(CSL_CHIP_DNUM);
#else
    /* ARM build is single-core for this test. */
    uint8_t coreNum = 0;
#endif
    extern Qmss_QueueHnd gRxQHnd;

    /* NOTE(review): the original linux-boot check is disabled, so the
     * accumulator path is always taken; the else branch below is dead
     * code kept for a future boot-mode switch. */
    //if (linuxBoot == FALSE)
    if (1)
    {

        /* Open a Receive (Rx) queue.
         *
         * This queue will be used to hold all the packets received by PASS/CPSW
         *
         * Open the next available High Priority Accumulation queue for Rx.
         */
        if ((gRxQHnd = Qmss_queueOpen (Qmss_QueueType_HIGH_PRIORITY_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated)) < 0)
        {
            System_printf ("Error opening a High Priority Accumulation Rx queue \n");
            return -1;
        }
        *rxQInfo = Qmss_getQueueNumber (gRxQHnd);

        /* Setup high priority accumulation interrupts on the Rx queue.
         *
         * Let's configure the accumulator with the following settings:
         *      (1) Interrupt pacing disabled.
         *      (2) Interrupt on every received packet
         */
        intThreshold = RX_INT_THRESHOLD;
        numAccEntries = (intThreshold + 1) * 2;     /* Ping + Pong halves */
        accChannelNum = PA_ACC_CHANNEL_NUM + coreNum;

        /* Initialize the accumulator list memory (entries are 4 bytes). */
        memset ((void *) gHiPriAccumList, 0, numAccEntries * 4);

        /* Setup the accumulator settings */
        accCfg.channel = accChannelNum;
        accCfg.command = Qmss_AccCmd_ENABLE_CHANNEL;
        accCfg.queueEnMask = 0;
        accCfg.listAddress = Convert_CoreLocal2GlobalAddr((uint32_t) gHiPriAccumList);
        accCfg.queMgrIndex = Qmss_getQIDFromHandle(gRxQHnd);
        accCfg.maxPageEntries = (intThreshold + 1); /* Add an extra entry for holding the entry count */
        accCfg.timerLoadCount = 0;
        accCfg.interruptPacingMode = Qmss_AccPacingMode_LAST_INTERRUPT;
        accCfg.listEntrySize = Qmss_AccEntrySize_REG_D;
        accCfg.listCountMode = Qmss_AccCountMode_ENTRY_COUNT;
        accCfg.multiQueueMode = Qmss_AccQueueMode_SINGLE_QUEUE;

        /* Program the accumulator */
        if ((result = Qmss_programAccumulator (Qmss_PdspId_PDSP1, &accCfg)) != QMSS_ACC_SOK)
        {
            System_printf ("Error Programming high priority accumulator for channel : %d queue : %d error code : %d\n",
                            accCfg.channel, accCfg.queMgrIndex, result);
            return -1;
        }

        /* Register interrupts for the system event corresponding to the
         * accumulator channel we are using.
         */
#ifdef _TMS320C6X
        /* System event 48 - Accumulator Channel 0 */
        eventId = 48;

        /* Pick a interrupt vector id to use */
        vectId = 7;

        /* Register our ISR handle for this event */
        EventCombiner_dispatchPlug (eventId, (EventCombiner_FuncPtr)Cpsw_RxISR, (UArg)NULL, TRUE);

        /* Map the combiner's output event id (eventId/32) to hardware interrupt 8. */
        /* The HW int 8 is selected via CM.eventGroupHwiNum[] specified at cpsw_example.cfg */
        Hwi_eventMap(vectId, 1);

        /* Enable interrupt 8. */
        Hwi_enableInterrupt(vectId);
#else
        /* ARM: build a GIC Hwi for the QMSS INTD high-priority interrupt.
         * NOTE(review): the Hwi_Struct is heap-allocated and never freed —
         * acceptable for a one-shot test setup. */
        Hwi_Struct *handle;
        Error_Block eb;
        Hwi_Params hwiParams;

        Error_init(&eb);
        handle = (Hwi_Struct *)Memory_alloc(NULL, sizeof(Hwi_Struct),
                                            0, &eb);
        if (handle == NULL)
        {
            System_printf ("Error memory allocate Hwi_Struct \n");
            return -1;
        }
        Hwi_Params_init(&hwiParams);
        hwiParams.instance->name = NULL;
        hwiParams.arg = NULL;
        hwiParams.priority = 0x20U;
        hwiParams.eventId = 0;
        hwiParams.enableInt = TRUE;
        hwiParams.maskSetting = Hwi_MaskingOption_SELF;
        hwiParams.triggerSensitivity = 0x3; /* interrupt edge triggered */

        /* +32: skip the 32 private/software GIC interrupt IDs (SGI/PPI). */
        Hwi_construct(handle, CSL_ARM_GIC_QMSS_INTD_1_HIGH_0 + 32, (Hwi_FuncPtr)Cpsw_RxISR,
                      &hwiParams, &eb);
#endif
    }
    else {
        /* (Dead code — see NOTE above.) Polled path: open a general-purpose
         * queue at a fixed number instead of using the accumulator. */
        if ((gRxQHnd = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, RX_QUEUE_NUM_INIT, &isAllocated)) < 0)
        {
            System_printf ("Error opening a High Priority Accumulation Rx queue \n");
            return -1;
        }
        *rxQInfo = Qmss_getQueueNumber (gRxQHnd);
    }

    return (0);

}
430
/** ============================================================================
 *  @n@b Cpsw_RxISR
 *
 *  @b Description
 *  @n This API is the example application's High Priority Accumulation interrupt
 *      Service Handler (ISR). This API is called in interrupt context. This API
 *      fetches the Received packet (descriptor) from the accumulator list and
 *      verifies the data received to ensure that it is correct. On success,
 *      this API recycles the Rx descriptor back to Rx free queue for use again.
 *      This API processes the Ping and Pong accumulator lists alternatively.
 *
 *  @param[in]
 *  @n None
 *
 *  @return
 *  @n None
 *  =============================================================================
 */
void Cpsw_RxISR (void)
{
    Cppi_Desc* pCppiDesc;
    uint32_t count, i;

    /* Process ISR.
     *
     * Get the number of entries in accumulator list.
     * The hardware enqueues data alternatively to Ping/Pong buffer lists in
     * the accumulator. Hence, we need to track which list (Ping/Pong)
     * we serviced the last time and accordingly process the other one
     * this time around.
     */
    if (!gIsPingListUsed)
    {
        /* Serviced Pong list last time. So read off the Ping list now.
         * Entry 0 of each half is the packet count. */
        count = gHiPriAccumList[0];
    }
    else
    {
        /* Serviced Ping list last time. So read off the Pong list now */
        count = gHiPriAccumList[RX_INT_THRESHOLD + 1];
    }

    /* Process all the Results received
     *
     * Skip the first entry in the list that contains the
     * entry count and proceed processing results.
     */
    for (i = 1; i <= count; i ++)
    {
        /* Get the result descriptor from whichever half we are servicing. */
        if (!gIsPingListUsed)
        {
            /* Serviced Pong list last time. So read off the Ping list now */
            pCppiDesc = (Cppi_Desc *) gHiPriAccumList [i];
        }
        else
        {
            /* Serviced Ping list last time. So read off the Pong list now
             *
             * Skip over Ping list length to arrive at Pong list start.
             */
            pCppiDesc = (Cppi_Desc *) gHiPriAccumList [i + RX_INT_THRESHOLD + 1];
        }

        /* Descriptor size appended to the address in the last 4 bits.
         *
         * To get the true descriptor size, always mask off the last
         * 4 bits of the address.
         */
        pCppiDesc = (Ptr) ((uint32_t) pCppiDesc & 0xFFFFFFF0);

        /* Validate against the expected swinfo marker and payload;
         * VerifyPacket_port also recycles the descriptor to the free queue. */
        VerifyPacket_port (pCppiDesc, dest_emac_port_id, 0xaaaaaaaa);
    }

    /* Clear the accumulator list and save whether we used Ping/Pong
     * list information for next time around.
     */
    if (!gIsPingListUsed)
    {
        /* Just processed Ping list */
        gIsPingListUsed = 1;

        /* Clear the accumulator list after processing */
        memset ((void *) &gHiPriAccumList [0], 0, sizeof (uint32_t) * (RX_INT_THRESHOLD + 1));
    }
    else
    {
        /* Just processed Pong list */
        gIsPingListUsed = 0;

        /* Clear the accumulator list after processing */
        memset ((void *) &gHiPriAccumList[RX_INT_THRESHOLD + 1], 0, sizeof (uint32_t) * (RX_INT_THRESHOLD + 1));
    }

    /* Clear INTD: ack the accumulator channel interrupt and write the
     * end-of-interrupt vector so the next interrupt can fire. */
    Qmss_ackInterrupt(accChannelNum, 1);
    Qmss_setEoiVector(Qmss_IntdInterruptType_HIGH, accChannelNum);

    /* Done processing interrupt. Return */
    return;
}
538
/***************************************************************************************
 * FUNCTION PURPOSE: Power up PA subsystem
 ***************************************************************************************
 * DESCRIPTION: Powers up the NETCP (PA subsystem) power domain and enables the
 *              clocks of its PA, CPGMAC and SA modules, then blocks until the
 *              PSC state transition completes. Must run before any PASS device
 *              register access. NOTE(review): busy-waits with no timeout — if
 *              the PSC transition never completes this spins forever.
 ***************************************************************************************/
void passPowerUp (void)
{

    /* PASS power domain is turned OFF by default. It needs to be turned on before doing any
     * PASS device register access. This is not required for the simulator. */

    /* Set PASS Power domain to ON */
    CSL_PSC_enablePowerDomain (CSL_PSC_PD_NETCP);

    /* Enable the clocks for PASS modules */
    CSL_PSC_setModuleNextState (CSL_PSC_LPSC_PA, PSC_MODSTATE_ENABLE);
    CSL_PSC_setModuleNextState (CSL_PSC_LPSC_CPGMAC, PSC_MODSTATE_ENABLE);
    CSL_PSC_setModuleNextState (CSL_PSC_LPSC_SA, PSC_MODSTATE_ENABLE);

    /* Start the state transition */
    CSL_PSC_startStateTransition (CSL_PSC_PD_NETCP);

    /* Wait until the state transition process is completed. */
    while (!CSL_PSC_isStateTransitionDone (CSL_PSC_PD_NETCP));

}
565
/** ============================================================================
 *  @n@b TrigPacketToPdsp5
 *
 *  @b Description
 *  @n Sends one test packet to an EMAC interface through the PA Tx command
 *      PDSP using a pa_CMD_NEXT_ROUTE command. Passing emac_dest_port == -1
 *      sends pktDrop with no port specified (negative test); otherwise
 *      pktMatch is routed to the given port. On success the application
 *      transmit counter is incremented.
 *
 *  @param[in]
 *  @n emac_dest_port   Zero-based EMAC port index, or -1 for the drop packet.
 *
 *  @return int32_t
 *      -1  -   Error (no free Tx descriptor)
 *      0   -   Success
 *  =============================================================================
 */
int32_t TrigPacketToPdsp5(int emac_dest_port)
{
    Cppi_Desc* pCppiDesc;
    uint32_t dataBufferSize;
    /* In simulator runs no physical port exists, so leave it unspecified. */
    char psFlags = (cpswSimTest)?pa_EMAC_PORT_NOT_SPECIFIED:(char)(emac_dest_port + pa_EMAC_PORT_0);
    uint8_t *pkt = pktMatch;

    if (emac_dest_port == -1) {
        /* Negative test: send the non-matching packet with no target port. */
        psFlags = pa_EMAC_PORT_NOT_SPECIFIED;
        pkt = pktDrop;
    }

    paCmdInfo_t cmdInfo;
    uint32_t cmdBuf[4];
    uint16_t cmdSize = sizeof(cmdBuf);

    paCmdNextRoute_t routeCmdEth =  {
                                        0,              /* ctrlBitfield */
                                        pa_DEST_EMAC,   /* dest: route out of an EMAC port */
                                        0,              /* pktType: don't care */
                                        0,              /* flow Id */
                                        0,              /* Queue */
                                        0,              /* SWInfo 0 */
                                        0,              /* SWInfo 1 */
                                        0               /* multiRouteIndex (not used) */
                                    };

    /* Select the egress EMAC port (or "not specified"). */
    routeCmdEth.pktType_emacCtrl = psFlags;

    /* Command : Next route */
    cmdInfo.cmd = pa_CMD_NEXT_ROUTE;
    cmdInfo.params.route = routeCmdEth;

    /* Get a free descriptor from the global free queue we setup
     * during initialization.
     */
    if ((pCppiDesc = Qmss_queuePop (gTxFreeQHnd)) == NULL)
    {
        System_printf ("No Tx free descriptor. Cant run send/rcv test \n");
        return -1;
    }

    /* The descriptor address returned from the hardware has the
     * descriptor size appended to the address in the last 4 bits.
     *
     * To get the true descriptor size, always mask off the last
     * 4 bits of the address.
     */
    pCppiDesc = (Ptr) ((uint32_t) pCppiDesc & 0xFFFFFFF0);

    /* pktMatch and pktDrop are the same size, so this is valid for both. */
    dataBufferSize = sizeof (pktMatch);
    Cppi_setData (  Cppi_DescType_HOST,
                    (Cppi_Desc *) pCppiDesc,
                    (uint8_t *) Convert_CoreLocal2GlobalAddr((uint32_t)pkt),
                    dataBufferSize
                 );
    Cppi_setPacketLen (Cppi_DescType_HOST, (Cppi_Desc *)pCppiDesc, dataBufferSize);

    /* Serialize the next-route command into the PS data area format. */
    Pa_formatTxCmd (    1,                  /* nCmd */
                        &cmdInfo,           /* command info */
                        0,                  /* offset */
                        (Ptr)&cmdBuf[0],    /* Command buffer */
                        &cmdSize);          /* Command size */

    /* Attach the command in PS data */
    Cppi_setPSData (Cppi_DescType_HOST, (Cppi_Desc *)pCppiDesc, (uint8_t *)cmdBuf, cmdSize);

    /* Port selection travels in the command, not in the PS flags. */
    Cppi_setPSFlags(Cppi_DescType_HOST, (Cppi_Desc *)pCppiDesc, 0);

    /* Optional debug hook: halt the Tx command PDSP before it sees the packet. */
    if (pdsp_halt)
        mdebugHaltPdsp(nssGblCfgParams.layout.qPaTxCmdIndex);

    Qmss_queuePush (gPaTxQHnd[nssGblCfgParams.layout.qPaTxCmdIndex], pCppiDesc, dataBufferSize, SIZE_HOST_DESC, Qmss_Location_TAIL);

    /* Increment the application transmit counter */
    actual.txCount ++;

    /* Give some time for the PA to process the packet */
    CycleDelay (10000);

    return 0;
}
/** ============================================================================
 *  @n@b TrigPacketToPdsp0
 *
 *  @b Description
 *  @n Sends the pktMatch test packet so that it arrives at PDSP0 (PA input
 *      classification) from an interface: in no-boot mode it is pushed to the
 *      CPSW Ethernet Tx queue (looping back through the switch if configured),
 *      otherwise directly to the PA input queue. The target EMAC port is
 *      carried in the PS flags (NSS Gen1) or the destination tag (NSS Gen2).
 *      On success the application transmit counter is incremented.
 *
 *  @param[in]
 *  @n emac_dest_port   Zero-based EMAC port index for the forced egress port.
 *
 *  @return int32_t
 *      -1  -   Error (no free Tx descriptor)
 *      0   -   Success
 *  =============================================================================
 */
int32_t TrigPacketToPdsp0(int emac_dest_port)
{
    Cppi_Desc* pCppiDesc;
    uint32_t dataBufferSize;
    /* In simulator runs no physical port exists, so leave it unspecified. */
    char psFlags = (cpswSimTest)?pa_EMAC_PORT_NOT_SPECIFIED:(char)(emac_dest_port + pa_EMAC_PORT_0);
    Cppi_DescTag tag;

    /* Get a free descriptor from the global free queue we setup
     * during initialization.
     */
    if ((pCppiDesc = Qmss_queuePop (gTxFreeQHnd)) == NULL)
    {
        System_printf ("No Tx free descriptor. Cant run send/rcv test \n");
        return -1;
    }

    /* The descriptor address returned from the hardware has the
     * descriptor size appended to the address in the last 4 bits.
     *
     * To get the true descriptor size, always mask off the last
     * 4 bits of the address.
     */
    pCppiDesc = (Ptr) ((uint32_t) pCppiDesc & 0xFFFFFFF0);

    dataBufferSize = sizeof (pktMatch);
    //dataBufferSize = 1000;
    Cppi_setData (  Cppi_DescType_HOST,
                    (Cppi_Desc *) pCppiDesc,
                    (uint8_t *) Convert_CoreLocal2GlobalAddr((uint32_t)pktMatch),
                    dataBufferSize
                 );
    Cppi_setPacketLen (Cppi_DescType_HOST, (Cppi_Desc *)pCppiDesc, dataBufferSize);

    /* Force the packet to the specific EMAC port */
    if (!nssGblCfgParams.layout.fNssGen2)
    {
        /* NSS Gen1: the port is carried in the descriptor PS flags. */
        Cppi_setPSFlags(Cppi_DescType_HOST, (Cppi_Desc *)pCppiDesc, psFlags);
    }
    else
    {
        /* NSS Gen2: the port is carried in the low destination tag. */
        tag.srcTagHi = 0;
        tag.srcTagLo = 0;
        tag.destTagHi = 0;
        tag.destTagLo = psFlags;
        Cppi_setTag(Cppi_DescType_HOST, (Cppi_Desc *)pCppiDesc, (Cppi_DescTag *)&tag);
    }

    /* Clear PS Data */
    Cppi_setPSLen (Cppi_DescType_HOST, (Cppi_Desc *)pCppiDesc, 0);

    /* Optional debug hook: halt PDSP0 before it sees the packet. */
    if (pdsp_halt)
        mdebugHaltPdsp(0);
    /* Send the packet out the mac. It will loop back to PA if the mac/switch
     * have been configured properly
     */
    if (no_bootMode == TRUE)
        Qmss_queuePush (gPaTxQHnd[nssGblCfgParams.layout.qCpswEthIndex], pCppiDesc, dataBufferSize, SIZE_HOST_DESC, Qmss_Location_TAIL);
    else {
        Qmss_queuePush (gPaTxQHnd[nssGblCfgParams.layout.qPaInputIndex], pCppiDesc, dataBufferSize, SIZE_HOST_DESC, Qmss_Location_TAIL);
    }

    /* Increment the application transmit counter */
    actual.txCount ++;

    /* Give some time for the PA to process the packet */
    //CycleDelay (10000);

    return 0;
}
747
/** ============================================================================
 *  @n@b VerifyPacket_queue
 *
 *  @b Description
 *  @n Drains the packet-capture queue for the port under test (ingress or
 *      egress, per 'direction'), verifying each captured packet against the
 *      expected swinfo marker and the pktMatch payload (bytes 42 onward,
 *      i.e. the UDP payload past the headers). Every descriptor is recycled
 *      to the Rx free queue. Returns 0 on success, -1 on the first mismatch.
 *
 *  @param[in]
 *  @n direction        Non-zero = ingress capture queue, 0 = egress.
 *  @n emac_dest_port   NOTE(review): this parameter is never read — the queue
 *                      is derived from the global dest_emac_port_id instead.
 *                      Looks like the parameter was intended here; confirm
 *                      against the callers before changing.
 *  @n swInfoMatch      Expected softwareInfo0 marker on captured packets.
 *
 *  @return int32_t
 *      -1  -   Error (swinfo or payload mismatch)
 *      0   -   Success
 *  =============================================================================
 */
int32_t VerifyPacket_queue (int direction, int emac_dest_port, uint32_t swInfoMatch)
{
    Cppi_Desc *pCppiDesc;
    Cppi_HostDesc *pHostDesc;
    uint8_t *pDataBuffer;
    int32_t i;
    Qmss_QueueHnd queue;

    /* Select the capture queue for the port under test. */
    if (direction)
    {
        queue = dest_emac_port_id + PA_PKT_CAP_INGRESS_CAP_BASE_QUEUE;
    }
    else
    {
        queue = dest_emac_port_id + PA_PKT_CAP_EGRESS_CAP_BASE_QUEUE;
    }
    /* Drain every captured descriptor from the queue. */
    while ((pCppiDesc = Qmss_queuePop (queue)) != NULL)
    {
        /* The descriptor address returned from the hardware has the
         * descriptor size appended to the address in the last 4 bits.
         *
         * To get the true descriptor size, always mask off the last
         * 4 bits of the address.
         */
        pCppiDesc = (Ptr) ((uint32_t) pCppiDesc & 0xFFFFFFF0);

        pHostDesc = (Cppi_HostDesc *)pCppiDesc;

        /* Verify the application software info we received is same
         * as what we had sent earlier.
         */
        if (pHostDesc->softwareInfo0 != swInfoMatch)
        {
            System_printf ("VerifyPacket: Found an entry in receive queue with swinfo0 = 0x%08x, expected 0x%08x\n",
                           pHostDesc->softwareInfo0, 0xaaaaaaaa);

            pHostDesc->buffLen = pHostDesc->origBufferLen;
            Qmss_queuePush (gRxFreeQHnd, (Ptr)pHostDesc, pHostDesc->buffLen, SIZE_HOST_DESC, Qmss_Location_TAIL);

            return -1;
        }

        /* Verify the packet matches what we had sent; start at byte 42
         * (past Ethernet/IP/UDP headers) since headers may be rewritten. */
        pDataBuffer = (uint8_t *) pHostDesc->buffPtr;
        for (i = 42; i < sizeof (pktMatch); i++)
        {
            if (pktMatch[i] != pDataBuffer[i])
            {
                System_printf ("VerifyPacket: Byte %d expected 0x%02x, found 0x%02x\n", i, pktMatch[i], pDataBuffer[i]);
                System_flush();

                /* Free the packet back to the Rx FDQ */
                pHostDesc->buffLen = pHostDesc->origBufferLen;
                Qmss_queuePush (gRxFreeQHnd, (Ptr)pHostDesc, pHostDesc->buffLen, SIZE_HOST_DESC, Qmss_Location_TAIL);
                return -1;
            }
        }

        //System_printf ("Packet Received Verified Successfully!\n");

        /* Increment Rx counter to indicate the number of successfully
         * received packets by the example app.
         */
        actual.cloneCaptureCount ++;

        /* Reset the buffer length and put the descriptor back on the free queue */
        pHostDesc->buffLen = pHostDesc->origBufferLen;
        Qmss_queuePush (gRxFreeQHnd, (Ptr)pHostDesc, pHostDesc->buffLen, SIZE_HOST_DESC, Qmss_Location_TAIL);
    }
    /* Verify packet done. Return success. */
    return 0;
}
839
840/** ============================================================================
841 * @n@b VerifyPacket_portMirror
842 *
843 * @b Description
844 * @n This API verifies a packet received against the expected data and
845 * returns 0 to inidcate success and -1 to indicate a mismatch.
846 *
847 * @param[in]
848 * @n pCppiDesc Packet descriptor received.
849 *
850 * @return int32_t
851 * -1 - Error
852 * 0 - Success
853 * =============================================================================
854 */
855int32_t VerifyPacket_port (Cppi_Desc* pCppiDesc, int emac_dest_port, uint32_t swInfoMatch)
856{
857 Cppi_HostDesc *pHostDesc;
858 uint8_t *pDataBuffer;
859 int32_t i;
860 uint32_t infoLen;
861 pasahoLongInfo_t *pinfo;
862 uint8_t portNum;
863
864 pHostDesc = (Cppi_HostDesc *)pCppiDesc;
865
866 /* Verify the application software info we received is same
867 * as what we had sent earlier.
868 */
869 if (pHostDesc->softwareInfo0 != swInfoMatch)
870 {
871 System_printf ("VerifyPacket: Found an entry in receive queue with swinfo0 = 0x%08x, expected 0x%08x\n",
872 pHostDesc->softwareInfo0, 0xaaaaaaaa);
873
874 pHostDesc->buffLen = pHostDesc->origBufferLen;
875 Qmss_queuePush (gRxFreeQHnd, (Ptr)pHostDesc, pHostDesc->buffLen, SIZE_HOST_DESC, Qmss_Location_TAIL);
876
877 return -1;
878 }
879
880 /* Get the parse information, make sure there is an L4 offset */
881 if (Cppi_getPSData (Cppi_DescType_HOST, Cppi_PSLoc_PS_IN_DESC, (Cppi_Desc *)pHostDesc, (uint8_t **)&pinfo, &infoLen) != CPPI_SOK) {
882 System_printf ("VerifyPacket: Error getting control info from received data packet\n");
883 return (-1);
884 }
885 else if(!cpswSimTest)
886 {
887 /* do not check the port number if linux boot is true */
888 if ( (no_bootMode == TRUE) && (emac_dest_port != -1) )
889 {
890
891 /* Verify the input port number */
892 portNum = PASAHO_LINFO_READ_INPORT(pinfo);
893
894 if ( (portNum == (pa_EMAC_PORT_0 + emac_dest_port)) )
895 {
896 /* actual port number */
897 actual.emacRxCount ++;
898 }
899 else if ( (portNum == INGRESS_MIRROR_PORT) ||
900 (portNum == EGRESS_MIRROR_PORT) )
901 {
902 /* expected port number */
903 actual.cloneCaptureCount ++;
904 }
905 else
906 {
907 /* Un expected port for NON CPPI Port egress test */
908 System_printf ("VerifyPacket: receive packet from unexpected EMAC PORT %d (expected %d)\n", portNum - 1, emac_dest_port);
909 System_flush();
910 }
911 }
912 }
913
914 /* Verify the packet matches what we had sent */
915 pDataBuffer = (uint8_t *) pHostDesc->buffPtr;
916 for (i = 42; i < sizeof (pktMatch); i++)
917 {
918 if (pktMatch[i] != pDataBuffer[i])
919 {
920 System_printf ("VerifyPacket: Byte %d expected 0x%02x, found 0x%02x\n", i, pktMatch[i], pDataBuffer[i]);
921 System_flush();
922
923 /* Free the packet back to the Rx FDQ */
924 pHostDesc->buffLen = pHostDesc->origBufferLen;
925 Qmss_queuePush (gRxFreeQHnd, (Ptr)pHostDesc, pHostDesc->buffLen, SIZE_HOST_DESC, Qmss_Location_TAIL);
926 return -1;
927 }
928 }
929
930 /* Reset the buffer lenght and put the descriptor back on the free queue */
931 pHostDesc->buffLen = pHostDesc->origBufferLen;
932 Qmss_queuePush (gRxFreeQHnd, (Ptr)pHostDesc, pHostDesc->buffLen, SIZE_HOST_DESC, Qmss_Location_TAIL);
933
934 /* Verify packet done. Return success. */
935 return 0;
936}
937
938/** ============================================================================
939 * @n@b Download_PAFirmware
940 *
941 * @b Description
942 * @n This API downloads the PA firmware required for PDSP operation.
943 *
944 * @param[in]
945 * @n None
946 *
947 * @return int32_t
948 * -1 - Error
949 * 0 - Success
950 * =============================================================================
951 */
952int32_t Download_PAFirmware (void)
953{
954 extern Pa_Handle gPAInstHnd;
955
956 int i;
957
958 /* Hold the PA in reset state during download */
959 Pa_resetControl (gPAInstHnd, pa_STATE_RESET);
960
961
962 for ( i = 0; i < nssGblCfgParams.layout.numPaPdsps; i++)
963 {
964
965 Pa_downloadImage (gPAInstHnd, i,
966 (Ptr)nssGblCfgParams.layout.paPdspImage[i],
967 nssGblCfgParams.layout.paPdspImageSize[i]);
968 }
969
970 /* Enable the PA back */
971 Pa_resetControl (gPAInstHnd, pa_STATE_ENABLE);
972
973 return 0;
974}
975
/** ============================================================================
 *  @n@b CycleDelay
 *
 *  @b Description
 *  @n Busy-waits for approximately 'count' CPU cycles. On C6x DSP the TSCL
 *      register is polled; on ARM the CP15 cycle counter (c9, c13, 0 — the
 *      PMU cycle count register) is read via inline MRC instructions.
 *      NOTE(review): the ARM path assumes the PMU cycle counter has already
 *      been enabled and is running — confirm against the startup code.
 *
 *  @param[in]
 *  @n count    Number of cycles to wait; values <= 0 return immediately.
 *
 *  @return     None
 * =============================================================================
 */
void CycleDelay (int32_t count)
{
#ifdef _TMS320C6X
    uint32_t TSCLin;

    if (count <= 0)
        return;

    /* Get the current TSCL */
    TSCLin = TSCL ;

    /* Unsigned subtraction is wrap-safe across a single TSCL rollover */
    while ((TSCL - TSCLin) < (uint32_t)count);
#else
    uint32_t start, end, cycles;

    if (count <= 0)
        return;

    /* Read the current cycle counter value */
    __asm__ __volatile__ ("MRC p15, 0, %0, c9, c13, 0\t\n": "=r"(start));
    cycles = (uint32_t)count;
    /* If the 32-bit counter will wrap before the deadline, first spin until
     * the wrap occurs, subtract the cycles consumed up to the wrap, and
     * restart the wait from 0. (0x100000000 is a 64-bit constant, so the
     * comparison and subtraction are evaluated in 64-bit arithmetic.)
     */
    if ((0x100000000 - start) < (uint32_t)cycles) {
        do {
            __asm__ __volatile__ ("MRC p15, 0, %0, c9, c13, 0\t\n": "=r"(end));
        } while (end > start);
        cycles -= 0x100000000 - start;
        start = 0;
    }
    /* Spin until the requested number of cycles has elapsed */
    do {
        __asm__ __volatile__ ("MRC p15, 0, %0, c9, c13, 0\t\n": "=r"(end));
    } while ((end - start) < cycles);
#endif
}
1008
/* Terminate the application by handing the exit code to the BIOS kernel. */
void APP_exit (int32_t code)
{
    BIOS_exit(code);
}
1013
1014
/* Bring up the complete test framework: resource manager (optional), QMSS,
 * CPPI, the PA LLD, the CPSW switch (no-boot mode only), and the Tx/Rx/PASS
 * data paths. Each step logs its outcome; the first failure aborts with -1,
 * success returns 0. Steps must run in this order: queues (QMSS) before
 * DMA (CPPI), both before PA, and the data paths last.
 */
int setupFramework(void)
{
#if RM
    /* Optional: claim hardware resources through the Resource Manager first */
    if (setupRm ())
    {
      System_printf ("Function setupRm failed\n");
      System_flush();
      return (-1);
    }
#endif
    /* Initialize the components required to run the example:
     * (1) QMSS
     * (2) CPPI
     * (3) Ethernet switch subsystem + MDIO + SGMII
     */

    /* Initialize QMSS */
    if (Init_Qmss () != 0)
    {
        System_printf ("QMSS init failed \n");
        System_flush();
        return (-1);
    }
    else
    {
        System_printf ("QMSS successfully initialized \n");
        System_flush();
    }

    /* Initialize CPPI */
    if (Init_Cppi () != 0)
    {
        System_printf ("CPPI init failed \n");
        System_flush();
        return (-1);
    }
    else
    {
        System_printf ("CPPI successfully initialized \n");
        System_flush();
    }

    /* Init PA LLD */
    if (Init_PASS () != 0)
    {
        System_printf ("PASS init failed \n");
        System_flush();
        return (-1);
    }
    else
    {
        System_printf ("PASS successfully initialized \n");
        System_flush();
    }
#ifndef __LINUX_USER_SPACE
    /* The switch is only initialized here when no other master (e.g. Linux)
     * has booted and configured it already. */
    if (no_bootMode == TRUE)
    {
        /* Initialize the CPSW switch */
        if (Init_Cpsw () != 0)
        {
            System_printf ("Ethernet subsystem init failed \n");
            System_flush();
            return (-1);
        }
        else
        {
            System_printf ("Ethernet subsystem successfully initialized \n");
            System_flush();
        }
    }
#endif

    /* Setup Tx */
    if (Setup_Tx () != 0)
    {
        System_printf ("Tx setup failed \n");
        System_flush();
        return (-1);
    }
    else
    {
        System_printf ("Tx setup successfully done \n");
        System_flush();
    }

    /* Setup Rx */
    if (Setup_Rx () != 0)
    {
        System_printf ("Rx setup failed \n");
        System_flush();
        return (-1);
    }
    else
    {
        System_printf ("Rx setup successfully done \n");
        System_flush();
    }

    /* Setup PA */
    if (Setup_PASS () != 0)
    {
        System_printf ("PASS setup failed \n");
        System_flush();
        return (-1);
    }
    else
    {
        System_printf ("PASS setup successfully done \n");
        System_flush();
    }

    return 0;
}
1128int32_t pa_global_config (paCtrlInfo_t* cfgInfo, uint32_t swInfoId )
1129{
1130 int32_t j;
1131 uint16_t cmdSize;
1132 paCmdReply_t cmdReplyInfo = { pa_DEST_HOST, /* Replies go to the host */
1133 0, /* User chosen ID to go to swinfo0 */
1134 0, /* Destination queue */
1135 0 /* Flow ID */
1136 };
1137 paReturn_t retVal;
1138 paEntryHandle_t retHandle;
1139 int32_t handleType, cmdDest;
1140 uint32_t psCmd = ((uint32_t)(4 << 5) << 24);
1141 uint32_t myswinfo[] = {0x11112222, 0x33334444};
1142 Cppi_HostDesc* pHostDesc;
1143
1144 /* Get a Tx free descriptor to send a command to the PA PDSP */
1145 if ((pHostDesc = Qmss_queuePop (gTxFreeQHnd)) == NULL)
1146 {
1147 System_printf ("Error obtaining a Tx free descriptor \n");
1148 return -1;
1149 }
1150
1151 /* The descriptor address returned from the hardware has the
1152 * descriptor size appended to the address in the last 4 bits.
1153 *
1154 * To get the true descriptor pointer, always mask off the last
1155 * 4 bits of the address.
1156 */
1157 pHostDesc = (Ptr) ((uint32_t) pHostDesc & 0xFFFFFFF0);
1158
1159 /* Populate the Rx free descriptor with the fixed command buffer. */
1160 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)pHostDesc, (uint8_t *)Convert_CoreLocal2GlobalAddr((uint32_t)gPaCmdBuf1), pa_EMAC_PORT_CONFIG_MIN_CMD_BUF_SIZE_BYTES);
1161
1162 /* Save original buffer information */
1163 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)pHostDesc, (uint8_t *)Convert_CoreLocal2GlobalAddr((uint32_t)gPaCmdBuf1), pa_EMAC_PORT_CONFIG_MIN_CMD_BUF_SIZE_BYTES);
1164
1165 cmdSize = pHostDesc->buffLen;
1166 cmdReplyInfo.replyId = swInfoId; /* unique for each pa control command */
1167
1168 /* Get the PA response queue number and populate the destination queue number
1169 * in the PA response configuration.
1170 */
1171 cmdReplyInfo.queue = Qmss_getQIDFromHandle(gPaCfgCmdRespQHnd);
1172 cmdReplyInfo.flowId = (uint8_t)Cppi_getFlowId(gRxFlowHnd);
1173
1174 retVal = Pa_control (gPAInstHnd,
1175 cfgInfo,
1176 (paCmd_t) pHostDesc->buffPtr,
1177 &cmdSize,
1178 &cmdReplyInfo,
1179 &cmdDest);
1180 if (retVal != pa_OK)
1181 {
1182 System_printf ("Pa_control returned error %d\n", retVal);
1183 return -1;
1184 }
1185
1186 /* This sets the extended info for descriptors, and this is required so PS info
1187 * goes to the right spot
1188 */
1189 Cppi_setSoftwareInfo (Cppi_DescType_HOST, (Cppi_Desc *)pHostDesc, (uint8_t *)myswinfo);
1190
1191 /* Set the buffer length to the size used. It will be restored when the descriptor
1192 * is returned
1193 */
1194 Cppi_setPacketLen (Cppi_DescType_HOST, (Cppi_Desc *)pHostDesc, cmdSize);
1195 pHostDesc->buffLen = cmdSize;
1196
1197 /* Mark the packet as a configuration packet */
1198 Cppi_setPSData (Cppi_DescType_HOST, (Cppi_Desc *)pHostDesc, (uint8_t *)&psCmd, 4);
1199
1200 if (pdsp_halt)
1201 mdebugHaltPdsp(cmdDest);
1202
1203 /* Send the command to the PA and wait for the return */
1204 Qmss_queuePush (gPaTxQHnd[cmdDest],
1205 pHostDesc,
1206 pHostDesc->buffLen,
1207 SIZE_HOST_DESC,
1208 Qmss_Location_TAIL
1209 );
1210
1211 /* Poll on the PA response queue to see if response from PA has come */
1212 for (j = 0; j < 100; j++)
1213 {
1214 CycleDelay (1000);
1215
1216 if (Qmss_getQueueEntryCount (gPaCfgCmdRespQHnd) > 0)
1217 {
1218 /* We have a response from PA PDSP for the command we submitted earlier for
1219 * MAC address addition.
1220 */
1221 pHostDesc = Qmss_queuePop (gPaCfgCmdRespQHnd);
1222
1223 /* Clear the size bytes */
1224 pHostDesc = (Ptr) ((uint32_t) pHostDesc & 0xFFFFFFF0);
1225
1226 if (pHostDesc->softwareInfo0 != cmdReplyInfo.replyId)
1227 {
1228 System_printf ("Found an entry in PA response queue with swinfo0 = 0x%08x, expected 0x%08x\n",
1229 pHostDesc->softwareInfo0, cmdReplyInfo.replyId);
1230 pHostDesc->buffLen = pHostDesc->origBufferLen;
1231 Qmss_queuePush (gRxFreeQHnd, pHostDesc, pHostDesc->buffLen, SIZE_HOST_DESC, Qmss_Location_TAIL);
1232
1233 return -1;
1234 }
1235
1236 retVal = Pa_forwardResult (gPAInstHnd, (Ptr)pHostDesc->buffPtr, &retHandle, &handleType, &cmdDest);
1237 if (retVal != pa_OK)
1238 {
1239 System_printf ("PA sub-system rejected Pa_control command\n");
1240 return -1;
1241 }
1242
1243 /* Reset the buffer lenght and put the descriptor back on the Tx free queue */
1244 pHostDesc->buffLen = pHostDesc->origBufferLen;
1245 Qmss_queuePush (gRxFreeQHnd, pHostDesc, pHostDesc->buffLen, SIZE_HOST_DESC, Qmss_Location_TAIL);
1246
1247 break;
1248 }
1249 }
1250
1251 if (j == 100)
1252 {
1253 System_printf ("pa_global_config(): Timeout waiting for reply from PA to Pa_control command\n");
1254 return -1;
1255 }
1256
1257 return 0;
1258}
1259
1260int32_t pcap_enable_global_ingress ( )
1261{
1262 paCtrlInfo_t cfgInfo;
1263 paPacketControl2Config_t pktCtrl2Cfg;
1264 uint32_t swInfoId = 0xFFFF0001;
1265
1266 memset (&cfgInfo, 0, sizeof (cfgInfo));
1267 memset (&pktCtrl2Cfg, 0, sizeof (pktCtrl2Cfg));
1268
1269 pktCtrl2Cfg.validBitMap = pa_PKT_CTRL2_VALID_EMAC_IF_IGRESS_CLONE;
1270 pktCtrl2Cfg.ctrlBitMap = pa_PKT_CTRL_EMAC_IF_IGRESS_CLONE;
1271
1272 cfgInfo.code = pa_CONTROL_SYS_CONFIG;
1273 cfgInfo.params.sysCfg.pPktControl2 = &pktCtrl2Cfg;
1274
1275 pa_global_config (&cfgInfo, swInfoId );
1276
1277 return (0);
1278}
1279
1280int32_t pcap_enable_global_egress ( )
1281{
1282 paCtrlInfo_t cfgInfo;
1283 paPacketControl2Config_t pktCtrl2Cfg;
1284 uint32_t swInfoId = 0xFFFF0002;
1285
1286 memset (&cfgInfo, 0, sizeof (cfgInfo));
1287 memset (&pktCtrl2Cfg, 0, sizeof (pktCtrl2Cfg));
1288
1289 pktCtrl2Cfg.validBitMap = pa_PKT_CTRL2_VALID_EMAC_IF_EGRESS_CLONE;
1290 pktCtrl2Cfg.ctrlBitMap = pa_PKT_CTRL_EMAC_IF_EGRESS_CLONE;
1291
1292 cfgInfo.code = pa_CONTROL_SYS_CONFIG;
1293 cfgInfo.params.sysCfg.pPktControl2 = &pktCtrl2Cfg;
1294
1295 pa_global_config (&cfgInfo, swInfoId );
1296
1297 return 0;
1298}
1299
1300int32_t pcap_disable_global_ingress ( )
1301{
1302 paCtrlInfo_t cfgInfo;
1303 paPacketControl2Config_t pktCtrl2Cfg;
1304 uint32_t swInfoId = 0xFFFF0003;
1305
1306 memset (&cfgInfo, 0, sizeof (cfgInfo));
1307 memset (&pktCtrl2Cfg, 0, sizeof (pktCtrl2Cfg));
1308
1309 pktCtrl2Cfg.validBitMap = pa_PKT_CTRL2_VALID_EMAC_IF_IGRESS_CLONE;
1310 pktCtrl2Cfg.ctrlBitMap = ~pa_PKT_CTRL_EMAC_IF_IGRESS_CLONE;
1311
1312 cfgInfo.code = pa_CONTROL_SYS_CONFIG;
1313 cfgInfo.params.sysCfg.pPktControl2 = &pktCtrl2Cfg;
1314
1315 pa_global_config (&cfgInfo, swInfoId );
1316
1317 return 0;
1318}
1319
1320int32_t pcap_disable_global_egress ( )
1321{
1322 paCtrlInfo_t cfgInfo;
1323 paPacketControl2Config_t pktCtrl2Cfg;
1324 uint32_t swInfoId = 0xFFFF0004;
1325
1326 memset (&cfgInfo, 0, sizeof (cfgInfo));
1327 memset (&pktCtrl2Cfg, 0, sizeof (pktCtrl2Cfg));
1328
1329 pktCtrl2Cfg.validBitMap = pa_PKT_CTRL2_VALID_EMAC_IF_EGRESS_CLONE;
1330 pktCtrl2Cfg.ctrlBitMap = ~pa_PKT_CTRL_EMAC_IF_EGRESS_CLONE;
1331
1332 cfgInfo.code = pa_CONTROL_SYS_CONFIG;
1333 cfgInfo.params.sysCfg.pPktControl2 = &pktCtrl2Cfg;
1334
1335 pa_global_config (&cfgInfo, swInfoId );
1336
1337 return 0;
1338}
1339
/* Run one packet-capture test pass.
 *
 * ingress            Non-zero: ingress capture (packets injected at PDSP0);
 *                    zero: egress capture (packets injected at PDSP5).
 * dest_emac_port_id  Zero-based destination EMAC port, or -1 for the CPPI
 *                    egress case (no EMAC receive expected).
 *
 * Configures per-port capture plus the matching global clone enable, sends
 * MAX_NUM_PACKETS, then polls (with retries) until the 'actual' counters
 * match 'expected'. Capture is always disabled again (both directions)
 * before returning. Returns 0 on pass, -1 on failure.
 */
int32_t pkt_capture_test(int ingress, int dest_emac_port_id)
{
    paCtrlInfo_t      paCtrl;
    uint32_t          cmdId;
    int               i, retVal = 0;
    int               maxRetry = MAX_RETRIES;
    paReturn_t        paRetVal;
    /* Can be at most pa_MAX_NUM_EMAC_PORT_CONFIG_ENTRIES */
    paPktCaptureConfig_t pktCapCfg[1];

    /* Clear the paCtrl before configuration */
    memset (&paCtrl, 0, sizeof (paCtrl));

    /* Clear the expected counts */
    memset (&expected, 0, sizeof (expected));

    /* Clear the packet capture config structure */
    memset (pktCapCfg, 0, sizeof (pktCapCfg));

    /* Reply id used for all capture configuration commands in this test */
    cmdId = 0xbbbbcccc;

    if (ingress)
    {
        /* Clone ingress traffic on the port to a per-port capture queue */
        pktCapCfg[0].ctrlBitMap       = pa_PKT_CLONE_ENABLE | pa_PKT_CLONE_INGRESS;
        pktCapCfg[0].portToBeCaptured = (dest_emac_port_id + pa_EMAC_PORT_0);
        pktCapCfg[0].flowId           = PA_PKT_CAP_FLOW;
        pktCapCfg[0].queue            = (dest_emac_port_id + PA_PKT_CAP_INGRESS_CAP_BASE_QUEUE);
        pktCapCfg[0].swInfo0          = PA_PKT_CAP_SWINFO_HIGH_WORD | dest_emac_port_id;

        paCtrl.code                           = pa_CONTROL_EMAC_PORT_CONFIG;
        paCtrl.params.emacPortCfg.numEntries  = 1;
        paCtrl.params.emacPortCfg.cfgType     = pa_EMAC_PORT_CFG_PKT_CAPTURE;
        paCtrl.params.emacPortCfg.u.pktCapCfg = &pktCapCfg[0];

        paRetVal = pa_global_config(&paCtrl, cmdId);
        if (paRetVal != pa_OK)
            return -1;
        paRetVal = pcap_enable_global_ingress();
        if (paRetVal != pa_OK)
            return -1;

    }
    else
    {
        /* Clone egress traffic (no pa_PKT_CLONE_INGRESS flag) */
        pktCapCfg[0].ctrlBitMap       = pa_PKT_CLONE_ENABLE;
        pktCapCfg[0].portToBeCaptured = (dest_emac_port_id + pa_EMAC_PORT_0);
        pktCapCfg[0].flowId           = PA_PKT_CAP_FLOW;
        pktCapCfg[0].queue            = (dest_emac_port_id + PA_PKT_CAP_EGRESS_CAP_BASE_QUEUE);
        pktCapCfg[0].swInfo0          = PA_PKT_CAP_SWINFO_HIGH_WORD | dest_emac_port_id;

        paCtrl.code                           = pa_CONTROL_EMAC_PORT_CONFIG;
        paCtrl.params.emacPortCfg.numEntries  = 1;
        paCtrl.params.emacPortCfg.cfgType     = pa_EMAC_PORT_CFG_PKT_CAPTURE;
        paCtrl.params.emacPortCfg.u.pktCapCfg = &pktCapCfg[0];

        paRetVal = pa_global_config(&paCtrl, cmdId);
        if (paRetVal != pa_OK)
            return -1;
        paRetVal = pcap_enable_global_egress();
        if (paRetVal != pa_OK)
            return -1;
    }

    /* Set the expectations */
    if (dest_emac_port_id != -1)
    {
        expected.emacRxCount       = MAX_NUM_PACKETS;
        expected.cloneCaptureCount = MAX_NUM_PACKETS;
    }
    else
    {
        expected.emacRxCount       = 0; /* CPPI egress test */
        expected.cloneCaptureCount = MAX_NUM_PACKETS;
    }
    expected.txCount = MAX_NUM_PACKETS;

    /* Clear the actual counts */
    memset (&actual, 0, sizeof (actual));

    /* Inject the test packets; the configured clone operation copies them
     * into the capture queue as they pass through the PA. */
    for (i = 0; i < MAX_NUM_PACKETS; i ++)
    {
        if (ingress) {
            if (TrigPacketToPdsp0 (dest_emac_port_id) != 0)
            {
                System_printf ("Packet %d send to pdsp0 failed \n", i);
                System_flush();
                return (-1);
            }
        }
        else
        {
            if (TrigPacketToPdsp5 (dest_emac_port_id) != 0)
            {
                System_printf ("Packet %d send to pdsp5 failed \n", i);
                System_flush();
                return (-1);
            }
        }
    }

    /* Poll until expected counters match actual, with bounded retries.
     * NOTE(review): when dest_emac_port_id is -1 the swinfo match word
     * becomes PA_PKT_CAP_SWINFO_HIGH_WORD | 0xFFFFFFFF — confirm this is
     * the intended match value for the CPPI egress case. */
    maxRetry = MAX_NUM_PACKETS*2;
    do {
        /* Wait for some cycles to receive all packets */
        CycleDelay(200000);

        /* This would validate all the captured packets in the host */
        if (VerifyPacket_queue(ingress, dest_emac_port_id, PA_PKT_CAP_SWINFO_HIGH_WORD | dest_emac_port_id) != 0)
        {
            System_printf (" verify packet for host error \n");
            System_flush();
            retVal = -1;
            break;
        }

        if (memcmp(&expected,&actual, sizeof(pktStats_t)))
        {
            if (--maxRetry == 0) {
                System_printf (" Packet capture test Failed \n");
                System_flush();
                retVal = -1;
            }
        }
        else {
            break;
        }
    }while (maxRetry);

    /* Report the tabulated result: port, tx count, emac rx, clone capture */
    if (ingress)
    {
        if (retVal == 0)
            System_printf ("|Ingress Packet Capture Test \t|%d\t\t|%d\t\t|%d\t\t|%d\t\t\t|PASS\n", dest_emac_port_id + pa_EMAC_PORT_0,actual.txCount, actual.emacRxCount, actual.cloneCaptureCount);
        else
            System_printf ("|Ingress Packet Capture Test \t|%d\t\t|%d\t\t|%d\t\t|%d\t\t\t|FAIL\n", dest_emac_port_id + pa_EMAC_PORT_0,actual.txCount, actual.emacRxCount, actual.cloneCaptureCount);
    }
    else
    {
        if (retVal == 0)
            System_printf ("|Egress Packet Capture Test \t|%d\t\t|%d\t\t|%d\t\t|%d\t\t\t|PASS\n", dest_emac_port_id + pa_EMAC_PORT_0,actual.txCount, actual.emacRxCount, actual.cloneCaptureCount);
        else
            System_printf ("|Egress Packet Capture Test \t|%d\t\t|%d\t\t|%d\t\t|%d\t\t\t|FAIL\n", dest_emac_port_id + pa_EMAC_PORT_0,actual.txCount, actual.emacRxCount, actual.cloneCaptureCount);
    }

    System_flush();

    /* Always disable both directions of global capture before returning */
    paRetVal = pcap_disable_global_ingress();
    if (paRetVal != pa_OK)
      return (-1);
    paRetVal = pcap_disable_global_egress();
    if (paRetVal != pa_OK)
      return (-1);
    return (retVal);
}
1495
1496int32_t port_mirror_test(int direction, int dest_emac_port_id)
1497{
1498 paCtrlInfo_t paCtrl;
1499 uint32_t cmdId;
1500 int i, maxRetry = MAX_RETRIES;
1501 int32_t retVal = 0;
1502 paReturn_t paRetVal;
1503
1504 /* can be at most pa_MAX_NUM_EMAC_PORT_CONFIG_ENTRIES */
1505 paPortMirrorConfig_t mirrorCfg[1];
1506
1507 /* Clear the paCtrl before configuration */
1508 memset (&paCtrl, 0, sizeof (paCtrl));
1509
1510 /* Clear the expected counts */
1511 memset (&expected, 0, sizeof (expected));
1512
1513 /* Clear the mirror configuration */
1514 memset (&mirrorCfg[0], 0, sizeof (paPortMirrorConfig_t));
1515
1516 /* 1. Ingress Packet Capture test */
1517 cmdId = 0xbbbbcccc;
1518
1519 if (direction)
1520 {
1521 mirrorCfg[0].ctrlBitMap = pa_PKT_CLONE_ENABLE | pa_PKT_CLONE_INGRESS;
1522 mirrorCfg[0].portToBeMirrored = (dest_emac_port_id + pa_EMAC_PORT_0);
1523 mirrorCfg[0].mirrorPort = INGRESS_MIRROR_PORT;
1524
1525 paCtrl.code = pa_CONTROL_EMAC_PORT_CONFIG;
1526 paCtrl.params.emacPortCfg.numEntries = 1;
1527 paCtrl.params.emacPortCfg.cfgType = pa_EMAC_PORT_CFG_MIRROR;
1528 paCtrl.params.emacPortCfg.u.mirrorCfg = &mirrorCfg[0];
1529
1530
1531 paRetVal = pa_global_config(&paCtrl, cmdId);
1532 if (paRetVal != pa_OK)
1533 return -1;
1534 paRetVal = pcap_enable_global_ingress();
1535 if (paRetVal != pa_OK)
1536 return -1;
1537
1538 }
1539 else
1540 {
1541 mirrorCfg[0].ctrlBitMap = pa_PKT_CLONE_ENABLE;
1542 mirrorCfg[0].portToBeMirrored = (dest_emac_port_id + pa_EMAC_PORT_0);
1543 mirrorCfg[0].mirrorPort = EGRESS_MIRROR_PORT;
1544
1545 paCtrl.code = pa_CONTROL_EMAC_PORT_CONFIG;
1546 paCtrl.params.emacPortCfg.numEntries = 1;
1547 paCtrl.params.emacPortCfg.cfgType = pa_EMAC_PORT_CFG_MIRROR;
1548 paCtrl.params.emacPortCfg.u.mirrorCfg = &mirrorCfg[0];
1549
1550 pa_global_config(&paCtrl, cmdId);
1551 pcap_enable_global_egress();
1552 }
1553
1554 /* Set the expectations */
1555 expected.cloneCaptureCount = MAX_NUM_PACKETS;
1556 expected.emacRxCount = MAX_NUM_PACKETS;
1557 expected.txCount = MAX_NUM_PACKETS;
1558
1559 /* Clear the acutal counts */
1560 memset (&actual, 0, sizeof (actual));
1561
1562 /* This would do a packet capture at the queue configured from ingress traffic */
1563 for (i = 0; i < MAX_NUM_PACKETS; i ++)
1564 {
1565 if (direction) {
1566 if (TrigPacketToPdsp0 (dest_emac_port_id) != 0)
1567 {
1568 System_printf ("Packet %d send to pdsp0 failed \n", i);
1569 System_flush();
1570 return (-1);
1571 }
1572 }
1573 else
1574 {
1575 if (TrigPacketToPdsp5 (dest_emac_port_id) != 0)
1576 {
1577 System_printf ("Packet %d send to pdsp5 failed \n", i);
1578 System_flush();
1579 return (-1);
1580 }
1581 }
1582
1583 }
1584
1585 /* Check if expected values match with actual values */
1586 maxRetry = MAX_NUM_PACKETS*2;
1587 do {
1588 /* Wait for some cycles to receive all packets */
1589 CycleDelay(200000);
1590
1591 if (memcmp(&expected,&actual, sizeof(pktStats_t)))
1592 {
1593 if (--maxRetry == 0) {
1594 System_printf (" Port Mirror test Failed \n");
1595 System_flush();
1596 retVal = -1;
1597 }
1598 }
1599 else {
1600 break;
1601 }
1602
1603 }while (maxRetry);
1604
1605 if (direction)
1606 {
1607 if (retVal == 0)
1608 System_printf ("|Ingress Port Mirror Test \t|%d\t\t|%d\t\t|%d\t\t|%d\t\t\t|PASS\n", dest_emac_port_id + pa_EMAC_PORT_0,actual.txCount, actual.emacRxCount, actual.cloneCaptureCount);
1609 else
1610 System_printf ("|Ingress Port Mirror Test \t|%d\t\t|%d\t\t|%d\t\t|%d\t\t\t|FAIL\n", dest_emac_port_id + pa_EMAC_PORT_0,actual.txCount, actual.emacRxCount, actual.cloneCaptureCount);
1611 }
1612 else
1613 {
1614 if (retVal == 0)
1615 System_printf ("|Egress Port Mirror Test \t|%d\t\t|%d\t\t|%d\t\t|%d\t\t\t|PASS\n", dest_emac_port_id + pa_EMAC_PORT_0,actual.txCount, actual.emacRxCount, actual.cloneCaptureCount);
1616 else
1617 System_printf ("|Egress Port Mirror Test \t|%d\t\t|%d\t\t|%d\t\t|%d\t\t\t|FAIL\n", dest_emac_port_id + pa_EMAC_PORT_0,actual.txCount, actual.emacRxCount, actual.cloneCaptureCount);
1618 }
1619
1620 if (direction)
1621 {
1622 paRetVal = pcap_disable_global_ingress();
1623 }
1624 else
1625 {
1626 paRetVal = pcap_disable_global_egress();
1627 }
1628
1629 if (paRetVal != pa_OK)
1630 return -1;
1631
1632 System_flush();
1633
1634 return (retVal);
1635}
1636/* Nothing past this point */
diff --git a/test/PAPktCapTest/src/armv7/bios/fw_main.c b/test/PAPktCapTest/src/armv7/bios/fw_main.c
new file mode 100755
index 0000000..0ee2dd4
--- /dev/null
+++ b/test/PAPktCapTest/src/armv7/bios/fw_main.c
@@ -0,0 +1,143 @@
1/* (C) Copyright 2012, Texas Instruments, Inc.
2 *
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions
5 * are met:
6 *
7 * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the
13 * distribution.
14 *
15 * Neither the name of Texas Instruments Incorporated nor the names of
16 * its contributors may be used to endorse or promote products derived
17 * from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31*/
32
33/** ============================================================================
34 * @n@b main
35 *
36 * @b Description
37 * @n Entry point for single core example application.
38 *
39 * @param[in]
40 * @n None
41 *
42 * @return
43 * @n None
44 * =============================================================================
45 */
46#include <pcap_singlecore.h>
47#include <stdio.h>
48#include "ti/csl/csl_bootcfgAux.h"
49
50#ifdef SIMULATOR_SUPPORT
51uint32_t autodetectLogic = FALSE;
52#else
53uint32_t autodetectLogic = TRUE;
54#endif
/* Application entry point: on ARM, set up MMU second-level tables and MSMC
 * coherency, detect the boot mode, power up the PASS when running stand-alone,
 * then spawn the packet-capture test task and start the BIOS scheduler. */
int32_t main (void)
{
    Task_Params      pCapTaskParams;
    uint32_t         bootMode;

#ifdef __ARM_ARCH_7A__
    /* Add MMU entries for the MMRs accessed by this test.
     * NOTE(review): this setup block appears to be inherited from the PCIE
     * example — confirm the mapped region covers what this test needs. */
    Uint32 privid, index;
    CSL_MsmcRegs *msmc = (CSL_MsmcRegs *)CSL_MSMC_CFG_REGS;
    Mmu_DescriptorAttrs attrs;
    extern char ti_sysbios_family_arm_a15_Mmu_Module_State_0_secondLevelTableBuf_1__A;
    uint32_t addr = (uint32_t)&ti_sysbios_family_arm_a15_Mmu_Module_State_0_secondLevelTableBuf_1__A;

    Mmu_initDescAttrs(&attrs);

    attrs.type = Mmu_DescriptorType_TABLE;
    attrs.shareable = 0;            // non-shareable
    attrs.accPerm = 1;              // read/write at any privelege level
    attrs.attrIndx = 0;             // Use MAIR0 Register Byte 3 for
                                    // determining the memory attributes
                                    // for each MMU entry


    // Update the first level table's MMU entry for 0x40000000 with the
    // new attributes (points it at the second-level table above).
    Mmu_setFirstLevelDesc((Ptr)0x40000000, (UInt64)addr, &attrs);

    // Set up SES & SMS to make all masters coherent
    for (privid = 0; privid < 16; privid++)
    {
      for (index = 0; index < 8; index++)
      {
        uint32_t ses_mpaxh = msmc->SES_MPAX_PER_PRIVID[privid].SES[index].MPAXH;
        uint32_t sms_mpaxh = msmc->SMS_MPAX_PER_PRIVID[privid].SMS[index].MPAXH;
        if (CSL_FEXT (ses_mpaxh, MSMC_SES_MPAXH_0_SEGSZ) != 0)
        {
          // Clear the "US" bit to make coherent.  This is at 0x80.
          ses_mpaxh &= ~0x80;
          msmc->SES_MPAX_PER_PRIVID[privid].SES[index].MPAXH = ses_mpaxh;
        }
        if (CSL_FEXT (sms_mpaxh, MSMC_SMS_MPAXH_0_SEGSZ) != 0)
        {
          // Clear the "US" bit to make coherent.  This is at 0x80.
          sms_mpaxh &= ~0x80;
          msmc->SMS_MPAX_PER_PRIVID[privid].SMS[index].MPAXH = sms_mpaxh;
        }
      }
    }
#endif

#ifdef _TMS320C6X
    /* Init internal cycle counter */
    TSCL = 1;
#endif
    /* Auto-detect whether another master booted the device: boot mode 0
     * (low 3 bits of the boot config) means no boot / stand-alone. */
    if (autodetectLogic == TRUE)
    {
       bootMode = CSL_BootCfgGetBootMode() & 0x7;

       if (bootMode == 0)
           no_bootMode = TRUE;
       else
           no_bootMode = FALSE;
    }
    else {
        no_bootMode = TRUE;
    }

    /* Stand-alone on real hardware: power up the PASS before using it */
    if (!cpswSimTest)
    {
        if (no_bootMode == TRUE)
        {
            passPowerUp();
        }
    }

    /* Initialize the task params */
    Task_Params_init(&pCapTaskParams);
    pCapTaskParams.stackSize = 1024*8;


    /* Create the CPSW single core example task */
    Task_create((Task_FuncPtr)&pCap_SingleCoreApp, &pCapTaskParams, NULL);

    /* Start the BIOS Task scheduler */
    BIOS_start ();

    return 0;
}
143
diff --git a/test/PAPktCapTest/src/armv7/bios/pcap_linker.cmd b/test/PAPktCapTest/src/armv7/bios/pcap_linker.cmd
new file mode 100755
index 0000000..ae36a92
--- /dev/null
+++ b/test/PAPktCapTest/src/armv7/bios/pcap_linker.cmd
@@ -0,0 +1,7 @@
1SECTIONS
2{
3 .init_array > L2SRAM
4 .sharedGRL: load >> L2SRAM
5 .sharedPolicy: load >> L2SRAM
6 .rm: load >> MSMCSRAM
7}
diff --git a/test/PAUnitTest/src/armv7/bios/framework.c b/test/PAUnitTest/src/armv7/bios/framework.c
new file mode 100755
index 0000000..a1316c0
--- /dev/null
+++ b/test/PAUnitTest/src/armv7/bios/framework.c
@@ -0,0 +1,2672 @@
1/*
2 *
3 * Copyright (C) 2010-2014 Texas Instruments Incorporated - http://www.ti.com/
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 *
13 * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the
16 * distribution.
17 *
18 * Neither the name of Texas Instruments Incorporated nor the names of
19 * its contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34*/
35
36
37
38/* Generate and verify the system test framework
39 *
40 * The test framework consists of the pa driver instance, a cppi/cdma/qm configuration,
41 * memory for packet transmission and reception, and semaphores that are used
42 * for every test in the PA unit test.
43 *
44 */
45
46#include "../../pautest.h"
47
48#include <ti/drv/pa/pa_osal.h>
49#include <ti/csl/cslr_device.h>
50#include <ti/csl/csl_psc.h>
51#include <ti/csl/csl_pscAux.h>
52
53//#include <ti/csl/cslr_cp_ace.h>
54
55/* CSL CHIP, SEM Functional layer includes */
56#include <ti/csl/csl_chip.h>
57#include <ti/csl/csl_semAux.h>
58#include <ti/csl/csl_cpsw.h>
59
60/* Firmware images */
61#include <ti/drv/pa/fw/pafw.h>
62#include <ti/drv/qmss/qmss_firmware.h>
63
64/**********************************************************************
65 ****************************** Defines *******************************
66 **********************************************************************/
67#define MAX_NUM_CORES 8
68
69/* Hardware Semaphore to synchronize access from
70 * multiple applications (PA applications and non-PASS applications)
71 * across different cores to the QMSS library.
72 */
73#define QMSS_HW_SEM 3
74
75/* Hardware Semaphore to synchronize access from
76 * multiple applications (PASS applications and non-PASS applications)
77 * across different cores to the CPPI library.
78 */
79#define CPPI_HW_SEM 4
80
81/* Hardware Semaphore to synchronize access from
82 * multiple applications (PASS applications and non-PASS applications)
83 * across different cores to the PA library.
84 */
85#define PA_HW_SEM 5
86
87#undef L2_CACHE
88#ifdef L2_CACHE
89 /* Invalidate L2 cache. This should invalidate L1D as well.
90 * Wait until operation is complete. */
91#define SYS_CACHE_INV(addr, size, code) CACHE_invL2 (addr, size, code)
92
93 /* Writeback L2 cache. This should Writeback L1D as well.
94 * Wait until operation is complete. */
95#define SYS_CACHE_WB(addr, size, code) CACHE_wbL2 (addr, size, code)
96
97#else
98 /* Invalidate L1D cache and wait until operation is complete.
99 * Use this approach if L2 cache is not enabled */
100#define SYS_CACHE_INV(addr, size, code) CACHE_invL1d (addr, size, code)
101 /* Writeback L1D cache and wait until operation is complete.
102 * Use this approach if L2 cache is not enabled */
103#define SYS_CACHE_WB(addr, size, code) CACHE_wbL1d (addr, size, code)
104
105#endif
106
107
108/**********************************************************************
109 ************************** Global Variables **************************
110 **********************************************************************/
/* OSAL allocation bookkeeping: each malloc/free counter pair should be
 * equal when a test completes, otherwise the test leaked memory. */
uint32_t qmssMallocCounter = 0;
uint32_t qmssFreeCounter = 0;
uint32_t cppiMallocCounter = 0;
uint32_t cppiFreeCounter = 0;
uint32_t paMemProtNestedLevel = 0;
uint32_t rmMallocCounter = 0;
uint32_t rmFreeCounter = 0;

/* Per-core interrupt-disable key saved by the CsEnter/CsExit OSAL pairs */
uint32_t coreKey [MAX_NUM_CORES];

/* QMSS device specific configuration (defined in the device config files) */
extern Qmss_GlobalConfigParams qmssGblCfgParams;
extern Qmss_GlobalConfigParams qmssNetssGblCfgParams;

/* CPPI device specific configuration */
extern Cppi_GlobalConfigParams cppiGblCfgParams;

#undef DBG_MULTI_CORE

/* Simulator debug level regs; PA_LOG_IF is a fixed address supplied by the build */
paLog_t *paLogLevel = (paLog_t *)PA_LOG_IF;
132
/**
 *  @b Description
 *  @n
 *      Full memory barrier: every LOAD and STORE issued before this call
 *      completes before any LOAD or STORE issued after it.
 */
static inline void memBarrier(void)
{
    __sync_synchronize();
}
141
142/**********************************************************************
143 *************************** OSAL Functions **************************
144 **********************************************************************/
145
146/*
147 * Netss Local PKTDMA related convert functions
148 */
149
150void* Netss_qmssVirtToPhy (void *ptr)
151{
152 uint32_t addr = (uint32_t) ptr;
153
154 {
155 if ((addr & 0xFF000000) == CSL_NETCP_CFG_REGS)
156 {
157 addr = (addr & 0x00FFFFFF) | 0xFF000000;
158 }
159 }
160
161 return ((void *)addr);
162}
163
164void* Netss_qmssPhyToVirt (void *ptr)
165{
166 uint32_t addr = (uint32_t) ptr;
167
168 {
169 if ((addr & 0xFF000000) == 0xFF000000)
170 {
171 addr = (addr & 0x00FFFFFF) | CSL_NETCP_CFG_REGS;
172 }
173 }
174
175 return ((void *)addr);
176}
177
178void* Netss_qmssConvertDescVirtToPhy(uint32_t QID, void *descAddr)
179{
180 uint32_t addr = (uint32_t) descAddr;
181
182 {
183 if ((addr & 0xFF000000) == CSL_NETCP_CFG_REGS)
184 {
185 addr = (addr & 0x00FFFFFF) | 0xFF000000;
186 }
187 }
188
189 return ((void *)addr);
190}
191
192void* Netss_qmssConvertDescPhyToVirt(uint32_t QID, void *descAddr)
193{
194 uint32_t addr = (uint32_t) descAddr;
195
196 {
197 if ((addr & 0xFF000000) == 0xFF000000)
198 {
199 addr = (addr & 0x00FFFFFF) | CSL_NETCP_CFG_REGS;
200 }
201 }
202
203 return ((void *)addr);
204}
205
/**
 *  @b Description
 *  @n
 *      The function is used to allocate a memory block of the specified size.
 *
 *  @param[in]  num_bytes
 *      Number of bytes to be allocated.
 *
 *  @retval
 *      Allocated block address
 */
Ptr Osal_qmssMalloc (uint32_t num_bytes)
{
    /* NOTE(review): errorBlock is handed to Memory_alloc without Error_init();
     * presumably acceptable because the result is checked via NULL — confirm
     * against SYS/BIOS Memory_alloc requirements. */
    Error_Block errorBlock;
    Ptr dataPtr;

    /* Increment the allocation counter (paired with qmssFreeCounter). */
    qmssMallocCounter++;

    /* Allocate memory from the default heap. */
    dataPtr = Memory_alloc(NULL, num_bytes, 0, &errorBlock);
    return (dataPtr);
}
229
/**
 *  @b Description
 *  @n
 *      The function is used to free a memory block previously allocated
 *      with Osal_qmssMalloc().
 *
 *  @param[in]  ptr
 *      Pointer to the memory block to be cleaned up.
 *
 *  @param[in]  size
 *      Size of the memory block to be cleaned up.
 *
 *  @retval
 *      Not Applicable
 */
void Osal_qmssFree (Ptr ptr, uint32_t size)
{
    /* Increment the free counter (paired with qmssMallocCounter). */
    qmssFreeCounter++;
    Memory_free(NULL, ptr, size);
}
250
/**
 *  @b Description
 *  @n
 *      The function is used to enter a critical section.
 *      Function protects against
 *
 *      access from multiple cores
 *      and
 *      access from multiple threads on single core
 *
 *      Ordering matters: the multi-core hardware semaphore is taken first,
 *      then interrupts are disabled on the local core; Osal_qmssCsExit()
 *      releases them in the reverse order.
 *
 *  @retval
 *      Always NULL (the lock state is kept in the QMSS_HW_SEM hardware
 *      semaphore and the per-core coreKey[] entry, not in the handle).
 */
Ptr Osal_qmssCsEnter (void)
{
#ifdef _TMS320C6X
    /* DNUM identifies the DSP core; ARM builds run a single core here. */
    uint32_t coreNum = CSL_chipReadReg (CSL_CHIP_DNUM);
#else
    uint32_t coreNum = 0;
#endif
    /* Get the hardware semaphore.
     *
     * Acquire Multi core QMSS synchronization lock (spin until owned).
     */
    while ((CSL_semAcquireDirect (QMSS_HW_SEM)) == 0);

    /* Disable all interrupts and OS scheduler.
     *
     * Acquire Multi threaded / process synchronization lock.
     */
    coreKey [coreNum] = Hwi_disable();

    return NULL;
}
288
/**
 *  @b Description
 *  @n
 *      The function is used to exit a critical section
 *      protected using Osal_qmssCsEnter() API.
 *
 *      Releases in reverse order of Osal_qmssCsEnter(): local interrupts
 *      are restored first, then the multi-core hardware semaphore.
 *
 *  @param[in]  CsHandle
 *      Key used to unlock the critical section (unused; always NULL).
 *
 *  @retval
 *      Not Applicable
 */
void Osal_qmssCsExit (Ptr CsHandle)
{
#ifdef _TMS320C6X
    uint32_t coreNum = CSL_chipReadReg (CSL_CHIP_DNUM);
#else
    uint32_t coreNum = 0;
#endif
    /* Enable all interrupts and enables the OS scheduler back on.
     *
     * Release multi-threaded / multi-process lock on this core.
     */
    Hwi_restore(coreKey [coreNum]);

    /* Release the hardware semaphore
     *
     * Release multi-core lock.
     */
    CSL_semReleaseSemaphore (QMSS_HW_SEM);

    return;
}
322
/**
 *  ============================================================================
 *  @n@b Osal_qmssAccCsEnter
 *
 *  @b  brief
 *  @n  This API ensures multi-core and multi-threaded synchronization for
 *      QMSS accumulator access. This is a BLOCKING API.
 *
 *  @return
 *      Handle used to lock critical section
 *  =============================================================================
 */
Ptr Osal_qmssAccCsEnter (Void)
{
    /* This is a suboptimal implementation for this OSAL (it reuses the full
     * multi-core QMSS lock); please refer to QMSS examples for an optimal
     * implementation of this function.
     */
    return (Osal_qmssCsEnter());
}
351
/**
 *  ============================================================================
 *  @n@b Osal_qmssAccCsExit
 *
 *  @b  brief
 *  @n  Exits a critical section previously acquired with
 *      @a Osal_qmssAccCsEnter(), releasing the multi-core and multi-threaded
 *      lock so another process/core can grab QMSS access.
 *
 *  @param[in]  CsHandle
 *      Handle for unlocking critical section.
 *
 *  @return None
 *  =============================================================================
 */
Void Osal_qmssAccCsExit (Ptr CsHandle)
{
    /* Suboptimal implementation: delegates to the full QMSS unlock; see the
     * QMSS examples for an optimal implementation of this function.
     */
    Osal_qmssCsExit(CsHandle);
    return;
}
376
/**
 *  ============================================================================
 *  @n@b Osal_qmssMtCsEnter
 *
 *  @b  brief
 *  @n  This API ensures ONLY multi-threaded synchronization to the QMSS user.
 *      This is a BLOCKING API.
 *
 *  @return
 *      Handle used to lock critical section
 *  =============================================================================
 */
Ptr Osal_qmssMtCsEnter (void)
{
    /* Deliberate no-op in this single-threaded test framework; the
     * interrupt-disable implementation is kept below for reference.
     */
    //coreKey [CSL_chipReadReg (CSL_CHIP_DNUM)] = Hwi_disable();

    return NULL;
}
403
/**
 *  ============================================================================
 *  @n@b Osal_qmssMtCsExit
 *
 *  @b  brief
 *  @n  Exits a critical section previously acquired with
 *      @a Osal_qmssMtCsEnter(), resetting the multi-threaded lock.
 *
 *  @param[in]  CsHandle
 *      Handle for unlocking critical section.
 *
 *  @return None
 *  =============================================================================
 */
void Osal_qmssMtCsExit (Ptr CsHandle)
{
    /* Deliberate no-op, matching the no-op Osal_qmssMtCsEnter(). */
    //Hwi_restore(key);

    return;
}
430
/**
 *  @b Description
 *  @n
 *      QMSS OSAL Logging API. Intentionally empty: log output is discarded
 *      in this test framework.
 *
 *  @param[in]  fmt
 *      Formatted String.
 *
 *  @retval
 *      Not Applicable
 */
void Osal_qmssLog ( String fmt, ... )
{
}
446
/**
 *  @b Description
 *  @n
 *      Indicates that a block of (possibly cached) memory is about to be
 *      read: the cache is invalidated so the CPU sees the data written by
 *      other masters. No-op on ARM builds (coherent in this configuration
 *      per the surrounding framework — see the MSMC MPAX setup in fw_main).
 *
 *  @param[in]  blockPtr
 *      Address of the block which is to be invalidated
 *
 *  @param[in]  size
 *      Size of the block to be invalidated
 *
 *  @retval
 *      Not Applicable
 */
void Osal_qmssBeginMemAccess (void *blockPtr, uint32_t size)
{
#ifdef _TMS320C6X
    uint32_t key;

    /* Disable Interrupts: the invalidate sequence must not be preempted */
    key = Hwi_disable();

    /* Cleanup the prefetch buffer also. */
    CSL_XMC_invalidatePrefetchBuffer();

    SYS_CACHE_INV (blockPtr, size, CACHE_FENCE_WAIT);

    /* nop padding lets the cache operation drain before continuing */
    asm (" nop 4");
    asm (" nop 4");
    asm (" nop 4");
    asm (" nop 4");

    /* Reenable Interrupts. */
    Hwi_restore(key);
#endif
    return;
}
487
/**
 *  @b Description
 *  @n
 *      Indicates that a block of (possibly cached) memory has finished being
 *      written: the cache is written back so other masters see the CPU's
 *      updates. No-op on ARM builds.
 *
 *  @param[in]  blockPtr
 *      Address of the block which is to be written back
 *
 *  @param[in]  size
 *      Size of the block to be written back
 *
 *  @retval
 *      Not Applicable
 */
void Osal_qmssEndMemAccess (void *blockPtr, uint32_t size)
{
#ifdef _TMS320C6X
    uint32_t key;

    /* Disable Interrupts: the writeback sequence must not be preempted */
    key = Hwi_disable();

    SYS_CACHE_WB (blockPtr, size, CACHE_FENCE_WAIT);

    /* nop padding lets the cache operation drain before continuing */
    asm (" nop 4");
    asm (" nop 4");
    asm (" nop 4");
    asm (" nop 4");

    /* Reenable Interrupts. */
    Hwi_restore(key);
#endif
    return;
}
525
/******************************************************************************
* Function to issue memory barrier
*
* NOTE: QMSS unit tests are not using CPPI descriptors
******************************************************************************/
void* Osal_qmssMemBarrier(uint32_t QID, void *descAddr)
{
    /* Make sure all descriptor writes are visible before the push/pop */
    memBarrier();

    return descAddr;
}
537
/**
 *  @b Description
 *  @n
 *      The function is used to allocate a memory block of the specified size.
 *
 *      Note: If the LLD is used by applications on multiple core, the "cppiHeap"
 *      should be in shared memory
 *
 *  @param[in]  num_bytes
 *      Number of bytes to be allocated.
 *
 *  @retval
 *      Allocated block address
 */
Ptr Osal_cppiMalloc (uint32_t num_bytes)
{
    /* NOTE(review): errorBlock is not initialized with Error_init() before
     * Memory_alloc — confirm against SYS/BIOS requirements (same pattern as
     * Osal_qmssMalloc). */
    Error_Block errorBlock;
    Ptr dataPtr;

    /* Increment the allocation counter (paired with cppiFreeCounter). */
    cppiMallocCounter++;

    /* Allocate memory from the default heap. */
    dataPtr = Memory_alloc(NULL, num_bytes, 0, &errorBlock);
    return (dataPtr);
}
564
/**
 *  @b Description
 *  @n
 *      The function is used to free a memory block of the specified size
 *      allocated using Osal_cppiMalloc() API.
 *
 *  @param[in]  ptr
 *      Pointer to the memory block to be cleaned up.
 *
 *  @param[in]  size
 *      Size of the memory block to be cleaned up.
 *
 *  @retval
 *      Not Applicable
 */
void Osal_cppiFree (Ptr ptr, uint32_t size)
{
    /* Increment the free counter (paired with cppiMallocCounter). */
    cppiFreeCounter++;
    Memory_free (NULL, ptr, size);
}
586
/**
 *  ============================================================================
 *  @n@b Osal_cppiCsEnter
 *
 *  @b  brief
 *  @n  This API ensures multi-core and multi-threaded synchronization to the
 *      caller. This is a BLOCKING API: it spins on the CPPI hardware
 *      semaphore, then disables local interrupts (released in reverse order
 *      by Osal_cppiCsExit()).
 *
 *  @return
 *  @n  Handle used to lock critical section (always NULL; state lives in
 *      CPPI_HW_SEM and coreKey[]).
 *  =============================================================================
 */
Ptr Osal_cppiCsEnter (void)
{
#ifdef _TMS320C6X
    uint32_t coreNum = CSL_chipReadReg (CSL_CHIP_DNUM);
#else
    uint32_t coreNum = 0;
#endif
    /* Get the hardware semaphore.
     *
     * Acquire Multi core CPPI synchronization lock
     */
    while ((CSL_semAcquireDirect (CPPI_HW_SEM)) == 0);

    /* Disable all interrupts and OS scheduler.
     *
     * Acquire Multi threaded / process synchronization lock.
     */
    coreKey [coreNum] = Hwi_disable();

    return NULL;
}
629
/**
 *  ============================================================================
 *  @n@b Osal_cppiCsExit
 *
 *  @b  brief
 *  @n  Exits a critical section previously acquired with @a Osal_cppiCsEnter():
 *      local interrupts are restored first, then the multi-core hardware
 *      semaphore is released.
 *
 *  @param[in]  CsHandle
 *      Handle for unlocking critical section (unused; always NULL).
 *
 *  @return None
 *  =============================================================================
 */
void Osal_cppiCsExit (Ptr CsHandle)
{
#ifdef _TMS320C6X
    uint32_t coreNum = CSL_chipReadReg (CSL_CHIP_DNUM);
#else
    uint32_t coreNum = 0;
#endif
    /* Enable all interrupts and enables the OS scheduler back on.
     *
     * Release multi-threaded / multi-process lock on this core.
     */
    Hwi_restore(coreKey [coreNum]);

    /* Release the hardware semaphore
     *
     * Release multi-core lock.
     */
    CSL_semReleaseSemaphore (CPPI_HW_SEM);

    return;
}
667
/**
 *  @b Description
 *  @n
 *      CPPI OSAL Logging API. Intentionally empty: log output is discarded
 *      in this test framework.
 *
 *  @param[in]  fmt
 *      Formatted String.
 *
 *  @retval
 *      Not Applicable
 */
void Osal_cppiLog ( String fmt, ... )
{
}
683
/**
 *  @b Description
 *  @n
 *      Indicates that a block of (possibly cached) memory is about to be
 *      read: the cache is invalidated so the CPU sees data written by other
 *      masters. No-op on ARM builds.
 *
 *  @param[in]  blockPtr
 *      Address of the block which is to be invalidated
 *
 *  @param[in]  size
 *      Size of the block to be invalidated
 *
 *  @retval
 *      Not Applicable
 */
void Osal_cppiBeginMemAccess (void *blockPtr, uint32_t size)
{
#ifdef _TMS320C6X
    uint32_t key;

    /* Disable Interrupts: the invalidate sequence must not be preempted */
    key = Hwi_disable();

    /* Cleanup the prefetch buffer also. */
    CSL_XMC_invalidatePrefetchBuffer();

    SYS_CACHE_INV (blockPtr, size, CACHE_FENCE_WAIT);

    /* nop padding lets the cache operation drain before continuing */
    asm (" nop 4");
    asm (" nop 4");
    asm (" nop 4");
    asm (" nop 4");

    /* Reenable Interrupts. */
    Hwi_restore(key);
#endif
    return;
}
724
/**
 *  @b Description
 *  @n
 *      Indicates that a block of (possibly cached) memory has finished being
 *      written: the cache is written back so other masters see the CPU's
 *      updates. No-op on ARM builds.
 *
 *  @param[in]  blockPtr
 *      Address of the block which is to be written back
 *
 *  @param[in]  size
 *      Size of the block to be written back
 *
 *  @retval
 *      Not Applicable
 */
void Osal_cppiEndMemAccess (void *blockPtr, uint32_t size)
{
#ifdef _TMS320C6X
    uint32_t key;

    /* Disable Interrupts: the writeback sequence must not be preempted */
    key = Hwi_disable();

    SYS_CACHE_WB (blockPtr, size, CACHE_FENCE_WAIT);

    /* nop padding lets the cache operation drain before continuing */
    asm (" nop 4");
    asm (" nop 4");
    asm (" nop 4");
    asm (" nop 4");

    /* Reenable Interrupts. */
    Hwi_restore(key);
#endif
    return;
}
762
763
/**
 *  @brief  Alerts the application that the PA is going to access table
 *          memory. The application must ensure cache coherency and
 *          semaphores for multi-core applications.
 *
 *  <b> Parameters </b>
 *  @n  The address of the table to be accessed
 *  @n  The number of bytes in the table
 *
 *  @note PA will make nested calls to this function for memory access
 *        protection of different memory tables. The multicore semaphore
 *        should be allocated only for the first call of a nested group
 *        of calls.
 */
void Osal_paBeginMemAccess (Ptr addr, uint32_t size)
{
#ifdef _TMS320C6X
    uint32_t key;

    /* Disable Interrupts: the invalidate sequence must not be preempted */
    key = Hwi_disable();

    /* Cleanup the prefetch buffer also. */
    CSL_XMC_invalidatePrefetchBuffer();

    SYS_CACHE_INV (addr, size, CACHE_FENCE_WAIT);

    /* nop padding lets the cache operation drain before continuing */
    asm (" nop 4");
    asm (" nop 4");
    asm (" nop 4");
    asm (" nop 4");

    /* Reenable Interrupts. */
    Hwi_restore(key);
#endif
}
810
/**
 *  @brief  Alerts the application that the PA has completed access to table
 *          memory. Always called after Osal_paBeginMemAccess() with the same
 *          parameters.
 *
 *  <b> Parameters </b>
 *  @n  The address of the table to be accessed
 *  @n  The number of bytes in the table
 *
 *  @note PA will make nested calls to this function for memory access
 *        protection of different memory tables. The multicore semaphore
 *        should be freed when all previous memory access has completed,
 *        in other words, when the nested call level reaches 0.
 */
void Osal_paEndMemAccess (Ptr addr, uint32_t size)
{
#ifdef _TMS320C6X
    uint32_t key;

    /* Disable Interrupts: the writeback sequence must not be preempted */
    key = Hwi_disable();

    SYS_CACHE_WB (addr, size, CACHE_FENCE_WAIT);

    /* nop padding lets the cache operation drain before continuing */
    asm (" nop 4");
    asm (" nop 4");
    asm (" nop 4");
    asm (" nop 4");

    /* Reenable Interrupts. */
    Hwi_restore(key);
#endif
}
853
/**
 *  @b Description
 *  @n
 *      Enters a PA critical section: blocks on the PA hardware semaphore for
 *      multi-core protection. Note that unlike the QMSS/CPPI variants, local
 *      interrupts are NOT disabled here.
 *
 *  @param[out]  key
 *      Key used to unlock the critical section (always written as 0).
 *
 *  @retval
 *      Not Applicable
 */
void Osal_paMtCsEnter (uint32_t *key)
{

    /* Get the hardware semaphore.
     *
     * Acquire Multi core PA synchronization lock (spin until owned)
     */
    while ((CSL_semAcquireDirect (PA_HW_SEM)) == 0);
    *key = 0;
}
880
/**
 *  @b Description
 *  @n
 *      Exits a critical section protected using Osal_paMtCsEnter() API.
 *
 *  @param[in]  key
 *      Key used to unlock the critical section (unused; always 0).
 *
 *  @retval
 *      Not Applicable
 */
void Osal_paMtCsExit (uint32_t key)
{
    /* Release the hardware semaphore (multi-core PA lock) */
    CSL_semReleaseSemaphore (PA_HW_SEM);
}
898
/**
 *  @b Description
 *  @n
 *      The function is used to allocate a memory block of the specified size.
 *
 *  @param[in]  num_bytes
 *      Number of bytes to be allocated.
 *
 *  @retval
 *      Allocated block address
 */
void *Osal_rmMalloc (uint32_t num_bytes)
{
    /* NOTE(review): errorBlock is not initialized with Error_init() before
     * Memory_alloc — confirm against SYS/BIOS requirements (same pattern as
     * Osal_qmssMalloc). */
    Error_Block errorBlock;

    /* Increment the allocation counter (paired with rmFreeCounter). */
    rmMallocCounter++;

    /* Allocate memory from the default heap. */
    return Memory_alloc(NULL, num_bytes, 0, &errorBlock);
}
920
/**
 *  @b Description
 *  @n
 *      The function is used to free a memory block previously allocated with
 *      Osal_rmMalloc().
 *
 *  @param[in]  ptr
 *      Pointer to the memory block to be cleaned up.
 *
 *  @param[in]  size
 *      Size of the memory block to be cleaned up.
 *
 *  @retval
 *      Not Applicable
 */
void Osal_rmFree (void *ptr, uint32_t size)
{
    /* Increment the free counter (paired with rmMallocCounter). */
    rmFreeCounter++;
    Memory_free(NULL, ptr, size);
}
941
/* FUNCTION PURPOSE: Critical section enter
 ***********************************************************************
 * DESCRIPTION: Enters an RM critical section (multi-core and
 *              multi-thread). Intentionally a no-op in this
 *              single-threaded test framework.
 */
void *Osal_rmCsEnter(void)
{

    return NULL;
}
956
/* FUNCTION PURPOSE: Critical section exit
 ***********************************************************************
 * DESCRIPTION: Exits a critical section protected using Osal_rmCsEnter().
 *              Intentionally a no-op, matching the no-op enter.
 */
void Osal_rmCsExit(void *CsHandle)
{

}
966
/* FUNCTION PURPOSE: Multi-threaded critical section enter
 ***********************************************************************
 * DESCRIPTION: Enters a multi-threaded RM critical section.
 *              Intentionally a no-op in this single-threaded test
 *              framework; mtSemObj is unused.
 */
void *Osal_rmMtCsEnter(void *mtSemObj)
{

    return NULL;
}
979
/* FUNCTION PURPOSE: Multi-threaded critical section exit
 ***********************************************************************
 * DESCRIPTION: Exits a section protected using Osal_rmMtCsEnter().
 *              Intentionally a no-op, matching the no-op enter.
 */
void Osal_rmMtCsExit(void *mtSemObj, void *CsHandle)
{

}
989
/* FUNCTION PURPOSE: Begin memory access
 ***********************************************************************
 * DESCRIPTION: Indicates that a block of (possibly cached) memory is
 *              about to be read; the cache is invalidated so the CPU
 *              sees data written by other masters. No-op on ARM builds.
 */
void Osal_rmBeginMemAccess(void *ptr, uint32_t size)
{
#ifdef _TMS320C6X
    uint32_t key;

    /* Disable Interrupts: the invalidate sequence must not be preempted */
    key = Hwi_disable();

    /* Cleanup the prefetch buffer also. */
    CSL_XMC_invalidatePrefetchBuffer();

#ifdef L2_CACHE
    /* Invalidate L2 cache. This should invalidate L1D as well.
     * Wait until operation is complete. */
    CACHE_invL2 (ptr, size, CACHE_FENCE_WAIT);
#else
    /* Invalidate L1D cache and wait until operation is complete.
     * Use this approach if L2 cache is not enabled */
    CACHE_invL1d (ptr, size, CACHE_FENCE_WAIT);
#endif

    /* Reenable Interrupts. */
    Hwi_restore(key);
#endif
    return;
}
1023
/* FUNCTION PURPOSE: End memory access
 ***********************************************************************
 * DESCRIPTION: Indicates that a block of (possibly cached) memory has
 *              finished being written; the cache is written back so other
 *              masters see the CPU's updates. No-op on ARM builds.
 */
void Osal_rmEndMemAccess(void *ptr, uint32_t size)
{
#ifdef _TMS320C6X
    uint32_t key;

    /* Disable Interrupts: the writeback sequence must not be preempted */
    key = Hwi_disable();

#ifdef L2_CACHE
    /* Writeback L2 cache. This should Writeback L1D as well.
     * Wait until operation is complete. */
    CACHE_wbL2 (ptr, size, CACHE_FENCE_WAIT);

#else
    /* Writeback L1D cache and wait until operation is complete.
     * Use this approach if L2 cache is not enabled */
    CACHE_wbL1d (ptr, size, CACHE_FENCE_WAIT);
#endif

    /* Reenable Interrupts. */
    Hwi_restore(key);
#endif
    return;
}
1055
/**
 *  @b Description
 *  @n
 *      Creates a task blocking object (a counting semaphore with initial
 *      count 0) capable of blocking the task a RM instance is running within.
 *
 *  @retval
 *      Allocated task blocking object
 */
void *Osal_rmTaskBlockCreate(void)
{
    Semaphore_Params semParams;

    Semaphore_Params_init(&semParams);
    return((void *)Semaphore_create(0, &semParams, NULL));
}
1073
/**
 *  @b Description
 *  @n
 *      Blocks the task a RM instance is running within (pends forever on the
 *      blocking-object semaphore).
 *
 *  @param[in]  handle
 *      Task blocking object handle.
 *
 *  @retval
 *      Not Applicable
 */
void Osal_rmTaskBlock(void *handle)
{
    Semaphore_pend((Semaphore_Handle)handle, BIOS_WAIT_FOREVER);
}
1090
/**
 *  @b Description
 *  @n
 *      Unblocks a task blocked via Osal_rmTaskBlock() (posts the
 *      blocking-object semaphore).
 *
 *  @param[in]  handle
 *      Task blocking object handle.
 *
 *  @retval
 *      Not Applicable
 */
void Osal_rmTaskUnblock(void *handle)
{
    Semaphore_post((Semaphore_Handle)handle);
}
1107
/**
 *  @b Description
 *  @n
 *      Deletes a task blocking object previously created with
 *      Osal_rmTaskBlockCreate().
 *
 *  @param[in]  handle
 *      Task blocking object handle.
 *
 *  @retval
 *      Not Applicable
 */
void Osal_rmTaskBlockDelete(void *handle)
{
    Semaphore_delete((Semaphore_Handle *)&handle);
}
1124
/**
 *  @b Description
 *  @n
 *      RM OSAL Logging API: prints the formatted message to the console and
 *      flushes the output (unlike the QMSS/CPPI log stubs, RM logging is
 *      actually emitted).
 *
 *  @param[in]  fmt
 *      Formatted String.
 *
 *  @retval
 *      Not Applicable
 */
void Osal_rmLog (char *fmt, ... )
{
    VaList ap;

    va_start(ap, fmt);
    System_vprintf(fmt, ap);
    va_end(ap);
    System_flush();
}
1146
1147
/* Download and start the PASS PDSP firmware.
 *
 * Holds the PASS in reset, loads every PDSP image (NSS Gen1 uses the
 * c1/c2/m images across 6 PDSPs; NSS Gen2 uses the per-stage in/post/eg
 * images across 15 PDSPs), re-enables the PASS, and prints each PDSP's
 * firmware version.
 *
 * Returns 0 in all cases — see NOTE below.
 */
int downloadPaFirmware (void)
{

  int ret = pa_OK, i;
  uint32_t version;

  /* Hold the PASS in reset while the images are loaded */
  Pa_resetControl (tFramework.passHandle, pa_STATE_RESET);

#ifndef NSS_GEN2

  /* PDPSs 0-2 use image c1 */
  Pa_downloadImage (tFramework.passHandle, 0, (Ptr)c1_0, c1_0Size);
  Pa_downloadImage (tFramework.passHandle, 1, (Ptr)c1_1, c1_1Size);
  Pa_downloadImage (tFramework.passHandle, 2, (Ptr)c1_2, c1_2Size);

  /* PDSP 3 uses image c2 */
  Pa_downloadImage (tFramework.passHandle, 3, (Ptr)c2, c2Size);

  /* PDSPs 4-5 use image m */
  for (i = 4; i < 6; i++)
    Pa_downloadImage (tFramework.passHandle, i, (Ptr)m, mSize);

#else

  Pa_downloadImage (tFramework.passHandle, 0, (Ptr)in0_pdsp0, in0_pdsp0Size);
  Pa_downloadImage (tFramework.passHandle, 1, (Ptr)in0_pdsp1, in0_pdsp1Size);
  Pa_downloadImage (tFramework.passHandle, 2, (Ptr)in1_pdsp0, in1_pdsp0Size);
  Pa_downloadImage (tFramework.passHandle, 3, (Ptr)in1_pdsp1, in1_pdsp1Size);
  Pa_downloadImage (tFramework.passHandle, 4, (Ptr)in2_pdsp0, in2_pdsp0Size);
  Pa_downloadImage (tFramework.passHandle, 5, (Ptr)in3_pdsp0, in3_pdsp0Size);
  Pa_downloadImage (tFramework.passHandle, 6, (Ptr)in4_pdsp0, in4_pdsp0Size);
  Pa_downloadImage (tFramework.passHandle, 7, (Ptr)in4_pdsp1, in4_pdsp1Size);
  Pa_downloadImage (tFramework.passHandle, 8, (Ptr)post_pdsp0, post_pdsp0Size);
  Pa_downloadImage (tFramework.passHandle, 9, (Ptr)post_pdsp1, post_pdsp1Size);
  Pa_downloadImage (tFramework.passHandle, 10, (Ptr)eg0_pdsp0, eg0_pdsp0Size);
  Pa_downloadImage (tFramework.passHandle, 11, (Ptr)eg0_pdsp1, eg0_pdsp1Size);
  Pa_downloadImage (tFramework.passHandle, 12, (Ptr)eg0_pdsp2, eg0_pdsp2Size);
  Pa_downloadImage (tFramework.passHandle, 13, (Ptr)eg1_pdsp0, eg1_pdsp0Size);
  Pa_downloadImage (tFramework.passHandle, 14, (Ptr)eg2_pdsp0, eg2_pdsp0Size);

#endif

  /* Bring the PASS out of reset and start the firmware */
  ret = Pa_resetControl (tFramework.passHandle, pa_STATE_ENABLE);

  if (ret != pa_STATE_ENABLE)
  {
    /* NOTE(review): the failure is logged but deliberately NOT propagated —
     * the 'return (-1)' below is commented out, so callers always see 0.
     * Confirm this best-effort behavior is intended. */
    System_printf ("downloadPaFirmware: Pa_resetControl return with error code %d\n", ret);
    System_flush();
    //return (-1);
  }

  /* Report the running firmware version of every PDSP */
  for ( i = 0; i < TF_NUM_PDSPS; i++)
  {
    Pa_getPDSPVersion(tFramework.passHandle, i, &version);
    System_printf ("PDSP %d version = 0x%08x\n", i, version);
    System_flush();
  }

  return (0);

}
1209
1210/* The PA LLD instance is created, the PA firmware is
1211 * downloaded and started */
1212int initPa (void)
1213{
1214 paSizeInfo_t paSize;
1215 paConfig_t paCfg;
1216 paRaConfig_t raCfg;
1217 paTimestampConfig_t tsCfg;
1218 int ret;
1219 int sizes[pa_N_BUFS];
1220 int aligns[pa_N_BUFS];
1221 void* bases[pa_N_BUFS];
1222
1223
1224 /* The maximum number of handles that can exists are 32 for L2, and 64 for L3. */
1225 memset(&paSize, 0, sizeof(paSizeInfo_t));
1226 memset(&paCfg, 0, sizeof(paConfig_t));
1227
1228 memset(bases, 0, sizeof(bases));
1229 memset(sizes, 0, sizeof(sizes));
1230 memset(aligns, 0, sizeof(aligns));
1231 memset(&raCfg, 0, sizeof(paRaConfig_t));
1232 paSize.nMaxL2 = TF_MAX_NUM_L2_HANDLES;
1233 paSize.nMaxL3 = TF_MAX_NUM_L3_HANDLES;
1234 paSize.nMaxVlnk = TF_MAX_NUM_VLINK_HANDLES;
1235 paSize.nUsrStats = pa_USR_STATS_MAX_COUNTERS;
1236
1237#ifdef NSS_GEN2
1238
1239 paSize.nMaxAcl = TF_MAX_NUM_ACL_HANDLES;
1240 paSize.nMaxFc = TF_MAX_NUM_FC_HANDLES;
1241 paSize.nMaxEoam = TF_MAX_NUM_EOAM_HANDLES;
1242#endif
1243
1244 ret = Pa_getBufferReq(&paSize, sizes, aligns);
1245
1246 if (ret != pa_OK) {
1247 System_printf ("initPa: Pa_getBufferReq() return with error code %d\n", ret);
1248 return (-1);
1249 }
1250
1251 /* The first buffer is used as the instance buffer */
1252 if ((uint32_t)memPaInst & (aligns[pa_BUF_INST] - 1)) {
1253 System_printf ("initPa: Pa_getBufferReq requires %d alignment for instance buffer, but address is 0x%08x\n", aligns[pa_BUF_INST], (uint32_t)memPaInst);
1254 return (-1);
1255 }
1256
1257 if (sizeof(memPaInst) < sizes[pa_BUF_INST]) {
1258 System_printf ("initPa: Pa_getBufferReq requires size %d for instance buffer, have only %d\n", sizes[pa_BUF_INST], sizeof(memPaInst));
1259 return (-1);
1260 }
1261
1262 bases[pa_BUF_INST] = (void *)memPaInst;
1263
1264
1265 /* The second buffer is the L2 table */
1266 if ((uint32_t)memL2Ram & (aligns[pa_BUF_L2_TABLE] - 1)) {
1267 System_printf ("initPa: Pa_getBufferReq requires %d alignment for L2 buffer, but address is 0x%08x\n", aligns[pa_BUF_L2_TABLE], (uint32_t)memL2Ram);
1268 return (-1);
1269 }
1270
1271 if (sizeof(memL2Ram) < sizes[pa_BUF_L2_TABLE]) {
1272 System_printf ("initPa: Pa_getBufferReq requires %d bytes for L2 buffer, have only %d\n", sizes[pa_BUF_L2_TABLE], sizeof(memL2Ram));
1273 return (-1);
1274 }
1275
1276 bases[pa_BUF_L2_TABLE] = (void *)memL2Ram;
1277
1278 /* The third buffer is the L3 table */
1279 if ((uint32_t)memL3Ram & (aligns[pa_BUF_L3_TABLE] - 1)) {
1280 System_printf ("initPa: Pa_getBufferReq requires %d alignment for L3 buffer, but address is 0x%08x\n", aligns[pa_BUF_L3_TABLE], (uint32_t)memL3Ram);
1281 return (-1);
1282 }
1283
1284 if (sizeof(memL3Ram) < sizes[pa_BUF_L3_TABLE]) {
1285 System_printf ("initPa: Pa_getBufferReq requires %d bytes for L3 buffer, have only %d\n", sizes[pa_BUF_L3_TABLE], sizeof(memL3Ram));
1286 return (-1);
1287 }
1288
1289 bases[pa_BUF_L3_TABLE] = (void *)memL3Ram;
1290
1291 /* The fourth buffer is the User Statistics Link table */
1292 if ((uint32_t)memUsrStatsLnkTbl & (aligns[pa_BUF_USR_STATS_TABLE] - 1)) {
1293 System_printf ("initPa: Pa_getBufferReq requires %d alignment for User Statistics buffer, but address is 0x%08x\n", aligns[pa_BUF_USR_STATS_TABLE], (uint32_t)memUsrStatsLnkTbl);
1294 return (-1);
1295 }
1296
1297 if (sizeof(memUsrStatsLnkTbl) < sizes[pa_BUF_USR_STATS_TABLE]) {
1298 System_printf ("initPa: Pa_getBufferReq requires %d bytes for User Statistics buffer, have only %d\n", sizes[pa_BUF_USR_STATS_TABLE], sizeof(memUsrStatsLnkTbl));
1299 return (-1);
1300 }
1301
1302 bases[pa_BUF_USR_STATS_TABLE] = (void *)memUsrStatsLnkTbl;
1303
1304
1305 /* The fifth buffer is the Virtual Link table */
1306 if ((uint32_t)memVLinkRam & (aligns[pa_BUF_VLINK_TABLE] - 1)) {
1307 System_printf ("initPa: Pa_getBufferReq requires %d alignment for Virtual Link buffer, but address is 0x%08x\n", aligns[pa_BUF_VLINK_TABLE], (uint32_t)memVLinkRam);
1308 return (-1);
1309 }
1310
1311 if (sizeof(memVLinkRam) < sizes[pa_BUF_VLINK_TABLE]) {
1312 System_printf ("initPa: Pa_getBufferReq requires %d bytes for Virtual Link buffer, have only %d\n", sizes[pa_BUF_VLINK_TABLE], sizeof(memVLinkRam));
1313 return (-1);
1314 }
1315
1316 bases[pa_BUF_VLINK_TABLE] = (void *)memVLinkRam;
1317
1318#ifdef NSS_GEN2
1319
1320 /* The 6th buffer is the ACL table */
1321 if ((uint32_t)memAclRam & (aligns[pa_BUF_ACL_TABLE] - 1)) {
1322 System_printf ("initPa: Pa_getBufferReq requires %d alignment for ACL buffer, but address is 0x%08x\n", aligns[pa_BUF_ACL_TABLE], (uint32_t)memAclRam);
1323 return (-1);
1324 }
1325
1326 if (sizeof(memAclRam) < sizes[pa_BUF_ACL_TABLE]) {
1327 System_printf ("initPa: Pa_getBufferReq requires %d bytes for ACL buffer, have only %d\n", sizes[pa_BUF_ACL_TABLE], sizeof(memAclRam));
1328 return (-1);
1329 }
1330
1331 bases[pa_BUF_ACL_TABLE] = (void *)memAclRam;
1332
1333 /* The 7th buffer is the Flow Cache table */
1334 if ((uint32_t)memFcRam & (aligns[pa_BUF_FC_TABLE] - 1)) {
1335 System_printf ("initPa: Pa_getBufferReq requires %d alignment for Flow Cache buffer, but address is 0x%08x\n", aligns[pa_BUF_FC_TABLE], (uint32_t)memFcRam);
1336 return (-1);
1337 }
1338
1339 if (sizeof(memFcRam) < sizes[pa_BUF_FC_TABLE]) {
1340 System_printf ("initPa: Pa_getBufferReq requires %d bytes for Flow Cache buffer, have only %d\n", sizes[pa_BUF_FC_TABLE], sizeof(memFcRam));
1341 return (-1);
1342 }
1343
1344 bases[pa_BUF_FC_TABLE] = (void *)memFcRam;
1345
1346 /* The 8th buffer is the Eoam table */
1347 if ((uint32_t)memEoamRam & (aligns[pa_BUF_EOAM_TABLE] - 1)) {
1348 System_printf ("initPa: Pa_getBufferReq requires %d alignment for Ethernet OAM buffer, but address is 0x%08x\n", aligns[pa_BUF_EOAM_TABLE], (uint32_t)memEoamRam);
1349 return (-1);
1350 }
1351
1352 if (sizeof(memEoamRam) < sizes[pa_BUF_EOAM_TABLE]) {
1353 System_printf ("initPa: Pa_getBufferReq requires %d bytes for Ethernet OAM buffer, have only %d\n", sizes[pa_BUF_EOAM_TABLE], sizeof(memEoamRam));
1354 return (-1);
1355 }
1356
1357 bases[pa_BUF_EOAM_TABLE] = (void *)memEoamRam;
1358
1359 /* set default RA system configuration */
1360 raCfg.ipv4MinPktSize = 28; /* 20-byte IPv4 header plus 8-byte payload */
1361 raCfg.numCxts = 250;
1362 raCfg.cxtDiscardThresh = 250;
1363 raCfg.nodeDiscardThresh = 1000;
1364 raCfg.cxtTimeout = 60000;
1365 raCfg.clockRate = 350;
1366 raCfg.heapRegionThresh = 250;
1367#ifndef SIMULATOR_SUPPORT
1368 raCfg.heapBase[0] = 0xFF000000UL;
1369#else
1370 raCfg.heapBase[0] = 0x80000000UL;
1371#endif
1372
1373 paCfg.raCfg = &raCfg;
1374#endif
1375 paCfg.initTable = TRUE;
1376#ifndef SIMULATOR_SUPPORT
1377 paCfg.initDefaultRoute = TRUE;
1378#endif
1379 paCfg.baseAddr = CSL_NETCP_CFG_REGS;
1380 paCfg.sizeCfg = &paSize;
1381
1382#if RM
1383 paCfg.rmServiceHandle = rmServiceHandle;
1384#endif /* RM */
1385
1386 ret = Pa_create (&paCfg, bases, &tFramework.passHandle);
1387 if (ret != pa_OK) {
1388 System_printf ("initPa: Pa_create returned with error code %d\n", ret);
1389 return (-1);
1390 }
1391
1392 /* Download the firmware */
1393 if (downloadPaFirmware ())
1394 return (-1);
1395
1396 /* Enable Timer for timestamp */
1397 memset(&tsCfg, 0, sizeof(paTimestampConfig_t));
1398 tsCfg.enable = TRUE;
1399 tsCfg.factor = pa_TIMESTAMP_SCALER_FACTOR_2;
1400
1401 if(Pa_configTimestamp(tFramework.passHandle, &tsCfg) != pa_OK)
1402 return (-1);
1403
1404 return (0);
1405
1406}
1407
1408int setupQmMem (void)
1409{
1410 Qmss_InitCfg qmssInitConfig;
1411 Qmss_MemRegInfo memInfo;
1412 Cppi_DescCfg descCfg;
1413 int32_t result;
1414 int n;
1415
1416 memset (&qmssInitConfig, 0, sizeof (Qmss_InitCfg));
1417 memset (memDescRam, 0, sizeof (memDescRam));
1418
1419 //qmssInitConfig.linkingRAM0Base = utilgAddr((uint32_t)memLinkRam);
1420 qmssInitConfig.linkingRAM0Base = 0;
1421 qmssInitConfig.linkingRAM0Size = TF_NUM_DESC;
1422 qmssInitConfig.linkingRAM1Base = 0;
1423 qmssInitConfig.maxDescNum = TF_NUM_DESC;
1424
1425 qmssInitConfig.pdspFirmware[0].pdspId = Qmss_PdspId_PDSP1;
1426#ifdef _LITTLE_ENDIAN
1427 qmssInitConfig.pdspFirmware[0].firmware = (void *) &acc48_le;
1428 qmssInitConfig.pdspFirmware[0].size = sizeof (acc48_le);
1429#else
1430 qmssInitConfig.pdspFirmware[0].firmware = (void *) &acc48_be;
1431 qmssInitConfig.pdspFirmware[0].size = sizeof (acc48_be);
1432#endif
1433
1434#if RM
1435 if (rmServiceHandle)
1436 qmssGblCfgParams.qmRmServiceHandle = rmServiceHandle;
1437#endif
1438
1439 result = Qmss_init (&qmssInitConfig, &qmssGblCfgParams);
1440 if (result != QMSS_SOK) {
1441 System_printf ("setupQmMem: Qmss_Init failed with error code %d\n", result);
1442 return (-1);
1443 }
1444
1445 result = Qmss_start();
1446 if (result != QMSS_SOK) {
1447 System_printf ("setupQmMem: Qmss_start failed with error code %d\n", result);
1448 return (-1);
1449 }
1450
1451 /* Setup a single memory region for descriptors */
1452 memset(&memInfo, 0, sizeof(memInfo));
1453 memset (memDescRam, 0, sizeof(memDescRam));
1454 memInfo.descBase = (uint32_t *)utilgAddr((uint32_t)memDescRam);
1455 memInfo.descSize = TF_SIZE_DESC;
1456 memInfo.descNum = TF_NUM_DESC;
1457 memInfo.manageDescFlag = Qmss_ManageDesc_MANAGE_DESCRIPTOR;
1458 memInfo.memRegion = Qmss_MemRegion_MEMORY_REGION0;
1459 memInfo.startIndex = 0;
1460
1461 result = Qmss_insertMemoryRegion (&memInfo);
1462 if (result < QMSS_SOK) {
1463 System_printf ("setupQmMem: Qmss_insertMemoryRegion returned error code %d\n", result);
1464 return (-1);
1465 }
1466
1467
1468 /* Initialize the descriptors. This function opens a general
1469 * purpose queue and intializes the memory from region 0, placing
1470 * the initialized descriptors onto that queue */
1471 memset(&descCfg, 0, sizeof(descCfg));
1472 descCfg.queueGroup = 0;
1473 descCfg.memRegion = Qmss_MemRegion_MEMORY_REGION0;
1474 descCfg.descNum = TF_NUM_DESC;
1475 descCfg.destQueueNum = TF_Q_FREE_DESC;
1476 descCfg.queueType = Qmss_QueueType_GENERAL_PURPOSE_QUEUE;
1477 descCfg.initDesc = Cppi_InitDesc_INIT_DESCRIPTOR;
1478 descCfg.descType = Cppi_DescType_HOST;
1479 descCfg.returnQueue.qNum = QMSS_PARAM_NOT_SPECIFIED;
1480 descCfg.returnQueue.qMgr = 0;
1481 descCfg.epibPresent = Cppi_EPIB_EPIB_PRESENT;
1482
1483 //descCfg.cfg.host.returnPolicy = Cppi_ReturnPolicy_RETURN_ENTIRE_PACKET;
1484 descCfg.cfg.host.returnPolicy = Cppi_ReturnPolicy_RETURN_BUFFER;
1485 descCfg.cfg.host.psLocation = Cppi_PSLoc_PS_IN_DESC;
1486
1487 tFramework.QfreeDesc = Cppi_initDescriptor (&descCfg, (uint32_t *)&n);
1488
1489 if (n != descCfg.descNum) {
1490 System_printf ("setupQmMem: expected %d descriptors to be initialized, only %d are initialized\n", descCfg.descNum, n);
1491 return (-1);
1492 }
1493
1494 return (0);
1495
1496}
1497
/* Initialize the NETSS-local QMSS subsystem, register the local
 * descriptor region and place the initialized local descriptors on the
 * local free descriptor queue (TF_Q_LOC_FREE_DESC).  This is only
 * required when the NETSS internal PKTDMA is used; otherwise it is a
 * no-op.  Returns 0 on success, -1 on failure. */
int setupPassQmMem (void)
{

#ifdef NETSS_INTERNAL_PKTDMA

    Qmss_InitCfg qmssInitConfig;
    Qmss_StartCfg qmssStartConfig;
    Qmss_MemRegInfo memInfo;
    Cppi_DescCfg descCfg;
    int32_t result;
    int n;

    memset (&qmssInitConfig, 0, sizeof (Qmss_InitCfg));
    memset (&qmssStartConfig, 0, sizeof (Qmss_StartCfg));

    //qmssInitConfig.linkingRAM0Base = utilgAddr((uint32_t)memLinkRam); // It should be 0x0 for internal RAM
    qmssInitConfig.linkingRAM0Base = 0;
    qmssInitConfig.linkingRAM0Size = TF_NUM_DESC;
    qmssInitConfig.linkingRAM1Base = 0;
    qmssInitConfig.maxDescNum      = TF_NUM_DESC;

    // Supply virtual-2-physical conversion functions
    qmssNetssGblCfgParams.virt2Phy     = Netss_qmssVirtToPhy;
    qmssNetssGblCfgParams.phy2Virt     = Netss_qmssPhyToVirt;
    qmssNetssGblCfgParams.virt2PhyDesc = Netss_qmssConvertDescVirtToPhy;
    qmssNetssGblCfgParams.phy2VirtDesc = Netss_qmssConvertDescPhyToVirt;

    result = Qmss_initSubSys (&tFramework.tfPaQmssHandle, Qmss_SubSys_NETSS, &qmssInitConfig, &qmssNetssGblCfgParams);
    if (result != QMSS_SOK)  {
        System_printf ("setupPassQmMem: Qmss_Init failed with error code %d\n", result);
        return (-1);
    }

    result = Qmss_startSubSysCfg(&tFramework.tfPaQmssHandle, Qmss_SubSys_NETSS, &qmssStartConfig);
    if (result != QMSS_SOK)  {
        System_printf ("setupPassQmMem: Qmss_start failed with error code %d\n", result);
        return (-1);
    }

    /* Setup a single memory region for descriptors */
    memset(&memInfo, 0, sizeof(memInfo));
    memset (passDescRam, 0, TF_SIZE_DESC*TF_NUM_DESC);
    memInfo.descBase       = (uint32_t *)(passDescRam);
    memInfo.descSize       = TF_SIZE_DESC;
    memInfo.descNum        = TF_NUM_DESC;
    memInfo.manageDescFlag = Qmss_ManageDesc_MANAGE_DESCRIPTOR;
    memInfo.memRegion      = Qmss_MemRegion_MEMORY_REGION0;
    memInfo.startIndex     = 0;

    result = Qmss_insertMemoryRegionSubSys (tFramework.tfPaQmssHandle, &memInfo);
    if (result < QMSS_SOK)  {
        /* Fixed misleading log: this is setupPassQmMem calling the SubSys API */
        System_printf ("setupPassQmMem: Qmss_insertMemoryRegionSubSys returned error code %d\n", result);
        return (-1);
    }

    /* Initialize the descriptors. This function opens a general
     * purpose queue and intializes the memory from region 0, placing
     * the initialized descriptors onto that queue */
    memset(&descCfg, 0, sizeof(descCfg));
    descCfg.queueGroup        = 0;
    descCfg.memRegion         = Qmss_MemRegion_MEMORY_REGION0;
    descCfg.descNum           = TF_NUM_DESC;
    descCfg.destQueueNum      = TF_Q_LOC_FREE_DESC;
    descCfg.queueType         = Qmss_QueueType_GENERAL_PURPOSE_QUEUE;
    descCfg.initDesc          = Cppi_InitDesc_INIT_DESCRIPTOR;
    descCfg.descType          = Cppi_DescType_HOST;
    descCfg.returnQueue.qNum  = QMSS_PARAM_NOT_SPECIFIED;
    descCfg.returnQueue.qMgr  = 0;
    descCfg.epibPresent       = Cppi_EPIB_EPIB_PRESENT;

    //descCfg.cfg.host.returnPolicy     = Cppi_ReturnPolicy_RETURN_ENTIRE_PACKET;
    descCfg.cfg.host.returnPolicy     = Cppi_ReturnPolicy_RETURN_BUFFER;
    descCfg.cfg.host.psLocation       = Cppi_PSLoc_PS_IN_DESC;

    /* n receives the number of descriptors actually initialized */
    tFramework.QLocfreeDesc = Cppi_initDescriptorSubSys (tFramework.tfPaQmssHandle, &descCfg, (uint32_t *)&n);

    if (n != descCfg.descNum)  {
        System_printf ("setupPassQmMem: expected %d descriptors to be initialized, only %d are initialized\n", descCfg.descNum, n);
        return (-1);
    }
#endif

    return (0);

}
1583
1584int setupCpdma (void)
1585{
1586 Cppi_CpDmaInitCfg cpdmaCfg;
1587 Cppi_RxChInitCfg rxChCfg;
1588 Cppi_TxChInitCfg txChCfg;
1589#if RM
1590 Cppi_StartCfg cppiStartCfg;
1591#endif
1592
1593 int32_t result;
1594 int i;
1595 uint8_t isAlloc;
1596
1597 result = Cppi_init (&cppiGblCfgParams);
1598 if (result != CPPI_SOK) {
1599 System_printf ("setupCpdma: cpp_Init returned error %d\n", result);
1600 return (-1);
1601 }
1602
1603 memset(&cpdmaCfg, 0, sizeof(Cppi_CpDmaInitCfg));
1604 cpdmaCfg.dmaNum = Cppi_CpDma_NETCP_CPDMA;
1605
1606 tFramework.tfPaCppiHandle = Cppi_open (&cpdmaCfg);
1607 if (tFramework.tfPaCppiHandle == NULL) {
1608 System_printf ("setupCpdma: cppi_Open returned NULL PA cppi handle\n");
1609 return (-1);
1610 }
1611
1612#if RM
1613 if (rmServiceHandle)
1614 {
1615 cppiStartCfg.rmServiceHandle = rmServiceHandle;
1616 Cppi_startCfg(&cppiStartCfg);
1617 }
1618#endif
1619
1620
1621#ifdef NETSS_INTERNAL_PKTDMA
1622
1623 memset(&cpdmaCfg, 0, sizeof(Cppi_CpDmaInitCfg));
1624 cpdmaCfg.dmaNum = Cppi_CpDma_NETCP_LOCAL_CPDMA;
1625 cpdmaCfg.qm0BaseAddress = 0xff1b8000; // will CSL definition
1626 cpdmaCfg.qm1BaseAddress = 0xff1b8400; // will CSL definition
1627 cpdmaCfg.qm2BaseAddress = 0xff1b8000; // will CSL definition
1628 cpdmaCfg.qm3BaseAddress = 0xff1b8400; // will CSL definition
1629
1630 tFramework.tfPaLocCppiHandle = Cppi_open (&cpdmaCfg);
1631 if (tFramework.tfPaLocCppiHandle == NULL) {
1632 System_printf ("setupCpdma: cppi_Open returned NULL PA local cppi handle\n");
1633 return (-1);
1634 }
1635
1636#endif
1637
1638 /* Open all rx channels */
1639 rxChCfg.rxEnable = Cppi_ChState_CHANNEL_DISABLE;
1640
1641 for (i = 0; i < TF_PA_NUM_RX_CPDMA_CHANNELS; i++) {
1642 rxChCfg.channelNum = i;
1643 tFramework.tfPaRxChHnd[i] = Cppi_rxChannelOpen (tFramework.tfPaCppiHandle, &rxChCfg, &isAlloc);
1644
1645 if (tFramework.tfPaRxChHnd[i] == NULL) {
1646 System_printf ("setupCpdma: cppi_RxChannelOpen returned NULL handle for channel number %d\n", i);
1647 return (-1);
1648 }
1649
1650 Cppi_channelEnable (tFramework.tfPaRxChHnd[i]);
1651 }
1652
1653 /* Open all tx channels. */
1654 txChCfg.priority = 2;
1655 txChCfg.txEnable = Cppi_ChState_CHANNEL_DISABLE;
1656 txChCfg.filterEPIB = FALSE;
1657 txChCfg.filterPS = FALSE;
1658 txChCfg.aifMonoMode = FALSE;
1659
1660
1661 for (i = 0; i < TF_PA_NUM_TX_CPDMA_CHANNELS; i++) {
1662 txChCfg.channelNum = i;
1663 tFramework.tfPaTxChHnd[i] = Cppi_txChannelOpen (tFramework.tfPaCppiHandle, &txChCfg, &isAlloc);
1664
1665 if (tFramework.tfPaTxChHnd[i] == NULL) {
1666 System_printf ("setupCpdma: cppi_TxChannelOpen returned NULL handle for channel number %d\n", i);
1667 return (-1);
1668 }
1669
1670 Cppi_channelEnable (tFramework.tfPaTxChHnd[i]);
1671 }
1672
1673 /* Clear CPPI Loobpack bit in PASS CDMA Global Emulation Control Register */
1674 Cppi_setCpdmaLoopback(tFramework.tfPaCppiHandle, 0);
1675
1676#ifdef NETSS_INTERNAL_PKTDMA
1677
1678 /* Open all local rx channels */
1679 rxChCfg.rxEnable = Cppi_ChState_CHANNEL_DISABLE;
1680
1681 for (i = 0; i < TF_PA_NUM_RX_CPDMA_CHANNELS; i++) {
1682 rxChCfg.channelNum = i;
1683 tFramework.tfPaLocRxChHnd[i] = Cppi_rxChannelOpen (tFramework.tfPaLocCppiHandle, &rxChCfg, &isAlloc);
1684
1685 if (tFramework.tfPaLocRxChHnd[i] == NULL) {
1686 System_printf ("setupCpdma: cppi_RxChannelOpen returned NULL handle for local rx channel number %d\n", i);
1687 return (-1);
1688 }
1689
1690 Cppi_channelEnable (tFramework.tfPaLocRxChHnd[i]);
1691 }
1692
1693 /* Open all locL tx channels. */
1694 txChCfg.priority = 2;
1695 txChCfg.txEnable = Cppi_ChState_CHANNEL_DISABLE;
1696 txChCfg.filterEPIB = FALSE;
1697 txChCfg.filterPS = FALSE;
1698 txChCfg.aifMonoMode = FALSE;
1699
1700
1701 for (i = 0; i < TF_PA_NUM_TX_CPDMA_CHANNELS; i++) {
1702 txChCfg.channelNum = i;
1703 tFramework.tfPaLocTxChHnd[i] = Cppi_txChannelOpen (tFramework.tfPaLocCppiHandle, &txChCfg, &isAlloc);
1704
1705 if (tFramework.tfPaLocTxChHnd[i] == NULL) {
1706 System_printf ("setupCpdma: cppi_TxChannelOpen returned NULL handle for local tx channel number %d\n", i);
1707 return (-1);
1708 }
1709
1710 Cppi_channelEnable (tFramework.tfPaLocTxChHnd[i]);
1711 }
1712
1713 /* Clear CPPI Loobpack bit in PASS CDMA Global Emulation Control Register */
1714 Cppi_setCpdmaLoopback(tFramework.tfPaLocCppiHandle, 0);
1715
1716#endif
1717
1718 return (0);
1719
1720}
1721
1722
1723/* Setup all the queues used in the example */
1724int setupQueues (void)
1725{
1726 int i;
1727 uint8_t isAlloc;
1728 Qmss_Queue q;
1729 Cppi_HostDesc *hd;
1730
1731
1732 /* The 10 PA transmit queues (corresponding to the 10 tx cdma channels */
1733 for (i = 0; i < TF_PA_NUM_TX_CPDMA_CHANNELS; i++) {
1734
1735 tFramework.QPaTx[i] = Qmss_queueOpen (Qmss_QueueType_PASS_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAlloc);
1736
1737 if (tFramework.QPaTx[i] < 0) {
1738 System_printf ("setupQueues: Qmss_queueOpen failed for PA transmit queue number %d\n", TF_PA_TX_QUEUE_BASE+i);
1739 return (-1);
1740 }
1741
1742 Qmss_setQueueThreshold (tFramework.QPaTx[i], 1, 1);
1743 }
1744
1745
1746 /* The default return queue for descriptors with linked buffers */
1747 tFramework.QDefRet = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_DEF_RET_Q, &isAlloc);
1748 if (tFramework.QDefRet < 0) {
1749 System_printf ("setupQueues: Qmss_queueOpen failed for queue %d\n", TF_DEF_RET_Q);
1750 return (-1);
1751 }
1752
1753 /* The queues with attached buffers */
1754 tFramework.QLinkedBuf1 = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LINKED_BUF_Q1, &isAlloc);
1755
1756 if (tFramework.QLinkedBuf1 < 0) {
1757 System_printf ("setupQueues: Qmss_queueOpen failed for queue %d\n", TF_LINKED_BUF_Q1);
1758 return (-1);
1759 }
1760
1761 tFramework.QLinkedBuf2 = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LINKED_BUF_Q2, &isAlloc);
1762
1763 if (tFramework.QLinkedBuf2 < 0) {
1764 System_printf ("SetupQueues: Qmss_queueOpen failed for queue %d\n", TF_LINKED_BUF_Q2);
1765 return (-1);
1766 }
1767
1768 tFramework.QLinkedBuf3 = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LINKED_BUF_Q3, &isAlloc);
1769
1770 if (tFramework.QLinkedBuf3 < 0) {
1771 System_printf ("SetupQueues: Qmss_queueOpen failed for queue %d\n", TF_LINKED_BUF_Q3);
1772 return (-1);
1773 }
1774
1775 /* Attach buffers to the queues and push them onto the queue */
1776 q.qMgr = 0;
1777
1778 q.qNum = TF_LINKED_BUF_Q1;
1779 for (i = 0; i < TF_LINKED_BUF_Q1_NBUFS; i++) {
1780
1781 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QfreeDesc)) & ~15);
1782 if (hd == NULL) {
1783 System_printf ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QfreeDesc);
1784 return (-1);
1785 }
1786
1787 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ1[i])), sizeof(memQ1[i]));
1788 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ1[i])), sizeof(memQ1[i]));
1789 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ1[i])), sizeof(memQ1[i]));
1790 hd->nextBDPtr = (uint32_t)NULL;
1791 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1792 Qmss_queuePushDesc (tFramework.QLinkedBuf1, (Ptr)hd);
1793
1794 }
1795
1796 q.qNum = TF_LINKED_BUF_Q2;
1797 for (i = 0; i < TF_LINKED_BUF_Q2_NBUFS; i++) {
1798
1799 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QfreeDesc)) & ~15);
1800 if (hd == NULL) {
1801 System_printf ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QfreeDesc);
1802 return (-1);
1803 }
1804
1805 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ2[i])), sizeof(memQ2[i]));
1806 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ2[i])), sizeof(memQ2[i]));
1807 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ2[i])), sizeof(memQ2[i]));
1808 hd->nextBDPtr = (uint32_t)NULL;
1809 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1810 Qmss_queuePushDesc (tFramework.QLinkedBuf2, (Ptr)hd);
1811
1812 }
1813
1814 q.qNum = TF_LINKED_BUF_Q3;
1815 for (i = 0; i < TF_LINKED_BUF_Q3_NBUFS; i++) {
1816
1817 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QfreeDesc)) & ~15);
1818 if (hd == NULL) {
1819 System_printf ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QfreeDesc);
1820 return (-1);
1821 }
1822
1823 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ3[i])), sizeof(memQ3[i]));
1824 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ3[i])), sizeof(memQ3[i]));
1825 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ3[i])), sizeof(memQ3[i]));
1826 hd->nextBDPtr = (uint32_t)NULL;
1827 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1828 Qmss_queuePushDesc (tFramework.QLinkedBuf3, (Ptr)hd);
1829
1830 }
1831
1832 tFramework.QCommonCmdRep = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_COMMON_CMD_REPL_Q, &isAlloc);
1833 if (tFramework.QCommonCmdRep < 0) {
1834 System_printf ("SetupQueues: Qmss_queueOpen failed for queue %d\n", TF_COMMON_CMD_REPL_Q);
1835 return (-1);
1836 }
1837
1838
1839
1840 /* General purpose queues */
1841 for (i = 0; i < TF_NUM_GEN_QUEUES; i++) {
1842
1843 tFramework.QGen[i] = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_FIRST_GEN_QUEUE + i, &isAlloc);
1844
1845 if (tFramework.QGen[i] < 0) {
1846 System_printf ("SetupQueues: Qmss_queueOpen failed for queue %d\n", TF_FIRST_GEN_QUEUE + i);
1847 return (-1);
1848 }
1849 }
1850
1851#ifdef NETSS_INTERNAL_PKTDMA
1852
1853 /* The queues with attached buffers */
1854 tFramework.QLocLinkedBuf1 = Qmss_queueOpenSubSys (tFramework.tfPaQmssHandle, Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LOC_LINKED_BUF_Q1, &isAlloc);
1855
1856 if (tFramework.QLinkedBuf1 < 0) {
1857 System_printf ("setupQueues: Qmss_queueOpenSubSys failed for queue %d\n", TF_LOC_LINKED_BUF_Q1);
1858 return (-1);
1859 }
1860
1861 tFramework.QLocLinkedBuf2 = Qmss_queueOpenSubSys (tFramework.tfPaQmssHandle, Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LOC_LINKED_BUF_Q2, &isAlloc);
1862
1863 if (tFramework.QLinkedBuf2 < 0) {
1864 System_printf ("SetupQueues: Qmss_queueOpenSubSys failed for queue %d\n", TF_LOC_LINKED_BUF_Q2);
1865 return (-1);
1866 }
1867
1868 tFramework.QLocLinkedBuf3 = Qmss_queueOpenSubSys (tFramework.tfPaQmssHandle, Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LOC_LINKED_BUF_Q3, &isAlloc);
1869
1870 if (tFramework.QLinkedBuf3 < 0) {
1871 System_printf ("SetupQueues: Qmss_queueOpenSubSys failed for queue %d\n", TF_LOC_LINKED_BUF_Q3);
1872 return (-1);
1873 }
1874
1875 /* Attach buffers to the queues and push them onto the queue */
1876 q.qMgr = 0;
1877
1878 q.qNum = TF_LOC_LINKED_BUF_Q1;
1879 for (i = 0; i < TF_LINKED_BUF_Q1_NBUFS; i++) {
1880
1881 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QLocfreeDesc)) & ~15);
1882 if (hd == NULL) {
1883 System_printf ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QLocfreeDesc);
1884 return (-1);
1885 }
1886
1887 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ1[i], sizeof(memLocQ1[i]));
1888 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ1[i], sizeof(memLocQ1[i]));
1889 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ1[i], sizeof(memLocQ1[i]));
1890 hd->nextBDPtr = NULL;
1891 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1892 Qmss_queuePushDesc (tFramework.QLocLinkedBuf1, (Ptr)hd);
1893
1894 }
1895
1896 q.qNum = TF_LOC_LINKED_BUF_Q2;
1897 for (i = 0; i < TF_LINKED_BUF_Q2_NBUFS; i++) {
1898
1899 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QLocfreeDesc)) & ~15);
1900 if (hd == NULL) {
1901 System_printf ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QLocfreeDesc);
1902 return (-1);
1903 }
1904
1905 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ2[i], sizeof(memLocQ2[i]));
1906 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ2[i], sizeof(memLocQ2[i]));
1907 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ2[i], sizeof(memLocQ2[i]));
1908 hd->nextBDPtr = NULL;
1909 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1910 Qmss_queuePushDesc (tFramework.QLocLinkedBuf2, (Ptr)hd);
1911
1912 }
1913
1914 q.qNum = TF_LOC_LINKED_BUF_Q3;
1915 for (i = 0; i < TF_LINKED_BUF_Q3_NBUFS; i++) {
1916
1917 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QLocfreeDesc)) & ~15);
1918 if (hd == NULL) {
1919 System_printf ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QLocfreeDesc);
1920 return (-1);
1921 }
1922
1923 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ3[i], sizeof(memLocQ3[i]));
1924 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ3[i], sizeof(memLocQ3[i]));
1925 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ3[i], sizeof(memLocQ3[i]));
1926 hd->nextBDPtr = NULL;
1927 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1928 Qmss_queuePushDesc (tFramework.QLocLinkedBuf3, (Ptr)hd);
1929
1930 }
1931
1932#endif
1933
1934 return (0);
1935
1936}
1937
1938/* Configure flows */
1939int setupFlows (void)
1940{
1941 Cppi_RxFlowCfg rxFlowCfg;
1942 uint8_t isAlloc;
1943 int i;
1944
1945 /* Configure Rx flow */
1946 rxFlowCfg.flowIdNum = CPPI_PARAM_NOT_SPECIFIED;
1947 rxFlowCfg.rx_dest_qnum = TF_FIRST_GEN_QUEUE + TF_NUM_GEN_QUEUES -1; /* Override in PA */
1948 rxFlowCfg.rx_dest_qmgr = 0;
1949 rxFlowCfg.rx_sop_offset = 0;
1950 rxFlowCfg.rx_ps_location = Cppi_PSLoc_PS_IN_DESC;
1951 rxFlowCfg.rx_desc_type = Cppi_DescType_HOST;
1952 rxFlowCfg.rx_error_handling = 1;
1953 rxFlowCfg.rx_psinfo_present = 1;
1954 rxFlowCfg.rx_einfo_present = 1;
1955
1956 rxFlowCfg.rx_dest_tag_lo = 0;
1957 rxFlowCfg.rx_dest_tag_hi = 0;
1958 rxFlowCfg.rx_src_tag_lo = 0;
1959 rxFlowCfg.rx_src_tag_hi = 0;
1960
1961 rxFlowCfg.rx_size_thresh0_en = 1;
1962 rxFlowCfg.rx_size_thresh1_en = 1;
1963 rxFlowCfg.rx_size_thresh2_en = 0;
1964 rxFlowCfg.rx_dest_tag_lo_sel = 4;
1965 rxFlowCfg.rx_dest_tag_hi_sel = 0;
1966 rxFlowCfg.rx_src_tag_lo_sel = 0;
1967 rxFlowCfg.rx_src_tag_hi_sel = 0;
1968
1969 rxFlowCfg.rx_fdq1_qnum = tFramework.QLinkedBuf2;
1970 rxFlowCfg.rx_fdq1_qmgr = 0;
1971 rxFlowCfg.rx_fdq0_sz0_qnum = tFramework.QLinkedBuf1;
1972 rxFlowCfg.rx_fdq0_sz0_qmgr = 0;
1973
1974 rxFlowCfg.rx_fdq3_qnum = tFramework.QLinkedBuf2;
1975 rxFlowCfg.rx_fdq3_qmgr = 0;
1976 rxFlowCfg.rx_fdq2_qnum = tFramework.QLinkedBuf2;
1977 rxFlowCfg.rx_fdq2_qmgr = 0;
1978
1979 rxFlowCfg.rx_size_thresh1 = TF_LINKED_BUF_Q2_BUF_SIZE;
1980 rxFlowCfg.rx_size_thresh0 = TF_LINKED_BUF_Q1_BUF_SIZE;
1981
1982 rxFlowCfg.rx_fdq0_sz1_qnum = tFramework.QLinkedBuf2;
1983 rxFlowCfg.rx_fdq0_sz1_qmgr = 0;
1984 rxFlowCfg.rx_size_thresh2 = 0;
1985
1986 rxFlowCfg.rx_fdq0_sz3_qnum = tFramework.QLinkedBuf3;
1987 rxFlowCfg.rx_fdq0_sz3_qmgr = 0;
1988 rxFlowCfg.rx_fdq0_sz2_qnum = tFramework.QLinkedBuf3;
1989 rxFlowCfg.rx_fdq0_sz2_qmgr = 0;
1990
1991 tFramework.tfPaFlowHnd[0] = Cppi_configureRxFlow (tFramework.tfPaCppiHandle, &rxFlowCfg, &isAlloc);
1992 if (tFramework.tfPaFlowHnd[0] == NULL) {
1993 System_printf ("setupFlows: cppi_ConfigureRxFlow returned NULL on flow 0\n");
1994 return (-1);
1995 }
1996
1997 tFramework.tfFlowNum[0] = (uint8_t)Cppi_getFlowId(tFramework.tfPaFlowHnd[0]);
1998 /* Create multiple identical flow for multi-interface testing */
1999 for ( i = 1; i < 4; i++)
2000 {
2001 rxFlowCfg.flowIdNum = CPPI_PARAM_NOT_SPECIFIED;
2002 tFramework.tfPaFlowHnd[i] = Cppi_configureRxFlow (tFramework.tfPaCppiHandle, &rxFlowCfg, &isAlloc);
2003 if (tFramework.tfPaFlowHnd[i] == NULL) {
2004 System_printf ("setupFlows: cppi_ConfigureRxFlow returned NULL on flow %d\n", i);
2005 return (-1);
2006 }
2007 tFramework.tfFlowNum[i] = (uint8_t)Cppi_getFlowId(tFramework.tfPaFlowHnd[i]);
2008 }
2009
2010 /* Rx Flow for Outer IP RA */
2011 rxFlowCfg.flowIdNum = CPPI_PARAM_NOT_SPECIFIED;
2012 rxFlowCfg.rx_dest_qnum = TF_FIRST_GEN_QUEUE + 6;
2013 rxFlowCfg.rx_src_tag_lo = 1;
2014 rxFlowCfg.rx_src_tag_lo_sel = 1;
2015
2016 tFramework.tfPaFlowHnd1 = Cppi_configureRxFlow (tFramework.tfPaCppiHandle, &rxFlowCfg, &isAlloc);
2017 if (tFramework.tfPaFlowHnd1 == NULL) {
2018 System_printf ("setupFlows: cppi_ConfigureRxFlow returned NULL on flow 1\n");
2019 return (-1);
2020 }
2021 tFramework.tfFlowNum1 = (uint8_t)Cppi_getFlowId(tFramework.tfPaFlowHnd1);
2022
2023 /* Rx Flow for Inner IP RA */
2024 rxFlowCfg.flowIdNum = CPPI_PARAM_NOT_SPECIFIED;
2025 rxFlowCfg.rx_dest_qnum = TF_FIRST_GEN_QUEUE + 7;
2026 rxFlowCfg.rx_src_tag_lo = 2;
2027 rxFlowCfg.rx_src_tag_lo_sel = 1;
2028
2029 tFramework.tfPaFlowHnd2 = Cppi_configureRxFlow (tFramework.tfPaCppiHandle, &rxFlowCfg, &isAlloc);
2030 if (tFramework.tfPaFlowHnd2 == NULL) {
2031 System_printf ("setupFlows: cppi_ConfigureRxFlow returned NULL on flow 2\n");
2032 return (-1);
2033 }
2034
2035 tFramework.tfFlowNum2 = (uint8_t)Cppi_getFlowId(tFramework.tfPaFlowHnd2);
2036
2037 #ifdef NETSS_INTERNAL_PKTDMA
2038
2039 /* Configure Local Rx flow */
2040 rxFlowCfg.flowIdNum = CPPI_PARAM_NOT_SPECIFIED;
2041 rxFlowCfg.rx_dest_qnum = 0; /* Override in PA */
2042 rxFlowCfg.rx_dest_qmgr = 0;
2043 rxFlowCfg.rx_sop_offset = 0;
2044 rxFlowCfg.rx_ps_location = Cppi_PSLoc_PS_IN_DESC;
2045 rxFlowCfg.rx_desc_type = Cppi_DescType_HOST;
2046 rxFlowCfg.rx_error_handling = 0;
2047 rxFlowCfg.rx_psinfo_present = 1;
2048 rxFlowCfg.rx_einfo_present = 1;
2049
2050 rxFlowCfg.rx_dest_tag_lo = 0;
2051 rxFlowCfg.rx_dest_tag_hi = 0;
2052 rxFlowCfg.rx_src_tag_lo = 0;
2053 rxFlowCfg.rx_src_tag_hi = 0;
2054
2055 rxFlowCfg.rx_size_thresh0_en = 1;
2056 rxFlowCfg.rx_size_thresh1_en = 1;
2057 rxFlowCfg.rx_size_thresh2_en = 0;
2058 rxFlowCfg.rx_dest_tag_lo_sel = 4;
2059 rxFlowCfg.rx_dest_tag_hi_sel = 0;
2060 rxFlowCfg.rx_src_tag_lo_sel = 0;
2061 rxFlowCfg.rx_src_tag_hi_sel = 0;
2062
2063 rxFlowCfg.rx_fdq1_qnum = TF_LOC_LINKED_BUF_Q2;
2064 rxFlowCfg.rx_fdq1_qmgr = 0;
2065 rxFlowCfg.rx_fdq0_sz0_qnum = TF_LOC_LINKED_BUF_Q1;
2066 rxFlowCfg.rx_fdq0_sz0_qmgr = 0;
2067
2068 rxFlowCfg.rx_fdq3_qnum = TF_LOC_LINKED_BUF_Q2;
2069 rxFlowCfg.rx_fdq3_qmgr = 0;
2070 rxFlowCfg.rx_fdq2_qnum = TF_LOC_LINKED_BUF_Q2;
2071 rxFlowCfg.rx_fdq2_qmgr = 0;
2072
2073 rxFlowCfg.rx_size_thresh1 = TF_LINKED_BUF_Q2_BUF_SIZE;
2074 rxFlowCfg.rx_size_thresh0 = TF_LINKED_BUF_Q1_BUF_SIZE;
2075
2076 rxFlowCfg.rx_fdq0_sz1_qnum = TF_LOC_LINKED_BUF_Q2;
2077 rxFlowCfg.rx_fdq0_sz1_qmgr = 0;
2078 rxFlowCfg.rx_size_thresh2 = 0;
2079
2080 rxFlowCfg.rx_fdq0_sz3_qnum = TF_LOC_LINKED_BUF_Q3;
2081 rxFlowCfg.rx_fdq0_sz3_qmgr = 0;
2082 rxFlowCfg.rx_fdq0_sz2_qnum = TF_LOC_LINKED_BUF_Q3;
2083 rxFlowCfg.rx_fdq0_sz2_qmgr = 0;
2084
2085 tFramework.tfPaLocFlowHnd0 = Cppi_configureRxFlow (tFramework.tfPaLocCppiHandle, &rxFlowCfg, &isAlloc);
2086 if (tFramework.tfPaLocFlowHnd0 == NULL) {
2087 System_printf ("setupFlows: cppi_ConfigureRxFlow returned NULL on local flow 0\n");
2088 return (-1);
2089 }
2090 tFramework.tfLocFlowNum = (uint8_t)Cppi_getFlowId(tFramework.tfPaLocFlowHnd0);
2091
2092 #endif
2093
2094 return (0);
2095
2096}
2097
2098/* Clear flows */
2099int clearFlows (void)
2100{
2101 int ret, i;
2102
2103 for (i = 0; i < 4; i++)
2104 {
2105 if ((ret = Cppi_closeRxFlow (tFramework.tfPaFlowHnd[i])) != CPPI_SOK)
2106 {
2107 System_printf ("clearFlows: Cppi_closeRxFlow returned error code (%d) for global flow id %d\n", ret, tFramework.tfFlowNum[i]);
2108 return (-1);
2109 }
2110 }
2111
2112 if ((ret = Cppi_closeRxFlow (tFramework.tfPaFlowHnd1)) != CPPI_SOK)
2113 {
2114 System_printf ("clearFlows: Cppi_closeRxFlow returned error code (%d) for global flow id %d\n", ret, tFramework.tfFlowNum1);
2115 return (-1);
2116 }
2117
2118 if ((ret = Cppi_closeRxFlow (tFramework.tfPaFlowHnd2)) != CPPI_SOK)
2119 {
2120 System_printf ("clearFlows: Cppi_closeRxFlow returned error code (%d) for global flow id %d\n", ret, tFramework.tfFlowNum2);
2121 return (-1);
2122 }
2123
2124 #ifdef NETSS_INTERNAL_PKTDMA
2125 if ((ret = Cppi_closeRxFlow (tFramework.tfPaLocFlowHnd0)) != CPPI_SOK)
2126 {
2127 System_printf ("clearFlows: Cppi_closeRxFlow returned error code (%d) for local flow id %d\n", ret, tFramework.tfLocFlowNum);
2128 return (-1);
2129 }
2130 #endif
2131
2132 return (0);
2133}
2134
2135int closeQueues(void) {
2136
2137 int i;
2138 tFramework_t *tf=&tFramework;
2139
2140 /* Clean and close all PASS transmit queues (corresponding to the tx cdma channels */
2141 for (i = 0; i < TF_PA_NUM_TX_CPDMA_CHANNELS; i++) {
2142 Qmss_queueEmpty (tf->QPaTx[i]);
2143 Qmss_queueClose (tf->QPaTx[i]);
2144 }
2145
2146 /* Empty the remaining queues */
2147 Qmss_queueEmpty (tf->QfreeDesc);
2148 Qmss_queueEmpty (tf->QDefRet);
2149 Qmss_queueEmpty (tf->QLinkedBuf1);
2150 Qmss_queueEmpty (tf->QLinkedBuf2);
2151 Qmss_queueEmpty (tf->QLinkedBuf3);
2152 Qmss_queueEmpty (tf->QCommonCmdRep);
2153
2154 /* Close the remaining queues */
2155 Qmss_queueClose (tf->QfreeDesc);
2156 Qmss_queueClose (tf->QDefRet);
2157 Qmss_queueClose (tf->QLinkedBuf1);
2158 Qmss_queueClose (tf->QLinkedBuf2);
2159 Qmss_queueClose (tf->QLinkedBuf3);
2160 Qmss_queueClose (tf->QCommonCmdRep);
2161
2162 /* Empty General purpose queues */
2163 for (i = 0; i < TF_NUM_GEN_QUEUES; i++) {
2164 Qmss_queueEmpty (tf->QGen[i]);
2165 Qmss_queueClose (tf->QGen[i]);
2166 }
2167
2168#ifdef NETSS_INTERNAL_PKTDMA
2169 /* Empty the remaining queues */
2170 Qmss_queueEmpty (tf->QLocfreeDesc);
2171 Qmss_queueEmpty (tf->QLocLinkedBuf1);
2172 Qmss_queueEmpty (tf->QLocLinkedBuf2);
2173 Qmss_queueEmpty (tf->QLocLinkedBuf3);
2174
2175 /* Close the remaining queues */
2176 Qmss_queueClose (tf->QLocfreeDesc);
2177 Qmss_queueClose (tf->QLocLinkedBuf1);
2178 Qmss_queueClose (tf->QLocLinkedBuf2);
2179 Qmss_queueClose (tf->QLocLinkedBuf3);
2180
2181#endif
2182
2183 return (0);
2184
2185}
2186
2187/* Clear PKTDMA*/
2188int clearCpdma (void)
2189{
2190 int i, ret;
2191
2192 /* Close the cpDma setup */
2193 for (i = 0; i < TF_PA_NUM_RX_CPDMA_CHANNELS; i++) {
2194 if ((ret = Cppi_channelClose (tFramework.tfPaRxChHnd[i])) != CPPI_SOK) {
2195 System_printf ("clearCpdma: Cppi_channelClose returned error code (%d) for PASS rx channel %d\n", ret, i);
2196 return (-1);
2197 }
2198 }
2199 for (i = 0; i < TF_PA_NUM_TX_CPDMA_CHANNELS; i++) {
2200 if ((ret = Cppi_channelClose (tFramework.tfPaTxChHnd[i])) != CPPI_SOK) {
2201 System_printf ("clearCpdma: Cppi_channelClose returned error code (%d) for PASS tx channel %d\n", ret, i);
2202 return (-1);
2203 }
2204 }
2205
2206#ifdef NETSS_INTERNAL_PKTDMA
2207
2208 /* Close the local cpDma setup */
2209 for (i = 0; i < TF_PA_NUM_RX_CPDMA_CHANNELS; i++) {
2210 if ((ret = Cppi_channelClose (tFramework.tfPaLocRxChHnd[i])) != CPPI_SOK) {
2211 System_printf ("clearCpdma: Cppi_channelClose returned error code (%d) for PASS local rx channel %d\n", ret, i);
2212 return (-1);
2213 }
2214 }
2215 for (i = 0; i < TF_PA_NUM_TX_CPDMA_CHANNELS; i++) {
2216 if ((ret = Cppi_channelClose (tFramework.tfPaLocTxChHnd[i])) != CPPI_SOK) {
2217 System_printf ("clearCpdma: Cppi_channelClose returned error code (%d) for PASS local tx channel %d\n", ret, i);
2218 return (-1);
2219 }
2220 }
2221
2222#endif
2223
2224 return (0);
2225
2226}
2227
2228
2229/* The QM/CPDMA are cleared */
2230int clearQm(void)
2231{
2232 int result;
2233
2234 /* clear the flows */
2235 if (clearFlows ()) {
2236 System_printf ("clearQm: clearFlows failed\n");
2237 return (-1);
2238 }
2239
2240 /* Close the queues that were setup */
2241 if (closeQueues()) {
2242 System_printf ("clearQm: closeQueues failed\n");
2243 return (-1);
2244 }
2245
2246 if (clearCpdma ()) {
2247 System_printf ("clearQm: clearCpdma failed\n");
2248 return (-1);
2249 }
2250
2251 /* Free the memory regions */
2252 if ((result = Qmss_removeMemoryRegion (Qmss_MemRegion_MEMORY_REGION0, 0)) != QMSS_SOK)
2253 {
2254 System_printf ("clearQm: Remove QMSS memory region error code : %d\n", result);
2255 return (-1);
2256 }
2257
2258#ifdef NETSS_INTERNAL_PKTDMA
2259
2260 /* Free the PASS Local QMSS memory regions */
2261 if ((result = Qmss_removeMemoryRegionSubSys (Qmss_SubSys_NETSS, Qmss_MemRegion_MEMORY_REGION0, 0)) != QMSS_SOK)
2262 {
2263 System_printf ("clearQm: Remove PASS internal QMSS memory region error code : %d\n", result);
2264 return (-1);
2265 }
2266
2267 /* Exit QMSS */
2268 if ((result = Qmss_exitSubSys (&tFramework.tfPaQmssHandle)) != QMSS_SOK)
2269 {
2270 System_printf ("clearQm: PASS internal QMSS_exit error code : %d\n", result);
2271 return (-1);
2272 }
2273
2274#endif
2275
2276 /* Exit QMSS */
2277 if ((result = Qmss_exit ()) != QMSS_SOK)
2278 {
2279 System_printf ("clearQm: QMSS_exit error code : %d\n", result);
2280 return (-1);
2281 }
2282
2283 return (0);
2284
2285}
2286
2287/* The QM/CPDMA are setup */
2288int initQm (void)
2289{
2290 if (setupQmMem()) {
2291 System_printf ("initQm: setupQmMem failed\n");
2292 return (-1);
2293 }
2294
2295 if (setupPassQmMem()) {
2296 System_printf ("initQm: setupPassQmMem failed\n");
2297 return (-1);
2298 }
2299
2300 if (setupCpdma ()) {
2301 System_printf ("initQm: setupCpdma failed\n");
2302 return (-1);
2303 }
2304
2305 if (setupQueues ()) {
2306 System_printf ("initQm: setupQueues failed\n");
2307 return (-1);
2308 }
2309
2310 if (setupFlows ()) {
2311 System_printf ("initQm: setupFlows failed\n");
2312 return (-1);
2313 }
2314
2315 return (0);
2316
2317}
2318
2319
/* Semaphore setup placeholder: the PA-table semaphores are no longer used,
 * so this is a no-op kept for the init sequence's sake.  Always returns 0. */
int initSems (void)
{
#if 0
    /* TBD: no longer used -- retained for reference */
    Semaphore_Params params;
    Error_Block eb;

    Semaphore_Params_init (&params);
    params.mode = Semaphore_Mode_BINARY;
#endif

    return (0);
}
2334
/***************************************************************************************
 * FUNCTION PURPOSE: Power up PA subsystem
 ***************************************************************************************
 * DESCRIPTION: this function powers up the PA subsystem domains: it enables the
 *              NETCP power domain, enables the clocks of the PA, CPGMAC and SA
 *              modules, and kicks off the PSC state transition.  On K2L the OSR
 *              domain is brought up as well.  No return value; must be called
 *              before any PASS register access.
 ***************************************************************************************/
void passPowerUp (void)
{

    /* PASS power domain is turned OFF by default. It needs to be turned on before doing any
     * PASS device register access. This not required for the simulator. */

    /* Set PASS Power domain to ON */
    CSL_PSC_enablePowerDomain (CSL_PSC_PD_NETCP);

    /* Enable the clocks for PASS modules (packet accelerator, gigabit MAC, security accelerator) */
    CSL_PSC_setModuleNextState (CSL_PSC_LPSC_PA, PSC_MODSTATE_ENABLE);
    CSL_PSC_setModuleNextState (CSL_PSC_LPSC_CPGMAC, PSC_MODSTATE_ENABLE);
    CSL_PSC_setModuleNextState (CSL_PSC_LPSC_SA, PSC_MODSTATE_ENABLE);

    /* Start the state transition */
    CSL_PSC_startStateTransition (CSL_PSC_PD_NETCP);

    /* Wait until the state transition process is completed.
     * NOTE(review): completion is assumed after a fixed cycle delay; the
     * explicit poll of CSL_PSC_isStateTransitionDone is commented out --
     * confirm the delay is sufficient at all target clock rates. */
    utilCycleDelay (1000);
    //while (!CSL_PSC_isStateTransitionDone (CSL_PSC_PD_NETCP));

#ifdef SOC_K2L
    /* K2L only: the OSR power domain is also required */
    CSL_PSC_enablePowerDomain (CSL_PSC_PD_OSR);

    /* Enable the clocks for OSR modules */
    CSL_PSC_setModuleNextState (CSL_PSC_LPSC_OSR, PSC_MODSTATE_ENABLE);

    /* Start the state transition */
    CSL_PSC_startStateTransition (CSL_PSC_PD_OSR);

    /* Wait until the state transition process is completed (fixed delay, as above). */
    utilCycleDelay (1000);
#endif

}
2375
2376/* Function to clear the command set use index and exception index use for PA */
2377void clearPaInfo(void)
2378{
2379 memset(&tFramework, 0, sizeof(tFramework_t));
2380}
2381
2382/** ============================================================================
2383 * @n@b Init_Switch
2384 *
2385 * @b Description
2386 * @n This API sets up the ethernet switch subsystem and its Address Lookup
2387 * Engine (ALE) in "Switch" mode.
2388 *
2389 * @param[in]
2390 * @n mtu Maximum Frame length to configure on the switch.
2391 *
2392 * @return
2393 * @n None
2394 * =============================================================================
2395 */
2396void initSwitch (uint32_t mtu)
2397{
2398 CSL_CPSW_PORTSTAT portStatCfg;
2399
2400 /* Enable the CPPI port, i.e., port 0 that does all
2401 * the data streaming in/out of EMAC.
2402 */
2403 CSL_CPSW_enablePort0 ();
2404 CSL_CPSW_disableVlanAware ();
2405 CSL_CPSW_setPort0VlanReg (0, 0, 0);
2406 CSL_CPSW_setPort0RxMaxLen (mtu);
2407
2408 /* Enable statistics on both the port groups:
2409 *
2410 * MAC Sliver ports - Port 1, Port 2
2411 * CPPI Port - Port 0
2412 */
2413 #if defined(SOC_K2K) || defined(SOC_K2H)
2414 portStatCfg.p0AStatEnable = 1;
2415 portStatCfg.p0BStatEnable = 1;
2416 portStatCfg.p1StatEnable = 1;
2417 portStatCfg.p2StatEnable = 1;
2418 #else
2419 portStatCfg.p0StatEnable = 1;
2420 portStatCfg.p1StatEnable = 1;
2421 portStatCfg.p2StatEnable = 1;
2422 portStatCfg.p3StatEnable = 1;
2423 portStatCfg.p4StatEnable = 1;
2424 portStatCfg.p5StatEnable = 1;
2425 portStatCfg.p6StatEnable = 1;
2426 portStatCfg.p7StatEnable = 1;
2427 portStatCfg.p8StatEnable = 1;
2428 #endif
2429 CSL_CPSW_setPortStatsEnableReg (&portStatCfg);
2430
2431 /* Setup the Address Lookup Engine (ALE) Configuration:
2432 * (1) Enable ALE.
2433 * (2) Clear stale ALE entries.
2434 * (3) Disable VLAN Aware lookups in ALE since
2435 * we are not using VLANs by default.
2436 * (4) No Flow control
2437 * (5) Configure the Unknown VLAN processing
2438 * properties for the switch, i.e., which
2439 * ports to send the packets to.
2440 */
2441 CSL_CPSW_enableAle ();
2442 CSL_CPSW_clearAleTable ();
2443
2444 CSL_CPSW_disableAleVlanAware ();
2445 CSL_CPSW_disableAleTxRateLimit ();
2446 CSL_CPSW_setAlePrescaleReg (125000000u/1000u);
2447 CSL_CPSW_setAleUnkownVlanReg (7, 3, 3, 7);
2448
2449 /* Done with switch configuration */
2450 return;
2451}
2452
2453/* Initialize the test framework */
2454int setupTestFramework (void)
2455{
2456 GateHwi_Params prms;
2457
2458 /* Create the HW disable gate. It is used by QM call backs */
2459 GateHwi_Params_init(&prms);
2460 tFramework.gateHwi = GateHwi_create(&prms, NULL);
2461
2462 /* Power up PA sub-systems */
2463 passPowerUp();
2464
2465 /* Clear the logs */
2466 clearPaInfo();
2467
2468#if RM
2469 /* Setup the RM client */
2470 if (setupRm ())
2471 {
2472 System_printf ("setupTestFramework: setupRm returned error, exiting\n");
2473 System_flush();
2474 return (-1);
2475 }
2476#endif
2477
2478 /* Setup the semaphores used for access to the PA tables.
2479 * This has to be done before the PA is initialized */
2480 if (initSems()) {
2481 System_printf ("setupTestFramework: initQm returned error, exiting\n");
2482 return (-1);
2483 }
2484
2485 /* Create the PA driver instance */
2486 if (initPa()) {
2487 System_printf ("setupTestFramework: initPa returned error, exiting\n");
2488 return (-1);
2489 }
2490
2491 /* Setup the QM with associated buffers and descriptors */
2492 if (initQm()) {
2493 System_printf ("setupTestFramework: initQm returned error, exiting\n");
2494 return (-1);
2495 }
2496
2497 return (0);
2498
2499}
2500
/* Clear the Test Framework.  Tears down the QM/CPDMA resources; the QM
 * cleanup's own diagnostics report any failure, and this always returns 0. */
int clearTestFramework (void)
{
    /* QM Clean ups */
    clearQm ();

    return (0);
}
2511
/* Check that all the queues are setup correctly.
 *
 * Run between tests: restores global PASS state (global config, exception
 * routes, multi-route groups, command sets, statistics) to defaults, then
 * verifies that every descriptor/buffer owned by the framework is back on
 * its proper free queue with the expected buffer size.  Returns 0 when the
 * framework is intact, -1 if any step fails; all checks run regardless. */
int verifyTestFramework (void)
{
	int i, j;
	int count;
	int returnVal = 0;      /* sticky failure flag: set to -1, never cleared */
	Cppi_HostDesc *hd;
	uint8_t *bufp;
	uint32_t bufLen;

	int32_t linkedQ[3];
	/* Expected descriptor count and buffer size for each linked-buffer queue */
	int32_t nbufs[] = { TF_LINKED_BUF_Q1_NBUFS, TF_LINKED_BUF_Q2_NBUFS, TF_LINKED_BUF_Q3_NBUFS };
	int32_t bSize[] = { TF_LINKED_BUF_Q1_BUF_SIZE, TF_LINKED_BUF_Q2_BUF_SIZE, TF_LINKED_BUF_Q3_BUF_SIZE };
	#ifdef NETSS_INTERNAL_PKTDMA
	int32_t linkedLocQ[3];
	#endif

	linkedQ[0] = tFramework.QLinkedBuf1;
	linkedQ[1] = tFramework.QLinkedBuf2;
	linkedQ[2] = tFramework.QLinkedBuf3;

	#ifdef NETSS_INTERNAL_PKTDMA
	linkedLocQ[0] = tFramework.QLocLinkedBuf1;
	linkedLocQ[1] = tFramework.QLocLinkedBuf2;
	linkedLocQ[2] = tFramework.QLocLinkedBuf3;
	#endif

	/* clear up the global PASS settings */
	if (testCommonSetDefaultGlobalConfig())
	{
		System_printf ("verifyTestFramework: testCommonSetDefaultGlobalConfig returned error!\n");
		returnVal = -1;
	}

	/* clean up exception routes */
	if (testExceptionSetRecover())
	{
		System_printf ("verifyTestFramework: testExceptionSetRecover returned error!\n");
		returnVal = -1;
	}

	/* clean up multi-route groups */
	if (testCommonMultiRouteRecover())
	{
		System_printf ("verifyTestFramework: testCommonMultiRouteRecover returned error!\n");
		returnVal = -1;
	}

	/* clean up command sets */
	if (testCommonCmdSetRecover()) {
		System_printf ("verifyTestFramework: testCommonCmdSetRecover returned error!\n");
		returnVal = -1;
	}

	/* reset the PA statistics */
	if (testCommonClearPaStats ()) {
		System_printf ("verifyTestFramework: testCommonCmdSetRecover returned error!\n");
		returnVal = -1;
	}


	/* Verify that all of the general purpose queues are empty */
	for (i = 0; i < TF_NUM_GEN_QUEUES; i++) {
		if ((count = Qmss_getQueueEntryCount (tFramework.QGen[i])) != 0) {
			System_printf ("verifyTestFramework: Expected 0 entry count for queue %d, found %d entries\n", tFramework.QGen[i], count);
			returnVal = -1;
		}
	}

	/* Verify that the number of descriptors in the free descriptor queue is correct:
	 * total descriptors minus those parked on the three linked-buffer queues */
	count = Qmss_getQueueEntryCount (tFramework.QfreeDesc);
	if (count != (TF_NUM_DESC - TF_LINKED_BUF_Q1_NBUFS - TF_LINKED_BUF_Q2_NBUFS - TF_LINKED_BUF_Q3_NBUFS)) {
		System_printf ("verifyTestFramework: Expected %d entry count in the free descriptor queue (%d), found %d\n",
			TF_NUM_DESC - TF_LINKED_BUF_Q1_NBUFS - TF_LINKED_BUF_Q2_NBUFS - TF_LINKED_BUF_Q3_NBUFS,
			tFramework.QfreeDesc, count);
		returnVal = -1;
	}

	/* Verify the number and sizing of descriptors with linked buffers in the three queues */
	for (j = 0; j < 3; j++) {

		count = Qmss_getQueueEntryCount (linkedQ[j]);
		if (count != nbufs[j]) {
			System_printf ("verifyTestFramework: Expected %d entry count in linked buffer queue 1 (%d), found %d\n",
				nbufs[j], linkedQ[j], count);
			returnVal = -1;
		}

		/* Pop every descriptor, check its original buffer length, push it
		 * back on the tail so the queue ends up with the same contents */
		for (i = 0; i < count; i++) {
			/* Low 4 bits of the popped value are masked off (per QMSS
			 * convention they carry hint info, not address bits) */
			hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (linkedQ[j])) & ~15);
			Cppi_getOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, &bufp, &bufLen);
			Qmss_queuePush (linkedQ[j], (Ptr)hd, hd->buffLen, TF_SIZE_DESC, Qmss_Location_TAIL);

			if (bufLen != bSize[j]) {
				System_printf ("verifyTestFramework: Linked buffer queue %d (%d) expected orignal length of %d, found %d\n",
					j, linkedQ[j], bSize[j], bufLen);
				returnVal = -1;
				break;
			}
		}
	}

	#ifdef NETSS_INTERNAL_PKTDMA

	/* Verify that the number of descriptors in the free descriptor queue is correct */
	count = Qmss_getQueueEntryCount (tFramework.QLocfreeDesc);
	if (count != (TF_NUM_DESC - TF_LINKED_BUF_Q1_NBUFS - TF_LINKED_BUF_Q2_NBUFS - TF_LINKED_BUF_Q3_NBUFS)) {
		System_printf ("verifyTestFramework: Expected %d entry count in the free descriptor queue (%d), found %d\n",
			TF_NUM_DESC - TF_LINKED_BUF_Q1_NBUFS - TF_LINKED_BUF_Q2_NBUFS - TF_LINKED_BUF_Q3_NBUFS,
			tFramework.QLocfreeDesc, count);
		returnVal = -1;
	}



	/* Verify the number and sizing of descriptors with linked buffers in the three queues.
	 * NOTE(review): the error messages below print linkedQ[j] where
	 * linkedLocQ[j] would seem to be intended -- diagnostics only, but
	 * confirm which queue id should be reported. */
	for (j = 0; j < 3; j++) {

		count = Qmss_getQueueEntryCount (linkedLocQ[j]);
		if (count != nbufs[j]) {
			System_printf ("verifyTestFramework: Expected %d entry count in Loc linked buffer queue %d (%d), found %d\n",
				nbufs[j], j, linkedQ[j], count);
			returnVal = -1;
		}

		for (i = 0; i < count; i++) {
			hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (linkedLocQ[j])) & ~15);
			Cppi_getOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, &bufp, &bufLen);
			//Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
			Qmss_queuePushDesc(linkedLocQ[j], (Ptr)hd);

			if (bufLen != bSize[j]) {
				System_printf ("verifyTestFramework: Linked buffer queue %d (%d) expected orignal length of %d, found %d\n",
					j, linkedQ[j], bSize[j], bufLen);
				returnVal = -1;
				break;
			}
		}
	}

	#endif

	return (returnVal);
}
2655
2656int setupPktTestInfo(pktTestInfo_t* testInfo, int count, char* tfname)
2657{
2658 /* This is transperent function for the DSP *
2659 * Do not alter anything since everything needed is already in place */
2660 return (0);
2661}
2662
2663int setupIfPktTestInfo(ifPktTestInfo_t* testInfo, int count, char* tfname)
2664{
2665 /* This is transperent function for the DSP *
2666 * Do not alter anything since everything needed is already in place */
2667 return (0);
2668}
2669
2670/* Nothing past this point */
2671
2672
diff --git a/test/PAUnitTest/src/armv7/bios/testMain.c b/test/PAUnitTest/src/armv7/bios/testMain.c
new file mode 100755
index 0000000..863ce5a
--- /dev/null
+++ b/test/PAUnitTest/src/armv7/bios/testMain.c
@@ -0,0 +1,297 @@
1
2/*
3 *
4 * Copyright (C) 2010-2013 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 *
14 * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the
17 * distribution.
18 *
19 * Neither the name of Texas Instruments Incorporated nor the names of
20 * its contributors may be used to endorse or promote products derived
21 * from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
29 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
30 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
33 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 *
35*/
36
37#include "../../pautest.h"
38
39#ifdef __ARM_ARCH_7A__
40#include <ti/sysbios/family/arm/a15/Mmu.h>
41#endif
42
43#ifdef __ARM_ARCH_7A__
44#include <ti/csl/cslr_msmc.h>
45#endif
46
/* Number of times the whole test list is executed */
#define PAU_TEST_REPETITION  1
/* Current repetition index (volatile: written by the top-level task while test tasks run) */
volatile int iteration;


/* NULL-terminated list of tests.  Each entry pairs a test task entry point
 * with a display name and a run status updated by the test itself.
 * paTestUnconfigured appears both first and last: it is run again at the end,
 * presumably to confirm the PASS is back in its unconfigured state -- verify. */
paTest_t  paTestList[] = {
#ifdef NSS_GEN2
#ifndef __LINUX_USER_SPACE
	{ paTestEOAMFlow,       "Pa_addEoamFlow and Ethernet OAM Support",    PA_TEST_NOT_RUN },
#endif
#endif
	{ paTestUnconfigured,   "Packet reception while unconfigured",        PA_TEST_NOT_RUN },
	{ paTestSrioRouting,    "Pa_addSrio and SRIO routing",                PA_TEST_NOT_RUN },
	{ paTestL2Routing,      "Pa_addMac and L2 routing",                   PA_TEST_NOT_RUN },
	{ paTestL3Routing,      "Pa_addIp and L3 Routing",                    PA_TEST_NOT_RUN },
#ifdef NSS_GEN2
	{ paTestACL,            "Pa_addAcl and ACL filtering",                PA_TEST_NOT_RUN },
	{ paTestACLRescore,     "Pa_addAcl and ACL filtering with Rescoring", PA_TEST_NOT_RUN },
#endif
	{ paTestL4Routing,      "Pa_addPort and L4 Routing",                  PA_TEST_NOT_RUN },
	{ paTestPatchRoute,     "Blind patch and route",                      PA_TEST_NOT_RUN },
	{ paTestTxFmtRt,        "Tx checksum and routing",                    PA_TEST_NOT_RUN },
	{ paTestCustom,         "Custom routing",                             PA_TEST_NOT_RUN },
	{ paTestMultiRouting,   "Multi-routing",                              PA_TEST_NOT_RUN },
	{ paTestIPv4FragReassem,"IPv4 Fragmentation and Reassembly",          PA_TEST_NOT_RUN },
	{ paTestIPv6FragReassem,"IPv6 Fragmentation and Reassembly",          PA_TEST_NOT_RUN },
#ifdef NSS_GEN2
	{ paTestEflow,          "Egress Flow and Packet Forwarding Test",     PA_TEST_NOT_RUN },
#endif
	{ paTestUnconfigured,   "Packet reception while unconfigured",        PA_TEST_NOT_RUN },
	{ NULL,                 NULL,                                         PA_TEST_NOT_RUN }
};

/* Count of real tests (excludes the NULL terminator entry) */
#define PAU_NUM_TESTS ((sizeof(paTestList)/sizeof(paTest_t)) - 1)
81
/* Forward declaration of the top-level test task entry point */
void topLevelTest (UArg a0, UArg a1);

/* The exit code is a global. This is used so
 * the clock function can terminate the program with
 * the proper exit code */
int exitCode;

/* Creates a single task - the top level task. This is a low priority task that
 * spawns the individual tests */
#ifdef _TMS320C6X
/* C6x free-running cycle counter register; started (written) in main() */
extern cregister volatile unsigned int TSCL;
#endif
94
/* Program entry: on ARM, patches the MMU second-level table and makes all
 * MSMC masters coherent, then creates the single top-level test task and
 * starts SYS/BIOS (BIOS_start does not return). */
void main ()
{
    Task_Params tparams;

#ifdef __ARM_ARCH_7A__
    /* Add MMU entries for MMR's required for PCIE example */
    Uint32 privid, index;
    CSL_MsmcRegs *msmc = (CSL_MsmcRegs *)CSL_MSMC_CFG_REGS;
    Mmu_DescriptorAttrs attrs;
    /* Second-level MMU table allocated by the SYS/BIOS config; its address is
     * installed as a first-level TABLE descriptor below */
    extern char ti_sysbios_family_arm_a15_Mmu_Module_State_0_secondLevelTableBuf_1__A;
    uint32_t addr = (uint32_t)&ti_sysbios_family_arm_a15_Mmu_Module_State_0_secondLevelTableBuf_1__A;

    Mmu_initDescAttrs(&attrs);

    attrs.type = Mmu_DescriptorType_TABLE;
    attrs.shareable = 0;            // non-shareable
    attrs.accPerm = 1;              // read/write at any privilege level
    attrs.attrIndx = 0;             // Use MAIR0 Register Byte 3 for
                                    // determining the memory attributes
                                    // for each MMU entry

    // Update the first level table's MMU entry for 0x80000000 with the
    // new attributes.
    // NOTE(review): the comment above says 0x80000000 but the descriptor is
    // installed for 0x40000000 -- confirm which region is intended.
    Mmu_setFirstLevelDesc((Ptr)0x40000000, (UInt64)addr, &attrs);

    // Set up SES & SMS to make all masters coherent
    for (privid = 0; privid < 16; privid++)
    {
      for (index = 0; index < 8; index++)
      {
        uint32_t ses_mpaxh = msmc->SES_MPAX_PER_PRIVID[privid].SES[index].MPAXH;
        uint32_t sms_mpaxh = msmc->SMS_MPAX_PER_PRIVID[privid].SMS[index].MPAXH;
        /* Only touch segments that are actually configured (non-zero size) */
        if (CSL_FEXT (ses_mpaxh, MSMC_SES_MPAXH_0_SEGSZ) != 0)
        {
          // Clear the "US" bit to make coherent.  This is at 0x80.
          ses_mpaxh &= ~0x80;
          msmc->SES_MPAX_PER_PRIVID[privid].SES[index].MPAXH = ses_mpaxh;
        }
        if (CSL_FEXT (sms_mpaxh, MSMC_SMS_MPAXH_0_SEGSZ) != 0)
        {
          // Clear the "US" bit to make coherent.  This is at 0x80.
          sms_mpaxh &= ~0x80;
          msmc->SMS_MPAX_PER_PRIVID[privid].SMS[index].MPAXH = sms_mpaxh;
        }
      }
    }
#endif

    /* The only initial task is the top level test */
    Task_Params_init (&tparams);
    tparams.instance->name = "Top Level Test";
    tparams.priority = 1;           /* low priority: test tasks run at 2 */

    Task_create (topLevelTest, &tparams, NULL);

#ifdef _TMS320C6X
    /* Start the cycle counter */
    TSCL = 1;
#endif

    BIOS_start ();
}
158
159
160void clk1Fxn (UArg a0)
161{
162 BIOS_exit (exitCode);
163}
164
/* Initialize the test framework and launch the individual tests.
 * Runs each entry of paTestList as its own task (serially, waiting for each
 * to terminate), verifies the framework between tests, prints a pass/fail
 * summary, tears the framework down, and schedules program exit via the
 * one-shot clock (clk1Fxn reads the global exitCode). */
void topLevelTest (UArg a0, UArg a1)
{
    Task_Params tparams;
    Task_Handle thandle;
    Task_Stat   tstat;

    Clock_Handle clkh;
    Clock_Params clkParams;

    int i;

    int passCount;
    int failCount;
    int notRunCount;

    /* For some reason some printfs are lost unless there is a delay between System_flush
     * and System_exit, so delay is forced */
    Clock_Params_init(&clkParams);
    clkParams.period    = 0;          /* one-shot */
    clkParams.startFlag = FALSE;      /* started explicitly on exit paths */
    clkh = Clock_create(clk1Fxn, 1, &clkParams, NULL);

    System_printf ("\n\n ------- PA Unit Test Starting ---------\n");

    /* Initialize the PA, PA cpdma, QM and CPPI. Each test will use
     * the same framework */
    if (setupTestFramework ()) {
        System_printf ("topLevelTest (%s:%d): setupTestFramework returned error, exiting\n", __FILE__, __LINE__);
        System_flush ();
        exitCode = -1;
        Clock_start(clkh);
        Task_exit ();
    }

    /* Make sure the setup matches what is expected */
    if (verifyTestFramework()) {
        System_printf ("topLevelTest (%s:%d): verifyTestFramework returned error after initial framework setup, exiting\n", __FILE__, __LINE__);
        System_flush();
        exitCode = -1;
        Clock_start(clkh);
        Task_exit ();
    }

    /* Configure task parameters common to all test tasks */
    Task_Params_init (&tparams);
    tparams.priority = 2;             /* above this task's priority 1 */
    tparams.arg0 = (UArg) &tFramework;

    /* Run the tests */
    for (iteration = 0; iteration < PAU_TEST_REPETITION; iteration++) {
        for (i = 0; paTestList[i].testFunction != NULL; i++ ) {

            tparams.arg1 = (UArg)&paTestList[i];
            tparams.instance->name = paTestList[i].name;

            thandle = Task_create (paTestList[i].testFunction, &tparams, NULL);

            /* The test task will terminate upon completion. Verify that the
             * task has completed in case the task itself uses multiple tasks
             * that all wind up idle for a while.  (Busy-poll on Task_stat.) */
            do {
                Task_stat (thandle, &tstat);
            } while (tstat.mode != Task_Mode_TERMINATED);

            Task_delete (&thandle);

            /* Status was written by the test task itself */
            if (paTestList[i].testStatus == PA_TEST_PASSED)
                System_printf ("%s: PASSED\n", paTestList[i].name);
            else
                System_printf ("%s: FAILED\n", paTestList[i].name);

            System_flush();

            /* Do a quick check of the test framework.
             * NOTE(review): unlike the earlier failure paths this does not
             * Task_exit, so remaining tests still run after the shutdown
             * clock is started -- confirm that is intended. */
            if (verifyTestFramework ()) {
                System_printf ("topLevelTest (%s:%d): verifyTestFramework returned error after test %s. Exiting.\n", __FILE__, __LINE__, paTestList[i].name);
                exitCode = -1;
                System_flush ();
                Clock_start(clkh);
            }
        }
    }


    /* Summarize the test results */
    for (i = passCount = failCount = notRunCount = 0; paTestList[i].testFunction != NULL; i++) {
        if (paTestList[i].testStatus == PA_TEST_PASSED)
            passCount += 1;
        else if (paTestList[i].testStatus == PA_TEST_FAILED)
            failCount += 1;
        else
            notRunCount += 1;
    }

    System_printf ("\n\nTest summary:\n\tTests Passed: %d\n\tTests Failed: %d\n\tTests not run: %d\n\n",
        passCount, failCount, notRunCount);

    if(passCount == PAU_NUM_TESTS)
        System_printf ("All tests have passed!");

    if (clearTestFramework ())
        System_printf ("\n\n ------- PA Unit Test Clean Failed ---------\n");

    System_printf ("\n\n ------- PA Unit Test Complete ---------\n");

    System_flush();

#if (RM) && !defined(__LINUX_USER_SPACE)
    {
        int32_t rmResult;

        /* Report any resources the tests failed to hand back to RM */
        if ((rmResult = Rm_resourceStatus(rmHandle, FALSE)) != 0)
        {
            System_printf ("Error : Number of unfreed resources : %d\n", rmResult);
            System_flush();
        }
        else
        {
            System_printf ("All resources freed successfully\n");
            System_flush();
        }
    }
#endif

    Clock_start(clkh);

    Task_exit();
}
294
295
296
297
diff --git a/test/PAUnitTest/src/armv7/bios/testmem.c b/test/PAUnitTest/src/armv7/bios/testmem.c
new file mode 100755
index 0000000..2ad0146
--- /dev/null
+++ b/test/PAUnitTest/src/armv7/bios/testmem.c
@@ -0,0 +1,71 @@
1/*
2 *
3 * Copyright (C) 2010-2013 Texas Instruments Incorporated - http://www.ti.com/
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 *
13 * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the
16 * distribution.
17 *
18 * Neither the name of Texas Instruments Incorporated nor the names of
19 * its contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34*/
35
36
37
38/* Static memory allocation for test framework */
39
40#include "../../pautest.h"
41
       /* Single global instance of the test framework state (type comes from
        * the included ../../pautest.h). */
     42tFramework_t tFramework;
     43
     44/* HW interrupt disable handle */
     45GateHwi_Handle gateHwi;
     46
       /* Backing storage handed to the PA LLD: the PA instance buffer plus the
        * L2 / L3 / virtual-link / ACL / EOAM / user-stats-link / flow-cache
        * tables.  Each buffer is rounded up to a whole number of cache lines
        * and cache-line aligned, so cache writeback/invalidate on one buffer
        * cannot clobber a neighboring buffer sharing the same line. */
     47uint8_t memPaInst[TF_ROUND_UP(TF_PA_INST_SIZE, TF_CACHE_LINESZ)] __attribute__ ((aligned (TF_CACHE_LINESZ)));
     48uint8_t memL2Ram[TF_ROUND_UP(TF_L2_TABLE_SIZE, TF_CACHE_LINESZ)] __attribute__ ((aligned (TF_CACHE_LINESZ)));
     49uint8_t memL3Ram[TF_ROUND_UP(TF_L3_TABLE_SIZE, TF_CACHE_LINESZ)] __attribute__ ((aligned (TF_CACHE_LINESZ)));
     50uint8_t memVLinkRam[TF_ROUND_UP(TF_VLINK_TABLE_SIZE, TF_CACHE_LINESZ)] __attribute__ ((aligned (TF_CACHE_LINESZ)));
     51uint8_t memAclRam[TF_ROUND_UP(TF_ACL_TABLE_SIZE, TF_CACHE_LINESZ)] __attribute__ ((aligned (TF_CACHE_LINESZ)));
     52uint8_t memEoamRam[TF_ROUND_UP(TF_EOAM_TABLE_SIZE, TF_CACHE_LINESZ)] __attribute__ ((aligned (TF_CACHE_LINESZ)));
     53uint8_t memUsrStatsLnkTbl[TF_ROUND_UP(TF_USR_STATS_LNK_TABLE_SIZE, TF_CACHE_LINESZ)] __attribute__ ((aligned (TF_CACHE_LINESZ)));
     54uint8_t memFcRam[TF_ROUND_UP(TF_FC_TABLE_SIZE, TF_CACHE_LINESZ)] __attribute__ ((aligned (TF_CACHE_LINESZ)));
     55
     56/* Memory used for the linking RAM and descriptor RAM */
       /* NOTE(review): linking RAM entries are declared 64-bit wide, one per
        * descriptor; the 16- and 128-byte alignments presumably satisfy
        * QMSS/CPPI hardware requirements — confirm against the QMSS LLD
        * documentation. */
     57uint64_t memLinkRam[TF_NUM_DESC] __attribute__ ((aligned (16)));
     58uint8_t memDescRam[TF_NUM_DESC * TF_SIZE_DESC] __attribute__ ((aligned (128)));
     59
     60#ifdef NETSS_INTERNAL_PKTDMA
       /* When the NETCP-internal packet DMA is used, descriptor RAM is taken
        * from the NETCP memory-mapped region (config base + 0x001c0000)
        * instead of a locally defined buffer. */
     61uint8_t* passDescRam = (uint8_t*)(CSL_NETCP_CFG_REGS + 0x001c0000);
     62#endif
     63
     64/* Packet buffers attached to descriptors */
     65unsigned char memQ1[TF_LINKED_BUF_Q1_NBUFS][TF_LINKED_BUF_Q1_BUF_SIZE] __attribute__ ((aligned (16)));
     66unsigned char memQ2[TF_LINKED_BUF_Q2_NBUFS][TF_LINKED_BUF_Q2_BUF_SIZE] __attribute__ ((aligned (16)));
     67unsigned char memQ3[TF_LINKED_BUF_Q3_NBUFS][TF_LINKED_BUF_Q3_BUF_SIZE] __attribute__ ((aligned (16)));
     68
       /* Second ("local") set of the same three buffer pools; presumably for
        * the NETCP-local packet DMA path — TODO confirm against the framework
        * setup code that attaches buffers to descriptors. */
     69unsigned char memLocQ1[TF_LINKED_BUF_Q1_NBUFS][TF_LINKED_BUF_Q1_BUF_SIZE] __attribute__ ((aligned (16)));
     70unsigned char memLocQ2[TF_LINKED_BUF_Q2_NBUFS][TF_LINKED_BUF_Q2_BUF_SIZE] __attribute__ ((aligned (16)));
     71unsigned char memLocQ3[TF_LINKED_BUF_Q3_NBUFS][TF_LINKED_BUF_Q3_BUF_SIZE] __attribute__ ((aligned (16)));