1 /*
2 *
3 * Copyright (C) 2010-2020 Texas Instruments Incorporated - http://www.ti.com/
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 *
13 * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the
16 * distribution.
17 *
18 * Neither the name of Texas Instruments Incorporated nor the names of
19 * its contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
/* Generate and verify the system test framework
 *
 * The test framework consists of the PA and SA driver instances, a CPPI/CDMA/QM configuration,
 * memory for packet transmission and reception, and semaphores that are used
 * for every test in the SA unit test.
 *
 */
44 #include "unittest.h"
45 #include "testconn.h"
46 #if !defined(NSS_LITE) && !defined(NSS_LITE2)
47 #include <ti/drv/pa/pa_osal.h>
48 #endif
50 #include <ti/csl/cslr_device.h>
52 #ifdef NSS_LITE2
53 #if defined (SOC_AM64X)
54 #include <ti/csl/csl_pktdma.h>
55 #else
56 #include <ti/csl/csl_udmap.h>
57 #endif
58 #include <ti/board/board.h>
59 #include <ti/osal/osal.h>
60 #include <ti/csl/arch/csl_arch.h>
61 #else
62 #include <ti/csl/csl_psc.h>
63 #include <ti/csl/csl_pscAux.h>
65 /* CSL CHIP, SEM Functional layer includes */
66 #include <ti/csl/csl_chip.h>
67 #include <ti/csl/csl_semAux.h>
69 /* QMSS device specific configuration */
70 extern Qmss_GlobalConfigParams qmssGblCfgParams;
71 extern Qmss_GlobalConfigParams qmssNetssGblCfgParams;
73 /* CPPI device specific configuration */
74 extern Cppi_GlobalConfigParams cppiGblCfgParams;
75 #endif
77 #ifndef ARM11
78 #ifdef _TMS320C6X
79 cregister volatile unsigned int CSR ;
80 cregister volatile unsigned int IER ;
81 #endif
82 #endif
84 #ifndef USE_BIOS
85 #ifndef ARM11
87 #define GIE (1U<<0)
89 #ifdef _TMS320C6X
/*****************************************************************************
 * DESCRIPTION: This variable implements a simple bit-stack. It is
 *              manipulated by the thwCriticalBegin() and thwCriticalEnd()
 *              functions in order to properly recover the global interrupt
 *              enable state after completion of a critical section. Up to 16
 *              bits can be shifted in and out, meaning that 15-level nesting
 *              of critical sections is allowed (highly unlikely).
 *****************************************************************************/
volatile uint16_t thwCriticalStatusSaved = 0;
/******************************************************************************
 * FUNCTION PURPOSE: Start of critical section (up to 15 sections can be nested)
 *
 ******************************************************************************
 * DESCRIPTION: Save the current interrupt enable/disable status, and disable
 *              all interrupts. The GIE bit of CSR is pushed onto the global
 *              bit-stack; IER is saved and restored around the update.
 *****************************************************************************/
108 void thwCriticalBegin (void)
109 {
111 uint32_t ierStore;
  /* Mask all maskable interrupts by clearing IER while the GIE state is sampled */
  ierStore = IER;
  IER = 0;

  if (!(CSR & (GIE)))                 /* Is the global interrupt already disabled ? */
119 {
120 CSR &= ~(GIE);
121 // asm(" bit(st1, #st1_intm) = #1"); /* Always disable interrupt on exit */
122 asm (" NOP");
123 asm (" NOP");
124 asm (" NOP");
125 thwCriticalStatusSaved <<= 1; /* Bit stack prepared */
126 thwCriticalStatusSaved |= 1; /* It is disabled ! */
127 }
128 else { /* else ... means it is enabled */
129 CSR &= ~(GIE);
130 // asm(" bit(st1, #st1_intm) = #1"); /* Always disable interrupt on exit */
131 asm (" NOP");
132 asm (" NOP");
133 asm (" NOP");
134 thwCriticalStatusSaved <<= 1; /* Bit stack prepared */
135 }
136 IER = ierStore;
137 }
138 /******************************************************************************
139 * FUNCTION PURPOSE: End of critical section (up to 15 sections can be nested)
140 *
141 ******************************************************************************
 * DESCRIPTION: Restore the interrupt enable/disable status encountered on entry
 *              to the critical section. The saved GIE state is popped from the global bit-stack.
144 *****************************************************************************/
145 void thwCriticalEnd (void)
146 {
147 if(thwCriticalStatusSaved & 0x0001) /* Was the last global interrupt
148 * disabled ? */
149 thwCriticalStatusSaved >>= 1; /* Drop the last bit saved !
150 * It was already disabled, prior to
151 * critical section entry */
152 else {
153 thwCriticalStatusSaved >>= 1; /* Drop the last bit saved ! */
154 asm (" NOP");
155 asm (" NOP");
156 asm (" NOP");
157 // asm(" bit(st1, #st1_intm) = #0"); /* It was enabled prior to critical
158 // * section entry */
159 CSR |= (GIE);
160 }
161 } /* End of thwCriticalEnd() */
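/******************************************************************************
 * Illustrative usage (sketch only, not part of the framework): the Begin/End
 * calls must stay balanced; nesting works because each Begin pushes the
 * current GIE state onto the bit-stack and each End pops it.
 *
 *   thwCriticalBegin ();            // outer critical section
 *   ...
 *   thwCriticalBegin ();            // nested critical section
 *   ...                             // interrupts remain disabled here
 *   thwCriticalEnd ();
 *   ...
 *   thwCriticalEnd ();              // GIE restored to its state on entry
 *****************************************************************************/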
162 #endif /* #ifdef _TMS320C6X */
163 #endif /* ARM11 */
164 #else
166 /**********************************************************************
167 ****************************** Defines *******************************
168 **********************************************************************/
169 #define MAX_NUM_CORES 8
171 /* Hardware Semaphore to synchronize access from
 * multiple applications (PASS applications and non-PASS applications)
173 * across different cores to the QMSS library.
174 */
175 #define QMSS_HW_SEM 3
177 /* Hardware Semaphore to synchronize access from
178 * multiple applications (PASS applications and non-PASS applications)
179 * across different cores to the CPPI library.
180 */
181 #define CPPI_HW_SEM 4
183 /* Hardware Semaphore to synchronize access from
184 * multiple applications (PASS applications and non-PASS applications)
185 * across different cores to the PA library.
186 */
187 #define PA_HW_SEM 5
189 /**
190 * @b Description
191 * @n
192 * General Memory Barrier guarantees that all LOAD and STORE operations that were issued before the
193 * barrier occur before the LOAD and STORE operations issued after the barrier
194 *
195 */
196 #if defined(__ARM_ARCH_7A__)
197 void memBarrier(void) {
198 __sync_synchronize();
199 }
200 #endif
202 /**********************************************************************
203 ************************** Global Variables **************************
204 **********************************************************************/
205 uint32_t paMemProtNestedLevel= 0;
206 uint32_t coreKey [MAX_NUM_CORES];
208 #endif /* USE_BIOS */
210 uint32_t qmssMallocCounter = 0;
211 uint32_t qmssFreeCounter = 0;
212 uint32_t cppiMallocCounter = 0;
213 uint32_t cppiFreeCounter = 0;
215 /*
216 * Netss Local PKTDMA related convert functions
217 */
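/* Note (non-USE_BIOS, non-NSS_LITE2 builds only): these helpers translate
 * between the host view of the NETCP configuration window (CSL_NETCP_CFG_REGS)
 * and its 0xFF000000-based alias used by the NETSS-local PKTDMA, by swapping
 * the top address byte. In all other configurations they are identity
 * mappings.
 */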
219 uintptr_t Netss_qmssVirtToPhy (void *ptr)
220 {
221 uintptr_t addr = (uintptr_t)ptr;
223 #if !defined(USE_BIOS) && !defined(NSS_LITE2)
224 {
225 if ((addr & 0xFF000000) == CSL_NETCP_CFG_REGS)
226 {
227 addr = (addr & 0x00FFFFFF) | 0xFF000000;
228 }
229 }
230 #endif
232 return ((uintptr_t)addr);
233 }
235 uintptr_t Netss_qmssPhyToVirt (void *ptr)
236 {
237 uintptr_t addr = (uintptr_t)ptr;
239 #if !defined(USE_BIOS) && !defined(NSS_LITE2)
240 {
241 if ((addr & 0xFF000000) == 0xFF000000)
242 {
243 addr = (addr & 0x00FFFFFF) | CSL_NETCP_CFG_REGS;
244 }
245 }
246 #endif
248 return ((uintptr_t)addr);
249 }
251 #ifndef NSS_LITE2
252 /**
253 * @brief This macro is used to alert the application that the PA is
254 * going to access table memory. The application must ensure
255 * cache coherency for multi-core applications
256 *
257 *
258 * <b> Prototype: </b>
259 * The following is the C prototype for the expected OSAL API.
260 *
261 * @verbatim
262 void Osal_paBeginMemAccess (void* addr, uint32_t sizeWords)
263 @endverbatim
264 *
265 * <b> Parameters </b>
266 * @n The address of the table to be accessed
267 * @n The number of bytes in the table
268 *
269 * @note PA will make nested calls to this function for memory access
270 * protection of different memory tables.
271 */
273 void Osal_paBeginMemAccess (Ptr addr, uint32_t size)
274 {
275 #ifdef USE_BIOS
276 #ifdef _TMS320C6X
277 uint32_t key;
279 /* Disable Interrupts */
280 key = Hwi_disable();
282 /* Cleanup the prefetch buffer also. */
283 CSL_XMC_invalidatePrefetchBuffer();
285 SYS_CACHE_INV (addr, size, CACHE_FENCE_WAIT);
287 asm (" nop 4");
288 asm (" nop 4");
289 asm (" nop 4");
290 asm (" nop 4");
292 /* Reenable Interrupts. */
293 Hwi_restore(key);
294 #endif
295 #else
296 if ((addr != (Ptr)memL2Ram) && (addr != (Ptr)memL3Ram))
297 {
298 SALog ("Osal_paBeginMemAccess: Invalid address supplied!\n");
299 }
301 #ifndef ARM11
302 thwCriticalBegin();
303 #endif
305 #endif
307 }
309 /**
310 * @brief This macro is used to alert the application that the PA
311 * has completed access to table memory. This call will always
312 * be made following a call to Osal_paBeginMemAccess and have
313 * the same parameters
314 *
315 * <b> Prototype: </b>
316 * The following is the C prototype for the expected OSAL API.
317 *
318 * @verbatim
319 void Osal_paEndMemAccess (void* addr, uint32_t sizeWords)
320 @endverbatim
321 *
322 * <b> Parameters </b>
323 * @n The address of the table to be accessed
324 * @n The number of bytes in the table
325 *
326 * @note PA will make nested calls to this function for memory access
327 * protection of different memory tables.
328 */
329 void Osal_paEndMemAccess (Ptr addr, uint32_t size)
330 {
331 #ifdef USE_BIOS
332 #ifdef _TMS320C6X
333 uint32_t key;
335 /* Disable Interrupts */
336 key = Hwi_disable();
338 SYS_CACHE_WB (addr, size, CACHE_FENCE_WAIT);
340 asm (" nop 4");
341 asm (" nop 4");
342 asm (" nop 4");
343 asm (" nop 4");
345 /* Reenable Interrupts. */
346 Hwi_restore(key);
347 #endif
348 #else
349 #ifndef ARM11
350 thwCriticalEnd();
351 #endif
352 #endif
355 }
357 /**
358 * @b Description
359 * @n
360 * The function is used to enter a critical section.
361 * Function protects against
362 *
363 * access from multiple threads on single core
364 * and
365 * access from multiple cores
366 *
367 * @param[in] key
368 * Key used to lock the critical section.
369 *
370 * @retval
371 * Not Applicable
372 */
373 void Osal_paMtCsEnter (uint32_t *key)
374 {
376 /* Get the hardware semaphore.
377 *
378 * Acquire Multi core PA synchronization lock
379 */
380 while ((CSL_semAcquireDirect (PA_HW_SEM)) == 0);
381 *key = 0;
382 }
384 /**
385 * @b Description
386 * @n
387 * The function is used to exit a critical section
 *      protected using the Osal_paMtCsEnter() API.
389 *
390 * @param[in] key
391 * Key used to unlock the critical section.
392 *
393 * @retval
394 * Not Applicable
395 */
396 void Osal_paMtCsExit (uint32_t key)
397 {
398 /* Release the hardware semaphore */
399 CSL_semReleaseSemaphore (PA_HW_SEM);
400 }
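/* Illustrative pairing (sketch only): the key argument is unused by this
 * implementation beyond being cleared, but the calls must still be balanced.
 *
 *   uint32_t mtKey;
 *   Osal_paMtCsEnter (&mtKey);
 *   ... access PA tables shared across cores ...
 *   Osal_paMtCsExit (mtKey);
 */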
402 void* Netss_qmssConvertDescVirtToPhy(uint32_t QID, void *descAddr)
403 {
404 uint32_t addr = (uint32_t) descAddr;
406 #ifndef USE_BIOS
407 {
408 if ((addr & 0xFF000000) == CSL_NETCP_CFG_REGS)
409 {
410 addr = (addr & 0x00FFFFFF) | 0xFF000000;
411 }
412 }
413 #else
414 #ifdef __ARM_ARCH_7A__
415 memBarrier();
416 #endif
417 #endif
419 return ((void *)addr);
420 }
422 void* Netss_qmssConvertDescPhyToVirt(uint32_t QID, void *descAddr)
423 {
424 uint32_t addr = (uint32_t) descAddr;
426 #ifndef USE_BIOS
427 {
428 if ((addr & 0xFF000000) == 0xFF000000)
429 {
430 addr = (addr & 0x00FFFFFF) | CSL_NETCP_CFG_REGS;
431 }
432 }
433 #else
434 #ifdef __ARM_ARCH_7A__
435 memBarrier();
436 #endif
437 #endif
439 return ((void *)addr);
440 }
442 /**
443 * @b Description
444 * @n
445 * The function is used to allocate a memory block of the specified size.
446 *
447 * @param[in] num_bytes
448 * Number of bytes to be allocated.
449 *
450 * @retval
451 * Allocated block address
452 */
453 Ptr Osal_qmssMalloc (uint32_t num_bytes)
454 {
455 Error_Block errorBlock;
456 Ptr dataPtr;
458 /* Increment the allocation counter. */
459 qmssMallocCounter++;
461 /* Allocate memory. */
462 dataPtr = Memory_alloc(NULL, num_bytes, 0, &errorBlock);
463 return (dataPtr);
464 }
467 /**
468 * @b Description
469 * @n
470 * The function is used to enter a critical section.
471 * Function protects against
472 *
473 * access from multiple cores
474 * and
475 * access from multiple threads on single core
476 *
477 * @param[in] key
478 * Key used to lock the critical section.
479 *
480 * @retval
481 * Not Applicable
482 */
483 Ptr Osal_qmssCsEnter (void)
484 {
485 #ifdef USE_BIOS
486 #ifdef _TMS320C6X
487 uint32_t coreNum = CSL_chipReadReg(CSL_CHIP_DNUM);
488 #else
489 uint32_t coreNum = 0;
490 #endif
491 /* Get the hardware semaphore.
492 *
493 * Acquire Multi core QMSS synchronization lock
494 */
495 while ((CSL_semAcquireDirect (QMSS_HW_SEM)) == 0);
497 /* Disable all interrupts and OS scheduler.
498 *
499 * Acquire Multi threaded / process synchronization lock.
500 */
501 coreKey [coreNum] = Hwi_disable();
503 return NULL;
504 #else
505 #ifndef ARM11
506 thwCriticalBegin();
507 #endif
508 return NULL;
509 #endif
510 }
512 /**
513 * @b Description
514 * @n
515 * The function is used to exit a critical section
516 * protected using Osal_qmssCsEnter() API.
517 *
518 * @param[in] key
519 * Key used to unlock the critical section.
520 *
521 * @retval
522 * Not Applicable
523 */
524 void Osal_qmssCsExit (Ptr CsHandle)
525 {
526 #ifdef USE_BIOS
527 #ifdef _TMS320C6X
528 uint32_t coreNum = CSL_chipReadReg(CSL_CHIP_DNUM);
529 #else
530 uint32_t coreNum = 0;
531 #endif
    /* Re-enable all interrupts and turn the OS scheduler back on.
533 *
534 * Release multi-threaded / multi-process lock on this core.
535 */
536 Hwi_restore(coreKey [coreNum]);
538 /* Release the hardware semaphore
539 *
540 * Release multi-core lock.
541 */
542 CSL_semReleaseSemaphore (QMSS_HW_SEM);
543 #else
544 #ifndef ARM11
545 thwCriticalEnd();
546 #endif
547 #endif
549 }
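/* Illustrative pairing (sketch only): the returned handle is NULL in this
 * implementation, but it must still be passed back to the matching exit call.
 *
 *   Ptr csHandle = Osal_qmssCsEnter ();
 *   ... QMSS LLD critical region ...
 *   Osal_qmssCsExit (csHandle);
 */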
551 /******************************************************************************
552 * Function to issue memory barrier
553 *
554 * NOTE: QMSS unit tests are not using CPPI descriptors
555 ******************************************************************************/
556 void* Osal_qmssMemBarrier(uint32_t QID, void *descAddr)
557 {
558 #ifdef __ARM_ARCH_7A__
559 /* Issue memory barrier */
560 memBarrier();
561 #endif
562 return descAddr;
563 }
565 /**
566 * ============================================================================
567 * @n@b Osal_cppiCsEnter
568 *
569 * @b brief
570 * @n This API ensures multi-core and multi-threaded
571 * synchronization to the caller.
572 *
573 * This is a BLOCKING API.
574 *
575 * This API ensures multi-core synchronization between
576 * multiple processes trying to access CPPI shared
577 * library at the same time.
578 *
579 * @param[in]
580 * @n None
581 *
582 * @return
583 * @n Handle used to lock critical section
584 * =============================================================================
585 */
586 Ptr Osal_cppiCsEnter (void)
587 {
588 #ifdef USE_BIOS
589 #ifdef _TMS320C6X
590 uint32_t coreNum = CSL_chipReadReg(CSL_CHIP_DNUM);
591 #else
592 uint32_t coreNum = 0;
593 #endif
594 /* Get the hardware semaphore.
595 *
596 * Acquire Multi core CPPI synchronization lock
597 */
598 while ((CSL_semAcquireDirect (CPPI_HW_SEM)) == 0);
600 /* Disable all interrupts and OS scheduler.
601 *
602 * Acquire Multi threaded / process synchronization lock.
603 */
604 coreKey [coreNum] = Hwi_disable();
606 return NULL;
607 #else
608 #ifndef ARM11
609 thwCriticalBegin();
610 #endif
611 return NULL;
612 #endif
613 }
615 /**
616 * ============================================================================
617 * @n@b Osal_cppiCsExit
618 *
619 * @b brief
620 * @n This API needs to be called to exit a previously
621 * acquired critical section lock using @a Osal_cppiCsEnter ()
622 * API. It resets the multi-core and multi-threaded lock,
623 * enabling another process/core to grab CPPI access.
624 *
625 * @param[in] CsHandle
626 * Handle for unlocking critical section.
627 *
628 * @return None
629 * =============================================================================
630 */
631 void Osal_cppiCsExit (Ptr CsHandle)
632 {
633 #ifdef USE_BIOS
634 #ifdef _TMS320C6X
635 uint32_t coreNum = CSL_chipReadReg(CSL_CHIP_DNUM);
636 #else
637 uint32_t coreNum = 0;
638 #endif
    /* Re-enable all interrupts and turn the OS scheduler back on.
640 *
641 * Release multi-threaded / multi-process lock on this core.
642 */
643 Hwi_restore(coreKey [coreNum]);
645 /* Release the hardware semaphore
646 *
647 * Release multi-core lock.
648 */
649 CSL_semReleaseSemaphore (CPPI_HW_SEM);
651 return;
652 #else
653 #ifndef ARM11
654 thwCriticalEnd();
655 #endif
656 #endif
657 }
659 /**
660 * @b Description
661 * @n
662 * The function is used to allocate a memory block of the specified size.
663 *
 *      Note: If the LLD is used by applications on multiple cores, the "cppiHeap"
 *      should be placed in shared memory.
666 *
667 * @param[in] num_bytes
668 * Number of bytes to be allocated.
669 *
670 * @retval
671 * Allocated block address
672 */
673 Ptr Osal_cppiMalloc (uint32_t num_bytes)
674 {
675 Error_Block errorBlock;
676 Ptr dataPtr;
678 /* Increment the allocation counter. */
679 cppiMallocCounter++;
681 /* Allocate memory. */
682 dataPtr = Memory_alloc(NULL, num_bytes, 0, &errorBlock);
683 return (dataPtr);
684 }
686 /**
687 * @b Description
688 * @n
689 * The function is used to free a memory block of the specified size allocated
690 * using Osal_cppiMalloc() API.
691 *
692 * @param[in] ptr
693 * Pointer to the memory block to be cleaned up.
694 *
695 * @param[in] size
696 * Size of the memory block to be cleaned up.
697 *
698 * @retval
699 * Not Applicable
700 */
701 void Osal_cppiFree (void *ptr, uint32_t size)
702 {
703 /* Increment the free counter. */
704 cppiFreeCounter++;
705 Memory_free (NULL, ptr, size);
706 }
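/* The qmssMallocCounter/qmssFreeCounter and cppiMallocCounter/cppiFreeCounter
 * pairs above only count calls; a test can compare them at teardown as a
 * simple leak check (illustrative sketch, not part of the framework):
 *
 *   if (cppiMallocCounter != cppiFreeCounter)
 *       SALog ("CPPI heap leak: %u allocs vs %u frees\n",
 *              cppiMallocCounter, cppiFreeCounter);
 */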
709 /**
710 * ============================================================================
711 * @n@b Osal_qmssMtCsEnter
712 *
713 * @b brief
714 * @n This API ensures ONLY multi-threaded
715 * synchronization to the QMSS user.
716 *
717 * This is a BLOCKING API.
718 *
719 * @param[in] None
720 *
721 * @return
722 * Handle used to lock critical section
723 * =============================================================================
724 */
725 Ptr Osal_qmssMtCsEnter (void)
726 {
727 #ifdef USE_BIOS
728 /* Disable all interrupts and OS scheduler.
729 *
730 * Acquire Multi threaded / process synchronization lock.
731 */
732 //coreKey [CSL_chipReadReg(CSL_CHIP_DNUM)] = Hwi_disable();
734 return NULL;
735 #else
736 #ifndef ARM11
737 thwCriticalBegin();
738 #endif
739 return NULL;
740 #endif
742 }
744 /**
745 * ============================================================================
746 * @n@b Osal_qmssMtCsExit
747 *
748 * @b brief
749 * @n This API needs to be called to exit a previously
 *     acquired critical section lock using the @a Osal_qmssMtCsEnter ()
751 * API. It resets the multi-threaded lock, enabling another process
752 * on the current core to grab it.
753 *
754 * @param[in] CsHandle
755 * Handle for unlocking critical section.
756 *
757 * @return None
758 * =============================================================================
759 */
760 void Osal_qmssMtCsExit (Ptr CsHandle)
761 {
762 #ifdef USE_BIOS
    /* Re-enable all interrupts and turn the OS scheduler back on.
764 *
765 * Release multi-threaded / multi-process lock on this core.
766 */
767 //Hwi_restore(key);
769 return;
770 #else
771 #ifndef ARM11
772 thwCriticalEnd();
773 #endif
774 #endif
776 }
778 /**
779 * @b Description
780 * @n
781 * The function is used to indicate that a block of memory is
782 * about to be accessed. If the memory block is cached then this
783 * indicates that the application would need to ensure that the
784 * cache is updated with the data from the actual memory.
785 *
786 * @param[in] blockPtr
787 * Address of the block which is to be invalidated
788 *
789 * @param[in] size
790 * Size of the block to be invalidated
792 * @retval
793 * Not Applicable
794 */
795 void Osal_cppiBeginMemAccess (void *blockPtr, uint32_t size)
796 {
797 #ifdef _TMS320C6X
798 uint32_t key;
800 /* Disable Interrupts */
801 key = Hwi_disable();
803 /* Cleanup the prefetch buffer also. */
804 CSL_XMC_invalidatePrefetchBuffer();
806 SYS_CACHE_INV (blockPtr, size, CACHE_FENCE_WAIT);
808 asm (" nop 4");
809 asm (" nop 4");
810 asm (" nop 4");
811 asm (" nop 4");
813 /* Reenable Interrupts. */
814 Hwi_restore(key);
815 #endif
817 return;
818 }
820 /**
821 * @b Description
822 * @n
823 * The function is used to indicate that the block of memory has
824 * finished being accessed. If the memory block is cached then the
825 * application would need to ensure that the contents of the cache
826 * are updated immediately to the actual memory.
827 *
828 * @param[in] blockPtr
829 * Address of the block which is to be written back
830 *
831 * @param[in] size
832 * Size of the block to be written back
834 * @retval
835 * Not Applicable
836 */
837 void Osal_cppiEndMemAccess (void *blockPtr, uint32_t size)
838 {
839 #ifdef _TMS320C6X
840 uint32_t key;
842 /* Disable Interrupts */
843 key = Hwi_disable();
845 SYS_CACHE_WB (blockPtr, size, CACHE_FENCE_WAIT);
847 asm (" nop 4");
848 asm (" nop 4");
849 asm (" nop 4");
850 asm (" nop 4");
852 /* Reenable Interrupts. */
853 Hwi_restore(key);
854 #endif
855 return;
856 }
858 /**
859 * @b Description
860 * @n
861 * The function is used to indicate that a block of memory is
862 * about to be accessed. If the memory block is cached then this
863 * indicates that the application would need to ensure that the
864 * cache is updated with the data from the actual memory.
865 *
866 * @param[in] blockPtr
867 * Address of the block which is to be invalidated
868 *
869 * @param[in] size
870 * Size of the block to be invalidated
872 * @retval
873 * Not Applicable
874 */
875 void Osal_qmssBeginMemAccess (void *blockPtr, uint32_t size)
876 {
877 #ifdef _TMS320C6X
878 uint32_t key;
880 /* Disable Interrupts */
881 key = Hwi_disable();
883 /* Cleanup the prefetch buffer also. */
884 CSL_XMC_invalidatePrefetchBuffer();
886 SYS_CACHE_INV (blockPtr, size, CACHE_FENCE_WAIT);
888 asm (" nop 4");
889 asm (" nop 4");
890 asm (" nop 4");
891 asm (" nop 4");
893 /* Reenable Interrupts. */
894 Hwi_restore(key);
895 #endif
897 return;
898 }
900 /**
901 * @b Description
902 * @n
903 * The function is used to indicate that the block of memory has
904 * finished being accessed. If the memory block is cached then the
905 * application would need to ensure that the contents of the cache
906 * are updated immediately to the actual memory.
907 *
908 * @param[in] blockPtr
909 * Address of the block which is to be written back
910 *
911 * @param[in] size
912 * Size of the block to be written back
914 * @retval
915 * Not Applicable
916 */
917 void Osal_qmssEndMemAccess (void *blockPtr, uint32_t size)
918 {
919 #ifdef _TMS320C6X
920 uint32_t key;
922 /* Disable Interrupts */
923 key = Hwi_disable();
925 SYS_CACHE_WB (blockPtr, size, CACHE_FENCE_WAIT);
927 asm (" nop 4");
928 asm (" nop 4");
929 asm (" nop 4");
930 asm (" nop 4");
932 /* Reenable Interrupts. */
933 Hwi_restore(key);
934 #endif
936 return;
937 }
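/* PASS firmware bring-up sequence used by downloadPaFirmware() below:
 * hold the PDSPs in reset, download each PDSP image from the NSS layout
 * table, re-enable the PDSPs, then read back and print each PDSP version
 * as a basic sanity check. A failure to reach pa_STATE_ENABLE aborts the
 * framework setup.
 */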
939 int downloadPaFirmware (void)
940 {
941 #if !defined(NSS_LITE) && !defined(NSS_LITE2)
942 int ret = pa_OK, i;
943 uint32_t version;
945 Pa_resetControl (tFramework.passHandle, pa_STATE_RESET);
947 for ( i = 0; i < nssGblCfgParams.layout.numPaPdsps; i++)
948 {
950 Pa_downloadImage (tFramework.passHandle, i,
951 (Ptr)nssGblCfgParams.layout.paPdspImage[i],
952 nssGblCfgParams.layout.paPdspImageSize[i]);
953 }
955 ret = Pa_resetControl (tFramework.passHandle, pa_STATE_ENABLE);
957 if (ret != pa_STATE_ENABLE)
958 {
959 SALog ("downloadPaFirmware: Pa_resetControl return with error code %d\n", ret);
960 System_flush();
961 return (-1);
962 }
964 for ( i = 0; i < nssGblCfgParams.layout.numPaPdsps; i++)
965 {
966 Pa_getPDSPVersion(tFramework.passHandle, i, &version);
967 SALog ("PDSP %d version = 0x%08x\n", i, version);
968 System_flush();
969 }
970 #endif
971 return (0);
972 }
974 /* The PA LLD instance is created, the PA firmware is
975 * downloaded and started */
976 int initPa (void)
977 {
978 #if !defined(NSS_LITE) && !defined(NSS_LITE2)
979 paSizeInfo_t paSize;
980 paConfig_t paCfg;
981 paRaConfig_t raCfg;
982 int ret;
983 int sizes[pa_N_BUFS];
984 int aligns[pa_N_BUFS];
985 void* bases[pa_N_BUFS];
  /* The maximum number of handles that can exist is 32 for L2 and 64 for L3. */
988 memset(&paSize, 0, sizeof(paSizeInfo_t));
989 memset(&paCfg, 0, sizeof(paConfig_t));
990 memset(&raCfg, 0, sizeof(paRaConfig_t));
991 memset(sizes, 0, sizeof(sizes));
992 memset(aligns, 0, sizeof(aligns));
993 memset(bases, 0, pa_N_BUFS*sizeof(void*));
994 paSize.nMaxL2 = TF_MAX_NUM_L2_HANDLES;
995 paSize.nMaxL3 = TF_MAX_NUM_L3_HANDLES;
996 paSize.nMaxVlnk = 0;
997 paSize.nUsrStats = 0;
998 paSize.nMaxAcl = 0;
999 paSize.nMaxFc = 0;
1002 ret = Pa_getBufferReq(&paSize, sizes, aligns);
1004 if (ret != pa_OK) {
1005 SALog ("initPa: Pa_getBufferReq() return with error code %d\n", ret);
1006 return (-1);
1007 }
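  /* The buffer placement checks below rely on the usual power-of-two alignment
   * idiom: for an alignment A returned by Pa_getBufferReq(), (addr & (A - 1))
   * is non-zero exactly when addr is not A-aligned. For example, with A = 8,
   * an address of 0x80000004 fails the check while 0x80000008 passes.
   */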
1009 /* The first buffer is used as the instance buffer */
1010 if ((uint32_t)memPaInst & (aligns[pa_BUF_INST] - 1)) {
1011 SALog ("initPa: Pa_getBufferReq requires %d alignment for instance buffer, but address is 0x%08x\n", aligns[pa_BUF_INST], (uint32_t)memPaInst);
1012 return (-1);
1013 }
1015 if (sizeof(memPaInst) < sizes[pa_BUF_INST]) {
1016 SALog ("initPa: Pa_getBufferReq requires size %d for instance buffer, have only %d\n", sizes[pa_BUF_INST], sizeof(memPaInst));
1017 return (-1);
1018 }
1020 bases[pa_BUF_INST] = (void *)memPaInst;
1023 /* The second buffer is the L2 table */
1024 if ((uint32_t)memL2Ram & (aligns[pa_BUF_L2_TABLE] - 1)) {
1025 SALog ("initPa: Pa_getBufferReq requires %d alignment for L2 buffer, but address is 0x%08x\n", aligns[pa_BUF_L2_TABLE], (uint32_t)memL2Ram);
1026 return (-1);
1027 }
1029 if (sizeof(memL2Ram) < sizes[pa_BUF_L2_TABLE]) {
    SALog ("initPa: Pa_getBufferReq requires %d bytes for the L2 buffer, have only %d\n", sizes[pa_BUF_L2_TABLE], sizeof(memL2Ram));
1031 return (-1);
1032 }
1034 bases[pa_BUF_L2_TABLE] = (void *)memL2Ram;
1036 /* The third buffer is the L3 table */
1037 if ((uint32_t)memL3Ram & (aligns[pa_BUF_L3_TABLE] - 1)) {
1038 SALog ("initPa: Pa_getBufferReq requires %d alignment for L3 buffer, but address is 0x%08x\n", aligns[pa_BUF_L3_TABLE], (uint32_t)memL3Ram);
1039 return (-1);
1040 }
1042 if (sizeof(memL3Ram) < sizes[pa_BUF_L3_TABLE]) {
1043 SALog ("initPa: Pa_getBufferReq requires %d bytes for L3 buffer, have only %d\n", sizes[pa_BUF_L3_TABLE], sizeof(memL3Ram));
1044 return (-1);
1045 }
1047 bases[pa_BUF_L3_TABLE] = (void *)memL3Ram;
1049 if (nssGblCfgParams.layout.fNssGen2)
1050 {
1051 /* set default RA system configuration */
1052 raCfg.ipv4MinPktSize = 28; /* 20-byte IPv4 header plus 8-byte payload */
1053 raCfg.numCxts = 0x400;
1054 raCfg.cxtDiscardThresh = 0x400;
1055 raCfg.nodeDiscardThresh = 0x1000;
1056 raCfg.cxtTimeout = 60000;
1057 raCfg.clockRate = 350;
1058 raCfg.heapRegionThresh = 0x400;
1059 raCfg.heapBase[0] = 0x90000000ULL;
1061 paCfg.raCfg = &raCfg;
1062 }
1064 paCfg.initTable = TRUE;
1065 #ifndef SIMULATOR_SUPPORT
1066 paCfg.initDefaultRoute = TRUE;
1067 #endif
1068 paCfg.baseAddr = CSL_NETCP_CFG_REGS;
1069 paCfg.sizeCfg = &paSize;
1072 ret = Pa_create (&paCfg, bases, &tFramework.passHandle);
1073 if (ret != pa_OK) {
1074 SALog ("initPa: Pa_create returned with error code %d\n", ret);
1075 return (-1);
1076 }
1078 /* Download the firmware */
1079 if (downloadPaFirmware ())
1080 return (-1);
1082 #endif
1084 return (0);
1086 }
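/* QMSS/descriptor setup performed by setupQmMem() below:
 *   1. Qmss_init()/Qmss_start() using the internal linking RAM.
 *   2. Qmss_insertMemoryRegion() registers memDescRam as memory region 0
 *      (TF_NUM_DESC descriptors of TF_SIZE_DESC bytes each).
 *   3. Cppi_initDescriptor() initializes the host descriptors and pushes them
 *      onto the general purpose free descriptor queue TF_Q_FREE_DESC.
 */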
1088 int setupQmMem (void)
1089 {
1090 Qmss_InitCfg qmssInitConfig;
1091 Qmss_MemRegInfo memInfo;
1092 Cppi_DescCfg descCfg;
1093 int32_t result;
1094 int n;
1096 memset (&qmssInitConfig, 0, sizeof (Qmss_InitCfg));
1097 memset (memDescRam, 0, sizeof (memDescRam));
1099 qmssInitConfig.linkingRAM0Base = 0; /* Use internal linking RAM */
1100 #if !defined(NSS_LITE) && !defined(NSS_LITE2)
1101 qmssInitConfig.linkingRAM0Size = 0;
1102 #else
1103 qmssInitConfig.linkingRAM0Size = TF_NUM_DESC; /* 0 */
1104 #endif
1105 qmssInitConfig.linkingRAM1Base = 0;
1106 qmssInitConfig.maxDescNum = TF_NUM_DESC;
1108 result = Qmss_init (&qmssInitConfig, &qmssGblCfgParams);
1109 if (result != QMSS_SOK) {
1110 SALog ("setupQmMem: qmss_Init failed with error code %d\n", result);
1111 return (-1);
1112 }
1114 result = Qmss_start();
1115 if (result != QMSS_SOK) {
1116 SALog ("setupQmMem: Qmss_start failed with error code %d\n", result);
1117 return (-1);
1118 }
1120 /* Setup a single memory region for descriptors */
1121 memset(&memInfo, 0, sizeof(memInfo));
1122 memset (memDescRam, 0, sizeof(memDescRam));
1123 memInfo.descBase = (uint32_t *)utilgAddr((uint32_t)memDescRam);
1124 memInfo.descSize = TF_SIZE_DESC;
1125 memInfo.descNum = TF_NUM_DESC;
1126 memInfo.manageDescFlag = Qmss_ManageDesc_MANAGE_DESCRIPTOR;
1127 memInfo.memRegion = Qmss_MemRegion_MEMORY_REGION0;
1128 memInfo.startIndex = 0;
1130 result = Qmss_insertMemoryRegion (&memInfo);
1131 if (result < QMSS_SOK) {
1132 SALog ("setupQmMem: Qmss_insertMemoryRegion returned error code %d\n", result);
1133 return (-1);
1134 }
  /* Initialize the descriptors. This function opens a general
   * purpose queue and initializes the memory from region 0, placing
   * the initialized descriptors onto that queue */
1140 memset(&descCfg, 0, sizeof(descCfg));
1141 descCfg.queueGroup = 0;
1142 descCfg.memRegion = Qmss_MemRegion_MEMORY_REGION0;
1143 descCfg.descNum = TF_NUM_DESC;
1144 descCfg.destQueueNum = TF_Q_FREE_DESC;
1145 descCfg.queueType = Qmss_QueueType_GENERAL_PURPOSE_QUEUE;
1146 descCfg.initDesc = Cppi_InitDesc_INIT_DESCRIPTOR;
1147 descCfg.descType = Cppi_DescType_HOST;
1148 descCfg.returnQueue.qNum = QMSS_PARAM_NOT_SPECIFIED;
1149 descCfg.returnQueue.qMgr = 0;
1150 descCfg.epibPresent = Cppi_EPIB_EPIB_PRESENT;
1152 /* descCfg.returnPushPolicy = Qmss_Location_TAIL; */
1153 descCfg.cfg.host.returnPolicy = Cppi_ReturnPolicy_RETURN_ENTIRE_PACKET;
1154 descCfg.cfg.host.psLocation = Cppi_PSLoc_PS_IN_DESC;
1156 tFramework.QfreeDesc = Cppi_initDescriptor (&descCfg, (uint32_t *)&n);
1158 if (n != descCfg.descNum) {
1159 SALog ("setupQmMem: expected %d descriptors to be initialized, only %d are initialized\n", descCfg.descNum, n);
1160 return (-1);
1161 }
1163 return (0);
1165 }
1167 int setupPassQmMem (void)
1168 {
1170 #ifdef NETSS_INTERNAL_PKTDMA
1172 Qmss_InitCfg qmssInitConfig;
1173 Qmss_StartCfg qmssStartConfig;
1174 Qmss_MemRegInfo memInfo;
1175 Cppi_DescCfg descCfg;
1176 int32_t result;
1177 int n;
1179 memset (&qmssInitConfig, 0, sizeof (Qmss_InitCfg));
1180 memset (&qmssStartConfig, 0, sizeof (Qmss_StartCfg));
1182 //qmssInitConfig.linkingRAM0Base = utilgAddr((uint32_t)memLinkRam); // It should be 0x0 for internal RAM
1183 qmssInitConfig.linkingRAM0Base = 0;
1184 qmssInitConfig.linkingRAM0Size = TF_NUM_DESC;
1185 qmssInitConfig.linkingRAM1Base = 0;
1186 qmssInitConfig.maxDescNum = TF_NUM_DESC;
1188 // Supply virtual-2-physical conversion functions
1189 qmssNetssGblCfgParams.virt2Phy = Netss_qmssVirtToPhy;
1190 qmssNetssGblCfgParams.phy2Virt = Netss_qmssPhyToVirt;
1191 qmssNetssGblCfgParams.virt2PhyDesc = Netss_qmssConvertDescVirtToPhy;
1192 qmssNetssGblCfgParams.phy2VirtDesc = Netss_qmssConvertDescPhyToVirt;
1194 result = Qmss_initSubSys (&tFramework.tfPaQmssHandle, Qmss_SubSys_NETSS, &qmssInitConfig, &qmssNetssGblCfgParams);
1195 if (result != QMSS_SOK) {
1196 SALog ("setupPassQmMem: Qmss_Init failed with error code %d\n", result);
1197 return (-1);
1198 }
1200 result = Qmss_startSubSysCfg(&tFramework.tfPaQmssHandle, Qmss_SubSys_NETSS, &qmssStartConfig);
1201 if (result != QMSS_SOK) {
1202 SALog ("setupPassQmMem: Qmss_start failed with error code %d\n", result);
1203 return (-1);
1204 }
1206 /* Setup a single memory region for descriptors */
1207 memset(&memInfo, 0, sizeof(memInfo));
1208 memset (passDescRam, 0, TF_SIZE_DESC*TF_NUM_DESC);
1209 memInfo.descBase = (uint32_t *)(passDescRam);
1210 memInfo.descSize = TF_SIZE_DESC;
1211 memInfo.descNum = TF_NUM_DESC;
1212 memInfo.manageDescFlag = Qmss_ManageDesc_MANAGE_DESCRIPTOR;
1213 memInfo.memRegion = Qmss_MemRegion_MEMORY_REGION0;
1214 memInfo.startIndex = 0;
1216 result = Qmss_insertMemoryRegionSubSys (tFramework.tfPaQmssHandle, &memInfo);
1217 if (result < QMSS_SOK) {
1218 SALog ("setupQmMem: Qmss_insertMemoryRegion returned error code %d\n", result);
1219 return (-1);
1220 }
  /* Initialize the descriptors. This function opens a general
   * purpose queue and initializes the memory from region 0, placing
   * the initialized descriptors onto that queue */
1226 memset(&descCfg, 0, sizeof(descCfg));
1227 descCfg.queueGroup = 0;
1228 descCfg.memRegion = Qmss_MemRegion_MEMORY_REGION0;
1229 descCfg.descNum = TF_NUM_DESC;
1230 descCfg.destQueueNum = TF_Q_LOC_FREE_DESC;
1231 descCfg.queueType = Qmss_QueueType_GENERAL_PURPOSE_QUEUE;
1232 descCfg.initDesc = Cppi_InitDesc_INIT_DESCRIPTOR;
1233 descCfg.descType = Cppi_DescType_HOST;
1234 descCfg.returnQueue.qNum = QMSS_PARAM_NOT_SPECIFIED;
1235 descCfg.returnQueue.qMgr = 0;
1236 descCfg.epibPresent = Cppi_EPIB_EPIB_PRESENT;
1238 //descCfg.cfg.host.returnPolicy = Cppi_ReturnPolicy_RETURN_ENTIRE_PACKET;
1239 descCfg.cfg.host.returnPolicy = Cppi_ReturnPolicy_RETURN_BUFFER;
1240 descCfg.cfg.host.psLocation = Cppi_PSLoc_PS_IN_DESC;
1242 tFramework.QLocfreeDesc = Cppi_initDescriptorSubSys (tFramework.tfPaQmssHandle, &descCfg, (uint32_t *)&n);
1244 if (n != descCfg.descNum) {
1245 SALog ("setupPassQmMem: expected %d descriptors to be initialized, only %d are initialized\n", descCfg.descNum, n);
1246 return (-1);
1247 }
1248 #endif
1250 return (0);
1252 }
1254 int closeQmMem(void)
1255 {
1256 Qmss_Result qmss_result;
1258 if ((qmss_result = Qmss_removeMemoryRegion (Qmss_MemRegion_MEMORY_REGION0, 0)) != QMSS_SOK)
1259 {
1260 SALog ("Error : Remove memory region error code : %d\n", qmss_result);
1261 return qmss_result;
1262 }
1264 if ((qmss_result = Qmss_exit ()))
1265 {
1266 SALog ("Error : exit error code : %d\n", qmss_result);
1267 return qmss_result;
1268 }
1270 #ifdef NETSS_INTERNAL_PKTDMA
1272 if ((qmss_result = Qmss_removeMemoryRegionSubSys (tFramework.tfPaQmssHandle, Qmss_MemRegion_MEMORY_REGION0, 0)) != QMSS_SOK)
1273 {
1274 SALog ("Error : Remove PASS QMSS memory region error code : %d\n", qmss_result);
1275 return qmss_result;
1276 }
1278 if ((qmss_result = Qmss_exitSubSys (tFramework.tfPaQmssHandle)))
1279 {
1280 SALog ("Error : PASS QMSS exit error code : %d\n", qmss_result);
1281 return qmss_result;
1282 }
1284 #endif
1286 return qmss_result;
1287 }
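/* CPDMA setup performed by setupCpdma() below:
 *   1. Cppi_init() followed by Cppi_open() on the NETCP CPDMA instance.
 *   2. All rx and tx CPDMA channels from the NSS layout are opened (initially
 *      disabled) and then enabled.
 *   3. Cppi_setCpdmaLoopback(..., 0) clears the loopback bit, disabling the
 *      internal loopback path of the PASS CDMA.
 * The NETSS_INTERNAL_PKTDMA variant repeats the same steps for the PASS-local
 * PKTDMA instance.
 */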
1289 int setupCpdma (void)
1290 {
1291 Cppi_CpDmaInitCfg cpdmaCfg;
1292 Cppi_RxChInitCfg rxChCfg;
1293 Cppi_TxChInitCfg txChCfg;
1295 int32_t result;
1296 int i;
1297 uint8_t isAlloc;
1299 result = Cppi_init (&cppiGblCfgParams);
1300 if (result != CPPI_SOK) {
1301 SALog ("setupCpdma: cpp_Init returned error %d\n", result);
1302 return (-1);
1303 }
1305 memset(&cpdmaCfg, 0, sizeof(Cppi_CpDmaInitCfg));
1306 cpdmaCfg.dmaNum = Cppi_CpDma_NETCP_CPDMA;
1308 tFramework.tfPaCppiHandle = Cppi_open (&cpdmaCfg);
1309 if (tFramework.tfPaCppiHandle == NULL) {
1310 SALog ("setupCpdma: cppi_Open returned NULL cppi handle\n");
1311 return (-1);
1312 }
1314 #ifdef NETSS_INTERNAL_PKTDMA
1316 memset(&cpdmaCfg, 0, sizeof(Cppi_CpDmaInitCfg));
1317 cpdmaCfg.dmaNum = Cppi_CpDma_NETCP_LOCAL_CPDMA;
  cpdmaCfg.qm0BaseAddress = 0xff1b8000;  /* TODO: replace hard-coded address with the CSL definition */
  cpdmaCfg.qm1BaseAddress = 0xff1b8400;  /* TODO: replace hard-coded address with the CSL definition */
  cpdmaCfg.qm2BaseAddress = 0xff1b8000;  /* TODO: replace hard-coded address with the CSL definition */
  cpdmaCfg.qm3BaseAddress = 0xff1b8400;  /* TODO: replace hard-coded address with the CSL definition */
1323 tFramework.tfPaLocCppiHandle = Cppi_open (&cpdmaCfg);
1324 if (tFramework.tfPaLocCppiHandle == NULL) {
1325 SALog ("setupCpdma: cppi_Open returned NULL PA local cppi handle\n");
1326 return (-1);
1327 }
1329 #endif
1331 /* Open all rx channels */
1332 rxChCfg.rxEnable = Cppi_ChState_CHANNEL_DISABLE;
1334 for (i = 0; i < nssGblCfgParams.layout.numRxCpdmaChans; i++) {
1335 rxChCfg.channelNum = i;
1336 tFramework.tfPaRxChHnd[i] = Cppi_rxChannelOpen (tFramework.tfPaCppiHandle, &rxChCfg, &isAlloc);
1338 if (tFramework.tfPaRxChHnd[i] == NULL) {
1339 SALog ("setupCpdma: cppi_RxChannelOpen returned NULL handle for channel number %d\n", i);
1340 return (-1);
1341 }
1343 Cppi_channelEnable (tFramework.tfPaRxChHnd[i]);
1344 }
  /* Open all tx channels. */
1347 txChCfg.priority = 2;
1348 txChCfg.txEnable = Cppi_ChState_CHANNEL_DISABLE;
1349 txChCfg.filterEPIB = FALSE;
1350 txChCfg.filterPS = FALSE;
1351 txChCfg.aifMonoMode = FALSE;
1353 for (i = 0; i < nssGblCfgParams.layout.numTxCpdmaChans; i++) {
1354 txChCfg.channelNum = i;
1355 tFramework.tfPaTxChHnd[i] = Cppi_txChannelOpen (tFramework.tfPaCppiHandle, &txChCfg, &isAlloc);
1357 if (tFramework.tfPaTxChHnd[i] == NULL) {
1358 SALog ("setupCpdma: cppi_TxChannelOpen returned NULL handle for channel number %d\n", i);
1359 return (-1);
1360 }
1362 Cppi_channelEnable (tFramework.tfPaTxChHnd[i]);
1363 }
  /* Clear CPPI Loopback bit in PASS CDMA Global Emulation Control Register */
1366 Cppi_setCpdmaLoopback(tFramework.tfPaCppiHandle, 0);
1368 #ifdef NETSS_INTERNAL_PKTDMA
1370 /* Open all local rx channels */
1371 rxChCfg.rxEnable = Cppi_ChState_CHANNEL_DISABLE;
1373 for (i = 0; i < nssGblCfgParams.layout.numRxCpdmaChans; i++) {
1374 rxChCfg.channelNum = i;
1375 tFramework.tfPaLocRxChHnd[i] = Cppi_rxChannelOpen (tFramework.tfPaLocCppiHandle, &rxChCfg, &isAlloc);
1377 if (tFramework.tfPaLocRxChHnd[i] == NULL) {
1378 SALog ("setupCpdma: cppi_RxChannelOpen returned NULL handle for local rx channel number %d\n", i);
1379 return (-1);
1380 }
1382 Cppi_channelEnable (tFramework.tfPaLocRxChHnd[i]);
1383 }
  /* Open all local tx channels. */
1386 txChCfg.priority = 2;
1387 txChCfg.txEnable = Cppi_ChState_CHANNEL_DISABLE;
1388 txChCfg.filterEPIB = FALSE;
1389 txChCfg.filterPS = FALSE;
1390 txChCfg.aifMonoMode = FALSE;
1392 for (i = 0; i < nssGblCfgParams.layout.numTxCpdmaChans; i++) {
1393 txChCfg.channelNum = i;
1394 tFramework.tfPaLocTxChHnd[i] = Cppi_txChannelOpen (tFramework.tfPaLocCppiHandle, &txChCfg, &isAlloc);
1396 if (tFramework.tfPaLocTxChHnd[i] == NULL) {
1397 SALog ("setupCpdma: cppi_TxChannelOpen returned NULL handle for local tx channel number %d\n", i);
1398 return (-1);
1399 }
1401 Cppi_channelEnable (tFramework.tfPaLocTxChHnd[i]);
1402 }
  /* Clear CPPI Loopback bit in PASS CDMA Global Emulation Control Register */
1405 Cppi_setCpdmaLoopback(tFramework.tfPaLocCppiHandle, 0);
1407 #endif
1409 return (0);
1411 }
1413 static int closeCpdma(void)
1414 {
1415 int i;
1416 Cppi_Result cppi_result;
1418 for (i = 0; i < nssGblCfgParams.layout.numTxCpdmaChans; i++)
1419 {
1420 if ((cppi_result = Cppi_channelClose (tFramework.tfPaTxChHnd[i])) != CPPI_SOK) {
1421 SALog ("Cppi_channelClose for Tx err: %d \n", cppi_result);
1422 return (cppi_result);
1423 }
1424 }
1426 for (i = 0; i < nssGblCfgParams.layout.numRxCpdmaChans; i++)
1427 {
1428 if ((cppi_result = Cppi_channelClose (tFramework.tfPaRxChHnd[i])) != CPPI_SOK) {
1429 SALog ("Cppi_channelClose for Rx err: %d \n", cppi_result);
1430 return (cppi_result);
1431 }
1432 }
1434 #ifdef NETSS_INTERNAL_PKTDMA
1436 /* Close the local cpDma setup */
1437 for (i = 0; i < nssGblCfgParams.layout.numRxCpdmaChans; i++) {
    if ((cppi_result = Cppi_channelClose (tFramework.tfPaLocRxChHnd[i])) != CPPI_SOK) {
      SALog ("closeCpdma: Cppi_channelClose returned error code (%d) for PASS local rx channel %d\n", cppi_result, i);
1440 return (-1);
1441 }
1442 }
1443 for (i = 0; i < nssGblCfgParams.layout.numTxCpdmaChans; i++) {
    if ((cppi_result = Cppi_channelClose (tFramework.tfPaLocTxChHnd[i])) != CPPI_SOK) {
      SALog ("closeCpdma: Cppi_channelClose returned error code (%d) for PASS local tx channel %d\n", cppi_result, i);
1446 return (-1);
1447 }
1448 }
1450 #endif
1453 /* Close CPPI CPDMA instance */
1454 {
1455 if ((cppi_result = Cppi_close (tFramework.tfPaCppiHandle)) != CPPI_SOK)
1456 {
1457 SALog ("Error: Closing CPPI CPDMA error code : %d\n", cppi_result);
1458 return (cppi_result);
1459 }
1461 #ifdef NETSS_INTERNAL_PKTDMA
1462 if ((cppi_result = Cppi_close (tFramework.tfPaLocCppiHandle)) != CPPI_SOK)
1463 {
1464 SALog ("Error: Closing CPPI CPDMA error code : %d\n", cppi_result);
1465 return (cppi_result);
1466 }
1467 #endif
1469 /* Deinitialize CPPI LLD */
1470 if ((cppi_result = Cppi_exit ()) != CPPI_SOK)
1471 {
1472 SALog ("Error : Exiting CPPI error code : %d\n", cppi_result);
1473 return (cppi_result);
1474 }
1475 }
1477 return (cppi_result);
1478 }
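/* Queue setup pattern used by setupQueues() below: for each "linked buffer"
 * queue, free host descriptors are popped from the free descriptor queue,
 * pre-linked to a fixed-size buffer (memQx[] / memHostQx[]), given a return
 * queue equal to the linked-buffer queue itself (the host-visible queues also
 * set a RETURN_BUFFER return policy), and then pushed onto that queue.
 * Received packets therefore recycle automatically: when a descriptor is
 * returned, it lands back on its original free queue with its buffer still
 * attached.
 */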
1482 /* Setup all the queues used in the example */
1483 int setupQueues (void)
1484 {
1485 int i;
1486 uint8_t isAlloc;
1488 Qmss_Queue q;
1489 Cppi_HostDesc *hd;
  /* The PA transmit queues (corresponding to the tx cdma channels) */
1492 for (i = 0; i < nssGblCfgParams.layout.numTxQueues; i++) {
1494 tFramework.QPaTx[i] = Qmss_queueOpen (Qmss_QueueType_PASS_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAlloc);
1496 if (tFramework.QPaTx[i] < 0) {
1497 SALog ("setupQueues: Qmss_queueOpen failed for PA transmit queue number %d\n", nssGblCfgParams.layout.txQueueBase+i);
1498 return (-1);
1499 }
1501 Qmss_setQueueThreshold (tFramework.QPaTx[i], 1, 1);
1503 }
1505 /* The queues with attached buffers */
1506 tFramework.QLinkedBuf1 = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LINKED_BUF_Q1, &isAlloc);
1508 if (tFramework.QLinkedBuf1 < 0) {
1509 SALog ("setupQueues: Qmss_queueOpen failed for queue %d\n", TF_LINKED_BUF_Q1);
1510 return (-1);
1511 }
1513 tFramework.QLinkedBuf2 = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LINKED_BUF_Q2, &isAlloc);
1515 if (tFramework.QLinkedBuf2 < 0) {
1516 SALog ("SetupQueues: Qmss_queueOpen failed for queue %d\n", TF_LINKED_BUF_Q2);
1517 return (-1);
1518 }
1520 tFramework.QLinkedBuf3 = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LINKED_BUF_Q3, &isAlloc);
1522 if (tFramework.QLinkedBuf3 < 0) {
1523 SALog ("SetupQueues: Qmss_queueOpen failed for queue %d\n", TF_LINKED_BUF_Q3);
1524 return (-1);
1525 }
1527 tFramework.QLinkedBuf4 = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LINKED_BUF_Q4, &isAlloc);
1529 if (tFramework.QLinkedBuf4 < 0) {
    SALog ("setupQueues: Qmss_queueOpen failed for queue %d\n", TF_LINKED_BUF_Q4);
1531 return (-1);
1532 }
1534 tFramework.QHostLinkedBuf1 = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_HOST_LINKED_BUF_Q1, &isAlloc);
1536 if (tFramework.QHostLinkedBuf1 < 0) {
1537 SALog ("setupQueues: Qmss_queueOpen failed for queue %d\n", TF_HOST_LINKED_BUF_Q1);
1538 return (-1);
1539 }
1541 tFramework.QHostLinkedBuf2 = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_HOST_LINKED_BUF_Q2, &isAlloc);
1543 if (tFramework.QHostLinkedBuf2 < 0) {
1544 SALog ("SetupQueues: Qmss_queueOpen failed for queue %d\n", TF_HOST_LINKED_BUF_Q2);
1545 return (-1);
1546 }
1548 tFramework.QHostLinkedBuf3 = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_HOST_LINKED_BUF_Q3, &isAlloc);
1550 if (tFramework.QHostLinkedBuf3 < 0) {
1551 SALog ("SetupQueues: Qmss_queueOpen failed for queue %d\n", TF_HOST_LINKED_BUF_Q3);
1552 return (-1);
1553 }
1555 tFramework.QHostLinkedBuf4 = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_HOST_LINKED_BUF_Q4, &isAlloc);
1557 if (tFramework.QHostLinkedBuf4 < 0) {
    SALog ("setupQueues: Qmss_queueOpen failed for queue %d\n", TF_HOST_LINKED_BUF_Q4);
1559 return (-1);
1560 }
1564 /* Attach buffers to the queues and push them onto the queue */
1565 q.qMgr = 0;
1566 q.qNum = tFramework.QLinkedBuf1;
1568 for (i = 0; i < TF_LINKED_BUF_Q1_NBUFS; i++) {
1570 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QfreeDesc)) & ~15);
1571 if (hd == NULL) {
1572 SALog ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QfreeDesc);
1573 return (-1);
1574 }
1576 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ1[i])), sizeof(memQ1[i]));
1577 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ1[i])), sizeof(memQ1[i]));
1578 hd->nextBDPtr = NULL;
1579 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1580 Cppi_setReturnPolicy (Cppi_DescType_HOST, (Cppi_Desc *)hd, Cppi_ReturnPolicy_RETURN_BUFFER);
1581 Qmss_queuePushDesc (tFramework.QLinkedBuf1, (Ptr)hd);
1583 }
1585 q.qNum = tFramework.QLinkedBuf2;
1587 for (i = 0; i < TF_LINKED_BUF_Q2_NBUFS; i++) {
1589 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QfreeDesc)) & ~15);
1590 if (hd == NULL) {
1591 SALog ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QfreeDesc);
1592 return (-1);
1593 }
1595 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ2[i])), sizeof(memQ2[i]));
1596 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ2[i])), sizeof(memQ2[i]));
1597 hd->nextBDPtr = NULL;
1598 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1599 Cppi_setReturnPolicy (Cppi_DescType_HOST, (Cppi_Desc *)hd, Cppi_ReturnPolicy_RETURN_BUFFER);
1600 Qmss_queuePushDesc (tFramework.QLinkedBuf2, (Ptr)hd);
1602 }
1604 q.qNum = tFramework.QLinkedBuf3;
1606 for (i = 0; i < TF_LINKED_BUF_Q3_NBUFS; i++) {
1608 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QfreeDesc)) & ~15);
1609 if (hd == NULL) {
1610 SALog ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QfreeDesc);
1611 return (-1);
1612 }
1614 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ3[i])), sizeof(memQ3[i]));
1615 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ3[i])), sizeof(memQ3[i]));
1616 hd->nextBDPtr = NULL;
1617 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1618 Cppi_setReturnPolicy (Cppi_DescType_HOST, (Cppi_Desc *)hd, Cppi_ReturnPolicy_RETURN_BUFFER);
1619 Qmss_queuePushDesc (tFramework.QLinkedBuf3, (Ptr)hd);
1621 }
1623 q.qNum = tFramework.QLinkedBuf4;
1625 for (i = 0; i < TF_LINKED_BUF_Q4_NBUFS; i++) {
1627 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QfreeDesc)) & ~15);
1628 if (hd == NULL) {
1629 SALog ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QfreeDesc);
1630 return (-1);
1631 }
1633 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ4[i])), sizeof(memQ4[i]));
1634 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memQ4[i])), sizeof(memQ4[i]));
1635 hd->nextBDPtr = NULL;
1636 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1637 Cppi_setReturnPolicy (Cppi_DescType_HOST, (Cppi_Desc *)hd, Cppi_ReturnPolicy_RETURN_BUFFER);
1638 Qmss_queuePushDesc (tFramework.QLinkedBuf4, (Ptr)hd);
1640 }
1642 /* Attach buffers to the queues and push them onto the queue */
1643 q.qNum = tFramework.QHostLinkedBuf1;
1645 for (i = 0; i < TF_HOST_LINKED_BUF_Q1_NBUFS; i++) {
1647 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QfreeDesc)) & ~15);
1648 if (hd == NULL) {
1649 SALog ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QfreeDesc);
1650 return (-1);
1651 }
1653 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memHostQ1[i])), sizeof(memHostQ1[i]));
1654 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memHostQ1[i])), sizeof(memHostQ1[i]));
1655 hd->nextBDPtr = NULL;
1656 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1657 Cppi_setReturnPolicy (Cppi_DescType_HOST, (Cppi_Desc *)hd, Cppi_ReturnPolicy_RETURN_BUFFER);
1658 Qmss_queuePushDesc (tFramework.QHostLinkedBuf1, (Ptr)hd);
1660 }
1662 q.qNum = tFramework.QHostLinkedBuf2;
1664 for (i = 0; i < TF_HOST_LINKED_BUF_Q2_NBUFS; i++) {
1666 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QfreeDesc)) & ~15);
1667 if (hd == NULL) {
1668 SALog ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QfreeDesc);
1669 return (-1);
1670 }
1672 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memHostQ2[i])), sizeof(memHostQ2[i]));
1673 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memHostQ2[i])), sizeof(memHostQ2[i]));
1674 hd->nextBDPtr = NULL;
1675 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1676 Cppi_setReturnPolicy (Cppi_DescType_HOST, (Cppi_Desc *)hd, Cppi_ReturnPolicy_RETURN_BUFFER);
1677 Qmss_queuePushDesc (tFramework.QHostLinkedBuf2, (Ptr)hd);
1679 }
1681 q.qNum = tFramework.QHostLinkedBuf3;
1683 for (i = 0; i < TF_HOST_LINKED_BUF_Q3_NBUFS; i++) {
1685 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QfreeDesc)) & ~15);
1686 if (hd == NULL) {
1687 SALog ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QfreeDesc);
1688 return (-1);
1689 }
1691 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memHostQ3[i])), sizeof(memHostQ3[i]));
1692 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memHostQ3[i])), sizeof(memHostQ3[i]));
1693 hd->nextBDPtr = NULL;
1694 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1695 Cppi_setReturnPolicy (Cppi_DescType_HOST, (Cppi_Desc *)hd, Cppi_ReturnPolicy_RETURN_BUFFER);
1696 Qmss_queuePushDesc (tFramework.QHostLinkedBuf3, (Ptr)hd);
1698 }
1700 q.qNum = tFramework.QHostLinkedBuf4;
1702 for (i = 0; i < TF_HOST_LINKED_BUF_Q4_NBUFS; i++) {
1704 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QfreeDesc)) & ~15);
1705 if (hd == NULL) {
1706 SALog ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QfreeDesc);
1707 return (-1);
1708 }
1710 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memHostQ4[i])), sizeof(memHostQ4[i]));
1711 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)utilgAddr((uint32_t)(memHostQ4[i])), sizeof(memHostQ4[i]));
1712 hd->nextBDPtr = NULL;
1713 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1714 Cppi_setReturnPolicy (Cppi_DescType_HOST, (Cppi_Desc *)hd, Cppi_ReturnPolicy_RETURN_BUFFER);
1715 Qmss_queuePushDesc (tFramework.QHostLinkedBuf4, (Ptr)hd);
1717 }
1719 /* General purpose queues */
1720 for (i = 0; i < TF_NUM_GEN_QUEUES; i++) {
1722 tFramework.QGen[i] = Qmss_queueOpen (Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_FIRST_GEN_QUEUE + i, &isAlloc);
1724 if (tFramework.QGen[i] < 0) {
1725 SALog ("SetupQueues: Qmss_queueOpen failed for queue %d\n", TF_FIRST_GEN_QUEUE + i);
1726 return (-1);
1727 }
1728 }
1730 #ifdef NETSS_INTERNAL_PKTDMA
1732 /* The queues with attached buffers */
1733 tFramework.QLocLinkedBuf1 = Qmss_queueOpenSubSys (tFramework.tfPaQmssHandle, Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LOC_LINKED_BUF_Q1, &isAlloc);
  if (tFramework.QLocLinkedBuf1 < 0)  {
1736 SALog ("setupQueues: Qmss_queueOpenSubSys failed for queue %d\n", TF_LOC_LINKED_BUF_Q1);
1737 return (-1);
1738 }
1740 tFramework.QLocLinkedBuf2 = Qmss_queueOpenSubSys (tFramework.tfPaQmssHandle, Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LOC_LINKED_BUF_Q2, &isAlloc);
  if (tFramework.QLocLinkedBuf2 < 0)  {
1743 SALog ("SetupQueues: Qmss_queueOpenSubSys failed for queue %d\n", TF_LOC_LINKED_BUF_Q2);
1744 return (-1);
1745 }
1747 tFramework.QLocLinkedBuf3 = Qmss_queueOpenSubSys (tFramework.tfPaQmssHandle, Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LOC_LINKED_BUF_Q3, &isAlloc);
  if (tFramework.QLocLinkedBuf3 < 0)  {
1750 SALog ("SetupQueues: Qmss_queueOpenSubSys failed for queue %d\n", TF_LOC_LINKED_BUF_Q3);
1751 return (-1);
1752 }
1754 tFramework.QLocLinkedBuf4 = Qmss_queueOpenSubSys (tFramework.tfPaQmssHandle, Qmss_QueueType_GENERAL_PURPOSE_QUEUE, TF_LOC_LINKED_BUF_Q4, &isAlloc);
  if (tFramework.QLocLinkedBuf4 < 0)  {
1757 SALog ("SetupQueues: Qmss_queueOpenSubSys failed for queue %d\n", TF_LOC_LINKED_BUF_Q4);
1758 return (-1);
1759 }
1762 /* Attach buffers to the queues and push them onto the queue */
1763 q.qMgr = 0;
1765 q.qNum = TF_LOC_LINKED_BUF_Q1;
1766 for (i = 0; i < TF_LINKED_BUF_Q1_NBUFS; i++) {
1768 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QLocfreeDesc)) & ~15);
1769 if (hd == NULL) {
1770 SALog ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QLocfreeDesc);
1771 return (-1);
1772 }
1774 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ1[i], sizeof(memLocQ1[i]));
1775 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ1[i], sizeof(memLocQ1[i]));
1777 hd->nextBDPtr = NULL;
1778 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1779 Qmss_queuePushDesc (tFramework.QLocLinkedBuf1, (Ptr)hd);
1781 }
1783 q.qNum = TF_LOC_LINKED_BUF_Q2;
1784 for (i = 0; i < TF_LINKED_BUF_Q2_NBUFS; i++) {
1786 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QLocfreeDesc)) & ~15);
1787 if (hd == NULL) {
1788 SALog ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QLocfreeDesc);
1789 return (-1);
1790 }
1792 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ2[i], sizeof(memLocQ2[i]));
1793 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ2[i], sizeof(memLocQ2[i]));
1795 hd->nextBDPtr = NULL;
1796 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1797 Qmss_queuePushDesc (tFramework.QLocLinkedBuf2, (Ptr)hd);
1799 }
1801 q.qNum = TF_LOC_LINKED_BUF_Q3;
1802 for (i = 0; i < TF_LINKED_BUF_Q3_NBUFS; i++) {
1804 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QLocfreeDesc)) & ~15);
1805 if (hd == NULL) {
1806 SALog ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QLocfreeDesc);
1807 return (-1);
1808 }
1810 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ3[i], sizeof(memLocQ3[i]));
1811 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ3[i], sizeof(memLocQ3[i]));
1813 hd->nextBDPtr = NULL;
1814 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1815 Qmss_queuePushDesc (tFramework.QLocLinkedBuf3, (Ptr)hd);
1817 }
1819 q.qNum = TF_LOC_LINKED_BUF_Q4;
1820 for (i = 0; i < TF_LINKED_BUF_Q4_NBUFS; i++) {
1822 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (tFramework.QLocfreeDesc)) & ~15);
1823 if (hd == NULL) {
1824 SALog ("setupQueues: Qmss_queuePop returned NULL on pop from queue number %d\n", tFramework.QLocfreeDesc);
1825 return (-1);
1826 }
1828 Cppi_setOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ4[i], sizeof(memLocQ4[i]));
1829 Cppi_setData (Cppi_DescType_HOST, (Cppi_Desc *)hd, (uint8_t *)memLocQ4[i], sizeof(memLocQ4[i]));
1831 hd->nextBDPtr = NULL;
1832 Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
1833 Qmss_queuePushDesc (tFramework.QLocLinkedBuf4, (Ptr)hd);
1835 }
1838 #endif
1842 return (0);
1844 }
1846 int closeQueues(void)
1847 {
1848 int i;
1850 /* The PA transmit queues (corresponding to the tx cdma channels */
1851 for (i = 0; i < nssGblCfgParams.layout.numTxQueues; i++) {
1852 Qmss_queueEmpty (tFramework.QPaTx[i]);
1853 Qmss_queueClose (tFramework.QPaTx[i]);
1854 }
1856 Qmss_queueEmpty(tFramework.QLinkedBuf1);
1857 Qmss_queueClose(tFramework.QLinkedBuf1);
1859 Qmss_queueEmpty(tFramework.QLinkedBuf2);
1860 Qmss_queueClose(tFramework.QLinkedBuf2);
1862 Qmss_queueEmpty(tFramework.QLinkedBuf3);
1863 Qmss_queueClose(tFramework.QLinkedBuf3);
1865 Qmss_queueEmpty(tFramework.QLinkedBuf4);
1866 Qmss_queueClose(tFramework.QLinkedBuf4);
1868 Qmss_queueEmpty(tFramework.QHostLinkedBuf1);
1869 Qmss_queueClose(tFramework.QHostLinkedBuf1);
1871 Qmss_queueEmpty(tFramework.QHostLinkedBuf2);
1872 Qmss_queueClose(tFramework.QHostLinkedBuf2);
1874 Qmss_queueEmpty(tFramework.QHostLinkedBuf3);
1875 Qmss_queueClose(tFramework.QHostLinkedBuf3);
1877 Qmss_queueEmpty(tFramework.QHostLinkedBuf4);
1878 Qmss_queueClose(tFramework.QHostLinkedBuf4);
1880 Qmss_queueEmpty(tFramework.QfreeDesc);
1881 Qmss_queueClose(tFramework.QfreeDesc);
1883 /* General purpose queues */
1884 for (i = 0; i < TF_NUM_GEN_QUEUES; i++) {
1885 Qmss_queueEmpty(tFramework.QGen[i]);
1886 Qmss_queueClose(tFramework.QGen[i]);
1887 }
1889 #ifdef NETSS_INTERNAL_PKTDMA
1891 Qmss_queueEmpty(tFramework.QLocLinkedBuf1);
1892 Qmss_queueClose(tFramework.QLocLinkedBuf1);
1894 Qmss_queueEmpty(tFramework.QLocLinkedBuf2);
1895 Qmss_queueClose(tFramework.QLocLinkedBuf2);
1897 Qmss_queueEmpty(tFramework.QLocLinkedBuf3);
1898 Qmss_queueClose(tFramework.QLocLinkedBuf3);
1900 Qmss_queueEmpty(tFramework.QLocLinkedBuf4);
1901 Qmss_queueClose(tFramework.QLocLinkedBuf4);
1903 Qmss_queueEmpty(tFramework.QLocfreeDesc);
1904 Qmss_queueClose(tFramework.QLocfreeDesc);
1906 #endif
1907 return 0;
1908 }
1913 /* Configure flows */
1914 int setupFlows (void)
1915 {
1916 Cppi_RxFlowCfg rxFlowCfg;
1917 uint8_t isAlloc;
1919 /* Configure Rx flow */
1920 rxFlowCfg.flowIdNum = tFramework.tfFlowNum = 0;
1921 rxFlowCfg.rx_dest_qnum = TF_FIRST_GEN_QUEUE + TF_NUM_GEN_QUEUES -1; /* Override in PA */
1922 rxFlowCfg.rx_dest_qmgr = 0;
1923 rxFlowCfg.rx_sop_offset = 0;
1924 rxFlowCfg.rx_ps_location = Cppi_PSLoc_PS_IN_DESC;
1925 rxFlowCfg.rx_desc_type = Cppi_DescType_HOST;
1926 rxFlowCfg.rx_error_handling = 1;
1927 rxFlowCfg.rx_psinfo_present = 1;
1928 rxFlowCfg.rx_einfo_present = 1;
1930 rxFlowCfg.rx_dest_tag_lo = 0;
1931 rxFlowCfg.rx_dest_tag_hi = 0;
1932 rxFlowCfg.rx_src_tag_lo = 0;
1933 rxFlowCfg.rx_src_tag_hi = 0;
1935 rxFlowCfg.rx_size_thresh0_en = 1;
1936 rxFlowCfg.rx_size_thresh1_en = 1;
1937 rxFlowCfg.rx_size_thresh2_en = 1;
1939 rxFlowCfg.rx_dest_tag_lo_sel = 0;
1940 rxFlowCfg.rx_dest_tag_hi_sel = 0;
1941 rxFlowCfg.rx_src_tag_lo_sel = 0;
1942 rxFlowCfg.rx_src_tag_hi_sel = 0;
1944 rxFlowCfg.rx_fdq1_qnum = tFramework.QLinkedBuf2;
1945 rxFlowCfg.rx_fdq1_qmgr = 0;
1946 rxFlowCfg.rx_fdq2_qnum = tFramework.QLinkedBuf2;
1947 rxFlowCfg.rx_fdq2_qmgr = 0;
1948 rxFlowCfg.rx_fdq3_qnum = tFramework.QLinkedBuf2;
1949 rxFlowCfg.rx_fdq3_qmgr = 0;
1951 rxFlowCfg.rx_size_thresh0 = TF_LINKED_BUF_Q1_BUF_SIZE;
1952 rxFlowCfg.rx_size_thresh1 = TF_LINKED_BUF_Q2_BUF_SIZE;
1953 rxFlowCfg.rx_size_thresh2 = TF_LINKED_BUF_Q3_BUF_SIZE;
1955 rxFlowCfg.rx_fdq0_sz0_qnum = tFramework.QLinkedBuf4;
1956 rxFlowCfg.rx_fdq0_sz0_qmgr = 0;
1957 rxFlowCfg.rx_fdq0_sz1_qnum = tFramework.QLinkedBuf4;
1958 rxFlowCfg.rx_fdq0_sz1_qmgr = 0;
1959 rxFlowCfg.rx_fdq0_sz2_qnum = tFramework.QLinkedBuf4;
1960 rxFlowCfg.rx_fdq0_sz2_qmgr = 0;
1961 rxFlowCfg.rx_fdq0_sz3_qnum = tFramework.QLinkedBuf4;
1962 rxFlowCfg.rx_fdq0_sz3_qmgr = 0;
1964 tFramework.tfPaFlowHnd = Cppi_configureRxFlow (tFramework.tfPaCppiHandle, &rxFlowCfg, &isAlloc);
1965 if (tFramework.tfPaFlowHnd == NULL) {
1966 SALog ("setupFlows: Cppi_configureRxFlow returned NULL\n");
1967 return (-1);
1968 }
1970 #ifdef NETSS_INTERNAL_PKTDMA
1972 /* Configure Local Rx flow */
1973 rxFlowCfg.flowIdNum = tFramework.tfLocFlowNum = 0;
1974 rxFlowCfg.rx_dest_qnum = 0; /* Override in PA */
1975 rxFlowCfg.rx_dest_qmgr = 0;
1976 rxFlowCfg.rx_sop_offset = 0;
1977 rxFlowCfg.rx_ps_location = Cppi_PSLoc_PS_IN_DESC;
1978 rxFlowCfg.rx_desc_type = Cppi_DescType_HOST;
1979 rxFlowCfg.rx_error_handling = 1;
1980 rxFlowCfg.rx_psinfo_present = 1;
1981 rxFlowCfg.rx_einfo_present = 1;
1983 rxFlowCfg.rx_dest_tag_lo = 0;
1984 rxFlowCfg.rx_dest_tag_hi = 0;
1985 rxFlowCfg.rx_src_tag_lo = 0;
1986 rxFlowCfg.rx_src_tag_hi = 0;
1988 rxFlowCfg.rx_size_thresh0_en = 1;
1989 rxFlowCfg.rx_size_thresh1_en = 1;
1990 rxFlowCfg.rx_size_thresh2_en = 1;
1992 rxFlowCfg.rx_fdq1_qnum = TF_LOC_LINKED_BUF_Q2;
1993 rxFlowCfg.rx_fdq1_qmgr = 0;
1994 rxFlowCfg.rx_fdq3_qnum = TF_LOC_LINKED_BUF_Q2;
1995 rxFlowCfg.rx_fdq3_qmgr = 0;
1996 rxFlowCfg.rx_fdq2_qnum = TF_LOC_LINKED_BUF_Q2;
1997 rxFlowCfg.rx_fdq2_qmgr = 0;
1999 rxFlowCfg.rx_size_thresh2 = TF_LINKED_BUF_Q3_BUF_SIZE;
2000 rxFlowCfg.rx_size_thresh1 = TF_LINKED_BUF_Q2_BUF_SIZE;
2001 rxFlowCfg.rx_size_thresh0 = TF_LINKED_BUF_Q1_BUF_SIZE;
2003 rxFlowCfg.rx_fdq0_sz0_qnum = TF_LOC_LINKED_BUF_Q1;
2004 rxFlowCfg.rx_fdq0_sz0_qmgr = 0;
2005 rxFlowCfg.rx_fdq0_sz1_qnum = TF_LOC_LINKED_BUF_Q2;
2006 rxFlowCfg.rx_fdq0_sz1_qmgr = 0;
2007 rxFlowCfg.rx_fdq0_sz2_qnum = TF_LOC_LINKED_BUF_Q3;
2008 rxFlowCfg.rx_fdq0_sz2_qmgr = 0;
2009 rxFlowCfg.rx_fdq0_sz3_qnum = TF_LOC_LINKED_BUF_Q4;
2010 rxFlowCfg.rx_fdq0_sz3_qmgr = 0;
2012 tFramework.tfPaLocFlowHnd0 = Cppi_configureRxFlow (tFramework.tfPaLocCppiHandle, &rxFlowCfg, &isAlloc);
2013 if (tFramework.tfPaLocFlowHnd0 == NULL) {
2014 SALog ("setupFlows: Cppi_configureRxFlow returned NULL on local flow 0\n");
2015 return (-1);
2016 }
2018 #endif
2022 return (0);
2024 }
2026 int closeFlows(void)
2027 {
2028 Cppi_Result cppi_result;
2030 if ((cppi_result = Cppi_closeRxFlow (tFramework.tfPaFlowHnd)) != CPPI_SOK) {
2031 SALog ("closeFlows: Cppi_closeRxFlow failed with error code = %d\n", cppi_result);
2033 return (-1);
2034 }
2036 #ifdef NETSS_INTERNAL_PKTDMA
2037 if ((cppi_result = Cppi_closeRxFlow (tFramework.tfPaLocFlowHnd0)) != CPPI_SOK) {
2038 SALog ("closeFlows: Cppi_closeRxFlow failed with error code = %d\n", cppi_result);
2040 return (-1);
2041 }
2043 #endif
2045 return 0;
2046 }
2047 #else
2049 /*
2050 * UDMA driver objects
2051 */
2052 struct Udma_DrvObj gUdmaDrvObj;
2053 struct Udma_ChObj gUdmaTxChObj;
2054 struct Udma_ChObj gUdmaRxChObj[2];
2055 struct Udma_FlowObj gUdmaFlowObj;
2056 struct Udma_RingObj gUdmaRingObj[2];
2057 struct Udma_EventObj gUdmaEvtObj;
2058 #if defined (AM64X_USE_DEFAULT_FLOW)
2059 struct Udma_EventObj gUdmaEvtObj0;
2060 #endif
2062 volatile int gRxPktCntInRing = 0;
2064 void framework_rxIsrFxn(Udma_EventHandle eventHandle,
2065 uint32_t eventType,
2066 void *appData)
2067 {
2068 gRxPktCntInRing++;
2069 return;
2070 }
2071 #if defined (AM64X_USE_DEFAULT_FLOW)
2072 volatile int gRxPktCntInRing0 = 0;
2074 void framework_rxIsrFxn0(Udma_EventHandle eventHandle,
2075 uint32_t eventType,
2076 void *appData)
2077 {
2078 gRxPktCntInRing0++;
2079 return;
2080 }
2081 #endif
2083 int frameworkUdmaInitDrv(void)
2084 {
2085 int32_t retVal = UDMA_SOK;
2086 Udma_InitPrms initPrms;
2087 uint32_t instId;
2089 #if defined(DMA_TYPE_LCDMA)
2090 instId = UDMA_INST_ID_PKTDMA_0;
2091 #else
2093 #if defined(BUILD_MPU)
2094 instId = UDMA_INST_ID_MAIN_0;
2095 #else
2096 instId = UDMA_INST_ID_MCU_0;
2097 #endif /* BUILD_MPU */
2098 #endif /* DMA_TYPE_LCDMA */
2100 UdmaInitPrms_init(instId, &initPrms);
2102 #if defined (DMA_TYPE_LCDMA) && defined(BUILD_MPU)
2103 /*PKTDMA Tx Ch 25 (SAUL Tx Ch1) */
2104 initPrms.rmInitPrms.startMappedTxCh[UDMA_MAPPED_TX_GROUP_SAUL] = 25U;
2105 initPrms.rmInitPrms.numMappedTxCh[UDMA_MAPPED_TX_GROUP_SAUL] = 1U;
2107 /* PKTDMA Rx Ch 19,20 (SAUL Rx Ch2,3) */
2108 initPrms.rmInitPrms.startMappedRxCh[UDMA_MAPPED_RX_GROUP_SAUL- UDMA_NUM_MAPPED_TX_GROUP] = 19U;
2109 initPrms.rmInitPrms.numMappedRxCh[UDMA_MAPPED_RX_GROUP_SAUL- UDMA_NUM_MAPPED_TX_GROUP] = 2U;
2111 /* PKTDMA Tx Rings 88 to 95 (which are tied to SAUL Tx Ch1) */
2112 initPrms.rmInitPrms.startMappedRing[UDMA_MAPPED_TX_GROUP_SAUL] = 88U;
2113 initPrms.rmInitPrms.numMappedRing[UDMA_MAPPED_TX_GROUP_SAUL] = 8U;
2115 /* PKTDMA Rx Rings 40 to 47 [with offset 112] (which are tied to SAUL Rx Ch2,3) */
2116 initPrms.rmInitPrms.startMappedRing[UDMA_MAPPED_RX_GROUP_SAUL] = 152U;
2117 initPrms.rmInitPrms.numMappedRing[UDMA_MAPPED_RX_GROUP_SAUL] = 8U;
2119 #endif
2121 retVal = Udma_init(&gUdmaDrvObj, &initPrms);
2122 if(UDMA_SOK == retVal)
2123 {
2124 tFramework.gDrvHandle = &gUdmaDrvObj;
2125 }
2126 else
2127 {
2128 SALog("error in frameworkUdmaInitDrv() \n");
2129 System_flush();
2130 }
2131 return (retVal);
2132 }
2134 int frameworkUdmaSetupTxChannel(void)
2135 {
2136 uint32_t chType;
2137 Udma_ChPrms chPrms;
2138 Udma_ChTxPrms txPrms;
2139 Udma_RingHandle ringHandle;
2141 int32_t retVal = UDMA_SOK;
2142 /* Create the Tx Channel */
2143 /* TX channel parameters */
2144 #if defined (SOC_AM64X)
2145 chType = UDMA_CH_TYPE_TX_MAPPED;
2146 #else
2147 chType = UDMA_CH_TYPE_TX;
2148 #endif
2149 UdmaChPrms_init(&chPrms, chType);
2151 #if defined (SOC_AM64X)
2152 chPrms.mappedChGrp = UDMA_MAPPED_TX_GROUP_SAUL;
2153 chPrms.peerChNum = TF_SA2UL_PEER_TXCHAN;
2154 chPrms.fqRingPrms.ringMem = &memTxRing[0];
2155 chPrms.fqRingPrms.elemCnt = TF_RING_TRCNT;
2156 /* this is the dual ring mode */
2157 chPrms.fqRingPrms.mode = TISCI_MSG_VALUE_RM_RING_MODE_RING;
2158 #if defined (ACP_COHERENCY)
2159 chPrms.fqRingPrms.asel = CSL_LCDMA_RINGACC_ASEL_ENDPOINT_ACP_WR_ALLOC;
2160 #else
2161 chPrms.fqRingPrms.asel = CSL_LCDMA_RINGACC_ASEL_ENDPOINT_PHYSADDR;
2162 #endif
2164 #else
2165 chPrms.peerChNum = TF_SA2UL_PEER_TXCHAN;
2166 chPrms.fqRingPrms.ringMem = &memTxRing[0];
2167 chPrms.cqRingPrms.ringMem = &memTxCompRing[0];
2168 chPrms.fqRingPrms.elemCnt = TF_RING_TRCNT;
2169 chPrms.cqRingPrms.elemCnt = TF_RING_TRCNT;
2171 chPrms.fqRingPrms.mode = TISCI_MSG_VALUE_RM_RING_MODE_RING;
2172 chPrms.cqRingPrms.mode = TISCI_MSG_VALUE_RM_RING_MODE_RING;
2173 #endif
2174 /* Open TX channel for transmit */
2175 tFramework.gTxChHandle = &gUdmaTxChObj;
2176 retVal = Udma_chOpen(tFramework.gDrvHandle, tFramework.gTxChHandle, chType, &chPrms);
2178 if(UDMA_SOK == retVal)
2179 {
2180 UdmaChTxPrms_init(&txPrms, chType);
2181 txPrms.dmaPriority = UDMA_DEFAULT_UTC_CH_DMA_PRIORITY;
2182 txPrms.fetchWordSize = TF_SIZE_DESC >> 2;
2183 retVal = Udma_chConfigTx(tFramework.gTxChHandle, &txPrms);
2184 if(UDMA_SOK == retVal)
2185 {
2186 retVal = Udma_chEnable(tFramework.gTxChHandle);
2187 }
2188 }
2189 else
2190 {
2191 SALog("error in Tx Udma_chOpen() \n");
2192 System_flush();
2193 }
2195 if (retVal == UDMA_SOK)
2196 {
2197 /* Update the Tx Ring numbers */
2198 ringHandle = Udma_chGetFqRingHandle(tFramework.gTxChHandle);
2199 tFramework.gTxRingHandle = ringHandle;
2200 tFramework.txRingNum = Udma_ringGetNum(ringHandle);
2202 ringHandle = Udma_chGetCqRingHandle(tFramework.gTxChHandle);
2203 tFramework.gTxComplRingHandle = ringHandle;
2204 tFramework.txComplRingNum = Udma_ringGetNum(ringHandle);
2205 }
2206 return (retVal);
2208 }
2210 int frameworkUdmaSetupRxChannel(void)
2211 {
2212 uint32_t chType;
2213 Udma_ChPrms chPrms;
2214 Udma_ChRxPrms rxPrms;
2215 Udma_RingHandle ringHandle;
2216 #if defined(SOC_AM64X)
2217 /* Flow allocation happens inside UDMA LLD */
2218 #else
2219 Udma_FlowHandle flowHandle;
2220 Udma_FlowPrms flowPrms;
2221 #endif
2222 Udma_EventPrms eventPrms;
2223 uint32_t intArg = 0;
2224 int32_t retVal = UDMA_SOK;
2226 /* Create the Rx Channel */
2227 /* RX channel parameters */
2228 #if defined (SOC_AM64X)
2229 chType = UDMA_CH_TYPE_RX_MAPPED;
2230 #else
2231 chType = UDMA_CH_TYPE_RX;
2233 /* Note that ring memory is not provided for the second SA channel thread;
2234 * the channel and ring are created for thread 1 and that flow is reused for SA2UL.
2235 */
2237 /* create a flow allocation here for Rx channels */
2238 tFramework.gRxFlowHandle = flowHandle = &gUdmaFlowObj;
2239 retVal = Udma_flowAlloc(tFramework.gDrvHandle, flowHandle,1);
2241 if (retVal != UDMA_SOK)
2242 {
2243 return (retVal);
2244 }
2245 /* Update the created default flow with above configurations for sa2ul */
2246 tFramework.tfFlowNum = Udma_flowGetNum(flowHandle);
2247 #endif
2249 /* Create the Rx Channel */
2250 /* RX channel parameters */
2251 UdmaChPrms_init(&chPrms, chType);
2252 #if defined(SOC_AM64X)
2253 chPrms.peerChNum = TF_SA2UL_PEER_RXCHAN1;
2254 chPrms.mappedChGrp = UDMA_MAPPED_RX_GROUP_SAUL;
2255 chPrms.fqRingPrms.ringMem = &memRxFreeRing[0];
2256 chPrms.fqRingPrms.elemCnt = TF_RING_TRCNT;
2257 chPrms.fqRingPrms.mode = TISCI_MSG_VALUE_RM_RING_MODE_RING;
2258 #if defined (ACP_COHERENCY)
2259 chPrms.fqRingPrms.asel = CSL_LCDMA_RINGACC_ASEL_ENDPOINT_ACP_WR_ALLOC;
2260 #else
2261 chPrms.fqRingPrms.asel = CSL_LCDMA_RINGACC_ASEL_ENDPOINT_PHYSADDR;
2262 #endif
2263 #else
2264 chPrms.peerChNum = TF_SA2UL_PEER_RXCHAN1;
2265 chPrms.fqRingPrms.ringMem = &memRxFreeRing[0];
2266 chPrms.cqRingPrms.ringMem = &memRxRing[0];
2267 chPrms.fqRingPrms.elemCnt = TF_RING_TRCNT;
2268 chPrms.cqRingPrms.elemCnt = TF_RING_TRCNT;
2270 chPrms.fqRingPrms.mode = TISCI_MSG_VALUE_RM_RING_MODE_MESSAGE;
2271 chPrms.cqRingPrms.mode = TISCI_MSG_VALUE_RM_RING_MODE_MESSAGE;
2272 #endif
2273 /* Open RX channel for receive from SA */
2274 tFramework.gRxChHandle[1] = &gUdmaRxChObj[1];
2275 retVal = Udma_chOpen(tFramework.gDrvHandle, tFramework.gRxChHandle[1], chType, &chPrms);
2277 if(UDMA_SOK == retVal)
2278 {
2279 UdmaChRxPrms_init(&rxPrms, chType);
2280 rxPrms.dmaPriority = UDMA_DEFAULT_UTC_CH_DMA_PRIORITY;
2281 rxPrms.fetchWordSize = TF_SIZE_DESC >> 2;
2282 #if defined(SOC_AM64X)
2283 rxPrms.flowEInfoPresent = 1;
2284 rxPrms.flowPsInfoPresent = 1;
2285 #else
2286 rxPrms.flowIdFwRangeStart = tFramework.tfFlowNum;
2287 rxPrms.flowIdFwRangeCnt = 1;
2288 rxPrms.configDefaultFlow = FALSE;
2289 #endif
2290 retVal = Udma_chConfigRx(tFramework.gRxChHandle[1], &rxPrms);
2291 }
2292 else
2293 {
2294 SALog("error in Rx-1 Udma_chOpen() \n");
2295 System_flush();
2296 }
2297 #if defined(SOC_AM64X)
2298 /* Update the Rx Ring numbers */
2299 ringHandle = Udma_chGetFqRingHandle(tFramework.gRxChHandle[1]);
2300 tFramework.gRxFreeRingHandle = ringHandle;
2301 tFramework.rxFreeRingNum = Udma_ringGetNum(ringHandle);
2302 tFramework.gRxFlowHandle = Udma_chGetDefaultFlowHandle(tFramework.gRxChHandle[1]);
2303 tFramework.tfFlowNum = Udma_flowGetNum(tFramework.gRxFlowHandle);
2304 #if defined (AM64X_USE_DEFAULT_FLOW)
2305 tFramework.tfFlowNum = 0x3FFF;
2306 #endif
2307 /* Update the Rx Ring numbers */
2308 ringHandle = Udma_chGetCqRingHandle(tFramework.gRxChHandle[1]);
2309 tFramework.gRxRingHandle = ringHandle;
2310 tFramework.rxComplRingNum = Udma_ringGetNum(ringHandle);
2311 #endif
2313 /* Create the channel for the second thread with same flow as other thread */
2314 UdmaChPrms_init(&chPrms, chType);
2315 chPrms.peerChNum = TF_SA2UL_PEER_RXCHAN0;
2316 #if defined(SOC_AM64X)
2317 chPrms.mappedChGrp = UDMA_MAPPED_RX_GROUP_SAUL;
2318 chPrms.fqRingPrms.elemCnt = TF_RING_TRCNT;
2319 chPrms.fqRingPrms.mode = TISCI_MSG_VALUE_RM_RING_MODE_RING;
2320 #if defined (ACP_COHERENCY)
2321 chPrms.fqRingPrms.asel = CSL_LCDMA_RINGACC_ASEL_ENDPOINT_ACP_WR_ALLOC;
2322 #else
2323 chPrms.fqRingPrms.asel = CSL_LCDMA_RINGACC_ASEL_ENDPOINT_PHYSADDR;
2324 #endif
2325 #if defined(AM64X_USE_DEFAULT_FLOW)
2326 chPrms.fqRingPrms.ringMem = &memRxRing[0];
2327 #endif
2328 #endif
2329 tFramework.gRxChHandle[0] = &gUdmaRxChObj[0];
2330 retVal = Udma_chOpen(tFramework.gDrvHandle, tFramework.gRxChHandle[0], chType, &chPrms);
2332 if(UDMA_SOK == retVal)
2333 {
2334 UdmaChRxPrms_init(&rxPrms, chType);
2335 rxPrms.dmaPriority = UDMA_DEFAULT_UTC_CH_DMA_PRIORITY;
2336 rxPrms.fetchWordSize = TF_SIZE_DESC >> 2;
2337 #if defined(SOC_AM64X)
2338 #if defined(AM64X_USE_DEFAULT_FLOW)
2339 rxPrms.flowEInfoPresent = 1;
2340 rxPrms.flowPsInfoPresent = 1;
2341 #endif
2342 #else
2343 rxPrms.flowIdFwRangeStart = tFramework.tfFlowNum;
2344 rxPrms.flowIdFwRangeCnt = 1;
2345 #endif
2346 rxPrms.configDefaultFlow = FALSE;
2348 retVal = Udma_chConfigRx(tFramework.gRxChHandle[0], &rxPrms);
2349 }
2350 else
2351 {
2352 SALog("error in Rx-0 Udma_chOpen() \n");
2353 System_flush();
2354 }
2355 #if defined(SOC_AM64X)
2356 /* Nothing to do for the flow since it was already set up earlier */
2357 /* Dual ring mode: the free ring also serves as the completion ring */
2358 tFramework.rxComplRingNum = tFramework.rxFreeRingNum;
2359 /* Register Ring complete Isr */
2360 tFramework.gRxEvtHandle = &gUdmaEvtObj;
2362 /* Initialize event parameters */
2363 intArg = tFramework.rxFreeRingNum;
2364 UdmaEventPrms_init(&eventPrms);
2365 eventPrms.eventType = UDMA_EVENT_TYPE_DMA_COMPLETION;
2366 eventPrms.eventMode = UDMA_EVENT_MODE_SHARED;
2367 eventPrms.chHandle = tFramework.gRxChHandle[1];
2368 eventPrms.masterEventHandle = NULL;
2369 eventPrms.eventCb = &framework_rxIsrFxn;
2370 eventPrms.appData = (void *)(uintptr_t)intArg;
2371 retVal = Udma_eventRegister(tFramework.gDrvHandle, tFramework.gRxEvtHandle, &eventPrms);
2372 if (retVal != UDMA_SOK)
2373 {
2374 SALog("error in Udma_eventRegister() \n");
2375 System_flush();
2376 }
2378 #if defined (AM64X_USE_DEFAULT_FLOW)
2379 /* Nothing to do for the flow since it was already set up earlier */
2380 /* Dual ring mode: the free ring also serves as the completion ring */
2381 /* Update the Rx Ring numbers */
2382 ringHandle = Udma_chGetCqRingHandle(tFramework.gRxChHandle[0]);
2383 tFramework.gRxRingHandle = ringHandle;
2384 tFramework.rxComplRingNum = Udma_ringGetNum(ringHandle);
2386 /* Register Ring complete Isr */
2387 tFramework.gRxEvtHandle = &gUdmaEvtObj0;
2389 /* Initialize event parameters */
2390 intArg = tFramework.rxComplRingNum;
2391 UdmaEventPrms_init(&eventPrms);
2392 eventPrms.eventType = UDMA_EVENT_TYPE_DMA_COMPLETION;
2393 eventPrms.eventMode = UDMA_EVENT_MODE_SHARED;
2394 eventPrms.chHandle = tFramework.gRxChHandle[0];
2395 eventPrms.masterEventHandle = NULL;
2396 eventPrms.eventCb = &framework_rxIsrFxn0;
2397 eventPrms.appData = (void *)(uintptr_t)intArg;
2398 retVal = Udma_eventRegister(tFramework.gDrvHandle, tFramework.gRxEvtHandle, &eventPrms);
2399 if (retVal != UDMA_SOK)
2400 {
2401 SALog("error in Udma_eventRegister() \n");
2402 System_flush();
2403 }
2404 #endif
2405 #else
2406 if (UDMA_SOK == retVal)
2407 {
2408 /* Update the Rx Ring numbers */
2409 ringHandle = Udma_chGetFqRingHandle(tFramework.gRxChHandle[1]);
2410 tFramework.gRxFreeRingHandle = ringHandle;
2411 tFramework.rxFreeRingNum = Udma_ringGetNum(ringHandle);
2413 ringHandle = Udma_chGetCqRingHandle(tFramework.gRxChHandle[1]);
2414 tFramework.gRxRingHandle = ringHandle;
2415 tFramework.rxComplRingNum = Udma_ringGetNum(ringHandle);
2417 /* Update the flow configuration to be used for both SA2UL Rx channels */
2418 /* Update the Rx Flow to be used for SA2UL */
2419 UdmaFlowPrms_init(&flowPrms, UDMA_CH_TYPE_RX);
2420 flowPrms.einfoPresent = TRUE;
2421 flowPrms.psInfoPresent = TRUE;
2422 flowPrms.errorHandling = TRUE;
2423 flowPrms.descType = CSL_UDMAP_DESC_TYPE_HOST;
2424 flowPrms.psLocation = CSL_UDMAP_PS_LOC_DESC;
2425 flowPrms.defaultRxCQ = tFramework.rxComplRingNum;
2426 flowPrms.srcTagLo = 0;
2427 flowPrms.srcTagLoSel = 4;
2428 flowPrms.srcTagHi = 0;
2429 flowPrms.srcTagHiSel = 2;
2430 flowPrms.destTagLo = 0;
2431 flowPrms.destTagLoSel = 4;
2432 flowPrms.destTagHi = 0;
2433 flowPrms.destTagHiSel = 5;
2435 /* Use the same free queue for every size bin; the default flow is not
2436 * used to select different queues based on size thresholds */
2437 flowPrms.fdq0Sz0Qnum = tFramework.rxFreeRingNum;
2438 flowPrms.fdq0Sz1Qnum = tFramework.rxFreeRingNum;
2439 flowPrms.fdq0Sz2Qnum = tFramework.rxFreeRingNum;
2440 flowPrms.fdq0Sz3Qnum = tFramework.rxFreeRingNum;
2441 flowPrms.fdq1Qnum = tFramework.rxFreeRingNum;
2442 flowPrms.fdq2Qnum = tFramework.rxFreeRingNum;
2443 flowPrms.fdq3Qnum = tFramework.rxFreeRingNum;
2445 /* Update the created default flow with above configurations for sa2ul */
2446 retVal = Udma_flowConfig(flowHandle, 0U, &flowPrms);
2447 if (retVal != UDMA_SOK)
2448 {
2449 SALog("error in Flow Config Udma_flowConfig() \n");
2450 System_flush();
2451 }
2453 /* Register Ring complete Isr */
2454 tFramework.gRxEvtHandle = &gUdmaEvtObj;
2456 /* Initialize event parameters */
2457 intArg = tFramework.rxComplRingNum;
2458 UdmaEventPrms_init(&eventPrms);
2459 eventPrms.eventType = UDMA_EVENT_TYPE_DMA_COMPLETION;
2460 eventPrms.eventMode = UDMA_EVENT_MODE_SHARED;
2461 eventPrms.chHandle = tFramework.gRxChHandle[1];
2462 eventPrms.masterEventHandle = NULL;
2463 eventPrms.eventCb = &framework_rxIsrFxn;
2464 eventPrms.appData = (void *)(uintptr_t)intArg;
2465 retVal = Udma_eventRegister(tFramework.gDrvHandle, tFramework.gRxEvtHandle, &eventPrms);
2466 if (retVal != UDMA_SOK)
2467 {
2468 SALog("error in Udma_eventRegister() \n");
2469 System_flush();
2470 }
2472 }
2473 #endif
2474 /* Enable the channel after everything is setup */
2475 if(UDMA_SOK == retVal)
2476 {
2477 retVal = Udma_chEnable(tFramework.gRxChHandle[1]);
2478 if (UDMA_SOK == retVal)
2479 {
2480 retVal = Udma_chEnable(tFramework.gRxChHandle[0]);
2481 }
2483 if (retVal != UDMA_SOK)
2484 {
2485 SALog("error in Udma_chEnable() \n");
2486 System_flush();
2487 }
2489 }
2490 return (retVal);
2492 }
2494 /* Pause/Resume Tx DMA Channel for sa2ul */
2495 int salld_test_controlTxDma(uint32_t ctrl)
2496 {
2497 int ret;
2498 #if defined (__aarch64__)
2499 __asm (" isb ;");
2500 CSL_a53v8DsbSy();
2501 #endif
2503 #if defined (BUILD_MCU)
2504 CSL_armR5Dsb();
2505 #endif
2507 if (ctrl == SA2UL_UDMAP_TX_PAUSE)
2508 {
2509 ret = Udma_chPause(tFramework.gTxChHandle);
2510 }
2511 else
2512 {
2513 ret = Udma_chResume(tFramework.gTxChHandle);
2514 }
2516 #if defined (__aarch64__)
2517 __asm (" isb ;");
2518 CSL_a53v8DsbSy();
2519 #endif
2521 #if defined (BUILD_MCU)
2522 CSL_armR5Dsb();
2523 #endif
2525 return (ret);
2526 }
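/* Usage sketch (illustrative only): a test can pause the SA2UL Tx channel while it
 * stages several descriptors and resume it afterwards. SA2UL_UDMAP_TX_RESUME is
 * assumed here to be the companion control value to SA2UL_UDMAP_TX_PAUSE; any value
 * other than SA2UL_UDMAP_TX_PAUSE resumes the channel.
 *
 *   salld_test_controlTxDma (SA2UL_UDMAP_TX_PAUSE);
 *   ... RingPush() several prepared descriptors ...
 *   salld_test_controlTxDma (SA2UL_UDMAP_TX_RESUME);
 */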
2528 //=============================================================================
2529 // RingPush
2530 // Copy packet descriptor ptr to the specified ring's next free entry
2531 // (pass-by-reference) and commit it.
2532 //=============================================================================
2533 void RingPush( Udma_RingHandle ringHandle, uint32_t pktSize, physptr_t ptr )
2534 {
2535 #if defined (TEST_CORE_CACHE_COHERENT) || defined (ACP_COHERENCY)
2536 /* No cache operations are needed */
2537 #else
2538 const void *virtBufPtr;
2539 #endif
2540 physptr_t physDescPtr;
2541 int32_t retVal;
2542 FW_CPPI_DESC_T *pDesc = (FW_CPPI_DESC_T *) ptr;
2543 if ( (pDesc == NULL) || (ringHandle == NULL) )
2544 {
2545 return;
2546 }
2548 pDesc->hostDesc.bufPtr = (uint64_t) Osal_VirtToPhys ((void *)(uintptr_t)pDesc->hostDesc.bufPtr);
2549 pDesc->hostDesc.orgBufPtr = (uint64_t) Osal_VirtToPhys ((void *)(uintptr_t)pDesc->hostDesc.orgBufPtr);
2550 physDescPtr = (uint64_t) Osal_VirtToPhys ((void *)&pDesc->hostDesc);
2552 #if defined (ACP_COHERENCY)
2553 physDescPtr = CSL_pktdmaMakeAselAddr((uint64_t) physDescPtr, \
2554 CSL_LCDMA_RINGACC_ASEL_ENDPOINT_ACP_WR_ALLOC);
2555 pDesc->hostDesc.bufPtr = CSL_pktdmaMakeAselAddr((uint64_t) pDesc->hostDesc.bufPtr, \
2556 CSL_LCDMA_RINGACC_ASEL_ENDPOINT_ACP_WR_ALLOC);
2558 #endif
2560 #if defined (TEST_CORE_CACHE_COHERENT) || defined (ACP_COHERENCY)
2561 /* No cache operations are needed */
2562 #else
2563 /* Write back and invalidate the descriptor and buffer in the data cache */
2564 CacheP_wbInv((const void *)&pDesc->hostDesc, TF_SIZE_DESC);
2565 virtBufPtr = (const void *)(uintptr_t)pDesc->hostDesc.bufPtr;
2566 CacheP_wbInv(virtBufPtr, pktSize);
2567 #endif
2568 retVal = Udma_ringQueueRaw(ringHandle,(uint64_t)physDescPtr);
2570 if (retVal != UDMA_SOK)
2571 {
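/* Ring push failed; spin here so the failure is visible under a debugger */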
2572 while(1);
2573 }
2575 return;
2576 }
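/* Usage sketch (illustrative only, not called by the framework): a test that has taken
 * a descriptor from the tFramework.txReadyDescs free list and filled its buffer could
 * hand it to the SA2UL Tx ring roughly as follows. pDesc and pktLen are hypothetical
 * locals; RingPush() performs the virt-to-phys conversion and cache maintenance itself.
 *
 *   FW_CPPI_DESC_T *pDesc = tFramework.txReadyDescs;
 *   tFramework.txReadyDescs = (FW_CPPI_DESC_T *)(uintptr_t)pDesc->nextPtr;
 *   CSL_udmapCppi5SetPktLen ((void *)&pDesc->hostDesc,
 *                            CSL_UDMAP_CPPI5_PD_DESCINFO_DTYPE_VAL_HOST, pktLen);
 *   RingPush (tFramework.gTxRingHandle, pktLen, (physptr_t)pDesc);
 */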
2578 //=============================================================================
2579 // RingPop
2580 // Return the next packet descriptor ptr (if available) from the specified
2581 // ring and acknowledge it.
2582 //=============================================================================
2583 int32_t RingPop( Udma_RingHandle ringHandle, FW_CPPI_DESC_T **pAppDesc )
2584 {
2585 uint64_t pDesc = 0;
2586 int32_t retVal = 0;
2587 FW_CPPI_DESC_T *pVirtHostDesc;
2588 #if defined (TEST_CORE_CACHE_COHERENT) || defined (ACP_COHERENCY)
2589 /* No cache operations are needed */
2590 #else
2591 uint32_t pktsize;
2592 #endif
2593 if ((pAppDesc == (FW_CPPI_DESC_T **)NULL) || (ringHandle == (Udma_RingHandle)NULL))
2594 {
2595 return -1; /* NULL not allowed */
2596 }
2599 Udma_ringDequeueRaw(ringHandle, &pDesc);
2601 #if defined (ACP_COHERENCY)
2602 pDesc = CSL_pktdmaClrAselInAddr((uint64_t) pDesc);
2603 #endif
2605 if(pDesc == 0)
2606 {
2607 *pAppDesc = (FW_CPPI_DESC_T *)NULL;
2608 retVal = -1;
2609 }
2610 else
2611 {
2612 *pAppDesc = pVirtHostDesc = (FW_CPPI_DESC_T *)Osal_PhysToVirt(pDesc);
2613 #if defined (TEST_CORE_CACHE_COHERENT) || defined (ACP_COHERENCY)
2614 /* No cache operations are needed */
2615 #else
2616 CacheP_Inv((const void *) &pVirtHostDesc->hostDesc, TF_SIZE_DESC);
2617 #endif
2618 pVirtHostDesc->hostDesc.bufPtr = (uint64_t)Osal_PhysToVirt(pVirtHostDesc->hostDesc.bufPtr);
2619 pVirtHostDesc->hostDesc.orgBufPtr = (uint64_t)Osal_PhysToVirt(pVirtHostDesc->hostDesc.orgBufPtr);
2620 #if defined (TEST_CORE_CACHE_COHERENT) || defined (ACP_COHERENCY)
2621 /* No cache operations are needed, clear any ASEL bits */
2622 pVirtHostDesc->hostDesc.bufPtr = CSL_pktdmaClrAselInAddr((uint64_t)pVirtHostDesc->hostDesc.bufPtr);
2623 #else
2624 pktsize = CSL_FEXT (pVirtHostDesc->hostDesc.descInfo, UDMAP_CPPI5_PD_DESCINFO_PKTLEN);
2625 CacheP_Inv((const void *)(uintptr_t)pVirtHostDesc->hostDesc.bufPtr, (int32_t)pktsize);
2626 #endif
2627 }
2629 return (retVal);
2630 }
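/* Usage sketch (illustrative only): draining the SA2UL Rx completion ring after
 * framework_rxIsrFxn() has signalled an arrival, then recycling the descriptor to the
 * Rx free ring. pRxDesc and pktLen are hypothetical locals.
 *
 *   FW_CPPI_DESC_T *pRxDesc;
 *   if (RingPop (tFramework.gRxRingHandle, &pRxDesc) == 0) {
 *       uint32_t pktLen = CSL_udmapCppi5GetPktLen ((void *)&pRxDesc->hostDesc);
 *       ... process pktLen bytes at pRxDesc->hostDesc.bufPtr (now a virtual address) ...
 *       RingPush (tFramework.gRxFreeRingHandle, pktLen, (physptr_t)pRxDesc);
 *   }
 */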
2632 /** ============================================================================
2633 * @n@b fwTx_ready_push
2634 *
2635 * @b Description
2636 * @n This function pushes a TX descriptor onto the txReadyDescs free list
2637 *
2638 * @param[in] size Buffer size associated with the descriptor (unused)
2639 * @param[in] phys Physical address of the descriptor to push
2640 *
2641 * @return none
2642 * =============================================================================
2643 */
2644 static void
2645 fwTx_ready_push
2646 (
2647 uint32_t size,
2648 physptr_t phys
2649 )
2650 {
2651 FW_CPPI_DESC_T *pCppiDesc = (FW_CPPI_DESC_T *)Osal_PhysToVirt (phys);
2653 pCppiDesc->nextPtr = (uint64_t) (uintptr_t) tFramework.txReadyDescs;
2654 tFramework.txReadyDescs = pCppiDesc;
2655 } /* fwTx_ready_push */
2657 void setup_cppi5InitHostDescQueueTx(uint32_t retqIdx, uint32_t descCnt, void (*pfPush)(uint32_t, physptr_t) )
2658 {
2659 uint8_t *pBuffer;
2660 CSL_UdmapCppi5HMPD *pDesc;
2661 FW_CPPI_DESC_T *pCppiDesc;
2662 uint32_t i;
2664 for(i=0; i<descCnt; i++)
2665 {
2666 pBuffer = &memBufRamTx[i][0];
2667 pCppiDesc = (FW_CPPI_DESC_T *) &memDescRamTx[i];
2668 pDesc = (CSL_UdmapCppi5HMPD *) &pCppiDesc->hostDesc;
2670 /* setup the descriptor */
2671 memset(pCppiDesc, 0, sizeof(FW_CPPI_DESC_T));
2672 CSL_udmapCppi5SetDescType(pDesc, CSL_UDMAP_CPPI5_PD_DESCINFO_DTYPE_VAL_HOST);
2673 #if defined (DMA_TYPE_LCDMA)
2674 #else
2675 CSL_udmapCppi5SetReturnPolicy( pDesc,
2676 CSL_UDMAP_CPPI5_PD_DESCINFO_DTYPE_VAL_HOST,
2677 CSL_UDMAP_CPPI5_PD_PKTINFO2_RETPOLICY_VAL_ENTIRE_PKT,
2678 CSL_UDMAP_CPPI5_PD_PKTINFO2_EARLYRET_VAL_NO,
2679 CSL_UDMAP_CPPI5_PD_PKTINFO2_RETPUSHPOLICY_VAL_TO_TAIL,
2680 retqIdx);
2681 #endif
2682 pDesc->bufPtr = (uint64_t) (uintptr_t) Osal_VirtToPhys(pBuffer);
2683 pDesc->bufInfo1 = TF_DESC_BUFSIZE;
2684 #if defined (DMA_TYPE_LCDMA)
2685 /* On AM64X (LCDMA), there are no original buffer fields */
2686 #else
2687 pDesc->orgBufLen = TF_DESC_BUFSIZE;
2688 pDesc->orgBufPtr = pDesc->bufPtr;
2689 #endif
2691 #if defined (TEST_CORE_CACHE_COHERENT)
2692 /* No cache operations are needed */
2693 #else
2694 /* make sure the descriptor is written back to memory for coherency */
2695 CacheP_wbInv(pCppiDesc, sizeof(FW_CPPI_DESC_T));
2696 #endif
2697 pfPush( 0, (physptr_t)Osal_VirtToPhys(pCppiDesc));
2698 }
2699 }
2701 /** ============================================================================
2702 * @n@b fwRx_free_push
2703 *
2704 * @b Description
2705 * @n This function pushes Rx free descriptors (with their attached buffers) onto the Rx free ring
2706 *
2707 * =============================================================================
2708 */
2709 static void
2710 fwRx_free_push
2711 (
2712 uint32_t size,
2713 physptr_t phys
2714 )
2715 {
2716 uint32_t pktLen;
2717 CSL_UdmapCppi5HMPD *pDesc = (CSL_UdmapCppi5HMPD *)Osal_PhysToVirt (phys);
2719 pktLen = CSL_udmapCppi5GetPktLen((void *) pDesc);
2720 /* Push descriptor to Rx free descriptor queue */
2721 RingPush (tFramework.gRxFreeRingHandle, pktLen,(physptr_t) pDesc);
2723 } /* fwRx_free_push */
2725 #if defined (AM64X_USE_DEFAULT_FLOW)
2726 static void
2727 fwRx_free_push0
2728 (
2729 uint32_t size,
2730 physptr_t phys
2731 )
2732 {
2733 uint32_t pktLen;
2734 CSL_UdmapCppi5HMPD *pDesc = (CSL_UdmapCppi5HMPD *)Osal_PhysToVirt (phys);
2736 pktLen = CSL_udmapCppi5GetPktLen((void *) pDesc);
2738 /* Push descriptor to Rx free descriptor queue */
2739 RingPush (tFramework.gRxRingHandle, pktLen,(physptr_t) pDesc);
2741 } /* fwRx_free_push0 */
2743 #endif
2746 void setup_cppi5InitHostDescQueueRx (uint32_t retqIdx, uint32_t start, uint32_t descCnt, uint32_t buffSize, void (*pfPush)(uint32_t, physptr_t) )
2747 {
2748 uint8_t *pBuffer;
2749 CSL_UdmapCppi5HMPD *pDesc;
2750 uint32_t i;
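/* Note: descCnt is treated as an exclusive end index, so callers pass the index one
 * past the last descriptor to initialize (see initNavss, which passes (start, descCnt)
 * pairs of (0, TF_NUM_DESC/2) and (TF_NUM_DESC/2, TF_NUM_DESC) when the default
 * flow is used). */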
2752 for(i=start; i<descCnt; i++)
2753 {
2754 pBuffer = &memBufRamRx[i][0];
2755 pDesc = (CSL_UdmapCppi5HMPD *)&memDescRamRx[i][0];
2757 /* setup the descriptor */
2758 memset(pDesc, 0, sizeof(CSL_UdmapCppi5HMPD));
2759 CSL_udmapCppi5SetDescType(pDesc, CSL_UDMAP_CPPI5_PD_DESCINFO_DTYPE_VAL_HOST);
2760 #if defined (DMA_TYPE_LCDMA)
2761 #else
2762 CSL_udmapCppi5SetReturnPolicy( pDesc, CSL_UDMAP_CPPI5_PD_DESCINFO_DTYPE_VAL_HOST,
2763 CSL_UDMAP_CPPI5_PD_PKTINFO2_RETPOLICY_VAL_ENTIRE_PKT,
2764 CSL_UDMAP_CPPI5_PD_PKTINFO2_EARLYRET_VAL_NO,
2765 CSL_UDMAP_CPPI5_PD_PKTINFO2_RETPUSHPOLICY_VAL_TO_TAIL,
2766 retqIdx);
2767 #endif
2768 pDesc->bufPtr = (uint64_t) (uintptr_t) Osal_VirtToPhys(pBuffer);
2769 pDesc->bufInfo1 = buffSize;
2770 #if defined (DMA_TYPE_LCDMA)
2771 /* On AM64X (LCDMA), there are no original buffer fields */
2772 #else
2773 pDesc->orgBufLen = buffSize;
2774 pDesc->orgBufPtr = pDesc->bufPtr;
2775 #endif
2776 pfPush(0, (physptr_t)Osal_VirtToPhys(pDesc));
2777 }
2778 }
2780 int initNavss(void)
2781 {
2782 int32_t retVal = UDMA_SOK;
2784 /* Initialize UDMA */
2785 retVal = frameworkUdmaInitDrv();
2787 if (retVal != UDMA_SOK)
2788 {
2789 SALog("error in creating the udma drv handle \n");
2790 System_flush();
2791 return (-1);
2792 }
2794 /* Create a Tx Channel */
2795 retVal = frameworkUdmaSetupTxChannel();
2796 if (retVal != UDMA_SOK)
2797 {
2798 SALog("error in creating the udma tx channel \n");
2799 System_flush();
2800 return (-1);
2801 }
2803 /* Create the descriptor pool for the Tx Ring */
2804 setup_cppi5InitHostDescQueueTx(tFramework.txComplRingNum, TF_NUM_DESC, fwTx_ready_push);
2806 /* Create the Rx channel for SA */
2807 retVal = frameworkUdmaSetupRxChannel();
2808 if (retVal != UDMA_SOK)
2809 {
2810 SALog("error in creating the udma rx channels \n");
2811 System_flush();
2812 return (-1);
2813 }
2815 /* Create the descriptor pool for the Rx ring */
2816 if (retVal == UDMA_SOK)
2817 {
2818 #if defined(AM64X_USE_DEFAULT_FLOW)
2819 setup_cppi5InitHostDescQueueRx (tFramework.rxFreeRingNum, 0,
2820 TF_NUM_DESC/2,
2821 TF_DESC_BUFSIZE, fwRx_free_push );
2822 setup_cppi5InitHostDescQueueRx (tFramework.rxComplRingNum, TF_NUM_DESC/2,
2823 TF_NUM_DESC,
2824 TF_DESC_BUFSIZE, fwRx_free_push0 );
2825 #else
2826 setup_cppi5InitHostDescQueueRx (tFramework.rxFreeRingNum, 0,
2827 TF_NUM_DESC,
2828 TF_DESC_BUFSIZE, fwRx_free_push );
2829 #endif
2830 }
2832 return(retVal);
2833 }
2834 #endif
2836 #if defined (NSS_LITE2)
2837 int setupNavss(void)
2838 {
2839 /*
2840 * Only perform Navss init if we have not decided to bypass the test. In
2841 * case a bypass was decided, do not fail on Navss init so that other tests
2842 * can still run.
2843 */
2844 if (testCommonGetTestStatus(saDataModeTest) == SA_TEST_NOT_RUN)
2845 {
2846 if (initNavss()) {
2847 SALog ("setupNavss: initNavss failed\n");
2848 return (-1);
2849 }
2850 }
2851 return(0);
2852 }
2853 #endif
2855 /* Set up the QM/CPDMA */
2856 int initQm (void)
2857 {
2858 #ifdef NSS_LITE2
2859 return (setupNavss());
2861 #else
2862 if (setupQmMem()) {
2863 SALog ("initQm: setupQmMem failed\n");
2864 return (-1);
2865 }
2867 if (setupPassQmMem()) {
2868 SALog ("initQm: setupPassQmMem failed\n");
2869 return (-1);
2870 }
2872 if (setupCpdma ()) {
2873 SALog ("initQm: setupCpdma failed\n");
2874 return (-1);
2875 }
2877 if (setupQueues ()) {
2878 SALog ("initQm: setupQueues failed\n");
2879 return (-1);
2880 }
2882 if (setupFlows ()) {
2883 SALog ("initQm: setupFlows failed\n");
2884 return (-1);
2885 }
2886 return (0);
2887 #endif
2889 }
2891 /* clean up for QM/CPPI */
2892 static int exitQm(void)
2893 {
2894 #ifdef NSS_LITE2
2896 #else
2897 if (closeFlows ()) {
2898 SALog ("exitQm: closeFlows failed\n");
2899 return (-1);
2900 }
2902 if (closeQueues ()) {
2903 SALog ("exitQm: closeQueues failed\n");
2904 return (-1);
2905 }
2907 if (closeCpdma ()) {
2908 SALog ("exitQm: closeCpdma failed\n");
2909 return (-1);
2910 }
2912 if (closeQmMem()) {
2913 SALog ("exitQm: closeQmMem failed\n");
2914 return (-1);
2915 }
2916 #endif
2918 return 0;
2921 }
2924 /* A binary semaphore is used to gate access to the SA handle tables */
2925 int initSems (void)
2926 {
2927 SemaphoreP_Params params;
2929 SemaphoreP_Params_init (&params);
2930 params.mode = SemaphoreP_Mode_BINARY;
2931 tFramework.tfSaSem = SemaphoreP_create (1, &params);
2932 return (0);
2933 }
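/* Usage sketch (illustrative only): code that touches the shared SA handle tables can
 * bracket the access with the binary semaphore created above, using the PDK OSAL
 * SemaphoreP pend/post calls:
 *
 *   SemaphoreP_pend (tFramework.tfSaSem, SemaphoreP_WAIT_FOREVER);
 *   ... read/update the handle tables ...
 *   SemaphoreP_post (tFramework.tfSaSem);
 */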
2935 /* Delete the semaphore used to gate access to the SA handle tables */
2936 static int deleteSems (void)
2937 {
2938 SemaphoreP_delete (tFramework.tfSaSem);
2939 return (0);
2940 }
2942 /***************************************************************************************
2943 * FUNCTION PURPOSE: Power up PA subsystem
2944 ***************************************************************************************
2945 * DESCRIPTION: this function powers up the PA subsystem domains
2946 ***************************************************************************************/
2947 void passPowerUp (void)
2948 {
2949 /* PASS power domain is turned OFF by default. It needs to be turned on before doing any
2950 * PASS device register access. This is not required for the simulator. */
2952 #if !defined(NSS_LITE) && !defined(NSS_LITE2)
2953 /* Set PASS Power domain to ON */
2954 CSL_PSC_enablePowerDomain (CSL_PSC_PD_NETCP);
2956 /* Enable the clocks for PASS modules */
2957 CSL_PSC_setModuleNextState (CSL_PSC_LPSC_PA, PSC_MODSTATE_ENABLE);
2958 CSL_PSC_setModuleNextState (CSL_PSC_LPSC_CPGMAC, PSC_MODSTATE_ENABLE);
2959 CSL_PSC_setModuleNextState (CSL_PSC_LPSC_SA, PSC_MODSTATE_ENABLE);
2961 /* Start the state transition */
2962 CSL_PSC_startStateTransition (CSL_PSC_PD_NETCP);
2964 /* Wait until the state transition process is completed. */
2965 while (!CSL_PSC_isStateTransitionDone (CSL_PSC_PD_NETCP));
2967 #if defined(DEVICE_K2L) || defined(SOC_K2L)
2968 CSL_PSC_enablePowerDomain (CSL_PSC_PD_OSR);
2970 /* Enable the clocks for OSR modules */
2971 CSL_PSC_setModuleNextState (CSL_PSC_LPSC_OSR, PSC_MODSTATE_ENABLE);
2973 /* Start the state transition */
2974 CSL_PSC_startStateTransition (CSL_PSC_PD_OSR);
2976 /* Wait until the state transition process is completed. */
2977 utilCycleDelay (1000);
2978 #endif
2981 #else
2983 #if !defined(NSS_LITE2)
2984 /* Set NSS Power domain to ON */
2985 CSL_PSC_enablePowerDomain (CSL_PSC_PD_NSS);
2987 /* Enable the clocks for NSS modules */
2988 CSL_PSC_setModuleNextState (CSL_PSC_LPSC_NSS, PSC_MODSTATE_ENABLE);
2990 /* Start the state transition */
2991 CSL_PSC_startStateTransition (CSL_PSC_PD_NSS);
2993 /* Wait until the state transition process is completed. */
2994 while (!CSL_PSC_isStateTransitionDone (CSL_PSC_PD_NSS));
2996 /* Set SA Power domain to ON */
2997 CSL_PSC_enablePowerDomain (CSL_PSC_PD_SA);
2999 /* Enable the clocks for SA modules */
3000 CSL_PSC_setModuleNextState (CSL_PSC_LPSC_SA, PSC_MODSTATE_ENABLE);
3002 /* Start the state transition */
3003 CSL_PSC_startStateTransition (CSL_PSC_PD_SA);
3005 /* Wait until the state transition process is completed. */
3006 while (!CSL_PSC_isStateTransitionDone (CSL_PSC_PD_SA));
3007 #endif
3009 #endif
3011 }
3013 /* Initialize the test framework */
3014 int setupTestFramework (void)
3015 {
3016 uint8_t limitAccess = false;
3018 /* Setup the semaphores used for access to the PA tables.
3019 * This has to be done before the PA is initialized */
3020 if (initSems()) {
3021 SALog ("setupTestFramework: initSems returned error, exiting\n");
3022 return (-1);
3023 }
3025 /* Power up PA sub-systems */
3026 passPowerUp();
3028 #if !defined(NSS_LITE) && !defined(NSS_LITE2)
3029 /* Create the PA driver instance */
3030 if (initPa()) {
3031 SALog ("setupTestFramework: initPa returned error, exiting\n");
3032 return (-1);
3033 }
3034 #endif
3036 #if defined(NSS_LITE2)
3037 /*
3038 * Check SA resource availability if running on HS device and configure
3039 * which tests and test parameters are enabled.
3040 */
3041 limitAccess = configSaRes();
3042 #endif
3044 /* Setup the QM with associated buffers and descriptors */
3045 if (initQm()) {
3046 SALog ("setupTestFramework: initQm returned error, exiting\n");
3047 return (-1);
3048 }
3050 /* Initialize the SA unit test support and create the SA driver instance */
3051 salld_sim_initialize(limitAccess);
3053 /* Initialize the test connection module */
3054 #if !defined(NSS_LITE2)
3055 sauConnInit();
3056 #endif
3058 return (0);
3060 }
3062 int exitTestFramework(void)
3063 {
3065 salld_sim_close_sa();
3067 /* Delete the semaphores created */
3068 if (deleteSems()) {
3069 SALog ("exitTestFramework: deleteSems returned error, exiting\n");
3070 return (-1);
3071 }
3073 /* Clean up CPPI/QM entries */
3074 if (exitQm()) {
3075 SALog ("exitTestFramework: exitQm returned error, exiting\n");
3076 return (-1);
3077 }
3079 return 0;
3080 }
3083 /* Check that all the queues are setup correctly */
3084 #ifdef NSS_LITE2
3085 int verifyTestFramework (void)
3086 {
3087 return(0);
3088 }
3089 #else
3090 int verifyTestFramework (void)
3091 {
3092 int i, j;
3093 int count;
3094 int returnVal = 0;
3095 Cppi_HostDesc *hd;
3096 uint8_t *bufp;
3097 uint32_t bufLen;
3099 int32_t linkedQ[8];
3100 #ifdef NETSS_INTERNAL_PKTDMA
3101 int32_t linkedLocQ[4];
3102 #endif
3104 int32_t nbufs[] = { TF_LINKED_BUF_Q1_NBUFS, TF_LINKED_BUF_Q2_NBUFS, TF_LINKED_BUF_Q3_NBUFS, TF_LINKED_BUF_Q4_NBUFS,
3105 TF_HOST_LINKED_BUF_Q1_NBUFS, TF_HOST_LINKED_BUF_Q2_NBUFS, TF_HOST_LINKED_BUF_Q3_NBUFS, TF_HOST_LINKED_BUF_Q4_NBUFS};
3106 int32_t bSize[] = { TF_LINKED_BUF_Q1_BUF_SIZE, TF_LINKED_BUF_Q2_BUF_SIZE, TF_LINKED_BUF_Q3_BUF_SIZE, TF_LINKED_BUF_Q4_BUF_SIZE,
3107 TF_HOST_LINKED_BUF_Q1_BUF_SIZE, TF_HOST_LINKED_BUF_Q2_BUF_SIZE, TF_HOST_LINKED_BUF_Q3_BUF_SIZE, TF_HOST_LINKED_BUF_Q4_BUF_SIZE };
3109 linkedQ[0] = tFramework.QLinkedBuf1;
3110 linkedQ[1] = tFramework.QLinkedBuf2;
3111 linkedQ[2] = tFramework.QLinkedBuf3;
3112 linkedQ[3] = tFramework.QLinkedBuf4;
3113 linkedQ[4] = tFramework.QHostLinkedBuf1;
3114 linkedQ[5] = tFramework.QHostLinkedBuf2;
3115 linkedQ[6] = tFramework.QHostLinkedBuf3;
3116 linkedQ[7] = tFramework.QHostLinkedBuf4;
3118 #ifdef NETSS_INTERNAL_PKTDMA
3119 linkedLocQ[0] = tFramework.QLocLinkedBuf1;
3120 linkedLocQ[1] = tFramework.QLocLinkedBuf2;
3121 linkedLocQ[2] = tFramework.QLocLinkedBuf3;
3122 linkedLocQ[3] = tFramework.QLocLinkedBuf4;
3123 #endif
3125 /* Verify that all of the general purpose queues are empty */
3126 for (i = 0; i < TF_NUM_GEN_QUEUES; i++) {
3127 if ((count = Qmss_getQueueEntryCount (tFramework.QGen[i])) != 0) {
3128 SALog ("verifyTestFramework: Expected 0 entry count for queue %d, found %d entries\n", tFramework.QGen[i], count);
3129 returnVal = -1;
3130 }
3131 }
3133 /* Verify that the number of descriptors in the free descriptor queue is correct */
3134 count = Qmss_getQueueEntryCount (tFramework.QfreeDesc);
3135 if (count != (TF_NUM_DESC - TF_NUM_RES_DESC)) {
3136 SALog ("verifyTestFramework: Expected %d entry count in the free descriptor queue (%d), found %d\n",
3137 TF_NUM_DESC - TF_NUM_RES_DESC,
3138 tFramework.QfreeDesc, count);
3139 returnVal = -1;
3140 }
3142 /* Verify the number and sizing of descriptors with linked buffers in the linked-buffer queues */
3143 for (j = 0; j < 8; j++) {
3145 count = Qmss_getQueueEntryCount (linkedQ[j]);
3146 if (count != nbufs[j]) {
3147 SALog ("verifyTestFramework: Expected %d entry count in linked buffer queue (%d), found %d\n",
3148 nbufs[j], linkedQ[j], count);
3149 returnVal = -1;
3150 }
3152 for (i = 0; i < count; i++) {
3153 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (linkedQ[j])) & ~15);
3154 Cppi_getOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, &bufp, &bufLen);
3155 #if !defined(NSS_LITE) && !defined(NSS_LITE2)
3156 Qmss_queuePush (linkedQ[j], (Ptr)hd, hd->buffLen, TF_SIZE_DESC, Qmss_Location_TAIL);
3157 #else
3158 Qmss_queuePushDesc (linkedQ[j], (Ptr)hd);
3159 #endif
3160 if (bufLen != bSize[j]) {
3161 SALog ("verifyTestFramework: Linked buffer queue %d (%d) expected original length of %d, found %d\n",
3162 j, linkedQ[j], bSize[j], bufLen);
3163 returnVal = -1;
3164 break;
3165 }
3166 }
3167 }
3169 #ifdef NETSS_INTERNAL_PKTDMA
3172 /* Verify that the number of descriptors in the free descriptor queue is correct */
3173 count = Qmss_getQueueEntryCount (tFramework.QLocfreeDesc);
3174 if (count != (TF_NUM_DESC - TF_LINKED_BUF_Q1_NBUFS - TF_LINKED_BUF_Q2_NBUFS - TF_LINKED_BUF_Q3_NBUFS - TF_LINKED_BUF_Q4_NBUFS)) {
3175 SALog ("verifyTestFramework: Expected %d entry count in the free descriptor queue (%d), found %d\n",
3176 TF_NUM_DESC - TF_LINKED_BUF_Q1_NBUFS - TF_LINKED_BUF_Q2_NBUFS - TF_LINKED_BUF_Q3_NBUFS - TF_LINKED_BUF_Q4_NBUFS,
3177 tFramework.QLocfreeDesc, count);
3178 returnVal = -1;
3179 }
3183 /* Verify the number and sizing of descriptors with linked buffers in the local linked-buffer queues */
3184 for (j = 0; j < 4; j++) {
3186 count = Qmss_getQueueEntryCount (linkedLocQ[j]);
3187 if (count != nbufs[j]) {
3188 SALog ("verifyTestFramework: Expected %d entry count in Loc linked buffer queue %d (%d), found %d\n",
3189 nbufs[j], j, linkedLocQ[j], count);
3190 returnVal = -1;
3191 }
3193 for (i = 0; i < count; i++) {
3194 hd = (Cppi_HostDesc *)(((uint32_t)Qmss_queuePop (linkedLocQ[j])) & ~15);
3195 Cppi_getOriginalBufInfo (Cppi_DescType_HOST, (Cppi_Desc *)hd, &bufp, &bufLen);
3196 //Cppi_setReturnQueue (Cppi_DescType_HOST, (Cppi_Desc *)hd, q);
3197 Qmss_queuePushDesc(linkedLocQ[j], (Ptr)hd);
3199 if (bufLen != bSize[j]) {
3200 SALog ("verifyTestFramework: Loc linked buffer queue %d (%d) expected original length of %d, found %d\n",
3201 j, linkedLocQ[j], bSize[j], bufLen);
3202 returnVal = -1;
3203 break;
3204 }
3205 }
3206 }
3208 #endif
3211 return (returnVal);
3212 }
3213 #endif
3216 #ifndef USE_BIOS
3217 void Task_exit(void)
3218 {
3219 while (TRUE)
3220 {
3222 }
3223 }
3224 #endif