1 /**
2 * @file fm_clean.c
3 *
4 * @brief
5 * Fault Management fault cleanup source
6 *
7 * \par
8 * ============================================================================
9 * @n (C) Copyright 2014, Texas Instruments, Inc.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 *
18 * Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the
21 * distribution.
22 *
23 * Neither the name of Texas Instruments Incorporated nor the names of
24 * its contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 *
39 * \par
40 */
42 #include <c6x.h>
44 /* Standard Include Files. */
45 #include <string.h>
47 /* CSL Includes */
48 #include <ti/csl/csl_sem.h>
49 #include <ti/csl/csl_qm_queue.h>
50 #include <ti/csl/csl_pscAux.h>
51 #include <ti/csl/csl_chipAux.h>
52 #include <ti/csl/csl_edma3Aux.h>
54 #include <ti/csl/cslr.h>
55 #include <ti/csl/cslr_device.h>
56 #include <ti/csl/cslr_pa_ss.h>
57 #include <ti/csl/cslr_cp_ace.h>
58 #include <ti/csl/cslr_cpintc.h>
59 #include <ti/csl/cslr_tmr.h>
61 /* LLD Includes */
62 #include <ti/drv/cppi/cppi_drv.h>
63 #include <ti/drv/cppi/cppi_desc.h>
64 #include <ti/drv/qmss/qmss_drv.h>
65 #include <ti/drv/qmss/qmss_acc.h>
66 #include <ti/drv/qmss/qmss_qm.h>
67 #include <ti/drv/pa/pa.h>
68 #include <ti/drv/pa/pasahost.h>
69 #include <ti/drv/pa/fw/pafw.h>
70 #include <ti/drv/aif2/aif2.h>
72 /* FM API Include */
73 #include <ti/instrumentation/fault_mgmt/fault_mgmt.h>
75 /* FM Internal Includes */
76 #include <ti/instrumentation/fault_mgmt/include/fm_loc.h>
77 #include <ti/instrumentation/fault_mgmt/include/fm_cleanloc.h>
78 #include <ti/instrumentation/fault_mgmt/include/fm_exclusionloc.h>
80 /* OSAL Includes */
81 #include <ti/instrumentation/fault_mgmt/fault_mgmt_osal.h>
83 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
85 #define NUM_MONO_DESC 32
86 #define SIZE_MONO_DESC 128
87 #define MONO_DESC_DATA_OFFSET 36
89 #define PA_INST_SIZE 128 /* Required size = 84 */
90 #define PA_MAX_NUM_L2_HANDLES 64
91 #define PA_L2_TABLE_SIZE (PA_MAX_NUM_L2_HANDLES * 32) /* Requires 32 bytes per entry */
92 #define PA_MAX_NUM_L3_HANDLES 128
93 #define PA_L3_TABLE_SIZE (PA_MAX_NUM_L3_HANDLES * 72) /* Requires 72 bytes per entry */
95 #define PA_MAX_NUM_CPPI_TX_CH 9
97 #if defined(_LITTLE_ENDIAN)
98 #define SWIZ(x) (sizeof((x)) == 1 ? (x) : (sizeof((x)) == 2 ? swiz16((x)) : (sizeof((x)) == 4 ? swiz32((x)) : 0)))
99 #else
100 #define SWIZ(x) (x)
101 #endif
103 /**********************************************************************
104 ********************** Cleanup Globals *******************************
105 **********************************************************************/
107 /* Tracks whether LLDs have been initialized during the cleanup process */
108 uint32_t initComplete = FM_FALSE;
110 #pragma DATA_ALIGN (monoDesc, 128)
111 uint8_t monoDesc[SIZE_MONO_DESC * NUM_MONO_DESC];
/* Array used to store queues that cannot be used by the DSP until
 * they can be closed. Must be global to avoid memory corruption
 * when initializing the array, since it may potentially be
 * larger than the stack */
117 Qmss_QueueHnd invalidQs[QMSS_MAX_GENERAL_PURPOSE_QUEUE];
119 /* PA memory */
120 #pragma DATA_ALIGN (paMemPaInst, 8)
121 uint8_t paMemPaInst[PA_INST_SIZE];
122 #pragma DATA_ALIGN (paMemL2Ram, 8)
123 uint8_t paMemL2Ram[PA_L2_TABLE_SIZE];
124 #pragma DATA_ALIGN(paMemL3Ram, 8);
125 uint8_t paMemL3Ram[PA_L3_TABLE_SIZE];
127 /* PASS Tx Queues */
128 Qmss_QueueHnd paTxQs[PA_MAX_NUM_CPPI_TX_CH];
129 #endif /* !(K2H && K2K && K2L && K2E) */
131 /* EDMA3 object - global to avoid overflowing stack */
132 CSL_Edma3Obj edmaObjCC;
134 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
135 /* AIF2 object - Global to avoid overflowing stack */
136 AIF_ConfigObj locAifObj;
138 /**********************************************************************
139 ******************** External Variables ******************************
140 **********************************************************************/
142 /* Location in memory where cleanup status is written */
143 extern int32_t fmCleanupStatus[32];
145 /* Heap needed to initialize CPPI prior to IO halt execution */
146 extern uint8_t tempCppiHeap[];
147 #endif /* !(K2H && K2K && K2L && K2E) */
149 /* CPPI Global configuration parameters */
150 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
151 extern Cppi_GlobalConfigParams cppiGblCfgParams[];
152 #else
153 extern Cppi_GlobalConfigParams cppiGblCfgParams;
154 #endif
156 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
157 /* QMSS Global configuration parameters */
158 extern Qmss_GlobalConfigParams qmssGblCfgParams;
160 /**********************************************************************
161 ********************* Local Cleanup Functions ************************
162 **********************************************************************/
164 /* START: PA API hardcoded here until it can be added to PA LLD */
165 #if defined(_LITTLE_ENDIAN)
166 /*********************************************************************
167 * FUNCTION PURPOSE: Swizzling
168 *********************************************************************
169 * DESCRIPTION: The PA sub-system requires all multi-byte fields in
170 * big endian format.
171 *********************************************************************/
static inline uint16_t swiz16(uint16_t x)
{
    /* Byte-swap a 16-bit value (host little endian -> PASS big endian) */
    return (uint16_t)((x << 8) | (x >> 8));
}
static inline uint32_t swiz32 (uint32_t x)
{
    /* Byte-swap a 32-bit value: isolate each byte and move it to its
     * mirrored position (host little endian -> PASS big endian) */
    uint32_t byte0 = (x & 0x000000ffUL) << 24;
    uint32_t byte1 = (x & 0x0000ff00UL) << 8;
    uint32_t byte2 = (x & 0x00ff0000UL) >> 8;
    uint32_t byte3 = (x & 0xff000000UL) >> 24;

    return (byte0 | byte1 | byte2 | byte3);
}
181 #endif
183 /* Hardcode here until API added to PA to remove entries with minimal
184 * overhead. This define maps to PAFRM_CONFIG_COMMAND_DEL_LUT1 (in src/pafrm.h) */
185 #define temp_PAFRM_CONFIG_COMMAND_DEL_LUT1 2
187 /* Hardcode here until API added to PA to remove entries with minimal
188 * overhead. This define maps to PAFRM_CONFIG_COMMAND_SEC_BYTE (in src/pafrm.h) */
189 #define temp_PAFRM_CONFIG_COMMAND_SEC_BYTE 0xce
191 /* Hardcode here until API added to PA to remove entries with minimal
192 * overhead. This define maps to PAFRM_DEST_PKTDMA (in src/pafrm.h) */
193 #define temp_PAFRM_DEST_PKTDMA 6
194 /* Hardcode here until API added to PA to remove entries with minimal
195 * overhead. This define maps to PAFRM_DEST_DISCARD (in src/pafrm.h) */
196 #define temp_PAFRM_DEST_DISCARD 10
198 /* Commands to PA - Hardcode here until API added to PA to remove entries with minimal
199 * overhead. This structure maps to pafrmCommand_t (in src/pafrm.h) */
200 typedef struct {
201 uint32_t commandResult; /* Returned to the host, ignored on entry to the PASS */
202 uint8_t command; /* Command value */
203 uint8_t magic; /* Magic value */
204 uint16_t comId; /* Used by the host to identify command results */
205 uint32_t retContext; /* Returned in swInfo to identify packet as a command */
206 uint16_t replyQueue; /* Specifies the queue number for the message reply. 0xffff to toss the reply */
207 uint8_t replyDest; /* Reply destination (host0, host1, discard are the only valid values) */
208 uint8_t flowId; /* Flow ID used to assign packet at reply */
209 uint32_t cmd; /* First word of the command */
210 } tempPaCmd;
212 /* Delete entry from LUT1 - Hardcode here until API added to PA to remove entries with minimal
213 * overhead. This structure maps to pafrmCommandDelLut1_t (in src/pafrm.h) */
214 typedef struct {
215 uint8_t index; /* LUT1 index */
216 } tempPaCmdDelLut1;
218 /*************************************************************************
219 * FUNCTION PURPOSE: Format Firmware Command Header
220 *************************************************************************
221 * DESCRIPTION: Clear and construct the firmware command header
222 * Returns pointer to the firmware command
223 *************************************************************************/
224 static tempPaCmd *pa_format_fcmd(void *pCmd, paCmdReply_t *reply, uint8_t lutIndex)
225 {
226 tempPaCmd *fcmd = (tempPaCmd *) pCmd;
227 uint16_t csize = sizeof(tempPaCmd)+sizeof(tempPaCmdDelLut1)-sizeof(uint32_t);
228 tempPaCmdDelLut1 *del;
229 uint8_t lut = temp_PAFRM_CONFIG_COMMAND_DEL_LUT1;
231 memset(fcmd, 0, csize);
233 fcmd->command = SWIZ(lut);
234 fcmd->magic = temp_PAFRM_CONFIG_COMMAND_SEC_BYTE;
235 fcmd->comId = 0;
236 fcmd->retContext = SWIZ(reply->replyId);
237 fcmd->replyQueue = SWIZ(reply->queue);
238 fcmd->flowId = SWIZ(reply->flowId);
240 /* Validity of the destination was already checked (HOST), so no other cases
241 * must be considered */
242 if (reply->dest == pa_DEST_HOST)
243 fcmd->replyDest = temp_PAFRM_DEST_PKTDMA;
244 else
245 fcmd->replyDest = temp_PAFRM_DEST_DISCARD;
247 del = (tempPaCmdDelLut1 *)&(fcmd->cmd);
249 del->index = lutIndex;
250 del->index = SWIZ(del->index);
252 return(fcmd);
253 }
254 /* END: PA API hardcoded here until it can be added to PA LLD */
256 /* FUNCTION PURPOSE: Converts L2 addresses to global
257 ***********************************************************************
258 * DESCRIPTION: Converts local l2 addresses to their global address
259 */
260 static uint32_t l2_global_address (uint32_t addr)
261 {
262 /* Compute the global address. */
263 return (addr + (0x10000000 + (DNUM * 0x1000000)));
264 }
266 /* FUNCTION PURPOSE: Cycle Delay
267 ***********************************************************************
268 * DESCRIPTION: Delays for the specified amount of cycles
269 */
static void cycleDelay(int count, int initTSCL)
{
    /* Snapshot the time-stamp counter first so the busy-wait below measures
     * from function entry */
    uint32_t TSCLin = TSCL;

    if (initTSCL) {
        /* Init call: start the time-stamp counter (writing TSCL starts it);
         * no delay is performed on this path and 'count' is ignored */
        CSL_chipWriteTSCL(0);
    }
    else {
        if (count <= 0)
            return;

        /* Busy-wait for 'count' cycles; the unsigned subtraction remains
         * correct across a TSCL wrap-around */
        while ((TSCL - TSCLin) < (uint32_t)count);
    }
}
285 /* FUNCTION PURPOSE: Resets a specified CPDMA channel or flow
286 ***********************************************************************
287 * DESCRIPTION: Resets the specified CPDMA channel or flow
288 */
289 static void resetDmaCh(Cppi_Handle cppiHandle, int32_t dmaNum, int32_t chNum, Fm_ResType resType)
290 {
291 Cppi_RxChInitCfg rxChCfg;
292 Cppi_TxChInitCfg txChCfg;
293 Cppi_ChHnd chHandle;
294 Cppi_RxFlowCfg rxFlowCfg;
295 Cppi_FlowHnd flowHandle;
296 uint8_t isAllocated;
297 Cppi_Result cppiResult;
299 if (resType == Fm_res_CpdmaRxCh) {
300 memset((void *) &rxChCfg, 0, sizeof(rxChCfg));
301 rxChCfg.channelNum = chNum;
303 if (chHandle = Cppi_rxChannelOpen(cppiHandle, &rxChCfg, &isAllocated)) {
304 if(cppiResult = Cppi_channelDisable(chHandle) != CPPI_SOK) {
305 Fault_Mgmt_osalLog("Failed to disable cppi DMA %d rx ch %d with err %d\n", dmaNum, chNum, cppiResult);
306 }
307 if(cppiResult = Cppi_channelClose(chHandle) != CPPI_SOK) {
308 Fault_Mgmt_osalLog("Failed to close cppi DMA %d rx ch %d with err %d\n", dmaNum, chNum, cppiResult);
309 }
310 }
311 else {
312 Fault_Mgmt_osalLog("DMA %d, RX channel %d failed to open\n", dmaNum, chNum);
313 }
314 }
315 else if (resType == Fm_res_CpdmaTxCh) {
316 memset((void *) &txChCfg, 0, sizeof(txChCfg));
317 txChCfg.channelNum = chNum;
319 if (chHandle = Cppi_txChannelOpen(cppiHandle, &txChCfg, &isAllocated)) {
320 if(cppiResult = Cppi_channelDisable(chHandle) != CPPI_SOK) {
321 Fault_Mgmt_osalLog("Failed to disable cppi DMA %d tx ch %d with err %d\n", dmaNum, chNum, cppiResult);
322 }
323 if(cppiResult = Cppi_channelClose(chHandle) != CPPI_SOK) {
324 Fault_Mgmt_osalLog("Failed to close cppiDMA %d tx ch %d with err %d\n", dmaNum, chNum, cppiResult);
325 }
326 }
327 else {
328 Fault_Mgmt_osalLog("DMA %d, TX channel %d failed to open\n", dmaNum, chNum);
329 }
330 }
331 else if (resType == Fm_res_CpdmaRxFlow) {
332 memset((void *) &rxFlowCfg, 0, sizeof(rxFlowCfg));
333 rxFlowCfg.flowIdNum = chNum;
335 if (flowHandle = Cppi_configureRxFlow(cppiHandle, &rxFlowCfg, &isAllocated)) {
336 if(cppiResult = Cppi_closeRxFlow(flowHandle) != CPPI_SOK) {
337 Fault_Mgmt_osalLog("Failed to disable cppi DMA %d rx flow %d with err %d\n", dmaNum, chNum, cppiResult);
338 }
339 }
340 else {
341 Fault_Mgmt_osalLog("DMA %d, RX flow %d failed to open\n", dmaNum, chNum);
342 }
343 }
344 }
346 /* FUNCTION PURPOSE: Gets the max CPPI rx flows for a CPDMA
347 ***********************************************************************
348 * DESCRIPTION: Returns the maximum number of rx flows for the
349 * given CPDMA
350 *
351 * CPPI API hardcoded here until it can be added to CPPI LLD
352 */
353 static uint32_t getDmaMaxRxFlow(Cppi_CpDma dmaNum)
354 {
355 uint32_t maxRxFlow;
357 maxRxFlow = cppiGblCfgParams[dmaNum].maxRxFlow;
358 return (uint32_t) maxRxFlow;
359 }
361 /* FUNCTION PURPOSE: Enables a CPPI tx channel
362 ***********************************************************************
363 * DESCRIPTION: Directly accesses the CPPI registers to Enable
364 * the specified transmit channel
365 *
366 * CPPI API hardcoded here until it can be added to CPPI LLD
367 */
368 static Cppi_Result txChannelExpressEnable (Cppi_CpDma dmaNum, uint32_t channelNum)
369 {
370 uint32_t value = 0;
371 Cppi_Result retVal = CPPI_SOK;
373 if (channelNum > cppiGblCfgParams[dmaNum].maxTxCh) {
374 retVal = FM_ERROR_CPPI_TX_CHANNEL_INVALID;
375 goto exitCs;
376 }
378 CSL_FINS (value, CPPIDMA_TX_CHANNEL_CONFIG_TX_CHANNEL_GLOBAL_CONFIG_REG_A_TX_ENABLE, (uint32_t) 1);
379 cppiGblCfgParams[dmaNum].txChRegs->TX_CHANNEL_GLOBAL_CONFIG[channelNum].TX_CHANNEL_GLOBAL_CONFIG_REG_A = value;
381 exitCs:
382 return retVal;
383 }
385 /* FUNCTION PURPOSE: Disables a CPPI tx channel
386 ***********************************************************************
387 * DESCRIPTION: Directly accesses the CPPI registers to disable
388 * the specified transmit channel
389 *
390 * CPPI API hardcoded here until it can be added to CPPI LLD
391 */
392 static Cppi_Result txChannelExpressDisable (Cppi_CpDma dmaNum, uint32_t channelNum)
393 {
394 Cppi_Result retVal = CPPI_SOK;
396 if (channelNum > cppiGblCfgParams[dmaNum].maxTxCh) {
397 retVal = FM_ERROR_CPPI_TX_CHANNEL_INVALID;
398 goto exitCs;
399 }
401 cppiGblCfgParams[dmaNum].txChRegs->TX_CHANNEL_GLOBAL_CONFIG[channelNum].TX_CHANNEL_GLOBAL_CONFIG_REG_A = 0;
403 exitCs:
404 return retVal;
405 }
407 /* FUNCTION PURPOSE: Disable TX DMAs used by Linux
408 ***********************************************************************
409 * DESCRIPTION: Disables the PASS and QMSS TX DMAs that are owned and
410 * operated by Linux.
411 */
412 static void linuxTxDmaDisable(Fm_ExcludedResource *excludedResList, uint32_t listSize)
413 {
414 Fm_ExclusionParams exclusionParams;
415 int32_t i;
417 /* Disable all PASS & QMSS TX DMAs owned by Linux */
418 memset(&exclusionParams, 0, sizeof(exclusionParams));
419 exclusionParams.exclusionList = excludedResList;
420 exclusionParams.numListEntries = listSize;
422 exclusionParams.resType = Fm_res_CpdmaTxCh;
423 exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_PASS_CPDMA;
424 for (i = 0; i < fmGetDmaMaxTxCh(Cppi_CpDma_PASS_CPDMA); i++) {
425 exclusionParams.resourceNum = i;
426 if (fmExclusionIsExcluded(&exclusionParams)) {
427 txChannelExpressDisable(Cppi_CpDma_PASS_CPDMA, i);
428 }
429 }
430 exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_QMSS_CPDMA;
431 for (i = 0; i < fmGetDmaMaxTxCh(Cppi_CpDma_QMSS_CPDMA); i++) {
432 exclusionParams.resourceNum = i;
433 if (fmExclusionIsExcluded(&exclusionParams)) {
434 txChannelExpressDisable(Cppi_CpDma_QMSS_CPDMA, i);
435 }
436 }
437 }
439 /* FUNCTION PURPOSE: Enable TX DMAs used by Linux
440 ***********************************************************************
441 * DESCRIPTION: Enables the PASS and QMSS TX DMAs that are owned and
442 * operated by Linux.
443 */
444 static void linuxTxDmaEnable(Fm_ExcludedResource *excludedResList, uint32_t listSize)
445 {
446 Fm_ExclusionParams exclusionParams;
447 int32_t i;
449 memset(&exclusionParams, 0, sizeof(exclusionParams));
450 exclusionParams.exclusionList = excludedResList;
451 exclusionParams.numListEntries = listSize;
453 /* Enable the Linux TX CPDMAs */
454 exclusionParams.resType = Fm_res_CpdmaTxCh;
455 exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_PASS_CPDMA;
456 for (i = 0; i < fmGetDmaMaxTxCh(Cppi_CpDma_PASS_CPDMA); i++) {
457 exclusionParams.resourceNum = i;
458 if (fmExclusionIsExcluded(&exclusionParams)) {
459 txChannelExpressEnable(Cppi_CpDma_PASS_CPDMA, i);
460 }
461 }
462 exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_QMSS_CPDMA;
463 for (i = 0; i < fmGetDmaMaxTxCh(Cppi_CpDma_QMSS_CPDMA); i++) {
464 exclusionParams.resourceNum = i;
465 if (fmExclusionIsExcluded(&exclusionParams)) {
466 txChannelExpressEnable(Cppi_CpDma_QMSS_CPDMA, i);
467 }
468 }
469 }
471 /* FUNCTION PURPOSE: Disables a QMSS accumulator channel
472 ***********************************************************************
473 * DESCRIPTION: Disables a QMSS accumulator channel the same as the
474 * Qmss_disableAccumulator API except a timeout is added
475 * when waiting for response from the PDSP firmware.
476 *
477 * QMSS API hardcoded here until it can be added to QMSS LLD
478 */
static Qmss_Result disableAccumChWithTimeout(Qmss_PdspId pdspId, uint8_t channel)
{
    Qmss_AccCmd cmd;
    volatile uint32_t *cmdPtr, *reg;
    uint32_t index;
    uint8_t result;
    void *key;
    uint32_t gotResponse = FM_FALSE;
    uint32_t timeoutCnt;

    /* Begin Critical Section before accessing shared resources. */
    key = Qmss_osalCsEnter ();

    /* Keep issuing the disable command until the PDSP firmware acknowledges it */
    while(!gotResponse) {
        /* Build the command: channel number in bits 7:0, command code in 15:8 */
        memset ((void *) &cmd, 0, sizeof (Qmss_AccCmd));
        CSL_FINSR (cmd.word0, 7, 0, channel);
        CSL_FINSR (cmd.word0, 15, 8, Qmss_AccCmd_DISABLE_CHANNEL);

        /* Point to the accumulator command register's last word */
        reg = (uint32_t *) ((uint8_t *) qmssGblCfgParams.qmPdspCmdReg[pdspId] + 4 * 4);

        /* Write command word last */
        cmdPtr = ((uint32_t *) &cmd) + 4;

        /* Copy the 5-word command backwards so word0 (containing the command
         * code) is written last, which is what triggers the firmware */
        for (index = 0; index < 5; index++)
            *reg-- = *cmdPtr--;

        /* Wait for the command to clear */
        reg++;
        timeoutCnt = 0;
        do
        {
            /* Re-read the command field; firmware activity changes it */
            result = CSL_FEXTR (*reg, 15, 8);

            if (result != 0) {
                /* NOTE(review): a non-zero command field is treated as the
                 * firmware having seen the command; confirm against the QMSS
                 * PDSP firmware spec (Qmss_disableAccumulator waits for the
                 * field to reach zero instead) */
                gotResponse = FM_TRUE;
            }
            else {
                cycleDelay(1000, FM_FALSE);
                timeoutCnt += 1000;
                /* NOTE(review): this timeout only accumulates while result is
                 * 0, but result == 0 also terminates the do/while below, so
                 * the >= 1000000000 break looks unreachable — verify intent */
                if (timeoutCnt >= 1000000000) {
                    /* Resend the command */
                    break;
                }
            }
        } while (result != 0);
    }

    /* End Critical Section */
    Qmss_osalCsExit (key);

    /* Return code is reported by firmware in bits 31:24 of the same word */
    return (Qmss_Result) (CSL_FEXTR (*reg, 31, 24));
}
534 /* FUNCTION PURPOSE: Resets a QMSS memory region
535 ***********************************************************************
536 * DESCRIPTION: Directly accesses the QMSS descriptor registers
537 * to reset a specified memory region
538 *
539 * QMSS API hardcoded here until it can be added to QMSS LLD
540 */
541 static Qmss_Result expressResetMemoryRegion (Qmss_MemRegion memRegion)
542 {
543 int32_t index = (int32_t) memRegion;
545 if (memRegion == Qmss_MemRegion_MEMORY_REGION_NOT_SPECIFIED)
546 return QMSS_MEMREGION_INVALID_INDEX;
548 qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_BASE_ADDRESS_REG = (uint32_t)0;
549 qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_START_INDEX_REG = (uint32_t)0;
550 qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_DESCRIPTOR_SETUP_REG = 0;
552 return QMSS_SOK;
553 }
555 /* FUNCTION PURPOSE: Gets a Memory Region Base Address
556 ***********************************************************************
557 * DESCRIPTION: Directly accesses the QMSS descriptor registers
558 * to get the region base address. A return of NULL
559 * means memory region is not in use. The physical
560 * base address can not be NULL if it is in use.
561 *
562 * QMSS API hardcoded here until it can be added to QMSS LLD
563 */
564 static uint32_t getMemoryRegionBaseAddr (Qmss_MemRegion memRegion)
565 {
566 int32_t index = (int32_t) memRegion;
568 return (qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_BASE_ADDRESS_REG);
569 }
571 /* FUNCTION PURPOSE: Gets a Memory Region Descriptor Block Size
572 ***********************************************************************
573 * DESCRIPTION: Directly accesses the QMSS descriptor registers
574 * to get the descriptor block size for the region
575 *
576 * QMSS API hardcoded here until it can be added to QMSS LLD
577 */
578 static uint32_t getMemoryRegionDescBlockSize (Qmss_MemRegion memRegion)
579 {
580 int32_t index = (int32_t) memRegion;
581 uint32_t descSizeBytes = 0;
582 uint32_t powRegSize = 0;
583 uint32_t numDesc = 0;
585 descSizeBytes = (uint32_t) CSL_FEXT (qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_DESCRIPTOR_SETUP_REG,
586 QM_DESCRIPTOR_REGION_CONFIG_MEMORY_REGION_DESCRIPTOR_SETUP_REG_DESC_SIZE);
587 /* Value stored as multiplier minus 1 that needs to be applied to 16 to get descriptor size */
588 descSizeBytes = (descSizeBytes + 1) * 16;
590 powRegSize = (uint32_t) CSL_FEXT (qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_DESCRIPTOR_SETUP_REG,
591 QM_DESCRIPTOR_REGION_CONFIG_MEMORY_REGION_DESCRIPTOR_SETUP_REG_REG_SIZE);
592 /* Value stored as 2^(5+stored_value) = number of descriptors */
593 numDesc = (32UL << powRegSize);
595 return (numDesc * descSizeBytes);
596 }
598 /* FUNCTION PURPOSE: Cleans QMSS queues
599 ***********************************************************************
600 * DESCRIPTION: QMSS queues cleaned. The steps taken to clean the
601 * queues differs based on whether the queue is part of
602 * the provided exclusion list. Queues not in the
603 * exclusion list will be emptied of all descriptors
604 * Queues in the exclusion list will be cleaned
605 * using the following process:
606 *
607 * Queue cleanup process
608 * - Disable all PASS DMA TX channels that are owned by Linux
609 * - Pause QoS (if supported - not currently supported)
610 * - Wipe all QMSS queues that are DSP owned
611 * - Read QMSS memory region registers for Linux inserted regions to get addresses associated
612 * with Linux pushed descriptors
613 * - For each queue that may be used by Linux
614 * - Allocate a scratch queue and a cleanup queue from the list of wiped QMSS queues
615 * - Divert all descriptors in Linux owned queue to scratch queue
616 * - Pop descriptors off scratch queue
617 * - Discard if descriptor not in Linux memory region
618 * - Push onto cleanup queue if descript in Linux memory region
619 * - Divert descriptors in cleanup queue back to original queue
620 * - Enable PASS DMA TX channels
621 */
static int32_t cleanQmssQueues(Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    Fm_ExclusionParams exclusionParams;
    uint8_t isAllocated;
    int32_t i, j;
    /* NOTE(review): Qmss_QueueHnd compared against NULL below — if queue
     * handle 0 is a valid general-purpose queue this loops forever treating
     * it as "unallocated"; confirm against QMSS LLD handle encoding */
    Qmss_QueueHnd scratchQ = NULL;
    Qmss_QueueHnd cleanQ = NULL;
    uint32_t desc;
    uint32_t memRegStart;
    uint32_t memRegEnd;
    Fm_Result retVal = FM_FAULT_CLEANUP_OK;

    /* Stop flow of descriptors while cleaning QM */
    linuxTxDmaDisable(excludedResList, listSize);

    /* Cleanup QMSS queues */

    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList = excludedResList;
    exclusionParams.numListEntries = listSize;

    /* Empty all queues not owned by Linux first */
    exclusionParams.resType = Fm_res_QmssQueue;
    for (i = 0; i < QMSS_MAX_QUEUES; i++) {
        exclusionParams.resourceNum = i;
        if (!fmExclusionIsExcluded(&exclusionParams)) {
            Qmss_queueEmpty((Qmss_QueueHnd) i);
        }
    }

    /* Clean all Linux-owned queues of DSP-based descriptors second */

    /* Allocate a scratchQ for temporarily storing the descriptor contents of a queue being swept of
     * DSP descriptors */
    memset(&invalidQs[0], 0, sizeof(invalidQs));
    i = 0;
    while (scratchQ == NULL) {
        scratchQ = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
        exclusionParams.resourceNum = scratchQ;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            /* Store the queues that can't be used until after both the scratchQ and cleanQ have been found. */
            invalidQs[i++] = scratchQ;
            scratchQ = NULL;
        }
    }
    /* Allocate a cleanQ to temporarily store the linux-based descriptors filtered from a Linux-based queue. */
    while (cleanQ == NULL) {
        cleanQ = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
        exclusionParams.resourceNum = cleanQ;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            /* Store the queues that can't be used until after both the scratchQ and cleanQ have been found. */
            invalidQs[i++] = cleanQ;
            cleanQ = NULL;
        }
    }
    /* Free any invalidQs (queues opened above that landed on Linux-owned
     * queue numbers and therefore could not be used) */
    for (j = 0; j < i; j++) {
        Qmss_queueClose(invalidQs[j]);
    }

    /* Need to check allocated queue against those owned by linux */
    for (i = 0; i < QMSS_MAX_QUEUES; i++) {
        exclusionParams.resType = Fm_res_QmssQueue;
        exclusionParams.resourceNum = i;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            /* In K2 will need to open twice the scratch and clean queues (one from each QM) */
            Qmss_queueDivert(i, scratchQ, Qmss_Location_TAIL);

            /* Pop each decriptor from the queue being swept. if the descriptor lies within
             * the range of a Linux-owned memory region it is saved via a push onto a clean Q. Otherwise,
             * the descriptor is dropped. */
            while (desc = (uint32_t)Qmss_queuePop(scratchQ)) {
                for (j = 0; j < QMSS_MAX_MEM_REGIONS; j++) {
                    /* A non-zero base address marks a configured region */
                    if (memRegStart = getMemoryRegionBaseAddr((Qmss_MemRegion)j)) {
                        memRegEnd = memRegStart + getMemoryRegionDescBlockSize((Qmss_MemRegion)j);
                        if ((desc >= memRegStart) && (desc < memRegEnd)) {
                            /* Save descriptor */
                            Qmss_queuePushDesc(cleanQ, (void *)desc);
                            break;
                        }
                    }
                }
            }
            /* Move Linux-based descriptors in the cleanQ back into the original queue. The descriptors are
             * diverted to the HEAD in case any descriptors were pushed into the original queue during the
             * sweep process. Pushing to HEAD will guarantee the original descriptors are handled prior to the
             * new ones */
            Qmss_queueDivert(cleanQ, i, Qmss_Location_HEAD);
        }
    }

    /* Restart flow of descriptors */
    linuxTxDmaEnable(excludedResList, listSize);

    /* Leave DMA handles hanging since it can't be closed without closing all the channels. Channels
     * can't be closed without wiping Linux configuration */
    return (retVal);
}
721 /* FUNCTION PURPOSE: Resets PA PDSPs and LUTs
722 ***********************************************************************
723 * DESCRIPTION: Resets PA PDSPs and LUTs based on the provided
724 * exclusion list.
725 *
726 * TAKEN FROM pa.c (Pa_resetControl). Replace this code
727 * with call to modified Pa_resetControl when it doesn't
728 * automatically reset all PDSPs. Until then use this
729 * modified function
730 *
731 * To reset portions of PA used by DSP
732 * - Disable DSP owned PDSPs
733 * - Clear LUT2 if completely owned by DSP
734 * - Redownload DSP owned PDSPs
735 * - Reenable DSP owned PDSPs
736 */
static paReturn_t resetPaPdspsAndLuts(Pa_Handle paHandle, uint32_t numPdsps,
                                      Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    CSL_Pa_ssRegs *passRegs;
    Fm_ExclusionParams exclusionParams;
    uint32_t i;
    uint32_t resetLut2 = FM_TRUE;
    paReturn_t paRet = pa_OK;

    if (listSize) {
        /* A present exclusion list signifies another core (typically ARM Linux) is in
         * control of PA. As a result, the firmware download and reset must be selective */

        /* Initialize the exclusion parameters */
        memset(&exclusionParams, 0, sizeof(exclusionParams));
        exclusionParams.exclusionList = excludedResList;
        exclusionParams.numListEntries = listSize;

        passRegs = (CSL_Pa_ssRegs *)CSL_PA_SS_CFG_REGS;

        /* Put each of the PDSPs into reset (PC = 0)*/
        exclusionParams.resType = Fm_res_PaPdsp;
        for (i = 0; i < numPdsps; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                passRegs->PDSP_CTLSTAT[i].PDSP_CONTROL = 0;
            }
        }

        /* Reset LUT2 if applicable: skip the reset if the exclusion list
         * contains any LUT entry belonging to LUT2 (exResInfo == 2) */
        for (i = 0; i < listSize; i++) {
            if ((excludedResList[i].resType == Fm_res_PaLutEntry) &&
                (excludedResList[i].exResInfo == 2)) {
                resetLut2 = FM_FALSE;
                break;
            }
        }

        if (resetLut2) {
            passRegs->LUT2.LUT2_SOFT_RESET = 1;
        }

        /* Re-download firmware only to the DSP-owned (non-excluded) PDSPs */
        exclusionParams.resType = Fm_res_PaPdsp;
        /* PDPSs 0-2 use image c1 */
        for (i = 0; i < 3; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                paRet = Pa_downloadImage (paHandle, i, (Ptr)c1, c1Size);
                if (paRet != pa_OK) {
                    goto errorExit;
                }
            }
        }
        /* PDSP 3 uses image c2 */
        exclusionParams.resourceNum = 3;
        if (!fmExclusionIsExcluded(&exclusionParams)) {
            paRet = Pa_downloadImage (paHandle, 3, (Ptr)c2, c2Size);
            if (paRet != pa_OK) {
                goto errorExit;
            }
        }
        /* PDSPs 4-5 use image m */
        for (i = 4; i < numPdsps; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                paRet = Pa_downloadImage (paHandle, i, (Ptr)m, mSize);
                if (paRet != pa_OK) {
                    goto errorExit;
                }
            }
        }

        /* Should be able to use PA's PDSP enable API since an active PDSP will not be
         * modified */
        /* NOTE(review): pa_STATE_ENABLE is a state constant compared against a
         * paReturn_t; this mirrors the non-selective path below but confirm
         * Pa_resetControl's documented return semantics */
        paRet = Pa_resetControl(paHandle, pa_STATE_ENABLE);
        if (paRet == pa_STATE_ENABLE) {
            paRet = pa_OK;
        }
    }
    else {
        /* Perform full firmware download and reset since another core is not
         * in control of PA */

        Pa_resetControl (paHandle, pa_STATE_RESET);

        /* PDPSs 0-2 use image c1 */
        for (i = 0; i < 3; i++)
            Pa_downloadImage (paHandle, i, (Ptr)c1, c1Size);

        /* PDSP 3 uses image c2 */
        Pa_downloadImage (paHandle, 3, (Ptr)c2, c2Size);

        /* PDSPs 4-5 use image m */
        for (i = 4; i < 6; i++)
            Pa_downloadImage (paHandle, i, (Ptr)m, mSize);

        paRet = Pa_resetControl (paHandle, pa_STATE_ENABLE);

        if (paRet == pa_STATE_ENABLE) {
            paRet = pa_OK;
        }
    }

errorExit:
    return (paRet);
}
844 /* FUNCTION PURPOSE: Check if NetCp (PA & SA) subsystem is powered up
845 ***************************************************************************************
846 * DESCRIPTION: This function checks the power status of the NetCp (PA & SA) subsystem domains
847 */
848 static uint32_t isNetCpPoweredUp (void)
849 {
850 /* Get peripheral PSC status */
851 if ((CSL_PSC_getPowerDomainState(CSL_PSC_PD_PASS) == PSC_PDSTATE_ON) &&
852 (CSL_PSC_getModuleState (CSL_PSC_LPSC_PKTPROC) == PSC_MODSTATE_ENABLE) &&
853 (CSL_PSC_getModuleState (CSL_PSC_LPSC_CPGMAC) == PSC_MODSTATE_ENABLE) &&
854 (CSL_PSC_getModuleState (CSL_PSC_LPSC_Crypto) == PSC_MODSTATE_ENABLE)) {
855 /* On */
856 return (FM_TRUE);
857 }
858 else {
859 return (FM_FALSE);
860 }
861 }
863 /* FUNCTION PURPOSE: Resets and Initializes PA
864 ***********************************************************************
 * DESCRIPTION: Resets and initializes PA. Certain steps
866 * will be skipped if an exclusion list
867 * is provided. This signifies another
868 * entity (typically ARM Linux) has already setup
869 * portions of PA which should not be reset
870 */
static Pa_Handle resetAndInitPa(uint32_t numPdsps, Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    paSizeInfo_t paSize;
    paConfig_t   paCfg;
    paReturn_t   paRet;
    int          bufSizes[pa_N_BUFS];
    int          bufAligns[pa_N_BUFS];
    void        *bufBases[pa_N_BUFS];
    Pa_Handle    paHandle = NULL;   /* NULL is returned on any failure path */

    /* Stop flow of descriptors while resetting the PA PDSPs */
    linuxTxDmaDisable(excludedResList, listSize);
    /* Delay so PDSPs can finish processing any queued descriptors (commands) */
    cycleDelay(5000, FM_FALSE);

    /* The maximum number of handles that can exists are 32 for L2, and 64 for L3. */
    memset(&paSize, 0, sizeof(paSizeInfo_t));
    memset(&paCfg, 0, sizeof(paConfig_t));
    memset(bufBases, 0, sizeof(bufBases));
    paSize.nMaxL2 = PA_MAX_NUM_L2_HANDLES;
    paSize.nMaxL3 = PA_MAX_NUM_L3_HANDLES;
    paSize.nUsrStats = 0;
    paSize.nVlnkMax = 0;

    /* Query the buffer sizes and alignments the PA LLD needs for these table sizes */
    paRet = Pa_getBufferReq(&paSize, bufSizes, bufAligns);
    if (paRet != pa_OK) {
        goto errorExit;
    }

    /* The first buffer is used as the instance buffer.
     * Each static buffer must satisfy the alignment (checked via power-of-two
     * mask) and the minimum size reported by Pa_getBufferReq. */
    if (((Uint32)paMemPaInst & (bufAligns[0] - 1)) ||
        (sizeof(paMemPaInst) < bufSizes[0])) {
        goto errorExit;
    }
    bufBases[0] = (void *)paMemPaInst;

    /* The second buffer is the L2 table */
    if (((Uint32)paMemL2Ram & (bufAligns[1] - 1)) ||
        (sizeof(paMemL2Ram) < bufSizes[1])) {
        goto errorExit;
    }
    bufBases[1] = (void *)paMemL2Ram;

    /* The third buffer is the L3 table */
    if (((Uint32)paMemL3Ram & (bufAligns[2] - 1)) ||
        (sizeof(paMemL3Ram) < bufSizes[2])) {
        goto errorExit;
    }
    bufBases[2] = (void *)paMemL3Ram;

    paCfg.initTable = TRUE;
    /* Skip default-route installation when an exclusion list is present - in
     * that case another entity (typically ARM Linux) owns part of PA's config */
    if (excludedResList) {
        paCfg.initDefaultRoute = FALSE;
    }
    else {
        paCfg.initDefaultRoute = TRUE;
    }
    paCfg.baseAddr = CSL_PA_SS_CFG_REGS;
    paCfg.sizeCfg = &paSize;

    paRet = Pa_create(&paCfg, bufBases, &paHandle);
    if (paRet != pa_OK) {
        goto errorExit;
    }

    /* Reset the portions of PA that are used by the DSP. Avoid resetting
     * anything used by Linux */
    paRet = resetPaPdspsAndLuts(paHandle, numPdsps, excludedResList, listSize);
    if (paRet != pa_OK) {
        /* Signal failure to the caller; the created instance is abandoned */
        paHandle = NULL;
    }

    /* Restart DMAs (done even if the PDSP/LUT reset failed) */
    linuxTxDmaEnable(excludedResList, listSize);

errorExit:
    return (paHandle);
}
950 /* FUNCTION PURPOSE: Resets the PA Global Config
951 ***********************************************************************
952 * DESCRIPTION: The PASS global configuration stored in the PA
953 * scratch memory is reset to default values
954 */
static Fm_Result paSetDefaultGblCfg(Pa_Handle passHandle, Qmss_QueueHnd cmdRespQ, Qmss_QueueHnd freeQ, int16_t flowId)
{
    paSysConfig_t  paDefGlobalCfg;
    Cppi_Desc     *monolithicDesc;
    paCmdReply_t   paReply;
    uint8_t       *descBuf;
    uint32_t       cmdLen;
    int            cmdDest;
    uint32_t       psCmd;
    tempPaCmd     *paRespCmd;
    uint32_t       paRespCmdLen;
    paReturn_t     paRet;
    Fm_Result      retVal = FM_FAULT_CLEANUP_OK;
    paCtrlInfo_t   ctrlInfo;
    /* Set the default values, taken from pa.h */
    paProtocolLimit_t paDefProtocolLimit = {
        pa_PROTOCOL_LIMIT_NUM_VLANS_DEF,  /* Number of VLANs */
        pa_PROTOCOL_LIMIT_NUM_IP_DEF,     /* Number of IPs */
        pa_PROTOCOL_LIMIT_NUM_GRE_DEF     /* Number of GREs */
    };
    paCmdSetConfig_t paDefCmdSetCfg = {
        64      /* Number of command sets */
    };
    /* NOTE(review): both initializers below evaluate to 0 (x - x); presumably
     * written this way to document the relation to the max-counter limits -
     * confirm 0 is the intended default */
    paUsrStatsConfig_t paDefUsrStatsCfg = {
        (pa_USR_STATS_MAX_COUNTERS - pa_USR_STATS_MAX_COUNTERS),        /* Number of user stats */
        (pa_USR_STATS_MAX_64B_COUNTERS - pa_USR_STATS_MAX_64B_COUNTERS) /* Number of 64-bit user stats */
    };

    paQueueDivertConfig_t paDefQueueDivertCfg = {
        0,  /* Monitoring Queue */
        0   /* flow Id */
    };
    paPacketControlConfig_t paDefPktCtrlCfg = {
        pa_PKT_CTRL_HDR_VERIFY_IP,  /* ctrlBitMap */
        0,                          /* rxPaddingErrStatsIndex */
        0                           /* txPaddingStatsIndex */
    };
    paIpReassmConfig_t paDefReassmConfig = {
        0,  /* numTrafficFlow */
        0,  /* destFlowId */
        0   /* destQueue */
    };
    paIpsecNatTConfig_t paDefNatTCfg = {
        0,  /* ctrlBitMap */
        0   /* UDP port number */
    };

    /* Aggregate the per-feature defaults into the system configuration.
     * Inner/outer IP reassembly share the same default structure. */
    paDefGlobalCfg.pCmdSetConfig     = &paDefCmdSetCfg;
    paDefGlobalCfg.pInIpReassmConfig = &paDefReassmConfig;
    paDefGlobalCfg.pOutIpReassmConfig = &paDefReassmConfig;
    paDefGlobalCfg.pPktControl       = &paDefPktCtrlCfg;
    paDefGlobalCfg.pProtoLimit       = &paDefProtocolLimit;
    paDefGlobalCfg.pQueueDivertConfig = &paDefQueueDivertCfg;
    paDefGlobalCfg.pUsrStatsConfig   = &paDefUsrStatsCfg;

    /* Route PA's command responses back to the host via cmdRespQ/flowId */
    memset(&paReply, 0, sizeof(paReply));
    paReply.dest = pa_DEST_HOST;
    paReply.queue = Qmss_getQIDFromHandle(cmdRespQ);
    paReply.flowId = flowId;

    /* Set system global default configuration */
    ctrlInfo.code = pa_CONTROL_SYS_CONFIG;
    ctrlInfo.params.sysCfg = paDefGlobalCfg;

    /* Pull a free descriptor and format the control command into its data
     * buffer. cmdLen is recomputed to the space available in the descriptor. */
    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(freeQ));
    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc, &descBuf, &cmdLen);
    cmdLen = SIZE_MONO_DESC - Cppi_getDataOffset(Cppi_DescType_MONOLITHIC, monolithicDesc);
    paRet = Pa_control(passHandle, &ctrlInfo, (paCmd_t)descBuf, (uint16_t *)&cmdLen, &paReply, &cmdDest);
    if (paRet != pa_OK) {
        if (paRet == pa_INSUFFICIENT_CMD_BUFFER_SIZE) {
            retVal = FM_ERROR_DESC_BUF_TOO_SMALL;
        }
        else {
            retVal = FM_ERROR_PASS_SETTING_DEF_GLBL_CMD;
        }
        goto cleanupQueue;
    }

    /* Tag the descriptor with the PA config PS command and send it to the
     * tx queue Pa_control selected via cmdDest */
    psCmd = PASAHO_PACFG_CMD;
    Cppi_setPSData (Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t *)&psCmd, 4);
    Cppi_setPacketLen(Cppi_DescType_MONOLITHIC, monolithicDesc, cmdLen);
    Qmss_queuePushDescSize(paTxQs[cmdDest - pa_CMD_TX_DEST_0], (uint32_t *)monolithicDesc, SIZE_MONO_DESC);

    /* Wait for response from PA.
     * NOTE(review): busy-wait with no timeout - hangs if PA never responds */
    while (Qmss_getQueueEntryCount(cmdRespQ) == 0){};
    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(cmdRespQ));
    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc,(uint8_t **)&paRespCmd, &paRespCmdLen);

    /* Non-zero commandResult indicates PA rejected the configuration */
    if (paRespCmd->commandResult) {
        retVal = FM_ERROR_PASS_GBL_DEF_CFG_NOT_SET;
        goto cleanupQueue;
    }
    /* Recycle the response descriptor */
    Qmss_queuePushDescSize(freeQ, monolithicDesc, SIZE_MONO_DESC);

    /* Set nat-t global default configuration */
    ctrlInfo.code = pa_CONTROL_IPSEC_NAT_T_CONFIG;
    ctrlInfo.params.ipsecNatTDetCfg = paDefNatTCfg;
    ctrlInfo.params.ipsecNatTDetCfg.ctrlBitMap = 0;

    /* Same send/receive sequence as above for the NAT-T command */
    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(freeQ));
    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc, &descBuf, &cmdLen);
    cmdLen = SIZE_MONO_DESC - Cppi_getDataOffset(Cppi_DescType_MONOLITHIC, monolithicDesc);
    paRet = Pa_control(passHandle, &ctrlInfo, (paCmd_t)descBuf, (uint16_t *)&cmdLen, &paReply, &cmdDest);
    if (paRet != pa_OK) {
        if (paRet == pa_INSUFFICIENT_CMD_BUFFER_SIZE) {
            retVal = FM_ERROR_DESC_BUF_TOO_SMALL;
        }
        else {
            retVal = FM_ERROR_PASS_SETTING_DEF_NATT_CMD;
        }
        goto cleanupQueue;
    }

    psCmd = PASAHO_PACFG_CMD;
    Cppi_setPSData (Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t *)&psCmd, 4);
    Cppi_setPacketLen(Cppi_DescType_MONOLITHIC, monolithicDesc, cmdLen);
    Qmss_queuePushDescSize(paTxQs[cmdDest - pa_CMD_TX_DEST_0], (uint32_t *)monolithicDesc, SIZE_MONO_DESC);

    /* Wait for response from PA (same no-timeout busy-wait as above) */
    while (Qmss_getQueueEntryCount(cmdRespQ) == 0){};
    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(cmdRespQ));
    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc,(uint8_t **)&paRespCmd, &paRespCmdLen);

    if (paRespCmd->commandResult) {
        retVal = FM_ERROR_PASS_NATT_DEF_CFG_NOT_SET;
    }

cleanupQueue:
    /* Return the last-used descriptor to the free queue on every exit path */
    Qmss_queuePushDescSize(freeQ, monolithicDesc, SIZE_MONO_DESC);

    return (retVal);
}
1088 /* FUNCTION PURPOSE: Powers down a peripheral
1089 ***********************************************************************
1090 * DESCRIPTION: Powers down the peripheral for a given power
1091 * domain number if the peripheral is currently on.
1092 */
1093 static void periphPowerDown(uint32_t pwrDmnNum)
1094 {
1095 if ((CSL_PSC_getPowerDomainState(pwrDmnNum) == PSC_PDSTATE_ON)) {
1096 /* Peripheral is ON */
1098 /* Power OFF */
1100 //Wait for any previous transitions to complete
1101 while (!CSL_PSC_isStateTransitionDone (pwrDmnNum));
1102 //Write Switch input into the corresponding PDCTL register
1103 CSL_PSC_disablePowerDomain (pwrDmnNum);
1104 //Write PTCMD to start the transition
1105 CSL_PSC_startStateTransition (pwrDmnNum);
1106 //Wait for the transition to complete
1107 while (!CSL_PSC_isStateTransitionDone (pwrDmnNum));
1108 }
1109 }
1111 /**********************************************************************
1112 **************************** Cleanup APIs ****************************
1113 **********************************************************************/
1115 /* FUNCTION PURPOSE: Cleanup peripheral init code
1116 ***********************************************************************
1117 * DESCRIPTION: Initializes some peripherals via their LLDs so that
1118 * API calls used to reset their peripheral resources
1119 * will succeed. QMSS init, CPPI init, etc.
1120 */
Fm_Result fmCleanupInit(uint32_t fullInit)
{
    Qmss_InitCfg qmssInitCfg;
    Qmss_Result  qmssResult;
    uint32_t     heapSize;
    Cppi_InitCfg cppiInitCfg;
    Cppi_Result  cppiResult;
    Fm_Result    retVal = FM_FAULT_CLEANUP_OK;

    Fault_Mgmt_osalLog("Fault Cleanup: LLD Initialization\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_INITIALIZATION;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Init TSCL */
    cycleDelay(0, FM_TRUE);

    /* Init QMSS */
    memset ((void *) &qmssInitCfg, 0, sizeof(qmssInitCfg));
    if (fullInit) {
        /* Set up the linking RAM. Use internal Linking RAM. */
        qmssInitCfg.linkingRAM0Base = 0;
        qmssInitCfg.linkingRAM0Size = 0;
        qmssInitCfg.linkingRAM1Base = 0x0;
        /* NOTE(review): this assignment is dead - maxDescNum is overwritten
         * unconditionally with NUM_MONO_DESC below. Confirm which value the
         * full-init path is supposed to use. */
        qmssInitCfg.maxDescNum = 0x3fff;
    }
    else {
        /* QMSS hardware was already initialized by another core; skip HW init */
        qmssInitCfg.qmssHwStatus = QMSS_HW_INIT_COMPLETE;
    }
    qmssInitCfg.maxDescNum = NUM_MONO_DESC;
    qmssResult = Qmss_init(&qmssInitCfg, &qmssGblCfgParams);
    if (qmssResult != QMSS_SOK) {
        Fault_Mgmt_osalLog("Fault Cleanup: Failed to Init QMSS LLD\n");
        retVal = FM_ERROR_QMSS_INIT_FAILED;
        goto errorExit;
    }

    qmssResult = Qmss_start ();
    if (qmssResult != QMSS_SOK) {
        Fault_Mgmt_osalLog("Fault Cleanup: Failed to Start QMSS LLD\n");
        retVal = FM_ERROR_QMSS_INIT_FAILED;
        goto errorExit;
    }

    /* Init CPPI - the LLD heap is placed in the static tempCppiHeap buffer.
     * NOTE(review): the Cppi_getHeapReq return code is not checked before
     * heapSize is consumed - confirm this call cannot fail here. */
    cppiResult = Cppi_getHeapReq(cppiGblCfgParams, &heapSize);
    cppiInitCfg.heapParams.staticHeapBase = &tempCppiHeap[0];
    cppiInitCfg.heapParams.staticHeapSize = heapSize;
    cppiInitCfg.heapParams.heapAlignPow2 = 8;
    cppiInitCfg.heapParams.dynamicHeapBlockSize = -1;

    cppiResult = Cppi_initCfg(cppiGblCfgParams, &cppiInitCfg);
    if (cppiResult != CPPI_SOK) {
        Fault_Mgmt_osalLog("Fault Cleanup: Failed to Init CPPI LLD\n");
        retVal = FM_ERROR_CPPI_INIT_FAILED;
        goto errorExit;
    }

    /* Gate flag checked by the other fmClean* APIs */
    initComplete = FM_TRUE;

errorExit:
    return (retVal);
}
1185 /* FUNCTION PURPOSE: Resets CPPI peripheral resources
1186 ***********************************************************************
1187 * DESCRIPTION: Resets CPPI peripheral resources to their PoR
1188 * state. Resources in the exclusion list will
1189 * not be reset.
1190 */
1191 Fm_Result fmCleanCppi(Fm_ExcludedResource *excludedResList, uint32_t listSize)
1192 {
1193 Fm_ExclusionParams exclusionParams;
1194 Cppi_CpDmaInitCfg dmaCfg;
1195 Cppi_Handle cppiHandle;
1196 int32_t i, j;
1198 if (!initComplete) {
1199 return (FM_ERROR_CLEANUP_INIT_NOT_COMPLETE);
1200 }
1202 Fault_Mgmt_osalLog("Fault Cleanup: CPPI\n");
1203 /* Writeback status so that Host can view it */
1204 fmCleanupStatus[0] = FM_STATUS_CLEANUP_CPDMA;
1205 Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
1207 /* CPPI PoR reset process - Reset all DMA rx/tx channels and flows except
1208 * those owned by Linux */
1210 memset(&exclusionParams, 0, sizeof(exclusionParams));
1211 exclusionParams.exclusionList = excludedResList;
1212 exclusionParams.numListEntries = listSize;
1214 /* Reset CPPI channels and flows */
1215 for (i = 0; i < CPPI_MAX_CPDMA; i++) {
1216 if (fmIsWirelessPeriphPoweredOnForCpdma((Cppi_CpDma) i)) {
1217 memset ((void *) &dmaCfg, 0, sizeof(dmaCfg));
1218 dmaCfg.dmaNum = (Cppi_CpDma) i;
1220 if (cppiHandle = Cppi_open(&dmaCfg)) {
1221 exclusionParams.u.cpdmaParams.dma = dmaCfg.dmaNum;
1223 exclusionParams.resType = Fm_res_CpdmaRxCh;
1224 for (j = 0; j < fmGetDmaMaxRxCh(dmaCfg.dmaNum); j++) {
1225 exclusionParams.resourceNum = j;
1226 if (!fmExclusionIsExcluded(&exclusionParams)) {
1227 resetDmaCh(cppiHandle, i, j, Fm_res_CpdmaRxCh);
1228 }
1229 }
1231 exclusionParams.resType = Fm_res_CpdmaTxCh;
1232 for (j = 0; j < fmGetDmaMaxTxCh(dmaCfg.dmaNum); j++) {
1233 exclusionParams.resourceNum = j;
1234 if (!fmExclusionIsExcluded(&exclusionParams)) {
1235 resetDmaCh(cppiHandle, i, j, Fm_res_CpdmaTxCh);
1236 }
1237 }
1239 exclusionParams.resType = Fm_res_CpdmaRxFlow;
1240 for (j = 0; j < getDmaMaxRxFlow(dmaCfg.dmaNum); j++) {
1241 exclusionParams.resourceNum = j;
1242 if (!fmExclusionIsExcluded(&exclusionParams)) {
1243 resetDmaCh(cppiHandle, i, j, Fm_res_CpdmaRxFlow);
1244 }
1245 }
1246 }
1247 else {
1248 Fault_Mgmt_osalLog("Fault Cleanup: Failed to open CPDMA with index %d\n", i);
1249 }
1250 }
1251 }
1253 return (FM_FAULT_CLEANUP_OK);
1254 }
1256 /* FUNCTION PURPOSE: Resets QMSS accumulator peripheral resources
1257 ***********************************************************************
1258 * DESCRIPTION: Resets QMSS accumulator peripheral resources to
1259 * their PoR state. Resources in the exclusion list will
1260 * not be reset.
1261 *
1262 * NOTE: This API should be called before queues
1263 * are cleaned.
1264 */
Fm_Result fmCleanQmssAccum(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
                           uint32_t listSize)
{
    Qmss_Result            qmssResult;
    int32_t                i;
    Fm_ExclusionParams     exclusionParams;
    Qmss_IntdInterruptType intdType;

    /* Cleanup LLDs must have been initialized via fmCleanupInit first */
    if (!initComplete) {
        return (FM_ERROR_CLEANUP_INIT_NOT_COMPLETE);
    }

    Fault_Mgmt_osalLog("Fault Cleanup: QMSS Accumulator\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_QMSS_ACCUM;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* QMSS cleanup - Disable all the accumulator channels except those owned by Linux */
    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList = excludedResList;
    exclusionParams.numListEntries = listSize;

    if (listSize) {
        /* Only clean up accumulator channels if another processor is maintaining the
         * QM PDSPs. Another processor is in play if the exclusion list is populated */
        exclusionParams.resType = Fm_res_QmssAccumCh;
        for (i = 0; i < fmGblCfgParams->maxQmssAccumCh; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                /* Clear channel's pending interrupts - choose the INTD type by
                 * which accumulator range (high or low) the channel falls in */
                if ((i >= fmGblCfgParams->highAccum.start) && (i <= fmGblCfgParams->highAccum.end)) {
                    intdType = Qmss_IntdInterruptType_HIGH;
                }
                else if ((i >= fmGblCfgParams->loAccum.start) && (i <= fmGblCfgParams->loAccum.end)) {
                    intdType = Qmss_IntdInterruptType_LOW;
                }
                else {
                    /* Channel fits neither configured range - abort cleanup */
                    return (FM_ERROR_INVALID_ACCUM_CH);
                }

                /* Ack + EOI until the channel's interrupt count drains.
                 * NOTE(review): no timeout - could spin if INTCNT never clears */
                while (qmssGblCfgParams.qmQueIntdReg->INTCNT_REG[i]) {
                    Qmss_ackInterrupt(i, 1);
                    Qmss_setEoiVector(intdType, i);
                }

                /* Disable failures are logged but do not abort the sweep */
                qmssResult = disableAccumChWithTimeout(Qmss_PdspId_PDSP1, i);
                if (qmssResult < 0) {
                    Fault_Mgmt_osalLog("Failed to disable PDSP1 accum ch %d with err %d\n", i, qmssResult);
                }
            }
        }
    }

    return (FM_FAULT_CLEANUP_OK);
}
1321 /* FUNCTION PURPOSE: Resets QMSS peripheral queue resources
1322 ***********************************************************************
1323 * DESCRIPTION: Resets QMSS peripheral queue resources to their PoR
1324 * state. Resources in the exclusion list will
1325 * not be reset.
1326 *
1327 * NOTE: This API should be called after accumulator
1328 * channels are disabled.
1329 */
Fm_Result fmCleanQmssQueue(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
                           uint32_t listSize)
{
    int32_t            i;
    Fm_ExclusionParams exclusionParams;

    /* Cleanup LLDs must have been initialized via fmCleanupInit first */
    if (!initComplete) {
        return (FM_ERROR_CLEANUP_INIT_NOT_COMPLETE);
    }

    Fault_Mgmt_osalLog("Fault Cleanup: QMSS Queues\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_QMSS_QUEUE;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* QMSS cleanup - Don't touch QoS clusters (using different firmware downloaded by Linux kernel)
     *              - Sweep QMSS queues of all DSP-based descriptors
     *              - Clear all memory region registers not inserted by Linux */
    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList = excludedResList;
    exclusionParams.numListEntries = listSize;

    /* Cleanup the QMSS queues of DSP-based descriptors */
    cleanQmssQueues(excludedResList, listSize);

    /* Reset every memory region register not owned by the excluded entity */
    exclusionParams.resType = Fm_res_QmssMemRegion;
    for (i = 0; i < QMSS_MAX_MEM_REGIONS; i++) {
        exclusionParams.resourceNum = i;
        if (!fmExclusionIsExcluded(&exclusionParams)) {
            expressResetMemoryRegion((Qmss_MemRegion)i);
        }
    }

    return (FM_FAULT_CLEANUP_OK);
}
1366 /* FUNCTION PURPOSE: Resets PA peripheral resources
1367 ***********************************************************************
1368 * DESCRIPTION: Resets PA peripheral resources to their PoR
1369 * state. Resources in the exclusion list will
1370 * not be reset.
1371 */
Fm_Result fmCleanPa(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
                    uint32_t listSize)
{
    Pa_Handle          passHandle;
    Qmss_MemRegInfo    memInfo;
    Cppi_CpDmaInitCfg  dmaCfg;
    Cppi_TxChInitCfg   txChCfg;
    Cppi_RxChInitCfg   rxChCfg;
    Cppi_RxFlowCfg     rxFlowCfg;
    Cppi_Handle        cppiHandle;
    Cppi_ChHnd         paTxCh;
    Cppi_ChHnd         paRxCh;
    Cppi_FlowHnd       rxFlowHnd = NULL;
    Qmss_QueueHnd      freeQ = NULL;
    Qmss_QueueHnd      cmdRespQ = NULL;
    Cppi_DescCfg       descCfg;
    Qmss_Queue         queInfo;
    uint8_t            isAllocated;
    uint32_t           numAllocated;
    int32_t            i, j;
    Fm_ExclusionParams exclusionParams;
    Cppi_Desc         *monolithicDesc;
    paCmdReply_t       paReply;
    tempPaCmd          paDelCmd;
    tempPaCmd         *paRespCmd;
    uint32_t           paRespCmdLen;
    uint32_t           psCmd;
    Cppi_Result        cppiResult;
    Fm_Result          retVal = FM_FAULT_CLEANUP_OK;

    /* Cleanup LLDs must have been initialized via fmCleanupInit first */
    if (!initComplete) {
        retVal = FM_ERROR_CLEANUP_INIT_NOT_COMPLETE;
        goto errorExit;
    }

    Fault_Mgmt_osalLog("Fault Cleanup: PA\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_PA;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Only cleanup if NetCp is on */
    if (isNetCpPoweredUp()) {
        /* Init PA and reset PDSPs 1-5 */
        if ((passHandle = resetAndInitPa(fmGblCfgParams->maxPaPdsps, excludedResList, listSize)) == NULL) {
            retVal = FM_ERROR_PA_INIT_FAILED;
            goto errorExit;
        }

        /* Allocate QMSS and CPPI resources needed to send commands to PA */

        /* Initialize the exclusion parameters */
        memset(&exclusionParams, 0, sizeof(exclusionParams));
        exclusionParams.exclusionList = excludedResList;
        exclusionParams.numListEntries = listSize;

        /* Make sure descriptor can fit delete command */
        if ((SIZE_MONO_DESC - MONO_DESC_DATA_OFFSET) < sizeof(paDelCmd)) {
            retVal = FM_ERROR_DESC_BUF_TOO_SMALL;
            goto errorExit;
        }

        /* Setup memory region for monolithic descriptors */
        memset(&memInfo, 0, sizeof(memInfo));
        memset ((void *) monoDesc, 0, SIZE_MONO_DESC * NUM_MONO_DESC);
        memInfo.descBase = (uint32_t *) l2_global_address ((uint32_t) monoDesc);
        memInfo.descSize = SIZE_MONO_DESC;
        memInfo.descNum = NUM_MONO_DESC;
        memInfo.manageDescFlag = Qmss_ManageDesc_MANAGE_DESCRIPTOR;
        memInfo.startIndex = 0;
        /* Find a memory region not used by Linux */
        exclusionParams.resType = Fm_res_QmssMemRegion;
        for (i = 0; i < QMSS_MAX_MEM_REGIONS; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                memInfo.memRegion = (Qmss_MemRegion)i;
                break;
            }
        }

        if (Qmss_insertMemoryRegion(&memInfo) < QMSS_SOK) {
            retVal = FM_ERROR_QMSS_INIT_FAILED_DURING_PA_RECOV;
            goto cleanupMemRegion;
        }

        /* Open queues required to send commands to PA:
         * - freeQ (GP) - contains unused descriptors
         * - cmdSendQ (NetCP TX) - used to send commands to PA
         * - cmdRespQ (GP) - used to receive command responses from PA */
        memset(&invalidQs[0], 0, sizeof(invalidQs));
        exclusionParams.resType = Fm_res_QmssQueue;
        i = 0;
        /* Keep opening GP queues until one outside the exclusion list is found;
         * excluded ones are parked in invalidQs so they aren't re-offered */
        while (freeQ == NULL) {
            freeQ = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
            exclusionParams.resourceNum = Qmss_getQIDFromHandle(freeQ);
            if (fmExclusionIsExcluded(&exclusionParams)) {
                /* Store the queues that can't be used until after all queues have been found. */
                invalidQs[i++] = freeQ;
                freeQ = NULL;
            }
        }
        while (cmdRespQ == NULL) {
            cmdRespQ = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
            exclusionParams.resourceNum = Qmss_getQIDFromHandle(cmdRespQ);
            if (fmExclusionIsExcluded(&exclusionParams)) {
                /* Store the queues that can't be used until after all queues have been found. */
                invalidQs[i++] = cmdRespQ;
                cmdRespQ = NULL;
            }
        }
        /* Free any invalidQs */
        for (j = 0; j < i; j++) {
            Qmss_queueClose(invalidQs[j]);
        }

        /* Open all PASS tx queues. It's okay if already opened by Linux. Just need the interface
         * to PA PDSPs */
        for (i = 0; i < PA_MAX_NUM_CPPI_TX_CH; i++) {
            paTxQs[i] = Qmss_queueOpen(Qmss_QueueType_PASS_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
        }

        /* Setup the descriptors for freeQ */
        memset(&descCfg, 0, sizeof(descCfg));
        descCfg.memRegion = memInfo.memRegion;
        descCfg.descNum = NUM_MONO_DESC;
        descCfg.destQueueNum = Qmss_getQIDFromHandle(freeQ);
        descCfg.queueType = Qmss_QueueType_STARVATION_COUNTER_QUEUE;
        descCfg.initDesc = Cppi_InitDesc_INIT_DESCRIPTOR;
        descCfg.descType = Cppi_DescType_MONOLITHIC;
        descCfg.epibPresent = Cppi_EPIB_NO_EPIB_PRESENT;
        descCfg.cfg.mono.dataOffset = MONO_DESC_DATA_OFFSET;
        /* Descriptor should be recycled back to freeQ */
        queInfo = Qmss_getQueueNumber(freeQ);
        descCfg.returnQueue.qMgr = queInfo.qMgr;
        descCfg.returnQueue.qNum = queInfo.qNum;

        /* Initialize the descriptors and push to free Queue */
        if (Cppi_initDescriptor(&descCfg, &numAllocated) < CPPI_SOK) {
            retVal = FM_ERROR_CPPI_INIT_FAILED_DURING_PA_RECOV;
            goto cleanupResources;
        }
        /* Writeback changes to the monolithic descriptors */
        Fault_Mgmt_osalEndMemAccess(&monoDesc[0], sizeof(monoDesc));

        /* Open PASS DMA to send commands to and receive responses from PA */
        memset ((void *)&dmaCfg, 0, sizeof(dmaCfg));
        dmaCfg.dmaNum = Cppi_CpDma_PASS_CPDMA;
        cppiHandle = Cppi_open(&dmaCfg);

        /* Open PASS rxChs - Doesn't matter if already used by Linux */
        memset(&rxChCfg, 0, sizeof(rxChCfg));
        for (i = 0; i < fmGetDmaMaxRxCh(Cppi_CpDma_PASS_CPDMA); i++) {
            rxChCfg.channelNum = i;
            rxChCfg.rxEnable = Cppi_ChState_CHANNEL_DISABLE;
            paRxCh = Cppi_rxChannelOpen(cppiHandle, &rxChCfg, &isAllocated);
            Cppi_channelEnable(paRxCh);
        }

        /* Open all cppi tx channels to go with the queues - don't need to save handle */
        for (i = 0; i < PA_MAX_NUM_CPPI_TX_CH; i ++) {
            memset(&txChCfg, 0, sizeof(txChCfg));
            txChCfg.txEnable = Cppi_ChState_CHANNEL_DISABLE;
            /* Channel number is derived from the PASS queue's offset from the
             * PASS queue base */
            txChCfg.channelNum = Qmss_getQIDFromHandle(paTxQs[i]) - QMSS_PASS_QUEUE_BASE;
            paTxCh = Cppi_txChannelOpenWithHwCfg(cppiHandle, &txChCfg, &isAllocated, 0);
            Cppi_channelEnable(paTxCh);
        }

        /* Open a PASS rxFlow - pick the first flow id not in the exclusion list */
        memset(&rxFlowCfg, 0, sizeof(rxFlowCfg));
        exclusionParams.resType = Fm_res_CpdmaRxFlow;
        exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_PASS_CPDMA;
        for (i = 0; i < getDmaMaxRxFlow(Cppi_CpDma_PASS_CPDMA); i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                rxFlowCfg.flowIdNum = i;
                break;
            }
        }
        rxFlowCfg.rx_ps_location = Cppi_PSLoc_PS_IN_DESC;
        rxFlowCfg.rx_desc_type = Cppi_DescType_MONOLITHIC;
        rxFlowCfg.rx_sop_offset = MONO_DESC_DATA_OFFSET;
        queInfo = Qmss_getQueueNumber(freeQ);
        rxFlowCfg.rx_fdq0_sz0_qnum = queInfo.qNum;
        rxFlowCfg.rx_fdq0_sz0_qmgr = queInfo.qMgr;
        rxFlowHnd = Cppi_configureRxFlow(cppiHandle, &rxFlowCfg, &isAllocated);

        if (listSize) {
            /* A present exclusion list signifies another core (typically ARM Linux) is in
             * control of PA. As a result, LUT1 entry deletion must take place and be selective. If
             * listSize is 0 the resetPaPdspsAndLuts function will have reset all PDSPs cleaning out
             * all LUT entries. */
            exclusionParams.resType = Fm_res_PaLutEntry;
            exclusionParams.u.lutParams.lutInst = 1;
            for (i = 0; i < fmGblCfgParams->maxLut1Entries; i++) {
                exclusionParams.resourceNum = i;
                if (!fmExclusionIsExcluded(&exclusionParams)) {
                    /* Build a delete command for LUT1 entry i, replies routed
                     * back via cmdRespQ/rxFlow */
                    memset(&paReply, 0, sizeof(paReply));
                    paReply.dest = pa_DEST_HOST;
                    paReply.queue = Qmss_getQIDFromHandle(cmdRespQ);
                    paReply.flowId = rxFlowCfg.flowIdNum;
                    pa_format_fcmd((void *) &paDelCmd, &paReply, (uint8_t) i);

                    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(freeQ));
                    Cppi_setData(Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t *)&paDelCmd, sizeof(paDelCmd));
                    psCmd = PASAHO_PACFG_CMD;
                    Cppi_setPSData (Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t *)&psCmd, 4);
                    Cppi_setPacketLen(Cppi_DescType_MONOLITHIC, monolithicDesc, sizeof(paDelCmd));
                    /* LUT entry delete commands sent to PDSP 0 (Queue 640) */
                    Qmss_queuePushDescSize(paTxQs[pa_CMD_TX_DEST_0], (uint32_t *)monolithicDesc, SIZE_MONO_DESC);

                    /* Wait for response from PA.
                     * NOTE(review): busy-wait with no timeout */
                    while (Qmss_getQueueEntryCount(cmdRespQ) == 0){};
                    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(cmdRespQ));
                    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc,(uint8_t **)&paRespCmd, &paRespCmdLen);

                    if (paRespCmd->commandResult) {
                        retVal = FM_ERROR_LUT1_INDEX_NOT_REMOVED;
                        Qmss_queuePushDescSize(freeQ, monolithicDesc, SIZE_MONO_DESC);
                        goto cleanupResources;
                    }
                    Qmss_queuePushDescSize(freeQ, monolithicDesc, SIZE_MONO_DESC);
                }
            }
        }

        /* Restore PASS global configuration defaults */
        if ((retVal = paSetDefaultGblCfg(passHandle, cmdRespQ, freeQ, rxFlowCfg.flowIdNum)) != FM_FAULT_CLEANUP_OK) {
            goto cleanupResources;
        }

        /* Teardown: success path falls through these labels as well */
cleanupResources:
        if ((cppiResult = Cppi_closeRxFlow(rxFlowHnd)) != CPPI_SOK) {
            Fault_Mgmt_osalLog("Failed to disable PASS rx flow %d with err %d\n", rxFlowCfg.flowIdNum, cppiResult);
        }

        /* All descriptors should be in freeQ */
        Qmss_queueEmpty(freeQ);
        Qmss_queueEmpty(cmdRespQ);

cleanupMemRegion:
        expressResetMemoryRegion(memInfo.memRegion);
    }
errorExit:
    return (retVal);
}
1616 /* FUNCTION PURPOSE: Cleans SA security context
1617 ***********************************************************************
1618 * DESCRIPTION: Evicts and tears down the SA security contexts
1619 * so that IPsec traffic can be reestablished
1620 */
1621 Fm_Result fmCleanSa(Fm_ExcludedResource *excludedResList, uint32_t listSize)
1622 {
1623 CSL_Cp_aceRegs *pSaRegs = (CSL_Cp_aceRegs *)CSL_PA_SS_CFG_CP_ACE_CFG_REGS;;
1624 int i;
1625 uint32_t ctxCachCtrl;
1626 Fm_Result retVal = FM_FAULT_CLEANUP_OK;
1628 Fault_Mgmt_osalLog("Fault Cleanup: SA\n");
1629 /* Writeback status so that Host can view it */
1630 fmCleanupStatus[0] = FM_STATUS_CLEANUP_SA;
1631 Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
1633 /* Only cleanup if context cache is enabled */
1634 if ((pSaRegs->MMR.CMD_STATUS & CSL_CP_ACE_CMD_STATUS_CTXCACH_EN_MASK)) {
1636 /* Stop flow of descriptors while resetting SA */
1637 linuxTxDmaDisable(excludedResList, listSize);
1640 /* Clear the security context cache - Allows IPsec tunnels to be recreated
1641 * from scratch after cleanup */
1642 ctxCachCtrl = pSaRegs->MMR.CTXCACH_CTRL;
1643 ctxCachCtrl |= CSL_CP_ACE_CTXCACH_CTRL_CLR_CACHE_TABLE_MASK;
1644 pSaRegs->MMR.CTXCACH_CTRL = ctxCachCtrl;
1646 /* Wait for bit to clear for completion */
1647 do {
1648 for (i = 0; i < 100; i++) {
1649 asm (" nop ");
1650 }
1651 } while (pSaRegs->MMR.CTXCACH_CTRL & CSL_CP_ACE_CTXCACH_CTRL_CLR_CACHE_TABLE_MASK);
1653 /* Restart DMAs */
1654 linuxTxDmaEnable(excludedResList, listSize);
1655 }
1657 return (retVal);
1658 }
1660 /* FUNCTION PURPOSE: Resets Semaphore peripheral resources
1661 ***********************************************************************
1662 * DESCRIPTION: Resets Semaphore peripheral resources to their PoR
1663 * state. Resources in the exclusion list will
1664 * not be reset.
1665 */
Fm_Result fmCleanSemaphores(Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    /* NOTE(review): the exclusion-list parameters are accepted for API
     * symmetry but are not consulted here - the soft reset below resets the
     * whole semaphore module. Confirm no semaphore must be preserved. */
    Fault_Mgmt_osalLog("Fault Cleanup: Semaphore\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_SEMAPHORE;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Soft reset semaphores through the SEM_RST_RUN register */
    CSL_FINS(hSEM->SEM_RST_RUN, SEM_SEM_RST_RUN_RESET, 1);

    return (FM_FAULT_CLEANUP_OK);
}
1679 /* FUNCTION PURPOSE: Resets the CICs
1680 ***********************************************************************
1681 * DESCRIPTION: Clears a CIC of all system interrupt to host interrupt
1682 * mappings except those routed to Linux
1683 */
Fm_Result fmCleanCics(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
                      uint32_t listSize)
{
    int32_t                  i, j, k;
    volatile CSL_CPINTCRegs *regs;
    uint32_t                 numSysInt;
    uint32_t                 numHostInt;
    Fm_ExclusionParams       exclusionParams;
    uint32_t                 entireCicDisable;
    Fm_Result                retVal = FM_FAULT_CLEANUP_OK;

    Fault_Mgmt_osalLog("Fault Cleanup: CIC\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_CIC;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList = excludedResList;
    exclusionParams.numListEntries = listSize;
    exclusionParams.resType = Fm_res_CicHostInt;

    for (i = 0; i < fmGblCfgParams->maxCic; i++) {
        exclusionParams.u.cicParams.cic = i;

        /* Get CIC params */
        regs = fmGblCfgParams->cicParams[i].cicRegs;
        numSysInt = fmGblCfgParams->cicParams[i].maxNumSysInt;
        numHostInt = fmGblCfgParams->cicParams[i].maxNumHostInt;
        /* Assume the whole CIC may be globally disabled until an excluded
         * host interrupt is encountered below */
        entireCicDisable = FM_TRUE;

        /* Unmap and disable all host interrupts not in exclusion list */
        for (j = 0; j < numHostInt; j++) {
            exclusionParams.resourceNum = j;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                /* Disable the host interrupt */
                regs->HINT_ENABLE_CLR_INDEX_REG = CSL_FMK(CPINTC_HINT_ENABLE_CLR_INDEX_REG_HINT_ENABLE_CLR_INDEX, j);

                for (k = 0; k < numSysInt; k++) {
                    /* Clear system int channel map routed to host int */
                    if (regs->CH_MAP[k] == j) {
                        regs->CH_MAP[k] = 0;

                        /* Disable sys int since not routed anymore */
                        regs->ENABLE_CLR_INDEX_REG = CSL_FMK(CPINTC_ENABLE_CLR_INDEX_REG_ENABLE_CLR_INDEX, k);

                        /* Clear any pending interrupts */
                        regs->STATUS_CLR_INDEX_REG = CSL_FMK(CPINTC_STATUS_CLR_INDEX_REG_STATUS_CLR_INDEX, k);
                    }
                }
            }
            else {
                /* At least one host interrupt in this CIC is excluded. Don't
                 * perform global disable of host interrupts for this CIC */
                entireCicDisable = FM_FALSE;
            }
        }

        /* Global disable of host interrupts if none excluded */
        if (entireCicDisable) {
            regs->GLOBAL_ENABLE_HINT_REG = CSL_FMK(CPINTC_GLOBAL_ENABLE_HINT_REG_ENABLE_HINT_ANY, 0);
        }
    }

    return (retVal);
}
/* FUNCTION PURPOSE: Resets Timers
 ***********************************************************************
 * DESCRIPTION: Disables and resets all device timers except
 *              those used by Linux (i.e. timers present in the
 *              exclusion list)
 */
Fm_Result fmCleanTimers(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
                        uint32_t listSize)
{
    volatile CSL_TmrRegs *regs;
    int32_t               i;
    Uint32                tmpReg;
    Fm_ExclusionParams    exclusionParams;
    /* retVal is never changed below - the routine always reports success */
    Fm_Result             retVal = FM_FAULT_CLEANUP_OK;

    Fault_Mgmt_osalLog("Fault Cleanup: Timers\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_TIMER;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Exclusions are checked per timer (Fm_res_Timer) */
    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList  = excludedResList;
    exclusionParams.numListEntries = listSize;
    exclusionParams.resType        = Fm_res_Timer;

    for (i = 0; i < fmGblCfgParams->maxTimers; i++) {
        exclusionParams.resourceNum = i;
        if (!fmExclusionIsExcluded(&exclusionParams)) {
            regs = fmGblCfgParams->timerParams[i].timerRegs;

            /* Disable the LOW and HIGH Timers (TCR enable-mode fields first,
             * read-modify-write so other TCR fields are preserved). */
            tmpReg = regs->TCR;
            CSL_FINST(tmpReg, TMR_TCR_ENAMODE_LO, DISABLE);
            CSL_FINST(tmpReg, TMR_TCR_ENAMODE_HI, DISABLE);
            regs->TCR = tmpReg;

            /* Reset after disable - place both halves in reset via TGCR */
            tmpReg = regs->TGCR;
            CSL_FINST(tmpReg, TMR_TGCR_TIMLORS, RESET_ON);
            CSL_FINST(tmpReg, TMR_TGCR_TIMHIRS, RESET_ON);
            regs->TGCR = tmpReg;
        }
    }

    return (retVal);
}
/* FUNCTION PURPOSE: Resets AIF2
 ***********************************************************************
 * DESCRIPTION: Resets the AIF2 peripheral and then powers it down.
 *              The register-level reset is only attempted while the
 *              AIF power domain is still on; the power-down is done
 *              unconditionally.
 */
Fm_Result fmCleanAif2(void)
{
    Fault_Mgmt_osalLog("Fault Cleanup: AIF2\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_AIF2;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Only touch AIF2 registers if the AI power domain is on - NOTE(review):
     * presumably accessing the domain while powered down would fault; confirm
     * against the device PSC documentation */
    if ((CSL_PSC_getPowerDomainState(CSL_PSC_PD_AI) == PSC_PDSTATE_ON)) {
        /* On */
        /* Reset AIF2 using a zero-initialized AIF object */
        memset(&locAifObj, 0, sizeof(locAifObj));
        AIF_resetAif(&locAifObj);

#if 0 /* K2 only - Add back when K2 support is added */
        /* Reset SERDES separately */
        /* Link 0 to Link 3*/
        CSL_AIF2SerdesShutdown(CSL_AIF2_SERDES_B8_CFG_REGS);
        /* Link 4 to Link 5*/
        CSL_AIF2SerdesShutdown(CSL_AIF2_SERDES_B4_CFG_REGS);
#endif
    }
    periphPowerDown(CSL_PSC_PD_AI);

    return (FM_FAULT_CLEANUP_OK);
}
1825 /* FUNCTION PURPOSE: Resets TCP3D
1826 ***********************************************************************
1827 * DESCRIPTION: Powers down the TCP3D peripheral
1828 */
1829 Fm_Result fmCleanTcp3d(void)
1830 {
1831 Fault_Mgmt_osalLog("Fault Cleanup: TCP3D\n");
1832 /* Writeback status so that Host can view it */
1833 fmCleanupStatus[0] = FM_STATUS_CLEANUP_TCP3D;
1834 Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
1836 periphPowerDown(CSL_PSC_PD_TCP3D_A);
1837 periphPowerDown(CSL_PSC_PD_TCP3D_B);
1839 return (FM_FAULT_CLEANUP_OK);
1840 }
1842 /* FUNCTION PURPOSE: Resets BCP
1843 ***********************************************************************
1844 * DESCRIPTION: Powers down the BCP peripheral
1845 */
1846 Fm_Result fmCleanBcp(void)
1847 {
1848 Fault_Mgmt_osalLog("Fault Cleanup: BCP\n");
1849 /* Writeback status so that Host can view it */
1850 fmCleanupStatus[0] = FM_STATUS_CLEANUP_BCP;
1851 Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
1853 periphPowerDown(CSL_PSC_PD_BCP);
1855 return (FM_FAULT_CLEANUP_OK);
1856 }
1858 /* FUNCTION PURPOSE: Resets FFTC
1859 ***********************************************************************
1860 * DESCRIPTION: Powers down the FFTC peripheral
1861 */
1862 Fm_Result fmCleanFftc(void)
1863 {
1864 Fault_Mgmt_osalLog("Fault Cleanup: FFTC (A & B)\n");
1865 /* Writeback status so that Host can view it */
1866 fmCleanupStatus[0] = FM_STATUS_CLEANUP_FFTC;
1867 Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
1869 periphPowerDown(CSL_PSC_PD_FFTC_AB);
1871 return (FM_FAULT_CLEANUP_OK);
1872 }
1874 /* FUNCTION PURPOSE: Resets VCP
1875 ***********************************************************************
1876 * DESCRIPTION: Powers down the VCP peripheral
1877 */
1878 Fm_Result fmCleanVcp(void)
1879 {
1880 Fault_Mgmt_osalLog("Fault Cleanup: VCP\n");
1881 /* Writeback status so that Host can view it */
1882 fmCleanupStatus[0] = FM_STATUS_CLEANUP_VCP;
1883 Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
1885 periphPowerDown(CSL_PSC_PD_PD_VCP_BCD);
1887 return (FM_FAULT_CLEANUP_OK);
1888 }
1889 #endif /* !(K2H && K2K && K2L && K2E) */
/* FUNCTION PURPOSE: Resets EDMA3 peripheral resources
 ***********************************************************************
 * DESCRIPTION: Resets EDMA3 peripheral resources (DMA channels, QDMA
 *              channels, and interrupt channels) to their PoR state
 *              for every channel controller.  Resources in the
 *              exclusion list will not be reset.
 */
Fm_Result fmCleanEdma3(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
                       uint32_t listSize, uint32_t provideStatus)
{
    CSL_Edma3Handle    edmaCCModule;
    int32_t            i, j, k;
    CSL_Status         status;
    Fm_ExclusionParams exclusionParams;

    /* Status is only published on non-K2 builds; on K2 devices the status
     * writeback is compiled out regardless of provideStatus */
    if (provideStatus) {
#if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
        Fault_Mgmt_osalLog("Fault Cleanup: EDMA3\n");
        /* Writeback status so that Host can view it */
        fmCleanupStatus[0] = FM_STATUS_CLEANUP_EDMA3;
        Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
#endif /* !(K2H && K2K && K2L && K2E) */
    }

    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList  = excludedResList;
    exclusionParams.numListEntries = listSize;

    /* Clean each EDMA3 channel controller in turn */
    for (i = 0; i < fmGblCfgParams->maxEdma3Cc; i++) {
        /* Module Level Open */
        memset((void *)&edmaCCModule, 0, sizeof(edmaCCModule));
        memset((void *)&edmaObjCC, 0, sizeof(edmaObjCC));
        edmaCCModule = CSL_edma3Open(&edmaObjCC, i, NULL, &status);
        if ((edmaCCModule == NULL) || (status != CSL_SOK)) {
            return(FM_ERROR_EDMA3_INIT_FAILED);
        }

        exclusionParams.u.edma3Params.edma3Num = i;

        /* Disable CC channels: for each non-excluded DMA channel, disable
         * and clear events in the global region and in every shadow region,
         * then clear secondary events */
        exclusionParams.resType = Fm_res_Edma3DmaCh;
        for (j = 0; j < edmaObjCC.cfgInfo.numDMAChannel; j++) {
            exclusionParams.resourceNum = j;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                CSL_edma3DMAChannelDisable(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
                CSL_edma3ClearDMAChannelEvent(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
                for (k = 0; k < edmaObjCC.cfgInfo.numRegions; k++) {
                    CSL_edma3DMAChannelDisable(edmaCCModule, k, j);
                    CSL_edma3ClearDMAChannelEvent(edmaCCModule, k, j);
                }
                CSL_edma3ClearDMAChannelSecondaryEvents(edmaCCModule, j);
            }
        }

        /* Disable non-excluded QDMA channels (global region only) */
        exclusionParams.resType = Fm_res_Edma3QdmaCh;
        for (j = 0; j < edmaObjCC.cfgInfo.numQDMAChannel; j++) {
            exclusionParams.resourceNum = j;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                CSL_edma3QDMAChannelDisable(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
                CSL_edma3ClearQDMAChannelSecondaryEvents(edmaCCModule, j);
            }
        }

        /* Disable non-excluded interrupt channels.  Channels 0-31 live in
         * the "Lo" registers, 32 and up in the "Hi" registers */
        exclusionParams.resType = Fm_res_Edma3IntCh;
        for (j = 0; j < edmaObjCC.cfgInfo.numINTChannel; j++) {
            exclusionParams.resourceNum = j;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                if (j < 32) {
                    CSL_edma3InterruptLoDisable(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
                    CSL_edma3ClearLoPendingInterrupts (edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
                }
                else {
                    CSL_edma3InterruptHiDisable(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
                    CSL_edma3ClearHiPendingInterrupts (edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
                }
            }
        }
    }

    /* Success codes differ between device families only in spelling; both
     * evaluate to a "cleanup OK" result */
#if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
    return (FM_FAULT_CLEANUP_OK);
#else
    return (0);
#endif
}
1976 /* FUNCTION PURPOSE: Gets the max CPPI tx ch for a CPDMA
1977 ***********************************************************************
1978 * DESCRIPTION: Returns the maximum number of tx ch for the
1979 * given CPDMA
1980 *
1981 * CPPI API hardcoded here until it can be added to CPPI LLD
1982 */
1983 uint32_t fmGetDmaMaxTxCh(Cppi_CpDma dmaNum)
1984 {
1985 uint32_t maxTxCh;
1987 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
1988 maxTxCh = cppiGblCfgParams[dmaNum].maxTxCh;
1989 #else
1990 maxTxCh = cppiGblCfgParams.cpDmaCfgs[dmaNum].maxTxCh;
1991 #endif
1992 return (uint32_t) maxTxCh;
1993 }
1995 /* FUNCTION PURPOSE: Gets the max CPPI rx ch for a CPDMA
1996 ***********************************************************************
1997 * DESCRIPTION: Returns the maximum number of rx ch for the
1998 * given CPDMA
1999 *
2000 * CPPI API hardcoded here until it can be added to CPPI LLD
2001 */
2002 uint32_t fmGetDmaMaxRxCh(Cppi_CpDma dmaNum)
2003 {
2004 uint32_t maxRxCh;
2006 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
2007 maxRxCh = cppiGblCfgParams[dmaNum].maxRxCh;
2008 #else
2009 maxRxCh = cppiGblCfgParams.cpDmaCfgs[dmaNum].maxRxCh;
2010 #endif
2011 return (uint32_t) maxRxCh;
2012 }