1 /**
2 * @file fm_clean.c
3 *
4 * @brief
5 * Fault Management fault cleanup source
6 *
7 * \par
8 * ============================================================================
9 * @n (C) Copyright 2014, Texas Instruments, Inc.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 *
18 * Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the
21 * distribution.
22 *
23 * Neither the name of Texas Instruments Incorporated nor the names of
24 * its contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 *
39 * \par
40 */
42 #include <c6x.h>
44 /* Standard Include Files. */
45 #include <string.h>
47 /* CSL Includes */
48 #include <ti/csl/csl_sem.h>
49 #include <ti/csl/csl_qm_queue.h>
50 #include <ti/csl/csl_pscAux.h>
51 #include <ti/csl/csl_chipAux.h>
52 #include <ti/csl/csl_edma3Aux.h>
54 #include <ti/csl/cslr.h>
55 #include <ti/csl/cslr_device.h>
56 #include <ti/csl/cslr_pa_ss.h>
57 #include <ti/csl/cslr_cp_ace.h>
58 #include <ti/csl/cslr_cpintc.h>
59 #include <ti/csl/cslr_tmr.h>
61 /* LLD Includes */
62 #include <ti/drv/cppi/cppi_drv.h>
63 #include <ti/drv/cppi/cppi_desc.h>
64 #include <ti/drv/qmss/qmss_drv.h>
65 #include <ti/drv/qmss/qmss_acc.h>
66 #include <ti/drv/qmss/qmss_qm.h>
67 #include <ti/drv/pa/pa.h>
68 #include <ti/drv/pa/pasahost.h>
69 #include <ti/drv/pa/fw/pafw.h>
70 #if (defined(DEVICE_K2H) && defined(DEVICE_K2K))
71 #include <ti/drv/aif2/aif2.h>
72 #endif
74 /* FM API Include */
75 #include <ti/instrumentation/fault_mgmt/fault_mgmt.h>
77 /* FM Internal Includes */
78 #include <ti/instrumentation/fault_mgmt/include/fm_loc.h>
79 #include <ti/instrumentation/fault_mgmt/include/fm_cleanloc.h>
80 #include <ti/instrumentation/fault_mgmt/include/fm_exclusionloc.h>
82 /* OSAL Includes */
83 #include <ti/instrumentation/fault_mgmt/fault_mgmt_osal.h>
85 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
87 #define NUM_MONO_DESC 32
88 #define SIZE_MONO_DESC 128
89 #define MONO_DESC_DATA_OFFSET 36
91 #define PA_INST_SIZE 128 /* Required size = 84 */
92 #define PA_MAX_NUM_L2_HANDLES 64
93 #define PA_L2_TABLE_SIZE (PA_MAX_NUM_L2_HANDLES * 32) /* Requires 32 bytes per entry */
94 #define PA_MAX_NUM_L3_HANDLES 128
95 #define PA_L3_TABLE_SIZE (PA_MAX_NUM_L3_HANDLES * 72) /* Requires 72 bytes per entry */
97 #define PA_MAX_NUM_CPPI_TX_CH 9
99 #if defined(_LITTLE_ENDIAN)
100 #define SWIZ(x) (sizeof((x)) == 1 ? (x) : (sizeof((x)) == 2 ? swiz16((x)) : (sizeof((x)) == 4 ? swiz32((x)) : 0)))
101 #else
102 #define SWIZ(x) (x)
103 #endif
105 /**********************************************************************
106 ********************** Cleanup Globals *******************************
107 **********************************************************************/
109 /* Tracks whether LLDs have been initialized during the cleanup process */
110 uint32_t initComplete = FM_FALSE;
112 #pragma DATA_ALIGN (monoDesc, 128)
113 uint8_t monoDesc[SIZE_MONO_DESC * NUM_MONO_DESC];
/* Array used to store Queues that cannot be used by DSP until
 * they can be closed. Must be global to avoid memory corruption
 * when initializing the array since it may potentially be
 * larger than the stack */
119 Qmss_QueueHnd invalidQs[QMSS_MAX_GENERAL_PURPOSE_QUEUE];
121 /* PA memory */
122 #pragma DATA_ALIGN (paMemPaInst, 8)
123 uint8_t paMemPaInst[PA_INST_SIZE];
124 #pragma DATA_ALIGN (paMemL2Ram, 8)
125 uint8_t paMemL2Ram[PA_L2_TABLE_SIZE];
126 #pragma DATA_ALIGN(paMemL3Ram, 8);
127 uint8_t paMemL3Ram[PA_L3_TABLE_SIZE];
129 /* PASS Tx Queues */
130 Qmss_QueueHnd paTxQs[PA_MAX_NUM_CPPI_TX_CH];
131 #endif /* !(K2H && K2K && K2L && K2E) */
133 /* EDMA3 object - global to avoid overflowing stack */
134 CSL_Edma3Obj edmaObjCC;
136 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
137 /* AIF2 object - Global to avoid overflowing stack */
138 AIF_ConfigObj locAifObj;
140 /**********************************************************************
141 ******************** External Variables ******************************
142 **********************************************************************/
144 /* Location in memory where cleanup status is written */
145 extern int32_t fmCleanupStatus[32];
147 /* Heap needed to initialize CPPI prior to IO halt execution */
148 extern uint8_t tempCppiHeap[];
149 #endif /* !(K2H && K2K && K2L && K2E) */
151 /* CPPI Global configuration parameters */
152 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
153 extern Cppi_GlobalConfigParams cppiGblCfgParams[];
154 #else
155 extern Cppi_GlobalConfigParams cppiGblCfgParams;
156 #endif
158 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
159 /* QMSS Global configuration parameters */
160 extern Qmss_GlobalConfigParams qmssGblCfgParams;
162 /**********************************************************************
163 ********************* Local Cleanup Functions ************************
164 **********************************************************************/
166 /* START: PA API hardcoded here until it can be added to PA LLD */
167 #if defined(_LITTLE_ENDIAN)
168 /*********************************************************************
169 * FUNCTION PURPOSE: Swizzling
170 *********************************************************************
171 * DESCRIPTION: The PA sub-system requires all multi-byte fields in
172 * big endian format.
173 *********************************************************************/
174 static inline uint16_t swiz16(uint16_t x)
175 {
176 return ((x >> 8) | (x << 8));
177 }
/*********************************************************************
 * FUNCTION PURPOSE: 32-bit byte swap
 *********************************************************************
 * DESCRIPTION: Reverses the byte order of a 32-bit value for
 *              conversion between little and big endian formats.
 *********************************************************************/
static inline uint32_t swiz32 (uint32_t x)
{
    uint32_t b0 = (x & 0x000000ffUL) << 24;   /* byte 0 -> byte 3 */
    uint32_t b1 = (x & 0x0000ff00UL) << 8;    /* byte 1 -> byte 2 */
    uint32_t b2 = (x & 0x00ff0000UL) >> 8;    /* byte 2 -> byte 1 */
    uint32_t b3 = (x & 0xff000000UL) >> 24;   /* byte 3 -> byte 0 */

    return (b0 | b1 | b2 | b3);
}
183 #endif
185 /* Hardcode here until API added to PA to remove entries with minimal
186 * overhead. This define maps to PAFRM_CONFIG_COMMAND_DEL_LUT1 (in src/pafrm.h) */
187 #define temp_PAFRM_CONFIG_COMMAND_DEL_LUT1 2
189 /* Hardcode here until API added to PA to remove entries with minimal
190 * overhead. This define maps to PAFRM_CONFIG_COMMAND_SEC_BYTE (in src/pafrm.h) */
191 #define temp_PAFRM_CONFIG_COMMAND_SEC_BYTE 0xce
193 /* Hardcode here until API added to PA to remove entries with minimal
194 * overhead. This define maps to PAFRM_DEST_PKTDMA (in src/pafrm.h) */
195 #define temp_PAFRM_DEST_PKTDMA 6
196 /* Hardcode here until API added to PA to remove entries with minimal
197 * overhead. This define maps to PAFRM_DEST_DISCARD (in src/pafrm.h) */
198 #define temp_PAFRM_DEST_DISCARD 10
/* Commands to PA - Hardcode here until API added to PA to remove entries with minimal
 * overhead. This structure maps to pafrmCommand_t (in src/pafrm.h) and must match that
 * layout exactly, field for field. Multi-byte fields are written in big endian byte
 * order (see SWIZ usage in pa_format_fcmd) since the PASS firmware expects big endian. */
typedef struct {
    uint32_t commandResult; /* Returned to the host, ignored on entry to the PASS */
    uint8_t command; /* Command value */
    uint8_t magic; /* Magic value */
    uint16_t comId; /* Used by the host to identify command results */
    uint32_t retContext; /* Returned in swInfo to identify packet as a command */
    uint16_t replyQueue; /* Specifies the queue number for the message reply. 0xffff to toss the reply */
    uint8_t replyDest; /* Reply destination (host0, host1, discard are the only valid values) */
    uint8_t flowId; /* Flow ID used to assign packet at reply */
    uint32_t cmd; /* First word of the command */
} tempPaCmd;
/* Delete entry from LUT1 - Hardcode here until API added to PA to remove entries with minimal
 * overhead. This structure maps to pafrmCommandDelLut1_t (in src/pafrm.h); it is overlaid
 * on the first command word (tempPaCmd.cmd) by pa_format_fcmd. */
typedef struct {
    uint8_t index; /* LUT1 index of the entry to delete */
} tempPaCmdDelLut1;
220 /*************************************************************************
221 * FUNCTION PURPOSE: Format Firmware Command Header
222 *************************************************************************
223 * DESCRIPTION: Clear and construct the firmware command header
224 * Returns pointer to the firmware command
225 *************************************************************************/
226 static tempPaCmd *pa_format_fcmd(void *pCmd, paCmdReply_t *reply, uint8_t lutIndex)
227 {
228 tempPaCmd *fcmd = (tempPaCmd *) pCmd;
229 uint16_t csize = sizeof(tempPaCmd)+sizeof(tempPaCmdDelLut1)-sizeof(uint32_t);
230 tempPaCmdDelLut1 *del;
231 uint8_t lut = temp_PAFRM_CONFIG_COMMAND_DEL_LUT1;
233 memset(fcmd, 0, csize);
235 fcmd->command = SWIZ(lut);
236 fcmd->magic = temp_PAFRM_CONFIG_COMMAND_SEC_BYTE;
237 fcmd->comId = 0;
238 fcmd->retContext = SWIZ(reply->replyId);
239 fcmd->replyQueue = SWIZ(reply->queue);
240 fcmd->flowId = SWIZ(reply->flowId);
242 /* Validity of the destination was already checked (HOST), so no other cases
243 * must be considered */
244 if (reply->dest == pa_DEST_HOST)
245 fcmd->replyDest = temp_PAFRM_DEST_PKTDMA;
246 else
247 fcmd->replyDest = temp_PAFRM_DEST_DISCARD;
249 del = (tempPaCmdDelLut1 *)&(fcmd->cmd);
251 del->index = lutIndex;
252 del->index = SWIZ(del->index);
254 return(fcmd);
255 }
256 /* END: PA API hardcoded here until it can be added to PA LLD */
258 /* FUNCTION PURPOSE: Converts L2 addresses to global
259 ***********************************************************************
260 * DESCRIPTION: Converts local l2 addresses to their global address
261 */
262 static uint32_t l2_global_address (uint32_t addr)
263 {
264 /* Compute the global address. */
265 return (addr + (0x10000000 + (DNUM * 0x1000000)));
266 }
/* FUNCTION PURPOSE: Cycle Delay
 ***********************************************************************
 * DESCRIPTION: Busy-waits for the specified number of CPU cycles using
 *              the time stamp counter low register (TSCL).
 *
 *              count    - number of cycles to spin (ignored when initTSCL
 *                         is set; non-positive counts return immediately)
 *              initTSCL - when non-zero, starts/zeroes the TSCL counter
 *                         instead of delaying. TSCL must have been started
 *                         once this way before delays are meaningful.
 */
static void cycleDelay(int count, int initTSCL)
{
    /* Snapshot the counter before any branching so the delay measurement
     * starts as close to function entry as possible */
    uint32_t TSCLin = TSCL;

    if (initTSCL) {
        /* Writing TSCL starts the free-running counter */
        CSL_chipWriteTSCL(0);
    }
    else {
        if (count <= 0)
            return;

        /* Unsigned subtraction handles TSCL wrap-around correctly */
        while ((TSCL - TSCLin) < (uint32_t)count);
    }
}
287 /* FUNCTION PURPOSE: Resets a specified CPDMA channel or flow
288 ***********************************************************************
289 * DESCRIPTION: Resets the specified CPDMA channel or flow
290 */
291 static void resetDmaCh(Cppi_Handle cppiHandle, int32_t dmaNum, int32_t chNum, Fm_ResType resType)
292 {
293 Cppi_RxChInitCfg rxChCfg;
294 Cppi_TxChInitCfg txChCfg;
295 Cppi_ChHnd chHandle;
296 Cppi_RxFlowCfg rxFlowCfg;
297 Cppi_FlowHnd flowHandle;
298 uint8_t isAllocated;
299 Cppi_Result cppiResult;
301 if (resType == Fm_res_CpdmaRxCh) {
302 memset((void *) &rxChCfg, 0, sizeof(rxChCfg));
303 rxChCfg.channelNum = chNum;
305 if (chHandle = Cppi_rxChannelOpen(cppiHandle, &rxChCfg, &isAllocated)) {
306 if(cppiResult = Cppi_channelDisable(chHandle) != CPPI_SOK) {
307 Fault_Mgmt_osalLog("Failed to disable cppi DMA %d rx ch %d with err %d\n", dmaNum, chNum, cppiResult);
308 }
309 if(cppiResult = Cppi_channelClose(chHandle) != CPPI_SOK) {
310 Fault_Mgmt_osalLog("Failed to close cppi DMA %d rx ch %d with err %d\n", dmaNum, chNum, cppiResult);
311 }
312 }
313 else {
314 Fault_Mgmt_osalLog("DMA %d, RX channel %d failed to open\n", dmaNum, chNum);
315 }
316 }
317 else if (resType == Fm_res_CpdmaTxCh) {
318 memset((void *) &txChCfg, 0, sizeof(txChCfg));
319 txChCfg.channelNum = chNum;
321 if (chHandle = Cppi_txChannelOpen(cppiHandle, &txChCfg, &isAllocated)) {
322 if(cppiResult = Cppi_channelDisable(chHandle) != CPPI_SOK) {
323 Fault_Mgmt_osalLog("Failed to disable cppi DMA %d tx ch %d with err %d\n", dmaNum, chNum, cppiResult);
324 }
325 if(cppiResult = Cppi_channelClose(chHandle) != CPPI_SOK) {
326 Fault_Mgmt_osalLog("Failed to close cppiDMA %d tx ch %d with err %d\n", dmaNum, chNum, cppiResult);
327 }
328 }
329 else {
330 Fault_Mgmt_osalLog("DMA %d, TX channel %d failed to open\n", dmaNum, chNum);
331 }
332 }
333 else if (resType == Fm_res_CpdmaRxFlow) {
334 memset((void *) &rxFlowCfg, 0, sizeof(rxFlowCfg));
335 rxFlowCfg.flowIdNum = chNum;
337 if (flowHandle = Cppi_configureRxFlow(cppiHandle, &rxFlowCfg, &isAllocated)) {
338 if(cppiResult = Cppi_closeRxFlow(flowHandle) != CPPI_SOK) {
339 Fault_Mgmt_osalLog("Failed to disable cppi DMA %d rx flow %d with err %d\n", dmaNum, chNum, cppiResult);
340 }
341 }
342 else {
343 Fault_Mgmt_osalLog("DMA %d, RX flow %d failed to open\n", dmaNum, chNum);
344 }
345 }
346 }
348 /* FUNCTION PURPOSE: Gets the max CPPI rx flows for a CPDMA
349 ***********************************************************************
350 * DESCRIPTION: Returns the maximum number of rx flows for the
351 * given CPDMA
352 *
353 * CPPI API hardcoded here until it can be added to CPPI LLD
354 */
355 static uint32_t getDmaMaxRxFlow(Cppi_CpDma dmaNum)
356 {
357 uint32_t maxRxFlow;
359 maxRxFlow = cppiGblCfgParams[dmaNum].maxRxFlow;
360 return (uint32_t) maxRxFlow;
361 }
363 /* FUNCTION PURPOSE: Enables a CPPI tx channel
364 ***********************************************************************
365 * DESCRIPTION: Directly accesses the CPPI registers to Enable
366 * the specified transmit channel
367 *
368 * CPPI API hardcoded here until it can be added to CPPI LLD
369 */
370 static Cppi_Result txChannelExpressEnable (Cppi_CpDma dmaNum, uint32_t channelNum)
371 {
372 uint32_t value = 0;
373 Cppi_Result retVal = CPPI_SOK;
375 if (channelNum > cppiGblCfgParams[dmaNum].maxTxCh) {
376 retVal = FM_ERROR_CPPI_TX_CHANNEL_INVALID;
377 goto exitCs;
378 }
380 CSL_FINS (value, CPPIDMA_TX_CHANNEL_CONFIG_TX_CHANNEL_GLOBAL_CONFIG_REG_A_TX_ENABLE, (uint32_t) 1);
381 cppiGblCfgParams[dmaNum].txChRegs->TX_CHANNEL_GLOBAL_CONFIG[channelNum].TX_CHANNEL_GLOBAL_CONFIG_REG_A = value;
383 exitCs:
384 return retVal;
385 }
387 /* FUNCTION PURPOSE: Disables a CPPI tx channel
388 ***********************************************************************
389 * DESCRIPTION: Directly accesses the CPPI registers to disable
390 * the specified transmit channel
391 *
392 * CPPI API hardcoded here until it can be added to CPPI LLD
393 */
394 static Cppi_Result txChannelExpressDisable (Cppi_CpDma dmaNum, uint32_t channelNum)
395 {
396 Cppi_Result retVal = CPPI_SOK;
398 if (channelNum > cppiGblCfgParams[dmaNum].maxTxCh) {
399 retVal = FM_ERROR_CPPI_TX_CHANNEL_INVALID;
400 goto exitCs;
401 }
403 cppiGblCfgParams[dmaNum].txChRegs->TX_CHANNEL_GLOBAL_CONFIG[channelNum].TX_CHANNEL_GLOBAL_CONFIG_REG_A = 0;
405 exitCs:
406 return retVal;
407 }
409 /* FUNCTION PURPOSE: Disable TX DMAs used by Linux
410 ***********************************************************************
411 * DESCRIPTION: Disables the PASS and QMSS TX DMAs that are owned and
412 * operated by Linux.
413 */
414 static void linuxTxDmaDisable(Fm_ExcludedResource *excludedResList, uint32_t listSize)
415 {
416 Fm_ExclusionParams exclusionParams;
417 int32_t i;
419 /* Disable all PASS & QMSS TX DMAs owned by Linux */
420 memset(&exclusionParams, 0, sizeof(exclusionParams));
421 exclusionParams.exclusionList = excludedResList;
422 exclusionParams.numListEntries = listSize;
424 exclusionParams.resType = Fm_res_CpdmaTxCh;
425 exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_PASS_CPDMA;
426 for (i = 0; i < fmGetDmaMaxTxCh(Cppi_CpDma_PASS_CPDMA); i++) {
427 exclusionParams.resourceNum = i;
428 if (fmExclusionIsExcluded(&exclusionParams)) {
429 txChannelExpressDisable(Cppi_CpDma_PASS_CPDMA, i);
430 }
431 }
432 exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_QMSS_CPDMA;
433 for (i = 0; i < fmGetDmaMaxTxCh(Cppi_CpDma_QMSS_CPDMA); i++) {
434 exclusionParams.resourceNum = i;
435 if (fmExclusionIsExcluded(&exclusionParams)) {
436 txChannelExpressDisable(Cppi_CpDma_QMSS_CPDMA, i);
437 }
438 }
439 }
441 /* FUNCTION PURPOSE: Enable TX DMAs used by Linux
442 ***********************************************************************
443 * DESCRIPTION: Enables the PASS and QMSS TX DMAs that are owned and
444 * operated by Linux.
445 */
446 static void linuxTxDmaEnable(Fm_ExcludedResource *excludedResList, uint32_t listSize)
447 {
448 Fm_ExclusionParams exclusionParams;
449 int32_t i;
451 memset(&exclusionParams, 0, sizeof(exclusionParams));
452 exclusionParams.exclusionList = excludedResList;
453 exclusionParams.numListEntries = listSize;
455 /* Enable the Linux TX CPDMAs */
456 exclusionParams.resType = Fm_res_CpdmaTxCh;
457 exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_PASS_CPDMA;
458 for (i = 0; i < fmGetDmaMaxTxCh(Cppi_CpDma_PASS_CPDMA); i++) {
459 exclusionParams.resourceNum = i;
460 if (fmExclusionIsExcluded(&exclusionParams)) {
461 txChannelExpressEnable(Cppi_CpDma_PASS_CPDMA, i);
462 }
463 }
464 exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_QMSS_CPDMA;
465 for (i = 0; i < fmGetDmaMaxTxCh(Cppi_CpDma_QMSS_CPDMA); i++) {
466 exclusionParams.resourceNum = i;
467 if (fmExclusionIsExcluded(&exclusionParams)) {
468 txChannelExpressEnable(Cppi_CpDma_QMSS_CPDMA, i);
469 }
470 }
471 }
/* FUNCTION PURPOSE: Disables a QMSS accumulator channel
 ***********************************************************************
 * DESCRIPTION: Disables a QMSS accumulator channel the same as the
 *              Qmss_disableAccumulator API except a timeout is added
 *              when waiting for response from the PDSP firmware; if
 *              no response is observed the command is re-sent.
 *
 * QMSS API hardcoded here until it can be added to QMSS LLD
 */
static Qmss_Result disableAccumChWithTimeout(Qmss_PdspId pdspId, uint8_t channel)
{
    Qmss_AccCmd cmd;
    volatile uint32_t *cmdPtr, *reg;
    uint32_t index;
    uint8_t result;
    void *key;
    uint32_t gotResponse = FM_FALSE;
    uint32_t timeoutCnt;

    /* Begin Critical Section before accessing shared resources. */
    key = Qmss_osalCsEnter ();

    while(!gotResponse) {
        /* Build the DISABLE_CHANNEL command: channel in bits 7:0,
         * command code in bits 15:8 of word0 */
        memset ((void *) &cmd, 0, sizeof (Qmss_AccCmd));
        CSL_FINSR (cmd.word0, 7, 0, channel);
        CSL_FINSR (cmd.word0, 15, 8, Qmss_AccCmd_DISABLE_CHANNEL);

        /* Point to the accumulator command register's last word */
        reg = (uint32_t *) ((uint8_t *) qmssGblCfgParams.qmPdspCmdReg[pdspId] + 4 * 4);

        /* Write command word last */
        cmdPtr = ((uint32_t *) &cmd) + 4;

        /* Copy the 5-word command highest word first so word0 (which
         * triggers the firmware) is written last */
        for (index = 0; index < 5; index++)
            *reg-- = *cmdPtr--;

        /* Wait for the command to clear.
         * reg was decremented once past word0 by the copy loop above;
         * the increment moves it back to word0 of the command register. */
        reg++;
        timeoutCnt = 0;
        do
        {
            /* Command field (bits 15:8) is cleared by the firmware once
             * the command has been consumed */
            result = CSL_FEXTR (*reg, 15, 8);

            if (result != 0) {
                /* Firmware has seen (or is still holding) the command */
                gotResponse = FM_TRUE;
            }
            else {
                /* NOTE(review): this timeout/resend path only runs when the
                 * command field reads back 0, but the do-while condition
                 * also exits on result == 0, so timeoutCnt can never reach
                 * the 1000000000 threshold - the inner timeout check appears
                 * to be dead code and the resend is driven solely by the
                 * outer !gotResponse loop. Confirm against the intended
                 * Qmss_disableAccumulator behavior. */
                cycleDelay(1000, FM_FALSE);
                timeoutCnt += 1000;
                if (timeoutCnt >= 1000000000) {
                    /* Resend the command */
                    break;
                }
            }
        } while (result != 0);
    }

    /* End Critical Section */
    Qmss_osalCsExit (key);

    /* Return code is reported by the firmware in bits 31:24 of word0 */
    return (Qmss_Result) (CSL_FEXTR (*reg, 31, 24));
}
536 /* FUNCTION PURPOSE: Resets a QMSS memory region
537 ***********************************************************************
538 * DESCRIPTION: Directly accesses the QMSS descriptor registers
539 * to reset a specified memory region
540 *
541 * QMSS API hardcoded here until it can be added to QMSS LLD
542 */
543 static Qmss_Result expressResetMemoryRegion (Qmss_MemRegion memRegion)
544 {
545 int32_t index = (int32_t) memRegion;
547 if (memRegion == Qmss_MemRegion_MEMORY_REGION_NOT_SPECIFIED)
548 return QMSS_MEMREGION_INVALID_INDEX;
550 qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_BASE_ADDRESS_REG = (uint32_t)0;
551 qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_START_INDEX_REG = (uint32_t)0;
552 qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_DESCRIPTOR_SETUP_REG = 0;
554 return QMSS_SOK;
555 }
557 /* FUNCTION PURPOSE: Gets a Memory Region Base Address
558 ***********************************************************************
559 * DESCRIPTION: Directly accesses the QMSS descriptor registers
560 * to get the region base address. A return of NULL
561 * means memory region is not in use. The physical
562 * base address can not be NULL if it is in use.
563 *
564 * QMSS API hardcoded here until it can be added to QMSS LLD
565 */
566 static uint32_t getMemoryRegionBaseAddr (Qmss_MemRegion memRegion)
567 {
568 int32_t index = (int32_t) memRegion;
570 return (qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_BASE_ADDRESS_REG);
571 }
573 /* FUNCTION PURPOSE: Gets a Memory Region Descriptor Block Size
574 ***********************************************************************
575 * DESCRIPTION: Directly accesses the QMSS descriptor registers
576 * to get the descriptor block size for the region
577 *
578 * QMSS API hardcoded here until it can be added to QMSS LLD
579 */
580 static uint32_t getMemoryRegionDescBlockSize (Qmss_MemRegion memRegion)
581 {
582 int32_t index = (int32_t) memRegion;
583 uint32_t descSizeBytes = 0;
584 uint32_t powRegSize = 0;
585 uint32_t numDesc = 0;
587 descSizeBytes = (uint32_t) CSL_FEXT (qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_DESCRIPTOR_SETUP_REG,
588 QM_DESCRIPTOR_REGION_CONFIG_MEMORY_REGION_DESCRIPTOR_SETUP_REG_DESC_SIZE);
589 /* Value stored as multiplier minus 1 that needs to be applied to 16 to get descriptor size */
590 descSizeBytes = (descSizeBytes + 1) * 16;
592 powRegSize = (uint32_t) CSL_FEXT (qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_DESCRIPTOR_SETUP_REG,
593 QM_DESCRIPTOR_REGION_CONFIG_MEMORY_REGION_DESCRIPTOR_SETUP_REG_REG_SIZE);
594 /* Value stored as 2^(5+stored_value) = number of descriptors */
595 numDesc = (32UL << powRegSize);
597 return (numDesc * descSizeBytes);
598 }
600 /* FUNCTION PURPOSE: Cleans QMSS queues
601 ***********************************************************************
602 * DESCRIPTION: QMSS queues cleaned. The steps taken to clean the
603 * queues differs based on whether the queue is part of
604 * the provided exclusion list. Queues not in the
605 * exclusion list will be emptied of all descriptors
606 * Queues in the exclusion list will be cleaned
607 * using the following process:
608 *
609 * Queue cleanup process
610 * - Disable all PASS DMA TX channels that are owned by Linux
611 * - Pause QoS (if supported - not currently supported)
612 * - Wipe all QMSS queues that are DSP owned
613 * - Read QMSS memory region registers for Linux inserted regions to get addresses associated
614 * with Linux pushed descriptors
615 * - For each queue that may be used by Linux
616 * - Allocate a scratch queue and a cleanup queue from the list of wiped QMSS queues
617 * - Divert all descriptors in Linux owned queue to scratch queue
618 * - Pop descriptors off scratch queue
619 * - Discard if descriptor not in Linux memory region
 *                           - Push onto cleanup queue if descriptor is in Linux memory region
621 * - Divert descriptors in cleanup queue back to original queue
622 * - Enable PASS DMA TX channels
623 */
static int32_t cleanQmssQueues(Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    Fm_ExclusionParams exclusionParams;
    uint8_t isAllocated;
    int32_t i, j;
    /* NOTE(review): queue handles are compared against NULL (0) as a
     * "not yet allocated" sentinel below; if handle value 0 is a valid
     * general purpose queue this sentinel is ambiguous - confirm against
     * the QMSS LLD handle encoding */
    Qmss_QueueHnd scratchQ = NULL;
    Qmss_QueueHnd cleanQ = NULL;
    uint32_t desc;
    uint32_t memRegStart;
    uint32_t memRegEnd;
    Fm_Result retVal = FM_FAULT_CLEANUP_OK;

    /* Stop flow of descriptors while cleaning QM */
    linuxTxDmaDisable(excludedResList, listSize);

    /* Cleanup QMSS queues */

    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList = excludedResList;
    exclusionParams.numListEntries = listSize;

    /* Empty all queues not owned by Linux first */
    exclusionParams.resType = Fm_res_QmssQueue;
    for (i = 0; i < QMSS_MAX_QUEUES; i++) {
        exclusionParams.resourceNum = i;
        if (!fmExclusionIsExcluded(&exclusionParams)) {
            Qmss_queueEmpty((Qmss_QueueHnd) i);
        }
    }

    /* Clean all Linux-owned queues of DSP-based descriptors second */

    /* Allocate a scratchQ for temporarily storing the descriptor contents of a queue being swept of
     * DSP descriptors. Keep opening queues until one outside the exclusion list is found;
     * excluded (Linux-owned) queues are parked in invalidQs and closed afterwards. */
    memset(&invalidQs[0], 0, sizeof(invalidQs));
    i = 0;
    while (scratchQ == NULL) {
        scratchQ = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
        exclusionParams.resourceNum = scratchQ;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            /* Store the queues that can't be used until after both the scratchQ and cleanQ have been found. */
            invalidQs[i++] = scratchQ;
            scratchQ = NULL;
        }
    }
    /* Allocate a cleanQ to temporarily store the linux-based descriptors filtered from a Linux-based queue. */
    while (cleanQ == NULL) {
        cleanQ = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
        exclusionParams.resourceNum = cleanQ;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            /* Store the queues that can't be used until after both the scratchQ and cleanQ have been found. */
            invalidQs[i++] = cleanQ;
            cleanQ = NULL;
        }
    }
    /* Free any invalidQs - the i counter holds how many were parked above */
    for (j = 0; j < i; j++) {
        Qmss_queueClose(invalidQs[j]);
    }

    /* Need to check allocated queue against those owned by linux */
    for (i = 0; i < QMSS_MAX_QUEUES; i++) {
        exclusionParams.resType = Fm_res_QmssQueue;
        exclusionParams.resourceNum = i;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            /* In K2 will need to open twice the scratch and clean queues (one from each QM) */
            Qmss_queueDivert(i, scratchQ, Qmss_Location_TAIL);

            /* Pop each descriptor from the queue being swept. If the descriptor lies within
             * the range of a Linux-owned memory region it is saved via a push onto a clean Q. Otherwise,
             * the descriptor is dropped. */
            while (desc = (uint32_t)Qmss_queuePop(scratchQ)) {
                for (j = 0; j < QMSS_MAX_MEM_REGIONS; j++) {
                    /* A zero base address means the region is unused (see getMemoryRegionBaseAddr) */
                    if (memRegStart = getMemoryRegionBaseAddr((Qmss_MemRegion)j)) {
                        memRegEnd = memRegStart + getMemoryRegionDescBlockSize((Qmss_MemRegion)j);
                        if ((desc >= memRegStart) && (desc < memRegEnd)) {
                            /* Save descriptor */
                            Qmss_queuePushDesc(cleanQ, (void *)desc);
                            break;
                        }
                    }
                }
            }
            /* Move Linux-based descriptors in the cleanQ back into the original queue. The descriptors are
             * diverted to the HEAD in case any descriptors were pushed into the original queue during the
             * sweep process. Pushing to HEAD will guarantee the original descriptors are handled prior to the
             * new ones */
            Qmss_queueDivert(cleanQ, i, Qmss_Location_HEAD);
        }
    }

    /* Restart flow of descriptors */
    linuxTxDmaEnable(excludedResList, listSize);

    /* Leave DMA handles hanging since it can't be closed without closing all the channels. Channels
     * can't be closed without wiping Linux configuration */
    return (retVal);
}
/* FUNCTION PURPOSE: Resets PA PDSPs and LUTs
 ***********************************************************************
 * DESCRIPTION: Resets PA PDSPs and LUTs based on the provided
 *              exclusion list. Returns pa_OK on success or the
 *              PA LLD error from a failed firmware download.
 *
 *              TAKEN FROM pa.c (Pa_resetControl). Replace this code
 *              with call to modified Pa_resetControl when it doesn't
 *              automatically reset all PDSPs. Until then use this
 *              modified function
 *
 *              To reset portions of PA used by DSP
 *              - Disable DSP owned PDSPs
 *              - Clear LUT2 if completely owned by DSP
 *              - Redownload DSP owned PDSPs
 *              - Reenable DSP owned PDSPs
 */
static paReturn_t resetPaPdspsAndLuts(Pa_Handle paHandle, uint32_t numPdsps,
                                      Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    CSL_Pa_ssRegs *passRegs;
    Fm_ExclusionParams exclusionParams;
    uint32_t i;
    uint32_t resetLut2 = FM_TRUE;
    paReturn_t paRet = pa_OK;

    if (listSize) {
        /* A present exclusion list signifies another core (typically ARM Linux) is in
         * control of PA. As a result, the firmware download and reset must be selective */

        /* Initialize the exclusion parameters */
        memset(&exclusionParams, 0, sizeof(exclusionParams));
        exclusionParams.exclusionList = excludedResList;
        exclusionParams.numListEntries = listSize;

        passRegs = (CSL_Pa_ssRegs *)CSL_PA_SS_CFG_REGS;

        /* Put each of the PDSPs into reset (PC = 0)*/
        exclusionParams.resType = Fm_res_PaPdsp;
        for (i = 0; i < numPdsps; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                passRegs->PDSP_CTLSTAT[i].PDSP_CONTROL = 0;
            }
        }

        /* Reset LUT2 if applicable - skip the reset if any excluded (Linux-owned)
         * LUT entry lives in LUT2 (exResInfo == 2) */
        for (i = 0; i < listSize; i++) {
            if ((excludedResList[i].resType == Fm_res_PaLutEntry) &&
                (excludedResList[i].exResInfo == 2)) {
                resetLut2 = FM_FALSE;
                break;
            }
        }

        if (resetLut2) {
            passRegs->LUT2.LUT2_SOFT_RESET = 1;
        }

        exclusionParams.resType = Fm_res_PaPdsp;
        /* PDPSs 0-2 use image c1 */
        for (i = 0; i < 3; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                paRet = Pa_downloadImage (paHandle, i, (Ptr)c1, c1Size);
                if (paRet != pa_OK) {
                    goto errorExit;
                }
            }
        }
        /* PDSP 3 uses image c2 */
        exclusionParams.resourceNum = 3;
        if (!fmExclusionIsExcluded(&exclusionParams)) {
            paRet = Pa_downloadImage (paHandle, 3, (Ptr)c2, c2Size);
            if (paRet != pa_OK) {
                goto errorExit;
            }
        }
        /* PDSPs 4-5 use image m */
        for (i = 4; i < numPdsps; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                paRet = Pa_downloadImage (paHandle, i, (Ptr)m, mSize);
                if (paRet != pa_OK) {
                    goto errorExit;
                }
            }
        }

        /* Should be able to use PA's PDSP enable API since an active PDSP will not be
         * modified */
        paRet = Pa_resetControl(paHandle, pa_STATE_ENABLE);
        /* NOTE(review): comparing a paReturn_t to the pa_STATE_ENABLE state value
         * mirrors how Pa_resetControl reports the resulting state - confirm against
         * the PA LLD version in use */
        if (paRet == pa_STATE_ENABLE) {
            paRet = pa_OK;
        }
    }
    else {
        /* Perform full firmware download and reset since another core is not
         * in control of PA */

        Pa_resetControl (paHandle, pa_STATE_RESET);

        /* PDPSs 0-2 use image c1 */
        for (i = 0; i < 3; i++)
            Pa_downloadImage (paHandle, i, (Ptr)c1, c1Size);

        /* PDSP 3 uses image c2 */
        Pa_downloadImage (paHandle, 3, (Ptr)c2, c2Size);

        /* PDSPs 4-5 use image m */
        for (i = 4; i < 6; i++)
            Pa_downloadImage (paHandle, i, (Ptr)m, mSize);

        paRet = Pa_resetControl (paHandle, pa_STATE_ENABLE);

        if (paRet == pa_STATE_ENABLE) {
            paRet = pa_OK;
        }
    }

errorExit:
    return (paRet);
}
846 /* FUNCTION PURPOSE: Check if NetCp (PA & SA) subsystem is powered up
847 ***************************************************************************************
848 * DESCRIPTION: This function checks the power status of the NetCp (PA & SA) subsystem domains
849 */
850 static uint32_t isNetCpPoweredUp (void)
851 {
852 /* Get peripheral PSC status */
853 if ((CSL_PSC_getPowerDomainState(CSL_PSC_PD_PASS) == PSC_PDSTATE_ON) &&
854 (CSL_PSC_getModuleState (CSL_PSC_LPSC_PKTPROC) == PSC_MODSTATE_ENABLE) &&
855 (CSL_PSC_getModuleState (CSL_PSC_LPSC_CPGMAC) == PSC_MODSTATE_ENABLE) &&
856 (CSL_PSC_getModuleState (CSL_PSC_LPSC_Crypto) == PSC_MODSTATE_ENABLE)) {
857 /* On */
858 return (FM_TRUE);
859 }
860 else {
861 return (FM_FALSE);
862 }
863 }
865 /* FUNCTION PURPOSE: Resets and Initializes PA
866 ***********************************************************************
 867  * DESCRIPTION: Resets and initializes PA. Certain steps
868 * will be skipped if an exclusion list
869 * is provided. This signifies another
870 * entity (typically ARM Linux) has already setup
871 * portions of PA which should not be reset
872 */
873 static Pa_Handle resetAndInitPa(uint32_t numPdsps, Fm_ExcludedResource *excludedResList, uint32_t listSize)
874 {
875 paSizeInfo_t paSize;
876 paConfig_t paCfg;
877 paReturn_t paRet;
878 int bufSizes[pa_N_BUFS];
879 int bufAligns[pa_N_BUFS];
880 void *bufBases[pa_N_BUFS];
881 Pa_Handle paHandle = NULL;
883 /* Stop flow of descriptors while resetting the PA PDSPs */
884 linuxTxDmaDisable(excludedResList, listSize);
885 /* Delay so PDSPs can finish processing any queued descriptors (commands) */
886 cycleDelay(5000, FM_FALSE);
888 /* The maximum number of handles that can exists are 32 for L2, and 64 for L3. */
889 memset(&paSize, 0, sizeof(paSizeInfo_t));
890 memset(&paCfg, 0, sizeof(paConfig_t));
891 memset(bufBases, 0, sizeof(bufBases));
892 paSize.nMaxL2 = PA_MAX_NUM_L2_HANDLES;
893 paSize.nMaxL3 = PA_MAX_NUM_L3_HANDLES;
894 paSize.nUsrStats = 0;
895 paSize.nVlnkMax = 0;
896 paRet = Pa_getBufferReq(&paSize, bufSizes, bufAligns);
898 if (paRet != pa_OK) {
899 goto errorExit;
900 }
902 /* The first buffer is used as the instance buffer */
903 if (((Uint32)paMemPaInst & (bufAligns[0] - 1)) ||
904 (sizeof(paMemPaInst) < bufSizes[0])) {
905 goto errorExit;
906 }
907 bufBases[0] = (void *)paMemPaInst;
909 /* The second buffer is the L2 table */
910 if (((Uint32)paMemL2Ram & (bufAligns[1] - 1)) ||
911 (sizeof(paMemL2Ram) < bufSizes[1])) {
912 goto errorExit;
913 }
914 bufBases[1] = (void *)paMemL2Ram;
916 /* The third buffer is the L3 table */
917 if (((Uint32)paMemL3Ram & (bufAligns[2] - 1)) ||
918 (sizeof(paMemL3Ram) < bufSizes[2])) {
919 goto errorExit;
920 }
921 bufBases[2] = (void *)paMemL3Ram;
923 paCfg.initTable = TRUE;
924 if (excludedResList) {
925 paCfg.initDefaultRoute = FALSE;
926 }
927 else {
928 paCfg.initDefaultRoute = TRUE;
929 }
930 paCfg.baseAddr = CSL_PA_SS_CFG_REGS;
931 paCfg.sizeCfg = &paSize;
933 paRet = Pa_create(&paCfg, bufBases, &paHandle);
934 if (paRet != pa_OK) {
935 goto errorExit;
936 }
938 /* Reset the portions of PA that are used by the DSP. Avoid resetting
939 * anything used by Linux */
940 paRet = resetPaPdspsAndLuts(paHandle, numPdsps, excludedResList, listSize);
941 if (paRet != pa_OK) {
942 paHandle = NULL;
943 }
945 /* Restart DMAs */
946 linuxTxDmaEnable(excludedResList, listSize);
948 errorExit:
949 return (paHandle);
950 }
952 /* FUNCTION PURPOSE: Resets the PA Global Config
953 ***********************************************************************
954 * DESCRIPTION: The PASS global configuration stored in the PA
955 * scratch memory is reset to default values
956 */
static Fm_Result paSetDefaultGblCfg(Pa_Handle passHandle, Qmss_QueueHnd cmdRespQ, Qmss_QueueHnd freeQ, int16_t flowId)
{
    paSysConfig_t paDefGlobalCfg;
    Cppi_Desc *monolithicDesc;       /* descriptor used to carry each PA command */
    paCmdReply_t paReply;            /* where PA should send command responses */
    uint8_t *descBuf;                /* command buffer carved out of the descriptor */
    uint32_t cmdLen;
    int cmdDest;                     /* PA TX queue index returned by Pa_control() */
    uint32_t psCmd;
    tempPaCmd *paRespCmd;
    uint32_t paRespCmdLen;
    paReturn_t paRet;
    Fm_Result retVal = FM_FAULT_CLEANUP_OK;
    paCtrlInfo_t ctrlInfo;
    /* Set the default values, taken from pa.h */
    paProtocolLimit_t paDefProtocolLimit = {
        pa_PROTOCOL_LIMIT_NUM_VLANS_DEF,  /* Number of VLANs */
        pa_PROTOCOL_LIMIT_NUM_IP_DEF,     /* Number of IPs */
        pa_PROTOCOL_LIMIT_NUM_GRE_DEF     /* Number of GREs */
    };
    paCmdSetConfig_t paDefCmdSetCfg = {
        64  /* Number of command sets */
    };
    /* NOTE: both fields deliberately evaluate to 0 (X - X); written this way
     * to show which pa.h maximums the zeros correspond to */
    paUsrStatsConfig_t paDefUsrStatsCfg = {
        (pa_USR_STATS_MAX_COUNTERS - pa_USR_STATS_MAX_COUNTERS),        /* Number of user stats */
        (pa_USR_STATS_MAX_64B_COUNTERS - pa_USR_STATS_MAX_64B_COUNTERS) /* Number of 64-bit user stats */
    };

    paQueueDivertConfig_t paDefQueueDivertCfg = {
        0,  /* Monitoring Queue */
        0   /* flow Id */
    };
    paPacketControlConfig_t paDefPktCtrlCfg = {
        pa_PKT_CTRL_HDR_VERIFY_IP,  /* ctrlBitMap */
        0,                          /* rxPaddingErrStatsIndex */
        0                           /* txPaddingStatsIndex */
    };
    paIpReassmConfig_t paDefReassmConfig = {
        0,  /* numTrafficFlow */
        0,  /* destFlowId */
        0   /* destQueue */
    };
    paIpsecNatTConfig_t paDefNatTCfg = {
        0,  /* ctrlBitMap */
        0   /* UDP port number */
    };

    /* Aggregate all default sub-configurations into the system config */
    paDefGlobalCfg.pCmdSetConfig = &paDefCmdSetCfg;
    paDefGlobalCfg.pInIpReassmConfig = &paDefReassmConfig;
    paDefGlobalCfg.pOutIpReassmConfig = &paDefReassmConfig;
    paDefGlobalCfg.pPktControl = &paDefPktCtrlCfg;
    paDefGlobalCfg.pProtoLimit = &paDefProtocolLimit;
    paDefGlobalCfg.pQueueDivertConfig = &paDefQueueDivertCfg;
    paDefGlobalCfg.pUsrStatsConfig = &paDefUsrStatsCfg;

    /* Route PA's command responses back to the caller-supplied response
     * queue via the caller-supplied CPPI RX flow */
    memset(&paReply, 0, sizeof(paReply));
    paReply.dest = pa_DEST_HOST;
    paReply.queue = Qmss_getQIDFromHandle(cmdRespQ);
    paReply.flowId = flowId;

    /* Set system global default configuration */
    ctrlInfo.code = pa_CONTROL_SYS_CONFIG;
    ctrlInfo.params.sysCfg = paDefGlobalCfg;

    /* Build the command in-place in a free monolithic descriptor's data area */
    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(freeQ));
    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc, &descBuf, &cmdLen);
    /* Available command space = descriptor size minus data offset */
    cmdLen = SIZE_MONO_DESC - Cppi_getDataOffset(Cppi_DescType_MONOLITHIC, monolithicDesc);
    paRet = Pa_control(passHandle, &ctrlInfo, (paCmd_t)descBuf, (uint16_t *)&cmdLen, &paReply, &cmdDest);
    if (paRet != pa_OK) {
        if (paRet == pa_INSUFFICIENT_CMD_BUFFER_SIZE) {
            retVal = FM_ERROR_DESC_BUF_TOO_SMALL;
        }
        else {
            retVal = FM_ERROR_PASS_SETTING_DEF_GLBL_CMD;
        }
        goto cleanupQueue;
    }

    /* Mark the descriptor's PS data as a PA config command and send it to
     * the PA TX queue selected by Pa_control() */
    psCmd = PASAHO_PACFG_CMD;
    Cppi_setPSData (Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t *)&psCmd, 4);
    Cppi_setPacketLen(Cppi_DescType_MONOLITHIC, monolithicDesc, cmdLen);
    Qmss_queuePushDescSize(paTxQs[cmdDest - pa_CMD_TX_DEST_0], (uint32_t *)monolithicDesc, SIZE_MONO_DESC);

    /* Wait for response from PA (busy-wait; no timeout) */
    while (Qmss_getQueueEntryCount(cmdRespQ) == 0){};
    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(cmdRespQ));
    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc,(uint8_t **)&paRespCmd, &paRespCmdLen);

    /* Non-zero commandResult means PA rejected the configuration */
    if (paRespCmd->commandResult) {
        retVal = FM_ERROR_PASS_GBL_DEF_CFG_NOT_SET;
        goto cleanupQueue;
    }
    /* Recycle the response descriptor back to the free queue */
    Qmss_queuePushDescSize(freeQ, monolithicDesc, SIZE_MONO_DESC);

    /* Set nat-t global default configuration */
    ctrlInfo.code = pa_CONTROL_IPSEC_NAT_T_CONFIG;
    ctrlInfo.params.ipsecNatTDetCfg = paDefNatTCfg;
    ctrlInfo.params.ipsecNatTDetCfg.ctrlBitMap = 0;

    /* Same build/send/wait sequence as above, now for the NAT-T command */
    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(freeQ));
    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc, &descBuf, &cmdLen);
    cmdLen = SIZE_MONO_DESC - Cppi_getDataOffset(Cppi_DescType_MONOLITHIC, monolithicDesc);
    paRet = Pa_control(passHandle, &ctrlInfo, (paCmd_t)descBuf, (uint16_t *)&cmdLen, &paReply, &cmdDest);
    if (paRet != pa_OK) {
        if (paRet == pa_INSUFFICIENT_CMD_BUFFER_SIZE) {
            retVal = FM_ERROR_DESC_BUF_TOO_SMALL;
        }
        else {
            retVal = FM_ERROR_PASS_SETTING_DEF_NATT_CMD;
        }
        goto cleanupQueue;
    }

    psCmd = PASAHO_PACFG_CMD;
    Cppi_setPSData (Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t *)&psCmd, 4);
    Cppi_setPacketLen(Cppi_DescType_MONOLITHIC, monolithicDesc, cmdLen);
    Qmss_queuePushDescSize(paTxQs[cmdDest - pa_CMD_TX_DEST_0], (uint32_t *)monolithicDesc, SIZE_MONO_DESC);

    /* Wait for response from PA (busy-wait; no timeout) */
    while (Qmss_getQueueEntryCount(cmdRespQ) == 0){};
    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(cmdRespQ));
    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc,(uint8_t **)&paRespCmd, &paRespCmdLen);

    if (paRespCmd->commandResult) {
        retVal = FM_ERROR_PASS_NATT_DEF_CFG_NOT_SET;
    }

cleanupQueue:
    /* Return the last-used descriptor to the free queue in all exit paths */
    Qmss_queuePushDescSize(freeQ, monolithicDesc, SIZE_MONO_DESC);

    return (retVal);
}
1090 /* FUNCTION PURPOSE: Powers down a peripheral
1091 ***********************************************************************
1092 * DESCRIPTION: Powers down the peripheral for a given power
1093 * domain number if the peripheral is currently on.
1094 */
1095 static void periphPowerDown(uint32_t pwrDmnNum)
1096 {
1097 if ((CSL_PSC_getPowerDomainState(pwrDmnNum) == PSC_PDSTATE_ON)) {
1098 /* Peripheral is ON */
1100 /* Power OFF */
1102 //Wait for any previous transitions to complete
1103 while (!CSL_PSC_isStateTransitionDone (pwrDmnNum));
1104 //Write Switch input into the corresponding PDCTL register
1105 CSL_PSC_disablePowerDomain (pwrDmnNum);
1106 //Write PTCMD to start the transition
1107 CSL_PSC_startStateTransition (pwrDmnNum);
1108 //Wait for the transition to complete
1109 while (!CSL_PSC_isStateTransitionDone (pwrDmnNum));
1110 }
1111 }
1113 /**********************************************************************
1114 **************************** Cleanup APIs ****************************
1115 **********************************************************************/
1117 /* FUNCTION PURPOSE: Cleanup peripheral init code
1118 ***********************************************************************
1119 * DESCRIPTION: Initializes some peripherals via their LLDs so that
1120 * API calls used to reset their peripheral resources
1121 * will succeed. QMSS init, CPPI init, etc.
1122 */
1123 Fm_Result fmCleanupInit(uint32_t fullInit)
1124 {
1125 Qmss_InitCfg qmssInitCfg;
1126 Qmss_Result qmssResult;
1127 uint32_t heapSize;
1128 Cppi_InitCfg cppiInitCfg;
1129 Cppi_Result cppiResult;
1130 Fm_Result retVal = FM_FAULT_CLEANUP_OK;
1132 Fault_Mgmt_osalLog("Fault Cleanup: LLD Initialization\n");
1133 /* Writeback status so that Host can view it */
1134 fmCleanupStatus[0] = FM_STATUS_CLEANUP_INITIALIZATION;
1135 Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
1137 /* Init TSCL */
1138 cycleDelay(0, FM_TRUE);
1140 /* Init QMSS */
1141 memset ((void *) &qmssInitCfg, 0, sizeof(qmssInitCfg));
1142 if (fullInit) {
1143 /* Set up the linking RAM. Use internal Linking RAM. */
1144 qmssInitCfg.linkingRAM0Base = 0;
1145 qmssInitCfg.linkingRAM0Size = 0;
1146 qmssInitCfg.linkingRAM1Base = 0x0;
1147 qmssInitCfg.maxDescNum = 0x3fff;
1148 }
1149 else {
1150 qmssInitCfg.qmssHwStatus = QMSS_HW_INIT_COMPLETE;
1151 }
1152 qmssInitCfg.maxDescNum = NUM_MONO_DESC;
1153 qmssResult = Qmss_init(&qmssInitCfg, &qmssGblCfgParams);
1154 if (qmssResult != QMSS_SOK) {
1155 Fault_Mgmt_osalLog("Fault Cleanup: Failed to Init QMSS LLD\n");
1156 retVal = FM_ERROR_QMSS_INIT_FAILED;
1157 goto errorExit;
1158 }
1160 qmssResult = Qmss_start ();
1161 if (qmssResult != QMSS_SOK) {
1162 Fault_Mgmt_osalLog("Fault Cleanup: Failed to Start QMSS LLD\n");
1163 retVal = FM_ERROR_QMSS_INIT_FAILED;
1164 goto errorExit;
1165 }
1167 /* Init CPPI */
1168 cppiResult = Cppi_getHeapReq(cppiGblCfgParams, &heapSize);
1169 cppiInitCfg.heapParams.staticHeapBase = &tempCppiHeap[0];
1170 cppiInitCfg.heapParams.staticHeapSize = heapSize;
1171 cppiInitCfg.heapParams.heapAlignPow2 = 8;
1172 cppiInitCfg.heapParams.dynamicHeapBlockSize = -1;
1174 cppiResult = Cppi_initCfg(cppiGblCfgParams, &cppiInitCfg);
1175 if (cppiResult != CPPI_SOK) {
1176 Fault_Mgmt_osalLog("Fault Cleanup: Failed to Init CPPI LLD\n");
1177 retVal = FM_ERROR_CPPI_INIT_FAILED;
1178 goto errorExit;
1179 }
1181 initComplete = FM_TRUE;
1183 errorExit:
1184 return (retVal);
1185 }
1187 /* FUNCTION PURPOSE: Resets CPPI peripheral resources
1188 ***********************************************************************
1189 * DESCRIPTION: Resets CPPI peripheral resources to their PoR
1190 * state. Resources in the exclusion list will
1191 * not be reset.
1192 */
1193 Fm_Result fmCleanCppi(Fm_ExcludedResource *excludedResList, uint32_t listSize)
1194 {
1195 Fm_ExclusionParams exclusionParams;
1196 Cppi_CpDmaInitCfg dmaCfg;
1197 Cppi_Handle cppiHandle;
1198 int32_t i, j;
1200 if (!initComplete) {
1201 return (FM_ERROR_CLEANUP_INIT_NOT_COMPLETE);
1202 }
1204 Fault_Mgmt_osalLog("Fault Cleanup: CPPI\n");
1205 /* Writeback status so that Host can view it */
1206 fmCleanupStatus[0] = FM_STATUS_CLEANUP_CPDMA;
1207 Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
1209 /* CPPI PoR reset process - Reset all DMA rx/tx channels and flows except
1210 * those owned by Linux */
1212 memset(&exclusionParams, 0, sizeof(exclusionParams));
1213 exclusionParams.exclusionList = excludedResList;
1214 exclusionParams.numListEntries = listSize;
1216 /* Reset CPPI channels and flows */
1217 for (i = 0; i < CPPI_MAX_CPDMA; i++) {
1218 if (fmIsWirelessPeriphPoweredOnForCpdma((Cppi_CpDma) i)) {
1219 memset ((void *) &dmaCfg, 0, sizeof(dmaCfg));
1220 dmaCfg.dmaNum = (Cppi_CpDma) i;
1222 if (cppiHandle = Cppi_open(&dmaCfg)) {
1223 exclusionParams.u.cpdmaParams.dma = dmaCfg.dmaNum;
1225 exclusionParams.resType = Fm_res_CpdmaRxCh;
1226 for (j = 0; j < fmGetDmaMaxRxCh(dmaCfg.dmaNum); j++) {
1227 exclusionParams.resourceNum = j;
1228 if (!fmExclusionIsExcluded(&exclusionParams)) {
1229 resetDmaCh(cppiHandle, i, j, Fm_res_CpdmaRxCh);
1230 }
1231 }
1233 exclusionParams.resType = Fm_res_CpdmaTxCh;
1234 for (j = 0; j < fmGetDmaMaxTxCh(dmaCfg.dmaNum); j++) {
1235 exclusionParams.resourceNum = j;
1236 if (!fmExclusionIsExcluded(&exclusionParams)) {
1237 resetDmaCh(cppiHandle, i, j, Fm_res_CpdmaTxCh);
1238 }
1239 }
1241 exclusionParams.resType = Fm_res_CpdmaRxFlow;
1242 for (j = 0; j < getDmaMaxRxFlow(dmaCfg.dmaNum); j++) {
1243 exclusionParams.resourceNum = j;
1244 if (!fmExclusionIsExcluded(&exclusionParams)) {
1245 resetDmaCh(cppiHandle, i, j, Fm_res_CpdmaRxFlow);
1246 }
1247 }
1248 }
1249 else {
1250 Fault_Mgmt_osalLog("Fault Cleanup: Failed to open CPDMA with index %d\n", i);
1251 }
1252 }
1253 }
1255 return (FM_FAULT_CLEANUP_OK);
1256 }
1258 /* FUNCTION PURPOSE: Resets QMSS accumulator peripheral resources
1259 ***********************************************************************
1260 * DESCRIPTION: Resets QMSS accumulator peripheral resources to
1261 * their PoR state. Resources in the exclusion list will
1262 * not be reset.
1263 *
1264 * NOTE: This API should be called before queues
1265 * are cleaned.
1266 */
Fm_Result fmCleanQmssAccum(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
                           uint32_t listSize)
{
    Qmss_Result qmssResult;
    int32_t i;
    Fm_ExclusionParams exclusionParams;
    Qmss_IntdInterruptType intdType;   /* INTD half (high/low) that channel i belongs to */

    /* fmCleanupInit() must have run so the QMSS LLD is usable */
    if (!initComplete) {
        return (FM_ERROR_CLEANUP_INIT_NOT_COMPLETE);
    }

    Fault_Mgmt_osalLog("Fault Cleanup: QMSS Accumulator\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_QMSS_ACCUM;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* QMSS cleanup - Disable all the accumulator channels except those owned by Linux */
    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList = excludedResList;
    exclusionParams.numListEntries = listSize;

    if (listSize) {
        /* Only clean up accumulator channels if another processor is maintaining the
         * QM PDSPs. Another processor is in play if the exclusion list is populated */
        exclusionParams.resType = Fm_res_QmssAccumCh;
        for (i = 0; i < fmGblCfgParams->maxQmssAccumCh; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                /* Clear channel's pending interrupts: first determine which
                 * INTD priority range (high or low) the channel falls in */
                if ((i >= fmGblCfgParams->highAccum.start) && (i <= fmGblCfgParams->highAccum.end)) {
                    intdType = Qmss_IntdInterruptType_HIGH;
                }
                else if ((i >= fmGblCfgParams->loAccum.start) && (i <= fmGblCfgParams->loAccum.end)) {
                    intdType = Qmss_IntdInterruptType_LOW;
                }
                else {
                    /* Channel is in neither configured range - configuration error */
                    return (FM_ERROR_INVALID_ACCUM_CH);
                }

                /* Ack + EOI repeatedly until the INTD count register for this
                 * channel drains to zero, so no stale interrupts remain.
                 * NOTE(review): no timeout - assumes the count always drains */
                while (qmssGblCfgParams.qmQueIntdReg->INTCNT_REG[i]) {
                    Qmss_ackInterrupt(i, 1);
                    Qmss_setEoiVector(intdType, i);
                }

                /* Disable the channel on PDSP1; failure is logged but non-fatal */
                qmssResult = disableAccumChWithTimeout(Qmss_PdspId_PDSP1, i);
                if (qmssResult < 0) {
                    Fault_Mgmt_osalLog("Failed to disable PDSP1 accum ch %d with err %d\n", i, qmssResult);
                }
            }
        }
    }

    return (FM_FAULT_CLEANUP_OK);
}
1323 /* FUNCTION PURPOSE: Resets QMSS peripheral queue resources
1324 ***********************************************************************
1325 * DESCRIPTION: Resets QMSS peripheral queue resources to their PoR
1326 * state. Resources in the exclusion list will
1327 * not be reset.
1328 *
1329 * NOTE: This API should be called after accumulator
1330 * channels are disabled.
1331 */
1332 Fm_Result fmCleanQmssQueue(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
1333 uint32_t listSize)
1334 {
1335 int32_t i;
1336 Fm_ExclusionParams exclusionParams;
1338 if (!initComplete) {
1339 return (FM_ERROR_CLEANUP_INIT_NOT_COMPLETE);
1340 }
1342 Fault_Mgmt_osalLog("Fault Cleanup: QMSS Queues\n");
1343 /* Writeback status so that Host can view it */
1344 fmCleanupStatus[0] = FM_STATUS_CLEANUP_QMSS_QUEUE;
1345 Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
1347 /* QMSS cleanup - Don't touch QoS clusters (using different firmware downloaded by Linux kernel)
1348 * - Sweep QMSS queues of all DSP-based descriptors
1349 * - Clear all memory region registers not inserted by Linux */
1350 memset(&exclusionParams, 0, sizeof(exclusionParams));
1351 exclusionParams.exclusionList = excludedResList;
1352 exclusionParams.numListEntries = listSize;
1354 /* Cleanup the QMSS queues of DSP-based descriptors */
1355 cleanQmssQueues(excludedResList, listSize);
1357 exclusionParams.resType = Fm_res_QmssMemRegion;
1358 for (i = 0; i < QMSS_MAX_MEM_REGIONS; i++) {
1359 exclusionParams.resourceNum = i;
1360 if (!fmExclusionIsExcluded(&exclusionParams)) {
1361 expressResetMemoryRegion((Qmss_MemRegion)i);
1362 }
1363 }
1365 return (FM_FAULT_CLEANUP_OK);
1366 }
1368 /* FUNCTION PURPOSE: Resets PA peripheral resources
1369 ***********************************************************************
1370 * DESCRIPTION: Resets PA peripheral resources to their PoR
1371 * state. Resources in the exclusion list will
1372 * not be reset.
1373 */
Fm_Result fmCleanPa(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
                    uint32_t listSize)
{
    Pa_Handle passHandle;
    Qmss_MemRegInfo memInfo;          /* temporary descriptor memory region */
    Cppi_CpDmaInitCfg dmaCfg;
    Cppi_TxChInitCfg txChCfg;
    Cppi_RxChInitCfg rxChCfg;
    Cppi_RxFlowCfg rxFlowCfg;
    Cppi_Handle cppiHandle;
    Cppi_ChHnd paTxCh;
    Cppi_ChHnd paRxCh;
    Cppi_FlowHnd rxFlowHnd = NULL;
    Qmss_QueueHnd freeQ = NULL;       /* holds unused command descriptors */
    Qmss_QueueHnd cmdRespQ = NULL;    /* receives PA command responses */
    Cppi_DescCfg descCfg;
    Qmss_Queue queInfo;
    uint8_t isAllocated;
    uint32_t numAllocated;
    int32_t i, j;
    Fm_ExclusionParams exclusionParams;
    Cppi_Desc *monolithicDesc;
    paCmdReply_t paReply;
    tempPaCmd paDelCmd;               /* LUT1 entry delete command image */
    tempPaCmd *paRespCmd;
    uint32_t paRespCmdLen;
    uint32_t psCmd;
    Cppi_Result cppiResult;
    Fm_Result retVal = FM_FAULT_CLEANUP_OK;

    /* fmCleanupInit() must have run so the QMSS/CPPI LLDs are usable */
    if (!initComplete) {
        retVal = FM_ERROR_CLEANUP_INIT_NOT_COMPLETE;
        goto errorExit;
    }

    Fault_Mgmt_osalLog("Fault Cleanup: PA\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_PA;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Only cleanup if NetCp is on */
    if (isNetCpPoweredUp()) {
        /* Init PA and reset PDSPs 1-5 */
        if ((passHandle = resetAndInitPa(fmGblCfgParams->maxPaPdsps, excludedResList, listSize)) == NULL) {
            retVal = FM_ERROR_PA_INIT_FAILED;
            goto errorExit;
        }

        /* Allocate QMSS and CPPI resources needed to send commands to PA */

        /* Initialize the exclusion parameters */
        memset(&exclusionParams, 0, sizeof(exclusionParams));
        exclusionParams.exclusionList = excludedResList;
        exclusionParams.numListEntries = listSize;

        /* Make sure descriptor can fit delete command */
        if ((SIZE_MONO_DESC - MONO_DESC_DATA_OFFSET) < sizeof(paDelCmd)) {
            retVal = FM_ERROR_DESC_BUF_TOO_SMALL;
            goto errorExit;
        }

        /* Setup memory region for monolithic descriptors */
        memset(&memInfo, 0, sizeof(memInfo));
        memset ((void *) monoDesc, 0, SIZE_MONO_DESC * NUM_MONO_DESC);
        memInfo.descBase = (uint32_t *) l2_global_address ((uint32_t) monoDesc);
        memInfo.descSize = SIZE_MONO_DESC;
        memInfo.descNum = NUM_MONO_DESC;
        memInfo.manageDescFlag = Qmss_ManageDesc_MANAGE_DESCRIPTOR;
        memInfo.startIndex = 0;
        /* Find a memory region not used by Linux.
         * NOTE(review): assumes at least one non-excluded region exists;
         * if all are excluded, memInfo.memRegion is left unset */
        exclusionParams.resType = Fm_res_QmssMemRegion;
        for (i = 0; i < QMSS_MAX_MEM_REGIONS; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                memInfo.memRegion = (Qmss_MemRegion)i;
                break;
            }
        }

        if (Qmss_insertMemoryRegion(&memInfo) < QMSS_SOK) {
            retVal = FM_ERROR_QMSS_INIT_FAILED_DURING_PA_RECOV;
            goto cleanupMemRegion;
        }

        /* Open queues required to send commands to PA:
         * - freeQ (GP) - contains unused descriptors
         * - cmdSendQ (NetCP TX) - used to send commands to PA
         * - cmdRespQ (GP) - used to receive command responses from PA */
        memset(&invalidQs[0], 0, sizeof(invalidQs));
        exclusionParams.resType = Fm_res_QmssQueue;
        i = 0;
        /* Keep opening GP queues until one is found that is not excluded;
         * excluded ones are parked in invalidQs and closed afterwards so
         * they are not immediately re-handed out by the allocator */
        while (freeQ == NULL) {
            freeQ = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
            exclusionParams.resourceNum = Qmss_getQIDFromHandle(freeQ);
            if (fmExclusionIsExcluded(&exclusionParams)) {
                /* Store the queues that can't be used until after all queues have been found. */
                invalidQs[i++] = freeQ;
                freeQ = NULL;
            }
        }
        while (cmdRespQ == NULL) {
            cmdRespQ = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
            exclusionParams.resourceNum = Qmss_getQIDFromHandle(cmdRespQ);
            if (fmExclusionIsExcluded(&exclusionParams)) {
                /* Store the queues that can't be used until after all queues have been found. */
                invalidQs[i++] = cmdRespQ;
                cmdRespQ = NULL;
            }
        }
        /* Free any invalidQs */
        for (j = 0; j < i; j++) {
            Qmss_queueClose(invalidQs[j]);
        }

        /* Open all PASS tx queues. It's okay if already opened by Linux. Just need the interface
         * to PA PDSPs */
        for (i = 0; i < PA_MAX_NUM_CPPI_TX_CH; i++) {
            paTxQs[i] = Qmss_queueOpen(Qmss_QueueType_PASS_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
        }

        /* Setup the descriptors for freeQ */
        memset(&descCfg, 0, sizeof(descCfg));
        descCfg.memRegion = memInfo.memRegion;
        descCfg.descNum = NUM_MONO_DESC;
        descCfg.destQueueNum = Qmss_getQIDFromHandle(freeQ);
        descCfg.queueType = Qmss_QueueType_STARVATION_COUNTER_QUEUE;
        descCfg.initDesc = Cppi_InitDesc_INIT_DESCRIPTOR;
        descCfg.descType = Cppi_DescType_MONOLITHIC;
        descCfg.epibPresent = Cppi_EPIB_NO_EPIB_PRESENT;
        descCfg.cfg.mono.dataOffset = MONO_DESC_DATA_OFFSET;
        /* Descriptor should be recycled back to freeQ */
        queInfo = Qmss_getQueueNumber(freeQ);
        descCfg.returnQueue.qMgr = queInfo.qMgr;
        descCfg.returnQueue.qNum = queInfo.qNum;

        /* Initialize the descriptors and push to free Queue */
        if (Cppi_initDescriptor(&descCfg, &numAllocated) < CPPI_SOK) {
            retVal = FM_ERROR_CPPI_INIT_FAILED_DURING_PA_RECOV;
            goto cleanupResources;
        }
        /* Writeback changes to the monolithic descriptors */
        Fault_Mgmt_osalEndMemAccess(&monoDesc[0], sizeof(monoDesc));

        /* Open PASS DMA to send commands to and receive responses from PA */
        memset ((void *)&dmaCfg, 0, sizeof(dmaCfg));
        dmaCfg.dmaNum = Cppi_CpDma_PASS_CPDMA;
        cppiHandle = Cppi_open(&dmaCfg);

        /* Open PASS rxChs - Doesn't matter if already used by Linux */
        memset(&rxChCfg, 0, sizeof(rxChCfg));
        for (i = 0; i < fmGetDmaMaxRxCh(Cppi_CpDma_PASS_CPDMA); i++) {
            rxChCfg.channelNum = i;
            rxChCfg.rxEnable = Cppi_ChState_CHANNEL_DISABLE;
            paRxCh = Cppi_rxChannelOpen(cppiHandle, &rxChCfg, &isAllocated);
            Cppi_channelEnable(paRxCh);
        }

        /* Open all cppi tx channels to go with the queues - don't need to save handle */
        for (i = 0; i < PA_MAX_NUM_CPPI_TX_CH; i ++) {
            memset(&txChCfg, 0, sizeof(txChCfg));
            txChCfg.txEnable = Cppi_ChState_CHANNEL_DISABLE;
            /* TX channel number maps 1:1 to the PASS queue offset */
            txChCfg.channelNum = Qmss_getQIDFromHandle(paTxQs[i]) - QMSS_PASS_QUEUE_BASE;
            paTxCh = Cppi_txChannelOpenWithHwCfg(cppiHandle, &txChCfg, &isAllocated, 0);
            Cppi_channelEnable(paTxCh);
        }

        /* Open a PASS rxFlow on the first flow ID not owned by Linux */
        memset(&rxFlowCfg, 0, sizeof(rxFlowCfg));
        exclusionParams.resType = Fm_res_CpdmaRxFlow;
        exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_PASS_CPDMA;
        for (i = 0; i < getDmaMaxRxFlow(Cppi_CpDma_PASS_CPDMA); i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                rxFlowCfg.flowIdNum = i;
                break;
            }
        }
        rxFlowCfg.rx_ps_location = Cppi_PSLoc_PS_IN_DESC;
        rxFlowCfg.rx_desc_type = Cppi_DescType_MONOLITHIC;
        rxFlowCfg.rx_sop_offset = MONO_DESC_DATA_OFFSET;
        queInfo = Qmss_getQueueNumber(freeQ);
        rxFlowCfg.rx_fdq0_sz0_qnum = queInfo.qNum;
        rxFlowCfg.rx_fdq0_sz0_qmgr = queInfo.qMgr;
        rxFlowHnd = Cppi_configureRxFlow(cppiHandle, &rxFlowCfg, &isAllocated);

        if (listSize) {
            /* A present exclusion list signifies another core (typically ARM Linux) is in
             * control of PA. As a result, LUT1 entry deletion must take place and be selective. If
             * listSize is 0 the resetPaPdspsAndLuts function will have reset all PDSPs cleaning out
             * all LUT entries. */
            exclusionParams.resType = Fm_res_PaLutEntry;
            exclusionParams.u.lutParams.lutInst = 1;
            for (i = 0; i < fmGblCfgParams->maxLut1Entries; i++) {
                exclusionParams.resourceNum = i;
                if (!fmExclusionIsExcluded(&exclusionParams)) {
                    /* Build a delete command for LUT1 entry i, routed back
                     * to cmdRespQ via the rx flow configured above */
                    memset(&paReply, 0, sizeof(paReply));
                    paReply.dest = pa_DEST_HOST;
                    paReply.queue = Qmss_getQIDFromHandle(cmdRespQ);
                    paReply.flowId = rxFlowCfg.flowIdNum;
                    pa_format_fcmd((void *) &paDelCmd, &paReply, (uint8_t) i);

                    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(freeQ));
                    Cppi_setData(Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t *)&paDelCmd, sizeof(paDelCmd));
                    psCmd = PASAHO_PACFG_CMD;
                    Cppi_setPSData (Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t *)&psCmd, 4);
                    Cppi_setPacketLen(Cppi_DescType_MONOLITHIC, monolithicDesc, sizeof(paDelCmd));
                    /* LUT entry delete commands sent to PDSP 0 (Queue 640) */
                    Qmss_queuePushDescSize(paTxQs[pa_CMD_TX_DEST_0], (uint32_t *)monolithicDesc, SIZE_MONO_DESC);

                    /* Wait for response from PA (busy-wait; no timeout) */
                    while (Qmss_getQueueEntryCount(cmdRespQ) == 0){};
                    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(cmdRespQ));
                    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc,(uint8_t **)&paRespCmd, &paRespCmdLen);

                    if (paRespCmd->commandResult) {
                        retVal = FM_ERROR_LUT1_INDEX_NOT_REMOVED;
                        Qmss_queuePushDescSize(freeQ, monolithicDesc, SIZE_MONO_DESC);
                        goto cleanupResources;
                    }
                    /* Recycle the response descriptor */
                    Qmss_queuePushDescSize(freeQ, monolithicDesc, SIZE_MONO_DESC);
                }
            }
        }

        /* Restore PASS global configuration to its defaults */
        if ((retVal = paSetDefaultGblCfg(passHandle, cmdRespQ, freeQ, rxFlowCfg.flowIdNum)) != FM_FAULT_CLEANUP_OK) {
            goto cleanupResources;
        }

    cleanupResources:
        /* NOTE(review): rxFlowHnd may still be NULL if an early error jumped
         * here before Cppi_configureRxFlow - confirm Cppi_closeRxFlow
         * tolerates a NULL handle */
        if ((cppiResult = Cppi_closeRxFlow(rxFlowHnd)) != CPPI_SOK) {
            Fault_Mgmt_osalLog("Failed to disable PASS rx flow %d with err %d\n", rxFlowCfg.flowIdNum, cppiResult);
        }

        /* All descriptors should be in freeQ */
        Qmss_queueEmpty(freeQ);
        Qmss_queueEmpty(cmdRespQ);

    cleanupMemRegion:
        /* Tear down the temporary descriptor memory region */
        expressResetMemoryRegion(memInfo.memRegion);
    }
errorExit:
    return (retVal);
}
1618 /* FUNCTION PURPOSE: Cleans SA security context
1619 ***********************************************************************
1620 * DESCRIPTION: Evicts and tears down the SA security contexts
1621 * so that IPsec traffic can be reestablished
1622 */
1623 Fm_Result fmCleanSa(Fm_ExcludedResource *excludedResList, uint32_t listSize)
1624 {
1625 CSL_Cp_aceRegs *pSaRegs = (CSL_Cp_aceRegs *)CSL_PA_SS_CFG_CP_ACE_CFG_REGS;;
1626 int i;
1627 uint32_t ctxCachCtrl;
1628 Fm_Result retVal = FM_FAULT_CLEANUP_OK;
1630 Fault_Mgmt_osalLog("Fault Cleanup: SA\n");
1631 /* Writeback status so that Host can view it */
1632 fmCleanupStatus[0] = FM_STATUS_CLEANUP_SA;
1633 Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
1635 /* Only cleanup if context cache is enabled */
1636 if ((pSaRegs->MMR.CMD_STATUS & CSL_CP_ACE_CMD_STATUS_CTXCACH_EN_MASK)) {
1638 /* Stop flow of descriptors while resetting SA */
1639 linuxTxDmaDisable(excludedResList, listSize);
1642 /* Clear the security context cache - Allows IPsec tunnels to be recreated
1643 * from scratch after cleanup */
1644 ctxCachCtrl = pSaRegs->MMR.CTXCACH_CTRL;
1645 ctxCachCtrl |= CSL_CP_ACE_CTXCACH_CTRL_CLR_CACHE_TABLE_MASK;
1646 pSaRegs->MMR.CTXCACH_CTRL = ctxCachCtrl;
1648 /* Wait for bit to clear for completion */
1649 do {
1650 for (i = 0; i < 100; i++) {
1651 asm (" nop ");
1652 }
1653 } while (pSaRegs->MMR.CTXCACH_CTRL & CSL_CP_ACE_CTXCACH_CTRL_CLR_CACHE_TABLE_MASK);
1655 /* Restart DMAs */
1656 linuxTxDmaEnable(excludedResList, listSize);
1657 }
1659 return (retVal);
1660 }
1662 /* FUNCTION PURPOSE: Resets Semaphore peripheral resources
1663 ***********************************************************************
1664 * DESCRIPTION: Resets Semaphore peripheral resources to their PoR
1665 * state. Resources in the exclusion list will
1666 * not be reset.
1667 */
1668 Fm_Result fmCleanSemaphores(Fm_ExcludedResource *excludedResList, uint32_t listSize)
1669 {
1670 Fault_Mgmt_osalLog("Fault Cleanup: Semaphore\n");
1671 /* Writeback status so that Host can view it */
1672 fmCleanupStatus[0] = FM_STATUS_CLEANUP_SEMAPHORE;
1673 Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
1675 /* Soft reset semaphores through the SEM_RST_RUN register */
1676 CSL_FINS(hSEM->SEM_RST_RUN, SEM_SEM_RST_RUN_RESET, 1);
1678 return (FM_FAULT_CLEANUP_OK);
1679 }
1681 /* FUNCTION PURPOSE: Resets the CICs
1682 ***********************************************************************
1683 * DESCRIPTION: Clears a CIC of all system interrupt to host interrupt
1684 * mappings except those routed to Linux
1685 */
Fm_Result fmCleanCics(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
                      uint32_t listSize)
{
    int32_t i, j, k;                    /* i = CIC, j = host int, k = system int */
    volatile CSL_CPINTCRegs *regs;      /* register overlay for the current CIC */
    uint32_t numSysInt;
    uint32_t numHostInt;
    Fm_ExclusionParams exclusionParams;
    uint32_t entireCicDisable;          /* FM_TRUE while no host int in this CIC is excluded */
    Fm_Result retVal = FM_FAULT_CLEANUP_OK;

    Fault_Mgmt_osalLog("Fault Cleanup: CIC\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_CIC;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList = excludedResList;
    exclusionParams.numListEntries = listSize;
    exclusionParams.resType = Fm_res_CicHostInt;

    for (i = 0; i < fmGblCfgParams->maxCic; i++) {
        exclusionParams.u.cicParams.cic = i;

        /* Get CIC params */
        regs = fmGblCfgParams->cicParams[i].cicRegs;
        numSysInt = fmGblCfgParams->cicParams[i].maxNumSysInt;
        numHostInt = fmGblCfgParams->cicParams[i].maxNumHostInt;
        entireCicDisable = FM_TRUE;

        /* Unmap and disable all host interrupts not in exclusion list */
        for (j = 0; j < numHostInt; j++) {
            exclusionParams.resourceNum = j;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                /* Disable the host interrupt */
                regs->HINT_ENABLE_CLR_INDEX_REG = CSL_FMK(CPINTC_HINT_ENABLE_CLR_INDEX_REG_HINT_ENABLE_CLR_INDEX, j);

                /* Walk every system interrupt channel map looking for ones
                 * routed to this host interrupt */
                for (k = 0; k < numSysInt; k++) {
                    /* Clear system int channel map routed to host int */
                    if (regs->CH_MAP[k] == j) {
                        regs->CH_MAP[k] = 0;

                        /* Disable sys int since not routed anymore */
                        regs->ENABLE_CLR_INDEX_REG = CSL_FMK(CPINTC_ENABLE_CLR_INDEX_REG_ENABLE_CLR_INDEX, k);

                        /* Clear any pending interrupts */
                        regs->STATUS_CLR_INDEX_REG = CSL_FMK(CPINTC_STATUS_CLR_INDEX_REG_STATUS_CLR_INDEX, k);
                    }
                }
            }
            else {
                /* At least one host interrupt in this CIC is excluded. Don't
                 * perform global disable of host interrupts for this CIC */
                entireCicDisable = FM_FALSE;
            }
        }

        /* Global disable of host interrupts if none excluded */
        if (entireCicDisable) {
            regs->GLOBAL_ENABLE_HINT_REG = CSL_FMK(CPINTC_GLOBAL_ENABLE_HINT_REG_ENABLE_HINT_ANY, 0);
        }
    }

    return (retVal);
}
/* FUNCTION PURPOSE: Resets Timers
 ***********************************************************************
 * DESCRIPTION: Disables and resets all device timers except
 *              those used by Linux (i.e. timers present in the
 *              exclusion list).
 *
 *              Always returns FM_FAULT_CLEANUP_OK in the current
 *              implementation (retVal is never modified).
 */
Fm_Result fmCleanTimers(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
                        uint32_t listSize)
{
    volatile CSL_TmrRegs *regs;
    int32_t i;
    Uint32 tmpReg;
    Fm_ExclusionParams exclusionParams;
    Fm_Result retVal = FM_FAULT_CLEANUP_OK;

    Fault_Mgmt_osalLog("Fault Cleanup: Timers\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_TIMER;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Exclusion query structure reused for every timer; only the resource
     * number changes per iteration */
    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList = excludedResList;
    exclusionParams.numListEntries = listSize;
    exclusionParams.resType = Fm_res_Timer;

    for (i = 0; i < fmGblCfgParams->maxTimers; i++) {
        exclusionParams.resourceNum = i;
        if (!fmExclusionIsExcluded(&exclusionParams)) {
            regs = fmGblCfgParams->timerParams[i].timerRegs;

            /* Disable the LOW and HIGH Timers.
             * Both enable-mode fields are cleared in a single
             * read-modify-write of TCR so the register is only
             * written once. */
            tmpReg = regs->TCR;
            CSL_FINST(tmpReg, TMR_TCR_ENAMODE_LO, DISABLE);
            CSL_FINST(tmpReg, TMR_TCR_ENAMODE_HI, DISABLE);
            regs->TCR = tmpReg;

            /* Reset after disable - both halves are placed in reset via a
             * single read-modify-write of TGCR */
            tmpReg = regs->TGCR;
            CSL_FINST(tmpReg, TMR_TGCR_TIMLORS, RESET_ON);
            CSL_FINST(tmpReg, TMR_TGCR_TIMHIRS, RESET_ON);
            regs->TGCR = tmpReg;
        }
    }

    return (retVal);
}
/* FUNCTION PURPOSE: Resets AIF2
 ***********************************************************************
 * DESCRIPTION: Resets the AIF2 peripheral and then powers it down.
 *              Always returns FM_FAULT_CLEANUP_OK.
 */
Fm_Result fmCleanAif2(void)
{
    Fault_Mgmt_osalLog("Fault Cleanup: AIF2\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_AIF2;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Only touch AIF2 registers if the AI power domain is currently on;
     * presumably register access would fail with the domain off - the reset
     * is skipped and only the power-down below is performed in that case */
    if ((CSL_PSC_getPowerDomainState(CSL_PSC_PD_AI) == PSC_PDSTATE_ON)) {
        /* On - reset via the AIF LLD using a zeroed local AIF object */
        memset(&locAifObj, 0, sizeof(locAifObj));
        AIF_resetAif(&locAifObj);

#if 0 /* K2 only - Add back when K2 support is added */
        /* Reset SERDES separately */
        /* Link 0 to Link 3*/
        CSL_AIF2SerdesShutdown(CSL_AIF2_SERDES_B8_CFG_REGS);
        /* Link 4 to Link 5*/
        CSL_AIF2SerdesShutdown(CSL_AIF2_SERDES_B4_CFG_REGS);
#endif
    }
    /* Power down the AI domain regardless of its prior state */
    periphPowerDown(CSL_PSC_PD_AI);

    return (FM_FAULT_CLEANUP_OK);
}
/* FUNCTION PURPOSE: Resets TCP3D
 ***********************************************************************
 * DESCRIPTION: Powers down both TCP3D peripheral instances (A and B).
 *              Always returns FM_FAULT_CLEANUP_OK.
 */
Fm_Result fmCleanTcp3d(void)
{
    Fault_Mgmt_osalLog("Fault Cleanup: TCP3D\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_TCP3D;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Power down both TCP3D power domains */
    periphPowerDown(CSL_PSC_PD_TCP3D_A);
    periphPowerDown(CSL_PSC_PD_TCP3D_B);

    return (FM_FAULT_CLEANUP_OK);
}
/* FUNCTION PURPOSE: Resets BCP
 ***********************************************************************
 * DESCRIPTION: Powers down the BCP peripheral.
 *              Always returns FM_FAULT_CLEANUP_OK.
 */
Fm_Result fmCleanBcp(void)
{
    Fault_Mgmt_osalLog("Fault Cleanup: BCP\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_BCP;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Power down the BCP power domain */
    periphPowerDown(CSL_PSC_PD_BCP);

    return (FM_FAULT_CLEANUP_OK);
}
/* FUNCTION PURPOSE: Resets FFTC
 ***********************************************************************
 * DESCRIPTION: Powers down the FFTC peripherals (A and B share one
 *              power domain). Always returns FM_FAULT_CLEANUP_OK.
 */
Fm_Result fmCleanFftc(void)
{
    Fault_Mgmt_osalLog("Fault Cleanup: FFTC (A & B)\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_FFTC;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Single power domain covers both FFTC A and B */
    periphPowerDown(CSL_PSC_PD_FFTC_AB);

    return (FM_FAULT_CLEANUP_OK);
}
/* FUNCTION PURPOSE: Resets VCP
 ***********************************************************************
 * DESCRIPTION: Powers down the VCP peripheral.
 *              Always returns FM_FAULT_CLEANUP_OK.
 */
Fm_Result fmCleanVcp(void)
{
    Fault_Mgmt_osalLog("Fault Cleanup: VCP\n");
    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_VCP;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Power down the VCP power domain */
    periphPowerDown(CSL_PSC_PD_PD_VCP_BCD);

    return (FM_FAULT_CLEANUP_OK);
}
1891 #endif /* !(K2H && K2K && K2L && K2E) */
1893 /* FUNCTION PURPOSE: Resets EDMA3 peripheral resources
1894 ***********************************************************************
1895 * DESCRIPTION: Resets EDMA3 peripheral resources to their PoR
1896 * state. Resources in the exclusion list will
1897 * not be reset.
1898 */
1899 Fm_Result fmCleanEdma3(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
1900 uint32_t listSize, uint32_t provideStatus)
1901 {
1902 CSL_Edma3Handle edmaCCModule;
1903 int32_t i, j, k;
1904 CSL_Status status;
1905 Fm_ExclusionParams exclusionParams;
1907 if (provideStatus) {
1908 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
1909 Fault_Mgmt_osalLog("Fault Cleanup: EDMA3\n");
1910 /* Writeback status so that Host can view it */
1911 fmCleanupStatus[0] = FM_STATUS_CLEANUP_EDMA3;
1912 Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
1913 #endif /* !(K2H && K2K && K2L && K2E) */
1914 }
1916 memset(&exclusionParams, 0, sizeof(exclusionParams));
1917 exclusionParams.exclusionList = excludedResList;
1918 exclusionParams.numListEntries = listSize;
1920 for (i = 0; i < fmGblCfgParams->maxEdma3Cc; i++) {
1921 /* Module Level Open */
1922 memset((void *)&edmaCCModule, 0, sizeof(edmaCCModule));
1923 memset((void *)&edmaObjCC, 0, sizeof(edmaObjCC));
1924 edmaCCModule = CSL_edma3Open(&edmaObjCC, i, NULL, &status);
1925 if ((edmaCCModule == NULL) || (status != CSL_SOK)) {
1926 return(FM_ERROR_EDMA3_INIT_FAILED);
1927 }
1929 exclusionParams.u.edma3Params.edma3Num = i;
1931 /* Disable CC channels */
1932 exclusionParams.resType = Fm_res_Edma3DmaCh;
1933 for (j = 0; j < edmaObjCC.cfgInfo.numDMAChannel; j++) {
1934 exclusionParams.resourceNum = j;
1935 if (!fmExclusionIsExcluded(&exclusionParams)) {
1936 CSL_edma3DMAChannelDisable(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
1937 CSL_edma3ClearDMAChannelEvent(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
1938 for (k = 0; k < edmaObjCC.cfgInfo.numRegions; k++) {
1939 CSL_edma3DMAChannelDisable(edmaCCModule, k, j);
1940 CSL_edma3ClearDMAChannelEvent(edmaCCModule, k, j);
1941 }
1942 CSL_edma3ClearDMAChannelSecondaryEvents(edmaCCModule, j);
1943 }
1944 }
1946 exclusionParams.resType = Fm_res_Edma3QdmaCh;
1947 for (j = 0; j < edmaObjCC.cfgInfo.numQDMAChannel; j++) {
1948 exclusionParams.resourceNum = j;
1949 if (!fmExclusionIsExcluded(&exclusionParams)) {
1950 CSL_edma3QDMAChannelDisable(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
1951 CSL_edma3ClearQDMAChannelSecondaryEvents(edmaCCModule, j);
1952 }
1953 }
1955 exclusionParams.resType = Fm_res_Edma3IntCh;
1956 for (j = 0; j < edmaObjCC.cfgInfo.numINTChannel; j++) {
1957 exclusionParams.resourceNum = j;
1958 if (!fmExclusionIsExcluded(&exclusionParams)) {
1959 if (j < 32) {
1960 CSL_edma3InterruptLoDisable(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
1961 CSL_edma3ClearLoPendingInterrupts (edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
1962 }
1963 else {
1964 CSL_edma3InterruptHiDisable(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
1965 CSL_edma3ClearHiPendingInterrupts (edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
1966 }
1967 }
1968 }
1969 }
1971 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
1972 return (FM_FAULT_CLEANUP_OK);
1973 #else
1974 return (0);
1975 #endif
1976 }
1978 /* FUNCTION PURPOSE: Gets the max CPPI tx ch for a CPDMA
1979 ***********************************************************************
1980 * DESCRIPTION: Returns the maximum number of tx ch for the
1981 * given CPDMA
1982 *
1983 * CPPI API hardcoded here until it can be added to CPPI LLD
1984 */
1985 uint32_t fmGetDmaMaxTxCh(Cppi_CpDma dmaNum)
1986 {
1987 uint32_t maxTxCh;
1989 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
1990 maxTxCh = cppiGblCfgParams[dmaNum].maxTxCh;
1991 #else
1992 maxTxCh = cppiGblCfgParams.cpDmaCfgs[dmaNum].maxTxCh;
1993 #endif
1994 return (uint32_t) maxTxCh;
1995 }
1997 /* FUNCTION PURPOSE: Gets the max CPPI rx ch for a CPDMA
1998 ***********************************************************************
1999 * DESCRIPTION: Returns the maximum number of rx ch for the
2000 * given CPDMA
2001 *
2002 * CPPI API hardcoded here until it can be added to CPPI LLD
2003 */
2004 uint32_t fmGetDmaMaxRxCh(Cppi_CpDma dmaNum)
2005 {
2006 uint32_t maxRxCh;
2008 #if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
2009 maxRxCh = cppiGblCfgParams[dmaNum].maxRxCh;
2010 #else
2011 maxRxCh = cppiGblCfgParams.cpDmaCfgs[dmaNum].maxRxCh;
2012 #endif
2013 return (uint32_t) maxRxCh;
2014 }