/**
 *  @file fm_clean.c
 *
 *  @brief
 *      Fault Management fault cleanup source
 *
 *  \par
 *  ============================================================================
 *  @n (C) Copyright 2014, Texas Instruments, Inc.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *    Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 *    Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the
 *    distribution.
 *
 *    Neither the name of Texas Instruments Incorporated nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  \par
 */
#include        /* Standard Include Files. */
#include        /* CSL Includes */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include        /* LLD Includes */
#include
#include
#include
#include
#include
#include
#include
#include
#include        /* FM API Include */
#include        /* FM Internal Includes */
#include
#include
#include        /* OSAL Includes */
#include

#if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))

#define NUM_MONO_DESC         32
#define SIZE_MONO_DESC        128
#define MONO_DESC_DATA_OFFSET 36

#define PA_INST_SIZE          128  /* Required size = 84 */
#define PA_MAX_NUM_L2_HANDLES 64
#define PA_L2_TABLE_SIZE      (PA_MAX_NUM_L2_HANDLES * 32)  /* Requires 32 bytes per entry */
#define PA_MAX_NUM_L3_HANDLES 128
#define PA_L3_TABLE_SIZE      (PA_MAX_NUM_L3_HANDLES * 72)  /* Requires 72 bytes per entry */
#define PA_MAX_NUM_CPPI_TX_CH 9

#if defined(_LITTLE_ENDIAN)
#define SWIZ(x) (sizeof((x)) == 1 ? (x) : (sizeof((x)) == 2 ? swiz16((x)) : (sizeof((x)) == 4 ? swiz32((x)) : 0)))
#else
#define SWIZ(x) (x)
#endif

/**********************************************************************
 ********************** Cleanup Globals *******************************
 **********************************************************************/

/* Tracks whether LLDs have been initialized during the cleanup process */
uint32_t initComplete = FM_FALSE;

#pragma DATA_ALIGN (monoDesc, 128)
uint8_t monoDesc[SIZE_MONO_DESC * NUM_MONO_DESC];
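/* Illustrative sketch (not part of the cleanup flow): how the monolithic
 * descriptor sizing above fits together.  The descriptor header and PS data
 * occupy the first MONO_DESC_DATA_OFFSET (36) bytes of each descriptor, so
 * each one can carry SIZE_MONO_DESC - MONO_DESC_DATA_OFFSET = 92 bytes of
 * command payload.  Any command written into a descriptor must therefore
 * pass a check of the form used later in fmCleanPa():
 *
 *   if ((SIZE_MONO_DESC - MONO_DESC_DATA_OFFSET) < sizeof(paDelCmd)) {
 *       return FM_ERROR_DESC_BUF_TOO_SMALL;
 *   }
 */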
/* Array used to store Queues that cannot be used by DSP until they can be
 * closed.  Must be global to avoid memory corruption when initializing the
 * array since it may potentially be larger than the stack */
Qmss_QueueHnd invalidQs[QMSS_MAX_GENERAL_PURPOSE_QUEUE];

/* PA memory */
#pragma DATA_ALIGN (paMemPaInst, 8)
uint8_t paMemPaInst[PA_INST_SIZE];
#pragma DATA_ALIGN (paMemL2Ram, 8)
uint8_t paMemL2Ram[PA_L2_TABLE_SIZE];
#pragma DATA_ALIGN (paMemL3Ram, 8)
uint8_t paMemL3Ram[PA_L3_TABLE_SIZE];

/* PASS Tx Queues */
Qmss_QueueHnd paTxQs[PA_MAX_NUM_CPPI_TX_CH];

#endif /* !(K2H || K2K || K2L || K2E) */

/* EDMA3 object - global to avoid overflowing stack */
CSL_Edma3Obj edmaObjCC;

#if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
/* AIF2 object - Global to avoid overflowing stack */
AIF_ConfigObj locAifObj;

/**********************************************************************
 ******************** External Variables ******************************
 **********************************************************************/

/* Location in memory where cleanup status is written */
extern int32_t fmCleanupStatus[32];

/* Heap needed to initialize CPPI prior to IO halt execution */
extern uint8_t tempCppiHeap[];
#endif /* !(K2H || K2K || K2L || K2E) */

/* CPPI Global configuration parameters */
#if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
extern Cppi_GlobalConfigParams cppiGblCfgParams[];
#else
extern Cppi_GlobalConfigParams cppiGblCfgParams;
#endif

#if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
/* QMSS Global configuration parameters */
extern Qmss_GlobalConfigParams qmssGblCfgParams;

/**********************************************************************
 ********************* Local Cleanup Functions ************************
 **********************************************************************/

/* START: PA API hardcoded here until it can be added to PA LLD */
#if defined(_LITTLE_ENDIAN)
/*********************************************************************
 * FUNCTION PURPOSE: Swizzling
 *********************************************************************
 * DESCRIPTION: The PA sub-system requires all multi-byte fields in
 *              big endian format.
 *********************************************************************/
static inline uint16_t swiz16(uint16_t x)
{
    return ((x >> 8) | (x << 8));
}

static inline uint32_t swiz32 (uint32_t x)
{
    return (((x) >> 24) | (((x) >> 8) & 0xff00L) | (((x) << 8) & 0xff0000L) | ((x) << 24));
}
#endif
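/* Illustrative sketch (little-endian host assumed): SWIZ() selects the swap
 * routine by operand size, so single bytes pass through unchanged while 16-
 * and 32-bit fields are converted to the big endian layout PASS expects:
 *
 *   uint8_t  dest = 6;              SWIZ(dest) == 6
 *   uint16_t qnum = 0x2880;         SWIZ(qnum) == 0x8028
 *   uint32_t ctx  = 0x11223344;     SWIZ(ctx)  == 0x44332211
 */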
/* Hardcode here until API added to PA to remove entries with minimal
 * overhead.  This define maps to PAFRM_CONFIG_COMMAND_DEL_LUT1 (in src/pafrm.h) */
#define temp_PAFRM_CONFIG_COMMAND_DEL_LUT1 2
/* Hardcode here until API added to PA to remove entries with minimal
 * overhead.  This define maps to PAFRM_CONFIG_COMMAND_SEC_BYTE (in src/pafrm.h) */
#define temp_PAFRM_CONFIG_COMMAND_SEC_BYTE 0xce
/* Hardcode here until API added to PA to remove entries with minimal
 * overhead.  This define maps to PAFRM_DEST_PKTDMA (in src/pafrm.h) */
#define temp_PAFRM_DEST_PKTDMA 6
/* Hardcode here until API added to PA to remove entries with minimal
 * overhead.  This define maps to PAFRM_DEST_DISCARD (in src/pafrm.h) */
#define temp_PAFRM_DEST_DISCARD 10

/* Commands to PA - Hardcode here until API added to PA to remove entries
 * with minimal overhead.  This structure maps to pafrmCommand_t (in src/pafrm.h) */
typedef struct {
    uint32_t commandResult; /* Returned to the host, ignored on entry to the PASS */
    uint8_t  command;       /* Command value */
    uint8_t  magic;         /* Magic value */
    uint16_t comId;         /* Used by the host to identify command results */
    uint32_t retContext;    /* Returned in swInfo to identify packet as a command */
    uint16_t replyQueue;    /* Specifies the queue number for the message reply. 0xffff to toss the reply */
    uint8_t  replyDest;     /* Reply destination (host0, host1, discard are the only valid values) */
    uint8_t  flowId;        /* Flow ID used to assign packet at reply */
    uint32_t cmd;           /* First word of the command */
} tempPaCmd;

/* Delete entry from LUT1 - Hardcode here until API added to PA to remove
 * entries with minimal overhead.  This structure maps to pafrmCommandDelLut1_t
 * (in src/pafrm.h) */
typedef struct {
    uint8_t index;  /* LUT1 index */
} tempPaCmdDelLut1;

/*************************************************************************
 * FUNCTION PURPOSE: Format Firmware Command Header
 *************************************************************************
 * DESCRIPTION: Clear and construct the firmware command header
 *              Returns pointer to the firmware command
 *************************************************************************/
static tempPaCmd *pa_format_fcmd(void *pCmd, paCmdReply_t *reply, uint8_t lutIndex)
{
    tempPaCmd        *fcmd  = (tempPaCmd *) pCmd;
    uint16_t          csize = sizeof(tempPaCmd) + sizeof(tempPaCmdDelLut1) - sizeof(uint32_t);
    tempPaCmdDelLut1 *del;
    uint8_t           lut   = temp_PAFRM_CONFIG_COMMAND_DEL_LUT1;

    memset(fcmd, 0, csize);

    fcmd->command    = SWIZ(lut);
    fcmd->magic      = temp_PAFRM_CONFIG_COMMAND_SEC_BYTE;
    fcmd->comId      = 0;
    fcmd->retContext = SWIZ(reply->replyId);
    fcmd->replyQueue = SWIZ(reply->queue);
    fcmd->flowId     = SWIZ(reply->flowId);

    /* Validity of the destination was already checked (HOST), so no other
     * cases must be considered */
    if (reply->dest == pa_DEST_HOST)
        fcmd->replyDest = temp_PAFRM_DEST_PKTDMA;
    else
        fcmd->replyDest = temp_PAFRM_DEST_DISCARD;

    del        = (tempPaCmdDelLut1 *) &(fcmd->cmd);
    del->index = lutIndex;
    del->index = SWIZ(del->index);

    return (fcmd);
}
/* END: PA API hardcoded here until it can be added to PA LLD */
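/* Illustrative sketch (hypothetical values): building a LUT1 delete command
 * for index 5 with the response directed back to a host queue through rx
 * flow 0, mirroring how fmCleanPa() below uses this helper:
 *
 *   tempPaCmd    delCmd;
 *   paCmdReply_t reply;
 *
 *   memset(&reply, 0, sizeof(reply));
 *   reply.dest   = pa_DEST_HOST;
 *   reply.queue  = Qmss_getQIDFromHandle(cmdRespQ);  // response queue
 *   reply.flowId = 0;                                // rx flow carrying the reply
 *   pa_format_fcmd((void *)&delCmd, &reply, 5);
 *
 * The formatted command is then copied into a monolithic descriptor and
 * pushed to the PDSP0 command queue.
 */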
/* FUNCTION PURPOSE: Converts L2 addresses to global
 ***********************************************************************
 * DESCRIPTION: Converts local l2 addresses to their global address
 */
static uint32_t l2_global_address (uint32_t addr)
{
    /* Compute the global address. */
    return (addr + (0x10000000 + (DNUM * 0x1000000)));
}
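/* Illustrative sketch: on core 2 (DNUM == 2), a local L2 address of
 * 0x00800000 maps to the global alias
 *
 *   0x00800000 + 0x10000000 + (2 * 0x1000000) = 0x12800000
 *
 * which is the form required when handing core-local buffers (such as
 * monoDesc above) to the QMSS/CPPI hardware.
 */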
/* FUNCTION PURPOSE: Cycle Delay
 ***********************************************************************
 * DESCRIPTION: Delays for the specified amount of cycles
 */
static void cycleDelay(int count, int initTSCL)
{
    uint32_t TSCLin = TSCL;

    if (initTSCL) {
        CSL_chipWriteTSCL(0);
    }
    else {
        if (count <= 0)
            return;

        while ((TSCL - TSCLin) < (uint32_t) count);
    }
}

/* FUNCTION PURPOSE: Resets a specified CPDMA channel or flow
 ***********************************************************************
 * DESCRIPTION: Resets the specified CPDMA channel or flow
 */
static void resetDmaCh(Cppi_Handle cppiHandle, int32_t dmaNum, int32_t chNum, Fm_ResType resType)
{
    Cppi_RxChInitCfg rxChCfg;
    Cppi_TxChInitCfg txChCfg;
    Cppi_ChHnd       chHandle;
    Cppi_RxFlowCfg   rxFlowCfg;
    Cppi_FlowHnd     flowHandle;
    uint8_t          isAllocated;
    Cppi_Result      cppiResult;

    if (resType == Fm_res_CpdmaRxCh) {
        memset((void *) &rxChCfg, 0, sizeof(rxChCfg));
        rxChCfg.channelNum = chNum;
        if ((chHandle = Cppi_rxChannelOpen(cppiHandle, &rxChCfg, &isAllocated)) != NULL) {
            if ((cppiResult = Cppi_channelDisable(chHandle)) != CPPI_SOK) {
                Fault_Mgmt_osalLog("Failed to disable cppi DMA %d rx ch %d with err %d\n", dmaNum, chNum, cppiResult);
            }
            if ((cppiResult = Cppi_channelClose(chHandle)) != CPPI_SOK) {
                Fault_Mgmt_osalLog("Failed to close cppi DMA %d rx ch %d with err %d\n", dmaNum, chNum, cppiResult);
            }
        }
        else {
            Fault_Mgmt_osalLog("DMA %d, RX channel %d failed to open\n", dmaNum, chNum);
        }
    }
    else if (resType == Fm_res_CpdmaTxCh) {
        memset((void *) &txChCfg, 0, sizeof(txChCfg));
        txChCfg.channelNum = chNum;
        if ((chHandle = Cppi_txChannelOpen(cppiHandle, &txChCfg, &isAllocated)) != NULL) {
            if ((cppiResult = Cppi_channelDisable(chHandle)) != CPPI_SOK) {
                Fault_Mgmt_osalLog("Failed to disable cppi DMA %d tx ch %d with err %d\n", dmaNum, chNum, cppiResult);
            }
            if ((cppiResult = Cppi_channelClose(chHandle)) != CPPI_SOK) {
                Fault_Mgmt_osalLog("Failed to close cppi DMA %d tx ch %d with err %d\n", dmaNum, chNum, cppiResult);
            }
        }
        else {
            Fault_Mgmt_osalLog("DMA %d, TX channel %d failed to open\n", dmaNum, chNum);
        }
    }
    else if (resType == Fm_res_CpdmaRxFlow) {
        memset((void *) &rxFlowCfg, 0, sizeof(rxFlowCfg));
        rxFlowCfg.flowIdNum = chNum;
        if ((flowHandle = Cppi_configureRxFlow(cppiHandle, &rxFlowCfg, &isAllocated)) != NULL) {
            if ((cppiResult = Cppi_closeRxFlow(flowHandle)) != CPPI_SOK) {
                Fault_Mgmt_osalLog("Failed to close cppi DMA %d rx flow %d with err %d\n", dmaNum, chNum, cppiResult);
            }
        }
        else {
            Fault_Mgmt_osalLog("DMA %d, RX flow %d failed to open\n", dmaNum, chNum);
        }
    }
}

/* FUNCTION PURPOSE: Gets the max CPPI rx flows for a CPDMA
 ***********************************************************************
 * DESCRIPTION: Returns the maximum number of rx flows for the
 *              given CPDMA
 *
 * CPPI API hardcoded here until it can be added to CPPI LLD
 */
static uint32_t getDmaMaxRxFlow(Cppi_CpDma dmaNum)
{
    uint32_t maxRxFlow;

    maxRxFlow = cppiGblCfgParams[dmaNum].maxRxFlow;

    return (uint32_t) maxRxFlow;
}

/* FUNCTION PURPOSE: Enables a CPPI tx channel
 ***********************************************************************
 * DESCRIPTION: Directly accesses the CPPI registers to Enable
 *              the specified transmit channel
 *
 * CPPI API hardcoded here until it can be added to CPPI LLD
 */
static Cppi_Result txChannelExpressEnable (Cppi_CpDma dmaNum, uint32_t channelNum)
{
    uint32_t    value  = 0;
    Cppi_Result retVal = CPPI_SOK;

    if (channelNum > cppiGblCfgParams[dmaNum].maxTxCh) {
        retVal = FM_ERROR_CPPI_TX_CHANNEL_INVALID;
        goto exitCs;
    }

    CSL_FINS (value, CPPIDMA_TX_CHANNEL_CONFIG_TX_CHANNEL_GLOBAL_CONFIG_REG_A_TX_ENABLE, (uint32_t) 1);
    cppiGblCfgParams[dmaNum].txChRegs->TX_CHANNEL_GLOBAL_CONFIG[channelNum].TX_CHANNEL_GLOBAL_CONFIG_REG_A = value;

exitCs:
    return retVal;
}

/* FUNCTION PURPOSE: Disables a CPPI tx channel
 ***********************************************************************
 * DESCRIPTION: Directly accesses the CPPI registers to disable
 *              the specified transmit channel
 *
 * CPPI API hardcoded here until it can be added to CPPI LLD
 */
static Cppi_Result txChannelExpressDisable (Cppi_CpDma dmaNum, uint32_t channelNum)
{
    Cppi_Result retVal = CPPI_SOK;

    if (channelNum > cppiGblCfgParams[dmaNum].maxTxCh) {
        retVal = FM_ERROR_CPPI_TX_CHANNEL_INVALID;
        goto exitCs;
    }

    cppiGblCfgParams[dmaNum].txChRegs->TX_CHANNEL_GLOBAL_CONFIG[channelNum].TX_CHANNEL_GLOBAL_CONFIG_REG_A = 0;

exitCs:
    return retVal;
}

/* FUNCTION PURPOSE: Disable TX DMAs used by Linux
 ***********************************************************************
 * DESCRIPTION: Disables the PASS and QMSS TX DMAs that are owned and
 *              operated by Linux.
 */
static void linuxTxDmaDisable(Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    Fm_ExclusionParams exclusionParams;
    int32_t            i;

    /* Disable all PASS & QMSS TX DMAs owned by Linux */
    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList  = excludedResList;
    exclusionParams.numListEntries = listSize;
    exclusionParams.resType        = Fm_res_CpdmaTxCh;

    exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_PASS_CPDMA;
    for (i = 0; i < fmGetDmaMaxTxCh(Cppi_CpDma_PASS_CPDMA); i++) {
        exclusionParams.resourceNum = i;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            txChannelExpressDisable(Cppi_CpDma_PASS_CPDMA, i);
        }
    }

    exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_QMSS_CPDMA;
    for (i = 0; i < fmGetDmaMaxTxCh(Cppi_CpDma_QMSS_CPDMA); i++) {
        exclusionParams.resourceNum = i;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            txChannelExpressDisable(Cppi_CpDma_QMSS_CPDMA, i);
        }
    }
}

/* FUNCTION PURPOSE: Enable TX DMAs used by Linux
 ***********************************************************************
 * DESCRIPTION: Enables the PASS and QMSS TX DMAs that are owned and
 *              operated by Linux.
 */
static void linuxTxDmaEnable(Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    Fm_ExclusionParams exclusionParams;
    int32_t            i;

    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList  = excludedResList;
    exclusionParams.numListEntries = listSize;

    /* Enable the Linux TX CPDMAs */
    exclusionParams.resType = Fm_res_CpdmaTxCh;

    exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_PASS_CPDMA;
    for (i = 0; i < fmGetDmaMaxTxCh(Cppi_CpDma_PASS_CPDMA); i++) {
        exclusionParams.resourceNum = i;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            txChannelExpressEnable(Cppi_CpDma_PASS_CPDMA, i);
        }
    }

    exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_QMSS_CPDMA;
    for (i = 0; i < fmGetDmaMaxTxCh(Cppi_CpDma_QMSS_CPDMA); i++) {
        exclusionParams.resourceNum = i;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            txChannelExpressEnable(Cppi_CpDma_QMSS_CPDMA, i);
        }
    }
}
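/* Illustrative sketch: the express enable/disable pair above is used to
 * bracket cleanup operations so no new descriptors flow through PASS/QMSS
 * while shared state is being modified:
 *
 *   linuxTxDmaDisable(excludedResList, listSize);   // quiesce Linux-owned TX DMA
 *   ... clean queues, reset PDSPs, or clear SA contexts ...
 *   linuxTxDmaEnable(excludedResList, listSize);    // resume traffic
 *
 * The same pattern appears in cleanQmssQueues(), resetAndInitPa() and
 * fmCleanSa() below.
 */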
/* FUNCTION PURPOSE: Disables a QMSS accumulator channel
 ***********************************************************************
 * DESCRIPTION: Disables a QMSS accumulator channel the same as the
 *              Qmss_disableAccumulator API except a timeout is added
 *              when waiting for response from the PDSP firmware.
 *
 * QMSS API hardcoded here until it can be added to QMSS LLD
 */
static Qmss_Result disableAccumChWithTimeout(Qmss_PdspId pdspId, uint8_t channel)
{
    Qmss_AccCmd        cmd;
    volatile uint32_t *cmdPtr, *reg;
    uint32_t           index;
    uint8_t            result;
    void              *key;
    uint32_t           gotResponse = FM_FALSE;
    uint32_t           timeoutCnt;

    /* Begin Critical Section before accessing shared resources. */
    key = Qmss_osalCsEnter ();

    while (!gotResponse) {
        memset ((void *) &cmd, 0, sizeof (Qmss_AccCmd));
        CSL_FINSR (cmd.word0, 7, 0, channel);
        CSL_FINSR (cmd.word0, 15, 8, Qmss_AccCmd_DISABLE_CHANNEL);

        /* Point to the accumulator command register's last word */
        reg = (uint32_t *) ((uint8_t *) qmssGblCfgParams.qmPdspCmdReg[pdspId] + 4 * 4);

        /* Write command word last */
        cmdPtr = ((uint32_t *) &cmd) + 4;
        for (index = 0; index < 5; index++)
            *reg-- = *cmdPtr--;

        /* Wait for the command to clear.  The firmware clears the command
         * byte once the command has been processed. */
        reg++;
        timeoutCnt = 0;
        do {
            result = CSL_FEXTR (*reg, 15, 8);

            if (result == 0) {
                gotResponse = FM_TRUE;
            }
            else {
                cycleDelay(1000, FM_FALSE);
                timeoutCnt += 1000;
                if (timeoutCnt >= 1000000000) {
                    /* Resend the command */
                    break;
                }
            }
        } while (result != 0);
    }

    /* End Critical Section */
    Qmss_osalCsExit (key);

    return (Qmss_Result) (CSL_FEXTR (*reg, 31, 24));
}

/* FUNCTION PURPOSE: Resets a QMSS memory region
 ***********************************************************************
 * DESCRIPTION: Directly accesses the QMSS descriptor registers
 *              to reset a specified memory region
 *
 * QMSS API hardcoded here until it can be added to QMSS LLD
 */
static Qmss_Result expressResetMemoryRegion (Qmss_MemRegion memRegion)
{
    int32_t index = (int32_t) memRegion;

    if (memRegion == Qmss_MemRegion_MEMORY_REGION_NOT_SPECIFIED)
        return QMSS_MEMREGION_INVALID_INDEX;

    qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_BASE_ADDRESS_REG     = (uint32_t) 0;
    qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_START_INDEX_REG      = (uint32_t) 0;
    qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_DESCRIPTOR_SETUP_REG = 0;

    return QMSS_SOK;
}
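/* Illustrative sketch: the accumulator command block is five 32-bit words.
 * disableAccumChWithTimeout() writes them to the PDSP command registers in
 * descending order so that word0, which carries the channel and command
 * fields, lands last and the firmware only sees a complete command:
 *
 *   reg    -> last word of the command register area (base + 4*4)
 *   cmdPtr -> last word of the local copy
 *   for (index = 0; index < 5; index++)
 *       *reg-- = *cmdPtr--;          // word4 ... word0, command written last
 */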
/* FUNCTION PURPOSE: Gets a Memory Region Base Address
 ***********************************************************************
 * DESCRIPTION: Directly accesses the QMSS descriptor registers
 *              to get the region base address.  A return of NULL
 *              means the memory region is not in use.  The physical
 *              base address cannot be NULL if it is in use.
 *
 * QMSS API hardcoded here until it can be added to QMSS LLD
 */
static uint32_t getMemoryRegionBaseAddr (Qmss_MemRegion memRegion)
{
    int32_t index = (int32_t) memRegion;

    return (qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_BASE_ADDRESS_REG);
}

/* FUNCTION PURPOSE: Gets a Memory Region Descriptor Block Size
 ***********************************************************************
 * DESCRIPTION: Directly accesses the QMSS descriptor registers
 *              to get the descriptor block size for the region
 *
 * QMSS API hardcoded here until it can be added to QMSS LLD
 */
static uint32_t getMemoryRegionDescBlockSize (Qmss_MemRegion memRegion)
{
    int32_t  index         = (int32_t) memRegion;
    uint32_t descSizeBytes = 0;
    uint32_t powRegSize    = 0;
    uint32_t numDesc       = 0;

    descSizeBytes = (uint32_t) CSL_FEXT (qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_DESCRIPTOR_SETUP_REG,
                                         QM_DESCRIPTOR_REGION_CONFIG_MEMORY_REGION_DESCRIPTOR_SETUP_REG_DESC_SIZE);
    /* Value stored as multiplier minus 1 that needs to be applied to 16 to get descriptor size */
    descSizeBytes = (descSizeBytes + 1) * 16;

    powRegSize = (uint32_t) CSL_FEXT (qmssGblCfgParams.qmDescReg->MEMORY_REGION_BASE_ADDRESS_GROUP[index].MEMORY_REGION_DESCRIPTOR_SETUP_REG,
                                      QM_DESCRIPTOR_REGION_CONFIG_MEMORY_REGION_DESCRIPTOR_SETUP_REG_REG_SIZE);
    /* Value stored as 2^(5+stored_value) = number of descriptors */
    numDesc = (32UL << powRegSize);

    return (numDesc * descSizeBytes);
}
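/* Illustrative sketch (hypothetical register values): if a region's setup
 * register holds a DESC_SIZE field of 7 and a REG_SIZE field of 3, then
 *
 *   descSizeBytes = (7 + 1) * 16  = 128 bytes per descriptor
 *   numDesc       = 32 << 3       = 256 descriptors
 *   block size    = 256 * 128     = 32768 bytes
 *
 * cleanQmssQueues() below uses [base, base + block size) to decide whether
 * a popped descriptor belongs to a Linux-owned memory region.
 */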
/* FUNCTION PURPOSE: Cleans QMSS queues
 ***********************************************************************
 * DESCRIPTION: QMSS queues cleaned.  The steps taken to clean the
 *              queues differ based on whether the queue is part of
 *              the provided exclusion list.  Queues not in the
 *              exclusion list will be emptied of all descriptors.
 *              Queues in the exclusion list will be cleaned
 *              using the following process:
 *
 *              Queue cleanup process
 *              - Disable all PASS DMA TX channels that are owned by Linux
 *              - Pause QoS (if supported - not currently supported)
 *              - Wipe all QMSS queues that are DSP owned
 *              - Read QMSS memory region registers for Linux inserted regions
 *                to get addresses associated with Linux pushed descriptors
 *              - For each queue that may be used by Linux
 *                  - Allocate a scratch queue and a cleanup queue from the
 *                    list of wiped QMSS queues
 *                  - Divert all descriptors in Linux owned queue to scratch queue
 *                  - Pop descriptors off scratch queue
 *                      - Discard if descriptor not in Linux memory region
 *                      - Push onto cleanup queue if descriptor is in Linux memory region
 *                  - Divert descriptors in cleanup queue back to original queue
 *              - Enable PASS DMA TX channels
 */
static int32_t cleanQmssQueues(Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    Fm_ExclusionParams exclusionParams;
    uint8_t            isAllocated;
    int32_t            i, j;
    Qmss_QueueHnd      scratchQ = NULL;
    Qmss_QueueHnd      cleanQ = NULL;
    uint32_t           desc;
    uint32_t           memRegStart;
    uint32_t           memRegEnd;
    Fm_Result          retVal = FM_FAULT_CLEANUP_OK;

    /* Stop flow of descriptors while cleaning QM */
    linuxTxDmaDisable(excludedResList, listSize);

    /* Cleanup QMSS queues */
    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList  = excludedResList;
    exclusionParams.numListEntries = listSize;

    /* Empty all queues not owned by Linux first */
    exclusionParams.resType = Fm_res_QmssQueue;
    for (i = 0; i < QMSS_MAX_QUEUES; i++) {
        exclusionParams.resourceNum = i;
        if (!fmExclusionIsExcluded(&exclusionParams)) {
            Qmss_queueEmpty((Qmss_QueueHnd) i);
        }
    }

    /* Clean all Linux-owned queues of DSP-based descriptors second */

    /* Allocate a scratchQ for temporarily storing the descriptor contents of
     * a queue being swept of DSP descriptors */
    memset(&invalidQs[0], 0, sizeof(invalidQs));
    i = 0;
    while (scratchQ == NULL) {
        scratchQ = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
        exclusionParams.resourceNum = scratchQ;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            /* Store the queues that can't be used until after both the
             * scratchQ and cleanQ have been found. */
            invalidQs[i++] = scratchQ;
            scratchQ = NULL;
        }
    }
    /* Allocate a cleanQ to temporarily store the linux-based descriptors
     * filtered from a Linux-based queue. */
    while (cleanQ == NULL) {
        cleanQ = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
        exclusionParams.resourceNum = cleanQ;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            /* Store the queues that can't be used until after both the
             * scratchQ and cleanQ have been found. */
            invalidQs[i++] = cleanQ;
            cleanQ = NULL;
        }
    }
    /* Free any invalidQs */
    for (j = 0; j < i; j++) {
        Qmss_queueClose(invalidQs[j]);
    }

    /* Need to check allocated queue against those owned by linux */
    for (i = 0; i < QMSS_MAX_QUEUES; i++) {
        exclusionParams.resType     = Fm_res_QmssQueue;
        exclusionParams.resourceNum = i;
        if (fmExclusionIsExcluded(&exclusionParams)) {
            /* In K2 will need to open twice the scratch and clean queues (one from each QM) */
            Qmss_queueDivert(i, scratchQ, Qmss_Location_TAIL);

            /* Pop each descriptor from the queue being swept.  If the
             * descriptor lies within the range of a Linux-owned memory region
             * it is saved via a push onto a clean Q.  Otherwise, the
             * descriptor is dropped. */
            while ((desc = (uint32_t) Qmss_queuePop(scratchQ)) != 0) {
                for (j = 0; j < QMSS_MAX_MEM_REGIONS; j++) {
                    if ((memRegStart = getMemoryRegionBaseAddr((Qmss_MemRegion) j)) != 0) {
                        memRegEnd = memRegStart + getMemoryRegionDescBlockSize((Qmss_MemRegion) j);
                        if ((desc >= memRegStart) && (desc < memRegEnd)) {
                            /* Save descriptor */
                            Qmss_queuePushDesc(cleanQ, (void *) desc);
                            break;
                        }
                    }
                }
            }

            /* Move Linux-based descriptors in the cleanQ back into the
             * original queue.  The descriptors are diverted to the HEAD in
             * case any descriptors were pushed into the original queue during
             * the sweep process.  Pushing to HEAD will guarantee the original
             * descriptors are handled prior to the new ones */
            Qmss_queueDivert(cleanQ, i, Qmss_Location_HEAD);
        }
    }

    /* Restart flow of descriptors */
    linuxTxDmaEnable(excludedResList, listSize);

    /* Leave DMA handles hanging since the DMA can't be closed without closing
     * all the channels.  Channels can't be closed without wiping Linux
     * configuration */
    return (retVal);
}
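/* Illustrative sketch (hypothetical queue numbers; assumes Fm_ExcludedResource
 * carries a resource type and resource number, following this library's
 * exclusion API): protecting a pair of Linux-owned queues while sweeping
 * everything else:
 *
 *   Fm_ExcludedResource exList[2];
 *
 *   exList[0].resType     = Fm_res_QmssQueue;
 *   exList[0].resourceNum = 8704;            // e.g. a Linux rx queue
 *   exList[1].resType     = Fm_res_QmssQueue;
 *   exList[1].resourceNum = 8705;
 *   cleanQmssQueues(&exList[0], 2);
 *
 * Queues 8704-8705 are swept of DSP descriptors only; all other queues are
 * wiped outright.
 */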
/* FUNCTION PURPOSE: Resets PA PDSPs and LUTs
 ***********************************************************************
 * DESCRIPTION: Resets PA PDSPs and LUTs based on the provided
 *              exclusion list.
 *
 *              TAKEN FROM pa.c (Pa_resetControl).  Replace this code
 *              with a call to modified Pa_resetControl when it doesn't
 *              automatically reset all PDSPs.  Until then use this
 *              modified function
 *
 *              To reset portions of PA used by DSP
 *              - Disable DSP owned PDSPs
 *              - Clear LUT2 if completely owned by DSP
 *              - Redownload DSP owned PDSPs
 *              - Reenable DSP owned PDSPs
 */
static paReturn_t resetPaPdspsAndLuts(Pa_Handle paHandle, uint32_t numPdsps,
                                      Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    CSL_Pa_ssRegs     *passRegs;
    Fm_ExclusionParams exclusionParams;
    uint32_t           i;
    uint32_t           resetLut2 = FM_TRUE;
    paReturn_t         paRet = pa_OK;

    if (listSize) {
        /* A present exclusion list signifies another core (typically ARM
         * Linux) is in control of PA.  As a result, the firmware download and
         * reset must be selective */

        /* Initialize the exclusion parameters */
        memset(&exclusionParams, 0, sizeof(exclusionParams));
        exclusionParams.exclusionList  = excludedResList;
        exclusionParams.numListEntries = listSize;

        passRegs = (CSL_Pa_ssRegs *) CSL_PA_SS_CFG_REGS;

        /* Put each of the PDSPs into reset (PC = 0) */
        exclusionParams.resType = Fm_res_PaPdsp;
        for (i = 0; i < numPdsps; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                passRegs->PDSP_CTLSTAT[i].PDSP_CONTROL = 0;
            }
        }

        /* Reset LUT2 if applicable */
        for (i = 0; i < listSize; i++) {
            if ((excludedResList[i].resType == Fm_res_PaLutEntry) &&
                (excludedResList[i].exResInfo == 2)) {
                resetLut2 = FM_FALSE;
                break;
            }
        }
        if (resetLut2) {
            passRegs->LUT2.LUT2_SOFT_RESET = 1;
        }

        exclusionParams.resType = Fm_res_PaPdsp;
        /* PDSPs 0-2 use image c1 */
        for (i = 0; i < 3; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                paRet = Pa_downloadImage (paHandle, i, (Ptr) c1, c1Size);
                if (paRet != pa_OK) {
                    goto errorExit;
                }
            }
        }
        /* PDSP 3 uses image c2 */
        exclusionParams.resourceNum = 3;
        if (!fmExclusionIsExcluded(&exclusionParams)) {
            paRet = Pa_downloadImage (paHandle, 3, (Ptr) c2, c2Size);
            if (paRet != pa_OK) {
                goto errorExit;
            }
        }
        /* PDSPs 4-5 use image m */
        for (i = 4; i < numPdsps; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                paRet = Pa_downloadImage (paHandle, i, (Ptr) m, mSize);
                if (paRet != pa_OK) {
                    goto errorExit;
                }
            }
        }

        /* Should be able to use PA's PDSP enable API since an active PDSP
         * will not be modified */
        paRet = Pa_resetControl(paHandle, pa_STATE_ENABLE);
        if (paRet == pa_STATE_ENABLE) {
            paRet = pa_OK;
        }
    }
    else {
        /* Perform full firmware download and reset since another core is not
         * in control of PA */
        Pa_resetControl (paHandle, pa_STATE_RESET);

        /* PDSPs 0-2 use image c1 */
        for (i = 0; i < 3; i++)
            Pa_downloadImage (paHandle, i, (Ptr) c1, c1Size);

        /* PDSP 3 uses image c2 */
        Pa_downloadImage (paHandle, 3, (Ptr) c2, c2Size);

        /* PDSPs 4-5 use image m */
        for (i = 4; i < 6; i++)
            Pa_downloadImage (paHandle, i, (Ptr) m, mSize);

        paRet = Pa_resetControl (paHandle, pa_STATE_ENABLE);
        if (paRet == pa_STATE_ENABLE) {
            paRet = pa_OK;
        }
    }

errorExit:
    return (paRet);
}

/* FUNCTION PURPOSE: Check if NetCp (PA & SA) subsystem is powered up
 ***************************************************************************************
 * DESCRIPTION: This function checks the power status of the NetCp (PA & SA)
 *              subsystem domains
 */
static uint32_t isNetCpPoweredUp (void)
{
    /* Get peripheral PSC status */
    if ((CSL_PSC_getPowerDomainState(CSL_PSC_PD_PASS) == PSC_PDSTATE_ON)       &&
        (CSL_PSC_getModuleState (CSL_PSC_LPSC_PKTPROC) == PSC_MODSTATE_ENABLE) &&
        (CSL_PSC_getModuleState (CSL_PSC_LPSC_CPGMAC) == PSC_MODSTATE_ENABLE)  &&
        (CSL_PSC_getModuleState (CSL_PSC_LPSC_Crypto) == PSC_MODSTATE_ENABLE)) {
        /* On */
        return (FM_TRUE);
    }
    else {
        return (FM_FALSE);
    }
}
/* FUNCTION PURPOSE: Resets and Initializes PA
 ***********************************************************************
 * DESCRIPTION: Resets and initializes PA.  Certain steps will be
 *              skipped if an exclusion list is provided.  This
 *              signifies another entity (typically ARM Linux) has
 *              already setup portions of PA which should not be reset
 */
static Pa_Handle resetAndInitPa(uint32_t numPdsps, Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    paSizeInfo_t paSize;
    paConfig_t   paCfg;
    paReturn_t   paRet;
    int          bufSizes[pa_N_BUFS];
    int          bufAligns[pa_N_BUFS];
    void        *bufBases[pa_N_BUFS];
    Pa_Handle    paHandle = NULL;

    /* Stop flow of descriptors while resetting the PA PDSPs */
    linuxTxDmaDisable(excludedResList, listSize);
    /* Delay so PDSPs can finish processing any queued descriptors (commands) */
    cycleDelay(5000, FM_FALSE);

    /* The maximum number of handles is set by PA_MAX_NUM_L2_HANDLES for L2
     * and PA_MAX_NUM_L3_HANDLES for L3. */
    memset(&paSize, 0, sizeof(paSizeInfo_t));
    memset(&paCfg, 0, sizeof(paConfig_t));
    memset(bufBases, 0, sizeof(bufBases));
    paSize.nMaxL2    = PA_MAX_NUM_L2_HANDLES;
    paSize.nMaxL3    = PA_MAX_NUM_L3_HANDLES;
    paSize.nUsrStats = 0;
    paSize.nVlnkMax  = 0;

    paRet = Pa_getBufferReq(&paSize, bufSizes, bufAligns);
    if (paRet != pa_OK) {
        goto errorExit;
    }

    /* The first buffer is used as the instance buffer */
    if (((Uint32) paMemPaInst & (bufAligns[0] - 1)) || (sizeof(paMemPaInst) < bufSizes[0])) {
        goto errorExit;
    }
    bufBases[0] = (void *) paMemPaInst;

    /* The second buffer is the L2 table */
    if (((Uint32) paMemL2Ram & (bufAligns[1] - 1)) || (sizeof(paMemL2Ram) < bufSizes[1])) {
        goto errorExit;
    }
    bufBases[1] = (void *) paMemL2Ram;

    /* The third buffer is the L3 table */
    if (((Uint32) paMemL3Ram & (bufAligns[2] - 1)) || (sizeof(paMemL3Ram) < bufSizes[2])) {
        goto errorExit;
    }
    bufBases[2] = (void *) paMemL3Ram;

    paCfg.initTable = TRUE;
    if (excludedResList) {
        /* Don't disturb default routes configured by another core */
        paCfg.initDefaultRoute = FALSE;
    }
    else {
        paCfg.initDefaultRoute = TRUE;
    }
    paCfg.baseAddr = CSL_PA_SS_CFG_REGS;
    paCfg.sizeCfg  = &paSize;

    paRet = Pa_create(&paCfg, bufBases, &paHandle);
    if (paRet != pa_OK) {
        goto errorExit;
    }
    /* Reset the portions of PA that are used by the DSP.  Avoid resetting
     * anything used by Linux */
    paRet = resetPaPdspsAndLuts(paHandle, numPdsps, excludedResList, listSize);
    if (paRet != pa_OK) {
        paHandle = NULL;
    }

    /* Restart DMAs */
    linuxTxDmaEnable(excludedResList, listSize);

errorExit:
    return (paHandle);
}

/* FUNCTION PURPOSE: Resets the PA Global Config
 ***********************************************************************
 * DESCRIPTION: The PASS global configuration stored in the PA
 *              scratch memory is reset to default values
 */
static Fm_Result paSetDefaultGblCfg(Pa_Handle passHandle, Qmss_QueueHnd cmdRespQ,
                                    Qmss_QueueHnd freeQ, int16_t flowId)
{
    paSysConfig_t paDefGlobalCfg;
    Cppi_Desc    *monolithicDesc;
    paCmdReply_t  paReply;
    uint8_t      *descBuf;
    uint32_t      cmdLen;
    int           cmdDest;
    uint32_t      psCmd;
    tempPaCmd    *paRespCmd;
    uint32_t      paRespCmdLen;
    paReturn_t    paRet;
    Fm_Result     retVal = FM_FAULT_CLEANUP_OK;
    paCtrlInfo_t  ctrlInfo;

    /* Set the default values, taken from pa.h */
    paProtocolLimit_t paDefProtocolLimit = {
        pa_PROTOCOL_LIMIT_NUM_VLANS_DEF,  /* Number of VLANs */
        pa_PROTOCOL_LIMIT_NUM_IP_DEF,     /* Number of IPs */
        pa_PROTOCOL_LIMIT_NUM_GRE_DEF     /* Number of GREs */
    };
    paCmdSetConfig_t paDefCmdSetCfg = {
        64  /* Number of command sets */
    };
    paUsrStatsConfig_t paDefUsrStatsCfg = {
        (pa_USR_STATS_MAX_COUNTERS - pa_USR_STATS_MAX_COUNTERS),        /* Number of user stats */
        (pa_USR_STATS_MAX_64B_COUNTERS - pa_USR_STATS_MAX_64B_COUNTERS) /* Number of 64-bit user stats */
    };
    paQueueDivertConfig_t paDefQueueDivertCfg = {
        0,  /* Monitoring Queue */
        0   /* flow Id */
    };
    paPacketControlConfig_t paDefPktCtrlCfg = {
        pa_PKT_CTRL_HDR_VERIFY_IP,  /* ctrlBitMap */
        0,                          /* rxPaddingErrStatsIndex */
        0                           /* txPaddingStatsIndex */
    };
    paIpReassmConfig_t paDefReassmConfig = {
        0,  /* numTrafficFlow */
        0,  /* destFlowId */
        0   /* destQueue */
    };
    paIpsecNatTConfig_t paDefNatTCfg = {
        0,  /* ctrlBitMap */
        0   /* UDP port number */
    };

    paDefGlobalCfg.pCmdSetConfig      = &paDefCmdSetCfg;
    paDefGlobalCfg.pInIpReassmConfig  = &paDefReassmConfig;
    paDefGlobalCfg.pOutIpReassmConfig = &paDefReassmConfig;
    paDefGlobalCfg.pPktControl        = &paDefPktCtrlCfg;
    paDefGlobalCfg.pProtoLimit        = &paDefProtocolLimit;
    paDefGlobalCfg.pQueueDivertConfig = &paDefQueueDivertCfg;
    paDefGlobalCfg.pUsrStatsConfig    = &paDefUsrStatsCfg;

    memset(&paReply, 0, sizeof(paReply));
    paReply.dest   = pa_DEST_HOST;
    paReply.queue  = Qmss_getQIDFromHandle(cmdRespQ);
    paReply.flowId = flowId;

    /* Set system global default configuration */
    ctrlInfo.code          = pa_CONTROL_SYS_CONFIG;
    ctrlInfo.params.sysCfg = paDefGlobalCfg;

    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(freeQ));
    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc, &descBuf, &cmdLen);
    cmdLen = SIZE_MONO_DESC - Cppi_getDataOffset(Cppi_DescType_MONOLITHIC, monolithicDesc);
    paRet = Pa_control(passHandle, &ctrlInfo, (paCmd_t) descBuf, (uint16_t *) &cmdLen, &paReply, &cmdDest);
    if (paRet != pa_OK) {
        if (paRet == pa_INSUFFICIENT_CMD_BUFFER_SIZE) {
            retVal = FM_ERROR_DESC_BUF_TOO_SMALL;
        }
        else {
            retVal = FM_ERROR_PASS_SETTING_DEF_GLBL_CMD;
        }
        goto cleanupQueue;
    }
    psCmd = PASAHO_PACFG_CMD;
    Cppi_setPSData (Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t *) &psCmd, 4);
    Cppi_setPacketLen(Cppi_DescType_MONOLITHIC, monolithicDesc, cmdLen);
    Qmss_queuePushDescSize(paTxQs[cmdDest - pa_CMD_TX_DEST_0], (uint32_t *) monolithicDesc, SIZE_MONO_DESC);

    /* Wait for response from PA */
    while (Qmss_getQueueEntryCount(cmdRespQ) == 0) {}
    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(cmdRespQ));
    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t **) &paRespCmd, &paRespCmdLen);
    if (paRespCmd->commandResult) {
        retVal = FM_ERROR_PASS_GBL_DEF_CFG_NOT_SET;
        goto cleanupQueue;
    }
    Qmss_queuePushDescSize(freeQ, monolithicDesc, SIZE_MONO_DESC);

    /* Set nat-t global default configuration */
    ctrlInfo.code                              = pa_CONTROL_IPSEC_NAT_T_CONFIG;
    ctrlInfo.params.ipsecNatTDetCfg            = paDefNatTCfg;
    ctrlInfo.params.ipsecNatTDetCfg.ctrlBitMap = 0;

    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(freeQ));
    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc, &descBuf, &cmdLen);
    cmdLen = SIZE_MONO_DESC - Cppi_getDataOffset(Cppi_DescType_MONOLITHIC, monolithicDesc);
    paRet = Pa_control(passHandle, &ctrlInfo, (paCmd_t) descBuf, (uint16_t *) &cmdLen, &paReply, &cmdDest);
    if (paRet != pa_OK) {
        if (paRet == pa_INSUFFICIENT_CMD_BUFFER_SIZE) {
            retVal = FM_ERROR_DESC_BUF_TOO_SMALL;
        }
        else {
            retVal = FM_ERROR_PASS_SETTING_DEF_NATT_CMD;
        }
        goto cleanupQueue;
    }
    psCmd = PASAHO_PACFG_CMD;
    Cppi_setPSData (Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t *) &psCmd, 4);
    Cppi_setPacketLen(Cppi_DescType_MONOLITHIC, monolithicDesc, cmdLen);
    Qmss_queuePushDescSize(paTxQs[cmdDest - pa_CMD_TX_DEST_0], (uint32_t *) monolithicDesc, SIZE_MONO_DESC);

    /* Wait for response from PA */
    while (Qmss_getQueueEntryCount(cmdRespQ) == 0) {}
    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(cmdRespQ));
    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t **) &paRespCmd, &paRespCmdLen);
    if (paRespCmd->commandResult) {
        retVal = FM_ERROR_PASS_NATT_DEF_CFG_NOT_SET;
    }

cleanupQueue:
    Qmss_queuePushDescSize(freeQ, monolithicDesc, SIZE_MONO_DESC);

    return (retVal);
}

/* FUNCTION PURPOSE: Powers down a peripheral
 ***********************************************************************
 * DESCRIPTION: Powers down the peripheral for a given power
 *              domain number if the peripheral is currently on.
 */
static void periphPowerDown(uint32_t pwrDmnNum)
{
    if ((CSL_PSC_getPowerDomainState(pwrDmnNum) == PSC_PDSTATE_ON)) {
        /* Peripheral is ON.  Power it OFF */

        /* Wait for any previous transitions to complete */
        while (!CSL_PSC_isStateTransitionDone (pwrDmnNum));
        /* Write switch input into the corresponding PDCTL register */
        CSL_PSC_disablePowerDomain (pwrDmnNum);
        /* Write PTCMD to start the transition */
        CSL_PSC_startStateTransition (pwrDmnNum);
        /* Wait for the transition to complete */
        while (!CSL_PSC_isStateTransitionDone (pwrDmnNum));
    }
}
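/* Illustrative sketch (assumption: standard KeyStone CSL PSC API): powering a
 * domain back up mirrors periphPowerDown() with the enable calls, plus a
 * module state change for the domain's LPSC (lpscNum is hypothetical here):
 *
 *   while (!CSL_PSC_isStateTransitionDone (pwrDmnNum));
 *   CSL_PSC_enablePowerDomain (pwrDmnNum);
 *   CSL_PSC_setModuleNextState (lpscNum, PSC_MODSTATE_ENABLE);
 *   CSL_PSC_startStateTransition (pwrDmnNum);
 *   while (!CSL_PSC_isStateTransitionDone (pwrDmnNum));
 *
 * This file only powers peripherals down; bringing them back up is left to
 * the application after recovery.
 */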
/**********************************************************************
 **************************** Cleanup APIs ****************************
 **********************************************************************/

/* FUNCTION PURPOSE: Cleanup peripheral init code
 ***********************************************************************
 * DESCRIPTION: Initializes some peripherals via their LLDs so that
 *              API calls used to reset their peripheral resources
 *              will succeed.  QMSS init, CPPI init, etc.
 */
Fm_Result fmCleanupInit(uint32_t fullInit)
{
    Qmss_InitCfg qmssInitCfg;
    Qmss_Result  qmssResult;
    uint32_t     heapSize;
    Cppi_InitCfg cppiInitCfg;
    Cppi_Result  cppiResult;
    Fm_Result    retVal = FM_FAULT_CLEANUP_OK;

    Fault_Mgmt_osalLog("Fault Cleanup: LLD Initialization\n");

    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_INITIALIZATION;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Init TSCL */
    cycleDelay(0, FM_TRUE);

    /* Init QMSS */
    memset ((void *) &qmssInitCfg, 0, sizeof(qmssInitCfg));
    if (fullInit) {
        /* Set up the linking RAM.  Use internal Linking RAM. */
        qmssInitCfg.linkingRAM0Base = 0;
        qmssInitCfg.linkingRAM0Size = 0;
        qmssInitCfg.linkingRAM1Base = 0x0;
        qmssInitCfg.maxDescNum      = 0x3fff;
    }
    else {
        qmssInitCfg.qmssHwStatus = QMSS_HW_INIT_COMPLETE;
    }
    qmssInitCfg.maxDescNum = NUM_MONO_DESC;

    qmssResult = Qmss_init(&qmssInitCfg, &qmssGblCfgParams);
    if (qmssResult != QMSS_SOK) {
        Fault_Mgmt_osalLog("Fault Cleanup: Failed to Init QMSS LLD\n");
        retVal = FM_ERROR_QMSS_INIT_FAILED;
        goto errorExit;
    }

    qmssResult = Qmss_start ();
    if (qmssResult != QMSS_SOK) {
        Fault_Mgmt_osalLog("Fault Cleanup: Failed to Start QMSS LLD\n");
        retVal = FM_ERROR_QMSS_INIT_FAILED;
        goto errorExit;
    }

    /* Init CPPI */
    cppiResult = Cppi_getHeapReq(cppiGblCfgParams, &heapSize);
    cppiInitCfg.heapParams.staticHeapBase       = &tempCppiHeap[0];
    cppiInitCfg.heapParams.staticHeapSize       = heapSize;
    cppiInitCfg.heapParams.heapAlignPow2        = 8;
    cppiInitCfg.heapParams.dynamicHeapBlockSize = -1;
    cppiResult = Cppi_initCfg(cppiGblCfgParams, &cppiInitCfg);
    if (cppiResult != CPPI_SOK) {
        Fault_Mgmt_osalLog("Fault Cleanup: Failed to Init CPPI LLD\n");
        retVal = FM_ERROR_CPPI_INIT_FAILED;
        goto errorExit;
    }

    initComplete = FM_TRUE;

errorExit:
    return (retVal);
}

/* FUNCTION PURPOSE: Resets CPPI peripheral resources
 ***********************************************************************
 * DESCRIPTION: Resets CPPI peripheral resources to their PoR
 *              state.  Resources in the exclusion list will
 *              not be reset.
 */
Fm_Result fmCleanCppi(Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    Fm_ExclusionParams exclusionParams;
    Cppi_CpDmaInitCfg  dmaCfg;
    Cppi_Handle        cppiHandle;
    int32_t            i, j;

    if (!initComplete) {
        return (FM_ERROR_CLEANUP_INIT_NOT_COMPLETE);
    }

    Fault_Mgmt_osalLog("Fault Cleanup: CPPI\n");

    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_CPDMA;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* CPPI PoR reset process - Reset all DMA rx/tx channels and flows except
     * those owned by Linux */
    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList  = excludedResList;
    exclusionParams.numListEntries = listSize;

    /* Reset CPPI channels and flows */
    for (i = 0; i < CPPI_MAX_CPDMA; i++) {
        if (fmIsWirelessPeriphPoweredOnForCpdma((Cppi_CpDma) i)) {
            memset ((void *) &dmaCfg, 0, sizeof(dmaCfg));
            dmaCfg.dmaNum = (Cppi_CpDma) i;
            if ((cppiHandle = Cppi_open(&dmaCfg)) != NULL) {
                exclusionParams.u.cpdmaParams.dma = dmaCfg.dmaNum;

                exclusionParams.resType = Fm_res_CpdmaRxCh;
                for (j = 0; j < fmGetDmaMaxRxCh(dmaCfg.dmaNum); j++) {
                    exclusionParams.resourceNum = j;
                    if (!fmExclusionIsExcluded(&exclusionParams)) {
                        resetDmaCh(cppiHandle, i, j, Fm_res_CpdmaRxCh);
                    }
                }

                exclusionParams.resType = Fm_res_CpdmaTxCh;
                for (j = 0; j < fmGetDmaMaxTxCh(dmaCfg.dmaNum); j++) {
                    exclusionParams.resourceNum = j;
                    if (!fmExclusionIsExcluded(&exclusionParams)) {
                        resetDmaCh(cppiHandle, i, j, Fm_res_CpdmaTxCh);
                    }
                }

                exclusionParams.resType = Fm_res_CpdmaRxFlow;
                for (j = 0; j < getDmaMaxRxFlow(dmaCfg.dmaNum); j++) {
                    exclusionParams.resourceNum = j;
                    if (!fmExclusionIsExcluded(&exclusionParams)) {
                        resetDmaCh(cppiHandle, i, j, Fm_res_CpdmaRxFlow);
                    }
                }
            }
            else {
                Fault_Mgmt_osalLog("Fault Cleanup: Failed to open CPDMA with index %d\n", i);
            }
        }
    }

    return (FM_FAULT_CLEANUP_OK);
}
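/* Illustrative sketch (assumed recovery order, inferred from the NOTEs on the
 * APIs below - the actual order is defined by the caller, not by this file):
 * a host-triggered cleanup typically runs
 *
 *   fmCleanupInit(FM_TRUE);                              // LLD init first
 *   fmCleanQmssAccum(&fmGblCfgParams, exList, numEx);    // before queue clean
 *   fmCleanQmssQueue(&fmGblCfgParams, exList, numEx);    // after accum disable
 *   fmCleanCppi(exList, numEx);
 *   fmCleanPa(&fmGblCfgParams, exList, numEx);
 *   fmCleanSa(exList, numEx);
 *   ... remaining peripheral cleanups (CIC, timers, AIF2, etc.) ...
 *
 * where exList/numEx describe resources still owned by a live host (for
 * example ARM Linux) and fmGblCfgParams is the device's global configuration.
 */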
/* FUNCTION PURPOSE: Resets QMSS accumulator peripheral resources
 ***********************************************************************
 * DESCRIPTION: Resets QMSS accumulator peripheral resources to
 *              their PoR state.  Resources in the exclusion list will
 *              not be reset.
 *
 *              NOTE: This API should be called before queues
 *              are cleaned.
 */
Fm_Result fmCleanQmssAccum(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    Qmss_Result            qmssResult;
    int32_t                i;
    Fm_ExclusionParams     exclusionParams;
    Qmss_IntdInterruptType intdType;

    if (!initComplete) {
        return (FM_ERROR_CLEANUP_INIT_NOT_COMPLETE);
    }

    Fault_Mgmt_osalLog("Fault Cleanup: QMSS Accumulator\n");

    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_QMSS_ACCUM;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* QMSS cleanup - Disable all the accumulator channels except those owned by Linux */
    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList  = excludedResList;
    exclusionParams.numListEntries = listSize;

    if (listSize) {
        /* Only clean up accumulator channels if another processor is
         * maintaining the QM PDSPs.  Another processor is in play if the
         * exclusion list is populated */
        exclusionParams.resType = Fm_res_QmssAccumCh;
        for (i = 0; i < fmGblCfgParams->maxQmssAccumCh; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                /* Clear channel's pending interrupts */
                if ((i >= fmGblCfgParams->highAccum.start) &&
                    (i <= fmGblCfgParams->highAccum.end)) {
                    intdType = Qmss_IntdInterruptType_HIGH;
                }
                else if ((i >= fmGblCfgParams->loAccum.start) &&
                         (i <= fmGblCfgParams->loAccum.end)) {
                    intdType = Qmss_IntdInterruptType_LOW;
                }
                else {
                    return (FM_ERROR_INVALID_ACCUM_CH);
                }

                while (qmssGblCfgParams.qmQueIntdReg->INTCNT_REG[i]) {
                    Qmss_ackInterrupt(i, 1);
                    Qmss_setEoiVector(intdType, i);
                }

                qmssResult = disableAccumChWithTimeout(Qmss_PdspId_PDSP1, i);
                if (qmssResult < 0) {
                    Fault_Mgmt_osalLog("Failed to disable PDSP1 accum ch %d with err %d\n", i, qmssResult);
                }
            }
        }
    }

    return (FM_FAULT_CLEANUP_OK);
}

/* FUNCTION PURPOSE: Resets QMSS peripheral queue resources
 ***********************************************************************
 * DESCRIPTION: Resets QMSS peripheral queue resources to their PoR
 *              state.  Resources in the exclusion list will
 *              not be reset.
 *
 *              NOTE: This API should be called after accumulator
 *              channels are disabled.
 */
Fm_Result fmCleanQmssQueue(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    int32_t            i;
    Fm_ExclusionParams exclusionParams;

    if (!initComplete) {
        return (FM_ERROR_CLEANUP_INIT_NOT_COMPLETE);
    }

    Fault_Mgmt_osalLog("Fault Cleanup: QMSS Queues\n");

    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_QMSS_QUEUE;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* QMSS cleanup
     * - Don't touch QoS clusters (using different firmware downloaded by Linux kernel)
     * - Sweep QMSS queues of all DSP-based descriptors
     * - Clear all memory region registers not inserted by Linux */
    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList  = excludedResList;
    exclusionParams.numListEntries = listSize;

    /* Cleanup the QMSS queues of DSP-based descriptors */
    cleanQmssQueues(excludedResList, listSize);

    exclusionParams.resType = Fm_res_QmssMemRegion;
    for (i = 0; i < QMSS_MAX_MEM_REGIONS; i++) {
        exclusionParams.resourceNum = i;
        if (!fmExclusionIsExcluded(&exclusionParams)) {
            expressResetMemoryRegion((Qmss_MemRegion) i);
        }
    }

    return (FM_FAULT_CLEANUP_OK);
}

/* FUNCTION PURPOSE: Resets PA peripheral resources
 ***********************************************************************
 * DESCRIPTION: Resets PA peripheral resources to their PoR
 *              state.  Resources in the exclusion list will
 *              not be reset.
 */
Fm_Result fmCleanPa(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList, uint32_t listSize)
{
    Pa_Handle          passHandle;
    Qmss_MemRegInfo    memInfo;
    Cppi_CpDmaInitCfg  dmaCfg;
    Cppi_TxChInitCfg   txChCfg;
    Cppi_RxChInitCfg   rxChCfg;
    Cppi_RxFlowCfg     rxFlowCfg;
    Cppi_Handle        cppiHandle;
    Cppi_ChHnd         paTxCh;
    Cppi_ChHnd         paRxCh;
    Cppi_FlowHnd       rxFlowHnd = NULL;
    Qmss_QueueHnd      freeQ = NULL;
    Qmss_QueueHnd      cmdRespQ = NULL;
    Cppi_DescCfg       descCfg;
    Qmss_Queue         queInfo;
    uint8_t            isAllocated;
    uint32_t           numAllocated;
    int32_t            i, j;
    Fm_ExclusionParams exclusionParams;
    Cppi_Desc         *monolithicDesc;
    paCmdReply_t       paReply;
    tempPaCmd          paDelCmd;
    tempPaCmd         *paRespCmd;
    uint32_t           paRespCmdLen;
    uint32_t           psCmd;
    Cppi_Result        cppiResult;
    Fm_Result          retVal = FM_FAULT_CLEANUP_OK;

    if (!initComplete) {
        retVal = FM_ERROR_CLEANUP_INIT_NOT_COMPLETE;
        goto errorExit;
    }

    Fault_Mgmt_osalLog("Fault Cleanup: PA\n");

    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_PA;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    /* Only cleanup if NetCp is on */
    if (isNetCpPoweredUp()) {
        /* Init PA and reset PDSPs 1-5 */
        if ((passHandle = resetAndInitPa(fmGblCfgParams->maxPaPdsps, excludedResList, listSize)) == NULL) {
            retVal = FM_ERROR_PA_INIT_FAILED;
            goto errorExit;
        }

        /* Allocate QMSS and CPPI resources needed to send commands to PA */

        /* Initialize the exclusion parameters */
        memset(&exclusionParams, 0, sizeof(exclusionParams));
        exclusionParams.exclusionList  = excludedResList;
        exclusionParams.numListEntries = listSize;

        /* Make sure descriptor can fit delete command */
        if ((SIZE_MONO_DESC - MONO_DESC_DATA_OFFSET) < sizeof(paDelCmd)) {
            retVal = FM_ERROR_DESC_BUF_TOO_SMALL;
            goto errorExit;
        }

        /* Setup memory region for monolithic descriptors */
        memset(&memInfo, 0, sizeof(memInfo));
        memset ((void *) monoDesc, 0, SIZE_MONO_DESC * NUM_MONO_DESC);
        memInfo.descBase       = (uint32_t *) l2_global_address ((uint32_t) monoDesc);
        memInfo.descSize       = SIZE_MONO_DESC;
        memInfo.descNum        = NUM_MONO_DESC;
        memInfo.manageDescFlag = Qmss_ManageDesc_MANAGE_DESCRIPTOR;
        memInfo.startIndex     = 0;
        /* Find a memory region not used by Linux */
        exclusionParams.resType = Fm_res_QmssMemRegion;
        for (i = 0; i < QMSS_MAX_MEM_REGIONS; i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                memInfo.memRegion = (Qmss_MemRegion) i;
                break;
            }
        }

        if (Qmss_insertMemoryRegion(&memInfo) < QMSS_SOK) {
            retVal = FM_ERROR_QMSS_INIT_FAILED_DURING_PA_RECOV;
            goto cleanupMemRegion;
        }

        /* Open queues required to send commands to PA:
         * - freeQ (GP) - contains unused descriptors
         * - cmdSendQ (NetCP TX) - used to send commands to PA
         * - cmdRespQ (GP) - used to receive command responses from PA */
        memset(&invalidQs[0], 0, sizeof(invalidQs));
        exclusionParams.resType = Fm_res_QmssQueue;
        i = 0;
        while (freeQ == NULL) {
            freeQ = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
            exclusionParams.resourceNum = Qmss_getQIDFromHandle(freeQ);
            if (fmExclusionIsExcluded(&exclusionParams)) {
                /* Store the queues that can't be used until after all queues have been found. */
                invalidQs[i++] = freeQ;
                freeQ = NULL;
            }
        }
        while (cmdRespQ == NULL) {
            cmdRespQ = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
            exclusionParams.resourceNum = Qmss_getQIDFromHandle(cmdRespQ);
            if (fmExclusionIsExcluded(&exclusionParams)) {
                /* Store the queues that can't be used until after all queues have been found. */
                invalidQs[i++] = cmdRespQ;
                cmdRespQ = NULL;
            }
        }
        /* Free any invalidQs */
        for (j = 0; j < i; j++) {
            Qmss_queueClose(invalidQs[j]);
        }

        /* Open all PASS tx queues.  It's okay if already opened by Linux.
         * Just need the interface to PA PDSPs */
        for (i = 0; i < PA_MAX_NUM_CPPI_TX_CH; i++) {
            paTxQs[i] = Qmss_queueOpen(Qmss_QueueType_PASS_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
        }

        /* Setup the descriptors for freeQ */
        memset(&descCfg, 0, sizeof(descCfg));
        descCfg.memRegion           = memInfo.memRegion;
        descCfg.descNum             = NUM_MONO_DESC;
        descCfg.destQueueNum        = Qmss_getQIDFromHandle(freeQ);
        descCfg.queueType           = Qmss_QueueType_STARVATION_COUNTER_QUEUE;
        descCfg.initDesc            = Cppi_InitDesc_INIT_DESCRIPTOR;
        descCfg.descType            = Cppi_DescType_MONOLITHIC;
        descCfg.epibPresent         = Cppi_EPIB_NO_EPIB_PRESENT;
        descCfg.cfg.mono.dataOffset = MONO_DESC_DATA_OFFSET;
        /* Descriptor should be recycled back to freeQ */
        queInfo = Qmss_getQueueNumber(freeQ);
        descCfg.returnQueue.qMgr = queInfo.qMgr;
        descCfg.returnQueue.qNum = queInfo.qNum;

        /* Initialize the descriptors and push to free Queue */
        if (Cppi_initDescriptor(&descCfg, &numAllocated) < CPPI_SOK) {
            retVal = FM_ERROR_CPPI_INIT_FAILED_DURING_PA_RECOV;
            goto cleanupResources;
        }

        /* Writeback changes to the monolithic descriptors */
        Fault_Mgmt_osalEndMemAccess(&monoDesc[0], sizeof(monoDesc));

        /* Open PASS DMA to send commands to and receive responses from PA */
        memset ((void *) &dmaCfg, 0, sizeof(dmaCfg));
        dmaCfg.dmaNum = Cppi_CpDma_PASS_CPDMA;
        cppiHandle = Cppi_open(&dmaCfg);

        /* Open PASS rxChs - Doesn't matter if already used by Linux */
        memset(&rxChCfg, 0, sizeof(rxChCfg));
        for (i = 0; i < fmGetDmaMaxRxCh(Cppi_CpDma_PASS_CPDMA); i++) {
            rxChCfg.channelNum = i;
            rxChCfg.rxEnable   = Cppi_ChState_CHANNEL_DISABLE;
            paRxCh = Cppi_rxChannelOpen(cppiHandle, &rxChCfg, &isAllocated);
            Cppi_channelEnable(paRxCh);
        }

        /* Open all cppi tx channels to go with the queues - don't need to save handle */
        for (i = 0; i < PA_MAX_NUM_CPPI_TX_CH; i++) {
            memset(&txChCfg, 0, sizeof(txChCfg));
            txChCfg.txEnable   = Cppi_ChState_CHANNEL_DISABLE;
            txChCfg.channelNum = Qmss_getQIDFromHandle(paTxQs[i]) - QMSS_PASS_QUEUE_BASE;
            paTxCh = Cppi_txChannelOpenWithHwCfg(cppiHandle, &txChCfg, &isAllocated, 0);
            Cppi_channelEnable(paTxCh);
        }

        /* Open a PASS rxFlow */
        memset(&rxFlowCfg, 0, sizeof(rxFlowCfg));
        exclusionParams.resType           = Fm_res_CpdmaRxFlow;
        exclusionParams.u.cpdmaParams.dma = Cppi_CpDma_PASS_CPDMA;
        for (i = 0; i < getDmaMaxRxFlow(Cppi_CpDma_PASS_CPDMA); i++) {
            exclusionParams.resourceNum = i;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                rxFlowCfg.flowIdNum = i;
                break;
            }
        }
        rxFlowCfg.rx_ps_location = Cppi_PSLoc_PS_IN_DESC;
        rxFlowCfg.rx_desc_type   = Cppi_DescType_MONOLITHIC;
        rxFlowCfg.rx_sop_offset  = MONO_DESC_DATA_OFFSET;
        queInfo = Qmss_getQueueNumber(freeQ);
        rxFlowCfg.rx_fdq0_sz0_qnum = queInfo.qNum;
        rxFlowCfg.rx_fdq0_sz0_qmgr = queInfo.qMgr;
        rxFlowHnd = Cppi_configureRxFlow(cppiHandle, &rxFlowCfg, &isAllocated);

        if (listSize) {
            /* A present exclusion list signifies another core (typically ARM
             * Linux) is in control of PA.  As a result, LUT1 entry deletion
             * must take place and be selective.  If listSize is 0 the
             * resetPaPdspsAndLuts function will have reset all PDSPs cleaning
             * out all LUT entries. */
            exclusionParams.resType             = Fm_res_PaLutEntry;
            exclusionParams.u.lutParams.lutInst = 1;
            for (i = 0; i < fmGblCfgParams->maxLut1Entries; i++) {
                exclusionParams.resourceNum = i;
                if (!fmExclusionIsExcluded(&exclusionParams)) {
                    memset(&paReply, 0, sizeof(paReply));
                    paReply.dest   = pa_DEST_HOST;
                    paReply.queue  = Qmss_getQIDFromHandle(cmdRespQ);
                    paReply.flowId = rxFlowCfg.flowIdNum;
                    pa_format_fcmd((void *) &paDelCmd, &paReply, (uint8_t) i);

                    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(freeQ));
                    Cppi_setData(Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t *) &paDelCmd, sizeof(paDelCmd));
                    psCmd = PASAHO_PACFG_CMD;
                    Cppi_setPSData (Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t *) &psCmd, 4);
                    Cppi_setPacketLen(Cppi_DescType_MONOLITHIC, monolithicDesc, sizeof(paDelCmd));

                    /* LUT entry delete commands sent to PDSP 0 (Queue 640) */
                    Qmss_queuePushDescSize(paTxQs[pa_CMD_TX_DEST_0], (uint32_t *) monolithicDesc, SIZE_MONO_DESC);

                    /* Wait for response from PA */
                    while (Qmss_getQueueEntryCount(cmdRespQ) == 0) {}
                    monolithicDesc = (Cppi_Desc *) QMSS_DESC_PTR(Qmss_queuePop(cmdRespQ));
                    Cppi_getData(Cppi_DescType_MONOLITHIC, monolithicDesc, (uint8_t **) &paRespCmd, &paRespCmdLen);
                    if (paRespCmd->commandResult) {
                        retVal = FM_ERROR_LUT1_INDEX_NOT_REMOVED;
                        Qmss_queuePushDescSize(freeQ, monolithicDesc, SIZE_MONO_DESC);
                        goto cleanupResources;
                    }
                    Qmss_queuePushDescSize(freeQ, monolithicDesc, SIZE_MONO_DESC);
                }
            }
        }

        if ((retVal = paSetDefaultGblCfg(passHandle, cmdRespQ, freeQ, rxFlowCfg.flowIdNum)) != FM_FAULT_CLEANUP_OK) {
            goto cleanupResources;
        }

cleanupResources:
        if ((cppiResult = Cppi_closeRxFlow(rxFlowHnd)) != CPPI_SOK) {
            Fault_Mgmt_osalLog("Failed to close PASS rx flow %d with err %d\n", rxFlowCfg.flowIdNum, cppiResult);
        }
        /* All descriptors should be in freeQ */
        Qmss_queueEmpty(freeQ);
        Qmss_queueEmpty(cmdRespQ);

cleanupMemRegion:
        expressResetMemoryRegion(memInfo.memRegion);
    }

errorExit:
    return (retVal);
}
Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus)); /* Only cleanup if context cache is enabled */ if ((pSaRegs->MMR.CMD_STATUS & CSL_CP_ACE_CMD_STATUS_CTXCACH_EN_MASK)) { /* Stop flow of descriptors while resetting SA */ linuxTxDmaDisable(excludedResList, listSize); /* Clear the security context cache - Allows IPsec tunnels to be recreated * from scratch after cleanup */ ctxCachCtrl = pSaRegs->MMR.CTXCACH_CTRL; ctxCachCtrl |= CSL_CP_ACE_CTXCACH_CTRL_CLR_CACHE_TABLE_MASK; pSaRegs->MMR.CTXCACH_CTRL = ctxCachCtrl; /* Wait for bit to clear for completion */ do { for (i = 0; i < 100; i++) { asm (" nop "); } } while (pSaRegs->MMR.CTXCACH_CTRL & CSL_CP_ACE_CTXCACH_CTRL_CLR_CACHE_TABLE_MASK); /* Restart DMAs */ linuxTxDmaEnable(excludedResList, listSize); } return (retVal); } /* FUNCTION PURPOSE: Resets Semaphore peripheral resources *********************************************************************** * DESCRIPTION: Resets Semaphore peripheral resources to their PoR * state. Resources in the exclusion list will * not be reset. */ Fm_Result fmCleanSemaphores(Fm_ExcludedResource *excludedResList, uint32_t listSize) { Fault_Mgmt_osalLog("Fault Cleanup: Semaphore\n"); /* Writeback status so that Host can view it */ fmCleanupStatus[0] = FM_STATUS_CLEANUP_SEMAPHORE; Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus)); /* Soft reset semaphores through the SEM_RST_RUN register */ CSL_FINS(hSEM->SEM_RST_RUN, SEM_SEM_RST_RUN_RESET, 1); return (FM_FAULT_CLEANUP_OK); } /* FUNCTION PURPOSE: Resets the CICs *********************************************************************** * DESCRIPTION: Clears a CIC of all system interrupt to host interrupt * mappings except those routed to Linux */ Fm_Result fmCleanCics(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList, uint32_t listSize) { int32_t i, j, k; volatile CSL_CPINTCRegs *regs; uint32_t numSysInt; uint32_t numHostInt; Fm_ExclusionParams exclusionParams; uint32_t entireCicDisable; Fm_Result retVal = FM_FAULT_CLEANUP_OK; Fault_Mgmt_osalLog("Fault Cleanup: CIC\n"); /* Writeback status so that Host can view it */ fmCleanupStatus[0] = FM_STATUS_CLEANUP_CIC; Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus)); memset(&exclusionParams, 0, sizeof(exclusionParams)); exclusionParams.exclusionList = excludedResList; exclusionParams.numListEntries = listSize; exclusionParams.resType = Fm_res_CicHostInt; for (i = 0; i < fmGblCfgParams->maxCic; i++) { exclusionParams.u.cicParams.cic = i; /* Get CIC params */ regs = fmGblCfgParams->cicParams[i].cicRegs; numSysInt = fmGblCfgParams->cicParams[i].maxNumSysInt; numHostInt = fmGblCfgParams->cicParams[i].maxNumHostInt; entireCicDisable = FM_TRUE; /* Unmap and disable all host interrupts not in exclusion list */ for (j = 0; j < numHostInt; j++) { exclusionParams.resourceNum = j; if (!fmExclusionIsExcluded(&exclusionParams)) { /* Disable the host interrupt */ regs->HINT_ENABLE_CLR_INDEX_REG = CSL_FMK(CPINTC_HINT_ENABLE_CLR_INDEX_REG_HINT_ENABLE_CLR_INDEX, j); for (k = 0; k < numSysInt; k++) { /* Clear system int channel map routed to host int */ if (regs->CH_MAP[k] == j) { regs->CH_MAP[k] = 0; /* Disable sys int since not routed anymore */ regs->ENABLE_CLR_INDEX_REG = CSL_FMK(CPINTC_ENABLE_CLR_INDEX_REG_ENABLE_CLR_INDEX, k); /* Clear any pending interrupts */ regs->STATUS_CLR_INDEX_REG = CSL_FMK(CPINTC_STATUS_CLR_INDEX_REG_STATUS_CLR_INDEX, k); } } } else { /* At least one host interrupt in this CIC is excluded. 
/* FUNCTION PURPOSE: Resets Timers
 ***********************************************************************
 * DESCRIPTION: Disables and resets all device timers except
 *              those used by Linux
 */
Fm_Result fmCleanTimers(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
                        uint32_t listSize)
{
    volatile CSL_TmrRegs *regs;
    int32_t               i;
    Uint32                tmpReg;
    Fm_ExclusionParams    exclusionParams;
    Fm_Result             retVal = FM_FAULT_CLEANUP_OK;

    Fault_Mgmt_osalLog("Fault Cleanup: Timers\n");

    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_TIMER;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList  = excludedResList;
    exclusionParams.numListEntries = listSize;
    exclusionParams.resType        = Fm_res_Timer;

    for (i = 0; i < fmGblCfgParams->maxTimers; i++) {
        exclusionParams.resourceNum = i;
        if (!fmExclusionIsExcluded(&exclusionParams)) {
            regs = fmGblCfgParams->timerParams[i].timerRegs;

            /* Disable the LOW and HIGH Timers. */
            tmpReg = regs->TCR;
            CSL_FINST(tmpReg, TMR_TCR_ENAMODE_LO, DISABLE);
            CSL_FINST(tmpReg, TMR_TCR_ENAMODE_HI, DISABLE);
            regs->TCR = tmpReg;

            /* Reset after disable */
            tmpReg = regs->TGCR;
            CSL_FINST(tmpReg, TMR_TGCR_TIMLORS, RESET_ON);
            CSL_FINST(tmpReg, TMR_TGCR_TIMHIRS, RESET_ON);
            regs->TGCR = tmpReg;
        }
    }

    return (retVal);
}

/* FUNCTION PURPOSE: Resets AIF2
 ***********************************************************************
 * DESCRIPTION: Resets the AIF2 peripheral and then powers it down
 */
Fm_Result fmCleanAif2(void)
{
    Fault_Mgmt_osalLog("Fault Cleanup: AIF2\n");

    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_AIF2;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    if (CSL_PSC_getPowerDomainState(CSL_PSC_PD_AI) == PSC_PDSTATE_ON) { /* On */
        memset(&locAifObj, 0, sizeof(locAifObj));
        AIF_resetAif(&locAifObj);

#if 0 /* K2 only - Add back when K2 support is added */
        /* Reset SERDES separately */
        /* Link 0 to Link 3 */
        CSL_AIF2SerdesShutdown(CSL_AIF2_SERDES_B8_CFG_REGS);
        /* Link 4 to Link 5 */
        CSL_AIF2SerdesShutdown(CSL_AIF2_SERDES_B4_CFG_REGS);
#endif
    }
    periphPowerDown(CSL_PSC_PD_AI);

    return (FM_FAULT_CLEANUP_OK);
}

/* FUNCTION PURPOSE: Resets TCP3D
 ***********************************************************************
 * DESCRIPTION: Powers down the TCP3D peripheral
 */
Fm_Result fmCleanTcp3d(void)
{
    Fault_Mgmt_osalLog("Fault Cleanup: TCP3D\n");

    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_TCP3D;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    periphPowerDown(CSL_PSC_PD_TCP3D_A);
    periphPowerDown(CSL_PSC_PD_TCP3D_B);

    return (FM_FAULT_CLEANUP_OK);
}

/* FUNCTION PURPOSE: Resets BCP
 ***********************************************************************
 * DESCRIPTION: Powers down the BCP peripheral
 */
Fm_Result fmCleanBcp(void)
{
    Fault_Mgmt_osalLog("Fault Cleanup: BCP\n");

    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_BCP;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    periphPowerDown(CSL_PSC_PD_BCP);

    return (FM_FAULT_CLEANUP_OK);
}

/* FUNCTION PURPOSE: Resets FFTC
 ***********************************************************************
 * DESCRIPTION: Powers down the FFTC peripheral
 */
Fm_Result fmCleanFftc(void)
{
    Fault_Mgmt_osalLog("Fault Cleanup: FFTC (A & B)\n");

    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_FFTC;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    periphPowerDown(CSL_PSC_PD_FFTC_AB);

    return (FM_FAULT_CLEANUP_OK);
}

/* FUNCTION PURPOSE: Resets VCP
 ***********************************************************************
 * DESCRIPTION: Powers down the VCP peripheral
 */
Fm_Result fmCleanVcp(void)
{
    Fault_Mgmt_osalLog("Fault Cleanup: VCP\n");

    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_VCP;
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    periphPowerDown(CSL_PSC_PD_PD_VCP_BCD);

    return (FM_FAULT_CLEANUP_OK);
}
#endif /* !(K2H && K2K && K2L && K2E) */
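/* The peripheral power-down routines above share one template: log the
 * step, publish progress through fmCleanupStatus[0] (written back so the
 * monitoring Host core can observe it), then gate the power domain.  A
 * sketch for a hypothetical peripheral XYZ follows; FM_STATUS_CLEANUP_XYZ
 * and CSL_PSC_PD_XYZ are placeholder names, not real FM/CSL symbols.
 * Illustrative only. */
#if 0
Fm_Result fmCleanXyz(void)
{
    Fault_Mgmt_osalLog("Fault Cleanup: XYZ\n");

    /* Writeback status so that Host can view it */
    fmCleanupStatus[0] = FM_STATUS_CLEANUP_XYZ;   /* placeholder status code */
    Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));

    periphPowerDown(CSL_PSC_PD_XYZ);              /* placeholder power domain */

    return (FM_FAULT_CLEANUP_OK);
}
#endif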
/* FUNCTION PURPOSE: Resets EDMA3 peripheral resources
 ***********************************************************************
 * DESCRIPTION: Resets EDMA3 peripheral resources to their PoR
 *              state.  Resources in the exclusion list will
 *              not be reset.
 */
Fm_Result fmCleanEdma3(Fm_GlobalConfigParams *fmGblCfgParams, Fm_ExcludedResource *excludedResList,
                       uint32_t listSize, uint32_t provideStatus)
{
    CSL_Edma3Handle    edmaCCModule;
    int32_t            i, j, k;
    CSL_Status         status;
    Fm_ExclusionParams exclusionParams;

    if (provideStatus) {
#if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
        Fault_Mgmt_osalLog("Fault Cleanup: EDMA3\n");

        /* Writeback status so that Host can view it */
        fmCleanupStatus[0] = FM_STATUS_CLEANUP_EDMA3;
        Fault_Mgmt_osalEndMemAccess(&fmCleanupStatus[0], sizeof(fmCleanupStatus));
#endif /* !(K2H && K2K && K2L && K2E) */
    }

    memset(&exclusionParams, 0, sizeof(exclusionParams));
    exclusionParams.exclusionList  = excludedResList;
    exclusionParams.numListEntries = listSize;

    for (i = 0; i < fmGblCfgParams->maxEdma3Cc; i++) {
        /* Module Level Open */
        memset((void *)&edmaCCModule, 0, sizeof(edmaCCModule));
        memset((void *)&edmaObjCC, 0, sizeof(edmaObjCC));
        edmaCCModule = CSL_edma3Open(&edmaObjCC, i, NULL, &status);
        if ((edmaCCModule == NULL) || (status != CSL_SOK)) {
            return (FM_ERROR_EDMA3_INIT_FAILED);
        }

        exclusionParams.u.edma3Params.edma3Num = i;

        /* Disable CC channels */
        exclusionParams.resType = Fm_res_Edma3DmaCh;
        for (j = 0; j < edmaObjCC.cfgInfo.numDMAChannel; j++) {
            exclusionParams.resourceNum = j;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                CSL_edma3DMAChannelDisable(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
                CSL_edma3ClearDMAChannelEvent(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
                for (k = 0; k < edmaObjCC.cfgInfo.numRegions; k++) {
                    CSL_edma3DMAChannelDisable(edmaCCModule, k, j);
                    CSL_edma3ClearDMAChannelEvent(edmaCCModule, k, j);
                }
                CSL_edma3ClearDMAChannelSecondaryEvents(edmaCCModule, j);
            }
        }

        exclusionParams.resType = Fm_res_Edma3QdmaCh;
        for (j = 0; j < edmaObjCC.cfgInfo.numQDMAChannel; j++) {
            exclusionParams.resourceNum = j;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                CSL_edma3QDMAChannelDisable(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
                CSL_edma3ClearQDMAChannelSecondaryEvents(edmaCCModule, j);
            }
        }

        exclusionParams.resType = Fm_res_Edma3IntCh;
        for (j = 0; j < edmaObjCC.cfgInfo.numINTChannel; j++) {
            exclusionParams.resourceNum = j;
            if (!fmExclusionIsExcluded(&exclusionParams)) {
                if (j < 32) {
                    CSL_edma3InterruptLoDisable(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
                    CSL_edma3ClearLoPendingInterrupts(edmaCCModule,
                                                      CSL_EDMA3_REGION_GLOBAL, j);
                } else {
                    CSL_edma3InterruptHiDisable(edmaCCModule, CSL_EDMA3_REGION_GLOBAL, j);
                    CSL_edma3ClearHiPendingInterrupts(edmaCCModule,
                                                      CSL_EDMA3_REGION_GLOBAL, j);
                }
            }
        }
    }

#if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
    return (FM_FAULT_CLEANUP_OK);
#else
    return (0);
#endif
}
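/* A sketch of how a cleanup sequencer might invoke fmCleanEdma3(), assuming
 * fmGblCfgParams and an exclusion list are already in hand.  provideStatus
 * is set so progress is published once for the top-level call; a repeated
 * or nested call could pass 0 to skip the status writeback.  Illustrative
 * only; exampleEdma3Cleanup is not part of the cleanup build. */
#if 0
static void exampleEdma3Cleanup(Fm_GlobalConfigParams *fmGblCfgParams,
                                Fm_ExcludedResource   *excludedResList,
                                uint32_t               listSize)
{
    Fm_Result result;

    /* provideStatus = 1: publish progress via fmCleanupStatus */
    result = fmCleanEdma3(fmGblCfgParams, excludedResList, listSize, 1);
    if (result != FM_FAULT_CLEANUP_OK) {
        Fault_Mgmt_osalLog("EDMA3 cleanup failed with error %d\n", result);
    }
}
#endif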
/* FUNCTION PURPOSE: Gets the max CPPI tx ch for a CPDMA
 ***********************************************************************
 * DESCRIPTION: Returns the maximum number of tx ch for the
 *              given CPDMA
 *
 *              CPPI API hardcoded here until it can be added to CPPI LLD
 */
uint32_t fmGetDmaMaxTxCh(Cppi_CpDma dmaNum)
{
    uint32_t maxTxCh;

#if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
    maxTxCh = cppiGblCfgParams[dmaNum].maxTxCh;
#else
    maxTxCh = cppiGblCfgParams.cpDmaCfgs[dmaNum].maxTxCh;
#endif

    return (maxTxCh);
}

/* FUNCTION PURPOSE: Gets the max CPPI rx ch for a CPDMA
 ***********************************************************************
 * DESCRIPTION: Returns the maximum number of rx ch for the
 *              given CPDMA
 *
 *              CPPI API hardcoded here until it can be added to CPPI LLD
 */
uint32_t fmGetDmaMaxRxCh(Cppi_CpDma dmaNum)
{
    uint32_t maxRxCh;

#if (!defined(DEVICE_K2H) && !defined(DEVICE_K2K) && !defined(DEVICE_K2L) && !defined(DEVICE_K2E))
    maxRxCh = cppiGblCfgParams[dmaNum].maxRxCh;
#else
    maxRxCh = cppiGblCfgParams.cpDmaCfgs[dmaNum].maxRxCh;
#endif

    return (maxRxCh);
}
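/* A sketch showing how the channel-count getters above are typically
 * consumed: bounding a per-channel teardown loop for a given CPDMA.  The
 * loop bodies are placeholders; Cppi_CpDma_PASS_CPDMA is the same CPDMA
 * identifier used elsewhere in this file.  Illustrative only. */
#if 0
static void exampleWalkPassDmaChannels(void)
{
    uint32_t ch;

    for (ch = 0; ch < fmGetDmaMaxTxCh(Cppi_CpDma_PASS_CPDMA); ch++) {
        /* e.g., disable/tear down tx channel ch */
    }
    for (ch = 0; ch < fmGetDmaMaxRxCh(Cppi_CpDma_PASS_CPDMA); ch++) {
        /* e.g., disable/tear down rx channel ch */
    }
}
#endif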