summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: b26c9a7)
raw | patch | inline | side by side (parent: b26c9a7)
author | Tinku Mannan <tmannan@ti.com> | |
Tue, 15 Oct 2013 14:00:13 +0000 (10:00 -0400) | ||
committer | Tinku Mannan <tmannan@ti.com> | |
Tue, 15 Oct 2013 14:00:13 +0000 (10:00 -0400) |
12 files changed:
index 4dfac7464c3c3d09ca2de2334ab39b201a579f1d..34fc55e9ff6b2f8916084d0e8061b14291777c53 100755 (executable)
{
NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;
int i;
- hplib_mSpinLockLock(&pnetapiShm->netapi_util_lock);
+ pp->spinLock.lock(&pnetapiShm->netapi_util_lock);
for(i=0;i<TUNE_NETAPI_MAX_HEAPS;i++)
{
if (!pp->createdHeaps[i])
{
pp->createdHeaps[i]=h;
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_util_lock);
+ pp->spinLock.unlock(&pnetapiShm->netapi_util_lock);
return 1;
}
}
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_util_lock);
+ pp->spinLock.unlock(&pnetapiShm->netapi_util_lock);
return -1;
}
{
NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;
int i;
- hplib_mSpinLockLock(&pnetapiShm->netapi_util_lock);
+ pp->spinLock.lock(&pnetapiShm->netapi_util_lock);
for(i=0;i<TUNE_NETAPI_MAX_HEAPS;i++)
{
if (pp->createdHeaps[i] == h)
{
pp->createdHeaps[i]=NULL;
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_util_lock);
+ pp->spinLock.unlock(&pnetapiShm->netapi_util_lock);
return 1;
}
}
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_util_lock);
+ pp->spinLock.unlock(&pnetapiShm->netapi_util_lock);
return -1;
}
index 87951ceaa2a422b2e2596cb86ccb3b3ce4c4810a..ff63191307027a1af874186b190ae4701f2c8d09 100755 (executable)
int def_heap_buf_size; /**< Size of buffers in default heap, max amount of area for packet data */
int def_heap_tailroom_size; /**< Size of tailroom in reserve */
int def_heap_extra_size; /**< Size of extra space at end of buffer */
+ int def_multi_process; /**< Flag to indicate if NETAPI init is for multi-process environment */
} NETAPI_CFG_T;
/* @note:
index 4d4b0dc35b73837c1baabf6736ff31e48dd6a61c..c0c95aafafe9de201cb137cf81ff5a098b27d9cb 100755 (executable)
TUNE_NETAPI_DEFAULT_NUM_BUFFERS, //#descriptors+buffers in default heap
TUNE_NETAPI_DEFAULT_NUM_SOLO_DESCRIPTORS, //#descriptors w/o buffers in default heap
TUNE_NETAPI_DEFAULT_BUFFER_SIZE, //size of buffers in default heap
- 0,0
+ 0,0,0
+};
+HPLIB_SPINLOCK_IF_T spinlock_lol =
+{
+ hplib_mSpinLockInit,
+ hplib_mSpinLockTryLock,
+ hplib_mSpinLockIsLocked,
+ hplib_mSpinLockLock,
+ hplib_mSpinLockUnlock,
+ hplib_mRWLockInit,
+ hplib_mRWLockWriteLock,
+ hplib_mRWLockWriteUnlock,
+ hplib_mRWLockReadLock,
+ hplib_mRWLockReadUnlock
+};
+
+
+HPLIB_SPINLOCK_IF_T spinlock_mp =
+{
+ hplib_mSpinLockInit,
+ hplib_mSpinLockTryLock,
+ hplib_mSpinLockIsLocked,
+ hplib_mSpinLockLockMP,
+ hplib_mSpinLockUnlockMP,
+ hplib_mRWLockInit,
+ hplib_mRWLockWriteLockMP,
+ hplib_mRWLockWriteUnlockMP,
+ hplib_mRWLockReadLockMP,
+ hplib_mRWLockReadUnlockMP
};
/* Global variables to hold virtual address of various subsystems */
{
netapi_Log("netapi_init: hplib_shmAddEntry sucess for NETAPI_ENTRY\n");
}
- Osal_create();
+ hplib_utilOsalCreate();
}
/*ALL others: we are not the system master; assume SYS_MASTER has
created SHM area for us already, so just open it */
// and process global in netapi_proc_global
p->global = (void *) &pnetapiShm->netapi_global;
p->proc_global = (void *)&netapi_proc_global;
+ /* Update spinLock to point to either MP spinlock or fast spinlocks, this is
+ for all callers of netapi_init*/
+ if(p_cfg)
+ {
+ if(p_cfg->def_multi_process)
+ p->spinLock = spinlock_mp;
+ else
+ p->spinLock= spinlock_lol;
+ }
+ else
+ p->spinLock = spinlock_lol;
/* SYS_MASTER, PROC_MASTER: save a pointer to its netapi structure
globably for other threads/core of process to use */
index 3358d509c5f24f3bfc851256d0ba83294131c53b..7ba66535692ad9c2979ea4f28866434721f5feb0 100755 (executable)
#include "ti/runtime/netapi/pktio.h"
#include "ti/drv/nwal/nwal.h"
#include "ti/drv/nwal/nwal_util.h"
-
+#include "ti/runtime/hplib/hplib.h"
extern hplib_virtualAddrInfo_T netapi_VM_VirtAddr[HPLIB_MAX_MEM_POOLS];
/* thread cookie */
void* cookie; /*set by calling thread */
+ HPLIB_SPINLOCK_IF_T spinLock;
} NETAPI_HANDLE_T;
typedef struct NETAPI_SHM_Tag
void *netapip_netcpCfgGetSaHandles( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
int sa_slot, void ** p_sideband);
void* netapip_netcpCfgGetMacHandle(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,int iface_no);
-NetapiNwalTransInfo_t * netapip_getFreeTransInfo(NETAPI_PROC_GLOBAL_T *p_global, nwal_TransID_t *pTransId);
+NetapiNwalTransInfo_t * netapip_getFreeTransInfo(NETAPI_HANDLE_T *p_handle,
+ NETAPI_PROC_GLOBAL_T *p_global,
+ nwal_TransID_t *pTransId);
void netapip_freeTransInfo(NetapiNwalTransInfo_t* pTransInfo);
void *netapip_netcpCfgGetPolicy( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
int policy_slot);
index ceec93e3a2cd84379c69036d00ce77e3be86bab7..3ebfebaa2376c0255be89d5760ade2359fe51f07 100755 (executable)
}
/* reserve a slot */
- tunnelId = netapip_netcpCfgFindSaSlot(&netapi_get_global()->nwal_context,
- iface_no);
+ tunnelId = netapip_netcpCfgFindSaSlot(n,
+ &netapi_get_global()->nwal_context,
+ iface_no);
if (tunnelId <0)
{
if (inflow_mode & NETAPI_SEC_SA_INFLOW)
{
- pTransInfo = netapip_getFreeTransInfo((NETAPI_PROC_GLOBAL_T *) n->proc_global, &trans_id);
+ pTransInfo = netapip_getFreeTransInfo(n,
+ (NETAPI_PROC_GLOBAL_T *) n->proc_global,
+ &trans_id);
if (!pTransInfo)
{
*perr = NETAPI_ERR_BUSY;
if(handle_inflow)
{
/* get a transaction id */
- pTransInfo = netapip_getFreeTransInfo((NETAPI_PROC_GLOBAL_T *) n->proc_global, &trans_id);
+ pTransInfo = netapip_getFreeTransInfo(n,
+ (NETAPI_PROC_GLOBAL_T *) n->proc_global,
+ &trans_id);
if (!pTransInfo)
{
*perr = NETAPI_ERR_BUSY;
}
/* get a transaction id */
- pTransInfo = netapip_getFreeTransInfo((NETAPI_PROC_GLOBAL_T *) n->proc_global, &trans_id);
+ pTransInfo = netapip_getFreeTransInfo(n,
+ (NETAPI_PROC_GLOBAL_T *) n->proc_global,
+ &trans_id);
if (!pTransInfo)
{
*perr = NETAPI_ERR_BUSY;
&createParam.appRxPktQueue);
}
/* reserve a slot */
- policyId = netapip_netcpCfgFindPolicySlot(&netapi_get_global()->nwal_context,
- tunnelId);
+ policyId = netapip_netcpCfgFindPolicySlot(n,
+ &netapi_get_global()->nwal_context,
+ tunnelId);
if (policyId <0)
{
*perr= NETAPI_ERR_NOMEM;
*perr =0;
/* get a transaction id */
- pTransInfo = netapip_getFreeTransInfo((NETAPI_PROC_GLOBAL_T *) n->proc_global, &trans_id);
+ pTransInfo = netapip_getFreeTransInfo(n,
+ (NETAPI_PROC_GLOBAL_T *) n->proc_global,
+ &trans_id);
if (!pTransInfo)
{
*perr = NETAPI_ERR_BUSY;
index e8bcc5dd4b12c32bad775ec3d905d56794489775..a342bb02cef2f20db36c9afc70392afdb7eadc7c 100755 (executable)
********************************************************************
* DESCRIPTION: Netapi internal function to get a free transaction id.
********************************************************************/
-NetapiNwalTransInfo_t * netapip_getFreeTransInfo(NETAPI_PROC_GLOBAL_T *p_proc_global,
+NetapiNwalTransInfo_t * netapip_getFreeTransInfo(NETAPI_HANDLE_T *p_handle,
+ NETAPI_PROC_GLOBAL_T *p_proc_global,
nwal_TransID_t *pTransId)
{
uint16_t count=0;
count=0;
- hplib_mSpinLockLock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.lock(&pnetapiShm->netapi_netcp_cfg_lock);
while(count < TUNE_NETAPI_MAX_NUM_TRANS)
{
if((p_proc_global->nwal_context.transInfos[count].inUse) != nwal_TRUE)
{
p_proc_global->nwal_context.transInfos[count].inUse = nwal_TRUE;
*pTransId = count;
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
return(&p_proc_global->nwal_context.transInfos[count]);
}
count++;
}
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
/* trouble. need to wait for one to free up*/
/* to do: handle this by forcing a poll of cntrl queue*/
netapi_Log(">netcp_cfg: trying to get free transaction slot but all full!!\n");
********************************************************************
* DESCRIPTION: Netapi internal function to find a free slot for an SA
********************************************************************/
-int netapip_netcpCfgFindPolicySlot(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+int netapip_netcpCfgFindPolicySlot(NETAPI_HANDLE_T *p_handle,
+ NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
int tunnel)
{
int i;
if ((tunnel <0 ) || (tunnel >=TUNE_NETAPI_MAX_SA))
return -1;
- hplib_mSpinLockLock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.lock(&pnetapiShm->netapi_netcp_cfg_lock);
//find a free entry
for(i=0;i<TUNE_NETAPI_MAX_POLICY;i++)
{
if (!p->policy[i].in_use)
{
p->policy[i].in_use = 2; //pending
- p->policy[i].tunnel= tunnel; //save tunnel this is linked to
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p->policy[i].tunnel= tunnel; //save tunnel this is linked to
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
return i;
}
}
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
return -1;
}
********************************************************************
* DESCRIPTION: Netapi internal function to find a free slot in SA list for an SA
********************************************************************/
-int netapip_netcpCfgFindSaSlot(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+int netapip_netcpCfgFindSaSlot(NETAPI_HANDLE_T *p_handle,
+ NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
int iface)
{
int i;
{
if ((iface <0 ) || (iface >=TUNE_NETAPI_MAX_NUM_MAC)) return -1;
}
- hplib_mSpinLockLock(&pnetapiShm->netapi_netcp_cfg_lock);
- //find a free entry
+ p_handle->spinLock.lock(&pnetapiShm->netapi_netcp_cfg_lock);
+ //find a free entry
for(i=0;i<TUNE_NETAPI_MAX_SA;i++)
{
if (!p->tunnel[i].in_use)
{
p->tunnel[i].in_use = 2; //pending
p->tunnel[i].iface= iface; //save iface
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
return i;
}
}
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
return -1;
}
********************************************************************
* DESCRIPTION: Netapi internal function to find a free slot for a flow
********************************************************************/
-static int netapip_netcpCfgFindFlowSlot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p)
+static int netapip_netcpCfgFindFlowSlot(NETAPI_HANDLE_T *p_handle,
+ NETAPI_NWAL_GLOBAL_CONTEXT_T *p)
{
int i;
//find a free entry
- hplib_mSpinLockLock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.lock(&pnetapiShm->netapi_netcp_cfg_lock);
for(i=0;i<TUNE_NETAPI_MAX_FLOWS;i++)
{
if (!p->flows[i].in_use)
{
p->flows[i].in_use = 2; /* pending */
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
return i;
}
}
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
return -1;
}
@@ -390,14 +394,15 @@ static NETCP_CFG_FLOW_HANDLE_T netapip_netcpCfgInsertFlow(NETAPI_NWAL_GLOBAL_CON
* DESCRIPTION: Netapi internal function to find entry matching the flowid. Returns
* the slot number and the cPPI handle.
********************************************************************/
-static int netapip_netcpCfgFindFlow(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+static int netapip_netcpCfgFindFlow(NETAPI_HANDLE_T *p_handle,
+ NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
int flowid,
int dma_engine,
void ** handle)
{
int i;
*handle=NULL;
- hplib_mSpinLockLock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.lock(&pnetapiShm->netapi_netcp_cfg_lock);
for(i=0;i<TUNE_NETAPI_MAX_FLOWS;i++)
{
if ((p->flows[i].in_use)&&
(p->flows[i].flow.dma_engine == dma_engine))
{
*handle = p->flows[i].handle;
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
return i;
}
}
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
- return -1;
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ return -1;
}
***************************************************************************
* DESCRIPTION: Netapi internal function to find a free slot for IP rule in IP slot list
***************************************************************************/
-static int netapip_netcpCfgFindIpSlot(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+static int netapip_netcpCfgFindIpSlot(NETAPI_HANDLE_T *p_handle,
+ NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
int iface_no)
{
int i;
//find a free entry
- hplib_mSpinLockLock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.lock(&pnetapiShm->netapi_netcp_cfg_lock);
for(i=0;i<TUNE_NETAPI_MAX_NUM_IP;i++)
{
if (!p->ips[i].in_use)
{
p->ips[i].in_use = 2; //pending
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
return i;
}
}
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
return -1;
}
*******************************************************************************
* DESCRIPTION: Netapi internal function to find a free slot for classifier rule
*******************************************************************************/
-static int netapip_netcpCfgFindClassSlot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p)
+static int netapip_netcpCfgFindClassSlot(NETAPI_HANDLE_T *p_handle,
+ NETAPI_NWAL_GLOBAL_CONTEXT_T *p)
{
int i;
- hplib_mSpinLockLock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.lock(&pnetapiShm->netapi_netcp_cfg_lock);
for(i=0;i<TUNE_NETAPI_MAX_CLASSIFIERS;i++)
{
if (!p->classi[i].in_use)
{
p->classi[i].in_use = 2; //pending
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
return i;
}
}
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_netcp_cfg_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_netcp_cfg_lock);
return -1;
}
/*******************************************************************************
}
*err =0;
- pTransInfo = netapip_getFreeTransInfo((NETAPI_PROC_GLOBAL_T *) n->proc_global, &transId);
+ pTransInfo = netapip_getFreeTransInfo(n,
+ (NETAPI_PROC_GLOBAL_T *) n->proc_global,
+ &transId);
if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return ;}
pTransInfo->transType = NETAPI_NWAL_HANDLE_STAT_REQUEST;
pTransInfo->netapi_handle = h;
int * err)
{
NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;
- nwalMacParam_t MacInfo=
+ nwalMacParam_t macInfo=
{
0, /* validParams */
0, /* ifNum */
if ((!n) || (!p_mac)) {*err = NETAPI_ERR_BAD_INPUT; return -1;}
*err =0;
- pTransInfo = netapip_getFreeTransInfo((NETAPI_PROC_GLOBAL_T *) n->proc_global, &trans_id);
+ pTransInfo = netapip_getFreeTransInfo(n,
+ (NETAPI_PROC_GLOBAL_T *) n->proc_global,
+ &trans_id);
if (!pTransInfo)
{
*err = NETAPI_ERR_BUSY;
pTransInfo->netapi_handle = h;
/* set up MacInfo */
- memcpy(&MacInfo.macAddr,p_mac,6);
+ memcpy(&macInfo.macAddr,p_mac,6);
/* todo: vlan */
if (switch_port)
{
- MacInfo.validParams =NWAL_SET_MAC_VALID_PARAM_IFNUM ;
- MacInfo.ifNum = switch_port; /* */
+ macInfo.validParams |= NWAL_SET_MAC_VALID_PARAM_IFNUM ;
+ macInfo.ifNum = switch_port; /* */
}
if (route != NULL)
{
- netapip_netcpCfgBuildRoute(route,&MacInfo.appRxPktFlowId, &MacInfo.appRxPktQueue);
+ netapip_netcpCfgBuildRoute(route,&macInfo.appRxPktFlowId, &macInfo.appRxPktQueue);
}
retValue = nwal_setMacIface( ((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,
trans_id,
(nwal_AppId) (NETAPI_NETCP_MATCH_GENERIC_MAC | iface_no),
- &MacInfo,
+ &macInfo,
&pTransInfo->handle);
if(retValue != nwal_OK)
{
*err =0;
//get a transaction id
- pTransInfo = netapip_getFreeTransInfo((NETAPI_PROC_GLOBAL_T *) n->proc_global, &trans_id);
+ pTransInfo = netapip_getFreeTransInfo(n,
+ (NETAPI_PROC_GLOBAL_T *) n->proc_global,
+ &trans_id);
if (!pTransInfo)
{
*err = NETAPI_ERR_BUSY;
if (flag) //if adding IP to MAC then reserve a slot to save info
{
//find free slot for IP & reserve
- ip_slot= netapip_netcpCfgFindIpSlot(&netapi_get_global()->nwal_context,
- iface_no);
+ ip_slot= netapip_netcpCfgFindIpSlot(n,
+ &netapi_get_global()->nwal_context,
+ iface_no);
if (ip_slot <0)
{
*err= NETAPI_ERR_NOMEM; //no room
}
//get a transaction object for config action
- pTransInfo = netapip_getFreeTransInfo((NETAPI_PROC_GLOBAL_T *) n->proc_global, &trans_id);
+ pTransInfo = netapip_getFreeTransInfo(n,
+ (NETAPI_PROC_GLOBAL_T *) n->proc_global,
+ &trans_id);
if (!pTransInfo)
{
*err = NETAPI_ERR_BUSY;
if (flag)
{
netapi_Log ("netcp_cfg: IP added to interface %d (slot%d)\n", iface_no, ip_slot);
- netapip_netcpCfgInsertIp(&netapi_get_global()->nwal_context, ipType,
- ip_addr, ip_qualifiers, iface_no, ip_slot,
- pTransInfo->handle, user_data);
+ netapip_netcpCfgInsertIp(&netapi_get_global()->nwal_context,
+ ipType,
+ ip_addr,
+ ip_qualifiers,
+ iface_no,
+ ip_slot,
+ pTransInfo->handle,
+ user_data);
}
temp = (NETCP_CFG_IP_T) pTransInfo->handle;
*err =0;
//get a transaction id
- pTransInfo = netapip_getFreeTransInfo((NETAPI_PROC_GLOBAL_T *) n->proc_global, &trans_id);
+ pTransInfo = netapip_getFreeTransInfo(n,
+ (NETAPI_PROC_GLOBAL_T *) n->proc_global,
+ &trans_id);
if (!pTransInfo)
{
*err = NETAPI_ERR_BUSY;
//find free slot for CLASS & reserve
- class_slot= netapip_netcpCfgFindClassSlot(&netapi_get_global()->nwal_context);
+ class_slot= netapip_netcpCfgFindClassSlot(n,
+ &netapi_get_global()->nwal_context);
if(class_slot<0) {*err = NETAPI_ERR_NOMEM; return -1;}
classHandle = NETAPI_NETCP_MATCH_CLASS |
(class_slot << NETAPI_NETCP_MATCH_ID_SHIFT) |
//get a transaction id
- pTransInfo = netapip_getFreeTransInfo((NETAPI_PROC_GLOBAL_T *) n->proc_global, &trans_id);
+ pTransInfo = netapip_getFreeTransInfo(n,
+ (NETAPI_PROC_GLOBAL_T *) n->proc_global,
+ &trans_id);
if (!pTransInfo)
{
*err = NETAPI_ERR_BUSY;
&netapi_get_global()->nwal_context,
class_slot );
//get a transaction id
- pTransInfo = netapip_getFreeTransInfo((NETAPI_PROC_GLOBAL_T *) n->proc_global, &trans_id);
+ pTransInfo = netapip_getFreeTransInfo(n,
+ (NETAPI_PROC_GLOBAL_T *) n->proc_global,
+ &trans_id);
if (!pTransInfo)
{
*err = NETAPI_ERR_BUSY;
Cppi_FlowHnd FlowHnd;
int slot;
NETCP_CFG_FLOW_HANDLE_T retVal;
-
+ NETAPI_HANDLE_T * pHandle = (NETAPI_HANDLE_T *) h;
*err= 0; /* ok */
if (!p_cfg)
{
return NULL;
}
//get a slot to save new flow
- slot = netapip_netcpCfgFindFlowSlot(&netapi_get_global()->nwal_context);
+ slot = netapip_netcpCfgFindFlowSlot(pHandle,
+ &netapi_get_global()->nwal_context);
+
if (slot<0) { *err= NETAPI_ERR_NOMEM; return NULL; }
//configure flow
int slot;
void * handle;
*err=0;
+ NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;
/* find entry */
- slot = netapip_netcpCfgFindFlow(&netapi_get_global()->nwal_context,
- ((NETCP_CFG_FLOW_T *) f) ->flowid,
- ((NETCP_CFG_FLOW_T *) f) ->dma_engine,
- &handle);
+ slot = netapip_netcpCfgFindFlow(n,
+ &netapi_get_global()->nwal_context,
+ ((NETCP_CFG_FLOW_T *) f) ->flowid,
+ ((NETCP_CFG_FLOW_T *) f) ->dma_engine,
+ &handle);
if (slot<0)
{
*err = NETAPI_ERR_BAD_INPUT;
}
Cppi_closeRxFlow( (Cppi_FlowHnd) handle);
- netapip_netcpCfgDeleteFlow(&netapi_get_global()->nwal_context, slot);
+ netapip_netcpCfgDeleteFlow(&netapi_get_global()->nwal_context,
+ slot);
netapi_Log(">netcp cfg: flow %d (dma index %d) deleted\n",
((NETCP_CFG_FLOW_T *) f)->flowid,
((NETCP_CFG_FLOW_T *) f)->dma_engine);
index 583b4949c843a4d8767833b39791390e58da504b..f06df46d319bc2166bfbd07c84fe812d6a4164ad 100755 (executable)
/* Utilites*/
static PKTIO_HANDLE_T * netapip_pktioGetFreeChannelSlot(NETAPI_T n)
{
+ NETAPI_HANDLE_T *p_handle;
PKTIO_HANDLE_T ** pp = (PKTIO_HANDLE_T **) netapi_get_pktio_list(n);
int i;
- hplib_mSpinLockLock(&pnetapiShm->netapi_pktio_lock);
+ p_handle = (NETAPI_HANDLE_T*) n;
+ p_handle->spinLock.lock(&pnetapiShm->netapi_pktio_lock);
for(i=0;i<NETAPI_MAX_PKTIO;i++)
{
if (pp[i]->inuse != PKTIO_INUSE)
{
pp[i]->inuse = PKTIO_INUSE;
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_pktio_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_pktio_lock);
return pp[i];
}
}
- hplib_mSpinLockUnlock(&pnetapiShm->netapi_pktio_lock);
+ p_handle->spinLock.unlock(&pnetapiShm->netapi_pktio_lock);
return NULL;
}
index 33f8591448653162d11e2260b217127abaebc443..2c9a8ab1d70fe84dbeac743da7651124703c872a 100755 (executable)
CPU_ZERO( &cpu_set);
CPU_SET( 1, &cpu_set);
- hplib_utilSetupThread(1, &cpu_set);
+ hplib_utilSetupThread(1, &cpu_set,hplib_spinLock_Type_LOL);
for(;!((volatile)QUIT);)
{
CPU_ZERO( &cpu_set);
CPU_SET( 2, &cpu_set);
- hplib_utilSetupThread(2, &cpu_set);
+ hplib_utilSetupThread(2, &cpu_set,hplib_spinLock_Type_LOL);
#ifdef INTERNAL_PACKETS
//generate packets internally by allocating from OurHeap (the NETAPI
signal(SIGINT,netTest_utilMySig);
CPU_ZERO( &cpu_set);
CPU_SET( 0, &cpu_set);
- hplib_utilSetupThread(0, &cpu_set);
+ hplib_utilSetupThread(0, &cpu_set, hplib_spinLock_Type_LOL);
/*******************************************/
index fafdcc11f071feb5e52a86219794433400b53c23..69f399275f5028c1e249824937475c0be966f468 100755 (executable)
CPU_ZERO( &cpu_set);
CPU_SET( 2, &cpu_set);
- hplib_utilSetupThread(2, &cpu_set);
+ hplib_utilSetupThread(2, &cpu_set, hplib_spinLock_Type_LOL);
start_time = hplib_mUtilGetTimestamp();
//DAL we poll the default pktio channel for pkts from net
CPU_ZERO( &cpu_set);
CPU_SET( 0, &cpu_set);
- hplib_utilSetupThread(2, &cpu_set);
+ hplib_utilSetupThread(2, &cpu_set, hplib_spinLock_Type_LOL);
/*******************************************/
diff --git a/ti/runtime/netapi/test/net_test_loopback.c b/ti/runtime/netapi/test/net_test_loopback.c
index 1a07f690ec4182783b32d3d2fe9335eb34c47ddd..3d19e9fe69cb7f96873870de811fd1c4e0d425cb 100755 (executable)
printf("slow_path_thread: setting cpu %d to cpu_set\n", i);
CPU_SET( 0, &cpu_set);
}
- hplib_utilSetupThread(thread_num, &cpu_set);
+ hplib_utilSetupThread(thread_num, &cpu_set, hplib_spinLock_Type_LOL);
#else
for (i = netTestCfg.sp_proc_start[index]; i <= netTestCfg.sp_proc_end[index];i++)
{
printf("slow_path_thread: setting cpu %d to cpu_set\n", i);
CPU_SET( i, &cpu_set);
}
- hplib_utilSetupThread(thread_num, &cpu_set);
+ hplib_utilSetupThread(thread_num, &cpu_set, hplib_spinLock_Type_LOL);
#endif
worker_nh[thread_num] = netapi_init(NETAPI_CORE_MASTER,NULL);
if (worker_nh[thread_num] == NULL)
{
CPU_SET( 0, &cpu_set);
}
- hplib_utilSetupThread(thread_num, &cpu_set);
+ hplib_utilSetupThread(thread_num, &cpu_set, hplib_spinLock_Type_LOL);
#else
for (i = netTestCfg.fp_proc_start[index]; i <= netTestCfg.fp_proc_end[index];i++)
{
netapi_Log("fast_path_thread: setting cpu %d to cpu_set\n", i);
CPU_SET( i, &cpu_set);
}
- hplib_utilSetupThread(thread_num, &cpu_set);
+ hplib_utilSetupThread(thread_num, &cpu_set, hplib_spinLock_Type_LOL);
#endif
worker_nh[thread_num]=netapi_init(NETAPI_CORE_MASTER,NULL);
if (worker_nh[thread_num] == NULL)
/* assign main net_test thread to run on core 0 */
CPU_ZERO( &cpu_set);
CPU_SET( 0, &cpu_set);
- hplib_utilSetupThread(0, &cpu_set);
+ hplib_utilSetupThread(0, &cpu_set, hplib_spinLock_Type_LOL);
#endif
/* create netapi */
diff --git a/ti/runtime/netapi/test/net_test_max_params.c b/ti/runtime/netapi/test/net_test_max_params.c
index f5cbf1ce895bdbcecd7e29cae3bf252501806b79..b5aae76a9f97f657755a97b31ad935a2c08c8f03 100755 (executable)
/* assign main net_test thread to run on core 0 */
CPU_ZERO( &cpu_set);
CPU_SET( 0, &cpu_set);
- hplib_utilSetupThread(0, &cpu_set);
+ hplib_utilSetupThread(0, &cpu_set, hplib_spinLock_Type_LOL);
#endif
/* create netapi */
index 89b292336bed2a8041f9ca05f78e68af37bc6f9d..81e8131c179d7ab37f45fd52890016ec614445b8 100755 (executable)
printf("slow_path_thread: setting cpu %d to cpu_set\n", i);
CPU_SET( i, &cpu_set);
}
- hplib_utilSetupThread(0, &cpu_set);
+ hplib_utilSetupThread(0, &cpu_set, hplib_spinLock_Type_LOL);
#else
for (i = netTestCfg.sp_proc_start[index]; i <= netTestCfg.sp_proc_end[index];i++)
{
printf("slow_path_thread: setting cpu %d to cpu_set\n", i);
CPU_SET( i, &cpu_set);
}
- hplib_utilSetupThread(thread_num, &cpu_set);
+ hplib_utilSetupThread(thread_num, &cpu_set, hplib_spinLock_Type_LOL);
#endif
worker_nh[thread_num]=netapi_init(NETAPI_CORE_MASTER,NULL);
if (worker_nh[thread_num] == NULL)
printf("fast_path_thread: setting cpu %d to cpu_set\n", i);
CPU_SET( i, &cpu_set);
}
- hplib_utilSetupThread(thread_num, &cpu_set);
+ hplib_utilSetupThread(thread_num, &cpu_set, hplib_spinLock_Type_LOL);
#else
for (i = netTestCfg.fp_proc_start[index]; i <= netTestCfg.fp_proc_end[index];i++)
{
printf("fast_path_thread: setting cpu %d to cpu_set\n", i);
CPU_SET( i, &cpu_set);
}
- hplib_utilSetupThread(thread_num, &cpu_set);
+ hplib_utilSetupThread(thread_num, &cpu_set, hplib_spinLock_Type_LOL);
#endif
worker_nh[thread_num]=netapi_init(NETAPI_CORE_MASTER,NULL);
if (worker_nh[thread_num] == NULL)
/* assign main net_test thread to run on core 0 */
CPU_ZERO( &cpu_set);
CPU_SET( 0, &cpu_set);
- hplib_utilSetupThread(0, &cpu_set);
+ hplib_utilSetupThread(0, &cpu_set, hplib_spinLock_Type_LOL);
#endif
/* create netapi */
netapi_handle = netapi_init(NETAPI_SYS_MASTER, &our_netapi_default_cfg);