summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: 0d764f4)
author | Ramsey Harris <ramsey@ti.com> | |
Wed, 25 Feb 2015 00:58:00 +0000 (16:58 -0800) | ||
committer | Robert Tivy <rtivy@ti.com> | |
Wed, 25 Feb 2015 22:49:23 +0000 (14:49 -0800) |
Added new config param: MultiProc.procAddrMode. This is used to
specify which processors require dedicated resources. This config
param is used by MessageQ and NameServer to control the size of
resource arrays. When using ProcAddrMode_Cluster, the resource array
size matches the number of members in the cluster. Modified
TransportRpmsgSetup and TransportShmNotify to always use the cluster
size because that is the only mode they support. These changes are
needed to minimize memory usage in large processor systems.
13 files changed:
diff --git a/packages/ti/ipc/transports/TransportRpmsgSetup.c b/packages/ti/ipc/transports/TransportRpmsgSetup.c
index 753c36ec5856bb8bf2a26fb8fc0576cfe4452111..1dfb4cc6b31dd4f5000a15c45bce924343bb55d0 100644 (file)
/*
- * Copyright (c) 2012-2013, Texas Instruments Incorporated
+ * Copyright (c) 2012-2015 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/*
* ======== TransportRpmsgSetup_attach ========
*/
-Int TransportRpmsgSetup_attach(UInt16 remoteProcId, Ptr sharedAddr)
+Int TransportRpmsgSetup_attach(UInt16 procId, Ptr sharedAddr)
{
TransportRpmsg_Handle handle;
TransportRpmsg_Params params;
Int status = MessageQ_E_FAIL;
Error_Block eb;
+ Int index;
- Log_print1(Diags_INFO, "TransportRpmsgSetup_attach: remoteProcId: %d",
- remoteProcId);
+ Log_print1(Diags_INFO, "TransportRpmsgSetup_attach: procId=%d", procId);
Error_init(&eb);
+ /* procId is always in cluster */
+ index = ti_sdo_utils_MultiProc_getClusterId(procId);
+
/* init the transport parameters */
TransportRpmsg_Params_init(&params);
params.sharedAddr = sharedAddr; /* Not used yet */
- handle = TransportRpmsg_create(remoteProcId, &params, &eb);
+ handle = TransportRpmsg_create(procId, &params, &eb);
if (handle != NULL) {
- TransportRpmsgSetup_module->handles[remoteProcId] = handle;
+ TransportRpmsgSetup_module->handles[index] = handle;
status = MessageQ_S_SUCCESS;
}
/*
* ======== TransportRpmsgSetup_detach ========
*/
-Int TransportRpmsgSetup_detach(UInt16 remoteProcId)
+Int TransportRpmsgSetup_detach(UInt16 procId)
{
TransportRpmsg_Handle handle;
+ Int index;
- System_printf("TransportRpmsgSetup_detach: remoteProcId: %d\n",
- remoteProcId);
+ Log_print1(Diags_INFO, "TransportRpmsgSetup_detach: procId=%d", procId);
- handle = TransportRpmsgSetup_module->handles[remoteProcId];
+ /* procId is always in cluster */
+ index = ti_sdo_utils_MultiProc_getClusterId(procId);
+ handle = TransportRpmsgSetup_module->handles[index];
/* Trying to detach an un-attached processor should fail */
if (handle == NULL) {
}
/* Unregister the instance */
- TransportRpmsgSetup_module->handles[remoteProcId] = NULL;
+ TransportRpmsgSetup_module->handles[index] = NULL;
TransportRpmsg_delete(&handle);
/*
* ======== TransportRpmsgSetup_isRegistered ========
*/
-Bool TransportRpmsgSetup_isRegistered(UInt16 remoteProcId)
+Bool TransportRpmsgSetup_isRegistered(UInt16 procId)
{
Bool registered;
+ Int index;
- registered = (TransportRpmsgSetup_module->handles[remoteProcId] != NULL);
+ /* procId is always in cluster */
+ index = ti_sdo_utils_MultiProc_getClusterId(procId);
+ registered = (TransportRpmsgSetup_module->handles[index] != NULL);
return (registered);
}
+
+/*
+ * ======== TransportRpmsgSetup_sharedMemReq ========
+ */
+SizeT TransportRpmsgSetup_sharedMemReq(Ptr sharedAddr)
+{
+ return (0);
+}
diff --git a/packages/ti/ipc/transports/TransportRpmsgSetup.xdc b/packages/ti/ipc/transports/TransportRpmsgSetup.xdc
index 47d9d756ea050ef38e32149f4fb02fdfb8836745..64d0e73592419d19f1516731229b26be6e73e0c9 100644 (file)
/*
- * Copyright (c) 2012-2013, Texas Instruments Incorporated
+ * Copyright (c) 2012-2015 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/*!
* ======== TransportRpmsgSetup ========
- * Manages the setup of TransportRpmsg instances.
+ * Manages the setup of TransportRpmsg instances
*
- * create or open the TransportRpmsg for each pair of devices.
+ * Create or open the TransportRpmsg for each pair of devices.
+ *
+ * @a(Note)
+ * This module reflects upon the {@link ti.sdo.utils.MultiProc#procAddrMode}
+ * configuration parameter. Some internal data structure allocations are
+ * optimized for the given processor address mode.
*/
module TransportRpmsgSetup inherits ti.sdo.ipc.interfaces.ITransportSetup
/* Module Status object */
struct Module_State {
- TransportRpmsg.Handle handles[]; /* handle per remote proc */
+ TransportRpmsg.Handle handles[]; /* handle per proc in cluster */
}
}
diff --git a/packages/ti/ipc/transports/TransportRpmsgSetup.xs b/packages/ti/ipc/transports/TransportRpmsgSetup.xs
index 7adac0de6bb1b2a72ed3dac915612ae9c77905d7..a435cee410a2eb79030f8ff410b669bec598e019 100644 (file)
/*
- * Copyright (c) 2012-2013, Texas Instruments Incorporated
+ * Copyright (c) 2012-2015 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
/*
* ======== TransportRpmsgSetup.xs ========
*/
-
-var TransportRpmsgSetup = null;
-var TransportRpmsg = null;
-var MultiProc = null;
+var MultiProc = null;
/*
* ======== module$use ========
*/
function module$use()
{
- TransportRpmsgSetup = this;
- TransportRpmsg = xdc.useModule("ti.ipc.transports.TransportRpmsg");
+ xdc.useModule("ti.ipc.transports.TransportRpmsg");
MultiProc = xdc.useModule("ti.sdo.utils.MultiProc");
}
/*
* ======== module$static$init ========
*/
-function module$static$init(mod, params)
+function module$static$init(state, mod)
{
- /* set the length of handles to the number of processors */
- mod.handles.length = MultiProc.numProcessors;
+ /* This module is used only for attaching to processors in the
+ * cluster. The cluster size is either a subset of all processors
+ * or contains all processors (i.e. no cluster defined).
+ *
+ * NB: In fact, this module only attaches to the HOST processor.
+ */
+ state.handles.length = MultiProc.numProcsInCluster;
- /* init the remote processor handles to null */
- for (var i=0; i < mod.handles.length; i++) {
- mod.handles[i] = null;
+ for (var i = 0; i < state.handles.length; i++) {
+ state.handles[i] = null;
}
}
index b10171bf545cb10339f8c89928031950263ec0fd..ffc13f0aeb819383223bc220d44e569279ee6d79 100644 (file)
ITransport_Handle baseTrans;
INetworkTransport_Handle netTrans;
MessageQ_QueueIndex queueIndex;
+ UInt16 index;
Assert_isTrue((msg != NULL), ti_sdo_ipc_MessageQ_A_invalidMsg);
priority = (UInt)((msg->flags) &
ti_sdo_ipc_MessageQ_TRANSPORTPRIORITYMASK);
+ switch (ti_sdo_utils_MultiProc_procAddrMode) {
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Global:
+ index = dstProcId;
+ break;
+
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Cluster:
+ index = dstProcId - MultiProc_getBaseIdOfCluster();
+ break;
+
+ default:
+ Assert_isTrue(FALSE, NULL);
+ break;
+ }
+
+ if (index >= MessageQ_module->transports.length) {
+ /* raise error */
+ status = MessageQ_E_FAIL;
+ goto leave;
+ }
+
/* Call the transport associated with this message queue */
- transport = MessageQ_module->transports[dstProcId][priority];
+ transport = MessageQ_module->transports.elem[index][priority];
+
if (transport == NULL) {
/* Try the other transport */
priority = !priority;
- transport = MessageQ_module->transports[dstProcId][priority];
+ transport = MessageQ_module->transports.elem[index][priority];
}
/* assert transport is not null */
/*
* ======== ti_sdo_ipc_MessageQ_registerTransport ========
- * Register a transport
*/
Bool ti_sdo_ipc_MessageQ_registerTransport(IMessageQTransport_Handle handle,
UInt16 procId, UInt priority)
{
Bool flag = FALSE;
UInt key;
+ UInt16 index;
+
+ Assert_isTrue(procId < ti_sdo_utils_MultiProc_numProcessors,
+ ti_sdo_ipc_MessageQ_A_procIdInvalid);
+
+ switch (ti_sdo_utils_MultiProc_procAddrMode) {
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Global:
+ index = procId;
+ break;
+
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Cluster:
+ index = procId - MultiProc_getBaseIdOfCluster();
+ break;
+
+ default:
+ Assert_isTrue(FALSE, NULL);
+ break;
+ }
- /* Make sure the procId is valid */
- Assert_isTrue(procId < ti_sdo_utils_MultiProc_numProcessors, ti_sdo_ipc_MessageQ_A_procIdInvalid);
+ Assert_isTrue(index < MessageQ_module->transports.length,
+ ti_sdo_ipc_MessageQ_A_procIdInvalid);
/* lock scheduler */
key = Hwi_disable();
- /* Make sure the id is not already in use */
- if (MessageQ_module->transports[procId][priority] == NULL) {
- MessageQ_module->transports[procId][priority] = handle;
+ /* make sure the id is not already in use */
+ if (MessageQ_module->transports.elem[index][priority] == NULL) {
+ MessageQ_module->transports.elem[index][priority] = handle;
flag = TRUE;
}
/*
* ======== ti_sdo_ipc_MessageQ_unregisterTransport ========
- * Unregister a heap
*/
Void ti_sdo_ipc_MessageQ_unregisterTransport(UInt16 procId, UInt priority)
{
UInt key;
+ UInt16 index;
+
+ Assert_isTrue(procId < ti_sdo_utils_MultiProc_numProcessors,
+ ti_sdo_ipc_MessageQ_A_procIdInvalid);
+
+ switch (ti_sdo_utils_MultiProc_procAddrMode) {
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Global:
+ index = procId;
+ break;
+
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Cluster:
+ index = procId - MultiProc_getBaseIdOfCluster();
+ break;
+
+ default:
+ Assert_isTrue(FALSE, NULL);
+ break;
+ }
- /* Make sure the procId is valid */
- Assert_isTrue(procId < ti_sdo_utils_MultiProc_numProcessors, ti_sdo_ipc_MessageQ_A_procIdInvalid);
+ Assert_isTrue(index < MessageQ_module->transports.length,
+ ti_sdo_ipc_MessageQ_A_procIdInvalid);
/* lock scheduler */
key = Hwi_disable();
- MessageQ_module->transports[procId][priority] = NULL;
+ MessageQ_module->transports.elem[index][priority] = NULL;
/* unlock scheduler */
Hwi_restore(key);
index 711246c80b469ddbc6cea69d2e991f0fceec74b5..7e5b5db28bd923fdcc57f44be72f03b9ff3c06d7 100644 (file)
* any two processors. The IMessageQTransport instances are created via the
* {@link #SetupTransportProxy}. The instances are responsible for
* registering themselves with MessageQ. This is accomplished via the
- * {@link #registerTransport} function.
+ * {@link #registerTransport} function.
+ *
+ * @a(Note)
+ * This module reflects upon the {@link ti.sdo.utils.MultiProc#procAddrMode}
+ * configuration parameter. Some internal data structure allocations are
+ * optimized for the given processor address mode. For example, when using
+ * {@link ti.sdo.utils.MultiProc#ProcAddrMode_Global}, a message can be
+ * addressed to any processor using only the destination queueId. However,
+ * when using {@link ti.sdo.utils.MultiProc#ProcAddrMode_Cluster}, only
+ * the processors within your cluster can be addressed using only the
+ * destination queueId. For processors outside the cluster, you must also
+ * specify the Transport ID.
*/
@ModuleStartup
};
struct Module_State {
- IMessageQTransport.Handle transports[][2];
+ IMessageQTransport.Handle transports[length][2];
Handle queues[];
IHeap.Handle heaps[];
IGateProvider.Handle gate;
index 8fd693c948e3c91c948e4a396f677cf962b94e45..31e99a81069a25bd405378910a333436868210bc 100644 (file)
/*
- * Copyright (c) 2012-2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (c) 2012-2015 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
state.heaps[mod.staticHeaps[i].heapId] = mod.staticHeaps[i].heap;
}
- /* Set the length of the transport array */
- state.transports.length = MultiProc.numProcessors;
+ if (MultiProc.procAddrMode == MultiProc.ProcAddrMode_Global) {
+ /* global address mode: need transport handle for every processor */
+ state.transports.length = MultiProc.numProcessors;
+ }
+ else if (MultiProc.procAddrMode == MultiProc.ProcAddrMode_Cluster) {
+ /* cluster address mode: need transport only for cluster members */
+ state.transports.length = MultiProc.numProcsInCluster;
+ }
+ else {
+ MessageQ.$logError("Unknown MultiProc.procAddrMode", this);
+ }
- /* Initialize all the transports to null */
for (var i = 0; i < state.transports.length; i++) {
state.transports[i][0] = null;
state.transports[i][1] = null;
}
- /*
- * Sort the static Transports by processor id into the
- * transport array
- */
- for (var i = 0; i < mod.staticTransports.length; i++) {
-
- /* Make sure the procId is not too big */
- if (mod.staticTransports[i].procId >= MultiProc.numProcessors) {
- MessageQ.$logError("MessageQ Out of range procId ("
- + mod.staticTransports[i].procId + "). Max procId is "
- + (MultiProc.numProcessors) + " (MultiProc.numProcessors).",
- this);
+ /* sort the static transports by processor id into the transport array */
+ if (MultiProc.procAddrMode == MultiProc.ProcAddrMode_Global) {
+ for (var i = 0; i < mod.staticTransports.length; i++) {
+ /* make sure the procId is not too big */
+ if (mod.staticTransports[i].procId >= MultiProc.numProcessors) {
+ MessageQ.$logError("MessageQ Out of range procId ("
+ + mod.staticTransports[i].procId + "). Max procId is "
+ + (MultiProc.numProcessors) + " (MultiProc."
+ + "numProcessors).", this);
+ }
+
+ /* make sure the same id is not used twice */
+ if (state.transports[mod.staticTransports[i].procId] != null) {
+ MessageQ.$logError("Cannot register multiple transports to "
+ + "same remote processor ("
+ + mod.staticTransports[i].procId + ").", this);
+ }
+
+ state.transports[mod.staticTransports[i].procId] =
+ mod.staticTransports[i].transport;
}
-
- /* Make sure the same id is not used twice */
- if (state.transports[mod.staticTransports[i].procId] != null) {
- MessageQ.$logError("Cannot register multiple transports to one"
- + " remote processor " + mod.staticTransports[i].procId
- + ".", this);
+ }
+ else if (MultiProc.procAddrMode == MultiProc.ProcAddrMode_Cluster) {
+ for (var i = 0; i < mod.staticTransports.length; i++) {
+ var clusterId = mod.staticTransports[i].procId
+ - MultiProc.baseIdOfCluster;
+
+ /* validate clusterId */
+ if (clusterId >= MultiProc.numProcsInCluster) {
+ MessageQ.$logError("procId=" + mod.staticTransports[i].procId
+ + " is not in cluster", this);
+ }
+
+ /* make sure the same id is not used twice */
+ if (state.transports[clusterId] != null) {
+ MessageQ.$logError("Cannot register multiple transports to "
+ + "same remote processor ("
+ + mod.staticTransports[i].procId + ").", this);
+ }
+
+ state.transports[clusterId] = mod.staticTransports[i].transport;
}
-
- state.transports[mod.staticTransports[i].procId] =
- mod.staticTransports[i].transport;
+ }
+ else {
+ MessageQ.$logError("Unknown MultiProc.procAddrMode", this);
}
/* initialize the registered transport array */
diff --git a/packages/ti/sdo/ipc/transports/TransportShmNotifySetup.c b/packages/ti/sdo/ipc/transports/TransportShmNotifySetup.c
index 170d4bee5faf8018b965edc2ef6ad4e1cd4c6f0d..b62204a0c1577feb675696bc5481df961c6002c8 100644 (file)
/*
- * Copyright (c) 2012-2013, Texas Instruments Incorporated
+ * Copyright (c) 2012-2015 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/*
* ======== TransportShmNotifySetup_attach ========
*/
-Int TransportShmNotifySetup_attach(UInt16 remoteProcId, Ptr sharedAddr)
+Int TransportShmNotifySetup_attach(UInt16 procId, Ptr sharedAddr)
{
TransportShmNotify_Handle handle;
TransportShmNotify_Params params;
Int status = MessageQ_E_FAIL;
Error_Block eb;
+ Int index;
Error_init(&eb);
+ /* procId is always in cluster */
+ index = ti_sdo_utils_MultiProc_getClusterId(procId);
+
/* init the transport parameters */
TransportShmNotify_Params_init(&params);
params.priority = TransportShmNotifySetup_priority;
/* make sure notify driver has been created */
- if (Notify_intLineRegistered(remoteProcId, 0)) {
- handle = TransportShmNotify_create(remoteProcId, &params, &eb);
+ if (Notify_intLineRegistered(procId, 0)) {
+ handle = TransportShmNotify_create(procId, &params, &eb);
if (handle != NULL) {
- TransportShmNotifySetup_module->handles[remoteProcId] = handle;
+ TransportShmNotifySetup_module->handles[index] = handle;
status = MessageQ_S_SUCCESS;
}
}
/*
* ======== TransportShmNotifySetup_detach ========
*/
-Int TransportShmNotifySetup_detach(UInt16 remoteProcId)
+Int TransportShmNotifySetup_detach(UInt16 procId)
{
TransportShmNotify_Handle handle;
+ Int index;
- handle = TransportShmNotifySetup_module->handles[remoteProcId];
+ /* procId is always in cluster */
+ index = ti_sdo_utils_MultiProc_getClusterId(procId);
+ handle = TransportShmNotifySetup_module->handles[index];
/* Trying to detach an un-attached processor should fail */
if (handle == NULL) {
}
/* Unregister the instance */
- TransportShmNotifySetup_module->handles[remoteProcId] = NULL;
+ TransportShmNotifySetup_module->handles[index] = NULL;
TransportShmNotify_delete(&handle);
/*
* ======== TransportShmNotifySetup_isRegistered ========
*/
-Bool TransportShmNotifySetup_isRegistered(UInt16 remoteProcId)
+Bool TransportShmNotifySetup_isRegistered(UInt16 procId)
{
Bool registered;
+ Int index;
- registered = (TransportShmNotifySetup_module->handles[remoteProcId] != NULL);
+ /* procId is always in cluster */
+ index = ti_sdo_utils_MultiProc_getClusterId(procId);
+ registered = (TransportShmNotifySetup_module->handles[index] != NULL);
return (registered);
}
diff --git a/packages/ti/sdo/ipc/transports/TransportShmNotifySetup.xdc b/packages/ti/sdo/ipc/transports/TransportShmNotifySetup.xdc
index 8141b8ac8c3a21a6b2d01fa805275d335d928765..93a76e5c23992150d53d8389529266c72d6b4262 100644 (file)
/*
- * Copyright (c) 2012-2013, Texas Instruments Incorporated
+ * Copyright (c) 2012-2015 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* ======== TransportShmNotifySetup ========
* Manages the setup of TransportShmNotify instances.
*
- * create or open the TransportShmNotify for each pair of devices.
+ * Create or open the TransportShmNotify for each pair of devices.
+ *
+ * @a(Note)
+ * This module reflects upon the {@link ti.sdo.utils.MultiProc#procAddrMode}
+ * configuration parameter. Some internal data structure allocations are
+ * optimized for the given processor address mode.
*/
module TransportShmNotifySetup inherits ti.sdo.ipc.interfaces.ITransportSetup
/* Module Status object */
struct Module_State {
- TransportShmNotify.Handle handles[]; /* handle per remote proc */
+ TransportShmNotify.Handle handles[]; /* handle per proc in cluster */
}
}
diff --git a/packages/ti/sdo/ipc/transports/TransportShmNotifySetup.xs b/packages/ti/sdo/ipc/transports/TransportShmNotifySetup.xs
index db8ca734a7e37f23ab3fa57495dc857bdd5cc62d..c90c58e4c88dce94b8099b8cabcd461e8ba8ef59 100644 (file)
/*
- * Copyright (c) 2012-2013, Texas Instruments Incorporated
+ * Copyright (c) 2012-2015 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
/*
* ======== TransportShmNotifySetup.xs ========
*/
-
-var TransportShmNotifySetup = null;
-var TransportShmNotify = null;
-var MultiProc = null;
+var MultiProc = null;
/*
* ======== module$use ========
*/
function module$use()
{
- TransportShmNotifySetup = this;
- TransportShmNotify =
- xdc.useModule("ti.sdo.ipc.transports.TransportShmNotify");
+ xdc.useModule("ti.sdo.ipc.transports.TransportShmNotify");
MultiProc = xdc.useModule("ti.sdo.utils.MultiProc");
}
/*
* ======== module$static$init ========
*/
-function module$static$init(mod, params)
+function module$static$init(state, mod)
{
- /* set the length of handles to the number of processors */
- mod.handles.length = MultiProc.numProcessors;
- /* init the remote processor handles to null */
- for (var i=0; i < mod.handles.length; i++) {
- mod.handles[i] = null;
+ /* This module is used only for attaching to processors in the
+ * cluster. The cluster size is either a subset of all processors
+ * or contains all processors (i.e. no cluster defined).
+ */
+ state.handles.length = MultiProc.numProcsInCluster;
+
+ for (var i = 0; i < state.handles.length; i++) {
+ state.handles[i] = null;
}
}
index d085933094861691f1b6b11e5b81f88b3ba94aaa..3c019274700f5d4c177f0f94686602ad235cfd12 100644 (file)
/*
- * Copyright (c) 2012-2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (c) 2012-2015 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*/
config UInt16 numProcessors = 1;
+ /*!
+ * ======== ProcAddrMode ========
+ * Enumerate the Processor Addressing Modes
+ *
+ * This enumeration defines the various processor addressing modes
+ * which might impact the behavior and resource allocations of modules
+ * that communicate with other processors in the system (i.e. IPC).
+ *
+ * It is a way for the system integrator to control the internal
+ * behavior and resource allocations of various module to suit the
+ * needs of the program. However, it is at the discretion of each
+ * module on how to respond to these processor addressing modes.
+ *
+ * For example, the NameServer module reflects on this mode when
+ * constructing its internal data structures. For the 'Global' mode,
+ * it will allocate a resource for every processor in the system.
+ * When using 'Cluster' mode, resources are only allocated for
+ * processors in the cluster. A side-effect is that when using
+ * Cluster mode, name queries cannot be addressed to processors
+ * outside of the cluster.
+ *
+ * Specify the addressing mode by setting the
+ * {@link #procAddrMode MultiProc.procAddrMode} configuration
+ * parameter.
+ *
+ * @field(ProcAddrMode_Global) Every processor in the system must
+ * be directly addressable. Usually, this requires a resource
+ * allocation for each processor. This might require a dedicated
+ * hardware resource and/or a memory allocation on behalf of every
+ * processor in the system. For large systems, this might result in
+ * significant memory requirements. Use with caution.
+ *
+ * @field(ProcAddrMode_Cluster) Direct addressing is required
+ * only for the processors in your cluster. Processors outside of
+ * the cluster may share resources. This mode limits the per processor
+ * resource allocations to just the processors within your cluster.
+ * This address mode is typically used for large processor systems.
+ *
+ * @see {@link #procAddrMode MultiProc.procAddrMode}
+ */
+ enum ProcAddrMode {
+ ProcAddrMode_Global,
+ ProcAddrMode_Cluster
+ };
+
+ /*!
+ * ======== procAddrMode ========
+ * Define which processor addressing mode is in operation
+ *
+ * This configuration parameter is reflected upon by various system
+ * components whose implementation is impacted by the processor
+ * addressing mode currently in effect. It allows modules to optimize
+ * their behavior and resource allocations for any given processor
+ * address mode.
+ *
+ * The MultiProc module has no specific behavior associated with
+ * this configuration parameter. It is simply a convenient location
+ * for such a configuration parameter as most processor aware
+ * modules already depend on MultiProc.
+ *
+ * @see {@link #ProcAddrMode MultiProc.ProcAddrMode}
+ */
+ config ProcAddrMode procAddrMode = MultiProc.ProcAddrMode_Global;
+
/*! @_nodoc
* ======== getClusterId ========
*/
index f6a4ebc0f3cdf9b6682ff39e6dfc5d52afc4311c..97303bc0e6526b818a7590279bc05f3716f92820 100644 (file)
/*
- * Copyright (c) 2012-2014, Texas Instruments Incorporated
+ * Copyright (c) 2012-2015 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
Int i;
Int status = NameServer_E_FAIL;
Error_Block eb;
+ UInt16 baseId;
+ UInt16 length;
+ UInt16 index;
Error_init(&eb);
+ /* processor address mode determines cluster baseId */
+ switch (ti_sdo_utils_MultiProc_procAddrMode) {
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Global:
+ baseId = 0;
+ break;
+
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Cluster:
+ baseId = MultiProc_getBaseIdOfCluster();
+ break;
+
+ default:
+ Assert_isTrue(FALSE, NULL);
+ break;
+ }
+
/*
* Query all the processors.
*/
if (status == NameServer_E_NOTFOUND) {
/* To eliminate code if possible */
if (ti_sdo_utils_NameServer_singleProcessor == FALSE) {
- /* Query all the remote processors */
- for (i = 0; i < ti_sdo_utils_MultiProc_numProcessors; i++) {
- /* Skip the local table. It was already searched */
- if (i != MultiProc_self()) {
- if (NameServer_module->nsRemoteHandle[i] != NULL) {
- status = INameServerRemote_get(
- NameServer_module->nsRemoteHandle[i],
- obj->name, name, value, len, NULL, &eb);
- }
-
- /* continue only if not found */
- if ((status >= 0) ||
- ((status < 0) &&
- (status != NameServer_E_NOTFOUND) &&
- (status != NameServer_E_TIMEOUT))) {
- break;
- }
+ length = (UInt16)NameServer_module->nsRemoteHandle.length;
+
+ for (i = 0; i < length; i++) {
+ /* skip myself, local table already searched above */
+ if ((baseId + i) == MultiProc_self()) {
+ continue;
+ }
+ if (NameServer_module->nsRemoteHandle.elem[i] != NULL) {
+ status = INameServerRemote_get(
+ NameServer_module->nsRemoteHandle.elem[i],
+ obj->name, name, value, len, NULL, &eb);
+ }
+ /* stop looking if found or encountered fatal error */
+ if ((status >= 0) || ((status != NameServer_E_NOTFOUND)
+ && (status != NameServer_E_TIMEOUT))) {
+ break;
}
}
}
}
}
else {
- /*
- * Search the query list. It might contain the local proc
+ /* Search the query list. It might contain the local proc
* somewhere in the list.
*/
- i = 0;
status = NameServer_E_NOTFOUND;
- while (procId[i] != MultiProc_INVALIDID) {
+
+ for (i = 0; procId[i] != MultiProc_INVALIDID; i++) {
if (procId[i] == MultiProc_self()) {
- /* Check local */
+ /* check local */
status = NameServer_getLocal(handle, name, value, len);
}
else if (ti_sdo_utils_NameServer_singleProcessor == FALSE) {
- /* Check remote */
- if (NameServer_module->nsRemoteHandle[procId[i]] != NULL) {
+ index = procId[i] - baseId;
+ /* check remote */
+ if (NameServer_module->nsRemoteHandle.elem[index] != NULL) {
status = INameServerRemote_get(
- NameServer_module->nsRemoteHandle[procId[i]],
- obj->name, name, value, len, NULL, &eb);
+ NameServer_module->nsRemoteHandle.elem[index],
+ obj->name, name, value, len, NULL, &eb);
}
}
-
- /* continue only if not found */
- if ((status >= 0) ||
- ((status < 0) &&
- (status != NameServer_E_NOTFOUND) &&
- (status != NameServer_E_TIMEOUT))) {
- break;
- }
- else {
- i++;
-
- /* if we've queried all procs then exit */
- if (i == MultiProc_getNumProcsInCluster()) {
- break;
- }
+ /* stop looking if found or encountered fatal error */
+ if ((status >= 0) || ((status != NameServer_E_NOTFOUND)
+ && (status != NameServer_E_TIMEOUT))) {
+ break;
}
}
}
Int i;
ti_sdo_utils_NameServer_Object *obj;
- for (i = 0; i < ti_sdo_utils_MultiProc_numProcessors; i++) {
- NameServer_module->nsRemoteHandle[i] = NULL;
- }
-
/* Finish setting up the freeList */
for (i = 0; i < ti_sdo_utils_NameServer_Object_count(); i++) {
obj = ti_sdo_utils_NameServer_Object_get(NULL, i);
Bool ti_sdo_utils_NameServer_isRegistered(UInt16 procId)
{
Bool registered;
+ UInt16 index;
Assert_isTrue(procId < ti_sdo_utils_MultiProc_numProcessors,
ti_sdo_utils_NameServer_A_invArgument);
- registered = (NameServer_module->nsRemoteHandle[procId] != NULL);
+ switch (ti_sdo_utils_MultiProc_procAddrMode) {
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Global:
+ index = procId;
+ break;
+
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Cluster:
+ index = procId - MultiProc_getBaseIdOfCluster();
+ break;
+
+ default:
+ Assert_isTrue(FALSE, NULL);
+ break;
+ }
+
+ Assert_isTrue(index < NameServer_module->nsRemoteHandle.length,
+ ti_sdo_utils_NameServer_A_invArgument);
+
+ registered = (NameServer_module->nsRemoteHandle.elem[index] != NULL);
return (registered);
}
/*
* ======== ti_sdo_utils_NameServer_registerRemoteDriver ========
*/
-Int ti_sdo_utils_NameServer_registerRemoteDriver(INameServerRemote_Handle nsrHandle,
- UInt16 procId)
+Int ti_sdo_utils_NameServer_registerRemoteDriver(INameServerRemote_Handle
+ nsrHandle, UInt16 procId)
{
- Int status;
+ Int status;
+ UInt16 index;
UInt key;
Assert_isTrue(procId < ti_sdo_utils_MultiProc_numProcessors,
ti_sdo_utils_NameServer_A_invArgument);
+ switch (ti_sdo_utils_MultiProc_procAddrMode) {
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Global:
+ index = procId;
+ break;
+
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Cluster:
+ index = procId - MultiProc_getBaseIdOfCluster();
+ break;
+
+ default:
+ Assert_isTrue(FALSE, NULL);
+ break;
+ }
+
+ Assert_isTrue(index < NameServer_module->nsRemoteHandle.length,
+ ti_sdo_utils_NameServer_A_invArgument);
+
key = Hwi_disable();
- if (NameServer_module->nsRemoteHandle[procId] != NULL) {
+ if (NameServer_module->nsRemoteHandle.elem[index] != NULL) {
status = NameServer_E_FAIL;
}
else {
- NameServer_module->nsRemoteHandle[procId] = nsrHandle;
+ NameServer_module->nsRemoteHandle.elem[index] = nsrHandle;
status = NameServer_S_SUCCESS;
}
@@ -900,14 +938,32 @@ Int ti_sdo_utils_NameServer_registerRemoteDriver(INameServerRemote_Handle nsrHan
*/
Void ti_sdo_utils_NameServer_unregisterRemoteDriver(UInt16 procId)
{
+ UInt16 index;
UInt key;
Assert_isTrue(procId < ti_sdo_utils_MultiProc_numProcessors,
ti_sdo_utils_NameServer_A_invArgument);
+ switch (ti_sdo_utils_MultiProc_procAddrMode) {
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Global:
+ index = procId;
+ break;
+
+ case ti_sdo_utils_MultiProc_ProcAddrMode_Cluster:
+ index = procId - MultiProc_getBaseIdOfCluster();
+ break;
+
+ default:
+ Assert_isTrue(FALSE, NULL);
+ break;
+ }
+
+ Assert_isTrue(index < NameServer_module->nsRemoteHandle.length,
+ ti_sdo_utils_NameServer_A_invArgument);
+
key = Hwi_disable();
- NameServer_module->nsRemoteHandle[procId] = NULL;
+ NameServer_module->nsRemoteHandle.elem[index] = NULL;
Hwi_restore(key);
}
index cb90ed8d5d24a8931c474d2bef6e44a857071ccd..b266d52af08b5c18d98d4908680ce2cb0ff6d168 100644 (file)
/*
- * Copyright (c) 2012-2013, Texas Instruments Incorporated
+ * Copyright (c) 2012-2015 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* module configuration, Errors, and Asserts.
* @p
*
+ * @a(Note)
+ * This module reflects upon the {@link ti.sdo.utils.MultiProc#procAddrMode}
+ * configuration parameter. Some internal data structure allocations are
+ * optimized for the given processor address mode. For example, when using
+ * MultiProc.ProcAddrMode_Global, resource is allocated for every processor
+ * in the system. When using MultiProc.ProcAddrMode_Cluster, resources are
+ * only allocated for processors in the cluster. A side-effect is that when
+ * using Cluster mode, name queries cannot be addressed to processors
+ * outside of the cluster.
*/
@ModuleStartup
};
struct Module_State {
- INameServerRemote.Handle nsRemoteHandle[];
+ INameServerRemote.Handle nsRemoteHandle[length];
GateSwi.Handle gate;
};
}
index ccdcdebc98589a6d22d003909fc3361ec028230d..f6590c429615dfe2e484fd4180baf9152f327d97 100644 (file)
/*
- * Copyright (c) 2012-2013, Texas Instruments Incorporated
+ * Copyright (c) 2012-2015 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/*
* ======== module$static$init ========
*/
-function module$static$init(mod, params)
+function module$static$init(state, mod)
{
/* This will result in some code reduction if building whole_program. */
if (MultiProc.numProcessors == 1) {
NameServer.singleProcessor = false;
}
- /* Array of NameServerRemote instances */
- mod.nsRemoteHandle.length = MultiProc.numProcessors;
+ if (MultiProc.procAddrMode == MultiProc.ProcAddrMode_Global) {
+ /* global address mode: NameServerRemote instance for every processor */
+ state.nsRemoteHandle.length = MultiProc.numProcessors;
+ }
+ else if (MultiProc.procAddrMode == MultiProc.ProcAddrMode_Cluster) {
+ /* cluster address mode: need instance only for cluster members */
+ state.nsRemoteHandle.length = MultiProc.numProcsInCluster;
+ }
+ else {
+ NameServer.$logError("Unknown MultiProc.procAddrMode", this);
+ }
+
+ for (var i = 0; i < state.nsRemoteHandle.length; i++) {
+ state.nsRemoteHandle[i] = null;
+ }
/* Gate for all NameServer critical regions */
- mod.gate = GateSwi.create();
+ state.gate = GateSwi.create();
}
/*