Linux: Cleanup on Failure in Ipc_start
authorAngela Stegmaier <angelabaker@ti.com>
Wed, 7 Mar 2018 19:41:08 +0000 (13:41 -0600)
committerAngela Stegmaier <angelabaker@ti.com>
Tue, 20 Mar 2018 14:58:39 +0000 (09:58 -0500)
Add the appropriate cleanup calls when Ipc_start fails, so
that Ipc_start can be called again in a clean state. This
allows retry of Ipc_start in case of failure due to various
conditions including if responses are delayed from the remote
core for some reason.

Signed-off-by: Angela Stegmaier <angelabaker@ti.com>
linux/src/api/Ipc.c

index 4f87d3a924c1cbd25c43caec82f3e9ffc6835d2d..30301e14bd1fb5f75909c1dedc113c1e69437fd9 100644 (file)
@@ -208,6 +208,7 @@ Int Ipc_start(Void)
 
     if (status < 0) {
         fprintf(stderr, "Ipc_start: NameServer_setup() failed: %d\n", status);
+        LAD_disconnect(ladHandle);
         status = Ipc_E_FAIL;
         goto exit;
     }
@@ -225,6 +226,10 @@ Int Ipc_start(Void)
     status = Ipc_module.transportFactory->createFxn();
 
     if (status < 0) {
+        MessageQ_unregisterHeap(Ipc_module.config.idHeapStd);
+        MessageQ_destroy();
+        NameServer_destroy();
+        LAD_disconnect(ladHandle);
         goto exit;
     }
 
@@ -266,6 +271,32 @@ Int Ipc_start(Void)
         if (status < 0) {
             fprintf(stderr, "Ipc_start: GateHWSpinlock_start failed: %d\n",
                     status);
+            if (Ipc_module.config.procSync == Ipc_ProcSync_ALL) {
+                clusterSize = MultiProc_getNumProcsInCluster();
+                baseId = MultiProc_getBaseIdOfCluster();
+
+                for (clusterId = 0; clusterId < clusterSize; clusterId++) {
+                    procId = baseId + clusterId;
+
+                    if (MultiProc_self() == procId) {
+                        continue;
+                    }
+
+                    /*  For backward compatibility, we might not be attached to
+                     *  all cluster members. Skip unattached processors.
+                     */
+                    if (!Ipc_isAttached(procId)) {
+                        continue;
+                    }
+
+                    Ipc_detach(procId);
+                }
+            }
+            Ipc_module.transportFactory->deleteFxn();
+            MessageQ_unregisterHeap(Ipc_module.config.idHeapStd);
+            MessageQ_destroy();
+            NameServer_destroy();
+            LAD_disconnect(ladHandle);
             status = Ipc_E_FAIL;
             goto exit;
         }
@@ -282,6 +313,32 @@ Int Ipc_start(Void)
                 fprintf(stderr, "Ipc_start: GateMP_start failed: %d\n", status);
                 status = Ipc_E_FAIL;
                 GateHWSpinlock_stop();
+                if (Ipc_module.config.procSync == Ipc_ProcSync_ALL) {
+                    clusterSize = MultiProc_getNumProcsInCluster();
+                    baseId = MultiProc_getBaseIdOfCluster();
+
+                    for (clusterId = 0; clusterId < clusterSize; clusterId++) {
+                        procId = baseId + clusterId;
+
+                        if (MultiProc_self() == procId) {
+                            continue;
+                        }
+
+                        /*  For backward compatibility, we might not be attached to
+                         *  all cluster members. Skip unattached processors.
+                         */
+                        if (!Ipc_isAttached(procId)) {
+                            continue;
+                        }
+
+                        Ipc_detach(procId);
+                    }
+                }
+                Ipc_module.transportFactory->deleteFxn();
+                MessageQ_unregisterHeap(Ipc_module.config.idHeapStd);
+                MessageQ_destroy();
+                NameServer_destroy();
+                LAD_disconnect(ladHandle);
                 goto exit;
             }
         }