author    Chenchi Luo <a0282871@ti.com>
          Thu, 21 May 2015 20:33:13 +0000 (15:33 -0500)
committer Chenchi Luo <a0282871@ti.com>
          Thu, 21 May 2015 20:33:13 +0000 (15:33 -0500)
2. Enable minimum memory mode for CNN testing
3. Enable batch mode for CNN testing/training
4. Add host/accelerator configurations
249 files changed:
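The testing examples in this commit converge on a single flow; below is a minimal sketch assembled from the hunks that follow (MODEL_PATH, TOP_N, testImage, testLabel, and testNum stand in for each example's own definitions):

    timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH); // second argument (formerly a literal 0) is dropped
    timlCNNAddAccuracyLayer(cnn, TOP_N);                          // top-N bookkeeping moves into the network
    timlCNNInitialize(cnn);
    timlCNNLoadParamsFromFile(cnn, cnn->paramsFileName);
    timlCNNSetMode(cnn, Util_Test);

    int success;                                                  // hit count is now returned via an out-parameter
    timlCNNClassifyAccuracy(cnn, testImage, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL,
                            testLabel, 1, 1, testNum, &success);
    timlCNNDelete(cnn);

This replaces the old timlCNNClassifyTopNBatchMode / timlUtilClassifyAccuracy pair; what the dropped constructor argument selected is not stated on this page.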
diff --git a/build/makefile b/build/makefile
index 0253dfeb0a451b89eff83503dddfdf18f4395bab..1318e8b094de4146b9d46ba64071a7b56f00c774 100755 (executable)
--- a/build/makefile
+++ b/build/makefile
\r
\r
\r
-C_INCLUDE_PATH = $(SRC_DIR)/common/api $(SRC_DIR)/common/cnn $(SRC_DIR)/common/util \\r
+C_INCLUDE_PATH = $(SRC_DIR)/common/api $(SRC_DIR)/common/cnn $(SRC_DIR)/common/util $(SRC_DIR)/dsp\\r
$(TARGET_ROOT)/usr/include\r
C_INCLUDE_FLAG = $(foreach d, $(C_INCLUDE_PATH), -I$d)\r
CFLAGS += -g -O3 -fopenmp\r
diff --git a/debian/changelog b/debian/changelog
index 745791fa8768e4153efc2ea8c297d6ba117a5538..02d962107a85bedcff9edef7679dc4413e358e18 100644 (file)
--- a/debian/changelog
+++ b/debian/changelog
+ti-ml (01.00.01.00-0ubuntu0~ppa4) trusty; urgency=medium
+
+ * Fix for compilation of the 'caffe' example
+
+ -- TI Keystone PPA <ti-keystone-ppa@list.ti.com> Thu, 21 May 2015 15:43:15 -0400
+
+ti-ml (01.00.01.00-0ubuntu0~ppa3) trusty; urgency=medium
+
+ * Fixes for cross compilation
+
+ -- TI Keystone PPA <ti-keystone-ppa@list.ti.com> Wed, 20 May 2015 19:04:55 -0400
+
ti-ml (01.00.01.00-0ubuntu0~ppa2) trusty; urgency=medium
* Adding .testcfg and .testignore files for HPC autotests
diff --git a/debian/changelog~ b/debian/changelog~
--- /dev/null
+++ b/debian/changelog~
@@ -0,0 +1,36 @@
+ti-ml (01.00.01.00-0ubuntu0~ppa4) trusty; urgency=medium
+
+ * Fix for compilation of the 'caffe' example
+
+ -- TI Keystone PPA <ti-keystone-ppa@list.ti.com> Thu, 21 May 2015 15:43:15 -0400
+
+ti-ml (01.00.01.00-0ubuntu0~ppa3) trusty; urgency=medium
+
+ * Fixes for cross compilation
+
+ -- TI Keystone PPA <ti-keystone-ppa@list.ti.com> Wed, 20 May 2015 19:04:55 -0400
+
+ti-ml (01.00.01.00-0ubuntu0~ppa2) trusty; urgency=medium
+
+ * Adding .testcfg and .testignore files for HPC autotests
+
+ -- TI Keystone PPA <ti-keystone-ppa@list.ti.com> Sat, 02 May 2015 16:54:33 -0400
+
+ti-ml (01.00.01.00-0ubuntu0~ppa1) trusty; urgency=medium
+
+ * Added pre-downloaded images to one of the examples (appCNNClassCIFAR10)
+
+ -- TI Keystone PPA <ti-keystone-ppa@list.ti.com> Fri, 01 May 2015 04:04:49 +0000
+
+ti-ml (01.00.00.00-0ubuntu0~ppa2) trusty; urgency=medium
+
+ * Fixing debian/control
+
+ -- TI Keystone PPA <ti-keystone-ppa@list.ti.com> Thu, 02 Apr 2015 12:44:35 -0400
+
+ti-ml (01.00.00.00-0ubuntu0~ppa1) trusty; urgency=medium
+
+ * Initial release with TI Machine Learning (TIML) library.
+
+ -- TI Keystone PPA <ti-keystone-ppa@list.ti.com> Mon, 23 Feb 2015 23:26:20 +0000
+
diff --git a/src/app/cnn/class/cifar10/appCNNClassCIFAR10Testing.bin b/src/app/cnn/class/cifar10/appCNNClassCIFAR10Testing.bin
new file mode 100755 (executable)
index 0000000..9b47d5b
Binary files /dev/null and b/src/app/cnn/class/cifar10/appCNNClassCIFAR10Testing.bin differ
diff --git a/src/app/cnn/class/cifar10/appCNNClassCIFAR10Testing.c b/src/app/cnn/class/cifar10/appCNNClassCIFAR10Testing.c
index cac970cc308709517dd6ff58ffca6db17b8738c0..642e4ad73d1c8fd4fa54c2647d6cd75628f5d7ab 100644 (file)
#define MODEL_PATH "../../../../database/model/cifar10/databaseModelCIFAR10.m"
#define DATABASE_PATH "../../../../database/cifar10"
-#define IMAGE_PATH "../../../../database/cifar10/%1d.jpg"
-#define LABEL_PATH "../../../../database/cifar10/label.txt"
-#define IMAGE_NUM 3
#define TOP_N 1
#define IMAGE_ROW 32
#define IMAGE_COL 32
struct timespec endTime;
long testingTime;
int topN;
- int *label;
timlUtilImageSet training;
timlUtilImageSet testing;
- timlUtilImage image;
- char str[TIML_UTIL_MAX_STR];
- int i;
- FILE *fp;
- int read;
+ size_t mem1;
+ size_t mem2;
+ size_t mem3;
// init
err = 0;
// read the CNN config file
printf("1. Read the CNN config\n");
- timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH, 0);
+ timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH);
+ timlCNNAddAccuracyLayer(cnn, TOP_N);
+ timlCNNInitialize(cnn);
+ timlCNNLoadParamsFromFile(cnn, cnn->paramsFileName);
timlCNNSetMode(cnn, Util_Test);
- mem = timlCNNMemory(cnn);
timlCNNPrint(cnn);
- printf("CNN memory allocation = %.10f MB.\n", (float)mem/1024.0/1024.0);
- printf("CNN parameter # = %lu.\n", timlCNNGetParamsNum(cnn));
-
-// // read CIFAR10 database
-// printf("2. Read CIFAR10 database\n");
-// timlUtilReadCIFAR10(DATABASE_PATH, &training, &testing);
-
- testing.data = malloc(sizeof(float)*IMAGE_ROW*IMAGE_COL*IMAGE_CHANNEL*IMAGE_NUM);
- testing.label = malloc(sizeof(int)*IMAGE_NUM);
- testing.num = IMAGE_NUM;
- // read labels
- fp = fopen(LABEL_PATH, "rt");
- for (i = 0; i < IMAGE_NUM; i++) {
- read = fscanf(fp, "%d", testing.label + i);
- }
- fclose(fp);
-
- // read images
- for (i = 0; i < IMAGE_NUM; i++) {
- sprintf(str, IMAGE_PATH, i);
- image = timlUtilReadJPEG(str);
- cblas_scopy(dim, image.data, 1, testing.data + i*dim, 1);
- free(image.data);
- }
+
+ mem1 = cnn->forwardMemory + cnn->backwardMemory + cnn->fixedMemory + cnn->paramsMemory;
+ mem2 = cnn->forwardMemory + cnn->fixedMemory + cnn->paramsMemory;
+ mem3 = cnn->memPoolSize + cnn->fixedMemory + cnn->paramsMemory;
+ printf("CNN level 1 memory size = %10.4f MB.\n", (float)mem1/1024.0/1024.0);
+ printf("CNN level 2 memory size = %10.4f MB.\n", (float)mem2/1024.0/1024.0);
+ printf("CNN level 3 memory size = %10.4f MB.\n", (float)mem3/1024.0/1024.0);
+ printf("CNN forward memory size = %10.4f MB.\n", (float)cnn->forwardMemory/1024.0/1024.0);
+ printf("CNN memory pool size = %10.4f MB.\n", (float)cnn->memPoolSize/1024.0/1024.0);
+ printf("CNN params memory size = %10.4f MB.\n", (float)cnn->paramsMemory/1024.0/1024.0);
+
+ // read CIFAR10 database
+ printf("2. Read CIFAR10 database\n");
+ timlUtilReadCIFAR10(DATABASE_PATH, &training, &testing);
// testing
printf("3. Start testing\n");
- label = malloc(sizeof(int)*topN*testing.num);
clock_gettime(CLOCK_REALTIME, &startTime);
- timlCNNClassifyTopNBatchMode(cnn, testing.data, dim, testing.num, label, NULL, topN);
+ timlCNNClassifyAccuracy(cnn, testing.data, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, testing.label, 1, 1, testing.num, &classifyNum);
clock_gettime(CLOCK_REALTIME, &endTime);
testingTime = timlUtilDiffTime(startTime, endTime);
- classifyNum = timlUtilClassifyAccuracy(label, topN, testing.num, testing.label);
classifyPercent = (float)classifyNum/(float)testing.num;
printf("Testing time = %.3f s\n", testingTime/1000000.0);
printf("Classify accuracy = %.3f %%\n", classifyPercent*100.00);
// cleaning
printf("4. Clean up\n");
-// free(training.data);
-// free(training.label);
+ free(training.data);
+ free(training.label);
free(testing.data);
free(testing.label);
- free(label);
timlCNNDelete(cnn);
return err;
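Note on the memory figures printed above: per the formulas in this hunk, level 1 sums the forward, backward, fixed, and parameter buffers (what training needs), level 2 drops the backward buffers (plain testing), and level 3 replaces the per-layer forward buffers with the shared memory pool (memPoolSize), i.e. the minimum memory mode this commit enables. A hedged sketch of opting in, using the allocatorLevel field that appears later in this commit (which enum value selects the pooled mode is an assumption here):

    cnn->params.allocatorLevel = Util_AllocatorLevel1; // assumed: pooled, minimum-memory allocation
    timlCNNInitialize(cnn);                            // buffers are sized according to the allocator level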
diff --git a/src/app/cnn/class/cifar10/appCNNClassCIFAR10Training.bin b/src/app/cnn/class/cifar10/appCNNClassCIFAR10Training.bin
new file mode 100755 (executable)
index 0000000..e7791bf
Binary files /dev/null and b/src/app/cnn/class/cifar10/appCNNClassCIFAR10Training.bin differ
diff --git a/src/app/cnn/class/cifar10/appCNNClassCIFAR10Training.c b/src/app/cnn/class/cifar10/appCNNClassCIFAR10Training.c
index b082a84facf71d9ff97bd4ad82ea71b27d5c9495..b756e05aa048e0836e4a5483e3a0337335ffc3da 100644 (file)
#define IMAGE_COL 32\r
#define IMAGE_CHANNEL 3\r
#define BATCH_SIZE 100\r
+#define EPOCH 10\r
\r
/*******************************************************************************\r
*\r
\r
int appCNNClassCIFAR10Training()\r
{\r
- int i;\r
+ int i,j;\r
int dim;\r
int batchSize;\r
int batchNum;\r
\r
// build up the CNN\r
printf("1. Build up CNN\n");\r
- timlConvNeuralNetwork *cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault(), 0);\r
+ timlConvNeuralNetwork *cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault());\r
cnn->params.learningRate = 0.01;\r
+ cnn->params.maxBatchSize = BATCH_SIZE;\r
+ cnn->params.batchSize = BATCH_SIZE;\r
+ cnn->params.batchUpdate = BATCH_SIZE;\r
timlCNNAddInputLayer(cnn, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, timlCNNInputParamsDefault()); // input layer\r
timlCNNAddConvLayer(cnn, 5, 5, 1, 1, 32, timlCNNConvParamsDefault()); // conv layer\r
timlCNNAddPoolingLayer(cnn, 3, 3, 2, 2, CNN_MaxPooling, timlCNNPoolingParamsDefault()); // max pooling layer\r
timlCNNAddPoolingLayer(cnn, 3, 3, 2, 2, CNN_MeanPooling, timlCNNPoolingParamsDefault()); // mean pooling layer\r
timlCNNAddLinearLayer(cnn, 64, timlCNNLinearParamsDefault()); // linear layer\r
timlCNNAddLinearLayer(cnn, 10, timlCNNLinearParamsDefault()); // linear layer\r
- timlCNNAddNonlinearLayer(cnn, Util_Softmax); // softmax layer\r
+ timlCNNAddSoftmaxCostLayer(cnn); // softmax cost layer\r
timlCNNInitialize(cnn);\r
timlCNNReset(cnn);\r
- mem = timlCNNMemory(cnn);\r
timlCNNPrint(cnn);\r
- printf("CNN memory allocation = %.10f MB.\n", (float)mem/1024.0/1024.0);\r
- printf("CNN parameter # = %lu.\n", timlCNNGetParamsNum(cnn));\r
+\r
\r
// read the CIFAR10 database\r
printf("2. Read CIFAR10 database\n");\r
printf("3. Start training\n");\r
batchNum = training.num/batchSize;\r
clock_gettime(CLOCK_REALTIME, &startTime);\r
- for (i = 0; i < batchNum; i++) {\r
- timlCNNSupervisedTrainingWithLabelBatchMode(cnn, training.data + i * batchSize * dim, training.label + i * batchSize, dim, batchSize);\r
+ for (j = 0; j < EPOCH; j++) {\r
+ for (i = 0; i < batchNum; i++) {\r
+ timlCNNSupervisedTrainingWithLabel(cnn, training.data + i*batchSize*dim, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, training.label + i*batchSize, 1, 1, batchSize);\r
+ }\r
}\r
clock_gettime(CLOCK_REALTIME, &endTime);\r
trainingTime = timlUtilDiffTime(startTime, endTime);\r
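Batch-mode training hinges on three parameters set before layers are added; a condensed restatement of the hunk above, with comments inferred from the parameter names and their uses in this commit rather than from documentation:

    cnn->params.maxBatchSize = BATCH_SIZE; // upper bound used when sizing buffers
    cnn->params.batchSize    = BATCH_SIZE; // images propagated per forward pass
    cnn->params.batchUpdate  = BATCH_SIZE; // images accumulated before a parameter update
    for (j = 0; j < EPOCH; j++)            // EPOCH is new in this commit
        for (i = 0; i < batchNum; i++)
            timlCNNSupervisedTrainingWithLabel(cnn, training.data + i*batchSize*dim,
                IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, training.label + i*batchSize, 1, 1, batchSize);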
diff --git a/src/app/cnn/class/imagenet/appCNNClassImageNetAlexNetTesting.bin b/src/app/cnn/class/imagenet/appCNNClassImageNetAlexNetTesting.bin
new file mode 100755 (executable)
index 0000000..070ea6c
Binary files /dev/null and b/src/app/cnn/class/imagenet/appCNNClassImageNetAlexNetTesting.bin differ
diff --git a/src/app/cnn/class/imagenet/appCNNClassImageNetAlexNetTesting.c b/src/app/cnn/class/imagenet/appCNNClassImageNetAlexNetTesting.c
index 778c731e471ba46115283d531bdbd2416a9cf4c8..b08ec151eb485ec7670e5b5a3839205b0dcb2b14 100644 (file)
#define LABEL_PATH "../../../../database/imagenet/test/label.txt"
#define IMAGE_PATH "../../../../database/imagenet/test/%010d.jpg"
#define TOP_N 5
-#define IMAGE_NUM 100
+#define IMAGE_NUM 1000
#define IMAGE_ROW 256
#define IMAGE_COL 256
#define IMAGE_CHANNEL 3
int err;
int success;
float classifyPercent;
- long mem;
+ long mem1, mem2, mem3;
FILE *fp;
struct timespec startTime;
struct timespec endTime;
float *testImage;
char str[TIML_UTIL_MAX_STR];
int topN;
- int *label;
// init
err = 0;
// read CNN config
printf("1. Read CNN config\n");
- timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH, 0);
- mem = timlCNNMemory(cnn);
+ timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH);
+ timlCNNAddAccuracyLayer(cnn, TOP_N);
+ timlCNNInitialize(cnn);
+ timlCNNLoadParamsFromFile(cnn, cnn->paramsFileName);
+
timlCNNPrint(cnn);
- printf("CNN memory allocation = %.10f MB.\n", (float)mem/1024.0/1024.0);
- printf("CNN parameter # = %lu.\n", timlCNNGetParamsNum(cnn));
+ mem1 = cnn->forwardMemory + cnn->backwardMemory + cnn->fixedMemory + cnn->paramsMemory;
+ mem2 = cnn->forwardMemory + cnn->fixedMemory + cnn->paramsMemory;
+ mem3 = cnn->memPoolSize + cnn->fixedMemory + cnn->paramsMemory;
+ printf("CNN level 1 memory size = %10.4f MB.\n", (float)mem1/1024.0/1024.0);
+ printf("CNN level 2 memory size = %10.4f MB.\n", (float)mem2/1024.0/1024.0);
+ printf("CNN level 3 memory size = %10.4f MB.\n", (float)mem3/1024.0/1024.0);
+ printf("CNN forward memory size = %10.4f MB.\n", (float)cnn->forwardMemory/1024.0/1024.0);
+ printf("CNN memory pool size = %10.4f MB.\n", (float)cnn->memPoolSize/1024.0/1024.0);
+ printf("CNN params memory size = %10.4f MB.\n", (float)cnn->paramsMemory/1024.0/1024.0);
timlCNNSetMode(cnn, Util_Test);
// read test images
image = timlUtilReadJPEG(str);
if (image.channel == 1) { // duplicate channels
for (j = 0; j < IMAGE_CHANNEL; j++) {
- cblas_scopy(IMAGE_ROW * IMAGE_COL, image.data, 1, testImage + i*dim + j*IMAGE_ROW*IMAGE_COL, 1);
+ cblas_scopy(IMAGE_ROW*IMAGE_COL, image.data, 1, testImage + i*dim + j*IMAGE_ROW*IMAGE_COL, 1);
}
}
else {
}
free(image.data);
}
- label = malloc(sizeof(int)*testNum*topN);
// testing
printf("3. Start testing\n");
clock_gettime(CLOCK_REALTIME, &startTime);
- timlCNNClassifyTopNBatchModeOpenMP(cnn, testImage, dim, testNum, label, NULL, topN);
+ timlCNNClassifyAccuracy(cnn, testImage, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, testLabel, 1, 1, testNum, &success);
clock_gettime(CLOCK_REALTIME, &endTime);
testingTime = timlUtilDiffTime(startTime, endTime);
- success = timlUtilClassifyAccuracy(label, topN, testNum, testLabel);
classifyPercent = (float)success/(float)testNum;
printf("Testing time = %.2f s.\n", testingTime/1000000.0);
printf("Top%d success percent = %.3f %%\n", topN, classifyPercent*100.00);
timlCNNDelete(cnn);
free(testLabel);
free(testImage);
- free(label);
return err;
}
diff --git a/src/app/cnn/class/imagenet/appCNNClassImageNetAlexNetTestingOpenMP.bin b/src/app/cnn/class/imagenet/appCNNClassImageNetAlexNetTestingOpenMP.bin
new file mode 100755 (executable)
index 0000000..6ea2c10
Binary files /dev/null and b/src/app/cnn/class/imagenet/appCNNClassImageNetAlexNetTestingOpenMP.bin differ
diff --git a/src/app/cnn/class/imagenet/appCNNClassImageNetAlexNetTestingOpenMP.c b/src/app/cnn/class/imagenet/appCNNClassImageNetAlexNetTestingOpenMP.c
--- /dev/null
@@ -0,0 +1,184 @@
+/******************************************************************************/
+/*!
+ * \file appCNNClassImageNetAlexNetTestingOpenMP.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../appCNNClass.h"
+
+
+/*******************************************************************************
+ *
+ * DEFINES
+ *
+ ******************************************************************************/
+
+#define MODEL_PATH "../../../../database/model/alexnet/databaseModelAlexNet.m"
+#define LABEL_PATH "../../../../database/imagenet/test/label.txt"
+#define IMAGE_PATH "../../../../database/imagenet/test/%010d.jpg"
+#define TOP_N 5
+#define IMAGE_NUM 1000
+#define IMAGE_ROW 256
+#define IMAGE_COL 256
+#define IMAGE_CHANNEL 3
+
+
+/*******************************************************************************
+ *
+ * main()
+ *
+ ******************************************************************************/
+
+int main()
+{
+ return appCNNClassImageNetAlexNetTestingOpenMP();
+}
+
+
+/******************************************************************************/
+/*!
+ * \ingroup appCNNClass
+ * \brief AlexNet classification testing example
+ */
+/******************************************************************************/
+
+int appCNNClassImageNetAlexNetTestingOpenMP()
+{
+ int i;
+ int j;
+ int read;
+ int err;
+ int success;
+ float classifyPercent;
+ long mem1, mem2, mem3;
+ FILE *fp;
+ struct timespec startTime;
+ struct timespec endTime;
+ long testingTime;
+ timlUtilImage image;
+ int testNum;
+ int dim;
+ int *testLabel;
+ float *testImage;
+ char str[TIML_UTIL_MAX_STR];
+ int topN;
+
+ // init
+ err = 0;
+ testNum = IMAGE_NUM;
+ dim = IMAGE_ROW*IMAGE_COL*IMAGE_CHANNEL;
+ topN = TOP_N;
+
+ setbuf(stdout, NULL); // do not buffer the console output
+
+ // read CNN config
+ printf("1. Read CNN config\n");
+ timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH);
+ timlCNNAddAccuracyLayer(cnn, TOP_N);
+ timlCNNInitialize(cnn);
+ timlCNNLoadParamsFromFile(cnn, cnn->paramsFileName);
+
+ timlCNNPrint(cnn);
+ mem1 = cnn->forwardMemory + cnn->backwardMemory + cnn->fixedMemory + cnn->paramsMemory;
+ mem2 = cnn->forwardMemory + cnn->fixedMemory + cnn->paramsMemory;
+ mem3 = cnn->memPoolSize + cnn->fixedMemory + cnn->paramsMemory;
+ printf("CNN level 1 memory size = %10.4f MB.\n", (float)mem1/1024.0/1024.0);
+ printf("CNN level 2 memory size = %10.4f MB.\n", (float)mem2/1024.0/1024.0);
+ printf("CNN level 3 memory size = %10.4f MB.\n", (float)mem3/1024.0/1024.0);
+ printf("CNN forward memory size = %10.4f MB.\n", (float)cnn->forwardMemory/1024.0/1024.0);
+ printf("CNN memory pool size = %10.4f MB.\n", (float)cnn->memPoolSize/1024.0/1024.0);
+ printf("CNN params memory size = %10.4f MB.\n", (float)cnn->paramsMemory/1024.0/1024.0);
+ timlCNNSetMode(cnn, Util_Test);
+
+ int thread = 4;
+ timlConvNeuralNetwork **cnnTeam = malloc(sizeof(timlConvNeuralNetwork*)*thread);
+ cnnTeam[0] = cnn;
+ for (i = 1; i < thread; i++){
+ cnnTeam[i] = timlCNNShareParams(cnnTeam[0]);
+ timlCNNInitialize(cnnTeam[i]);
+ }
+ // read test images
+ printf("2. Read test images\n");
+ testLabel = malloc(sizeof(int)*testNum);
+ testImage = malloc(sizeof(float)*IMAGE_ROW*IMAGE_COL*IMAGE_CHANNEL*testNum);
+
+ // read labels
+ fp = fopen(LABEL_PATH, "rt");
+ for (i = 0; i < testNum; i++) {
+ read = fscanf(fp, "%d", testLabel + i);
+ }
+ fclose(fp);
+
+ // read images
+ for (i = 0; i < testNum; i++) {
+ sprintf(str, IMAGE_PATH, i);
+ image = timlUtilReadJPEG(str);
+ if (image.channel == 1) { // duplicate channels
+ for (j = 0; j < IMAGE_CHANNEL; j++) {
+ cblas_scopy(IMAGE_ROW*IMAGE_COL, image.data, 1, testImage + i*dim + j*IMAGE_ROW*IMAGE_COL, 1);
+ }
+ }
+ else {
+ cblas_scopy(dim, image.data, 1, testImage + i*dim, 1);
+ }
+ free(image.data);
+ }
+
+ // testing
+ printf("3. Start testing\n");
+ clock_gettime(CLOCK_REALTIME, &startTime);
+ timlCNNClassifyAccuracyTeamModeOpenMP(cnnTeam, thread, testImage, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, testLabel, 1, 1, testNum, &success);
+ clock_gettime(CLOCK_REALTIME, &endTime);
+
+ testingTime = timlUtilDiffTime(startTime, endTime);
+ classifyPercent = (float)success/(float)testNum;
+ printf("Testing time = %.2f s.\n", testingTime/1000000.0);
+ printf("Top%1d success percent = %.3f %%\n", topN, classifyPercent*100.00);
+
+ // cleaning up
+ printf("4. Clean up\n");
+ for (i = 0; i < thread; i++){
+ timlCNNDelete(cnnTeam[i]);
+ }
+ free(testLabel);
+ free(testImage);
+
+ return err;
+}
diff --git a/src/app/cnn/class/imagenet/appCNNClassImageNetCaffeNetTesting.bin b/src/app/cnn/class/imagenet/appCNNClassImageNetCaffeNetTesting.bin
new file mode 100755 (executable)
index 0000000..854372a
Binary files /dev/null and b/src/app/cnn/class/imagenet/appCNNClassImageNetCaffeNetTesting.bin differ
diff --git a/src/app/cnn/class/imagenet/appCNNClassImageNetCaffeNetTesting.c b/src/app/cnn/class/imagenet/appCNNClassImageNetCaffeNetTesting.c
index 6ac14294dbc3abb91918180439692cadfc8b8d2b..4a1bca17e61d6fb0f4796d82f243257f9103b0d9 100644 (file)
#define LABEL_PATH "../../../../database/imagenet/test/label.txt"
#define IMAGE_PATH "../../../../database/imagenet/test/%010d.jpg"
#define TOP_N 5
-#define IMAGE_NUM 100
+#define IMAGE_NUM 1000
#define IMAGE_ROW 256
#define IMAGE_COL 256
#define IMAGE_CHANNEL 3
int err;
int success;
float classifyPercent;
- long mem;
+ long mem1;
+ long mem2;
+ long mem3;
FILE *fp;
struct timespec startTime;
struct timespec endTime;
int *testLabel;
float *testImage;
char str[TIML_UTIL_MAX_STR];
- int topN;
int *label;
// init
err = 0;
testNum = IMAGE_NUM;
dim = IMAGE_ROW*IMAGE_COL*IMAGE_CHANNEL;
- topN = TOP_N;
setbuf(stdout, NULL); // do not buffer the console output
// read CNN config
printf("1. Read CNN config\n");
- timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH, 0);
- mem = timlCNNMemory(cnn);
+ timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH);
+ timlCNNAddAccuracyLayer(cnn, TOP_N);
+ timlCNNInitialize(cnn);
+ timlCNNLoadParamsFromFile(cnn, cnn->paramsFileName);
timlCNNPrint(cnn);
- printf("CNN memory allocation = %.10f MB.\n", (float)mem/1024.0/1024.0);
- printf("CNN parameter # = %lu.\n", timlCNNGetParamsNum(cnn));
+ mem1 = cnn->forwardMemory + cnn->backwardMemory + cnn->fixedMemory + cnn->paramsMemory;
+ mem2 = cnn->forwardMemory + cnn->fixedMemory + cnn->paramsMemory;
+ mem3 = cnn->memPoolSize + cnn->fixedMemory + cnn->paramsMemory;
+ printf("CNN level 1 memory size = %10.4f MB.\n", (float)mem1/1024.0/1024.0);
+ printf("CNN level 2 memory size = %10.4f MB.\n", (float)mem2/1024.0/1024.0);
+ printf("CNN level 3 memory size = %10.4f MB.\n", (float)mem3/1024.0/1024.0);
+ printf("CNN memory pool size = %10.4f MB.\n", (float)cnn->memPoolSize/1024.0/1024.0);
+ printf("CNN params memory size = %10.4f MB.\n", (float)timlCNNGetParamsNum(cnn)*sizeof(float)/1024.0/1024.0);
timlCNNSetMode(cnn, Util_Test);
// read test images
}
free(image.data);
}
- label = malloc(sizeof(int)*testNum*topN);
// testing
printf("3. Start testing\n");
clock_gettime(CLOCK_REALTIME, &startTime);
- timlCNNClassifyTopNBatchModeOpenMP(cnn, testImage, dim, testNum, label, NULL, topN);
+ timlCNNClassifyAccuracy(cnn, testImage, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, testLabel, 1, 1, testNum, &success);
clock_gettime(CLOCK_REALTIME, &endTime);
testingTime = timlUtilDiffTime(startTime, endTime);
- success = timlUtilClassifyAccuracy(label, topN, testNum, testLabel);
classifyPercent = (float) success/(float) testNum;
printf("Testing time = %.2f s.\n", testingTime/1000000.0);
- printf("Top%d success percent = %.3f %%\n", topN, classifyPercent*100.00);
+ printf("Top%d success percent = %.3f %%\n", TOP_N, classifyPercent*100.00);
// cleaning
printf("4. Clean up\n");
diff --git a/src/app/cnn/class/imagenet/appCNNClassImageNetCaffeNetTraining.bin b/src/app/cnn/class/imagenet/appCNNClassImageNetCaffeNetTraining.bin
new file mode 100755 (executable)
index 0000000..ce1470e
Binary files /dev/null and b/src/app/cnn/class/imagenet/appCNNClassImageNetCaffeNetTraining.bin differ
diff --git a/src/app/cnn/class/imagenet/appCNNClassImageNetCaffeNetTraining.c b/src/app/cnn/class/imagenet/appCNNClassImageNetCaffeNetTraining.c
index 247f0fd33ec1079328b7b56740c01930f7ecd966..5eb12cada11a45f2f1a4e421cbe109764ba90d9d 100644 (file)
#define LABEL_PATH "../../../../database/imagenet/train/label.txt"\r
#define IMAGE_PATH "../../../../database/imagenet/train/%010d.jpg"\r
#define TOP_N 5\r
-#define IMAGE_NUM 100\r
-#define IMAGE_BATCH_SIZE 10\r
+#define IMAGE_NUM 1000\r
+#define IMAGE_BATCH_SIZE 100\r
#define IMAGE_ROW 256\r
#define IMAGE_COL 256\r
#define IMAGE_CROP_ROW 227\r
\r
// setup CNN\r
printf("1. Build up CNN\n");\r
- cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault(), 0);\r
+ cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault());\r
+ cnn->params.maxBatchSize = IMAGE_BATCH_SIZE;\r
cnn->params.batchSize = IMAGE_BATCH_SIZE;\r
+ cnn->params.batchUpdate = IMAGE_BATCH_SIZE;\r
cnn->params.learningRate = 0.1;\r
inputParams = timlCNNInputParamsDefault();\r
inputParams.row = IMAGE_ROW;\r
timlCNNAddNonlinearLayer(cnn, Util_Relu); // (21) relu layer\r
timlCNNAddDropoutLayer(cnn, 0.5); // (22) dropout layer\r
timlCNNAddLinearLayer(cnn, 1000, timlCNNLinearParamsDefault()); // (23) linear layer\r
- timlCNNAddNonlinearLayer(cnn, Util_Softmax); // (24) softmax layer\r
+ timlCNNAddSoftmaxCostLayer(cnn); // (24) softmax cost layer\r
timlCNNInitialize(cnn);\r
timlCNNReset(cnn);\r
- mem = timlCNNMemory(cnn);\r
+ timlCNNMemory(cnn);\r
timlCNNPrint(cnn);\r
+ mem = cnn->fixedMemory + cnn->forwardMemory;\r
printf("CNN memory = %.10f MB.\n", (float)mem/1024.0/1024.0);\r
paramsNum = timlCNNGetParamsNum(cnn);\r
printf("CNN params # = %ld.\n", paramsNum);\r
}\r
\r
// training\r
- timlCNNSupervisedTrainingWithLabelBatchMode(cnn, trainImage, trainLabel, dim, IMAGE_BATCH_SIZE);\r
+ timlCNNSupervisedTrainingWithLabel(cnn, trainImage, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, trainLabel, 1, 1, IMAGE_BATCH_SIZE);\r
}\r
clock_gettime(CLOCK_REALTIME, &endTime);\r
trainingTime = timlUtilDiffTime(startTime, endTime);\r
diff --git a/src/app/cnn/class/imagenet/appCNNClassImageNetVGGNetTesting.bin b/src/app/cnn/class/imagenet/appCNNClassImageNetVGGNetTesting.bin
new file mode 100755 (executable)
index 0000000..f18df18
Binary files /dev/null and b/src/app/cnn/class/imagenet/appCNNClassImageNetVGGNetTesting.bin differ
diff --git a/src/app/cnn/class/imagenet/appCNNClassImageNetVGGNetTesting.c b/src/app/cnn/class/imagenet/appCNNClassImageNetVGGNetTesting.c
index 95c19ac88b4b1e662eb442864213c44eb84930a5..52a0233cfe1a4e9ceba6fad0d640061f96dd215b 100644 (file)
#define LABEL_PATH "../../../../database/imagenet/test/label.txt"
#define IMAGE_PATH "../../../../database/imagenet/test/%010d.jpg"
#define TOP_N 5
-#define IMAGE_NUM 100
+#define IMAGE_NUM 1000
#define IMAGE_ROW 256
#define IMAGE_COL 256
#define IMAGE_CHANNEL 3
int err;
int success;
float classifyPercent;
- long mem;
+ long mem1, mem2, mem3;
FILE *fp;
struct timespec startTime;
struct timespec endTime;
// read CNN config
printf("1. Read CNN config\n");
- timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH, 0);
- mem = timlCNNMemory(cnn);
+ timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH);
+ timlCNNAddAccuracyLayer(cnn, TOP_N);
+ timlCNNInitialize(cnn);
+ timlCNNLoadParamsFromFile(cnn, cnn->paramsFileName);
timlCNNPrint(cnn);
- printf("CNN memory allocation = %.10f MB.\n", (float)mem/1024.0/1024.0);
- printf("CNN parameter # = %lu.\n", timlCNNGetParamsNum(cnn));
+ mem1 = cnn->forwardMemory + cnn->backwardMemory + cnn->fixedMemory + cnn->paramsMemory;
+ mem2 = cnn->forwardMemory + cnn->fixedMemory + cnn->paramsMemory;
+ mem3 = cnn->memPoolSize + cnn->fixedMemory + cnn->paramsMemory;
+ printf("CNN level 1 memory size = %10.4f MB.\n", (float)mem1/1024.0/1024.0);
+ printf("CNN level 2 memory size = %10.4f MB.\n", (float)mem2/1024.0/1024.0);
+ printf("CNN level 3 memory size = %10.4f MB.\n", (float)mem3/1024.0/1024.0);
+ printf("CNN memory pool size = %10.4f MB.\n", (float)cnn->memPoolSize/1024.0/1024.0);
+ printf("CNN params memory size = %10.4f MB.\n", (float)timlCNNGetParamsNum(cnn)*sizeof(float)/1024.0/1024.0);
timlCNNSetMode(cnn, Util_Test);
// read test images
// testing
printf("3. Start testing\n");
clock_gettime(CLOCK_REALTIME, &startTime);
- timlCNNClassifyTopNBatchModeOpenMP(cnn, testImage, dim, testNum, label, NULL, topN);
+ timlCNNClassifyAccuracy(cnn, testImage, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, testLabel, 1, 1, testNum, &success);
clock_gettime(CLOCK_REALTIME, &endTime);
testingTime = timlUtilDiffTime(startTime, endTime);
- success = timlUtilClassifyAccuracy(label, topN, testNum, testLabel);
classifyPercent = (float)success/(float)testNum;
printf("Testing time = %.2f s.\n", testingTime/1000000.0);
printf("Top%d success percent = %.2f %%\n", topN, classifyPercent*100.00);
diff --git a/src/app/cnn/class/mnist/appCNNClassMNISTTesting.bin b/src/app/cnn/class/mnist/appCNNClassMNISTTesting.bin
new file mode 100755 (executable)
index 0000000..5ac2257
Binary files /dev/null and b/src/app/cnn/class/mnist/appCNNClassMNISTTesting.bin differ
diff --git a/src/app/cnn/class/mnist/appCNNClassMNISTTesting.c b/src/app/cnn/class/mnist/appCNNClassMNISTTesting.c
index 590777aeff94ec9a0f031c1ab74d8df6a23d708b..2b1d6129c6488500f99a903a4e97351e620b267b 100644 (file)
*/
/******************************************************************************/
-int appCNNClassMNISTTesting() {
+int appCNNClassMNISTTesting()
+{
int err;
int classifyNum;
float classifyPercent;
timlUtilImageSet testing;
timlUtilImageSet training;
int topN;
- int *label;
+ size_t mem1;
+ size_t mem2;
+ size_t mem3;
err = 0;
topN = TOP_N;
- label = malloc(sizeof(int)*topN*TEST_NUM);
+
dim = IMAGE_ROW*IMAGE_COL*IMAGE_CHANNEL;
setbuf(stdout, NULL); // do not buffer the console output
// read CNN config
printf("1. Read CNN config\n");
- timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH, 0);
+ timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH);
timlCNNSetMode(cnn, Util_Test);
- mem = timlCNNMemory(cnn);
+ timlCNNAddAccuracyLayer(cnn, 1);
+ timlCNNInitialize(cnn);
+ timlCNNLoadParamsFromFile(cnn, cnn->paramsFileName);
timlCNNPrint(cnn);
- printf("CNN memory allocation = %.10f MB.\n", (float)mem/1024.0/1024.0);
- printf("CNN parameter # = %lu.\n", timlCNNGetParamsNum(cnn));
+ mem1 = cnn->forwardMemory + cnn->backwardMemory + cnn->fixedMemory + cnn->paramsMemory;
+ mem2 = cnn->forwardMemory + cnn->fixedMemory + cnn->paramsMemory;
+ mem3 = cnn->memPoolSize + cnn->fixedMemory + cnn->paramsMemory;
+ printf("CNN level 1 memory size = %10.4f MB.\n", (float)mem1/1024.0/1024.0);
+ printf("CNN level 2 memory size = %10.4f MB.\n", (float)mem2/1024.0/1024.0);
+ printf("CNN level 3 memory size = %10.4f MB.\n", (float)mem3/1024.0/1024.0);
+ printf("CNN forward memory size = %10.4f MB.\n", (float)cnn->forwardMemory/1024.0/1024.0);
+ printf("CNN memory pool size = %10.4f MB.\n", (float)cnn->memPoolSize/1024.0/1024.0);
+ printf("CNN params memory size = %10.4f MB.\n", (float)cnn->paramsMemory/1024.0/1024.0);
// read MNIST database
printf("2. Read MNIST database\n");
// testing
printf("3. Start testing\n");
clock_gettime(CLOCK_REALTIME, &startTime);
- timlCNNClassifyTopNBatchMode(cnn, testing.data, dim, testing.num, label, NULL, topN);
+ timlCNNClassifyAccuracy(cnn, testing.data, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, testing.label, 1, 1, testing.num, &classifyNum);
clock_gettime(CLOCK_REALTIME, &endTime);
testingTime = timlUtilDiffTime(startTime, endTime);
- classifyNum = timlUtilClassifyAccuracy(label, topN, testing.num, testing.label);
classifyPercent = (float)classifyNum/(float)testing.num;
printf("Testing time = %.3f s\n", testingTime/1000000.0);
printf("Classify accuracy = %.3f %%\n", classifyPercent*100.00);
free(training.label);
free(testing.data);
free(testing.label);
- free(label);
timlCNNDelete(cnn);
return err;
diff --git a/src/app/cnn/class/mnist/appCNNClassMNISTTraining.bin b/src/app/cnn/class/mnist/appCNNClassMNISTTraining.bin
new file mode 100755 (executable)
index 0000000..0080415
Binary files /dev/null and b/src/app/cnn/class/mnist/appCNNClassMNISTTraining.bin differ
diff --git a/src/app/cnn/class/mnist/appCNNClassMNISTTraining.c b/src/app/cnn/class/mnist/appCNNClassMNISTTraining.c
index 940a6a8057d513c9646012e06fd55f07146a653c..9f2709ae733c240655b7348f10f984fba2163572 100644 (file)
#define BATCH_SIZE 100\r
#define IMAGE_CHANNEL 1\r
#define LEARN_RATE 0.1\r
+#define EPOCH 10\r
\r
/*******************************************************************************\r
*\r
\r
int main()\r
{\r
-\r
return appCNNClassMNISTTraining();\r
}\r
\r
\r
int appCNNClassMNISTTraining()\r
{\r
- int i;\r
+ int i, j;\r
int dim;\r
long mem;\r
struct timespec startTime;\r
\r
// setup CNN\r
printf("1. Build up the CNN\n");\r
- timlConvNeuralNetwork *cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault(), 0);\r
+ timlConvNeuralNetwork *cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault());\r
cnn->params.learningRate = LEARN_RATE;\r
+ cnn->params.maxBatchSize = BATCH_SIZE;\r
cnn->params.batchSize = BATCH_SIZE;\r
+ cnn->params.batchUpdate = BATCH_SIZE;\r
inputParams = timlCNNInputParamsDefault();\r
inputParams.scale = 1.0/256.0;\r
timlCNNAddInputLayer(cnn, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, inputParams); // input layer\r
timlCNNAddLinearLayer(cnn, 500, timlCNNLinearParamsDefault()); // linear layer\r
timlCNNAddNonlinearLayer(cnn, Util_Relu); // relu layer\r
timlCNNAddLinearLayer(cnn, 10, timlCNNLinearParamsDefault()); // linear layer\r
- timlCNNAddNonlinearLayer(cnn, Util_Softmax); // softmax layer\r
+ timlCNNAddSoftmaxCostLayer(cnn); // softmax cost layer\r
timlCNNInitialize(cnn);\r
timlCNNReset(cnn);\r
- mem = timlCNNMemory(cnn);\r
timlCNNPrint(cnn);\r
- printf("CNN memory allocation = %.10f MB.\n", (float) mem/1024.0/1024.0);\r
- printf("CNN parameter # = %ld.\n", timlCNNGetParamsNum(cnn));\r
\r
// read MNIST database\r
printf("2. Read the MNIST database\n");\r
// training\r
printf("3. Start training\n");\r
clock_gettime(CLOCK_REALTIME, &startTime);\r
- for (i = 0; i < batchNum; i++) {\r
- timlCNNSupervisedTrainingWithLabelBatchMode(cnn, training.data + i*batchSize*dim, training.label + i*batchSize, dim, batchSize);\r
+ for (j = 0; j < EPOCH; j++) {\r
+ for (i = 0; i < batchNum; i++) {\r
+ timlCNNSupervisedTrainingWithLabel(cnn, training.data + i*batchSize*dim, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, training.label + i*batchSize, 1, 1, batchSize);\r
+ }\r
}\r
clock_gettime(CLOCK_REALTIME, &endTime);\r
trainingTime = timlUtilDiffTime(startTime, endTime);\r
diff --git a/src/app/cnn/convert/imagenet/appCNNConvertImageNet.bin b/src/app/cnn/convert/imagenet/appCNNConvertImageNet.bin
new file mode 100755 (executable)
index 0000000..af1d85b
Binary files /dev/null and b/src/app/cnn/convert/imagenet/appCNNConvertImageNet.bin differ
diff --git a/src/app/cnn/convert/sbd/appCNNConvertSBD.bin b/src/app/cnn/convert/sbd/appCNNConvertSBD.bin
new file mode 100755 (executable)
index 0000000..72c09bd
Binary files /dev/null and b/src/app/cnn/convert/sbd/appCNNConvertSBD.bin differ
diff --git a/src/app/cnn/interop/caffe/appCNNInteropCaffe.bin b/src/app/cnn/interop/caffe/appCNNInteropCaffe.bin
new file mode 100755 (executable)
index 0000000..e2d47d7
Binary files /dev/null and b/src/app/cnn/interop/caffe/appCNNInteropCaffe.bin differ
diff --git a/src/app/cnn/interop/caffe/appCNNInteropCaffeConvert.cpp b/src/app/cnn/interop/caffe/appCNNInteropCaffeConvert.cpp
index 60ca9b2695e15f5a520f6c343f27538978ffa1c3..59a4977ec68607bff0dba57e6ab029c73e06995d 100644 (file)
timlConvNeuralNetwork *cnn;
int k;
- cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault(), 0);
+ cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault());
appCNNInteropCaffeReadProtoFromTextFile(netStructurePrototxtFileName, &netStructure);
appCNNInteropCaffeReadProtoFromBinaryFile(netParamPrototxtFileName, &netParam);
layerNum = netStructure.layers_size();
@@ -120,6 +120,10 @@ timlConvNeuralNetwork* appCNNInteropCaffeConvert(const char *netStructurePrototx
case CNN_Dropout:
appCNNInteropCaffeDropoutLayerConvert(cnn, netStructure.layers(i), netParam.layers(i + offset));
break;
+ case CNN_Softmax:
+ timlCNNAddSoftmaxLayer(cnn);
+ timlCNNSoftmaxInitialize(cnn->tail);
+ break;
default:
break;
}
diff --git a/src/app/cnn/interop/caffe/appCNNInteropCaffeLayerTypeConvert.cpp b/src/app/cnn/interop/caffe/appCNNInteropCaffeLayerTypeConvert.cpp
index de20560019eabb0177fda04f6d0b609f7e1fe899..10ef901a02f453a103db004635356b8871ddf9ef 100644 (file)
@@ -74,10 +74,12 @@ timlCNNLayerType appCNNInteropCaffeLayerTypeConvert(LayerParameter_LayerType typ
break;
case LayerParameter_LayerType_RELU:
case LayerParameter_LayerType_SIGMOID:
- case LayerParameter_LayerType_SOFTMAX:
case LayerParameter_LayerType_TANH:
return CNN_Nonlinear;
break;
+ case LayerParameter_LayerType_SOFTMAX:
+ return CNN_Softmax;
+ break;
default:
return CNN_Input;
}
diff --git a/src/app/cnn/interop/caffe/appCNNInteropCaffeNonlinearTypeConvert.cpp b/src/app/cnn/interop/caffe/appCNNInteropCaffeNonlinearTypeConvert.cpp
index abe197c2a5b5a2de14932229aa877b4e0889d8ca..8680bc472a8edba37d7d6ef0a100f1fb15995617 100644 (file)
case LayerParameter_LayerType_SIGMOID:
return Util_Sigmoid;
break;
- case LayerParameter_LayerType_SOFTMAX:
- return Util_Softmax;
- break;
case LayerParameter_LayerType_TANH:
return Util_Tanh;
break;
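Design note: Caffe SOFTMAX layers now convert to the dedicated CNN_Softmax layer type (timlCNNAddSoftmaxLayer plus timlCNNSoftmaxInitialize) instead of a generic nonlinear layer, and the nonlinear-type converter drops Util_Softmax accordingly; this mirrors the examples' switch from timlCNNAddNonlinearLayer(cnn, Util_Softmax) to timlCNNAddSoftmaxCostLayer(cnn).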
diff --git a/src/app/cnn/interop/caffe/appCNNInteropCaffeReadMean.cpp b/src/app/cnn/interop/caffe/appCNNInteropCaffeReadMean.cpp
index 73624b074763d00b24efbc8bb985c8d392cb955e..b32f6a7c9a7b16fe4051f69a7df34c8f55bc0cf8 100644 (file)
row = blob.height();
col = blob.width();
- timlUtilFree(layer->inputParams.mean);
- timlUtilMalloc((void**)&(layer->inputParams.mean), sizeof(float)*row*col*channel);
+ timlUtilFreeAcc(layer->inputParams.mean);
+ timlUtilMallocAcc((void**)&(layer->inputParams.mean), sizeof(float)*row*col*channel);
layer->inputParams.row = row;
layer->inputParams.col = col;
layer->inputParams.channel = channel;
index 50f6f39b5bd9946966d093a9b8bd3272b166b591..59b54e4651de8311a0f516b3bd4d53d6dc5706a4 100644 (file)
\r
int appCNNSceneSBDTesting();\r
\r
-int appCNNSceneClassify(timlConvNeuralNetwork *cnn, timlUtilImage image, int *labelMatrix, int scale);\r
+int appCNNSceneClassify(timlConvNeuralNetwork *cnn, float *image, int row, int col, int channel, int *labelMatrix, int scale);\r
\r
int appCNNSceneShuffleIdx(int *imageIdx, int *rowIdx, int *colIdx, appCNNSceneDataSet *dataSet);\r
\r
index b3ee97bc72b0a98e2228bb8fe364cbaacc73ce53..c871bf2fa35209658084adc79d04ddfce770883c 100644 (file)
*/
/******************************************************************************/
-int appCNNSceneClassify(timlConvNeuralNetwork *cnn, timlUtilImage image, int *labelMatrix, int scale)
+int appCNNSceneClassify(timlConvNeuralNetwork *cnn, float *image, int row, int col, int channel, int *labelMatrix, int scale)
{
int i;
int j;
int m;
int k;
int err;
- int numRow;
- int numCol;
- int numChannel;
float imageMean;
float imageDeviation;
int paddedRow;
@@ -84,73 +81,76 @@ int appCNNSceneClassify(timlConvNeuralNetwork *cnn, timlUtilImage image, int *la
// init
err = 0;
- numRow = image.row;
- numCol = image.col;
- numChannel = image.channel;
paddedRow = cnn->head->row;
paddedCol = cnn->head->col;
- paddedDim = paddedRow*paddedCol*numChannel;
+ paddedDim = paddedRow*paddedCol*channel;
paddedImage = malloc(sizeof(float)*paddedDim);
- resolutionLossRow = numRow/cnn->tail->row;
- resolutionLossCol = numCol/cnn->tail->col;
+ resolutionLossRow = row/cnn->tail->row;
+ resolutionLossCol = col/cnn->tail->col;
// image normalization (per image)
- for (k = 0; k < numChannel; k++) {
+ for (k = 0; k < channel; k++) {
imageMean = 0.0;
imageDeviation = 0.0;
- for (i = 0; i < numRow * numCol; i++) {
- imageMean += image.data[i + k*numRow*numCol];
+ for (i = 0; i < row * col; i++) {
+ imageMean += image[i + k*row*col];
}
- imageMean /= numRow*numCol;
- for (i = 0; i < numRow*numCol; i++) {
- image.data[i + k*numRow*numCol] -= imageMean;
+ imageMean /= row*col;
+ for (i = 0; i < row*col; i++) {
+ image[i + k*row*col] -= imageMean;
}
- for (i = 0; i < numRow * numCol; i++) {
- imageDeviation += image.data[i + k*numRow*numCol] * image.data[i + k*numRow*numCol];
+ for (i = 0; i < row * col; i++) {
+ imageDeviation += image[i + k*row*col] * image[i + k*row*col];
}
- imageDeviation /= numRow*numCol;
+ imageDeviation /= row*col;
imageDeviation = sqrtf(imageDeviation);
- for (i = 0; i < numRow*numCol; i++) {
- image.data[i + k*numRow*numCol] /= imageDeviation;
+ for (i = 0; i < row*col; i++) {
+ image[i + k*row*col] /= imageDeviation;
}
}
// main loop over each pixel on the image
for (m = -resolutionLossRow/2; m < resolutionLossRow/2; m += scale) {
for (k = -resolutionLossCol/2; k < resolutionLossCol/2; k += scale) {
- rowStart = (paddedRow - numRow)/2 - m;
- rowEnd = rowStart + numRow - 1;
- colStart = (paddedCol - numCol)/2 - k;
- colEnd = colStart + numCol - 1;
+ rowStart = (paddedRow - row)/2 - m;
+ rowEnd = rowStart + row - 1;
+ colStart = (paddedCol - col)/2 - k;
+ colEnd = colStart + col - 1;
// zero padding
for (i = 0; i < paddedRow; i++) {
for (j = 0; j < paddedCol; j++) {
if (i < rowStart || i > rowEnd || j < colStart || j > colEnd) {
- for (p = 0; p < numChannel; p++)
+ for (p = 0; p < channel; p++)
paddedImage[j + i*paddedCol + p*paddedRow*paddedCol] = 0.0;
}
else {
- for (p = 0; p < numChannel; p++)
- paddedImage[j + i*paddedCol + p*paddedRow*paddedCol] = image.data[j - colStart + (i - rowStart)*numCol + p*numRow*numCol];
+ for (p = 0; p < channel; p++)
+ paddedImage[j + i*paddedCol + p*paddedRow*paddedCol] = image[j - colStart + (i - rowStart)*col + p*row*col];
}
}
}
// cnn Forward Propagation
- err = timlCNNForwardPropagation(cnn, paddedImage, paddedDim);
+ err = timlCNNLoadImage(cnn, paddedImage, paddedRow, paddedCol, channel, 1);
+ err = timlCNNForwardPropagation(cnn);
// labeling
- appCNNSceneLabelMatrix(cnn->tail->featureMap, cnn->tail->row, cnn->tail->col, cnn->tail->channel, m, k, labelMatrix, numRow, numCol);
+ for (i = 0; i < cnn->tail->row; i++) {
+ for (j = 0; j < cnn->tail->col; j++) {
+ labelMatrix[k + resolutionLossCol/2 + j*resolutionLossCol + (m + resolutionLossRow/2 + i*resolutionLossRow)*col] = cnn->tail->accuracyParams.label[i*cnn->tail->col + j];
+ }
+ }
+
}
}
// up-sample the label matrix
- for (i = 0; i < image.row; i++) {
- for (j = 0; j < image.col; j++) {
+ for (i = 0; i < row; i++) {
+ for (j = 0; j < col; j++) {
rowDown = i/scale;
colDown = j/scale;
- labelMatrix[j + i*image.col] = labelMatrix[colDown*scale + rowDown*scale*image.col];
+ labelMatrix[j + i*col] = labelMatrix[colDown*scale + rowDown*scale*col];
}
}
diff --git a/src/app/cnn/scene/appCNNSceneClassifyOpenMP.c b/src/app/cnn/scene/appCNNSceneClassifyOpenMP.c
index cb8923cbffc88bef59ef78ce28c5edcd56f74457..a920b34ab2ec641135021033f0c0a6b0573146e9 100644 (file)
@@ -114,7 +114,7 @@ int appCNNSceneClassifyOpenMP(timlConvNeuralNetwork **cnnTeam, int teamNum, floa
}
}
- #pragma omp parallel num_threads(teamNum) private(threadIndex, m, k, i, j, p)
+ #pragma omp parallel num_threads(teamNum) private(threadIndex, m, k, i, j, p, rowStart, rowEnd, colStart, colEnd)
{
#pragma omp for collapse(2)
@@ -142,10 +142,16 @@ int appCNNSceneClassifyOpenMP(timlConvNeuralNetwork **cnnTeam, int teamNum, floa
}
// cnn Forward Propagation
- err = timlCNNForwardPropagation(cnnTeam[threadIndex], paddedImage + paddedDim*threadIndex, paddedDim);
+ err = timlCNNLoadImage(cnnTeam[threadIndex], paddedImage + paddedDim*threadIndex, paddedRow, paddedCol, channel, 1);
+ err = timlCNNForwardPropagation(cnnTeam[threadIndex]);
// labeling
- appCNNSceneLabelMatrix(cnnTeam[threadIndex]->tail->featureMap, cnnTeam[threadIndex]->tail->row, cnnTeam[threadIndex]->tail->col, cnnTeam[threadIndex]->tail->channel, m, k, labelMatrix, row, col);
+ for (i = 0; i < cnnTeam[threadIndex]->tail->row; i++) {
+ for (j = 0; j < cnnTeam[threadIndex]->tail->col; j++) {
+ labelMatrix[k + resolutionLossCol/2 + j*resolutionLossCol + (m + resolutionLossRow/2 + i*resolutionLossRow)*col] = cnnTeam[threadIndex]->tail->accuracyParams.label[i*cnnTeam[threadIndex]->tail->col + j];
+ }
+ }
+
} // end of k loop
} // end of m loop
} // end of OpenMP
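Design note: both scene classifiers stop post-processing the raw feature map with appCNNSceneLabelMatrix; with an accuracy layer at the network tail, per-pixel labels are read directly from tail->accuracyParams.label and scattered into labelMatrix, and image loading and forward propagation are now separate steps (timlCNNLoadImage followed by timlCNNForwardPropagation).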
index afcc5ad65783a8405e703ae29dce3daa4119ed91..3e36266d2972da63bcd37a47b12e6c84b159e0ec 100644 (file)
int i;
int j;
int k;
+ int index;
int imageRow;
int imageCol;
int numRow;
@@ -109,15 +110,20 @@ int appCNNSceneGetPatch(int imageIdx, int rowIdx, int colIdx, appCNNSceneDataSet
for (j = 0; j < patchSize; j++) {
imageRow = rowIdx - patchSize/2 + i;
imageCol = colIdx - patchSize/2 + j;
+
if (imageRow < 0 || imageRow >= numRow || imageCol < 0 || imageCol >= numCol) {
for (k = 0; k < numChannel; k++) {
- patch[j + i*patchSize + k*patchSize*patchSize] = 0.0;
+ index = j + i*patchSize + k*patchSize*patchSize;
+ patch[j + i*patchSize + k*patchSize*patchSize] = 0;
}
}
else {
- for (k = 0; k < numChannel; k++)
+ for (k = 0; k < numChannel; k++) {
+ index = j + i*patchSize + k*patchSize*patchSize;
patch[j + i*patchSize + k*patchSize*patchSize] = image.data[imageCol + imageRow*numCol + k*numRow*numCol];
+ }
}
+
}
}
diff --git a/src/app/cnn/scene/appCNNSceneSupervisedTraining.c b/src/app/cnn/scene/appCNNSceneSupervisedTraining.c
index 5c4a79fddd1d32b9f1e589be1297844421981628..8ed1e9c50f56116dd43077064545705936cfbdfc 100644 (file)
int i;
int j;
+ int index;
int err;
timlCNNLayer *bpStartLayer;
- int label;
+ int *label;
+ int labelTemp;
int iter;
int patchDim;
int *imageIdx;
@@ -69,10 +71,10 @@ int appCNNSceneSupervisedTraining(timlConvNeuralNetwork *cnn, appCNNSceneDataSet
int *colIdx;
float *patch;
int epoch;
- int batchSize;
+ int batchUpdate;
int batchNum;
- float *cost;
- float *batchCost;
+ float cost;
+ float batchCost;
int batchIndex;
// init
@@ -82,38 +84,33 @@ int appCNNSceneSupervisedTraining(timlConvNeuralNetwork *cnn, appCNNSceneDataSet
imageIdx = malloc(sizeof(int)*iter);
rowIdx = malloc(sizeof(int)*iter);
colIdx = malloc(sizeof(int)*iter);
- patch = malloc(sizeof(float)*patchDim);
+ patch = malloc(sizeof(float)*patchDim*cnn->params.batchUpdate);
+ label = malloc(sizeof(int)*cnn->params.batchUpdate);
epoch = cnn->params.epoch;
- batchSize = cnn->params.batchSize;
- batchNum = iter/batchSize;
- cost = malloc(sizeof(float)*batchSize);
- batchCost = malloc(sizeof(float)*batchNum*epoch);
+ batchUpdate = cnn->params.batchUpdate;
+ batchNum = iter/batchUpdate;
batchIndex = 0;
+ batchCost = 0;
+ index = 0;
// shuffle the training pixels
appCNNSceneShuffleIdx(imageIdx, rowIdx, colIdx, dataSet);
// training loop
- cnn->params.count = 0;
+
for (i = 0; i < epoch; i++) {
- cnn->params.count = 0;
for (j = 0; j < iter; j++) {
- label = appCNNSceneGetLabel(imageIdx[j], rowIdx[j], colIdx[j], dataSet);
- if (label != -1) {
- cnn->params.count += 1;
- appCNNSceneGetPatch(imageIdx[j], rowIdx[j], colIdx[j], dataSet, patch);
- err = timlCNNForwardPropagation(cnn, patch, patchDim);
- timlCNNCostWithLabel(cnn, label, cost + j%batchSize, &bpStartLayer);
- err = timlCNNBackPropagation(cnn, bpStartLayer);
- }
- else {
- cost[j%batchSize] = 0.0;
+ labelTemp = appCNNSceneGetLabel(imageIdx[j], rowIdx[j], colIdx[j], dataSet);
+ // make image and label batch
+ if (labelTemp != -1) {
+ label[index] = labelTemp;
+ appCNNSceneGetPatch(imageIdx[j], rowIdx[j], colIdx[j], dataSet, patch + index*patchDim);
+ index ++;
}
- if ((j + 1)%batchSize == 0) { // update parameters once each batch
- batchCost[batchIndex + i*batchNum] = timlUtilVectorSumFloat(cost, batchSize)/(double)cnn->params.count;
- timlCNNUpdateParams(cnn);
- printf("epoch = %d, batch = %d, cost = %f\n", i, batchIndex, batchCost[batchIndex + i*batchNum]);
- batchIndex += 1;
+ // call cnn training
+ if (index == cnn->params.batchUpdate) {
+ timlCNNSupervisedTrainingWithLabel(cnn, patch, dataSet->patchSize, dataSet->patchSize, dataSet->channel, label, 1, 1, cnn->params.batchUpdate);
+ index = 0;
}
}
}
@@ -121,8 +118,6 @@ int appCNNSceneSupervisedTraining(timlConvNeuralNetwork *cnn, appCNNSceneDataSet
free(imageIdx);
free(rowIdx);
free(colIdx);
- free(cost);
- free(batchCost);
return err;
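Design note: the per-patch training path (forward propagation, cost, backward propagation, and a manual parameter update every batchSize samples) is replaced by accumulating valid patches and labels into batch buffers and handing them to timlCNNSupervisedTrainingWithLabel once batchUpdate patches are collected; the library now owns the cost bookkeeping and the update cadence, which is why cost, batchCost, and params.count drop out of this file.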
diff --git a/src/app/cnn/scene/sbd/appCNNSceneSBDTesting.bin b/src/app/cnn/scene/sbd/appCNNSceneSBDTesting.bin
new file mode 100755 (executable)
index 0000000..0cb400d
Binary files /dev/null and b/src/app/cnn/scene/sbd/appCNNSceneSBDTesting.bin differ
diff --git a/src/app/cnn/scene/sbd/appCNNSceneSBDTesting.c b/src/app/cnn/scene/sbd/appCNNSceneSBDTesting.c
index 646e6c24a69c1f96d2bef69224c42e2ad0fa3fcd..313f52a3900cdd97ee6a76e4718a8575405abd4b 100644 (file)
******************************************************************************/
#define SCALE 4
-#define IMAGE_NUM 10
+#define IMAGE_NUM 100
#define IMAGE_ROW 240
#define IMAGE_COL 320
#define IMAGE_CHANNEL 3
// init
err = 0;
scale = SCALE;
- thread = omp_get_max_threads();
slTesting.num = IMAGE_NUM;
slTesting.row = IMAGE_ROW;
slTesting.col = IMAGE_COL;
// read CNN config
printf("1. Read cnn config\n");
- timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH, 0);
- mem = timlCNNMemory(cnn);
- printf("CNN memory = %.10f MB.\n", (float)mem/1024.0/1024.0);
-
- // create cnnTeam
- cnnTeam[0] = cnn;
- for (i = 1; i < thread; i++) {
- cnnTeam[i] = timlCNNShareParams(cnn, 0);
- }
+ timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH);
+ timlCNNAddAccuracyLayer(cnn, 1);
+ timlCNNInitialize(cnn);
+ timlCNNLoadParamsFromFile(cnn, cnn->paramsFileName);
// testing
printf("2. Start testing\n");
sprintf(str, slTesting.imageFileNameStr, i);
timlUtilReadFixedSizeJPEG(str, image, slTesting.row, slTesting.col, slTesting.channel);
clock_gettime(CLOCK_REALTIME, &startTime);
- appCNNSceneClassifyOpenMP(cnnTeam, thread, image, slTesting.row, slTesting.col, slTesting.channel, labelMatrix, scale);
+ appCNNSceneClassify(cnn, image, slTesting.row, slTesting.col, slTesting.channel, labelMatrix, scale);
clock_gettime(CLOCK_REALTIME, &endTime);
testingTime = timlUtilDiffTime(startTime, endTime);
diff --git a/src/app/cnn/scene/sbd/appCNNSceneSBDTestingOpenMP.bin b/src/app/cnn/scene/sbd/appCNNSceneSBDTestingOpenMP.bin
new file mode 100755 (executable)
index 0000000..72ad13f
Binary files /dev/null and b/src/app/cnn/scene/sbd/appCNNSceneSBDTestingOpenMP.bin differ
diff --git a/src/app/cnn/scene/sbd/appCNNSceneSBDTestingOpenMP.c b/src/app/cnn/scene/sbd/appCNNSceneSBDTestingOpenMP.c
--- /dev/null
@@ -0,0 +1,177 @@
+/******************************************************************************/
+/*!
+ * \file appCNNSceneSBDTestingOpenMP.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../appCNNScene.h"
+
+
+/*******************************************************************************
+ *
+ * DEFINES
+ *
+ ******************************************************************************/
+
+#define SCALE 4
+#define IMAGE_NUM 10
+#define IMAGE_ROW 240
+#define IMAGE_COL 320
+#define IMAGE_CHANNEL 3
+#define PATCH_SIZE 133
+#define MODEL_PATH "../../../../database/model/sbd/databaseModelSBD.m"
+#define IMAGE_PATH "../../../../database/sbd/test/%03d.jpg"
+#define LABEL_PATH "../../../../database/sbd/test/%03d.txt"
+
+
+/*******************************************************************************
+ *
+ * main()
+ *
+ ******************************************************************************/
+
+int main()
+{
+ return appCNNSceneSBDTestingOpenMP();
+}
+
+
+/******************************************************************************/
+/*!
+ * \ingroup appCNNScene
+ * \brief Stanford Background Database scene labeling testing example
+ */
+/******************************************************************************/
+
+int appCNNSceneSBDTestingOpenMP()
+{
+ int i;
+ int n;
+ int m;
+ int read;
+ struct timespec startTime;
+ struct timespec endTime;
+ long testingTime;
+ int err;
+ long mem;
+ int scale;
+ int *labelMatrix;
+ float *image;
+ int *trueLabelMatrix;
+ float labelAccuracy;
+ int thread;
+ timlConvNeuralNetwork **cnnTeam;
+ char str[TIML_UTIL_MAX_STR];
+ FILE *fp;
+ appCNNSceneDataSet slTesting;
+
+ // init
+ err = 0;
+ scale = SCALE;
+ thread = omp_get_max_threads();
+ slTesting.num = IMAGE_NUM;
+ slTesting.row = IMAGE_ROW;
+ slTesting.col = IMAGE_COL;
+ slTesting.channel = IMAGE_CHANNEL;
+ slTesting.patchSize = PATCH_SIZE;
+ slTesting.imageFileNameStr = IMAGE_PATH;
+ slTesting.labelFileNameStr = LABEL_PATH;
+ labelMatrix = malloc(sizeof(int)*slTesting.row*slTesting.col);
+ trueLabelMatrix = malloc(sizeof(int)*slTesting.row*slTesting.col);
+ image = malloc(sizeof(float)*slTesting.row*slTesting.col*slTesting.channel);
+ cnnTeam = malloc(sizeof(timlConvNeuralNetwork*)*thread);
+ setbuf(stdout, NULL); // do not buffer the console output
+
+ // read CNN config
+ printf("1. Read cnn config\n");
+ timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH);
+ timlCNNAddAccuracyLayer(cnn, 1);
+ timlCNNInitialize(cnn);
+ timlCNNLoadParamsFromFile(cnn, cnn->paramsFileName);
+
+ // create cnnTeam
+ cnnTeam[0] = cnn;
+ for (i = 1; i < thread; i++) {
+ cnnTeam[i] = timlCNNShareParams(cnn);
+ timlCNNInitialize(cnnTeam[i]);
+ }
+
+ // testing
+ printf("2. Start testing\n");
+ for (i = 0; i < slTesting.num; i++) {
+ printf("Read image %03d.jpg\n", i);
+ sprintf(str, slTesting.imageFileNameStr, i);
+ timlUtilReadFixedSizeJPEG(str, image, slTesting.row, slTesting.col, slTesting.channel);
+ clock_gettime(CLOCK_REALTIME, &startTime);
+ appCNNSceneClassifyOpenMP(cnnTeam, thread, image, slTesting.row, slTesting.col, slTesting.channel, labelMatrix, scale);
+ clock_gettime(CLOCK_REALTIME, &endTime);
+ testingTime = timlUtilDiffTime(startTime, endTime);
+
+ // read true label
+ sprintf(str, slTesting.labelFileNameStr, i);
+ fp = fopen(str, "rt");
+ for (n = 0; n < slTesting.row; n++) {
+ for (m = 0; m < slTesting.col; m++) {
+ read = fscanf(fp, "%d", trueLabelMatrix + n*slTesting.col + m);
+ }
+ read = fscanf(fp, "\n");
+ }
+ fclose(fp);
+
+ // calculate accuracy
+ labelAccuracy = appCNNSceneAccuracy(labelMatrix, trueLabelMatrix, slTesting.row*slTesting.col);
+ printf("Test image %03d label accuracy = %.2f %%\n", i, 100.0*labelAccuracy);
+ printf("Test image %03d time = %.3f s\n", i, testingTime/1000000.0);
+ }
+
+ // clean up
+    printf("3. Clean up\n");
+ for (i = 0; i < thread; i++) {
+ timlCNNDelete(cnnTeam[i]);
+ }
+ free(cnnTeam);
+ free(labelMatrix);
+ free(trueLabelMatrix);
+ free(image);
+
+ return err;
+
+}
+
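For reference, appCNNSceneAccuracy() is called above but not defined in this diff. A minimal sketch consistent with its call site, assuming it simply returns the fraction of pixels whose predicted label matches the ground truth:

    static float appCNNSceneAccuracySketch(const int *label, const int *trueLabel, int num)
    {
        int i;
        int hit = 0;
        for (i = 0; i < num; i++) {
            if (label[i] == trueLabel[i]) {
                hit++; /* count correctly labeled pixels */
            }
        }
        return (float)hit/(float)num;
    }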
diff --git a/src/app/cnn/scene/sbd/appCNNSceneSBDTraining.bin b/src/app/cnn/scene/sbd/appCNNSceneSBDTraining.bin
new file mode 100755 (executable)
index 0000000..58c1321
Binary files /dev/null and b/src/app/cnn/scene/sbd/appCNNSceneSBDTraining.bin differ
diff --git a/src/app/cnn/scene/sbd/appCNNSceneSBDTraining.c b/src/app/cnn/scene/sbd/appCNNSceneSBDTraining.c
index 0901258d9fbbcb6d151ac599e685e55f8b5a4776..5f45a3bbc9eab6b005066e94d0b0c68e16392093 100644 (file)
// build up the CNN
printf("1. Build up the CNN\n");
- cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault(), 0);
+ cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault());
+ cnn->params.batchSize = 100;
+ cnn->params.batchUpdate = 100;
+ cnn->params.maxBatchSize = 100;
+ cnn->params.allocatorLevel = Util_AllocatorLevel1;
timlCNNAddInputLayer(cnn, PATCH_SIZE, PATCH_SIZE, IMAGE_CHANNEL, timlCNNInputParamsDefault());
timlCNNAddConvLayer(cnn, 6, 6, 1, 1, 25, timlCNNConvParamsDefault()); // conv layer
timlCNNAddNonlinearLayer(cnn, Util_Tanh); // tanh layer
timlCNNAddNonlinearLayer(cnn, Util_Tanh); // tanh layer
timlCNNAddPoolingLayer(cnn, 2, 2, 2, 2, CNN_MaxPooling, timlCNNPoolingParamsDefault()); // max pooling layer
timlCNNAddLinearLayer(cnn, 8, timlCNNLinearParamsDefault()); // linear layer
- timlCNNAddNonlinearLayer(cnn, Util_Softmax); // softmax layer
+ timlCNNAddSoftmaxCostLayer(cnn); // softmax cost layer
timlCNNInitialize(cnn);
timlCNNReset(cnn);
- mem = timlCNNMemory(cnn);
- printf("CNN memory = %.10f MB.\n", (float)mem/1024.0/1024.0);
+ timlCNNPrint(cnn);
// training
printf("2. Start training\n");
resizeRow = slTraining.row + (slTraining.patchSize/2)*2 - (resolutionLossRow - 1);
resizeCol = slTraining.col + (slTraining.patchSize/2)*2 - (resolutionLossCol - 1);
timlCNNResize(cnn, resizeRow, resizeCol, slTraining.channel);
- mem = timlCNNMemory(cnn);
- printf("CNN resized memory = %.10f MB.\n", (float)mem/1024.0/1024.0);
// clean up
printf("4. Clean up\n");
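The hunk above captures the batch-mode API change: batch sizes are now set directly on cnn->params, and the trailing softmax nonlinearity becomes a dedicated softmax cost layer. A minimal construction sketch using only calls from this diff (layer dimensions follow the SBD training example):

    timlConvNeuralNetwork *cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault());
    cnn->params.batchSize      = 100;  /* samples per forward/backward pass */
    cnn->params.batchUpdate    = 100;  /* samples between parameter updates */
    cnn->params.maxBatchSize   = 100;  /* buffers are sized against this bound */
    cnn->params.allocatorLevel = Util_AllocatorLevel1;
    timlCNNAddInputLayer(cnn, PATCH_SIZE, PATCH_SIZE, IMAGE_CHANNEL, timlCNNInputParamsDefault());
    timlCNNAddConvLayer(cnn, 6, 6, 1, 1, 25, timlCNNConvParamsDefault());
    timlCNNAddNonlinearLayer(cnn, Util_Tanh);
    timlCNNAddPoolingLayer(cnn, 2, 2, 2, 2, CNN_MaxPooling, timlCNNPoolingParamsDefault());
    timlCNNAddLinearLayer(cnn, 8, timlCNNLinearParamsDefault());
    timlCNNAddSoftmaxCostLayer(cnn);   /* replaces the old Util_Softmax nonlinear layer */
    timlCNNInitialize(cnn);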
diff --git a/src/app/makefile b/src/app/makefile
index a914ca95a6f76afddae680aeac18546448902675..cfba1cd6a8e42f734a928d155d972a83add1d0df 100755 (executable)
--- a/src/app/makefile
+++ b/src/app/makefile
# appCNNInteropCaffe bins
$(APP_CNN_INTEROP_CAFFE_BINS): $(APP_CNN_INTEROP_CAFFE_BIN_OBJS)
- $(CPPC) $(CFLAGS) $(LD_FLAGS) -o $(APP_CNN_INTEROP_CAFFE_BINS) $^ \
+ $(CPPC) $(CFLAGS) $(LD_FLAGS) -o $(APP_CNN_INTEROP_CAFFE_BINS) $^ \
-lprotobuf \
$(C_LIB_FLAG) $(C_LIB_PATH_FLAG)
# appCNNInteropCaffe objs
$(APP_CNN_INTEROP_CAFFE_BIN_OBJS): %.o: %.cpp $(APP_CNN_INTEROP_CAFFE_HFILES)
- $(CPPC) -c $(CFLAGS) $(LD_FLAGS) -o $(LD_FLAGS) $@ $< \
+ $(CPPC) -c $(CFLAGS) $(LD_FLAGS) -o $@ $< \
$(C_INCLUDE_PATH_FLAG)
diff --git a/src/app/makefile~ b/src/app/makefile~
--- /dev/null
+++ b/src/app/makefile~
@@ -0,0 +1,182 @@
+################################################################################
+#
+# makefile
+#
+# Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the
+# distribution.
+#
+# Neither the name of Texas Instruments Incorporated nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+################################################################################
+
+UNAME_M :=$(shell uname -m)
+ifneq (,$(findstring 86, $(UNAME_M)))
+ # In a cross compile environment we are assuming that the EVM file system
+ # is located on the build host and necessary ARM libraries are installed
+ # on that file system.
+ ifneq ($(MAKECMDGOALS),clean)
+ ifeq ($(TARGET_ROOTDIR),)
+ $(error Environment variable TARGET_ROOTDIR must be defined. Set it to point at the EVM root file system)
+ endif
+ endif
+
+ # gcc ARM cross compiler will not, by default, search the host's
+ # /usr/include. Explicitly specify here to find dependent vendor headers
+CC = arm-linux-gnueabihf-gcc
+CPPC = arm-linux-gnueabihf-g++
+AR = arm-linux-gnueabihf-ar
+else
+CC = gcc
+CPPC = g++
+AR = @ar
+endif
+
+
+
+C_INCLUDE_PATH = ../common/api ../common/cnn ../common/util \
+$(TARGET_ROOTDIR)/usr/include
+C_INCLUDE_PATH_FLAG = $(foreach d, $(C_INCLUDE_PATH), -I$d)
+C_LIB = timl cblas_armplusdsp blis OpenCL stdc++ rt jpeg m
+C_LIB_FLAG = $(foreach d, $(C_LIB), -l$d)
+C_LIB_PATH = $(TARGET_ROOTDIR)/usr/lib ../../bin
+C_LIB_PATH_FLAG = $(foreach d, $(C_LIB_PATH), -L$d)
+CFLAGS += -g -O3 -fopenmp
+LD_FLAGS=-L$(TARGET_ROOTDIR)/lib -L$(TARGET_ROOTDIR)/usr/lib -Wl,-rpath-link,$(TARGET_ROOTDIR)/lib -Wl,-rpath-link,$(TARGET_ROOTDIR)/usr/lib
+
+ARFLAGS = -rcs
+RM = @rm
+RMFLAGS += -fr
+
+
+# APP CNN CLASS
+APP_CNN_CLASS_BIN_CFILES = $(shell find ./cnn/class -name "*.c")
+APP_CNN_CLASS_BIN_OBJS = $(patsubst %.c, %.o, $(APP_CNN_CLASS_BIN_CFILES))
+APP_CNN_CLASS_BINS = $(patsubst %.c, %.bin, $(APP_CNN_CLASS_BIN_CFILES))
+APP_CNN_CLASS_HFILES = $(shell find ./cnn/class -name "*.h")
+
+# APP CNN SL
+APP_CNN_SL_BIN_HFILES = $(shell find ./cnn/scene -name "*.h")
+APP_CNN_SL_BIN_CFILES = $(shell find ./cnn/scene/sbd -name "*.c")
+APP_CNN_SL_BIN_OBJS = $(patsubst %.c, %.o, $(APP_CNN_SL_BIN_CFILES))
+APP_CNN_SL_BINS = $(patsubst %.c, %.bin, $(APP_CNN_SL_BIN_CFILES))
+APP_CNN_SL_CFILES = $(shell find ./cnn/scene -name "*.c")
+APP_CNN_SL_OBJS = $(patsubst %.c, %.o, $(APP_CNN_SL_CFILES))
+APP_CNN_SL_AUX_OBJS = $(filter-out $(APP_CNN_SL_BIN_OBJS), $(APP_CNN_SL_OBJS))
+
+# APP CNN INTEROP CAFFE
+APP_CNN_INTEROP_CAFFE_BIN_CFILES = $(shell find ./cnn/interop/caffe -name "*.cpp")
+APP_CNN_INTEROP_CAFFE_BIN_OBJS = $(patsubst %.cpp, %.o, $(APP_CNN_INTEROP_CAFFE_BIN_CFILES))
+APP_CNN_INTEROP_CAFFE_BINS = ./cnn/interop/caffe/appCNNInteropCaffe.bin
+APP_CNN_INTEROP_CAFFE_HFILES = $(shell find ./cnn/interop/caffe -name "*.hpp")
+
+# APP CNN CONVERT IMAGENET
+APP_CNN_CONVERT_IMAGENET_BIN_CFILES = $(shell find ./cnn/convert/imagenet -name "*.cpp")
+APP_CNN_CONVERT_IMAGENET_BIN_OBJS = $(patsubst %.cpp, %.o, $(APP_CNN_CONVERT_IMAGENET_BIN_CFILES))
+APP_CNN_CONVERT_IMAGENET_BINS = ./cnn/convert/imagenet/appCNNConvertImageNet.bin
+APP_CNN_CONVERT_IMAGENET_HFILES = $(shell find ./cnn/convert/imagenet -name "*.hpp")
+
+# APP CNN CONVERT SBD
+APP_CNN_CONVERT_SBD_BIN_CFILES = $(shell find ./cnn/convert/sbd -name "*.cpp")
+APP_CNN_CONVERT_SBD_BIN_OBJS = $(patsubst %.cpp, %.o, $(APP_CNN_CONVERT_SBD_BIN_CFILES))
+APP_CNN_CONVERT_SBD_BINS = ./cnn/convert/sbd/appCNNConvertSBD.bin
+APP_CNN_CONVERT_SBD_HFILES = $(shell find ./cnn/convert/sbd -name "*.hpp")
+
+
+APP_CNN_BINS = \
+$(APP_CNN_CLASS_BINS) \
+$(APP_CNN_SL_BINS) \
+$(APP_CNN_CONVERT_IMAGENET_BINS) \
+$(APP_CNN_CONVERT_SBD_BINS) \
+$(APP_CNN_INTEROP_CAFFE_BINS)
+
+APP_CNN_OBJS = \
+$(APP_CNN_CLASS_BIN_OBJS) \
+$(APP_CNN_SL_OBJS) \
+$(APP_CNN_INTEROP_CAFFE_BIN_OBJS) \
+$(APP_CNN_CONVERT_IMAGENET_BIN_OBJS) \
+$(APP_CNN_CONVERT_SBD_BIN_OBJS)
+
+all: $(APP_CNN_BINS)
+
+clean:
+ $(RM) $(RMFLAGS) \
+ $(APP_CNN_OBJS) \
+ $(APP_CNN_BINS)
+
+# appCNNClass bins
+$(APP_CNN_CLASS_BINS): %.bin: %.o
+ $(CC) $(CFLAGS) $(LD_FLAGS) -o $@ $^ \
+ $(C_LIB_FLAG) $(C_LIB_PATH_FLAG)
+
+# appCNNClass objs
+$(APP_CNN_CLASS_BIN_OBJS): %.o: %.c $(APP_CNN_CLASS_HFILES)
+ $(CC) -c $(CFLAGS) $(LD_FLAGS) -o $@ $< \
+ $(C_INCLUDE_PATH_FLAG)
+
+# appCNNScene bins
+$(APP_CNN_SL_BINS): %.bin: %.o $(APP_CNN_SL_AUX_OBJS)
+ $(CC) $(CFLAGS) $(LD_FLAGS) -o $@ $^ \
+ $(C_LIB_FLAG) $(C_LIB_PATH_FLAG)
+
+# appCNNScene objs
+$(APP_CNN_SL_OBJS): %.o: %.c $(APP_CNN_SL_HFILES)
+ $(CC) -c $(CFLAGS) $(LD_FLAGS) -o $@ $< \
+ $(C_INCLUDE_PATH_FLAG)
+
+# appCNNConvertImageNet bins
+$(APP_CNN_CONVERT_IMAGENET_BINS): $(APP_CNN_CONVERT_IMAGENET_BIN_OBJS)
+ $(CPPC) $(CFLAGS) $(LD_FLAGS) -o $(APP_CNN_CONVERT_IMAGENET_BINS) $^ \
+ -lopencv_core -lopencv_highgui -lopencv_imgproc \
+ $(C_LIB_PATH_FLAG)
+
+# appCNNConvertImageNet objs
+$(APP_CNN_CONVERT_IMAGENET_BIN_OBJS): %.o: %.cpp $(APP_CNN_CONVERT_IMAGENET_HFILES)
+ $(CPPC) -c $(CFLAGS) $(LD_FLAGS) -o $@ $< \
+ $(C_INCLUDE_PATH_FLAG)
+
+# appCNNConvertSBD bins
+$(APP_CNN_CONVERT_SBD_BINS): $(APP_CNN_CONVERT_SBD_BIN_OBJS)
+ $(CPPC) $(CPPFLAGS) $(LD_FLAGS) -o $(APP_CNN_CONVERT_SBD_BINS) $^ \
+ -lopencv_core -lopencv_highgui -lopencv_imgproc \
+ $(C_LIB_PATH_FLAG)
+
+# appCNNConvertSBD objs
+$(APP_CNN_CONVERT_SBD_BIN_OBJS): %.o: %.cpp $(APP_CNN_CONVERT_SBD_HFILES)
+ $(CPPC) -c $(CFLAGS) $(LD_FLAGS) -o $@ $< \
+ $(C_INCLUDE_PATH_FLAG)
+
+# appCNNInteropCaffe bins
+$(APP_CNN_INTEROP_CAFFE_BINS): $(APP_CNN_INTEROP_CAFFE_BIN_OBJS)
+ $(CPPC) $(CFLAGS) $(LD_FLAGS) -o $(APP_CNN_INTEROP_CAFFE_BINS) $^ \
+ -lprotobuf \
+ $(C_LIB_FLAG) $(C_LIB_PATH_FLAG)
+
+# appCNNInteropCaffe objs
+$(APP_CNN_INTEROP_CAFFE_BIN_OBJS): %.o: %.cpp $(APP_CNN_INTEROP_CAFFE_HFILES)
+ $(CPPC) -c $(CFLAGS) $(LD_FLAGS) -o $(LD_FLAGS) $@ $< \
+ $(C_INCLUDE_PATH_FLAG)
diff --git a/src/benchmark/cnn/class/imagenet/benchmarkCNNClassCaffeNetTesting.c b/src/benchmark/cnn/class/imagenet/benchmarkCNNClassCaffeNetTesting.c
index fc3d8acaff61513536e81f360f0bee3712060af6..e7885107df0a87b1ba41010e7f1347f92c195066 100644 (file)
*
******************************************************************************/
-int main()
-{
- return benchmarkCNNClassCaffeNetTesting();
-}
+//int main()
+//{
+// return benchmarkCNNClassCaffeNetTesting();
+//}
/******************************************************************************/
setbuf(stdout, NULL); // do not buffer the console output
// read model
- timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH, 0);
+ timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH);
+ timlCNNReset(cnn);
timlCNNSetMode(cnn, Util_Test);
// read labels
}
// profiling
- timlCNNProfile(cnn, testImage, dim, testNum, testLabel, iter);
+ timlCNNProfile(cnn, testImage, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, testLabel, 1, 1, testNum, iter);
// clean up
timlCNNDelete(cnn);
diff --git a/src/benchmark/cnn/class/imagenet/benchmarkCNNClassVGGNetTesting.c b/src/benchmark/cnn/class/imagenet/benchmarkCNNClassVGGNetTesting.c
index 4ea529af1565603df4a857f8191973cd8c69b36e..03b3b9b65ff0a5512fc1322acf4200eab17156d0 100644 (file)
*
******************************************************************************/
-int main()
-{
-
- return benchmarkCNNClassVGGNetTesting();
-}
+//int main()
+//{
+//
+// return benchmarkCNNClassVGGNetTesting();
+//}
/******************************************************************************/
setbuf(stdout, NULL); // do not buffer the console output
// read model
- timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH, 0);
+ timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH);
+ timlCNNReset(cnn);
timlCNNSetMode(cnn, Util_Test);
// read labels
}
// profiling
- timlCNNProfile(cnn, testImage, dim, testNum, testLabel, iter);
+ timlCNNProfile(cnn, testImage, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, testLabel, 1, 1, testNum, iter);
// clean up
timlCNNDelete(cnn);
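Both benchmarks pick up the new timlCNNProfile() signature, which replaces the packed (dim, num) arguments with explicit image geometry, label geometry, and a batch size. The call shape, as used above (labelRow = labelCol = 1 for plain classification labels):

    /* old: timlCNNProfile(cnn, testImage, dim, testNum, testLabel, iter); */
    timlCNNProfile(cnn, testImage, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, testLabel, 1, 1, testNum, iter);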
diff --git a/src/common/api/timl.h b/src/common/api/timl.h
index f83d08b58d0f0501689d5a4b4c3b0eabf5ac3771..3dbf111df649870d32ed3627896c62ed2bf4d7ef 100644 (file)
--- a/src/common/api/timl.h
+++ b/src/common/api/timl.h
*\r
******************************************************************************/\r
\r
-#define TIML_CPU\r
-#ifndef TIML_CPU\r
-#define TIML_ALT\r
-#endif\r
+//#define TIML_CPU_CPU\r
+#define TIML_ARM_DSP\r
+//#define TIML_ARM_ARM\r
+//#define TIML_CPU_ALT\r
\r
\r
/*******************************************************************************\r
*\r
******************************************************************************/\r
\r
-timlConvNeuralNetwork *timlCNNCreateConvNeuralNetwork(timlCNNTrainingParams params, int deviceId);\r
+timlConvNeuralNetwork *timlCNNCreateConvNeuralNetwork(timlCNNTrainingParams params);\r
\r
int timlCNNAddInputLayer(timlConvNeuralNetwork *cnn, int featureMapRow, int featureMapCol, int featureMapChannel, timlCNNInputParams params);\r
\r
@@ -115,12 +115,22 @@ int timlCNNAddLinearLayer(timlConvNeuralNetwork *cnn, int dim, timlCNNLinearPara
\r
int timlCNNAddDropoutLayer(timlConvNeuralNetwork *cnn, float prob);\r
\r
+int timlCNNAddSoftmaxLayer(timlConvNeuralNetwork *cnn);\r
+\r
+int timlCNNAddSoftmaxCostLayer(timlConvNeuralNetwork *cnn);\r
+\r
+int timlCNNAddAccuracyLayer(timlConvNeuralNetwork *cnn, int top);\r
+\r
int timlCNNInitialize(timlConvNeuralNetwork *cnn);\r
\r
int timlCNNReset(timlConvNeuralNetwork *cnn);\r
\r
int timlCNNDelete(timlConvNeuralNetwork *cnn);\r
\r
+int timlCNNDeleteLayer(timlCNNLayer *layer);\r
+\r
+int timlCNNFree(timlConvNeuralNetwork *cnn);\r
+\r
\r
/*******************************************************************************\r
*\r
*\r
******************************************************************************/\r
\r
-int timlCNNSupervisedTrainingWithLabelBatchMode(timlConvNeuralNetwork *cnn, float *data, int *label, int dim, int num);\r
-\r
-int timlCNNClassifyTopNBatchMode(timlConvNeuralNetwork *cnn, float *data, int dim, int num, int *label, float *percent, int topN);\r
+int timlCNNSupervisedTrainingWithLabel(timlConvNeuralNetwork *cnn, float *image, int row, int col, int channel, int *label, int labelRow, int labelCol, int batchUpdate);\r
\r
-int timlCNNClassifyTop1SingleMode(timlConvNeuralNetwork *cnn, float *data, int dim);\r
+int timlCNNClassifyAccuracy(timlConvNeuralNetwork *cnn, float *image, int row, int col, int channel, int *label, int labelRow, int labelCol, int num, int *success);\r
\r
int timlCNNSetMode(timlConvNeuralNetwork *cnn, timlUtilPhase phase);\r
\r
*\r
******************************************************************************/\r
\r
-timlConvNeuralNetwork* timlCNNClone(timlConvNeuralNetwork *cnn, int deviceId);\r
+timlConvNeuralNetwork* timlCNNClone(timlConvNeuralNetwork *cnn);\r
\r
-timlConvNeuralNetwork* timlCNNShareParams(timlConvNeuralNetwork *cnn, int deviceId);\r
-\r
-long timlCNNMemory(timlConvNeuralNetwork *cnn);\r
+timlConvNeuralNetwork* timlCNNShareParams(timlConvNeuralNetwork *cnn);\r
\r
long timlCNNGetParamsNum(timlConvNeuralNetwork *cnn);\r
\r
int timlCNNWriteToFile(const char * fileName, timlConvNeuralNetwork *cnn, timlUtilParamsLevel paramsLevel, const char* name, const char *floatFormat, const char *intFormat);\r
\r
-timlConvNeuralNetwork* timlCNNReadFromFile(const char * fileName, int deviceId);\r
+timlConvNeuralNetwork* timlCNNReadFromFile(const char * fileName);\r
+\r
+int timlCNNReadFromStatesMemory(timlConvNeuralNetwork *cnnCopy, timlConvNeuralNetwork *cnn);\r
+\r
+int timlCNNReadFromParamsMemory(timlConvNeuralNetwork *cnnCopy, timlConvNeuralNetwork *cnn);\r
+\r
+int timlCNNReadFromStatesBinaryFile(timlConvNeuralNetwork *cnn, const char *fileName);\r
+\r
+int timlCNNReadFromParamsBinaryFile(timlConvNeuralNetwork *cnn, const char *fileName);\r
\r
int timlCNNPrint(timlConvNeuralNetwork *cnn);\r
\r
-int timlCNNProfile(timlConvNeuralNetwork *cnn, float *data, int dim, int num, int *label, int iter);\r
+int timlCNNProfile(timlConvNeuralNetwork *cnn, float *image, int row, int col, int channel, int *label, int labelRow, int labelCol, int batchSize, int iter);\r
\r
int timlCNNResize(timlConvNeuralNetwork *cnn, int row, int col, int channel);\r
\r
int timlCNNGetLayerNum(timlConvNeuralNetwork *cnn);\r
\r
-int timlCNNResize(timlConvNeuralNetwork *cnn, int row, int col, int channel);\r
+\r
\r
//@}\r
#endif\r
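With the deviceId parameter dropped from the constructors, readers, and cloning functions, a typical test-time load path under the revised API mirrors the SBD testing example earlier in this commit: read the network description, optionally append an accuracy layer, initialize, then load trained parameters. A sketch, assuming cnn->paramsFileName holds the parameter file path recorded in the network description (the field appears in timlCNN.h below):

    timlConvNeuralNetwork *cnn = timlCNNReadFromFile(MODEL_PATH); /* deviceId argument removed */
    timlCNNAddAccuracyLayer(cnn, 1);                              /* top-1 accuracy */
    timlCNNInitialize(cnn);
    timlCNNReadFromParamsBinaryFile(cnn, cnn->paramsFileName);
    timlCNNSetMode(cnn, Util_Test);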
index 93c22fa1f6609e29c663186164d4e769093111a2..bbdc5042cdd55150aabb8ce1e2da60e7fbec54ea 100644 (file)
--- a/src/common/cnn/timlCNN.h
+++ b/src/common/cnn/timlCNN.h
ERROR_CNN_NULL_PTR,\r
ERROR_CNN_EMPTY,\r
ERROR_CNN_READ_FILE,\r
- ERROR_CNN_CLASS\r
+ ERROR_CNN_CLASS,\r
+ ERROR_CNN_LOAD_IMAGE,\r
+ ERROR_CNN_LOAD_LABEL,\r
+ ERROR_CNN_IMAGE_DIM_MISMATCH,\r
+ ERROR_CNN_LABEL_DIM_MISMATCH\r
} timlCNNError;\r
\r
typedef enum {\r
CNN_Nonlinear,\r
CNN_Linear,\r
CNN_Norm,\r
- CNN_Dropout\r
+ CNN_Dropout,\r
+ CNN_Softmax,\r
+ CNN_SoftmaxCost,\r
+ CNN_Accuracy\r
} timlCNNLayerType;\r
\r
typedef enum {\r
*\r
******************************************************************************/\r
\r
+typedef struct {\r
+ float *max; /**< size = row*col*maxBatchSize */\r
+ float *sum; /**< size = row*col*maxBatchSize */\r
+ float *jacob; /**< Jacobian matrix */\r
+} timlCNNSoftmaxParams;\r
+\r
+typedef struct {\r
+ float *cost; /**< size = maxBatchSize*/\r
+ int *label;\r
+} timlCNNSoftmaxCostParams;\r
+\r
+typedef struct {\r
+ int *label;\r
+ int *trueLabel;\r
+ int top;\r
+ int success;\r
+} timlCNNAccuracyParams;\r
+\r
typedef struct {\r
timlCNNPoolingType type;\r
int scaleRow; /**< pooling kernel row size */\r
float *bias;\r
float *biasInc;\r
float *biasGradAccum;\r
+ float *biasMultiplier; /**< size = maxBatchSize */\r
float weightDecayFactor;\r
timlUtilInitializer weightInit;\r
timlUtilInitializer biasInit;\r
float *bias;\r
float *biasGradAccum;\r
float *biasInc;\r
- float *biasMultiplier;\r
+ float *biasMultiplier; /**< size = row*col */\r
float biasLearningFactor;\r
timlUtilInitializer biasInit;\r
bool shared; /** if this layer shares parameters from another layer */\r
} timlCNNInputParams;\r
\r
typedef struct {\r
- int *mask; /**< a mask matrix of values (0,1) */\r
- unsigned int *randomVector; /**< dropout random unsigned int vector */\r
+ unsigned int *mask; /**< a mask matrix of values (0,1) */\r
float prob; /**< dropout probability */\r
} timlCNNDropoutParams;\r
\r
int row;\r
int col;\r
int channel;\r
+    int batchSize; /**< batch size */\r
+ int maxBatchSize; /**< max batch size */\r
float *featureMap;\r
float *delta; /**< partial derivative of the cost function with regard to each kernel */\r
timlUtilPhase phase;\r
timlCNNPoolingParams poolingParams;\r
timlCNNNonlinearParams nonlinearParams;\r
timlCNNLinearParams linearParams;\r
+ timlCNNSoftmaxParams softmaxParams;\r
+ timlCNNSoftmaxCostParams softmaxCostParams;\r
+ timlCNNAccuracyParams accuracyParams;\r
+ size_t forwardCompute;\r
+ size_t backwardCompute;\r
+ size_t forwardMemory;\r
+ size_t backwardMemory;\r
+ size_t forwardMemMov;\r
+ size_t backwardMemMov;\r
+ size_t fixedMemory;\r
+ size_t paramsMemory;\r
+ timlUtilMemPoolPos memPoolPos;\r
struct _timlCNNLayer_ *prev; /**< layers are connected with double linked list */\r
struct _timlCNNLayer_ *next;\r
struct _timlConvNeuralNetwork_ *cnn;\r
} timlCNNLayer;\r
\r
typedef struct {\r
- int count; /**< data count */\r
int batchCount; /**< batch count */\r
- int epoch; /**< how many iterations we need to run through the whole database */\r
+ int epoch; /**< how many iterations we need to run through the whole database */\r
timlUtilPhase phase;\r
timlUtilAllocatorLevel allocatorLevel;\r
- int batchSize; /**< how many samples do we process until we update the parameters */\r
+ int batchSize; /**< the batch size of the cnn */\r
+ int batchUpdate; /**< how many samples do we process until we update the parameters */\r
+ int maxBatchSize; /**< max batch size*/\r
float momentum;\r
float learningRate;\r
float weightDecay;\r
} timlCNNTrainingParams;\r
\r
typedef struct _timlConvNeuralNetwork_ {\r
- float *memPool; /**< used by allocatorLevel3 mode to store the feature maps */\r
- int memPoolSize; /**< size of the memory pool */\r
+ char *memPool; /**< used by allocatorLevel3 mode to store the feature maps */\r
+ size_t memPoolSize; /**< size of the memory pool */\r
+ size_t fixedMemory;\r
+ size_t forwardMemory;\r
+ size_t backwardMemory;\r
+ size_t forwardMemMov;\r
+ size_t backwardMemMov;\r
+ size_t paramsMemory;\r
+ size_t totalMemory;\r
int deviceId;\r
int threadId;\r
timlCNNLayer *head;\r
timlCNNLayer *tail;\r
timlCNNTrainingParams params;\r
+ char configFileName[TIML_UTIL_MAX_STR];\r
+ char paramsFileName[TIML_UTIL_MAX_STR];\r
+ char statesFileName[TIML_UTIL_MAX_STR];\r
} timlConvNeuralNetwork;\r
\r
\r
\r
int timlCNNDropoutInitialize(timlCNNLayer *layer);\r
\r
+int timlCNNSoftmaxInitialize(timlCNNLayer *layer);\r
+\r
+int timlCNNSoftmaxCostInitialize(timlCNNLayer *layer);\r
+\r
+int timlCNNAccuracyInitialize(timlCNNLayer *layer);\r
+\r
\r
/*******************************************************************************\r
*\r
*\r
******************************************************************************/\r
\r
-int timlCNNBackPropagation(timlConvNeuralNetwork *cnn, timlCNNLayer *layer);\r
+int timlCNNBackPropagation(timlConvNeuralNetwork *cnn);\r
\r
int timlCNNConvBackPropagation(timlCNNLayer *layer);\r
\r
\r
int timlCNNDropoutBackPropagation(timlCNNLayer *layer);\r
\r
-int timlCNNCostWithLabel(timlConvNeuralNetwork *cnn, int label, float *cost, timlCNNLayer **bpStartLayer);\r
+int timlCNNSoftmaxCostBackPropagation(timlCNNLayer *layer);\r
+\r
+int timlCNNSoftmaxBackPropagation(timlCNNLayer *layer);\r
\r
\r
/*******************************************************************************\r
@@ -332,9 +390,23 @@ int timlCNNCostWithLabel(timlConvNeuralNetwork *cnn, int label, float *cost, tim
*\r
******************************************************************************/\r
\r
-int timlCNNForwardPropagation(timlConvNeuralNetwork *cnn, float *data, int dim);\r
+int timlCNNLoadImage(timlConvNeuralNetwork *cnn, float *image, int row, int col, int channel, int batchSize);\r
+\r
+int timlCNNLoadLabel(timlConvNeuralNetwork *cnn, int *label, int row, int col, int batchSize);\r
\r
-int timlCNNInputForwardPropagation(timlCNNLayer *layer, float *data, int dim);\r
+int timlCNNLoadParamsFromMemory(timlConvNeuralNetwork *cnnCopy, timlConvNeuralNetwork *cnn);\r
+\r
+int timlCNNLoadStatesFromMemory(timlConvNeuralNetwork *cnnCopy, timlConvNeuralNetwork *cnn);\r
+\r
+int timlCNNLoadParamsFromBinaryFile(timlConvNeuralNetwork *cnn, const char *fileName);\r
+\r
+int timlCNNLoadStatesFromBinaryFile(timlConvNeuralNetwork *cnn, const char *fileName);\r
+\r
+\r
+\r
+int timlCNNForwardPropagation(timlConvNeuralNetwork *cnn);\r
+\r
+int timlCNNInputForwardPropagation(timlCNNLayer *layer);\r
\r
int timlCNNLinearForwardPropagation(timlCNNLayer *layer);\r
\r
\r
int timlCNNConvForwardPropagation(timlCNNLayer *layer);\r
\r
+int timlCNNSoftmaxCostForwardPropagation(timlCNNLayer *layer);\r
+\r
+int timlCNNSoftmaxForwardPropagation(timlCNNLayer *layer);\r
+\r
+int timlCNNAccuracyForwardPropagation(timlCNNLayer *layer);\r
\r
/*******************************************************************************\r
*\r
*\r
******************************************************************************/\r
\r
-int timlCNNDeleteConvLayer(timlCNNLayer *layer);\r
+int timlCNNFreeConvLayer(timlCNNLayer *layer);\r
+\r
+int timlCNNFreeInputLayer(timlCNNLayer * layer);\r
\r
-int timlCNNDeleteInputLayer(timlCNNLayer * layer);\r
+int timlCNNFreeNonlinearLayer(timlCNNLayer * layer);\r
\r
-int timlCNNDeleteNonlinearLayer(timlCNNLayer * layer);\r
+int timlCNNFreeNormLayer(timlCNNLayer *layer);\r
\r
-int timlCNNDeleteNormLayer(timlCNNLayer *layer);\r
+int timlCNNFreePoolingLayer(timlCNNLayer *layer);\r
\r
-int timlCNNDeletePoolingLayer(timlCNNLayer *layer);\r
+int timlCNNFreeLinearLayer(timlCNNLayer * layer);\r
\r
-int timlCNNDeleteLinearLayer(timlCNNLayer * layer);\r
+int timlCNNFreeDropoutLayer(timlCNNLayer *layer);\r
\r
-int timlCNNDeleteDropoutLayer(timlCNNLayer *layer);\r
+int timlCNNFreeSoftmaxCostLayer(timlCNNLayer *layer);\r
+\r
+int timlCNNFreeSoftmaxLayer(timlCNNLayer *layer);\r
+\r
+int timlCNNFreeAccuracyLayer(timlCNNLayer *layer);\r
\r
\r
/*******************************************************************************\r
\r
int timlCNNResetPoolingLayer(timlCNNLayer *layer);\r
\r
+int timlCNNResetSoftmaxCostLayer(timlCNNLayer *layer);\r
+\r
+int timlCNNResetSoftmaxLayer(timlCNNLayer *layer);\r
+\r
+int timlCNNResetAccuracyLayer(timlCNNLayer *layer);\r
\r
/*******************************************************************************\r
*\r
@@ -416,6 +504,10 @@ int timlCNNConvWriteToFile(FILE *fp1, FILE *fp2, FILE *fp3, timlCNNLayer *layer,
\r
int timlCNNNonlinearWriteToFile(FILE *fp1, FILE *fp2, FILE *fp3, timlCNNLayer *layer, timlUtilParamsLevel level, const char* name, const char *floatFormat, const char *intFormat);\r
\r
+int timlCNNSoftmaxWriteToFile(FILE *fp1, FILE *fp2, FILE *fp3, timlCNNLayer *layer, timlUtilParamsLevel level, const char* name, const char *floatFormat, const char *intFormat);\r
+\r
+int timlCNNSoftmaxCostWriteToFile(FILE *fp1, FILE *fp2, FILE *fp3, timlCNNLayer *layer, timlUtilParamsLevel level, const char* name, const char *floatFormat, const char *intFormat);\r
+\r
int timlCNNNormWriteToFile(FILE *fp1, FILE *fp2, FILE *fp3, timlCNNLayer *layer, timlUtilParamsLevel level, const char* name, const char *floatFormat, const char *intFormat);\r
\r
int timlCNNPoolingWriteToFile(FILE *fp1, FILE *fp2, FILE *fp3, timlCNNLayer *layer, timlUtilParamsLevel level, const char* name, const char *floatFormat, const char *intFormat);\r
\r
int timlCNNInputReadFromTextFile(FILE *fp, timlConvNeuralNetwork *cnn);\r
\r
-int timlCNNConvReadFromBinaryFile(FILE *fp2, FILE *fp3, timlCNNLayer *layer);\r
+int timlCNNSoftmaxReadFromTextFile(FILE *fp, timlConvNeuralNetwork *cnn);\r
+\r
+int timlCNNSoftmaxCostReadFromTextFile(FILE *fp, timlConvNeuralNetwork *cnn);\r
+\r
+\r
+int timlCNNConvReadFromParamsBinaryFile(FILE *fp2, timlCNNLayer *layer);\r
+\r
+int timlCNNConvReadFromParamsMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer);\r
+\r
+int timlCNNLinearReadFromParamsBinaryFile(FILE *fp2, timlCNNLayer *layer);\r
+\r
+int timlCNNLinearReadFromParamsMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer);\r
+\r
+int timlCNNInputReadFromParamsBinaryFile(FILE *fp2, timlCNNLayer *layer);\r
+\r
+int timlCNNInputReadFromParamsMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer);\r
+\r
+\r
+\r
+int timlCNNConvReadFromStatesBinaryFile(FILE *fp3, timlCNNLayer *layer);\r
+\r
+int timlCNNLinearReadFromStatesMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer);\r
+\r
+int timlCNNInputReadFromStatesBinaryFile(FILE *fp3, timlCNNLayer *layer);\r
+\r
+int timlCNNInputReadFromStatesMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer);\r
\r
-int timlCNNLinearReadFromBinaryFile(FILE *fp2, FILE *fp3, timlCNNLayer *layer);\r
+int timlCNNLinearReadFromStatesBinaryFile(FILE *fp3, timlCNNLayer *layer);\r
\r
-int timlCNNInputReadFromBinaryFile(FILE *fp2, FILE *fp3, timlCNNLayer *layer);\r
+int timlCNNDropoutReadFromStatesBinaryFile(FILE *fp3, timlCNNLayer *layer);\r
\r
+int timlCNNDropoutReadFromStatesMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer);\r
+\r
+int timlCNNPoolingReadFromStatesBinaryFile(FILE *fp3, timlCNNLayer *layer);\r
+\r
+int timlCNNPoolingReadFromStatesMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer);\r
+\r
+int timlCNNSoftmaxReadFromStatesBinaryFile(FILE *fp3, timlCNNLayer *layer);\r
+\r
+int timlCNNSoftmaxReadFromStatesMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer);\r
+\r
+int timlCNNSoftmaxCostReadFromStatesBinaryFile(FILE *fp3, timlCNNLayer *layer);\r
+\r
+int timlCNNSoftmaxCostReadFromStatesMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer);\r
+\r
+int timlCNNAccuracyReadFromStatesBinaryFile(FILE *fp3, timlCNNLayer *layer);\r
+\r
+int timlCNNAccuracyReadFromStatesMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer);\r
+\r
+int timlCNNNormReadFromStatesBinaryFile(FILE *fp3, timlCNNLayer *layer);\r
+\r
+int timlCNNNormReadFromStatesMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer);\r
\r
/*******************************************************************************\r
*\r
- * MISC FUNCTIONS\r
+ * MEMORY AND COMPUTE ESTIMATE FUNCTIONS\r
*\r
******************************************************************************/\r
\r
-int timlCNNAssignDevice(timlConvNeuralNetwork *cnn, int deviceId, int threadId);\r
+int timlCNNMemory(timlConvNeuralNetwork *cnn);\r
+\r
+int timlCNNAccuracyMemory(timlCNNLayer *layer);\r
+\r
+int timlCNNAccuracyCompute(timlCNNLayer *layer);\r
+\r
+int timlCNNInputMemory(timlCNNLayer *layer);\r
+\r
+int timlCNNInputCompute(timlCNNLayer *layer);\r
+\r
+int timlCNNConvMemory(timlCNNLayer *layer);\r
+\r
+int timlCNNConvCompute(timlCNNLayer *layer);\r
+\r
+int timlCNNNonlinearMemory(timlCNNLayer *layer);\r
+\r
+int timlCNNNonlinearCompute(timlCNNLayer *layer);\r
+\r
+int timlCNNLinearMemory(timlCNNLayer *layer);\r
+\r
+int timlCNNLinearCompute(timlCNNLayer *layer);\r
+\r
+int timlCNNPoolingMemory(timlCNNLayer *layer);\r
+\r
+int timlCNNPoolingCompute(timlCNNLayer *layer);\r
+\r
+int timlCNNMaxPoolingMemory(timlCNNLayer *layer);\r
+\r
+int timlCNNMaxPoolingCompute(timlCNNLayer *layer);\r
+\r
+int timlCNNMeanPoolingMemory(timlCNNLayer *layer);\r
+\r
+int timlCNNMeanPoolingCompute(timlCNNLayer *layer);\r
+\r
+int timlCNNNormMemory(timlCNNLayer *layer);\r
+\r
+int timlCNNNormCompute(timlCNNLayer *layer);\r
+\r
+int timlCNNSoftmaxMemory(timlCNNLayer *layer);\r
+\r
+int timlCNNSoftmaxCompute(timlCNNLayer *layer);\r
+\r
+int timlCNNSoftmaxCostMemory(timlCNNLayer *layer);\r
+\r
+int timlCNNSoftmaxCostCompute(timlCNNLayer *layer);\r
+\r
+/*******************************************************************************\r
+ *\r
+ * MISC FUNCTIONS\r
+ *\r
+ ******************************************************************************/\r
\r
const char* timlCNNLayerTypeStr(timlCNNLayer* layer);\r
\r
int timlCNNMemPoolSize(timlConvNeuralNetwork *cnn);\r
\r
+int timlCNNSetBatchSize(timlConvNeuralNetwork *cnn, int batchSize);\r
\r
/*******************************************************************************\r
*\r
*\r
******************************************************************************/\r
\r
-int timlCNNSupervisedTrainingWithLabelBatchModeOpenMP(timlConvNeuralNetwork *cnn, float *data, int *label, int dim, int num);\r
-\r
-int timlCNNClassifyTopNBatchModeOpenMP(timlConvNeuralNetwork *cnn, float *data, int dim, int num, int *label, float *percent, int topN);\r
-\r
-int timlCNNClassifyTopNTeamModeOpenMP(timlConvNeuralNetwork **cnnTeam, int num, float *data, int dim, int *label, float *percent, int topN);\r
+int timlCNNSupervisedTrainingWithLabelTeamModeOpenMP(timlConvNeuralNetwork **cnnTeam, int teamNum, float *image, int row, int col, int channel, int *label, int labelRow, int labelCol, int batchUpdate);\r
\r
+int timlCNNClassifyAccuracyTeamModeOpenMP(timlConvNeuralNetwork **cnnTeam, int teamNum, float *image, int row, int col, int channel, int *label, int labelRow, int labelCol, int num, int *success);\r
//@}\r
\r
#endif /* TIMLCNN_H_ */\r
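The removed single-network batch entry points give way to team-mode OpenMP variants operating on an array of parameter-sharing networks. A sketch combining timlCNNShareParams() with the new accuracy entry point (geometry arguments are illustrative; labelRow = labelCol = 1 for classification):

    int i, success;
    int teamNum = omp_get_max_threads();
    timlConvNeuralNetwork **cnnTeam = malloc(sizeof(timlConvNeuralNetwork*)*teamNum);
    cnnTeam[0] = cnn;
    for (i = 1; i < teamNum; i++) {
        cnnTeam[i] = timlCNNShareParams(cnn); /* weights shared, states private */
        timlCNNInitialize(cnnTeam[i]);
    }
    timlCNNClassifyAccuracyTeamModeOpenMP(cnnTeam, teamNum, image, row, col, channel, label, 1, 1, num, &success);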
diff --git a/src/common/cnn/timlCNNAccuracyCompute.c b/src/common/cnn/timlCNNAccuracyCompute.c
--- /dev/null
@@ -0,0 +1,65 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNAccuracyCompute.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+/******************************************************************************/
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Calculate the compute requirement for the layer
+ * \param[in] layer CNN layer
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNAccuracyCompute(timlCNNLayer *layer)
+{
+    // the accuracy layer adds no arithmetic, so its compute estimate is zero
+    layer->forwardCompute = 0;
+    layer->backwardCompute = 0;
+ return 0;
+}
diff --git a/src/common/cnn/timlCNNAccuracyForwardPropagation.c b/src/common/cnn/timlCNNAccuracyForwardPropagation.c
--- /dev/null
@@ -0,0 +1,94 @@
+/******************************************************************************/\r
+/*!\r
+ * \file timlCNNAccuracyForwardPropagation.c\r
+ */\r
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/\r
+ *\r
+ * Redistribution and use in source and binary forms, with or without\r
+ * modification, are permitted provided that the following conditions\r
+ * are met:\r
+ *\r
+ * Redistributions of source code must retain the above copyright\r
+ * notice, this list of conditions and the following disclaimer.\r
+ *\r
+ * Redistributions in binary form must reproduce the above copyright\r
+ * notice, this list of conditions and the following disclaimer in the\r
+ * documentation and/or other materials provided with the\r
+ * distribution.\r
+ *\r
+ * Neither the name of Texas Instruments Incorporated nor the names of\r
+ * its contributors may be used to endorse or promote products derived\r
+ * from this software without specific prior written permission.\r
+ *\r
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+ *\r
+ ******************************************************************************/\r
+\r
+\r
+/*******************************************************************************\r
+ *\r
+ * INCLUDES\r
+ *\r
+ ******************************************************************************/\r
+\r
+#include "../api/timl.h"\r
+\r
+\r
+/******************************************************************************/\r
+/*!\r
+ * \ingroup cnn\r
+ * \brief Forward propagate from prevLayer to prevLayer->next\r
+ * \param[in] prevLayer Previous layer ptr\r
+ * \return Error code\r
+ */\r
+/******************************************************************************/\r
+\r
+int timlCNNAccuracyForwardPropagation(timlCNNLayer *prevLayer)\r
+{\r
+ timlCNNLayer *layer;\r
+ int dim;\r
+ int deviceId;\r
+ int threadId;\r
+ int r;\r
+ int c;\r
+ int b;\r
+ int offset;\r
+ int trueLabel;\r
+\r
+ // init\r
+ layer = prevLayer->next;\r
+ dim = prevLayer->channel*prevLayer->row*prevLayer->col*prevLayer->batchSize;\r
+ deviceId = prevLayer->cnn->deviceId;\r
+ threadId = prevLayer->cnn->threadId;\r
+\r
+ layer->accuracyParams.success = 0;\r
+ timlUtilMemcpy(layer->featureMap, prevLayer->featureMap, dim*sizeof(float), deviceId, threadId);\r
+\r
+\r
+ for(b = 0 ; b < layer->batchSize; b++) {\r
+ for (r = 0; r < layer->row; r++) {\r
+ for (c = 0; c < layer->col; c++) {\r
+ offset = b*layer->row*layer->col*layer->channel + r*layer->col + c;\r
+ trueLabel = layer->accuracyParams.trueLabel[b*layer->row*layer->col + r*layer->col + c];\r
+ timlUtilVectorSortIndexFloat(layer->featureMap + offset, layer->accuracyParams.label + offset, layer->channel, layer->row*layer->col, Util_Descend);\r
+ if (timlUtilHitLabel(layer->accuracyParams.label + offset, layer->accuracyParams.top, layer->row*layer->col, trueLabel)) {\r
+ layer->accuracyParams.success++;\r
+ }\r
+ }\r
+ }\r
+ }\r
+\r
+ return 0;\r
+\r
+\r
+}\r
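The nested loops rank the channel scores at each pixel of each batch image and count a hit whenever the true label appears among the top-scoring channels. A self-contained sketch of that per-pixel test (this assumes timlUtilVectorSortIndexFloat orders channel indices by descending score and timlUtilHitLabel checks membership among the first 'top' entries; the library version also reads the scores with a row*col stride, which the sketch flattens away):

    /* returns 1 if trueLabel is among the top-N scoring entries of score[0..channel-1] */
    static int hitTopNSketch(const float *score, int channel, int top, int trueLabel)
    {
        int n, c, best;
        unsigned char used[256] = {0}; /* sketch assumes channel <= 256 */
        for (n = 0; n < top; n++) {
            best = -1;
            for (c = 0; c < channel; c++) {
                if (!used[c] && (best < 0 || score[c] > score[best])) {
                    best = c; /* running max over channels not yet ranked */
                }
            }
            if (best < 0) {
                break; /* top exceeded the channel count */
            }
            if (best == trueLabel) {
                return 1;
            }
            used[best] = 1;
        }
        return 0;
    }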
diff --git a/src/common/cnn/timlCNNAccuracyInitialize.c b/src/common/cnn/timlCNNAccuracyInitialize.c
--- /dev/null
@@ -0,0 +1,68 @@
+/******************************************************************************/\r
+/*!\r
+ * \file timlCNNAccuracyInitialize.c\r
+ */\r
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/\r
+ *\r
+ * Redistribution and use in source and binary forms, with or without\r
+ * modification, are permitted provided that the following conditions\r
+ * are met:\r
+ *\r
+ * Redistributions of source code must retain the above copyright\r
+ * notice, this list of conditions and the following disclaimer.\r
+ *\r
+ * Redistributions in binary form must reproduce the above copyright\r
+ * notice, this list of conditions and the following disclaimer in the\r
+ * documentation and/or other materials provided with the\r
+ * distribution.\r
+ *\r
+ * Neither the name of Texas Instruments Incorporated nor the names of\r
+ * its contributors may be used to endorse or promote products derived\r
+ * from this software without specific prior written permission.\r
+ *\r
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+ *\r
+ ******************************************************************************/\r
+\r
+\r
+/*******************************************************************************\r
+ *\r
+ * INCLUDES\r
+ *\r
+ ******************************************************************************/\r
+\r
+#include "../api/timl.h"\r
+\r
+/******************************************************************************/\r
+/*!\r
+ * \ingroup cnn\r
+ * \brief Initialize the accuracy layer\r
+ * \param[in] layer Layer ptr\r
+ * \return Error code\r
+ */\r
+/******************************************************************************/\r
+\r
+int timlCNNAccuracyInitialize(timlCNNLayer *layer)\r
+{\r
+ timlConvNeuralNetwork *cnn = layer->cnn;\r
+ char *offset; // byte\r
+\r
+ // allocatorLevel 1, 2, 3\r
+ if (layer->allocatorLevel == Util_AllocatorLevel1 || layer->allocatorLevel == Util_AllocatorLevel2 || layer->allocatorLevel == Util_AllocatorLevel3) {\r
+ timlUtilMallocHost((void**)&layer->featureMap, sizeof(float)*layer->maxBatchSize*layer->row*layer->col*layer->channel);\r
+ timlUtilMallocHost((void**)&layer->accuracyParams.label, sizeof(int)*layer->maxBatchSize*layer->row*layer->col*layer->channel);\r
+ timlUtilMallocHost((void**)&layer->accuracyParams.trueLabel, sizeof(int)*layer->maxBatchSize*layer->row*layer->col);\r
+ }\r
+\r
+ return 0;\r
+}\r
diff --git a/src/common/cnn/timlCNNAccuracyMemory.c b/src/common/cnn/timlCNNAccuracyMemory.c
--- /dev/null
@@ -0,0 +1,75 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNAccuracyMemory.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+/******************************************************************************/
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Calculate the memory requirement for the layer
+ * \param[in] layer CNN layer
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNAccuracyMemory(timlCNNLayer *layer)
+{
+ int dataSize = sizeof(float);
+ int labelSize = sizeof(int);
+ layer->forwardMemory = 0;
+ layer->backwardMemory = 0;
+ layer->paramsMemory = 0;
+
+    // the accuracy layer is special in that all of its allocated memory counts as paramsMemory
+
+ // feature map
+ layer->paramsMemory += dataSize*layer->maxBatchSize*layer->row*layer->col*layer->channel;
+ // label
+ layer->paramsMemory += labelSize*layer->maxBatchSize*layer->row*layer->col*layer->channel;
+ // true label
+ layer->paramsMemory += labelSize*layer->maxBatchSize*layer->row*layer->col;
+ return 0;
+}
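As a concrete check of the formula, take dimensions matching the SBD training example's final linear layer (maxBatchSize = 100, row = col = 1, channel = 8) with 4-byte floats and ints:

    /* feature map : 4 * 100 * 1 * 1 * 8 = 3200 bytes
     * label       : 4 * 100 * 1 * 1 * 8 = 3200 bytes
     * true label  : 4 * 100 * 1 * 1     =  400 bytes
     * paramsMemory                      = 6800 bytes (about 6.6 KB)
     */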
diff --git a/src/common/cnn/timlCNNAccuracyReadFromStatesBinaryFile.c b/src/common/cnn/timlCNNAccuracyReadFromStatesBinaryFile.c
--- /dev/null
@@ -0,0 +1,76 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNAccuracyReadFromStatesBinaryFile.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Read the accuracy layer states from a binary file
+ * \param[in] fp3 FILE ptr to the state bin file
+ * \param[in,out] layer Layer ptr
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNAccuracyReadFromStatesBinaryFile(FILE *fp3, timlCNNLayer *layer)
+{
+ int dim;
+ if(fp3 != NULL) {
+ // feature map
+ dim = layer->maxBatchSize*layer->row*layer->col*layer->channel;
+ if (layer->featureMap != NULL) {
+ timlUtilFread(layer->featureMap, sizeof(float), dim, fp3);
+ }
+ // label
+ if (layer->accuracyParams.label != NULL) {
+ timlUtilFread(layer->accuracyParams.label, sizeof(int), dim, fp3);
+ }
+ // true label
+ if (layer->accuracyParams.trueLabel != NULL) {
+ timlUtilFread(layer->accuracyParams.trueLabel, sizeof(int), layer->maxBatchSize*layer->row*layer->col, fp3);
+ }
+ }
+ return 0;
+}
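The reader consumes the same three blobs, in the same order, that timlCNNAccuracyWriteToFile emits later in this commit; the fp3 layout for the accuracy layer is therefore:

    /* float featureMap[maxBatchSize*row*col*channel];
     * int   label[maxBatchSize*row*col*channel];
     * int   trueLabel[maxBatchSize*row*col];
     */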
diff --git a/src/common/cnn/timlCNNAccuracyReadFromStatesMemory.c b/src/common/cnn/timlCNNAccuracyReadFromStatesMemory.c
--- /dev/null
@@ -0,0 +1,78 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNAccuracyReadFromStatesMemory.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Read the accuracy layer states from memory
+ * \param[in] layer Target layer ptr
+ * \param[out] layerCopy Copied layer ptr
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNAccuracyReadFromStatesMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer)
+{
+ int dim;
+ int deviceId;
+ int threadId;
+ deviceId = layerCopy->cnn->deviceId;
+ threadId = layerCopy->cnn->threadId;
+ // feature map
+ dim = layer->maxBatchSize*layer->row*layer->col*layer->channel;
+ if (layerCopy->featureMap != NULL && layer->featureMap != NULL) {
+ timlUtilMemcpy(layerCopy->featureMap, layer->featureMap, sizeof(float)*dim, deviceId, threadId);
+ }
+ // label
+ if (layerCopy->accuracyParams.label != NULL && layer->accuracyParams.label != NULL) {
+ timlUtilMemcpy(layerCopy->accuracyParams.label, layer->accuracyParams.label, sizeof(int)*dim, deviceId, threadId);
+ }
+ // true label
+ if (layerCopy->accuracyParams.trueLabel != NULL && layer->accuracyParams.trueLabel != NULL) {
+        timlUtilMemcpy(layerCopy->accuracyParams.trueLabel, layer->accuracyParams.trueLabel, sizeof(int)*layer->maxBatchSize*layer->row*layer->col, deviceId, threadId);
+ }
+ return 0;
+}
diff --git a/src/common/cnn/timlCNNAccuracyReadFromTextFile.c b/src/common/cnn/timlCNNAccuracyReadFromTextFile.c
--- /dev/null
@@ -0,0 +1,100 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNAccuracyReadFromTextFile.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Read the accuracy layer from a text file
+ * \param[in] fp1 FILE ptr to the level 1 text file
+ * \param[in,out] cnn CNN
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNAccuracyReadFromTextFile(FILE *fp1, timlConvNeuralNetwork *cnn)
+{
+ int err;
+ int intBuffer;
+ int row;
+ int col;
+ int channel;
+ int top;
+ int read;
+ char str[TIML_UTIL_MAX_STR];
+
+ // init
+ err = 0;
+
+ read = fscanf(fp1, "%[^.].layer(%d).row = %d;\n", (char*)&str, &intBuffer, &row);
+ if (read != 3) {
+ timlCNNDelete(cnn);
+ return ERROR_CNN_READ_FILE;
+ }
+ read = fscanf(fp1, "%[^.].layer(%d).col = %d;\n", (char*)&str, &intBuffer, &col);
+ if (read != 3) {
+ timlCNNDelete(cnn);
+ return ERROR_CNN_READ_FILE;
+ }
+ read = fscanf(fp1, "%[^.].layer(%d).channel = %d;\n", (char*)&str, &intBuffer, &channel);
+ if (read != 3) {
+ timlCNNDelete(cnn);
+ return ERROR_CNN_READ_FILE;
+ }
+ read = fscanf(fp1, "%[^.].layer(%d).accuracyParams.top = %d;\n", (char*)&str, &intBuffer, &top);
+ if (read != 3) {
+ timlCNNDelete(cnn);
+ return ERROR_CNN_READ_FILE;
+ }
+
+    // create structure
+ err = timlCNNAddAccuracyLayer(cnn, top);
+ if (err) {
+ timlCNNDelete(cnn);
+ return err;
+ }
+
+ return 0;
+}
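Given the fscanf patterns above, the level-1 text file is expected to describe an accuracy layer with four lines of the following shape (the network name and layer index here are illustrative):

    cnn.layer(12).row = 1;
    cnn.layer(12).col = 1;
    cnn.layer(12).channel = 8;
    cnn.layer(12).accuracyParams.top = 1;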
diff --git a/src/common/cnn/timlCNNAccuracyWriteToFile.c b/src/common/cnn/timlCNNAccuracyWriteToFile.c
--- /dev/null
@@ -0,0 +1,81 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNAccuracyWriteToFile.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Write the accuracy layer to file(s)
+ * \param[in,out] fp1 FILE ptr to the level 1 text file
+ * \param[in,out] fp2 FILE ptr to the level 2 bin file
+ * \param[in,out] fp3 FILE ptr to the level 3 bin file
+ * \param[in] layer Layer ptr
+ * \param[in] level Write level
+ * \param[in] name CNN name
+ * \param[in] floatFormat Format string for floats
+ * \param[in] intFormat Format string for ints
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNAccuracyWriteToFile(FILE *fp1, FILE *fp2, FILE *fp3, timlCNNLayer *layer, timlUtilParamsLevel level, const char* name, const char *floatFormat, const char *intFormat)
+{
+ int err;
+ int dim;
+
+ err = 0;
+ fprintf(fp1, "%s.layer(%d).accuracyParams.top = %d;\n", name, layer->id + 1, layer->accuracyParams.top);
+
+ if (fp3 != NULL) {
+ dim = layer->maxBatchSize*layer->row*layer->col*layer->channel;
+        // feature map
+ timlUtilFwrite(layer->featureMap, sizeof(float), dim, fp3);
+ // label
+ timlUtilFwrite(layer->accuracyParams.label, sizeof(int), dim, fp3);
+ // true label
+ timlUtilFwrite(layer->accuracyParams.trueLabel, sizeof(int), layer->maxBatchSize*layer->row*layer->col, fp3);
+ }
+ return err;
+}
diff --git a/src/common/cnn/timlCNNAddAccuracyLayer.c b/src/common/cnn/timlCNNAddAccuracyLayer.c
--- /dev/null
@@ -0,0 +1,98 @@
+/******************************************************************************/\r
+/*!\r
+ * \file timlCNNAddAccuracyLayer.c\r
+ */\r
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/\r
+ *\r
+ * Redistribution and use in source and binary forms, with or without\r
+ * modification, are permitted provided that the following conditions\r
+ * are met:\r
+ *\r
+ * Redistributions of source code must retain the above copyright\r
+ * notice, this list of conditions and the following disclaimer.\r
+ *\r
+ * Redistributions in binary form must reproduce the above copyright\r
+ * notice, this list of conditions and the following disclaimer in the\r
+ * documentation and/or other materials provided with the\r
+ * distribution.\r
+ *\r
+ * Neither the name of Texas Instruments Incorporated nor the names of\r
+ * its contributors may be used to endorse or promote products derived\r
+ * from this software without specific prior written permission.\r
+ *\r
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+ *\r
+ ******************************************************************************/\r
+\r
+\r
+/*******************************************************************************\r
+ *\r
+ * INCLUDES\r
+ *\r
+ ******************************************************************************/\r
+\r
+#include "../api/timl.h"\r
+\r
+\r
+/******************************************************************************/\r
+/*!\r
+ * \ingroup cnn\r
+ * \brief Add accuracy layer\r
+ * \param[in,out] cnn CNN\r
+ * \param[in]     top       Top-N label accuracy\r
+ * \return Error code\r
+ */\r
+/******************************************************************************/\r
+\r
+int timlCNNAddAccuracyLayer(timlConvNeuralNetwork *cnn, int top)\r
+{\r
+ timlCNNLayer *prev;\r
+ timlCNNLayer *accuracyLayer;\r
+\r
+ // error checking\r
+ if (cnn == NULL) {\r
+ return ERROR_CNN_NULL_PTR;\r
+ }\r
+ if (cnn->tail == NULL) {\r
+ return ERROR_CNN_EMPTY;\r
+ }\r
+\r
+ if (timlUtilMallocHost((void**)&accuracyLayer, sizeof(timlCNNLayer))) {\r
+ return ERROR_CNN_LAYER_ALLOCATION;\r
+ }\r
+\r
+ prev = cnn->tail;\r
+ accuracyLayer->type = CNN_Accuracy;\r
+ accuracyLayer->row = prev->row;\r
+ accuracyLayer->col = prev->col;\r
+ accuracyLayer->channel = prev->channel;\r
+ accuracyLayer->batchSize = prev->batchSize;\r
+ accuracyLayer->maxBatchSize = prev->maxBatchSize;\r
+ accuracyLayer->allocatorLevel = cnn->params.allocatorLevel;\r
+ accuracyLayer->phase = cnn->params.phase;\r
+ accuracyLayer->featureMap = NULL;\r
+ accuracyLayer->delta = NULL;\r
+ accuracyLayer->accuracyParams.label = NULL;\r
+ accuracyLayer->accuracyParams.top = top;\r
+\r
+ // link the list\r
+ accuracyLayer->cnn = cnn;\r
+ accuracyLayer->id = prev->id + 1;\r
+ accuracyLayer->prev = prev;\r
+ accuracyLayer->prev->next = accuracyLayer;\r
+ accuracyLayer->next = NULL;\r
+ cnn->tail = accuracyLayer;\r
+\r
+ return 0;\r
+\r
+}\r
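A minimal usage sketch (assuming cnn has already been built up to its class-score layer; error handling abbreviated):

    /* count a classification as correct when the true label is among
       the top 5 predicted labels */
    if (timlCNNAddAccuracyLayer(cnn, 5) != 0) {
        /* NULL/empty network or layer allocation failure */
    }

The new layer inherits row, col, channel and the batch sizes from the current tail, so it can be appended to any output geometry.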
index cd784ce4c34533950f86a2403a20e8b07f76d4ee..32677d69936c3b4c052deb2e4e8da09323b82557 100644 (file)
@@ -119,8 +119,7 @@ int timlCNNAddConvLayer(timlConvNeuralNetwork *cnn, int kernelRow, int kernelCol
}\r
\r
// allocate conv layer\r
- convLayer = (timlCNNLayer*) malloc(sizeof(timlCNNLayer));\r
- if (convLayer == NULL) {\r
+ if (timlUtilMallocHost((void**)&convLayer, sizeof(timlCNNLayer))) {\r
return ERROR_CNN_LAYER_ALLOCATION;\r
}\r
\r
@@ -137,12 +136,14 @@ int timlCNNAddConvLayer(timlConvNeuralNetwork *cnn, int kernelRow, int kernelCol
convLayer->convParams.biasInc = NULL;\r
convLayer->convParams.biasGradAccum = NULL;\r
convLayer->convParams.connectivity = NULL;\r
+ convLayer->convParams.biasMultiplier = NULL;\r
convLayer->convParams.inputFeatureMapChannel = prevFeatureMapChannel;\r
convLayer->convParams.outputFeatureMapChannel = featureMapChannel;\r
convLayer->convParams.kernelRow = kernelRow;\r
convLayer->convParams.kernelCol = kernelCol;\r
convLayer->convParams.strideX = strideX;\r
convLayer->convParams.strideY = strideY;\r
+ convLayer->convParams.shared = false;\r
\r
// // set inactive params\r
// convLayer->inputParams = timlCNNInputParamsDefault();\r
@@ -159,6 +160,8 @@ int timlCNNAddConvLayer(timlConvNeuralNetwork *cnn, int kernelRow, int kernelCol
convLayer->row = currFeatureMapRow;\r
convLayer->col = currFeatureMapCol;\r
convLayer->channel = featureMapChannel;\r
+ convLayer->batchSize = prev->batchSize;\r
+ convLayer->maxBatchSize = prev->maxBatchSize;\r
\r
// link the convLayer\r
convLayer->cnn = cnn;\r
index aa6460e121d1297ecb3bf2dbed99614300aeef29..3df9fb2a6262ee0768a7203bcad6275ab11ac2a8 100644 (file)
}
prev = cnn->tail;
- newLayer = (timlCNNLayer*) malloc(sizeof(timlCNNLayer));
- if (newLayer == NULL) {
+ if (timlUtilMallocHost((void**)&newLayer, sizeof(timlCNNLayer))) {
return ERROR_CNN_LAYER_ALLOCATION;
}
newLayer->row = prev->row;
newLayer->col = prev->col;
newLayer->channel = prev->channel;
+ newLayer->batchSize = prev->batchSize;
+ newLayer->maxBatchSize = prev->maxBatchSize;
newLayer->dropoutParams.prob = prob;
newLayer->dropoutParams.mask = NULL;
- newLayer->dropoutParams.randomVector = NULL;
newLayer->featureMap = NULL;
newLayer->delta = NULL;
newLayer->phase = cnn->params.phase;
index d2d2c51e3b489379f3d6dd703b7adf296747e670..c1576ce8a8d156c3f539ce02f29a86b69c5dd3be 100644 (file)
}
// setup input layer
- newLayer = malloc(sizeof(timlCNNLayer));
- if (newLayer == NULL) {
+ if (timlUtilMallocHost((void**)&newLayer, sizeof(timlCNNLayer))) {
return ERROR_CNN_LAYER_ALLOCATION;
}
newLayer->channel = featureMapChannel;
newLayer->row = featureMapRow;
newLayer->col = featureMapCol;
+ newLayer->batchSize = cnn->params.batchSize;
+ newLayer->maxBatchSize = cnn->params.maxBatchSize;
newLayer->inputParams = params;
if (newLayer->inputParams.row == -1) {
if (newLayer->inputParams.channel == -1) {
newLayer->inputParams.channel = newLayer->channel;
}
+ newLayer->inputParams.shared = false;
+ newLayer->inputParams.inputData = NULL;
newLayer->featureMap = NULL;
newLayer->delta = NULL;
index 1c49f00eb13538fd44e377d13bdfa24face193a7..fb0a2ef56d813274756b86e5b079411cfc30cdec 100644 (file)
prevDim = prev->row * prev->col * prev->channel;
// allocate linear layer
- linearLayer = (timlCNNLayer*)malloc(sizeof(timlCNNLayer));
- if (linearLayer == NULL) {
+ if (timlUtilMallocHost((void**)&linearLayer, sizeof(timlCNNLayer))) {
return ERROR_CNN_LAYER_ALLOCATION;
}
linearLayer->linearParams.bias = NULL;
linearLayer->linearParams.biasInc = NULL;
linearLayer->linearParams.biasGradAccum = NULL;
+ linearLayer->linearParams.biasMultiplier = NULL;
linearLayer->linearParams.prevDim = prevDim;
linearLayer->linearParams.dim = dim;
+ linearLayer->linearParams.shared = false;
linearLayer->type = CNN_Linear;
linearLayer->featureMap = NULL;
@@ -105,6 +106,8 @@ int timlCNNAddLinearLayer(timlConvNeuralNetwork *cnn, int dim, timlCNNLinearPara
linearLayer->row = 1;
linearLayer->col = 1;
linearLayer->channel = dim;
+ linearLayer->batchSize = prev->batchSize;
+ linearLayer->maxBatchSize = prev->maxBatchSize;
linearLayer->allocatorLevel = cnn->params.allocatorLevel;
linearLayer->phase = cnn->params.phase;
index fd58abad7c8842e0857aa5877304ba1840defbdf..9ed23a815a62c70392e3c46e7d6135b3d0d0bd06 100644 (file)
return ERROR_CNN_EMPTY;\r
}\r
\r
- nonlinearLayer = (timlCNNLayer*) malloc(sizeof(timlCNNLayer));\r
- if (nonlinearLayer == NULL) {\r
+ if (timlUtilMallocHost((void**)&nonlinearLayer, sizeof(timlCNNLayer))) {\r
return ERROR_CNN_LAYER_ALLOCATION;\r
}\r
\r
nonlinearLayer->row = prev->row;\r
nonlinearLayer->col = prev->col;\r
nonlinearLayer->channel = prev->channel;\r
+ nonlinearLayer->batchSize = prev->batchSize;\r
+ nonlinearLayer->maxBatchSize = prev->maxBatchSize;\r
nonlinearLayer->allocatorLevel = cnn->params.allocatorLevel;\r
nonlinearLayer->phase = cnn->params.phase;\r
nonlinearLayer->featureMap = NULL;\r
index ce3147a35d9d8d16e07778a4552ce16de2b0cfd3..86c370e6a7cbbc19ae3d927d5b7b9f84355f6247 100644 (file)
return ERROR_CNN_NORM_LAYER_PARAMS;\r
}\r
\r
- newLayer = (timlCNNLayer*) malloc(sizeof(timlCNNLayer));\r
- if (newLayer == NULL) {\r
+ if (timlUtilMallocHost((void**)&newLayer, sizeof(timlCNNLayer))) {\r
return ERROR_CNN_LAYER_ALLOCATION;\r
}\r
\r
newLayer->row = prev->row;\r
newLayer->col = prev->col;\r
newLayer->channel = prev->channel;\r
+ newLayer->batchSize = prev->batchSize;\r
+ newLayer->maxBatchSize = prev->maxBatchSize;\r
newLayer->normParams = params;\r
newLayer->allocatorLevel = cnn->params.allocatorLevel;\r
newLayer->phase = cnn->params.phase;\r
index 8656e49ffd41ecf5fb75e710e8a1666923d2d9bc..7ac3deef15975d71fcd58e8899e0270f7fc1e3b2 100644 (file)
return ERROR_CNN_POOLING_LAYER_STRIDE_SIZE;\r
}\r
\r
- newLayer = malloc(sizeof(timlCNNLayer));\r
- if (newLayer == NULL) {\r
+ if (timlUtilMallocHost((void**)&newLayer, sizeof(timlCNNLayer))) {\r
return ERROR_CNN_LAYER_ALLOCATION;\r
}\r
\r
@@ -118,6 +117,8 @@ int timlCNNAddPoolingLayer(timlConvNeuralNetwork *cnn, int scaleRow, int scaleCo
if (newLayer->row < 1 || newLayer->col < 1) {\r
return ERROR_CNN_FEATURE_MAP_SIZE;\r
}\r
+ newLayer->batchSize = prev->batchSize;\r
+ newLayer->maxBatchSize = prev->maxBatchSize;\r
newLayer->phase = cnn->params.phase;\r
newLayer->allocatorLevel = cnn->params.allocatorLevel;\r
newLayer->featureMap = NULL;\r
diff --git a/src/common/cnn/timlCNNAddSoftmaxCostLayer.c b/src/common/cnn/timlCNNAddSoftmaxCostLayer.c
--- /dev/null
@@ -0,0 +1,99 @@
+/******************************************************************************/\r
+/*!\r
+ * \file timlCNNAddSoftmaxCostLayer.c\r
+ */\r
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/\r
+ *\r
+ * Redistribution and use in source and binary forms, with or without\r
+ * modification, are permitted provided that the following conditions\r
+ * are met:\r
+ *\r
+ * Redistributions of source code must retain the above copyright\r
+ * notice, this list of conditions and the following disclaimer.\r
+ *\r
+ * Redistributions in binary form must reproduce the above copyright\r
+ * notice, this list of conditions and the following disclaimer in the\r
+ * documentation and/or other materials provided with the\r
+ * distribution.\r
+ *\r
+ * Neither the name of Texas Instruments Incorporated nor the names of\r
+ * its contributors may be used to endorse or promote products derived\r
+ * from this software without specific prior written permission.\r
+ *\r
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+ *\r
+ ******************************************************************************/\r
+\r
+\r
+/*******************************************************************************\r
+ *\r
+ * INCLUDES\r
+ *\r
+ ******************************************************************************/\r
+\r
+#include "../api/timl.h"\r
+\r
+/******************************************************************************/\r
+\r
+\r
+/******************************************************************************/\r
+/*!\r
+ * \ingroup cnn\r
+ * \brief Add softmax cost layer\r
+ * \param[in] cnn CNN\r
+ * \return Error code\r
+ */\r
+/******************************************************************************/\r
+\r
+int timlCNNAddSoftmaxCostLayer(timlConvNeuralNetwork *cnn)\r
+{\r
+ timlCNNLayer *prev;\r
+ timlCNNLayer *softmaxCostLayer;\r
+\r
+ // error checking\r
+ if (cnn == NULL) {\r
+ return ERROR_CNN_NULL_PTR;\r
+ }\r
+ if (cnn->tail == NULL) {\r
+ return ERROR_CNN_EMPTY;\r
+ }\r
+\r
+ if (timlUtilMallocHost((void**)&softmaxCostLayer, sizeof(timlCNNLayer))) {\r
+ return ERROR_CNN_LAYER_ALLOCATION;\r
+ }\r
+\r
+ prev = cnn->tail;\r
+ softmaxCostLayer->type = CNN_SoftmaxCost;\r
+ softmaxCostLayer->row = prev->row;\r
+ softmaxCostLayer->col = prev->col;\r
+ softmaxCostLayer->channel = prev->channel;\r
+ softmaxCostLayer->batchSize = prev->batchSize;\r
+ softmaxCostLayer->maxBatchSize = prev->maxBatchSize;\r
+ softmaxCostLayer->allocatorLevel = cnn->params.allocatorLevel;\r
+ softmaxCostLayer->phase = cnn->params.phase;\r
+ softmaxCostLayer->featureMap = NULL;\r
+ softmaxCostLayer->delta = NULL;\r
+ softmaxCostLayer->softmaxParams.max = NULL;\r
+ softmaxCostLayer->softmaxParams.sum = NULL;\r
+\r
+ // link the list\r
+ softmaxCostLayer->cnn = cnn;\r
+ softmaxCostLayer->id = prev->id + 1;\r
+ softmaxCostLayer->prev = prev;\r
+ softmaxCostLayer->prev->next = softmaxCostLayer;\r
+ softmaxCostLayer->next = NULL;\r
+ cnn->tail = softmaxCostLayer;\r
+\r
+ return 0;\r
+\r
+}\r
diff --git a/src/common/cnn/timlCNNAddSoftmaxLayer.c b/src/common/cnn/timlCNNAddSoftmaxLayer.c
--- /dev/null
@@ -0,0 +1,99 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNAddSoftmaxLayer.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+/******************************************************************************/
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Add softmax layer
+ * \param[in] cnn CNN
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNAddSoftmaxLayer(timlConvNeuralNetwork *cnn)
+{
+ timlCNNLayer *prev;
+ timlCNNLayer *softmaxLayer;
+
+ // error checking
+ if (cnn == NULL) {
+ return ERROR_CNN_NULL_PTR;
+ }
+ if (cnn->tail == NULL) {
+ return ERROR_CNN_EMPTY;
+ }
+
+ if (timlUtilMallocHost((void**)&softmaxLayer, sizeof(timlCNNLayer))) {
+ return ERROR_CNN_LAYER_ALLOCATION;
+ }
+
+ prev = cnn->tail;
+ softmaxLayer->type = CNN_Softmax;
+ softmaxLayer->row = prev->row;
+ softmaxLayer->col = prev->col;
+ softmaxLayer->channel = prev->channel;
+ softmaxLayer->batchSize = prev->batchSize;
+ softmaxLayer->maxBatchSize = prev->maxBatchSize;
+ softmaxLayer->allocatorLevel = cnn->params.allocatorLevel;
+ softmaxLayer->phase = cnn->params.phase;
+ softmaxLayer->featureMap = NULL;
+ softmaxLayer->delta = NULL;
+ softmaxLayer->softmaxParams.max = NULL;
+ softmaxLayer->softmaxParams.sum = NULL;
+ softmaxLayer->softmaxParams.jacob = NULL;
+
+ // link the list
+ softmaxLayer->cnn = cnn;
+ softmaxLayer->id = prev->id + 1;
+ softmaxLayer->prev = prev;
+ softmaxLayer->prev->next = softmaxLayer;
+ softmaxLayer->next = NULL;
+ cnn->tail = softmaxLayer;
+
+ return 0;
+
+}
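Taken together, the new layer types form the usual tail of a classification network. A hedged sketch of how they combine (the ordering is inferred from this commit, not mandated by it):

    timlCNNAddSoftmaxLayer(cnn);        /* class probabilities            */
    timlCNNAddAccuracyLayer(cnn, 1);    /* top-1 hit counting for testing */

    /* for training, the cost layer is appended instead so that
       back propagation can start from a loss gradient: */
    /* timlCNNAddSoftmaxCostLayer(cnn); */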
index e630377d2b3ba6545f672d583e82f1e57badf608..67e56a6059961cac5f8281357a14ae87c409eb4e 100644 (file)
*/\r
/******************************************************************************/\r
\r
-int timlCNNBackPropagation(timlConvNeuralNetwork *cnn, timlCNNLayer *layer)\r
+int timlCNNBackPropagation(timlConvNeuralNetwork *cnn)\r
{\r
int err = 0;\r
+ timlCNNLayer *layer = cnn->tail;\r
// search backward until the second layer\r
- while (layer->prev != NULL) {\r
+ while (layer->type != CNN_Input) {\r
switch (layer->type) {\r
- case CNN_Input:\r
+ case CNN_SoftmaxCost:\r
+ err = timlCNNSoftmaxCostBackPropagation(layer);\r
+ if (err) return err;\r
+ break;\r
+ case CNN_Softmax:\r
+ err = timlCNNSoftmaxBackPropagation(layer);\r
+ if (err) return err;\r
break;\r
case CNN_Conv:\r
err = timlCNNConvBackPropagation(layer);\r
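Since the starting layer is now taken from cnn->tail internally, call sites change roughly as follows (a sketch; the commented-out form is the pre-commit signature):

    /* before: err = timlCNNBackPropagation(cnn, cnn->tail); */
    err = timlCNNBackPropagation(cnn);

The walk now also terminates on layer->type != CNN_Input rather than layer->prev != NULL, so the input layer itself is never dispatched.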
diff --git a/src/common/cnn/timlCNNClassifyAccuracy.c b/src/common/cnn/timlCNNClassifyAccuracy.c
--- /dev/null
@@ -0,0 +1,99 @@
+/*****************************************************************************/
+/*!
+ * \file timlCNNClassifyAccuracy.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Batch classification with accuracy counting
+ * \param[in,out] cnn CNN (must end with an accuracy layer)
+ * \param[in] image Image batch
+ * \param[in] row Image row dimension
+ * \param[in] col Image column dimension
+ * \param[in] channel Image channel dimension
+ * \param[in] label Label array ptr (may be NULL)
+ * \param[in] labelRow Label row dimension
+ * \param[in] labelCol Label column dimension
+ * \param[in] num Number of images (should be a multiple of the batch size)
+ * \param[out] success Number of successful classifications
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNClassifyAccuracy(timlConvNeuralNetwork *cnn, float *image, int row, int col, int channel, int *label, int labelRow, int labelCol, int num, int *success)
+{
+ int i;
+ int batchSize;
+ int batchNum;
+ int dataDim;
+ int labelDim;
+ int err;
+
+ // error checking
+ if (cnn->head->inputParams.row != row || cnn->head->inputParams.col != col || cnn->head->inputParams.channel != channel) {
+ return ERROR_CNN_IMAGE_DIM_MISMATCH;
+ }
+ if (cnn->tail->row != labelRow || cnn->tail->col != labelCol ) {
+ return ERROR_CNN_LABEL_DIM_MISMATCH;
+ }
+ if (cnn->tail->type != CNN_Accuracy) {
+ return ERROR_CNN_CLASS;
+ }
+
+ err = 0;
+ *success = 0;
+ batchSize = cnn->params.batchSize;
+ batchNum = num/batchSize;
+ dataDim = row*col*channel;
+ labelDim = labelRow*labelCol;
+
+ for (i = 0; i < batchNum; i++) {
+ err = timlCNNLoadImage(cnn, image + i*dataDim*batchSize, row, col, channel, batchSize);
+ if (label != NULL) {
+ err = timlCNNLoadLabel(cnn, label + i*labelDim*batchSize, labelRow, labelCol, batchSize);
+ }
+ err = timlCNNForwardPropagation(cnn);
+ *success += cnn->tail->accuracyParams.success;
+ }
+
+ return err;
+}
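Because batchNum = num/batchSize uses integer division, trailing images that do not fill a whole batch are silently skipped, so num should be a multiple of the configured batch size. An illustrative call with CIFAR-10-like dimensions (the image and label buffers are assumed to be loaded elsewhere):

    int success = 0;
    err = timlCNNClassifyAccuracy(cnn, image, 32, 32, 3,
                                  label, 1, 1, 10000, &success);
    /* success now holds the number of top-N hits over all full batches */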
diff --git a/src/common/cnn/timlCNNClassifyAccuracyTeamModeOpenMP.c b/src/common/cnn/timlCNNClassifyAccuracyTeamModeOpenMP.c
--- /dev/null
@@ -0,0 +1,108 @@
+/*****************************************************************************/
+/*!
+ * \file timlCNNClassifyAccuracyTeamModeOpenMP.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Batch classification with accuracy counting, parallelized over a CNN team
+ * \param[in,out] cnnTeam CNN team (one network copy per worker thread)
+ * \param[in] teamNum CNN team size
+ * \param[in] image Image batch
+ * \param[in] row Image row dimension
+ * \param[in] col Image column dimension
+ * \param[in] channel Image channel dimension
+ * \param[in] label Label array ptr
+ * \param[in] labelRow Label row dimension
+ * \param[in] labelCol Label column dimension
+ * \param[in] num Number of images (should be a multiple of the batch size)
+ * \param[out] success Number of successful classifications
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNClassifyAccuracyTeamModeOpenMP(timlConvNeuralNetwork **cnnTeam, int teamNum, float *image, int row, int col, int channel, int *label, int labelRow, int labelCol, int num, int *success)
+{
+ int i;
+ int batchSize;
+ int batchNum;
+ int err;
+ int successLocal;
+ int thread;
+ int t;
+ int dataDim;
+ int labelDim;
+ timlConvNeuralNetwork *cnn;
+
+ cnn = cnnTeam[0];
+ err = 0;
+ successLocal = 0;
+ batchSize = cnn->params.batchSize;
+ batchNum = num/batchSize;
+ dataDim = row*col*channel;
+ labelDim = labelRow*labelCol;
+
+ if (cnn->tail->type != CNN_Accuracy) {
+ return ERROR_CNN_CLASS;
+ }
+
+ thread = omp_get_max_threads();
+    if (thread > teamNum) { // more threads than CNN copies
+ thread = teamNum;
+ }
+
+ #pragma omp parallel num_threads(thread) private(t, i, err)
+ {
+ #pragma omp for reduction(+:successLocal)
+ for (i = 0; i < batchNum; i++) {
+ t = omp_get_thread_num(); // get thread id
+ err = timlCNNLoadImage(cnnTeam[t], image + i*dataDim*batchSize, row, col, channel, batchSize);
+ err = timlCNNLoadLabel(cnnTeam[t], label + i*labelDim*batchSize, labelRow, labelCol, batchSize);
+ err = timlCNNForwardPropagation(cnnTeam[t]);
+ successLocal += cnnTeam[t]->tail->accuracyParams.success;
+ }
+ }
+
+ *success = successLocal;
+ return err;
+}
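A hedged sketch of setting up and running a CNN team, one clone per OpenMP worker (omp.h and stdlib.h assumed; cnn, image, label, err and success as in the previous sketch; the clone call uses the reworked timlCNNClone signature from this commit):

    int teamNum = omp_get_max_threads();
    timlConvNeuralNetwork **cnnTeam = malloc(teamNum * sizeof(*cnnTeam));
    cnnTeam[0] = cnn;
    for (int t = 1; t < teamNum; t++) {
        cnnTeam[t] = timlCNNClone(cnn);    /* per-thread copy */
    }
    err = timlCNNClassifyAccuracyTeamModeOpenMP(cnnTeam, teamNum, image,
                                                32, 32, 3, label, 1, 1,
                                                10000, &success);

Each thread drives its own network copy, so the only shared state is the reduction variable successLocal.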
index 70ca4eb09ca7937ac382c3bb7f0fe3b601b09ef5..29700e4cc83626b08c5358e681915962541a63b2 100644 (file)
* \ingroup cnn\r
* \brief Clone a cnn\r
* \param[in] cnn CNN to be cloned\r
- * \param[in] deviceId Device Id\r
* \return Cloned cnn\r
*/\r
/******************************************************************************/\r
\r
-timlConvNeuralNetwork *timlCNNClone(timlConvNeuralNetwork *cnn, int deviceId)\r
+timlConvNeuralNetwork *timlCNNClone(timlConvNeuralNetwork *cnn)\r
{\r
int err;\r
int dim;\r
timlCNNLayer *layer;\r
timlCNNLayer *layerCopy;\r
int threadId;\r
+ int deviceId;\r
\r
// init\r
layer = cnn->head;\r
+ deviceId = cnn->deviceId;\r
threadId = cnn->threadId;\r
- cnnCopy = timlCNNCreateConvNeuralNetwork(cnn->params, deviceId);\r
+ cnnCopy = timlCNNCreateConvNeuralNetwork(cnn->params);\r
if (cnnCopy == NULL) {\r
return NULL;\r
}\r
\r
+ cnnCopy->params = cnn->params;\r
+\r
+ // deep copy\r
+ strcpy(cnnCopy->configFileName, cnn->configFileName);\r
+ strcpy(cnnCopy->paramsFileName, cnn->paramsFileName);\r
+ strcpy(cnnCopy->statesFileName, cnn->statesFileName);\r
+\r
while (layer != NULL) {\r
switch (layer->type) {\r
case CNN_Input:\r
return NULL;\r
}\r
break;\r
- }\r
- layer = layer->next;\r
- }\r
-\r
- // allocation\r
- err = timlCNNInitialize(cnnCopy);\r
- if (err) {\r
- timlCNNDelete(cnnCopy);\r
- return NULL;\r
- }\r
-\r
- // copy parameters\r
- layerCopy = cnnCopy->head;\r
- layer = cnn->head;\r
- while (layer != NULL) {\r
- switch (layer->type) {\r
- case CNN_Input:\r
- // copy mean\r
- dim = layer->inputParams.row*layer->inputParams.col*layer->inputParams.channel;\r
- timlUtilBLASscopy(dim, layer->inputParams.mean, layerCopy->inputParams.mean, deviceId, threadId);\r
- break;\r
- case CNN_Conv:\r
- dim = layer->convParams.inputFeatureMapChannel*layer->convParams.outputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol;\r
- timlUtilBLASscopy(dim, layer->convParams.kernel, layerCopy->convParams.kernel, deviceId, threadId);\r
- timlUtilBLASscopy(layer->channel, layer->convParams.bias, layerCopy->convParams.bias, deviceId, threadId);\r
- if (layer->allocatorLevel == Util_AllocatorLevel1) { // training mode\r
- timlUtilBLASscopy(dim, layer->convParams.kernelInc, layerCopy->convParams.kernelInc, deviceId, threadId);\r
- timlUtilBLASscopy(dim, layer->convParams.kernelGradAccum, layerCopy->convParams.kernelGradAccum, deviceId, threadId);\r
- timlUtilBLASscopy(layer->channel, layer->convParams.biasInc, layerCopy->convParams.biasInc, deviceId, threadId);\r
- timlUtilBLASscopy(layer->channel, layer->convParams.biasGradAccum, layerCopy->convParams.biasGradAccum, deviceId, threadId);\r
+ case CNN_Softmax:\r
+ err = timlCNNAddSoftmaxLayer(cnnCopy);\r
+ if (err) {\r
+ timlCNNDelete(cnnCopy);\r
+ return NULL;\r
}\r
break;\r
- case CNN_Linear:\r
- dim = layer->linearParams.dim*layer->linearParams.prevDim;\r
- timlUtilBLASscopy(dim, layer->linearParams.weight, layerCopy->linearParams.weight, deviceId, threadId);\r
- timlUtilBLASscopy(layer->channel, layer->linearParams.bias, layerCopy->linearParams.bias, deviceId, threadId);\r
- if (layer->allocatorLevel == Util_AllocatorLevel1) { // training mode\r
- timlUtilBLASscopy(dim, layer->linearParams.weightInc, layerCopy->linearParams.weightInc, deviceId, threadId);\r
- timlUtilBLASscopy(dim, layer->linearParams.weightGradAccum, layerCopy->linearParams.weightGradAccum, deviceId, threadId);\r
- timlUtilBLASscopy(layer->channel, layer->linearParams.biasInc, layerCopy->linearParams.biasInc, deviceId, threadId);\r
- timlUtilBLASscopy(layer->channel, layer->linearParams.biasGradAccum, layerCopy->linearParams.biasGradAccum, deviceId, threadId);\r
+ case CNN_SoftmaxCost:\r
+ err = timlCNNAddSoftmaxCostLayer(cnnCopy);\r
+ if (err) {\r
+ timlCNNDelete(cnnCopy);\r
+ return NULL;\r
}\r
break;\r
- case CNN_Norm:\r
- break;\r
- case CNN_Pooling:\r
- break;\r
- case CNN_Nonlinear:\r
+ case CNN_Accuracy:\r
+ err = timlCNNAddAccuracyLayer(cnnCopy, layer->accuracyParams.top);\r
+ if (err) {\r
+ timlCNNDelete(cnnCopy);\r
+ return NULL;\r
+ }\r
break;\r
- case CNN_Dropout:\r
+ default:\r
break;\r
}\r
- layer = layer->next;\r
- layerCopy = layerCopy->next;\r
+ layer = layer->next;\r
}\r
-\r
return cnnCopy;\r
}\r
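Existing callers simply drop the second argument; the clone now always lands on the same device as its source:

    /* before: cnnCopy = timlCNNClone(cnn, deviceId); */
    cnnCopy = timlCNNClone(cnn);    /* deviceId is taken from cnn->deviceId */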
diff --git a/src/common/cnn/timlCNNConvBackPropagation.c b/src/common/cnn/timlCNNConvBackPropagation.c
index e5d1796c9fca8cc780de16603b371bd4108aae6b..2d9e1c316726e970e7cf186f88b67f72c28d4c16 100644 (file)
\r
int timlCNNConvBackPropagation(timlCNNLayer *layer)\r
{\r
- int j;\r
int M;\r
int K;\r
int N;\r
+ int b;\r
int err;\r
int prevRow;\r
int prevCol;\r
M = channel;\r
K = kernelRow*kernelCol*prevChannel;\r
N = row*col;\r
+ err = 0;\r
\r
// kernelGrad = delta * prevFeatureMapReshape' -- (M*N)*(N*K)\r
- #pragma omp critical\r
- {\r
- timlUtilBLASsgemm(CblasNoTrans, CblasTrans, M, K, N, 1.0, layer->delta, layer->convParams.prevFeatureMapReshape, 1.0, layer->convParams.kernelGradAccum, deviceId, threadId);\r
- timlUtilBLASsgemv(CblasNoTrans, M, N, 1.0, layer->delta, layer->convParams.biasMultiplier, 1.0, layer->convParams.biasGradAccum, deviceId, threadId);\r
+\r
+ for (b = 0; b < layer->batchSize; b++) {\r
+ timlUtilConv2ImageReshape(layer->convParams.prevFeatureMapReshape, layer->prev->featureMap + b*prevRow*prevCol*prevChannel, layer->convParams.prevFeatureMapReshapeIndex, prevChannel, prevRow*prevCol, kernelRow*kernelCol*row*col, deviceId, threadId);\r
+ #pragma omp critical\r
+ {\r
+ timlUtilBLASsgemm(CblasNoTrans, CblasTrans, M, K, N, 1.0, layer->delta + b*M*N, layer->convParams.prevFeatureMapReshape, 1.0, layer->convParams.kernelGradAccum, deviceId, threadId);\r
+ timlUtilBLASsgemv(CblasNoTrans, M, N, 1.0, layer->delta + b*M*N, layer->convParams.biasMultiplier, 1.0, layer->convParams.biasGradAccum, deviceId, threadId);\r
+ }\r
}\r
\r
// back propagate delta\r
if (layer->prev->delta != NULL) {\r
- // reset prevDelta to 0\r
- timlUtilVectorResetFloat(layer->prev->delta, prevRow*prevCol*prevChannel, 0.0, deviceId, threadId);\r
- // prevDeltaTemp = kernel' * delta -- (K*M)(M*N)\r
- timlUtilBLASsgemm(CblasTrans, CblasNoTrans, K, N, M, 1.0, layer->convParams.kernel, layer->delta, 0.0, layer->convParams.prevFeatureMapReshape, deviceId, threadId);\r
- // reshape prevDeltaTemp to prevDelta\r
- timlUtilConv2ImageReshapeBack(layer->prev->delta, layer->convParams.prevFeatureMapReshape, layer->convParams.prevFeatureMapReshapeIndex, prevChannel, prevRow*prevCol, kernelRow*kernelCol*row*col, deviceId, threadId);\r
+ for (b = 0; b < layer->batchSize; b++) {\r
+ // reset prevDelta to 0\r
+ timlUtilVectorResetFloat(layer->prev->delta + b*prevRow*prevCol*prevChannel, prevRow*prevCol*prevChannel, 0.0, deviceId, threadId);\r
+ // prevDeltaTemp = kernel' * delta -- (K*M)(M*N)\r
+ timlUtilBLASsgemm(CblasTrans, CblasNoTrans, K, N, M, 1.0, layer->convParams.kernel, layer->delta + b*M*N, 0.0, layer->convParams.prevFeatureMapReshape, deviceId, threadId);\r
+ // reshape prevDeltaTemp to prevDelta\r
+ timlUtilConv2ImageReshapeBack(layer->prev->delta + b*prevRow*prevCol*prevChannel, layer->convParams.prevFeatureMapReshape, layer->convParams.prevFeatureMapReshapeIndex, prevChannel, prevRow*prevCol, kernelRow*kernelCol*row*col, deviceId, threadId);\r
+ }\r
}\r
\r
-// double *kernelPtr;\r
-// double *kernelGradAccumPtr;\r
-// double *prevFeatureMapPtr;\r
-// double *prevDeltaPtr;\r
-// double *deltaPtr;\r
-// for (i = 0; i < prevChannel; i++){\r
-// // prevDelta[i]\r
-// prevDeltaPtr = layer->prev->delta + prevRow*prevCol*i;\r
-// // prevFeatureMap[i]\r
-// prevFeatureMapPtr = layer->prev->featureMap + prevRow*prevCol*i;\r
-// for(j = 0; j < channel; j++){\r
-// if (layer->convParams.connectivity[i*channel + j]){\r
-// // delta[j]\r
-// deltaPtr = layer->delta + row*col*j;\r
-// // kernel[i, j] =====> need to implement connectivity\r
-// kernelPtr = layer->convParams.kernel + kernelRow*kernelCol*i*channel + j*kernelRow*kernelCol;\r
-// kernelGradAccumPtr = layer->convParams.kernelGradAccum + kernelRow*kernelCol*i*channel + j*kernelRow*kernelCol;\r
-// // prevDeltaTemp = conv2(delta[j], rot180(kernel[i, j]), 'full')\r
-// timlUtilCorr2Full(deltaPtr, kernelPtr, layer->convParams.prevDeltaTemp,\r
-// row, col, kernelRow, kernelCol);\r
-// // prevDelta[i] += prevDeltaTemp\r
-// cblas_daxpy(prevRow*prevCol, 1.0, layer->convParams.prevDeltaTemp, 1, prevDeltaPtr, 1);\r
-// // kernel gradient[i, j] = conv2(rot180(prevFeatureMap[i]), delta[j], 'valid')\r
-// timlCNNKernalGradient(prevFeatureMapPtr, deltaPtr, layer->convParams.kernelTemp,\r
-// prevRow, prevCol, row, col);\r
-// // add kernel gradient[i, j] to kernel gradient accumulator\r
-// cblas_daxpy(kernelRow*kernelCol, 1.0, layer->convParams.kernelTemp, 1, kernelGradAccumPtr, 1);\r
-// }\r
-// }\r
-// }\r
-//\r
-// // bias gradient[j] = sum(delta[j]), and add to the bias gradient accumulator\r
-// for(j = 0; j < channel; j++){\r
-// layer->convParams.biasGradAccum[j] += timlUtilDoubleVectorSum(layer->delta + row*col*j, row*col);\r
-// }\r
-\r
- return 0;\r
+ return err;\r
}\r
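For reference, with M = outputFeatureMapChannel, K = kernelRow*kernelCol*inputFeatureMapChannel and N = row*col, each pass of the new batch loop performs, for image b:

    kernelGradAccum (M x K) += delta_b (M x N) * prevFeatureMapReshape^T (N x K)
    biasGradAccum   (M x 1) += delta_b (M x N) * biasMultiplier (N x 1)

so gradients are summed over the whole batch in the accumulators before any weight update, and the im2col reshape buffer is rebuilt per image instead of being stored for the full batch.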
diff --git a/src/common/cnn/timlCNNConvForwardPropagation.c b/src/common/cnn/timlCNNConvForwardPropagation.c
index 298eca054bbe31c1ff7ad0017cbad6309fe3784d..81e451b4c590a59196612671c3727fc46d4be008 100644 (file)
int kernelCol;\r
int deviceId;\r
int threadId;\r
+ int b;\r
\r
// init\r
err = 0;\r
kernelCol = layer->convParams.kernelCol;\r
deviceId = prevLayer->cnn->deviceId;\r
threadId = prevLayer->cnn->threadId;\r
+\r
M = featureMapChannel;\r
K = kernelRow*kernelCol*prevFeatureMapChannel;\r
N = featureMapRow*featureMapCol;\r
\r
- timlUtilConv2ImageReshape(layer->convParams.prevFeatureMapReshape, prevLayer->featureMap, layer->convParams.prevFeatureMapReshapeIndex, prevFeatureMapChannel, prevFeatureMapRow*prevFeatureMapCol, kernelRow*kernelCol*featureMapRow*featureMapCol, deviceId, threadId);\r
- // featureMap = kernel * prevFeatureMapReshape\r
- timlUtilBLASsgemm(CblasNoTrans, CblasNoTrans, M, N, K, 1.0, layer->convParams.kernel, layer->convParams.prevFeatureMapReshape, 0.0, layer->featureMap, deviceId, threadId);\r
- timlUtilBLASsgemm(CblasNoTrans, CblasNoTrans, M, N, 1, 1.0, layer->convParams.bias, layer->convParams.biasMultiplier, 1.0, layer->featureMap, deviceId, threadId);\r
\r
-// float *prevFeatureMapPtr;\r
-// float *featureMapPtr;\r
-// float *kernelPtr;\r
-//\r
-// // reshape the prev feature map\r
-// for (j = 0; j < prevFeatureMapChannel; j++){\r
-// timlUtilConv2ImageReshape(layer->convParams.prevFeatureMapReshape + j*(kernelRow*kernelCol*featureMapRow*featureMapCol),\r
-// prevLayer->featureMap + j*prevFeatureMapRow*prevFeatureMapCol, layer->convParams.prevFeatureMapReshapeIndex,\r
-// kernelRow*kernelCol*featureMapRow*featureMapCol);\r
-//\r
-// for (j = 0; j < featureMapChannel; j++){\r
-// // featureMap[j] = bias[j]\r
-// featureMapPtr = layer->featureMap + featureMapRow*featureMapCol*j;\r
-// timlUtilFloatVectorReset(featureMapPtr, featureMapCol * featureMapRow, layer->convParams.bias[j]);\r
-// for (i = 0 ; i < prevFeatureMapChannel; i++){\r
-// // prevFeatureMap[i]\r
-// prevFeatureMapPtr = prevLayer->featureMap + prevFeatureMapRow*prevFeatureMapCol*i;\r
-// // kernel[i, j]\r
-// kernelPtr = layer->convParams.kernel + kernelRow*kernelCol*i + kernelRow*kernelCol*prevFeatureMapChannel*j;\r
-// // featureMapTemp = conv2(prevFeatureMap[i], kernel[i, j], 'valid')\r
-// timlUtilConv2Valid(prevFeatureMapPtr, kernelPtr, featureMapPtr,\r
-// prevFeatureMapRow, prevFeatureMapCol, kernelRow, kernelCol);\r
-// // featureMap[j] += featureMapTemp\r
-// cblas_daxpy(featureMapRow*featureMapCol, 1.0, layer->convParams.featureMapTemp, 1, featureMapPtr, 1);\r
-// }\r
-// }\r
-//\r
-// // featureMap{i} = featureMap{i} + bias[i]\r
-// for (i = 0; i < featureMapChannel; i++){\r
-// for(j = 0; j < N; j++){\r
-// layer->featureMap[i*N + j] += layer->convParams.bias[i];\r
-// }\r
-// }\r
+ // featureMap = kernel * prevFeatureMapReshape\r
+ for (b = 0; b < layer->batchSize; b++) {\r
+ timlUtilConv2ImageReshape(layer->convParams.prevFeatureMapReshape, prevLayer->featureMap + b*prevFeatureMapRow*prevFeatureMapCol*prevFeatureMapChannel, layer->convParams.prevFeatureMapReshapeIndex, prevFeatureMapChannel, prevFeatureMapRow*prevFeatureMapCol, kernelRow*kernelCol*featureMapRow*featureMapCol, deviceId, threadId);\r
+ timlUtilBLASsgemm(CblasNoTrans, CblasNoTrans, M, N, K, 1.0, layer->convParams.kernel, layer->convParams.prevFeatureMapReshape, 0.0, layer->featureMap + b*M*N, deviceId, threadId);\r
+ timlUtilBLASsgemm(CblasNoTrans, CblasNoTrans, M, N, 1, 1.0, layer->convParams.bias, layer->convParams.biasMultiplier, 1.0, layer->featureMap + b*M*N, deviceId, threadId);\r
+ }\r
\r
return err;\r
}\r
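The forward pass is the matching im2col-plus-GEMM formulation, per image b and with the same M, K and N as in the backward pass:

    featureMap_b (M x N)  = kernel (M x K) * prevFeatureMapReshape (K x N)
    featureMap_b (M x N) += bias (M x 1) * biasMultiplier^T (1 x N)

Reusing a single prevFeatureMapReshape buffer across the batch is what forces timlUtilConv2ImageReshape to be re-run inside the loop.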
index cc785f20e7199d88160f4182479976269e0a6ec0..5136fefc09b117814f27cd15c2c5d7c5d99c568f 100644 (file)
int deviceId;
int threadId;
int err;
-
+ char *offset;
// init
err = 0;
cnn = layer->cnn;
deviceId = layer->cnn->deviceId;
threadId = layer->cnn->threadId;
- // common to level 1, 2, 3
-
- // allocate prev feature map reshape, size = (prev->channel*kernelRow*kernelCol) * (row*col)
- if (timlUtilMalloc((void**) &(layer->convParams.prevFeatureMapReshape), sizeof(float)*layer->convParams.inputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol*layer->row*layer->col)) {
- return ERROR_CNN_LAYER_ALLOCATION;
- }
+ // params memory common to level 1, 2, 3
// allocate biasMultiplier
- if (timlUtilMalloc((void**) &(layer->convParams.biasMultiplier), sizeof(float)*layer->row*layer->col)) {
+ if (timlUtilMallocAcc((void**) &(layer->convParams.biasMultiplier), sizeof(float)*layer->row*layer->col)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
timlUtilVectorResetFloat(layer->convParams.biasMultiplier, layer->row*layer->col, 1.0, deviceId, threadId);
if (layer->convParams.shared == false) {
// allocate prevFeatureMapReshapeIndex, size = (kernelRow*kernelCol) * (row*col)
- if (timlUtilMalloc((void**) &(layer->convParams.prevFeatureMapReshapeIndex), sizeof(int)*layer->convParams.kernelRow*layer->convParams.kernelCol*layer->row*layer->col)) {
+ if (timlUtilMallocAcc((void**) &(layer->convParams.prevFeatureMapReshapeIndex), sizeof(int)*layer->convParams.kernelRow*layer->convParams.kernelCol*layer->row*layer->col)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
- timlUtilConv2ImageReshapeIndex(layer->convParams.prevFeatureMapReshapeIndex, prev->row, prev->col, layer->convParams.kernelRow, layer->convParams.kernelCol, layer->convParams.padUp, layer->convParams.padDown, layer->convParams.padLeft, layer->convParams.padRight, layer->convParams.strideX, layer->convParams.strideY, layer->convParams.type);
+ timlUtilConv2ImageReshapeIndex(layer->convParams.prevFeatureMapReshapeIndex, prev->row, prev->col, layer->convParams.kernelRow, layer->convParams.kernelCol, layer->convParams.padUp, layer->convParams.padDown, layer->convParams.padLeft, layer->convParams.padRight, layer->convParams.strideX, layer->convParams.strideY, layer->convParams.type, deviceId, threadId);
// allocate kernel
dim = layer->convParams.inputFeatureMapChannel*layer->convParams.outputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol;
- if (timlUtilMalloc((void**) &(layer->convParams.kernel), sizeof(float) * dim)) {
+ if (timlUtilMallocAcc((void**) &(layer->convParams.kernel), sizeof(float)*dim)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
// allocate bias
- if (timlUtilMalloc((void**) &(layer->convParams.bias), sizeof(float) * layer->channel)) {
+ if (timlUtilMallocAcc((void**) &(layer->convParams.bias), sizeof(float)*layer->channel)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
-
- // allocate connectivity
-// layer->convParams.connectivity =
-// malloc(sizeof(int) * layer->convParams.inputFeatureMapChannel * layer->convParams.outputFeatureMapChannel);
-// if (layer->convParams.connectivity == NULL) {
-// return ERROR_CNN_LAYER_ALLOCATION;
-// }
}
- // level 1 only
+ // level 1
if (layer->allocatorLevel == Util_AllocatorLevel1) {
// allocate feature map
- if (timlUtilMalloc((void**) &(layer->featureMap), sizeof(float)*layer->row*layer->col*layer->channel)) {
+ if (timlUtilMallocAcc((void**) &(layer->featureMap), sizeof(float)*layer->maxBatchSize*layer->row*layer->col*layer->channel)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
// allocate feature map delta
- if (timlUtilMalloc((void**) &(layer->delta), sizeof(float)*layer->row*layer->col*layer->channel)) {
+ if (timlUtilMallocAcc((void**) &(layer->delta), sizeof(float)*layer->maxBatchSize*layer->row*layer->col*layer->channel)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
- dim = layer->convParams.inputFeatureMapChannel*layer->convParams.outputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol;
+ // allocate prev feature map reshape, size = (prev->channel*kernelRow*kernelCol) * (row*col)
+ if (timlUtilMallocAcc((void**) &(layer->convParams.prevFeatureMapReshape), sizeof(float)*layer->convParams.inputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol*layer->row*layer->col)) {
+ return ERROR_CNN_LAYER_ALLOCATION;
+ }
+ // if not shared
if (layer->convParams.shared == false) {
+
// allocate kernelInc
- if (timlUtilMalloc((void**) &(layer->convParams.kernelInc), sizeof(float)*dim)) {
+ if (timlUtilMallocAcc((void**) &(layer->convParams.kernelInc), sizeof(float)*dim)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
// allocate kernelGradAccum
- if (timlUtilMalloc((void**) &(layer->convParams.kernelGradAccum), sizeof(float)*dim)) {
+ if (timlUtilMallocAcc((void**) &(layer->convParams.kernelGradAccum), sizeof(float)*dim)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
// allocate biasGradAccum
- if (timlUtilMalloc((void**) &(layer->convParams.biasGradAccum), sizeof(float)*layer->channel)) {
+ if (timlUtilMallocAcc((void**) &(layer->convParams.biasGradAccum), sizeof(float)*layer->channel)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
// allocate biasInc
- if (timlUtilMalloc((void**) &(layer->convParams.biasInc), sizeof(float)*layer->channel)) {
+ if (timlUtilMallocAcc((void**) &(layer->convParams.biasInc), sizeof(float)*layer->channel)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
}
}
+
// level 2 only
if (layer->allocatorLevel == Util_AllocatorLevel2) {
// allocate feature map
- if (timlUtilMalloc((void**) &(layer->featureMap), sizeof(float)*layer->row*layer->col*layer->channel)) {
- return ERROR_CNN_LAYER_ALLOCATION;
- }
+ if (timlUtilMallocAcc((void**) &(layer->featureMap), sizeof(float)*layer->maxBatchSize*layer->row*layer->col*layer->channel)) {
+ return ERROR_CNN_LAYER_ALLOCATION;
+ }
+
+ // allocate prev feature map reshape, size = (prev->channel*kernelRow*kernelCol) * (row*col)
+ if (timlUtilMallocAcc((void**) &(layer->convParams.prevFeatureMapReshape), sizeof(float)*layer->convParams.inputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol*layer->row*layer->col)) {
+ return ERROR_CNN_LAYER_ALLOCATION;
+ }
}
// level 3 only
if (layer->allocatorLevel == Util_AllocatorLevel3) {
- if (layer->id % 2 == 0) { // layer 2, 4, 6 8, ... allocate at the back end
- layer->featureMap = cnn->memPool + cnn->memPoolSize - layer->channel*layer->row*layer->col;
+
+ if (layer->prev->memPoolPos == Util_MemPoolTop) {
+ offset = cnn->memPool + cnn->memPoolSize - layer->forwardMemory;
+ layer->memPoolPos = Util_MemPoolBottom;
+ layer->convParams.prevFeatureMapReshape = offset;
+ offset += sizeof(float)*layer->convParams.inputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol*layer->row*layer->col;
+ layer->featureMap = offset;
}
- else { // layer 1, 3, 5, ... allocate at the front end
- layer->featureMap = cnn->memPool;
+ else { // allocate at the top
+ offset = cnn->memPool;
+ layer->memPoolPos = Util_MemPoolTop;
+ layer->featureMap = offset;
+ offset += sizeof(float)*layer->maxBatchSize*layer->row*layer->col*layer->channel;
+ layer->convParams.prevFeatureMapReshape = offset;
}
}
- return 0;
+ return err;
}
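At allocator level 3, feature maps live in one shared pool and consecutive layers ping-pong between its two ends, so a layer's input (sitting at one end) survives while its output is written at the other. Roughly:

    top-allocated layer:     memPool -> [ featureMap | prevFeatureMapReshape | ...free... ]
    bottom-allocated layer:  [ ...free... | prevFeatureMapReshape | featureMap ] <- memPool + memPoolSize

The replaced id%2 scheme alternated by layer index; tracking memPoolPos per layer instead follows the end actually used by the previous allocation.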
diff --git a/src/common/cnn/timlCNNConvMemory.c b/src/common/cnn/timlCNNConvMemory.c
--- /dev/null
@@ -0,0 +1,92 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNConvMemory.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+/******************************************************************************/
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Calculate the memory requirement for the layer
+ * \param[in] layer CNN layer
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNConvMemory(timlCNNLayer *layer)
+{
+ int dataSize = sizeof(float);
+ layer->forwardMemory = 0;
+ layer->backwardMemory = 0;
+ layer->paramsMemory = 0;
+
+ // feature map
+ layer->forwardMemory += dataSize*layer->maxBatchSize*layer->row*layer->col*layer->channel;
+ // reshape matrix
+ layer->forwardMemory += dataSize*layer->convParams.inputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol*layer->row*layer->col;
+ // delta
+ layer->backwardMemory += dataSize*layer->maxBatchSize*layer->row*layer->col*layer->channel;
+
+ if (layer->convParams.shared == false) {
+ // bias multiplier
+ layer->paramsMemory += dataSize*layer->row*layer->col;
+ // reshape index matrix
+ layer->paramsMemory += dataSize*layer->convParams.kernelRow*layer->convParams.kernelCol*layer->row*layer->col;
+
+ if (layer->allocatorLevel == Util_AllocatorLevel1) {
+ // kernel
+ layer->paramsMemory += 3*dataSize*layer->convParams.inputFeatureMapChannel*layer->convParams.outputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol;
+ // bias
+ layer->paramsMemory += 3*dataSize*layer->channel;
+ }
+ else {
+ // kernel
+ layer->paramsMemory += dataSize*layer->convParams.inputFeatureMapChannel*layer->convParams.outputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol;
+ // bias
+ layer->paramsMemory += dataSize*layer->channel;
+ }
+ }
+ return 0;
+}
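With the per-layer estimates filled in, a network-wide total can be accumulated by walking the layer list (a sketch; it assumes the *Memory routine of every layer type has been run, and uses the head/next traversal seen elsewhere in this commit):

    long forward = 0, backward = 0, params = 0;
    timlCNNLayer *l;
    for (l = cnn->head; l != NULL; l = l->next) {
        forward  += l->forwardMemory;    /* feature maps + reshape buffers */
        backward += l->backwardMemory;   /* deltas                         */
        params   += l->paramsMemory;     /* kernels, biases, accumulators  */
    }

The factor of 3 in the level-1 branch above covers each parameter plus its Inc and GradAccum companions.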
diff --git a/src/common/cnn/timlCNNConvReadFromParamsBinaryFile.c b/src/common/cnn/timlCNNConvReadFromParamsBinaryFile.c
--- /dev/null
@@ -0,0 +1,72 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNConvReadFromParamsBinaryFile.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Read the conv layer parameters from binary files
+ * \param[in] fp2 FILE ptr to the level 2 parameter bin file
+ * \param[in,out] layer Conv layer
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNConvReadFromParamsBinaryFile(FILE *fp2, timlCNNLayer *layer)
+{
+ int dim;
+ // read params
+ if (fp2 != NULL ) {
+ dim = layer->convParams.kernelRow*layer->convParams.kernelCol*layer->convParams.inputFeatureMapChannel*layer->convParams.outputFeatureMapChannel;
+ if (layer->convParams.kernel != NULL) {
+ timlUtilFread(layer->convParams.kernel, sizeof(float), dim, fp2);
+ }
+ if (layer->convParams.bias != NULL) {
+ timlUtilFread(layer->convParams.bias, sizeof(float), layer->channel, fp2);
+ }
+ }
+
+ return 0;
+}
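The level-2 parameter file is thus consumed as a flat stream, in a fixed order that any writer must mirror for the round trip to work:

    kernel : float[kernelRow*kernelCol*inputFeatureMapChannel*outputFeatureMapChannel]
    bias   : float[channel]

Both reads are skipped when the corresponding pointer is NULL (presumably for shared layers whose parameters are owned elsewhere), which keeps the file offset in step only if the writer applied the same rule.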
diff --git a/src/common/cnn/timlCNNConvReadFromParamsMemory.c b/src/common/cnn/timlCNNConvReadFromParamsMemory.c
--- /dev/null
@@ -0,0 +1,74 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNConvReadFromParamsMemory.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Read the conv layer parameters from memory
+ * \param[out] layerCopy Copied layer
+ * \param[in] layer Target layer
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNConvReadFromParamsMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer)
+{
+ int dim;
+ int deviceId;
+ int threadId;
+ deviceId = layerCopy->cnn->deviceId;
+ threadId = layerCopy->cnn->threadId;
+ // read params
+ dim = layer->convParams.kernelRow*layer->convParams.kernelCol*layer->convParams.inputFeatureMapChannel*layer->convParams.outputFeatureMapChannel;
+ if (layer->convParams.kernel != NULL && layerCopy->convParams.kernel != NULL) {
+ timlUtilMemcpy(layerCopy->convParams.kernel, layer->convParams.kernel, sizeof(float)*dim, deviceId, threadId);
+ }
+ if (layer->convParams.bias != NULL && layerCopy->convParams.bias != NULL) {
+ timlUtilMemcpy(layerCopy->convParams.bias, layer->convParams.bias, sizeof(float)*layer->channel, deviceId, threadId);
+ }
+
+ return 0;
+}
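A sketch of the intended use, assuming cnn and cnnCopy are two networks with identical layer sequences (both names hypothetical):

    timlCNNLayer *src = cnn->head;
    timlCNNLayer *dst = cnnCopy->head;
    while (src != NULL && dst != NULL) {
        if (src->type == CNN_Conv) {
            timlCNNConvReadFromParamsMemory(dst, src);   /* deep-copies kernel + bias */
        }
        src = src->next;
        dst = dst->next;
    }

Note that the copy is routed through timlUtilMemcpy with the destination network's deviceId/threadId, so it also works when the copy lives on an accelerator.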
diff --git a/src/common/cnn/timlCNNConvReadFromStatesBinaryFile.c b/src/common/cnn/timlCNNConvReadFromStatesBinaryFile.c
--- /dev/null
+++ b/src/common/cnn/timlCNNConvReadFromStatesBinaryFile.c
@@ -0,0 +1,91 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNConvReadFromStatesBinaryFile.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Read the conv layer states from a binary file
+ * \param[in] fp3 FILE ptr to the state bin file
+ * \param[in,out] layer Layer ptr
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNConvReadFromStatesBinaryFile(FILE *fp3, timlCNNLayer *layer)
+{
+ int dim;
+ if (fp3 != NULL) {
+ // feature map
+ dim = layer->maxBatchSize*layer->row*layer->col*layer->channel;
+ if (layer->featureMap != NULL) {
+ timlUtilFread(layer->featureMap, sizeof(float), dim, fp3);
+ }
+ if (layer->convParams.prevFeatureMapReshape != NULL) {
+ timlUtilFread(layer->convParams.prevFeatureMapReshape, sizeof(float), layer->convParams.inputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol*layer->row*layer->col, fp3);
+ }
+ // level 1 only
+ if (layer->allocatorLevel == Util_AllocatorLevel1) {
+ // delta
+ timlUtilFread(layer->delta, sizeof(float), dim, fp3);
+ dim = layer->convParams.kernelRow*layer->convParams.kernelCol*layer->convParams.inputFeatureMapChannel*layer->convParams.outputFeatureMapChannel;
+ // kernel Inc/GradAccum
+ if (layer->convParams.kernelInc != NULL) {
+ timlUtilFread(layer->convParams.kernelInc, sizeof(float), dim, fp3);
+ }
+ if (layer->convParams.kernelGradAccum != NULL) {
+ timlUtilFread(layer->convParams.kernelGradAccum, sizeof(float), dim, fp3);
+ }
+ // bias Inc/GradAccum
+ if (layer->convParams.biasInc != NULL) {
+ timlUtilFread(layer->convParams.biasInc, sizeof(float), layer->channel, fp3);
+ }
+ if (layer->convParams.biasGradAccum != NULL) {
+ timlUtilFread(layer->convParams.biasGradAccum, sizeof(float), layer->channel, fp3);
+ }
+ }
+ }
+ return 0;
+}
diff --git a/src/common/cnn/timlCNNConvReadFromStatesMemory.c b/src/common/cnn/timlCNNConvReadFromStatesMemory.c
--- /dev/null
+++ b/src/common/cnn/timlCNNConvReadFromStatesMemory.c
@@ -0,0 +1,97 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNConvReadFromStatesMemory.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Read the conv layer states from memory
+ * \param[out] layerCopy Copied layer
+ * \param[in] layer Target layer
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNConvReadFromStatesMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer)
+{
+ int dim;
+ int deviceId;
+ int threadId;
+ deviceId = layerCopy->cnn->deviceId;
+ threadId = layerCopy->cnn->threadId;
+
+ // feature map
+ dim = layer->maxBatchSize*layer->row*layer->col*layer->channel;
+ if (layer->featureMap != NULL && layerCopy->featureMap != NULL) {
+ timlUtilMemcpy(layerCopy->featureMap, layer->featureMap, sizeof(float)*dim, deviceId, threadId);
+ }
+ if (layer->convParams.prevFeatureMapReshape != NULL && layerCopy->convParams.prevFeatureMapReshape != NULL) {
+ timlUtilMemcpy(layerCopy->convParams.prevFeatureMapReshape, layer->convParams.prevFeatureMapReshape, sizeof(float)*layer->convParams.inputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol*layer->row*layer->col, deviceId, threadId);
+ }
+ // level 1 only
+ if (layer->allocatorLevel == Util_AllocatorLevel1) {
+ // delta
+ if (layer->delta != NULL && layerCopy->delta != NULL) {
+ timlUtilMemcpy(layerCopy->delta, layer->delta, sizeof(float)*dim, deviceId, threadId);
+ }
+ dim = layer->convParams.kernelRow*layer->convParams.kernelCol*layer->convParams.inputFeatureMapChannel*layer->convParams.outputFeatureMapChannel;
+ // kernel Inc/GradAccum
+ if (layer->convParams.kernelInc != NULL && layerCopy->convParams.kernelInc != NULL) {
+ timlUtilMemcpy(layerCopy->convParams.kernelInc, layer->convParams.kernelInc, sizeof(float)*dim, deviceId, threadId);
+ }
+ if (layer->convParams.kernelGradAccum != NULL && layerCopy->convParams.kernelGradAccum != NULL) {
+ timlUtilMemcpy(layerCopy->convParams.kernelGradAccum, layer->convParams.kernelGradAccum, sizeof(float)*dim, deviceId, threadId);
+ }
+ // bias Inc/GradAccum
+ if (layer->convParams.biasInc != NULL && layerCopy->convParams.biasInc != NULL) {
+ timlUtilMemcpy(layerCopy->convParams.biasInc, layer->convParams.biasInc, sizeof(float)*layer->channel, deviceId, threadId);
+ }
+ if (layer->convParams.biasGradAccum != NULL && layerCopy->convParams.biasGradAccum != NULL) {
+ timlUtilMemcpy(layerCopy->convParams.biasGradAccum, layer->convParams.biasGradAccum, sizeof(float)*layer->channel, deviceId, threadId);
+ }
+ }
+
+ return 0;
+}
index 5ce6bfb0869f1e89c3f81f13bd00298dbc574737..466b0500e281da3423fe44339bbc893a5e38720c 100644 (file)
int timlCNNConvShareParams(timlConvNeuralNetwork *cnnShare, timlCNNLayer *layer)
{
timlCNNLayer *layerCopy;
- int i;
- int dim;
int err;
err = timlCNNAddConvLayer(cnnShare, layer->convParams.kernelRow, layer->convParams.kernelCol, layer->convParams.strideX, layer->convParams.strideY, layer->channel, layer->convParams);
}
layerCopy = cnnShare->tail;
layerCopy->convParams.shared = true;
- layerCopy->convParams.connectivity = layer->convParams.connectivity;
+ layerCopy->convParams.biasMultiplier = layer->convParams.biasMultiplier;
layerCopy->convParams.prevFeatureMapReshapeIndex = layer->convParams.prevFeatureMapReshapeIndex;
layerCopy->convParams.bias = layer->convParams.bias;
layerCopy->convParams.biasInc = layer->convParams.biasInc;
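Because the share path copies pointers (bias, biasInc, and so on) rather than buffers, the shared network aliases the original's parameters. A hedged sketch, assuming cnnShare was created with a matching topology and that only the conv layers need sharing:

    timlCNNLayer *layer = cnn->head;
    while (layer != NULL) {
        if (layer->type == CNN_Conv) {
            timlCNNConvShareParams(cnnShare, layer);   /* appends an aliasing conv layer */
        }
        layer = layer->next;
    }
    /* cnnShare must be torn down before the owning cnn frees these buffers */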
index dd5acd7eedb824c86116b1615594d18b1a134c1b..fb1eb692b0e2d8d7321ef3dc5a23a3836439da9f 100644 (file)
int currFeatureMapChannel;\r
int kernelRow;\r
int kernelCol;\r
- int count;\r
+ int batchUpdate;\r
float kernelLearningRate;\r
float biasLearningRate;\r
float kernelDecay;\r
currFeatureMapChannel = layer->convParams.outputFeatureMapChannel;\r
kernelRow = layer->convParams.kernelRow;\r
kernelCol = layer->convParams.kernelCol;\r
- count = cnn->params.count;\r
+ batchUpdate = cnn->params.batchUpdate;\r
kernelLearningRate = layer->convParams.kernelLearningFactor * cnn->params.learningRate;\r
biasLearningRate = layer->convParams.biasLearningFactor * cnn->params.learningRate;\r
kernelDecay = layer->convParams.kernelDecayFactor * cnn->params.weightDecay;\r
deviceId = cnn->deviceId;\r
threadId = cnn->threadId;\r
\r
- // kernelGradAccum = kernelGradAccum / count\r
- timlUtilBLASsscal(kernelRow*kernelCol*prevFeatureMapChannel*currFeatureMapChannel, 1.0/count, layer->convParams.kernelGradAccum, deviceId, threadId);\r
+ // kernelGradAccum = kernelGradAccum/batchUpdate\r
+ timlUtilBLASsscal(kernelRow*kernelCol*prevFeatureMapChannel*currFeatureMapChannel, 1.0/batchUpdate, layer->convParams.kernelGradAccum, deviceId, threadId);\r
// kernelGradAccum = kernelGradAccum + kernelDecay * kernel\r
timlUtilBLASsaxpy(kernelRow*kernelCol*prevFeatureMapChannel*currFeatureMapChannel, kernelDecay, layer->convParams.kernel, layer->convParams.kernelGradAccum, deviceId, threadId);\r
// kernelGradAccum = kernelGradAccum * learningRate\r
// reset kernelGradAccum\r
timlUtilVectorResetFloat(layer->convParams.kernelGradAccum, kernelRow*kernelCol*prevFeatureMapChannel*currFeatureMapChannel, 0.0, deviceId, threadId);\r
\r
- // biasGradAccum = learningRate * biasGradAccum / count\r
- timlUtilBLASsscal(currFeatureMapChannel, biasLearningRate / count, layer->convParams.biasGradAccum, deviceId, threadId);\r
+ // biasGradAccum = learningRate * biasGradAccum/batchUpdate\r
+ timlUtilBLASsscal(currFeatureMapChannel, biasLearningRate/batchUpdate, layer->convParams.biasGradAccum, deviceId, threadId);\r
// biasInc = biasInc * momentum\r
timlUtilBLASsscal(currFeatureMapChannel, cnn->params.momentum, layer->convParams.biasInc, deviceId, threadId);\r
// biasInc = biasInc + biasGradAccum\r
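Taken together with the kernelInc/biasInc steps, this hunk implements mini-batch SGD with momentum and weight decay, now normalized by the new batchUpdate field instead of the old count. In LaTeX (the sign of the final apply step sits outside this hunk):

    \Delta w \leftarrow \mu\,\Delta w + \eta\left(\frac{1}{B}\sum_{i=1}^{B}\nabla_{w}\ell_{i} + \lambda w\right),
    \qquad w \leftarrow w - \Delta w

where B = batchUpdate, \mu = momentum, \eta is the kernel (or bias) learning rate, and \lambda the corresponding decay factor.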
index 4093998d680c756250f3f7bc7484b3b608e98f6b..e7af5fe5465913e0ff84fa0fe54a2b82ea2a1053 100644 (file)
@@ -97,10 +97,15 @@ int timlCNNConvWriteToFile(FILE *fp1, FILE *fp2, FILE *fp3, timlCNNLayer *layer,
}\r
\r
if (fp3 != NULL) {\r
- timlUtilFwrite(layer->convParams.kernelInc, sizeof(float), dim, fp3);\r
- timlUtilFwrite(layer->convParams.kernelGradAccum, sizeof(float), dim, fp3);\r
- timlUtilFwrite(layer->convParams.biasInc, sizeof(float), layer->channel, fp3);\r
- timlUtilFwrite(layer->convParams.biasGradAccum, sizeof(float), layer->channel, fp3);\r
+ timlUtilFwrite(layer->featureMap, sizeof(float), layer->maxBatchSize*layer->row*layer->col*layer->channel, fp3);\r
+ timlUtilFwrite(layer->convParams.prevFeatureMapReshape, sizeof(float), layer->convParams.inputFeatureMapChannel*layer->convParams.kernelRow*layer->convParams.kernelCol*layer->row*layer->col, fp3);\r
+ if (layer->allocatorLevel == Util_AllocatorLevel1) {\r
+ timlUtilFwrite(layer->delta, sizeof(float), layer->maxBatchSize*layer->row*layer->col*layer->channel, fp3);\r
+ timlUtilFwrite(layer->convParams.kernelInc, sizeof(float), dim, fp3);\r
+ timlUtilFwrite(layer->convParams.kernelGradAccum, sizeof(float), dim, fp3);\r
+ timlUtilFwrite(layer->convParams.biasInc, sizeof(float), layer->channel, fp3);\r
+ timlUtilFwrite(layer->convParams.biasGradAccum, sizeof(float), layer->channel, fp3);\r
+ }\r
}\r
\r
return err;\r
diff --git a/src/common/cnn/timlCNNCreateConvNeuralNetwork.c b/src/common/cnn/timlCNNCreateConvNeuralNetwork.c
index a98658796930e1807c407cfca38969ddc7e70840..e9759fc6b1238884a9b51e1bd0809b09dbc73326 100644 (file)
*/\r
/******************************************************************************/\r
\r
-timlConvNeuralNetwork *timlCNNCreateConvNeuralNetwork(timlCNNTrainingParams params, int deviceId)\r
+timlConvNeuralNetwork *timlCNNCreateConvNeuralNetwork(timlCNNTrainingParams params)\r
{\r
timlConvNeuralNetwork *cnn;\r
- cnn = malloc(sizeof(timlConvNeuralNetwork));\r
- if (cnn == NULL) {\r
+ timlUtilDeviceSetup();\r
+ if (timlUtilMallocHost((timlConvNeuralNetwork**)&cnn, sizeof(timlConvNeuralNetwork))) {\r
return NULL;\r
}\r
- timlCNNAssignDevice(cnn, deviceId, -1);\r
+ cnn->deviceId = 0;\r
+ cnn->threadId = -1;\r
cnn->params = params;\r
cnn->memPool = NULL;\r
cnn->memPoolSize = 0;\r
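With the deviceId argument dropped, device selection now happens inside the constructor (timlUtilDeviceSetup() plus the deviceId = 0, threadId = -1 defaults above). A minimal creation sketch; every timlCNNTrainingParams field should still be filled in, and only the ones visible in this diff are shown:

    timlCNNTrainingParams params;
    memset(&params, 0, sizeof(params));            /* then set real values */
    params.allocatorLevel = Util_AllocatorLevel1;
    params.batchUpdate    = 128;                   /* hypothetical value */
    timlConvNeuralNetwork *cnn = timlCNNCreateConvNeuralNetwork(params);
    if (cnn == NULL) {
        /* host allocation or device setup failed */
    }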
index cc36da59b479c459c3161968249a23cd025484b2..21750d3616ea042fbab8e0b94702dbd265b3a979 100644 (file)
{\r
int err;\r
timlCNNLayer *layer;\r
- timlCNNLayer *next;\r
+ timlCNNLayer *nextLayer;\r
\r
err = 0;\r
// error checking\r
if (cnn == NULL) {\r
return ERROR_CNN_NULL_PTR;\r
}\r
- if (cnn->params.allocatorLevel == Util_AllocatorLevel3) {\r
- free(cnn->memPool);\r
- }\r
+\r
+ timlCNNFree(cnn);\r
\r
layer = cnn->head;\r
- // delete the rest of the layers\r
while (layer != NULL) {\r
- next = layer->next;\r
- switch (layer->type) {\r
- case CNN_Input:\r
- timlCNNDeleteInputLayer(layer);\r
- break;\r
- case CNN_Conv:\r
- timlCNNDeleteConvLayer(layer);\r
- break;\r
- case CNN_Norm:\r
- timlCNNDeleteNormLayer(layer);\r
- break;\r
- case CNN_Pooling:\r
- timlCNNDeletePoolingLayer(layer);\r
- break;\r
- case CNN_Linear:\r
- timlCNNDeleteLinearLayer(layer);\r
- break;\r
- case CNN_Nonlinear:\r
- timlCNNDeleteNonlinearLayer(layer);\r
- break;\r
- case CNN_Dropout:\r
- timlCNNDeleteDropoutLayer(layer);\r
- break;\r
- }\r
- layer = next;\r
+ nextLayer = layer->next;\r
+ timlUtilFreeHost(layer);\r
+ layer = nextLayer;\r
}\r
- free(cnn);\r
+\r
+ timlUtilFreeHost(cnn);\r
\r
return err;\r
}\r
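The delete routine (its signature is outside this hunk; timlCNNDelete is the assumed name, following the library's convention) now delegates buffer release to the new timlCNNFree() and only walks the list to free the layer structs themselves, so a full teardown is simply:

    timlCNNDelete(cnn);    /* timlCNNFree(cnn), then timlUtilFreeHost() per layer */
    cnn = NULL;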
diff --git a/src/common/cnn/timlCNNDeleteLayer.c b/src/common/cnn/timlCNNDeleteLayer.c
--- /dev/null
+++ b/src/common/cnn/timlCNNDeleteLayer.c
@@ -0,0 +1,115 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNDeleteLayer.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Delete a layer
+ * \param[in] layer Layer ptr
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNDeleteLayer(timlCNNLayer *layer)
+{
+ int err = 0;
+
+ if (layer->prev != NULL && layer->next != NULL) {
+ layer->prev->next = layer->next;
+ layer->next->prev = layer->prev;
+ }
+
+ // layer at the head
+ if (layer->prev == NULL && layer->next != NULL) {
+ layer->next->prev = NULL;
+ layer->cnn->head = layer->next;
+ }
+
+ // layer at the tail
+ if (layer->next == NULL && layer->prev != NULL) {
+ layer->prev->next = NULL;
+ layer->cnn->tail = layer->prev;
+ }
+
+ // delete the layer
+ switch(layer->type) {
+ case CNN_Input:
+ timlCNNFreeInputLayer(layer);
+ break;
+ case CNN_Conv:
+ timlCNNFreeConvLayer(layer);
+ break;
+ case CNN_Norm:
+ timlCNNFreeNormLayer(layer);
+ break;
+ case CNN_Pooling:
+ timlCNNFreePoolingLayer(layer);
+ break;
+ case CNN_Linear:
+ timlCNNFreeLinearLayer(layer);
+ break;
+ case CNN_Nonlinear:
+ timlCNNFreeNonlinearLayer(layer);
+ break;
+ case CNN_Dropout:
+ timlCNNFreeDropoutLayer(layer);
+ break;
+ case CNN_Softmax:
+ timlCNNFreeSoftmaxLayer(layer);
+ break;
+ case CNN_SoftmaxCost:
+ timlCNNFreeSoftmaxCostLayer(layer);
+ break;
+ case CNN_Accuracy:
+ timlCNNFreeAccuracyLayer(layer);
+ break;
+ default:
+ break;
+ }
+ timlUtilFreeHost(layer);
+
+ return err;
+}
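A usage sketch: removing a tail layer, say an accuracy layer that is only wanted during testing. The relink logic above only patches head/tail when at least one layer remains, so this assumes a network with two or more layers:

    timlCNNDeleteLayer(cnn->tail);   /* unlinks, frees buffers and the struct,
                                        and repoints cnn->tail at the previous layer */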
diff --git a/src/common/cnn/timlCNNDropoutBackPropagation.c b/src/common/cnn/timlCNNDropoutBackPropagation.c
index 8b869394590bb32cc35443949ba78cdd854f94f9..8769ca9595c693018c5ff5f4a4e4960957a5002d 100644 (file)
int deviceId;
int threadId;
- prevDim = layer->prev->row * layer->prev->col * layer->prev->channel;
+ prevDim = layer->prev->row*layer->prev->col*layer->prev->channel*layer->prev->batchSize;
deviceId = layer->cnn->deviceId;
threadId = layer->cnn->threadId;
diff --git a/src/common/cnn/timlCNNDropoutForwardPropagation.c b/src/common/cnn/timlCNNDropoutForwardPropagation.c
index 13a117aa2901839f6a9c833bcce4a6ff52ef0da3..6d6f5792cb37e8c390e6122a1974ae08ef8afdfc 100644 (file)
// init
layer = prevLayer->next;
- dim = prevLayer->channel*prevLayer->row*prevLayer->col;
+ dim = prevLayer->channel*prevLayer->row*prevLayer->col*prevLayer->batchSize;
prob = layer->dropoutParams.prob;
deviceId = prevLayer->cnn->deviceId;
threadId = prevLayer->cnn->threadId;
- if (layer->phase == Util_Train) {
- timlUtilMasking(prevLayer->featureMap, layer->featureMap, layer->dropoutParams.mask, layer->dropoutParams.randomVector, dim, prob, deviceId, threadId);
+ if (prevLayer->allocatorLevel != Util_AllocatorLevel3) {
+ timlUtilMemcpy(layer->featureMap, prevLayer->featureMap, sizeof(float)*layer->row*layer->col*layer->channel*layer->batchSize, deviceId, threadId);
}
- else { // test phase
- timlUtilBLASscopy(dim, prevLayer->featureMap, layer->featureMap, deviceId, threadId);
+ if (layer->phase == Util_Train) {
+ timlUtilMasking(layer->featureMap, layer->dropoutParams.mask, dim, prob, deviceId, threadId);
}
-
return 0;
}
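The masking now runs in place on layer->featureMap. As a rough plain-C picture of what an in-place dropout mask conventionally computes (an assumption about timlUtilMasking's semantics, not its actual implementation -- the real routine dispatches on deviceId/threadId, and whether kept activations are rescaled by 1/(1-prob) is not visible in this diff; uniformRandom() in [0,1) is a stand-in RNG):

    int i;
    for (i = 0; i < dim; i++) {
        mask[i] = (uniformRandom() >= prob);   /* 1 = keep, 0 = drop */
        featureMap[i] *= (float)mask[i];
    }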
index 8b80e913e7eb21a9f3c392b126833b993451517b..fcf58cbd8d616f7aa59ae9a466084eb8f2a35b0d 100644 (file)
timlConvNeuralNetwork *cnn = layer->cnn;
if (layer->allocatorLevel == Util_AllocatorLevel1) {
// allocate feature map
- if (timlUtilMalloc((void**) &(layer->featureMap), sizeof(float) * layer->row * layer->col * layer->channel)) {
+ if (timlUtilMallocAcc((void**) &(layer->featureMap), sizeof(float)*layer->maxBatchSize*layer->row*layer->col*layer->channel)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
// allocate delta
- if (timlUtilMalloc((void**) &(layer->delta), sizeof(float) * layer->row * layer->col * layer->channel)) {
+ if (timlUtilMallocAcc((void**) &(layer->delta), sizeof(float)*layer->maxBatchSize*layer->row*layer->col*layer->channel)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
// allocate mask
- if (timlUtilMalloc((void**) &(layer->dropoutParams.mask), sizeof(int) * layer->row * layer->col * layer->channel)) {
- return ERROR_CNN_LAYER_ALLOCATION;
- }
-
- // allocate random vector
- if (timlUtilMalloc((void**) &(layer->dropoutParams.randomVector), sizeof(unsigned int) * layer->row * layer->col * layer->channel)) {
+ if (timlUtilMallocAcc((void**) &(layer->dropoutParams.mask), sizeof(unsigned int)*layer->maxBatchSize*layer->row*layer->col*layer->channel)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
}
if (layer->allocatorLevel == Util_AllocatorLevel2) {
// allocate feature map
- if (timlUtilMalloc((void**) &(layer->featureMap), sizeof(float) * layer->row * layer->col * layer->channel)) {
+ if (timlUtilMallocAcc((void**) &(layer->featureMap), sizeof(float)*layer->maxBatchSize*layer->row*layer->col*layer->channel)) {
return ERROR_CNN_LAYER_ALLOCATION;
}
}
if (layer->allocatorLevel == Util_AllocatorLevel3) {
- if (layer->id%2 == 0) { // layer 2, 4, 6 8, ... allocate at the back end
- layer->featureMap = cnn->memPool + cnn->memPoolSize - layer->channel*layer->row*layer->col;
- }
- else { // layer 1, 3, 5, ... allocate at the front end
- layer->featureMap = cnn->memPool;
- }
+ layer->featureMap = layer->prev->featureMap;
+ layer->memPoolPos = layer->prev->memPoolPos;
}
return 0;
diff --git a/src/common/cnn/timlCNNDropoutMemory.c b/src/common/cnn/timlCNNDropoutMemory.c
--- /dev/null
+++ b/src/common/cnn/timlCNNDropoutMemory.c
@@ -0,0 +1,73 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNDropoutMemory.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+/******************************************************************************/
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Calculate the memory requirement for the layer
+ * \param[in] layer CNN layer
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNDropoutMemory(timlCNNLayer *layer)
+{
+ int dataSize = sizeof(float);
+ int maskSize = sizeof(unsigned int);
+ layer->forwardMemory = 0;
+ layer->backwardMemory = 0;
+ layer->paramsMemory = 0;
+ // feature map
+ layer->forwardMemory += dataSize*layer->maxBatchSize*layer->row*layer->col*layer->channel;
+ // delta
+ layer->backwardMemory += dataSize*layer->maxBatchSize*layer->row*layer->col*layer->channel;
+ // mask
+ layer->backwardMemory += maskSize*layer->maxBatchSize*layer->row*layer->col*layer->channel;
+
+ return 0;
+}
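A worked example of the bookkeeping, with hypothetical sizes: for a 32x32x64 dropout layer with maxBatchSize = 4,

    timlCNNDropoutMemory(layer);
    /* forwardMemory  = 4*32*32*64 * 4 bytes = 1 MiB  (feature map)
       backwardMemory = 1 MiB (delta) + 1 MiB (mask)  = 2 MiB
       paramsMemory   = 0 -- dropout has no learned parameters */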
diff --git a/src/common/cnn/timlCNNDropoutReadFromStatesBinaryFile.c b/src/common/cnn/timlCNNDropoutReadFromStatesBinaryFile.c
--- /dev/null
+++ b/src/common/cnn/timlCNNDropoutReadFromStatesBinaryFile.c
@@ -0,0 +1,79 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNDropoutReadFromStatesBinaryFile.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Read the dropout layer states from a binary file
+ * \param[in] fp3 FILE ptr to the state bin file
+ * \param[in,out] layer Layer ptr
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNDropoutReadFromStatesBinaryFile(FILE *fp3, timlCNNLayer *layer)
+{
+ if (fp3 == NULL) {
+ return 0;
+ }
+ // feature map
+ if (layer->featureMap != NULL) {
+ timlUtilFread(layer->featureMap, sizeof(float), layer->maxBatchSize*layer->row*layer->col*layer->channel, fp3);
+ }
+
+ if (layer->allocatorLevel == Util_AllocatorLevel1) {
+ // delta
+ if (layer->delta != NULL) {
+ timlUtilFread(layer->delta, sizeof(float), layer->maxBatchSize*layer->row*layer->col*layer->channel, fp3);
+ }
+ // mask
+ if (layer->dropoutParams.mask != NULL) {
+ timlUtilFread(layer->dropoutParams.mask, sizeof(unsigned int), layer->maxBatchSize*layer->row*layer->col*layer->channel, fp3);
+ }
+ }
+
+ return 0;
+}
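A read sketch, assuming "states.bin" (hypothetical path) was produced by the matching timlCNNDropoutWriteToFile() change further down in this diff -- the reader consumes featureMap, then (level 1 only) delta and mask, in exactly the order the writer emits them:

    FILE *fp3 = fopen("states.bin", "rb");
    if (fp3 != NULL) {
        timlCNNDropoutReadFromStatesBinaryFile(fp3, layer);
        fclose(fp3);
    }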
diff --git a/src/common/cnn/timlCNNDropoutReadFromStatesMemory.c b/src/common/cnn/timlCNNDropoutReadFromStatesMemory.c
--- /dev/null
+++ b/src/common/cnn/timlCNNDropoutReadFromStatesMemory.c
@@ -0,0 +1,78 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNDropoutReadFromStatesMemory.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Read the dropout layer states from memory
+ * \param[out] layerCopy Copied layer
+ * \param[in] layer Target layer
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNDropoutReadFromStatesMemory(timlCNNLayer *layerCopy, timlCNNLayer *layer)
+{
+ int deviceId = layerCopy->cnn->deviceId;
+ int threadId = layerCopy->cnn->threadId;
+ // feature map
+ if (layer->featureMap != NULL && layerCopy->featureMap != NULL) {
+ timlUtilMemcpy(layerCopy->featureMap, layer->featureMap, sizeof(float)*layer->maxBatchSize*layer->row*layer->col*layer->channel, deviceId, threadId);
+ }
+
+ if (layer->allocatorLevel == Util_AllocatorLevel1) {
+ // delta
+ if (layer->delta != NULL && layerCopy->delta != NULL) {
+ timlUtilMemcpy(layerCopy->delta, layer->delta, sizeof(float)*layer->maxBatchSize*layer->row*layer->col*layer->channel, deviceId, threadId);
+ }
+ // mask
+ if (layer->dropoutParams.mask != NULL && layerCopy->dropoutParams.mask != NULL) {
+ timlUtilMemcpy(layerCopy->dropoutParams.mask, layer->dropoutParams.mask, sizeof(unsigned int)*layer->maxBatchSize*layer->row*layer->col*layer->channel, deviceId, threadId);
+ }
+ }
+
+ return 0;
+}
diff --git a/src/common/cnn/timlCNNDropoutWriteToFile.c b/src/common/cnn/timlCNNDropoutWriteToFile.c
index aae085d15604b37d5b1606526461d87dcf867dd7..ead0fd2573ea989d8f50f1b91b8fff46839b1e20 100644 (file)
fprintf(fp1, floatFormat, layer->dropoutParams.prob);
fprintf(fp1, "\n");
+ if (fp3 != NULL) {
+ // feature map
+ timlUtilFwrite(layer->featureMap, sizeof(float), layer->maxBatchSize*layer->row*layer->col*layer->channel, fp3);
+ if (layer->allocatorLevel == Util_AllocatorLevel1) {
+ // delta
+ timlUtilFwrite(layer->delta, sizeof(float), layer->maxBatchSize*layer->row*layer->col*layer->channel, fp3);
+ // mask
+ timlUtilFwrite(layer->dropoutParams.mask, sizeof(unsigned int), layer->maxBatchSize*layer->row*layer->col*layer->channel, fp3);
+ }
+ }
return 0;
}
diff --git a/src/common/cnn/timlCNNForwardPropagation.c b/src/common/cnn/timlCNNForwardPropagation.c
index 78e0a52d90bb56eddbc769c42ffecf1af49d676a..0b256d7723ef2dc5d83a8c24b2b86fbaa3b95f54 100644 (file)
\r
#include "../api/timl.h"\r
\r
-\r
/******************************************************************************/\r
/*!\r
* \ingroup cnn\r
*/\r
/******************************************************************************/\r
\r
-int timlCNNForwardPropagation(timlConvNeuralNetwork *cnn, float *data, int dim)\r
+int timlCNNForwardPropagation(timlConvNeuralNetwork *cnn)\r
{\r
int err;\r
timlCNNLayer *layer;\r
\r
err = 0;\r
if (cnn == NULL) {\r
return ERROR_CNN_NULL_PTR;\r
}\r
layer = cnn->head;\r
- timlCNNInputForwardPropagation(layer, data, dim);\r
+ err = timlCNNInputForwardPropagation(layer);\r
+\r
while (layer->next != NULL) {\r
switch (layer->next->type) {\r
+ case CNN_Accuracy:\r
+ err = timlCNNAccuracyForwardPropagation(layer);\r
+ if (err) return err;\r
+ break;\r
case CNN_Conv:\r
err = timlCNNConvForwardPropagation(layer);\r
if (err) return err;\r
break;\r
case CNN_Nonlinear:\r
err = timlCNNNonlinearForwardPropagation(layer);\r
- if (err) return err;\r
break;\r
case CNN_Dropout:\r
err = timlCNNDropoutForwardPropagation(layer);\r
+ if (err) return err;\r
+ break;\r
+ case CNN_SoftmaxCost:\r
+ err = timlCNNSoftmaxCostForwardPropagation(layer);\r
+ if (err) return err;\r
+ break;\r
+ case CNN_Softmax:\r
+ err = timlCNNSoftmaxForwardPropagation(layer);\r
+ if (err) return err;\r
break;\r
default:\r
break;\r
}\r
layer = layer->next;\r
}\r
\r
return err;\r
}\r
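With the data/dim arguments gone, the input batch is staged on the input layer ahead of time (the staging call is not part of this hunk; the inputParams.inputData field elsewhere in this diff suggests where it lands), and a whole-network pass becomes:

    int err = timlCNNForwardPropagation(cnn);   /* input -> ... -> softmax/accuracy */
    if (err) {
        /* handle the ERROR_CNN_* code */
    }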
diff --git a/src/common/cnn/timlCNNFree.c b/src/common/cnn/timlCNNFree.c
--- /dev/null
+++ b/src/common/cnn/timlCNNFree.c
@@ -0,0 +1,112 @@
+/******************************************************************************/\r
+/*!\r
+ * \file timlCNNFree.c\r
+ */\r
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/\r
+ *\r
+ * Redistribution and use in source and binary forms, with or without\r
+ * modification, are permitted provided that the following conditions\r
+ * are met:\r
+ *\r
+ * Redistributions of source code must retain the above copyright\r
+ * notice, this list of conditions and the following disclaimer.\r
+ *\r
+ * Redistributions in binary form must reproduce the above copyright\r
+ * notice, this list of conditions and the following disclaimer in the\r
+ * documentation and/or other materials provided with the\r
+ * distribution.\r
+ *\r
+ * Neither the name of Texas Instruments Incorporated nor the names of\r
+ * its contributors may be used to endorse or promote products derived\r
+ * from this software without specific prior written permission.\r
+ *\r
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+ *\r
+ ******************************************************************************/\r
+\r
+\r
+/*******************************************************************************\r
+ *\r
+ * INCLUDES\r
+ *\r
+ ******************************************************************************/\r
+\r
+#include "../api/timl.h"\r
+\r
+\r
+/******************************************************************************/\r
+/*!\r
+ * \ingroup cnn\r
+ * \brief Free a cnn structure\r
+ * \param[in] cnn CNN structure\r
+ * \return Error code\r
+ */\r
+/******************************************************************************/\r
+\r
+int timlCNNFree(timlConvNeuralNetwork *cnn)\r
+{\r
+ int err;\r
+ timlCNNLayer *layer;\r
+\r
+ err = 0;\r
+ // error checking\r
+ if (cnn == NULL) {\r
+ return ERROR_CNN_NULL_PTR;\r
+ }\r
+ if (cnn->params.allocatorLevel == Util_AllocatorLevel3) {\r
+ timlUtilFreeAcc(cnn->memPool);\r
+ cnn->memPool = NULL;\r
+ }\r
+\r
+ layer = cnn->head;\r
+ // delete the rest of the layers\r
+ while (layer != NULL) {\r
+ switch (layer->type) {\r
+ case CNN_Input:\r
+ timlCNNFreeInputLayer(layer);\r
+ break;\r
+ case CNN_Conv:\r
+ timlCNNFreeConvLayer(layer);\r
+ break;\r
+ case CNN_Norm:\r
+ timlCNNFreeNormLayer(layer);\r
+ break;\r
+ case CNN_Pooling:\r
+ timlCNNFreePoolingLayer(layer);\r
+ break;\r
+ case CNN_Linear:\r
+ timlCNNFreeLinearLayer(layer);\r
+ break;\r
+ case CNN_Nonlinear:\r
+ timlCNNFreeNonlinearLayer(layer);\r
+ break;\r
+ case CNN_Dropout:\r
+ timlCNNFreeDropoutLayer(layer);\r
+ break;\r
+ case CNN_Softmax:\r
+ timlCNNFreeSoftmaxLayer(layer);\r
+ break;\r
+ case CNN_SoftmaxCost:\r
+ timlCNNFreeSoftmaxCostLayer(layer);\r
+ break;\r
+ case CNN_Accuracy:\r
+ timlCNNFreeAccuracyLayer(layer);\r
+ break;\r
+ default:\r
+ break;\r
+ }\r
+ layer = layer->next;\r
+ }\r
+\r
+ return err;\r
+}\r
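Unlike the delete path, timlCNNFree() releases only the buffers the layers own and leaves the structs and their links intact; a sketch of the intended split:

    timlCNNFree(cnn);   /* memory pool + per-layer buffers released;
                           cnn, its layers and their links remain valid */
    /* the struct teardown happens later, via the delete routine
       shown earlier in this diff */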
diff --git a/src/common/cnn/timlCNNFreeAccuracyLayer.c b/src/common/cnn/timlCNNFreeAccuracyLayer.c
--- /dev/null
+++ b/src/common/cnn/timlCNNFreeAccuracyLayer.c
@@ -0,0 +1,78 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNFreeAccuracyLayer.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Free accuracy layer
+ * \param[in] layer Layer ptr
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNFreeAccuracyLayer(timlCNNLayer *layer)
+{
+ if (layer->featureMap != NULL) {
+ timlUtilFreeHost(layer->featureMap);
+ layer->featureMap = NULL;
+ }
+
+ if (layer->delta != NULL) {
+ timlUtilFreeHost(layer->delta);
+ layer->delta = NULL;
+ }
+
+ if (layer->accuracyParams.label != NULL) {
+ timlUtilFreeHost(layer->accuracyParams.label);
+ layer->accuracyParams.label = NULL;
+ }
+ if (layer->accuracyParams.trueLabel != NULL) {
+ timlUtilFreeHost(layer->accuracyParams.trueLabel);
+ layer->accuracyParams.trueLabel = NULL;
+ }
+
+ return 0;
+}
diff --git a/src/common/cnn/timlCNNFreeConvLayer.c b/src/common/cnn/timlCNNFreeConvLayer.c
--- /dev/null
+++ b/src/common/cnn/timlCNNFreeConvLayer.c
@@ -0,0 +1,118 @@
+/******************************************************************************/\r
+/*!\r
+ * \file timlCNNFreeConvLayer.c\r
+ */\r
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/\r
+ *\r
+ * Redistribution and use in source and binary forms, with or without\r
+ * modification, are permitted provided that the following conditions\r
+ * are met:\r
+ *\r
+ * Redistributions of source code must retain the above copyright\r
+ * notice, this list of conditions and the following disclaimer.\r
+ *\r
+ * Redistributions in binary form must reproduce the above copyright\r
+ * notice, this list of conditions and the following disclaimer in the\r
+ * documentation and/or other materials provided with the\r
+ * distribution.\r
+ *\r
+ * Neither the name of Texas Instruments Incorporated nor the names of\r
+ * its contributors may be used to endorse or promote products derived\r
+ * from this software without specific prior written permission.\r
+ *\r
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+ *\r
+ ******************************************************************************/\r
+\r
+\r
+/*******************************************************************************\r
+ *\r
+ * INCLUDES\r
+ *\r
+ ******************************************************************************/\r
+\r
+#include "../api/timl.h"\r
+\r
+\r
+/******************************************************************************/\r
+/*!\r
+ * \ingroup cnn\r
+ * \brief Free conv layer\r
+ * \param[in] layer Layer ptr\r
+ * \return Error code\r
+ */\r
+/******************************************************************************/\r
+\r
+int timlCNNFreeConvLayer(timlCNNLayer *layer)\r
+{\r
+\r
+ // free params memory\r
+ if (layer->convParams.shared == false) {\r
+ if (layer->convParams.prevFeatureMapReshapeIndex != NULL) {\r
+ timlUtilFreeAcc(layer->convParams.prevFeatureMapReshapeIndex);\r
+ layer->convParams.prevFeatureMapReshapeIndex = NULL;\r
+ }\r
+\r
+ if (layer->convParams.biasMultiplier != NULL) {\r
+ timlUtilFreeAcc(layer->convParams.biasMultiplier);\r
+ layer->convParams.biasMultiplier = NULL;\r
+ }\r
+ if (layer->convParams.kernel != NULL) {\r
+ timlUtilFreeAcc(layer->convParams.kernel);\r
+ layer->convParams.kernel = NULL;\r
+ }\r
+ if (layer->convParams.kernelInc != NULL) {\r
+ timlUtilFreeAcc(layer->convParams.kernelInc);\r
+ layer->convParams.kernelInc = NULL;\r
+ }\r
+ if (layer->convParams.kernelGradAccum != NULL) {\r
+ timlUtilFreeAcc(layer->convParams.kernelGradAccum);\r
+ layer->convParams.kernelGradAccum = NULL;\r
+ }\r
+ if (layer->convParams.bias != NULL) {\r
+ timlUtilFreeAcc(layer->convParams.bias);\r
+ layer->convParams.bias = NULL;\r
+ }\r
+ if (layer->convParams.biasInc != NULL) {\r
+ timlUtilFreeAcc(layer->convParams.biasInc);\r
+ layer->convParams.biasInc = NULL;\r
+ }\r
+ if (layer->convParams.biasGradAccum != NULL) {\r
+ timlUtilFreeAcc(layer->convParams.biasGradAccum);\r
+ layer->convParams.biasGradAccum = NULL;\r
+ }\r
+ }\r
+\r
+ // free level 1 or level 2\r
+ if (layer->allocatorLevel != Util_AllocatorLevel3) {\r
+\r
+ if (layer->featureMap != NULL) {\r
+ timlUtilFreeAcc(layer->featureMap);\r
+ layer->featureMap = NULL;\r
+ }\r
+\r
+ if (layer->delta != NULL) {\r
+ timlUtilFreeAcc(layer->delta);\r
+ layer->delta = NULL;\r
+ }\r
+\r
+ if (layer->convParams.prevFeatureMapReshape != NULL) {\r
+ timlUtilFreeAcc(layer->convParams.prevFeatureMapReshape);\r
+ layer->convParams.prevFeatureMapReshape = NULL;\r
+ }\r
+ }\r
+\r
+ return 0;\r
+}\r
diff --git a/src/common/cnn/timlCNNFreeDropoutLayer.c b/src/common/cnn/timlCNNFreeDropoutLayer.c
--- /dev/null
+++ b/src/common/cnn/timlCNNFreeDropoutLayer.c
@@ -0,0 +1,78 @@
+/******************************************************************************/
+/*!
+ * \file timlCNNFreeDropoutLayer.c
+ */
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+
+/*******************************************************************************
+ *
+ * INCLUDES
+ *
+ ******************************************************************************/
+
+#include "../api/timl.h"
+
+
+/******************************************************************************/
+/*!
+ * \ingroup cnn
+ * \brief Free dropout layer
+ * \param[in] layer Layer ptr
+ * \return Error code
+ */
+/******************************************************************************/
+
+int timlCNNFreeDropoutLayer(timlCNNLayer *layer)
+{
+ // free level 1 or level 2
+ if (layer->allocatorLevel != Util_AllocatorLevel3) {
+
+ if (layer->featureMap != NULL) {
+ timlUtilFreeAcc(layer->featureMap);
+ layer->featureMap = NULL;
+ }
+
+ if (layer->delta != NULL) {
+ timlUtilFreeAcc(layer->delta);
+ layer->delta = NULL;
+ }
+
+ if (layer->dropoutParams.mask != NULL) {
+ timlUtilFreeAcc(layer->dropoutParams.mask);
+ layer->dropoutParams.mask = NULL;
+ }
+ }
+
+ return 0;
+}
diff --git a/src/common/cnn/timlCNNFreeInputLayer.c b/src/common/cnn/timlCNNFreeInputLayer.c
--- /dev/null
+++ b/src/common/cnn/timlCNNFreeInputLayer.c
@@ -0,0 +1,82 @@
+/******************************************************************************/\r
+/*!\r
+ * \file timlCNNFreeInputLayer.c\r
+ */\r
+/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/\r
+ *\r
+ * Redistribution and use in source and binary forms, with or without\r
+ * modification, are permitted provided that the following conditions\r
+ * are met:\r
+ *\r
+ * Redistributions of source code must retain the above copyright\r
+ * notice, this list of conditions and the following disclaimer.\r
+ *\r
+ * Redistributions in binary form must reproduce the above copyright\r
+ * notice, this list of conditions and the following disclaimer in the\r
+ * documentation and/or other materials provided with the\r
+ * distribution.\r
+ *\r
+ * Neither the name of Texas Instruments Incorporated nor the names of\r
+ * its contributors may be used to endorse or promote products derived\r
+ * from this software without specific prior written permission.\r
+ *\r
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+ *\r
+ ******************************************************************************/\r
+\r
+\r
+/*******************************************************************************\r
+ *\r
+ * INCLUDES\r
+ *\r
+ ******************************************************************************/\r
+\r
+#include "../api/timl.h"\r
+\r
+\r
+/******************************************************************************/\r
+/*!\r
+ * \ingroup cnn\r
+ * \brief Free input layer\r
+ * \param[in] layer Layer ptr\r
+ * \return Error code\r
+ */\r
+/******************************************************************************/\r
+\r
+int timlCNNFreeInputLayer(timlCNNLayer * layer)\r
+{\r
+\r
+ // free params memory\r
+ if (layer->inputParams.shared == false) {\r
+ if (layer->inputParams.mean != NULL) {\r
+ timlUtilFreeAcc(layer->inputParams.mean);\r
+ layer->inputParams.mean = NULL;\r
+ }\r
+ }\r
+\r
+ if (layer->allocatorLevel != Util_AllocatorLevel3) {\r
+ if (layer->featureMap != NULL) {\r
+ timlUtilFreeAcc(layer->featureMap);\r
+ layer->featureMap = NULL;\r
+ }\r
+ if (layer->inputParams.inputData != NULL) {\r
+ timlUtilFreeAcc(layer->inputParams.inputData);\r
+ layer->inputParams.inputData = NULL;\r
+ }\r
+ if (layer->delta != NULL) {\r
+ timlUtilFreeAcc(layer->delta);\r
+ layer->delta = NULL;\r