Gitweb @ Texas Instruments - Open Source Git Repositories - git.TI.com/gitweb - ti-machine-learning/ti-machine-learning.git/blobdiff - src/app/cnn/class/cifar10/appCNNClassCIFAR10Training.c
1. Enable network state write/read
[ti-machine-learning/ti-machine-learning.git] / src / app / cnn / class / cifar10 / appCNNClassCIFAR10Training.c
index b082a84facf71d9ff97bd4ad82ea71b27d5c9495..b756e05aa048e0836e4a5483e3a0337335ffc3da 100644 (file)
@@ -55,6 +55,7 @@
 #define IMAGE_COL        32\r
 #define IMAGE_CHANNEL    3\r
 #define BATCH_SIZE       100\r
+#define EPOCH            10\r
 \r
 /*******************************************************************************\r
  *\r
@@ -77,7 +78,7 @@ int main()
 \r
 int appCNNClassCIFAR10Training()\r
 {\r
-   int              i;\r
+   int              i,j;\r
    int              dim;\r
    int              batchSize;\r
    int              batchNum;\r
@@ -98,8 +99,11 @@ int appCNNClassCIFAR10Training()
 \r
    // build up the CNN\r
    printf("1. Build up CNN\n");\r
-   timlConvNeuralNetwork *cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault(), 0);\r
+   timlConvNeuralNetwork *cnn = timlCNNCreateConvNeuralNetwork(timlCNNTrainingParamsDefault());\r
    cnn->params.learningRate = 0.01;\r
+   cnn->params.maxBatchSize = BATCH_SIZE;\r
+   cnn->params.batchSize = BATCH_SIZE;\r
+   cnn->params.batchUpdate = BATCH_SIZE;\r
    timlCNNAddInputLayer(cnn, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, timlCNNInputParamsDefault());  // input layer\r
    timlCNNAddConvLayer(cnn, 5, 5, 1, 1, 32, timlCNNConvParamsDefault());                         // conv layer\r
    timlCNNAddPoolingLayer(cnn, 3, 3, 2, 2, CNN_MaxPooling, timlCNNPoolingParamsDefault());       // max pooling layer\r
@@ -112,13 +116,11 @@ int appCNNClassCIFAR10Training()
    timlCNNAddPoolingLayer(cnn, 3, 3, 2, 2, CNN_MeanPooling, timlCNNPoolingParamsDefault());      // max pooling layer\r
    timlCNNAddLinearLayer(cnn, 64, timlCNNLinearParamsDefault());                                 // linear layer\r
    timlCNNAddLinearLayer(cnn, 10, timlCNNLinearParamsDefault());                                 // linear layer\r
-   timlCNNAddNonlinearLayer(cnn, Util_Softmax);                                                  // softmax layer\r
+   timlCNNAddSoftmaxCostLayer(cnn);                                                              // softmax cost layer\r
    timlCNNInitialize(cnn);\r
    timlCNNReset(cnn);\r
-   mem = timlCNNMemory(cnn);\r
    timlCNNPrint(cnn);\r
-   printf("CNN memory allocation = %.10f MB.\n", (float)mem/1024.0/1024.0);\r
-   printf("CNN parameter #       = %lu.\n", timlCNNGetParamsNum(cnn));\r
+\r
 \r
    // read the CIFAR10 database\r
    printf("2. Read CIFAR10 database\n");\r
@@ -128,8 +130,10 @@ int appCNNClassCIFAR10Training()
    printf("3. Start training\n");\r
    batchNum = training.num/batchSize;\r
    clock_gettime(CLOCK_REALTIME, &startTime);\r
-   for (i = 0; i < batchNum; i++) {\r
-      timlCNNSupervisedTrainingWithLabelBatchMode(cnn, training.data + i * batchSize * dim, training.label + i * batchSize, dim, batchSize);\r
+   for (j = 0; j < EPOCH; j++) {\r
+      for (i = 0; i < batchNum; i++) {\r
+         timlCNNSupervisedTrainingWithLabel(cnn, training.data + i*batchSize*dim, IMAGE_ROW, IMAGE_COL, IMAGE_CHANNEL, training.label + i*batchSize, 1, 1, batchSize);\r
+      }\r
    }\r
    clock_gettime(CLOCK_REALTIME, &endTime);\r
    trainingTime = timlUtilDiffTime(startTime, endTime);\r