/******************************************************************************/
/*!
 * \file timlCNNLinearInitialize.c
 */
/* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 *    Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the
 *    distribution.
 *
 *    Neither the name of Texas Instruments Incorporated nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

/*******************************************************************************
 *
 * INCLUDES
 *
 ******************************************************************************/

#include "../api/timl.h"

/******************************************************************************/
/*!
 * \ingroup   cnn
 * \brief     Initialize the linear (fully connected) layer
 * \param[in] layer Pointer to the layer to initialize
 * \return    Error code (0 on success)
 */
/******************************************************************************/

int timlCNNLinearInitialize(timlCNNLayer *layer)
{
   int                   prevDim;
   int                   dim;
   timlConvNeuralNetwork *cnn;

   prevDim = layer->linearParams.prevDim;
   dim     = layer->linearParams.dim;
   cnn     = layer->cnn;
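
   /*
    * Allocator levels, as reflected by the branches below: level 1 keeps full
    * training state (feature map, delta, and gradient buffers), level 2 keeps
    * only a per-layer feature map for forward-only use, and level 3 places
    * the feature map in the network-wide shared memory pool.
    */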

   // common to levels 1, 2, and 3
   if (layer->linearParams.shared == false) {
      // allocate weight (prevDim*dim floats)
      if (timlUtilMalloc((void**) &(layer->linearParams.weight), sizeof(float)*prevDim*dim) != 0) {
         return ERROR_CNN_LAYER_ALLOCATION;
      }
      // allocate bias (dim floats)
      if (timlUtilMalloc((void**) &(layer->linearParams.bias), sizeof(float)*dim) != 0) {
         return ERROR_CNN_LAYER_ALLOCATION;
      }
   }
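
   /*
    * Note: when linearParams.shared is true, the weight and bias buffers are
    * presumably supplied by another layer or network instance sharing the
    * same parameters, so no fresh allocation is performed here.
    */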

   // level 1
   if (layer->allocatorLevel == Util_AllocatorLevel1) {
      // allocate feature map
      if (timlUtilMalloc((void**) &(layer->featureMap), sizeof(float)*dim) != 0) {
         return ERROR_CNN_LAYER_ALLOCATION;
      }

      // allocate feature map delta
      if (timlUtilMalloc((void**) &(layer->delta), sizeof(float)*dim) != 0) {
         return ERROR_CNN_LAYER_ALLOCATION;
      }

      if (layer->linearParams.shared == false) {
         // allocate weightInc
         if (timlUtilMalloc((void**) &(layer->linearParams.weightInc), sizeof(float)*prevDim*dim) != 0) {
            return ERROR_CNN_LAYER_ALLOCATION;
         }

         // allocate weightGradAccum
         if (timlUtilMalloc((void**) &(layer->linearParams.weightGradAccum), sizeof(float)*prevDim*dim) != 0) {
            return ERROR_CNN_LAYER_ALLOCATION;
         }

         // allocate biasGradAccum
         if (timlUtilMalloc((void**) &(layer->linearParams.biasGradAccum), sizeof(float)*dim) != 0) {
            return ERROR_CNN_LAYER_ALLOCATION;
         }

         // allocate biasInc
         if (timlUtilMalloc((void**) &(layer->linearParams.biasInc), sizeof(float)*dim) != 0) {
            return ERROR_CNN_LAYER_ALLOCATION;
         }
      }
   }
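
   /*
    * The Inc buffers above presumably hold the per-step parameter update
    * (e.g., a momentum term), while the GradAccum buffers accumulate
    * gradients across a batch; both are needed only for training, which is
    * why they are allocated at level 1 alone.
    */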

   // level 2
   if (layer->allocatorLevel == Util_AllocatorLevel2) {
      // allocate feature map
      if (timlUtilMalloc((void**) &(layer->featureMap), sizeof(float)*dim) != 0) {
         return ERROR_CNN_LAYER_ALLOCATION;
      }
   }
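
   /*
    * Level 3 below double-buffers within one shared pool: consecutive layers
    * alternate between the front and the back of cnn->memPool, so a layer's
    * output never overwrites its input (the previous layer's output) while
    * it is still being read. The pointer arithmetic appears to assume that
    * memPool points to float and memPoolSize is counted in floats, and that
    * channel*row*col equals the feature-map size (dim) for a linear layer.
    */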

   // level 3
   if (layer->allocatorLevel == Util_AllocatorLevel3) {
      if (layer->id%2 == 0) { // layers 2, 4, 6, 8, ... allocate at the back end
         layer->featureMap = cnn->memPool + cnn->memPoolSize - layer->channel*layer->row*layer->col;
      }
      else { // layers 1, 3, 5, ... allocate at the front end
         layer->featureMap = cnn->memPool;
      }
   }

   return 0;
}
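
/*
 * A minimal usage sketch (illustrative, not part of the original file): the
 * field values below are assumptions chosen for the example; in practice the
 * layer would be built and wired up by the TIML network construction
 * routines before initialization is invoked.
 *
 *    timlCNNLayer layer = {0};                          // hypothetical standalone layer
 *    layer.linearParams.prevDim = 256;                  // input dimension (assumed)
 *    layer.linearParams.dim     = 10;                   // output dimension (assumed)
 *    layer.linearParams.shared  = false;                // layer owns its weight/bias
 *    layer.allocatorLevel       = Util_AllocatorLevel1; // full training state
 *    // layer.cnn must point to the owning network if level 3 is used,
 *    // since that path reads cnn->memPool and cnn->memPoolSize
 *    if (timlCNNLinearInitialize(&layer) != 0) {
 *       // allocation failed (ERROR_CNN_LAYER_ALLOCATION)
 *    }
 */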