updated python package requirements (tensorflow is no longer needed for tensorboard). not...
author    Manu Mathew <a0393608@ti.com>
          Tue, 30 Jun 2020 08:37:56 +0000 (14:07 +0530)
committer Manu Mathew <a0393608@ti.com>
          Tue, 30 Jun 2020 08:42:15 +0000 (14:12 +0530)
release commit

modules/pytorch_jacinto_ai/engine/train_pixel2pixel.py
modules/pytorch_jacinto_ai/xnn/layers/__init__.py
modules/pytorch_jacinto_ai/xnn/layers/activation.py
modules/pytorch_jacinto_ai/xnn/layers/conv_blocks.py
modules/pytorch_jacinto_ai/xnn/layers/model_utils.py
modules/pytorch_jacinto_ai/xnn/quantize/quant_graph_module.py
modules/pytorch_jacinto_ai/xnn/utils/logger.py
requirements.txt
requirements_conda.txt [new file with mode: 0644]
setup.sh

diff --git a/modules/pytorch_jacinto_ai/engine/train_pixel2pixel.py b/modules/pytorch_jacinto_ai/engine/train_pixel2pixel.py
index 6f6ffd92acc1e20f49401f507ae4ede3242561ea..0602f3296b024e9d9a5ed54cad5f113e9da0d95e 100644 (file)
@@ -13,7 +13,7 @@ import torch.onnx
 import onnx
 
 import datetime
-from tensorboardX import SummaryWriter
+from torch.utils.tensorboard import SummaryWriter
 import numpy as np
 import random
 import cv2
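
Only the import changes here: torch.utils.tensorboard provides the same SummaryWriter interface as tensorboardX for the common calls, backed by the standalone tensorboard package instead of tensorflow. A minimal sketch (log directory and tag names are illustrative):

    from torch.utils.tensorboard import SummaryWriter

    # needs only `pip install tensorboard`, not tensorflow
    writer = SummaryWriter(log_dir='./logs/run1')  # hypothetical log dir
    for step in range(10):
        writer.add_scalar('train/loss', 1.0 / (step + 1), step)
    writer.close()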
diff --git a/modules/pytorch_jacinto_ai/xnn/layers/__init__.py b/modules/pytorch_jacinto_ai/xnn/layers/__init__.py
index bf977cf99ec4ffc3491528c3ba7c6da0bab225e8..a623edd0a1c4d01a6c3febfb5cda2ff9049f774c 100644 (file)
@@ -1,8 +1,7 @@
-from .model_utils import *
 from .import functional
+from .layer_config import *
 from .normalization import *
 from .activation import *
-from .layer_config import *
 
 from .common_blocks import *
 from .conv_blocks import *
@@ -12,6 +11,8 @@ from .resize_blocks import *
 from .multi_task import *
 from .rf_blocks import *
 
+from .model_utils import *
+
 # optional/experimental
 try:
     from .blocks_internal import *
diff --git a/modules/pytorch_jacinto_ai/xnn/layers/activation.py b/modules/pytorch_jacinto_ai/xnn/layers/activation.py
index a70b464297156e1b4610e5f8c497d627a1a26ac1..fafb6af71b68430bc983883cfaae1026e9df2373 100644 (file)
@@ -13,7 +13,7 @@ class PAct2(torch.nn.Module):
     PACT2_RANGE_LEARN = False   # False : Running Avg, True  : Backprop
     PACT2_RANGE_SHRINK = 0.01   # 0.01
     PACT2_RANGE_INIT = 8.0      # this is the starting range
-    PACT2_RANGE_EXPANSION = 1.1 # expand the calculated range for margin
+    PACT2_RANGE_EXPANSION = 1.0 # expand the calculated range for margin
 
     def __init__(self, inplace=False, signed=None, percentile_range_shrink=PACT2_RANGE_SHRINK, clip_range=None,
                  power2_activation_range=True, **kwargs):
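
PACT2_RANGE_EXPANSION drops from 1.1 to 1.0, i.e. the estimated activation range is now used without an extra safety margin. A sketch of what the constant does, based only on the comment above (the actual PAct2 internals are not shown in this hunk, so this is illustrative only):

    # illustrative only: how an expansion factor widens an estimated clip range
    PACT2_RANGE_EXPANSION = 1.0
    estimated_max = 6.3   # e.g. a running average of the observed activation max
    clip_max = estimated_max * PACT2_RANGE_EXPANSION   # 1.0 => no margin added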
diff --git a/modules/pytorch_jacinto_ai/xnn/layers/conv_blocks.py b/modules/pytorch_jacinto_ai/xnn/layers/conv_blocks.py
index b6fa6488b084e09ad0b168053a22473175a93f57..4160918cc3e5bc32965ef07cbfdaff57f9da7630 100644 (file)
@@ -1,14 +1,17 @@
 import torch
 from .layer_config import *
 from . import functional
+from . import common_blocks
 
-def check_groups(in_planes, out_planes, groups, group_size):
+def check_groups(in_planes, out_planes, groups, group_size, with_assert=True):
     assert groups is None or group_size is None, 'only one of groups or group_size must be specified'
     assert groups is not None or group_size is not None, 'at least one of groups or group_size must be specified'
     groups = (in_planes//group_size) if groups is None else groups
     group_size = (in_planes//groups) if group_size is None else group_size
-    assert in_planes%groups == 0, 'in_planes must be a multiple of groups'
-    assert group_size != 1 or in_planes == out_planes, 'in DW layer channels must not change'
+    if with_assert:
+        assert in_planes%groups == 0, 'in_planes must be a multiple of groups'
+        assert group_size != 1 or in_planes == out_planes, 'in DW layer channels must not change'
+    #
     return groups, group_size
 
 ############################################################### 
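
The new with_assert flag lets callers skip the divisibility checks, obtain a feasible grouping first, and round the channel count themselves, as ConvDWTripletNormAct2d does further down. A small illustration with invented values, assuming check_groups is imported from this module:

    # groups=None, group_size=1 selects depthwise grouping: groups == in_planes
    g, gs = check_groups(32, 32, groups=None, group_size=1)    # -> (32, 1)

    # 30 channels with group_size=4 is not divisible; with_assert=False returns
    # groups=7 so the caller can round the channels to (30 // 7) * 7 = 28
    g, gs = check_groups(30, 30, groups=None, group_size=4, with_assert=False)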
@@ -47,7 +50,7 @@ def ConvNormAct2d(in_planes, out_planes, kernel_size=None, stride=1, groups=1, d
         layers.append(normalization(out_planes))
 
     if activation:
-        layers.append(activation(inplace=True))
+        layers.append(activation())
 
     layers = torch.nn.Sequential(*layers)
     return layers
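
Dropping the hard-coded inplace=True means the activation factory is now called with its own defaults, so factories that do not accept an inplace argument (or, like PAct2 above, handle it themselves) can be passed in unchanged. A hypothetical usage, assuming ConvNormAct2d is imported from this module:

    import torch

    # any zero-argument-constructible activation works as the factory now
    block = ConvNormAct2d(32, 64, kernel_size=3, activation=torch.nn.ReLU)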
@@ -63,8 +66,9 @@ def ConvDWNormAct2d(in_planes, out_planes, kernel_size=None, stride=1, dilation=
 
 
 ###########################################################
-def ConvDWSepNormAct2d(in_planes, out_planes, kernel_size=None, stride=1, groups=1, groups_dw=None, group_size_dw=None, dilation=1, bias=False, padding=None, \
-                   first_1x1=False, normalization=(DefaultNorm2d,DefaultNorm2d), activation=(DefaultAct2d,DefaultAct2d)):
+def ConvDWSepNormAct2d(in_planes, out_planes, kernel_size=None, stride=1,
+                       groups=1, groups_dw=None, group_size_dw=None, dilation=1, bias=False, padding=None, \
+                       first_1x1=False, normalization=(DefaultNorm2d,DefaultNorm2d), activation=(DefaultAct2d,DefaultAct2d)):
     bias = bias if isinstance(bias, (list,tuple)) else (bias,bias)
     if first_1x1:
         layers = [ConvNormAct2d(in_planes, out_planes, kernel_size=1, bias=bias[0], groups=groups,
@@ -82,3 +86,88 @@ def ConvDWSepNormAct2d(in_planes, out_planes, kernel_size=None, stride=1, groups
     layers = torch.nn.Sequential(*layers)
     return layers
 
+
+###########################################################
+def ConvDWTripletNormAct2d(in_planes, out_planes, kernel_size=None, stride=1,
+                           groups=1, groups_dw=None, group_size_dw=None, dilation=1, bias=False, padding=None, \
+                           intermediate_planes=None, expansion=1,
+                           normalization=(DefaultNorm2d,DefaultNorm2d,DefaultNorm2d),
+                           activation=(DefaultAct2d,DefaultAct2d,DefaultAct2d)):
+    bias = bias if isinstance(bias, (list,tuple)) else (bias,bias,bias)
+
+    if intermediate_planes is None:
+        intermediate_planes = in_planes*expansion
+    #
+    groups_dw_, group_size_dw_ = check_groups(intermediate_planes, intermediate_planes, groups_dw, group_size_dw, with_assert=False)
+    intermediate_planes = (intermediate_planes//groups_dw_) * groups_dw_
+
+    layers = [ConvNormAct2d(in_planes, intermediate_planes, kernel_size=1, bias=bias[0], groups=groups,
+                  normalization=normalization[0], activation=activation[0]),
+              ConvDWNormAct2d(intermediate_planes, intermediate_planes, kernel_size=kernel_size, stride=stride, dilation=dilation, bias=bias[1],
+                  groups_dw=groups_dw, group_size_dw=group_size_dw,
+                  padding=padding, normalization=normalization[1], activation=activation[1]),
+              ConvNormAct2d(intermediate_planes, out_planes, kernel_size=1, bias=bias[2], groups=groups,
+                            normalization=normalization[2], activation=activation[2])]
+
+    layers = torch.nn.Sequential(*layers)
+    return layers
+
+
+class ConvDWTripletRes2d(torch.nn.Module):
+    def __init__(self, *args, with_residual=True, force_residual=False, activation_after_residual=True, **kwargs):
+        super().__init__()
+
+        in_planes = args[0]
+        out_planes = args[1]
+
+        # kernel_size = kwargs.get('kernel_size', None) or args[2]
+        stride = kwargs.get('stride', 1) if len(args)<4 else args[3]
+        bias = kwargs.get('bias', False)
+        normalization = list(kwargs.get('normalization', [True,True,True]))
+        activation = list(kwargs.get('activation', [True,True,True]))
+
+        assert isinstance(normalization, (list, tuple)) and len(normalization) == 3, \
+            'normalization must be a list/tuple with length 3'
+        assert isinstance(activation, (list, tuple)) and len(activation) == 3, \
+            'activation must be a list/tuple with length 3'
+
+        is_shape_same = (in_planes == out_planes) and (stride == 1)
+        self.use_residual = (with_residual and is_shape_same) or force_residual
+
+        if self.use_residual:
+            if activation_after_residual:
+                # remove the last activation fn from the activation list in kwargs
+                # before creating the ConvDWTripletNormAct2d module
+                activation_res, activation[-1] = activation[-1], False
+                kwargs['activation'] = activation
+
+        self.conv = ConvDWTripletNormAct2d(*args, **kwargs)
+
+        if self.use_residual:
+            # create residual connection if required
+            if not is_shape_same:
+                bias_last = bias[-1] if isinstance(bias, (list, tuple)) else bias
+                self.res = ConvNormAct2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=bias_last,
+                                normalization=normalization[-1], activation=activation[-1])
+            else:
+                self.res = None
+            #
+            # the residual addition module
+            self.add = common_blocks.AddBlock()
+            # create the last activation module if required
+            if activation_after_residual:
+                activation_res = DefaultAct2d if (activation_res is True) else activation_res
+                self.act = activation_res() if activation_res else None
+            else:
+                self.act = None
+            #
+        #
+
+    def forward(self, x):
+        y = self.conv(x)
+        if self.use_residual:
+            x = self.res(x) if self.res is not None else x
+            y = self.add((x,y))
+            y = self.act(y) if self.act is not None else y
+        #
+        return y
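
ConvDWTripletNormAct2d stacks 1x1 / depthwise / 1x1 stages (the inverted-residual pattern), and ConvDWTripletRes2d wraps it with an optional shortcut plus a post-add activation. A hypothetical usage with invented shapes; group_size_dw=1 selects a true depthwise middle stage, and the import path is assumed from this repo's layout:

    import torch
    from pytorch_jacinto_ai.xnn.layers import (ConvDWTripletNormAct2d,
                                               ConvDWTripletRes2d)

    # expansion block: 32 -> 192 (1x1) -> 192 (3x3 DW, stride 2) -> 64 (1x1)
    block = ConvDWTripletNormAct2d(32, 64, kernel_size=3, stride=2,
                                   expansion=6, group_size_dw=1)
    y = block(torch.randn(1, 32, 56, 56))      # -> (1, 64, 28, 28)

    # same channels and stride 1, so the identity shortcut and add are used
    res_block = ConvDWTripletRes2d(64, 64, kernel_size=3, stride=1,
                                   group_size_dw=1)
    z = res_block(torch.randn(1, 64, 28, 28))  # -> (1, 64, 28, 28)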
diff --git a/modules/pytorch_jacinto_ai/xnn/layers/model_utils.py b/modules/pytorch_jacinto_ai/xnn/layers/model_utils.py
index a52496f61848288dc53b4fe488a4011fefd90065..bdffbbdcebc1e7fcf416bedacce48a2723886950 100644 (file)
@@ -1,8 +1,37 @@
 import torch
-from . import activation
-from . import common_blocks
-from .. import utils
 
+def _get_last_bias_module_sequential(module):
+    last_m = None
+    for m in list(module)[::-1]:
+        if isinstance(m, torch.nn.Sequential):
+            return _get_last_bias_module_sequential(m)
+        elif hasattr(m, 'bias') and m.bias is not None:
+            return m
+        #
+    return last_m
+
+
+def get_last_bias_modules(module):
+    last_ms = []
+    if hasattr(module, 'conv') and hasattr(module, 'res'):
+        last_ms.append(_get_last_bias_module_sequential(module.conv))
+        if module.res is not None:
+            last_ms.append(_get_last_bias_module_sequential(module.res))
+        #
+    elif isinstance(module, torch.nn.Sequential):
+        last_ms.append(_get_last_bias_module_sequential(module))
+    elif hasattr(module, 'bias') and module.bias is not None:
+        last_ms.append(module)
+    else:
+        for m in list(module.modules())[::-1]:
+            if hasattr(m, 'bias') and m.bias is not None:
+                last_ms.append(m)
+                break
+            #
+        #
+    #
+    last_ms = [m for m in last_ms if m is not None]
+    return last_ms
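
A hypothetical use of the new helpers: locating the trailing layer that carries a bias so it can be adjusted after construction, e.g. zero-initialized (get_last_bias_modules is assumed to be importable from this module):

    import torch

    seq = torch.nn.Sequential(torch.nn.Conv2d(8, 8, 3, bias=True),
                              torch.nn.Conv2d(8, 16, 1, bias=True))
    for m in get_last_bias_modules(seq):   # finds the trailing 1x1 conv
        torch.nn.init.zeros_(m.bias)

For modules with conv and res attributes (such as ConvDWTripletRes2d above), both the main and residual tails are returned.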
 
 
 
diff --git a/modules/pytorch_jacinto_ai/xnn/quantize/quant_graph_module.py b/modules/pytorch_jacinto_ai/xnn/quantize/quant_graph_module.py
index b9dc4c1311bb2f90480a789efd8947ee01f9b727..2a5b60d94833ee2a58a3e0abee85aa930a91dffd 100644 (file)
@@ -135,8 +135,8 @@ class QuantGraphModule(HookedModule):
                 #
             elif qparams.quantize_in:
                 if not hasattr(module, 'activation_in'):
-                    # do not want to clip input, so set percentile_range_shrink=0.0
-                    activation_in = layers.PAct2(signed=None, percentile_range_shrink=0.0)
+                    # TODO: set percentile_range_shrink=0.0 to avoid shrinking of input range, if needed.
+                    activation_in = layers.PAct2(signed=None)
                     activation_in.train(self.training)
                     module.activation_in = activation_in
                 #
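
activation_in now relies on PAct2's class default percentile_range_shrink (0.01, defined in activation.py above), which clips percentile outliers when estimating the input range; the removed line shows how to keep the unshrunk range if that ever proves necessary, per the TODO:

    # illustrative: explicit forms of the new and old behaviour
    activation_in = layers.PAct2(signed=None)                               # shrink = 0.01
    activation_in = layers.PAct2(signed=None, percentile_range_shrink=0.0)  # no shrink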
index 7818563d52db910501e08bb681c4df487813720b..27b9371f5e16204692c43f8cc7c502bd7e9b1ee6 100644 (file)
@@ -23,11 +23,12 @@ class BasicLogger:
 
 
 class TeeLogger:
-    def __init__(self, filename, log_level=logging.INFO):
+    def __init__(self, filename, log_level=logging.INFO, append=False):
         assert log_level == logging.INFO, 'for now we support only INFO logging level'
 
+        mode = "a" if append else "w"
         self.term = sys.stdout
-        self.file = open(filename, "w")
+        self.file = open(filename, mode)
         sys.stdout = self
         self.count = 0
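
A hypothetical usage of the new append flag, e.g. to keep one log file across resumed runs (TeeLogger mirrors everything to the terminal as well, since it replaces sys.stdout):

    logger = TeeLogger('training.log', append=True)   # reopens with mode "a"
    print('this line goes to both the terminal and training.log')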
 
diff --git a/requirements.txt b/requirements.txt
index 3587d29a79f73a9b5c0d9df8ac8d91768339adf8..8546ff7dbc6c58fe246ff7ad55a866b4befd5a31 100644 (file)
@@ -6,13 +6,8 @@ pillow
 colorama
 progiter
 protobuf
-h5py
-hdf5
-opencv
-tensorflow-gpu
-tensorboardx
-cudatoolkit
-pytorch
+torch
 torchvision
+tensorboard
 onnx
 packaging
diff --git a/requirements_conda.txt b/requirements_conda.txt
new file mode 100644 (file)
index 0000000..c552abf
--- /dev/null
@@ -0,0 +1,3 @@
+h5py
+hdf5
+opencv
\ No newline at end of file
diff --git a/setup.sh b/setup.sh
index 3e26b7c518015e95ce5002e8a5e7a7957a7c9f29..abfcf245e9e0b3bb355e5eafcca72385247e28a0 100755 (executable)
--- a/setup.sh
+++ b/setup.sh
@@ -10,7 +10,9 @@ fi
 ######################################################################
 # Installing dependencies
 echo 'Installing python packages...'
-while read req; do echo ---------- $req ----------; conda install --yes $req || pip install $req; done < requirements.txt
+#while read req; do echo ---------- $req ----------; conda install --yes $req || pip install $req; done < requirements.txt
+conda install --yes --file requirements_conda.txt
+pip install -r requirements.txt
 
 ######################################################################
 #NOTE: THIS STEP INSTALLS THE EDITABLE LOCAL MODULE pytorch-jacinto-ai
@@ -18,10 +20,6 @@ while read req; do echo ---------- $req ----------; conda install --yes $req ||
 echo 'Installing pytorch-jacinto-ai as a local module using setup.py'
 pip install -e ./
 
-######################################################################
-# Installing dependencies for mmdetection - call this seperately
-# Not calling this here, as everyone doesn't need to use object detection
-#./setup_mmdetection.sh
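
After running setup.sh, the dependency split can be sanity-checked from Python; a quick sketch using only packages named in the updated requirements files:

    # all of these should import without tensorflow being installed
    import torch, torchvision, onnx
    from torch.utils.tensorboard import SummaryWriter
    print(torch.__version__)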