remove unused files
author Manu Mathew <a0393608@ti.com>
Thu, 20 Feb 2020 09:29:36 +0000 (14:59 +0530)
committer Manu Mathew <a0393608@ti.com>
Thu, 20 Feb 2020 09:29:36 +0000 (14:59 +0530)
modules/pytorch_jacinto_ai/vision/datasets/pixel2pixel/calculate_class_weight.py [deleted file]
modules/pytorch_jacinto_ai/xnn/layers/deconv_blocks.py [deleted file]
modules/pytorch_jacinto_ai/xnn/utils/weights_init.py [deleted file]

diff --git a/modules/pytorch_jacinto_ai/vision/datasets/pixel2pixel/calculate_class_weight.py b/modules/pytorch_jacinto_ai/vision/datasets/pixel2pixel/calculate_class_weight.py
deleted file mode 100644 (file)
index a75269c..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-import numpy as np
-import os
-import scipy.misc as misc
-from .... import xnn
-
-from .cityscapes_plus import CityscapesBaseSegmentationLoader, CityscapesBaseMotionLoader
-
-
-def calc_median_frequency(classes, present_num):
-    """
-    Class balancing by median frequency balancing method.
-    Reference: https://arxiv.org/pdf/1411.4734.pdf
-       'a = median_freq / freq(c) where freq(c) is the number of pixels
-        of class c divided by the total number of pixels in images where
-        c is present, and median_freq is the median of these frequencies.'
-    """
-    class_freq = classes / present_num
-    median_freq = np.median(class_freq)
-    return median_freq / class_freq
-
-
-def calc_log_frequency(classes, value=1.02):
-    """Class balancing by ERFNet method.
-       prob = each_sum_pixel / each_sum_pixel.max()
-       a = 1 / (log(1.02 + prob)).
-    """
-    class_freq = classes / classes.sum()  # ERFNet is max, but ERFNet is sum
-    # print(class_freq)
-    # print(np.log(value + class_freq))
-    return 1 / np.log(value + class_freq)
-
-
-if __name__ == '__main__':
-
-    method = "median"
-    result_path = "/afs/cg.cs.tu-bs.de/home/zhang/SEDPShuffleNet/datasets"
-
-    traval = "gtFine"
-    imgs_path = "./data/tiad/data/leftImg8bit/train"    #"./data/cityscapes/data/leftImg8bit/train"   #"./data/TIAD/data/leftImg8bit/train"
-    lbls_path = "./data/tiad/data/gtFine/train"         #"./data/cityscapes/data/gtFine/train"   # "./data/tiad/data/gtFine/train"  #"./data/cityscapes_frame_pair/data/gtFine/train"
-    labels = xnn.utils.recursive_glob(rootdir=lbls_path, suffix='labelTrainIds_motion.png')  #'labelTrainIds_motion.png'  #'labelTrainIds.png'
-
-    num_classes = 2       #5  #2
-
-    local_path = "./data/checkpoints"
-    dst = CityscapesBaseMotionLoader() #TiadBaseSegmentationLoader()  #CityscapesBaseSegmentationLoader()  #CityscapesBaseMotionLoader()
-
-    classes, present_num = ([0 for i in range(num_classes)] for i in range(2))
-
-    for idx, lbl_path in enumerate(labels):
-        lbl = misc.imread(lbl_path)
-        lbl = dst.encode_segmap(np.array(lbl, dtype=np.uint8))
-
-        for nc in range(num_classes):
-            num_pixel = (lbl == nc).sum()
-            if num_pixel:
-                classes[nc] += num_pixel
-                present_num[nc] += 1
-
-    if 0 in classes:
-        raise Exception("Some classes are not found")
-
-    classes = np.array(classes, dtype="f")
-    presetn_num = np.array(classes, dtype="f")
-    if method == "median":
-        class_weight = calc_median_frequency(classes, present_num)
-    elif method == "log":
-        class_weight = calc_log_frequency(classes)
-    else:
-        raise Exception("Please assign method to 'mean' or 'log'")
-
-    print("class weight", class_weight)
-    print("Done!")
diff --git a/modules/pytorch_jacinto_ai/xnn/layers/deconv_blocks.py b/modules/pytorch_jacinto_ai/xnn/layers/deconv_blocks.py
deleted file mode 100644 (file)
index e91c958..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-import torch
-from .conv_blocks import *
-from .layer_config import *
-from .common_blocks import *
-
-###############################################################
-def UpsampleTo(input_channels, output_channels, upstride, interpolation_type, interpolation_mode):
-    upsample = []
-    if interpolation_type == 'upsample':
-        upsample = [ResizeTo(mode=interpolation_mode)]
-    elif interpolation_type == 'deconv':
-        upsample = [SplitListTakeFirst(),
-                    DeConvDWLayer2d(input_channels, output_channels, kernel_size=upstride * 2, stride=upstride)]
-    elif interpolation_type == 'upsample_conv':
-        upsample = [ResizeTo(mode=interpolation_mode),
-                    ConvDWLayer2d(input_channels, output_channels, kernel_size=int(upstride * 1.5 + 1))]
-    #
-    upsample = torch.nn.Sequential(*upsample)
-    return upsample
-
-
-class UpsampleGenericTo(torch.nn.Module):
-    def __init__(self, input_channels, output_channels, upstride, interpolation_type, interpolation_mode):
-        super().__init__()
-        self.upsample_list = torch.nn.ModuleList()
-        self.upstride_list = []
-        while upstride >= 2:
-            upstride_layer = 4 if upstride > 4 else upstride
-            upsample = UpsampleTo(input_channels, output_channels, upstride_layer, interpolation_type, interpolation_mode)
-            self.upsample_list.append(upsample)
-            self.upstride_list.append(upstride_layer)
-            upstride = upstride//4
-
-    def forward(self, x):
-        assert isinstance(x, (list,tuple)) and len(x)==2, 'input must be a tuple/list of size 2'
-        x, x_target = x
-        xt_shape = x.shape
-        for idx, (upsample, upstride) in enumerate(zip(self.upsample_list,self.upstride_list)):
-            xt_shape = (xt_shape[0], xt_shape[1], xt_shape[2]*upstride, xt_shape[3]*upstride)
-            xt = torch.zeros(xt_shape).to(x.device)
-            x = upsample((x, xt))
-            xt_shape = x.shape
-        #
-        return x
-
-
-############################################################### 
-def DeConvLayer2d(in_planes, out_planes, kernel_size, stride=1, groups=1, dilation=1, padding=None, output_padding=None, bias=False):
-    """convolution with padding"""
-    if (output_padding is None) and (padding is None):
-        if kernel_size % 2 == 0:
-            padding = (kernel_size - stride) // 2
-            output_padding = 0
-        else:
-            padding = (kernel_size - stride + 1) // 2
-            output_padding = 1
-
-    return torch.nn.ConvTranspose2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding,
-                                    output_padding=output_padding, bias=bias, groups=groups)
-
-
-def DeConvDWLayer2d(in_planes, out_planes, stride=1, dilation=1, kernel_size=None, padding=None, output_padding=None, bias=False):
-    """convolution with padding"""
-    assert in_planes == out_planes, 'in DW layer channels must not change'
-    return DeConvLayer2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=in_planes,
-                       padding=padding, output_padding=output_padding, bias=bias)
-    
-
-############################################################### 
-def DeConvBNAct(in_planes, out_planes, kernel_size=None, stride=1, groups=1, dilation=1, padding=None, output_padding=None, bias=False, \
-              normalization=DefaultNorm2d, activation=DefaultAct2d):
-    """convolution with padding, BN, ReLU"""
-    if (output_padding is None) and (padding is None):
-        if kernel_size % 2 == 0:
-            padding = (kernel_size - stride) // 2
-            output_padding = 0
-        else:
-            padding = (kernel_size - stride + 1) // 2
-            output_padding = 1
-
-    if activation is True:
-        activation = DefaultAct2d
-
-    if normalization is True:
-        normalization = DefaultNorm2d
-
-    layers = [torch.nn.ConvTranspose2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding,
-                                       output_padding=output_padding, bias=bias, groups=groups)]
-    if normalization:
-        layers.append(normalization(out_planes))
-
-    if activation:
-        layers.append(activation(inplace=True))
-    #
-    layers = torch.nn.Sequential(*layers)
-    return layers
-
-    
-def DeConvDWBNAct(in_planes, out_planes, stride=1, kernel_size=None, dilation=1, padding=None, output_padding=None, bias=False,
-                  normalization=DefaultNorm2d, activation=DefaultAct2d):
-    """convolution with padding, BN, ReLU"""
-    assert in_planes == out_planes, 'in DW layer channels must not change'
-    return DeConvBNAct(in_planes, out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding, output_padding=output_padding,
-                       bias=bias, groups=in_planes, normalization=normalization, activation=activation)
-
-
-###########################################################
-def DeConvDWSepBNAct(in_planes, out_planes, stride=1, kernel_size=None, groups=1, dilation=1, bias=False, \
-                   first_1x1=False, normalization=(DefaultNorm2d,DefaultNorm2d), activation=(DefaultAct2d,DefaultAct2d)):
-    if first_1x1:
-        layers = [
-            ConvNormAct2d(in_planes, out_planes, kernel_size=1, groups=groups, bias=bias,
-                      normalization=normalization[0], activation=activation[0]),
-            DeConvDWBNAct(out_planes, out_planes, stride=stride, kernel_size=kernel_size, dilation=dilation, bias=bias,
-                        normalization=normalization[1], activation=activation[1])]
-    else:
-        layers = [DeConvDWBNAct(in_planes, in_planes, stride=stride, kernel_size=kernel_size, dilation=dilation, bias=bias,
-                              normalization=normalization[0], activation=activation[0]),
-                  ConvNormAct2d(in_planes, out_planes, groups=groups, kernel_size=1, bias=bias,
-                            normalization=normalization[1], activation=activation[1])]
-
-    layers = torch.nn.Sequential(*layers)
-    return layers
-
diff --git a/modules/pytorch_jacinto_ai/xnn/utils/weights_init.py b/modules/pytorch_jacinto_ai/xnn/utils/weights_init.py
deleted file mode 100644 (file)
index 22f638d..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-import torch
-
-def module_weights_init(module):
-    # weight initialization
-    for m in module.modules():
-        if isinstance(m, torch.nn.Conv2d):
-            torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-            if m.bias is not None:
-                torch.nn.init.zeros_(m.bias)
-        elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.GroupNorm)):
-            torch.nn.init.ones_(m.weight)
-            if m.bias is not None:
-                torch.nn.init.zeros_(m.bias)
-        elif isinstance(m, torch.nn.Linear):
-            torch.nn.init.normal_(m.weight, 0, 0.01)
-            torch.nn.init.zeros_(m.bias)
\ No newline at end of file