diff --git a/modules/pytorch_jacinto_ai/xvision/datasets/ucf101.py b/modules/pytorch_jacinto_ai/xvision/datasets/ucf101.py
index 651729be470b9363b8ab70642b4ee3b16a1494f4..71f62257bcb5d3543abd32079e2b80ee66017493 100644
-import glob
import os
+from .folder import find_classes, make_dataset
from .video_utils import VideoClips
-from .utils import list_dir
-from .folder import make_dataset
from .vision import VisionDataset
class UCF101(VisionDataset):
"""
- UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset.
+ `UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset.
UCF101 is an action recognition video dataset.
This dataset considers every video as a collection of video clips of fixed size, specified
and returns a transformed version.
Returns:
- video (Tensor[T, H, W, C]): the `T` video frames
- audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
- and `L` is the number of points
- label (int): class of the video clip
+ tuple: A 3-tuple with the following entries:
+
+ - video (Tensor[T, H, W, C]): the `T` video frames
+ - audio (Tensor[K, L]): the audio frames, where `K` is the number of channels
+ and `L` is the number of points
+ - label (int): class of the video clip
"""
def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1,
- fold=1, train=True, transform=None):
+ frame_rate=None, fold=1, train=True, transform=None,
+ _precomputed_metadata=None, num_workers=1, _video_width=0,
+ _video_height=0, _video_min_dimension=0, _audio_samples=0):
super(UCF101, self).__init__(root)
if not 1 <= fold <= 3:
raise ValueError("fold should be between 1 and 3, got {}".format(fold))
self.fold = fold
self.train = train
- classes = list(sorted(list_dir(root)))
- class_to_idx = {classes[i]: i for i in range(len(classes))}
+ self.classes, class_to_idx = find_classes(self.root)
self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
- self.classes = classes
video_list = [x[0] for x in self.samples]
- video_clips = VideoClips(video_list, frames_per_clip, step_between_clips)
+ video_clips = VideoClips(
+ video_list,
+ frames_per_clip,
+ step_between_clips,
+ frame_rate,
+ _precomputed_metadata,
+ num_workers=num_workers,
+ _video_width=_video_width,
+ _video_height=_video_height,
+ _video_min_dimension=_video_min_dimension,
+ _audio_samples=_audio_samples,
+ )
+ # keep the full set of video clips so that the `metadata` property can
+ # report metadata for the full dataset rather than for the fold subset
+ self.full_video_clips = video_clips
self.indices = self._select_fold(video_list, annotation_path, fold, train)
self.video_clips = video_clips.subset(self.indices)
self.transform = transform
+ @property
+ def metadata(self):
+ return self.full_video_clips.metadata
+
def _select_fold(self, video_list, annotation_path, fold, train):
name = "train" if train else "test"
name = "{}list{:02d}.txt".format(name, fold)
with open(f, "r") as fid:
data = fid.readlines()
data = [x.strip().split(" ") for x in data]
- data = [x[0] for x in data]
+ data = [os.path.join(self.root, x[0]) for x in data]
selected_files.extend(data)
selected_files = set(selected_files)
- indices = [i for i in range(len(video_list)) if video_list[i][len(self.root) + 1:] in selected_files]
+ indices = [i for i in range(len(video_list)) if video_list[i] in selected_files]
return indices
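For context on the `_select_fold` change above: the split files (for example trainlist01.txt) list class-relative paths such as ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c01.avi, so the updated code joins each entry with `root` and matches it against the absolute paths in `video_list`, instead of stripping the root prefix from every video path. A minimal standalone sketch of that matching, using hypothetical paths:

    import os

    root = "/data/UCF-101"  # hypothetical dataset root
    video_list = [
        os.path.join(root, "ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c01.avi"),
        os.path.join(root, "Archery/v_Archery_g01_c01.avi"),
    ]

    # one line of trainlist01.txt has the form "<class>/<video>.avi <label>"
    annotation_line = "ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c01.avi 1"
    selected_files = {os.path.join(root, annotation_line.strip().split(" ")[0])}

    # same membership test as the updated _select_fold
    indices = [i for i, path in enumerate(video_list) if path in selected_files]
    print(indices)  # [0]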
def __len__(self):
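As a usage sketch (not part of the change above), the class can be constructed roughly as follows; the import path is taken from the file location, while the dataset directory, annotation directory, and clip parameters are assumptions for illustration:

    from pytorch_jacinto_ai.xvision.datasets.ucf101 import UCF101

    # hypothetical local paths: extracted UCF-101 videos and the official
    # train/test split files (trainlist01.txt, testlist01.txt, ...)
    dataset = UCF101(
        root="/data/UCF-101",
        annotation_path="/data/ucfTrainTestlist",
        frames_per_clip=16,
        step_between_clips=8,
        fold=1,
        train=True,
        num_workers=4,
    )

    video, audio, label = dataset[0]  # video: Tensor[T, H, W, C], label: int
    print(len(dataset), video.shape, label)

Building the clip index scans every video once, which is presumably why `_precomputed_metadata` and `num_workers` are exposed in the new signature: the metadata gathered by one instance (available through the `metadata` property) can be passed to another to avoid rescanning the videos.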