import urllib.parse

import datasets
import numpy as np
import pandas as pd
import requests

_CITATION = """\
@inproceedings{Wu2020not,
    title={Not only Look, but also Listen: Learning Multimodal Violence Detection under Weak Supervision},
    author={Wu, Peng and Liu, Jing and Shi, Yujia and Sun, Yujia and Shao, Fangtao and Wu, Zhaoyang and Yang, Zhiwei},
    booktitle={European Conference on Computer Vision (ECCV)},
    year={2020}
}
"""

_DESCRIPTION = """\
Dataset for the paper "Not only Look, but also Listen: Learning Multimodal Violence Detection under Weak Supervision". \
The dataset is downloaded from the authors' website (https://roc-ng.github.io/XD-Violence/). It is hosted on Hugging Face \
only to make it easier for my own project to use. Please cite the original paper if you use this dataset.
"""

_NAME = "xd-violence"

_HOMEPAGE = f"https://huggingface.co/datasets/jherng/{_NAME}"

_LICENSE = "MIT"

_URL = f"https://huggingface.co/datasets/jherng/{_NAME}/resolve/main/data/"


def _remove_mp4_suffix(name):
    """Drops a trailing ".mp4" extension from a file name, if present."""
    return name[: -len(".mp4")] if name.endswith(".mp4") else name


class XDViolenceConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        """BuilderConfig for XD-Violence.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(XDViolenceConfig, self).__init__(**kwargs)


class XDViolence(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        XDViolenceConfig(
            name="video",
            description="Video dataset.",
        ),
        XDViolenceConfig(
            name="i3d_rgb",
            description="RGB features of the dataset extracted with pretrained I3D ResNet50 model.",
        ),
        # TODO: Add swin_rgb features
        # XDViolenceConfig(
        #     name="swin_rgb",
        #     description="RGB features of the dataset extracted with pretrained Video Swin Transformer model.",
        # ),
    ]

    DEFAULT_CONFIG_NAME = "video"
    BUILDER_CONFIG_CLASS = XDViolenceConfig

    CODE2IDX = {
        "A": 0,  # Normal
        "B1": 1,  # Fighting
        "B2": 2,  # Shooting
        "B4": 3,  # Riot
        "B5": 4,  # Abuse
        "B6": 5,  # Car accident
        "G": 6,  # Explosion
    }

    def _info(self):
        if self.config.name == "i3d_rgb":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "feature": datasets.Array3D(
                        # (num_frames, num_crops, feature_dim); 5 crops are used by default as of now
                        shape=(None, 5, 2048),
                        dtype="float32",
                    ),
                    "binary_target": datasets.ClassLabel(
                        names=["Non-violence", "Violence"]
                    ),
                    "multilabel_target": datasets.Sequence(
                        datasets.ClassLabel(
                            names=[
                                "Normal",
                                "Fighting",
                                "Shooting",
                                "Riot",
                                "Abuse",
                                "Car accident",
                                "Explosion",
                            ]
                        )
                    ),
                    "frame_annotations": datasets.Sequence(
                        {
                            "start": datasets.Value("int32"),
                            "end": datasets.Value("int32"),
                        }
                    ),
                }
            )
        else:  # default = "video"
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "binary_target": datasets.ClassLabel(
                        names=["Non-violence", "Violence"]
                    ),
                    "multilabel_target": datasets.Sequence(
                        datasets.ClassLabel(
                            names=[
                                "Normal",
                                "Fighting",
                                "Shooting",
                                "Riot",
                                "Abuse",
                                "Car accident",
                                "Explosion",
                            ]
                        )
                    ),
                    "frame_annotations": datasets.Sequence(
                        {
                            "start": datasets.Value("int32"),
                            "end": datasets.Value("int32"),
                        }
                    ),
                }
            )

        return datasets.DatasetInfo(
            features=features,
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Downloads the file lists, annotations, and videos/features, and defines the train/test splits."""
        # Download train list
        train_list_path = dl_manager.download_and_extract(
            urllib.parse.urljoin(_URL, "train_list.txt")
        )
        train_list = (
            pd.read_csv(
                train_list_path, header=None, sep=" ", usecols=[0], names=["id"]
            )["id"]
            .apply(_remove_mp4_suffix)
            .tolist()
        )
        train_ids = [
            x.split("/")[1] for x in train_list
        ]  # remove subfolder prefix, e.g., "1-1004"

        # Download test list
        test_list_path = dl_manager.download_and_extract(
            urllib.parse.urljoin(_URL, "test_list.txt")
        )
        test_list = (
            pd.read_csv(
                test_list_path, header=None, sep=" ", usecols=[0], names=["id"]
            )["id"]
            .apply(_remove_mp4_suffix)
            .tolist()
        )
        test_ids = [x.split("/")[1] for x in test_list]

        # Download test annotation file
        test_annotations_path = dl_manager.download_and_extract(
            urllib.parse.urljoin(_URL, "test_annotations.txt")
        )

        if self.config.name == "i3d_rgb":
            # Download features
            train_paths = dl_manager.download(
                [
                    urllib.parse.quote(
                        urllib.parse.urljoin(_URL, f"i3d_rgb/{x}.npy"), safe=":/"
                    )
                    for x in train_list
                ]
            )
            test_paths = dl_manager.download(
                [
                    urllib.parse.quote(
                        urllib.parse.urljoin(_URL, f"i3d_rgb/{x}.npy"), safe=":/"
                    )
                    for x in test_list
                ]
            )
        else:
            # Download videos
            train_paths = dl_manager.download(
                [
                    urllib.parse.quote(
                        urllib.parse.urljoin(_URL, f"video/{x}.mp4"), safe=":/"
                    )
                    for x in train_list
                ]
            )
            test_paths = dl_manager.download(
                [
                    urllib.parse.quote(
                        urllib.parse.urljoin(_URL, f"video/{x}.mp4"), safe=":/"
                    )
                    for x in test_list
                ]
            )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "ids": train_ids,
                    "paths": train_paths,
                    "annotations_path": None,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "ids": test_ids,
                    "paths": test_paths,
                    "annotations_path": test_annotations_path,
                },
            ),
        ]

    def _generate_examples(self, ids, paths, annotations_path):
        """Yields one example per video (or per precomputed feature file)."""
        frame_annots_mapper = (
            self._read_frame_annotations(annotations_path)
            if annotations_path
            else dict()
        )
        labels = [self._extract_labels(f_id) for f_id in ids]  # Extract labels

        if self.config.name == "i3d_rgb":
            for key, (f_id, f_path, f_label) in enumerate(zip(ids, paths, labels)):
                binary, multilabel = f_label
                frame_annotations = frame_annots_mapper.get(f_id, [])
                feature = np.load(f_path)

                yield (
                    key,
                    {
                        "id": f_id,
                        "feature": feature,
                        "binary_target": binary,
                        "multilabel_target": multilabel,
                        "frame_annotations": frame_annotations,
                    },
                )
        else:
            for key, (f_id, f_path, f_label) in enumerate(zip(ids, paths, labels)):
                binary, multilabel = f_label
                frame_annotations = frame_annots_mapper.get(f_id, [])

                yield (
                    key,
                    {
                        "id": f_id,
                        "path": f_path,
                        "binary_target": binary,
                        "multilabel_target": multilabel,
                        "frame_annotations": frame_annotations,
                    },
                )

    def _read_frame_annotations(self, path):
        """Parses the test annotation file into a mapping of video id -> frame-level annotations."""
        mapper = {}
        is_url = urllib.parse.urlparse(path).scheme in ("http", "https")

        if is_url:
            with requests.get(path, stream=True) as r:
                r.raise_for_status()
                for line in r.iter_lines():
                    if not line:  # skip blank lines
                        continue
                    parts = line.decode("utf-8").strip().split(" ")
                    f_id = _remove_mp4_suffix(parts[0])
                    frame_annotations = [
                        {
                            "start": int(parts[start_idx]),
                            "end": int(parts[start_idx + 1]),
                        }
                        for start_idx in range(1, len(parts), 2)
                    ]
                    mapper[f_id] = frame_annotations
        else:
            with open(path, "r") as f:
                for line in f:
                    line = line.strip()
                    if not line:  # skip blank lines
                        continue
                    parts = line.split(" ")
                    f_id = _remove_mp4_suffix(parts[0])
                    frame_annotations = [
                        {
                            "start": int(parts[start_idx]),
                            "end": int(parts[start_idx + 1]),
                        }
                        for start_idx in range(1, len(parts), 2)
                    ]
                    mapper[f_id] = frame_annotations

        return mapper

    def _extract_labels(self, f_id):
        """Extracts the binary and multilabel targets from a given file id."""
        codes = f_id.split("_")[-1].split("-")

        binary = 1 if len(codes) > 1 else 0
        multilabel = [self.CODE2IDX[code] for code in codes if code != "0"]

        return binary, multilabel
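

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script itself: it assumes this script is
    # hosted in the `jherng/xd-violence` repo on the Hugging Face Hub and that the installed
    # `datasets` version still supports script-based datasets (recent versions may additionally
    # require passing `trust_remote_code=True`).
    ds = datasets.load_dataset("jherng/xd-violence", name="i3d_rgb", split="test")
    example = ds[0]
    print(example["id"], example["binary_target"], example["frame_annotations"])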