"""PP4AV dataset."""


import os
import re
from collections import defaultdict
from glob import glob

import datasets


_HOMEPAGE = "http://shuoyang1213.me/WIDERFACE/"

_LICENSE = "Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)"

_CITATION = """\
@inproceedings{yang2016wider,
  Author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou},
  Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  Title = {WIDER FACE: A Face Detection Benchmark},
  Year = {2016}}
"""

_DESCRIPTION = """\
WIDER FACE dataset is a face detection benchmark dataset, of which images are
selected from the publicly available WIDER dataset. We choose 32,203 images and
label 393,703 faces with a high degree of variability in scale, pose and
occlusion as depicted in the sample images. WIDER FACE dataset is organized
based on 61 event classes. For each event class, we randomly select 40%/10%/50%
data as training, validation and testing sets. We adopt the same evaluation
metric employed in the PASCAL VOC dataset. Similar to MALF and Caltech datasets,
we do not release bounding box ground truth for the test images. Users are
required to submit final prediction files, which we shall proceed to evaluate.
"""


_REPO = "https://huggingface.co/datasets/khaclinh/testdata/resolve/main/data"
_URLS = {
    "test": f"{_REPO}/fisheye.zip",
    "annot": f"{_REPO}/annotations.zip",
}
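
# A minimal usage sketch (not part of the loader itself). It assumes the script is
# published under the `khaclinh/testdata` repository referenced above and that the
# installed `datasets` version still supports script-based datasets:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("khaclinh/testdata", "fisheye", split="test")
#     example = ds[0]  # {"image": ..., "faces": [[x1, y1, x2, y2], ...], "plates": [...]}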

IMG_EXT = ["png", "jpeg", "jpg"]
# Per-city configurations available in addition to the fisheye subset.
_CITIES = ["zurich"]


class TestDataConfig(datasets.BuilderConfig):
    """BuilderConfig for TestData."""

    def __init__(self, name, **kwargs):
        """BuilderConfig for TestData.

        Args:
            name: name of the configuration (image subset to load).
            **kwargs: keyword arguments forwarded to super.
        """
        super(TestDataConfig, self).__init__(version=datasets.Version("1.0.0", ""), name=name, **kwargs)


class TestData(datasets.GeneratorBasedBuilder):
    """PP4AV test dataset."""

    BUILDER_CONFIGS = [
        TestDataConfig("fisheye"),
    ]
    BUILDER_CONFIGS += [TestDataConfig(city) for city in _CITIES]

    DEFAULT_CONFIG_NAME = "fisheye"

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "faces": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=4)),
                    "plates": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=4)),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
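
    # For reference, each example yielded below has this shape (field values are
    # illustrative placeholders, not taken from the data):
    #
    #     {
    #         "image": "/path/to/frame.png",
    #         "faces": [[x1, y1, x2, y2], ...],   # boxes with class id 0
    #         "plates": [[x1, y1, x2, y2], ...],  # boxes with any other class id
    #     }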

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "name": self.config.name,
                    "data_dir": data_dir["test"],
                    "annot_dir": data_dir["annot"],
                },
            ),
        ]

    def _generate_examples(self, name, data_dir, annot_dir):
        image_dir = os.path.join(data_dir, name)
        annotation_dir = os.path.join(annot_dir, name)

        # Collect every image of this configuration, for all supported extensions.
        image_files = []
        for ext in IMG_EXT:
            image_files.extend(glob(os.path.join(image_dir, f"*.{ext}")))

        idx = 0
        for i_file in sorted(image_files):
            plates = []
            faces = []

            # The annotation file mirrors the image's relative path, with a .txt suffix.
            img_relative_file = os.path.relpath(i_file, image_dir)
            gt_relative_path = os.path.splitext(img_relative_file)[0] + ".txt"
            gt_path = os.path.join(annotation_dir, gt_relative_path)

            annotation = defaultdict(list)
            with open(gt_path, "r", encoding="utf-8") as f:
                line = f.readline().strip()
                while line:
                    assert re.match(r"^\d( [\d\.]+){4,5}$", line), "Incorrect line: %s" % line
                    # Each line is a class id followed by a center-format box: cx cy w h.
                    cls, cx, cy, w, h = line.split()[:5]
                    cls, cx, cy, w, h = int(cls), float(cx), float(cy), float(w), float(h)
                    # Convert the center-format box to corner format.
                    x1, y1, x2, y2 = cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2
                    annotation[cls].append([x1, y1, x2, y2])
                    line = f.readline().strip()

            # Class 0 is a face; every other class id is treated as a license plate.
            for cls, bboxes in annotation.items():
                for x1, y1, x2, y2 in bboxes:
                    if cls == 0:
                        faces.append([x1, y1, x2, y2])
                    else:
                        plates.append([x1, y1, x2, y2])

            yield idx, {"image": i_file, "faces": faces, "plates": plates}
            idx += 1
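

if __name__ == "__main__":
    # Local smoke test: a sketch only, assuming a `datasets` version that still allows
    # loading a script-based dataset from a local file path (older releases, or newer
    # ones with remote code trust enabled). Adjust or remove as needed.
    ds = datasets.load_dataset(__file__, "fisheye", split="test")
    print(ds)
    print(ds[0]["faces"], ds[0]["plates"])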