| """NoCaps loading script.""" |
|
|
|
|
| import json |
|
|
| from collections import defaultdict |
| import datasets |


_CITATION = """\
@inproceedings{agrawal2019nocaps,
  title={nocaps: novel object captioning at scale},
  author={Agrawal, Harsh and Desai, Karan and Wang, Yufei and Chen, Xinlei and Jain, Rishabh and Johnson, Mark and Batra, Dhruv and Parikh, Devi and Lee, Stefan and Anderson, Peter},
  booktitle={Proceedings of the IEEE International Conference on Computer Vision},
  pages={8948--8957},
  year={2019}
}
"""

_DESCRIPTION = """\
NoCaps (novel object captioning at scale) consists of 166,100 human-generated captions describing 15,100 images from the Open Images validation and test sets.
The associated training data consists of COCO image-caption pairs, plus Open Images image-level labels and object bounding boxes.
Since Open Images contains many more classes than COCO, nearly 400 object classes seen in test images have no or very few associated training captions (hence, nocaps).
"""

_HOMEPAGE = "https://nocaps.org/"

_LICENSE = "CC BY 2.0"

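# The validation file ships image metadata plus human captions; the test file ships
# image metadata only, which is why the "annotations" key is treated as optional below.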
_URLS = {
    "validation": "https://nocaps.s3.amazonaws.com/nocaps_val_4500_captions.json",
    "test": "https://s3.amazonaws.com/nocaps/nocaps_test_image_info.json",
}


class NoCaps(datasets.GeneratorBasedBuilder):
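    """Builder for the nocaps novel object captioning benchmark (validation and test splits)."""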

    VERSION = datasets.Version("1.0.0")

    def _info(self):
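        # One example per image: the image itself, its Open Images / COCO metadata,
        # and the ids and texts of all reference captions attached to it.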
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "image_coco_url": datasets.Value("string"),
                "image_date_captured": datasets.Value("string"),
                "image_file_name": datasets.Value("string"),
                "image_height": datasets.Value("int32"),
                "image_width": datasets.Value("int32"),
                "image_id": datasets.Value("int32"),
                "image_license": datasets.Value("int8"),
                "image_open_images_id": datasets.Value("string"),
                "annotations_ids": datasets.Sequence(datasets.Value("int32")),
                "annotations_captions": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
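        # Download both annotation files and create one split per file.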
        data_file = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": data_file["validation"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": data_file["test"],
                },
            ),
        ]

    def _generate_examples(self, data_file):
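        # The downloaded file is a COCO-style JSON dict with an "images" list and,
        # for the validation split, an "annotations" list of captions.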
        with open(data_file, encoding="utf-8") as f:
            data = json.load(f)

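        # Group caption annotations by image id; the test file has no "annotations"
        # key, so test examples end up with empty caption lists.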
        annotations = defaultdict(list)
        if "annotations" in data:
            for ann in data["annotations"]:
                image_id = ann["image_id"]
                caption_id = ann["id"]
                caption = ann["caption"]
                annotations[image_id].append((caption_id, caption))

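        # Emit one example per image, keyed by a running counter. The Image feature is
        # populated with the COCO-hosted URL recorded for that image.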
        counter = 0
        for im in data["images"]:
            image_coco_url = im["coco_url"]
            image_date_captured = im["date_captured"]
            image_file_name = im["file_name"]
            image_height = im["height"]
            image_width = im["width"]
            image_id = im["id"]
            image_license = im["license"]
            image_open_images_id = im["open_images_id"]
            yield counter, {
                "image": image_coco_url,
                "image_coco_url": image_coco_url,
                "image_date_captured": image_date_captured,
                "image_file_name": image_file_name,
                "image_height": image_height,
                "image_width": image_width,
                "image_id": image_id,
                "image_license": image_license,
                "image_open_images_id": image_open_images_id,
                "annotations_ids": [ann[0] for ann in annotations[image_id]],
                "annotations_captions": [ann[1] for ann in annotations[image_id]],
            }
            counter += 1
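

# A minimal usage sketch, assuming this script is saved locally as nocaps.py (the
# path is an assumption; point load_dataset at wherever the file actually lives,
# and note that recent datasets versions may require trust_remote_code for scripts):
#
#     from datasets import load_dataset
#     nocaps_val = load_dataset("path/to/nocaps.py", split="validation")
#     print(nocaps_val[0]["annotations_captions"])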