"""Loading script for DiffusionDB."""

import re
import numpy as np
import pandas as pd

from json import load
from os.path import join, basename
from huggingface_hub import hf_hub_url

import datasets

_CITATION = """\
@article{wangDiffusionDBLargescalePrompt2022,
  title = {{{DiffusionDB}}: {{A}} Large-Scale Prompt Gallery Dataset for Text-to-Image Generative Models},
  author = {Wang, Zijie J. and Montoya, Evan and Munechika, David and Yang, Haoyang and Hoover, Benjamin and Chau, Duen Horng},
  year = {2022},
  journal = {arXiv:2210.14896 [cs]},
  url = {https://arxiv.org/abs/2210.14896}
}
"""

_DESCRIPTION = """
DiffusionDB is the first large-scale text-to-image prompt dataset. It contains 2
million images generated by Stable Diffusion using prompts and hyperparameters
specified by real users. The unprecedented scale and diversity of this
human-actuated dataset provide exciting research opportunities in understanding
the interplay between prompts and generative models, detecting deepfakes, and
designing human-AI interaction tools to help users more easily use these models.
"""

_HOMEPAGE = "https://poloclub.github.io/diffusiondb"
_LICENSE = "CC0 1.0"
_VERSION = datasets.Version("0.9.1")

# Programmatically generate the download URLs for all zip parts
_URLS = {}
_URLS_LARGE = {}
_PART_IDS = range(1, 2001)
_PART_IDS_LARGE = range(1, 14001)

for i in _PART_IDS:
    _URLS[i] = hf_hub_url(
        "poloclub/diffusiondb",
        filename=f"images/part-{i:06}.zip",
        repo_type="dataset",
    )
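
# Note: hf_hub_url() resolves each entry to a URL of the form
# https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/images/part-000001.zip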

for i in _PART_IDS_LARGE:
    if i < 10001:
        _URLS_LARGE[i] = hf_hub_url(
            "poloclub/diffusiondb",
            filename=f"diffusiondb-large-part-1/part-{i:06}.zip",
            repo_type="dataset",
        )
    else:
        _URLS_LARGE[i] = hf_hub_url(
            "poloclub/diffusiondb",
            filename=f"diffusiondb-large-part-2/part-{i:06}.zip",
            repo_type="dataset",
        )

# Add the URLs of the metadata parquet tables
_URLS["metadata"] = hf_hub_url(
    "poloclub/diffusiondb", filename="metadata.parquet", repo_type="dataset"
)

_URLS_LARGE["metadata"] = hf_hub_url(
    "poloclub/diffusiondb",
    filename="metadata-large.parquet",
    repo_type="dataset",
)

# Map the integer sampler codes stored in the metadata to sampler names
_SAMPLER_DICT = {
    1: "ddim",
    2: "plms",
    3: "k_euler",
    4: "k_euler_ancestral",
    5: "k_heun",
    6: "k_dpm_2",
    7: "k_dpm_2_ancestral",
    8: "k_lms",
    9: "others",
}
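
# For example, a metadata row whose sampler column holds 8.0 maps to
# _SAMPLER_DICT[int(8.0)] == "k_lms" (see _generate_examples below)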


class DiffusionDBConfig(datasets.BuilderConfig):
    """BuilderConfig for DiffusionDB."""

    def __init__(self, part_ids, is_large, **kwargs):
        """BuilderConfig for DiffusionDB.

        Args:
            part_ids ([int]): A list of part ids to download.
            is_large (bool): Whether to download from DiffusionDB Large
                (14 million images) instead of DiffusionDB 2M.
            **kwargs: Keyword arguments forwarded to the superclass.
        """
        super().__init__(version=_VERSION, **kwargs)
        self.part_ids = part_ids
        self.is_large = is_large


class DiffusionDB(datasets.GeneratorBasedBuilder):
    """A large-scale text-to-image prompt gallery dataset based on Stable Diffusion."""

    BUILDER_CONFIGS = []

    # Programmatically generate configs for subsets of different sizes,
    # sampling strategies, and source datasets (2M vs. Large)
    for num_k in [1, 5, 10, 50, 100, 500, 1000]:
        for sampling in ["first", "random"]:
            for is_large in [False, True]:
                num_k_str = f"{num_k}k" if num_k < 1000 else f"{num_k // 1000}m"
                subset_str = "large_" if is_large else "2m_"
| | if sampling == "random": |
| | |
| | cur_name = subset_str + "random_" + num_k_str |
| |
|
| | |
| | cur_description = ( |
| | f"Random {num_k_str} images with their prompts and parameters" |
| | ) |
| |
|
| | |
| | total_part_ids = _PART_IDS_LARGE if is_large else _PART_IDS |
| | part_ids = np.random.choice( |
| | total_part_ids, num_k, replace=False |
| | ).tolist() |
| | else: |
| | |
| | cur_name = subset_str + "first_" + num_k_str |
| |
|
| | |
| | cur_description = f"The first {num_k_str} images in this dataset with their prompts and parameters" |
| |
|
| | |
| | total_part_ids = _PART_IDS_LARGE if is_large else _PART_IDS |
| | part_ids = total_part_ids[1 : num_k + 1] |
| |
|
| | |
| | BUILDER_CONFIGS.append( |
| | DiffusionDBConfig( |
| | name=cur_name, |
| | part_ids=part_ids, |
| | is_large=is_large, |
| | description=cur_description, |
| | ), |
| | ) |
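
    # The loops above generate config names such as "2m_first_1k",
    # "2m_random_100k", and "large_random_1m"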

    # DiffusionDB Large also has 5m and 10m subsets
    for num_k in [5000, 10000]:
        for sampling in ["first", "random"]:
            num_k_str = f"{num_k // 1000}m"
            subset_str = "large_"

            if sampling == "random":
                # Name the config
                cur_name = subset_str + "random_" + num_k_str

                # Add a short description for each config
                cur_description = (
                    f"Random {num_k_str} images with their prompts and parameters"
                )

                # Sample part ids without replacement
                total_part_ids = _PART_IDS_LARGE
                part_ids = np.random.choice(
                    total_part_ids, num_k, replace=False
                ).tolist()
            else:
                # Name the config
                cur_name = subset_str + "first_" + num_k_str

                # Add a short description for each config
                cur_description = f"The first {num_k_str} images in this dataset with their prompts and parameters"

                # Take the first num_k parts (each part contains 1,000 images)
                total_part_ids = _PART_IDS_LARGE
                part_ids = total_part_ids[:num_k]

            # Create the config
            BUILDER_CONFIGS.append(
                DiffusionDBConfig(
                    name=cur_name,
                    part_ids=part_ids,
                    is_large=True,
                    description=cur_description,
                ),
            )

    # Add configs that cover the full datasets
    BUILDER_CONFIGS.append(
        DiffusionDBConfig(
            name="2m_all",
            part_ids=_PART_IDS,
            is_large=False,
            description="All images with their prompts and parameters",
        ),
    )

    BUILDER_CONFIGS.append(
        DiffusionDBConfig(
            name="large_all",
            part_ids=_PART_IDS_LARGE,
            is_large=True,
            description="All images with their prompts and parameters",
        ),
    )

    # Text-only configs load only the metadata parquet file (no images)
    BUILDER_CONFIGS.append(
        DiffusionDBConfig(
            name="2m_text_only",
            part_ids=[],
            is_large=False,
            description="Only include all prompts and parameters (no image)",
        ),
    )

    BUILDER_CONFIGS.append(
        DiffusionDBConfig(
            name="large_text_only",
            part_ids=[],
            is_large=True,
            description="Only include all prompts and parameters (no image)",
        ),
    )

    # Add a separate random 1k config; 1k images correspond to one part,
    # since each part contains 1,000 images
    part_ids = np.random.choice(_PART_IDS, 1, replace=False).tolist()
    BUILDER_CONFIGS.append(
        DiffusionDBConfig(
            name="1k_random_2m",
            part_ids=part_ids,
            is_large=False,
            description="Another random 1k images with metadata from DiffusionDB 2M",
        ),
    )

    # Default to a small subset to keep the default download cheap
    DEFAULT_CONFIG_NAME = "2m_random_1k"
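
    # Usage sketch (assumes this script backs the poloclub/diffusiondb
    # dataset repo on the Hugging Face Hub):
    #
    #     from datasets import load_dataset
    #
    #     # Loads the default config, i.e., 1,000 random images
    #     dataset = load_dataset("poloclub/diffusiondb")
    #
    #     # Or pick any config name generated above
    #     dataset = load_dataset("poloclub/diffusiondb", "2m_first_5k")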

    def _info(self):
        """Specify the information of DiffusionDB."""

        if "text_only" in self.config.name:
            features = datasets.Features(
                {
                    "image_name": datasets.Value("string"),
                    "prompt": datasets.Value("string"),
                    "part_id": datasets.Value("uint16"),
                    "seed": datasets.Value("uint32"),
                    "step": datasets.Value("uint16"),
                    "cfg": datasets.Value("float32"),
                    "sampler": datasets.Value("string"),
                    "width": datasets.Value("uint16"),
                    "height": datasets.Value("uint16"),
                    "user_name": datasets.Value("string"),
                    "timestamp": datasets.Value("timestamp[us, tz=UTC]"),
                    "image_nsfw": datasets.Value("float32"),
                    "prompt_nsfw": datasets.Value("float32"),
                },
            )

        else:
            features = datasets.Features(
                {
                    "image": datasets.Image(),
                    "prompt": datasets.Value("string"),
                    "seed": datasets.Value("uint32"),
                    "step": datasets.Value("uint16"),
                    "cfg": datasets.Value("float32"),
                    "sampler": datasets.Value("string"),
                    "width": datasets.Value("uint16"),
                    "height": datasets.Value("uint16"),
                    "user_name": datasets.Value("string"),
                    "timestamp": datasets.Value("timestamp[us, tz=UTC]"),
                    "image_nsfw": datasets.Value("float32"),
                    "prompt_nsfw": datasets.Value("float32"),
                },
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The configuration selected by the user is available as
        # self.config.name. dl_manager is a datasets.download.DownloadManager:
        # it downloads the given URLs and, for archives, extracts them and
        # returns the path to the cached extraction folder.

        # Resolved paths of extracted zip parts and their JSON metadata files
        data_dirs = []
        json_paths = []

        # Pick the URL table for the selected dataset (2M vs. Large)
        if self.config.is_large:
            urls = _URLS_LARGE
        else:
            urls = _URLS

        for cur_part_id in self.config.part_ids:
            cur_url = urls[cur_part_id]
            data_dir = dl_manager.download_and_extract(cur_url)

            data_dirs.append(data_dir)
            json_paths.append(join(data_dir, f"part-{cur_part_id:06}.json"))

        # Also download the metadata table
        metadata_path = dl_manager.download(urls["metadata"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples
                gen_kwargs={
                    "data_dirs": data_dirs,
                    "json_paths": json_paths,
                    "metadata_path": metadata_path,
                },
            ),
        ]
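
    # For example, after downloading part 42, data_dirs holds its extraction
    # folder and json_paths holds "<that folder>/part-000042.json", which maps
    # each image file name in the part to its generation parameters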

    def _generate_examples(self, data_dirs, json_paths, metadata_path):
        # This method yields (key, example) tuples from the dataset; the key
        # must be unique, so we use the image name

        # For the text-only configs, read every row of the metadata table
        if "text_only" in self.config.name:
            metadata_df = pd.read_parquet(metadata_path)
            for _, row in metadata_df.iterrows():
                yield row["image_name"], {
                    "image_name": row["image_name"],
                    "prompt": row["prompt"],
                    "part_id": row["part_id"],
                    "seed": row["seed"],
                    "step": row["step"],
                    "cfg": row["cfg"],
                    "sampler": _SAMPLER_DICT[int(row["sampler"])],
                    "width": row["width"],
                    "height": row["height"],
                    "user_name": row["user_name"],
                    "timestamp": None
                    if pd.isnull(row["timestamp"])
                    else row["timestamp"],
                    "image_nsfw": row["image_nsfw"],
                    "prompt_nsfw": row["prompt_nsfw"],
                }

        else:
            num_data_dirs = len(data_dirs)
            assert num_data_dirs == len(json_paths)

            # Recover the part ids from the JSON file names
            part_ids = []
            for path in json_paths:
                cur_id = int(re.sub(r"part-(\d+)\.json", r"\1", basename(path)))
                part_ids.append(cur_id)

            # Only load the metadata rows for the selected parts; the filter
            # is pushed down to the parquet reader
            metadata_table = pd.read_parquet(
                metadata_path,
                filters=[("part_id", "in", part_ids)],
            )

            # Iterate through all extracted zip parts
            for k in range(num_data_dirs):
                cur_data_dir = data_dirs[k]
                cur_json_path = json_paths[k]

                # Load the part's JSON file mapping image names to parameters
                with open(cur_json_path, "r", encoding="utf8") as f:
                    json_data = load(f)

                for img_name in json_data:
                    img_params = json_data[img_name]
                    img_path = join(cur_data_dir, img_name)

                    # Look up this image's row in the metadata table
                    query_result = metadata_table.query(f'`image_name` == "{img_name}"')

                    with open(img_path, "rb") as f:
                        img_bytes = f.read()

                    # Yield the image and its metadata
                    yield img_name, {
                        "image": {
                            "path": img_path,
                            "bytes": img_bytes,
                        },
                        "prompt": img_params["p"],
                        "seed": int(img_params["se"]),
                        "step": int(img_params["st"]),
                        "cfg": float(img_params["c"]),
                        "sampler": img_params["sa"],
                        "width": query_result["width"].to_list()[0],
                        "height": query_result["height"].to_list()[0],
                        "user_name": query_result["user_name"].to_list()[0],
                        "timestamp": None
                        if pd.isnull(query_result["timestamp"].to_list()[0])
                        else query_result["timestamp"].to_list()[0],
                        "image_nsfw": query_result["image_nsfw"].to_list()[0],
                        "prompt_nsfw": query_result["prompt_nsfw"].to_list()[0],
                    }
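
    # Each yielded example matches the features declared in _info(); the
    # datasets.Image() feature decodes the {"path", "bytes"} dict into a PIL
    # image when an example is accessed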