"""
Copyright (c) 2023, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: Apache License 2.0
For full license text, see the LICENSE file in the repo root or https://www.apache.org/licenses/LICENSE-2.0
"""
| |
|
| |
|
| | |
| | import os |
| | import json |
| | import datasets |
| | from datasets import (GeneratorBasedBuilder, |
| | BuilderConfig, |
| | SplitGenerator, |
| | DatasetInfo, |
| | Features, |
| | Sequence, |
| | Value, |
| | Version) |
| |
|
# Module-level logger for this dataset loading script.
logger = datasets.logging.get_logger(__name__)
# NOTE: import-time side effect — loading this script disables progress bars
# globally for the `datasets` library.
datasets.logging.disable_progress_bar()
| |
|
# Version tag applied to every builder config generated below.
_VERSION = Version("1.0.0")
| | _CITATION = """ |
| | @misc{zhang2023dialogstudio, |
| | title={DialogStudio: Towards Richest and Most Diverse Unified Dataset Collection for Conversational AI}, |
| | author={Jianguo Zhang and Kun Qian and Zhiwei Liu and Shelby Heinecke and Rui Meng and Ye Liu and Zhou Yu and and Huan Wang and Silvio Savarese and Caiming Xiong}, |
| | year={2023}, |
| | eprint={2307.10172}, |
| | archivePrefix={arXiv}, |
| | primaryClass={cs.CL} |
| | """ |
| |
|
| | DATASETS = { |
| | |
| | "open_domain": [ |
| | "chitchat-dataset", "ConvAI2", "AntiScam", "Empathetic", "HH-RLHF", |
| | "PLACES3.5", "Prosocial", "SODA" |
| | ], |
| | "knowledge_grounded": [ |
| | "CompWebQ", "CoQA", "CoSQL", "DART", "FeTaQA", |
| | "GrailQA", "HybridQA", "MTOP", "MultiModalQA", "SParC", |
| | "Spider", "SQA", "ToTTo", "WebQSP", "WikiSQL", |
| | "WikiTQ", "wizard_of_internet", "wizard_of_wikipedia" |
| | ], |
| | "dialogue_summarization": [ |
| | "AMI", "CRD3", "DialogSum", "ECTSum", "ICSI", |
| | "MediaSum", "QMSum", "SAMSum", "TweetSumm", "ConvoSumm", |
| | "SummScreen_ForeverDreaming", "SummScreen_TVMegaSite" |
| | ], |
| | "natural_language_understanding": [ |
| | "ATIS", "ATIS-NER", "BANKING77", "BANKING77-OOS", "CLINC-Single-Domain-OOS-banking", |
| | "CLINC-Single-Domain-OOS-credit_cards", "CLINC150", "DSTC8-SGD", "HWU64", "MIT-Movie", |
| | "MIT-Restaurant", "RESTAURANTS8K", "SNIPS", "SNIPS-NER", "TOP", "TOP-NER" |
| | ], |
| | "task_oriented": [ |
| | "ABCD", "AirDialogue", "BiTOD", "CaSiNo", "CraigslistBargains", |
| | "Disambiguation", "DSTC2-Clean", "FRAMES", "GECOR", "HDSA-Dialog", |
| | "KETOD", "KVRET", "MetaLWOZ", "MS-DC", "MuDoCo", |
| | "MulDoGO", "MultiWOZ_2.1", "MULTIWOZ2_2", "SGD", "SimJointGEN", |
| | "SimJointMovie", "SimJointRestaurant", "STAR", "Taskmaster1", "Taskmaster2", |
| | "Taskmaster3", "WOZ2_0" |
| | ], |
| | "conversational_recommendation": [ |
| | "Redial", "DuRecDial-2.0", "OpenDialKG", "SalesBot", |
| | ] |
| | } |
| |
|
| | _URL = "https://huggingface.co/datasets/Salesforce/dialogstudio/tree/main/" |
| |
|
class DialogStudioConfig(BuilderConfig):
    """BuilderConfig for one DialogStudio dataset."""

    def __init__(self, extra_features, category, data_name, citation, url, **kwargs):
        """BuilderConfig for DialogStudio.

        Args:
            extra_features: `dict` with keys ``"dialog"`` and ``"turn"``, each
                mapping to a `list[str]` of optional feature names that will be
                added to the feature dict (at dialog level and per-turn level,
                respectively).
            category: `string`, task category the dataset belongs to (a key of
                ``DATASETS``); also the top-level directory on the Hub.
            data_name: `string`, dataset name (an entry of ``DATASETS[category]``);
                also the dataset's directory on the Hub.
            citation: `string`, citation for the dataset.
            url: `string`, url for information about the dataset.
            **kwargs: keyword arguments forwarded to super (e.g. ``name``,
                ``description``).
        """
        super().__init__(version=_VERSION, **kwargs)
        self.extra_features = extra_features
        self.category = category
        self.data_name = data_name
        # Kept for backward compatibility: the data used to ship as <name>.zip.
        self.compressed_file = f"{data_name}.zip"
        self.citation = citation
        self.url = url
| |
|
| |
|
class DialogStudio(GeneratorBasedBuilder):
    """Dataset builder for the DialogStudio collection.

    One :class:`DialogStudioConfig` is registered per dataset listed in
    ``DATASETS``, so callers pick a dataset via ``load_dataset(..., name)``.
    Data is expected on the Hub, unzipped, in the layout
    ``<category>/<data_name>/<split>/dialogues_<i>.json``.
    """

    # Build one config per (category, dataset) pair declared in DATASETS.
    BUILDER_CONFIGS = []
    for category, dataset_list in DATASETS.items():
        # Which optional annotations each category ships, at dialog level and
        # at turn level; these become extra string features in _info().
        if category in ("task_oriented", "conversational_recommendation"):
            extra_features = {
                "dialog": [
                    "external knowledge non-flat",
                    "external knowledge",
                    "dst knowledge",
                    "intent knowledge",
                ],
                "turn": [
                    "dst",
                    "dst accumulated",
                    "intent",
                    "external knowledge",
                    "external knowledge non-flat",
                ],
            }
        elif category == "natural_language_understanding":
            extra_features = {
                "dialog": [],
                "turn": ["external knowledge", "external knowledge non-flat"],
            }
        else:
            extra_features = {"dialog": [], "turn": []}

        for data_name in dataset_list:
            BUILDER_CONFIGS.append(
                DialogStudioConfig(
                    name=data_name,
                    data_name=data_name,
                    description="",
                    extra_features=extra_features,
                    category=category,
                    citation=_CITATION,
                    url="https://github.com/salesforce/DialogStudio/tree/main",
                )
            )

    DEFAULT_CONFIG_NAME = "WOZ2_0"

    @staticmethod
    def _base_turn_features():
        """Return a fresh dict of the per-turn features shared by every dataset."""
        return {
            "turn id": Value("int32"),
            "user utterance": Value("string"),
            "system response": Value("string"),
            "dialog history": Value("string"),
            "original user side information": Value("string"),
            "original system side information": Value("string"),
        }

    def _info(self):
        """Declare the feature schema for the selected dataset config."""
        log_features = self._base_turn_features()
        # Turn-level extras are stored as JSON strings (see _generate_examples).
        for name in self.config.extra_features["turn"]:
            log_features[name] = Value("string")

        features = {
            "original dialog id": Value("string"),
            "new dialog id": Value("string"),
            "dialog index": Value("int32"),
            "original dialog info": Value("string"),
            "log": [log_features],
            "prompt": [Value("string")],
        }
        # Dialog-level extras are likewise stored as JSON strings.
        for name in self.config.extra_features["dialog"]:
            features[name] = Value("string")

        return DatasetInfo(
            description="",
            features=Features(features),
            homepage=self.config.url,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download every dialogue shard and build one generator per split.

        The dataset is not stored in a zip file. Each split directory holds
        consecutively numbered shards ``dialogues_1.json``, ``dialogues_2.json``,
        ...; shards are downloaded one by one until a download fails, which
        signals that the split has no more files.
        """
        print("❤️Attention❤️: Dataset download may take some time. We appreciate your patience!")
        file_path_list = {"train": [], "val": [], "test": []}
        for mode, downloaded in file_path_list.items():
            # Shard indices start at 1; stop at the first missing shard.
            file_idx = 1
            while True:
                shard = os.path.join(
                    self.config.category,
                    self.config.data_name,
                    mode,
                    f"dialogues_{file_idx}.json",
                )
                try:
                    downloaded.append(dl_manager.download(shard))
                except Exception:  # any download failure means no more shards
                    break
                file_idx += 1

        # Only emit splits that actually have data.
        splits = []
        for mode, split_name in (
            ("train", datasets.Split.TRAIN),
            ("val", datasets.Split.VALIDATION),
            ("test", datasets.Split.TEST),
        ):
            if file_path_list[mode]:
                splits.append(
                    SplitGenerator(
                        name=split_name,
                        gen_kwargs={
                            "file_path_list": file_path_list[mode],
                            "split": split_name,
                        },
                    )
                )
        return splits

    def _load_json(self, file_path):
        """Read one UTF-8 JSON shard into a dict keyed by dialog id."""
        with open(file_path, encoding="utf-8") as f:
            return json.load(f)

    def _generate_examples(self, file_path_list, split):
        """Yield ``(dialog index, example)`` pairs in the schema from _info()."""
        # Merge all shards of this split into one dict keyed by new dialog id.
        data = {}
        for filepath in file_path_list:
            data.update(self._load_json(filepath))

        logger.info(f"generating {len(data)} examples from = {split}")

        for dial_id, dial in data.items():
            # Some shards store a single turn as a bare dict; normalize to a list.
            if isinstance(dial["log"], dict):
                dial["log"] = [dial["log"]]
            example = {
                "original dialog id": dial["original dialog id"],
                "new dialog id": dial_id,
                "dialog index": dial["dialog index"],
                # Nested structures are serialized so the Arrow schema stays flat.
                "original dialog info": json.dumps(dial["original dialog info"]),
                "log": [
                    {
                        "turn id": turn["turn id"],
                        "user utterance": turn["user utterance"],
                        "system response": turn["system response"],
                        "dialog history": turn["dialog history"],
                        "original user side information": json.dumps(turn["original user side information"]),
                        "original system side information": json.dumps(turn["original system side information"]),
                    }
                    for turn in dial["log"]
                ],
                # Missing or empty prompt lists become a single empty string.
                "prompt": dial.get("prompt") or [""],
            }
            # Optional dialog-level annotations; empty string when absent.
            for name in self.config.extra_features["dialog"]:
                example[name] = json.dumps(dial[name]) if name in dial else ""
            # Optional turn-level annotations; empty string when absent.
            for name in self.config.extra_features["turn"]:
                for idx, raw_turn in enumerate(dial["log"]):
                    example["log"][idx][name] = json.dumps(raw_turn[name]) if name in raw_turn else ""
            yield dial["dialog index"], example
| |
|
| |
|