| """ Zero-zero_scrolls benchmark metric. """ |
|
|
from collections import defaultdict
from copy import deepcopy

import datasets
|
|
from .rouge import compute_rouge, postprocess_text as rouge_postprocess_text
from .accuracy import compute_accuracy
from .f1 import compute_f1
from .exp_similarity import compute_exp_similarity
from .concordance_index import compute_concordance_index
|
|
|
|
_CITATION = """
"""
|
|
_DESCRIPTION = """
ZeroSCROLLS: Zero-Shot CompaRison Over Long Language Sequences.
A zero-shot benchmark for long text reasoning.
https://zero.scrolls-benchmark.com/
"""
|
|
_KWARGS_DESCRIPTION = """
Compute the zero_scrolls evaluation metric associated with each zero_scrolls dataset.
Args:
    predictions: list of predictions to score.
        Each prediction should be a string.
    references: list of lists of references for each example.
        Each reference should be a string.
Returns: depending on the zero_scrolls subset, one or several of:
    "accuracy": Accuracy score
    "f1": F1 score
    "rouge": ROUGE score
    "exp_similarity": Exponential Similarity score
    "concordance_index": Concordance Index score

Use the following code to download the metric:
```
import os, shutil
from huggingface_hub import hf_hub_download

def download_metric():
    zero_scrolls_metric_path = hf_hub_download(repo_id="tau/zero_scrolls", repo_type="dataset", filename="metrics/zero_scrolls.py")
    updated_zero_scrolls_metric_path = (
        os.path.dirname(zero_scrolls_metric_path) + os.path.basename(zero_scrolls_metric_path).replace(".", "_") + ".py"
    )
    shutil.copy(zero_scrolls_metric_path, updated_zero_scrolls_metric_path)
    return updated_zero_scrolls_metric_path

zero_scrolls_metric_path = download_metric()
```

Examples:

    >>> predictions = ["exact match example", "hello there", "general kenobi"]  # List[str]
    >>> references = [["exact match example"], ["hello", "hi there"], ["commander kenobi"]]  # List[List[str]]
    >>> zero_scrolls_metric = datasets.load_metric(zero_scrolls_metric_path, 'gov_report')  # "gov_report" or "summ_screen_fd" or "qmsum" or "squality"
    >>> results = zero_scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'rouge/rouge1': 72.2222, 'rouge/rouge2': 33.3333, 'rouge/rougeL': 72.2222, 'rouge/rougeLsum': 72.2222, 'rouge/geometric_mean': 55.8136,
    'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'zero_scrolls_score': 55.8136,
    'display_keys': ['rouge/rouge1', 'rouge/rouge2', 'rouge/rougeL'], 'display': [72.2222, 33.3333, 72.2222]}

    >>> zero_scrolls_metric = datasets.load_metric(zero_scrolls_metric_path, 'narrative_qa')  # "qasper" or "narrative_qa" or "musique"
    >>> results = zero_scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'f1': 72.2222, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'zero_scrolls_score': 72.2222,
    'display_keys': ['f1'], 'display': [72.2222]}

    >>> predictions = ["The answer is (B)", "D", "A"]  # List[str]
    >>> references = [["B"], ["C"], ["C"]]  # List[List[str]]
    >>> zero_scrolls_metric = datasets.load_metric(zero_scrolls_metric_path, 'quality')
    >>> results = zero_scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 33.3333, 'num_predicted': 3, 'mean_prediction_length_characters': 6.3333, 'zero_scrolls_score': 33.3333, 'display_keys': ['accuracy'], 'display': [33.3333]}
| |
| >>> predictions = ["Answer: 4,1,2,3", "2,4,5,4,1"] # List[str] |
| >>> references = [["1,2,3,4"], ["5,3,2,1,4"]] # List[List[str]] |
| >>> zero_scrolls_metric = datasets.load_metric(zero_scrolls_metric_path, 'book_sum_sort') |
| >>> results = zero_scrolls_metric.compute(predictions=predictions, references=references) |
| >>> print(results) |
| {'concordance_index': 25.0, 'num_predicted': 2, 'mean_prediction_length_characters': 12.0, 'zero_scrolls_score': 25.0, 'display_keys': ['concordance_index'], 'display': [25.0]} |
| |
| >>> predictions = ["There are 30% positive reviews", "25%"] # List[str] |
| >>> references = [["40%"], ["82%"]] # List[List[str]] |
| >>> zero_scrolls_metric = datasets.load_metric(zero_scrolls_metric_path, 'space_digest') |
| >>> results = zero_scrolls_metric.compute(predictions=predictions, references=references) |
| >>> print(results) |
| {'exp_similarity': 25.9618, 'num_predicted': 2, 'mean_prediction_length_characters': 16.5, 'zero_scrolls_score': 25.9618, 'display_keys': ['exp_similarity'], 'display': [25.9618]} |
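
    The metric also accepts a custom configuration name given as a comma-prefixed list of metric
    names (e.g. ',f1' or ',f1,rouge'). In that case only the listed metrics are computed, and no
    'zero_scrolls_score', 'display_keys' or 'display' entries are added. A minimal sketch of the
    resulting keys, following the constructor logic (values depend on the inputs):

    >>> zero_scrolls_metric = datasets.load_metric(zero_scrolls_metric_path, ',f1')
    >>> results = zero_scrolls_metric.compute(predictions=predictions, references=references)
    >>> sorted(results.keys())
    ['f1', 'mean_prediction_length_characters', 'num_predicted']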
| """ |
|
|
DATASET_TO_METRICS = {
    "gov_report": {
        "metrics_to_compute": ["rouge"],
        "zero_scrolls_score_key": "rouge/geometric_mean",
        "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
    },
    "narrative_qa": {
        "metrics_to_compute": ["f1"],
        "zero_scrolls_score_key": "f1",
        "display_keys": ["f1"],
    },
    "qasper": {
        "metrics_to_compute": ["f1"],
        "zero_scrolls_score_key": "f1",
        "display_keys": ["f1"],
    },
    "qmsum": {
        "metrics_to_compute": ["rouge"],
        "zero_scrolls_score_key": "rouge/geometric_mean",
        "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
    },
    "summ_screen_fd": {
        "metrics_to_compute": ["rouge"],
        "zero_scrolls_score_key": "rouge/geometric_mean",
        "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
    },
    "quality": {
        "metrics_to_compute": ["accuracy"],
        "zero_scrolls_score_key": "accuracy",
        "display_keys": ["accuracy"],
    },
    "quality_hard": {
        "metrics_to_compute": ["accuracy"],
        "zero_scrolls_score_key": None,
        "display_keys": ["accuracy"],
    },
    "squality": {
        "metrics_to_compute": ["rouge"],
        "zero_scrolls_score_key": "rouge/geometric_mean",
        "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
    },
    "musique": {
        "metrics_to_compute": ["f1"],
        "zero_scrolls_score_key": "f1",
        "display_keys": ["f1"],
    },
    "space_digest": {
        "metrics_to_compute": ["exp_similarity"],
        "zero_scrolls_score_key": "exp_similarity",
        "display_keys": ["exp_similarity"],
    },
    "book_sum_sort": {
        "metrics_to_compute": ["concordance_index"],
        "zero_scrolls_score_key": "concordance_index",
        "display_keys": ["concordance_index"],
    },
}
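# Note: a zero_scrolls_score_key of None (as for "quality_hard") means that _compute reports
# metrics["zero_scrolls_score"] = None for that configuration instead of an aggregate score.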
|
|
|
|
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ZeroScrolls(datasets.Metric):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
|
|
        self._compute_helper_kwargs_fn = {
            "rouge": lambda: {
                "metric_fn": compute_rouge,
                "agg_fn": max,
                "metric_fn_kwargs": {"use_stemmer": False},
                "metric_returns_per_example": True,
                "transform_single_input_fn": lambda text: rouge_postprocess_text(text),
                "transform_result_fn": lambda output: {
                    key: (value[0] if isinstance(value, list) else value).fmeasure * 100
                    for key, value in output.items()
                },
                "transform_aggregated_result_fn": lambda output: output.update(
                    {"geometric_mean": (output["rouge1"] * output["rouge2"] * output["rougeL"]) ** (1.0 / 3.0)}
                )
                or output,
            },
            "accuracy": lambda: {
                "metric_fn": compute_accuracy,
                "agg_fn": None,
                "transform_result_fn": lambda output: {None: output},
            },
            "f1": lambda: {
                "metric_fn": compute_f1,
                "agg_fn": None,
                "transform_result_fn": lambda output: {None: output},
            },
            "exp_similarity": lambda: {
                "metric_fn": compute_exp_similarity,
                "agg_fn": None,
                "transform_result_fn": lambda output: {None: output},
            },
            "concordance_index": lambda: {
                "metric_fn": compute_concordance_index,
                "agg_fn": None,
                "transform_result_fn": lambda output: {None: output},
            },
        }
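        # Each entry above is a factory returning the keyword arguments passed to
        # _compute_helper for that metric: the metric function itself, how scores over
        # multiple references are aggregated (agg_fn), and optional input/output transforms.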
|
|
        custom_metrics = (
            [metric for metric in self.config_name.split(",") if len(metric) > 0]
            if self.config_name.startswith(",")
            else None
        )
        if custom_metrics is not None:
            for metric in custom_metrics:
                if metric not in self._compute_helper_kwargs_fn:
                    raise KeyError(
                        f"You should supply a metric name selected in {list(self._compute_helper_kwargs_fn.keys())}"
                    )
            self._metrics_to_compute = custom_metrics
        else:
            if self.config_name not in DATASET_TO_METRICS:
                raise KeyError(f"You should supply a configuration name selected in {list(DATASET_TO_METRICS.keys())}")
            self._metrics_to_compute = DATASET_TO_METRICS[self.config_name]["metrics_to_compute"]
|
|
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
        )
|
|
    def convert_from_map_format(self, id_to_pred, id_to_labels):
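        """Convert {id: prediction} and {id: [references]} maps into the predictions/references
        lists expected by compute(), using the key order of id_to_pred for both."""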
        index_to_id = list(id_to_pred.keys())
        predictions = [id_to_pred[id_] for id_ in index_to_id]
        references = [id_to_labels[id_] for id_ in index_to_id]
        return {"predictions": predictions, "references": references}
|
|
    def _compute(self, predictions, references):
        metrics = {}
        for metric in self._metrics_to_compute:
            result = _compute_helper(
                deepcopy(predictions),
                deepcopy(references),
                **self._compute_helper_kwargs_fn[metric](),
            )
            metrics.update(
                {(f"{metric}/{key}" if key is not None else metric): value for key, value in result.items()}
            )
        metrics["num_predicted"] = len(predictions)
        prediction_lengths = [len(prediction) for prediction in predictions]
        metrics["mean_prediction_length_characters"] = sum(prediction_lengths) / len(prediction_lengths)

        metrics = {key: round(value, 4) for key, value in metrics.items()}

        if self.config_name in DATASET_TO_METRICS:
            zero_scrolls_score_key = DATASET_TO_METRICS[self.config_name]["zero_scrolls_score_key"]
            if zero_scrolls_score_key is not None:
                metrics["zero_scrolls_score"] = metrics[zero_scrolls_score_key]
            else:
                metrics["zero_scrolls_score"] = None

            display_keys = DATASET_TO_METRICS[self.config_name]["display_keys"]
            metrics["display_keys"] = display_keys
            metrics["display"] = []
            for display_key in display_keys:
                metrics["display"].append(metrics[display_key])

        return metrics
|
|
|
|
def _compute_helper(
    predictions,
    references,
    metric_fn,
    agg_fn,
    metric_fn_kwargs=None,
    transform_single_input_fn=None,
    transform_result_fn=None,
    transform_aggregated_result_fn=None,
    metric_returns_per_example=False,
):
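    # Shared driver used for every metric (summary of the logic below): optionally transform
    # each prediction/reference, call metric_fn, and, when agg_fn is given, aggregate the
    # per-reference scores of each prediction (e.g. max over references) before averaging
    # over all predictions.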
    if metric_fn_kwargs is None:
        metric_fn_kwargs = {}

    if agg_fn is None:
        assert metric_returns_per_example is False

    if transform_single_input_fn is not None:
        predictions = [transform_single_input_fn(prediction) for prediction in predictions]
        references = [
            [transform_single_input_fn(reference) for reference in reference_list] for reference_list in references
        ]

    if transform_result_fn is None:
        transform_result_fn = lambda x: x
        do_transform_result = False
    else:
        do_transform_result = True

    if transform_aggregated_result_fn is None:
        transform_aggregated_result_fn = lambda x: x
|
|
    if agg_fn is not None:
        # Aggregate over the references of each prediction, then average over predictions.
        scores = defaultdict(list)
        if metric_returns_per_example is False:
            # The metric only returns a single aggregated score for a batch, so compute it
            # separately for every (prediction, reference) pair and combine the per-reference
            # scores of each prediction with agg_fn.
            for prediction, reference_list in zip(predictions, references):
                prediction_scores = defaultdict(list)
                for reference in reference_list:
                    result = transform_result_fn(metric_fn([prediction], [reference], **metric_fn_kwargs))
                    for key in result:
                        prediction_scores[key].append(result[key])
                for key in prediction_scores:
                    scores[key].append(agg_fn(prediction_scores[key]))
        else:
            # The metric returns per-example scores, so flatten all (prediction, reference)
            # pairs into one batch and map the results back to their predictions.
            mapping = [[] for _ in range(len(predictions))]
            flattened_predictions = []
            flattened_references = []
            for i, prediction in enumerate(predictions):
                for reference in references[i]:
                    flattened_predictions.append(prediction)
                    flattened_references.append(reference)
                    mapping[i].append(len(flattened_references) - 1)

            results = metric_fn(flattened_predictions, flattened_references, **metric_fn_kwargs)
            if isinstance(results, dict):
                # Convert a dict of per-example lists into a list of per-example dicts.
                results_list = [{k: None for k in results} for _ in range(len(flattened_predictions))]
                for k, v in results.items():
                    for i in range(len(v)):
                        results_list[i][k] = v[i]
            else:
                results_list = results

            if do_transform_result:
                for i in range(len(results_list)):
                    results_list[i] = transform_result_fn(results_list[i])

            for reference_indexes in mapping:
                prediction_scores = defaultdict(list)
                for reference_index in reference_indexes:
                    result = results_list[reference_index]
                    for key in result:
                        prediction_scores[key].append(result[key])
                for key in prediction_scores:
                    scores[key].append(agg_fn(prediction_scores[key]))

        return transform_aggregated_result_fn({key: sum(value) / len(value) for key, value in scores.items()})
    else:
        return transform_aggregated_result_fn(
            transform_result_fn(metric_fn(predictions, references, **metric_fn_kwargs))
        )
|
|