"""
Fine-tuning the library models for token classification.
"""
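# You can also adapt this script to your own token classification task and datasets; pointers for
# doing so are left as comments throughout. A typical invocation looks like the following
# (model and dataset names are illustrative examples only; adjust them to your setup):
#
#   python run_ner.py \
#     --model_name_or_path bert-base-cased \
#     --dataset_name conll2003 \
#     --output_dir /tmp/test-ner \
#     --do_train --do_eval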

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import ClassLabel, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorForTokenClassification,
    HfArgumentParser,
    PretrainedConfig,
    PreTrainedTokenizerFast,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
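
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.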
check_min_version("4.52.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    token: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
                "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
            )
        },
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to trust the execution of code from datasets/models defined on the Hub."
                " This option should only be set to `True` for repositories you trust and in which you have read the"
                " code, as it will execute code present on the Hub on your local machine."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Whether to allow loading a pretrained model whose head dimensions differ from the checkpoint."},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to the data we are going to use for training and eval.
    """

    task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."})
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a csv or JSON file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."},
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."},
    )
    text_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."}
    )
    label_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If set, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the model's maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    label_all_tokens: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to put the label for one word on all tokens generated by that word, or just on the "
                "first one (in which case the other tokens will have a padding index)."
            )
        },
    )
    return_entity_level_metrics: bool = field(
        default=False,
        metadata={"help": "Whether to return all the entity-level metrics during evaluation or just the overall ones."},
    )

    def __post_init__(self):
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
        self.task_name = self.task_name.lower()


def main():
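    # See all possible arguments in src/transformers/training_args.py, or by passing the --help flag to this script.
    # We keep distinct sets of args for a cleaner separation of concerns.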
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
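
    # Sending telemetry. Tracking example usage helps us better allocate resources to maintain the examples. The
    # information sent is the arguments passed, together with your Python/PyTorch versions.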
    send_example_telemetry("run_ner", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process a small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bit training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
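
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below), or just
    # provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'tokens' (or the first column if no column called
    # 'tokens' is found) for the words, and the column called '{task_name}_tags' (or the second column) for the
    # labels; you can tweak this with --text_column_name and --label_column_name (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.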
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
        )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
            extension = data_args.train_file.split(".")[-1]
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
            extension = data_args.validation_file.split(".")[-1]
        if data_args.test_file is not None:
            data_files["test"] = data_args.test_file
            extension = data_args.test_file.split(".")[-1]
        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
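    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc.)
    # at https://huggingface.co/docs/datasets/loading_datasets.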

    if training_args.do_train:
        column_names = raw_datasets["train"].column_names
        features = raw_datasets["train"].features
    else:
        column_names = raw_datasets["validation"].column_names
        features = raw_datasets["validation"].features

    if data_args.text_column_name is not None:
        text_column_name = data_args.text_column_name
    elif "tokens" in column_names:
        text_column_name = "tokens"
    else:
        text_column_name = column_names[0]

    if data_args.label_column_name is not None:
        label_column_name = data_args.label_column_name
    elif f"{data_args.task_name}_tags" in column_names:
        label_column_name = f"{data_args.task_name}_tags"
    else:
        label_column_name = column_names[1]
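
    # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the
    # unique labels.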
    def get_label_list(labels):
        unique_labels = set()
        for label in labels:
            unique_labels = unique_labels | set(label)
        label_list = list(unique_labels)
        label_list.sort()
        return label_list
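
    # If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere.
    # Otherwise, we have to get the list of labels manually.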
    labels_are_int = isinstance(features[label_column_name].feature, ClassLabel)
    if labels_are_int:
        label_list = features[label_column_name].feature.names
        label_to_id = {i: i for i in range(len(label_list))}
    else:
        label_list = get_label_list(raw_datasets["train"][label_column_name])
        label_to_id = {l: i for i, l in enumerate(label_list)}

    num_labels = len(label_list)
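
    # Load pretrained model and tokenizer.
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.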
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )

    tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path
    if config.model_type in {"bloom", "gpt2", "roberta", "deberta"}:
        # These tokenizers need `add_prefix_space=True` to encode pre-tokenized (word-split) inputs.
        tokenizer = AutoTokenizer.from_pretrained(
            tokenizer_name_or_path,
            cache_dir=model_args.cache_dir,
            use_fast=True,
            revision=model_args.model_revision,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
            add_prefix_space=True,
        )
    else:
        tokenizer = AutoTokenizer.from_pretrained(
            tokenizer_name_or_path,
            cache_dir=model_args.cache_dir,
            use_fast=True,
            revision=model_args.model_revision,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
        )

    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
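
    # Tokenizer check: this script requires a fast tokenizer, because it relies on `word_ids` to align labels.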
    if not isinstance(tokenizer, PreTrainedTokenizerFast):
        raise ValueError(
            "This example script only works for models that have a fast tokenizer. Check out the big table of models at"
            " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet"
            " this requirement."
        )
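
    # Model has labels -> use them.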
    if model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id:
        if sorted(model.config.label2id.keys()) == sorted(label_list):
            # Reorganize `label_list` to match the ordering of the model.
            if labels_are_int:
                label_to_id = {i: int(model.config.label2id[l]) for i, l in enumerate(label_list)}
                label_list = [model.config.id2label[i] for i in range(num_labels)]
            else:
                label_list = [model.config.id2label[i] for i in range(num_labels)]
                label_to_id = {l: i for i, l in enumerate(label_list)}
        else:
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: "
                f"model labels: {sorted(model.config.label2id.keys())}, dataset labels:"
                f" {sorted(label_list)}.\nIgnoring the model labels as a result.",
            )
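
    # Set the correspondences label/ID inside the model config.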
    model.config.label2id = {l: i for i, l in enumerate(label_list)}
    model.config.id2label = dict(enumerate(label_list))
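
    # Map that sends a B-Xxx label to its I-Xxx counterpart (used when `label_all_tokens` is set, so that
    # continuation sub-tokens of a B-Xxx word are labeled I-Xxx rather than B-Xxx).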
    b_to_i_label = []
    for idx, label in enumerate(label_list):
        if label.startswith("B-") and label.replace("B-", "I-") in label_list:
            b_to_i_label.append(label_list.index(label.replace("B-", "I-")))
        else:
            b_to_i_label.append(idx)
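
    # Preprocessing the dataset.
    # Padding strategy: pad everything to `max_seq_length` here if requested, otherwise let the data collator pad
    # dynamically per batch.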
    padding = "max_length" if data_args.pad_to_max_length else False
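
    # Tokenize all texts and align the labels with them.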
    def tokenize_and_align_labels(examples):
        tokenized_inputs = tokenizer(
            examples[text_column_name],
            padding=padding,
            truncation=True,
            max_length=data_args.max_seq_length,
            # The texts in our dataset are lists of words (with a label for each word).
            is_split_into_words=True,
        )
        labels = []
        for i, label in enumerate(examples[label_column_name]):
            word_ids = tokenized_inputs.word_ids(batch_index=i)
            previous_word_idx = None
            label_ids = []
            for word_idx in word_ids:
                # Special tokens have a word id that is None. We set the label to -100 so they are automatically
                # ignored in the loss function.
                if word_idx is None:
                    label_ids.append(-100)
                # We set the label for the first token of each word.
                elif word_idx != previous_word_idx:
                    label_ids.append(label_to_id[label[word_idx]])
                # For the other tokens in a word, we set the label to either the current label or -100, depending on
                # the label_all_tokens flag.
                else:
                    if data_args.label_all_tokens:
                        label_ids.append(b_to_i_label[label_to_id[label[word_idx]]])
                    else:
                        label_ids.append(-100)
                previous_word_idx = word_idx

            labels.append(label_ids)
        tokenized_inputs["labels"] = labels
        return tokenized_inputs

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                tokenize_and_align_labels,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                tokenize_and_align_labels,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if "test" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                tokenize_and_align_labels,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
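
    # Data collator: pads inputs and labels dynamically to the longest sequence in the batch (to a multiple of 8
    # under fp16, which plays nicely with tensor cores).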
    data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
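
    # Metrics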
    metric = evaluate.load("seqeval", cache_dir=model_args.cache_dir)

    def compute_metrics(p):
        predictions, labels = p
        if not training_args.eval_do_concat_batches:
            # Without batch concatenation, the Trainer returns per-batch arrays; stitch them together first.
            predictions = np.hstack(predictions)
            labels = np.hstack(labels)
        predictions = np.argmax(predictions, axis=2)

        # Remove ignored index (special tokens).
        true_predictions = [
            [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        true_labels = [
            [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]

        results = metric.compute(predictions=true_predictions, references=true_labels)
        if data_args.return_entity_level_metrics:
            # Unpack nested dictionaries (seqeval returns one dict per entity type).
            final_results = {}
            for key, value in results.items():
                if isinstance(value, dict):
                    for n, v in value.items():
                        final_results[f"{key}_{n}"] = v
                else:
                    final_results[key] = value
            return final_results
        else:
            return {
                "precision": results["overall_precision"],
                "recall": results["overall_recall"],
                "f1": results["overall_f1"],
                "accuracy": results["overall_accuracy"],
            }
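
    # Initialize our Trainer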
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        processing_class=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
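
    # Training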
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        trainer.save_model()  # Saves the tokenizer too, for easy upload.

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
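
    # Evaluation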
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
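
    # Predict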
    if training_args.do_predict:
        logger.info("*** Predict ***")

        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        predictions = np.argmax(predictions, axis=2)
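
        # Remove ignored index (special tokens).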
        true_predictions = [
            [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
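
        # Save predictions, one whitespace-joined tag sequence per input example.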
        output_predictions_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predictions_file, "w") as writer:
                for prediction in true_predictions:
                    writer.write(" ".join(prediction) + "\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()