| """BERT finetuning runner. |
| Finetuning the library models for multiple choice on SWAG (Bert). |
| """ |

import argparse
import csv
import glob
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange

import transformers
from transformers import (
    WEIGHTS_NAME,
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    get_linear_schedule_with_warmup,
)
from transformers.trainer_utils import is_main_process

try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    from tensorboardX import SummaryWriter

logger = logging.getLogger(__name__)


class SwagExample:
    """A single training/test example for the SWAG dataset."""

    def __init__(self, swag_id, context_sentence, start_ending, ending_0, ending_1, ending_2, ending_3, label=None):
        self.swag_id = swag_id
        self.context_sentence = context_sentence
        self.start_ending = start_ending
        self.endings = [
            ending_0,
            ending_1,
            ending_2,
            ending_3,
        ]
        self.label = label

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        attributes = [
            f"swag_id: {self.swag_id}",
            f"context_sentence: {self.context_sentence}",
            f"start_ending: {self.start_ending}",
            f"ending_0: {self.endings[0]}",
            f"ending_1: {self.endings[1]}",
            f"ending_2: {self.endings[2]}",
            f"ending_3: {self.endings[3]}",
        ]

        if self.label is not None:
            attributes.append(f"label: {self.label}")

        return ", ".join(attributes)


class InputFeatures:
    """Per-example model inputs: one (input_ids, input_mask, segment_ids) triple per answer choice."""

    def __init__(self, example_id, choices_features, label):
        self.example_id = example_id
        self.choices_features = [
            {"input_ids": input_ids, "input_mask": input_mask, "segment_ids": segment_ids}
            for _, input_ids, input_mask, segment_ids in choices_features
        ]
        self.label = label


def read_swag_examples(input_file, is_training=True):
    with open(input_file, encoding="utf-8") as f:
        lines = list(csv.reader(f))
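
    # The indexing below assumes the official SWAG csv layout (with a leading
    # unnamed index column): line[2] is the fold index used as the example id,
    # line[4] the context sentence (sent1), line[5] the beginning shared by all
    # endings (sent2), line[7:11] the four candidate endings, and line[11] the
    # gold label. This is an assumption about the file format, not something
    # the script validates beyond the header's label check.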

    if is_training and lines[0][-1] != "label":
        raise ValueError("For training, the input file must contain a label column.")

    examples = [
        SwagExample(
            swag_id=line[2],
            context_sentence=line[4],
            start_ending=line[5],
            ending_0=line[7],
            ending_1=line[8],
            ending_2=line[9],
            ending_3=line[10],
            label=int(line[11]) if is_training else None,
        )
        for line in lines[1:]
    ]

    return examples


def convert_examples_to_features(examples, tokenizer, max_seq_length, is_training):
    """Converts a list of `SwagExample`s into a list of `InputFeatures`."""
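    # SWAG is a multiple choice task. Following the input formatting proposed in
    # "Improving Language Understanding by Generative Pre-Training" (and suggested
    # for BERT in https://github.com/google-research/bert/issues/38), each of the
    # four candidate endings becomes its own sequence:
    #   [CLS] context [SEP] start_ending + ending_i [SEP]
    # The model scores each sequence with a single logit, and a softmax over the
    # four logits yields the prediction.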
    features = []
    for example_index, example in tqdm(enumerate(examples)):
        context_tokens = tokenizer.tokenize(example.context_sentence)
        start_ending_tokens = tokenizer.tokenize(example.start_ending)

        choices_features = []
        for ending_index, ending in enumerate(example.endings):
            # Copy the context tokens so that truncation for one choice does
            # not affect the others.
            context_tokens_choice = context_tokens[:]
            ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
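
            # Truncate in place so the pair plus its three special tokens
            # ([CLS] and two [SEP]) fits within max_seq_length.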
            _truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3)

            tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"]
            segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1)

            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)
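
            # Zero-pad up to max_seq_length; the mask marks real tokens with 1
            # and padding with 0.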
            padding = [0] * (max_seq_length - len(input_ids))
            input_ids += padding
            input_mask += padding
            segment_ids += padding

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length

            choices_features.append((tokens, input_ids, input_mask, segment_ids))

        label = example.label
        if example_index < 5:
            logger.info("*** Example ***")
            logger.info(f"swag_id: {example.swag_id}")
            for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features):
                logger.info(f"choice: {choice_idx}")
                logger.info("tokens: {}".format(" ".join(tokens)))
                logger.info("input_ids: {}".format(" ".join(map(str, input_ids))))
                logger.info("input_mask: {}".format(" ".join(map(str, input_mask))))
                logger.info("segment_ids: {}".format(" ".join(map(str, segment_ids))))
            if is_training:
                logger.info(f"label: {label}")

        features.append(InputFeatures(example_id=example.swag_id, choices_features=choices_features, label=label))

    return features


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""
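
    # Simple heuristic: always trim the longer sequence one token at a time.
    # This tends to preserve more information than removing an equal fraction
    # from both, since each token of a shorter sequence carries more weight.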
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()


def accuracy(out, labels):
    """Returns the number (not the fraction) of correct predictions."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def select_field(features, field):
    return [[choice[field] for choice in feature.choices_features] for feature in features]


def set_seed(args):
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)


def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
    # Make sure only the first process in distributed training processes the
    # dataset; the others wait here and then read from the cache.
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()

    # Load features from cache, or build them from the raw csv file.
    input_file = args.predict_file if evaluate else args.train_file
    cached_features_file = os.path.join(
        os.path.dirname(input_file),
        "cached_{}_{}_{}".format(
            "dev" if evaluate else "train",
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
        ),
    )
    if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples:
        logger.info("Loading features from cached file %s", cached_features_file)
        # The cache stores pickled InputFeatures objects rather than plain
        # tensors, so it cannot be loaded with weights_only=True.
        features = torch.load(cached_features_file, weights_only=False)
    else:
        logger.info("Creating features from dataset file at %s", input_file)
        examples = read_swag_examples(input_file)
        features = convert_examples_to_features(examples, tokenizer, args.max_seq_length, not evaluate)

        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    if args.local_rank == 0:
        torch.distributed.barrier()

    # Convert to tensors and build the dataset.
    all_input_ids = torch.tensor(select_field(features, "input_ids"), dtype=torch.long)
    all_input_mask = torch.tensor(select_field(features, "input_mask"), dtype=torch.long)
    all_segment_ids = torch.tensor(select_field(features, "segment_ids"), dtype=torch.long)
    all_label = torch.tensor([f.label for f in features], dtype=torch.long)
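
    # Resulting shapes: (num_examples, num_choices=4, max_seq_length) for the
    # three input tensors and (num_examples,) for the labels.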
    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)

    if output_examples:
        return dataset, examples, features
    return dataset


def train(args, train_dataset, model, tokenizer):
    """Train the model."""
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
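
    # Prepare the optimizer and a linear warmup/decay schedule. Following the
    # usual BERT fine-tuning recipe, bias and LayerNorm weights are excluded
    # from weight decay.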
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # Multi-GPU training (should come after apex fp16 initialization).
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should come after apex fp16 initialization).
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Re-seed right before training for reproducibility.
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2],
                "labels": batch[3],
            }
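
            # Since "labels" is supplied, the model's first output is the loss
            # (the second is the per-choice logits).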
            outputs = model(**inputs)
            loss = outputs[0]

            if args.n_gpu > 1:
                loss = loss.mean()  # Average over GPUs under DataParallel.
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1

                # Log metrics.
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Only evaluate on a single GPU, otherwise metrics may not average well.
                    if args.local_rank == -1 and args.evaluate_during_training:
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar(f"eval_{key}", value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_last_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss

                # Save a model checkpoint.
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    output_dir = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                    os.makedirs(output_dir, exist_ok=True)
                    # Unwrap DataParallel/DistributedDataParallel before saving.
                    model_to_save = model.module if hasattr(model, "module") else model
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_vocabulary(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step


def evaluate(args, model, tokenizer, prefix=""):
    dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)

    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly even at evaluation time.
    eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Evaluate!
    logger.info(f"***** Running evaluation {prefix} *****")
    logger.info("  Num examples = %d", len(dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)

    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0

    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2],
                "labels": batch[3],
            }
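
            # With "labels" supplied, the first two outputs are the loss and
            # the per-choice logits.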
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            eval_loss += tmp_eval_loss.mean().item()

        logits = logits.detach().cpu().numpy()
        label_ids = inputs["labels"].to("cpu").numpy()
        tmp_eval_accuracy = accuracy(logits, label_ids)
        eval_accuracy += tmp_eval_accuracy

        nb_eval_steps += 1
        nb_eval_examples += inputs["input_ids"].size(0)

    # The loss is averaged per batch, while the accuracy count is averaged per example.
    eval_loss = eval_loss / nb_eval_steps
    eval_accuracy = eval_accuracy / nb_eval_examples
    result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy}

    output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info("%s = %s", key, str(result[key]))
            writer.write("{} = {}\n".format(key, str(result[key])))

    return result


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--train_file", default=None, type=str, required=True, help="SWAG csv for training. E.g., train.csv"
    )
    parser.add_argument(
        "--predict_file",
        default=None,
        type=str,
        required=True,
        help="SWAG csv for predictions. E.g., val.csv or test.csv",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model checkpoints and predictions will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--max_seq_length",
        default=384,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences "
            "longer than this will be truncated, and sequences shorter than this will be padded."
        ),
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step."
    )

    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set the total number of training steps to perform. Overrides num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")

    parser.add_argument("--logging_steps", type=int, default=50, help="Log every X update steps.")
    parser.add_argument("--save_steps", type=int, default=50, help="Save a checkpoint every X update steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Do not use CUDA even when it is available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument("--seed", type=int, default=42, help="Random seed for initialization")

    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on GPUs")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )

    # Set up distant debugging if needed (see
    # https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script).
    if args.server_ip and args.server_port:
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Set up CUDA, GPU and distributed training.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        # Initialize the distributed backend, which synchronizes nodes/GPUs.
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Set up logging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set the verbosity of the Transformers logger to info (on the main process only).
    if is_main_process(args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()

    # Set seed.
    set_seed(args)

    # Load the pretrained model and tokenizer. Make sure only the first process
    # in distributed training downloads the model and vocab; the others wait at
    # the barrier and then read from the local cache.
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()

    config = AutoConfig.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config
    )

    if args.local_rank == 0:
        torch.distributed.barrier()

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    # Save the trained model and the tokenizer.
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save with `save_pretrained()` so the checkpoint can be reloaded with
        # `from_pretrained()`. Unwrap DataParallel/DistributedDataParallel first.
        model_to_save = model.module if hasattr(model, "module") else model
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save the training arguments together with the model.
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))

        # Reload the fine-tuned model and vocabulary.
        model = AutoModelForMultipleChoice.from_pretrained(args.output_dir)
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir)
        model.to(args.device)

    # Evaluation: optionally evaluate every checkpoint found under output_dir.
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        if args.do_train:
            checkpoints = [args.output_dir]
        else:
            # When only evaluating, load the model directly from model_name_or_path.
            checkpoints = [args.model_name_or_path]

        if args.eval_all_checkpoints:
            checkpoints = [
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            ]

        logger.info("Evaluate the following checkpoints: %s", checkpoints)

        for checkpoint in checkpoints:
            # Reload the model from the checkpoint.
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            model = AutoModelForMultipleChoice.from_pretrained(checkpoint)
            tokenizer = AutoTokenizer.from_pretrained(checkpoint)
            model.to(args.device)

            # Evaluate, suffixing metric names with the checkpoint's step number.
            result = evaluate(args, model, tokenizer, prefix=global_step)

            result = {k + (f"_{global_step}" if global_step else ""): v for k, v in result.items()}
            results.update(result)

    logger.info(f"Results: {results}")

    return results


if __name__ == "__main__":
    main()