| """ |
| Fine-tuning the library models for translation. |
| """ |

import json
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import evaluate
import numpy as np
import tensorflow as tf
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForSeq2Seq,
    HfArgumentParser,
    KerasMetricCallback,
    M2M100Tokenizer,
    MBart50Tokenizer,
    MBart50TokenizerFast,
    MBartTokenizer,
    MBartTokenizerFast,
    PushToHubCallback,
    TFAutoModelForSeq2SeqLM,
    TFTrainingArguments,
    create_optimizer,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
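
# Beyond transformers itself, this script needs datasets, evaluate, sacrebleu (pulled in by
# evaluate.load("sacrebleu") below) and tensorflow. A sketch of an install command (versions
# deliberately unpinned here; see the example's requirements file for tested pins):
#   pip install transformers datasets evaluate sacrebleu tensorflow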

# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.52.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/translation/requirements.txt")

logger = logging.getLogger(__name__)

# Tokenizers that handle several languages and therefore need explicit source/target language codes.
MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast, M2M100Tokenizer]


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    token: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
                "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
            )
        },
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to trust the execution of code from datasets/models defined on the Hub."
                " This option should only be set to `True` for repositories you trust and in which you have read the"
                " code, as it will execute code present on the Hub on your local machine."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    source_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    target_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "An optional input evaluation data file to evaluate the metrics (sacrebleu) on (a jsonlines or csv "
                "file)."
            )
        },
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={
            "help": "An optional input test data file to evaluate the metrics (sacrebleu) on (a jsonlines or csv file)."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to model maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    num_beams: Optional[int] = field(
        default=1,
        metadata={
            "help": (
                "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
                "which is used during ``evaluate`` and ``predict``."
            )
        },
    )
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={
            "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
        },
    )
    source_prefix: Optional[str] = field(
        default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
    )
    forced_bos_token: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The token to force as the first generated token after the :obj:`decoder_start_token_id`. "
                "Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated "
                "token needs to be the target language token."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
        if self.val_max_target_length is None:
            self.val_max_target_length = self.max_target_length
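
# Multilingual checkpoints need the full language codes plus a forced BOS token; an illustrative
# flag combination (checkpoint and codes are assumptions, check your model card for the right values):
#   --model_name_or_path facebook/mbart-large-50 \
#   --source_lang en_XX --target_lang ro_RO --forced_bos_token ro_RO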


def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing the --help flag
    # to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them.
    # The information sent is the one passed as arguments along with your Python/TensorFlow versions.
    send_example_telemetry("run_translation", model_args, data_args, framework="tensorflow")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO)
    datasets.utils.logging.set_verbosity(logging.INFO)
    transformers.utils.logging.set_verbosity(logging.INFO)

    # Log the training/evaluation parameters once at startup.
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detect a previous checkpoint in the output directory, so an interrupted run can resume
    # instead of silently overwriting earlier results.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own JSON/CSV training and evaluation files, or give
    # the name of a public dataset on the Hub, which will be downloaded automatically. For translation,
    # each record must contain a "translation" field holding the source and target texts keyed by
    # language code (see preprocess_function below).
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
        )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
            extension = data_args.train_file.split(".")[-1]
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
            extension = data_args.validation_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
        )

    # Load pretrained config and tokenizer. The .from_pretrained methods guarantee that only one local
    # process can concurrently download from the Hub.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )

    prefix = data_args.source_prefix if data_args.source_prefix is not None else ""

    # We need to tokenize inputs and targets. Take the column names from whichever split we will
    # actually use, so that eval-only runs work even without a train split.
    if training_args.do_train:
        column_names = raw_datasets["train"].column_names
    elif training_args.do_eval:
        column_names = raw_datasets["validation"].column_names
    else:
        logger.info("There is nothing to do. Please pass `do_train` and/or `do_eval`.")
        return

    # For multilingual tokenizers, set the source and target language codes so that encoding inserts
    # the right special tokens (other tokenizers ignore these attributes).
    if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
        assert data_args.target_lang is not None and data_args.source_lang is not None, (
            f"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --source_lang and "
            "--target_lang arguments."
        )
        tokenizer.src_lang = data_args.source_lang
        tokenizer.tgt_lang = data_args.target_lang
        forced_bos_token_id = (
            tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None
        )

    # Get the language codes for input/target. For mBART-style codes like "en_XX", the part before the
    # underscore is the key used in the "translation" dicts.
    source_lang = data_args.source_lang.split("_")[0]
    target_lang = data_args.target_lang.split("_")[0]

    # Temporarily set max_target_length for training; pad up-front to the max length, or dynamically
    # per batch in the collator.
    max_target_length = data_args.max_target_length
    padding = "max_length" if data_args.pad_to_max_length else False

    def preprocess_function(examples):
        inputs = [ex[source_lang] for ex in examples["translation"]]
        targets = [ex[target_lang] for ex in examples["translation"]]
        inputs = [prefix + inp for inp in inputs]
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)

        # Tokenize targets with the `text_target` keyword argument
        labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)

        # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
        # padding in the loss.
        if padding == "max_length" and data_args.ignore_pad_token_for_loss:
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]

        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
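
    # Each raw example is expected to carry a "translation" dict keyed by plain language codes, e.g. one
    # jsonlines record would look like this (an illustrative sample, not taken from any real dataset):
    #   {"translation": {"en": "The weather is nice.", "ro": "Vremea este frumoasă."}}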

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        train_dataset = train_dataset.map(
            preprocess_function,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on train dataset",
        )
    else:
        train_dataset = None

    if training_args.do_eval:
        max_target_length = data_args.val_max_target_length
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        eval_dataset = eval_dataset.map(
            preprocess_function,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on validation dataset",
        )
    else:
        eval_dataset = None

    # Model creation (and everything that touches model weights, including fit/evaluate) must happen
    # inside the distribution strategy scope.
    with training_args.strategy.scope():
        model = TFAutoModelForSeq2SeqLM.from_pretrained(
            model_args.model_name_or_path,
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
        )

        # We resize the embeddings only when necessary, to avoid index errors. If you are creating a model
        # from scratch on a small vocab and want a smaller embedding size, remove this test.
        embeddings = model.get_input_embeddings()

        # Depending on the model class, the embedding weights are stored either in a Keras Embedding
        # layer (embeddings.embeddings) or in a plain weight attribute (embeddings.weight).
        if hasattr(embeddings, "embeddings"):
            embedding_size = embeddings.embeddings.shape[0]
        else:
            embedding_size = embeddings.weight.shape[0]
        if len(tokenizer) > embedding_size:
            model.resize_token_embeddings(len(tokenizer))

        if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
            model.config.forced_bos_token_id = forced_bos_token_id

        # For mBART, generation must start from the target language token, so make sure
        # decoder_start_token_id is set accordingly.
        if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
            assert data_args.target_lang is not None and data_args.source_lang is not None, (
                "mBart requires --target_lang and --source_lang"
            )
            if isinstance(tokenizer, MBartTokenizer):
                model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
            else:
                model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)

        if model.config.decoder_start_token_id is None:
            raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

        # Prepare the TF Dataset objects.
        label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
        data_collator = DataCollatorForSeq2Seq(
            tokenizer,
            model=model,
            label_pad_token_id=label_pad_token_id,
            pad_to_multiple_of=64,  # Reduces the number of unique shapes XLA has to compile, especially for generation
            return_tensors="np",
        )
        num_replicas = training_args.strategy.num_replicas_in_sync
        total_train_batch_size = training_args.per_device_train_batch_size * num_replicas
        total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas

        dataset_options = tf.data.Options()
        dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF

        # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use
        # in training. This is the recommended way to use a Hugging Face dataset when training with Keras.
        # You can also use the lower-level dataset.to_tf_dataset() method, but then you have to specify
        # things like column and label names yourself. Unlike to_tf_dataset(), this method also adds in
        # collation, so you don't need to pass a collate_fn argument to model.fit() or model.evaluate().
        # Only build the datasets we will actually use; train_dataset/eval_dataset may be None otherwise.
        tf_train_dataset = None
        if train_dataset is not None:
            tf_train_dataset = model.prepare_tf_dataset(
                train_dataset,
                collate_fn=data_collator,
                batch_size=total_train_batch_size,
                shuffle=True,
            ).with_options(dataset_options)
        tf_eval_dataset = None
        if eval_dataset is not None:
            tf_eval_dataset = model.prepare_tf_dataset(
                eval_dataset, collate_fn=data_collator, batch_size=total_eval_batch_size, shuffle=False
            ).with_options(dataset_options)
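
        # Each element of these tf.data datasets is a (features_dict, labels) pair, which is why the
        # standalone evaluation loop below unpacks `for batch, labels in tf_eval_dataset`.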

        # Optimizer and LR scheduling; only needed when we actually train.
        if training_args.do_train:
            num_train_steps = int(len(tf_train_dataset) * training_args.num_train_epochs)
            if training_args.warmup_steps > 0:
                num_warmup_steps = training_args.warmup_steps
            elif training_args.warmup_ratio > 0:
                num_warmup_steps = int(num_train_steps * training_args.warmup_ratio)
            else:
                num_warmup_steps = 0
            optimizer, lr_schedule = create_optimizer(
                init_lr=training_args.learning_rate,
                num_train_steps=num_train_steps,
                num_warmup_steps=num_warmup_steps,
                adam_beta1=training_args.adam_beta1,
                adam_beta2=training_args.adam_beta2,
                adam_epsilon=training_args.adam_epsilon,
                weight_decay_rate=training_args.weight_decay,
                adam_global_clipnorm=training_args.max_grad_norm,
            )
        else:
            # No training, so Keras just needs a placeholder optimizer for compile().
            optimizer = "sgd"

        # Metric and KerasMetricCallback setup.
        if training_args.do_eval:
            metric = evaluate.load("sacrebleu", cache_dir=model_args.cache_dir)

            if data_args.val_max_target_length is None:
                data_args.val_max_target_length = data_args.max_target_length

            gen_kwargs = {
                "max_length": data_args.val_max_target_length,
                "num_beams": data_args.num_beams,
                "no_repeat_ngram_size": 0,  # Not supported under XLA right now, and some models set it by default
            }
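
            # These kwargs are passed straight through to model.generate() both by the KerasMetricCallback
            # and by the standalone evaluation loop below, so any other generate() argument could go here.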

            def postprocess_text(preds, labels):
                preds = [pred.strip() for pred in preds]
                labels = [[label.strip()] for label in labels]

                return preds, labels

            def compute_metrics(preds):
                predictions, labels = preds
                if isinstance(predictions, tuple):
                    predictions = predictions[0]
                decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
                labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
                decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
                decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
                metrics = metric.compute(predictions=decoded_preds, references=decoded_labels)
                return {"bleu": metrics["score"]}

            # The KerasMetricCallback runs generation on the eval set at the end of each epoch and passes
            # the (predictions, labels) pair to compute_metrics. use_xla_generation compiles the generation
            # loop with XLA, which is much faster after the first call.
            metric_callback = KerasMetricCallback(
                metric_fn=compute_metrics,
                eval_dataset=tf_eval_dataset,
                predict_with_generate=True,
                use_xla_generation=True,
                generate_kwargs=gen_kwargs,
            )
            callbacks = [metric_callback]
        else:
            callbacks = []

        # Prepare the model id and card metadata used when pushing to the Hub.
        push_to_hub_model_id = training_args.push_to_hub_model_id
        model_name = model_args.model_name_or_path.split("/")[-1]
        if not push_to_hub_model_id:
            push_to_hub_model_id = f"{model_name}-finetuned-{data_args.source_lang}-{data_args.target_lang}"

        model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "translation"}
        if data_args.dataset_name is not None:
            model_card_kwargs["dataset_tags"] = data_args.dataset_name
            if data_args.dataset_config_name is not None:
                model_card_kwargs["dataset_args"] = data_args.dataset_config_name
                model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
            else:
                model_card_kwargs["dataset"] = data_args.dataset_name

        languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]
        if len(languages) > 0:
            model_card_kwargs["language"] = languages

        if training_args.push_to_hub:
            # The PushToHubCallback periodically uploads checkpoints, the tokenizer and the final model card
            # to the Hub during training.
            callbacks.append(
                PushToHubCallback(
                    output_dir=training_args.output_dir,
                    hub_model_id=push_to_hub_model_id,
                    hub_token=training_args.push_to_hub_token,
                    tokenizer=tokenizer,
                    **model_card_kwargs,
                )
            )

        # Training
        eval_metrics = None
        # Transformers models compute the right loss for their task by default when labels are passed, and
        # will use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, jit_compile=training_args.xla)

        if training_args.do_train:
            logger.info("***** Running training *****")
            logger.info(f"  Num examples = {len(train_dataset)}")
            logger.info(f"  Num Epochs = {training_args.num_train_epochs}")
            logger.info(f"  Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
            logger.info(f"  Total train batch size = {total_train_batch_size}")
            logger.info(f"  Total optimization steps = {num_train_steps}")

            if training_args.xla and not data_args.pad_to_max_length:
                logger.warning(
                    "XLA training may be slow at first when --pad_to_max_length is not set "
                    "until all possible shapes have been compiled."
                )

            history = model.fit(tf_train_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks)
            eval_metrics = {key: val[-1] for key, val in history.history.items()}

        # Evaluation: only run a standalone pass when no training happened; during training, the
        # KerasMetricCallback already computes the metrics each epoch.
        if training_args.do_eval and not training_args.do_train:
            logger.info("Evaluation...")

            # Compiling generation with XLA yields a large speedup on repeated calls, at the cost of a
            # slower first call while shapes are compiled.
            @tf.function(jit_compile=True)
            def generate(**kwargs):
                return model.generate(**kwargs)

            for batch, labels in tf_eval_dataset:
                batch.update(gen_kwargs)
                generated_tokens = generate(**batch)
                if isinstance(generated_tokens, tuple):
                    generated_tokens = generated_tokens[0]
                decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
                labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
                decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
                decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)

                metric.add_batch(predictions=decoded_preds, references=decoded_labels)

            eval_metrics = metric.compute()
            logger.info({"bleu": eval_metrics["score"]})

        # Save the final metrics and, if we are not pushing to the Hub, a local copy of the model.
        if training_args.output_dir is not None and eval_metrics is not None:
            output_eval_file = os.path.join(training_args.output_dir, "all_results.json")
            with open(output_eval_file, "w") as writer:
                writer.write(json.dumps(eval_metrics))

        if training_args.output_dir is not None and not training_args.push_to_hub:
            # If we're not pushing to the Hub, at least save a local copy when we're done.
            model.save_pretrained(training_args.output_dir)


if __name__ == "__main__":
    main()