from typing import Callable

import dotenv
import hydra
import torch.backends
from omegaconf import OmegaConf, DictConfig

# Load environment variables from a .env file, overriding any already set in
# the process environment.
dotenv.load_dotenv(override=True)

# Custom OmegaConf resolvers usable inside Hydra config files.
# NOTE(review): the 'eval' resolver executes arbitrary Python taken from
# config values — safe only as long as all configs come from trusted sources.
OmegaConf.register_new_resolver('eval', eval)
# Ceiling division: div_up(x, y) == ceil(x / y) for positive integers.
OmegaConf.register_new_resolver('div_up', lambda x, y: (x + y - 1) // y)
# Re-emit '${datamodule:attr}' as an (escaped) interpolation string so it is
# resolved later — presumably by a 'datamodule' resolver registered elsewhere
# in the project; TODO confirm against the rest of the codebase.
OmegaConf.register_new_resolver('datamodule', lambda attr: '${datamodule:' + str(attr) + '}')

# Allow TF32 on Ampere+ GPUs: faster matmul/conv kernels at slightly reduced
# float32 precision.
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
|
def dictconfig_filter_key(d: DictConfig, fn: Callable) -> DictConfig:
    """Return a copy of ``d`` keeping only keys where ``fn(key)`` is truthy.

    Recurses into nested ``DictConfig`` values; all other values (including
    lists) are kept as-is without recursion.

    Args:
        d: The config to filter.
        fn: Predicate applied to each top-level (and, recursively, nested) key.

    Returns:
        A new ``DictConfig`` containing the filtered contents.
    """
    return DictConfig(
        {k: dictconfig_filter_key(v, fn) if isinstance(v, DictConfig) else v
         for k, v in d.items() if fn(k)}
    )
|
|
|
|
@hydra.main(config_path="configs/", config_name="config.yaml")
def main(config: DictConfig):
    """Hydra entry point: dispatch to training or evaluation.

    Args:
        config: The composed Hydra configuration.

    Returns:
        Whatever ``train``/``evaluate`` return (e.g. a metric usable by
        Hydra sweepers).

    Raises:
        NotImplementedError: If ``config.mode`` is neither 'train' nor 'eval'.
    """
    # Drop keys prefixed with '__' — by convention these are private helpers
    # used only during config composition.
    config = dictconfig_filter_key(config, lambda k: not k.startswith('__'))

    # Imports deferred until after Hydra has composed the config, so that any
    # import-time side effects in src.* happen after config setup.
    from src.train import train
    from src.eval import evaluate
    from src.utils import utils

    # NOTE(review): utils.extras presumably applies config-driven setup
    # (seeding, warnings, etc.) — confirm against src/utils.
    utils.extras(config)

    # Pretty-print the fully resolved config tree when requested.
    if config.get("print_config"):
        utils.print_config(config, resolve=True)

    # Dispatch on mode; defaults to training when 'mode' is absent.
    mode = config.get('mode', 'train')
    if mode not in ['train', 'eval']:
        raise NotImplementedError(f'mode {mode} not supported')
    if mode == 'train':
        return train(config)
    elif mode == 'eval':
        return evaluate(config)
|
|
|
|
# Script entry point; Hydra parses CLI overrides inside ``main``.
if __name__ == "__main__":
    main()
|
|