import logging
import os

import hydra
import torch
import wandb
from hydra.core.hydra_config import HydraConfig
from hydra.utils import instantiate
from lightning import seed_everything
from omegaconf import DictConfig, OmegaConf

import pdeinvbench
from pdeinvbench.utils import validate_git_status
from pdeinvbench.utils.config_utils import resolve_pde_resolution


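# Hydra entry point: defaults are loaded from the "1dkdv" config in configs/
# and individual fields can be overridden on the command line.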
@hydra.main(
    version_base=None,
    config_path="configs/",
    config_name="1dkdv",
)
def main(cfg: DictConfig) -> None:
    validate_git_status()

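    # Collect the Hydra overrides for this run, keeping only the ones that
    # should appear in the W&B run name (infrastructure settings such as
    # data_root, batch_size, logging targets, and epoch limits are dropped).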
    hydra_cfg = HydraConfig.get()
    overrides = hydra_cfg.overrides.task
    overrides = [
        override
        for override in overrides
        if "data_root" not in override
        and "batch_size" not in override
        and "logging.project" not in override
        and "logging.save_dir" not in override
        and "num_nodes" not in override
        and "log_model" not in override
        and "supervised_learning_min_epoch" not in override
        and "supervised_learning_max_epoch" not in override
    ]

    resolve_pde_resolution(cfg)

    wandb_name = hydra_cfg.job["config_name"] + "_" + "_".join(overrides)

    seed_everything(cfg.seed)

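    # Turn cfg.logging into keyword arguments for wandb.init: drop the Hydra
    # _target_ entry and map save_dir onto wandb's "dir" argument. The run is
    # created here so the same handle can be reused for artifact downloads and
    # by the Lightning logger below.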
    wandb_args = OmegaConf.to_container(
        cfg.logging, resolve=True, throw_on_missing=True
    )
    wandb_args.pop("_target_")
    wandb_args["dir"] = wandb_args.pop("save_dir")
    cfg_as_dict = OmegaConf.to_container(
        cfg, resolve=True, throw_on_missing=True
    )
    experiment = wandb.init(**wandb_args, name=wandb_name, config=cfg_as_dict)

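    # Instantiate the dataloaders. The validation split is also evaluated at
    # test time; OOD and held-out IID test splits are added only when their
    # data roots exist on disk.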
    train_dataloader = instantiate(cfg.data.train_dataloader)
    val_dataloaders = []
    test_dataloaders = []

    val_dataloader = instantiate(cfg.data.val_dataloader)
    val_dataloaders.append(val_dataloader)
    test_dataloaders.append(val_dataloader)
    if "ood_dataloader" in cfg.data.keys() and os.path.exists(cfg.data.ood_data_root):
        print("ood loader")
        test_dataloaders.append(instantiate(cfg.data.ood_dataloader))
    if "ood_dataloader_extreme" in cfg.data.keys() and os.path.exists(
        cfg.data.ood_data_root_extreme
    ):
        print("ood loader extreme")
        test_dataloaders.append(instantiate(cfg.data.ood_dataloader_extreme))
    if "test_dataloader" in cfg.data.keys() and os.path.exists(cfg.data.test_data_root):
        print("test iid loader")
        test_dataloaders.append(instantiate(cfg.data.test_dataloader))

    model = instantiate(cfg.model.model_config)

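    # Optionally warm-start the model from a checkpoint stored as a W&B model
    # artifact. The state-dict keys carry a "model." prefix (from the
    # LightningModule wrapper), which is stripped before loading.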
| | if "inverse_model_wandb_run" in cfg.keys() and cfg.inverse_model_wandb_run != "": |
| | logging.info("Loading inverse model checkpoint from wandb") |
| | inverse_model_run_path = cfg.inverse_model_wandb_run |
| | artifact = experiment.use_artifact( |
| | inverse_model_run_path, type="model") |
| | checkpoint_path = os.path.join(artifact.download(), "model.ckpt") |
| | state_dict = torch.load(checkpoint_path, weights_only=False)[ |
| | "state_dict"] |
| | state_dict = {k.partition( |
| | "model.")[2]: v for k, v in state_dict.items()} |
| | model.load_state_dict(state_dict) |
| |
|
    optimizer = instantiate(cfg.optimizer, params=model.parameters())
    lr_scheduler = instantiate(cfg.lr_scheduler, optimizer=optimizer)

    module_kwargs = {
        "model": model,
        "optimizer": optimizer,
        "lr_scheduler": lr_scheduler,
    }

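    # If a tailoring optimizer is configured, pass a factory that builds a
    # fresh optimizer for a given set of parameters, so the Lightning module
    # can re-optimize at inference time (tailoring).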
| | if "tailoring_optimizer" in cfg.keys(): |
| | |
| | def tailoring_optimizer(x): |
| | return instantiate(cfg.tailoring_optimizer, params=x) |
| |
|
| | module_kwargs["tailoring_optimizer"] = tailoring_optimizer |
| | else: |
| | tailoring_optimizer = None |
| | |
| | if tailoring_optimizer != None: |
| | print(cfg.tailoring_optimizer) |
| |
|
| | print("instantiate inverse module") |
| | Inverse_module = instantiate( |
| | cfg.lightning_module, |
| | **module_kwargs, |
| | ) |
| | print("instantiate inverse module done") |
| | |
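    # Build the Lightning logger from cfg.logging, reusing the wandb run
    # created above, and nest its save directory under <project>/<run name>.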
    log_model = "all"
    if "log_model" in cfg:
        log_model = cfg.log_model
    logger = instantiate(cfg.logging, experiment=experiment, log_model=log_model)
    logger._save_dir += "/" + cfg_as_dict["logging"]["project"] + "/" + wandb_name
    logger.watch(model, log="all")

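    # Disable Lightning's inference_mode when a tailoring optimizer is used,
    # presumably because tailoring needs gradient computation during
    # validation and testing.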
    if tailoring_optimizer is not None:
        L_trainer = instantiate(
            cfg.trainer,
            logger=logger,
            inference_mode=False,
        )
    else:
        L_trainer = instantiate(cfg.trainer, logger=logger)

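    # test_run: evaluate only; otherwise train and then test the best
    # checkpoint selected during fitting.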
| | if "test_run" in cfg and cfg.test_run: |
| | L_trainer.test(model=Inverse_module, |
| | dataloaders=test_dataloaders, weights_only=False) |
| | else: |
| | |
| | L_trainer.fit( |
| | Inverse_module, |
| | train_dataloaders=train_dataloader, |
| | val_dataloaders=val_dataloaders, |
| | ) |
| | L_trainer.test(dataloaders=test_dataloaders, |
| | ckpt_path="best", weights_only=False) |
| |
|
| |
|
if __name__ == "__main__":
    main()