import argparse
import os

import torch
from PIL import Image
from torchvision.utils import save_image

from model import DiffusionModel, UNet
|
|
def generate(prompts, model_path="diffusion_model.pth", image_size=256, device="cuda"):
    """Sample one image per text prompt from a trained diffusion model.

    Args:
        prompts: List of text prompts; one image is generated per prompt.
        model_path: Path to the saved UNet ``state_dict``.
        image_size: Side length of the square images to generate.
        device: Torch device string to run sampling on.

    Side effects:
        Writes ``generated/sample_{i}.png`` for each prompt and prints a
        summary line.
    """
    model = UNet().to(device)
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    # Standard DDPM linear beta schedule over 1000 timesteps.
    betas = torch.linspace(1e-4, 0.02, 1000).to(device)
    diffusion = DiffusionModel(model, betas, device)

    # no_grad: sampling is inference-only; skip autograd bookkeeping.
    with torch.no_grad():
        images = diffusion.sample(prompts, image_size=image_size, batch_size=len(prompts))

    os.makedirs("generated", exist_ok=True)
    for i, img in enumerate(images):
        # PIL's Image.fromarray requires a uint8 HWC array; a float tensor
        # (the typical sampler output) would raise TypeError, so convert
        # explicitly. NOTE(review): assumes float samples lie in [0, 1] —
        # confirm against DiffusionModel.sample.
        if img.dtype.is_floating_point:
            img = (img.clamp(0, 1) * 255).to(torch.uint8)
        array = img.permute(1, 2, 0).cpu().numpy()  # CHW -> HWC for PIL
        Image.fromarray(array).save(f"generated/sample_{i}.png")

    print(f"Generated {len(images)} images saved in 'generated' folder")
|
|
if __name__ == "__main__":
    # CLI entry point: collect prompts and sampling options, then generate.
    cli = argparse.ArgumentParser()
    cli.add_argument("--prompts", nargs="+", required=True, help="Text prompts for generation")
    cli.add_argument("--model", default="diffusion_model.pth", help="Path to trained model")
    cli.add_argument("--size", type=int, default=256, help="Image size")
    opts = cli.parse_args()

    # Prefer GPU when one is available; otherwise fall back to CPU.
    run_device = "cuda" if torch.cuda.is_available() else "cpu"
    generate(opts.prompts, opts.model, opts.size, run_device)