| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
import argparse
import os
import tempfile
from typing import Optional

import torch
from diffusers import AutoPipelineForText2Image
from huggingface_hub import HfApi, login
from slugify import slugify
| |
|
def main(prompt: str, repo: str, hf_token: Optional[str] = None):
    """Generate one SDXL image from *prompt* and upload it to a HF dataset repo.

    Args:
        prompt: Text prompt to render.
        repo: Destination dataset repository ID (e.g. ``"user/my-images"``).
        hf_token: Hugging Face API token; falls back to the ``HF_TOKEN``
            environment variable when not given. If neither is set, the
            upload relies on any cached credentials.
    """
    token = hf_token or os.environ.get("HF_TOKEN")
    if token:
        login(token=token)

    # Derive a filesystem/repo-safe filename from the prompt itself.
    name = slugify(prompt)
    filename = f"{name}.png"
    model_id = "stabilityai/stable-diffusion-xl-base-1.0"
    api = HfApi()

    print(f"Loading model: {model_id}...")
    # fp16 variant halves download size and memory; requires a CUDA device.
    pipe = AutoPipelineForText2Image.from_pretrained(
        model_id, torch_dtype=torch.float16, variant="fp16"
    ).to("cuda")

    print(f"Generating image for prompt: '{prompt}'...")
    image = pipe(prompt=prompt).images[0]
    # BUG FIX: the temp path, upload log, and commit message previously used a
    # literal "(unknown)" placeholder instead of the slugified filename.
    # Also use tempfile.gettempdir() rather than a hard-coded /tmp.
    temp_image_path = os.path.join(tempfile.gettempdir(), filename)
    image.save(temp_image_path)
    print(f"Image saved temporarily to {temp_image_path}")

    print(f"Uploading {filename} to dataset repository: {repo}...")
    try:
        api.upload_file(
            path_or_fileobj=temp_image_path,
            path_in_repo=filename,
            repo_id=repo,
            repo_type="dataset",
            commit_message=f"add {filename}",
        )
    finally:
        # Best-effort cleanup of the local scratch file.
        if os.path.exists(temp_image_path):
            os.remove(temp_image_path)
    repo_url = f"https://huggingface.co/datasets/{repo}/tree/main"
    print(f"View it here: {repo_url}")
| |
|
| |
|
if __name__ == "__main__":
    # CLI entry point: parse the three flags and hand off to main().
    cli = argparse.ArgumentParser(description="Generate a single image using HF Jobs.")
    cli.add_argument("--prompt", required=True, help="The text prompt to generate an image from.")
    cli.add_argument("--repo", required=True, help="Your destination dataset repository ID.")
    cli.add_argument("--hf-token", help="Hugging Face API token.")
    opts = cli.parse_args()
    main(prompt=opts.prompt, repo=opts.repo, hf_token=opts.hf_token)
| |
|