Spaces:
Sleeping
Sleeping
| import gradio as gr | |
| import torch | |
| from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler | |
| from diffusers.utils import export_to_video | |
| import uuid | |
# Model loading: zeroscope v2 text-to-video pipeline, run on CPU in float32.
model_id = "vdo/zeroscope_v2_576w"
pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
# Swap the default scheduler for DPM-Solver multistep, configured from the
# pipeline's existing scheduler settings.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
# CPU-only deployment; float32 matches CPU execution (no half precision).
pipe.to("cpu")
def generate_video(prompt, num_inference_steps=20, height=320, width=576,
                   num_frames=16, fps=8):
    """Generate a short video clip from a text prompt and export it as MP4.

    Args:
        prompt: Text description of the desired video.
        num_inference_steps: Denoising steps; default 20 (balanced quality).
        height: Frame height in pixels; default 320 (model's native size).
        width: Frame width in pixels; default 576 (model's native size).
        num_frames: Number of frames to generate; default 16.
        fps: Playback frame rate of the exported file; default 8.

    Returns:
        The generated .mp4 filename (Gradio turns it into a video URL),
        or None if generation failed.
    """
    try:
        frames = pipe(
            prompt,
            num_inference_steps=num_inference_steps,
            height=height,
            width=width,
            num_frames=num_frames,
        ).frames
        # Unique name per request so repeated/concurrent calls never
        # overwrite each other's output in the working directory.
        output_filename = f"viral_{uuid.uuid4()}.mp4"
        # .frames is batched; frames[0] is the single generated clip.
        export_to_video(frames[0], output_filename, fps=fps)
        # Return ONLY the filename; Gradio serves it as the video output.
        return output_filename
    except Exception as e:
        # Best-effort boundary: log ("HATA" = Turkish for "error") and return
        # None so the UI shows an empty result instead of crashing the app.
        print(f"HATA: {str(e)}")
        return None
# Wire the generator into a minimal Gradio UI.
# api_name="predict" pins the REST endpoint name for external API clients.
demo = gr.Interface(
    fn=generate_video,
    inputs="text",
    outputs="video",
    api_name="predict",
)
demo.launch()