import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch


model_id = "nvidia/OpenReasoning-Nemotron-1.5B"

# Load the tokenizer and model weights. Use half precision on GPU to cut
# memory use; fall back to float32 on CPU, where float16 is poorly supported.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
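
# Optional: on hardware with limited memory, you can instead let the
# `accelerate` integration pick dtypes and place weights automatically.
# A sketch, assuming `accelerate` is installed; not required for the app above:
#
# model = AutoModelForCausalLM.from_pretrained(
#     model_id,
#     torch_dtype="auto",   # use the dtype stored in the checkpoint
#     device_map="auto",    # let accelerate decide device placement
# )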
def chat_api(prompt, max_new_tokens=200, temperature=0.7):
    # Tokenize the prompt and move the tensors to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,  # avoid the missing-pad-token warning
    )
    # Decode only the newly generated tokens so the prompt is not echoed
    # back at the start of the response.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
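
# OpenReasoning-Nemotron is a chat-tuned model, so wrapping the raw prompt in
# the tokenizer's chat template generally improves output quality. A minimal
# optional helper (the name build_chat_inputs is ours), assuming the checkpoint
# ships a chat template, as recent transformers releases expose via
# apply_chat_template:
def build_chat_inputs(prompt):
    text = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        tokenize=False,
        add_generation_prompt=True,
    )
    return tokenizer(text, return_tensors="pt").to(device)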
# Expose chat_api through a simple web UI (Gradio also auto-generates an
# HTTP API endpoint for it).
demo = gr.Interface(
    fn=chat_api,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Ask me anything..."),
        gr.Slider(50, 512, value=200, step=10, label="Max Tokens"),
        gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Temperature"),
    ],
    outputs="text",
    title="OpenReasoning Nemotron-1.5B API",
    description="Public Hugging Face Space that runs NVIDIA's OpenReasoning-Nemotron-1.5B model.",
)


demo.launch()
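
# Once the Space is live, the same endpoint can be called programmatically
# with gradio_client. A sketch, assuming the Space is published under the
# hypothetical id "your-username/nemotron-demo":
#
# from gradio_client import Client
#
# client = Client("your-username/nemotron-demo")
# result = client.predict(
#     "Explain the birthday paradox.",  # Prompt
#     200,                              # Max Tokens
#     0.7,                              # Temperature
#     api_name="/predict",
# )
# print(result)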