import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the CodeGen-6B-mono checkpoint and its tokenizer.
model_name = "Salesforce/codegen-6B-mono"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Use a GPU if one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)


def generate_code(prompt):
    # Tokenize the prompt and move it to the same device as the model.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Beam search gives more deterministic completions than sampling.
    outputs = model.generate(inputs["input_ids"], max_length=100, num_beams=5)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Simple Gradio UI: a text prompt in, generated code out.
interface = gr.Interface(
    fn=generate_code,
    inputs=gr.Textbox(lines=2, placeholder="Enter your code prompt here..."),
    outputs="text",
    title="CodeGen Code Generator",
    description="Generate code from text prompts using the Salesforce CodeGen-6B-mono model.",
)

interface.launch()