import gradio as gr #web interface
from transformers import AutoModelForCausalLM, AutoTokenizer #for loading the model and making the input into tokens
# Model checkpoint: Salesforce CodeGen, 350M-parameter multi-language variant.
model_name="Salesforce/codegen-350M-multi"
#initialize the tokenizer and model
# NOTE: both downloads happen at import time; first launch will block while
# the weights are fetched from the Hugging Face Hub.
tokenizer=AutoTokenizer.from_pretrained(model_name)
model=AutoModelForCausalLM.from_pretrained(model_name)
def generate_code(prompt, max_length=100, temperature=0.7, top_p=0.95):
    """Generate code continuing *prompt* with the CodeGen model.

    Args:
        prompt: Text (natural language or code) to condition generation on.
        max_length: Total token budget (prompt + generated tokens). Gradio
            sliders deliver floats, so the value is coerced to ``int``
            before being handed to ``generate``.
        temperature: Sampling temperature; higher values are more random.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The decoded output string, which includes the original prompt.
    """
    inputs = tokenizer(prompt, return_tensors='pt')
    outputs = model.generate(
        **inputs,
        max_length=int(max_length),  # generate() requires an integer budget
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
        # CodeGen's tokenizer defines no pad token; padding with EOS avoids
        # the open-end-generation warning from transformers.
        pad_token_id=tokenizer.eos_token_id,
    )
    # skip_special_tokens drops markers such as <EOS> and <eol> from the text.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Gradio front end: prompt box + sampling controls wired to generate_code.
with gr.Blocks() as demo:
    gr.Markdown("## CODE GENERATION WITH CODEGEN MODEL")

    # User input.
    prompt_box = gr.Textbox(lines=10, label='enter your prompt for code generation')

    # Sampling hyperparameters, forwarded verbatim to generate_code.
    length_slider = gr.Slider(minimum=50, maximum=500, value=100, label='Max Length')
    temp_slider = gr.Slider(minimum=0.1, maximum=0.9, value=0.7, label='Temperature')
    top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, label='Top P value')

    # Model output display.
    result_box = gr.Textbox(lines=20, label='generated_code')

    run_button = gr.Button("Generate Code")
    run_button.click(
        fn=generate_code,
        inputs=[prompt_box, length_slider, temp_slider, top_p_slider],
        outputs=result_box,
    )

demo.launch()