# Gradio app: interactive code generation with the Salesforce CodeGen model.
import gradio as gr  # web interface
from transformers import AutoModelForCausalLM, AutoTokenizer  # model loading and tokenization

# Checkpoint: a small (350M-parameter) multi-language code-generation model.
model_name = "Salesforce/codegen-350M-multi"

# Initialize the tokenizer and model once at import time so every request reuses them.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
def generate_code(prompt, max_length=100, temperature=0.7, top_p=0.95):
    """Generate code continuing *prompt* with the CodeGen model.

    Args:
        prompt: Text/code for the model to complete.
        max_length: Total token budget (prompt + generated tokens). Gradio
            sliders deliver floats, so the value is cast to int before use.
        temperature: Sampling temperature; higher values give more random output.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The decoded generation (prompt included) as a single string.
    """
    inputs = tokenizer(prompt, return_tensors='pt')
    outputs = model.generate(
        **inputs,
        max_length=int(max_length),  # generate() requires an int; the slider yields a float
        temperature=temperature,
        top_p=top_p,
        do_sample=True,  # sampling must be on for temperature/top_p to take effect
        pad_token_id=tokenizer.eos_token_id,  # CodeGen has no pad token; avoids a runtime warning
    )
    # skip_special_tokens removes markers such as <EOS> and <eol> from the text.
    generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_code
# --- Gradio interface ---
with gr.Blocks() as demo:
    gr.Markdown("## CODE GENERATION WITH CODEGEN MODEL")
    # Prompt input plus the three sampling controls fed to generate_code.
    prompt = gr.Textbox(lines=10, label='enter your prompt for code generation')
    max_length = gr.Slider(50, 500, value=100, label='Max Length')
    temperature = gr.Slider(0.1, 0.9, value=0.7, label='Temperature')
    top_p = gr.Slider(0.1, 1.0, value=0.95, label='Top P value')
    output_box = gr.Textbox(lines=20, label='generated_code')
    generate_button = gr.Button("Generate Code")
    # Wire the button: slider/textbox values map positionally onto
    # generate_code's parameters; the return value fills the output box.
    generate_button.click(
        fn=generate_code,
        inputs=[prompt, max_length, temperature, top_p],
        outputs=output_box,
    )

# Launch only when executed as a script, so the module stays importable.
if __name__ == "__main__":
    demo.launch()