| import gradio as gr |
| from transformers import pipeline |
|
|
| |
| |
# Extractive QA model fine-tuned on SQuAD 2.0, which includes
# unanswerable questions (hence handle_impossible_answer at call time).
MODEL_NAME = "deepset/roberta-large-squad2"

# Build the Hugging Face pipeline once at import time so every
# request reuses the same loaded model and tokenizer.
qa_pipeline = pipeline(
    task="question-answering",
    model=MODEL_NAME,
    tokenizer=MODEL_NAME,
)
|
|
| |
def answer_question(question, context):
    """Answer *question* by extracting a span from *context*.

    Args:
        question: Natural-language question to answer.
        context: Passage of text the answer must be extracted from.

    Returns:
        The best answer span found by the QA pipeline, or a fallback
        message when the inputs are blank or the model is not confident.
    """
    # Guard against blank inputs: the HF QA pipeline raises on empty
    # question/context strings, so short-circuit with the fallback reply.
    if not question or not question.strip() or not context or not context.strip():
        return "🤔 I’m not sure – the model couldn’t find a clear answer in the text."

    result = qa_pipeline(
        question=question,
        context=context,
        handle_impossible_answer=True,  # SQuAD2-style "no answer" allowed
        top_k=1,                        # single best span as a dict
        max_answer_len=30,
    )
    answer = result.get("answer", "").strip()
    score = result.get("score", 0.0)

    # Treat an empty span or a low-confidence prediction as "no answer".
    if not answer or score < 0.1:
        return "🤔 I’m not sure – the model couldn’t find a clear answer in the text."
    return answer
|
|
| |
# Wire the QA function into a simple two-input, one-output web UI.
interface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.components.Textbox(lines=2, label="Question"),
        gr.components.Textbox(lines=10, label="Context"),
    ],
    outputs=gr.components.Textbox(label="Answer"),
    title="Question Answering Demo",
    description=(
        "Ask a question and get an answer from the provided context. "
        "Supports unanswerable questions."
    ),
)
|
|
# Launch the Gradio app only when executed as a script, not on import.
if __name__ == "__main__":
    interface.launch()
|
|