# Gradio demo: extractive question answering with the sundea/Work-QA model.
| |
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
import gradio as gr
|
|
# Load the extractive-QA model and tokenizer once at import time and wrap
# them in a ready-to-call question-answering pipeline.
MODEL_ID = 'sundea/Work-QA'
model = AutoModelForQuestionAnswering.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
QA = pipeline('question-answering', model=model, tokenizer=tokenizer)
|
|
def get_out(text1, text2):
    """Answer question *text1* using *text2* as the supporting context.

    Feeds the pair to the module-level ``QA`` pipeline and returns only the
    extracted answer span (a string), discarding score/offsets.
    """
    payload = {'question': text1, 'context': text2}
    return QA(payload)['answer']
|
|
# Sample (question, context) pairs pre-filled in the UI; every example
# shares the same short Chinese context paragraph.
_EXAMPLE_CONTEXT = '李理住在南京,他养了只小狗,名字叫丢丢,它是棕色毛色。'
examples = [
    ['李理居住在哪', _EXAMPLE_CONTEXT],
    ['李理的小狗叫什么', _EXAMPLE_CONTEXT],
    ['李理的小狗是什么颜色的', _EXAMPLE_CONTEXT],
]
|
|
# Build and launch the web UI.
#
# Fixes vs. the original:
# * `gr.inputs.Textbox` / `gr.outputs.Textbox` are the deprecated Gradio
#   1.x/2.x component namespaces, removed in Gradio 3.x — components are
#   now accessed directly as `gr.Textbox` for both inputs and outputs.
# * `gr.Interface` is constructed normally and then launched; calling
#   `launch()` from inside a `with gr.Interface(...)` context manager is
#   not the documented pattern (the `with` form is for building Blocks
#   layouts, not for launching).
app = gr.Interface(
    fn=get_out,
    inputs=[gr.Textbox(label='question'), gr.Textbox(label='context')],
    outputs=gr.Textbox(label='answer'),
    title='Question Answering',
    examples=examples,
)

# Start the local web server (blocks until the app is stopped).
app.launch()
|
|
|
|
|
|