File size: 1,524 Bytes
a2f95a1
52e8e18
b593152
52e8e18
a2f95a1
 
 
 
52e8e18
beead2c
a2f95a1
 
 
 
 
 
 
 
 
 
 
 
e868b5a
a2f95a1
beead2c
 
 
6a6f723
 
 
 
beead2c
 
 
a2f95a1
beead2c
 
a2f95a1
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
import gradio as gr
import torch, torchvision

# Load model and tokenizer
# Extractive-QA checkpoint: DistilBERT fine-tuned on SQuAD.
# NOTE: fetched from the Hugging Face Hub on first run (requires network);
# loading happens at import time, so startup blocks until the download completes.
model_name = "mbwolff/distilbert-base-uncased-finetuned-squad"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)

def answer_question(question, context):
    """
    Answer a question from a given context via extractive QA.

    The model scores each token of the (question, context) pair as a
    possible answer start/end; the highest-scoring span is decoded back
    to text.

    Args:
        question: The question string.
        context: The passage in which to look for the answer.

    Returns:
        The extracted answer string, or "" when the model predicts a
        degenerate span (end before start).
    """
    inputs = tokenizer.encode_plus(question, context, add_special_tokens=True, return_tensors="pt")
    input_ids = inputs["input_ids"].tolist()[0]
    # Inference only: skip autograd graph construction to save memory/compute.
    with torch.no_grad():
        outputs = model(**inputs)
    answer_start = torch.argmax(outputs.start_logits)
    answer_end = torch.argmax(outputs.end_logits) + 1  # end index is exclusive
    # Guard against an invalid span; the original code would silently
    # return an empty slice in this case.
    if answer_end <= answer_start:
        return ""
    answer = tokenizer.convert_tokens_to_string(
        tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end])
    )
    return answer

# Define Gradio interface
# Define Gradio interface: two text inputs (question, context) -> text answer.
# (Removed dead commented-out code using the deprecated gr.inputs.* API.)
iface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.Textbox(label="Enter your question here...", lines=2),
        gr.Textbox(label="Enter the context here...", lines=5)
    ],
    outputs="text",
    title="Question Answering Chatbot",
    description="Ask a question and provide a context, and the chatbot will try to answer it."
)

# Launch the web UI only when run as a script (not when imported,
# e.g. by Hugging Face Spaces or a test harness).
if __name__ == "__main__":
    iface.launch()