| import gradio as gr | |
| # from huggingface_hub import InferenceClient | |
| import random | |
| from transformers import pipeline | |
def random_response(message, history):
    """Baseline chat handler: ignore the conversation and answer "Yes" or "No" at random.

    Kept as a stand-in `fn` for gr.ChatInterface when smoke-testing the UI.
    """
    del message, history  # unused; signature dictated by gr.ChatInterface
    return random.choice(("Yes", "No"))
def eqa(message, history):
    """Two-turn extractive-QA chat handler for gr.ChatInterface (type="messages").

    Protocol: the first user message is treated as the *context* passage and is
    acknowledged with 'Okay'; the next user message is treated as a *question*
    and is answered by a SQuAD-finetuned DistilBERT QA pipeline run against
    that stored context.

    Args:
        message: The latest user message (str).
        history: Gradio "messages"-style history — a list of
            {'role': ..., 'content': ...} dicts.

    Returns:
        The extracted answer span (str) when the previous assistant turn was
        the 'Okay' acknowledgement; otherwise the literal 'Okay'.
    """
    # We are in "answer" mode only if the assistant's last turn was the
    # 'Okay' acknowledgement AND there is a preceding user turn to use as
    # context. The original checked len(history) >= 1 and then indexed
    # history[-2], which raises IndexError on a single-entry history
    # (e.g. an initial assistant greeting) — require >= 2 instead.
    awaiting_question = (
        len(history) >= 2
        and history[-1]['role'] == 'assistant'
        and history[-1]['content'] == 'Okay'
    )
    if not awaiting_question:
        return 'Okay'

    # Lazily build the QA pipeline once and cache it on the function itself;
    # the original re-downloaded/re-loaded the model on every question.
    pipe = getattr(eqa, '_pipe', None)
    if pipe is None:
        pipe = pipeline(model="mbwolff/distilbert-base-uncased-finetuned-squad")
        eqa._pipe = pipe

    # history[-2] is the user turn that supplied the context passage.
    return pipe(question=message, context=history[-2]['content'])['answer']
# Wire the extractive-QA handler into a "messages"-format chat UI and serve it.
chat_ui = gr.ChatInterface(
    fn=eqa,
    type="messages",
)
chat_ui.launch()