import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the locally fine-tuned weights and pair them with the base model's
# tokenizer.
# NOTE(review): the weights come from "./my_ai_assistant" but the tokenizer
# from the base "mistralai/Mistral-7B-Instruct-v0.2" checkpoint — presumably
# the fine-tune did not save its own tokenizer; confirm the vocabularies match.
model = AutoModelForCausalLM.from_pretrained("./my_ai_assistant")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")

# One shared text-generation pipeline, reused for every request.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
def respond(message: str) -> str:
    """Generate an assistant reply to *message* with the fine-tuned model.

    Returns only the newly generated text, capped at 200 new tokens.
    """
    # return_full_text=False stops the pipeline from prepending the prompt to
    # its output; the original returned the echoed user message plus the reply.
    # NOTE(review): Mistral-Instruct checkpoints expect the [INST] chat
    # template — consider tokenizer.apply_chat_template(...) for better output.
    outputs = pipe(message, max_new_tokens=200, return_full_text=False)
    return outputs[0]["generated_text"]
# Minimal Gradio UI: one text box in, one text box out, served locally.
demo = gr.Interface(
    fn=respond,
    inputs="text",
    outputs="text",
    title="My AI Assistant",
)
demo.launch()