import streamlit as st
from huggingface_hub import InferenceClient

# Inference API client for the hosted Mixtral-8x7B-Instruct model.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

def format_prompt(message, history):
    # Build a Mixtral instruction prompt: each past turn is wrapped in
    # [INST] ... [/INST] followed by the model's reply, ending with the new message.
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

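# For example, format_prompt("Hi", [("Hello", "Hey there!")]) returns
# "<s>[INST] Hello [/INST] Hey there!</s> [INST] Hi [/INST]".
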
def generate(prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    # Sampling requires a strictly positive temperature, so clamp it to a small floor.
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)

    # Stream tokens from the Inference API and accumulate them into a single reply.
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
    return output

st.title("Mixtral 8x7B Chat")

# The conversation history persists across Streamlit reruns via session state.
if 'history' not in st.session_state:
    st.session_state.history = []

user_input = st.text_input("Your message:", key="user_input")

if st.button("Send"):
    if user_input:
        # Generate a reply for the new message and record the completed turn.
        bot_response = generate(user_input, st.session_state.history)
        st.session_state.history.append((user_input, bot_response))

# Render the conversation as a simple transcript.
chat_text = ""
for user_msg, bot_msg in st.session_state.history:
    chat_text += f"You: {user_msg}\nBot: {bot_msg}\n\n"
st.text_area("Chat", value=chat_text, height=300, disabled=False)
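# To run this app locally (assuming the file is saved as app.py): streamlit run app.py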