# Spaces:
# Build error
# Build error
# (The three lines above are hosting-platform build-log residue that was pasted
# into the source; they are kept as comments so the file remains parseable.)
| import streamlit as st | |
| import pandas as pd | |
| from fuzzywuzzy import process | |
| from langchain_community.llms import LlamaCpp | |
| from langchain_core.callbacks import StreamingStdOutCallbackHandler | |
| from langchain_core.prompts import PromptTemplate | |
# Lookup tables for the app.
# location.csv is exported with Windows-1252 encoding (contains non-UTF-8
# characters), so it must be read with that codec; train.csv is plain UTF-8.
df = pd.read_csv('location.csv', encoding='Windows-1252')   # kendra locations, keyed by 'Pin'
df2 = pd.read_csv('train.csv')                              # Q&A pairs: 'Question' / 'Context'
# Local GGUF model served through llama.cpp via LangChain's wrapper.
# Tokens stream to stdout as they are generated.
llm = LlamaCpp(
    model_path="unsloth.Q5_K_M.gguf",
    temperature=0.01,   # near-deterministic decoding
    max_tokens=500,     # hard cap; truncate_at_full_stop() trims further
    top_p=3,            # NOTE(review): top_p is normally in (0, 1]; 3 effectively disables nucleus filtering — confirm intent
    callbacks=[StreamingStdOutCallbackHandler()],
    verbose=False,
    stop=["###"],       # stop before the model emits the next prompt-section marker
)
# Alpaca-style prompt layout. The fine-tuned model expects these exact
# "### Instruction/Input/Response" section markers, which is also why the
# LLM is configured to stop on "###".
template = """Below is an instruction that describes a task, paired with an input that provides further context. Write a lengthy detailed response that appropriately completes the request.
### Instruction:
{instruction}
### Input:
{input}
### Response:
{response}"""

prompt = PromptTemplate.from_template(template)
def find_best_match(query):
    """Fuzzy-match *query* against the training questions and return the
    context paired with the closest question.

    Falls back to a fixed "not found" message when the question list is
    empty (the only case in which ``extractOne`` returns ``None``).
    """
    questions = df2['Question'].tolist()
    hit = process.extractOne(query, questions)  # (best_question, score) or None
    if not hit:
        return "No relevant information found."
    # Map the matched question back to its row and return that row's context.
    matched_index = questions.index(hit[0])
    return df2['Context'].tolist()[matched_index]
def truncate_at_full_stop(text, max_length=500):
    """Trim *text* to at most *max_length* characters, preferring to cut
    at the end of a sentence.

    Returns *text* unchanged when it already fits. Otherwise the text is
    hard-cut at *max_length* and, if a '.' occurs inside that window, the
    cut is pulled back to just after the last full stop. If no '.' is
    found, the hard character cut is returned as-is.
    """
    if len(text) <= max_length:
        return text
    truncated = text[:max_length]
    # Fixed: removed leftover debug print() calls that spammed stdout
    # (and the Streamlit server log) on every truncation.
    last_period = truncated.rfind('.')
    if last_period == -1:
        # No sentence boundary inside the window: fall back to the hard cut.
        return truncated
    return truncated[:last_period + 1]
# Seed per-session state on the first run; later reruns keep existing values.
_session_defaults = {
    'selected_service': "Home",  # which page is currently shown
    'chat_history': [],          # Medicine Services transcript
    'history': [],               # Assistant transcript
    'input': '',                 # shared text-input widget value
}
for _key, _default in _session_defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
# Sidebar navigation: clicking a button switches the page for this rerun.
with st.sidebar:
    st.title("Select the Service")
    for _service in ("Medicine Services", "Kendra Locator", "Assistant"):
        if st.button(_service):
            st.session_state.selected_service = _service
# ---- Page routing: render whichever page the sidebar selected ----
if st.session_state.selected_service == "Home":
    st.title("Welcome to Medical Service Center")
    st.write("Explore the options in the sidebar to get started.")
elif st.session_state.selected_service == "Medicine Services":
    st.title("Medicine Services")
    # Replay the running transcript above the input box.
    for exchange in st.session_state.chat_history:
        st.write(f"**User:** {exchange['user']}")
        st.write(f"**Bot:** {exchange['bot']}")

    def handle_input():
        # on_change callback: fuzzy-look-up the medicine, log the exchange,
        # then clear the widget so the box is empty on the next rerun.
        query = st.session_state['input']
        if not query:
            return
        st.session_state.chat_history.append(
            {"user": query, "bot": find_best_match(query)}
        )
        st.session_state['input'] = ''

    # Persistent text input (shared 'input' key drives the callback above).
    st.text_input("Enter medicine:", key="input", on_change=handle_input)
| elif st.session_state.selected_service == "Kendra Locator": | |
| st.title("Kendra Locator") | |
| display_option = st.selectbox("Select:", ["Address", "Email"]) | |
| pin_code_input = st.text_input("Enter Pin Code:") | |
| if st.button("Locate"): | |
| if pin_code_input: | |
| result = df[df['Pin'].astype(str) == pin_code_input] | |
| if not result.empty: | |
| if display_option == "Address": | |
| st.write(f"Address: {result['Address'].values[0]}") | |
| elif display_option == "Email": | |
| st.write(f"Email: {result['Email'].values[0]}") | |
| else: | |
| st.write("No results found.") | |
| else: | |
| st.write("Please enter a pin code.") | |
| elif st.session_state.selected_service == "Assistant": | |
| st.title("Query Assistance") | |
| # Display AI chat history | |
| for chat in st.session_state.history: | |
| st.write(f"**User Query:** {chat['user']}") | |
| st.write(f"**Chatbot:** {chat['bot']}") | |
| # Function to handle user input | |
| def handle_input(): | |
| user_input = st.session_state['input'] | |
| if user_input: | |
| # Format the prompt | |
| formatted_prompt = prompt.format( | |
| instruction="You are an all-knowing Medical AI. Provide detailed responses to only medicine-related queries.", | |
| input=user_input, | |
| response="" # Leave this blank for generation! | |
| ) | |
| # Generate response | |
| response = llm.invoke(formatted_prompt) | |
| # Truncate response if necessary | |
| truncated_response = truncate_at_full_stop(response) | |
| # Update the chat history | |
| st.session_state.history.append({"user": user_input, "bot": truncated_response}) | |
| # Clear the input box | |
| st.session_state['input'] = '' | |
| # Persistent text input at the top | |
| st.text_input("Enter Query:", key="input", on_change=handle_input) | |