|
|
|
|
|
|
|
|
import gradio as gr |
|
|
import os |
|
|
from openai import OpenAI |
|
|
|
|
|
|
|
|
# Initialise the Hugging Face inference client.
# BUG FIX: the original code performed a bare `os.environ["HF_TOKEN"]`
# lookup *before* this try/except, so a missing token raised KeyError
# before the friendly fatal message below could ever run. The lookup
# inside the try block is the only one needed.
try:
    hf_client = OpenAI(
        # Route OpenAI-compatible requests through the HF inference router.
        base_url="https://router.huggingface.co/v1",
        api_key=os.environ["HF_TOKEN"],
    )
except KeyError:
    print("FATAL ERROR: Please set the 'HF_TOKEN' environment variable.")
    raise
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Map of yacht model name -> view mode -> hosted VR tour URL.
# Keys are matched exactly by generate_vr_html(model, view).
# NOTE(review): every view mode of a given model currently points at the
# same tour URL — confirm whether distinct Compact/Tall/Full links exist.
yacht_models = {
    "Princess Y85": {
        "Compact": "https://vrcloud.com/?pv=1550754573&play=1",
        "Tall": "https://vrcloud.com/?pv=1550754573&play=1",
        "Full": "https://vrcloud.com/?pv=1550754573&play=1"
    },
    "Princess X95": {
        "Compact": "https://vrcloud.com/?pv=1620886782&play=1",
        "Tall": "https://vrcloud.com/?pv=1620886782&play=1",
        "Full": "https://vrcloud.com/?pv=1620886782&play=1"
    }
}
|
|
|
|
|
# Fallback model/view used when the user's message doesn't name one,
# and by generate_vr_html when an unknown combination is requested.
DEFAULT_MODEL = "Princess Y85"

DEFAULT_VIEW = "Full"

# Placeholder HTML shown in the VR pane before any tour is requested
# (and again after the chat is cleared).
DEFAULT_VR_HTML = """
<div style="text-align:center; padding: 20px; border: 1px dashed #ccc; border-radius: 5px;">
<h3>🚢 Ask the chat to start the Virtual Reality Experience!</h3>
<p>Try phrases like: <b>Show me the Y85 full VR view</b> or <b>Show me the X95 compact view</b>.</p>
</div>
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_vr_html(model, view):
    """Build the HTML snippet that embeds the VR tour for *model*/*view*.

    Any model/view combination not present in ``yacht_models`` silently
    falls back to ``DEFAULT_MODEL``/``DEFAULT_VIEW`` so the pane always
    has something valid to render.
    """
    views_for_model = yacht_models.get(model)
    if views_for_model is None or view not in views_for_model:
        # Unknown selection — revert to the default experience.
        model, view = DEFAULT_MODEL, DEFAULT_VIEW
        views_for_model = yacht_models[model]
    link = views_for_model[view]

    return f"""
    <div style="text-align:center">
    <h3>🚢 {model} – {view} VR View</h3>
    <iframe src="{link}" width="100%" height="600px" style="border:none; border-radius:10px;"></iframe>
    </div>
    """
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def yacht_chatbot(user_input, chat_history):
    """Handle one chat turn: ask the LLM for a reply and pick the VR view.

    Parameters:
        user_input: raw message typed by the user.
        chat_history: prior turns; items may be legacy ``(user, bot)``
            tuples or ``{"role", "content"}`` message dicts.

    Returns:
        A tuple of (history in messages format, HTML string for the VR pane).
    """
    # System prompt anchors the assistant's persona and capabilities.
    messages = [
        {
            "role": "system",
            "content": (
                "You are a knowledgeable yacht expert who provides information about "
                "Princess Yachts. You may also trigger VR views for Y85 or X95 "
                "in Compact, Tall, or Full view modes."
            )
        }
    ]

    # Replay prior turns so the model has conversational context.
    for item in chat_history:
        if isinstance(item, tuple) and len(item) == 2:
            user_msg, bot_msg = item
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if bot_msg:
                messages.append({"role": "assistant", "content": bot_msg})
        elif isinstance(item, dict) and "role" in item and "content" in item:
            messages.append(item)

    messages.append({"role": "user", "content": user_input})

    # Fallback reply in case the AI call below fails.
    bot_reply = "Let me help with that."

    try:
        resp = hf_client.chat.completions.create(
            model="meta-llama/Llama-3.1-8B-Instruct",
            messages=messages,
            temperature=0.7
        )
        bot_reply = resp.choices[0].message.content
    except Exception as e:
        # Surface the failure in-chat rather than crashing the UI.
        bot_reply = f"AI service error: {e}"

    # BUG FIX: the original function returned here unconditionally with
    # DEFAULT_VR_HTML, which made all of the model/view keyword detection
    # below unreachable — the VR pane could never change. The early
    # append/return has been removed so the detection actually runs.
    text = user_input.lower()

    # Pick the yacht model mentioned in the message (default otherwise).
    if "y85" in text:
        model = "Princess Y85"
    elif "x95" in text:
        model = "Princess X95"
    else:
        model = DEFAULT_MODEL

    # Pick the requested view mode; "3d"/"vr" both map to the Full view.
    if "compact" in text:
        view = "Compact"
    elif "tall" in text:
        view = "Tall"
    elif "full" in text or "3d" in text or "vr" in text:
        view = "Full"
    else:
        view = DEFAULT_VIEW

    vr_html = generate_vr_html(model, view)

    chat_history.append((user_input, bot_reply))
    return format_history(chat_history), vr_html
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def format_history(history_items):
    """Normalise a mixed chat history into Gradio "messages" format.

    Legacy ``(user, bot)`` tuples expand into a user dict followed by an
    assistant dict; items already shaped as role dicts pass through
    unchanged; anything else is dropped.
    """
    normalised = []
    for entry in history_items:
        if isinstance(entry, tuple) and len(entry) == 2:
            user_text, assistant_text = entry
            normalised.extend([
                {"role": "user", "content": user_text},
                {"role": "assistant", "content": assistant_text},
            ])
        elif isinstance(entry, dict) and "role" in entry:
            normalised.append(entry)
    return normalised
|
|
|
|
|
|
|
|
def clear_textbox():
    """Return an empty string, used to blank the message textbox."""
    return str()
|
|
|
|
|
def clear_all():
    """Reset the chat history, VR pane, and textbox to their initial state."""
    return list(), DEFAULT_VR_HTML, str()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Gradio UI layout and event wiring ---
with gr.Blocks(title="Princess Yacht Virtual Chatbot") as demo:

    gr.Markdown("""
    # 🛥️ Princess Yacht Virtual Experience
    Chat with the AI and load a **live 3D VR yacht view** for Princess Y85 or X95.
    """)

    # VR pane; starts with the placeholder and is swapped by chat events.
    dynamic_vr_html = gr.HTML(DEFAULT_VR_HTML)

    # NOTE(review): yacht_chatbot returns history in "messages"-dict format,
    # but this Chatbot is constructed without type="messages" — confirm the
    # installed Gradio version renders dicts correctly here.
    chatbot = gr.Chatbot(label="Princess Yacht Chat", height=400)

    msg = gr.Textbox(
        label="Ask about the yacht or VR",
        placeholder="Try: show me the X95 full VR"
    )

    clear = gr.Button("Clear Chat")

    # Submitting a message runs the chatbot, then blanks the textbox.
    msg.submit(
        yacht_chatbot,
        inputs=[msg, chatbot],
        outputs=[chatbot, dynamic_vr_html],
        queue=False
    ).then(
        clear_textbox,
        None,
        [msg],
        queue=False
    )

    # Clear button resets chat, VR pane, and textbox in one event.
    clear.click(
        clear_all,
        None,
        [chatbot, dynamic_vr_html, msg],
        queue=False
    )
|
|
|
|
|
|
|
|
# Start the app. debug=True surfaces tracebacks in the console;
# share=False keeps it local; ssr_mode=False disables server-side
# rendering (needed in some hosted environments).
demo.launch(
    debug=True,
    share=False,
    ssr_mode=False
)
|
|
|