Update app.py
app.py CHANGED
@@ -1,26 +1,30 @@
-
-from huggingface_hub import InferenceClient, HfApi
+
 import os
+import gradio as gr
+from gradio import ChatMessage
+from huggingface_hub import HfApi
 import requests
-
+import re
 import traceback
-from PIL import Image
-from io import BytesIO
-import asyncio
-from gradio_client import Client
 import time
 import threading
 import json
-import
+import asyncio
+from typing import List, Dict, Tuple, Iterator
+import google.generativeai as genai
 
 # HuggingFace API key (for Space analysis)
 HF_TOKEN = os.getenv("HF_TOKEN")
 hf_api = HfApi(token=HF_TOKEN)
 
-# Gemini 2.0 Thinking model API key and client (for the LLM)
-
-
+# Gemini 2.0 Flash Thinking model API key and client (for the LLM)
+GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
+genai.configure(api_key=GEMINI_API_KEY)
+model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
 
+# --------------------------------------------------
+# File and Space analysis helpers (existing code kept)
+# --------------------------------------------------
 def get_headers():
     if not HF_TOKEN:
         raise ValueError("Hugging Face token not found in environment variables")
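
Note on the hunk above: the commit swaps the Hugging Face InferenceClient (plus the PIL/BytesIO/gradio_client imports) for the google-generativeai SDK, configured once at import time. A minimal smoke test of the new client path could look like the sketch below; it assumes GEMINI_API_KEY is exported and the google-generativeai package is installed, and it reuses the model name the commit pins.

import os
import google.generativeai as genai

# Configure the SDK from the environment, as app.py now does at import time.
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")

# One-shot, non-streaming call; response.text joins the returned text parts.
response = model.generate_content("Reply with a single word.")
print(response.text)
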
@@ -75,57 +79,6 @@ def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
         formatted += format_tree_structure(child, indent + " ")
     return formatted
 
-def summarize_code(app_content: str):
-    system_message = "You are an AI assistant that analyzes and summarizes Python code. Summarize the given code concisely in three lines or fewer."
-    user_message = f"Summarize the following Python code in three lines or fewer:\n\n{app_content}"
-
-    messages = [
-        {"role": "system", "content": system_message},
-        {"role": "user", "content": user_message}
-    ]
-
-    try:
-        response = gemini_client.chat_completion(messages, max_tokens=200, temperature=0.7)
-        return response.choices[0].message.content
-    except Exception as e:
-        return f"Error while generating summary: {str(e)}"
-
-def analyze_code(app_content: str):
-    system_message = """You are an AI assistant that analyzes Python code. Analyze the given code and explain the following items:
-A. Background and necessity
-B. Functional usefulness and value
-C. Distinctive strengths
-D. Target audience and market
-E. Expected benefits
-Compare it with existing and similar projects in the analysis. Output in Markdown format."""
-    user_message = f"Analyze the following Python code:\n\n{app_content}"
-
-    messages = [
-        {"role": "system", "content": system_message},
-        {"role": "user", "content": user_message}
-    ]
-
-    try:
-        response = gemini_client.chat_completion(messages, max_tokens=1000, temperature=0.7)
-        return response.choices[0].message.content
-    except Exception as e:
-        return f"Error while generating analysis: {str(e)}"
-
-def explain_usage(app_content: str):
-    system_message = "You are an AI assistant that analyzes Python code and explains how to use it. Based on the given code, describe the usage in detail, as if viewing the actual screen. Output in Markdown format."
-    user_message = f"Explain how to use the following Python code:\n\n{app_content}"
-
-    messages = [
-        {"role": "system", "content": system_message},
-        {"role": "user", "content": user_message}
-    ]
-
-    try:
-        response = gemini_client.chat_completion(messages, max_tokens=800, temperature=0.7)
-        return response.choices[0].message.content
-    except Exception as e:
-        return f"Error while generating usage explanation: {str(e)}"
-
 def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
     """
     Dynamically adjusts the number of lines based on the code content.
@@ -138,16 +91,13 @@ def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int
     Returns:
     - int: the adjusted number of lines
     """
-    # Count the number of lines in the code
    num_lines = len(code_content.split('\n'))
-    # Use min_lines if the count falls below it, max_lines if it exceeds it
    return min(max(num_lines, min_lines), max_lines)
 
 def analyze_space(url: str, progress=gr.Progress()):
     try:
         space_id = url.split('spaces/')[-1]
 
-        # Space ID validation fix
        if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
            raise ValueError(f"Invalid Space ID format: {space_id}")
 
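
The return expression in adjust_lines_for_code is a standard clamp: the inner max() enforces the floor, the outer min() the ceiling. Illustrative values:

# Clamp behavior of adjust_lines_for_code (min_lines=10, max_lines=100):
assert min(max(5, 10), 100) == 10      # short file -> floor of 10 lines
assert min(max(42, 10), 100) == 42     # mid-sized file -> keeps its own count
assert min(max(500, 10), 100) == 100   # long file -> capped at 100
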
@@ -169,7 +119,6 @@ def analyze_space(url: str, progress=gr.Progress()):
         progress(0.9, desc="Generating usage explanation...")
         usage = explain_usage(app_content)
 
-        # Compute the line count to configure the lines setting
         app_py_lines = adjust_lines_for_code(app_content)
 
         progress(1.0, desc="Done")
@@ -179,38 +128,178 @@
         print(traceback.format_exc())
         return f"An error occurred: {str(e)}", "", None, "", "", "", "", 10
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-)
-
-
-
-
+# --------------------------------------------------
+# Gemini 2.0 Flash Thinking model helper functions
+# --------------------------------------------------
+def format_chat_history(messages: list) -> list:
+    """
+    Converts a list of Gradio ChatMessage objects into a format Gemini can understand.
+    """
+    formatted_history = []
+    for message in messages:
+        # Skip thinking messages (messages that carry metadata).
+        if not (hasattr(message, "metadata") and message.metadata):
+            formatted_history.append({
+                "role": "user" if message.role == "user" else "assistant",
+                "parts": [message.content or ""]
+            })
+    return formatted_history
+
+def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
+    """
+    Sends the system and user messages and accumulates the streamed reply into the final text.
+    """
+    # Build the initial chat history (plain strings)
+    initial_messages = [
+        ChatMessage(role="system", content=system_message),
+        ChatMessage(role="user", content=user_message)
+    ]
+    chat_history = format_chat_history(initial_messages)
+    chat = model.start_chat(history=chat_history)
+    final_response = ""
+    try:
+        for chunk in chat.send_message(user_message, stream=True):
+            parts = chunk.candidates[0].content.parts
+            # If the thinking part and the final answer are separate, use the final answer
+            if len(parts) == 2:
+                final_response += parts[1].text
+            else:
+                final_response += parts[0].text
+        return final_response.strip()
+    except Exception as e:
+        return f"Error during LLM call: {str(e)}"
 
+def summarize_code(app_content: str):
+    system_message = "You are an AI assistant that analyzes and summarizes Python code. Summarize the given code concisely in three lines or fewer."
+    user_message = f"Summarize the following Python code in three lines or fewer:\n\n{app_content}"
+    try:
+        return gemini_chat_completion(system_message, user_message, max_tokens=200, temperature=0.7)
+    except Exception as e:
+        return f"Error while generating summary: {str(e)}"
+
+def analyze_code(app_content: str):
+    system_message = (
+        "You are an AI assistant that analyzes Python code. Analyze the given code and explain the following items:\n"
+        "A. Background and necessity\n"
+        "B. Functional usefulness and value\n"
+        "C. Distinctive strengths\n"
+        "D. Target audience and market\n"
+        "E. Expected benefits\n"
+        "Compare it with existing and similar projects in the analysis. Output in Markdown format."
+    )
+    user_message = f"Analyze the following Python code:\n\n{app_content}"
+    try:
+        return gemini_chat_completion(system_message, user_message, max_tokens=1000, temperature=0.7)
+    except Exception as e:
+        return f"Error while generating analysis: {str(e)}"
+
+def explain_usage(app_content: str):
+    system_message = "You are an AI assistant that analyzes Python code and explains how to use it. Based on the given code, describe the usage in detail, as if viewing the actual screen. Output in Markdown format."
+    user_message = f"Explain how to use the following Python code:\n\n{app_content}"
+    try:
+        return gemini_chat_completion(system_message, user_message, max_tokens=800, temperature=0.7)
+    except Exception as e:
+        return f"Error while generating usage explanation: {str(e)}"
+
+def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
+    """
+    Streams responses with the Gemini 2.0 Flash Thinking model.
+    Uses Gradio ChatMessage objects and shows the live 'thinking' output as well as the final answer.
+    """
+    if not user_message.strip():
+        messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message. Empty input is not allowed."))
+        yield messages
+        return
+
+    try:
+        print(f"\n=== New Request (Text) ===")
+        print(f"User message: {user_message}")
+
+        # Build the formatted chat history
+        chat_history = format_chat_history(messages)
+        chat = model.start_chat(history=chat_history)
+        response = chat.send_message(user_message, stream=True)
+
+        thought_buffer = ""
+        response_buffer = ""
+        thinking_complete = False
+
+        # Append the initial 'thinking' message
+        messages.append(
+            ChatMessage(
+                role="assistant",
+                content="",
+                metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
+            )
+        )
+
+        for chunk in response:
+            parts = chunk.candidates[0].content.parts
+            current_chunk = parts[0].text
+
+            if len(parts) == 2 and not thinking_complete:
+                thought_buffer += current_chunk
+                print(f"\n=== Complete Thought ===\n{thought_buffer}")
+
+                messages[-1] = ChatMessage(
+                    role="assistant",
+                    content=thought_buffer,
+                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
+                )
+                yield messages
+
+                response_buffer = parts[1].text
+                print(f"\n=== Starting Response ===\n{response_buffer}")
+
+                messages.append(
+                    ChatMessage(
+                        role="assistant",
+                        content=response_buffer
+                    )
+                )
+                thinking_complete = True
+
+            elif thinking_complete:
+                response_buffer += current_chunk
+                print(f"\n=== Response Chunk ===\n{current_chunk}")
+
+                messages[-1] = ChatMessage(
+                    role="assistant",
+                    content=response_buffer
+                )
+
+            else:
+                thought_buffer += current_chunk
+                print(f"\n=== Thinking Chunk ===\n{current_chunk}")
+
+                messages[-1] = ChatMessage(
+                    role="assistant",
+                    content=thought_buffer,
+                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
+                )
+            yield messages
+
+        print(f"\n=== Final Response ===\n{response_buffer}")
+
+    except Exception as e:
+        print(f"\n=== Error ===\n{str(e)}")
+        messages.append(
+            ChatMessage(
+                role="assistant",
+                content=f"I apologize, but I encountered an error: {str(e)}"
+            )
+        )
+        yield messages
+
+def respond(message: str, history: list) -> Iterator[list]:
+    """
+    respond() delegates to stream_gemini_response() so chat replies stream in conversation form.
+    """
+    return stream_gemini_response(message, history)
+
+# --------------------------------------------------
+# Gradio UI layout
+# --------------------------------------------------
 def create_ui():
     try:
         css = """
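
Note on stream_gemini_response above: it yields the entire messages list after every chunk, so each yield is a full snapshot of the conversation rather than a delta, and 'thinking' text is distinguished from the answer purely by the metadata title. A hypothetical harness for driving it outside the UI (it assumes app.py's globals are in scope and GEMINI_API_KEY is set):

from gradio import ChatMessage

history = []  # the list a gr.Chatbot would hold
for snapshot in stream_gemini_response("Summarize what this Space does.", history):
    last = snapshot[-1]
    kind = "thinking" if last.metadata else "answer"
    print(kind, (last.content or "")[-60:])  # tail of the growing buffer
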
@@ -292,12 +381,12 @@ def create_ui():
         """
 
         with gr.Blocks(theme="default", css=css) as demo:
-            gr.Markdown("# MOUSE
+            gr.Markdown("# MOUSE Space Analysis", elem_classes="header-markdown")
 
             with gr.Tabs() as tabs:
                 with gr.TabItem("Analysis"):
                     with gr.Row():
-                        with gr.Column(scale=6):
+                        with gr.Column(scale=6):
                             url_input = gr.Textbox(label="HuggingFace Space URL", placeholder="e.g. https://huggingface.co/spaces/username/space_name")
                             analyze_button = gr.Button("Analyze", variant="primary")
 
@@ -313,7 +402,7 @@ def create_ui():
                             with gr.Group(elem_classes="output-group tree-view-scroll"):
                                 tree_view_output = gr.Textbox(label="File structure (Tree View)", lines=30)
 
-                        with gr.Column(scale=4):
+                        with gr.Column(scale=4):
                             with gr.Group(elem_classes="output-group full-height"):
                                 code_tabs = gr.Tabs()
                                 with code_tabs:
@@ -332,7 +421,10 @@ def create_ui():
                                     )
 
                 with gr.TabItem("AI Coding"):
-                    chatbot = gr.Chatbot(
+                    chatbot = gr.Chatbot(
+                        label="Chat",
+                        elem_classes="output-group full-height"
+                    )
 
                     msg = gr.Textbox(label="Message", placeholder="Enter your message...")
 
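
One review note on the new gr.Chatbot: the metadata-titled 'thinking' bubbles that stream_gemini_response emits rely on Gradio's message-dict format, and in recent Gradio releases the component has to opt into it via type="messages"; with the default tuple format, ChatMessage objects will not render as collapsible thoughts. A sketch (not the committed code):

chatbot = gr.Chatbot(
    label="Chat",
    elem_classes="output-group full-height",
    type="messages"  # needed for ChatMessage objects with metadata titles
)
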
@@ -352,14 +444,9 @@ def create_ui():
                     gr.Examples(examples, inputs=msg)
 
                     def respond_wrapper(message, chat_history, max_tokens, temperature, top_p):
-
-
-                        bot_message = response  # save the last response
-                        yield "", chat_history + [(message, bot_message)]
+                        # stream_gemini_response returns a generator.
+                        return "", stream_gemini_response(message, chat_history)
 
-                        chat_history.append((message, bot_message))
-                        return "", chat_history
-
                     msg.submit(respond_wrapper, [msg, chatbot, max_tokens, temperature, top_p], [msg, chatbot])
 
                 with gr.TabItem("Recommended Best"):
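
A second caveat, on respond_wrapper: Gradio only streams when the event handler itself yields, and the new body returns the generator as a value instead of iterating it, so the chat would not update incrementally. A generator-style wrapper (a sketch, not the committed code; the slider arguments stay unused to match the committed signature):

def respond_wrapper(message, chat_history, max_tokens, temperature, top_p):
    # Re-yield each snapshot so the Chatbot repaints as chunks arrive.
    for updated_history in stream_gemini_response(message, chat_history or []):
        yield "", updated_history
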
@@ -383,7 +470,6 @@ def create_ui():
                 outputs=[requirements_content]
             )
 
-            # Dynamically adjust the number of lines for app.py
             app_py_content.change(lambda lines: gr.update(lines=lines), inputs=[app_py_content_lines], outputs=[app_py_content])
 
         return demo