ACloudCenter committed on
Commit
0c38efd
·
1 Parent(s): 407cef4

Chore: Remove Chatbot, add ChatInterface, remove buttons and handlers that are not needed with the new interface

Browse files
Files changed (1) hide show
  1. app.py +53 -42
app.py CHANGED
@@ -108,11 +108,11 @@ def transcript_qa(transcript, question, history):
108
  import time
109
  time.sleep(0.01)
110
 
111
- def disable_buttons():
112
- return gr.update(interactive=False), gr.update(interactive=False)
113
 
114
- def enable_buttons():
115
- return gr.update(interactive=True), gr.update(interactive=True)
116
 
117
  # Build the Gradio interface
118
  with gr.Blocks(theme=theme) as demo:
@@ -200,60 +200,71 @@ with gr.Blocks(theme=theme) as demo:
200
  )
201
 
202
  gr.Markdown("### Step3 - Interactive Q&A")
203
- chatbot = gr.ChatInterface("Q&A",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
  type="messages",
205
- chatbot=gr.Chatbot(height=450),
206
- textbox=gr.Textbox(placeholder="Type your message here..."),
 
 
 
 
 
 
 
 
207
  examples=[
208
  "Can you please summarize this?",
209
  "What were the key points discussed?",
210
  "What was the main topic?",
211
  "What is the TLDR version so I can just leave this conference call early?"
212
  ],
213
- run_examples_on_click=True,
214
- cache_examples=True,
215
- cache_mode="lazy"
 
216
  )
217
-
218
- def user(user_message, history: list):
219
- return "", history + [{"role": "user", "content": user_message}]
220
-
221
- with gr.Row():
222
- ask_btn = gr.Button("Ask", variant="primary", scale=1, size="lg")
223
- clear_chat_btn = gr.Button("Clear", variant="secondary", scale=1, size="lg")
224
 
225
- # Event handlers
226
  transcribe_btn.click(
227
- fn=disable_buttons,
228
  inputs=None,
229
- outputs=[transcribe_btn, ask_btn]
230
  ).then(
231
  fn=transcribe_audio,
232
  inputs=[audio_input],
233
- outputs=[transcript_output, transcript_state, chatbot]
234
  ).then(
235
- fn=enable_buttons,
236
  inputs=None,
237
- outputs=[transcribe_btn, ask_btn]
238
- )
239
-
240
- ask_btn.click(
241
- fn=transcript_qa,
242
- inputs=[transcript_state, question_input, chatbot],
243
- outputs=[chatbot, question_input]
244
- )
245
-
246
- question_input.submit(
247
- fn=transcript_qa,
248
- inputs=[transcript_state, question_input, chatbot],
249
- outputs=[chatbot, question_input]
250
- )
251
-
252
- clear_chat_btn.click(
253
- fn=lambda: [],
254
- inputs=None,
255
- outputs=chatbot
256
  )
257
 
258
  demo.queue()
259
- demo.launch()
 
 
108
  import time
109
  time.sleep(0.01)
110
 
111
+ def disable_transcribe():
112
+ return gr.update(interactive=False)
113
 
114
+ def enable_transcribe():
115
+ return gr.update(interactive=True)
116
 
117
  # Build the Gradio interface
118
  with gr.Blocks(theme=theme) as demo:
 
200
  )
201
 
202
  gr.Markdown("### Step3 - Interactive Q&A")
203
+
204
+ # Create a wrapper function for ChatInterface
205
+ def qa_wrapper(message, history):
206
+ # Get the transcript from state
207
+ if transcript_state.value:
208
+ # Convert history format if needed
209
+ formatted_history = []
210
+ for msg in history:
211
+ if isinstance(msg, dict):
212
+ formatted_history.append(msg)
213
+ else:
214
+ # Handle tuple format if needed
215
+ formatted_history.append({"role": "user", "content": msg[0]})
216
+ if len(msg) > 1 and msg[1]:
217
+ formatted_history.append({"role": "assistant", "content": msg[1]})
218
+
219
+ # Process the Q&A
220
+ for response_history, _ in transcript_qa(transcript_state.value, message, formatted_history):
221
+ # Return just the last assistant message
222
+ if response_history and response_history[-1]["role"] == "assistant":
223
+ yield response_history[-1]["content"]
224
+ else:
225
+ yield "Please transcribe audio first before asking questions."
226
+
227
+ # Use ChatInterface for cleaner UI
228
+ chatbot = gr.ChatInterface(
229
+ fn=qa_wrapper,
230
  type="messages",
231
+ chatbot=gr.Chatbot(
232
+ height=450,
233
+ label="",
234
+ bubble_full_width=False
235
+ ),
236
+ textbox=gr.Textbox(
237
+ placeholder="Ask a question about the transcript...",
238
+ container=False,
239
+ scale=7
240
+ ),
241
  examples=[
242
  "Can you please summarize this?",
243
  "What were the key points discussed?",
244
  "What was the main topic?",
245
  "What is the TLDR version so I can just leave this conference call early?"
246
  ],
247
+ submit_btn="Ask",
248
+ retry_btn=None,
249
+ undo_btn=None,
250
+ clear_btn="Clear"
251
  )
 
 
 
 
 
 
 
252
 
253
+ # Event handlers - simplified since ChatInterface handles Q&A
254
  transcribe_btn.click(
255
+ fn=disable_transcribe,
256
  inputs=None,
257
+ outputs=[transcribe_btn]
258
  ).then(
259
  fn=transcribe_audio,
260
  inputs=[audio_input],
261
+ outputs=[transcript_output, transcript_state, chatbot.chatbot] # Update the chatbot component
262
  ).then(
263
+ fn=enable_transcribe,
264
  inputs=None,
265
+ outputs=[transcribe_btn]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
266
  )
267
 
268
  demo.queue()
269
+ demo.launch()
270
+