Adityahulk committed on
Commit
3ccc955
·
1 Parent(s): 1110dbd

adding pdf parsing logic correctly

Browse files
manimator/agents/reflexion_agent.py CHANGED
@@ -21,6 +21,7 @@ import litellm
21
  from ..utils.system_prompts import get_system_prompt
22
  from ..utils.code_postprocessor import post_process_code
23
  from ..utils.code_validator import CodeValidator
 
24
 
25
  logger = logging.getLogger(__name__)
26
 
@@ -238,8 +239,78 @@ class ReflexionAgent:
238
  YOU MUST APPLY THESE LESSONS IN YOUR CODE! Do not repeat these mistakes.
239
  """
240
 
241
- # Build user message
242
- user_message = f"""Create a video about:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
243
 
244
  {goal}
245
 
@@ -290,11 +361,18 @@ self.play(items[2].animate.scale(1.1).set_color(GREEN))
290
  ]
291
 
292
  try:
293
- response = litellm.completion(
294
- model=self.actor_model,
295
- messages=messages,
296
- num_retries=2
297
- )
 
 
 
 
 
 
 
298
 
299
  content = response.choices[0].message.content
300
  code = self._extract_code(content)
 
21
  from ..utils.system_prompts import get_system_prompt
22
  from ..utils.code_postprocessor import post_process_code
23
  from ..utils.code_validator import CodeValidator
24
+ from ..utils.content_preprocessor import preprocess_long_content, get_script_mode_prompt_for_long_content
25
 
26
  logger = logging.getLogger(__name__)
27
 
 
239
  YOU MUST APPLY THESE LESSONS IN YOUR CODE! Do not repeat these mistakes.
240
  """
241
 
242
+ # Detect if input is a ready-made script (long content) vs short prompt
243
+ word_count = len(goal.split())
244
+ is_script_mode = word_count > 200
245
+
246
+ # For very long content, preprocess into sections
247
+ processed_goal = goal
248
+ section_count = 0
249
+ if word_count > 1000:
250
+ processed_goal, section_count = preprocess_long_content(goal)
251
+
252
+ if section_count > 0:
253
+ # Very long content - use sectioned prompt
254
+ logger.info(f"📝 LONG DOCUMENT MODE: {word_count} words -> {section_count} sections")
255
+ user_message = get_script_mode_prompt_for_long_content(processed_goal, section_count)
256
+ elif is_script_mode:
257
+ logger.info(f"📝 SCRIPT MODE: Input has {word_count} words - treating as ready-made script")
258
+ user_message = f"""# 🎬 SCRIPT MODE - ANIMATE THE USER'S CONTENT
259
+
260
+ ## IMPORTANT: The user has provided their COMPLETE script/content below.
261
+ This is NOT a topic to research - this IS the exact narration/content they want animated.
262
+
263
+ ## YOUR TASK:
264
+ 1. **Use the content below AS the voiceover text** - split it into logical sections
265
+ 2. **Create beautiful animations that MATCH each section** of their content
266
+ 3. **Do NOT rewrite, summarize, or generate new information** - animate THEIR words
267
+ 4. **Every paragraph/section should become a voiceover block** with matching visuals
268
+ 5. **Create visualizations that illustrate what THEIR text describes**
269
+
270
+ ## USER'S SCRIPT TO ANIMATE:
271
+ ---
272
+ {goal}
273
+ ---
274
+
275
+ # ============================================================================
276
+ # 🚨 CRITICAL REQUIREMENTS - YOU MUST FOLLOW THESE
277
+ # ============================================================================
278
+
279
+ ## SCREEN BOUNDARIES (CRITICAL!)
280
+ - **ALL content MUST stay on screen** - nothing should be cut off
281
+ - For any VGroup with 4+ items: USE `group.scale_to_fit_height(config.frame_height - 2.5)`
282
+ - Maximum 4-5 items visible at once, use smaller fonts (28-32pt) for lists
283
+ - Always leave margins: top 1.0, bottom 0.8, sides 0.5
284
+
285
+ ## DYNAMIC ANIMATIONS (CRITICAL!)
286
+ - **NEVER use only Write()** - mix at least 4 different animation types
287
+ - **MUST use LaggedStart** for any list of items: `LaggedStart(*[FadeIn(x, shift=RIGHT) for x in items], lag_ratio=0.2)`
288
+ - **MUST include emphasis animations**: `Indicate()`, `Circumscribe()`, `Flash()` on key elements
289
+ - **Use motion during voiceover**: `obj.animate.scale(1.05)` while explaining
290
+ - **Creative transitions**: `FadeOut(old, shift=LEFT), FadeIn(new, shift=RIGHT)`
291
+
292
+ ## VOICEOVER STRUCTURE FOR SCRIPT MODE:
293
+ Use their content directly in voiceover blocks:
294
+ ```python
295
+ # Section 1 - use their first paragraph/section
296
+ with self.voiceover(text="[First section of their content here]") as tracker:
297
+ # Create animations that ILLUSTRATE what this section describes
298
+
299
+ # Section 2 - use their next paragraph/section
300
+ with self.voiceover(text="[Next section of their content here]") as tracker:
301
+ # Create animations that ILLUSTRATE what this section describes
302
+ ```
303
+
304
+ ## NO STATIC/BORING MOMENTS
305
+ - NEVER have blank screens - always show something
306
+ - NEVER use `self.wait()` longer than 0.5s without animation
307
+ - Every section should have at least one emphasis animation
308
+ - Objects should move and transform, not just appear
309
+ """
310
+ else:
311
+ logger.info(f"📝 GENERATION MODE: Input has {word_count} words - LLM will generate content")
312
+ # Short prompt - LLM generates content (existing behavior)
313
+ user_message = f"""Create a video about:
314
 
315
  {goal}
316
 
 
361
  ]
362
 
363
  try:
364
+ # Only set max_tokens for long documents where we need extended output
365
+ # For short prompts, let the model use its default behavior to avoid errors
366
+ kwargs = {
367
+ "model": self.actor_model,
368
+ "messages": messages,
369
+ "num_retries": 2
370
+ }
371
+
372
+ if section_count > 0:
373
+ kwargs["max_tokens"] = 12000 # Increased limit for long docs
374
+
375
+ response = litellm.completion(**kwargs)
376
 
377
  content = response.choices[0].message.content
378
  code = self._extract_code(content)
manimator/api/animation_generation.py CHANGED
@@ -8,6 +8,7 @@ from ..utils.code_postprocessor import post_process_code
8
  from ..utils.code_validator import CodeValidator
9
  from ..utils.code_fixer import CodeFixer
10
  from ..inputs.processor import InputProcessor
 
11
 
12
  logger = logging.getLogger(__name__)
13
 
@@ -65,6 +66,48 @@ def _generate_legacy(prompt: str, category: str, max_attempts: int = 3) -> str:
65
  # Get dynamic system prompt based on category
66
  system_prompt = get_system_prompt(category)
67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  messages = [
69
  {
70
  "role": "system",
@@ -72,26 +115,53 @@ def _generate_legacy(prompt: str, category: str, max_attempts: int = 3) -> str:
72
  },
73
  {
74
  "role": "user",
75
- "content": f"Create a video about:\n\n{prompt}\n\n NOTE!!!:\n1. NO BLANK SCREENS: Keep the screen populated. If a voiceover is playing, show something.\n2. NO OVERLAPS: Ensure text and objects do not overlap. Use `next_to` and `arrange`.\n3. CLEAN TRANSITIONS: Fade out old content before showing new content, but don't leave the screen empty for long.\n4. VARIED ANIMATIONS: Use a mix of Write, FadeIn, GrowFromCenter, etc.\n5. STAY ON SCREEN: Ensure all text and objects are within the screen boundaries. Use .scale_to_fit_width(config.frame_width - 1) for large groups.",
76
  },
77
  ]
78
 
79
  logger.info(f"Generating code (attempt {attempt + 1}/{max_attempts}) with model {model}")
80
 
81
- response = litellm.completion(
82
- model=model,
83
- messages=messages,
84
- num_retries=2
85
- )
 
 
 
 
 
 
86
 
87
  raw_code = response.choices[0].message.content
88
 
89
- # Extract code if wrapped in markdown
90
- if "```python" in raw_code:
91
- import re
92
- match = re.search(r"```python\n(.*?)```", raw_code, re.DOTALL)
 
 
 
 
 
 
 
 
 
93
  if match:
94
  raw_code = match.group(1).strip()
 
 
 
 
 
 
 
 
 
 
 
 
95
 
96
  # Post-process the code to fix common issues
97
  processed_code = post_process_code(raw_code)
 
8
  from ..utils.code_validator import CodeValidator
9
  from ..utils.code_fixer import CodeFixer
10
  from ..inputs.processor import InputProcessor
11
+ from ..utils.content_preprocessor import preprocess_long_content, get_script_mode_prompt_for_long_content
12
 
13
  logger = logging.getLogger(__name__)
14
 
 
66
  # Get dynamic system prompt based on category
67
  system_prompt = get_system_prompt(category)
68
 
69
+ # Detect if input is a ready-made script (long content) vs short prompt
70
+ word_count = len(prompt.split())
71
+ is_script_mode = word_count > 200
72
+
73
+ # For very long content, preprocess into sections
74
+ processed_prompt = prompt
75
+ section_count = 0
76
+ if word_count > 1000:
77
+ processed_prompt, section_count = preprocess_long_content(prompt)
78
+
79
+ if section_count > 0:
80
+ # Very long content - use sectioned prompt
81
+ logger.info(f"📝 LONG DOCUMENT MODE (Legacy): {word_count} words -> {section_count} sections")
82
+ user_content = get_script_mode_prompt_for_long_content(processed_prompt, section_count)
83
+ elif is_script_mode:
84
+ logger.info(f"📝 SCRIPT MODE (Legacy): Input has {word_count} words - treating as ready-made script")
85
+ user_content = f"""# 🎬 SCRIPT MODE - ANIMATE THE USER'S CONTENT
86
+
87
+ ## IMPORTANT: The user has provided their COMPLETE script/content below.
88
+ This is NOT a topic to research - this IS the exact narration/content they want animated.
89
+
90
+ ## YOUR TASK:
91
+ 1. **Use the content below AS the voiceover text** - split it into logical sections
92
+ 2. **Create beautiful animations that MATCH each section** of their content
93
+ 3. **Do NOT rewrite, summarize, or generate new information** - animate THEIR words
94
+ 4. **Every paragraph/section should become a voiceover block** with matching visuals
95
+
96
+ ## USER'S SCRIPT TO ANIMATE:
97
+ ---
98
+ {prompt}
99
+ ---
100
+
101
+ NOTE!!!:
102
+ 1. NO BLANK SCREENS: Keep the screen populated. If a voiceover is playing, show something.
103
+ 2. NO OVERLAPS: Ensure text and objects do not overlap. Use `next_to` and `arrange`.
104
+ 3. CLEAN TRANSITIONS: Fade out old content before showing new content, but don't leave the screen empty for long.
105
+ 4. VARIED ANIMATIONS: Use a mix of Write, FadeIn, GrowFromCenter, etc.
106
+ 5. STAY ON SCREEN: Ensure all text and objects are within the screen boundaries. Use .scale_to_fit_width(config.frame_width - 1) for large groups."""
107
+ else:
108
+ logger.info(f"📝 GENERATION MODE (Legacy): Input has {word_count} words - LLM will generate content")
109
+ user_content = f"Create a video about:\n\n{prompt}\n\n NOTE!!!:\n1. NO BLANK SCREENS: Keep the screen populated. If a voiceover is playing, show something.\n2. NO OVERLAPS: Ensure text and objects do not overlap. Use `next_to` and `arrange`.\n3. CLEAN TRANSITIONS: Fade out old content before showing new content, but don't leave the screen empty for long.\n4. VARIED ANIMATIONS: Use a mix of Write, FadeIn, GrowFromCenter, etc.\n5. STAY ON SCREEN: Ensure all text and objects are within the screen boundaries. Use .scale_to_fit_width(config.frame_width - 1) for large groups."
110
+
111
  messages = [
112
  {
113
  "role": "system",
 
115
  },
116
  {
117
  "role": "user",
118
+ "content": user_content,
119
  },
120
  ]
121
 
122
  logger.info(f"Generating code (attempt {attempt + 1}/{max_attempts}) with model {model}")
123
 
124
+ # Only set max_tokens for long documents
125
+ kwargs = {
126
+ "model": model,
127
+ "messages": messages,
128
+ "num_retries": 2
129
+ }
130
+
131
+ if section_count > 0:
132
+ kwargs["max_tokens"] = 12000
133
+
134
+ response = litellm.completion(**kwargs)
135
 
136
  raw_code = response.choices[0].message.content
137
 
138
+ # Extract code if wrapped in markdown (handle various formats)
139
+ import re
140
+
141
+ # Try different markdown patterns
142
+ code_patterns = [
143
+ r'```python\n(.*?)```', # Standard: ```python ... ```
144
+ r'````python\n(.*?)````', # Quad backticks
145
+ r'```py\n(.*?)```', # ```py
146
+ r'```\n(.*?)```', # Just backticks without language
147
+ ]
148
+
149
+ for pattern in code_patterns:
150
+ match = re.search(pattern, raw_code, re.DOTALL)
151
  if match:
152
  raw_code = match.group(1).strip()
153
+ break
154
+
155
+ # If still has backticks, try to clean up
156
+ if raw_code.startswith('```'):
157
+ lines = raw_code.split('\n')
158
+ # Remove first line if it's just ```python or similar
159
+ if lines[0].strip().startswith('```'):
160
+ lines = lines[1:]
161
+ # Remove last line if it's just ```
162
+ if lines and lines[-1].strip() == '```':
163
+ lines = lines[:-1]
164
+ raw_code = '\n'.join(lines)
165
 
166
  # Post-process the code to fix common issues
167
  processed_code = post_process_code(raw_code)
manimator/services/voiceover.py CHANGED
@@ -144,19 +144,33 @@ class SimpleElevenLabsService:
144
 
145
  logger.info(f"Generating Edge TTS ({edge_voice}) for: {text[:30]}...")
146
 
147
- # Edge-tts is async, so we need to run it in an event loop
148
  async def _generate():
149
  communicate = edge_tts.Communicate(text, edge_voice)
150
  await communicate.save(str(output_path))
151
 
152
- # Run the async function
153
  try:
154
- loop = asyncio.get_event_loop()
155
- except RuntimeError:
156
- loop = asyncio.new_event_loop()
157
- asyncio.set_event_loop(loop)
158
 
159
- loop.run_until_complete(_generate())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
160
 
161
  # Verify file was created successfully
162
  if output_path.exists() and output_path.stat().st_size > 0:
@@ -167,35 +181,5 @@ class SimpleElevenLabsService:
167
  return output_path
168
 
169
  except Exception as e:
170
- logger.error(f"Edge TTS failed: {str(e)}. Falling back to gTTS.")
171
- return self._generate_with_gtts(text)
172
-
173
def _generate_with_gtts(self, text: str) -> Path:
    """
    Last resort fallback using Google Text-to-Speech.

    Synthesizes *text* to an MP3 (or reuses a previously generated file)
    keyed by the MD5 of the exact text, so identical narration is cached.

    Args:
        text: The narration text to synthesize.

    Returns:
        Path to the generated (or cached) MP3 file.

    Raises:
        RuntimeError: If the gTTS import or synthesis fails.
    """
    try:
        # Imported lazily so gTTS is only required when the fallback runs.
        from gtts import gTTS

        # Use absolute path for gTTS cache (important for containerized environments)
        # NOTE(review): assumes BASE_DIR points at the project root — confirm.
        gtts_cache_dir = BASE_DIR / "media" / "voiceover" / "gtts"
        gtts_cache_dir.mkdir(parents=True, exist_ok=True)

        # Cache key: MD5 of the raw text, so the same narration maps to one file.
        content_hash = hashlib.md5(text.encode("utf-8")).hexdigest()
        output_path = gtts_cache_dir / f"{content_hash}.mp3"

        # A non-empty existing file counts as a cache hit; zero-byte files
        # (failed earlier runs) are regenerated.
        if output_path.exists() and output_path.stat().st_size > 0:
            logger.info(f"Using cached gTTS voiceover for hash {content_hash}")
            return output_path

        logger.info(f"Generating gTTS fallback for: {text[:30]}...")
        tts = gTTS(text=text, lang='en')
        tts.save(str(output_path))

        logger.info(f"gTTS voiceover saved to {output_path}")
        return output_path

    except Exception as e:
        # All TTS options are exhausted at this point, so surface the failure.
        logger.error(f"gTTS fallback failed: {str(e)}")
        raise RuntimeError(f"All TTS methods failed: {str(e)}")
201
-
 
144
 
145
  logger.info(f"Generating Edge TTS ({edge_voice}) for: {text[:30]}...")
146
 
147
+ # Edge-tts is async, handle event loop properly for Streamlit/Flask contexts
148
  async def _generate():
149
  communicate = edge_tts.Communicate(text, edge_voice)
150
  await communicate.save(str(output_path))
151
 
152
+ # Try to use nest_asyncio for Streamlit/Jupyter compatibility
153
  try:
154
+ import nest_asyncio
155
+ nest_asyncio.apply()
156
+ except ImportError:
157
+ pass # nest_asyncio not available, continue anyway
158
 
159
+ # Run the async function with proper event loop handling
160
+ try:
161
+ # Try asyncio.run() first (Python 3.7+, creates new loop)
162
+ asyncio.run(_generate())
163
+ except RuntimeError as e:
164
+ # If there's already an event loop running (e.g., in Streamlit/Jupyter)
165
+ if "cannot be called from a running event loop" in str(e) or "There is no current event loop" in str(e):
166
+ loop = asyncio.new_event_loop()
167
+ asyncio.set_event_loop(loop)
168
+ try:
169
+ loop.run_until_complete(_generate())
170
+ finally:
171
+ loop.close()
172
+ else:
173
+ raise
174
 
175
  # Verify file was created successfully
176
  if output_path.exists() and output_path.stat().st_size > 0:
 
181
  return output_path
182
 
183
  except Exception as e:
184
+ logger.error(f"Edge TTS failed: {str(e)}")
185
+ raise RuntimeError(f"Edge TTS voiceover generation failed: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
manimator/utils/content_preprocessor.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Content Preprocessor for Long Inputs
3
+
4
+ Handles very long content (PDFs, large text) by:
5
+ 1. Chunking content into logical sections
6
+ 2. Numbering sections for explicit coverage
7
+ 3. Ensuring proportional representation in the video
8
+ """
9
+
10
+ import logging
11
+ import re
12
+ from typing import List, Tuple
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
def chunk_content(content: str, max_words_per_chunk: int = 150) -> List[str]:
    """
    Split content into logical chunks based on paragraphs and sentences.

    Paragraphs (separated by blank lines) are packed greedily into chunks
    of roughly ``max_words_per_chunk`` words; a paragraph that is itself
    longer than the limit is split at sentence boundaries instead.

    Args:
        content: The full text content
        max_words_per_chunk: Target words per chunk (will be approximate)

    Returns:
        List of content chunks
    """
    # Paragraph boundaries are blank lines; drop empty fragments.
    blocks = [p.strip() for p in re.split(r'\n\s*\n', content.strip()) if p.strip()]

    chunks: List[str] = []
    buffer: List[str] = []      # paragraphs accumulated for the next chunk
    buffered = 0                # word count currently sitting in `buffer`

    for block in blocks:
        n_words = len(block.split())

        if n_words > max_words_per_chunk:
            # Oversized paragraph: emit whatever is buffered so far, then
            # split the paragraph itself at sentence boundaries.
            if buffer:
                chunks.append(' '.join(buffer))
                buffer, buffered = [], 0

            pending: List[str] = []
            pending_words = 0
            for sentence in re.split(r'(?<=[.!?])\s+', block):
                n = len(sentence.split())
                if pending and pending_words + n > max_words_per_chunk:
                    chunks.append(' '.join(pending))
                    pending, pending_words = [sentence], n
                else:
                    pending.append(sentence)
                    pending_words += n
            if pending:
                chunks.append(' '.join(pending))
            continue

        # Normal-sized paragraph: flush first if adding it would overflow.
        if buffer and buffered + n_words > max_words_per_chunk:
            chunks.append(' '.join(buffer))
            buffer, buffered = [block], n_words
        else:
            buffer.append(block)
            buffered += n_words

    # Emit the trailing partial chunk, if any.
    if buffer:
        chunks.append(' '.join(buffer))

    return chunks
79
+
80
+
81
def preprocess_long_content(content: str) -> Tuple[str, int]:
    """
    Preprocess long content by chunking and adding section markers.

    Content of 1000 words or fewer is returned untouched with a section
    count of 0.  Anything longer is chunked (the longer the document, the
    smaller the chunks) and wrapped with explicit "SECTION i OF n"
    markers so the LLM covers every part proportionally.

    Args:
        content: The raw content from PDF/text input

    Returns:
        Tuple of (processed_content, section_count)
    """
    total_words = len(content.split())

    # Short enough to hand over verbatim — no sectioning needed.
    if total_words <= 1000:
        return content, 0

    logger.info(f"📄 Preprocessing very long content: {total_words} words")

    # Longer documents get smaller chunks so every part still earns its
    # own section in the video.
    if total_words > 5000:
        chunk_size = 120
    elif total_words > 3000:
        chunk_size = 150
    elif total_words > 2000:
        chunk_size = 180
    else:
        chunk_size = 200

    sections = chunk_content(content, max_words_per_chunk=chunk_size)
    n_sections = len(sections)

    logger.info(f"📄 Split into {n_sections} sections (avg ~{total_words // n_sections} words each)")

    # Header instructs the LLM to cover every numbered section.
    lines = [
        f"# STRUCTURED CONTENT ({n_sections} SECTIONS)",
        "# YOU MUST CREATE A VOICEOVER BLOCK FOR EACH SECTION BELOW",
        f"# Video should cover ALL {n_sections} sections proportionally",
        "",
    ]
    for index, section in enumerate(sections, 1):
        lines.extend([f"=== SECTION {index} OF {n_sections} ===", section, ""])

    return '\n'.join(lines), n_sections
131
+
132
+
133
def get_script_mode_prompt_for_long_content(goal: str, section_count: int) -> str:
    """
    Generate the user prompt for very long (chunked) content.

    The prompt explicitly instructs the LLM to cover ALL sections with
    detailed, high-quality animations rather than rushed content.
    """
    # Cap the section count so quality does not suffer on huge documents.
    sections = min(section_count, 12)
    # Budget roughly 30 seconds of video per section.
    total_duration = sections * 30

    return f"""Create a DETAILED animated video from this document.

CONTENT TO ANIMATE:
{goal}

CRITICAL REQUIREMENTS:

1. CREATE {sections} DISTINCT SECTIONS - each with its own voiceover block
2. EACH SECTION MUST BE 20-40 SECONDS with rich animations
3. USE VARIED ANIMATIONS: FadeIn, Write, GrowFromCenter, LaggedStart, Indicate, Circumscribe
4. DO NOT RUSH - build visuals progressively in each section
5. CLEAN TRANSITIONS between sections using FadeOut before new content
6. USE THE ACTUAL TEXT from each section as voiceover content

DO NOT:
- Create only 1-2 voiceover blocks
- Rush through in 5 seconds
- Skip middle content
- Use only Write() for everything

VIDEO DURATION: Approximately {total_duration} seconds total

Each section should have:
- A title/header animation
- Multiple visual elements built progressively
- Emphasis animations (Indicate, Circumscribe)
- Clean transition to next section
"""
171
+
172
+
requirements.txt CHANGED
@@ -12,4 +12,5 @@ requests
12
  beautifulsoup4>=4.12.0
13
  lxml>=4.9.0
14
  readability-lxml>=0.8.1
15
- edge-tts>=6.1.0
 
 
12
  beautifulsoup4>=4.12.0
13
  lxml>=4.9.0
14
  readability-lxml>=0.8.1
15
+ edge-tts>=6.1.0
16
+ nest_asyncio>=1.5.0