from llms import LLM
from utils.remote_client import execute_remote_task


def text_summarization(text: str, model: str, summary_length: str, use_llm: bool = True) -> str:
    """
    Summarize the input text using either an LLM or the traditional (Modal API) method.

    Blank input returns an empty string; otherwise the call is dispatched to the
    LLM-based or the remote traditional summarizer depending on `use_llm`.
    """
    if not text.strip():
        return ""
    if use_llm:
        return _summarization_with_llm(text, model, summary_length)
    return _summarization_with_traditional(text, model, summary_length)


def _summarization_with_llm(text: str, model: str, summary_length: str) -> str:
    """Summarize `text` with the requested LLM, falling back to an error message."""
    try:
        llm = LLM(model=model)
        prompt = (
            f"Summarize the following text in {summary_length} detail. "
            f"Text: {text}\nSummary:"
        )
        summary = llm.generate(prompt)
        return summary.strip()
    except Exception as e:
        # Log the failure and return a user-facing fallback instead of raising.
        print(f"Error in LLM summarization: {e}")
        return "Oops! Something went wrong. Please try again later."


def _summarization_with_traditional(text: str, model: str, summary_length: str) -> str:
    """Summarize `text` via the remote (Modal API) summarization task."""
    try:
        payload = {
            "text": text,
            "model": model,
            "summary_length": summary_length,
        }
        resp = execute_remote_task("summarization", payload)
        if "error" in resp:
            return "Oops! Something went wrong. Please try again later."
        return resp.get("summary", "")
    except Exception as e:
        # Log the failure and return the same user-facing fallback as the LLM path.
        print(f"Error in traditional summarization: {e}")
        return "Oops! Something went wrong. Please try again later."