# humanizer/app.py
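"""Streamlit app that humanizes or rephrases input text with Groq's chat API and,
when LanguageTool is available, grammar-corrects the result."""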
import os
import subprocess
import random
import streamlit as st
from groq import Groq
import language_tool_python
from language_tool_python.download_lt import download_lt
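# Runtime requirements implied by the imports and setup below: the streamlit, groq,
# and language-tool-python packages, plus a Java runtime for the LanguageTool server.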
# Function to check and install Java
def install_java():
    try:
        st.write("Checking Java installation...")
        # "java -version" prints its version banner to stderr, so it is read from result.stderr below.
        result = subprocess.run(["java", "-version"], check=True, capture_output=True, text=True)
        st.write("Java is already installed:", result.stderr.strip())
    except FileNotFoundError:
        st.write("Java is not installed. Installing now...")
        try:
            # apt-get needs root privileges; on managed hosts this step may have to run at image build time instead.
            subprocess.run(["apt-get", "update"], check=True)
            subprocess.run(["apt-get", "install", "-y", "default-jdk"], check=True)
            st.write("Java installed successfully.")
        except Exception as e:
            st.error(f"Java installation failed: {str(e)}")
            raise e
# Function to ensure LanguageTool server files are downloaded
def ensure_languagetool_server():
    try:
        st.write("Downloading LanguageTool server files...")
        download_lt()
        st.write("LanguageTool server files downloaded successfully.")
    except Exception as e:
        st.error(f"Failed to download LanguageTool server files. Error: {str(e)}")
        raise e
# Function to initialize LanguageTool
def initialize_language_tool():
    try:
        st.write("Initializing LanguageTool...")
        ensure_languagetool_server()
        tool = language_tool_python.LanguageTool('en-US')
        st.write("LanguageTool initialized successfully.")
        return tool
    except Exception as e:
        st.error(f"Unable to initialize LanguageTool. Error: {e}")
        raise e
# Run Java installation check
install_java()
# Initialize Groq client
# Read the API key from the environment (e.g. a host/Space secret) or replace the placeholder with your own key.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "YOUR_GROQ_API_KEY")
os.environ["GROQ_API_KEY"] = GROQ_API_KEY
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
# Initialize LanguageTool
tool = None
try:
    tool = initialize_language_tool()
except Exception as e:
    st.error("Unable to initialize LanguageTool. Please ensure Java is installed, and all dependencies are configured.")
    st.error(f"Details: {str(e)}")
# Helper Functions
def adjust_sentence_structure(output_text):
    sentences = output_text.split(". ")
    adjusted_sentences = []
    for sentence in sentences:
        words = sentence.split()
        if len(words) > 20:  # Split overly long sentences
            split_point = random.randint(len(words) // 3, 2 * len(words) // 3)
            adjusted_sentences.append(" ".join(words[:split_point]))
            adjusted_sentences.append(" ".join(words[split_point:]))
        else:
            adjusted_sentences.append(sentence)
    return ". ".join(adjusted_sentences)
def check_grammar(output_text):
    # If LanguageTool failed to initialize, return the text unchanged.
    if tool is None:
        return output_text
    # tool.correct() performs its own check internally, so a separate tool.check() call is not needed.
    return tool.correct(output_text)
def refine_humanization(output_text, tone, iteration):
    refinement_prompt = (
        f"Refine the following text to sound natural, human-like, and professional. "
        f"Preserve the original meaning, avoid irrelevant details, and align with a {tone} tone: {output_text}"
    )
    try:
        refinement_response = client.chat.completions.create(
            messages=[{"role": "user", "content": refinement_prompt}],
            model="llama-3.3-70b-versatile",
            stream=False,
        )
        refined_text = refinement_response.choices[0].message.content.strip()
        # Apply structural adjustments after the first iteration
        if iteration > 1:
            refined_text = adjust_sentence_structure(refined_text)
        return check_grammar(refined_text)
    except Exception:
        return output_text
def split_text_into_chunks(text, max_words=500):
    words = text.split()
    return [" ".join(words[i:i + max_words]) for i in range(0, len(words), max_words)]
# Streamlit App
st.title("Humanizer & Rephraser App (Groq-powered)")
st.subheader("Create human-like and grammatically accurate text")
# User Input
input_text = st.text_area("Enter text to process:", "")
input_word_count = len(input_text.split()) if input_text.strip() else 0
st.write(f"**Input Word Count:** {input_word_count}")
# Options
task_option = st.radio(
    "Choose an option:",
    ("Humanize Text", "Rephrase Text"),
    index=0
)
tone = st.selectbox("Select tone (for humanizing):", ["Casual", "Professional", "Neutral", "Engaging", "Friendly"])
# Depth of Humanization
humanization_depth = st.slider(
    "Select depth of humanization:",
    min_value=1,
    max_value=5,
    value=3,
    help="Higher values apply more refinements to make the text appear less AI-like."
)
# Generate Output
if st.button("Generate Output"):
    if not input_text.strip():
        st.error("Please enter some text to process.")
    else:
        try:
            with st.spinner("Processing..."):
                text_chunks = split_text_into_chunks(input_text, max_words=500)
                output_chunks = []
                for chunk in text_chunks:
                    task_prompt = (
                        f"{'Humanize' if task_option == 'Humanize Text' else 'Rephrase'} "
                        f"this text while ensuring clarity, professionalism, and original meaning: {chunk}"
                    )
                    chat_completion = client.chat.completions.create(
                        messages=[{"role": "user", "content": task_prompt}],
                        model="llama-3.3-70b-versatile",
                        stream=False,
                    )
                    output_text = chat_completion.choices[0].message.content.strip()
                    for i in range(humanization_depth):
                        output_text = refine_humanization(output_text, tone, i + 1)
                    output_chunks.append(output_text)
                final_output = " ".join(output_chunks)
                output_word_count = len(final_output.split())
            st.success("Done!")
            st.text_area("Generated Output:", value=final_output, height=300)
            st.write(f"**Output Word Count:** {output_word_count}")
        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
# Footer
st.markdown("---")
st.markdown(
    "<p style='text-align: center; font-size: 14px;'>Designed by: <b>Engr. Makhdoom Muhammad Naeem Hashmi</b></p>",
    unsafe_allow_html=True
)