# AIHumanizer / chathuman.py
# Author: Jay-Rajput
# universal humanizer
# commit 7dec80a
import gradio as gr
from transformers import pipeline
# Text-rewriting pipelines, loaded once at import time so the Gradio
# handler can reuse them across requests.
# Paraphrasing: T5 fine-tuned on PAWS for sentence rewriting.
paraphraser = pipeline(
    "text2text-generation",
    model="Vamsi/T5_Paraphrase_Paws",
)
# Grammar pass: seq2seq grammar-error-correction model applied to the
# paraphraser's output as a cleanup step.
grammar_corrector = pipeline(
    "text2text-generation",
    model="prithivida/grammar_error_correcter_v1",
)
def humanize_text(input_text, tone):
    """Rewrite *input_text* in the requested tone, then grammar-correct it.

    Args:
        input_text: Text to humanize. Blank or whitespace-only input
            short-circuits to an empty string.
        tone: One of "Natural", "Formal", "Casual"; any other value
            falls back to "Natural".

    Returns:
        The paraphrased, grammar-corrected text produced by the two
        module-level pipelines.
    """
    # Guard clause: nothing to do for empty/whitespace input.
    if not input_text.strip():
        return ""

    # Map the UI tone choice to a style instruction for the paraphraser;
    # unknown tones degrade gracefully to "Natural" instead of raising.
    tone_map = {
        "Natural": "Paraphrase this text in a natural human-like style.",
        "Formal": "Paraphrase this text in a formal professional tone.",
        "Casual": "Paraphrase this text in a casual conversational tone.",
    }
    instruction = tone_map.get(tone, tone_map["Natural"])

    # Step 1: paraphrase with the tone instruction.
    # NOTE(review): Vamsi/T5_Paraphrase_Paws was trained with a
    # "paraphrase: <text> </s>" prompt; confirm this free-form instruction
    # actually steers the model as intended.
    # NOTE(review): max_length=512 silently truncates longer inputs —
    # confirm that is acceptable for this UI.
    paraphrased = paraphraser(
        f"{instruction} Preserve meaning and paragraph breaks. Input: {input_text}",
        max_length=512,
        num_return_sequences=1,
        do_sample=False,
    )[0]['generated_text']

    # Step 2: grammar/spelling cleanup pass over the paraphrase.
    corrected = grammar_corrector(
        f"Correct grammar and spelling, keep structure: {paraphrased}",
        max_length=512,
        num_return_sequences=1,
        do_sample=False,
    )[0]['generated_text']
    return corrected
# --- Gradio UI -----------------------------------------------------------
# Build the components up front, then wire them into a simple Interface
# around the humanize_text handler.
text_input = gr.Textbox(label="Input Text", lines=10, placeholder="Paste your text here...")
tone_choice = gr.Radio(["Natural", "Formal", "Casual"], label="Tone", value="Natural")
result_box = gr.Textbox(label="Humanized Output", lines=10)

demo = gr.Interface(
    fn=humanize_text,
    inputs=[text_input, tone_choice],
    outputs=result_box,
    title="AI Humanizer",
    description="Humanize AI text into natural, formal, or casual tones while preserving meaning and structure.",
)

if __name__ == "__main__":
    demo.launch()