taha092 committed on
Commit 4c51074 · verified · 1 Parent(s): 8d17045

Delete app.py

Files changed (1)
  1. app.py +0 -129
app.py DELETED
@@ -1,129 +0,0 @@
-import gradio as gr
-import torch
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification
-from transformers.pipelines import pipeline
-from sentence_transformers import SentenceTransformer, util
-import numpy as np
-import gradio.themes as grthemes
-
-# Paraphrasing model: tuner007/pegasus_paraphrase
-PARAPHRASE_MODEL_NAME = "tuner007/pegasus_paraphrase"
-paraphrase_tokenizer = AutoTokenizer.from_pretrained(PARAPHRASE_MODEL_NAME)
-paraphrase_model = AutoModelForSeq2SeqLM.from_pretrained(PARAPHRASE_MODEL_NAME)
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-paraphrase_model = paraphrase_model.to(device)
-
-# AI Detector: desklib/ai-text-detector-v1.01
-AI_DETECTOR_MODEL = "desklib/ai-text-detector-v1.01"
-ai_detector = pipeline("text-classification", model=AI_DETECTOR_MODEL, device=0 if torch.cuda.is_available() else -1)
-
-# Semantic similarity model
-similarity_model = SentenceTransformer('all-MiniLM-L6-v2')
-
-def paraphrase(text):
-    prompt = text.strip()
-    batch = paraphrase_tokenizer([prompt], truncation=True, padding='longest', max_length=60, return_tensors="pt").to(device)
-    translated = paraphrase_model.generate(
-        **batch,
-        max_length=60,
-        num_beams=5,
-        num_return_sequences=1,
-        temperature=1.0
-    )
-    tgt_text = paraphrase_tokenizer.batch_decode(translated, skip_special_tokens=True)
-    return tgt_text[0] if tgt_text else ""
-
-def semantic_similarity(text1, text2):
-    emb1 = similarity_model.encode(text1, convert_to_tensor=True)
-    emb2 = similarity_model.encode(text2, convert_to_tensor=True)
-    sim = util.pytorch_cos_sim(emb1, emb2).item()
-    return sim
-
-def ai_detect(text):
-    # Returns probability of being AI-generated (label 'LABEL_1' = AI, 'LABEL_0' = Human)
-    result = ai_detector(text)
-    for r in result:
-        if r['label'] in ['LABEL_1', 'Fake']:
-            return r['score']
-        elif r['label'] in ['LABEL_0', 'Real']:
-            return 1.0 - r['score']
-    return 0.5  # fallback
-
-def humanization_score(sim, ai_prob):
-    # Lower similarity and lower AI probability = more human
-    score = (1.0 - sim) * 0.5 + (1.0 - ai_prob) * 0.5
-    return score
-
-def humanization_rating(score):
-    if score < 0.7:
-        return f"⚠️ Still robotic ({score:.2f})"
-    elif score < 0.85:
-        return f"👍 Acceptable ({score:.2f})"
-    else:
-        return f"✅ Highly Human ({score:.2f})"
-
-def process(text, tone):
-    if not text.strip():
-        return "", "", 0.0, "", 0.0
-    # Pre-humanization AI detection
-    pre_ai_prob = ai_detect(text)
-    # Paraphrase
-    try:
-        paraphrased = paraphrase(text)
-    except Exception as e:
-        return "[Error in paraphrasing: {}]".format(str(e)), "", 0.0, "", 0.0
-    # Post-humanization AI detection
-    post_ai_prob = ai_detect(paraphrased)
-    # Semantic similarity
-    sim = semantic_similarity(text, paraphrased)
-    # Humanization score
-    score = humanization_score(sim, post_ai_prob)
-    rating = humanization_rating(score)
-    ai_score_str = f"Pre: {pre_ai_prob*100:.1f}% | Post: {post_ai_prob*100:.1f}%"
-    return (
-        paraphrased,   # gr.Textbox (string)
-        ai_score_str,  # gr.Markdown (string)
-        sim,           # gr.Number (float)
-        rating,        # gr.Markdown (string)
-        score * 100    # gr.Number (float)
-    )
-
-# Custom dark theme using gradio.themes.Base
-custom_theme = grthemes.Base(
-    primary_hue="blue",
-    secondary_hue="blue",
-    neutral_hue="slate"
-)
-
-with gr.Blocks(theme=custom_theme) as demo:
-    gr.Markdown("""
-# 🧠 AI Humanizer
-<div style='display:flex;justify-content:space-between;align-items:center;'>
-<span style='font-size:1.2em;color:#7bb1ff;'>Rewrite AI text to sound 100% human</span>
-<span style='font-weight:bold;color:#7bb1ff;'>Made by Taha</span>
-</div>
-""", elem_id="header")
-    with gr.Row():
-        with gr.Column():
-            text_in = gr.Textbox(label="Paste AI-generated text here", lines=8, placeholder="Paste your text...")
-            tone = gr.Radio(["Academic", "Casual", "Friendly", "Stealth"], value="Stealth", label="Tone Selector")
-            btn = gr.Button("Humanize", elem_id="humanize-btn")
-        with gr.Column():
-            text_out = gr.Textbox(label="Humanized Output", lines=8, interactive=False)
-            ai_scores = gr.Markdown("", elem_id="ai-scores")
-            sim_score = gr.Number(label="Similarity (0=very different, 1=very similar)", interactive=False)
-            rating = gr.Markdown("", elem_id="rating")
-            human_score = gr.Number(label="Humanization Score (%)", interactive=False)
-    btn.click(
-        process,
-        inputs=[text_in, tone],
-        outputs=[text_out, ai_scores, sim_score, rating, human_score],
-        api_name="humanize"
-    )
-    gr.Markdown("""
-<div style='text-align:center;color:#7bb1ff;margin-top:2em;'>
-<b>Made by Taha</b> | Free for unlimited use | Optimized for students
-</div>
-""", elem_id="footer")
-
-demo.launch()