import re

from flask import Flask, render_template, request
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
from nltk import pos_tag, ne_chunk
import textblob
from polyglot.detect import Detector
import numpy as np
from keras.models import load_model
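# The NLTK helpers below assume the standard corpora/models have been downloaded,
# e.g. nltk.download() for "punkt", "averaged_perceptron_tagger",
# "maxent_ne_chunker", "words" and "wordnet".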

app = Flask(__name__)
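# "emotion_detector.h5" is a pre-trained Keras emotion classifier expected to sit
# next to this file; loading fails at startup if it is missing.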
model = load_model("emotion_detector.h5")

@app.route("/")
def index():
    return render_template("index.html")

@app.route("/paraphrase", methods=["POST"])
def paraphrase():
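    # Processing pipeline: clean -> correct grammar -> summarise -> translate ->
    # apply custom synonyms -> customise output -> annotate (NER/POS/sentiment) ->
    # adjust tone based on the detected emotion.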
    input_text = request.form["input_text"]
    options = request.form.getlist("options")

    # Remove special characters
    if "remove_special_characters" in options:
        input_text = remove_special_characters(input_text)
    
    # Correct grammar
    if "correct_grammar" in options:
        input_text = correct_grammar(input_text)

    # Summarize text
    if "summarize_text" in options:
        input_text = summarize_text(input_text)

    # Multilingual support
    target_language = request.form.get("target_language")
    if target_language:
        input_text = translate(input_text, target_language)

    # Custom synonyms: each form entry is expected in the form "word:synonym"
    custom_synonyms = request.form.getlist("custom_synonyms")
    for entry in custom_synonyms:
        if ":" not in entry:
            continue
        word, synonym = entry.split(":", 1)
        input_text = replace_word(input_text, word.strip(), synonym.strip())

    # Output customization
    input_text = customise_output(input_text, options)

    # Integration with other NLP tools
    named_entities = get_named_entities(input_text)
    part_of_speech = get_part_of_speech(input_text)
    sentiment = get_sentiment(input_text)

    # Emotion detector
    emotion = detect_emotion(input_text)
    input_text = adjust_tone(input_text, emotion)

    return render_template(
        "index.html",
        paraphrased_text=input_text,
        named_entities=named_entities,
        part_of_speech=part_of_speech,
        sentiment=sentiment,
    )

def remove_special_characters(input_text):
    # Strip everything except letters, digits and whitespace.
    return re.sub(r"[^A-Za-z0-9\s]", "", input_text)

def summarize_text(input_text):
    # Placeholder: plug in an actual summariser here (currently returns the text unchanged).
    return input_text

def detect_language(input_text):
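    # Detects the language code of the input via polyglot; not currently called
    # from the /paraphrase route.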
    detector = Detector(input_text)
    language = detector.language.code
    return language

def translate(input_text, target_language):
    # TextBlob.translate() calls the Google Translate API and has been removed from
    # recent TextBlob releases, so this assumes an older version is installed.
    blob = textblob.TextBlob(input_text)
    translated_text = str(blob.translate(to=target_language))
    return translated_text

def get_synonyms(word):
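    # Collects WordNet synonyms for a word; kept as a helper for synonym
    # suggestions, not currently wired into the /paraphrase route.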
    synonyms = []
    for syn in wordnet.synsets(word):
        for lemma in syn.lemmas():
            synonyms.append(lemma.name())
    return synonyms

def replace_word(input_text, word, synonym):
    words = word_tokenize(input_text)
    words = [synonym if w == word else w for w in words]
    input_text = " ".join(words)
    return input_text

def customise_output(input_text, options):
    # Placeholder: apply output-formatting options here (currently returns the text unchanged).
    return input_text

def get_named_entities(input_text):
    named_entities = ne_chunk(pos_tag(word_tokenize(input_text)))
    return named_entities

def get_part_of_speech(input_text):
    pos = pos_tag(word_tokenize(input_text))
    return pos

def get_sentiment(input_text):
    blob = textblob.TextBlob(input_text)
    sentiment = blob.sentiment.polarity
    return sentiment

def correct_grammar(input_text):
    blob = textblob.TextBlob(input_text)
    corrected_text = str(blob.correct())
    return corrected_text

def detect_emotion(input_text):
    # Normalise to lowercase alphabetic tokens before passing the text to the model.
    words = word_tokenize(input_text)
    words = [w.lower() for w in words if w.isalpha()]
    input_text = " ".join(words)
    # Assumes the loaded Keras model accepts raw strings, i.e. embeds its own
    # text-vectorisation layer; otherwise the text must be tokenised and padded first.
    emotion = model.predict(np.array([input_text]), batch_size=1, verbose=0)[0]
    return emotion

def adjust_tone(input_text, emotion):
    # Placeholder: rewrite the text according to the detected emotion (currently returns it unchanged).
    return input_text

if __name__ == "__main__":
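    # 0.0.0.0:7860 matches the Hugging Face Spaces convention for exposed web apps.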
    app.run(debug=True, port=7860, host="0.0.0.0")