Update app.py
app.py CHANGED
@@ -1,15 +1,13 @@
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 import gradio as gr
-checkpoint = "Mr-Vicky-01/
+checkpoint = "Mr-Vicky-01/English-Tamil-Translator"
+tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
 
 def language_translator(text):
-    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
-    model = AutoModelForSeq2SeqLM.from_pretrained("finetune-EN-to-Ta/")
-    # model = AutoModelForSeq2SeqLM.from_pretrained("finetune-EN-to-Ta/")
     tokenized = tokenizer([text], return_tensors='pt')
     out = model.generate(**tokenized, max_length=128)
-
-    return tokenizer.decode(out[0],skip_special_tokens=True)
+    return tokenizer.decode(out[0],skip_special_tokens=True)
 
 # examples = [
 #     ["how are you today?"],
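The change hoists the tokenizer and model loading out of language_translator() to module scope, so the checkpoint is downloaded and loaded once when the Space starts rather than on every request. Below is a minimal sketch of what the full app.py might look like after this commit; the diff is cut off at the commented examples list, so the gr.Interface wiring (labels, title, launch call) is an assumption for illustration, not taken from the source.

# Sketch of the post-commit app.py. Model and tokenizer load once at import
# time (the point of this change), so each call only tokenizes and generates.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

checkpoint = "Mr-Vicky-01/English-Tamil-Translator"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

def language_translator(text):
    # Tokenize the input, generate a translation, and decode it back to text.
    tokenized = tokenizer([text], return_tensors='pt')
    out = model.generate(**tokenized, max_length=128)
    return tokenizer.decode(out[0], skip_special_tokens=True)

# Hypothetical Gradio wiring; the original file's version is truncated above.
# The example sentence comes from the commented-out examples list in the diff.
demo = gr.Interface(
    fn=language_translator,
    inputs=gr.Textbox(label="English text"),
    outputs=gr.Textbox(label="Tamil translation"),
    examples=[["how are you today?"]],
)

if __name__ == "__main__":
    demo.launch()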