Diptaraj Sen committed
Commit · bc9b706
1 Parent(s): d10976f

story generation model changed

app/storytelling.py +28 -33
app/storytelling.py
CHANGED
@@ -1,14 +1,15 @@
 from app.logger import get_logger
 logger = get_logger(__name__)
 
-from transformers import AutoTokenizer,
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-
+model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+
 # Load tokenizer and model
-tokenizer = AutoTokenizer.from_pretrained(
-model =
-
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(model_id)
+
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)
 
@@ -16,38 +17,32 @@ def generate_story(caption: str, max_length: int = 256) -> str:
     logger.info("Generating story...")
     try:
         # Turn caption into a story prompt
-        prompt =
-
-
-
-
-
-
-        Caption: "{caption}"
-
-        Write the story below:
-        """.strip()
+        prompt = (
+            "<|system|>\n"
+            "You are a helpful assistant.</s>\n"
+            "<|user|>\n"
+            f"Write a complete, short story about {caption}. Make sure the story has a clear ending.\n</s>\n"
+            "<|assistant|>\n"
+        )
 
         # Tokenize and run through model
         inputs = tokenizer(prompt, return_tensors="pt").to(device)
         outputs = model.generate(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        return story.replace(prompt, "").strip()
+            **inputs,
+            max_new_tokens=1000,
+            do_sample=True,
+            temperature=0.8,
+            top_p=0.9,
+            top_k=50,
+            eos_token_id=tokenizer.eos_token_id
+        )
+
+
+        # Decode and clean output
+        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        generated_story = generated_text[len(prompt):]  # Strip prompt part
+
+        return generated_story.replace(prompt, "").strip()
     except Exception as e:
         logger.exception(f"Failed to generate story: {str(e)}")
         raise
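One caveat on the new decode step: because skip_special_tokens=True strips markers such as <|system|> and </s> from the decoded text, generated_text[len(prompt):] can slice at a slightly wrong offset and leave prompt fragments in the story. A common alternative, sketched here and not part of this commit, is to decode only the token ids produced after the prompt:

# Sketch (not in this commit): decode only the newly generated ids,
# avoiding string arithmetic against the prompt entirely.
# `inputs` and `outputs` are the same objects built in generate_story.
prompt_len = inputs["input_ids"].shape[1]  # number of prompt tokens
new_ids = outputs[0][prompt_len:]          # ids generated by the model
story = tokenizer.decode(new_ids, skip_special_tokens=True).strip()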
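TinyLlama/TinyLlama-1.1B-Chat-v1.0 ships a chat template in its tokenizer config, so the hard-coded <|system|>/<|user|>/<|assistant|> markers could also be produced with tokenizer.apply_chat_template. A minimal sketch, not part of this commit:

# Sketch (not in this commit): render the same Zephyr-style prompt
# from the tokenizer's bundled chat template instead of hard-coding it.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {
        "role": "user",
        "content": f"Write a complete, short story about {caption}. "
                   "Make sure the story has a clear ending.",
    },
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

This keeps the prompt format in sync with the model card if the template changes upstream.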
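For reference, a hypothetical call against the updated module (the caption string below is made up):

# Hypothetical usage; assumes the module-level model load succeeded.
from app.storytelling import generate_story

story = generate_story("a dog surfing a wave at sunset")
print(story)

Note that the max_length parameter in the signature is now unused: generation is capped by max_new_tokens=1000 instead.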