DemahAlmutairi commited on
Commit
e4baaed
·
verified ·
1 Parent(s): 8c3215b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -19
app.py CHANGED
@@ -1,8 +1,8 @@
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
  import torch
4
- import spaces
5
 
 
6
  def load_model(model_name):
7
  device = "cuda" if torch.cuda.is_available() else "cpu"
8
  model = AutoModelForCausalLM.from_pretrained(
@@ -18,34 +18,34 @@ def load_model(model_name):
18
  tokenizer=tokenizer,
19
  return_full_text=False,
20
  max_new_tokens=500,
21
- do_sample=False
22
  )
23
  return generator
24
 
25
- @spaces.GPU
26
- def generate_text(prompt, model_name):
27
- generator = load_model(model_name)
28
- messages = [{"role": "user", "content": prompt}]
29
- output = generator(messages)
 
 
 
 
30
  return output[0]["generated_text"]
31
 
32
  # Create Gradio interface
33
  demo = gr.Interface(
34
- fn=generate_text,
35
  inputs=[
36
- gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
37
- gr.Dropdown(
38
- choices=["Qwen/Qwen2.5-1.5B-Instruct","microsoft/Phi-3-mini-4k-instruct", "ALLaM-AI/ALLaM-7B-Instruct-preview"],
39
- label="Choose Model",
40
- value="ALLaM-AI/ALLaM-7B-Instruct-preview"
41
- )
42
  ],
43
- outputs=gr.Textbox(label="Generated Text"),
44
- title="Text Generator",
45
- description="Enter a prompt and generate text using one of the available models.",
46
  examples=[
47
- ["Tell me a funny joke about chickens.", "microsoft/Phi-3-mini-4k-instruct"],
48
- ["أخبرني نكتة مضحكة عن الدجاج.", "ALLaM-AI/ALLaM-7B-Instruct-preview"]
49
  ]
50
  )
51
 
 
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
  import torch
 
4
 
5
+ # Load models for English and Arabic
6
  def load_model(model_name):
7
  device = "cuda" if torch.cuda.is_available() else "cpu"
8
  model = AutoModelForCausalLM.from_pretrained(
 
18
  tokenizer=tokenizer,
19
  return_full_text=False,
20
  max_new_tokens=500,
21
+ do_sample=True # Changed to enable sampling for more creative outputs
22
  )
23
  return generator
24
 
25
# Pre-load one text-generation pipeline per supported UI language at import
# time so every request reuses the same in-memory model.
_MODEL_REPOS = [
    ("English", "microsoft/Phi-3-mini-4k-instruct"),
    ("Arabic", "ALLaM-AI/ALLaM-7B-Instruct-preview"),
]
models = {language: load_model(repo) for language, repo in _MODEL_REPOS}
30
+
31
def generate_story(language, prompt):
    """Generate story text for *prompt* with the pipeline registered for *language*.

    ``language`` must be a key of the module-level ``models`` dict
    ("English" or "Arabic"); ``prompt`` is passed to the pipeline verbatim.
    Returns the generated text of the first (only) pipeline result.
    """
    # Look up the pre-loaded generator and run it directly on the raw prompt.
    result = models[language](prompt)
    return result[0]["generated_text"]
35
 
36
# Wire up the Gradio UI: a language selector plus a free-text prompt box,
# feeding generate_story and displaying the produced story.
demo = gr.Interface(
    generate_story,
    inputs=[
        gr.Radio(choices=["English", "Arabic"], label="Select Language"),
        gr.Textbox(lines=2, placeholder="Enter your story prompt here..."),
    ],
    outputs=gr.Textbox(label="Generated Story"),
    title="Kids Storyteller",
    description="Choose a language and enter a prompt to generate a fun story for kids!",
    examples=[
        ["English", "Once upon a time in a magical forest..."],
        ["Arabic", "في قديم الزمان في غابة سحرية..."],
    ],
)
51