janbanot committed on
Commit
d5bee47
·
1 Parent(s): a0e50f9

fix: another approach

Browse files
Files changed (1) hide show
  1. app.py +5 -6
app.py CHANGED
@@ -1,22 +1,21 @@
import gradio as gr
import spaces
from transformers import AutoTokenizer, AutoModelForCausalLM
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)

# Load model and tokenizer once at import time so the weights are ready
# before the first request is served.
logging.info("Loading model and tokenizer...")
model_name = "speakleash/Bielik-11B-v2.3-Instruct"
# torch_dtype="auto" keeps the checkpoint's stored precision instead of the
# float32 default, which would roughly double the memory footprint of an
# 11B-parameter model.
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)
16
@spaces.GPU
def process_text(input_text):
    """Generate a completion for *input_text* with the loaded model.

    Returns the decoded text (prompt plus completion) as a single string,
    suitable for display in a gradio text component.
    """
    # A bare forward pass (model(**inputs)) only returns logits/hidden
    # states — it does not generate text. Use generate() instead, and move
    # the input tensors to the model's device once @spaces.GPU attaches it.
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    output_ids = model.generate(**inputs, max_new_tokens=512)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
22
 
 
import gradio as gr
import spaces
from transformers import pipeline
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)

# Load model and tokenizer once at import time via the high-level pipeline
# API so the weights are ready before the first request is served.
logging.info("Loading model and tokenizer...")
model_name = "speakleash/Bielik-11B-v2.3-Instruct"
# torch_dtype="auto" keeps the checkpoint's stored precision instead of the
# float32 default, which would roughly double the memory footprint of an
# 11B-parameter model.
pipe = pipeline("text-generation", model=model_name, torch_dtype="auto")
@spaces.GPU
def process_text(input_text):
    """Run the chat pipeline on *input_text* and return the reply text.

    NOTE(review): assumes the text-generation pipeline, given a list of
    chat messages, returns [{"generated_text": <message list>}] with the
    assistant's reply as the last message — confirm against the installed
    transformers version.
    """
    messages = [{"role": "user", "content": input_text}]
    outputs = pipe(messages)
    # Extract the assistant's reply instead of returning the raw pipeline
    # structure, which a gradio text component cannot render usefully.
    return outputs[0]["generated_text"][-1]["content"]
21