Spaces:
Running
on
Zero
Running
on
Zero
fix: another approach
Browse files
app.py
CHANGED
@@ -1,22 +1,21 @@
|
|
1 |
import gradio as gr
|
2 |
import spaces
|
3 |
-
from transformers import
|
4 |
import logging
|
5 |
|
6 |
# Configure logging
|
7 |
logging.basicConfig(level=logging.INFO)
|
8 |
|
9 |
-
# Load model and tokenizer
|
10 |
logging.info("Loading model and tokenizer...")
|
11 |
model_name = "speakleash/Bielik-11B-v2.3-Instruct"
|
12 |
-
|
13 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
14 |
|
15 |
|
16 |
@spaces.GPU
|
17 |
def process_text(input_text):
|
18 |
-
|
19 |
-
outputs =
|
20 |
# Process outputs as needed
|
21 |
return outputs
|
22 |
|
|
|
"""Gradio Space serving the Bielik-11B instruct model through a text-generation pipeline."""
import gradio as gr
import spaces
from transformers import pipeline
import logging

# Configure logging so startup progress is visible in the Space logs.
logging.basicConfig(level=logging.INFO)

# Load model and tokenizer using pipeline — done once at import time so the
# model is ready before any request is handled.
logging.info("Loading model and tokenizer...")
model_name = "speakleash/Bielik-11B-v2.3-Instruct"
pipe = pipeline("text-generation", model=model_name)


@spaces.GPU
def process_text(input_text):
    """Wrap *input_text* as a single-turn user chat message and run generation.

    Returns the raw pipeline output; callers post-process as needed.
    """
    chat = [{"role": "user", "content": input_text}]
    outputs = pipe(chat)
    # Process outputs as needed by the caller.
    return outputs