# Source: Hugging Face Space file view (commit d5bee47, "fix: another approach")
import gradio as gr
import spaces
from transformers import pipeline
import logging
# Configure logging at module import time so load progress is visible in Space logs.
logging.basicConfig(level=logging.INFO)
# Load model and tokenizer using pipeline (the pipeline bundles both).
# NOTE(review): this downloads/loads an 11B-parameter model at import time —
# expect a long cold start on the Space.
logging.info("Loading model and tokenizer...")
model_name = "speakleash/Bielik-11B-v2.3-Instruct"
pipe = pipeline("text-generation", model=model_name)
@spaces.GPU
def process_text(input_text):
    """Run the shared text-generation pipeline on one user message.

    Parameters
    ----------
    input_text : str
        The full prompt, sent as a single user chat message.

    Returns
    -------
    str
        The assistant's generated reply text.
    """
    messages = [{"role": "user", "content": input_text}]
    outputs = pipe(messages)
    # Bug fix: the original returned the raw `outputs` object, so the Gradio
    # Text component displayed a repr of nested lists/dicts. For chat-style
    # input, the pipeline returns [{"generated_text": [...conversation...]}];
    # the last message is the assistant's reply — return only its content.
    return outputs[0]["generated_text"][-1]["content"]
def generate(text):
    """Summarize *text* using a fixed Polish summarization instruction.

    Parameters
    ----------
    text : str
        User-provided text to summarize.

    Returns
    -------
    The result of ``process_text`` on the combined prompt.
    """
    # Instruction (Polish): "Create a concise summary of the text, keeping
    # the key points. Maximum 3 sentences."
    hardcoded_prompt = "Stwórz zwięzłe podsumowanie tekstu, zachowując kluczowe punkty. Maksymalnie 3 zdania"
    # Bug fix: the original used `hardcoded_prompt + text`, fusing
    # "...3 zdania" directly onto the first word of the user text with no
    # separator. Join instruction and payload explicitly instead.
    combined_text = f"{hardcoded_prompt}:\n\n{text}"
    return process_text(combined_text)
# Build the UI — one text box in, one text box out — wired to the
# generate() summarizer, then start serving.
demo = gr.Interface(
    fn=generate,
    inputs=gr.Text(),
    outputs=gr.Text(),
)
demo.launch()