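# Minimal smoke test for the Bielik-11B-v2.3-Instruct GGUF checkpoint on a
# Hugging Face Space: load the model, run one fixed Polish prompt, and show
# the completion in a bare Gradio interface.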
import gradio as gr
import spaces
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_NAME = "speakleash/Bielik-11B-v2.3-Instruct-GGUF"  # Polish 11B instruct model (GGUF repo)
MODEL_FILE = "Bielik-11B-v2.3-Instruct.Q4_K_M.gguf"  # 4-bit Q4_K_M quantized weights


@spaces.GPU  # on ZeroGPU, a GPU is attached only while this function executes
def test():
    device = torch.device("cuda")
    # The repo ships GGUF weights only, so load both the tokenizer and the
    # model straight from the GGUF file via transformers' `gguf_file` support
    # (requires transformers >= 4.41 plus the `gguf` package). The original
    # code passed ctransformers-style kwargs (model_file, model_type,
    # gpu_layers, hf=True) to transformers, which rejects them; note also
    # that transformers dequantizes GGUF weights into a regular PyTorch
    # model rather than running the 4-bit file as-is.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, gguf_file=MODEL_FILE)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        gguf_file=MODEL_FILE,
        torch_dtype=torch.float16,  # keep dequantized weights in fp16 to fit GPU memory
    ).to(device)

    # Fixed Polish smoke-test prompt: "Hi Bielik, how are you?"
    inputs = tokenizer("Cześć Bielik, jak się masz?", return_tensors="pt").to(device)

    # Greedy decoding capped at 128 new tokens; passing eos as pad_token_id
    # silences the "pad token not set" warning.
    with torch.no_grad():
        outputs = model.generate(
            **inputs, max_new_tokens=128, pad_token_id=tokenizer.eos_token_id
        )

    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# No inputs: pressing the button runs test() once and displays the decoded text.
demo = gr.Interface(fn=test, inputs=None, outputs=gr.Text())
demo.launch()
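
# On a Hugging Face Space this file is executed automatically; locally,
# `python app.py` serves the UI on http://127.0.0.1:7860 by default.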