janbanot committed
Commit a0e50f9
1 Parent(s): 80b6933

fix: another approach

Files changed (1)
  1. app.py +18 -85
app.py CHANGED
@@ -1,101 +1,34 @@
-import json
 import gradio as gr
-import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
 import spaces
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import logging
 
 # Configure logging
 logging.basicConfig(level=logging.INFO)
 
-# Configuration
-MODEL_NAME = "speakleash/Bielik-11B-v2.3-Instruct"
-# DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
-DEVICE = "cuda"
-TORCH_DTYPE = torch.bfloat16 if torch.cuda.is_available() else torch.float32
-MAX_TOKENS = 1000
-
 # Load model and tokenizer
 logging.info("Loading model and tokenizer...")
-tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
-model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=TORCH_DTYPE).to(
-    DEVICE
-)
-
-logging.info("Model and tokenizer loaded successfully.")
-
-# Load prompts
-logging.info("Loading prompts from prompts.json...")
-with open("prompts.json") as f:
-    prompts = json.load(f)
+model_name = "speakleash/Bielik-11B-v2.3-Instruct"
+model = AutoModelForCausalLM.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 
 @spaces.GPU
-def transform_text(prompt_name, user_input):
-    """Transform text using selected prompt and Bielik model"""
-    try:
-        logging.info(f"Transforming text with prompt: {prompt_name}")
-
-        if not prompt_name:
-            logging.error("No prompt selected.")
-            return "Error: No prompt selected."
-
-        # Get selected prompt
-        selected_prompt = next((p for p in prompts if p["name"] == prompt_name), None)
-        if selected_prompt is None:
-            logging.error(f"Prompt '{prompt_name}' not found.")
-            return f"Error: Prompt '{prompt_name}' not found."
-
-        # Create messages structure
-        messages = [
-            {"role": "system", "content": selected_prompt["system_message"]},
-            {"role": "user", "content": user_input},
-        ]
-
-        logging.info("Tokenizing input and generating output...")
-        input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(
-            DEVICE
-        )
-
-        generated_ids = model.generate(
-            input_ids, max_new_tokens=MAX_TOKENS, do_sample=True
-        )
-
-        result = tokenizer.batch_decode(generated_ids)[0]
-        logging.info("Text transformation successful.")
-        return result
-
-    except Exception as e:
-        logging.error("An error occurred during text transformation", exc_info=True)
-        return f"Error: {str(e)}"
-
-
-# Create Gradio interface
-with gr.Blocks(title="Bielik Goblin") as interface:
-    gr.Markdown("# Bielik Goblin")
-
-    prompt_select = gr.Dropdown(
-        choices=[p["name"] for p in prompts],
-        label="Wybierz prompt",
-        interactive=True,
-        value="Parafraza",  # Set "Parafraza" as the default value
-    )
-
-    user_input = gr.Textbox(
-        label="Twój tekst", placeholder="Wpisz tutaj swój tekst...", lines=5
-    )
+def process_text(input_text):
+    inputs = tokenizer(input_text, return_tensors="pt")
+    outputs = model(**inputs)
+    # Process outputs as needed
+    return outputs
 
-    transform_btn = gr.Button("Przekształć tekst", variant="primary")
 
-    with gr.Column():
-        output = gr.Textbox(label="Wynik", interactive=False)
+def generate(text):
+    hardcoded_prompt = "Stwórz zwięzłe podsumowanie tekstu, zachowując kluczowe punkty. Maksymalnie 3 zdania"
+    combined_text = hardcoded_prompt + text
+    return process_text(combined_text)
 
-    transform_btn.click(
-        fn=transform_text,
-        inputs=[prompt_select, user_input],
-        outputs=output,
-    )
 
-logging.info("Launching Gradio interface...")
-interface.queue().launch(debug=True)
-logging.info("Gradio interface launched successfully and ready to accept requests.")
+gr.Interface(
+    fn=generate,
+    inputs=gr.Text(),
+    outputs=gr.Text(),
+).launch()
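
Note on the new approach: `process_text` returns the raw `model(**inputs)` result, which is a single forward pass producing a logits object rather than generated text, so the `gr.Text()` output component has nothing readable to show; the model is also never moved onto the GPU that `@spaces.GPU` requests. The hard-coded Polish prompt translates to "Create a concise summary of the text, keeping the key points. Three sentences at most." Below is a minimal sketch of a generate-and-decode variant, reusing the module-level `model`, `tokenizer`, and `spaces` from app.py; `max_new_tokens=1000` mirrors the removed `MAX_TOKENS` constant and is otherwise an illustrative choice.

    import torch

    @spaces.GPU
    def process_text(input_text):
        # Tokenize and keep the input tensors on the same device as the model.
        inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
        # Sample new tokens instead of doing a single forward pass.
        with torch.no_grad():
            generated_ids = model.generate(
                **inputs, max_new_tokens=1000, do_sample=True
            )
        # Decode only the newly generated tokens, skipping the echoed prompt.
        new_tokens = generated_ids[0, inputs["input_ids"].shape[1]:]
        return tokenizer.decode(new_tokens, skip_special_tokens=True)

Since Bielik-11B-v2.3-Instruct is a chat-tuned model, wrapping the prompt and user text with `tokenizer.apply_chat_template`, as the removed `transform_text` did, would likely behave better than the plain string concatenation in `generate`; at minimum, a separator such as a newline between `hardcoded_prompt` and `text` keeps the instruction from running into the input.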