Dark-O-Ether committed on
Commit
8a2c233
·
1 Parent(s): 1e371a7

testing v-0.0.1

Browse files
Files changed (3) hide show
  1. app.py +49 -0
  2. init.sh +44 -0
  3. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import subprocess
2
+ import json
3
+ import streamlit as st
4
+ # import time
5
+
6
+ prompt_format = \
7
+ '''Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
8
+
9
+ ## Instruction:
10
+ Normalize entities in a given sentence, including dates (various formats), currencies (multiple symbols and notations), and scientific units (single and compound). Convert them into their full, standardized textual representations in the same language.
11
+
12
+ ### Example Input:
13
+ 15/03/1990 को, वैज्ञानिक ने $120 में 500mg यौगिक का एक नमूना खरीदा।
14
+
15
+ ### Example Response:
16
+ पंद्रह मार्च उन्नीस सौ नब्बे को, वैज्ञानिक ने एक सौ बीस अमेरिकी डॉलर में पाँच सौ मिलीग्राम यौगिक का एक नमूना खरीदा।
17
+
18
+ Just as entities like dates, currencies, and scientific units have been normalized into simple terms, you must do the same. Do not leave any entity un-normalised.
19
+
20
+ ## Input:
21
+ {}
22
+
23
+ ## Response:
24
+ {}'''
25
+
26
+ prompt = "हा अहवाल 30 pages लांब आणि 10 MB आकाराचा आहे."
27
+
28
+ prompt = prompt_format.format (
29
+ prompt, # input
30
+ "", # output - leave this blank for generation!
31
+ )
32
+
33
+ prompt = prompt.replace('\n','\\n')
34
+
35
+ command = \
36
+ '''curl --request POST \
37
+ --url http://localhost:8081/completion \
38
+ --header "Content-Type: application/json" \
39
+ --data '{"prompt": "'''+prompt+'''", "n_predict": 256}\''''
40
+
41
+ # print(command)
42
+ # Display the variable on the page
43
+ st.write("executing command ... \n")
44
+ result = subprocess.run(command, shell=True, capture_output=True, text=True)
45
+ output = json.loads(result.stdout)['content']
46
+ print(output)
47
+ # output = "hello me tasmay!"
48
+ # time.sleep(5)
49
+ st.write(f"Output:\n{output}")
init.sh ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -e # Exit immediately if a command fails
3
+
4
+ echo "Starting initialization of llama.cpp environment..."
5
+
6
+ # Clone llama.cpp repository if not already present
7
+ if [ ! -d "llama.cpp" ]; then
8
+ echo "Cloning llama.cpp repository..."
9
+ git clone https://github.com/ggml-org/llama.cpp
10
+ else
11
+ echo "llama.cpp repository already exists. Skipping clone."
12
+ fi
13
+
14
+ cd llama.cpp
15
+
16
+ # Build llama-server
17
+ echo "Configuring build with cmake..."
18
+ cmake -B build
19
+ echo "Building llama-server..."
20
+ cmake --build build --config Release -t llama-server
21
+
22
+ # Download the model if not already present
23
+ if [ ! -f "models/sarvam_entity_normalisation_llama_3.1_8b_unsloth.Q4_K_M.gguf" ]; then
24
+ echo "Model not found. Downloading the model..."
25
+ cd models
26
+ wget https://huggingface.co/Tasmay-Tib/sarvam-entity-normalisation-llama-3.1-8b-gguf/resolve/main/sarvam_entity_normalisation_llama_3.1_8b_unsloth.Q4_K_M.gguf
27
+ cd ..
28
+ else
29
+ echo "Model already exists. Skipping download."
30
+ fi
31
+
32
+ # Launch llama-server in the background
33
+ echo "Starting llama-server in the background..."
34
+ cd build
35
+ ./bin/llama-server \
36
+ -m ../llama.cpp/models/sarvam_entity_normalisation_llama_3.1_8b_unsloth.Q4_K_M.gguf \
37
+ -n 256 -c 1024 -t 2 -b 1 \
38
+ --temp 0.1 --repeat-penalty 1.1 --top-k 20 \
39
+ --port 8081 --mlock --numa numactl &
40
+
41
+ echo "llama-server launched. Waiting for the server to initialize..."
42
+ # (Optional) Wait a few seconds for the server to start before launching Streamlit
43
+ sleep 5
44
+ echo "Initialization complete. Proceeding with Streamlit app startup..."
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
requests
streamlit
# NOTE(review): json, os and subprocess are Python standard-library modules,
# not pip packages — listing them makes `pip install -r requirements.txt` fail,
# so they were removed. streamlit added because app.py imports it.