Commit b50e8e0
Parent(s): 13dde62
switch back to ollama

Files changed:
- app.py (+25, -1)
- requirements.txt (+1, -2)
app.py CHANGED

@@ -1,6 +1,11 @@
-
+import subprocess
+#from transformers import pipeline
+import ollama
 import gradio
 
+
+subprocess.run("curl -fsSL https://ollama.com/install.sh | sh")
+
 history = []
 
 
@@ -13,8 +18,27 @@ def get_history_messages():
 
 
 def predict(prompt):
+    response = ollama.chat(
+        model="tinydolphin",
+        messages=[
+            *get_history_messages(),
+            {"role": "user", "content": prompt}
+        ],
+        stream=True
+    )
+    history.append((prompt, ""))
+    message = ""
+    for chunk in response:
+        message += chunk["message"]["content"]
+        history[-1] = (prompt, message)
+        yield "", history
+
+
+def predict_t(prompt):
     print("Predict:", prompt)
+    print("Loading model")
     pipe = pipeline("conversational", model="cognitivecomputations/TinyDolphin-2.8-1.1b")
+    print("Running pipeline")
     response = pipe(
         [
             *get_history_messages(),
requirements.txt CHANGED

@@ -1,4 +1,3 @@
-
-transformers
+ollama
 gradio
 sentencepiece
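Note: the ollama entry pulls in only the Python client, which talks to a locally running Ollama server over HTTP; that is why app.py has to install the server itself. Since predict is a generator yielding ("", history) pairs, the Gradio wiring presumably looks something like the sketch below. The Blocks code is outside the hunks shown, so the component names are assumptions; predict refers to the generator defined in app.py above.

import gradio

with gradio.Blocks() as demo:
    chatbot = gradio.Chatbot()  # renders the (user, assistant) tuples kept in `history`
    prompt = gradio.Textbox()
    # Each value yielded by predict clears the textbox and refreshes the chat;
    # Gradio streams generator outputs to the UI automatically.
    prompt.submit(predict, inputs=[prompt], outputs=[prompt, chatbot])

demo.launch()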