Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -73,10 +73,14 @@ from langchain_core.callbacks.manager import AsyncCallbackManagerForLLMRun
|
|
73 |
from langchain_core.runnables import run_in_executor
|
74 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
75 |
import requests
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
|
|
|
|
|
|
|
|
80 |
|
81 |
#from transformers import pipeline,AutoModelForCausalLM as M,AutoTokenizer as T
|
82 |
#m=M.from_pretrained("peterpeter8585/syai4.3")
|
|
|
73 |
from langchain_core.runnables import run_in_executor
|
74 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
75 |
import requests
|
76 |
import os

# Local cache path for the quantized Llama-3 Open-Ko GGUF weights (~5 GB).
_GGUF_PATH = "./llama-3-open-ko-8b-instruct-preview-q5_k_m.gguf"
_GGUF_URL = (
    "https://huggingface.co/peterpeter8585/"
    "Llama-3-Open-Ko-8B-Instruct-preview-Q5_K_M-GGUF/resolve/main/"
    "llama-3-open-ko-8b-instruct-preview-q5_k_m.gguf"
)

# Download the model weights on first run; skip if already present on disk.
if not os.path.exists(_GGUF_PATH):
    # Stream the body in 1 MiB chunks: the original chunk size (1e20) would
    # make iter_content try to hold the entire multi-GB file in memory,
    # defeating the purpose of stream=True.
    with requests.get(_GGUF_URL, stream=True) as req:
        # Fail fast on HTTP errors instead of silently writing an HTML
        # error page to disk where the model file is expected.
        req.raise_for_status()
        with open(_GGUF_PATH, "wb") as f:
            for chunk in req.iter_content(chunk_size=1 << 20):
                f.write(chunk)
|
84 |
|
85 |
#from transformers import pipeline,AutoModelForCausalLM as M,AutoTokenizer as T
|
86 |
#m=M.from_pretrained("peterpeter8585/syai4.3")
|