Hjgugugjhuhjggg committed
Commit ba2afb0 · verified · 1 Parent(s): 544dfe8

Update app.py

Files changed (1): app.py +0 -1
app.py CHANGED
@@ -28,7 +28,6 @@ cache = Cache()
  hf_token = os.environ.get("HF_TOKEN")

  llm_models = {
- "4": VLLM(model="lilmeaty/4", trust_remote_code=True, use_cuda=False, max_new_tokens=50, temperature=0.1, use_auth_token=hf_token, device="cpu"),
  "yi-coder": VLLM(model="01-ai/Yi-Coder-1.5B", trust_remote_code=True, use_cuda=False, max_new_tokens=50, temperature=0.6, use_auth_token=hf_token, device="cpu"),
  "llama": VLLM(model="meta-llama/Llama-3.2-3B-Instruct", trust_remote_code=True, use_cuda=False, max_new_tokens=50, temperature=0.1, use_auth_token=hf_token, device="cpu"),
  "qwen": VLLM(model="Qwen/Qwen2.5-1.5B-Instruct", trust_remote_code=True, use_cuda=False, max_new_tokens=50, temperature=0.6, use_auth_token=hf_token, device="cpu"),
 
28
  hf_token = os.environ.get("HF_TOKEN")
29
 
30
  llm_models = {
 
31
  "yi-coder": VLLM(model="01-ai/Yi-Coder-1.5B", trust_remote_code=True, use_cuda=False, max_new_tokens=50, temperature=0.6, use_auth_token=hf_token, device="cpu"),
32
  "llama": VLLM(model="meta-llama/Llama-3.2-3B-Instruct", trust_remote_code=True, use_cuda=False, max_new_tokens=50, temperature=0.1, use_auth_token=hf_token, device="cpu"),
33
  "qwen": VLLM(model="Qwen/Qwen2.5-1.5B-Instruct", trust_remote_code=True, use_cuda=False, max_new_tokens=50, temperature=0.6, use_auth_token=hf_token, device="cpu"),