daviddwlee84 commited on
Commit
9f8c7c1
·
1 Parent(s): 11f97a3

Use a larger model as the default for the non-Space and non-OpenAI solution

Browse files
Files changed (2) hide show
  1. README.md +4 -1
  2. app.py +19 -6
README.md CHANGED
@@ -27,7 +27,7 @@ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-
27
  python app.py
28
  # Streamlit (TODO: able to clear message history)
29
  streamlit run streamlit_app.py
30
- # Streamlit chat_input (TODO: move ReAct process to `st.expander`)
31
  streamlit run streamlit_app_chat.py
32
  # Chainlit (TODO: move the ReAct process from agent message to "step")
33
  chainlit run chainlit_app.py
@@ -38,6 +38,9 @@ chainlit run chainlit_app.py
38
  - `What time is it?`
39
  - Expect to use tool `get_current_time_in_timezone`
40
  - Result expect the current local time in UTC
 
 
 
41
 
42
  ---
43
 
 
27
  python app.py
28
  # Streamlit (TODO: able to clear message history)
29
  streamlit run streamlit_app.py
30
+ # Streamlit chat_input (TODO: move ReAct process to `st.expander`; able to render image)
31
  streamlit run streamlit_app_chat.py
32
  # Chainlit (TODO: move the ReAct process from agent message to "step")
33
  chainlit run chainlit_app.py
 
38
  - `What time is it?`
39
  - Expect to use tool `get_current_time_in_timezone`
40
  - Result expect the current local time in UTC
41
+ - `Draw a bear for me`
42
+ - Expect to use tool `image_generator`
43
+ - Result will be something like `{'path': '/var/folders/g3/gsbp4bsx1gs51t1805r34ztw0000gn/T/tmpch4mrr13/2fa94447-e9e5-4964-b9d2-b4b9a83ccf84.png', 'mime_type': 'image/png'}`
44
 
45
  ---
46
 
app.py CHANGED
@@ -62,7 +62,7 @@ if IS_IN_HF_SPACE := os.getenv("SPACE_ID"):
62
  print("Using HfApiModel with model_id:", model_id)
63
 
64
  model = HfApiModel(
65
- max_tokens=2096,
66
  temperature=0.5,
67
  model_id=model_id,
68
  custom_role_conversions=None,
@@ -80,16 +80,29 @@ else:
80
 
81
  model = OpenAIServerModel(model_id="gpt-3.5-turbo", api_key=OPENAI_API_KEY)
82
  else:
83
- # NOTE: this model is not good enough for agent
 
 
 
 
 
 
 
 
 
 
 
84
  print(
85
- "Using TransformersModel with model_id: HuggingFaceTB/SmolLM2-1.7B-Instruct"
86
  )
87
 
88
- model = TransformersModel(
89
- model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct", trust_remote_code=True
 
 
 
90
  )
91
 
92
-
93
  # Import tool from Hub
94
  image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
95
 
 
62
  print("Using HfApiModel with model_id:", model_id)
63
 
64
  model = HfApiModel(
65
+ max_tokens=2096 if IS_IN_HF_SPACE else None,
66
  temperature=0.5,
67
  model_id=model_id,
68
  custom_role_conversions=None,
 
80
 
81
  model = OpenAIServerModel(model_id="gpt-3.5-turbo", api_key=OPENAI_API_KEY)
82
  else:
83
+ # NOTE: this model is not good enough for agent (and still might be too heavy to run on a normal computer)
84
+ # print(
85
+ # "Using TransformersModel with model_id: HuggingFaceTB/SmolLM2-1.7B-Instruct"
86
+ # )
87
+ #
88
+ # model = TransformersModel(
89
+ # model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct", trust_remote_code=True
90
+ # )
91
+
92
+ # NOTE: this model is well enough to use simple tools (but need authentication)
93
+ # https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct
94
+ # https://huggingface.co/settings/gated-repos
95
  print(
96
+ "Using HfApiModel with model_id: meta-llama/Llama-3.2-11B-Vision-Instruct"
97
  )
98
 
99
+ # NOTE: this use InferenceClient under the hood
100
+ model = HfApiModel(
101
+ temperature=0.5,
102
+ model_id="meta-llama/Llama-3.2-11B-Vision-Instruct",
103
+ custom_role_conversions=None,
104
  )
105
 
 
106
  # Import tool from Hub
107
  image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
108