Ligeng-Zhu committed
Commit c857c8b · verified · 1 Parent(s): eb202aa

Upload files with `vila-upload`.

Upload __init__.py
Upload utils.py
Upload config.json
Upload README.md
Upload model_utils_packing.py
Upload auto_processor.py
Upload modeling_vila.py
Upload loss.py
Upload distributed.py
Upload llm/model.safetensors
Upload llm/config.json
Upload mm_projector/model.safetensors
Upload mm_projector/config.json
Upload vision_tower/model.safetensors
Upload vision_tower/config.json

README.md CHANGED
@@ -52,34 +52,36 @@ print(colored(response, "cyan", attrs=["bold"]))
 
 We also support the `AutoProcessor` class to ease data preparation for training and finetuning.
 
+
+### single call
+
 ```python
 from transformers import AutoProcessor, AutoModel
 
 model_path = "Efficient-Large-Model/NVILA-Lite-2B-hf-preview"
 processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
+model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device_map="auto")
+# important: set model to eval mode, otherwise the model will be in training mode and will pad to right.
+model.eval()
 
-gpt_conv = [ {
+gpt_conv = [{
     "role": "user",
     "content": [
         {"type": "image", "path": "demo_images/demo_img_1.png"},
         {"type": "text", "text": "Describe this image."}
     ]
 }]
+text = processor.apply_chat_template(gpt_conv, tokenize=False, add_generation_prompt=True)
+inputs = processor([text])
 
-inputs = processor.apply_chat_template(conversation=gpt_conv, padding=True, return_tensors="pt")
-model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device_map="auto")
 output_ids = model.generate(
     input_ids=inputs.input_ids,
-    media={
-        "image": inputs.image,
-    },
-    media_config={
-        "image": {}
-    },
+    media=inputs.media,
+    media_config=inputs.media_config,
     generation_config=model.generation_config,
     max_new_tokens=256,
 )
-print(processor.tokenizer.decode(output_ids[0], skip_special_tokens=True))
+print(processor.tokenizer.batch_decode(output_ids, skip_special_tokens=True))
 
 ##### the above code is equivalent to
 # response = model.generate_content([
@@ -89,6 +91,54 @@ print(processor.tokenizer.decode(output_ids[0], skip_special_tokens=True))
 # print(colored(response, "cyan", attrs=["bold"]))
 ```
 
+### batch call
+
+```python
+from transformers import AutoProcessor, AutoModel
+
+model_path = "Efficient-Large-Model/NVILA-Lite-2B-hf-preview"
+model_path = "./NVILA-Lite-2B-hf-preview"
+processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
+model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device_map="auto")
+# important: set model to eval mode, otherwise the model will be in training mode and will pad to right.
+model.eval()
+
+gpt_conv1 = [{
+    "role": "user",
+    "content": [
+        {"type": "image", "path": "demo_images/demo_img_1.png"},
+        {"type": "text", "text": "Describe this image."}
+    ]
+}]
+gpt_conv2 = [{
+    "role": "user",
+    "content": [
+        {"type": "image", "path": "demo_images/demo_img_2.png"},
+        {"type": "text", "text": "Describe this image for me. Provide a detailed description of the image."}
+    ]
+}]
+
+messages = [gpt_conv1, gpt_conv2]
+texts = [
+    processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True)
+    for msg in messages
+]
+inputs = processor(texts)
+
+output_ids = model.generate(
+    input_ids=inputs.input_ids,
+    media=inputs.media,
+    media_config=inputs.media_config,
+    generation_config=model.generation_config,
+    max_new_tokens=256,
+)
+output_texts = processor.tokenizer.batch_decode(output_ids, skip_special_tokens=True)
+print(output_texts[0])
+print("---" * 40)
+print(output_texts[1])
+```
+
+
 ## Model Convert
 
 The following code converts a conventional NVILA model to an HF-compatible model.
__init__.py ADDED
File without changes
auto_processor.py CHANGED
@@ -1,8 +1,11 @@
+import copy
 import os
 import os.path as osp
+import warnings
 from collections import defaultdict
 from typing import List, Union
 
+import torch
 from transformers import AutoConfig, AutoImageProcessor, AutoModel, AutoProcessor, AutoTokenizer
 from transformers.feature_extraction_utils import BatchFeature
 from transformers.image_utils import ImageInput, VideoInput
@@ -16,6 +19,26 @@ from .mm_utils import process_image, process_images
 from .tokenizer_utils import tokenize_conversation
 
 
+def vila_pad_fn(input_ids_list, padding_value=0, target_len=None, padding_side="left"):
+    # tensor shape is (batch_size, seq_len)
+    max_len = max([ids.shape[1] for ids in input_ids_list])
+    if target_len is not None:
+        assert target_len >= max_len, "target_len must be greater than or equal to max_len"
+        max_len = target_len
+
+    new_input_ids_list = []
+    for i, input_ids in enumerate(input_ids_list):
+        pad_tensor = torch.ones_like(input_ids) * padding_value
+        curr_len = input_ids.shape[1]
+        pad_tensor = pad_tensor[:, : max_len - curr_len]
+        if padding_side == "right":
+            input_ids = torch.cat((input_ids, pad_tensor), dim=1)
+        else:
+            input_ids = torch.cat((pad_tensor, input_ids), dim=1)
+        new_input_ids_list.append(input_ids)
+    return torch.cat(new_input_ids_list, dim=0)
+
+
 class VILAProcessorKwargs(ProcessingKwargs, total=False):
     _defaults = {
         "text_kwargs": {
@@ -40,6 +63,7 @@ class VILAProcessor(ProcessorMixin):
         self.config = config
         self.image_processor = image_processor
         self.tokenizer = tokenizer
+
         super().__init__(image_processor, tokenizer, chat_template=chat_template)
 
     @classmethod
@@ -48,7 +72,7 @@ class VILAProcessor(ProcessorMixin):
             pretrained_model_name_or_path = pretrained_model_name_or_path
         else:
            print(f"pretrained_model_name_or_path {pretrained_model_name_or_path} is not a directory, downloading")
-            from huggingface_hub import HfApi, snapshot_download
+            from huggingface_hub import snapshot_download
 
            pretrained_model_name_or_path = snapshot_download(pretrained_model_name_or_path)
 
@@ -59,7 +83,6 @@
             osp.join(pretrained_model_name_or_path, "llm"), trust_remote_code=True
         )
         config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=True)
-
         return cls(image_processor=image_processor, tokenizer=tokenizer, config=config)
 
     def __repr__(self):
@@ -74,9 +97,45 @@
         text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
         videos: VideoInput = None,
         **kwargs: Unpack[VILAProcessorKwargs],
+    ) -> BatchFeature:
+        if images is not None:
+            warnings.warn("images is not supported in __call__")
+
+        input_ids = []
+        media = defaultdict(list)
+        media_config = defaultdict(dict)
+        for conv in conversation:
+            feat = self.__single_call__(conv, images, text, videos, **kwargs)
+            input_ids.append(feat.input_ids)
+            for name in feat.media:
+                media[name] += feat.media[name]
+            for name in feat.media_config:
+                media_config[name].update(feat.media_config[name])
+
+        return BatchFeature(
+            data={
+                # "input_ids": torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=True, padding_value=self.pad_token_id),
+                "input_ids": vila_pad_fn(
+                    input_ids,
+                    padding_value=self.tokenizer.pad_token_id,
+                    padding_side="left",
+                ),
+                "media": media,
+                "media_config": media_config,
+            }
+        )
+
+    def __single_call__(
+        self,
+        conversation,
+        images: ImageInput = None,
+        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+        videos: VideoInput = None,
+        **kwargs: Unpack[VILAProcessorKwargs],
     ) -> BatchFeature:
         # TODO: should be merged with llava_arch.py/generate_content()
         # TODO (extract and preprocess should be done together, as the preprocess of image and video can be different, i.e. when dynamic res is used)
+        conversation = copy.deepcopy(conversation)
         media = extract_media(conversation, self.config)
         # Process media
         media_config = defaultdict(dict)
@@ -107,11 +166,9 @@
                 ]
             else:
                 raise ValueError(f"Unsupported media type: {name}")
-
        input_ids = tokenize_conversation(conversation, self.tokenizer, add_generation_prompt=True).cuda().unsqueeze(0)
        # Set up the generation config
-        # print(input_ids.shape); print(media); input()
-        return BatchFeature(data={"input_ids": input_ids, **media})
+        return BatchFeature(data={"input_ids": input_ids, "media": media, "media_config": media_config})
 
     def batch_decode(self, *args, **kwargs):
         """
@@ -152,7 +209,6 @@
    # inputs = processor(conversation=llavaconv, padding=True, return_tensors="pt")
    def apply_chat_template(self, conversation, add_generation_prompt=True, **kwargs):
        vila_conv = []
-
        for chat in conversation:
            vila_chat = {"from": "", "value": []}
            if chat["role"] == "user":
@@ -172,7 +228,8 @@
                        vila_chat["value"].append(content["text"])
            vila_conv.append(vila_chat)
 
-        return self(vila_conv)
+        # return self(vila_conv)
+        return vila_conv
 
 
 if __name__ == "__main__":
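
To make the left-padding behavior of the new `vila_pad_fn` concrete, here is a minimal sketch (the token IDs and pad value are illustrative; the processor itself calls the function with `self.tokenizer.pad_token_id`):

```python
import torch

# assumes vila_pad_fn from auto_processor.py above is in scope
a = torch.tensor([[101, 7592, 2088, 102]])  # shape (1, 4)
b = torch.tensor([[101, 7592, 102]])        # shape (1, 3)

batch = vila_pad_fn([a, b], padding_value=0, padding_side="left")
print(batch.shape)  # torch.Size([2, 4])
print(batch[1])     # tensor([   0,  101, 7592,  102])  -> padded on the left
```

Left padding keeps every prompt flush against the position where generation starts, which is also why `modeling_vila.py` below switches `tokenizer.padding_side` to "left" when the model is put in eval mode.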
config.json CHANGED
@@ -1,19 +1,26 @@
 {
   "_attn_implementation_autoset": true,
-  "_name_or_path": "runs/train/qwen25_2B_3x3-sft-20241118122815/model",
+  "_name_or_path": "NVILA-Lite-2B-hf-preview",
   "architectures": [
     "VILAForCasualLM"
   ],
+  "auto_map": {
+    "AutoConfig": "configuration_vila.VILAConfig",
+    "AutoModel": "modeling_vila.VILAForCasualLM",
+    "AutoModelForCausalLM": "modeling_vila.VILAForCasualLM",
+    "AutoProcessor": "auto_processor.VILAProcessor"
+  },
   "chat_template": null,
   "drop_path_rate": 0.0,
   "dynamic_s2": false,
   "fps": 0.0,
   "hidden_size": 1536,
   "image_aspect_ratio": "dynamic",
+  "image_encoder": "{\"_target_\": \"llava.model.encoders.BasicImageEncoder\"}",
   "interpolate_mode": "linear",
   "llm_cfg": {
     "_attn_implementation_autoset": false,
-    "_name_or_path": "runs/train/qwen25_2B_3x3-sft-20241118122815/model/llm",
+    "_name_or_path": "NVILA-Lite-2B-hf-preview/llm",
     "add_cross_attention": false,
     "architectures": [
       "Qwen2ForCausalLM"
@@ -53,7 +60,7 @@
     "max_position_embeddings": 32768,
     "max_window_layers": 28,
     "min_length": 0,
-    "model_max_length": 4096,
+    "model_max_length": null,
     "model_type": "qwen2",
     "no_repeat_ngram_size": 0,
     "num_attention_heads": 12,
@@ -89,18 +96,20 @@
     "tokenizer_padding_side": "right",
     "top_k": 50,
     "top_p": 1.0,
-    "torch_dtype": "bfloat16",
+    "torch_dtype": "float16",
     "torchscript": false,
     "typical_p": 1.0,
     "use_bfloat16": false,
     "use_cache": true,
     "use_sliding_window": false,
-    "vocab_size": 151648
+    "vocab_size": 151651
   },
+  "max_tiles": 12,
+  "min_tiles": 1,
   "mm_hidden_size": 1152,
   "mm_projector_cfg": {
     "_attn_implementation_autoset": false,
-    "_name_or_path": "runs/train/qwen25_2B_3x3-sft-20241118122815/model/mm_projector",
+    "_name_or_path": "NVILA-Lite-2B-hf-preview/mm_projector",
     "add_cross_attention": false,
     "architectures": [
       "MultimodalProjector"
@@ -160,7 +169,7 @@
     "tokenizer_class": null,
     "top_k": 50,
     "top_p": 1.0,
-    "torch_dtype": "bfloat16",
+    "torch_dtype": "float16",
     "torchscript": false,
     "typical_p": 1.0,
     "use_bfloat16": false
@@ -186,10 +195,12 @@
   "tune_language_model": true,
   "tune_mm_projector": true,
   "tune_vision_tower": true,
+  "version": "2.0",
+  "video_encoder": "{\"_target_\": \"llava.model.encoders.BasicVideoEncoder\"}",
   "vision_resolution": -1,
   "vision_tower_cfg": {
     "_attn_implementation_autoset": false,
-    "_name_or_path": "runs/train/qwen25_2B_3x3-sft-20241118122815/model/vision_tower",
+    "_name_or_path": "NVILA-Lite-2B-hf-preview/vision_tower",
     "add_cross_attention": false,
     "architectures": [
       "SiglipVisionModel"
@@ -261,17 +272,11 @@
     "tokenizer_class": null,
     "top_k": 50,
     "top_p": 1.0,
-    "torch_dtype": "bfloat16",
+    "torch_dtype": "float16",
     "torchscript": false,
     "typical_p": 1.0,
     "use_bfloat16": false,
     "vision_use_head": false
   },
-  "version": "2.0",
-  "auto_map": {
-    "AutoProcessor": "auto_processor.VILAProcessor",
-    "AutoConfig": "modeling_vila.VILAConfig",
-    "AutoModel": "modeling_vila.VILAForCasualLM",
-    "AutoModelForCausalLM": "modeling_vila.VILAForCasualLM"
-  }
-}
+  "vision_tower_lr": null
+}
distributed.py ADDED
@@ -0,0 +1,73 @@
+import os
+import warnings
+from typing import Any, List, Optional
+
+from torch import distributed as dist
+
+__all__ = [
+    "init",
+    "is_initialized",
+    "size",
+    "rank",
+    "local_size",
+    "local_rank",
+    "is_main",
+    "barrier",
+    "gather",
+    "all_gather",
+]
+
+
+def init() -> None:
+    if "RANK" not in os.environ:
+        warnings.warn("Environment variable `RANK` is not set. Skipping distributed initialization.")
+        return
+    dist.init_process_group(backend="nccl", init_method="env://")
+
+
+def is_initialized() -> bool:
+    return dist.is_initialized()
+
+
+def size() -> int:
+    return int(os.environ.get("WORLD_SIZE", 1))
+
+
+def rank() -> int:
+    return int(os.environ.get("RANK", 0))
+
+
+def local_size() -> int:
+    return int(os.environ.get("LOCAL_WORLD_SIZE", 1))
+
+
+def local_rank() -> int:
+    return int(os.environ.get("LOCAL_RANK", 0))
+
+
+def is_main() -> bool:
+    return rank() == 0
+
+
+def barrier() -> None:
+    dist.barrier()
+
+
+def gather(obj: Any, dst: int = 0) -> Optional[List[Any]]:
+    if not is_initialized():
+        return [obj]
+    if is_main():
+        objs = [None for _ in range(size())]
+        dist.gather_object(obj, objs, dst=dst)
+        return objs
+    else:
+        dist.gather_object(obj, dst=dst)
+        return None
+
+
+def all_gather(obj: Any) -> List[Any]:
+    if not is_initialized():
+        return [obj]
+    objs = [None for _ in range(size())]
+    dist.all_gather_object(objs, obj)
+    return objs
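
A short sketch of how these wrappers might be used under `torchrun` (the launch command and payload are illustrative assumptions; without `RANK` set, `init()` only warns and every helper falls back to single-process values):

```python
# hypothetical launch: torchrun --nproc_per_node=2 demo.py
import distributed as D  # the module added above; inside the repo it is imported relatively

D.init()                          # no-op with a warning if RANK is unset
payload = {"rank": D.rank(), "local_rank": D.local_rank()}
gathered = D.all_gather(payload)  # returns [payload] in the single-process case

if D.is_main():
    print(f"world size = {D.size()}, gathered = {gathered}")
if D.is_initialized():
    D.barrier()
```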
llm/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "runs/train/qwen25_2B_3x3-sft-20241118122815/model/llm",
+  "_name_or_path": "NVILA-Lite-2B-hf-preview/llm",
   "architectures": [
     "Qwen2ForCausalLM"
   ],
@@ -12,7 +12,7 @@
   "intermediate_size": 8960,
   "max_position_embeddings": 32768,
   "max_window_layers": 28,
-  "model_max_length": 4096,
+  "model_max_length": null,
   "model_type": "qwen2",
   "num_attention_heads": 12,
   "num_hidden_layers": 28,
@@ -24,9 +24,9 @@
   "tie_word_embeddings": true,
   "tokenizer_model_max_length": 4096,
   "tokenizer_padding_side": "right",
-  "torch_dtype": "bfloat16",
+  "torch_dtype": "float16",
   "transformers_version": "4.46.0",
   "use_cache": true,
   "use_sliding_window": false,
-  "vocab_size": 151648
+  "vocab_size": 151651
 }
llm/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fe90977102f7c2ffc3c0c9fff9cb6bad16937a2e93c49a0a41976fc2a50dd077
-size 3086582408
+oid sha256:e025b53d85054b09b0dc887b14a07d8e1faeda7430842fbf6940f1c1177adf3a
+size 3086591288
loss.py ADDED
@@ -0,0 +1,48 @@
+from typing import List, Union
+
+import torch
+from torch.nn.functional import cross_entropy
+
+from .constants import IGNORE_INDEX
+
+__all__ = ["soft_cross_entropy"]
+
+
+def soft_cross_entropy(
+    outputs: torch.Tensor,
+    targets: torch.Tensor,
+    soft_tokens: Union[torch.Tensor, List[int]],
+    std: float = 1,
+    ignore_index: int = IGNORE_INDEX,
+) -> torch.Tensor:
+    # Remove last token from outputs and first token from targets
+    outputs = outputs[..., :-1, :].contiguous()
+    targets = targets[..., 1:].contiguous()
+
+    # Flatten outputs and targets
+    targets = targets.view(-1)
+    outputs = outputs.view(targets.size(0), -1)
+
+    # Remove outputs and targets with ignore_index
+    indices = targets != ignore_index
+    outputs = outputs[indices]
+    targets = targets[indices]
+
+    # Convert soft token IDs to tensor
+    if isinstance(soft_tokens, list):
+        soft_tokens = torch.tensor(soft_tokens).to(targets)
+
+    # Calculate loss for non-soft tokens
+    indices = torch.isin(targets, soft_tokens, invert=True)
+    loss = cross_entropy(outputs[indices], targets[indices], reduction="sum")
+
+    # Calculate loss for soft tokens
+    indices = torch.isin(targets, soft_tokens)
+    targets_indices = torch.zeros_like(outputs[indices])
+    for k, target in enumerate(targets[indices]):
+        dist = torch.exp(-((target - soft_tokens) ** 2) / (2 * std**2))
+        targets_indices[k][soft_tokens] = dist / dist.sum()
+    loss += cross_entropy(outputs[indices], targets_indices, reduction="sum")
+
+    # Return average loss
+    return loss / targets.size(0)
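
A small sketch of calling the new `soft_cross_entropy` on dummy tensors (the shapes and the choice of soft-token IDs are illustrative assumptions; inside the package the function is imported as `from .loss import soft_cross_entropy`):

```python
import torch

# assumes soft_cross_entropy from loss.py above is in scope
vocab_size, seq_len = 32, 8
logits = torch.randn(2, seq_len, vocab_size, requires_grad=True)  # (batch, seq, vocab)
targets = torch.randint(0, vocab_size, (2, seq_len))              # next-token labels
soft_ids = list(range(16, 32))  # hypothetical IDs scored with a Gaussian-smoothed target distribution

loss = soft_cross_entropy(logits, targets, soft_tokens=soft_ids, std=1.0)
loss.backward()
print(loss.item())
```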
mm_projector/config.json CHANGED
@@ -1,10 +1,10 @@
 {
-  "_name_or_path": "runs/train/qwen25_2B_3x3-sft-20241118122815/model/mm_projector",
+  "_name_or_path": "NVILA-Lite-2B-hf-preview/mm_projector",
   "architectures": [
     "MultimodalProjector"
   ],
   "mm_projector_type": "mlp_downsample_3x3_fix",
   "model_type": "v2l_projector",
-  "torch_dtype": "bfloat16",
+  "torch_dtype": "float16",
   "transformers_version": "4.46.0"
 }
mm_projector/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:191ffde694a269d7b0ca2f1e30da5d5fecf2a9bb8f4879fbf0b780368c6a9cc4
-size 87068272
+oid sha256:9a884468cfc9b3ff37715e3bdb1bbea378fba5eb7a1b883e2958a73313eb5d35
+size 87068256
model_utils_packing.py ADDED
@@ -0,0 +1,35 @@
+from importlib import import_module
+from typing import Tuple
+
+import torch
+import transformers
+from torch import nn
+from torch.nn import functional as F
+
+__all__ = ["patch"]
+
+
+def _get_unpad_data(attention_mask: torch.Tensor, *args, **kwargs) -> Tuple[torch.Tensor, torch.Tensor, int]:
+    if hasattr(_get_unpad_data, "seqlens_in_batch"):
+        seqlens_in_batch = _get_unpad_data.seqlens_in_batch
+    else:
+        seqlens_in_batch = torch.sum(attention_mask, dim=1)
+
+    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+    max_seqlen_in_batch = seqlens_in_batch.max().item()
+    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+    return indices, cu_seqlens, max_seqlen_in_batch
+
+
+def set_seqlens_in_batch(seqlens_in_batch: torch.Tensor) -> None:
+    _get_unpad_data.seqlens_in_batch = seqlens_in_batch
+
+
+def patch(model: nn.Module) -> None:
+    if transformers.__version__ < "4.43.0":
+        m = import_module(model.__module__)
+        if not hasattr(m, "_get_unpad_data"):
+            raise ValueError(f"Module {m} does not have function '_get_unpad_data' for packing")
+        m._get_unpad_data = _get_unpad_data
+    else:
+        transformers.modeling_flash_attention_utils._get_unpad_data = _get_unpad_data
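
To illustrate what the packing override does, a small sketch that exercises the helper directly (calling the private `_get_unpad_data` here is only for demonstration; in training one calls `patch(model)` once and then `set_seqlens_in_batch(...)` before each packed forward pass, and the mask/length values below are made up):

```python
import torch

# assumes _get_unpad_data and set_seqlens_in_batch from model_utils_packing.py are in scope
attention_mask = torch.tensor([[1, 1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1, 1]])

# default: per-sequence lengths are inferred from the attention mask
_, cu_seqlens, _ = _get_unpad_data(attention_mask)
print(cu_seqlens)  # tensor([ 0,  4, 10], dtype=torch.int32)

# packed batch: declare that the second row actually holds two sequences of length 3
set_seqlens_in_batch(torch.tensor([4, 3, 3], dtype=torch.int32))
_, cu_seqlens, _ = _get_unpad_data(attention_mask)
print(cu_seqlens)  # tensor([ 0,  4,  7, 10], dtype=torch.int32)
```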
modeling_vila.py CHANGED
@@ -44,15 +44,21 @@ from .builder import build_llm_and_tokenizer
 from .configuration_vila import VILAConfig
 from .constants import *
 from .conversation import SeparatorStyle, default_conversation
+from .distributed import all_gather as vila_all_gather
+from .loss import soft_cross_entropy
 from .media import extract_media
 from .media_encoder import BasicImageEncoder, BasicVideoEncoder
 from .mm_utils import process_image, process_images
+from .model_utils_packing import set_seqlens_in_batch
 from .siglip_encoder import SiglipVisionTower, SiglipVisionTowerDynamicS2, SiglipVisionTowerS2
 from .tokenizer_utils import tokenize_conversation
 from .utils import get_model_config, load_tokenizer_then_handle_media_tokens_and_chat_template
 
-
 # from llava.constants import DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, NUM_EXTRA_TOKENS
+
+# ease debugging
+python_input = input
+
 # quick hack for remote code
 def get_pg_manager():
     return None
@@ -97,28 +103,29 @@ def get_vila_version(model_path: str) -> str:
 
 def generate_jinja_template(conv_mode: str) -> str:
     if conv_mode == "vicuna_v1":
-        return """{% set system_prompt = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions." %}
-{% set roles = ["USER", "ASSISTANT"] %}
+        return """{% set system_prompt = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. " %}
+{% set roles = ["user", "assistant"] %}
 {% set sep = " " %}
-{% set sep2 = "</s>" %}
 
 {{ system_prompt }}
 
 {% for message in messages %}
 {% if message['role'] == roles[0] %}
-{{ roles[0] }}{{ sep }}{{ message['content'] }}{{ sep2 }}
+{{ "USER: " }}{{ sep }}{{ message['content'] }}{{ sep }}
 {% else %}
-{{ roles[1] }}{{ sep }}{{ message['content'] }}{{ sep2 }}
+{{ "ASSISTANT: " }}{{ sep }}{{ message['content'] }}{{ sep }}
 {% endif %}
-{% endfor %}"""
+{% endfor %}
+{% if messages[-1]['role'] == 'user' %}
+{{ "ASSISTANT:" }}
+{% endif %}
+"""
     elif conv_mode == "llama_3":
-        return """{% set system_prompt = "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language." %}
-{% set roles = ["<|start_header_id|>user<|end_header_id|>\n\n", "<|start_header_id|>assistant<|end_header_id|>\n\n"] %}
+        return """{% set system_prompt = "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\\n\\nYou are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.<|eot_id|>" %}
+{% set roles = ["<|start_header_id|>user<|end_header_id|>\\n\\n", "<|start_header_id|>assistant<|end_header_id|>\\n\\n"]%}
 {% set sep = "<|eot_id|>" %}
-{% set sep2 = "<|end_of_text|>" %}
 
 {{ system_prompt }}
-
 {% for message in messages %}
 {% if message['role'] == 'user' %}
 {{ roles[0] }}{{ message['content'] }}{{ sep }}
@@ -126,8 +133,10 @@ def generate_jinja_template(conv_mode: str) -> str:
 {{ roles[1] }}{{ message['content'] }}{{ sep }}
 {% endif %}
 {% endfor %}
-
-{{ sep2 }}"""
+{% if messages[-1]['role'] == 'user' %}
+{{ roles[1] }}
+{% endif %}
+"""
     elif conv_mode == "hermes_2":
         return """{% set system_prompt = "<|im_start|>system\nAnswer the questions." %}
 {% set roles = ["<|im_start|>user\n", "<|im_start|>assistant\n"] %}
@@ -202,6 +211,7 @@ class VILAPretrainedModel(PreTrainedModel):
         # set device_map auto can autoamtically shard llm to different devices
         self.llm, self.tokenizer = self.init_llm(llm_cfg, config, device_map=device_map)
 
+        # NOTE(ligeng): need to add other decoders from config
        self.encoders = {"image": BasicImageEncoder(self), "video": BasicVideoEncoder(self)}
 
        self.post_config()
@@ -218,18 +228,20 @@
         output_dir: str = None,
         vila_version: str | None = None,
         conv_mode: str | None = None,
-        copy: bool = True,
+        copy: bool = False,
+        copy_weights: bool = True,
+        copy_code: bool = True,
         *model_args,
         **kwargs,
     ):
         # assert type(self) == VILAForCasualLM, "This method is only available for VILAForCasualLM."
-        from huggingface_hub import HfApi, snapshot_download
-
+        assert model_path != output_dir, "model_path and output_dir cannot be the same"
         if os.path.isdir(model_path):
             model_path = model_path
         else:
-            api = HfApi()
-            model_path = snapshot_download(model_path, local_dir=output_dir)
+            from huggingface_hub import HfApi, snapshot_download
+
+            model_path = snapshot_download(model_path)
             print("downloading HF model to", model_path)
 
         if check_dot_in_model_path(model_path) and output_dir is None:
@@ -240,10 +252,18 @@
             raise ValueError(
                 f"Output directory {output_dir} contains a dot, which will affect the remote code loading. Please specify a valid output directory without dots."
             )
+
+        if copy:
+            print("copy is set to True, copying weights and code to output_dir")
+            copy_weights = copy_code = True
+        # copy weights and code to output_dir
+        self.copy_or_symlink_directory(model_path, output_dir, copy=copy_weights)
+        self.copy_remote_py_files(output_dir, copy=copy_code)
+
         if vila_version is None:
-            vila_version = get_vila_version(model_path)
+            vila_version = get_vila_version(output_dir)
 
-        cfg_path = os.path.join(model_path, "config.json")
+        cfg_path = os.path.join(output_dir, "config.json")
         config = json.load(open(cfg_path))
         config["version"] = "2.0" # nvila tag
         config["architectures"] = ["VILAForCasualLM"]
@@ -253,24 +273,53 @@
             "AutoModel": "modeling_vila.VILAForCasualLM",
             "AutoModelForCausalLM": "modeling_vila.VILAForCasualLM",
         }
+        # vila1.5 legacy support
        config["model_type"] = "vila"
        if vila_version in ["vila1.5", "vila-m3"]:
            if conv_mode is None:
-                raise ValueError(f"Please specify the conversation mode for {model_path}.")
+                raise ValueError(f"Please specify the conversation mode for {output_dir}.")
            config["chat_template"] = conv_mode
            jinja_template = generate_jinja_template(conv_mode)
-            jinja_path = os.path.join(model_path, f"{conv_mode}.jinja")
+            jinja_path = os.path.join(output_dir, f"{conv_mode}.jinja")
            with open(jinja_path, "w") as f:
                f.write(jinja_template)
        json.dump(config, open(cfg_path, "w"), indent=2)
-        self.copy_remote_py_files(model_path, copy=copy)
 
        ##########################################################################################
-        config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
-        tokenizer = load_tokenizer_then_handle_media_tokens_and_chat_template(model_path, config)
+        config = AutoConfig.from_pretrained(output_dir, trust_remote_code=True)
+        tokenizer = load_tokenizer_then_handle_media_tokens_and_chat_template(output_dir, config)
        tokenizer.save_pretrained(osp.join(output_dir, "llm"))
        ##########################################################################################
 
+    @classmethod
+    def copy_or_symlink_directory(cls, model_path, output_dir, copy=True):
+        # Create output directory if it doesn't exist
+        os.makedirs(output_dir, exist_ok=True)
+        # Create symlinks for all files in model_path to output_dir
+        for item in os.listdir(model_path):
+            src_path = os.path.join(model_path, item)
+            dst_path = os.path.join(output_dir, item)
+
+            # Remove existing file/directory at destination if it exists
+            if os.path.exists(dst_path):
+                if os.path.islink(dst_path):
+                    os.unlink(dst_path)
+                elif os.path.isdir(dst_path):
+                    shutil.rmtree(dst_path)
+                else:
+                    os.remove(dst_path)
+
+            # Create symlink
+            if copy:
+                if os.path.isdir(src_path):
+                    shutil.copytree(src_path, dst_path)
+                else:
+                    shutil.copy2(src_path, dst_path)
+                print(f"Copied {src_path} to {dst_path}")
+            else:
+                os.symlink(src_path, dst_path)
+                print(f"Created symlink from {src_path} to {dst_path}")
+
     @classmethod
     def copy_remote_py_files(cls, output_dir, copy=True):
         ## copy .py and REAMDE for next loading remote code
@@ -539,6 +588,15 @@ class VILAForCasualLM(VILAPretrainedModel):
         image_features = self.get_mm_projector()(image_features)
         return image_features
 
+    def train(self, mode: bool = True):
+        if mode:
+            print(f"Set padding side to right for training, {mode=}")
+            self.tokenizer.padding_side = "right"
+        else:
+            print(f"Set padding side to left for evaluation, {mode=}")
+            self.tokenizer.padding_side = "left"
+        super().train(mode)
+
     def _embed(
         self,
         input_ids: torch.Tensor,
@@ -547,18 +605,25 @@
         labels: Optional[torch.Tensor],
         attention_mask: Optional[torch.Tensor],
     ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        # NOTE(ligeng): deep copy to avoid modifying the original media and media_config
+        media = copy.deepcopy(media)
+        media_config = copy.deepcopy(media_config)
+
         labels = labels if labels is not None else torch.full_like(input_ids, IGNORE_INDEX)
         attention_mask = attention_mask if attention_mask is not None else torch.ones_like(input_ids, dtype=torch.bool)
 
-        # PROCESS_GROUP_MANAGER = get_pg_manager()
-        PROCESS_GROUP_MANAGER = None
+        PROCESS_GROUP_MANAGER = get_pg_manager()
         if PROCESS_GROUP_MANAGER is not None:
             for name in media:
                 self.encoders[name].end_tokens = None
 
         # Extract text and media embeddings
         text_embeds = self.llm.model.embed_tokens(input_ids)
-        media_embeds = self.__embed_media_tokens(media, media_config)
+        if media is not None:
+            media_embeds = self.__embed_media_tokens(media, media_config)
+        else:
+            # no media was provided, so we just return an empty dict
+            media_embeds = {}
 
         # This is a workaround to make sure the dummy embeddings are consumed
         while media_embeds.get("dummy"):
@@ -586,12 +651,20 @@
                     name = media_tokens[input_ids[k][pos].item()]
                    input = media_embeds[name].popleft()
                    label = torch.full([input.shape[0]], IGNORE_INDEX, device=labels[k].device, dtype=labels[k].dtype)
+                    # print(f"{self.tokenizer.padding_side} [media] {k=} {pos=}, {self.tokenizer.batch_decode(input_ids[k][pos:pos+1])}"); python_input()
+                elif input_ids[k][pos].item() in (self.tokenizer.pad_token_id, self.tokenizer.eos_token_id):
+                    end = pos + 1
+                    pos = end
+                    # print(f"[skip PAD/EOS] {k=} {pos=}, {self.tokenizer.batch_decode(input_ids[k][pos:end])}"); python_input()
+                    continue
                else:
                    end = pos
                    while end < len(labels[k]) and input_ids[k][end].item() not in media_tokens:
                        end += 1
                    input = text_embeds[k][pos:end]
                    label = labels[k][pos:end]
+                    # print(f"[text] {k=} {pos=}, {self.tokenizer.batch_decode(input_ids[k][pos:end])}"); python_input()
+
                inputs_mk.append(input)
                labels_mk.append(label)
                pos = end
@@ -602,7 +675,7 @@
         # Check if all media embeddings are consumed
         for name in media_embeds:
             if media_embeds[name]:
-                raise ValueError(f"Not all {name} embeddings are consumed!")
+                raise ValueError(f"Not all {name} embeddings are consumed! Still {len(media_embeds[name])} left.")
 
         # Truncate sequences to `model_max_length` as media embeddings are inserted
         inputs, labels = self.__truncate_sequence(inputs, labels)
@@ -620,7 +693,7 @@
         if self.training:
             # Gather metainfo of media objects from all ranks
             info = [{"shape": tensor.shape, "dtype": tensor.dtype} for tensor in media.get(name, [])]
-            infos = list(chain(*distributed.all_gather(info)))
+            infos = list(chain(vila_all_gather(info)))
 
             # The entire batch does not contain any media objects of this type.
             if not infos:
@@ -1011,6 +1084,10 @@
         attention_mask: Optional[torch.LongTensor] = None,
         **generation_kwargs,
     ):
+        if self.training:
+            warnings.warn(
+                "Model is in training mode, using default padding strategy to right. This is not recommended for generation."
+            )
         inputs_embeds, _, attention_mask = self._embed(input_ids, media, media_config, None, attention_mask)
         return self.llm.generate(inputs_embeds=inputs_embeds, attention_mask=attention_mask, **generation_kwargs)
 
utils.py CHANGED
@@ -41,7 +41,7 @@ def load_tokenizer_then_handle_media_tokens_and_chat_template(
         print(f"Using chat template: {config.chat_template}")
         fpath = os.path.join(os.path.dirname(__file__), "chat_templates", f"{config.chat_template}.jinja")
         if not os.path.exists(fpath):
-            fpath = os.path.join(os.path.dirname(model_name_or_path), f"{config.chat_template}.jinja")
+            fpath = os.path.join(model_name_or_path, f"{config.chat_template}.jinja")
         with open(fpath) as fd:
             chat_template = fd.read()
         tokenizer.chat_template = chat_template.replace(" ", "").replace("\n", "")
vision_tower/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "runs/train/qwen25_2B_3x3-sft-20241118122815/model/vision_tower",
+  "_name_or_path": "NVILA-Lite-2B-hf-preview/vision_tower",
   "architectures": [
     "SiglipVisionModel"
   ],
@@ -17,7 +17,7 @@
   "patch_size": 14,
   "projection_dim": 2048,
   "projector_hidden_act": "gelu_fast",
-  "torch_dtype": "bfloat16",
+  "torch_dtype": "float16",
   "transformers_version": "4.46.0",
   "vision_use_head": false
 }
vision_tower/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:38c39a77befc808483de3c3a5d20f02af1e3111e15a3e9909f64b2ea7b553fd9
-size 826707904
+oid sha256:7ea53807bd8094c4acea02b1e4c9677207bcd635c44e9c6ec9a0862d558a26d0
+size 826707464