mknolan committed
Commit 13b1a08 · verified · 1 parent: 8e6ddeb

Copy from mknolan/internvl25-image-analyzer

Files changed (1):
  app.py  +292  -0
app.py ADDED
@@ -0,0 +1,292 @@
import os
import sys
import math
import numpy as np
import torch
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
from PIL import Image
import gradio as gr
from transformers import AutoModel, AutoTokenizer

# Constants
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)

# Configuration
MODEL_NAME = "OpenGVLab/InternVL2_5-8B"  # Smaller model for faster loading
IMAGE_SIZE = 448

# Cap the CUDA allocator's split block size to reduce memory fragmentation
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

# Utility functions for image processing
def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform

def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            # On ties, prefer the larger grid only if the source image has at least
            # half the pixel area of that grid
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio

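
# dynamic_preprocess implements InternVL's dynamic tiling: the image is resized to the
# closest-matching i x j grid of image_size x image_size tiles (with min_num <= i*j <= max_num),
# split into those tiles, and optionally extended with a whole-image thumbnail.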
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # enumerate candidate tile grids (i x j) with between min_num and max_num tiles
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image and split it into image_size x image_size tiles
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        # crop out the i-th tile
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images

# Load and preprocess image for the model - following the official documentation pattern
def load_image(image_pil, max_num=12):
    # Process the image using dynamic_preprocess
    processed_images = dynamic_preprocess(image_pil, image_size=IMAGE_SIZE, max_num=max_num)

    # Convert PIL images to tensor format expected by the model
    transform = build_transform(IMAGE_SIZE)
    pixel_values = [transform(img) for img in processed_images]
    pixel_values = torch.stack(pixel_values)

    # Convert to appropriate data type
    if torch.cuda.is_available():
        pixel_values = pixel_values.cuda().to(torch.bfloat16)
    else:
        pixel_values = pixel_values.to(torch.float32)

    return pixel_values
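
# split_model builds an explicit device_map for multi-GPU runs: the vision encoder, embeddings,
# and the final transformer layer are pinned to GPU 0, and the remaining language-model layers
# are spread across the other GPUs (GPU 0 is counted as half a GPU since it also hosts the ViT).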
# Function to split model across GPUs
def split_model(model_name):
    device_map = {}
    world_size = torch.cuda.device_count()
    if world_size <= 1:
        return "auto"

    num_layers = {
        'InternVL2_5-1B': 24,
        'InternVL2_5-2B': 24,
        'InternVL2_5-4B': 36,
        'InternVL2_5-8B': 32,
        'InternVL2_5-26B': 48,
        'InternVL2_5-38B': 64,
        'InternVL2_5-78B': 80
    }[model_name]

    # Since the first GPU will be used for ViT, treat it as half a GPU.
    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
    num_layers_per_gpu = [num_layers_per_gpu] * world_size
    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
    layer_cnt = 0
    for i, num_layer in enumerate(num_layers_per_gpu):
        for j in range(num_layer):
            device_map[f'language_model.model.layers.{layer_cnt}'] = i
            layer_cnt += 1
    device_map['vision_model'] = 0
    device_map['mlp1'] = 0
    device_map['language_model.model.tok_embeddings'] = 0
    device_map['language_model.model.embed_tokens'] = 0
    device_map['language_model.model.rotary_emb'] = 0
    device_map['language_model.output'] = 0
    device_map['language_model.model.norm'] = 0
    device_map['language_model.lm_head'] = 0
    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0

    return device_map

# Get model dtype
def get_model_dtype():
    return torch.bfloat16 if torch.cuda.is_available() else torch.float32

# Model loading function
def load_model():
    print(f"\n=== Loading {MODEL_NAME} ===")
    print(f"CUDA available: {torch.cuda.is_available()}")

    model_dtype = get_model_dtype()
    print(f"Using model dtype: {model_dtype}")

    if torch.cuda.is_available():
        print(f"GPU count: {torch.cuda.device_count()}")
        for i in range(torch.cuda.device_count()):
            print(f"GPU {i}: {torch.cuda.get_device_name(i)}")

        # Memory info
        print(f"Total GPU memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB")
        print(f"Allocated GPU memory: {torch.cuda.memory_allocated() / 1e9:.2f} GB")
        print(f"Reserved GPU memory: {torch.cuda.memory_reserved() / 1e9:.2f} GB")

    # Determine device map
    device_map = "auto"
    if torch.cuda.is_available() and torch.cuda.device_count() > 1:
        model_short_name = MODEL_NAME.split('/')[-1]
        device_map = split_model(model_short_name)

    # Load model and tokenizer
    try:
        model = AutoModel.from_pretrained(
            MODEL_NAME,
            torch_dtype=model_dtype,
            low_cpu_mem_usage=True,
            trust_remote_code=True,
            device_map=device_map
        )

        tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            use_fast=False,
            trust_remote_code=True
        )

        print("✓ Model and tokenizer loaded successfully!")
        return model, tokenizer
    except Exception as e:
        print(f"❌ Error loading model: {e}")
        import traceback
        traceback.print_exc()
        return None, None
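
# analyze_image follows the InternVL chat interface: the prompt must contain an "<image>"
# placeholder, and pixel_values is the stacked tile tensor produced by load_image above.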
# Image analysis function using the chat method from the documentation
def analyze_image(model, tokenizer, image, prompt):
    try:
        # Check if image is valid
        if image is None:
            return "Please upload an image first."

        # Process the image following the official pattern
        pixel_values = load_image(image)

        # Debug info
        print(f"Image processed: tensor shape {pixel_values.shape}, dtype {pixel_values.dtype}")

        # Define generation config
        generation_config = {
            "max_new_tokens": 512,
            "do_sample": False
        }

        # Use the model.chat method as shown in the official documentation
        question = f"<image>\n{prompt}"
        response, _ = model.chat(
            tokenizer=tokenizer,
            pixel_values=pixel_values,
            question=question,
            generation_config=generation_config,
            history=None,
            return_history=True
        )

        return response
    except Exception as e:
        import traceback
        error_msg = f"Error analyzing image: {str(e)}\n{traceback.format_exc()}"
        return error_msg
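
# main() wires analyze_image into a Gradio Interface; if the model failed to load it returns a
# minimal placeholder interface that only reports the failure.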
# Main function
def main():
    # Load the model
    model, tokenizer = load_model()

    if model is None:
        # Create an error interface if model loading failed
        demo = gr.Interface(
            fn=lambda x: "Model loading failed. Please check the logs for details.",
            inputs=gr.Textbox(),
            outputs=gr.Textbox(),
            title="InternVL2.5 Image Analyzer - Error",
            description="The model failed to load. Please check the logs for more information."
        )
        return demo

    # Predefined prompts for analysis
    prompts = [
        "Describe this image in detail.",
        "What can you tell me about this image?",
        "Is there any text in this image? If so, can you read it?",
        "What is the main subject of this image?",
        "What emotions or feelings does this image convey?",
        "Describe the composition and visual elements of this image.",
        "Summarize what you see in this image in one paragraph."
    ]

    # Create the interface
    demo = gr.Interface(
        fn=lambda img, prompt: analyze_image(model, tokenizer, img, prompt),
        inputs=[
            gr.Image(type="pil", label="Upload Image"),
            gr.Dropdown(choices=prompts, value=prompts[0], label="Select a prompt or write your own below",
                        allow_custom_value=True)
        ],
        outputs=gr.Textbox(label="Analysis Results", lines=15),
        title="InternVL2.5 Image Analyzer",
        description="Upload an image and ask the InternVL2.5 model to analyze it.",
        examples=[
            ["example_images/example1.jpg", "Describe this image in detail."],
            ["example_images/example2.jpg", "What can you tell me about this image?"]
        ],
        theme=gr.themes.Soft(),
        allow_flagging="never"
    )

    return demo

# Run the application
if __name__ == "__main__":
    try:
        # Check for GPU
        if not torch.cuda.is_available():
            print("WARNING: CUDA is not available. The model requires a GPU to function properly.")

        # Create and launch the interface
        demo = main()
        demo.launch(server_name="0.0.0.0")
    except Exception as e:
        print(f"Error starting the application: {e}")
        import traceback
        traceback.print_exc()