Spaces:
Runtime error
Update app.py
app.py (CHANGED)
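This commit removes the TorchScript conversion and moves the per-frame model call into a CUDA Graphs capture-and-replay path.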
@@ -29,9 +29,6 @@ model = torch.quantization.quantize_dynamic(
 
 model = model.to(device)
 
-# Convert model to TorchScript
-model = torch.jit.script(model)
-
 processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")
 
 color_map = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_INFERNO)
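Dropping torch.jit.script is defensible on its own: the hunk context shows the model going through torch.quantization.quantize_dynamic before model.to(device), and dynamic quantization only installs CPU int8 kernels for nn.Linear, so a scripted, quantized DPT model headed for a GPU is fragile regardless. A minimal sketch of that constraint, using a hypothetical toy layer (lin, qlin) rather than the Space's model:

import torch

# Toy stand-in for the quantized model: quantize_dynamic swaps nn.Linear
# for an int8 module that ships CPU kernels only.
lin = torch.nn.Linear(8, 8)
qlin = torch.quantization.quantize_dynamic(lin, {torch.nn.Linear}, dtype=torch.qint8)

x = torch.randn(1, 8)
print(qlin(x).shape)         # fine on CPU: torch.Size([1, 8])
# qlin.to("cuda")(x.cuda())  # would fail: dynamic-quantized linear has no CUDA kernel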
@@ -39,23 +36,29 @@ color_map = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_INFERNO)
 input_tensor = torch.zeros((1, 3, 128, 128), dtype=torch.float32, device=device)
 
 def preprocess_image(image):
-
+    image = torch.from_numpy(image).to(device)
+    image = torch.nn.functional.interpolate(image.permute(2, 0, 1).unsqueeze(0), size=(128, 128), mode='bilinear', align_corners=False)
+    return (image.squeeze(0) / 255.0)
+
+static_input = torch.zeros((1, 3, 128, 128), device=device, dtype=torch.float16)  # Use float16 if using FP16
+g = torch.cuda.CUDAGraph()
+with torch.cuda.graph(g):
+    static_output = model(static_input)
 
 @torch.inference_mode()
 def process_frame(image):
     if image is None:
         return None
     preprocessed = preprocess_image(image)
-
-
-
-    depth_map = predicted_depth.squeeze().cpu().numpy()
+    static_input.copy_(preprocessed)
+    g.replay()
+    depth_map = static_output.predicted_depth.squeeze().cpu().numpy()
     depth_map = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min())
     depth_map = (depth_map * 255).astype(np.uint8)
     depth_map_colored = cv2.applyColorMap(depth_map, cv2.COLORMAP_INFERNO)
 
     return cv2.cvtColor(depth_map_colored, cv2.COLOR_BGR2RGB)
-
+
 interface = gr.Interface(
     fn=process_frame,
     inputs=gr.Image(sources="webcam", streaming=True),
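The replay half is the standard CUDA Graphs pattern, but two details here are likely sources of the Space's runtime error: the graph is captured at import time with no warmup, and preprocess_image builds its tensor with torch.from_numpy on a webcam frame, which yields uint8, a dtype bilinear interpolation rejects outright, while the captured static_input is float16. A hedged sketch of the usual setup, assuming the fixes below (warmup on a side stream per the PyTorch CUDA Graphs docs, a float32 static_input, and an explicit float cast) are what the app needs; model and device are the objects defined earlier in app.py:

import torch

static_input = torch.zeros((1, 3, 128, 128), device=device, dtype=torch.float32)

# Warm up on a side stream before capture so one-time setup work
# (kernel autotuning, allocator growth) is not baked into the graph.
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
    for _ in range(3):
        static_output = model(static_input)
torch.cuda.current_stream().wait_stream(s)

g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
    static_output = model(static_input)

def preprocess_image(image):
    # Webcam frames arrive as uint8 HxWx3; bilinear interpolate needs float.
    image = torch.from_numpy(image).to(device).float()
    image = torch.nn.functional.interpolate(
        image.permute(2, 0, 1).unsqueeze(0),
        size=(128, 128), mode="bilinear", align_corners=False,
    )
    return image / 255.0  # keep (1, 3, 128, 128) so copy_ matches static_input

Replay then works as the commit intends: copy each frame into static_input with copy_, call g.replay(), and read static_output.predicted_depth; every replay reuses the same captured memory, which is what makes the per-frame call cheap.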