radames committed on
Commit 2acb2ce · 0 Parent(s):
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
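These attribute lines route any file matching the globs through Git LFS (`filter=lfs diff=lfs merge=lfs`) and mark it as non-text, so large model weights and archives are stored as LFS pointers rather than regular blobs. As a rough, hedged illustration of which paths such patterns capture, the Python sketch below approximates the matching with `fnmatch`; the helper and the reduced pattern list are hypothetical, and `fnmatch` only approximates Git's attribute glob rules (notably `**` handling), so this is not a reimplementation of Git's matcher.

```python
# Hedged sketch: approximate which repository paths the .gitattributes
# patterns above would hand to Git LFS. fnmatch only approximates Git's
# glob semantics, so treat this as illustrative only.
from fnmatch import fnmatch

LFS_PATTERNS = ["*.onnx", "*.h5", "*.zip", "saved_model/**/*", "*tfevents*"]  # subset of the list above

def routed_to_lfs(path: str) -> bool:
    # Git matches bare-name patterns against any path component,
    # so compare both the full path and the basename.
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(path, p) or fnmatch(name, p) for p in LFS_PATTERNS)

for candidate in ["model.onnx", "weights/best.h5", "saved_model/1/variables.data", "app.py"]:
    print(candidate, "->", "LFS" if routed_to_lfs(candidate) else "regular git")
```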
.gitignore ADDED
@@ -0,0 +1,46 @@
+ # Python build
+ .eggs/
+ gradio.egg-info/*
+ !gradio.egg-info/requires.txt
+ !gradio.egg-info/PKG-INFO
+ dist/
+ *.pyc
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ build/
+
+ # JS build
+ gradio/templates/frontend
+ # Secrets
+ .env
+
+ # Gradio run artifacts
+ *.db
+ *.sqlite3
+ gradio/launches.json
+ flagged/
+ gradio_cached_examples/
+
+ # Tests
+ .coverage
+ coverage.xml
+ test.txt
+
+ # Demos
+ demo/tmp.zip
+ demo/files/*.avi
+ demo/files/*.mp4
+
+ # Etc
+ .idea/*
+ .DS_Store
+ *.bak
+ workspace.code-workspace
+ *.h5
+ .vscode/
+
+ # log files
+ .pnpm-debug.log
+ venv/
+ *.db-journal
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Depth Image to Autostereogram (Magic Eye)
+ emoji: 👀 😵‍💫 👀
+ colorFrom: green
+ colorTo: black
+ sdk: gradio
+ sdk_version: 2.9.4
+ app_file: app.py
+ pinned: false
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
+
+ Depth Image to Autostereogram (Magic Eye)
app.py ADDED
@@ -0,0 +1,73 @@
+ import gradio as gr
+ from transformers import DPTFeatureExtractor, DPTForDepthEstimation
+ import torch
+ import numpy as np
+ from PIL import Image, ImageOps
+ from pathlib import Path
+ import os
+ import glob
+ from autostereogram.sirds_converter import SirdsConverter
+ from skimage import color
+
+ feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
+ model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
+
+ stereo_converter = SirdsConverter()
+
+
+ def process_image(image_path):
+     image_raw = Image.open(Path(image_path))
+
+     # resize to a fixed 1280px width, preserving aspect ratio
+     image = image_raw.resize(
+         (1280, int(1280 * image_raw.size[1] / image_raw.size[0])),
+         Image.Resampling.LANCZOS)
+
+     # prepare image for the model
+     encoding = feature_extractor(image, return_tensors="pt")
+
+     # forward pass
+     with torch.no_grad():
+         outputs = model(**encoding)
+         predicted_depth = outputs.predicted_depth
+
+     # interpolate to original size
+     prediction = torch.nn.functional.interpolate(
+         predicted_depth.unsqueeze(1),
+         size=image.size[::-1],
+         mode="bicubic",
+         align_corners=False,
+     ).squeeze()
+     output = prediction.cpu().numpy()
+     depth_image = (output * 255 / np.max(output)).astype('uint8')
+     depth_image_padded = np.array(ImageOps.pad(
+         Image.fromarray(depth_image), (1280, 720)))
+
+     stereo_image = stereo_converter.convert_depth_to_stereogram_with_sird(
+         depth_image_padded, False, 0.5).astype(np.uint8)
+
+     return [depth_image_padded, stereo_image]
+
+
+ title = "Demo: Depth Image to Autostereogram (Magic Eye)"
+ description = "This demo is a variation on the original <a href='https://huggingface.co/spaces/nielsr/dpt-depth-estimation' target='_blank'>DPT Demo</a>. It uses the DPT model to predict the depth of an image and then renders that depth map as an autostereogram (Magic Eye)."
+
+ examples = sorted(glob.glob('examples/*.jpg'))
+
+ iface = gr.Interface(fn=process_image,
+                      inputs=[
+                          gr.inputs.Image(
+                              type="filepath", label="Input Image")
+                      ],
+                      outputs=[
+                          gr.outputs.Image(label="Predicted Depth", type="pil"),
+                          gr.outputs.Image(label="Stereogram", type="pil")
+                      ],
+                      title=title,
+                      description=description,
+                      examples=examples,
+                      allow_flagging="never",
+                      # cache_examples=False
+                      )
+
+ if __name__ == "__main__":
+     iface.launch(debug=True, enable_queue=False)
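The Magic Eye step itself is delegated to pystereogram's `SirdsConverter`, which app.py calls via `convert_depth_to_stereogram_with_sird`. As a hedged illustration of the underlying idea only (not pystereogram's actual algorithm), the sketch below builds a toy single-image random-dot stereogram: each output pixel copies a pixel roughly one "eye separation" to its left, and nearer (brighter) depth values shrink that separation, which the brain reads as parallax. The function name and parameters are hypothetical.

```python
# Hedged sketch: the basic idea behind a single-image random-dot stereogram
# (SIRDS). This is NOT the pystereogram implementation used by app.py.
import numpy as np

def toy_sirds(depth: np.ndarray, pattern_width: int = 96, max_shift: int = 24,
              seed: int = 0) -> np.ndarray:
    """depth: 2-D uint8 array, 0 = far, 255 = near. Returns a uint8 stereogram."""
    rng = np.random.default_rng(seed)
    h, w = depth.shape
    out = np.zeros((h, w), dtype=np.uint8)
    # seed the left edge with a random-dot strip
    out[:, :pattern_width] = rng.integers(0, 256, size=(h, pattern_width), dtype=np.uint8)
    for x in range(pattern_width, w):
        # nearer pixels -> smaller separation -> pattern repeats sooner -> appears closer
        sep = pattern_width - (depth[:, x].astype(int) * max_shift) // 255
        out[:, x] = out[np.arange(h), x - sep]
    return out
```

Feeding it the padded depth map from `process_image` (e.g. `toy_sirds(depth_image_padded)`) produces a recognizable, if cruder, stereogram than the pystereogram output.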
examples/1-tim-gouw-JsjXnWlh8-g-unsplash.jpg ADDED
examples/gary-bendig-6GMq7AGxNbE-unsplash.jpg ADDED
examples/ricky-kharawala-adK3Vu70DEQ-unsplash.jpg ADDED
examples/suheyl-burak-AwKokEFkLhM-unsplash.jpg ADDED
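A quick way to exercise `process_image` outside the Gradio UI is to call it directly on one of the example images added above. This is a minimal sketch, assuming the dependencies from requirements.txt are installed; the output file names are arbitrary.

```python
# Hedged sketch: run app.py's process_image on a bundled example image.
# Importing app downloads/loads the Intel/dpt-large weights, so the first run is slow.
from PIL import Image
from app import process_image

depth, stereogram = process_image("examples/gary-bendig-6GMq7AGxNbE-unsplash.jpg")

# Both outputs are uint8 NumPy arrays (the depth map is padded to 1280x720); save for inspection.
Image.fromarray(depth).save("depth.png")
Image.fromarray(stereogram).save("stereogram.png")
```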
packages.txt ADDED
@@ -0,0 +1 @@
+ libgl1-mesa-glx
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ torch
+ git+https://github.com/nielsrogge/transformers.git@add_dpt_redesign#egg=transformers
+ numpy
+ Pillow
+ gradio==2.9.4
+ jinja2
+ transformers
+ scikit-image
+ pystereogram