debaleena82001 committed
Commit 560350a · verified · 1 Parent(s): c4b456e

Create app.py

Files changed (1)
  1. app.py +82 -0
app.py ADDED
@@ -0,0 +1,82 @@
import gradio as gr
import numpy as np
import torch
from PIL import Image, ImageFilter
from transformers import pipeline, SegformerFeatureExtractor, SegformerForSemanticSegmentation

# Load the pre-trained segmentation model once at startup
feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b1-finetuned-cityscapes-1024-1024")
segmentation_model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b1-finetuned-cityscapes-1024-1024")


def apply_blur_effect(image, blur_type):
    """
    Applies one of three effects: a uniform Gaussian blur over the whole image,
    a depth-based lens blur, or a background blur that keeps the foreground sharp.
    """
    image = image.convert("RGB").resize((512, 512))  # Normalize mode and size of the input image

    if blur_type == "Gaussian Blur":
        # Apply a fixed Gaussian blur to the entire image
        blurred_image = image.filter(ImageFilter.GaussianBlur(radius=15))
        return blurred_image

    elif blur_type == "Depth-Based Lens Blur":
        # Use a depth-estimation model to get a depth map (loaded on each call)
        depth_estimator = pipeline(task="depth-estimation", model="Intel/zoedepth-nyu-kitti")
        outputs = depth_estimator(image)
        depth_map = np.array(outputs["depth"])

        # Normalize the depth map to [0, 1]
        depth_map_normalized = (depth_map - np.min(depth_map)) / (np.max(depth_map) - np.min(depth_map))
        depth_array = np.clip(depth_map_normalized * 5, 0, 5).astype(int)  # Map depth to blur levels 0-5

        # Pre-compute six blur levels (radius 0 = sharp, radius 5 = strongest blur)
        blur_levels = [image.filter(ImageFilter.GaussianBlur(radius=r)) for r in range(6)]

        # Assemble the output pixel by pixel from the blur level selected by depth
        depth_based_blur_image = Image.new("RGB", image.size)
        for i in range(image.width):
            for j in range(image.height):
                blur_level = depth_array[j, i]
                depth_based_blur_image.putpixel((i, j), blur_levels[blur_level].getpixel((i, j)))

        return depth_based_blur_image

    elif blur_type == "Background Blur (Zoom Effect)":
        # Run semantic segmentation to separate foreground and background
        inputs = feature_extractor(images=image, return_tensors="pt")
        with torch.no_grad():
            outputs = segmentation_model(**inputs)
        logits = outputs.logits  # Shape: (batch, num_classes, height/4, width/4)

        # Upsample the logits back to the input resolution before taking the argmax
        upsampled_logits = torch.nn.functional.interpolate(
            logits, size=image.size[::-1], mode="bilinear", align_corners=False
        )
        predicted_mask = torch.argmax(upsampled_logits, dim=1).squeeze().cpu().numpy()

        # Create a binary mask (1 = foreground, 0 = background);
        # class 11 is 'person' in the 19-class Cityscapes label set used by this model
        foreground_mask = (predicted_mask == 11).astype(np.uint8)

        # Convert the mask into a PIL image for compositing
        mask_image = Image.fromarray((foreground_mask * 255).astype(np.uint8))

        # Apply Gaussian blur to the entire image
        blurred_image = image.filter(ImageFilter.GaussianBlur(radius=15))

        # Blend the sharp foreground over the blurred background
        final_image = Image.composite(image, blurred_image, mask_image)

        return final_image

    return image


# Gradio UI
interface = gr.Interface(
    fn=apply_blur_effect,
    inputs=[
        gr.Image(type="pil"),
        gr.Radio(["Gaussian Blur", "Depth-Based Lens Blur", "Background Blur (Zoom Effect)"], label="Blur Type"),
    ],
    outputs="image",
    title="Image Blur Effects: Gaussian, Depth-Based & Background Blur",
    description="Upload an image and apply Gaussian blur, depth-based blur, or background blur (Zoom-like effect).",
)

# Launch the Gradio app
if __name__ == "__main__":
    interface.launch()
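
A minimal sketch for sanity-checking apply_blur_effect outside the Gradio UI, assuming it is run from the directory containing app.py; the image file names below are placeholders, not part of the commit.

# Quick local test of apply_blur_effect (file names are placeholders)
from PIL import Image
from app import apply_blur_effect

img = Image.open("sample.jpg")                                    # hypothetical input image
result = apply_blur_effect(img, "Background Blur (Zoom Effect)")  # or "Gaussian Blur" / "Depth-Based Lens Blur"
result.save("sample_blurred.jpg")                                 # hypothetical output path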