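"""Gradio demo: apply depth-based (lens) blur or Gaussian background blur to an uploaded photo."""
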
import gradio as gr
import numpy as np
import torch
from PIL import Image, ImageFilter
from transformers import pipeline, SegformerFeatureExtractor, SegformerForSemanticSegmentation

# Load the Cityscapes-trained SegFormer once at startup so every request reuses it.
feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b1-finetuned-cityscapes-1024-1024")
segmentation_model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b1-finetuned-cityscapes-1024-1024")


def apply_blur_effect(image, blur_type):
    """
    Applies the selected effect: depth-based (lens) blur, where blur strength
    grows with estimated depth, or Gaussian background blur, which blurs the
    background while keeping the detected foreground sharp.
    """
    image = image.resize((512, 512))

    if blur_type == "Depth-Based Blur (Lens Blur)":
        # Estimate a per-pixel depth map with ZoeDepth (the pipeline is built on each call).
        depth_estimator = pipeline(task="depth-estimation", model="Intel/zoedepth-nyu-kitti")
        outputs = depth_estimator(image)
        depth_map = np.array(outputs["depth"])

        # Normalize depth to [0, 1] and quantize it into six blur levels (0-5).
        depth_map_normalized = (depth_map - np.min(depth_map)) / (np.max(depth_map) - np.min(depth_map))
        depth_array = np.clip(depth_map_normalized * 5, 0, 5).astype(int)

        # Pre-compute one copy of the image per blur level (radius 0 = sharp, 5 = strongest).
        blur_levels = [image.filter(ImageFilter.GaussianBlur(radius=r)) for r in range(6)]

        # Assemble the output pixel by pixel, copying each pixel from the blurred
        # copy that matches its quantized depth. Simple, but slow for large images.
        depth_based_blur_image = Image.new("RGB", image.size)
        for i in range(image.width):
            for j in range(image.height):
                blur_level = depth_array[j, i]
                depth_based_blur_image.putpixel((i, j), blur_levels[blur_level].getpixel((i, j)))

        return depth_based_blur_image

    elif blur_type == "Gaussian Background Blur":
        # Segment the image with SegFormer and take the argmax class per pixel.
        inputs = feature_extractor(images=image, return_tensors="pt")
        outputs = segmentation_model(**inputs)
        logits = outputs.logits
        predicted_mask = torch.argmax(logits, dim=1).squeeze().cpu().numpy()

        # Cityscapes class id 11 is "person"; treat it as the foreground to keep sharp.
        foreground_mask = (predicted_mask == 11).astype(np.uint8)

        # The logits are lower resolution than the input, so upsample the mask to the image size.
        mask_image = Image.fromarray((foreground_mask * 255).astype(np.uint8)).resize(image.size)

        # Blur the whole image, then composite the sharp original back in where the mask is set.
        blurred_image = image.filter(ImageFilter.GaussianBlur(radius=15))
        final_image = Image.composite(image, blurred_image, mask_image)

        return final_image

    # Unrecognized blur type: return the resized image unchanged.
    return image


interface = gr.Interface(
    fn=apply_blur_effect,
    inputs=[
        gr.Image(type="pil"),
        gr.Radio(["Depth-Based Blur (Lens Blur)", "Gaussian Background Blur"], label="Blur Type"),
    ],
    outputs="image",
    title="Image Blur Effects: Gaussian Background Blur, Depth-Based Blur (Lens Blur)",
    description="Upload a selfie (it needs some background, not just the face!) and apply Gaussian Background Blur or Depth-Based Blur (Lens Blur).",
)

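# To run locally (assuming this file is saved as app.py): `python app.py`;
# Gradio will print a local URL to open in the browser.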
if __name__ == "__main__":
    interface.launch()