# Photo-Filter-2.0 / filters.py
import cv2
import numpy as np
from registry import registry
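
# `registry` comes from this project's own registry module (not shown here).
# Its exact implementation is unknown; the decorators below only rely on a
# `register(name, defaults=..., min_vals=..., max_vals=..., step_vals=...)`
# decorator factory. A minimal sketch of an interface that would support this
# usage (names and structure are assumptions, not the project's actual code):
#
#     class FilterRegistry:
#         def __init__(self):
#             self.filters = {}
#
#         def register(self, name, defaults=None, min_vals=None,
#                      max_vals=None, step_vals=None):
#             def decorator(func):
#                 self.filters[name] = {
#                     "func": func,
#                     "defaults": defaults or {},
#                     "min_vals": min_vals or {},
#                     "max_vals": max_vals or {},
#                     "step_vals": step_vals or {},
#                 }
#                 return func
#             return decorator
#
#     registry = FilterRegistry()
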
@registry.register("Original")
def original(image):
return image
@registry.register("Dot Effect", defaults={
"dot_size": 10,
"dot_spacing": 2,
"invert": False,
}, min_vals={
"dot_size": 1,
"dot_spacing": 1,
}, max_vals={
"dot_size": 20,
"dot_spacing": 10,
}, step_vals={
"dot_size": 1,
"dot_spacing": 1,
})
def dot_effect(image, dot_size: int = 10, dot_spacing: int = 2, invert: bool = False):
"""
## Convert your image into a dotted pattern.
**Args:**
* `image` (numpy.ndarray): Input image (BGR or grayscale)
* `dot_size` (int): Size of each dot
* `dot_spacing` (int): Spacing between dots
* `invert` (bool): Invert the dots
**Returns:**
* `numpy.ndarray`: Dotted image
"""
# Convert to grayscale if image is color
if len(image.shape) == 3:
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray = image
# Apply adaptive thresholding to improve contrast
gray = cv2.adaptiveThreshold(
gray,
255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY,
        25,  # Block size (must be odd)
        5    # Constant subtracted from the weighted mean
)
height, width = gray.shape
canvas = np.zeros_like(gray) if not invert else np.full_like(gray, 255)
y_dots = range(0, height, dot_size + dot_spacing)
x_dots = range(0, width, dot_size + dot_spacing)
dot_color = 255 if not invert else 0
for y in y_dots:
for x in x_dots:
region = gray[y:min(y+dot_size, height), x:min(x+dot_size, width)]
if region.size > 0:
brightness = np.mean(region)
# Dynamic dot sizing based on brightness
relative_brightness = brightness / 255.0
if invert:
relative_brightness = 1 - relative_brightness
# Draw circle with size proportional to brightness
radius = int((dot_size/2) * relative_brightness)
if radius > 0:
                    cv2.circle(canvas,
                               (x + dot_size // 2, y + dot_size // 2),
                               radius,
                               dot_color,
                               -1)
return canvas
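
# Example usage (hypothetical): assuming `img` is a BGR image loaded with
# cv2.imread, a coarse inverted dot pattern could be produced with
#     dots = dot_effect(img, dot_size=12, dot_spacing=3, invert=True)
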
@registry.register("Pixelize", defaults={
"pixel_size": 10,
}, min_vals={
"pixel_size": 1,
}, max_vals={
"pixel_size": 50,
}, step_vals={
"pixel_size": 1,
})
def pixelize(image, pixel_size: int = 10):
"""
## Apply a pixelization effect to the image.
**Args:**
* `image` (numpy.ndarray): Input image (BGR or grayscale)
* `pixel_size` (int): Size of each pixel block
**Returns:**
* `numpy.ndarray`: Pixelized image
"""
height, width = image.shape[:2]
    # Resize the image down; guard against a zero-sized result when
    # pixel_size exceeds an image dimension
    small_height = max(1, height // pixel_size)
    small_width = max(1, width // pixel_size)
    small_image = cv2.resize(
        image, (small_width, small_height), interpolation=cv2.INTER_LINEAR)
# Resize back to the original size with nearest neighbor interpolation
pixelized_image = cv2.resize(
small_image, (width, height), interpolation=cv2.INTER_NEAREST)
return pixelized_image
@registry.register("Sketch Effect")
def sketch_effect(image):
"""
## Apply a sketch effect to the image.
**Args:**
* `image` (numpy.ndarray): Input image (BGR or grayscale)
**Returns:**
* `numpy.ndarray`: Sketch effect applied image
"""
# Convert the image to grayscale
if len(image.shape) == 3:
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray = image
# Invert the grayscale image
inverted_gray = cv2.bitwise_not(gray)
    # Apply Gaussian blur to the inverted image (fixed 21x21 kernel)
    blurred = cv2.GaussianBlur(inverted_gray, (21, 21), 0)
# Blend the grayscale image with the blurred inverted image
sketch = cv2.divide(gray, 255 - blurred, scale=256)
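    # cv2.divide(src1, src2, scale=s) computes s * src1 / src2 per pixel, so the
    # line above is a color-dodge blend: sketch is roughly gray * 256 / (255 - blurred).
    # Smooth bright areas divide out to ~255 (white paper) while edges stay dark.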
return sketch
@registry.register("Warm", defaults={
"intensity": 30,
}, min_vals={
"intensity": 0,
}, max_vals={
"intensity": 100,
}, step_vals={
"intensity": 1,
})
def warm_filter(image, intensity: int = 30):
"""
## Adds a warm color effect to the image.
**Args:**
* `image` (numpy.ndarray): Input image (BGR)
* `intensity` (int): Intensity of the warm effect (0-100)
**Returns:**
* `numpy.ndarray`: Image with warm color effect
"""
# Convert intensity to actual adjustment values
intensity_scale = intensity / 100.0
# Split the image into BGR channels
b, g, r = cv2.split(image.astype(np.float32))
# Increase red, slightly increase green, decrease blue
r = np.clip(r * (1 + 0.5 * intensity_scale), 0, 255)
g = np.clip(g * (1 + 0.1 * intensity_scale), 0, 255)
b = np.clip(b * (1 - 0.1 * intensity_scale), 0, 255)
return cv2.merge([b, g, r]).astype(np.uint8)
@registry.register("Cool", defaults={
"intensity": 30,
}, min_vals={
"intensity": 0,
}, max_vals={
"intensity": 100,
}, step_vals={
"intensity": 1,
})
def cool_filter(image, intensity: int = 30):
"""
## Adds a cool color effect to the image.
**Args:**
* `image` (numpy.ndarray): Input image (BGR)
* `intensity` (int): Intensity of the cool effect (0-100)
**Returns:**
* `numpy.ndarray`: Image with cool color effect
"""
# Convert intensity to actual adjustment values
intensity_scale = intensity / 100.0
# Split the image into BGR channels
b, g, r = cv2.split(image.astype(np.float32))
# Increase blue, slightly increase green, decrease red
b = np.clip(b * (1 + 0.5 * intensity_scale), 0, 255)
g = np.clip(g * (1 + 0.1 * intensity_scale), 0, 255)
r = np.clip(r * (1 - 0.1 * intensity_scale), 0, 255)
return cv2.merge([b, g, r]).astype(np.uint8)
@registry.register("Saturation", defaults={
"factor": 50,
}, min_vals={
"factor": 0,
}, max_vals={
"factor": 100,
}, step_vals={
"factor": 1,
})
def adjust_saturation(image, factor: int = 50):
"""
## Adjusts the saturation of the image.
**Args:**
* `image` (numpy.ndarray): Input image (BGR)
* `factor` (int): Saturation factor (0-100, 50 is normal)
**Returns:**
* `numpy.ndarray`: Image with adjusted saturation
"""
# Convert factor to multiplication value (0.0 to 2.0)
factor = (factor / 50.0)
# Convert to HSV
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
# Adjust saturation
hsv[:, :, 1] = np.clip(hsv[:, :, 1] * factor, 0, 255)
# Convert back to BGR
return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
@registry.register("Vintage", defaults={
"intensity": 50,
}, min_vals={
"intensity": 0,
}, max_vals={
"intensity": 100,
}, step_vals={
"intensity": 1,
})
def vintage_filter(image, intensity: int = 50):
"""
## Adds a vintage/retro effect to the image.
**Args:**
* `image` (numpy.ndarray): Input image (BGR)
* `intensity` (int): Intensity of the vintage effect (0-100)
**Returns:**
* `numpy.ndarray`: Image with vintage effect
"""
intensity_scale = intensity / 100.0
# Split channels
b, g, r = cv2.split(image.astype(np.float32))
# Adjust colors for vintage look
r = np.clip(r * (1 + 0.3 * intensity_scale), 0, 255)
g = np.clip(g * (1 - 0.1 * intensity_scale), 0, 255)
b = np.clip(b * (1 - 0.2 * intensity_scale), 0, 255)
# Create sepia-like effect
result = cv2.merge([b, g, r]).astype(np.uint8)
# Add slight blur for softness
if intensity > 0:
blur_amount = int(3 * intensity_scale) * 2 + 1
result = cv2.GaussianBlur(result, (blur_amount, blur_amount), 0)
return result
@registry.register("Vignette", defaults={
"intensity": 50,
}, min_vals={
"intensity": 0,
}, max_vals={
"intensity": 100,
}, step_vals={
"intensity": 1,
})
def vignette_effect(image, intensity: int = 50):
"""
## Adds a vignette effect (darker corners) to the image.
**Args:**
* `image` (numpy.ndarray): Input image (BGR)
* `intensity` (int): Intensity of the vignette (0-100)
**Returns:**
* `numpy.ndarray`: Image with vignette effect
"""
height, width = image.shape[:2]
    # Create a radial vignette mask from the distance to the image centre
    x_coords = np.linspace(-1, 1, width)[None, :]
    y_coords = np.linspace(-1, 1, height)[:, None]
    mask = np.sqrt(x_coords**2 + y_coords**2)
mask = 1 - np.clip(mask, 0, 1)
# Adjust mask based on intensity
mask = (mask - mask.min()) / (mask.max() - mask.min())
mask = mask ** (1 + intensity/50)
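    # Raising the normalized mask (values in [0, 1]) to a power > 1 darkens the
    # corners more aggressively as intensity grows, while the centre stays near 1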
# Apply mask to image
mask = mask[:, :, None]
result = image.astype(np.float32) * mask
return np.clip(result, 0, 255).astype(np.uint8)
@registry.register("HDR Effect", defaults={
"strength": 50,
}, min_vals={
"strength": 0,
}, max_vals={
"strength": 100,
}, step_vals={
"strength": 1,
})
def hdr_effect(image, strength: int = 50):
"""
## Applies an HDR-like effect to enhance image details.
**Args:**
* `image` (numpy.ndarray): Input image (BGR)
* `strength` (int): Strength of the HDR effect (0-100)
**Returns:**
* `numpy.ndarray`: Image with HDR-like effect
"""
strength_scale = strength / 100.0
# Convert to LAB color space
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB).astype(np.float32)
# Split channels
l, a, b = cv2.split(lab)
    # Apply CLAHE to the L channel. Skip it at strength 0: a clip limit of 0
    # disables clipping in OpenCV and would over-equalize the image.
    if strength > 0:
        clahe = cv2.createCLAHE(clipLimit=3.0 * strength_scale, tileGridSize=(8, 8))
        l = clahe.apply(l.astype(np.uint8)).astype(np.float32)
# Enhance local contrast
if strength > 0:
blur = cv2.GaussianBlur(l, (0, 0), 3)
detail = cv2.addWeighted(l, 1 + strength_scale, blur, -strength_scale, 0)
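        # The weighted sum above equals l + strength_scale * (l - blur),
        # i.e. an unsharp mask that amplifies fine detail in the L channel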
l = cv2.addWeighted(l, 1 - strength_scale/2, detail, strength_scale/2, 0)
# Merge channels and convert back
enhanced_lab = cv2.merge([l, a, b])
result = cv2.cvtColor(enhanced_lab.astype(np.uint8), cv2.COLOR_LAB2BGR)
return result
@registry.register("Gaussian Blur", defaults={
"kernel_size": 5,
}, min_vals={
"kernel_size": 1,
}, max_vals={
"kernel_size": 31,
}, step_vals={
"kernel_size": 2,
})
def gaussian_blur(image, kernel_size: int = 5):
"""
## Apply Gaussian blur effect to the image.
**Args:**
* `image` (numpy.ndarray): Input image (BGR)
* `kernel_size` (int): Size of the Gaussian kernel (must be odd)
**Returns:**
* `numpy.ndarray`: Blurred image
"""
# Ensure kernel size is odd
if kernel_size % 2 == 0:
kernel_size += 1
return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
@registry.register("Sharpen", defaults={
"amount": 50,
}, min_vals={
"amount": 0,
}, max_vals={
"amount": 100,
}, step_vals={
"amount": 1,
})
def sharpen(image, amount: int = 50):
"""
## Sharpen the image.
**Args:**
* `image` (numpy.ndarray): Input image (BGR)
* `amount` (int): Sharpening intensity (0-100)
**Returns:**
* `numpy.ndarray`: Sharpened image
"""
amount = amount / 100.0
# Create the sharpening kernel
kernel = np.array([[-1,-1,-1],
[-1, 9,-1],
[-1,-1,-1]])
# Apply the kernel
sharpened = cv2.filter2D(image, -1, kernel)
# Blend with original image based on amount
return cv2.addWeighted(image, 1 - amount, sharpened, amount, 0)
@registry.register("Emboss", defaults={
"strength": 50,
"direction": 0,
}, min_vals={
"strength": 0,
"direction": 0,
}, max_vals={
"strength": 100,
"direction": 7,
}, step_vals={
"strength": 1,
"direction": 1,
})
def emboss(image, strength: int = 50, direction: int = 0):
"""
## Apply emboss effect to create a 3D look.
**Args:**
* `image` (numpy.ndarray): Input image (BGR)
* `strength` (int): Emboss strength (0-100)
* `direction` (int): Direction of emboss effect (0-7)
**Returns:**
* `numpy.ndarray`: Embossed image
"""
strength = strength / 100.0 * 2.0 # Scale to 0-2 range
# Define kernels for different directions
kernels = [
np.array([[-1,-1, 0],
[-1, 1, 1],
[ 0, 1, 1]]), # 0 - top left to bottom right
np.array([[-1, 0, 1],
[-1, 1, 1],
[-1, 0, 1]]), # 1 - left to right
np.array([[ 0, 1, 1],
[-1, 1, 1],
[-1,-1, 0]]), # 2 - bottom left to top right
np.array([[ 1, 1, 1],
[ 0, 1, 0],
[-1,-1,-1]]), # 3 - bottom to top
np.array([[ 1, 1, 0],
[ 1, 1,-1],
[ 0,-1,-1]]), # 4 - bottom right to top left
np.array([[ 1, 0,-1],
[ 1, 1,-1],
[ 1, 0,-1]]), # 5 - right to left
np.array([[ 0,-1,-1],
[ 1, 1,-1],
[ 1, 1, 0]]), # 6 - top right to bottom left
np.array([[-1,-1,-1],
[ 0, 1, 0],
[ 1, 1, 1]]) # 7 - top to bottom
]
# Apply the kernel
kernel = kernels[direction % 8]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
embossed = cv2.filter2D(gray, -1, kernel * strength)
# Normalize to ensure good contrast
embossed = cv2.normalize(embossed, None, 0, 255, cv2.NORM_MINMAX)
# Convert back to BGR
return cv2.cvtColor(embossed.astype(np.uint8), cv2.COLOR_GRAY2BGR)
@registry.register("Oil Painting", defaults={
"size": 5,
"dynRatio": 1,
}, min_vals={
"size": 1,
"dynRatio": 1,
}, max_vals={
"size": 15,
"dynRatio": 7,
}, step_vals={
"size": 2,
"dynRatio": 1,
})
def oil_painting(image, size: int = 5, dynRatio: int = 1):
"""
## Apply oil painting effect to the image.
**Args:**
* `image` (numpy.ndarray): Input image (BGR)
* `size` (int): Size of the neighborhood considered
* `dynRatio` (int): Dynamic ratio affecting the intensity binning
**Returns:**
* `numpy.ndarray`: Image with oil painting effect
"""
return cv2.xphoto.oilPainting(image, size, dynRatio)
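

# A minimal manual smoke test, not part of the registry-driven app itself.
# "input.jpg" and "output.jpg" are placeholder paths chosen for illustration;
# the filters are simply called here as plain functions on a BGR numpy array.
if __name__ == "__main__":
    img = cv2.imread("input.jpg")
    if img is None:
        raise SystemExit("Could not read input.jpg")
    # Chain a few filters: warm tone, then a soft vignette, then pixelization
    out = warm_filter(img, intensity=40)
    out = vignette_effect(out, intensity=60)
    out = pixelize(out, pixel_size=8)
    cv2.imwrite("output.jpg", out)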