import cv2
import numpy as np

from registry import registry


def original(image):
    return image


def dot_effect(image, dot_size: int = 10, dot_spacing: int = 2, invert: bool = False):
    """
    ## Convert your image into a dotted pattern.
    **Args:**
    * `image` (numpy.ndarray): Input image (BGR or grayscale)
    * `dot_size` (int): Size of each dot
    * `dot_spacing` (int): Spacing between dots
    * `invert` (bool): Invert the dots
    **Returns:**
    * `numpy.ndarray`: Dotted image
    """
    # Convert to grayscale if the image is color
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image

    # Apply adaptive thresholding to improve contrast
    gray = cv2.adaptiveThreshold(
        gray,
        255,
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY,
        25,  # Block size
        5    # Constant subtracted from the mean
    )

    height, width = gray.shape
    canvas = np.zeros_like(gray) if not invert else np.full_like(gray, 255)

    y_dots = range(0, height, dot_size + dot_spacing)
    x_dots = range(0, width, dot_size + dot_spacing)
    dot_color = 255 if not invert else 0

    for y in y_dots:
        for x in x_dots:
            region = gray[y:min(y + dot_size, height), x:min(x + dot_size, width)]
            if region.size > 0:
                brightness = np.mean(region)
                # Dynamic dot sizing based on brightness
                relative_brightness = brightness / 255.0
                if invert:
                    relative_brightness = 1 - relative_brightness
                # Draw a circle with radius proportional to brightness
                radius = int((dot_size / 2) * relative_brightness)
                if radius > 0:
                    cv2.circle(canvas,
                               (x + dot_size // 2, y + dot_size // 2),
                               radius,
                               dot_color,
                               -1)
    return canvas
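# A minimal usage sketch (illustrative only; "example.jpg" and the output
# filename are placeholders, not part of the app):
#
#   img = cv2.imread("example.jpg")
#   dotted = dot_effect(img, dot_size=8, dot_spacing=3, invert=False)
#   cv2.imwrite("dotted.jpg", dotted)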

def pixelize(image, pixel_size: int = 10):
    """
    ## Apply a pixelization effect to the image.
    **Args:**
    * `image` (numpy.ndarray): Input image (BGR or grayscale)
    * `pixel_size` (int): Size of each pixel block
    **Returns:**
    * `numpy.ndarray`: Pixelized image
    """
    height, width = image.shape[:2]
    # Downscale the image; guard against a zero-sized target when
    # pixel_size exceeds the image dimensions
    small_height = max(1, height // pixel_size)
    small_width = max(1, width // pixel_size)
    small_image = cv2.resize(
        image, (small_width, small_height), interpolation=cv2.INTER_LINEAR)
    # Resize back to the original size with nearest-neighbor interpolation
    pixelized_image = cv2.resize(
        small_image, (width, height), interpolation=cv2.INTER_NEAREST)
    return pixelized_image

def sketch_effect(image):
    """
    ## Apply a sketch effect to the image.
    **Args:**
    * `image` (numpy.ndarray): Input image (BGR or grayscale)
    **Returns:**
    * `numpy.ndarray`: Sketch effect applied image
    """
    # Convert the image to grayscale
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image
    # Invert the grayscale image
    inverted_gray = cv2.bitwise_not(gray)
    # Apply Gaussian blur to the inverted image (fixed 21x21 kernel)
    blurred = cv2.GaussianBlur(inverted_gray, (21, 21), 0)
    # Divide the grayscale image by the re-inverted blur (color dodge blend)
    sketch = cv2.divide(gray, 255 - blurred, scale=256)
    return sketch
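# Note on the divide above (illustrative, not part of the pipeline): because
# `blurred` is a blur of the inverted grayscale, `255 - blurred` is effectively
# a blurred copy of `gray`, so smooth regions divide out to near-white while
# edges stay dark. A rough equivalent formulation:
#
#   blurred_gray = cv2.GaussianBlur(gray, (21, 21), 0)
#   sketch = cv2.divide(gray, blurred_gray, scale=256)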

def warm_filter(image, intensity: int = 30):
    """
    ## Adds a warm color effect to the image.
    **Args:**
    * `image` (numpy.ndarray): Input image (BGR)
    * `intensity` (int): Intensity of the warm effect (0-100)
    **Returns:**
    * `numpy.ndarray`: Image with warm color effect
    """
    # Convert intensity to an adjustment scale
    intensity_scale = intensity / 100.0
    # Split the image into BGR channels
    b, g, r = cv2.split(image.astype(np.float32))
    # Increase red, slightly increase green, decrease blue
    r = np.clip(r * (1 + 0.5 * intensity_scale), 0, 255)
    g = np.clip(g * (1 + 0.1 * intensity_scale), 0, 255)
    b = np.clip(b * (1 - 0.1 * intensity_scale), 0, 255)
    return cv2.merge([b, g, r]).astype(np.uint8)


def cool_filter(image, intensity: int = 30):
    """
    ## Adds a cool color effect to the image.
    **Args:**
    * `image` (numpy.ndarray): Input image (BGR)
    * `intensity` (int): Intensity of the cool effect (0-100)
    **Returns:**
    * `numpy.ndarray`: Image with cool color effect
    """
    # Convert intensity to an adjustment scale
    intensity_scale = intensity / 100.0
    # Split the image into BGR channels
    b, g, r = cv2.split(image.astype(np.float32))
    # Increase blue, slightly increase green, decrease red
    b = np.clip(b * (1 + 0.5 * intensity_scale), 0, 255)
    g = np.clip(g * (1 + 0.1 * intensity_scale), 0, 255)
    r = np.clip(r * (1 - 0.1 * intensity_scale), 0, 255)
    return cv2.merge([b, g, r]).astype(np.uint8)

def adjust_saturation(image, factor: int = 50):
    """
    ## Adjusts the saturation of the image.
    **Args:**
    * `image` (numpy.ndarray): Input image (BGR)
    * `factor` (int): Saturation factor (0-100, 50 is normal)
    **Returns:**
    * `numpy.ndarray`: Image with adjusted saturation
    """
    # Map the 0-100 slider to a multiplier in the range 0.0-2.0
    factor = factor / 50.0
    # Convert to HSV
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
    # Scale the saturation channel
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] * factor, 0, 255)
    # Convert back to BGR
    return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
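# Illustrative mapping of the `factor` slider (placeholder values):
#   adjust_saturation(img, factor=0)    -> saturation removed (grayscale look)
#   adjust_saturation(img, factor=50)   -> image unchanged
#   adjust_saturation(img, factor=100)  -> saturation doubled (clipped at 255)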

def vintage_filter(image, intensity: int = 50):
    """
    ## Adds a vintage/retro effect to the image.
    **Args:**
    * `image` (numpy.ndarray): Input image (BGR)
    * `intensity` (int): Intensity of the vintage effect (0-100)
    **Returns:**
    * `numpy.ndarray`: Image with vintage effect
    """
    intensity_scale = intensity / 100.0
    # Split channels
    b, g, r = cv2.split(image.astype(np.float32))
    # Adjust colors for a vintage look
    r = np.clip(r * (1 + 0.3 * intensity_scale), 0, 255)
    g = np.clip(g * (1 - 0.1 * intensity_scale), 0, 255)
    b = np.clip(b * (1 - 0.2 * intensity_scale), 0, 255)
    # Create a sepia-like effect
    result = cv2.merge([b, g, r]).astype(np.uint8)
    # Add a slight blur for softness
    if intensity > 0:
        blur_amount = int(3 * intensity_scale) * 2 + 1  # always an odd kernel size
        result = cv2.GaussianBlur(result, (blur_amount, blur_amount), 0)
    return result


def vignette_effect(image, intensity: int = 50):
    """
    ## Adds a vignette effect (darker corners) to the image.
    **Args:**
    * `image` (numpy.ndarray): Input image (BGR)
    * `intensity` (int): Intensity of the vignette (0-100)
    **Returns:**
    * `numpy.ndarray`: Image with vignette effect
    """
    height, width = image.shape[:2]
    # Create a radial vignette mask (bright center, dark corners)
    X_resultant = np.abs(np.linspace(-1, 1, width)[None, :])
    Y_resultant = np.abs(np.linspace(-1, 1, height)[:, None])
    mask = np.sqrt(X_resultant**2 + Y_resultant**2)
    mask = 1 - np.clip(mask, 0, 1)
    # Normalize the mask and shape its falloff based on intensity
    mask = (mask - mask.min()) / (mask.max() - mask.min())
    mask = mask ** (1 + intensity / 50)
    # Apply the mask to every channel
    mask = mask[:, :, None]
    result = image.astype(np.float32) * mask
    return np.clip(result, 0, 255).astype(np.uint8)
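# For intuition (illustrative values only): the mask is 1.0 at the image
# center and falls toward 0.0 at the corners, and the exponent (1 + intensity/50)
# steepens that falloff, e.g.:
#
#   subtle = vignette_effect(img, intensity=20)   # gentle corner darkening
#   strong = vignette_effect(img, intensity=90)   # pronounced vignette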

def hdr_effect(image, strength: int = 50):
    """
    ## Applies an HDR-like effect to enhance image details.
    **Args:**
    * `image` (numpy.ndarray): Input image (BGR)
    * `strength` (int): Strength of the HDR effect (0-100)
    **Returns:**
    * `numpy.ndarray`: Image with HDR-like effect
    """
    strength_scale = strength / 100.0
    # Convert to LAB color space
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB).astype(np.float32)
    # Split channels
    l, a, b = cv2.split(lab)
    # Apply CLAHE to the L channel
    clahe = cv2.createCLAHE(clipLimit=3.0 * strength_scale, tileGridSize=(8, 8))
    l = clahe.apply(l.astype(np.uint8)).astype(np.float32)
    # Enhance local contrast (unsharp masking on the L channel)
    if strength > 0:
        blur = cv2.GaussianBlur(l, (0, 0), 3)
        detail = cv2.addWeighted(l, 1 + strength_scale, blur, -strength_scale, 0)
        l = cv2.addWeighted(l, 1 - strength_scale / 2, detail, strength_scale / 2, 0)
    # Merge channels and convert back to BGR
    enhanced_lab = cv2.merge([l, a, b])
    result = cv2.cvtColor(enhanced_lab.astype(np.uint8), cv2.COLOR_LAB2BGR)
    return result
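
# Minimal end-to-end sketch (illustrative only): load an image, apply a few of
# the effects above, and write the results to disk. The input path "input.jpg"
# and the output filenames are placeholders, not part of the app.
if __name__ == "__main__":
    img = cv2.imread("input.jpg")  # placeholder path
    if img is None:
        raise SystemExit("Could not read input.jpg")
    cv2.imwrite("sketch.jpg", sketch_effect(img))
    cv2.imwrite("warm.jpg", warm_filter(img, intensity=40))
    cv2.imwrite("vignette.jpg", vignette_effect(img, intensity=60))
    cv2.imwrite("hdr.jpg", hdr_effect(img, strength=70))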