Version 2.0c Release!

Sharpness and some other improvements added!
This commit is contained in:
Kenneth Estanislao 2025-10-12 22:33:09 +08:00
parent f9270c5d1c
commit ae2d21456d
12 changed files with 1896 additions and 558 deletions

7
modules/custom_types.py Normal file
View File

@ -0,0 +1,7 @@
# Shared type aliases for the project's frame processors.
from typing import Any
from insightface.app.common import Face
import numpy

# Re-export: lets callers do `from modules.custom_types import Face` without
# depending on insightface's package layout directly.
Face = Face
# A single image/video frame as a numpy array (presumably H x W x 3 BGR
# uint8 as produced by cv2 -- TODO confirm at call sites).
Frame = numpy.ndarray[Any, Any]

View File

@ -1,3 +1,5 @@
# --- START OF FILE globals.py ---
import os
from typing import List, Dict, Any
@ -9,35 +11,61 @@ file_types = [
("Video", ("*.mp4", "*.mkv")),
]
source_target_map = []
simple_map = {}
# Face Mapping Data
souce_target_map: List[Dict[str, Any]] = [] # Stores detailed map for image/video processing
simple_map: Dict[str, Any] = {} # Stores simplified map (embeddings/faces) for live/simple mode
source_path = None
target_path = None
output_path = None
# Paths
source_path: str | None = None
target_path: str | None = None
output_path: str | None = None
# Processing Options
frame_processors: List[str] = []
keep_fps = True
keep_audio = True
keep_frames = False
many_faces = False
map_faces = False
color_correction = False # New global variable for color correction toggle
nsfw_filter = False
video_encoder = None
video_quality = None
live_mirror = False
live_resizable = True
max_memory = None
execution_providers: List[str] = []
execution_threads = None
headless = None
log_level = "error"
keep_fps: bool = True
keep_audio: bool = True
keep_frames: bool = False
many_faces: bool = False # Process all detected faces with default source
map_faces: bool = False # Use souce_target_map or simple_map for specific swaps
color_correction: bool = False # Enable color correction (implementation specific)
nsfw_filter: bool = False
# Video Output Options
video_encoder: str | None = None
video_quality: int | None = None # Typically a CRF value or bitrate
# Live Mode Options
live_mirror: bool = False
live_resizable: bool = True
camera_input_combobox: Any | None = None # Placeholder for UI element if needed
webcam_preview_running: bool = False
show_fps: bool = False
# System Configuration
max_memory: int | None = None # Memory limit in GB? (Needs clarification)
execution_providers: List[str] = [] # e.g., ['CUDAExecutionProvider', 'CPUExecutionProvider']
execution_threads: int | None = None # Number of threads for CPU execution
headless: bool | None = None # Run without UI?
log_level: str = "error" # Logging level (e.g., 'debug', 'info', 'warning', 'error')
# Face Processor UI Toggles (Example)
fp_ui: Dict[str, bool] = {"face_enhancer": False}
camera_input_combobox = None
webcam_preview_running = False
show_fps = False
mouth_mask = False
show_mouth_mask_box = False
mask_feather_ratio = 8
mask_down_size = 0.50
mask_size = 1
# Face Swapper Specific Options
face_swapper_enabled: bool = True # General toggle for the swapper processor
opacity: float = 1.0 # Blend factor for the swapped face (0.0-1.0)
sharpness: float = 0.0 # Sharpness enhancement for swapped face (0.0-1.0+)
# Mouth Mask Options
mouth_mask: bool = False # Enable mouth area masking/pasting
show_mouth_mask_box: bool = False # Visualize the mouth mask area (for debugging)
mask_feather_ratio: int = 12 # Denominator for feathering calculation (higher = smaller feather)
mask_down_size: float = 0.1 # Expansion factor for lower lip mask (relative)
mask_size: float = 1.0 # Expansion factor for upper lip mask (relative)
# --- START: Added for Frame Interpolation ---
enable_interpolation: bool = True # Toggle temporal smoothing
interpolation_weight: float = 0 # Blend weight for current frame (0.0-1.0). Lower=smoother.
# --- END: Added for Frame Interpolation ---
# --- END OF FILE globals.py ---

View File

@ -1,3 +1,3 @@
name = 'Deep-Live-Cam'
version = '1.8.1'
version = '2.0c'
edition = 'GitHub Edition'

View File

@ -1,16 +1,18 @@
# --- START OF FILE face_enhancer.py ---
from typing import Any, List
import cv2
import threading
import gfpgan
import os
import platform
import torch # Make sure torch is imported
import modules.globals
import modules.processors.frame.core
from modules.core import update_status
from modules.face_analyser import get_one_face
from modules.typing import Frame, Face
import platform
import torch
from modules.utilities import (
conditional_download,
is_image,
@ -48,83 +50,157 @@ def pre_start() -> bool:
return True
TENSORRT_AVAILABLE = False
try:
import torch_tensorrt
TENSORRT_AVAILABLE = True
except ImportError as im:
print(f"TensorRT is not available: {im}")
pass
except Exception as e:
print(f"TensorRT is not available: {e}")
pass
def get_face_enhancer() -> Any:
"""
Initializes and returns the GFPGAN face enhancer instance,
prioritizing CUDA, then MPS (Mac), then CPU.
"""
global FACE_ENHANCER
with THREAD_LOCK:
if FACE_ENHANCER is None:
model_path = os.path.join(models_dir, "GFPGANv1.4.pth")
device = None
try:
# Priority 1: CUDA
if torch.cuda.is_available():
device = torch.device("cuda")
print(f"{NAME}: Using CUDA device.")
# Priority 2: MPS (Mac Silicon)
elif platform.system() == "Darwin" and torch.backends.mps.is_available():
device = torch.device("mps")
print(f"{NAME}: Using MPS device.")
# Priority 3: CPU
else:
device = torch.device("cpu")
print(f"{NAME}: Using CPU device.")
selected_device = None
device_priority = []
FACE_ENHANCER = gfpgan.GFPGANer(
model_path=model_path,
upscale=1, # upscale=1 means enhancement only, no resizing
arch='clean',
channel_multiplier=2,
bg_upsampler=None,
device=device
)
print(f"{NAME}: GFPGANer initialized successfully on {device}.")
if TENSORRT_AVAILABLE and torch.cuda.is_available():
selected_device = torch.device("cuda")
device_priority.append("TensorRT+CUDA")
elif torch.cuda.is_available():
selected_device = torch.device("cuda")
device_priority.append("CUDA")
elif torch.backends.mps.is_available() and platform.system() == "Darwin":
selected_device = torch.device("mps")
device_priority.append("MPS")
elif not torch.cuda.is_available():
selected_device = torch.device("cpu")
device_priority.append("CPU")
except Exception as e:
print(f"{NAME}: Error initializing GFPGANer: {e}")
# Fallback to CPU if initialization with GPU fails for some reason
if device is not None and device.type != 'cpu':
print(f"{NAME}: Falling back to CPU due to error.")
try:
device = torch.device("cpu")
FACE_ENHANCER = gfpgan.GFPGANer(
model_path=model_path,
upscale=1,
arch='clean',
channel_multiplier=2,
bg_upsampler=None,
device=device
)
print(f"{NAME}: GFPGANer initialized successfully on CPU after fallback.")
except Exception as fallback_e:
print(f"{NAME}: FATAL: Could not initialize GFPGANer even on CPU: {fallback_e}")
FACE_ENHANCER = None # Ensure it's None if totally failed
else:
# If it failed even on the first CPU attempt or device was already CPU
print(f"{NAME}: FATAL: Could not initialize GFPGANer on CPU: {e}")
FACE_ENHANCER = None # Ensure it's None if totally failed
FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1, device=selected_device)
# for debug:
print(f"Selected device: {selected_device} and device priority: {device_priority}")
# Check if enhancer is still None after attempting initialization
if FACE_ENHANCER is None:
raise RuntimeError(f"{NAME}: Failed to initialize GFPGANer. Check logs for errors.")
return FACE_ENHANCER
def enhance_face(temp_frame: Frame) -> Frame:
"""Enhances faces in a single frame using the global GFPGANer instance."""
# Ensure enhancer is ready
enhancer = get_face_enhancer()
try:
with THREAD_SEMAPHORE:
_, _, temp_frame = get_face_enhancer().enhance(temp_frame, paste_back=True)
# The enhance method returns: _, restored_faces, restored_img
_, _, restored_img = enhancer.enhance(
temp_frame,
has_aligned=False, # Assume faces are not pre-aligned
only_center_face=False, # Enhance all detected faces
paste_back=True # Paste enhanced faces back onto the original image
)
# GFPGAN might return None if no face is detected or an error occurs
if restored_img is None:
# print(f"{NAME}: Warning: GFPGAN enhancement returned None. Returning original frame.")
return temp_frame
return restored_img
except Exception as e:
print(f"{NAME}: Error during face enhancement: {e}")
# Return the original frame in case of error during enhancement
return temp_frame
def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
target_face = get_one_face(temp_frame)
if target_face:
def process_frame(source_face: Face | None, temp_frame: Frame) -> Frame:
"""Processes a frame: enhances face if detected."""
# We don't strictly need source_face for enhancement only
# Check if any face exists to potentially save processing time, though GFPGAN also does detection.
# For simplicity and ensuring enhancement is attempted if possible, we can rely on enhance_face.
# target_face = get_one_face(temp_frame) # This gets only ONE face
# If you want to enhance ONLY if a face is detected by your *own* analyser first:
# has_face = get_one_face(temp_frame) is not None # Or use get_many_faces
# if has_face:
# temp_frame = enhance_face(temp_frame)
# else: # Enhance regardless, let GFPGAN handle detection
temp_frame = enhance_face(temp_frame)
return temp_frame
def process_frames(
source_path: str, temp_frame_paths: List[str], progress: Any = None
source_path: str | None, temp_frame_paths: List[str], progress: Any = None
) -> None:
"""Processes multiple frames from file paths."""
for temp_frame_path in temp_frame_paths:
if not os.path.exists(temp_frame_path):
print(f"{NAME}: Warning: Frame path not found {temp_frame_path}, skipping.")
if progress:
progress.update(1)
continue
temp_frame = cv2.imread(temp_frame_path)
result = process_frame(None, temp_frame)
cv2.imwrite(temp_frame_path, result)
if temp_frame is None:
print(f"{NAME}: Warning: Failed to read frame {temp_frame_path}, skipping.")
if progress:
progress.update(1)
continue
result_frame = process_frame(None, temp_frame)
cv2.imwrite(temp_frame_path, result_frame)
if progress:
progress.update(1)
def process_image(source_path: str, target_path: str, output_path: str) -> None:
def process_image(source_path: str | None, target_path: str, output_path: str) -> None:
"""Processes a single image file."""
target_frame = cv2.imread(target_path)
result = process_frame(None, target_frame)
cv2.imwrite(output_path, result)
if target_frame is None:
print(f"{NAME}: Error: Failed to read target image {target_path}")
return
result_frame = process_frame(None, target_frame)
cv2.imwrite(output_path, result_frame)
print(f"{NAME}: Enhanced image saved to {output_path}")
def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
modules.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
def process_video(source_path: str | None, temp_frame_paths: List[str]) -> None:
"""Processes video frames using the frame processor core."""
# source_path might be optional depending on how process_video is called
modules.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames)
# Optional: Keep process_frame_v2 if it's used elsewhere, otherwise it's redundant
# def process_frame_v2(temp_frame: Frame) -> Frame:
# target_face = get_one_face(temp_frame)
# if target_face:
# temp_frame = enhance_face(temp_frame)
# return temp_frame
def process_frame_v2(temp_frame: Frame) -> Frame:
target_face = get_one_face(temp_frame)
if target_face:
temp_frame = enhance_face(temp_frame)
return temp_frame
# --- END OF FILE face_enhancer.py ---

View File

@ -0,0 +1,609 @@
import cv2
import numpy as np
from modules.typing import Face, Frame
import modules.globals
def apply_color_transfer(source, target):
    """
    Transfer the colour statistics of ``target`` onto ``source``.

    Both images are converted to LAB space; ``source`` is normalised by its
    per-channel mean/std, re-scaled to ``target``'s statistics, clipped, and
    converted back to BGR.

    Args:
        source: BGR uint8 image whose colours are adjusted.
        target: BGR uint8 image providing the reference colour statistics.

    Returns:
        BGR uint8 image with the same shape as ``source``.
    """
    source_lab = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
    target_lab = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")

    source_mean, source_std = cv2.meanStdDev(source_lab)
    target_mean, target_std = cv2.meanStdDev(target_lab)

    # Reshape to (1, 1, 3) so the statistics broadcast over H x W pixels.
    source_mean = source_mean.reshape(1, 1, 3)
    source_std = source_std.reshape(1, 1, 3)
    target_mean = target_mean.reshape(1, 1, 3)
    target_std = target_std.reshape(1, 1, 3)

    # Guard against a zero std (perfectly flat channel) which would divide by
    # zero and fill the result with NaNs; behaviour is unchanged otherwise.
    safe_source_std = np.where(source_std == 0, 1.0, source_std)

    transferred = (source_lab - source_mean) * (target_std / safe_source_std) + target_mean
    return cv2.cvtColor(np.clip(transferred, 0, 255).astype("uint8"), cv2.COLOR_LAB2BGR)
def create_face_mask(face: Face, frame: Frame) -> np.ndarray:
    """Build a soft 8-bit mask (0/255, blurred edges) covering the face.

    Args:
        face: insightface face with a ``landmark_2d_106`` attribute.
        frame: frame the mask is sized against.

    Returns:
        uint8 mask with the same H x W as ``frame``; all zeros when the face
        has no 106-point landmarks.
    """
    mask = np.zeros(frame.shape[:2], dtype=np.uint8)
    landmarks = face.landmark_2d_106
    if landmarks is not None:
        landmarks = landmarks.astype(np.int32)

        # 106-point landmark layout (insightface): jaw sides, eyes, brows.
        # Eye/brow slices are retained for parity with upstream variants that
        # use them for forehead estimation -- TODO confirm against upstream.
        right_side_face = landmarks[0:16]
        left_side_face = landmarks[17:32]
        right_eye = landmarks[33:42]
        right_eye_brow = landmarks[43:51]
        left_eye = landmarks[87:96]
        left_eye_brow = landmarks[97:105]

        # NOTE(review): the original hunk referenced `face_outline` without
        # ever defining it (NameError). Reconstructed here as the jaw contour
        # (right side, then the left side reversed) -- confirm upstream.
        face_outline = np.vstack([right_side_face, left_side_face[::-1]])

        # Pad the hull by ~5% of the face width so the mask extends slightly
        # past the detected contour.
        padding = int(
            np.linalg.norm(right_side_face[0] - left_side_face[-1]) * 0.05
        )

        hull = cv2.convexHull(face_outline)
        center = np.mean(face_outline, axis=0)  # hoisted: loop-invariant
        hull_padded = []
        for point in hull:
            x, y = point[0]
            direction = np.array([x, y]) - center
            direction = direction / np.linalg.norm(direction)
            hull_padded.append(np.array([x, y]) + direction * padding)
        hull_padded = np.array(hull_padded, dtype=np.int32)

        cv2.fillConvexPoly(mask, hull_padded, 255)
        # Feather the edge so downstream blending leaves no hard seam.
        mask = cv2.GaussianBlur(mask, (5, 5), 3)
    return mask
def create_lower_mouth_mask(
    face: Face, frame: Frame
) -> (np.ndarray, np.ndarray, tuple, np.ndarray):
    """Build a feathered mask over the lower-mouth/chin area of ``face``.

    Returns:
        mask: uint8 full-frame mask (feathered 255 inside the lip polygon).
        mouth_cutout: BGR copy of the boxed region, or None without landmarks.
        box: (min_x, min_y, max_x, max_y) bounding box of the masked region.
        lower_lip_polygon: expanded polygon in frame coordinates, or None.
    """
    mask = np.zeros(frame.shape[:2], dtype=np.uint8)
    mouth_cutout = None
    landmarks = face.landmark_2d_106
    if landmarks is None:
        # Safe defaults: the original fell through to a return that referenced
        # box coordinates never assigned on this path (NameError).
        return mask, mouth_cutout, (0, 0, 0, 0), None

    # Ordered walk around the lower lip / chin in the 106-point layout;
    # the first/last entry (65) closes the polygon.
    lower_lip_order = [
        65, 66, 62, 70, 69, 18, 19, 20, 21, 22,
        23, 24, 0, 8, 7, 6, 5, 4, 3, 2, 65,
    ]
    # Float for precise expansion arithmetic below.
    lower_lip_landmarks = landmarks[lower_lip_order].astype(np.float32)

    # NOTE(review): `mouth_mask_size` is not declared in modules/globals.py in
    # this commit; default to 1.0 so a missing attribute cannot crash -- TODO
    # confirm the intended slider wiring.
    mouth_mask_size = getattr(modules.globals, "mouth_mask_size", 1.0)

    # Expand every point radially away from the centroid.
    center = np.mean(lower_lip_landmarks, axis=0)
    expansion_factor = 1 + modules.globals.mask_down_size * mouth_mask_size
    expanded_landmarks = (lower_lip_landmarks - center) * expansion_factor + center

    # Push the top-lip points further out along their radial direction.
    toplip_indices = [20, 0, 1, 2, 3, 4, 5]  # landmarks 2, 65, 66, 62, 70, 69, 18
    toplip_extension = modules.globals.mask_size * mouth_mask_size * 0.5
    for idx in toplip_indices:
        direction = expanded_landmarks[idx] - center
        direction = direction / np.linalg.norm(direction)
        expanded_landmarks[idx] += direction * toplip_extension

    # Stretch the chin points downward (y only).
    chin_indices = [11, 12, 13, 14, 15, 16]  # landmarks 21, 22, 23, 24, 0, 8
    chin_extension = 2 * 0.2
    for idx in chin_indices:
        expanded_landmarks[idx][1] += (
            expanded_landmarks[idx][1] - center[1]
        ) * chin_extension

    expanded_landmarks = expanded_landmarks.astype(np.int32)

    # Bounding box with 10% padding, clamped to the frame.
    min_x, min_y = np.min(expanded_landmarks, axis=0)
    max_x, max_y = np.max(expanded_landmarks, axis=0)
    padding = int((max_x - min_x) * 0.1)
    min_x = max(0, min_x - padding)
    min_y = max(0, min_y - padding)
    max_x = min(frame.shape[1], max_x + padding)
    max_y = min(frame.shape[0], max_y + padding)

    # Guarantee a box of at least 1x1 pixels.
    if max_x <= min_x:
        max_x = min_x + 1
    if max_y <= min_y:
        max_y = min_y + 1

    # Rasterize the polygon into an ROI-sized mask and feather its edges.
    mask_roi = np.zeros((max_y - min_y, max_x - min_x), dtype=np.uint8)
    cv2.fillPoly(mask_roi, [expanded_landmarks - [min_x, min_y]], 255)
    mask_roi = cv2.GaussianBlur(mask_roi, (15, 15), 5)
    mask[min_y:max_y, min_x:max_x] = mask_roi

    mouth_cutout = frame[min_y:max_y, min_x:max_x].copy()
    # Polygon stays in original frame coordinates for visualization/blending.
    lower_lip_polygon = expanded_landmarks

    return mask, mouth_cutout, (min_x, min_y, max_x, max_y), lower_lip_polygon
def create_eyes_mask(face: Face, frame: Frame) -> (np.ndarray, np.ndarray, tuple, np.ndarray):
    """Build a feathered elliptical mask covering both eyes.

    Returns:
        mask: uint8 full-frame mask with one blurred ellipse per eye.
        eyes_cutout: BGR copy of the boxed region, or None without landmarks.
        box: (min_x, min_y, max_x, max_y) bounding box of both eyes.
        eyes_polygon: 32-point outlines of both ellipses stacked, or None.
    """
    mask = np.zeros(frame.shape[:2], dtype=np.uint8)
    eyes_cutout = None
    landmarks = face.landmark_2d_106
    if landmarks is None:
        # Safe defaults instead of referencing box coordinates that are never
        # assigned on this path (NameError in the original).
        return mask, eyes_cutout, (0, 0, 0, 0), None

    # Left eye landmarks (87-96) and right eye landmarks (33-42).
    left_eye = landmarks[87:96]
    right_eye = landmarks[33:42]
    left_eye_center = np.mean(left_eye, axis=0).astype(np.int32)
    right_eye_center = np.mean(right_eye, axis=0).astype(np.int32)

    # NOTE(review): `eyes_mask_size` is not declared in modules/globals.py in
    # this commit; default to 1.0 to avoid AttributeError -- TODO confirm.
    eyes_mask_size = getattr(modules.globals, "eyes_mask_size", 1.0)

    def get_eye_dimensions(eye_points):
        # Axis-aligned extent, scaled by the user-controlled mask size.
        x_coords = eye_points[:, 0]
        y_coords = eye_points[:, 1]
        scale = 1 + modules.globals.mask_down_size * eyes_mask_size
        width = int((np.max(x_coords) - np.min(x_coords)) * scale)
        height = int((np.max(y_coords) - np.min(y_coords)) * scale)
        return width, height

    left_width, left_height = get_eye_dimensions(left_eye)
    right_width, right_height = get_eye_dimensions(right_eye)
    # Extra padding: 20% of the wider eye.
    padding = int(max(left_width, right_width) * 0.2)

    # Bounding box around both eyes, clamped to the frame.
    min_x = min(left_eye_center[0] - left_width // 2, right_eye_center[0] - right_width // 2) - padding
    max_x = max(left_eye_center[0] + left_width // 2, right_eye_center[0] + right_width // 2) + padding
    min_y = min(left_eye_center[1] - left_height // 2, right_eye_center[1] - right_height // 2) - padding
    max_y = max(left_eye_center[1] + left_height // 2, right_eye_center[1] + right_height // 2) + padding
    min_x = max(0, min_x)
    min_y = max(0, min_y)
    max_x = min(frame.shape[1], max_x)
    max_y = min(frame.shape[0], max_y)

    # Draw one filled ellipse per eye in ROI coordinates, then feather.
    mask_roi = np.zeros((max_y - min_y, max_x - min_x), dtype=np.uint8)
    left_center = (left_eye_center[0] - min_x, left_eye_center[1] - min_y)
    right_center = (right_eye_center[0] - min_x, right_eye_center[1] - min_y)
    left_axes = (left_width // 2, left_height // 2)
    right_axes = (right_width // 2, right_height // 2)
    cv2.ellipse(mask_roi, left_center, left_axes, 0, 0, 360, 255, -1)
    cv2.ellipse(mask_roi, right_center, right_axes, 0, 0, 360, 255, -1)
    mask_roi = cv2.GaussianBlur(mask_roi, (15, 15), 5)
    mask[min_y:max_y, min_x:max_x] = mask_roi

    eyes_cutout = frame[min_y:max_y, min_x:max_x].copy()

    def create_ellipse_points(center, axes):
        # 32-point polyline approximation of an ellipse, for visualization.
        t = np.linspace(0, 2 * np.pi, 32)
        x = center[0] + axes[0] * np.cos(t)
        y = center[1] + axes[1] * np.sin(t)
        return np.column_stack((x, y)).astype(np.int32)

    left_points = create_ellipse_points((left_eye_center[0], left_eye_center[1]), (left_width // 2, left_height // 2))
    right_points = create_ellipse_points((right_eye_center[0], right_eye_center[1]), (right_width // 2, right_height // 2))
    eyes_polygon = np.vstack([left_points, right_points])

    return mask, eyes_cutout, (min_x, min_y, max_x, max_y), eyes_polygon
def create_curved_eyebrow(points):
    """Return a padded, smoothly curved closed contour for an eyebrow.

    Fits a quadratic through the landmark points, offsets the curve up and
    down to give the brow thickness, adds tapered end caps, and inflates the
    resulting outline by 20% about its centroid. Inputs with fewer than 5
    points are returned unchanged.
    """
    if len(points) < 5:
        return points

    # Work left-to-right along the brow.
    ordered = points[np.argsort(points[:, 0])]
    x_min, y_min = np.min(ordered, axis=0)
    x_max, y_max = np.max(ordered, axis=0)
    width = x_max - x_min
    height = y_max - y_min

    # Dense sampling along x for a smooth arch.
    sample_x = np.linspace(x_min, x_max, 50)
    quad = np.polyfit(ordered[:, 0], ordered[:, 1], 2)
    sample_y = np.polyval(quad, sample_x)

    # Vertical offsets give the brow its thickness (more above than below).
    upper_curve = sample_y - height * 0.5
    lower_curve = sample_y + height * 0.2

    # Tapered end caps: 5 points each, extending 15% of the width outward.
    cap_len = 5
    lead_x = np.linspace(sample_x[0] - width * 0.15, sample_x[0], cap_len)
    tail_x = np.linspace(sample_x[-1], sample_x[-1] + width * 0.15, cap_len)
    lead_cap = np.column_stack((
        lead_x,
        np.linspace(lower_curve[0], upper_curve[0], cap_len),
    ))
    tail_cap = np.column_stack((
        tail_x,
        np.linspace(lower_curve[-1], upper_curve[-1], cap_len),
    ))

    # Walk the outline: lead cap, top edge, tail cap, bottom edge reversed.
    outline = np.vstack([
        lead_cap,
        np.column_stack((sample_x, upper_curve)),
        tail_cap,
        np.column_stack((sample_x[::-1], lower_curve[::-1])),
    ])

    # Inflate by 20% about the centroid for a little extra coverage.
    centroid = np.mean(outline, axis=0)
    return centroid + (outline - centroid) * 1.2
def create_eyebrows_mask(face: Face, frame: Frame) -> (np.ndarray, np.ndarray, tuple, np.ndarray):
    """Build a feathered mask covering both eyebrows.

    Uses the module-level ``create_curved_eyebrow`` to synthesize smooth brow
    contours (the original defined an identical nested copy of it), falling
    back to simple landmark polygons if the curve fitting fails.

    Returns:
        mask: uint8 full-frame mask with feathered brow shapes.
        eyebrows_cutout: BGR copy of the boxed region, or None.
        box: (min_x, min_y, max_x, max_y) of the brow region.
        eyebrows_polygon: int32 outline points of both brows, or None.
    """
    mask = np.zeros(frame.shape[:2], dtype=np.uint8)
    eyebrows_cutout = None
    landmarks = face.landmark_2d_106
    if landmarks is None:
        # Safe defaults instead of referencing box coordinates never assigned
        # on this path.
        return mask, eyebrows_cutout, (0, 0, 0, 0), None

    # Left eyebrow landmarks (97-105) and right eyebrow landmarks (43-51).
    left_eyebrow = landmarks[97:105].astype(np.float32)
    right_eyebrow = landmarks[43:51].astype(np.float32)

    # NOTE(review): `eyebrows_mask_size` is not declared in modules/globals.py
    # in this commit; default to 1.0 to avoid AttributeError -- TODO confirm.
    padding_factor = getattr(modules.globals, "eyebrows_mask_size", 1.0)

    # Bounding box with size-scaled padding, clamped to the frame.
    all_points = np.vstack([left_eyebrow, right_eyebrow])
    min_x = np.min(all_points[:, 0]) - 25 * padding_factor
    max_x = np.max(all_points[:, 0]) + 25 * padding_factor
    min_y = np.min(all_points[:, 1]) - 20 * padding_factor
    max_y = np.max(all_points[:, 1]) + 15 * padding_factor
    min_x = max(0, int(min_x))
    min_y = max(0, int(min_y))
    max_x = min(frame.shape[1], int(max_x))
    max_y = min(frame.shape[0], int(max_y))

    mask_roi = np.zeros((max_y - min_y, max_x - min_x), dtype=np.uint8)
    try:
        # Work in ROI-local coordinates.
        left_local = left_eyebrow - [min_x, min_y]
        right_local = right_eyebrow - [min_x, min_y]

        left_shape = create_curved_eyebrow(left_local)
        right_shape = create_curved_eyebrow(right_local)

        # NOTE(review): the original hunk blurred mask_roi without ever
        # drawing the generated shapes into it, leaving an all-zero mask.
        # Restored the fills here -- confirm against upstream.
        cv2.fillPoly(mask_roi, [left_shape.astype(np.int32)], 255)
        cv2.fillPoly(mask_roi, [right_shape.astype(np.int32)], 255)

        # Multi-stage blur: coarse -> medium -> fine feathering.
        mask_roi = cv2.GaussianBlur(mask_roi, (21, 21), 7)
        mask_roi = cv2.GaussianBlur(mask_roi, (11, 11), 3)
        mask_roi = cv2.GaussianBlur(mask_roi, (5, 5), 1)
        mask_roi = cv2.normalize(mask_roi, None, 0, 255, cv2.NORM_MINMAX)

        mask[min_y:max_y, min_x:max_x] = mask_roi
        eyebrows_cutout = frame[min_y:max_y, min_x:max_x].copy()

        # Outline in frame coordinates, for visualization.
        eyebrows_polygon = np.vstack([
            left_shape + [min_x, min_y],
            right_shape + [min_x, min_y],
        ]).astype(np.int32)
    except Exception:
        # Fallback: simple filled landmark polygons with a single blur.
        left_local = left_eyebrow - [min_x, min_y]
        right_local = right_eyebrow - [min_x, min_y]
        mask_roi = np.zeros((max_y - min_y, max_x - min_x), dtype=np.uint8)
        cv2.fillPoly(mask_roi, [left_local.astype(np.int32)], 255)
        cv2.fillPoly(mask_roi, [right_local.astype(np.int32)], 255)
        mask_roi = cv2.GaussianBlur(mask_roi, (21, 21), 7)
        mask[min_y:max_y, min_x:max_x] = mask_roi
        eyebrows_cutout = frame[min_y:max_y, min_x:max_x].copy()
        eyebrows_polygon = np.vstack([left_eyebrow, right_eyebrow]).astype(np.int32)

    return mask, eyebrows_cutout, (min_x, min_y, max_x, max_y), eyebrows_polygon
def apply_mask_area(
    frame: np.ndarray,
    cutout: np.ndarray,
    box: tuple,
    face_mask: np.ndarray,
    polygon: np.ndarray,
) -> np.ndarray:
    """Blend ``cutout`` back into ``frame`` inside ``box``.

    The cutout is colour-matched to the destination ROI, masked by the
    feathered ``polygon`` and gated by ``face_mask``, then written back into
    ``frame`` in place. On any missing input or internal failure the frame is
    returned unchanged (deliberate best-effort behaviour).
    """
    if cutout is None or face_mask is None or polygon is None:
        return frame

    min_x, min_y, max_x, max_y = box
    box_width = max_x - min_x
    box_height = max_y - min_y
    # The original tested `box_width is None` / `box_height is None`, which
    # can never be true for int arithmetic; check for a degenerate box instead.
    if box_width <= 0 or box_height <= 0:
        return frame

    try:
        resized_cutout = cv2.resize(cutout, (box_width, box_height))
        roi = frame[min_y:max_y, min_x:max_x]
        if roi.shape != resized_cutout.shape:
            resized_cutout = cv2.resize(
                resized_cutout, (roi.shape[1], roi.shape[0])
            )
        color_corrected_area = apply_color_transfer(resized_cutout, roi)

        # Rasterize the polygon in ROI-local coordinates.
        polygon_mask = np.zeros(roi.shape[:2], dtype=np.uint8)
        if len(polygon) > 50:  # heuristic: two parts (e.g. both eyes/brows)
            mid_point = len(polygon) // 2
            cv2.fillPoly(polygon_mask, [polygon[:mid_point] - [min_x, min_y]], 255)
            cv2.fillPoly(polygon_mask, [polygon[mid_point:] - [min_x, min_y]], 255)
        else:
            cv2.fillPoly(polygon_mask, [polygon - [min_x, min_y]], 255)

        # Strong initial feathering, then box-size-dependent feathering.
        polygon_mask = cv2.GaussianBlur(polygon_mask, (21, 21), 7)
        feather_amount = max(
            1,  # sigma must be positive; tiny boxes would yield 0
            min(
                30,
                box_width // modules.globals.mask_feather_ratio,
                box_height // modules.globals.mask_feather_ratio,
            ),
        )
        feathered_mask = cv2.GaussianBlur(
            polygon_mask.astype(float), (0, 0), feather_amount
        )
        mask_peak = feathered_mask.max()
        if mask_peak == 0:
            # Polygon fell entirely outside the ROI; normalizing would divide
            # by zero and there is nothing to blend anyway.
            return frame
        feathered_mask = feathered_mask / mask_peak
        # Final light smoothing of the mask edges.
        feathered_mask = cv2.GaussianBlur(feathered_mask, (5, 5), 1)

        face_mask_roi = face_mask[min_y:max_y, min_x:max_x]
        combined_mask = (feathered_mask * (face_mask_roi / 255.0))[:, :, np.newaxis]
        blended = (
            color_corrected_area * combined_mask + roi * (1 - combined_mask)
        ).astype(np.uint8)

        # Gate the blend once more by the full face mask.
        face_mask_3channel = (
            np.repeat(face_mask_roi[:, :, np.newaxis], 3, axis=2) / 255.0
        )
        final_blend = blended * face_mask_3channel + roi * (1 - face_mask_3channel)
        frame[min_y:max_y, min_x:max_x] = final_blend.astype(np.uint8)
    except Exception:
        # Deliberate best-effort: leave the frame untouched on failure.
        pass
    return frame
def draw_mask_visualization(
    frame: Frame,
    mask_data: tuple,
    label: str,
    draw_method: str = "polygon"
) -> Frame:
    """Draw a green outline of a mask region plus a text label on a copy of
    ``frame`` and return it.

    ``mask_data`` is the (mask, cutout, box, polygon) tuple produced by the
    create_*_mask helpers. ``draw_method="ellipse"`` fits ellipses to a
    two-part polygon (eyes); everything else is drawn as polylines.
    """
    mask, cutout, (min_x, min_y, max_x, max_y), polygon = mask_data
    vis_frame = frame.copy()

    # Clamp the box to the frame.
    frame_h, frame_w = vis_frame.shape[:2]
    min_x, min_y = max(0, min_x), max(0, min_y)
    max_x, max_y = min(frame_w, max_x), min(frame_h, max_y)

    if draw_method == "ellipse" and len(polygon) > 50:  # eyes: two parts
        half = len(polygon) // 2
        left_points = polygon[:half]
        right_points = polygon[half:]
        try:
            # cv2.fitEllipse needs at least 5 float points per part.
            if len(left_points) >= 5 and len(right_points) >= 5:
                left_points = left_points.astype(np.float32)
                right_points = right_points.astype(np.float32)
                cv2.ellipse(vis_frame, cv2.fitEllipse(left_points), (0, 255, 0), 2)
                cv2.ellipse(vis_frame, cv2.fitEllipse(right_points), (0, 255, 0), 2)
        except Exception:
            # Ellipse fitting failed: fall back to bounding rectangles.
            for rect in (cv2.boundingRect(left_points), cv2.boundingRect(right_points)):
                cv2.rectangle(
                    vis_frame,
                    (rect[0], rect[1]),
                    (rect[0] + rect[2], rect[1] + rect[3]),
                    (0, 255, 0),
                    2,
                )
    else:  # mouth / eyebrows: polyline outlines
        if len(polygon) > 50:  # two parts
            half = len(polygon) // 2
            cv2.polylines(vis_frame, [polygon[:half]], True, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.polylines(vis_frame, [polygon[half:]], True, (0, 255, 0), 2, cv2.LINE_AA)
        else:
            cv2.polylines(vis_frame, [polygon], True, (0, 255, 0), 2, cv2.LINE_AA)

    # Label just above the region.
    cv2.putText(
        vis_frame,
        label,
        (min_x, min_y - 10),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.5,
        (255, 255, 255),
        1,
    )
    return vis_frame

File diff suppressed because it is too large Load Diff

9
modules/run.py Normal file
View File

@ -0,0 +1,9 @@
#!/usr/bin/env python3
# Application entry point wrapper.
# Import the tkinter fix to patch the ScreenChanged error
# (tkinter_fix applies its monkey-patch as an import side effect).
import tkinter_fix
import core

if __name__ == '__main__':
    core.run()

26
modules/tkinter_fix.py Normal file
View File

@ -0,0 +1,26 @@
import tkinter

# Only needs to be imported once at the beginning of the application


def apply_patch():
    """Monkey-patch ``tkinter.Tk.__init__`` to define ``::tk::ScreenChanged``.

    Some Tk builds invoke the ``::tk::ScreenChanged`` proc without defining
    it, which raises a TclError. After patching, every new ``Tk`` instance
    defines a no-op version of the proc (only if it is still missing).

    Safe to call multiple times: the original wrapped ``__init__`` on every
    call, so repeated imports/calls nested the patch; a marker attribute on
    the patched function now prevents double-wrapping.
    """
    if getattr(tkinter.Tk.__init__, "_screen_changed_patched", False):
        return  # already patched; do not wrap again

    original_init = tkinter.Tk.__init__

    def patched_init(self, *args, **kwargs):
        # Run the real initializer first so self.tk exists.
        original_init(self, *args, **kwargs)
        # Define the missing ::tk::ScreenChanged procedure as a no-op.
        self.tk.eval("""
        if {[info commands ::tk::ScreenChanged] == ""} {
            proc ::tk::ScreenChanged {args} {
                # Do nothing
                return
            }
        }
        """)

    patched_init._screen_changed_patched = True
    tkinter.Tk.__init__ = patched_init


# Apply the patch automatically when this module is imported
apply_patch()

View File

@ -27,6 +27,7 @@ from modules.utilities import (
)
from modules.video_capture import VideoCapturer
from modules.gettext import LanguageManager
from modules import globals
import platform
if platform.system() == "Windows":
@ -35,7 +36,7 @@ if platform.system() == "Windows":
ROOT = None
POPUP = None
POPUP_LIVE = None
ROOT_HEIGHT = 700
ROOT_HEIGHT = 750
ROOT_WIDTH = 600
PREVIEW = None
@ -152,20 +153,20 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
root.protocol("WM_DELETE_WINDOW", lambda: destroy())
source_label = ctk.CTkLabel(root, text=None)
source_label.place(relx=0.1, rely=0.1, relwidth=0.3, relheight=0.25)
source_label.place(relx=0.1, rely=0.05, relwidth=0.275, relheight=0.225)
target_label = ctk.CTkLabel(root, text=None)
target_label.place(relx=0.6, rely=0.1, relwidth=0.3, relheight=0.25)
target_label.place(relx=0.6, rely=0.05, relwidth=0.275, relheight=0.225)
select_face_button = ctk.CTkButton(
root, text=_("Select a face"), cursor="hand2", command=lambda: select_source_path()
)
select_face_button.place(relx=0.1, rely=0.4, relwidth=0.3, relheight=0.1)
select_face_button.place(relx=0.1, rely=0.30, relwidth=0.3, relheight=0.1)
swap_faces_button = ctk.CTkButton(
root, text="", cursor="hand2", command=lambda: swap_faces_paths()
)
swap_faces_button.place(relx=0.45, rely=0.4, relwidth=0.1, relheight=0.1)
swap_faces_button.place(relx=0.45, rely=0.30, relwidth=0.1, relheight=0.1)
select_target_button = ctk.CTkButton(
root,
@ -173,7 +174,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
cursor="hand2",
command=lambda: select_target_path(),
)
select_target_button.place(relx=0.6, rely=0.4, relwidth=0.3, relheight=0.1)
select_target_button.place(relx=0.6, rely=0.30, relwidth=0.3, relheight=0.1)
keep_fps_value = ctk.BooleanVar(value=modules.globals.keep_fps)
keep_fps_checkbox = ctk.CTkSwitch(
@ -186,7 +187,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
save_switch_states(),
),
)
keep_fps_checkbox.place(relx=0.1, rely=0.6)
keep_fps_checkbox.place(relx=0.1, rely=0.5)
keep_frames_value = ctk.BooleanVar(value=modules.globals.keep_frames)
keep_frames_switch = ctk.CTkSwitch(
@ -199,7 +200,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
save_switch_states(),
),
)
keep_frames_switch.place(relx=0.1, rely=0.65)
keep_frames_switch.place(relx=0.1, rely=0.55)
enhancer_value = ctk.BooleanVar(value=modules.globals.fp_ui["face_enhancer"])
enhancer_switch = ctk.CTkSwitch(
@ -212,7 +213,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
save_switch_states(),
),
)
enhancer_switch.place(relx=0.1, rely=0.7)
enhancer_switch.place(relx=0.1, rely=0.6)
keep_audio_value = ctk.BooleanVar(value=modules.globals.keep_audio)
keep_audio_switch = ctk.CTkSwitch(
@ -225,7 +226,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
save_switch_states(),
),
)
keep_audio_switch.place(relx=0.6, rely=0.6)
keep_audio_switch.place(relx=0.6, rely=0.5)
many_faces_value = ctk.BooleanVar(value=modules.globals.many_faces)
many_faces_switch = ctk.CTkSwitch(
@ -238,7 +239,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
save_switch_states(),
),
)
many_faces_switch.place(relx=0.6, rely=0.65)
many_faces_switch.place(relx=0.6, rely=0.55)
color_correction_value = ctk.BooleanVar(value=modules.globals.color_correction)
color_correction_switch = ctk.CTkSwitch(
@ -251,7 +252,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
save_switch_states(),
),
)
color_correction_switch.place(relx=0.6, rely=0.70)
color_correction_switch.place(relx=0.6, rely=0.6)
# nsfw_value = ctk.BooleanVar(value=modules.globals.nsfw_filter)
# nsfw_switch = ctk.CTkSwitch(root, text='NSFW filter', variable=nsfw_value, cursor='hand2', command=lambda: setattr(modules.globals, 'nsfw_filter', nsfw_value.get()))
@ -269,7 +270,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
close_mapper_window() if not map_faces.get() else None
),
)
map_faces_switch.place(relx=0.1, rely=0.75)
map_faces_switch.place(relx=0.1, rely=0.65)
show_fps_value = ctk.BooleanVar(value=modules.globals.show_fps)
show_fps_switch = ctk.CTkSwitch(
@ -282,7 +283,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
save_switch_states(),
),
)
show_fps_switch.place(relx=0.6, rely=0.75)
show_fps_switch.place(relx=0.6, rely=0.65)
mouth_mask_var = ctk.BooleanVar(value=modules.globals.mouth_mask)
mouth_mask_switch = ctk.CTkSwitch(
@ -292,7 +293,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
cursor="hand2",
command=lambda: setattr(modules.globals, "mouth_mask", mouth_mask_var.get()),
)
mouth_mask_switch.place(relx=0.1, rely=0.55)
mouth_mask_switch.place(relx=0.1, rely=0.45)
show_mouth_mask_box_var = ctk.BooleanVar(value=modules.globals.show_mouth_mask_box)
show_mouth_mask_box_switch = ctk.CTkSwitch(
@ -304,7 +305,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
modules.globals, "show_mouth_mask_box", show_mouth_mask_box_var.get()
),
)
show_mouth_mask_box_switch.place(relx=0.6, rely=0.55)
show_mouth_mask_box_switch.place(relx=0.6, rely=0.45)
start_button = ctk.CTkButton(
root, text=_("Start"), cursor="hand2", command=lambda: analyze_target(start, root)
@ -365,6 +366,72 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
live_button.place(relx=0.65, rely=0.86, relwidth=0.2, relheight=0.05)
# --- End Camera Selection ---
# 1) Define a DoubleVar for transparency (0 = fully transparent, 1 = fully opaque)
transparency_var = ctk.DoubleVar(value=1.0)
def on_transparency_change(value: float):
# Convert slider value to float
val = float(value)
modules.globals.opacity = val # Set global opacity
percentage = int(val * 100)
if percentage == 0:
modules.globals.fp_ui["face_enhancer"] = False
update_status("Transparency set to 0% - Face swapping disabled.")
elif percentage == 100:
modules.globals.face_swapper_enabled = True
update_status("Transparency set to 100%.")
else:
modules.globals.face_swapper_enabled = True
update_status(f"Transparency set to {percentage}%")
# 2) Transparency label and slider (placed ABOVE sharpness)
transparency_label = ctk.CTkLabel(root, text="Transparency:")
transparency_label.place(relx=0.15, rely=0.69, relwidth=0.2, relheight=0.05)
transparency_slider = ctk.CTkSlider(
root,
from_=0.0,
to=1.0,
variable=transparency_var,
command=on_transparency_change,
fg_color="#E0E0E0",
progress_color="#007BFF",
button_color="#FFFFFF",
button_hover_color="#CCCCCC",
height=5,
border_width=1,
corner_radius=3,
)
transparency_slider.place(relx=0.35, rely=0.71, relwidth=0.5, relheight=0.02)
# 3) Sharpness label & slider
sharpness_var = ctk.DoubleVar(value=0.0) # start at 0.0
def on_sharpness_change(value: float):
modules.globals.sharpness = float(value)
update_status(f"Sharpness set to {value:.1f}")
sharpness_label = ctk.CTkLabel(root, text="Sharpness:")
sharpness_label.place(relx=0.15, rely=0.74, relwidth=0.2, relheight=0.05)
sharpness_slider = ctk.CTkSlider(
root,
from_=0.0,
to=5.0,
variable=sharpness_var,
command=on_sharpness_change,
fg_color="#E0E0E0",
progress_color="#007BFF",
button_color="#FFFFFF",
button_hover_color="#CCCCCC",
height=5,
border_width=1,
corner_radius=3,
)
sharpness_slider.place(relx=0.35, rely=0.76, relwidth=0.5, relheight=0.02)
# Status and link at the bottom
global status_label
status_label = ctk.CTkLabel(root, text=None, justify="center")
status_label.place(relx=0.1, rely=0.9, relwidth=0.8)
@ -381,6 +448,7 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C
return root
def close_mapper_window():
global POPUP, POPUP_LIVE
if POPUP and POPUP.winfo_exists():
@ -397,7 +465,7 @@ def analyze_target(start: Callable[[], None], root: ctk.CTk):
return
if modules.globals.map_faces:
modules.globals.source_target_map = []
modules.globals.souce_target_map = []
if is_image(modules.globals.target_path):
update_status("Getting unique faces")
@ -406,8 +474,8 @@ def analyze_target(start: Callable[[], None], root: ctk.CTk):
update_status("Getting unique faces")
get_unique_faces_from_target_video()
if len(modules.globals.source_target_map) > 0:
create_source_target_popup(start, root, modules.globals.source_target_map)
if len(modules.globals.souce_target_map) > 0:
create_source_target_popup(start, root, modules.globals.souce_target_map)
else:
update_status("No faces found in target")
else:
@ -429,7 +497,7 @@ def create_source_target_popup(
POPUP.destroy()
select_output_path(start)
else:
update_pop_status("At least 1 source with target is required!")
update_pop_status("At least 1 source with target is required!")
scrollable_frame = ctk.CTkScrollableFrame(
POPUP, width=POPUP_SCROLL_WIDTH, height=POPUP_SCROLL_HEIGHT
@ -489,7 +557,7 @@ def update_popup_source(
global source_label_dict
source_path = ctk.filedialog.askopenfilename(
title=_("select a source image"),
title=_("select a source image"),
initialdir=RECENT_DIRECTORY_SOURCE,
filetypes=[img_ft],
)
@ -584,7 +652,7 @@ def select_source_path() -> None:
PREVIEW.withdraw()
source_path = ctk.filedialog.askopenfilename(
title=_("select a source image"),
title=_("select a source image"),
initialdir=RECENT_DIRECTORY_SOURCE,
filetypes=[img_ft],
)
@ -627,7 +695,7 @@ def select_target_path() -> None:
PREVIEW.withdraw()
target_path = ctk.filedialog.askopenfilename(
title=_("select a target image or video"),
title=_("select a target image or video"),
initialdir=RECENT_DIRECTORY_TARGET,
filetypes=[img_ft, vid_ft],
)
@ -696,21 +764,17 @@ def check_and_ignore_nsfw(target, destroy: Callable = None) -> bool:
def fit_image_to_size(image, width: int, height: int):
if width is None or height is None or width <= 0 or height <= 0:
if width is None and height is None:
return image
h, w, _ = image.shape
ratio_h = 0.0
ratio_w = 0.0
ratio_w = width / w
if width > height:
ratio_h = height / h
# Use the smaller ratio to ensure the image fits within the given dimensions
ratio = min(ratio_w, ratio_h)
# Compute new dimensions, ensuring they're at least 1 pixel
new_width = max(1, int(ratio * w))
new_height = max(1, int(ratio * h))
new_size = (new_width, new_height)
else:
ratio_w = width / w
ratio = max(ratio_w, ratio_h)
new_size = (int(ratio * w), int(ratio * h))
return cv2.resize(image, dsize=new_size)
@ -791,9 +855,9 @@ def webcam_preview(root: ctk.CTk, camera_index: int):
return
create_webcam_preview(camera_index)
else:
modules.globals.source_target_map = []
modules.globals.souce_target_map = []
create_source_target_popup_for_webcam(
root, modules.globals.source_target_map, camera_index
root, modules.globals.souce_target_map, camera_index
)
@ -1108,7 +1172,7 @@ def update_webcam_source(
global source_label_dict_live
source_path = ctk.filedialog.askopenfilename(
title=_("select a source image"),
title=_("select a source image"),
initialdir=RECENT_DIRECTORY_SOURCE,
filetypes=[img_ft],
)
@ -1160,7 +1224,7 @@ def update_webcam_target(
global target_label_dict_live
target_path = ctk.filedialog.askopenfilename(
title=_("select a target image"),
title=_("select a target image"),
initialdir=RECENT_DIRECTORY_SOURCE,
filetypes=[img_ft],
)

View File

@ -19,3 +19,5 @@ onnxruntime-gpu==1.22.0; sys_platform != 'darwin'
tensorflow; sys_platform != 'darwin'
opennsfw2==0.10.2
protobuf==4.25.1
git+https://github.com/xinntao/BasicSR.git@master
git+https://github.com/TencentARC/GFPGAN.git@master

3
run.py
View File

@ -1,5 +1,8 @@
#!/usr/bin/env python3
# Import the tkinter fix to patch the ScreenChanged error
import tkinter_fix
from modules import core
if __name__ == '__main__':

26
tkinter_fix.py Normal file
View File

@ -0,0 +1,26 @@
import functools
import tkinter

# Only needs to be imported once at the beginning of the application


def apply_patch() -> None:
    """Monkey-patch ``tkinter.Tk.__init__`` to define ``::tk::ScreenChanged``.

    Some Tk builds invoke a ``::tk::ScreenChanged`` Tcl procedure on display
    configuration changes; when the procedure is missing, Tk raises a
    ``TclError`` ("invalid command name"). This patch wraps ``Tk.__init__``
    so that every new interpreter gets a no-op ``::tk::ScreenChanged`` if
    the running Tk did not already provide one.

    Safe to call multiple times: repeated calls are no-ops, so wrappers
    never stack.
    """
    # Idempotency guard: if Tk.__init__ is already our wrapper, do nothing.
    if getattr(tkinter.Tk.__init__, "_screenchanged_patched", False):
        return

    original_init = tkinter.Tk.__init__

    @functools.wraps(original_init)
    def patched_init(self, *args, **kwargs):
        # Run the real initializer first so ``self.tk`` (the Tcl
        # interpreter handle) exists before we evaluate any Tcl.
        original_init(self, *args, **kwargs)
        # Define the missing ::tk::ScreenChanged procedure, but only if
        # this Tk build did not define one itself.
        self.tk.eval("""
    if {[info commands ::tk::ScreenChanged] == ""} {
        proc ::tk::ScreenChanged {args} {
            # Do nothing
            return
        }
    }
    """)

    # Marker checked by the guard above (set after @wraps so it survives
    # the metadata copy from original_init).
    patched_init._screenchanged_patched = True

    # Apply the monkey patch
    tkinter.Tk.__init__ = patched_init


# Apply the patch automatically when this module is imported
apply_patch()