Mirror of https://github.com/s0md3v/roop.git
Merge branch 's0md3v:next' into next
Commit d8987498f2
@@ -1,6 +1,6 @@
 import sys
 import importlib
-import threading
+from concurrent.futures import ThreadPoolExecutor
 from typing import Any, List
 from tqdm import tqdm
@@ -38,24 +38,13 @@ def get_frame_processors_modules(frame_processors):


 def multi_process_frame(source_path: str, temp_frame_paths: List[str], process_frames, progress) -> None:
-    threads = []
-    frames_per_thread = len(temp_frame_paths) // roop.globals.execution_threads
-    remaining_frames = len(temp_frame_paths) % roop.globals.execution_threads
-    start_index = 0
-    # create threads by frames
-    for _ in range(roop.globals.execution_threads):
-        end_index = start_index + frames_per_thread
-        if remaining_frames > 0:
-            end_index += 1
-            remaining_frames -= 1
-        thread_paths = temp_frame_paths[start_index:end_index]
-        thread = threading.Thread(target=process_frames, args=(source_path, thread_paths, progress))
-        threads.append(thread)
-        thread.start()
-        start_index = end_index
-    # join threads
-    for thread in threads:
-        thread.join()
+    with ThreadPoolExecutor(max_workers=roop.globals.execution_threads) as executor:
+        futures = []
+        for path in temp_frame_paths:
+            future = executor.submit(process_frames, source_path, [path], progress)
+            futures.append(future)
+        for future in futures:
+            future.result()


 def process_video(source_path: str, frame_paths: list[str], process_frames: Any) -> None:
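The hunk above replaces the hand-rolled thread pool in multi_process_frame, which split temp_frame_paths into fixed-size chunks across roop.globals.execution_threads threads, with a ThreadPoolExecutor that submits one future per frame path. Beyond being shorter, the futures version surfaces worker errors: Thread.join() returns normally even if the target raised, while future.result() re-raises the exception in the caller. A minimal, self-contained sketch of that difference follows; the worker function and frame name are placeholders, not roop code.

```python
import threading
from concurrent.futures import ThreadPoolExecutor


def worker(frame_path: str) -> None:
    # placeholder worker that always fails; not roop code
    raise ValueError(f'could not process {frame_path}')


# Thread.join() returns normally even though the target raised;
# the traceback only goes to stderr via threading's excepthook.
thread = threading.Thread(target=worker, args=('frame_0001.png',))
thread.start()
thread.join()
print('join() returned without raising')

# future.result() re-raises the worker's exception in the caller,
# so a failed frame is noticed instead of being silently skipped.
with ThreadPoolExecutor(max_workers=1) as executor:
    future = executor.submit(worker, 'frame_0001.png')
    try:
        future.result()
    except ValueError as error:
        print(f'caught in caller: {error}')
```

Submitting one path per future also spreads work more evenly than pre-computed chunks when individual frames take different amounts of time to process.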
@@ -6,7 +6,7 @@ import gfpgan
 import roop.globals
 import roop.processors.frame.core
 from roop.core import update_status
-from roop.face_analyser import get_one_face, get_many_faces
+from roop.face_analyser import get_one_face
 from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video

 FACE_ENHANCER = None
@@ -35,53 +35,40 @@ def get_face_enhancer() -> None:
     if FACE_ENHANCER is None:
         model_path = resolve_relative_path('../models/GFPGANv1.3.pth')
         # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399
-        FACE_ENHANCER = gfpgan.GFPGANer(
-            model_path=model_path,
-            channel_multiplier=2
-        )
+        FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1)
     return FACE_ENHANCER


-def enhance_face(source_face: Any, target_face: Any, temp_frame: Any) -> Any:
-    THREAD_SEMAPHORE.acquire()
-    if target_face:
+def enhance_face(temp_frame: Any) -> Any:
+    with THREAD_SEMAPHORE:
         _, _, temp_frame = get_face_enhancer().enhance(
             temp_frame,
             paste_back=True
         )
-    THREAD_SEMAPHORE.release()
     return temp_frame


 def process_frame(source_face: Any, temp_frame: Any) -> Any:
-    if roop.globals.many_faces:
-        many_faces = get_many_faces(temp_frame)
-        if many_faces:
-            for target_face in many_faces:
-                temp_frame = enhance_face(source_face, target_face, temp_frame)
-    else:
-        target_face = get_one_face(temp_frame)
-        if target_face:
-            temp_frame = enhance_face(source_face, target_face, temp_frame)
+    target_face = get_one_face(temp_frame)
+    if target_face:
+        temp_frame = enhance_face(temp_frame)
     return temp_frame


 def process_frames(source_path: str, temp_frame_paths: List[str], progress=None) -> None:
-    source_face = get_one_face(cv2.imread(source_path))
     for temp_frame_path in temp_frame_paths:
         temp_frame = cv2.imread(temp_frame_path)
-        result = process_frame(source_face, temp_frame)
+        result = process_frame(None, temp_frame)
         cv2.imwrite(temp_frame_path, result)
         if progress:
             progress.update(1)


 def process_image(source_path: str, target_path: str, output_path: str) -> None:
-    source_face = get_one_face(cv2.imread(source_path))
     target_frame = cv2.imread(target_path)
-    result = process_frame(source_face, target_frame)
+    result = process_frame(None, target_frame)
     cv2.imwrite(output_path, result)


 def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
-    roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames)
+    roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
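This hunk simplifies what looks like roop's face enhancer frame processor: GFPGANer is built with upscale=1 so the restored frame keeps its input resolution, face detection moves out of enhance_face into process_frame via get_one_face, the many_faces branch is dropped (GFPGAN's enhance() with paste_back=True already operates on the whole frame), and the manual THREAD_SEMAPHORE.acquire()/release() pair becomes a with block. The with form matters because the old code never released the semaphore if enhance() raised, which would stall every later frame. A small sketch of that failure mode, with a placeholder standing in for the GFPGAN call:

```python
import threading
from typing import Any

THREAD_SEMAPHORE = threading.Semaphore()


def fake_enhance(temp_frame: Any) -> Any:
    # placeholder for get_face_enhancer().enhance(temp_frame, paste_back=True)
    raise RuntimeError('enhancement failed')


def enhance_face_old(temp_frame: Any) -> Any:
    # acquire/release pattern: if fake_enhance() raises, release() never runs
    THREAD_SEMAPHORE.acquire()
    temp_frame = fake_enhance(temp_frame)
    THREAD_SEMAPHORE.release()
    return temp_frame


def enhance_face_new(temp_frame: Any) -> Any:
    # with block: the semaphore is released even when fake_enhance() raises
    with THREAD_SEMAPHORE:
        temp_frame = fake_enhance(temp_frame)
    return temp_frame


try:
    enhance_face_new('frame')
except RuntimeError:
    pass
print('free after with block:', THREAD_SEMAPHORE.acquire(blocking=False))       # True
THREAD_SEMAPHORE.release()

try:
    enhance_face_old('frame')
except RuntimeError:
    pass
print('free after acquire/release:', THREAD_SEMAPHORE.acquire(blocking=False))  # False
```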
@@ -44,9 +44,7 @@ def get_face_swapper() -> None:


 def swap_face(source_face: Any, target_face: Any, temp_frame: Any) -> Any:
-    if target_face:
-        return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)
-    return temp_frame
+    return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)


 def process_frame(source_face: Any, temp_frame: Any) -> Any:
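In the last hunk, which appears to touch the face swapper processor, swap_face loses its internal if target_face guard and now calls get_face_swapper().get(...) unconditionally, so callers must only invoke it when a face was actually detected. The body of process_frame is not shown here, so the guard placement below is an assumption that mirrors the face enhancer change above; get_one_face and the swapper call are reduced to stand-ins to keep the sketch self-contained.

```python
from typing import Any, Optional


def get_one_face(frame: Any) -> Optional[Any]:
    # stand-in for roop.face_analyser.get_one_face: returns None when no face is found
    return None


def swap_face(source_face: Any, target_face: Any, temp_frame: Any) -> Any:
    # post-change contract: target_face is assumed to be a real detection;
    # stands in for get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)
    assert target_face is not None
    return temp_frame


def process_frame(source_face: Any, temp_frame: Any) -> Any:
    # assumed caller-side guard: detect first, swap only when a face exists
    target_face = get_one_face(temp_frame)
    if target_face:
        temp_frame = swap_face(source_face, target_face, temp_frame)
    return temp_frame


print(process_frame(source_face=None, temp_frame='frame'))  # no face found, frame returned untouched
```

Hoisting the None check into the caller keeps swap_face a thin wrapper around the underlying swapper call.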