updated & improved code

Author: tfrymnn, 2023-06-17 13:20:51 +02:00 (committed by GitHub)
parent 1b637f9b32
commit 1b1f5b882d


@@ -6,7 +6,7 @@ import gfpgan
 import roop.globals
 import roop.processors.frame.core
 from roop.core import update_status
-from roop.face_analyser import get_one_face, get_many_faces
+from roop.face_analyser import get_one_face
 from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
 from PIL import Image
 from numpy import asarray
@@ -19,7 +19,7 @@ NAME = 'ROOP.FACE-ENHANCER'
 
 def pre_check() -> bool:
     download_directory_path = resolve_relative_path('../models')
-    conditional_download(download_directory_path, ['https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth'])
+    conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/GFPGANv1.3.pth'])
     return True
 
 
@@ -37,58 +37,42 @@ def get_face_enhancer() -> None:
     if FACE_ENHANCER is None:
         model_path = resolve_relative_path('../models/GFPGANv1.3.pth')
         # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399
-        FACE_ENHANCER = gfpgan.GFPGANer(
-            model_path=model_path,
-            channel_multiplier=2
-        )
+        FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1)
     return FACE_ENHANCER
 
 
-def enhance_face(source_face: Any, target_face: Any, temp_frame: Any) -> Any:
-    THREAD_SEMAPHORE.acquire()
-    if target_face:
-        temp_frame_original = temp_frame
+def enhance_face(temp_frame: Any) -> Any:
+    with THREAD_SEMAPHORE:
+        temp_frame_original = Image.fromarray(temp_frame)
         _, _, temp_frame = get_face_enhancer().enhance(
             temp_frame,
             paste_back=True
-        )
-        temp_frame_original=Image.fromarray(temp_frame_original)
-        temp_frame_original=temp_frame_original.resize([temp_frame_original.width*2,temp_frame_original.height*2])
-        temp_frame = Image.blend(temp_frame_original, Image.fromarray(temp_frame), 0.75)
-        temp_frame = asarray(temp_frame)
-    THREAD_SEMAPHORE.release()
-    return temp_frame
+        )
+        temp_frame = Image.blend(temp_frame_original, Image.fromarray(temp_frame), 0.75)
+    return asarray(temp_frame)
 
 
 def process_frame(source_face: Any, temp_frame: Any) -> Any:
-    if roop.globals.many_faces:
-        many_faces = get_many_faces(temp_frame)
-        if many_faces:
-            for target_face in many_faces:
-                temp_frame = enhance_face(source_face, target_face, temp_frame)
-    else:
-        target_face = get_one_face(temp_frame)
-        if target_face:
-            temp_frame = enhance_face(source_face, target_face, temp_frame)
+    target_face = get_one_face(temp_frame)
+    if target_face:
+        temp_frame = enhance_face(temp_frame)
     return temp_frame
 
 
 def process_frames(source_path: str, temp_frame_paths: List[str], progress=None) -> None:
-    source_face = get_one_face(cv2.imread(source_path))
     for temp_frame_path in temp_frame_paths:
         temp_frame = cv2.imread(temp_frame_path)
-        result = process_frame(source_face, temp_frame)
+        result = process_frame(None, temp_frame)
         cv2.imwrite(temp_frame_path, result)
         if progress:
             progress.update(1)
 
 
 def process_image(source_path: str, target_path: str, output_path: str) -> None:
-    source_face = get_one_face(cv2.imread(source_path))
     target_frame = cv2.imread(target_path)
-    result = process_frame(source_face, target_frame)
+    result = process_frame(None, target_frame)
     cv2.imwrite(output_path, result)
 
 
 def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
-    roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames)
+    roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
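
For context on how the reworked processor is driven: enhance_face now enhances the whole frame with GFPGAN at upscale=1 and blends the result onto the original frame at 75% opacity, so callers no longer pass a source or target face and process_frame simply receives None for the source. Below is a minimal usage sketch under those assumptions; the module path roop.processors.frame.face_enhancer and the sample file names are illustrative and not part of this diff.

# Minimal sketch (assumed module path, hypothetical file names; not part of this commit).
from roop.processors.frame import face_enhancer

face_enhancer.pre_check()                  # download the GFPGANv1.3.pth weights if missing
face_enhancer.process_image(
    source_path='source.jpg',              # ignored by the face enhancer after this change
    target_path='target.jpg',              # hypothetical frame to enhance
    output_path='output.jpg'               # hypothetical enhanced result
)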