Merge branch 'next' into issue_377_rethink

# Conflicts:
#	roop/processors/frame/face_enhancer.py
#	roop/utilities.py
Pozitronik 2023-06-19 13:08:38 +04:00
commit e08c235e9d
7 changed files with 52 additions and 52 deletions

roop/core.py

@@ -16,10 +16,10 @@ import argparse
 import torch
 import onnxruntime
 import tensorflow
-from opennsfw2 import predict_video_frames, predict_image
 import roop.globals
 import roop.ui as ui
+from roop.predicter import predict_image, predict_video
 from roop.processors.frame.core import get_frame_processors_modules
 from roop.utilities import has_image_extension, is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clean_temp, normalize_output_path
 from roop import state
@@ -75,6 +75,7 @@ def parse_args() -> None:
     if args.source_path_deprecated:
         print('\033[33mArgument -f and --face are deprecated. Use -s and --source instead.\033[0m')
         roop.globals.source_path = args.source_path_deprecated
+        roop.globals.output_path = normalize_output_path(args.source_path_deprecated, roop.globals.target_path, args.output_path)
     if args.cpu_cores_deprecated:
         print('\033[33mArgument --cpu-cores is deprecated. Use --execution-threads instead.\033[0m')
         roop.globals.execution_threads = args.cpu_cores_deprecated
@@ -165,12 +166,12 @@ def start() -> None:
             return
     # process image to image
     if has_image_extension(roop.globals.target_path):
-        if predict_image(roop.globals.target_path) > 0.85:
+        if predict_image(roop.globals.target_path):
             destroy()
         # todo: this needs a temp path for images to work with multiple frame processors
         shutil.copy2(roop.globals.target_path, roop.globals.output_path)
         for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
             update_status('Progressing...', frame_processor.NAME)
-            frame_processor.process_image(roop.globals.source_path, roop.globals.target_path, roop.globals.output_path)
+            frame_processor.process_image(roop.globals.source_path, roop.globals.output_path, roop.globals.output_path)
             release_resources()
         if is_image(roop.globals.target_path):
             update_status('Processing to image succeed!')
@@ -178,8 +179,7 @@ def start() -> None:
             update_status('Processing to image failed!')
         return
     # process image to videos
-    seconds, probabilities = predict_video_frames(video_path=roop.globals.target_path, frame_interval=100)
-    if any(probability > 0.85 for probability in probabilities):
+    if predict_video(roop.globals.target_path):
         destroy()
     if state.is_resumable(roop.globals.target_path):
         update_status(f'Temp resources for this target already exists with {state.processed_frames_count(roop.globals.target_path)} frames processed, continue processing...')
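
Note on the (source_path, output_path, output_path) change above: the target image is first copied to the output path, and each frame processor then reads and overwrites that same output file, so several processors can be chained on a single image (the remaining todo notes that a real temp path is still needed). A minimal sketch of the pattern with hypothetical names:

    import shutil

    def run_image_processors(processors, source_path, target_path, output_path):
        # Seed the output with the original target, then let every processor
        # read and overwrite the same file so their effects accumulate.
        shutil.copy2(target_path, output_path)
        for processor in processors:
            processor.process_image(source_path, output_path, output_path)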

roop/predicter.py (new file, 23 lines)

@@ -0,0 +1,23 @@
+import numpy
+import opennsfw2
+from PIL import Image
+
+MAX_PROBABILITY = 0.85
+
+
+def predict_frame(target_frame: Image) -> bool:
+    image = Image.fromarray(target_frame)
+    image = opennsfw2.preprocess_image(image, opennsfw2.Preprocessing.YAHOO)
+    model = opennsfw2.make_open_nsfw_model()
+    views = numpy.expand_dims(image, axis=0)
+    _, probability = model.predict(views)[0]
+    return probability > MAX_PROBABILITY
+
+
+def predict_image(target_path: str) -> bool:
+    return opennsfw2.predict_image(target_path) > MAX_PROBABILITY
+
+
+def predict_video(target_path: str) -> bool:
+    _, probabilities = opennsfw2.predict_video_frames(video_path=target_path, frame_interval=100)
+    return any(probability > MAX_PROBABILITY for probability in probabilities)
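
Note: core.py uses the path-based predict_image and predict_video helpers, while the preview code in roop/ui.py (below) passes an in-memory frame to predict_frame. predict_frame rebuilds the opennsfw2 model on every call, which is fine for a single preview frame but would be slow inside a per-frame loop. A minimal usage sketch for the in-memory variant; the video file name is illustrative only:

    import cv2
    from roop.predicter import predict_frame

    capture = cv2.VideoCapture('example.mp4')  # illustrative path
    has_frame, frame = capture.read()          # frame is a numpy BGR array
    capture.release()
    if has_frame and predict_frame(frame):
        print('frame predicted as NSFW')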

roop/processors/frame/core.py

@@ -1,6 +1,6 @@
 import sys
 import importlib
-import threading
+from concurrent.futures import ThreadPoolExecutor
 from typing import Any, List
 from tqdm import tqdm
@@ -39,24 +39,13 @@ def get_frame_processors_modules(frame_processors):
 def multi_process_frame(source_path: str, temp_frame_paths: List[str], process_frames, progress) -> None:
-    threads = []
-    frames_per_thread = len(temp_frame_paths) // roop.globals.execution_threads
-    remaining_frames = len(temp_frame_paths) % roop.globals.execution_threads
-    start_index = 0
-    # create threads by frames
-    for _ in range(roop.globals.execution_threads):
-        end_index = start_index + frames_per_thread
-        if remaining_frames > 0:
-            end_index += 1
-            remaining_frames -= 1
-        thread_paths = temp_frame_paths[start_index:end_index]
-        thread = threading.Thread(target=process_frames, args=(source_path, thread_paths, progress))
-        threads.append(thread)
-        thread.start()
-        start_index = end_index
-    # join threads
-    for thread in threads:
-        thread.join()
+    with ThreadPoolExecutor(max_workers=roop.globals.execution_threads) as executor:
+        futures = []
+        for path in temp_frame_paths:
+            future = executor.submit(process_frames, source_path, [path], progress)
+            futures.append(future)
+        for future in futures:
+            future.result()


 def process_video(source_path: str, frame_paths: list[str], process_frames: Any) -> None:
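
Note on the ThreadPoolExecutor rewrite above: each temp frame is submitted as its own task (wrapped in a one-element list to keep the process_frames signature), and future.result() re-raises any worker exception in the calling thread, which the old threading.Thread/join version did not propagate. A self-contained sketch of the same pattern with placeholder names:

    from concurrent.futures import ThreadPoolExecutor

    def process_frames_stub(source_path, frame_paths, progress=None):
        # Placeholder for the real per-frame work.
        for path in frame_paths:
            print(f'processing {path} from {source_path}')

    frame_paths = ['0001.png', '0002.png', '0003.png']  # illustrative
    with ThreadPoolExecutor(max_workers=2) as executor:
        futures = [executor.submit(process_frames_stub, 'source.jpg', [path]) for path in frame_paths]
        for future in futures:
            future.result()  # propagates worker exceptions to the caller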

roop/processors/frame/face_enhancer.py

@@ -7,7 +7,7 @@ import gfpgan
 import roop.globals
 import roop.processors.frame.core
 from roop.core import update_status
-from roop.face_analyser import get_one_face, get_many_faces
+from roop.face_analyser import get_one_face
 from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
 from roop import state
@@ -37,42 +37,30 @@ def get_face_enhancer() -> None:
     if FACE_ENHANCER is None:
         model_path = resolve_relative_path('../models/GFPGANv1.3.pth')
         # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399
-        FACE_ENHANCER = gfpgan.GFPGANer(
-            model_path=model_path,
-            channel_multiplier=2
-        )
+        FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1)
     return FACE_ENHANCER


-def enhance_face(source_face: Any, target_face: Any, temp_frame: Any) -> Any:
-    THREAD_SEMAPHORE.acquire()
-    if target_face:
+def enhance_face(temp_frame: Any) -> Any:
+    with THREAD_SEMAPHORE:
         _, _, temp_frame = get_face_enhancer().enhance(
             temp_frame,
             paste_back=True
         )
-    THREAD_SEMAPHORE.release()
     return temp_frame


 def process_frame(source_face: Any, temp_frame: Any) -> Any:
-    if roop.globals.many_faces:
-        many_faces = get_many_faces(temp_frame)
-        if many_faces:
-            for target_face in many_faces:
-                temp_frame = enhance_face(source_face, target_face, temp_frame)
-    else:
-        target_face = get_one_face(temp_frame)
-        if target_face:
-            temp_frame = enhance_face(source_face, target_face, temp_frame)
+    target_face = get_one_face(temp_frame)
+    if target_face:
+        temp_frame = enhance_face(temp_frame)
     return temp_frame


 def process_frames(source_path: str, temp_frame_paths: List[str], progress=None) -> None:
-    source_face = get_one_face(cv2.imread(source_path))
     for temp_frame_path in temp_frame_paths:
         temp_frame = cv2.imread(temp_frame_path)
-        result = process_frame(source_face, temp_frame)
+        result = process_frame(None, temp_frame)
         processed_frame_path = state.get_frame_processed_name(temp_frame_path)
         cv2.imwrite(processed_frame_path, result)
         os.remove(temp_frame_path)
@@ -81,11 +69,10 @@ def process_frames(source_path: str, temp_frame_paths: List[str], progress=None)
 def process_image(source_path: str, target_path: str, output_path: str) -> None:
-    source_face = get_one_face(cv2.imread(source_path))
     target_frame = cv2.imread(target_path)
-    result = process_frame(source_face, target_frame)
+    result = process_frame(None, target_frame)
     cv2.imwrite(output_path, result)


 def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
-    roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames)
+    roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
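
Note on the enhance_face rewrite above: the with THREAD_SEMAPHORE: block releases the semaphore even if GFPGAN's enhance() raises, whereas the old explicit acquire()/release() pair would leave it held after an exception. The GFPGANer call now also passes upscale=1, so enhanced frames keep their original resolution instead of relying on the library default of 2. The context-manager form is roughly equivalent to this long-hand sketch (the enhance call is stubbed out):

    import threading

    THREAD_SEMAPHORE = threading.Semaphore()

    def enhance_face_longhand(temp_frame):
        THREAD_SEMAPHORE.acquire()
        try:
            pass  # the GFPGAN enhance() call would go here
        finally:
            THREAD_SEMAPHORE.release()
        return temp_frame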

roop/processors/frame/face_swapper.py

@@ -46,9 +46,7 @@ def get_face_swapper() -> None:
 def swap_face(source_face: Any, target_face: Any, temp_frame: Any) -> Any:
-    if target_face:
-        return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)
-    return temp_frame
+    return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)


 def process_frame(source_face: Any, temp_frame: Any) -> Any:

roop/ui.py

@@ -8,6 +8,7 @@ from PIL import Image, ImageOps
 import roop.globals
 from roop.face_analyser import get_one_face
 from roop.capturer import get_video_frame, get_video_frame_total
+from roop.predicter import predict_frame
 from roop.processors.frame.core import get_frame_processors_modules
 from roop.utilities import is_image, is_video, resolve_relative_path
@@ -200,6 +201,8 @@ def init_preview() -> None:
 def update_preview(frame_number: int = 0) -> None:
     if roop.globals.source_path and roop.globals.target_path:
         temp_frame = get_video_frame(roop.globals.target_path, frame_number)
+        if predict_frame(temp_frame):
+            quit()
         for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
             temp_frame = frame_processor.process_frame(
                 get_one_face(cv2.imread(roop.globals.source_path)),

roop/utilities.py

@@ -63,7 +63,7 @@ def restore_audio(target_path: str, output_path: str) -> None:
 def get_temp_frame_paths(target_path: str) -> List[str]:
     temp_directory_path = get_temp_directory_path(target_path)
-    return [file for file in glob.glob(os.path.join(temp_directory_path, '*.png')) if not os.path.basename(file).startswith(state.PROCESSED_PREFIX)]
+    return [file for file in glob.glob(os.path.join(glob.escape(temp_directory_path), '*.png')) if not os.path.basename(file).startswith(state.PROCESSED_PREFIX)]


 def get_temp_directory_path(target_path: str) -> str:
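
Note on the glob.escape change above: glob treats '[', ']', '*' and '?' in the directory part as pattern metacharacters, so a target whose temp directory name contains something like '[clip]' would previously match nothing. Escaping only the directory part keeps the '*.png' wildcard working. Illustrative paths:

    import glob
    import os

    temp_directory_path = '/tmp/roop/[clip] target'  # hypothetical directory name
    raw_pattern = os.path.join(temp_directory_path, '*.png')
    safe_pattern = os.path.join(glob.escape(temp_directory_path), '*.png')

    print(glob.glob(raw_pattern))   # '[clip]' is read as a character class: usually []
    print(glob.glob(safe_pattern))  # lists the PNG frames if the directory exists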