Split render() and listen() part2

commit c9fe66e8ef
parent 9d8e0f32d3
Author: henryruhs
Date:   2023-08-07 10:43:36 +02:00

9 changed files with 110 additions and 54 deletions
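Most of the diffs below apply one refactoring across the roop UI component modules: render() now only builds gradio components and stores them in module-level handles typed as Optional, while the event wiring moves into a new listen() that runs once every handle exists. The remaining hunks rename count_faces() to get_faces_total() and retune the predictor threshold. A minimal sketch of the render()/listen() pattern, using illustrative names (ACTION_BUTTON, on_action) that are not taken from roop:

from typing import Optional

import gradio

# Module-level handle: populated by render(), consumed by listen().
ACTION_BUTTON: Optional[gradio.Button] = None


def render() -> None:
    # Build components only; no event wiring happens here.
    global ACTION_BUTTON
    ACTION_BUTTON = gradio.Button('Action')


def listen() -> None:
    # Wire events after every module has rendered; both functions are meant
    # to run inside the same gradio.Blocks context.
    ACTION_BUTTON.click(on_action)


def on_action() -> None:
    print('Action clicked')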

View File

@@ -54,5 +54,5 @@ def find_similar_face(frame: Frame, reference_face: Face) -> Optional[Face]:
    return None
-def count_faces(frame: Frame) -> int:
+def get_faces_total(frame: Frame) -> int:
    return len(get_many_faces(frame))

View File

@@ -8,7 +8,7 @@ from roop.typing import Frame
PREDICTOR = None
THREAD_LOCK = threading.Lock()
-MAX_PROBABILITY = 2
+MAX_PROBABILITY = 0.85
def get_predictor() -> Model:
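The predictor hunk above is a one-line retune: a model probability never exceeds 1, so the old cutoff of 2 could never trigger, whereas 0.85 gives the check a real threshold again. A hedged sketch of how such a constant is typically consumed (frame_exceeds_threshold is an illustrative helper, not roop's actual predictor code):

MAX_PROBABILITY = 0.85


def frame_exceeds_threshold(probability: float) -> bool:
    # With the previous value of 2 this comparison could never be True,
    # because probabilities stay within [0, 1].
    return probability > MAX_PROBABILITY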

View File

@@ -1,4 +1,4 @@
-from typing import Any, Dict, Tuple
+from typing import Any, Dict, Tuple, Optional
import gradio
import roop.globals
@@ -6,15 +6,29 @@ from roop.core import start
from roop.utilities import has_image_extension, has_video_extension, normalize_output_path
+START_BUTTON: Optional[gradio.Button] = None
+CLEAR_BUTTON: Optional[gradio.Button] = None
+OUTPUT_IMAGE: Optional[gradio.Image] = None
+OUTPUT_VIDEO: Optional[gradio.Video] = None
def render() -> None:
+    global START_BUTTON
+    global CLEAR_BUTTON
+    global OUTPUT_IMAGE
+    global OUTPUT_VIDEO
    with gradio.Column():
        with gradio.Row():
-            start_button = gradio.Button('Start')
-            clear_button = gradio.Button('Clear')
-        output_image = gradio.Image(label='output_image', visible=False)
-        output_video = gradio.Video(label='output_video', visible=False)
-    start_button.click(update, outputs=[output_image, output_video])
-    clear_button.click(clear, outputs=[output_image, output_video])
+            START_BUTTON = gradio.Button('Start')
+            CLEAR_BUTTON = gradio.Button('Clear')
+        OUTPUT_IMAGE = gradio.Image(label='output_image', visible=False)
+        OUTPUT_VIDEO = gradio.Video(label='output_video', visible=False)
+def listen() -> None:
+    START_BUTTON.click(update, outputs=[OUTPUT_IMAGE, OUTPUT_VIDEO])
+    CLEAR_BUTTON.click(clear, outputs=[OUTPUT_IMAGE, OUTPUT_VIDEO])
def update() -> Tuple[Dict[str, Any], Dict[str, Any]]:
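The trailing context above points at the other half of this component: update() returns one dict per output, which gradio applies to OUTPUT_IMAGE and OUTPUT_VIDEO when the start button fires. A hedged sketch of that contract, assuming the finished result lands in roop.globals.output_path and leaving out the processing step itself (the module imports start from roop.core for that):

from typing import Any, Dict, Tuple

import gradio

import roop.globals
from roop.utilities import has_image_extension, has_video_extension


def update() -> Tuple[Dict[str, Any], Dict[str, Any]]:
    # One gradio.update(...) per output component: reveal the preview that
    # matches the produced file and keep the other one hidden.
    if roop.globals.output_path and has_image_extension(roop.globals.output_path):
        return gradio.update(value=roop.globals.output_path, visible=True), gradio.update(visible=False)
    if roop.globals.output_path and has_video_extension(roop.globals.output_path):
        return gradio.update(visible=False), gradio.update(value=roop.globals.output_path, visible=True)
    return gradio.update(visible=False), gradio.update(visible=False)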

View File

@@ -1,5 +1,5 @@
from time import sleep
-from typing import Any, Dict, Tuple, List
+from typing import Any, Dict, Tuple, List, Optional
import cv2
import gradio
@@ -15,8 +15,8 @@ from roop.uis import core as ui
from roop.uis.typing import ComponentName
from roop.utilities import is_video, is_image
-PREVIEW_IMAGE = None
-PREVIEW_SLIDER = None
+PREVIEW_IMAGE: Optional[gradio.Image] = None
+PREVIEW_SLIDER: Optional[gradio.Slider] = None
def render() -> None:

View File

@@ -1,4 +1,4 @@
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
from time import sleep
import cv2
@@ -6,14 +6,14 @@ import gradio
import roop.globals
from roop.capturer import get_video_frame
-from roop.face_analyser import count_faces
+from roop.face_analyser import get_faces_total
from roop.face_reference import clear_face_reference
from roop.uis import core as ui
from roop.uis.typing import ComponentName
from roop.utilities import is_image, is_video
-REFERENCE_FACE_POSITION_SLIDER = None
-SIMILAR_FACE_DISTANCE_SLIDER = None
+REFERENCE_FACE_POSITION_SLIDER: Optional[gradio.Slider] = None
+SIMILAR_FACE_DISTANCE_SLIDER: Optional[gradio.Slider] = None
def render() -> None:
@@ -29,10 +29,10 @@ def render() -> None:
    }
    if is_image(roop.globals.target_path):
        target_frame = cv2.imread(roop.globals.target_path)
-        reference_face_position_slider_args['maximum'] = count_faces(target_frame)
+        reference_face_position_slider_args['maximum'] = get_faces_total(target_frame)
    if is_video(roop.globals.target_path):
        target_frame = get_video_frame(roop.globals.target_path, roop.globals.reference_frame_number)
-        reference_face_position_slider_args['maximum'] = count_faces(target_frame)
+        reference_face_position_slider_args['maximum'] = get_faces_total(target_frame)
    REFERENCE_FACE_POSITION_SLIDER = gradio.Slider(**reference_face_position_slider_args)
    SIMILAR_FACE_DISTANCE_SLIDER = gradio.Slider(
        label='similar_face_distance',
@@ -54,12 +54,12 @@ def listen() -> None:
        component = ui.get_component(component_name)
        if component:
            component.change(update_face_reference_position, inputs=REFERENCE_FACE_POSITION_SLIDER, outputs=REFERENCE_FACE_POSITION_SLIDER)
-    REFERENCE_FACE_POSITION_SLIDER.change(self_update_face_reference_position, inputs=REFERENCE_FACE_POSITION_SLIDER)
+    REFERENCE_FACE_POSITION_SLIDER.change(clear_and_update_face_reference_position, inputs=REFERENCE_FACE_POSITION_SLIDER)
-def self_update_face_reference_position(reference_face_position: int) -> Dict[Any, Any]:
+def clear_and_update_face_reference_position(reference_face_position: int) -> Dict[Any, Any]:
    clear_face_reference()
-    update_face_reference_position(reference_face_position)
+    return update_face_reference_position(reference_face_position)
def update_face_reference_position(reference_face_position: int) -> Dict[Any, Any]:
@@ -68,10 +68,10 @@ def update_face_reference_position(reference_face_position: int) -> Dict[Any, Any]:
    roop.globals.reference_face_position = reference_face_position
    if is_image(roop.globals.target_path):
        target_frame = cv2.imread(roop.globals.target_path)
-        maximum = count_faces(target_frame)
+        maximum = get_faces_total(target_frame)
    if is_video(roop.globals.target_path):
        target_frame = get_video_frame(roop.globals.target_path, roop.globals.reference_frame_number)
-        maximum = count_faces(target_frame)
+        maximum = get_faces_total(target_frame)
    return gradio.update(value=reference_face_position, maximum=maximum)
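Beyond the count_faces() to get_faces_total() rename, the hunks above rename self_update_face_reference_position to clear_and_update_face_reference_position and make it return the dict produced by update_face_reference_position, so the refreshed slider settings are available wherever the callback is wired with an outputs component. The slider's maximum always tracks how many faces the analyser finds in the current target frame; a hedged sketch that collapses the repeated image/video branches into one hypothetical helper (get_target_faces_total is not part of roop):

import cv2

import roop.globals
from roop.capturer import get_video_frame
from roop.face_analyser import get_faces_total
from roop.utilities import is_image, is_video


def get_target_faces_total() -> int:
    # Read the frame the reference face is picked from and count its faces,
    # which bounds the reference_face_position slider.
    if is_image(roop.globals.target_path):
        return get_faces_total(cv2.imread(roop.globals.target_path))
    if is_video(roop.globals.target_path):
        return get_faces_total(get_video_frame(roop.globals.target_path, roop.globals.reference_frame_number))
    return 0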

View File

@@ -1,4 +1,4 @@
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Optional
import gradio
import onnxruntime
@@ -6,23 +6,39 @@ import roop.globals
from roop.processors.frame.core import list_frame_processors_names, load_frame_processor_module, clear_frame_processors_modules
from roop.uis import core as ui
+FRAME_PROCESSORS_CHECKBOX_GROUP: Optional[gradio.CheckboxGroup] = None
+EXECUTION_PROVIDERS_CHECKBOX_GROUP: Optional[gradio.CheckboxGroup] = None
+EXECUTION_THREADS_SLIDER: Optional[gradio.Slider] = None
+KEEP_FPS_CHECKBOX: Optional[gradio.Checkbox] = None
+KEEP_TEMP_CHECKBOX: Optional[gradio.Checkbox] = None
+SKIP_AUDIO_CHECKBOX: Optional[gradio.Checkbox] = None
+MANY_FACES_CHECKBOX: Optional[gradio.Checkbox] = None
def render() -> None:
+    global FRAME_PROCESSORS_CHECKBOX_GROUP
+    global EXECUTION_PROVIDERS_CHECKBOX_GROUP
+    global EXECUTION_THREADS_SLIDER
+    global KEEP_FPS_CHECKBOX
+    global KEEP_TEMP_CHECKBOX
+    global SKIP_AUDIO_CHECKBOX
+    global MANY_FACES_CHECKBOX
    with gradio.Column():
        with gradio.Box():
-            frame_processors_checkbox_group = gradio.CheckboxGroup(
+            FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup(
                label='frame_processors',
                choices=sort_frame_processors(roop.globals.frame_processors),
                value=roop.globals.frame_processors
            )
-            ui.register_component('frame_processors_checkbox_group', frame_processors_checkbox_group)
+            ui.register_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP)
        with gradio.Box():
-            execution_providers_checkbox_group = gradio.CheckboxGroup(
+            EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
                label='execution_providers',
                choices=onnxruntime.get_available_providers(),
                value=roop.globals.execution_providers
            )
-            execution_threads_slider = gradio.Slider(
+            EXECUTION_THREADS_SLIDER = gradio.Slider(
                label='execution_threads',
                value=roop.globals.execution_threads,
                step=1,
@@ -30,31 +46,33 @@ def render() -> None:
                maximum=64
            )
        with gradio.Box():
-            keep_fps_checkbox = gradio.Checkbox(
+            KEEP_FPS_CHECKBOX = gradio.Checkbox(
                label='keep_fps',
                value=roop.globals.keep_fps
            )
-            keep_temp_checkbox = gradio.Checkbox(
+            KEEP_TEMP_CHECKBOX = gradio.Checkbox(
                label='keep_temp',
                value=roop.globals.keep_fps
            )
-            skip_audio_checkbox = gradio.Checkbox(
+            SKIP_AUDIO_CHECKBOX = gradio.Checkbox(
                label='skip_audio',
                value=roop.globals.skip_audio
            )
-            many_faces_checkbox = gradio.Checkbox(
+            MANY_FACES_CHECKBOX = gradio.Checkbox(
                label='many_faces',
                value=roop.globals.many_faces
            )
-            ui.register_component('many_faces_checkbox', many_faces_checkbox)
+            ui.register_component('many_faces_checkbox', MANY_FACES_CHECKBOX)
-    frame_processors_checkbox_group.change(update_frame_processors, inputs=frame_processors_checkbox_group, outputs=frame_processors_checkbox_group)
-    execution_providers_checkbox_group.change(update_execution_providers, inputs=execution_providers_checkbox_group, outputs=execution_providers_checkbox_group)
-    execution_threads_slider.change(update_execution_threads, inputs=execution_threads_slider, outputs=execution_threads_slider)
-    keep_fps_checkbox.change(lambda value: update_checkbox('keep_fps', value), inputs=keep_fps_checkbox, outputs=keep_fps_checkbox)
-    keep_temp_checkbox.change(lambda value: update_checkbox('keep_temp', value), inputs=keep_temp_checkbox, outputs=keep_temp_checkbox)
-    skip_audio_checkbox.change(lambda value: update_checkbox('skip_audio', value), inputs=skip_audio_checkbox, outputs=skip_audio_checkbox)
-    many_faces_checkbox.change(lambda value: update_checkbox('many_faces', value), inputs=many_faces_checkbox, outputs=many_faces_checkbox)
+def listen() -> None:
+    FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs=FRAME_PROCESSORS_CHECKBOX_GROUP, outputs=FRAME_PROCESSORS_CHECKBOX_GROUP)
+    EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs=EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs=EXECUTION_PROVIDERS_CHECKBOX_GROUP)
+    EXECUTION_THREADS_SLIDER.change(update_execution_threads, inputs=EXECUTION_THREADS_SLIDER, outputs=EXECUTION_THREADS_SLIDER)
+    KEEP_FPS_CHECKBOX.change(lambda value: update_checkbox('keep_fps', value), inputs=KEEP_FPS_CHECKBOX, outputs=KEEP_FPS_CHECKBOX)
+    KEEP_TEMP_CHECKBOX.change(lambda value: update_checkbox('keep_temp', value), inputs=KEEP_TEMP_CHECKBOX, outputs=KEEP_TEMP_CHECKBOX)
+    SKIP_AUDIO_CHECKBOX.change(lambda value: update_checkbox('skip_audio', value), inputs=SKIP_AUDIO_CHECKBOX, outputs=SKIP_AUDIO_CHECKBOX)
+    MANY_FACES_CHECKBOX.change(lambda value: update_checkbox('many_faces', value), inputs=MANY_FACES_CHECKBOX, outputs=MANY_FACES_CHECKBOX)
def update_frame_processors(frame_processors: List[str]) -> Dict[Any, Any]:
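The listen() wiring above keeps each checkbox mapped to a roop.globals flag through one shared helper. A hedged sketch of what the update_checkbox helper those lambdas call into plausibly looks like (its real implementation sits outside this hunk):

from typing import Any, Dict

import gradio

import roop.globals


def update_checkbox(name: str, value: bool) -> Dict[Any, Any]:
    # Assumed behaviour: store the toggled value on the matching
    # roop.globals attribute and echo it back so the checkbox stays in sync.
    setattr(roop.globals, name, value)
    return gradio.update(value=value)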

View File

@@ -1,27 +1,36 @@
-from typing import Any, Dict, IO
+from typing import Any, Dict, IO, Optional
import gradio
import roop.globals
from roop.uis import core as ui
from roop.utilities import is_image
+SOURCE_FILE: Optional[gradio.File] = None
+SOURCE_IMAGE: Optional[gradio.Image] = None
def render() -> None:
+    global SOURCE_FILE
+    global SOURCE_IMAGE
    with gradio.Box():
        is_source_image = is_image(roop.globals.source_path)
-        source_file = gradio.File(
+        SOURCE_FILE = gradio.File(
            file_count='single',
            file_types=['.png', '.jpg', '.jpeg', '.webp'],
            label='source_path',
            value=roop.globals.source_path if is_source_image else None
        )
-        ui.register_component('source_file', source_file)
-        source_image = gradio.Image(
+        ui.register_component('source_file', SOURCE_FILE)
+        SOURCE_IMAGE = gradio.Image(
            label='source_image',
-            value=source_file.value['name'] if is_source_image else None,
+            value=SOURCE_FILE.value['name'] if is_source_image else None,
            visible=is_source_image
        )
-    source_file.change(update, inputs=source_file, outputs=source_image)
+def listen() -> None:
+    SOURCE_FILE.change(update, inputs=SOURCE_FILE, outputs=SOURCE_IMAGE)
def update(file: IO[Any]) -> Dict[str, Any]:

View File

@@ -1,4 +1,4 @@
-from typing import Any, Dict, IO, Tuple
+from typing import Any, Dict, IO, Tuple, Optional
import gradio
import roop.globals
@@ -6,29 +6,40 @@ from roop.face_reference import clear_face_reference
from roop.uis import core as ui
from roop.utilities import is_image, is_video
+TARGET_FILE: Optional[gradio.File] = None
+TARGET_IMAGE: Optional[gradio.Image] = None
+TARGET_VIDEO: Optional[gradio.Video] = None
def render() -> None:
+    global TARGET_FILE
+    global TARGET_IMAGE
+    global TARGET_VIDEO
    with gradio.Box():
        is_target_image = is_image(roop.globals.target_path)
        is_target_video = is_video(roop.globals.target_path)
-        target_file = gradio.File(
+        TARGET_FILE = gradio.File(
            file_count='single',
            file_types=['.png', '.jpg', '.jpeg', '.webp', '.mp4'],
            label='target_path',
            value=roop.globals.target_path if is_target_image or is_target_video else None
        )
-        ui.register_component('target_file', target_file)
-        target_image = gradio.Image(
+        ui.register_component('target_file', TARGET_FILE)
+        TARGET_IMAGE = gradio.Image(
            label='target_image',
-            value=target_file.value['name'] if is_target_image else None,
+            value=TARGET_FILE.value['name'] if is_target_image else None,
            visible=is_target_image
        )
-        target_video = gradio.Video(
+        TARGET_VIDEO = gradio.Video(
            label='target_video',
-            value=target_file.value['name'] if is_target_video else None,
+            value=TARGET_FILE.value['name'] if is_target_video else None,
            visible=is_target_video
        )
-    target_file.change(update, inputs=target_file, outputs=[target_image, target_video])
+def listen() -> None:
+    TARGET_FILE.change(update, inputs=TARGET_FILE, outputs=[TARGET_IMAGE, TARGET_VIDEO])
def update(file: IO[Any]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
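The trailing update(file) signature above receives the uploaded file object from TARGET_FILE.change and returns one dict per preview component. A hedged sketch of that handler, not the exact roop implementation (the real one presumably also calls the imported clear_face_reference, which is skipped here):

from typing import Any, Dict, IO, Tuple

import gradio

import roop.globals
from roop.utilities import is_image, is_video


def update(file: IO[Any]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    # Remember the dropped target and switch the previews to whichever kind
    # of file it is: image, video, or nothing.
    if file and is_image(file.name):
        roop.globals.target_path = file.name
        return gradio.update(value=file.name, visible=True), gradio.update(value=None, visible=False)
    if file and is_video(file.name):
        roop.globals.target_path = file.name
        return gradio.update(value=None, visible=False), gradio.update(value=file.name, visible=True)
    roop.globals.target_path = None
    return gradio.update(value=None, visible=False), gradio.update(value=None, visible=False)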

View File

@@ -20,5 +20,9 @@ def render() -> gradio.Blocks:
def listen() -> None:
+    settings.listen()
+    source.listen()
+    target.listen()
    preview.listen()
    reference.listen()
+    output.listen()
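With every component following the same shape, the layout's listen() above simply fans out to each module once render() has produced the Blocks. A hedged sketch of how such a layout is typically driven (run_ui is an illustrative entry point, not roop's actual one); re-entering the Blocks context lets the listeners attach to the already-rendered components:

from types import ModuleType

import gradio


def run_ui(layout: ModuleType) -> None:
    # render() builds all components inside its own gradio.Blocks context and
    # returns the Blocks; listen() then wires the events while that context is
    # re-entered, once every module-level handle is populated.
    blocks: gradio.Blocks = layout.render()
    with blocks:
        layout.listen()
    blocks.launch()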