Bugfix: Enhancement and upscaling working again in virtual cam
Corrupt videos are caught when added to target files, displaying a warning message
Added masking and mouth restore options to virtual cam
C0untFloyd committed Sep 16, 2024
1 parent b8da445 commit f27ce11
Showing 6 changed files with 48 additions and 12 deletions.
31 changes: 31 additions & 0 deletions README.md
@@ -60,6 +60,37 @@ Additional commandline arguments are currently unsupported and settings should b

### Changelog

**16.9.2024** v4.2.8

- Bugfix: Starting roop-unleashed without an NVIDIA GPU but with the CUDA option enabled
- Bugfix: Target Faces couldn't be moved left/right
- Bugfix: Enhancement and upscaling working again in virtual cam
- Corrupt videos are caught when added to target files, displaying a warning message
- Source Files Component cleared after face detection to release temp files
- Added masking and mouth restore options to virtual cam


**9.9.2024** v4.2.3

- Hotfix for gradio pydantic issue with fastapi
- Upgraded to Gradio 4.43 hoping it will fix remaining issues
- Added new action when no face detected -> use last swapped
- Specified image format for image controls - opening new tabs on preview images possible again!
- Hardcoded image output format for livecam to jpeg - might be faster than previous webp
- Chain events to be only executed if previous was a success


**5.9.2024** v4.2.0

- Added ability to move input & target faces order
- New CLI Arguments override settings
- Small UI changes to faceswapping tab
- Added mask option and code for restoration of original mouth area
- Updated gradio to v4.42.0
- Added CLI Arguments --server_share and --cuda_device_id
- Added webp image support


**15.07.2024** v4.1.1

- Bugfix: Post-processing after swapping
1 change: 0 additions & 1 deletion roop/capturer.py
@@ -21,7 +21,6 @@ def get_video_frame(video_path: str, frame_number: int = 0) -> Optional[Frame]:

     if video_path != current_video_path:
         release_video()
-
         current_capture = cv2.VideoCapture(video_path)
         current_video_path = video_path
         current_frame_total = current_capture.get(cv2.CAP_PROP_FRAME_COUNT)
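For context, the hunk above reflects get_video_frame's capture caching: a single cv2.VideoCapture is kept at module level and only reopened when a different path is requested. Below is an illustrative, self-contained sketch of that pattern; the helper name open_capture and the inline release are assumptions for the example, not code from the repository.

```python
# Illustrative sketch of the caching pattern visible above: one module-level
# cv2.VideoCapture is reused until a different video path is requested.
# open_capture and the inline release() call are hypothetical stand-ins.
from typing import Optional
import cv2

current_video_path: Optional[str] = None
current_capture: Optional[cv2.VideoCapture] = None
current_frame_total: float = 0.0

def open_capture(video_path: str) -> cv2.VideoCapture:
    global current_video_path, current_capture, current_frame_total
    if video_path != current_video_path:
        if current_capture is not None:
            current_capture.release()  # roughly what release_video() does
        current_capture = cv2.VideoCapture(video_path)
        current_video_path = video_path
        current_frame_total = current_capture.get(cv2.CAP_PROP_FRAME_COUNT)
    return current_capture
```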
2 changes: 1 addition & 1 deletion roop/metadata.py
@@ -1,2 +1,2 @@
 name = 'roop unleashed'
-version = '4.2.6'
+version = '4.2.8'
12 changes: 6 additions & 6 deletions roop/virtualcam.py
@@ -10,7 +10,7 @@
 cam_thread = None
 vcam = None
 
-def virtualcamera(streamobs, cam_num,width,height):
+def virtualcamera(streamobs, use_xseg, use_mouthrestore, cam_num,width,height):
     from roop.ProcessOptions import ProcessOptions
     from roop.core import live_swap, get_processing_plugins
 
@@ -46,9 +46,9 @@ def virtualcamera(streamobs, cam_num,width,height):
     print(f'Not streaming to virtual camera!')
     subsample_size = roop.globals.subsample_size
 
-    # always use xseg masking & restore mouth
-    options = ProcessOptions(get_processing_plugins("mask_xseg"), roop.globals.distance_threshold, roop.globals.blend_ratio,
-        "all", 0, None, None, 1, subsample_size, False, True)
+
+    options = ProcessOptions(get_processing_plugins("mask_xseg" if use_xseg else None), roop.globals.distance_threshold, roop.globals.blend_ratio,
+        "all", 0, None, None, 1, subsample_size, False, use_mouthrestore)
     while cam_active:
         ret, frame = cap.read()
         if not ret:
@@ -68,12 +68,12 @@ def virtualcamera(streamobs, cam_num,width,height):



-def start_virtual_cam(streamobs, cam_number, resolution):
+def start_virtual_cam(streamobs, use_xseg, use_mouthrestore, cam_number, resolution):
     global cam_thread, cam_active
 
     if not cam_active:
         width, height = map(int, resolution.split('x'))
-        cam_thread = threading.Thread(target=virtualcamera, args=[streamobs, cam_number, width, height])
+        cam_thread = threading.Thread(target=virtualcamera, args=[streamobs, use_xseg, use_mouthrestore, cam_number, width, height])
         cam_thread.start()
 
 
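The extra parameters thread the new live-cam checkboxes through to ProcessOptions, so XSeg masking and mouth restoration are now opt-in instead of always enabled. A minimal usage sketch of the updated entry point follows; the keyword names come from the signature above, the concrete values (camera index, resolution) are examples only, and a virtual-camera backend is assumed to be available when streamobs is True.

```python
# Minimal usage sketch of the updated start_virtual_cam signature.
# The keyword names mirror the diff above; the values are examples only.
from roop.virtualcam import start_virtual_cam

start_virtual_cam(
    streamobs=True,          # forward the stream to the virtual camera
    use_xseg=True,           # enable DFL XSeg masking
    use_mouthrestore=False,  # leave the swapped mouth as-is
    cam_number=0,            # webcam index (example value)
    resolution="1280x720",   # parsed via resolution.split('x') above
)
```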
6 changes: 5 additions & 1 deletion ui/tabs/faceswap_tab.py
@@ -762,7 +762,11 @@ def on_destfiles_changed(destfiles):

     if util.is_video(filename) or filename.lower().endswith('gif'):
         total_frames = get_video_frame_total(filename)
-        current_video_fps = util.detect_fps(filename)
+        if total_frames is None or total_frames < 1:
+            total_frames = 1
+            gr.Warning(f"Corrupted video {filename}, can't detect number of frames!")
+        else:
+            current_video_fps = util.detect_fps(filename)
     else:
         total_frames = 1
     list_files_process[idx].endframe = total_frames
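The new guard treats a missing or non-positive frame count as a corrupted target and warns instead of proceeding. As a hedged illustration of why that check works, the sketch below shows one way a frame-count probe can be written with OpenCV; it is not roop's actual get_video_frame_total, which may be implemented differently.

```python
# Hedged sketch of a frame-count probe (not the repository's actual
# get_video_frame_total): corrupt or stream-only files commonly report
# a frame count of 0 or less, which the guard above maps to a warning.
from typing import Optional
import cv2

def probe_frame_total(video_path: str) -> Optional[int]:
    capture = cv2.VideoCapture(video_path)
    try:
        if not capture.isOpened():
            return None  # the container could not be opened at all
        total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
        return total if total > 0 else None
    finally:
        capture.release()
```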
8 changes: 5 additions & 3 deletions ui/tabs/livecam_tab.py
@@ -25,22 +25,24 @@ def livecam_tab():
         cb_obs = gr.Checkbox(label="Forward stream to virtual camera", interactive=True)
         with gr.Column():
             dd_reso = gr.Dropdown(choices=["640x480","1280x720", "1920x1080"], value="1280x720", label="Fake Camera Resolution", interactive=True)
+            cb_xseg = gr.Checkbox(label="Use DFL Xseg masking", interactive=True, value=True)
+            cb_mouthrestore = gr.Checkbox(label="Restore original mouth area", interactive=True, value=False)
 
         with gr.Row():
             fake_cam_image = gr.Image(label='Fake Camera Output', interactive=False, format="jpeg")
 
-        start_event = bt_start.click(fn=start_cam, inputs=[cb_obs, camera_num, dd_reso, ui.globals.ui_selected_enhancer, ui.globals.ui_blend_ratio, ui.globals.ui_upscale],outputs=[bt_start, bt_stop,fake_cam_image])
+        start_event = bt_start.click(fn=start_cam, inputs=[cb_obs, cb_xseg, cb_mouthrestore, camera_num, dd_reso, ui.globals.ui_selected_enhancer, ui.globals.ui_blend_ratio, ui.globals.ui_upscale],outputs=[bt_start, bt_stop,fake_cam_image])
         bt_stop.click(fn=stop_swap, cancels=[start_event], outputs=[bt_start, bt_stop], queue=False)
 
 
-def start_cam(stream_to_obs, cam, reso, enhancer, blend_ratio, upscale):
+def start_cam(stream_to_obs, use_xseg, use_mouthrestore, cam, reso, enhancer, blend_ratio, upscale):
     from roop.virtualcam import start_virtual_cam
     from roop.utilities import convert_to_gradio
 
-    start_virtual_cam(stream_to_obs, cam, reso)
     roop.globals.selected_enhancer = enhancer
     roop.globals.blend_ratio = blend_ratio
     roop.globals.subsample_size = int(upscale[:3])
+    start_virtual_cam(stream_to_obs, use_xseg, use_mouthrestore, cam, reso)
     while True:
         yield gr.Button(interactive=False), gr.Button(interactive=True), convert_to_gradio(ui.globals.ui_camera_frame)
