-
Notifications
You must be signed in to change notification settings - Fork 28
Commit
Full App Rewrite
- Loading branch information
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,3 +1,9 @@ | ||
|
||
*.onnx | ||
**/__pycache__/** | ||
**/*.pyc | ||
*.cfg | ||
*.log | ||
BabbleApp/babble_settings.json | ||
BabbleApp/babble_settings.backup | ||
BabbleApp/build | ||
BabbleApp/dist | ||
/vivetest |
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,24 @@ | ||
# ProjectBabble | ||
A mouth-tracking system for VR, 100% open source!
|
||
Currently the architecture is a modified efficientnetv2-b0 trained on blendshapes from ARKit! The model outputs 31 blendshapes, most of which cross over with SRanipal's blendshapes.
|
||
### One thing to note | ||
1. This model is not a 1-to-1 translation to SRanipal blendshapes. While most blendshapes do cross over with SRanipal, cheek and tongue movements are limited.
|
||
### How to run | ||
Go to the releases tab, download the latest exe. Clone this repo, and then move the calibration file (calib.json) to the same location as the exe. Next get the .onnx model in the repo and move it to the same location as the exe. Run the exe and enjoy! | ||
|
||
### PLEASE HIT THE TURN OFF CALIBRATION BUTTON UNTIL FURTHER NOTICE
It ruins the model's quality | ||
|
||
### Warranty | ||
Lol | ||
|
||
### Credit | ||
|
||
Thank you to dfgHiatus#7426 for providing the Unity demo as well as the integrations with other apps!
|
||
Thank you to Rames The Generic#3540 who made the dataset, tested and evaluated the performance of the models, and for being awesome!
|
||
Copyright (c) 2022 Sameer Suri |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,130 @@ | ||
import PySimpleGUI as sg | ||
|
||
from config import BabbleSettingsConfig | ||
from osc import Tab | ||
from queue import Queue | ||
from threading import Event | ||
|
||
class AlgoSettingsWidget:
    """GUI widget for the model/algorithm settings tab.

    Exposes the model file name, GPU (DirectML) toggle, model output
    multiplier, and the One Euro Filter parameters. `render` mirrors edits
    from the PySimpleGUI values dict back into the settings config, saving
    to disk and notifying the OSC thread when something actually changed.
    """

    def __init__(self, widget_id: Tab, main_config: BabbleSettingsConfig, osc_queue: Queue):
        # PySimpleGUI element keys, namespaced by the owning tab id.
        # NOTE(review): "MODLEFILE" is a long-standing typo kept as-is —
        # the key is an internal identifier, renaming it risks breaking
        # any code that references it by literal string.
        self.gui_general_settings_layout = f"-GENERALSETTINGSLAYOUT{widget_id}-"
        self.gui_multiply = f"-MULTIPLY{widget_id}-"
        self.gui_model_file = f"-MODLEFILE{widget_id}-"
        self.gui_use_gpu = f"USEGPU{widget_id}"
        self.gui_speed_coefficient = f"-SPEEDCOEFFICIENT{widget_id}-"
        self.gui_min_cutoff = f"-MINCUTOFF{widget_id}-"
        self.main_config = main_config
        self.config = main_config.settings
        self.osc_queue = osc_queue

        # Define the window's contents.
        self.general_settings_layout = [
            [sg.Text("Model file:", background_color='#424042'),
                sg.InputText(
                    self.config.gui_model_file,
                    key=self.gui_model_file,
                    size=(32),
                    tooltip="Name of the model file.",
                ),
            ],
            [sg.Checkbox(
                "Use GPU (DirectML)",
                default=self.config.gui_use_gpu,
                key=self.gui_use_gpu,
                background_color='#424042',
                tooltip="Toggle GPU execution.",
            ),
            ],
            [sg.Text("Model output multiplier:", background_color='#424042'),
                sg.InputText(
                    self.config.gui_multiply,
                    key=self.gui_multiply,
                    size=(4),
                    tooltip="Model output modifier.",
                ),
            ],
            [
                sg.Text("One Euro Filter Parameters:", background_color='#242224'),
            ],
            [
                sg.Text("Min Frequency Cutoff", background_color='#424042'),
                sg.InputText(
                    self.config.gui_min_cutoff,
                    key=self.gui_min_cutoff,
                    size=(7),
                ),
                sg.Text("Speed Coefficient", background_color='#424042'),
                sg.InputText(
                    self.config.gui_speed_coefficient,
                    key=self.gui_speed_coefficient,
                    size=(5),
                ),
            ],
        ]

        self.widget_layout = [
            [
                sg.Text("Model Settings:", background_color='#242224'),
            ],
            [
                sg.Column(self.general_settings_layout, key=self.gui_general_settings_layout, background_color='#424042'),
            ],
        ]

        # Set the event until start is called, otherwise we can block if
        # shutdown is called before the widget ever starts.
        self.cancellation_event = Event()
        self.cancellation_event.set()
        self.image_queue = Queue()

    def started(self):
        """Return True while the widget is running (event cleared)."""
        return not self.cancellation_event.is_set()

    def start(self):
        """Mark the widget as running; no-op if already started."""
        # If we're already running, bail.
        if not self.cancellation_event.is_set():
            return
        self.cancellation_event.clear()

    def stop(self):
        """Mark the widget as stopped; no-op if not running."""
        # If we're not running yet, bail.
        if self.cancellation_event.is_set():
            return
        self.cancellation_event.set()

    def render(self, window, event, values):
        """Sync edited GUI values into the config; persist on change.

        All InputText values arrive from PySimpleGUI as strings, so the
        numeric fields must be coerced before comparing against the stored
        config value — otherwise a float-vs-string comparison is always
        unequal and we would rewrite the settings file on every render
        pass. (Like the original `int()` coercion, `float()` will raise
        ValueError while the user is mid-edit with invalid text; presumably
        the caller tolerates that — TODO confirm.)
        """
        changed = False

        if self.config.gui_multiply != int(values[self.gui_multiply]):
            self.config.gui_multiply = int(values[self.gui_multiply])
            changed = True

        if self.config.gui_model_file != values[self.gui_model_file]:
            self.config.gui_model_file = values[self.gui_model_file]
            changed = True

        if self.config.gui_use_gpu != values[self.gui_use_gpu]:
            self.config.gui_use_gpu = values[self.gui_use_gpu]
            changed = True

        # Coerce to float: fixes constant re-saves caused by comparing the
        # stored numeric value against the raw GUI string.
        if self.config.gui_min_cutoff != float(values[self.gui_min_cutoff]):
            self.config.gui_min_cutoff = float(values[self.gui_min_cutoff])
            changed = True

        if self.config.gui_speed_coefficient != float(values[self.gui_speed_coefficient]):
            self.config.gui_speed_coefficient = float(values[self.gui_speed_coefficient])
            changed = True

        if changed:
            self.main_config.save()
        # Tell the OSC thread which tab's settings it should refresh from.
        self.osc_queue.put(Tab.ALGOSETTINGS)
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,37 @@ | ||
import os | ||
import json | ||
os.environ["OMP_NUM_THREADS"] = "1" | ||
import onnxruntime as ort | ||
import time | ||
import PySimpleGUI as sg | ||
import cv2 | ||
import numpy as np | ||
from pythonosc import udp_client | ||
from torchvision.transforms.functional import to_grayscale | ||
import PIL.Image as Image | ||
from torchvision import transforms | ||
from threading import Thread | ||
from one_euro_filter import OneEuroFilter | ||
|
||
def run_model(self):
    """Run one inference pass of the ONNX blendshape model.

    Resizes ``self.current_image_gray`` to the model's 256x256 input,
    converts it to a normalized (1, 1, 256, 256) float batch, runs the
    ONNX Runtime session, smooths the result with the One Euro Filter,
    clips every blendshape into [0, 1], and stores it in ``self.output``.
    """
    frame = cv2.resize(self.current_image_gray, (256, 256))
    # Round-trip through PIL so the torchvision transforms can be applied.
    frame = Image.fromarray(frame)
    # Presumably already single-channel (name says gray) — to_grayscale is
    # then a no-op, but it guards against a 3-channel frame. TODO confirm.
    frame = to_grayscale(frame)
    # HWC uint8 -> CHW float32 scaled to [0, 1].
    frame = transforms.ToTensor()(frame)
    # Add the batch dimension and hand onnxruntime a numpy array.
    frame = frame.unsqueeze(0).numpy()

    out = self.sess.run([self.output_name], {self.input_name: frame})
    # Single requested output, first (only) batch element.
    output = out[0][0]
    output = self.one_euro_filter(output)
    # Clip all blendshape values into [0, 1] in one vectorized pass
    # instead of the element-by-element max/min loop.
    self.output = np.clip(output, 0, 1)