trimmed unrelated code and added README.md
lylajeon committed Mar 3, 2023
1 parent d410dbf commit 56a3d12
Showing 7 changed files with 80 additions and 276 deletions.
58 changes: 58 additions & 0 deletions README.md
@@ -0,0 +1,58 @@
# Deep 3D Portrait from a Single Image

## Summary

This repository is forked from [sicxu/Deep3dPortrait](https://github.com/sicxu/Deep3dPortrait).

## Prepare GAN Training

```
git clone https://github.com/lylajeon/Deep3dPortrait Deep3dPortrait
cd Deep3dPortrait
git clone https://github.com/kingsj0405/Face-Landmark-Parsing face-parsing.PyTorch
cd ..
```
Copy all files from `outputs/step4` to `step5_ui_expression&pose_change/result`, e.g. with the sketch below.
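
A minimal Python sketch of that copy step (assuming the default relative paths above and that the result folder may not exist yet):

```
import shutil
from pathlib import Path

src = Path('outputs/step4')
dst = Path('step5_ui_expression&pose_change/result')
dst.mkdir(parents=True, exist_ok=True)

# copy every step-4 artifact into the UI's result folder
for f in src.iterdir():
    if f.is_file():
        shutil.copy2(f, dst / f.name)
```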

```
python integrated_process.py --input_img [IMG_NAME]
cd step5_ui_expression&pose_change
chmod +x run.sh
./run.sh
```

# Original README.md of Deep 3D Portrait from a Single Image (CVPR 2020)
This is a TensorFlow implementation of the following paper: [Deep 3D Portrait from a Single Image](https://arxiv.org/abs/2004.11598). We propose a two-step geometry learning scheme that first learns 3DMM face reconstruction from single images and then learns to estimate hair and ear depth in a stereo setup.

## Getting Started
### System Requirements
- Software: Ubuntu 16.04, CUDA 9.0
- Python >= 3.5
### Usage
1. Clone the repository
```
git clone https://github.com/sicxu/Deep3dPortrait.git
cd Deep3dPortrait
pip install -r requirements.txt
```
2. Follow the instructions in [Deep3DFaceReconstruction](https://github.com/microsoft/Deep3DFaceReconstruction) to prepare the [BFM folder](/BFM)
3. Download the pretrained [face reconstruction model](https://drive.google.com/file/d/1fPsvLKghlCK8rknb9GPiKwIq9HIqWWwV/view?usp=sharing) and [depth estimation model](https://drive.google.com/file/d/1QUSK4k6ZONOZWpph9GHW0VGAiKFggEU5/view?usp=sharing), then put the pb files into the [model folder](model) (a loading sanity check appears after this list)
4. Run the following steps.

```
python step1_recon_3d_face.py
python step2_face_segmentation.py
python step3_get_head_geometry.py
python step4_save_obj.py
```

5. To check the results, see the ./outputs subfolders, which contain the results of the corresponding steps.
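
A quick way to confirm the downloaded frozen graphs parse under the TF 1.x API used here (a hedged sketch; the .pb filenames below are placeholders for whatever the downloaded files are called):

```
import tensorflow as tf  # TF 1.x, matching the CUDA 9.0 requirement above

# placeholder names -- substitute the actual files placed in model/
for pb in ['model/face_recon.pb', 'model/depth_net.pb']:
    with tf.gfile.GFile(pb, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    print(pb, 'parsed OK with', len(graph_def.node), 'nodes')
```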

## Others
- Image pre-alignment is necessary for face reconstruction. We recommend using [Bulat et al.'s method](https://github.com/1adrianb/2D-and-3D-face-alignment) to get facial landmarks (3D definition); a minimal landmark sketch follows this list. We also need masks of the face, hair, and ears as input to the depth estimation network; we recommend [Lin et al.'s method](https://arxiv.org/pdf/1906.01342.pdf) for semantic segmentation.
- The face reconstruction code is heavily borrowed from [Deep3DFaceReconstruction](https://github.com/microsoft/Deep3DFaceReconstruction).
- The [render](utils/render) code is modified from [tf_mesh_render](https://github.com/google/tf_mesh_renderer/tree/ba27ea1798f6ee8d03ddbc52f42ab4241f9328bb). Note that the renderer we compiled does not support other TensorFlow versions and can only be used on Linux.
- The manipulation code will not be released. If you want to compare against our method, please use the results in our paper, or contact me (sicheng_xu@yeah.net) for more comparisons.
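
For the pre-alignment step, a minimal landmark-extraction sketch with Bulat et al.'s face_alignment package (the LandmarksType spelling varies across package versions, and the input path is hypothetical):

```
import face_alignment
from skimage import io

# 3D landmark definition, as recommended above
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, flip_input=False)

img = io.imread('inputs/example.jpg')  # hypothetical input image
landmarks = fa.get_landmarks(img)      # list of (68, 3) arrays, one per detected face
```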

## Citation
If you find this code helpful for your research, please cite our paper.
Binary file added inputs/AI_gen.jpg
53 changes: 21 additions & 32 deletions integrated_process.py
@@ -1,47 +1,36 @@
-import os
-#from step0_68points import get_facial_landmark
-from step1_recon_3d_face import face_recon
+import os, argparse
+from step1_1_manipulate_expression import face_recon
 from step2_face_segmentation import prepare_mask
-from step3_get_head_geometry import depth_recon
-from step4_save_obj import save_obj
+from step3_1_modify_texture import depth_recon
+from step4_1_modify_texture import save_obj
 import sys
 sys.path.append('face-parsing.PyTorch')
 from step0_get_segmentation import get_face_alignment
 sys.path.remove('face-parsing.PyTorch')
-from flask import Flask, flash, request, redirect, url_for
-from flask import send_file

-def filemaking():
+def filemaking(input_img):
     # run function
     get_face_alignment(
-        '/workspace/FaceReenactment/Deep3DPortrait/inputs',
-        '/workspace/FaceReenactment/Deep3DPortrait/inputs',
+        './inputs',
+        './inputs',
         debug='store_true')
     os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
-    face_recon('inputs', 'outputs/step1','outputs/step1/vis')
+    expr_path = 'expressions'
+    input_path = 'inputs'
+    expr_img = [i for i in os.listdir(expr_path) if (i.endswith('png') or i.endswith('jpg') or i.endswith('jpeg')) and (i[:2] != '._') ]
+    get_face_alignment(expr_path, expr_path, debug='store_true')
+    src_img = os.path.join(input_path, input_img)
+    for target in expr_img :
+        for degree in range(0, 110, 10):
+            output_name = src_img.split(os.path.sep)[1].split('.')[0] + '_' + target.split('.')[0] +'_' + str(degree) + '%'
+            face_recon(src_img, os.path.join(expr_path, target), input_path, 'outputs/step1', output_name,'outputs/step1/vis', degree=degree)
     prepare_mask('outputs/step1', 'outputs/step2', 'outputs/step1', 'outputs/step2/vis')
     depth_recon('outputs/step2', 'outputs/step3')
     save_obj('outputs/step3','outputs/step4', True)

-app = Flask (__name__)
-
-@app.route('/',methods = ['POST'])
-def upload_file():
-    # check if the post request has the file part
-    if 'file' not in request.files:
-        flash('No file part')
-        return redirect(request.url)
-    file = request.files['file']
-    # if user does not select file, browser also
-    # submit an empty part without filename
-    if file.filename == '':
-        flash('No selected file')
-        return redirect(request.url)
-    # print(file)
-    print(os.path.join(os.getcwd(), "inputs",file.filename))
-    file.save(os.path.join(os.getcwd(), "inputs",file.filename))
-    filemaking()
-    return send_file(os.path.join(os.getcwd(),"outputs/step4","input.obj"),as_attachment=True)

 if __name__ == "__main__":
-    app.run(host='0.0.0.0',port=8888)
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--input_img', default='AI_gen.jpg')
+    args = parser.parse_args()
+    filemaking(args.input_img)
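
A note on the naming scheme this commit introduces: each step-1 output is keyed by source image, expression target, and blend degree. A small illustration of the `output_name` convention (the expression-image name is hypothetical):

```
import os

src_img = os.path.join('inputs', 'AI_gen.jpg')  # the default --input_img
target = 'joy.png'                              # hypothetical expression image
degree = 50

# mirrors the output_name expression in filemaking() above
output_name = src_img.split(os.path.sep)[1].split('.')[0] + '_' + target.split('.')[0] + '_' + str(degree) + '%'
print(output_name)  # -> AI_gen_joy_50%
```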

59 changes: 0 additions & 59 deletions integrated_process_v0.py

This file was deleted.

25 changes: 1 addition & 24 deletions step5_ui_expression&pose_change/main.py
@@ -10,18 +10,10 @@
 import PyQt5
 import sys
 from pywavefront import visualization, Wavefront
-def file_upload():
-    files = {'file': open(os.path.join(os.getcwd(),'input.png'),'rb')}
-    r = requests.post("http://s3.ciplab.ml:5443/", files=files)
-    # print(r.content.decode('utf-8'))
-    f = open("output.obj", "w")
-    f.write(r.content.decode('utf-8'))
-    f.close()
-    # print("gi")

 vertss=[]
 colorss = []
 class MyApp(QWidget):
     def __init__(self):
         self.rotation = 0
         self.d = gl.GLViewWidget()
@@ -38,9 +30,6 @@ def initUI(self):
         objectBox = QVBoxLayout()
         controlBox = QVBoxLayout()
         mainBox.addChildLayout(controlBox)
-        # objectBox.addStretch(1)
-        # objectBox.addStretch(1)
-        # controlBox.addStretch(2)
         mainBox.addLayout(objectBox)
         labels = ["Head Position", "Anger", "Contempt", "Disgust", "Fear", "Joy", "Sadness", "Surprise"]
         for i in range(8):
@@ -64,10 +53,6 @@ def initUI(self):
             hbox.addWidget(sld)
             objectBox.addLayout(hbox)

-        # objectBox.addWidget(sld)
-        # root_path = os.path.dirname(__file__)
-        # file_upload()
-        # exit(1)
         imgs_path = "result"
         sorted_list = sorted(os.listdir(imgs_path))
         # change the sequence on files in imgs_path
@@ -102,11 +87,6 @@ def initUI(self):
         self.d.addItem(self.m2)
         self.d.setFixedWidth(1000)
         self.d.setFixedHeight(1000)
-        # self.d.setCameraPosition(fov=45)
-        # self.d.setCameraPosition(pos=QtGui.QVector3D(0,0,0), distance=10, elevation=0, azimuth=0, rotation=45)
-        # self.d.opts["fov"]=45
-        # print(self.d.opts['vie(wport'])
-        # self.d.opts["rotation"]=(45,1,1,100)
         self.d.setCameraPosition(distance=5)
         self.d.opts["rotationMethod"]="quaternion"
         controlBox.addWidget(self.d)
@@ -116,7 +96,6 @@ def initUI(self):
         self.setGeometry(0,0,1600,1300)
         self.show()

-
     def rotateChange(self, value):
         q = QtGui.QQuaternion.fromEulerAngles(
             0, value, 0
@@ -126,13 +105,11 @@ def rotateChange(self, value):

     def expressChanged(self, value):
         idx = (value//11)
-        # print(vertss[value])
         self.m2.setMeshData(vertexes=vertss[value],vertexColors=colorss[value])
         self.m2.meshDataChanged()
         for i in range(len(self.bars)):
             if i != idx:
                 self.bars[i].setValue(i*10)
-        # self.bars[i].update()
         self.d.update()

 if __name__ == '__main__':
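
The slider logic above assumes 11 meshes per expression (blend degrees 0–100% in 10% steps, matching `range(0, 110, 10)` in integrated_process.py) stored contiguously in `vertss`; a small sketch of that indexing (my reading of the diff, not code from the repo):

```
DEGREES_PER_EXPR = 11  # degrees 0%..100% in 10% steps

def expression_of(mesh_index):
    # mirrors `idx = (value//11)` in expressChanged
    return mesh_index // DEGREES_PER_EXPR

assert expression_of(0) == 0    # first expression at 0% blend
assert expression_of(10) == 0   # first expression at 100% blend
assert expression_of(11) == 1   # second expression begins
```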
78 changes: 0 additions & 78 deletions step6_ui.py

This file was deleted.

