added model, utils, BFM folder
lylajeon committed Mar 24, 2023
1 parent 581fc24 commit ada1606
Showing 14 changed files with 1,431 additions and 0 deletions.
Binary file added BFM/border_index.mat
Binary file not shown.
Binary file added BFM/center.mat
Binary file not shown.
Empty file added model/.gitkeep
Empty file.
76 changes: 76 additions & 0 deletions utils/construct_triangles.py
@@ -0,0 +1,76 @@
import numpy as np
from skimage.morphology import label, remove_small_objects
from sklearn.neighbors import NearestNeighbors
from scipy.spatial import Delaunay
import cv2

#######################################################################################
# Auxiliary functions for triangulation
#######################################################################################

def remove_small_area(mask, thres=20):
labels = label(mask.astype(np.int32), connectivity=1)
    mask = remove_small_objects(labels.astype(bool), thres, connectivity=1)
return mask.astype(np.float32)

def dis(a, b):
return np.sqrt(np.sum((a - b) ** 2))

def filter_tri(tri, points, thres=np.sqrt(5)):
tri_list = []
for i in range(tri.shape[0]):
dis_ab = dis(points[tri[i][0]], points[tri[i][1]])
dis_bc = dis(points[tri[i][1]], points[tri[i][2]])
dis_ac = dis(points[tri[i][0]], points[tri[i][2]])
max_dis = np.max([dis_ab, dis_bc, dis_ac])
if max_dis > thres: continue
tri_list.append(tri[i])
return np.array(tri_list)

def padding_tri(points, tri_list, max_num=28000, max_list=54000, OUTLIER=1000):
assert points.shape[0] <= max_num and tri_list.shape[0] <= max_list
padding = OUTLIER * np.ones([max_num - points.shape[0], 2])
padded_points = np.concatenate([points, padding], axis=0).astype(np.int32)
padding_list = np.tile(np.array(
[max_num-3, max_num-2, max_num-1]).reshape([1, 3]), [max_list - tri_list.shape[0], 1])
padding_list = np.concatenate(
[tri_list,padding_list], axis=0).astype(np.int32)
return padded_points, padding_list


def find_boundary_ind(hair_shape, face_shape, points_index, border, mask):

# find the boundary between rendered face and hair on hair in image plane
boundary = cv2.dilate(mask.astype(np.uint8),
np.uint8(np.ones((3, 3))), iterations=1).astype(np.float32) - mask
boundary = remove_small_area(boundary, thres=200)
index = np.where(boundary == 1)
boundary_ind = np.concatenate(
[np.expand_dims(index[0], axis=1), np.expand_dims(index[1], axis=1)], axis=1)

# filter the boundary points on hair
nbrs = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(points_index)
dist, idx = nbrs.kneighbors(boundary_ind)
dist, idx = dist.squeeze(1), idx.squeeze(1)
match_boundary_ind = idx[dist<2]
_, tmp = np.unique(match_boundary_ind, return_index=True)
hair_boundary_ind = match_boundary_ind[np.sort(tmp)]

# find the boundary between face and hair on face in 3d space
hair_boundary = hair_shape[hair_boundary_ind[:]].copy()
nbrs3d = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(face_shape)
dist3d, idx3d = nbrs3d.kneighbors(hair_boundary)
dist3d, idx3d = dist3d.squeeze(1), idx3d.squeeze(1)
match_boundary_ind = np.concatenate([idx3d, border])
_, tmp = np.unique(match_boundary_ind, return_index=True)
face_boundary_ind = match_boundary_ind[np.sort(tmp)]

return hair_boundary_ind, face_boundary_ind

def construct_triangle(hair_xyz, hair_texture, face_xyz, face_texture, hb_ind, fb_ind):
xyz = np.concatenate([face_xyz[fb_ind[:]], hair_xyz[hb_ind[:]]], axis=0)
texture = np.concatenate([face_texture[fb_ind[:]], hair_texture[hb_ind[:]]], axis=0)
tri = Delaunay(xyz[:, :2])
tri_list = tri.simplices.copy()
tri_list = filter_tri(tri_list, xyz, thres=0.05) + 1
return xyz, texture, tri_list
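
A minimal usage sketch for the helpers above, using a small planar grid as stand-in data; the array names and sizes here are illustrative only and are not part of the repository.

import numpy as np

# Stand-in vertices: a 10x10 planar grid for the "face" and a shifted copy for the "hair".
xs, ys = np.meshgrid(np.arange(10, dtype=np.float32), np.arange(10, dtype=np.float32))
face_xyz = np.stack([xs.ravel(), ys.ravel(), np.zeros(100, np.float32)], axis=1) * 0.01
hair_xyz = face_xyz + np.array([0.1, 0.0, 0.0], dtype=np.float32)
face_tex = np.ones_like(face_xyz)   # dummy per-vertex colors
hair_tex = np.ones_like(hair_xyz)
fb_ind = np.arange(100)             # pretend every vertex lies on the boundary
hb_ind = np.arange(100)

# Triangulate the stitched boundary region, then pad to the fixed sizes the renderer expects.
xyz, tex, tri_list = construct_triangle(hair_xyz, hair_tex, face_xyz, face_tex, hb_ind, fb_ind)
points, tris = padding_tri(xyz[:, :2] * 100, tri_list)
print(xyz.shape, tri_list.shape, points.shape, tris.shape)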
30 changes: 30 additions & 0 deletions utils/create_renderer.py
@@ -0,0 +1,30 @@
import tensorflow as tf
from .render.mesh_renderer import mesh_renderer
import numpy as np

#######################################################################################
# Auxiliary functions for creating the renderer
# the mesh_renderer is modified from https://github.com/google/tf_mesh_renderer
#######################################################################################


def create_renderer_graph(v_num=35709, t_num=70789, img_size=256):
with tf.Graph().as_default() as graph:
focal = tf.placeholder(dtype=tf.float32, shape=[1])
center = tf.placeholder(dtype=tf.float32, shape=[1, 1, 2])
depth = tf.placeholder(dtype=tf.float32, shape=[1, v_num, 3])
vertex = tf.placeholder(dtype=tf.float32, shape=[1, v_num, 3])
tri = tf.placeholder(dtype=tf.int32, shape=[1, t_num, 3])
fov_y = 2 * tf.atan2(img_size//2 * tf.ones_like(focal), focal) / np.pi * 180
delta_center = tf.concat([(center - img_size//2)/(img_size//2), tf.zeros([center.shape[0], 1, 1])], axis=-1)
camera_position = tf.constant([0, 0, 10.0])
camera_lookat = tf.constant([0, 0, 0.0])
camera_up = tf.constant([0, 1.0, 0])
light_positions = tf.reshape(tf.constant([0, 0, 1e5]), [1, 1, 3])
light_intensities = tf.zeros([1, 1, 3])
depthmap = mesh_renderer(vertex, tri, tf.zeros_like(vertex), depth,
camera_position=camera_position, camera_lookat=camera_lookat, camera_up=camera_up,
light_positions=light_positions, light_intensities=light_intensities,
image_width=img_size,image_height=img_size,
fov_y=fov_y, far_clip=30.0, ambient_color=tf.ones([1, 3]), delta_center=delta_center)
return graph, focal, center, depth, vertex, tri, depthmap
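
A hedged driver sketch for the graph above, run in a TF1-style session; the fed values are dummy tensors meant only to show the expected placeholder shapes, not real reconstruction inputs.

import numpy as np
import tensorflow as tf

graph, focal, center, depth, vertex, tri, depthmap = create_renderer_graph(v_num=35709, t_num=70789, img_size=256)
with tf.Session(graph=graph) as sess:
    out = sess.run(depthmap, feed_dict={
        focal: np.array([1015.0], np.float32),           # assumed focal length in pixels
        center: np.full([1, 1, 2], 128.0, np.float32),   # principal point near img_size // 2
        depth: np.zeros([1, 35709, 3], np.float32),      # per-vertex values rendered as "color"
        vertex: np.zeros([1, 35709, 3], np.float32),     # dummy camera-space vertices
        tri: np.ones([1, 70789, 3], np.int32),           # dummy triangle indices
    })
print(out.shape)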
24 changes: 24 additions & 0 deletions utils/face_parsing.py
@@ -0,0 +1,24 @@
import numpy as np
from scipy.io import loadmat

#######################################################################################
# Auxiliary functions for face segmentation
# for face parsing, please refer to https://arxiv.org/pdf/1906.01342.pdf
#######################################################################################


def faceparsing():
    # return a label map with 5 classes (plus an optional inner-mouth class):
    # 0: background  1: face  2: hair  3: left ear  4: right ear  5 (optional): inner mouth
return NotImplemented

def split_segmask(mask):
face_mask, hairear_mask, mouth_mask = np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask)
face_mask[mask==1] = 1
face_mask[mask==5] = 1
hairear_mask[mask==2] = 1
hairear_mask[mask==3] = 1
hairear_mask[mask==4] = 1
mouth_mask[mask==5] = 1
return face_mask, hairear_mask, mouth_mask
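
A small sketch of split_segmask on a toy label map containing the classes listed above; the values are illustrative only.

import numpy as np

seg = np.array([[0, 1, 1],
                [2, 3, 4],
                [5, 0, 0]], dtype=np.int32)
face_mask, hairear_mask, mouth_mask = split_segmask(seg)
print(face_mask)     # 1 where seg is 1 (face) or 5 (inner mouth)
print(hairear_mask)  # 1 where seg is 2, 3, or 4 (hair and ears)
print(mouth_mask)    # 1 where seg is 5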

149 changes: 149 additions & 0 deletions utils/loader.py
@@ -0,0 +1,149 @@
import os
import numpy as np
from scipy.io import loadmat, savemat
import cv2
from array import array

#######################################################################################
# Auxiliary functions for loading data
# Partially adapted/modified from https://github.com/microsoft/Deep3DFaceReconstruction
#######################################################################################


def load_data(im_path, txt_path):
return cv2.imread(im_path), np.loadtxt(txt_path)

def load_center3d(path='BFM/center.mat'):
return loadmat(path)['head_center'].reshape([1, 1, 3])

def load_lm3d(nums=5):
Lm3D = loadmat('BFM/similarity_Lm3D_all.mat')
Lm3D = Lm3D['lm']

# calculate 5 facial landmarks using 68 landmarks
if nums == 5:
lm_idx = np.array([31,37,40,43,46,49,55]) - 1
Lm3D = np.stack([Lm3D[lm_idx[0],:],np.mean(Lm3D[lm_idx[[1,2]],:],0),np.mean(Lm3D[lm_idx[[3,4]],:],0),Lm3D[lm_idx[5],:],Lm3D[lm_idx[6],:]], axis = 0)
Lm3D = Lm3D[[1,2,0,3,4],:]
elif nums == 10:
lm_idx = np.array([1, 2, 16, 17, 31, 34, 37, 46, 49, 55]) - 1
Lm3D = Lm3D[lm_idx, :]
else:
return None
return Lm3D

def load_mask(path):
mask = loadmat(path)['mask']
return mask

def load_boundary_ind(path='BFM/border_index.mat'):
return loadmat(path)['border_index'].squeeze(1)

def read_facemodel(facemodel_pth='BFM/BFM_model_front.mat'):
if not os.path.isfile(facemodel_pth):
transferBFM09(os.path.dirname(facemodel_pth))
facemodel = BFM(facemodel_pth)
return facemodel

# define facemodel for reconstruction
class BFM():
def __init__(self, model_path):
model = loadmat(model_path)
self.meanshape = model['meanshape'] # mean face shape
self.idBase = model['idBase'] # identity basis
self.exBase = model['exBase'] # expression basis
self.meantex = model['meantex'] # mean face texture
self.texBase = model['texBase'] # texture basis
self.point_buf = model['point_buf'] # adjacent face index for each vertex, starts from 1 (only used for calculating face normal)
self.tri = model['tri'] # vertex index for each triangle face, starts from 1
self.keypoints = np.squeeze(model['keypoints']).astype(np.int32) - 1 # 68 face landmark index, starts from 0

# load expression basis
def LoadExpBasis():
n_vertex = 53215
Expbin = open('BFM/Exp_Pca.bin','rb')
exp_dim = array('i')
exp_dim.fromfile(Expbin,1)
expMU = array('f')
expPC = array('f')
expMU.fromfile(Expbin,3*n_vertex)
    expPC.fromfile(Expbin,3*exp_dim[0]*n_vertex)
    Expbin.close()

expPC = np.array(expPC)
expPC = np.reshape(expPC,[exp_dim[0],-1])
expPC = np.transpose(expPC)

expEV = np.loadtxt('BFM/std_exp.txt')

return expPC,expEV

# transfer original BFM09 to our face model
def transferBFM09(path):
original_BFM = loadmat(os.path.join(path, '01_MorphableModel.mat'))
shapePC = original_BFM['shapePC'] # shape basis
shapeEV = original_BFM['shapeEV'] # corresponding eigen value
shapeMU = original_BFM['shapeMU'] # mean face
texPC = original_BFM['texPC'] # texture basis
texEV = original_BFM['texEV'] # eigen value
texMU = original_BFM['texMU'] # mean texture

expPC,expEV = LoadExpBasis()

# transfer BFM09 to face model

idBase = shapePC*np.reshape(shapeEV,[-1,199])
idBase = idBase/1e5 # unify the scale to decimeter
    idBase = idBase[:,:80] # use only the first 80 bases

exBase = expPC*np.reshape(expEV,[-1,79])
exBase = exBase/1e5 # unify the scale to decimeter
    exBase = exBase[:,:64] # use only the first 64 bases

texBase = texPC*np.reshape(texEV,[-1,199])
    texBase = texBase[:,:80] # use only the first 80 bases

    # our face model is cropped and aligned with face landmarks and contains only 35709 vertices.
    # the original BFM09 contains 53490 vertices, and the expression basis provided by JuYong contains 53215 vertices.
# thus we select corresponding vertex to get our face model.

index_exp = loadmat(os.path.join(path,'BFM_front_idx.mat'))
index_exp = index_exp['idx'].astype(np.int32) - 1 #starts from 0 (to 53215)

index_shape = loadmat(os.path.join(path,'BFM_exp_idx.mat'))
index_shape = index_shape['trimIndex'].astype(np.int32) - 1 #starts from 0 (to 53490)
index_shape = index_shape[index_exp]


idBase = np.reshape(idBase,[-1,3,80])
idBase = idBase[index_shape,:,:]
idBase = np.reshape(idBase,[-1,80])

texBase = np.reshape(texBase,[-1,3,80])
texBase = texBase[index_shape,:,:]
texBase = np.reshape(texBase,[-1,80])

exBase = np.reshape(exBase,[-1,3,64])
exBase = exBase[index_exp,:,:]
exBase = np.reshape(exBase,[-1,64])

meanshape = np.reshape(shapeMU,[-1,3])/1e5
meanshape = meanshape[index_shape,:]
meanshape = np.reshape(meanshape,[1,-1])

meantex = np.reshape(texMU,[-1,3])
meantex = meantex[index_shape,:]
meantex = np.reshape(meantex,[1,-1])

    # other info contains triangles, the region used for computing photometric loss,
    # the region used for skin texture regularization, the 68 landmark indices, etc.
other_info = loadmat(os.path.join(path,'facemodel_info.mat'))
frontmask2_idx = other_info['frontmask2_idx']
skinmask = other_info['skinmask']
keypoints = other_info['keypoints']
point_buf = other_info['point_buf']
tri = other_info['tri']
tri_mask2 = other_info['tri_mask2']

# save our face model
savemat(os.path.join(path,'BFM_model_front.mat'),{'meanshape':meanshape,'meantex':meantex,'idBase':idBase,'exBase':exBase,'texBase':texBase,'tri':tri,'point_buf':point_buf,'tri_mask2':tri_mask2\
,'keypoints':keypoints,'frontmask2_idx':frontmask2_idx,'skinmask':skinmask})
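
A hypothetical sanity check of the converted model, assuming BFM_model_front.mat (or the source BFM09 files needed to build it) is present under BFM/; the coefficient sizes follow the basis dimensions above (80 identity, 64 expression).

import numpy as np

facemodel = read_facemodel('BFM/BFM_model_front.mat')
id_coef = np.zeros([80, 1], np.float32)   # all-zero coefficients reproduce the mean face
ex_coef = np.zeros([64, 1], np.float32)
shape = facemodel.meanshape.reshape([-1, 1]) \
      + facemodel.idBase.dot(id_coef) \
      + facemodel.exBase.dot(ex_coef)
shape = shape.reshape([-1, 3])            # 35709 x 3 vertices
print(shape.shape, facemodel.tri.shape)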