Commit 67f1b14e authored by Richard Torenvliet's avatar Richard Torenvliet

Add dlib as a capability

parent 71eee9a5
......@@ -6,33 +6,48 @@ data/imm_face_db: data/imm_face_db.tar.gz
tar -xvzf imm_face_db.tar.gz -C imm_face_db
)
train_model:train_shape train_texture
train_texture: data/pca_texture_model.npy
train_shape: data/pca_shape_model.npy
train_model: train_shape train_texture
train_texture: data/pca_imm_texture_model.npy data/pca_ibug_texture_model.npy
train_shape: data/pca_imm_shape_model.npy data/pca_ibug_shape_model.npy
data/imm_face_db.tar.gz:
(cd data; wget http://www.imm.dtu.dk/~aam/datasets/imm_face_db.tar.gz)
data/pca_shape_model.npy:
data/pca_imm_shape_model.npy:
python src/main.py \
--save_pca_shape \
--files `./scripts/imm_train_set.sh` \
--model_shape_file data/pca_shape_model \
--model_shape_file data/pca_imm_shape_model \
--shape_type imm
data/pca_texture_model.npy:
data/pca_ibug_shape_model.npy:
python src/main.py \
--save_pca_shape \
--files `./scripts/ibug_train_set.sh` \
--model_shape_file data/pca_ibug_shape_model \
--shape_type ibug
data/pca_ibug_texture_model.npy:
python src/main.py \
--save_pca_texture \
--files `./scripts/ibug_train_set.sh` \
--model_texture_file data/pca_ibug_texture_model \
--model_shape_file data/pca_ibug_shape_model.npy \
--shape_type ibug
data/pca_imm_texture_model.npy:
python src/main.py \
--save_pca_texture \
--files `./scripts/imm_train_set.sh` \
--model_texture_file data/pca_texture_model \
--model_shape_file data/pca_shape_model.npy \
--model_texture_file data/pca_imm_texture_model \
--model_shape_file data/pca_imm_shape_model.npy \
--shape_type imm
test_model:
python src/main.py \
--reconstruct \
--files `./scripts/imm_test_set.sh` \
--model_texture_file data/pca_texture_model \
--model_texture_file data/pca_imm_texture_model \
--model_shape_file data/pca_imm_shape_model.npy \
--n_components 6
......@@ -40,16 +55,24 @@ show_reconstruction:
python src/main.py \
--reconstruct \
--files data/imm_face_db/*.asf \
--model_texture_file data/pca_texture_model.npy \
--model_shape_file data/pca_shape_model.npy \
--model_texture_file data/pca_imm_texture_model.npy \
--model_shape_file data/pca_imm_shape_model.npy \
--shape_type imm \
--n_components 6
show_ibug:
python src/main.py \
--reconstruct \
--files data/imm_face_db/*.jpg \
--model_texture_file data/pca_ibug_texture_model.npy \
--model_shape_file data/pca_ibug_shape_model.npy \
--shape_type ibug
profile_reconstruction:
python -m cProfile src/main.py \
--reconstruct \
--files data/imm_face_db/*.asf \
--model_texture_file data/pca_texture_model.npy \
--model_texture_file data/pca_imm_texture_model.npy \
--model_shape_file data/pca_imm_shape_model.npy \
--shape_type imm \
--n_components 6
......@@ -58,11 +81,17 @@ graph_reconstruction:
python ./src/main.py \
--generate_call_graph \
--files data/imm_face_db/*.asf \
--model_texture_file data/pca_texture_model.npy \
--model_texture_file data/pca_imm_texture_model.npy \
--model_shape_file data/pca_imm_shape_model.npy \
--shape_type imm \
--n_components 6
test_landmarks:
./src/main.py \
--test_landmarks \
--image data/test_data/lenna.jpg
.PHONY: test
test:
python -m py.test -f src/test/*_test.py
......
......@@ -3,16 +3,19 @@ PYTHON := python2.7
PYTHON_BIN_PATH := /usr/local/bin/$(PYTHON)
SITE_PACKAGES := $(VIRTUALENV)/lib/$(PYTHON)/site-packages
OPENCV:= $(SITE_PACKAGES)/cv.py $(SITE_PACKAGES)/cv2.so
OPENCV := $(SITE_PACKAGES)/cv.py $(SITE_PACKAGES)/cv2.so
TARGETS := $(OPENCV) $(VIRTUALENV) data reconstruction
include build.mk
TARGETS:= $(OPENCV) $(VIRTUALENV) data reconstruction
all: $(TARGETS)
include actions.mk
include src/reconstruction/build.mk
data: data/imm_face_db
reconstruction: texture.so src/reconstruction/texture_halide
reconstruction: texture.so
OS := $(shell uname)
......
files=`ls data/imm_face_db/ | grep -E "^[0-3][0-9].*.jpg"`
for f in $files; do
echo "data/imm_face_db/$f"
done
......@@ -228,7 +228,8 @@ def sample_from_triangles(src, points2d_src, points2d_dst, triangles, dst):
)
def build_texture_feature_vectors(files, get_image_with_points, mean_points, triangles):
def build_texture_feature_vectors(
files, get_image_with_points, mean_points, triangles):
"""
Args:
files (list): list of files
......@@ -242,15 +243,17 @@ def build_texture_feature_vectors(files, get_image_with_points, mean_points, tri
mean_texture = []
image, points = get_image_with_points(files[0])
mean_points.get_scaled_points(image.shape) # improve this, see issue #1
mean_points.get_scaled_points(image.shape) # improve this, see issue #1
actual_shape = mean_points.actual_shape
x, y, w_slice, h_slice = mean_points.get_bounding_box()
for i, f in enumerate(files):
for i, f in enumerate(files[:10]):
image, points = get_image_with_points(f)
Points = AAMPoints(
normalized_flattened_points_list=points,
actual_shape=(58, 2)
actual_shape=actual_shape
)
# empty colored image
......
"""
.. module:: datasets
:synopsis: Contains ibug dataset abstraction layer
"""
from time import time
import cv2
import numpy as np
import aam
import landmarks
from settings import logger
class IBUGPoints(aam.AAMPoints):
"""IBUG datapoints abstraction"""
SHAPE = (68, 2)
def __init__(self, filename=None, points_list=None):
"""
Args:
filename: optional image file
points_list: optional list of x,y points
"""
assert filename is not None or points_list is not None, \
'either filename or an ndarray of points should be given'
self.filename = filename
if self.filename:
self.__get_image()
self.detector = landmarks.Detector()
points_list = self.detector.detect_shape(self.image)[0]
points_list = np.asarray(points_list, dtype=np.float32)
# normalize the points by dividing by the image width and height
points_list[:, 0] /= self.image.shape[1]
points_list[:, 1] /= self.image.shape[0]
aam.AAMPoints.__init__(
self, normalized_flattened_points_list=points_list.flatten(),
actual_shape=self.SHAPE
)
def get_points(self):
"""
Get the flattened list of points
Returns:
ndarray. flattened array of points, see AAMPoints for more
information.
"""
return self.normalized_flattened_points_list
def __get_image(self):
"""
Get the image corresponding to the self.filename
Returns:
ndarray image
"""
assert hasattr(self, 'filename'), \
'filename should be set before an image can be read'
self.image = cv2.imread(self.filename)
def get_image(self):
"""
Get the image corresponding to the filename
For this dataset the filename is the image file itself, so we simply
return the image that was loaded from disk.
Returns:
ndarray image
"""
return self.image
def show_on_image(self, image, window_name='image', multiply=True):
self.draw_triangles(image, self.points_list, multiply=multiply)
def show(self, window_name='image'):
"""show the image and datapoints on the image"""
assert(len(self.points_list) > 0)
assert(len(self.filename) > 0)
image = self.get_image()
self.draw_triangles(image, self.points_list)
def factory(**kwargs):
"""
Returns an instance of the dataset aam extending class
Note that all dataset implementations (in this folder) need to have this
function to enable transparent use of different datasets throughout this
project. The reason for this is that we don't want to worry about differing
amounts or locations of landmarks; we just want to use them.
"""
return IBUGPoints(**kwargs)
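A minimal sketch of how this factory is meant to be consumed elsewhere in the project (a standalone example, not part of the diff; it assumes the dlib predictor data at data/shape_predictor_68_face_landmarks.dat is present and the image path is only illustrative):
import importlib
# Resolve a dataset implementation by name and build an AAMPoints instance
# without knowing which concrete class backs it.
dataset_module = importlib.import_module('datasets.ibug')
points = dataset_module.factory(filename='data/imm_face_db/01-1m.jpg')  # example path
landmark_vector = points.get_points()  # normalized, flattened 68x2 landmarks
Swapping 'ibug' for 'imm' gives the same interface, backed by the .asf annotations instead of dlib detection.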
def get_points(files):
"""
Args:
files (list): list of image file paths to process
Returns:
ndarray. Array of landmarks.
"""
points = []
total_files = len(files)
for i, filename in enumerate(files[:10]):
t1 = time()
ibug = IBUGPoints(filename=filename)
points.append(ibug.get_points())
logger.debug('processed %s %f, %d/%d', filename, time() - t1, i, total_files)
return np.asarray(points)
def get_image_with_landmarks(filename):
"""
Get Points with image and landmarks/points
Args:
filename (str): full path to an image file
Returns:
image, points
"""
ibug = IBUGPoints(filename=filename)
return ibug.get_image(), ibug.get_points()
"""
.. module:: datasets
:platform: Unix, Windows
:synopsis: Contains imm dataset abstraction layer
"""
from matplotlib.tri import Triangulation
import cv2
import numpy as np
import argparse
......@@ -17,6 +13,8 @@ import aam
class IMMPoints(aam.AAMPoints):
"""Accepts IMM datapoint file which can be shown or used"""
SHAPE = (58, 2)
def __init__(self, filename=None, points_list=None):
"""
......@@ -34,7 +32,7 @@ class IMMPoints(aam.AAMPoints):
aam.AAMPoints.__init__(
self, normalized_flattened_points_list=points_list.flatten(),
actual_shape=(58, 2)
actual_shape=self.SHAPE
)
def get_points(self):
......@@ -106,10 +104,23 @@ class IMMPoints(aam.AAMPoints):
self.draw_triangles(image, self.points_list)
def get_imm_points(files):
# TODO: move this to a shared location such that all dataset implementations
# return an instance of themselves when this function is invoked.
def factory(**kwargs):
"""
This function does something.
Returns an instance of the dataset aam extending class
Note that all dataset implementations (in this folder) need to have this
function to enable transparent use of different datasets throughout this
project. The reason for this is that we don't want to worry about differing
amounts or locations of landmarks; we just want to use them.
"""
return IMMPoints(**kwargs)
def get_points(files):
"""
Args:
files (list): full or relative paths to .asf files.
......@@ -126,7 +137,7 @@ def get_imm_points(files):
return np.asarray(points)
def get_imm_image_with_landmarks(filename):
def get_image_with_landmarks(filename):
"""
Get Points with image and landmarks/points
......@@ -138,22 +149,3 @@ def get_imm_image_with_landmarks(filename):
"""
imm = IMMPoints(filename=filename)
return imm.get_image(), imm.get_points()
def add_parser_options():
parser = argparse.ArgumentParser(description='IMMPoints tool')
# asf files
parser.add_argument(
'asf', type=str, nargs='+', help='asf files to process'
)
return parser
if __name__ == '__main__':
parser = add_parser_options()
args = parser.parse_args()
for f in args.asf:
imm = IMMPoints(f)
imm.show()
import argparse
SUPPORTED_DATASETS = ['imm', 'ibug']
def get_argument_parser():
parser = argparse.ArgumentParser(description='AAM tool')
pca_group = parser.add_argument_group('show_reconstruction')
pca_group.add_argument(
'--reconstruct', action='store_true',
help='Reconstruct one face with a given pca model'
)
pca_group.add_argument(
'--test_landmarks', action='store_true',
help='Test landmark detection of dlib using a test image'
)
pca_group.add_argument(
'--image', type=str,
help='Use this file as an image, can be used with different commands'
)
pca_group.add_argument(
'--generate_call_graph', action='store_true',
help='Generate call graph from the reconstruction'
)
pca_group.add_argument(
'--save_pca_shape', action='store_true',
help='save the pca shape model'
)
pca_group.add_argument(
'--save_pca_texture', action='store_true',
help='save the pca texture model'
)
pca_group.add_argument(
'--files', nargs='+', help='files to process'
)
pca_group.add_argument(
'--n_components', default=10, type=int,
help='number of principal components to keep and be able to manipulate'
)
pca_group.add_argument(
'--model_shape_file', type=str,
help='pca model file that contains or is going to contain the pca shape model'
)
pca_group.add_argument(
'--shape_type', type=str, choices=SUPPORTED_DATASETS,
help='type of shape, annotated dataset'
)
pca_group.add_argument(
'--model_texture_file', type=str,
help='pca model file that contains or is going to contain the pca texture model'
)
return parser
import cv2
import dlib
from settings import LANDMARK_DETECTOR_PATH
def test_detect(image):
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(LANDMARK_DETECTOR_PATH)
dets = detector(image, 1)
for k, d in enumerate(dets):
print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
k, d.left(), d.top(), d.right(), d.bottom()))
shape = predictor(image, d)
cv2.rectangle(
image,
(d.left(), d.top()),
(d.right(), d.bottom()),
[255, 0, 0],
thickness=2
)
for i, p in enumerate(shape.parts()):
cv2.circle(image, tuple((p.x, p.y)), 3, color=(0, 255, 100))
cv2.imshow('lenna', image)
cv2.imwrite('data/out.jpg', image)
class Detector():
def __init__(self):
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor(LANDMARK_DETECTOR_PATH)
def detect_faces(self, image):
# The 1 in the second argument indicates that we should upsample the
# image 1 time. This will make everything bigger and allow us to
# detect more faces.
return self.detector(image, 1)
def detect_shape(self, image):
detections = self.detect_faces(image)
all_points = []
for k, d in enumerate(detections):
points_list = []
shape = self.predictor(image, d)
for p in shape.parts():
points_list.append([p.x, p.y])
all_points.append(points_list)
return all_points
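A rough usage sketch for the detector above (standalone, not part of this commit; the input path is borrowed from the test_landmarks Makefile target):
import cv2
import landmarks
# Run dlib face detection plus the 68-point shape predictor and draw the
# resulting landmarks; detect_shape returns one [x, y] list per detected face.
image = cv2.imread('data/test_data/lenna.jpg')
detector = landmarks.Detector()
for face_points in detector.detect_shape(image):
    for x, y in face_points:
        cv2.circle(image, (x, y), 3, color=(0, 255, 100))
cv2.imwrite('data/out.jpg', image)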
#!/usr/local/bin/python
# python std
import argparse
import importlib
import copy
# installed packages
import cv2
import numpy as np
# local imports
import pca
import aam
from reconstruction import reconstruction
from settings import logger
def add_parser_options():
parser = argparse.ArgumentParser(description='IMMPoints tool')
pca_group = parser.add_argument_group('show_reconstruction')
pca_group.add_argument(
'--reconstruct', action='store_true',
help='Reconstruct one face with a given pca model'
)
pca_group.add_argument(
'--generate_call_graph', action='store_true',
help='Generate call graph from the reconstruction'
)
pca_group.add_argument(
'--save_pca_shape', action='store_true',
help='save the pca shape model'
)
pca_group.add_argument(
'--save_pca_texture', action='store_true',
help='save the pca texture model'
)
pca_group.add_argument(
'--files', nargs='+', help='files to process'
)
pca_group.add_argument(
'--n_components', default=10, type=int,
help='number of principal components to keep and be able to manipulate'
)
pca_group.add_argument(
'--model_shape_file', type=str,
help='pca model file that contains or is going to contain the pca shape model'
)
pca_group.add_argument(
'--shape_type', type=str, choices=['imm'],
help='type of shape, annotated dataset'
)
pca_group.add_argument(
'--model_texture_file', type=str,
help='pca model file that contains or is going to contain the pca texture model'
)
return parser
def import_dataset_module(shape_type):
"""
Imports the dataset implementation that belongs to the given shape type;
see --help for the available options.
Args:
shape_type(string): Name of the python file inside the
`src/datasets` folder.
"""
return importlib.import_module('datasets.{}'.format(shape_type))
from input_parser import get_argument_parser
from utility import import_dataset_module
def save_pca_model_texture(args):
......@@ -102,13 +36,14 @@ def save_pca_model_texture(args):
assert args.model_shape_file, '--model_shape_file needs to be provided to save the pca model'
assert args.shape_type, '--shape_type needs to be provided, see the datasets module'
dataset_module = import_dataset_module(args.shape_type)
shape_model = pca.PCAModel(args.model_shape_file)
mean_points = dataset_module.IMMPoints(points_list=shape_model.mean_values)
dataset_module = import_dataset_module(args.shape_type)
mean_points = dataset_module.factory(points_list=shape_model.mean_values)
textures = aam.build_texture_feature_vectors(
args.files,
dataset_module.get_imm_image_with_landmarks, # function
dataset_module.get_image_with_landmarks, # function
mean_points,
shape_model.triangles
)
......@@ -145,7 +80,7 @@ def save_pca_model_shape(args):
dataset_module = import_dataset_module(args.shape_type)
points = aam.build_shape_feature_vectors(
args.files, dataset_module.get_imm_points, flattened=True
args.files, dataset_module.get_points, flattened=True
)
mean_values = aam.get_mean(points)
......@@ -194,35 +129,33 @@ def show_reconstruction(args):
assert args.model_shape_file, '--model_shape_file needs to be provided to reconstruct'
assert args.model_texture_file, '--model_texture_file needs to be provided to reconstruct'
assert args.shape_type, '--shape_type needs to be provided, see the datasets module'
assert args.files, '--files should be given'
dataset_module = import_dataset_module(args.shape_type)
shape_model = pca.PCAModel(args.model_shape_file)
texture_model = pca.PCAModel(args.model_texture_file)
input_points = dataset_module.IMMPoints(
filename='data/imm_face_db/01-1m.asf'
)
dataset_module = import_dataset_module(args.shape_type)
mean_points = dataset_module.factory(points_list=shape_model.mean_values)
input_image = input_points.get_image()
shape_eigenvalues_multiplier = np.ones(5, dtype=np.float32)
mean_points = dataset_module.IMMPoints(points_list=shape_model.mean_values)
mean_points.get_scaled_points(input_image.shape)
for face in args.files:
input_points = dataset_module.factory(filename=face)
input_image = input_points.get_image()
n_components = 58
count = 0
shape_eigenvalues_multiplier = np.ones(15, dtype=np.float32)
mean_points.get_scaled_points(input_image.shape)
while True:
input_image_copy = input_image.copy()
input_points_copy = copy.deepcopy(input_points)
output_points = dataset_module.IMMPoints(
output_points = dataset_module.factory(
points_list=input_points.get_points()
)
# scale the eigenvalues by scaling the Vt matrix
shape_Vt = shape_model.Vt
shape_Vt = reconstruction.scale_eigenvalues(
shape_Vt, shape_eigenvalues_multiplier
)
......@@ -231,8 +164,7 @@ def show_reconstruction(args):
reconstruction.reconstruct_shape(
output_points,
shape_model,
shape_Vt=shape_Vt, # overwrite by scaled Vt
n_components=n_components - count
shape_Vt=shape_Vt # overwrite by scaled Vt
)
# use the new shape and mean points to reconstruct
......@@ -252,15 +184,26 @@ def show_reconstruction(args):
mean_points, texture_model.mean_values
)
#count += 2
shape_eigenvalues_multiplier[0] += 0.1
cv2.imshow('dst', input_image_copy)
k = cv2.waitKey(0) & 0xff
if k == 27:
break
cv2.destroyAllWindows()
def test_landmarks(args):
import landmarks
filename = args.image
image = cv2.imread(filename)
landmarks.test_detect(image)
def main():
"""main"""
parser = add_parser_options()
parser = get_argument_parser()
args = parser.parse_args()
if args.save_pca_shape:
......@@ -271,6 +214,8 @@ def main():
show_reconstruction(args)
elif args.generate_call_graph:
generate_call_graph(args)
elif args.test_landmarks:
test_landmarks(args)
if __name__ == '__main__':
main()
......@@ -133,7 +133,7 @@ def pca(data, mean_values, variance_percentage=90):
i += 1
n_components = i
logger.debug('%s components form %s of the variance', n_components, variance_percentage)
logger.debug('%s components form %s%% of the variance', n_components, variance_percentage)
return U, s, Vt, n_components
......
......@@ -3,7 +3,6 @@ import numpy as np
import pca
import aam
from .texture import fill_triangle_src_dst
def cartesian2barycentric(r1, r2, r3, r):
......@@ -78,7 +77,7 @@ def get_texture(Points, flattened_texture):
def scale_eigenvalues(Vt, multiplier_array):
multipliers = np.ones(Vt.shape[1], dtype=np.float32)
multipliers = np.ones(Vt.shape[0], dtype=np.float32)
multipliers[:len(multiplier_array)] = multiplier_array
Vt = np.dot(np.diag(multipliers), Vt)
......
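As a standalone toy example (not part of the diff) of what scale_eigenvalues achieves: left-multiplying Vt by a diagonal matrix scales individual rows (eigenvectors), which is how the reconstruction loop exaggerates the first shape mode one step at a time.
import numpy as np
# Only the first eigenvector (row of Vt) is doubled; the others are untouched.
Vt = np.arange(6, dtype=np.float32).reshape(3, 2)
multipliers = np.ones(Vt.shape[0], dtype=np.float32)
multipliers[0] = 2.0
scaled_Vt = np.dot(np.diag(multipliers), Vt)
assert np.allclose(scaled_Vt[0], 2 * Vt[0]) and np.allclose(scaled_Vt[1:], Vt[1:])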
......@@ -59,11 +59,13 @@ cdef inline cartesian2barycentric(
ndarray (of dim 3) weights of the barycentric coordinates
"""
lambdas[0] = ((y_2 - y_3) * (x - x_3) + (x_3 - x_2) * (y - y_3)) / \
((y_2 - y_3) * (x_1 - x_3) + (x_3 - x_2) * (y_1 - y_3))
cdef float cross_2 = ((y_2 - y_3) * (x_1 - x_3) + (x_3 - x_2) * (y_1 - y_3))
lambdas[1] = ((y_3 - y_1) * (x - x_3) + (x_1 - x_3) * (y - y_3)) / \
((y_2 - y_3) * (x_1 - x_3) + (x_3 - x_2) * (y_1 - y_3))
if (cross_2 <= 0.0):
cross_2 = 0.01
lambdas[0] = ((y_2 - y_3) * (x - x_3) + (x_3 - x_2) * (y - y_3)) / cross_2
lambdas[1] = ((y_3 - y_1) * (x - x_3) + (x_1 - x_3) * (y - y_3)) / cross_2
lambdas[2] = 1 - lambdas[0] - lambdas[1]
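For reference, cross_2 guarded above is twice the signed area of the triangle, and the weights computed here are the standard barycentric coordinates of the point (x, y) with respect to the triangle (x_1, y_1), (x_2, y_2), (x_3, y_3); a sketch of the formulas being implemented:
\lambda_1 = \frac{(y_2 - y_3)(x - x_3) + (x_3 - x_2)(y - y_3)}{(y_2 - y_3)(x_1 - x_3) + (x_3 - x_2)(y_1 - y_3)}, \quad
\lambda_2 = \frac{(y_3 - y_1)(x - x_3) + (x_1 - x_3)(y - y_3)}{(y_2 - y_3)(x_1 - x_3) + (x_3 - x_2)(y_1 - y_3)}, \quad
\lambda_3 = 1 - \lambda_1 - \lambda_2
so clamping the denominator to 0.01 only takes effect for degenerate (near zero area or negatively oriented) triangles.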
......@@ -174,12 +176,11 @@ def fill_triangle_src_dst(np.ndarray[unsigned char, ndim=3] src,
cdef int x_max = np.argmax(triangle_x)
cdef int y_min = np.argmin(triangle_y)
cdef int y_max = np.argmax(triangle_y)
###
# walk over the x and y values of this bounding box and check whether
# each pixel falls inside or outside the triangle
for y in xrange(triangle_y[y_min], triangle_y[y_max]):
for x in xrange(triangle_x[x_min], triangle_x[x_max]):
for y in xrange(triangle_y[y_min] + 1, triangle_y[y_max] - 1):
for x in xrange(triangle_x[x_min] + 1, triangle_x[x_max] - 1):
cartesian2barycentric(
triangle_x[0], triangle_y[0],
triangle_x[1], triangle_y[1],
......
......@@ -11,6 +11,7 @@ import pca
from datasets import imm
from reconstruction import reconstruction
from settings import logger
from utility import import_dataset_module
BASE = '../viewer/app'
FILES_DIR = '../data/'
......@@ -71,10 +72,11 @@ class ImageWebSocketHandler(websocket.WebSocketHandler):
asf_filename = self.asf[image_index]
dataset_module = import_dataset_module(args.shape_type)
input_points = imm.IMMPoints(filename=asf_filename)
input_image = input_points.get_image()
mean_points = imm.IMMPoints(points_list=self.shape_model.mean_values)
mean_points = dataset_module.factory(points_list=self.shape_model.mean_values)
mean_points.get_scaled_points(input_image.shape)
# set dst image to an empty image if value is None
......
......@@ -8,6 +8,8 @@ import logging
import logging.config
import os
LANDMARK_DETECTOR_PATH = 'data/shape_predictor_68_face_landmarks.dat'
#logging.basicConfig(level=logging.INFO,
# format='%(asctime)s %(levelname)s %(name)s: %(message)s')
......
import importlib
def import_dataset_module(shape_type):
"""
Imports the dataset implementation that belongs to the given shape type;
see --help for the available options.
Args:
shape_type(string): Name of the python file inside the
`src/datasets` folder.
"""
return importlib.import_module('datasets.{}'.format(shape_type))
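A small sketch of the intended call site, mirroring how main.py and the webserver resolve a dataset (it assumes a trained shape model already exists; the model path below is the one produced by the Makefile's imm targets):
import pca
from utility import import_dataset_module
# 'imm' and 'ibug' are the supported shape types; both modules expose the
# same factory / get_points / get_image_with_landmarks interface.
dataset_module = import_dataset_module('imm')
shape_model = pca.PCAModel('data/pca_imm_shape_model.npy')
mean_points = dataset_module.factory(points_list=shape_model.mean_values)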