Commit be1ae552 authored by Richard Torenvliet

Add a way to alter the number of shape components used for the reconstruction.

parent 8ef44a7e
@@ -82,6 +82,30 @@ class AAMPoints():
         return self.points_list

+    def draw_triangles(self, image=None, show_points=True):
+        assert self.points_list is not None, \
+            'the points list needs to be scaled in order to work ' \
+            'correctly; this requires that get_scaled_points is ' \
+            'executed first.'
+
+        if image is None:
+            image = self.image
+
+        triangles = Triangulation(self.points_list[:, 0], self.points_list[:, 1])
+
+        for t, tri in enumerate(triangles.triangles):
+            p1, p2, p3 = self.points_list[tri]
+            cv2.line(image, tuple(p1), tuple(p2), (255, 0, 100), 1)
+            cv2.line(image, tuple(p2), tuple(p3), (255, 0, 100), 1)
+            cv2.line(image, tuple(p3), tuple(p1), (255, 0, 100), 1)
+
+        if show_points:
+            point_indices = list(range(0, max(self.actual_shape)))
+
+            for i, p in enumerate(self.points_list):
+                point_index = int(point_indices[i])
+                cv2.putText(image, str(point_index), tuple((p[0], p[1])),
+                            cv2.FONT_HERSHEY_SIMPLEX, .5, (100, 0, 255))
+                cv2.circle(image, tuple(p), 3, color=(0, 255, 100))
+
     def calculate_bounding_box(self):
         """
         Calculate bounding box in the **scaled** points list
...
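For reference, here is a self-contained sketch of the drawing logic the new `draw_triangles` method relies on: `matplotlib.tri.Triangulation` computes a Delaunay triangulation over the 2D landmarks, and OpenCV draws its edges. The random points below merely stand in for a scaled `points_list`; this is an editor's illustration, not code from the commit.

```python
import cv2
import numpy as np
from matplotlib.tri import Triangulation

image = np.zeros((240, 320, 3), dtype=np.uint8)
rng = np.random.RandomState(0)
# 20 random 2D points standing in for scaled landmarks.
points = np.column_stack([rng.randint(10, 310, 20),
                          rng.randint(10, 230, 20)])

# Delaunay triangulation; .triangles is an (ntri, 3) array of point indices.
triangles = Triangulation(points[:, 0], points[:, 1])

for tri in triangles.triangles:
    p1, p2, p3 = points[tri]
    cv2.line(image, tuple(map(int, p1)), tuple(map(int, p2)), (255, 0, 100), 1)
    cv2.line(image, tuple(map(int, p2)), tuple(map(int, p3)), (255, 0, 100), 1)
    cv2.line(image, tuple(map(int, p3)), tuple(map(int, p1)), (255, 0, 100), 1)

cv2.imwrite('triangles.png', image)
```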
@@ -92,27 +92,6 @@ class IMMPoints(aam.AAMPoints):
         return np.asarray(points_list, dtype='f')

-    def draw_triangles(self, image, points, multiply=True):
-        if multiply:
-            h, w, c = image.shape
-            points[:, 0] = points[:, 0] * w
-            points[:, 1] = points[:, 1] * h
-
-        point_indices = list(range(0, 58))
-        triangles = Triangulation(points[:, 0], points[:, 1])
-
-        for t, tri in enumerate(triangles.triangles):
-            p1, p2, p3 = points[tri]
-            cv2.line(image, tuple(p1), tuple(p2), (255, 0, 100), 1)
-            cv2.line(image, tuple(p2), tuple(p3), (255, 0, 100), 1)
-            cv2.line(image, tuple(p3), tuple(p1), (255, 0, 100), 1)
-
-        for i, p in enumerate(points):
-            point_index = int(point_indices[i])
-            cv2.putText(image, str(point_index), tuple((p[0], p[1])),
-                        cv2.FONT_HERSHEY_SIMPLEX, .5, (100, 0, 255))
-            cv2.circle(image, tuple(p), 3, color=(0, 255, 100))
-
     def show_on_image(self, image, window_name='image', multiply=True):
         self.draw_triangles(image, self.points_list, multiply=multiply)
...
@@ -2,6 +2,7 @@
 # python std
 import argparse
 import importlib
+import copy

 # installed packages
 import cv2
@@ -207,21 +208,35 @@ def show_reconstruction(args):
     mean_points = dataset_module.IMMPoints(points_list=shape_model.mean_values)
     mean_points.get_scaled_points(input_image.shape)

+    n_components = 58
+    count = 0
+
     while True:
+        input_image_copy = input_image.copy()
+        input_points_copy = copy.deepcopy(input_points)
+
+        reconstruction.reconstruct_shape(
+            input_image_copy, input_points_copy, shape_model,
+            n_components=n_components - count
+        )
+
         reconstruction.reconstruct_texture(
-            input_image,  # src image
-            input_image,  # dst image
+            input_image_copy,  # src image
+            input_image_copy,  # dst image
             texture_model,
-            input_points,  # shape points input
-            mean_points,  # shape points mean
+            input_points_copy,  # shape points input
+            mean_points  # shape points mean
         )

+        input_points_copy.get_scaled_points(input_image.shape)
+        input_points_copy.draw_triangles(image=input_image_copy, show_points=False)
+
         dst = reconstruction.get_texture(
             mean_points, texture_model.mean_values
         )

-        cv2.imshow('original', input_points.get_image())
-        cv2.imshow('reconstructed', input_image)
+        cv2.imshow('original', input_image)
+        cv2.imshow('reconstructed', input_image_copy)
         cv2.imshow('main face', dst)

         k = cv2.waitKey(0) & 0xFF
@@ -229,6 +244,8 @@ def show_reconstruction(args):
         if k == 27:
             break

+        count += 2
+
     cv2.destroyAllWindows()
...
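The copies at the top of the loop matter: `reconstruct_shape` and `reconstruct_texture` write into their arguments, so without a fresh `input_image.copy()` and `copy.deepcopy(input_points)` on every pass, each iteration would reconstruct on top of already-modified data. A minimal sketch of the underlying aliasing pitfall with numpy:

```python
import numpy as np

image = np.zeros((4, 4), dtype=np.uint8)

alias = image         # same underlying buffer, not a copy
alias += 10
print(image[0, 0])    # 10 -- the "original" changed as well

fresh = image.copy()  # independent buffer
fresh += 10
print(image[0, 0], fresh[0, 0])  # 10 20
```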
@@ -153,12 +153,19 @@ def reconstruct(feature_vector, Vt, mean_values, n_components=None):
     """
+    # if n_components is None, use all components.
     if n_components is None:
         n_components = Vt.shape[1]

+    # create the zero-mean vector; PCA reconstruction always needs it, and
+    # we may only alter the data in zero-mean form.
     zm = feature_vector - mean_values
+
+    # project the zero-mean vector onto the Vt bases (with potentially
+    # fewer dimensions than the original).
     yk = np.dot(Vt[:n_components], zm.T)
+
+    # project back to the original space and add the mean_values again.
     return np.dot(Vt[:n_components].T, yk) + mean_values
...
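A small worked example of this truncated reconstruction, assuming `Vt` comes from an SVD of the zero-mean data (rows are the principal axes, ordered by explained variance). The reconstruction error shrinks as more components are kept, which is exactly the knob this commit exposes:

```python
import numpy as np

rng = np.random.RandomState(42)
X = rng.rand(100, 6)                     # 100 samples, 6 features

mean_values = X.mean(axis=0)
_, _, Vt = np.linalg.svd(X - mean_values, full_matrices=False)

x = X[0]                                 # feature vector to reconstruct
for n_components in (1, 3, 6):
    zm = x - mean_values                 # zero mean
    yk = np.dot(Vt[:n_components], zm.T)
    x_rec = np.dot(Vt[:n_components].T, yk) + mean_values
    # error decreases as n_components grows; all 6 components are exact
    print(n_components, np.linalg.norm(x - x_rec))
```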
@@ -78,45 +78,79 @@ def get_texture(Points, flattened_texture):
     return np.asarray(flattened_texture, np.uint8).reshape((h_slice, w_slice, 3))

+def reconstruct_shape(image, points, shape_model, n_components=None):
+    input_points = points.get_points()
+    mean_points = shape_model.mean_values
+
+    reconstructed = pca.reconstruct(
+        input_points,
+        shape_model.Vt,
+        mean_points,
+        n_components=n_components
+    )
+
+    points.normalized_flattened_points_list = reconstructed
+
 def reconstruct_texture(src_image, dst_image, texture_model, src_points, dst_points):
     """
     Reconstruct the texture given the src and dst images.

     Args:
-        src_points(aam.AAMPoints)
-        dst_points(aam.AAMPoints)
+        src_image(ndarray): numpy / OpenCV image in BGR
+        dst_image(ndarray): numpy / OpenCV image, may be None; if None we
+        create an image just as big as the input image, but with a black
+        background.
+        texture_model(PCAModel): The PCAModel that holds the information we
+        need to reconstruct the image, see the pca module. Make one by
+        doing this (see PCAModel for how it is stored in a numpy file):
+            texture_model = pca.PCAModel(model_texture_file)
+        src_points(aam.AAMPoints): The AAMPoints object contains the
+        locations of the landmarks on the face that we need to perform
+        piecewise affine warping.
+        dst_points(aam.AAMPoints): The AAMPoints object contains the
+        locations of the landmarks on the face that we need to perform
+        piecewise affine warping.
     """
     Vt = texture_model.Vt
     triangles = texture_model.triangles
     mean_texture = texture_model.mean_values
-    # n_components = texture_model.n_components

-    # S_mean format
     h, w, c = src_image.shape

+    # empty input_texture
     input_texture = np.full((h, w, 3), fill_value=0, dtype=np.uint8)

     points2d_src = src_points.get_scaled_points(src_image.shape)
     points2d_dst = dst_points.get_scaled_points(dst_image.shape)

+    # get the texture from the triangles.
     aam.sample_from_triangles(
         src_image,
         points2d_src,
         points2d_dst,
         triangles,
-        input_texture
+        input_texture  # this will be filled with the texture afterwards.
     )

+    # define the rectangle around the dst_points.
     offset_x, offset_y, w_slice, h_slice = dst_points.get_bounding_box()

+    # cut this region out of the input_texture.
     input_texture = input_texture[offset_y: offset_y + h_slice,
                                   offset_x: offset_x + w_slice].flatten()

-    # Still in S_mean format
+    # perform the PCA reconstruction using the input_texture.
     r_texture = pca.reconstruct(input_texture, Vt, mean_texture)

-    # Make an image from the float data
+    # Make an image from the data; the texture is still of type `float`.
     r_texture = np.asarray(r_texture, np.uint8).reshape((h_slice, w_slice, 3))

-    # subtract the offset
+    # subtract the offset. This is needed because the image is now a small
+    # rectangle around the face that starts at [0, 0], whereas it was first
+    # located at (offset_x, offset_y); we need both rectangles to start at
+    # [0, 0]. Please note that this should be improved to avoid confusion.
     points2d_dst[:, 0] -= offset_x
     points2d_dst[:, 1] -= offset_y
...
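To illustrate the offset subtraction at the end of `reconstruct_texture`: after cropping the bounding box around the face, the landmarks must be expressed in the coordinates of the crop, which starts at [0, 0]. A small sketch with made-up coordinates:

```python
import numpy as np

# Landmarks in full-image coordinates (made-up values).
points2d = np.array([[120, 80], [150, 95], [135, 130]])

# Top-left corner of the bounding box around the landmarks.
offset_x, offset_y = points2d[:, 0].min(), points2d[:, 1].min()

# Shift the points so they index into the cropped rectangle, which
# starts at [0, 0] instead of (offset_x, offset_y).
points2d[:, 0] -= offset_x
points2d[:, 1] -= offset_y

print(points2d)  # [[ 0  0] [30 15] [15 50]]
```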
@@ -4,11 +4,13 @@ import base64
 from glob import glob

 import cv2
+import numpy as np
 from tornado import websocket, web, ioloop, autoreload

 import pca
 from datasets import imm
 from reconstruction import reconstruction
+from settings import logger

 BASE = '../viewer/app'
 FILES_DIR = '../data/'
@@ -19,7 +21,7 @@ FACE_DB = '{}{}'.format(FILES_DIR, FACE_DB_NAME)
 class ImageWebSocketHandler(websocket.WebSocketHandler):
     handlers = {
         'filename': 'handle_return_image',
-        'reconstruction_index': 'handle_return_reconstruction'
+        'reconstruction': 'handle_return_reconstruction'
     }

     def __init__(self, *args, **kwargs):
@@ -57,6 +59,12 @@ class ImageWebSocketHandler(websocket.WebSocketHandler):
     def handle_return_reconstruction(self, message):
         """ Return the reconstruction of the given image """
         image_index = message['reconstruction_index']
+        image_as_background = message.get('background_image', True)
+        shape_components = message.get('shape_components', 58)
+
+        logger.info('using %s shape_components', shape_components)
+
         asf_filename = self.asf[image_index]
         input_points = imm.IMMPoints(filename=asf_filename)
@@ -65,16 +73,29 @@ class ImageWebSocketHandler(websocket.WebSocketHandler):
         mean_points = imm.IMMPoints(points_list=self.shape_model.mean_values)
         mean_points.get_scaled_points(input_image.shape)

-        #TODO This one is VERY SLOW, try to optimize
+        # set dst_image to an empty (black) image if the background is
+        # disabled.
+        if image_as_background is False:
+            h, w, _ = input_image.shape
+            dst_image = np.full((h, w, 3), fill_value=0, dtype=np.uint8)
+        else:
+            dst_image = input_image
+
+        reconstruction.reconstruct_shape(
+            input_image, input_points, self.shape_model,
+            n_components=shape_components
+        )
+
         reconstruction.reconstruct_texture(
             input_image,  # src image
-            input_image,  # dst image
+            dst_image,  # dst image
             self.texture_model,
             input_points,  # shape points input
-            mean_points,  # shape points mean
+            mean_points  # shape points mean
         )

-        _, reconstructed = cv2.imencode('.jpg', input_image)
+        input_points.draw_triangles(show_points=False)
+
+        _, reconstructed = cv2.imencode('.jpg', dst_image)
         reconstructed = base64.b64encode(reconstructed)

         self.write_message(json.dumps({'reconstructed': reconstructed}))
@@ -96,7 +117,7 @@ class ImageWebSocketHandler(websocket.WebSocketHandler):
                 print(msg, e)
                 self.__return_error(msg)

-            handler(message)
+            handler(message[m])

     def on_close(self):
         print("WebSocket closed")
...
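With the handler key renamed to `reconstruction`, a client now sends a nested payload, mirroring what the Ember controller's `getReconstruction` action sends. A minimal client sketch using the third-party `websocket-client` package; the `ws://localhost:8888/ws` URL is an assumption, adjust it to wherever server.py serves the websocket:

```python
import base64
import json

from websocket import create_connection  # pip install websocket-client

ws = create_connection('ws://localhost:8888/ws')  # hypothetical endpoint

# The handler key is now 'reconstruction'; its payload carries the image
# index plus the new background / shape-component options.
ws.send(json.dumps({
    'reconstruction': {
        'reconstruction_index': 0,
        'background_image': False,   # black background instead of the photo
        'shape_components': 30,      # reconstruct with fewer components
    }
}))

response = json.loads(ws.recv())
jpg_bytes = base64.b64decode(response['reconstructed'])

with open('reconstructed.jpg', 'wb') as f:
    f.write(jpg_bytes)
```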
@@ -8,13 +8,10 @@ const ThreeComponent = Ember.Component.extend({
   scene: null,

   willRender() {
-    if (this.scene) {
-      return;
-    }
+    if (this.scene) { return; }

     var scene = new THREE.Scene();
     var gui = new dat.GUI();
-
     var camera = new THREE.PerspectiveCamera(
       75, window.innerWidth / window.innerHeight, 0.1, 1000
     );
@@ -23,10 +20,25 @@ const ThreeComponent = Ember.Component.extend({
     var renderer = new THREE.WebGLRenderer();

+    // the sidebar 'dat-gui' controls
+    var reconstructionControls = {
+      index: 0,
+      shape_components: 58,
+      background_image: true,
+    };
+
+    for(var i = 0; i < 15; i++) {
+      reconstructionControls['shape_eigen_value_' + i] = 0.0;
+    }
+
+    var shapeEigenValueSliders = {};
+
     this.set('scene', scene);
     this.set('camera', camera);
     this.set('renderer', renderer);
     this.set('gui', gui);
+    this.set('controls', reconstructionControls);
+    this.set('shapeEigenValueSliders', shapeEigenValueSliders);

     this.get('store').findAll('face').then((faces) => {
       this.set('faces', faces);
@@ -45,29 +57,47 @@ const ThreeComponent = Ember.Component.extend({
   }),

+  /**
+   * Adds the 'dat-gui' sliders.
+   *
+   * See:
+   * http://learningthreejs.com/blog/2011/08/14/dat-gui-simple-ui-for-demos/
+   */
   addSliders() {
     var self = this;
     var gui = this.get('gui');

-    var obj = {
-      name: "Image filename",
-      index: 0
-    };
-
-    var components = {
-      name: "Components",
-      components: 0
-    };
+    var reconstructionControls = this.get('controls');
+    var shapeEigenValueSliders = this.get('shapeEigenValueSliders');

     var length = this.get('faces').get('length');

-    var imagesSlider = gui.add(obj, "index").min(0).max(
-      length - 1).step(1);
+    var index = gui.add(reconstructionControls, 'index', 0, length - 1);
+    var shape_components = gui.add(reconstructionControls, 'shape_components', 0, 58);
+    var background = gui.add(reconstructionControls, 'background_image');
+
+    var shapeEigenValueControls = gui.addFolder('shape_eigen_values');
+
+    for(var i = 0; i < 15; i++) {
+      shapeEigenValueControls.add(reconstructionControls, 'shape_eigen_value_' + i, 0.0, 10.0);
+    }
+
+    // on index change
+    index.onChange(function(newValue) {
+      // update the image_index, which is on the controller
+      self.set('image_index', parseInt(newValue));
+      self.sendAction('updateIndex', parseInt(newValue));
+    });

-    gui.add(components, "components").min(0).max(length - 1).step(1);
+    background.onChange(function(newValue) {
+      self.sendAction('updateBackground', newValue);
+    });
+
+    shape_components.onChange(function(newValue) {
+      self.sendAction('updateShapeComponents', newValue);
+    });

-    imagesSlider.onChange(function(newValue) {
-      self.set('image_index', newValue);
-      self.sendAction('update', newValue);
+    reconstructionControls.onChange(function(newValue) {
+      console.log(newValue);
     });
   }
 });
...
@@ -10,7 +10,8 @@ export default Ember.Controller.extend({
   image: null,
   image_index: 0,
-  n_components: null,
+  background_image: true,
+  shape_components: null,
   n_images: null,
   reconstructed: null,
@@ -64,7 +65,8 @@ export default Ember.Controller.extend({
     this.set('loading', false);
   },

-  getReconstruction: Ember.observer('image_index', function() {
+  getReconstruction: Ember.observer(
+    'image_index', 'background_image', 'shape_components', function() {
     this.send('getReconstruction');
   }),
@@ -75,7 +77,6 @@ export default Ember.Controller.extend({
   actions: {
     getImage(faceModel) {
       this.set('loading', true);
-
       var filename = faceModel.get('filename');
       const socket = this.get('socketRef');
@@ -86,18 +87,33 @@ export default Ember.Controller.extend({
     getReconstruction() {
       this.set('loading', true);

      const socket = this.get('socketRef');

       socket.send(
-        JSON.stringify({reconstruction_index: this.get('image_index')}
+        JSON.stringify({
+          reconstruction: {
+            reconstruction_index: this.get('image_index'),
+            background_image: this.get('background_image'),
+            shape_components: this.get('shape_components')
+          }
+        }
       ));
     },

     // connects components together
     // handles the update action passed to a component
-    updateComponentConnector(index) {
+    updateIndexComponentConnector(index) {
       this.set('image_index', index);
+    },
+
+    updateBackgroundComponentConnector(showBackground) {
+      this.set('background_image', showBackground);
+    },
+
+    updateShapeComponents(components) {
+      console.log('shape_components', components);
+      this.set('shape_components', components);
     }
   }
 });
 {{# if current_face_filename }}
   <img src='{{current_face_filename}}' alt='missing original'>
+{{else}}
+  <div class="table">
+    <div class="table-cell align-middle">
+      {{fa-icon "spinner" spin=true size='lg'}}
+      Loading..
+    </div>
+  </div>
 {{/if}}
 {{yield}}
@@ -3,25 +3,34 @@
 <div class="container">
   <div class="clearfix">
     <div class="col col-6">
-      {{images-loader image_index=image_index}}
+      {{images-loader image_index=image_index
+          background_image=background_image
+          shape_components=shape_components
+          loading=loading
+      }}
     </div>

     <div class="col col-6">
-      {{#if loading }}
-        <div class="table">
-          <div class="table-cell align-middle">
-            {{fa-icon "spinner" spin=true size='lg'}}
-            Loading..
-          </div>
-        </div>
-      {{else if reconstructed }}
-        <img src='data:image/jpg;base64,{{reconstructed}}'
-          alt='missing image'>
-      {{/if}}
+      <div class="table">
+        <div class="table-cell align-middle">
+          {{#if reconstructed }}
+            <img src='data:image/jpg;base64,{{reconstructed}}'
+              alt='missing image'>
+          {{/if}}
+
+          {{#if loading }}
+            {{fa-icon "spinner" spin=true size='lg'}} Loading..
+          {{/if}}
+        </div>
+      </div>
     </div>
   </div>

   <div class="col col-12 px2">
-    {{three-js-reconstruction update=(action 'updateComponentConnector')}}
+    {{three-js-reconstruction
+        updateIndex=(action 'updateIndexComponentConnector')
+        updateBackground=(action 'updateBackgroundComponentConnector')
+        updateShapeComponents=(action 'updateShapeComponents')
+    }}
   </div>
 </div>